text stringlengths 38 1.54M |
|---|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-09-24 05:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a membership-tier ``status`` field to the user model."""

    dependencies = [
        ('users', '0005_user_token'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='status',
            # New users start on the free tier by default.
            field=models.CharField(choices=[('free', 'Free'), ('bronze', 'Bronze'), ('silver', 'Silver'), ('gold', 'Gold')], default='free', max_length=20),
        ),
    ]
|
from django.shortcuts import render
from django.views import generic
from .models import Author, Blog, Comment
# Create your views here.
def index(request):
    """
    View function for home page site.

    Renders counts of the various models plus the average number of
    comments per blog.
    """
    num_author = Author.objects.count()  # all() is implied
    num_blog = Blog.objects.count()
    num_comment = Comment.objects.count()
    # Guard the division: on a fresh database there are no blogs yet and
    # the original `num_comment / num_blog` raised ZeroDivisionError.
    average_comment = num_comment / num_blog if num_blog else 0
    return render(
        request,
        'blog/index.html',
        {'num_author': num_author,
         'num_blog': num_blog,
         'num_comment': num_comment,
         'average_comment': average_comment}
    )
class BlogListView(generic.ListView):
    # Paginated list of all blogs (default template: blog/blog_list.html).
    model = Blog
    paginate_by = 3
class BlogDetailView(generic.DetailView):
    # Detail page for a single blog (default template: blog/blog_detail.html).
    model = Blog
class AuthorListView(generic.ListView):
    # Paginated list of all authors (default template: blog/author_list.html).
    model = Author
    paginate_by = 3
class AuthorDetailView(generic.DetailView):
    # Detail page for a single author (default template: blog/author_detail.html).
    model = Author
|
def add(x, y):
    """Return the sum of x and y."""
    total = x + y
    return total
def subtract(x, y):
    """Return x minus y."""
    difference = x - y
    return difference
def multiply(x, y):
    """Return the product of x and y."""
    product = x * y
    return product
def divide(x, y):
    """Return x divided by y (true division; ZeroDivisionError if y == 0)."""
    quotient = x / y
    return quotient
def power(x, y):
    """Return x raised to the power y."""
    result = x ** y
    return result
def main():
    """Run the interactive calculator until the user chooses to quit.

    Fixes over the original:
    * ``answerOne = ("y" or "Y")`` evaluated to just ``"y"``, so capital
      Y/N answers were never recognised; accept both cases explicitly.
    * The undefined ``kill()`` call at the end of the loop raised
      NameError on the first fall-through.
    * Division by zero and non-numeric input no longer crash.
    * The five duplicated if/elif branches collapse into a dispatch table.
    """
    # Menu choice -> (display symbol, operation).
    operations = {
        '1': ("+", add),
        '2': ("-", subtract),
        '3': ("*", multiply),
        '4': ("/", divide),
        '5': ("to the power of ", power),
    }
    while True:
        print("Select calculator function please.")
        print("1 = Add")
        print("2 = Subtract")
        print("3 = Multiply")
        print("4 = Divide")
        print("5 = Indicies")
        choice = input("Enter choice 1, 2, 3, 4 or 5: ")
        try:
            num1 = int(input("Enter first number: "))
            num2 = int(input("Enter second number: "))
        except ValueError:
            print("Invalid input")
            num1 = num2 = None
        if num1 is None:
            pass  # invalid numbers; fall through to the again-prompt
        elif choice in operations:
            symbol, func = operations[choice]
            if choice == '4' and num2 == 0:
                print("Cannot divide by zero.")
            else:
                print(num1, symbol, num2, "=", func(num1, num2))
        else:
            print("Invalid input")
        # Single again-prompt, re-asked until a recognisable answer arrives.
        while True:
            again = input("Would you like to use the calculator again? Enter y/n: ")
            if again in ("n", "N"):
                print("Thanks for using!")
                return
            elif again in ("y", "Y"):
                print("Lets do it again..")
                break
            else:
                print("You should enter either \"y\" or \"n\".")


if __name__ == "__main__":
    main()
|
"""
This is a Pure Python module to hyphenate text.
Wilbert Berendsen, March 2008
info@wilbertberendsen.nl
License: LGPL.
"""
import sys
import re
__all__ = ("Hyphenator")
hdcache = {}
parse_hex = re.compile(r'\^{2}([0-9a-f]{2})').sub
parse = re.compile(r'(\d?)(\D?)').findall
def hexrepl(matchObj):
    """Replace a matched ``^^hh`` hex escape with the character it encodes.

    Python 3 fix: ``unichr`` no longer exists; ``chr`` covers the full
    Unicode range (the file already uses the Python 3 ``print()`` function).
    """
    return chr(int(matchObj.group(1), 16))
class parse_alt(object):
    """Factory for pattern weights carrying a non-standard-hyphenation spec.

    ``pat`` is the pattern text and ``alt`` the replacement spec that
    followed '/': ``change[,index,cut]``.  An instance is called once per
    parsed digit (left to right) and tags odd weights with the payload.
    """
    def __init__(self, pat, alt):
        alt = alt.split(',')
        self.change = alt[0]
        if len(alt) > 2:
            self.index = int(alt[1])
            self.cut = int(alt[2]) + 1
        else:
            # Default: the replacement spans the whole pattern, measured
            # with digits and dots stripped out.
            self.index = 1
            self.cut = len(re.sub(r'[\d\.]', '', pat)) + 1
        if pat.startswith('.'):
            self.index += 1

    def __call__(self, val):
        # Stateful: each call advances one position through the pattern,
        # so calls must happen in left-to-right order.
        self.index -= 1
        val = int(val)
        if val & 1:
            # Odd weights mark hyphenation points; attach the alt payload
            # (dint is defined just below in this module).
            return dint(val, (self.change, self.index, self.cut))
        else:
            return val
class dint(int):
    """An ``int`` subclass carrying an optional payload in ``.data``.

    The payload is either given explicitly via ``data`` or copied from a
    truthy ``ref`` that is itself a dint.
    """

    def __new__(cls, value, data=None, ref=None):
        instance = int.__new__(cls, value)
        # A truthy dint reference wins; otherwise fall back to ``data``
        # (which may legitimately be None).
        if ref and type(ref) == dint:
            instance.data = ref.data
        else:
            instance.data = data
        return instance
class Hyph_dict(object):
    """Parses a hyphenation pattern file and answers position queries.

    Python 3 fix: the original iterated a text-mode file and then called
    ``pat.decode(charset)`` — ``str`` has no ``.decode()`` on Python 3.
    The file is now read in binary mode and each line is decoded with the
    charset named on its first line.  The file handle is also managed with
    ``with`` instead of a bare open/close pair.
    """

    def __init__(self, filename):
        self.patterns = {}
        with open(filename, 'rb') as f:
            # First line names the charset, e.g. "charset ISO8859-1".
            # assumes the charset name itself is ASCII — TODO confirm
            charset = f.readline().decode('ascii').strip()
            if charset.startswith('charset '):
                charset = charset[8:].strip()
            for pat in f:
                pat = pat.decode(charset).strip()
                if not pat or pat[0] == '%':
                    continue  # blank line or comment
                pat = parse_hex(hexrepl, pat)
                if '/' in pat:
                    # Pattern with a non-standard hyphenation alternative.
                    pat, alt = pat.split('/', 1)
                    factory = parse_alt(pat, alt)
                else:
                    factory = int
                tag, value = zip(*[(s, factory(i or "0")) for i, s in parse(pat)])
                if max(value) == 0:
                    continue  # pattern carries no hyphenation information
                # Trim zero weights at both ends; store the leading offset.
                start, end = 0, len(value)
                while not value[start]:
                    start += 1
                while not value[end - 1]:
                    end -= 1
                self.patterns[''.join(tag)] = start, value[start:end]
        self.cache = {}
        self.maxlen = max(map(len, self.patterns.keys()))

    def positions(self, word):
        """Return a (cached) list of hyphenation positions for ``word``."""
        word = word.lower()
        points = self.cache.get(word)
        if points is None:
            prepWord = '.%s.' % word
            res = [0] * (len(prepWord) + 1)
            # Slide every substring up to maxlen over the dotted word and
            # keep the maximum weight seen at each position.
            for i in range(len(prepWord) - 1):
                for j in range(i + 1, min(i + self.maxlen, len(prepWord)) + 1):
                    p = self.patterns.get(prepWord[i:j])
                    if p:
                        offset, value = p
                        s = slice(i + offset, i + offset + len(value))
                        res[s] = list(map(max, value, res[s]))
            # Odd weights mark valid break points; shift by the leading dot.
            points = [dint(i - 1, ref=r) for i, r in enumerate(res) if r % 2]
            self.cache[word] = points
        return points
class Hyphenator(object):
    """Hyphenates words with a pattern dictionary.

    ``left``/``right`` are the minimum characters to keep before/after a
    hyphen; ``cache=True`` reuses a previously parsed dictionary for the
    same filename.

    Python 3 fix: the original tested ``isinstance(word, str)`` and called
    ``word.decode('latin1')`` — on Python 3 only ``bytes`` needs decoding
    and ``str.decode`` does not exist.
    """

    def __init__(self, filename, left=2, right=2, cache=True):
        self.left = left
        self.right = right
        if not cache or filename not in hdcache:
            hdcache[filename] = Hyph_dict(filename)
        self.hd = hdcache[filename]

    def positions(self, word):
        """Hyphenation points that respect the left/right margins."""
        right = len(word) - self.right
        return [i for i in self.hd.positions(word) if self.left <= i <= right]

    def iterate(self, word):
        """Yield (head, tail) pairs, rightmost hyphenation point first."""
        if isinstance(word, bytes):
            word = word.decode('latin1')
        for p in reversed(self.positions(word)):
            if p.data:
                # Non-standard hyphenation: splice a replacement at the break.
                change, index, cut = p.data
                if word.isupper():
                    change = change.upper()
                c1, c2 = change.split('=')
                yield word[:p + index] + c1, c2 + word[p + index + cut:]
            else:
                yield word[:p], word[p:]

    def wrap(self, word, width, hyphen='-'):
        """Return (head + hyphen, tail) with head fitting ``width``, or None."""
        width -= len(hyphen)
        for w1, w2 in self.iterate(word):
            if len(w1) <= width:
                return w1 + hyphen, w2

    def inserted(self, word, hyphen='-'):
        """Return ``word`` with ``hyphen`` inserted at every break point."""
        if isinstance(word, bytes):
            word = word.decode('latin1')
        l = list(word)
        for p in reversed(self.positions(word)):
            if p.data:
                change, index, cut = p.data
                if word.isupper():
                    change = change.upper()
                l[p + index: p + index + cut] = change.replace('=', hyphen)
            else:
                l.insert(p, hyphen)
        return ''.join(l)

    __call__ = iterate
if __name__ == "__main__":
    # Usage: hyphenator.py <pattern-file> <word>
    dict_file = sys.argv[1]
    # sys.argv entries are already str on Python 3; the original
    # ``.decode('latin1')`` raised AttributeError.
    word = sys.argv[2]
    h = Hyphenator(dict_file, left=2, right=2)
    for i in h(word):
        print(i)
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Hodor voting contest: level 3
Script that votes exactly 'votes_total' times for a given ID.
Using the 'requests' module, this task requires to send as POST the ID, the
'holdthedoor' fields for which to properly tally a vote, and a key field from
the form hidden from view.
It generates a valid User-Agent from the user_agent module and pushes it into
the HTTP request headers to validate the vote. Only Windows users are
able to vote.
It also includes the referred URL to the HTTP requests header as a way to
validate the vote.
For deciphering the captcha, it uses the pytesseract and the PIL modules.
It also uses the 're' module searching the number of votes from a given ID,
and 'islice' from 'itertools' for the regex to find only 2 numeric instances
(ID and number of votes).
"""
import requests
import user_agent
import re
from itertools import islice
import pytesseract
from PIL import Image
# Total number of votes to cast for the chosen ID.
votes_total = 1024
url = "http://158.69.76.135/level3.php"
captcha_url = "http://158.69.76.135/captcha.php"
# Only Windows user agents are accepted by the server, so generate one.
user_ag = user_agent.generate_user_agent(os='win')
# The referer header must point back at the voting page for a vote to count.
headers = {'user-agent': user_ag, 'referer': url}
session = requests.Session()
session.headers.update(headers)
# Text present in the response page when a vote was tallied successfully.
success_txt = "Hold the Door challenge - Level 3"
search_votes = session.get(url, headers=headers)
if search_votes.status_code != 200:
    print("Couldn't connect to the website. Try again later.")
    exit(1)
content = search_votes.text
print("Welcome to the Hodor voting contest!!")
# Prompt until a valid non-negative integer ID (or 'e' to quit) is entered.
is_number = False
while not is_number:
    print("Please insert your ID for placing your {:d} votes, \
or 'e' to exit.".format(votes_total))
    vote_id = input('ID: ')
    if vote_id == "e":
        exit(0)
    try:
        vote_id = int(vote_id)
        if vote_id < 0:
            print("Please insert a valid ID, or 'e' to exit.")
        else:
            payload = {'id': vote_id, 'holdthedoor': 'Submit'}
            is_number = True
    except ValueError:
        # int() raises ValueError on non-numeric text; the original caught
        # TypeError, so bad input crashed the script instead of re-prompting.
        print("Please insert a valid ID.")
# Look up the current vote count for this ID in the fetched page HTML.
try:
    vote_row = "<tr>\n <td>\n{:d} </td>".format(vote_id)
    index_vote = content.index(vote_row)
    # The first two numbers after the row are the ID and its vote count.
    slice_int_row = islice(re.finditer(r'\d+', content[index_vote:]), 2)
    vote_id, votes = map(int, (num.group() for num in slice_int_row))
except ValueError:
    # content.index raises ValueError when the ID's row is absent.
    print("ID non existent. Creating new ID...")
    votes = 0
print("{:d} initial votes for ID {:d}.".format(votes, vote_id))
if votes >= votes_total:
    print("Can't vote more than {:d} times in this contest. \
Select another ID.".format(votes_total))
    exit(1)
# Cast votes until the running total reaches votes_total.
votes_ok, votes_fail = 0, 0
while (votes + votes_ok < votes_total):
    try:
        # Re-fetch the page each round: the hidden form key changes per vote.
        search_key = session.get(url, headers=headers)
        content = search_key.text
        index_key = content.index("value=") + 7
        # presumably the key is always 40 characters long — verify against the form
        payload['key'] = content[index_key:index_key + 40]
    except ValueError:
        print("Couldn't fetch key.")
        votes_fail += 1
    # Download the captcha image and decode it with OCR (pytesseract).
    captcha_resp = session.get(captcha_url, headers=headers)
    with open('captcha.png', 'wb') as captcha:
        captcha.write(captcha_resp.content)
    captcha = Image.open('captcha.png')
    captcha_txt = pytesseract.image_to_string(captcha)
    # assumes the captcha answer is the first 4 OCR'd characters — TODO confirm
    payload['captcha'] = captcha_txt[:4]
    try:
        post = session.post(url, data=payload)
        if post.status_code == 200 and success_txt in post.text:
            votes_ok += 1
            print("+1 vote. Total votes = {:d}.".format(votes + votes_ok))
        else:
            print("Couldn't vote. Trying again...")
            votes_fail += 1
    except Exception as exc:
        print(exc)
        votes_fail += 1
print("""Finished voting!
{:d} votes added with a total of {:d}.
Total votes failed: {:d}
Thanks for playing!!""".format(votes_ok, votes + votes_ok, votes_fail))
|
from django.urls import path,include,re_path
from rest_framework.routers import DefaultRouter
from quiz import views
from quiz.views import SaveUsersAnswer,UsersAnswerSerializer,Resultview
router = DefaultRouter()
router.register('Quiz', views.QuizViewSet)
router.register('Questiondetail', views.QuestionDetailViewset)

urlpatterns = [
    path('', include(router.urls)),
    path('save-answer/<pk>/', views.SaveUsersAnswer.as_view()),
    # The original line mixed a ' and a " quote (SyntaxError) and passed a
    # regex to path(); regex routes must use re_path (already imported above).
    re_path(r'^quizzes/(?P<slug>[\w\-]+)/result/$', views.Resultview.as_view()),
]
|
# -*- coding: utf-8 -*-
"""
@Time    : 2020/6/6 11:13
@Author  : QDY
@FileName: 25. Reverse Nodes in k-Group (hard).py
Given a linked list, reverse its nodes k at a time and return the modified list.
k is a positive integer, less than or equal to the length of the list.
If the total number of nodes is not a multiple of k, the remaining nodes at the
end keep their original order.
Example:
Given this list: 1->2->3->4->5
For k = 2, return: 2->1->4->3->5
For k = 3, return: 3->2->1->4->5
Notes:
Your algorithm may only use constant extra space.
You may not simply change the values inside the nodes; actual node swapping is required.
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    """Reverse a singly linked list k nodes at a time (LeetCode 25)."""

    def len_k(self, head, k):
        """Return True when the list starting at ``head`` has at least k nodes."""
        node, seen = head, 0
        while node and seen < k:
            node = node.next
            seen += 1
        return seen == k

    def reverseKGroup(self, head, k):
        """Reverse every full group of k nodes; a shorter tail stays as-is."""
        # A group shorter than k keeps its original order.
        if not head or not self.len_k(head, k):
            return head
        # Reverse the first k nodes in place.
        group_head, runner = head, head.next
        for _ in range(k - 1):
            following = runner.next
            runner.next = group_head
            group_head = runner
            runner = following
        # ``head`` is now the last node of the reversed group; attach the
        # recursively processed remainder behind it.
        head.next = self.reverseKGroup(runner, k)
        return group_head
|
#First test evolution function. Oddly, performs only a little worse than Runge-Kutta.
def Euler_Step(x, xres, dydx, Y):
    """One explicit (forward) Euler step.

    Returns ``(delta_Y, step)`` where ``delta_Y = dydx(x, xres, Y) * xres``
    and ``step`` is the unchanged step size ``xres``.
    """
    slope = dydx(x, xres, Y)
    return slope * xres, xres
def Ystep_euler(g,mx,sigmav,xstep=1e-2,Deltax=1e-4):
    """Evolve the comoving abundance Y through freeze-out with forward Euler.

    g: internal degrees of freedom; mx: particle mass; sigmav: callable
    thermally-averaged cross-section as a function of x = mx/T.
    Returns (Yinf, x_final): the late-time abundance and where stepping ended.
    NOTE(review): relies on module-level Yeq, neq, brentq, DeltaCond,
    Delta_Y_Condition, Hub, quad and Yevolution_integrand, none of which are
    visible here — behavior of those helpers is assumed, not verified.
    """
    # Equilibrium abundance / number density as functions of x = mx/T.
    Yeqset = lambda x: Yeq(mx,mx/x,g,0,0)
    neqset = lambda x: neq(mx,mx/x,g,0,0)
    #Find a point shortly before freezeout
    xstart=brentq(DeltaCond,1,100,args=(mx,Deltax,g,sigmav,Delta_Y_Condition,))
    Y = Yeqset(xstart)
    xi=xstart
    xmax=xstart+20
    # Boltzmann equation right-hand side, evaluated at the stepped point.
    dydx = lambda x, xstep, Y: -Yeqset(x+xstep)/x*neqset(x+xstep)*sigmav(x+xstep) /Hub(mx/(x+xstep))*((Y/Yeqset(x+xstep))**2-1)
    while True:
        # Stop once Y has clearly departed from equilibrium (factor 2.5)
        # or after a fixed window past the starting point.
        if Y>2.5*Yeqset(xi) or xi>xmax:
            break
        deltay,xstep = Euler_Step(xi,xstep,dydx,Y)
        #print(xi,Y,Yeqset(xi),deltay)
        Y+=deltay
        xi+=xstep
    # Integrate the remaining evolution from xi to large x.
    Yinf_val,Yinf_error = quad(Yevolution_integrand,xi,1000,epsabs=1e-300,epsrel=1e-4,limit=400,args=(mx,sigmav,))
    if Yinf_val < 100*Yinf_error:
        # Integration error dominates the value; warn rather than abort.
        print("Error in Ystep integration")
        print(Yinf_val,Yinf_error)
        print(xi,mx)
    Yinf = 1.0/(1.0/(2.5*Yeqset(xi))+Yinf_val)
    return Yinf,xi+xstep
def Ysearch_euler(g,alpha_D,mv,mx,tol=1e-3,xstep=1e-2,Deltax=1e-4):
    """Iteratively tune the coupling kappa until the relic density matches OmegaCDM.

    Returns (kappa, Omega, xf, Y) for the converged coupling.
    NOTE(review): relies on module-level sigmav, relic_density_sigma,
    conversion, Omega_from_Y and OmegaCDM — not visible here; convergence of
    the kappa update rule is assumed, not verified.
    """
    # Initial guess for kappa from the canonical relic cross-section.
    kappa = math.sqrt(relic_density_sigma/sigmav(mx/20.0,alpha_D,1.0,mv,mx)/conversion)
    #print(kappa)
    while True:
        sig = lambda x: sigmav(mx/x,alpha_D,kappa,mv,mx)
        Y,xf = Ystep_euler(g,mx,sig,xstep,Deltax)
        Omega = Omega_from_Y(Y,mx)
        if abs(OmegaCDM-Omega)<tol:
            break
        #print(kappa,Omega)
        # Rescale kappa^2 by the ratio of obtained to target density
        # (Omega roughly scales as 1/sigma ~ 1/kappa^2).
        kappa = math.sqrt(kappa**2*Omega/OmegaCDM)
    return kappa,Omega,xf,Y
|
from django.contrib import admin

# PEP 8: all imports belong at the top of the file; the original interleaved
# one import per register() call.
from .models import Car, CarOwner, DrivingLicense, OwnerShip

# Make each model editable in the Django admin site.
admin.site.register(CarOwner)
admin.site.register(Car)
admin.site.register(OwnerShip)
admin.site.register(DrivingLicense)
|
from numpy import*
def group_digits(digits):
    """Insert a '.' after every 3-character group of ``digits``.

    Groups are taken left to right; the final group (1-3 characters)
    gets no trailing separator, e.g. "1234567" -> "123.456.7".
    """
    # The original while-loop's boundary test (i < len(n) - 4) dropped the
    # separator before a final 1-character group ("1234567" -> "123.4567");
    # joining the 3-character slices handles every length uniformly.
    groups = [digits[i:i + 3] for i in range(0, len(digits), 3)]
    return ".".join(groups)


if __name__ == "__main__":
    # Read the string from stdin, group it, and print the result.
    n = input("")
    print(group_digits(n))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 6 10:38:45 2023
@author: george
"""
#%matplotlib qt
from matplotlib import pyplot as plt
import numpy as np
import skimage.io as skio
from skimage.filters import threshold_otsu
from skimage.morphology import closing, square, remove_small_objects, binary_dilation, disk
from skimage.measure import label, points_in_poly, regionprops, find_contours
import glob, os
from tqdm import tqdm
import pandas as pd
'''https://scikit-image.org/docs/stable/auto_examples/index.html'''
def cropHighIntensityRegions(fileName, thresh = 250, autoThresh = False, minSize = 90, plotResult = False, addNoise = False, noiseScale = 1.0, dilationRadius = 6):
    '''Takes a tiff stack as input and returns a copy with high intensity
    regions cropped out (masked pixels zeroed, or replaced with Gaussian
    noise when ``addNoise`` is True).'''
    #load image stack
    A = skio.imread(fileName)
    #get max intensity projection over the stack
    maxIntensity = np.max(A, axis=0)
    #threshold to binary image
    if autoThresh:
        thresh = threshold_otsu(maxIntensity) # automatic detection of thresh
    binary = closing(maxIntensity > thresh, square(6))
    #dilate bright objects
    dilate = binary_dilation(binary,disk(dilationRadius, dtype=bool))
    # label image regions and remove small objects
    label_image = label(dilate)
    largeRegions = remove_small_objects(label_image, min_size = minSize)
    #generate mask and crop image
    mask = largeRegions > 0
    # Work on a copy: the original bound ``cropped_stack = A`` and wrote
    # through it, silently mutating the loaded stack even though the
    # docstring promises a copy is returned.
    cropped_stack = A.copy()
    for img in cropped_stack:
        if addNoise:
            #get mean and sd of this frame's intensities
            mu = np.mean(img) * noiseScale
            sigma = np.std(img)
            #fill masked pixels with matched Gaussian noise
            randomArray = np.random.normal(mu, sigma, mask.shape)
            img[mask] = randomArray[mask]
        else:
            img[mask] = 0
    #plot results
    if plotResult:
        fig1, axs1 = plt.subplots(1, 5, figsize=(20, 5))
        axs1[0].imshow(A[0])
        axs1[0].set_title('1st frame')
        axs1[1].imshow(maxIntensity)
        axs1[1].set_title('max Intensity')
        axs1[2].imshow(binary)
        axs1[2].set_title('thresholded >{}'.format(thresh))
        axs1[3].imshow(largeRegions)
        axs1[3].set_title('objects >{}'.format(minSize))
        axs1[4].imshow(cropped_stack[0])
        axs1[4].set_title('1st frame after crop')
    return cropped_stack
def add_within_region_column(labels, df):
    '''Add a boolean ``within_region`` column marking points of ``df``
    (integer pixel coordinates in columns ``x`` and ``y``) that fall inside
    any non-zero labelled region of ``labels``.'''
    df['within_region'] = False
    for lbl in np.unique(labels):
        # Label 0 is background, not a region.
        if lbl == 0:
            continue
        in_region = (labels == lbl)[df['y'], df['x']]
        df['within_region'] = df['within_region'] | in_region
    return df
def removeLocsFromHighIntensityRegions(fileName, locs_fileName, thresh = 250, autoThresh = False, minSize = 90, dilationRadius = 6, pixelSize = 108, plotResult=False):
    '''Takes a tiff stack and a locs CSV as inputs — returns a new locs
    DataFrame with localizations falling in high intensity regions removed.

    pixelSize: nm per pixel, used to convert the locs' nm coordinates.
    '''
    #load image stack
    A = skio.imread(fileName)
    #get max intensity projection over the stack
    maxIntensity = np.max(A, axis=0)
    #threshold to binary image
    if autoThresh:
        thresh = threshold_otsu(maxIntensity) # automatic detection of thresh
    binary = closing(maxIntensity > thresh, square(6))
    #dilate bright objects
    dilate = binary_dilation(binary,disk(dilationRadius, dtype=bool))
    # label image regions and remove small objects
    label_image = label(dilate)
    largeRegions = remove_small_objects(label_image, min_size = minSize)
    #load locs
    # assumes the CSV has 'x [nm]' / 'y [nm]' columns — ThunderSTORM-style; TODO confirm
    df = pd.read_csv(locs_fileName)
    #add xy positions in pixels, truncated to integer pixel indices
    df['x'] = df['x [nm]'] / pixelSize
    df['y'] = df['y [nm]'] / pixelSize
    df['x'] = df['x'].astype(int)
    df['y'] = df['y'].astype(int)
    #add column indicating if a point lies in any of the regions
    df = add_within_region_column(largeRegions, df)
    #filter df into kept and discarded localizations
    df_excluded = df[df['within_region']]
    df_included = df[~df['within_region']]
    #plot results: kept locs in white, removed locs in red
    if plotResult:
        plt.scatter(df_included['x'],df_included['y'], color='white', s=0.1)
        plt.scatter(df_excluded['x'],df_excluded['y'], color='red', s=0.1)
        plt.imshow(A[0])
    # Drop the helper columns before returning.
    return df_included.drop(['x','y','within_region'], axis=1)
if __name__ == '__main__':
    ##### RUN ANALYSIS
    path = r'/Users/george/Data/thresholdTest'
    #get tiff folder list (recursive)
    tiffList = glob.glob(path + '/**/*.tif', recursive = True)
    for file in tqdm(tiffList):
        '''############### crop autofluorescence from image stack ###############'''
        # =============================================================================
        #         cropped_stack = cropHighIntensityRegions(file)
        #         #save new image stack
        #         saveName = file.split('.')[0] + '_crop.tif'
        #         skio.imsave(saveName, cropped_stack)
        # =============================================================================
        '''###### remove localizations from regions of high fluorescence from locs file ##########'''
        # assumes each .tif has a sibling '<name>_locs.csv' file — TODO confirm
        locsFile = os.path.splitext(file)[0].split('.tiff')[0] + '_locs.csv'
        locsDF = removeLocsFromHighIntensityRegions(file, locsFile, plotResult=True)
        #save filtered locs next to the source file
        saveName = os.path.splitext(file)[0].split('.tiff')[0] + '_highFluroRemoved_locs.csv'
        locsDF.to_csv(saveName, index=None)
|
#encoding:utf-8
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
# Abstract base class for the people participating in the congress
class Persona(models.Model):
    """Abstract base model holding the personal data shared by all participants."""
    username = models.CharField(max_length=30, unique=False)
    password = models.CharField(max_length=60, unique=False)
    correo = models.EmailField(unique=False)
    nombre = models.CharField(max_length=30, unique=False)
    apellido = models.CharField(max_length=30, unique=False)
    inst_afil = models.CharField(max_length=30, unique=False, verbose_name='Institución afiliada')

    class Meta:
        # No table for Persona itself; each concrete subclass enforces
        # uniqueness of the (username, correo) pair.
        abstract = True
        unique_together = ("username", "correo")
# Author model. Inherits from Persona
class Autor(Persona):
    """Paper author; adds the country of origin to the shared Persona fields."""
    pais = models.CharField(max_length=20)

    def __unicode__(self):
        # NOTE(review): __unicode__ is only honoured on Python 2 / old Django;
        # on Python 3 this should be __str__ — confirm the target version.
        return str(self.correo)
# Committee member model. Inherits from Persona
class MiembroComite(Persona):
    """Committee member; flags whether this member chairs the committee."""
    es_presidente = models.BooleanField()

    def __unicode__(self):
        # NOTE(review): __unicode__ is only honoured on Python 2 / old Django;
        # on Python 3 this should be __str__ — confirm the target version.
        return str(self.correo)
# Registered attendee model. Inherits from Persona
class Inscrito(Persona):
    """Registered attendee; adds optional URL and phone number."""
    url = models.CharField(blank=True, null=True, max_length=100, unique=False)
    telefono = models.BigIntegerField(unique=False)

    def __unicode__(self):
        # NOTE(review): __unicode__ is only honoured on Python 2 / old Django;
        # on Python 3 this should be __str__ — confirm the target version.
        return str(self.correo)
|
import json
from pathlib import Path
from card2vec.feature_extraction.data_reading import read_cards, read_decks
def test_read_cards(fixtures_dir):
    """read_cards should reproduce the fixture JSON with int keys restored."""
    # Use a context manager: the original left the opened fixture file
    # handle unclosed.
    with Path(fixtures_dir, "read", "cards.json").open() as fh:
        expected = json.load(fh)
    # JSON object keys are strings; the loader is expected to use int keys.
    expected["int_to_card"] = {int(k): v for k, v in expected["int_to_card"].items()}
    actual = read_cards(Path(fixtures_dir, "raw"))
    assert expected == actual
def test_read_decks(fixtures_dir):
    """read_decks should reproduce the decks fixture given the cards fixture."""
    # Use context managers: the original left both fixture file handles unclosed.
    with Path(fixtures_dir, "read", "decks.json").open() as fh:
        expected = json.load(fh)
    with Path(fixtures_dir, "read", "cards.json").open() as fh:
        card_info = json.load(fh)
    decks = read_decks(
        Path(fixtures_dir, "raw"),
        variants=card_info["variants"],
        card_to_int=card_info["card_to_int"],
    )
    assert decks == expected
|
# Generated by Django 2.2 on 2020-11-14 02:04
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the ``sift_risk_score`` field added by migration 0068."""

    dependencies = [
        ('paper', '0068_paper_sift_risk_score'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='paper',
            name='sift_risk_score',
        ),
    ]
|
# Read the input string (Romanian prompt: "Enter the character string:").
S=str(input("Introduceti sirul de caractere:"))
# Count occurrences of the character 'A'.
a=S.count('A')
print('Numarul de caract, "A" in sir: ', a)
# Replace every 'A' with '*'.
b=S.replace("A", "*")
print('Substituirea caracterului A prin caract. *', b)
# Remove every 'B' via translate (mapping its code point to None).
c=S.translate({ord('B'):None})
print('Sirul fara de caract. B: ', c)
# Count occurrences of the syllable "MA".
d=S.count("MA")
print("Numarul de silabe MA in sir: ", d)
# Replace "MA" with "TA".
e=S.replace("MA", "TA")
print("Sirul unde MA este subst. de TA: ", e)
# Remove the syllable "TO".
f=S.replace("TO",'')
print("Sirul fara de Silaba TO: ", f)
# Print the reversed string.
print("Forma inversa a sirului: ", S[::-1])
import csv

# Use a raw string for the Windows path: in a normal literal the backslashes
# are escape introducers and '\U' is even a SyntaxError on Python 3.
with open(r'C:\Users\Venric\Desktop/productsales.csv') as csvfile:
    # Materialise the reader so rows survive past the file's closure.
    mpg = list(csv.DictReader(csvfile))

print(mpg)
# Print the PREDICT column of every row (modernised from Python 2 print statements).
for adict in mpg:
    print(adict['PREDICT'])
# -*- coding: utf-8 -*-
import os
import sys
from functools import partial
import maya.api.OpenMaya as om
from Qt import QtWidgets, _loadUi
from hz.naming_api import NamingAPI
from lgt_import_tool.core import assign_shader, core, utils
from lgt_import_tool.gui import basic_gui
class PreviewWidget(QtWidgets.QWidget):
    """Maya lighting-import tool window (Python 2 / Maya environment).

    Loads anim/layout alembic caches published for the current lgt scene,
    lists them, and assigns the matching shaders on Apply.
    """

    def __init__(self, parent=None):
        super(PreviewWidget, self).__init__(parent)
        self.setWindowTitle('Lighting import tool')
        self.current_file_path = core.get_current_scene_file()
        # Bail out early if the open scene does not follow the naming scheme.
        try:
            NamingAPI.parser(self.current_file_path)
        except RuntimeError:
            om.MGlobal.displayWarning('Please open a lgt file that give a legal path.')
            return
        # Load the Qt Designer layout that defines all widgets referenced below.
        ui_file = os.path.join(os.path.dirname(__file__), 'win.ui')
        _loadUi(ui_file, self)
        self.init_ui()
        self.init_layout()
        self.init_connectiond()
        self.get_abc_from_version()
        self.get_lay_abc_from_version()

    def init_ui(self):
        """Populate the version combo boxes for the anim and layout steps."""
        self.set_version_combobox(self.comboBox_anim_version, 'anim')
        self.set_version_combobox(self.comboBox_lay_version, 'layout')

    def set_version_combobox(self, combo_box, step):
        """Fill ``combo_box`` with all published versions for ``step``."""
        all_versions = utils.get_all_published_versions(self.current_file_path, step)
        combo_box.clear()
        combo_box.addItems(all_versions)

    @staticmethod
    def get_radio_button_options(frame_widget):
        """Return the object name of the checked radio button in ``frame_widget``."""
        for child in frame_widget.children():
            if not isinstance(child, QtWidgets.QRadioButton):
                continue
            if child.isChecked():
                return str(child.objectName())

    @staticmethod
    def get_check_box_options(check_box):
        return check_box.isChecked()

    @staticmethod
    def get_combobox_options(combobox):
        return combobox.currentText()

    @staticmethod
    def get_line_edit_options(line_edit):
        return line_edit.text()

    def widget_enable(self):
        """Enable each source panel only when its checkbox is ticked."""
        from_anim = self.checkBox_from_anim.isChecked()
        self.Qwidget_from_anim.setEnabled(from_anim)
        from_layout = self.checkBox_from_layout.isChecked()
        self.Qwidget_from_layout.setEnabled(from_layout)

    def load_mode_opt(self, QFrame, QWidget_from_version, QWidget_from_path):
        """Toggle between the version-picker and path-entry widgets."""
        self.switch_bool = self.get_radio_button_options(QFrame).endswith('from_version')
        QWidget_from_version.setEnabled(self.switch_bool)
        QWidget_from_path.setEnabled(not self.switch_bool)

    def init_layout(self):
        """Create and attach the two cache list widgets."""
        self.listWidget_anim_abc = basic_gui.ListWidget()
        self.verticalLayout_anim_items.addWidget(self.listWidget_anim_abc)
        self.listWidget_layout_abc = basic_gui.ListWidget()
        self.verticalLayout_lauout_items.addWidget(self.listWidget_layout_abc)

    def init_connectiond(self):
        """Wire widget signals to their handlers (name kept for compatibility)."""
        self.checkBox_from_anim.clicked.connect(self.widget_enable)
        self.checkBox_from_layout.clicked.connect(self.widget_enable)
        self.radioButton_from_version.clicked.connect(partial(self.load_mode_opt, self.QFrame_opt,
                                                              self.QWidget_from_version,
                                                              self.QWidget_from_path))
        self.radioButton_from_path.clicked.connect(partial(self.load_mode_opt, self.QFrame_opt,
                                                           self.QWidget_from_version,
                                                           self.QWidget_from_path))
        self.radioButton_lay_from_version.clicked.connect(partial(self.load_mode_opt, self.QFrame_lay_opt,
                                                                  self.QWidget_lay_from_version,
                                                                  self.QWidget_lay_from_path))
        self.radioButton_lay_from_path.clicked.connect(partial(self.load_mode_opt, self.QFrame_lay_opt,
                                                               self.QWidget_lay_from_version,
                                                               self.QWidget_lay_from_path))
        self.comboBox_anim_version.currentIndexChanged.connect(self.get_abc_from_version)
        self.lineEdit_anim_path.textEdited.connect(self.get_anim_abc_from_path)
        self.comboBox_lay_version.currentIndexChanged.connect(self.get_lay_abc_from_version)
        self.lineEdit_lay_path.textEdited.connect(self.get_lay_abc_from_path)
        self.pushButton_apply.clicked.connect(self.run)
        self.pushButton_cancle.clicked.connect(self.close)

    def get_abc(self, full_path, listWidget_abc):
        """List every .abc in ``full_path`` with shader metadata resolved from
        the sibling rigging_info.json."""
        listWidget_abc.clear_item()
        abc_list = filter(lambda x: os.path.splitext(x)[-1] == '.abc', os.listdir(full_path))
        rig_info_file = os.path.join(full_path, 'rigging_info.json')
        if os.path.isfile(rig_info_file):
            rigging_dict = utils.read_in_json(os.path.join(full_path, 'rigging_info.json'))
            for abc in abc_list:
                abc_path = os.path.join(str(full_path), abc.encode())
                # assumes cache names look like '<...>_<asset>.abc' — TODO confirm
                asset_name = abc.rsplit('.', 1)[0].rsplit('_', 1)[-1]
                rigging_path = rigging_dict[asset_name].values()[0]
                # Swap the task to 'shd' to locate the latest shader publish.
                naming = NamingAPI.parser(rigging_path)
                naming.task = 'shd'
                latest_shd_path = os.path.dirname(naming.get_latest_version())
                metadata = {'abc_name': abc, 'abc_path': abc_path,
                            'namespace': abc.rsplit('.', 1)[0],
                            'asset_name': asset_name,
                            'shader_path': os.path.join(latest_shd_path, '{}.ma'.format(asset_name)),
                            'json_path': os.path.join(latest_shd_path, '{}.json'.format(asset_name))
                            }
                listWidget_abc.add_item(basic_gui.MotionItem(abc, enable=True), metadata)

    def get_abc_from_version(self):
        """Refresh the anim cache list from the selected published version."""
        version = self.get_combobox_options(self.comboBox_anim_version)
        file_path = utils.get_certain_version(self.current_file_path, version, 'anim')
        full_path = os.path.dirname(file_path)
        if os.path.exists(full_path):
            self.get_abc(full_path, self.listWidget_anim_abc)
            print 'Get abc cache from: ', full_path

    def get_lay_abc_from_version(self):
        """Refresh the layout cache list from the selected published version."""
        version = self.get_combobox_options(self.comboBox_lay_version)
        file_path = utils.get_certain_version(self.current_file_path, version, 'layout')
        full_path = os.path.dirname(file_path)
        if os.path.exists(full_path):
            self.get_abc(full_path, self.listWidget_layout_abc)
            print 'Get abc cache from: ', full_path

    def get_anim_abc_from_path(self):
        """Refresh the anim cache list from a user-typed directory."""
        full_path = self.get_line_edit_options(self.lineEdit_anim_path)
        if os.path.exists(full_path):
            self.get_abc(full_path, self.listWidget_anim_abc)

    def get_lay_abc_from_path(self):
        """Refresh the layout cache list from a user-typed directory."""
        full_path = self.get_line_edit_options(self.lineEdit_lay_path)
        if os.path.exists(full_path):
            self.get_abc(full_path, self.listWidget_layout_abc)

    def run(self):
        """Import the checked caches and assign shaders, one namespace each."""
        print 'run'
        abc_widgets = []
        if self.checkBox_from_anim.isChecked():
            abc_widgets.append(self.listWidget_anim_abc)
        if self.checkBox_from_layout.isChecked():
            abc_widgets.append(self.listWidget_layout_abc)
        # ``num`` suffixes the namespace to keep repeated imports unique.
        num = 0
        for listWidget_abc in abc_widgets:
            for each in listWidget_abc:
                metadata = each.metadata
                load_abc = each.widget.abc_checked
                load_texture = each.widget.texture_checked
                assign_shader.main(metadata.get('abc_path'),
                                   metadata.get('json_path'),
                                   metadata.get('shader_path'),
                                   metadata.get('namespace')+'_{}'.format(num), load_abc=load_abc,
                                   load_texture=load_texture)
                num += 1
if __name__ == '__main__':
    # Standalone launch (outside Maya) for quick GUI testing.
    app = QtWidgets.QApplication(sys.argv)
    aa = PreviewWidget()
    aa.show()
    sys.exit(app.exec_())
|
# http://www.hackerrank.com/contests/python-tutorial/challenges/itertools-combinations
from itertools import combinations
# Input: a string and a max size k on one line, e.g. "HACK 2".
s, k = input().split()
# For each size 1..k print all combinations, one per line; sorting each
# tuple's letters and then the whole list yields lexicographic order.
for i in range(1, int(k)+1):
    print('\n'.join(sorted(map(lambda tup: ''.join(sorted(tup)), combinations(s, i)))))
|
import unittest
import os, sys
sim_path = os.path.abspath(os.path.join('..',))
sys.path.append(sim_path)
from simulators.tennis import tennis_match, tennis_set, tennis_game
import pandas as pd
class TennisSimulatorTest(unittest.TestCase):
    """Unit tests for the tennis Match/Set/Game simulators.

    ``spw`` throughout is the serve-win probability per player, so
    [1.0, 0.0] makes player 0 win every point deterministically.
    """

    ### Tests for Game class ###
    def test_game_minimum_amount_of_points(self):
        # A certain server should win a game in the minimum four points.
        spw = [1.0,0.0]
        m = tennis_match.Match(spw, 3, False)
        g = tennis_game.Game(m,spw, False)
        winner = g.play()
        assert g.points_won[0] == 4
        assert g.points_won[1] == 0
        assert g.points_played() == 4
        assert winner == 0
        assert g.points_to_win == 4

    def test_game_has_player_won(self):
        # player_won is a pure predicate on the points_won tally.
        m = tennis_match.Match(None, 3, False)
        g = tennis_game.Game(m,None, False)
        g.points_won = [6,4]
        assert g.player_won(1) == False
        assert g.player_won(0) == True

    def test_game_tiebreak_rules(self):
        # Tiebreak games require six points instead of four.
        spw = [1.0,0.0]
        tiebreak = True
        m = tennis_match.Match(spw, 3, False)
        g = tennis_game.Game(m,spw, tiebreak)
        winner = g.play()
        assert winner == 0
        assert g.points_won[0] == 6
        assert g.points_won[1] == 0
        assert g.points_to_win == 6

    def test_game_tiebreak_switches_server(self):
        # In a tiebreak the serve alternates, splitting serves 3/3.
        spw = [1.0,0.0]
        tiebreak = True
        m = tennis_match.Match(spw, 3, False)
        g = tennis_game.Game(m,spw, tiebreak)
        winner = g.play()
        assert m.total_serves == [3,3]
        assert g.points_won == [6,0]

    ### Tests for Set class ###
    def test_set_min_amount_of_games(self):
        # A dominant player should take a set 6-0 in six games.
        spw = [1.0, 0.0]
        m = tennis_match.Match(spw, 3, False)
        s = tennis_set.Set(m,False,False,spw)
        winner = s.play()
        assert winner == 0
        assert s.games_won == [6,0]
        assert len(s.games_played) == 6

    def test_set_player_b_wins(self):
        # Symmetric check: player 1 dominant wins 0-6.
        spw = [0.0, 1.0]
        m = tennis_match.Match(spw, 3, False)
        s = tennis_set.Set(m,False,False,spw)
        winner = s.play()
        assert winner == 1
        assert s.games_won == [0,6]

    def test_set_tiebreaker_is_set_on_match(self):
        # Reaching 6-6 must flip the match's tiebreaker flag.
        spw = [0.0, 1.0]
        m = tennis_match.Match(spw, 3, False)
        s = tennis_set.Set(m,False,False,spw)
        s.games_won = [6,6]
        winner = s.play()
        assert m.tiebreaker == True

    ### Tests for Match class ###
    def test_match_min_amount_of_sets_best_of_3(self):
        # Best of 3: two straight sets, 24 service points won.
        spw = [1.0, 0.0]
        m = tennis_match.Match(spw,3, False)
        winner = m.play()
        assert winner == 0
        assert m.sets_won == [2,0]
        assert m.total_serves_won == [24,0]
        assert m.total_serves == [24,24]

    def test_match_min_amount_of_sets_best_of_5(self):
        # Best of 5: three straight sets, 36 service points won.
        spw = [1.0, 0.0]
        m = tennis_match.Match(spw,5, False)
        winner = m.play()
        assert winner == 0
        assert m.sets_won == [3,0]
        assert m.total_serves_won == [36,0]
        assert m.total_serves == [36,36]

    def test_match_min_player_b_wins(self):
        # Symmetric best-of-5 check for player 1.
        spw = [0.0, 1.0]
        m = tennis_match.Match(spw,5, False)
        winner = m.play()
        assert winner == 1
        assert m.sets_won == [0,3]
        assert m.total_serves_won == [0,36]
        assert m.total_serves == [36,36]

    def test_match_tiebreaker_final_set(self):
        # The final-set tiebreak option must be stored and not alter a sweep.
        spw = [0.0,1.0]
        tiebreaker_final_set = True
        m = tennis_match.Match(spw, 5, tiebreaker_final_set)
        winner = m.play()
        assert m.tiebreaker_final_set == True
        assert winner == 1
        assert m.sets_won == [0,3]
        assert m.total_serves_won == [0.0,36.0]

    def test_match_switch_server(self):
        # switch_server alternates between player 0 and player 1.
        m = tennis_match.Match([0,0], 3, False)
        m.switch_server()
        assert m.get_server() == 1
        m.switch_server()
        assert m.get_server() == 0
        m.switch_server()
        assert m.get_server() == 1

    ### Tests for STATISTICS ###
    # def test_statistics_median_game_length(self):
    #     assert False
    # def tsst_statistics_average_game_length(self):
    #     assert False
    # def test_statistics_win_game_probability(self):
    #     assert False
    # def test_statistics_win_set_probability(self):
    #     assert False
    # def test_statistics_win_match_probability(self):
    #     assert False
    # def test_statistics_serve_win_probability_both_players(self):
    #     assert False
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-26 02:18
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters the hard-coded datetime defaults on
    # cart_date / order_date.
    #
    # NOTE(review): the defaults are literal timestamps captured when the
    # migration was generated — the usual symptom of `default=datetime.now()`
    # (called, not passed as a callable) on the model field. Presumably the
    # model should use `django.utils.timezone.now` instead — confirm on the
    # model; this historical migration itself is normally left untouched.

    dependencies = [
        ('order', '0004_auto_20160426_1058'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cart',
            name='cart_date',
            field=models.DateTimeField(default=datetime.datetime(2016, 4, 26, 11, 18, 11, 485239)),
        ),
        migrations.AlterField(
            model_name='order',
            name='order_date',
            field=models.DateTimeField(default=datetime.datetime(2016, 4, 26, 11, 18, 11, 486266)),
        ),
    ]
|
# -*- coding: utf-8 -*-
__author__ = 'chuter'
import util as qa_util
from weixin.message.handler.message_handler import MessageHandler
from core import emotion
from weixin.message import generator
from watchdog.utils import watchdog_warning, watchdog_error
"""
默认的消息处理,对任何消息均回复自动回复内容
"""
class AutoQaDefaultMessageHandler(MessageHandler):
    """Default QA message handler: replies with auto-reply content."""

    def handle(self, context, is_from_simulator=False):
        # Delegate straight to the auto-reply builder.
        return self._get_auto_reply_response(
            context.user_profile, context.message, is_from_simulator)

    def _get_auto_reply_response(self, user_profile, message, is_from_simulator):
        # Only the customer-service trigger keyword gets a reply;
        # anything else yields None (no response).
        if message.content != 'weizoom_coustomer':
            return None
        from_weixin_user = self._get_from_weixin_user(message)
        return generator.get_counstomer_service_text(
            from_weixin_user.username, message.toUserName)
|
import bcrypt

from django.contrib import messages
from django.shortcuts import render, redirect

from .models import User
# Create your views here.
def index(request):
    """Render the landing page."""
    return render(request, 'index.html')
def register(request):
    """Validate the POSTed registration form; create the user and log them in.

    On validation errors, flash each message and redirect back to '/'.
    On success, store a bcrypt-hashed password, remember the new user's id
    in the session, and redirect to the dashboard.
    """
    errors = User.objects.registration_validator(request.POST)
    if errors:
        for value in errors.values():
            # BUG FIX: was `messages.error(request.value)` — attribute access
            # on the request instead of passing the message text.
            messages.error(request, value)
        return redirect('/')
    # BUG FIX: bcrypt.hashpw requires bytes, so encode the raw password.
    hashed_password = bcrypt.hashpw(
        request.POST['password'].encode(), bcrypt.gensalt()).decode()
    new_user = User.objects.create(
        first_name=request.POST['first_name'],
        last_name=request.POST['last_name'],
        email=request.POST['email'],
        password=hashed_password,
    )
    request.session['uuid'] = new_user.id
    return redirect('/dashboard')
def login(request):
    """Log a user in when the POSTed email/password matches a stored user."""
    user_list = User.objects.filter(email=request.POST['email'])
    if user_list:
        user = user_list[0]
        if bcrypt.checkpw(request.POST['password'].encode(), user.password.encode()):
            # BUG FIX: was `user.index` — the model has no such attribute;
            # store the primary key, matching what register() stores.
            request.session['uuid'] = user.id
            return redirect('/dashboard')
    return redirect('/')
def logout(request):
    """Clear the logged-in user from the session and return to the home page."""
    # pop() instead of `del`: logging out with no active session must not
    # raise KeyError.
    request.session.pop('uuid', None)
    return redirect('/')
def dashboard(request):
    """Render the dashboard for the currently logged-in user."""
    # BUG FIX: the dict literal contained a stray bare `""` entry, which is
    # a syntax error.
    context = {
        "logged_in_user": User.objects.get(id=request.session['uuid']),
    }
    return render(request, 'dashboard.html', context)
# priority Queue using minheap here highest priority given to lowest value
class minheap:
    """Priority queue backed by a 0-based binary min-heap.

    The smallest value has the highest priority. Children of index i live at
    2i+1 and 2i+2, so the parent of index i is (i-1)//2.
    """

    def __init__(self, arr=None):
        """Build the heap from `arr`; prompt on stdin when no values given.

        BUG FIX: `pq` was a *class* attribute shared by every instance, and
        the default argument was a mutable list; both are now per-instance.
        """
        self.pq = []  # heap array
        if arr:
            for value in arr:
                self.add(int(value))
        else:
            for token in input("Enter the elements for priority queue:").split():
                self.add(int(token))

    def swap(self, x, y):
        """Exchange the elements at indices x and y."""
        self.pq[x], self.pq[y] = self.pq[y], self.pq[x]

    def minheapify(self, i):
        """Sift the element at index i down to restore the heap. O(log n)."""
        p = self.pq
        l = 2 * i + 1
        r = 2 * i + 2
        smallest = i
        if l < len(p) and p[l] < p[smallest]:
            smallest = l
        if r < len(p) and p[r] < p[smallest]:
            smallest = r
        if smallest != i:
            self.swap(smallest, i)
            self.minheapify(smallest)

    def printheap(self):
        """Display the heap in its array form."""
        print(self.pq)

    def add(self, k):
        """Insert k, sifting it up to its position. O(log n).

        BUG FIX: the parent of index i is (i - 1) // 2 for a 0-based heap
        (children at 2i+1 / 2i+2); the old code used i // 2, which could
        corrupt the heap order.
        """
        i = len(self.pq)
        self.pq.append(k)
        while i != 0 and self.pq[(i - 1) // 2] > k:
            self.swap(i, (i - 1) // 2)
            i = (i - 1) // 2

    def peek(self):
        """Return (without removing) the smallest element; None if empty. O(1)."""
        if len(self.pq) == 0:
            print("The priority queue is underflow")
            return
        return self.pq[0]

    def remove(self):
        """Pop and return the smallest element; None if empty. O(log n)."""
        if len(self.pq) == 0:
            print("The priority queue is underflow")
            return
        self.swap(0, len(self.pq) - 1)
        c = self.pq.pop()
        self.minheapify(0)  # restore heap property from the root
        return c
# The running body: interactive menu-driven driver for the minheap demo.
a = minheap([20,-1,2,3,67,100,50]) # creating an object of class minheap
print("The given priority queue")
a.printheap()
while(True): # Program Running while loop
    # Menu: 1 display, 2 add, 3 peek, 4 remove, 5 quit.
    print("Choose any of the following the opperation:")
    print("1.To display the priority queue ")
    print("2. To add an element to queue ")
    print("3.To peek the highest priority element")
    print("4.To remove the highest prority element")
    print("5. To exit\n\n")
    # NOTE(review): non-numeric input raises ValueError here — confirm
    # whether that is acceptable for this demo.
    t=int(input("Enter Here: "))
    if(t==1):
        a.printheap()
    elif(t==2):
        k=int(input("Enter the element to be enter: "))
        a.add(k)
    elif(t==3):
        print("The highest priority element is: ",a.peek())
    elif(t==4):
        print("Removed highest priority element is: ",a.remove())
    elif(t==5):
        print("Exiting the program.")
        break
    else:
        print("Invalid input.Please try again.")
    print("\n\n")
|
# Generated by Django 3.1.7 on 2021-04-06 06:55
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema for the `school` app: School, Class
    # (room number bounded 0-999) and Student (age/marks bounded 1-99),
    # plus Student -> Class and Class -> School foreign keys.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Class',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('room_no', models.IntegerField(default=0, validators=[django.core.validators.MaxValueValidator(999), django.core.validators.MinValueValidator(0)])),
            ],
        ),
        migrations.CreateModel(
            name='School',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, unique=True)),
                ('address', models.CharField(max_length=255)),
                ('phone', models.CharField(max_length=12)),
            ],
        ),
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('age', models.IntegerField(default=0, validators=[django.core.validators.MaxValueValidator(99), django.core.validators.MinValueValidator(1)])),
                ('marks', models.IntegerField(default=0, validators=[django.core.validators.MaxValueValidator(99), django.core.validators.MinValueValidator(1)])),
                # NOTE(review): named `room_no` but it is a FK to Class — confirm intended naming.
                ('room_no', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='school.class')),
            ],
        ),
        migrations.AddField(
            model_name='class',
            name='school_name',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='school.school'),
        ),
    ]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os, sys
from winsys._compat import unittest
import win32api
import win32security
from winsys.tests import utils as testutils
from winsys import accounts
@unittest.skipUnless(testutils.i_am_admin(), "These tests must be run as Administrator")
class TestAccounts(unittest.TestCase):
    """Tests for winsys.accounts principal lookup and impersonation."""

    def setUp(self):
        # A fresh local user and group for every test.
        testutils.create_user("alice", "Passw0rd")
        testutils.create_group("winsys")
        testutils.add_user_to_group("alice", "winsys")

    def tearDown(self):
        testutils.delete_user("alice")
        testutils.delete_group("winsys")

    def test_principal_None(self):
        assert accounts.principal(None) is None

    def test_principal_sid(self):
        everyone, domain, type = win32security.LookupAccountName(None, "Everyone")
        assert accounts.principal(everyone).pyobject() == everyone

    def test_principal_Principal(self):
        everyone, domain, type = win32security.LookupAccountName(None, "Everyone")
        principal = accounts.Principal(everyone)
        assert accounts.principal(principal) is principal

    def test_principal_string(self):
        everyone, domain, type = win32security.LookupAccountName(None, "Everyone")
        assert accounts.principal("Everyone") == everyone

    def test_principal_invalid(self):
        with self.assertRaises(accounts.exc.x_not_found):
            accounts.principal(object)

    def test_context(self):
        # BUG FIX: this was named `text_context`, so unittest never
        # collected or ran it.
        assert win32api.GetUserName() != "alice"
        with accounts.principal("alice").impersonate("Passw0rd"):
            assert win32api.GetUserName() == "alice"
        # Impersonation must be dropped when the context exits.
        assert win32api.GetUserName() != "alice"
if __name__ == "__main__":
    unittest.main()
    # NOTE(review): unittest.main() calls sys.exit() by default, so this
    # pause is normally unreachable; `raw_input` also only exists on
    # Python 2 — confirm the intended interpreter / exit behaviour.
    if sys.stdout.isatty(): raw_input("Press enter...")
|
from SMU_device import SMUDevice
from PUND.PUND_waveform import create_waveform
from PUND.plot_fig import *
# VISA resource address of the source-measure unit.
instrument_id = 'GPIB0::24::INSTR'
smu = SMUDevice(instrument_id)
smu.connect()

"""
params is a dictionary with key parameters for a PUND sweep.
Vf - first voltage
Vs - second voltage
rise - number of measurements during the rise
hold - number of measurements to be done while maintaining the applied voltage
space - number of measurements between pulses
n_cycles - number of PUND cycles
Time required for a single measurement is approximately 1 ms. It is the limit for this SMU. The only way to control
rise/hold/space time is to change the number of measurements.
"""

params = {
    'Vf': -3,
    'Vs': 3,
    'rise': 20,
    'hold': 10,
    'space': 10,
    'n_cycles': 2,
}
# presumably a 200 um x 200 um pad; 1 um^2 = 1e-8 cm^2 — confirm geometry.
area = 200 ** 2 * 1e-8  # contact area in cm^2

# Build the full drive waveform by repeating one PUND cycle n_cycles times.
waveform = []
for _ in range(params['n_cycles']):
    waveform = waveform + create_waveform(params)  # function "create_waveform() creates voltage list for given params"

# Configure current sensing, run the list sweep, then read back the traces.
smu.setup_sense_subsystem(compl=1e-5, rang=1e-5, nplc=0.1)
smu.custom_list_sweep(waveform, delay=0)
smu.enable_output()
smu.measure()
smu.wait()
smu.disable_output()
smu.check_for_errors()
data = smu.get_traces()
plot_fig(data, params, area, save=False)  # one can set save=True and call save_data() function
|
#!/usr/bin/env python
from fuzzyui import fuzzyui
# Candidate strings to fuzzy-search over.
items = ["baseball", "football", "soccer", "programming", "cooking", "sleeping"]
# NOTE(review): defined but never used — confirm whether fuzzyui.find()
# accepts an initial query string.
initial_search = ''
fui = fuzzyui()
# Opens the interactive finder and returns the user's selection.
found = fui.find(items)
print(found)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-12-03 11:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated: introduces PolicyVersion tracking — a new
    # PolicyVersion model, version counters on Dependant, and a
    # Policy -> PolicyVersion foreign key.

    dependencies = [
        ('system', '0042_insured_insured_ec_number'),
    ]

    operations = [
        migrations.CreateModel(
            name='PolicyVersion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('version_date', models.DateTimeField(auto_now_add=True)),
                ('renewal_version', models.IntegerField(default=0)),
                ('endorsement_version', models.IntegerField(default=0)),
            ],
        ),
        migrations.AddField(
            model_name='dependant',
            name='endorsement_version',
            field=models.IntegerField(default=0),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='dependant',
            name='policy_version',
            field=models.IntegerField(default=0),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='policy',
            name='policy_version',
            # NOTE(review): default=0 on a FK assumes a PolicyVersion with
            # pk 0 exists when migrating existing rows — confirm.
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='system.PolicyVersion'),
            preserve_default=False,
        ),
    ]
|
import requests
import json
import string
from pmisimilarity import *
from cui_methods import *
from nltk.corpus import stopwords
from threading import Thread
import nltk
nltk.download('stopwords')
# ---- module-level initialisation (runs once, at import) ----

# Construct the CUI matrix and store as a global variable.
# Maps each CUI to its precomputed nearest CUIs/distances.
print("--------------------------------------------------")
print("Loading CUI Distance File...")
cuiDistanceFile = "data/cui2vec_precomputed.bin"
matrix = readCuiDistance(cuiDistanceFile)
print("CUI Distance File Loaded.")

# Construct the CUI to Title dict and store as a global variable.
# Lookup table from CUI string to its human-readable term.
print("--------------------------------------------------")
print("Loading CUI To Term Dictionary...")
titleFile = "data/cuis.csv"
cui2titleDict = readCuiTitle(titleFile)
print("CUI To Term Dictionary Loaded.")

# Load the config file: Elasticsearch credentials and Waitress server options.
print("--------------------------------------------------")
print("Loading Config File...")
with open('config.json') as configFile:
    config = json.load(configFile)
    ESConfig = config["ES"]
    waitressConfig = config["Waitress"]
print("Config File Loaded.")
print("--------------------------------------------------")
class Index():
    """Candidate terms mined from Elasticsearch documents matching a word.

    Retrieves up to `pool` documents matching `word`, splits their
    title+abstract text into a de-duplicated, stopword-free candidate term
    list (self.docs), and prepares one result slot per term for the
    concurrent PMI-similarity ranking.
    """

    def __init__(self, word, pool):
        """word: the user's input word; pool: how many documents to fetch."""
        # Elasticsearch connection settings from config.json.
        self.username = ESConfig["username"]
        self.secret = ESConfig["secret"]
        self.preurl = ESConfig["url"]
        self.indexName = ESConfig["index_name"]
        # Clamp the pool size: 0 means "use the configured default", and
        # anything above the configured maximum is capped.
        # BUG FIX: ints were compared with `is` (object identity), which
        # only works by accident for small CPython integers.
        if pool == 0:
            self.pool = ESConfig["default_pool"]
        elif pool > ESConfig["max_pool"]:
            self.pool = ESConfig["max_pool"]
        else:
            self.pool = pool
        # Fetch the matching documents from the ES index.
        url = self.preurl + "/" + self.indexName + "/_search"
        param = {
            "size": self.pool,
            "q": word
        }
        response = requests.get(url, params=param, auth=(self.username, self.secret))
        self.res = json.loads(response.content)
        self.docs = []
        # Assemble the candidate term list from the retrieved documents.
        self.createDocuments()
        # Total number of documents in the index (empty query = match all).
        self.D = self.getDocumentCount([])
        # One result slot per candidate term, filled concurrently later.
        self.wordsRanking = [{} for _ in self.docs]

    def getDocumentCount(self, query):
        """Return the number of documents matching `query`.

        query: a list of 0, 1 or 2 words. One word counts its matches; two
        words count the AND-intersection; an empty list counts everything.
        The returned count can be 0.
        """
        url = self.preurl + "/" + self.indexName + "/_count"
        # BUG FIX: `is` -> `==` for the length comparisons.
        if len(query) == 1:
            response = requests.get(url, params={"q": query}, auth=(self.username, self.secret))
        elif len(query) == 2:
            param = {
                "q": "(" + str(query[0]) + ")AND(" + str(query[1]) + ")"
            }
            response = requests.get(url, params=param, auth=(self.username, self.secret))
        else:
            response = requests.get(url, auth=(self.username, self.secret))
        results = json.loads(response.content)
        return results["count"]

    def createDocuments(self):
        """Build self.docs from the raw ES response.

        Concatenates each hit's title and abstract, lowercases and strips
        punctuation, then drops empty strings, English stopwords and
        duplicates, preserving first-seen order.
        """
        docs = self.res["hits"]["hits"]
        translator = str.maketrans('', '', string.punctuation)
        # Hoisted out of the loop: a set makes each stopword test O(1)
        # instead of scanning the stopword list per term.
        stop_words = set(stopwords.words('english'))
        seen = set()
        finalRes = []
        for item in docs:
            # PERF FIX: the old code re-joined and re-tokenised the text of
            # *all* documents seen so far on every iteration (accidentally
            # quadratic); each document is now processed once. The final,
            # de-duplicated term order is unchanged.
            title = item["_source"]["title"].translate(translator).lower().strip()
            abstract = item["_source"]["abstract"].translate(translator).lower().strip()
            for term in (title + " " + abstract).split(" "):
                if term and term not in stop_words and term not in seen:
                    seen.add(term)
                    finalRes.append(term)
        self.docs = finalRes

    def pmiSimilarity(self, s1, s2, index):
        """Store the PMI similarity of s1 and s2 in slot `index`.

        s1: the user's input word; s2: a candidate term from self.docs.
        Makes three ES count requests, so callers run these concurrently
        (network latency dominates).
        """
        f1 = self.getDocumentCount([s1])
        f2 = self.getDocumentCount([s2])
        f12 = self.getDocumentCount([s1, s2])
        score = calculateSimilarity(self.D, f1, f2, f12)
        self.wordsRanking[index] = {
            "term": s2,
            "score": float("{0:.3f}".format(score))
        }

    def getESWordsRanking(self, word, size, pool):
        """Rank every candidate term by PMI similarity to `word`.

        Returns up to `size` {"term", "score"} dicts, highest score first.
        """
        threads = []
        # One thread per candidate term to overlap the network requests.
        for i in range(len(self.docs)):
            process = Thread(target=self.pmiSimilarity, args=[word, self.docs[i], i])
            process.start()
            threads.append(process)
        for process in threads:
            process.join()
        try:
            totalResult = sorted(self.wordsRanking, key=lambda i: i["score"], reverse=True)
        except (KeyError, TypeError):
            # A slot was left unfilled; retry the whole ranking.
            # BUG FIX: the retry's result used to be discarded (no return),
            # so the caller always got an empty list after a failure.
            return self.getESWordsRanking(word, size, pool)
        # Clamp the requested return size (0 -> default, cap at max).
        # BUG FIX: `is` -> `==` for the int comparison.
        if size == 0:
            size = ESConfig["default_retSize"]
        elif size > ESConfig["max_retSize"]:
            size = ESConfig["max_retSize"]
        return totalResult[:size]
"""
# A CUI 2 Vec module that adopt CUI and distance measure to get the similar words as alternatives
# word is the input word from user
# size is the returned number of words
# we are using a pre-computed cui distance bin file with compressed size
# the bin file contains 10 cuis with minimum distance for each cui
# so the max return size is 10 in this case
# if larger return size is required, need to regenerate the bin file
"""
class CUI2Vec():
    """Alternative-term lookup via precomputed cui2vec distances.

    The precomputed bin file stores the 10 nearest CUIs per CUI, so the
    maximum return size is 10 unless the file is regenerated.
    """

    def __init__(self, word):
        """Resolve `word` to its candidate CUI via the MetaMap service."""
        response = requests.post('http://ielab-metamap.uqcloud.net/mm/candidates', data=word)
        content = json.loads(response.content)
        # The service may return no candidates for a word.
        # BUG FIX: the old code only assigned self.wordCUI when the local
        # value compared `is not ""` (an identity check on an interned
        # literal), which could leave the attribute unset entirely.
        try:
            self.wordCUI = content[0]["CandidateCUI"]
        except Exception:
            self.wordCUI = ""

    def findAlternativeTerms(self, size):
        """Return up to `size` alternative terms for this word's CUI.

        Unknown or empty CUIs simply yield an empty list.
        """
        # Clamp the requested size (0 -> default, cap at configured max).
        # BUG FIX: `is` -> `==` for the int comparison.
        if size == 0:
            self.size = ESConfig["default_retSize"]
        elif size > ESConfig["max_retSize"]:
            self.size = ESConfig["max_retSize"]
        else:
            self.size = size
        # Guard against CUIs that cannot be converted to an int key.
        try:
            intWordCUI = cui2int(self.wordCUI)
        except Exception:
            intWordCUI = 0
        # Guard against CUIs absent from the precomputed matrix.
        try:
            alternatives = matrix[intWordCUI]
        except Exception:
            alternatives = ""
        if alternatives != "":
            return convertCUI2Term(alternatives, self.size)
        return []
"""
# from a list of cuis, convert the cuis to terms by looking in the pre-loaded dict
# /data/cuis.csv
# size is the returned size
"""
def convertCUI2Term(alternatives, size):
    """Convert a {cui: score} dict to a ranked list of {"score", "term"} dicts.

    CUIs missing from the preloaded cui2titleDict are skipped. Returns at
    most `size` entries, highest score first.
    """
    infos = []
    for key in alternatives.keys():
        # Unknown CUIs have no displayable term and are dropped.
        try:
            term = cui2titleDict[str(key)]
        except Exception:
            term = ""
        # BUG FIX: was `term is not ""` — an identity check on a string
        # literal; compare by value.
        if term != "":
            infos.append({
                "score": alternatives[key],
                "term": term
            })
    # Sort by score, descending.
    try:
        rankedInfo = sorted(infos, key=lambda i: i["score"], reverse=True)
    except TypeError:
        # Un-comparable score types: the old code recursed here and threw
        # the result away, then returned [] — keep the empty result but
        # drop the useless (and potentially unbounded) retry.
        rankedInfo = []
    return rankedInfo[:size]
def minmax(res, size):
    """Min-max normalize scores per source and merge into one ranked list.

    res: {source: [ {"score": float, ...}, ... ]}. Mutated in place: each
         item's "score" is rescaled to [0, 1] within its source (rounded to
         3 decimals) and a "source" key is added.
    size: maximum number of items to return.

    Returns the merged items sorted by normalized score, highest first.
    """
    # Per-source score range (only for sources with at least one item).
    scoreDict = {}
    for k in res:
        if res[k]:
            scores = [item["score"] for item in res[k]]
            scoreDict[k] = {"max": max(scores), "min": min(scores)}
    # Normalize in place and merge all sources.
    merged = []
    for ky in scoreDict:
        lo = scoreDict[ky]["min"]
        span = scoreDict[ky]["max"] - lo
        for term in res[ky]:
            # BUG FIX: guard the zero-span case (all scores in a source
            # equal), which used to raise ZeroDivisionError.
            term["score"] = (term["score"] - lo) / span if span else 0.0
            term["source"] = ky
        merged = merged + res[ky]
    try:
        ranked = sorted(merged, key=lambda i: i["score"], reverse=True)
    except TypeError:
        # Un-comparable scores: the old code recursed here and discarded
        # the result, effectively returning []; keep that, minus the retry.
        ranked = []
    for t in ranked:
        t["score"] = float("{0:.3f}".format(t["score"]))
    if size < len(ranked):
        return ranked[0:size]
    return ranked
|
#!/usr/bin/env python3
"""
:Author: Anemone Xu
:Email: anemone95@qq.com
:copyright: (c) 2019 by Anemone Xu.
:license: Apache 2.0, see LICENSE for more details.
"""
import preprocessing
import settings
import numpy
from _theano.tokenizer import *
def load_data(data_dir: str, label_dir: str,
              tokenizer: Tokenizer = None,
              valid_portion: float = 0.0,
              test_portion: float = 0.1,
              update_dict=True) -> (list, list, list):
    """Load slices and labels, tokenize them, and split train/valid/test.

    data_dir: directory of preprocessed sample slices.
    label_dir: directory of labels; None means "no labels" (label_dict=None).
    tokenizer: encoder for the samples.
        NOTE(review): when update_dict is True, tokenizer must not be None —
        confirm that callers always pass one.
    valid_portion / test_portion: fractions carved off for validation/test.
    Returns (train, valid, test), each an (x_list, y_list) pair.
    """
    if label_dir is None:
        label_dict = None
    else:
        label_dict = preprocessing.load_label(label_dir)
    samples = []
    labels = []
    # Materialize every (slice, label) pair produced by the generator.
    for _slice, label in preprocessing.get_data_generator(preprocessing.preprocessing, data_dir, label_dict)():
        samples.append(_slice)
        labels.append(label)
        # FIXME for debug
        # if len(labels) > 100:
        #     break
    if update_dict:
        tokenizer.update_dict(samples)
    samples = list(map(lambda e: tokenizer.encode(e), samples))
    # split all set into test set: shuffle indices, last test_portion is test.
    all_set_x, all_set_y = samples, labels
    n_samples = len(all_set_x)
    sidx = numpy.random.permutation(n_samples)
    n_train = int(numpy.round(n_samples * (1. - test_portion)))
    test_set_x = [all_set_x[s] for s in sidx[n_train:]]
    test_set_y = [all_set_y[s] for s in sidx[n_train:]]
    train_set_x = [all_set_x[s] for s in sidx[:n_train]]
    train_set_y = [all_set_y[s] for s in sidx[:n_train]]
    # split train set into valid set (same index-shuffling scheme).
    # TODO: reshuffle before every training run.
    n_samples = len(train_set_x)
    sidx = numpy.random.permutation(n_samples)
    n_train = int(numpy.round(n_samples * (1. - valid_portion)))
    valid_set_x = [train_set_x[s] for s in sidx[n_train:]]
    valid_set_y = [train_set_y[s] for s in sidx[n_train:]]
    real_train_set_x = [train_set_x[s] for s in sidx[:n_train]]
    real_train_set_y = [train_set_y[s] for s in sidx[:n_train]]
    # TODO: is sort_by_len actually useful here? would it affect results?
    train = (real_train_set_x, real_train_set_y)
    valid = (valid_set_x, valid_set_y)
    test = (test_set_x, test_set_y)
    return train, valid, test
if __name__ == '__main__':
    # Smoke run against the benchmark data with default splits.
    # NOTE(review): called without a tokenizer while update_dict defaults
    # to True — confirm this path still works.
    load_data(settings.relative_path_from_root('data/slice/benchmark'),
              settings.relative_path_from_root('data/label/benchmark'))
|
from django.contrib import admin
from models import Article
class ArticleModelAdmin(admin.ModelAdmin):
    """Admin change-list configuration for Article."""
    # Columns shown in the change list; these must match Article field
    # names exactly — presumably the model fields are capitalized
    # ("Title", "Start", "End", "Tag"); confirm against the model.
    list_display = ["Title", "Start", "End"]
    # No link column: rows in the change list are not clickable.
    list_display_links = None
    list_filter = ["Tag"]
    search_fields = ["Tag"]

    # NOTE(review): ModelAdmin does not read an inner Meta class — Django
    # ignores this block; it can likely be removed.
    class Meta:
        model = Article


admin.site.register(Article, ArticleModelAdmin)
|
from django.urls import path
from systems import views
# BUG FIX: every entry used the empty route '', so Django could only ever
# match the first one (views.home) by URL — all other views were
# unreachable. Each view now gets a distinct path; the route *names* are
# unchanged, so reverse()/{% url %} lookups keep working.
# NOTE(review): the detail views presumably need a URL parameter such as
# <int:pk> — confirm the view signatures and adjust.
urlpatterns = [
    path('', views.home, name='home'),  # redirects to views.py and searches for home function for functionality
    path('base/', views.base, name='base'),
    path('cars/', views.carList, name='carList'),
    path('cars/popular/', views.popularCar, name='popularCar'),
    path('cars/available/', views.availableCar, name='availableCar'),
    path('orders/create/', views.orderCreate, name='orderCreate'),
    path('orders/details/', views.orderDetails, name='orderDetails'),
    path('orders/', views.orderList, name='orderList'),
    path('forms/', views.forms, name='forms'),
    path('cars/create/', views.carCreate, name='carCreate'),
    path('cars/details/', views.carDetails, name='carDetails'),
    path('cars/new/', views.newCar, name='newCar'),
    path('admin-panel/', views.adminIndex, name='adminIndex'),
    path('admin-panel/messages/', views.adminMessage, name='adminMessage'),
    path('contact/', views.contact, name='contact'),
]
# coding: utf-8
from django.conf.urls import patterns, include, url
from django.views.generic import DetailView
from .views import HomeView
from .views import Noticia
# Old-style (pre-Django 1.10) URL configuration using patterns().
urlpatterns = patterns('',
    # Home page.
    url(r'^$', HomeView.as_view(), name='home'),
    # Section front page (e.g. /politica/).
    # NOTE(review): this pattern has no leading ^ anchor — confirm that the
    # unanchored match is intended.
    url(r'(?P<secao>\w+)/$', HomeView.as_view(), name='capa-secao'),
    # Single news item by primary key.
    url(r'^bits/(?P<pk>\d+)$', DetailView.as_view(model=Noticia), name='noticia-detalhe'),
)
|
from selenium import webdriver
# Launch Firefox and open the e-grades portal login page.
driver = webdriver.Firefox()
driver.get("http://ocjene.skole.hr/")
# Fill in the credentials (placeholders — do not commit real ones).
element = driver.find_element_by_name("user_login")
element.send_keys("EMAIL_GOES_HERE")
elementP = driver.find_element_by_name("user_password")
elementP.send_keys("PASSWORD_GOES_HERE")
# Submit the login form, then open one class-year subjects page.
# NOTE(review): the find_element_by_* helpers were removed in Selenium 4 —
# confirm the pinned selenium version.
driver.find_element_by_css_selector('.button.bold').click()
driver.find_element_by_css_selector("a[href*='/pregled/predmeti/1395529310']").click()
|
#!/usr/bin/env python
'''
Order merging for normalized eShel spectra.
Author: Leon Oostrum
E-Mail: l.c.oostrum@uva.nl
'''
from __future__ import division
import os
import sys
import glob
from distutils.util import strtobool
from bisect import bisect_left, bisect_right
import numpy as np
import matplotlib.pyplot as plt
def read_file(filename):
    '''
    Read an ascii file as produced by specnorm.py.

    Returns (wavelengths, fluxes) as two Python lists, taken from the
    first and second column of the file.
    '''
    # Transpose so that each row of `columns` is one column of the file.
    columns = np.loadtxt(filename).T
    wavelengths, fluxes = columns
    return list(wavelengths), list(fluxes)
if __name__ == '__main__':
if not len(sys.argv) in (2, 3):
print 'Usage: order_merging.py DATADIR SAVEDIR (optional)'
sys.exit(1)
# set data dir
data_dir = sys.argv[1]
if not os.path.isdir(data_dir):
print 'Data directory does not exist.'
sys.exit(1)
# set save dir
if len(sys.argv) == 2:
# save dir was not specified
try:
ans = strtobool(raw_input('Save directory not specified. Use data directory? [Y/n]\n'))
except ValueError:
ans = 1
if ans:
save_dir = data_dir[:]
else:
sys.exit(1)
else:
save_dir = sys.argv[2]
# check if save dir exists
if not os.path.isdir(save_dir):
print 'Save directory does not exist.'
sys.exit(1)
# get list of files
filelist = glob.glob(os.path.join(data_dir, '*P_1B_[0-9][0-9]_norm.dat'))
pre = filelist[0].split('P_1B')[0] + 'P_1B_'
aft = '_norm.dat'
# Get object name
obj = filelist[0].split('-')[2]
# create dict which returns filename as function of order
files = dict([(int(f.split(pre)[1].split(aft)[0]), f) for f in filelist])
# get list of orders. Reverse to get shorter wavelengths first
orders = sorted(files.keys(), reverse=True)
# load first order
print 'Processing order {0} (1/{1}'.format(orders[0], len(orders))
wave_full, flux_full = read_file(files[orders[0]])
# array to save order merge locations
merge_locs = []
# loop over orders, but skip first one
for i, order in enumerate(orders[1:]):
print 'Processing order {0} ({1}/{2})'.format(order, i+1, len(orders))
# load data
wave, flux = read_file(files[order])
# find overlap with previous order
min_old = bisect_left(wave_full, wave[0])
max_new = bisect_right(wave, wave_full[-1])
# save merge locations
merge_locs.append([wave[0], wave_full[-1]])
# average the overlapping part
part1 = np.array(flux_full[min_old:])
part2 = np.array(flux[:max_new])
flux_avg = list(np.mean(np.array([part1, part2]), axis=0))
# add to final data
wave_full.extend(wave[max_new:])
flux_full[min_old:] = flux_avg
flux_full.extend(flux[max_new:])
# make a plot if needed
try:
ans = strtobool(raw_input('Show the spectrum [Y/n]?\n'))
except ValueError:
ans = 1
if ans == 1:
fig, ax = plt.subplots()
ax.plot(wave_full, flux_full, c='k')
for wav_min, wav_max in merge_locs:
ax.axvspan(wav_min, wav_max, alpha=.1)
ax.set_xlim(wave_full[0], wave_full[-1])
ax.set_ylim(ymin=max(ax.set_ylim()[0], 0))
ax.ticklabel_format(axis='both', useOffset=False)
ax.set_xlabel(r'Wavelength ($\AA$)')
ax.set_ylabel('Flux (norm.)')
fig.suptitle(obj)
plt.show()
# save spectrum
try:
ans = strtobool(raw_input('Save the spectrum? [Y/n]?\n'))
except ValueError:
ans = 1
if ans == 1:
filename = os.path.join(save_dir, obj.lower()+'_merged.dat')
if os.path.isfile(filename):
try:
ans = strtobool(raw_input('File already exists: {0}, overwrite? [Y/n]?\n'.format(filename)))
except ValueError:
ans == 1
if not ans:
exit()
data = np.array([wave_full, flux_full]).T
np.savetxt(os.path.join(save_dir, obj.lower()+'_merged.dat'), data)
|
if __name__=="__main__":
str = 'Runoob'
print(str) # 输出字符串
print(str[0:-2]) # 输出第一个到倒数第二个的所有字符
print(str[0]) # 输出字符串第一个字符
print(str[2:5]) # 输出从第三个开始到第五个的字符
print(str[2:]) # 输出从第三个开始后的所有字符
print(str * 2) # 输出字符串两次
print(str + '你好') # 连接字符串
|
import sys
def main(locid):
    """Print the local 'tests' file with every 'LOCID' replaced by `locid`."""
    # BUG FIX: the file handle was never closed; use a context manager.
    with open('tests', 'r') as f:
        lines = f.readlines()
    newtxt = ''
    for line in lines:
        newtxt += line.replace('LOCID', locid)
    # Single-argument print() behaves identically on Python 2 and 3.
    print(newtxt)


if __name__ == '__main__':
    main(sys.argv[1])
#! /usr/bin/env python
# Copyright (c) 2017, Cuichaowen. All rights reserved.
# -*- coding: utf-8 -*-
# caffe_pb2 is generated from caffe.proto by protoc; re-raise with a clear
# message when the generated module is missing from the path.
try:
    from caffe_pb2 import *
except ImportError:
    raise ImportError(' No module named caffe_pb2 . ')
|
import os
import re
import requests
import subprocess
import sys
from string import Template
def load_template(filename):
    """Read `filename` and return its contents as a string.Template.

    Raises ValueError when the file is missing or empty — the old code
    silently returned None in those cases, which only surfaced much later
    as an AttributeError on .safe_substitute().
    """
    if not os.path.isfile(filename):
        raise ValueError('template file not found: {}'.format(filename))
    with open(filename, 'r') as template_file:
        template_string = template_file.read()
    if not template_string:
        raise ValueError('template file is empty: {}'.format(filename))
    return Template(template_string)
if __name__ == '__main__':
    ######
    # initialize a couple variables
    nvd_data_feeds_url = 'https://nvd.nist.gov/vuln/data-feeds'
    # this template is used to create uniqueness constraints before any
    # data is loaded.
    nvd_constraint_template = load_template('code/constraints.cypher')
    # this template is specific to the 1.0 version of the NVD JSON feed
    nvd_loader_template = load_template('code/loader-template.cypher')
    # after we're done loading, create indexes, a process that should
    # kick off in the background
    nvd_index_template = load_template('code/indexes.cypher')
    # this pattern will get gzipped json data URLs for years 2000-2999
    # hard-coding the version number in the file name pattern as "1.0"
    # should help ensure that this script doesn't process any NVD feeds
    # that don't match the pattern expected in nvd_loader_template
    # (raw string: avoids invalid-escape warnings in the regex)
    nvd_json_pattern = re.compile(r'(https:\/\/nvd\.nist\.gov\/feeds\/json\/cve\/1\.0\/nvdcve-1\.0-2\d{3}.json.gz)')
    # end variable initialization
    ######
    print(f'Fetching NVD json feed URLs from {nvd_data_feeds_url}')
    sys.stdout.flush()
    nvd_feeds_page = requests.get(nvd_data_feeds_url)
    if nvd_feeds_page.status_code == 200:
        # Only scan the page for feed URLs once we know the fetch succeeded.
        nvd_json_files = re.finditer(nvd_json_pattern, nvd_feeds_page.content.decode('utf-8'))
        print('Creating uniqueness constraints')
        sys.stdout.flush()
        cypher_shell_result = subprocess.run(['cypher-shell'],
            input=nvd_constraint_template.safe_substitute().encode('utf-8'),
            stdout = subprocess.PIPE, stderr = subprocess.PIPE)
        if cypher_shell_result.returncode > 0:
            # BUG FIX: this error path referenced the undefined name
            # `cypher_shell_results` (trailing s), raising NameError
            # instead of reporting the actual failure.
            sys.exit('Error creating uniqueness constraints: {}'.format(cypher_shell_result))
        for nvd_file_url_match in nvd_json_files:
            nvd_file_url = nvd_file_url_match.group(0)
            nvd_file_name_gzip = nvd_file_url.split('/')[-1]
            nvd_file_name = nvd_file_name_gzip.strip('.gz')
            print(f'Fetching {nvd_file_name_gzip}')
            sys.stdout.flush()
            nvd_file_contents = requests.get(nvd_file_url, stream=True)
            if nvd_file_contents.status_code == 200:
                with open(nvd_file_name_gzip, 'wb') as nvd_file:
                    for chunk in nvd_file_contents.iter_content(chunk_size=1024):
                        if chunk:
                            nvd_file.write(chunk)
                # by default this should unzip to nvd_file_name
                subprocess.run(['gunzip', nvd_file_name_gzip])
            else:
                print(f'Error fetching {nvd_file_contents}')
            print(f'Loading {nvd_file_name} to Neo4j')
            sys.stdout.flush()
            cypher_shell_result = subprocess.run(['cypher-shell'],
                input=nvd_loader_template.safe_substitute(nvd_file_name = nvd_file_name).encode('utf-8'),
                stdout = subprocess.PIPE, stderr = subprocess.PIPE)
            os.remove(nvd_file_name)
            if cypher_shell_result.returncode == 0:
                print(f'Successfully loaded {nvd_file_name}')
            else:
                sys.exit('Error loading {}: {}'.format(nvd_file_name, cypher_shell_result))
        # if we've made it this far, we loaded every JSON NVD year file
        # on the data feeds page. time to create the non-unique indexes!
        print('Creating non-unique indexes')
        sys.stdout.flush()
        cypher_shell_result = subprocess.run(['cypher-shell'],
            input=nvd_index_template.safe_substitute().encode('utf-8'),
            stdout = subprocess.PIPE, stderr = subprocess.PIPE)
        if cypher_shell_result.returncode > 0:
            sys.exit('Error creating uniqueness constraints: {}'.format(cypher_shell_result))
    else:
        sys.exit('Error fetching NVD data feeds page.')
    print('Finished loading NVD json.')
# Overrides applied while running the test suite: execute Celery tasks
# inline (and propagate their exceptions) against an in-memory broker, and
# keep logging quiet.
TEST_SETTINGS = {
    "CELERY_TASK_ALWAYS_EAGER": True,
    "CELERY_TASK_EAGER_PROPAGATES": True,
    "CELERY_BROKER_URL": "memory",
    "LOG_LEVEL": "ERROR",
}
|
from scarpkg.log import logStart, logStop, logMsg, Log
from scarpkg.get_variables import get_info, save_info
from scarpkg.bot import Bot
from scarpkg.bitso_functions import create_api
import scarpkg.bitso_functions
import scarpkg.images
|
"""This module contains the ``PlaywrightMiddleware`` scrapy middleware"""
from importlib import import_module
from scrapy import signals
from scrapy.exceptions import NotConfigured
from scrapy.http import HtmlResponse
import random
from .http import PlaywrightRequest
from playwright.sync_api import sync_playwright
class PlaywrightMiddleware:
    """Scrapy downloader middleware that renders ``PlaywrightRequest``s with
    a (sync) Playwright chromium browser and returns the rendered HTML.

    Fixes over the previous version:
    - the playwright driver returned by ``sync_playwright().start()`` was
      never stored or stopped, and a brand-new browser was launched for
      EVERY request without closing the previous one (resource leak);
    - the four near-identical ``new_context(...)`` branches are collapsed
      into a single call fed by a small proxy-selection helper.
    """

    # Default UA applied when the request does not specify one.
    DEFAULT_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'

    def __init__(self, proxies_capabilities=None, headless=True):
        # proxies_capabilities: optional list of 'server' or 'server@user:pass' strings
        self.proxies_capabilities = proxies_capabilities
        self.headless = headless
        self._playwright = None  # sync playwright driver, started lazily
        self.browser = None      # launched chromium instance, reused across requests

    @classmethod
    def from_crawler(cls, crawler):
        """Build the middleware from crawler settings (HEADLESS, PROXIES)."""
        headless = crawler.settings.get('HEADLESS')
        proxies_capabilities = crawler.settings.get('PROXIES')
        middleware = cls(
            headless=headless,
            proxies_capabilities=proxies_capabilities,
        )
        crawler.signals.connect(middleware.spider_closed, signals.spider_closed)
        return middleware

    def _launch_browser(self):
        """Start the playwright driver once and launch a chromium browser."""
        if self._playwright is None:
            self._playwright = sync_playwright().start()
        return self._playwright.chromium.launch(headless=self.headless)

    def _proxy_settings(self):
        """Pick a random proxy and translate it to playwright's proxy dict.

        Accepts 'server' or 'server@user:pass'. Returns None when no proxies
        are configured.
        """
        if not self.proxies_capabilities:
            return None
        chosen = random.choice(self.proxies_capabilities)
        server, _, auth = chosen.partition('@')
        if auth:
            username, _, password = auth.partition(':')
            return {'server': server, 'username': username, 'password': password}
        return {'server': server}

    def process_request(self, request, spider):
        """Process a request using the playwright if applicable."""
        if not isinstance(request, PlaywrightRequest):
            return None  # let other downloader middlewares handle it
        if request.user_agent is None:
            request.user_agent = self.DEFAULT_USER_AGENT
        if request.browser is not None:
            self.browser = request.browser
        elif self.browser is None:
            # Launch (and keep) one browser instead of leaking one per request.
            self.browser = self._launch_browser()
        if request.context is not None:
            self.context = request.context
        else:
            context_kwargs = {
                'viewport': {'width': 2640, 'height': 1440},
                'user_agent': request.user_agent,
            }
            proxy = self._proxy_settings()
            if proxy is not None:
                context_kwargs['proxy'] = proxy
            self.context = self.browser.new_context(**context_kwargs)
        self.page = self.context.new_page()
        if request.timeout:
            self.page.set_default_timeout(request.timeout)
        self.page.goto(request.url, wait_until="domcontentloaded")
        # self.page.wait_for_load_state("networkidle")
        body = str.encode(self.page.content())
        # Expose the driver via the "meta" attribute
        request.meta.update({'context': self.context, 'browser': self.browser, 'page': self.page})
        return HtmlResponse(
            self.page.url,
            body=body,
            encoding='utf-8',
            request=request
        )

    def spider_closed(self):
        """Shutdown the browser and the playwright driver when the spider closes."""
        if self.browser is not None:
            self.browser.close()
        if self._playwright is not None:
            self._playwright.stop()
|
# Generated by Django 2.2.4 on 2020-02-04 08:33
from django.db import migrations
class Migration(migrations.Migration):
    # No schema changes: the AlterUniqueTogether below was deliberately left
    # commented out (presumably reverted/abandoned) — this migration is a
    # no-op placeholder that keeps the migration graph consistent.
    dependencies = [
        ('organizations', '0097_remove_disciplinecredit_control_form'),
    ]
    operations = [
        # migrations.AlterUniqueTogether(
        #     name='studentdiscipline',
        #     unique_together={('student',
        #                       # 'uuid1c',
        #                       'study_plan_uid_1c',
        #                       'acad_period',
        #                       'discipline_code',
        #                       'discipline',
        #                       'load_type',
        #                       'hours',
        #                       'language',
        #                       'cycle',
        #                       'study_year',)},
        # ),
    ]
|
import numpy as np
import matplotlib.pyplot as plt
import cv2
import math
# Load and normalize the input image, then convert to grayscale for edge detection.
img = cv2.imread("../img/Emma.jpg")
img = cv2.resize(img, (600, 800))
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Smooth before edge detection to suppress noise-induced edges.
kernel_size = (25, 25)
gauss = cv2.GaussianBlur(img_gray, kernel_size, 0)

# Hysteresis thresholds for Canny (gradient magnitudes — may exceed 255).
umbral_minimo = 85
umbral_maximo = 275
# Bug fix: the blurred image was computed but never used; Canny ran on the raw
# grayscale. Feed the Gaussian-smoothed image to the edge detector instead.
canny = cv2.Canny(gauss, umbral_minimo, umbral_maximo)

# Display original, grayscale and edge images until a key is pressed.
cv2.imshow("Original", img)
cv2.imshow("Grises", img_gray)
cv2.imshow("Canny", canny)
cv2.waitKey(0)
cv2.destroyAllWindows()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from copy import copy
class Buckets:
    """A fixed number of independent buckets, each holding copies of elements.

    Elements and defaults are always copied on the way in, so callers can
    mutate their own objects without corrupting bucket contents.
    """

    def __init__(self, length=0, default=None):
        # Copy 'default' so the caller's object is never aliased
        # (passes assertNotEqual(id(default), id(b.default))).
        self.default = copy(default)
        # Bug fix: '[copy(default)] * length' created ONE copy referenced by
        # every slot, so mutating one bucket mutated all of them. Build an
        # independent copy per bucket instead.
        self.buckets = [copy(default) for _ in range(length)]

    def add(self, index, element):
        """Append an independent copy of 'element' to bucket 'index'."""
        self.buckets[index].append(copy(element))

    def find(self, index, element):
        """Return True if 'element' is present in bucket 'index'."""
        return element in self.buckets[index]

    def clear(self, index):
        """Reset bucket 'index' to a fresh copy of the default value."""
        # Bug fix: assigning self.default directly aliased the shared default,
        # so a later add() on this bucket corrupted the default used by every
        # future clear(). Hand out a copy instead.
        self.buckets[index] = copy(self.default)
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
|
# Generated by Django 3.1.2 on 2020-10-17 16:43
from django.db import migrations
class Migration(migrations.Migration):
    # Renames SavedHouses.favourites to SavedHouses.house; data is preserved,
    # only the column/field name changes.
    dependencies = [
        ('users', '0004_auto_20201017_1540'),
    ]
    operations = [
        migrations.RenameField(
            model_name='savedhouses',
            old_name='favourites',
            new_name='house',
        ),
    ]
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class AdvFecRange(Base):
    """A set of FECs to be included in the LDP router.
    The AdvFecRange class encapsulates a list of advFecRange resources that are managed by the user.
    A list of resources can be retrieved from the server using the AdvFecRange.find() method.
    The list can be managed by using the AdvFecRange.add() and AdvFecRange.remove() methods.
    """

    # Auto-generated REST binding: attribute access is routed through the
    # maps below, so no per-instance __dict__ is needed.
    __slots__ = ()
    _SDM_NAME = "advFecRange"
    # Python-facing attribute name -> server-side (SDM) attribute name.
    _SDM_ATT_MAP = {
        "EnablePacking": "enablePacking",
        "EnableReplyingLspPing": "enableReplyingLspPing",
        "Enabled": "enabled",
        "FirstNetwork": "firstNetwork",
        "LabelMode": "labelMode",
        "LabelValueStart": "labelValueStart",
        "MaskWidth": "maskWidth",
        "NumberOfNetworks": "numberOfNetworks",
    }
    # Allowed values for enum-typed SDM attributes.
    _SDM_ENUM_MAP = {
        "labelMode": ["none", "increment"],
    }

    def __init__(self, parent, list_op=False):
        super(AdvFecRange, self).__init__(parent, list_op)

    @property
    def EnablePacking(self):
        # type: () -> bool
        """
        Returns
        -------
        - bool: (For IPv4 FEC ranges and in Unsolicited Label Distribution Mode ONLY) If checked, FEC ranges will be aggregated within a single LDP PDU to conserve bandwidth and processing.
        """
        return self._get_attribute(self._SDM_ATT_MAP["EnablePacking"])

    @EnablePacking.setter
    def EnablePacking(self, value):
        # type: (bool) -> None
        self._set_attribute(self._SDM_ATT_MAP["EnablePacking"], value)

    @property
    def EnableReplyingLspPing(self):
        # type: () -> bool
        """
        Returns
        -------
        - bool: NOT DEFINED
        """
        return self._get_attribute(self._SDM_ATT_MAP["EnableReplyingLspPing"])

    @EnableReplyingLspPing.setter
    def EnableReplyingLspPing(self, value):
        # type: (bool) -> None
        self._set_attribute(self._SDM_ATT_MAP["EnableReplyingLspPing"], value)

    @property
    def Enabled(self):
        # type: () -> bool
        """
        Returns
        -------
        - bool: Enables this FEC range for use in label mapping messages. By default, the Ixia LDP emulation uses the prefix FEC type.
        """
        return self._get_attribute(self._SDM_ATT_MAP["Enabled"])

    @Enabled.setter
    def Enabled(self, value):
        # type: (bool) -> None
        self._set_attribute(self._SDM_ATT_MAP["Enabled"], value)

    @property
    def FirstNetwork(self):
        # type: () -> str
        """
        Returns
        -------
        - str: The first network address in the range (in IP address format).
        """
        return self._get_attribute(self._SDM_ATT_MAP["FirstNetwork"])

    @FirstNetwork.setter
    def FirstNetwork(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP["FirstNetwork"], value)

    @property
    def LabelMode(self):
        # type: () -> str
        """
        Returns
        -------
        - str(none | increment): Indicates whether the same label or incrementing labels should be used in the VC ranges.
        """
        return self._get_attribute(self._SDM_ATT_MAP["LabelMode"])

    @LabelMode.setter
    def LabelMode(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP["LabelMode"], value)

    @property
    def LabelValueStart(self):
        # type: () -> int
        """
        Returns
        -------
        - number: The first label in the range of labels.
        """
        return self._get_attribute(self._SDM_ATT_MAP["LabelValueStart"])

    @LabelValueStart.setter
    def LabelValueStart(self, value):
        # type: (int) -> None
        self._set_attribute(self._SDM_ATT_MAP["LabelValueStart"], value)

    @property
    def MaskWidth(self):
        # type: () -> int
        """
        Returns
        -------
        - number: The number of bits in the mask applied to the network address. The masked bits in the First Network address form the address prefix.
        """
        return self._get_attribute(self._SDM_ATT_MAP["MaskWidth"])

    @MaskWidth.setter
    def MaskWidth(self, value):
        # type: (int) -> None
        self._set_attribute(self._SDM_ATT_MAP["MaskWidth"], value)

    @property
    def NumberOfNetworks(self):
        # type: () -> int
        """
        Returns
        -------
        - number: The number of network addresses to be included in the range. The maximum number of valid possible addresses depends on the values for the first network and the network mask.
        """
        return self._get_attribute(self._SDM_ATT_MAP["NumberOfNetworks"])

    @NumberOfNetworks.setter
    def NumberOfNetworks(self, value):
        # type: (int) -> None
        self._set_attribute(self._SDM_ATT_MAP["NumberOfNetworks"], value)

    def update(
        self,
        EnablePacking=None,
        EnableReplyingLspPing=None,
        Enabled=None,
        FirstNetwork=None,
        LabelMode=None,
        LabelValueStart=None,
        MaskWidth=None,
        NumberOfNetworks=None,
    ):
        # type: (bool, bool, bool, str, str, int, int, int) -> AdvFecRange
        """Updates advFecRange resource on the server.

        Args
        ----
        - EnablePacking (bool): (For IPv4 FEC ranges and in Unsolicited Label Distribution Mode ONLY) If checked, FEC ranges will be aggregated within a single LDP PDU to conserve bandwidth and processing.
        - EnableReplyingLspPing (bool): NOT DEFINED
        - Enabled (bool): Enables this FEC range for use in label mapping messages. By default, the Ixia LDP emulation uses the prefix FEC type.
        - FirstNetwork (str): The first network address in the range (in IP address format).
        - LabelMode (str(none | increment)): Indicates whether the same label or incrementing labels should be used in the VC ranges.
        - LabelValueStart (number): The first label in the range of labels.
        - MaskWidth (number): The number of bits in the mask applied to the network address. The masked bits in the First Network address form the address prefix.
        - NumberOfNetworks (number): The number of network addresses to be included in the range. The maximum number of valid possible addresses depends on the values for the first network and the network mask.

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Only the locals that are not None are sent to the server.
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))

    def add(
        self,
        EnablePacking=None,
        EnableReplyingLspPing=None,
        Enabled=None,
        FirstNetwork=None,
        LabelMode=None,
        LabelValueStart=None,
        MaskWidth=None,
        NumberOfNetworks=None,
    ):
        # type: (bool, bool, bool, str, str, int, int, int) -> AdvFecRange
        """Adds a new advFecRange resource on the server and adds it to the container.

        Args
        ----
        - EnablePacking (bool): (For IPv4 FEC ranges and in Unsolicited Label Distribution Mode ONLY) If checked, FEC ranges will be aggregated within a single LDP PDU to conserve bandwidth and processing.
        - EnableReplyingLspPing (bool): NOT DEFINED
        - Enabled (bool): Enables this FEC range for use in label mapping messages. By default, the Ixia LDP emulation uses the prefix FEC type.
        - FirstNetwork (str): The first network address in the range (in IP address format).
        - LabelMode (str(none | increment)): Indicates whether the same label or incrementing labels should be used in the VC ranges.
        - LabelValueStart (number): The first label in the range of labels.
        - MaskWidth (number): The number of bits in the mask applied to the network address. The masked bits in the First Network address form the address prefix.
        - NumberOfNetworks (number): The number of network addresses to be included in the range. The maximum number of valid possible addresses depends on the values for the first network and the network mask.

        Returns
        -------
        - self: This instance with all currently retrieved advFecRange resources using find and the newly added advFecRange resources available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))

    def remove(self):
        """Deletes all the contained advFecRange resources in this instance from the server.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        self._delete()

    def find(
        self,
        EnablePacking=None,
        EnableReplyingLspPing=None,
        Enabled=None,
        FirstNetwork=None,
        LabelMode=None,
        LabelValueStart=None,
        MaskWidth=None,
        NumberOfNetworks=None,
    ):
        # type: (bool, bool, bool, str, str, int, int, int) -> AdvFecRange
        """Finds and retrieves advFecRange resources from the server.

        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve advFecRange resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all advFecRange resources from the server.

        Args
        ----
        - EnablePacking (bool): (For IPv4 FEC ranges and in Unsolicited Label Distribution Mode ONLY) If checked, FEC ranges will be aggregated within a single LDP PDU to conserve bandwidth and processing.
        - EnableReplyingLspPing (bool): NOT DEFINED
        - Enabled (bool): Enables this FEC range for use in label mapping messages. By default, the Ixia LDP emulation uses the prefix FEC type.
        - FirstNetwork (str): The first network address in the range (in IP address format).
        - LabelMode (str(none | increment)): Indicates whether the same label or incrementing labels should be used in the VC ranges.
        - LabelValueStart (number): The first label in the range of labels.
        - MaskWidth (number): The number of bits in the mask applied to the network address. The masked bits in the First Network address form the address prefix.
        - NumberOfNetworks (number): The number of network addresses to be included in the range. The maximum number of valid possible addresses depends on the values for the first network and the network mask.

        Returns
        -------
        - self: This instance with matching advFecRange resources retrieved from the server available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))

    def read(self, href):
        """Retrieves a single instance of advFecRange data from the server.

        Args
        ----
        - href (str): An href to the instance to be retrieved

        Returns
        -------
        - self: This instance with the advFecRange resources from the server available through an iterator or index

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import logging
from snisi_core.models.Projects import Domain
# Module-level logger for this domain package.
logger = logging.getLogger(__name__)

# Branding name shown for this domain and the slug used to look it up.
PROJECT_BRAND = "SMIR"
DOMAIN_SLUG = 'epidemiology'

# epidemio is based on traditional weeks:
# period ends on Friday noon and collect ends on Sunday noon.
# District validation by Monday noon.
# Region validation by Tuesday noon.
ROUTINE_REPORTING_END_WEEKDAY = 4  # Friday (0 = Monday)
ROUTINE_DISTRICT_AGG_DAYS_DELTA = 3
ROUTINE_REGION_AGG_DAYS_DELTA = 4
def get_domain():
    """Return the Domain registered under DOMAIN_SLUG, or None if absent."""
    return Domain.get_or_none(DOMAIN_SLUG)
|
# What is your favourite day of the week? Check if it's
# the most frequent day of the week in the year.
# You are given a year as integer (e. g. 2001).
# You should return the most frequent day(s) of the week in that year.
# The result has to be a list of days sorted by the order of days in week
# (e. g. ['Monday', 'Tuesday'], ['Saturday', 'Sunday'], ['Monday', 'Sunday']).
# Week starts with Monday.
# Input: Year as an int.
# Output: The list of most frequent days sorted by the order of days in week (from Monday to Sunday).
# Preconditions:
# Week starts on Monday.
# Year is between 1583 and 4000.
# Calendar is Gregorian.
def most_frequent_days(year):
    """Return the most frequent day-of-week name(s) in *year*.

    A common year (365 = 52*7 + 1 days) has exactly one weekday occurring
    53 times: the weekday of Jan 1, which equals the weekday of Dec 31.
    A leap year (366 = 52*7 + 2 days) has two: the weekdays of Jan 1 and
    Dec 31. The result is sorted Monday-first, per the problem statement.

    Bug fixes vs. the original:
    - the leap-year test demanded year % 400 == 0 (wrong for e.g. 2012);
    - a leap year was credited with THREE frequent days and a common year
      with two (both off by one);
    - one branch returned ``day[key]`` with an int key into a str-keyed
      dict (KeyError) and discarded the computed answer.
    """
    import datetime

    names = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
             'Friday', 'Saturday', 'Sunday']
    first = datetime.date(year, 1, 1).weekday()    # weekday of Jan 1 (0 = Monday)
    last = datetime.date(year, 12, 31).weekday()   # weekday of Dec 31
    # The set handles both cases: one element for common years (first == last),
    # two for leap years; sorting restores Monday-to-Sunday order.
    return [names[i] for i in sorted({first, last})]
# Smoke-test call; the return value is discarded — presumably left over from
# manual testing (consider printing or asserting the result instead).
most_frequent_days(2427)
# Generated by Django 2.2 on 2020-08-08 14:41
from django.db import migrations
import django_resized.forms
class Migration(migrations.Migration):
    # Switches Menu.image to a ResizedImageField (auto-resized to 200x200,
    # JPEG quality 75); column stays nullable.
    dependencies = [
        ('accounts', '0004_auto_20200808_1638'),
    ]
    operations = [
        migrations.AlterField(
            model_name='menu',
            name='image',
            field=django_resized.forms.ResizedImageField(crop=None, force_format=None, keep_meta=True, null=True, quality=75, size=[200, 200], upload_to='static/images/'),
        ),
    ]
|
#!/usr/bin/env python
from modshogun import StreamingVwFile
from modshogun import T_SVMLIGHT
from modshogun import StreamingVwFeatures
from modshogun import VowpalWabbit
# Argument tuples for the example entry point below; a single dummy run.
parameter_list=[[None]]
def streaming_vw_modular(dummy):
    """Runs the VW algorithm on a toy dataset in SVMLight format."""
    # Stream the training data from disk instead of loading it all at once.
    vw_input = StreamingVwFile("../data/fm_train_sparsereal.dat")
    # The file on disk is SVMLight, not native VW format.
    # Supported parser types are T_DENSE, T_SVMLIGHT and T_VW.
    vw_input.set_parser_type(T_SVMLIGHT)
    # `True` marks the examples as labelled; 1024 is presumably the parser
    # buffer size — confirm against the shogun docs.
    streamed = StreamingVwFeatures(vw_input, True, 1024)
    # Build and train the Vowpal Wabbit learner on the streamed features.
    learner = VowpalWabbit(streamed)
    learner.train()
    #return vw
if __name__ == "__main__":
    # Example invocation intentionally left disabled (as in the original):
    # streaming_vw_modular(*parameter_list[0])
    pass
|
# Created by MechAviv
# ID :: [4000022]
# Maple Road : Adventurer Training Center 1
# Play the map-entry screen effect. NOTE(review): 'sm' is not defined in this
# file — presumably the script manager injected by the game server's scripting
# host; confirm against the server runtime.
sm.showFieldEffect("maplemap/enter/1010100", 0)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
import re
class EmployeeChangeIBAN(Document):
    """Employee request to change their bank IBAN.

    Validation rejects malformed IBANs and requests from employees still
    committed to a bank; on submit the new IBAN/bank are written straight
    onto the Employee record.
    """

    def validate(self):
        self.validate_IBAN()
        self.validate_commitment_with_bank()

    def validate_IBAN(self):
        """Throw unless new_iban is 'SA' followed by exactly 22 characters.

        NOTE(review): the pattern only checks prefix and length — any 22
        characters are accepted after 'SA'. The stricter commented-out
        pattern suggests tighter validation was intended; confirm before
        changing, as existing records may rely on the loose check.
        """
        #pattern = r"^[a-zA-Z]{2}[0-9]{2}[a-zA-Z0-9]{4}[0-9]{7}([a-zA-Z0-9]?){0,16}$"
        pattern = r'(^(SA.{22})$)'
        if not re.match(pattern, self.new_iban):
            frappe.throw(_("Invalid IBAN number, IBAN should be like SAxxxxxxxxxxxxxxxxxxxxxx."))

    def validate_commitment_with_bank(self):
        """Block the change while the employee is still committed to a bank."""
        if self.commitment_with_bank == '1':
            # Fixed typo ("commited") and made the message translatable.
            frappe.throw(_("You are committed with a bank"))

    def before_submit(self):
        """Copy the approved IBAN/bank onto the Employee record, re-checking commitment."""
        employee = frappe.get_doc("Employee", self.employee)
        # db_set writes directly to the database, bypassing Employee validation.
        employee.db_set("iban", self.new_iban)
        employee.db_set("bank_name", self.new_bank)
        self.validate_commitment_with_bank()
# def get_permission_query_conditions(user):
# pass
# def get_permission_query_conditions22(user):
# return ""
# if not user: user = frappe.session.user
# employees = frappe.get_list("Employee", fields=["name"], filters={'user_id': user}, ignore_permissions=True)
# if employees:
# employee = frappe.get_doc('Employee', {'name': employees[0].name})
# if u'Change IBAN Approver' in frappe.get_roles(user) :
# return ""
# elif u'Employee' in frappe.get_roles(user):
# return """(`Employee Change IBAN`.owner = '{user}' or `Employee Change IBAN`.employee = '{employee}')""" \
# .format(user=frappe.db.escape(user), employee=frappe.db.escape(employee.name))
# else:
# return None
def get_permission_query_conditions(user):
    """Permission hook for this doctype; currently imposes no restrictions.

    The role-based filtering (limiting employees to their own documents)
    was disabled; returning None applies no extra query conditions.
    """
    return None
|
"""
===============
Specific images
===============
"""
import matplotlib.pyplot as plt
import matplotlib
from skimage import data
# Enlarge all figure text for readability.
matplotlib.rcParams['font.size'] = 18

######################################################################
#
# Stereo images
# =============

fig, stereo_axes = plt.subplots(1, 2, figsize=(8, 4))
flat_stereo = stereo_axes.ravel()
stereo_frames = data.stereo_motorcycle()
for axis, frame in zip(flat_stereo, stereo_frames):
    axis.imshow(frame)
fig.tight_layout()
plt.show()

######################################################################
#
# PIV images
# =============

fig, piv_axes = plt.subplots(1, 2, figsize=(8, 4))
flat_piv = piv_axes.ravel()
piv_frames = data.vortex()
for axis, frame in zip(flat_piv, piv_frames):
    axis.imshow(frame)
fig.tight_layout()
plt.show()

######################################################################
#
# Faces and non-faces dataset
# ===========================
#
# A sample of 20 over 200 images is displayed.

fig, face_axes = plt.subplots(4, 5, figsize=(20, 20))
flat_faces = face_axes.ravel()
face_images = data.lfw_subset()
for offset in range(20):
    flat_faces[offset].imshow(face_images[90 + offset], cmap=plt.cm.gray)
    flat_faces[offset].axis('off')
fig.tight_layout()
plt.show()
|
from django.http import HttpResponse
from django.template import loader
from django.http import JsonResponse
from django.core import serializers
import json
import sys
import OmniDB_app.include.Spartacus as Spartacus
import OmniDB_app.include.Spartacus.Database as Database
import OmniDB_app.include.Spartacus.Utils as Utils
from OmniDB_app.include.Session import Session
from datetime import datetime
def get_tree_info(request):
    """Return DDL templates for the session's selected database as JSON.

    Expects POST 'data' JSON with 'p_database_index'; responds with
    {'v_data', 'v_error', 'v_error_id'} where v_error_id == 1 means the
    OmniDB session is missing/invalid.
    """
    v_return = {}
    v_return['v_data'] = ''
    v_return['v_error'] = False
    v_return['v_error_id'] = -1
    #Invalid session
    if not request.session.get('omnidb_session'):
        v_return['v_error'] = True
        v_return['v_error_id'] = 1
        return JsonResponse(v_return)
    v_session = request.session.get('omnidb_session')
    json_object = json.loads(request.POST.get('data', None))
    v_database_index = json_object['p_database_index']
    v_database = v_session.v_databases[v_database_index]['database']
    # Each Template*() call yields an object whose v_text is the raw DDL
    # template for that operation on this database backend.
    v_return['v_data'] = {
        'v_mode': 'database',
        'v_database_return': {
            'v_database': v_database.GetName(),
            'create_tablespace': v_database.TemplateCreateTablespace().v_text,
            'alter_tablespace': v_database.TemplateAlterTablespace().v_text,
            'drop_tablespace': v_database.TemplateDropTablespace().v_text,
            'create_role': v_database.TemplateCreateRole().v_text,
            'alter_role': v_database.TemplateAlterRole().v_text,
            'drop_role': v_database.TemplateDropRole().v_text,
            'create_database': v_database.TemplateCreateDatabase().v_text,
            'alter_database': v_database.TemplateAlterDatabase().v_text,
            'drop_database': v_database.TemplateDropDatabase().v_text,
            'create_schema': v_database.TemplateCreateSchema().v_text,
            'alter_schema': v_database.TemplateAlterSchema().v_text,
            'drop_schema': v_database.TemplateDropSchema().v_text,
            #create_table
            #alter_table
            'drop_table': v_database.TemplateDropTable().v_text,
            'create_sequence': v_database.TemplateCreateSequence().v_text,
            'alter_sequence': v_database.TemplateAlterSequence().v_text,
            'drop_sequence': v_database.TemplateDropSequence().v_text,
            'create_function': v_database.TemplateCreateFunction().v_text,
            'drop_function': v_database.TemplateDropFunction().v_text,
            'create_view': v_database.TemplateCreateView().v_text,
            'drop_view': v_database.TemplateDropView().v_text
        }
    }
    return JsonResponse(v_return)
def get_tables(request):
    """Return the tables of a schema for the selected database as JSON."""
    response = {'v_data': '', 'v_error': False, 'v_error_id': -1}
    # Invalid session
    v_session = request.session.get('omnidb_session')
    if not v_session:
        response['v_error'] = True
        response['v_error_id'] = 1
        return JsonResponse(response)
    payload = json.loads(request.POST.get('data', None))
    v_database_index = payload['p_database_index']
    v_schema = payload['p_schema']
    v_database = v_session.v_databases[v_database_index]['database']
    # Check database prompt timeout
    if v_session.DatabaseReachPasswordTimeout(v_database_index):
        response['v_data'] = {'password_timeout': True, 'message': ''}
        response['v_error'] = True
        return JsonResponse(response)
    try:
        rows = v_database.QueryTables(False, v_schema).Rows
        response['v_data'] = [
            {
                'v_name': row['table_name'],
                'v_has_primary_keys': v_database.v_has_primary_keys,
                'v_has_foreign_keys': v_database.v_has_foreign_keys,
                'v_has_uniques': v_database.v_has_uniques,
                'v_has_indexes': v_database.v_has_indexes,
            }
            for row in rows
        ]
    except Exception as exc:
        response['v_data'] = str(exc)
        response['v_error'] = True
    return JsonResponse(response)
def get_views(request):
    """Return the views of a schema for the selected database as JSON."""
    response = {'v_data': '', 'v_error': False, 'v_error_id': -1}
    # Invalid session
    v_session = request.session.get('omnidb_session')
    if not v_session:
        response['v_error'] = True
        response['v_error_id'] = 1
        return JsonResponse(response)
    payload = json.loads(request.POST.get('data', None))
    v_database_index = payload['p_database_index']
    v_schema = payload['p_schema']
    v_database = v_session.v_databases[v_database_index]['database']
    # Check database prompt timeout
    if v_session.DatabaseReachPasswordTimeout(v_database_index):
        response['v_data'] = {'password_timeout': True, 'message': ''}
        response['v_error'] = True
        return JsonResponse(response)
    try:
        rows = v_database.QueryViews(False, v_schema).Rows
        # Views reuse the 'table_name' column of the query result.
        response['v_data'] = [{'v_name': row['table_name']} for row in rows]
    except Exception as exc:
        response['v_data'] = str(exc)
        response['v_error'] = True
    return JsonResponse(response)
def get_schemas(request):
    """Return all schemas of the selected database as JSON."""
    response = {'v_data': '', 'v_error': False, 'v_error_id': -1}
    # Invalid session
    v_session = request.session.get('omnidb_session')
    if not v_session:
        response['v_error'] = True
        response['v_error_id'] = 1
        return JsonResponse(response)
    payload = json.loads(request.POST.get('data', None))
    v_database_index = payload['p_database_index']
    v_database = v_session.v_databases[v_database_index]['database']
    # Check database prompt timeout
    if v_session.DatabaseReachPasswordTimeout(v_database_index):
        response['v_data'] = {'password_timeout': True, 'message': ''}
        response['v_error'] = True
        return JsonResponse(response)
    try:
        rows = v_database.QuerySchemas().Rows
        response['v_data'] = [{'v_name': row['schema_name']} for row in rows]
    except Exception as exc:
        # This endpoint wraps errors in the password_timeout envelope.
        response['v_data'] = {'password_timeout': False, 'message': str(exc)}
        response['v_error'] = True
    return JsonResponse(response)
def get_databases(request):
    """Return all databases visible to the selected connection as JSON.

    Responds with {'v_data', 'v_error', 'v_error_id'}; v_error_id == 1 means
    the OmniDB session is missing/invalid.
    """
    v_return = {}
    v_return['v_data'] = ''
    v_return['v_error'] = False
    v_return['v_error_id'] = -1
    #Invalid session
    if not request.session.get('omnidb_session'):
        v_return['v_error'] = True
        v_return['v_error_id'] = 1
        return JsonResponse(v_return)
    v_session = request.session.get('omnidb_session')
    json_object = json.loads(request.POST.get('data', None))
    v_database_index = json_object['p_database_index']
    v_database = v_session.v_databases[v_database_index]['database']
    #Check database prompt timeout
    if v_session.DatabaseReachPasswordTimeout(v_database_index):
        v_return['v_data'] = {'password_timeout': True, 'message': '' }
        v_return['v_error'] = True
        return JsonResponse(v_return)
    v_list_databases = []
    try:
        v_databases = v_database.QueryDatabases()
        # Bug fix: the loop variable used to be 'v_database', shadowing the
        # connection object whose results it was iterating.
        for v_database_row in v_databases.Rows:
            v_database_data = {
                'v_name': v_database_row['database_name']
            }
            v_list_databases.append(v_database_data)
    except Exception as exc:
        v_return['v_data'] = str(exc)
        v_return['v_error'] = True
        return JsonResponse(v_return)
    v_return['v_data'] = v_list_databases
    return JsonResponse(v_return)
def get_tablespaces(request):
    """Return all tablespaces of the selected database as JSON."""
    response = {'v_data': '', 'v_error': False, 'v_error_id': -1}
    # Invalid session
    v_session = request.session.get('omnidb_session')
    if not v_session:
        response['v_error'] = True
        response['v_error_id'] = 1
        return JsonResponse(response)
    payload = json.loads(request.POST.get('data', None))
    v_database_index = payload['p_database_index']
    v_database = v_session.v_databases[v_database_index]['database']
    # Check database prompt timeout
    if v_session.DatabaseReachPasswordTimeout(v_database_index):
        response['v_data'] = {'password_timeout': True, 'message': ''}
        response['v_error'] = True
        return JsonResponse(response)
    try:
        rows = v_database.QueryTablespaces().Rows
        response['v_data'] = [{'v_name': row['tablespace_name']} for row in rows]
    except Exception as exc:
        response['v_data'] = str(exc)
        response['v_error'] = True
    return JsonResponse(response)
def get_roles(request):
    """Return all roles of the selected database as JSON."""
    response = {'v_data': '', 'v_error': False, 'v_error_id': -1}
    # Invalid session
    v_session = request.session.get('omnidb_session')
    if not v_session:
        response['v_error'] = True
        response['v_error_id'] = 1
        return JsonResponse(response)
    payload = json.loads(request.POST.get('data', None))
    v_database_index = payload['p_database_index']
    v_database = v_session.v_databases[v_database_index]['database']
    # Check database prompt timeout
    if v_session.DatabaseReachPasswordTimeout(v_database_index):
        response['v_data'] = {'password_timeout': True, 'message': ''}
        response['v_error'] = True
        return JsonResponse(response)
    try:
        rows = v_database.QueryRoles().Rows
        response['v_data'] = [{'v_name': row['role_name']} for row in rows]
    except Exception as exc:
        response['v_data'] = str(exc)
        response['v_error'] = True
    return JsonResponse(response)
def get_columns(request):
    """Return the columns (name/type/length/nullability) of a table as JSON."""
    v_response = {'v_data': '', 'v_error': False, 'v_error_id': -1}
    v_omnidb_session = request.session.get('omnidb_session')
    # Reject requests that carry no OmniDB session.
    if not v_omnidb_session:
        v_response['v_error'] = True
        v_response['v_error_id'] = 1
        return JsonResponse(v_response)
    v_request_data = json.loads(request.POST.get('data', None))
    v_db_index = v_request_data['p_database_index']
    v_table_name = v_request_data['p_table']
    v_schema_name = v_request_data['p_schema']
    v_connection = v_omnidb_session.v_databases[v_db_index]['database']
    # Ask the client to re-prompt once the password timeout has elapsed.
    if v_omnidb_session.DatabaseReachPasswordTimeout(v_db_index):
        v_response['v_data'] = {'password_timeout': True, 'message': ''}
        v_response['v_error'] = True
        return JsonResponse(v_response)
    try:
        v_response['v_data'] = [
            {
                'v_column_name': v_row['column_name'],
                'v_data_type': v_row['data_type'],
                'v_data_length': v_row['data_length'],
                'v_nullable': v_row['nullable']
            }
            for v_row in v_connection.QueryTablesFields(v_table_name, False, v_schema_name).Rows
        ]
    except Exception as exc:
        v_response['v_data'] = str(exc)
        v_response['v_error'] = True
    return JsonResponse(v_response)
def get_views_columns(request):
    """Return the columns (name/type/length) of a view as JSON."""
    v_response = {'v_data': '', 'v_error': False, 'v_error_id': -1}
    v_omnidb_session = request.session.get('omnidb_session')
    # Reject requests that carry no OmniDB session.
    if not v_omnidb_session:
        v_response['v_error'] = True
        v_response['v_error_id'] = 1
        return JsonResponse(v_response)
    v_request_data = json.loads(request.POST.get('data', None))
    v_db_index = v_request_data['p_database_index']
    v_view_name = v_request_data['p_table']
    v_schema_name = v_request_data['p_schema']
    v_connection = v_omnidb_session.v_databases[v_db_index]['database']
    # Ask the client to re-prompt once the password timeout has elapsed.
    if v_omnidb_session.DatabaseReachPasswordTimeout(v_db_index):
        v_response['v_data'] = {'password_timeout': True, 'message': ''}
        v_response['v_error'] = True
        return JsonResponse(v_response)
    try:
        v_response['v_data'] = [
            {
                'v_column_name': v_row['column_name'],
                'v_data_type': v_row['data_type'],
                'v_data_length': v_row['data_length']
            }
            for v_row in v_connection.QueryViewFields(v_view_name, False, v_schema_name).Rows
        ]
    except Exception as exc:
        v_response['v_data'] = str(exc)
        v_response['v_error'] = True
    return JsonResponse(v_response)
def get_pk(request):
    """Return the primary-key constraints of a table as [constraint, column] pairs."""
    v_response = {'v_data': '', 'v_error': False, 'v_error_id': -1}
    v_omnidb_session = request.session.get('omnidb_session')
    # Reject requests that carry no OmniDB session.
    if not v_omnidb_session:
        v_response['v_error'] = True
        v_response['v_error_id'] = 1
        return JsonResponse(v_response)
    v_request_data = json.loads(request.POST.get('data', None))
    v_db_index = v_request_data['p_database_index']
    v_table_name = v_request_data['p_table']
    v_schema_name = v_request_data['p_schema']
    v_connection = v_omnidb_session.v_databases[v_db_index]['database']
    # Ask the client to re-prompt once the password timeout has elapsed.
    if v_omnidb_session.DatabaseReachPasswordTimeout(v_db_index):
        v_response['v_data'] = {'password_timeout': True, 'message': ''}
        v_response['v_error'] = True
        return JsonResponse(v_response)
    try:
        v_response['v_data'] = [
            [v_row['constraint_name'], v_row['column_name']]
            for v_row in v_connection.QueryTablesPrimaryKeys(v_table_name, False, v_schema_name).Rows
        ]
    except Exception as exc:
        v_response['v_data'] = str(exc)
        v_response['v_error'] = True
    return JsonResponse(v_response)
def get_fks(request):
    """Return the foreign keys of a table as JSON.

    Each entry is a list: [constraint, column, referenced table,
    referenced column, delete rule, update rule].
    """
    v_response = {'v_data': '', 'v_error': False, 'v_error_id': -1}
    v_omnidb_session = request.session.get('omnidb_session')
    # Reject requests that carry no OmniDB session.
    if not v_omnidb_session:
        v_response['v_error'] = True
        v_response['v_error_id'] = 1
        return JsonResponse(v_response)
    v_request_data = json.loads(request.POST.get('data', None))
    v_db_index = v_request_data['p_database_index']
    v_table_name = v_request_data['p_table']
    v_schema_name = v_request_data['p_schema']
    v_connection = v_omnidb_session.v_databases[v_db_index]['database']
    # Ask the client to re-prompt once the password timeout has elapsed.
    if v_omnidb_session.DatabaseReachPasswordTimeout(v_db_index):
        v_response['v_data'] = {'password_timeout': True, 'message': ''}
        v_response['v_error'] = True
        return JsonResponse(v_response)
    try:
        v_response['v_data'] = [
            [
                v_row['constraint_name'],
                v_row['column_name'],
                v_row['r_table_name'],
                v_row['r_column_name'],
                v_row['delete_rule'],
                v_row['update_rule']
            ]
            for v_row in v_connection.QueryTablesForeignKeys(v_table_name, False, v_schema_name).Rows
        ]
    except Exception as exc:
        v_response['v_data'] = str(exc)
        v_response['v_error'] = True
    return JsonResponse(v_response)
def get_uniques(request):
    """Return the unique constraints of a table as [constraint, column] pairs."""
    v_response = {'v_data': '', 'v_error': False, 'v_error_id': -1}
    v_omnidb_session = request.session.get('omnidb_session')
    # Reject requests that carry no OmniDB session.
    if not v_omnidb_session:
        v_response['v_error'] = True
        v_response['v_error_id'] = 1
        return JsonResponse(v_response)
    v_request_data = json.loads(request.POST.get('data', None))
    v_db_index = v_request_data['p_database_index']
    v_table_name = v_request_data['p_table']
    v_schema_name = v_request_data['p_schema']
    v_connection = v_omnidb_session.v_databases[v_db_index]['database']
    # Ask the client to re-prompt once the password timeout has elapsed.
    if v_omnidb_session.DatabaseReachPasswordTimeout(v_db_index):
        v_response['v_data'] = {'password_timeout': True, 'message': ''}
        v_response['v_error'] = True
        return JsonResponse(v_response)
    try:
        v_response['v_data'] = [
            [v_row['constraint_name'], v_row['column_name']]
            for v_row in v_connection.QueryTablesUniques(v_table_name, False, v_schema_name).Rows
        ]
    except Exception as exc:
        v_response['v_data'] = str(exc)
        v_response['v_error'] = True
    return JsonResponse(v_response)
def get_indexes(request):
    """Return the indexes of a table as [name, uniqueness, column] triples."""
    v_response = {'v_data': '', 'v_error': False, 'v_error_id': -1}
    v_omnidb_session = request.session.get('omnidb_session')
    # Reject requests that carry no OmniDB session.
    if not v_omnidb_session:
        v_response['v_error'] = True
        v_response['v_error_id'] = 1
        return JsonResponse(v_response)
    v_request_data = json.loads(request.POST.get('data', None))
    v_db_index = v_request_data['p_database_index']
    v_table_name = v_request_data['p_table']
    v_schema_name = v_request_data['p_schema']
    v_connection = v_omnidb_session.v_databases[v_db_index]['database']
    # Ask the client to re-prompt once the password timeout has elapsed.
    if v_omnidb_session.DatabaseReachPasswordTimeout(v_db_index):
        v_response['v_data'] = {'password_timeout': True, 'message': ''}
        v_response['v_error'] = True
        return JsonResponse(v_response)
    try:
        v_response['v_data'] = [
            [v_row['index_name'], v_row['uniqueness'], v_row['column_name']]
            for v_row in v_connection.QueryTablesIndexes(v_table_name, False, v_schema_name).Rows
        ]
    except Exception as exc:
        v_response['v_data'] = str(exc)
        v_response['v_error'] = True
    return JsonResponse(v_response)
def get_functions(request):
    """Return the functions of a schema as JSON (name and internal id)."""
    v_response = {'v_data': '', 'v_error': False, 'v_error_id': -1}
    v_omnidb_session = request.session.get('omnidb_session')
    # Reject requests that carry no OmniDB session.
    if not v_omnidb_session:
        v_response['v_error'] = True
        v_response['v_error_id'] = 1
        return JsonResponse(v_response)
    v_request_data = json.loads(request.POST.get('data', None))
    v_db_index = v_request_data['p_database_index']
    v_schema_name = v_request_data['p_schema']
    v_connection = v_omnidb_session.v_databases[v_db_index]['database']
    # Ask the client to re-prompt once the password timeout has elapsed.
    if v_omnidb_session.DatabaseReachPasswordTimeout(v_db_index):
        v_response['v_data'] = {'password_timeout': True, 'message': ''}
        v_response['v_error'] = True
        return JsonResponse(v_response)
    try:
        v_response['v_data'] = [
            {'v_name': v_row['name'], 'v_id': v_row['id']}
            for v_row in v_connection.QueryFunctions(False, v_schema_name).Rows
        ]
    except Exception as exc:
        v_response['v_data'] = str(exc)
        v_response['v_error'] = True
    return JsonResponse(v_response)
def get_function_fields(request):
    """Return the parameter/return fields of a function as JSON (name and type)."""
    v_response = {'v_data': '', 'v_error': False, 'v_error_id': -1}
    v_omnidb_session = request.session.get('omnidb_session')
    # Reject requests that carry no OmniDB session.
    if not v_omnidb_session:
        v_response['v_error'] = True
        v_response['v_error_id'] = 1
        return JsonResponse(v_response)
    v_request_data = json.loads(request.POST.get('data', None))
    v_db_index = v_request_data['p_database_index']
    v_function_id = v_request_data['p_function']
    v_schema_name = v_request_data['p_schema']
    v_connection = v_omnidb_session.v_databases[v_db_index]['database']
    # Ask the client to re-prompt once the password timeout has elapsed.
    if v_omnidb_session.DatabaseReachPasswordTimeout(v_db_index):
        v_response['v_data'] = {'password_timeout': True, 'message': ''}
        v_response['v_error'] = True
        return JsonResponse(v_response)
    try:
        v_response['v_data'] = [
            {'v_name': v_row['name'], 'v_type': v_row['type']}
            for v_row in v_connection.QueryFunctionFields(v_function_id, v_schema_name).Rows
        ]
    except Exception as exc:
        v_response['v_data'] = str(exc)
        v_response['v_error'] = True
    return JsonResponse(v_response)
def get_function_definition(request):
    """Return the full source definition of a function as JSON."""
    v_response = {'v_data': '', 'v_error': False, 'v_error_id': -1}
    v_omnidb_session = request.session.get('omnidb_session')
    # Reject requests that carry no OmniDB session.
    if not v_omnidb_session:
        v_response['v_error'] = True
        v_response['v_error_id'] = 1
        return JsonResponse(v_response)
    v_request_data = json.loads(request.POST.get('data', None))
    v_db_index = v_request_data['p_database_index']
    v_function_id = v_request_data['p_function']
    v_connection = v_omnidb_session.v_databases[v_db_index]['database']
    # Ask the client to re-prompt once the password timeout has elapsed.
    if v_omnidb_session.DatabaseReachPasswordTimeout(v_db_index):
        v_response['v_data'] = {'password_timeout': True, 'message': ''}
        v_response['v_error'] = True
        return JsonResponse(v_response)
    try:
        v_response['v_data'] = v_connection.GetFunctionDefinition(v_function_id)
    except Exception as exc:
        v_response['v_data'] = str(exc)
        v_response['v_error'] = True
    return JsonResponse(v_response)
def get_sequences(request):
    """Return the sequence names of a schema as a JSON list."""
    v_response = {'v_data': '', 'v_error': False, 'v_error_id': -1}
    v_omnidb_session = request.session.get('omnidb_session')
    # Reject requests that carry no OmniDB session.
    if not v_omnidb_session:
        v_response['v_error'] = True
        v_response['v_error_id'] = 1
        return JsonResponse(v_response)
    v_request_data = json.loads(request.POST.get('data', None))
    v_db_index = v_request_data['p_database_index']
    v_schema_name = v_request_data['p_schema']
    v_connection = v_omnidb_session.v_databases[v_db_index]['database']
    # Ask the client to re-prompt once the password timeout has elapsed.
    if v_omnidb_session.DatabaseReachPasswordTimeout(v_db_index):
        v_response['v_data'] = {'password_timeout': True, 'message': ''}
        v_response['v_error'] = True
        return JsonResponse(v_response)
    try:
        v_response['v_data'] = [
            v_row['sequence_name']
            for v_row in v_connection.QuerySequences(False, v_schema_name).Rows
        ]
    except Exception as exc:
        v_response['v_data'] = str(exc)
        v_response['v_error'] = True
    return JsonResponse(v_response)
def get_view_definition(request):
    """Return the full source definition of a view as JSON."""
    v_response = {'v_data': '', 'v_error': False, 'v_error_id': -1}
    v_omnidb_session = request.session.get('omnidb_session')
    # Reject requests that carry no OmniDB session.
    if not v_omnidb_session:
        v_response['v_error'] = True
        v_response['v_error_id'] = 1
        return JsonResponse(v_response)
    v_request_data = json.loads(request.POST.get('data', None))
    v_db_index = v_request_data['p_database_index']
    v_view_name = v_request_data['p_view']
    v_schema_name = v_request_data['p_schema']
    v_connection = v_omnidb_session.v_databases[v_db_index]['database']
    # Ask the client to re-prompt once the password timeout has elapsed.
    if v_omnidb_session.DatabaseReachPasswordTimeout(v_db_index):
        v_response['v_data'] = {'password_timeout': True, 'message': ''}
        v_response['v_error'] = True
        return JsonResponse(v_response)
    try:
        v_response['v_data'] = v_connection.QueryViewDefinition(v_view_name, v_schema_name)
    except Exception as exc:
        v_response['v_data'] = str(exc)
        v_response['v_error'] = True
    return JsonResponse(v_response)
|
class Solution(object):
    def maximumProduct(self, nums):
        """Return the maximum product of any three numbers in nums.

        :type nums: List[int]  (len(nums) >= 3 assumed, per problem statement)
        :rtype: int

        The maximum product is either the product of the three largest
        values, or of the largest value and the two smallest (most
        negative) values.  One pass tracks both candidate triples.
        """
        # float('-inf') / float('inf') sentinels work on both Python 2 and 3;
        # the original used sys.maxint, which does not exist in Python 3.
        max1 = max2 = max3 = float('-inf')  # three largest values
        min1 = min2 = float('inf')          # two smallest values
        for num in nums:
            if num >= max1:
                max3 = max2
                max2 = max1
                max1 = num
            elif num >= max2:
                max3 = max2
                max2 = num
            elif num >= max3:
                max3 = num
            if num <= min1:
                min2 = min1
                min1 = num
            elif num <= min2:
                min2 = num
        return max(max1 * max2 * max3, max1 * min1 * min2)
from backend.myBluePrint.ericic.paramVerify.dataCenterCheck import DataCenterCheck
from backend.myBluePrint.ericic.service.dataCenterSercice import DataCenterService
from backend.customer.myCustomer import APiMethodView
from flask import request, make_response
import uuid
class DataCenter(APiMethodView):
    """CRUD API over data-center (CEE) instance records.

    All verbs return a Flask response whose JSON body carries
    ``code`` (int), ``status`` (bool) and, on success, ``data``.
    """
    check_cls = DataCenterCheck
    # @dec_auth
    def get(self, *args, **kwargs):
        """Return one instance (``?id=...``) or a paginated list of all.

        When ``id`` is present in the query string, the details of that
        instance are fetched; otherwise ``limit``/``offset`` (defaults
        10/0) page through the basic info of all instances.

        :return: response with {code, data, status} (plus ``total`` for
            the list scene).
        """
        dc_id = request.args.get('id')
        if dc_id:
            data = DataCenterService.select_instance(dc_id)
            if data:
                response = make_response(dict(status=True, data=data, code=200))
            else:
                response = make_response(dict(status=False, message="The data doesn't exist", code=400))
            return response
        # Query args arrive as strings; coerce them, falling back to the
        # defaults on bad input.  (The original guard
        # `if limit or offset is not int:` parsed as
        # `limit or (offset is not int)` and never actually checked types.)
        try:
            limit = int(request.args.get('limit', 10))
        except (TypeError, ValueError):
            limit = 10
        try:
            offset = int(request.args.get("offset", 0))
        except (TypeError, ValueError):
            offset = 0
        data = DataCenterService.select_instances(limit=limit, offset=offset)
        # select_instances returns (rows, total_count).
        total = data[1]
        response = make_response(dict(status=True, data=data[0], total=total, code=200))
        return response
    # @dec_auth
    def post(self, *args, **kwargs):
        """Insert exactly one instance from the JSON request body.

        Required body keys: name, country, province, city, system_name,
        cee_version, lcm_ip, lcm_user, lcm_pwd, openstackrc_dir,
        lcmrc_dir; ``mode`` is optional.

        :return: {code: 200, status: true, data: ...} on success, or a
            400 response when the name already exists.
        """
        dc_info = request.get_json()
        # uuid1().hex is exactly the 32-char hex digest without dashes
        # (equivalent to the former ''.join(str(uuid1()).split('-'))).
        first_id = uuid.uuid1().hex
        dc_name = dc_info["name"]
        mode = dc_info.get('mode')
        country = dc_info["country"]
        province = dc_info["province"]
        city = dc_info["city"]
        system_name = dc_info["system_name"]
        cee_version = dc_info['cee_version']
        lcm_ip = dc_info['lcm_ip']
        lcm_user = dc_info['lcm_user']
        lcm_pwd = dc_info['lcm_pwd']
        openstackrc_dir = dc_info['openstackrc_dir']
        lcmrc_dir = dc_info['lcmrc_dir']
        data = DataCenterService.add_instance(first_id, dc_name, mode,
                                              country, province, city, system_name,
                                              cee_version,
                                              lcm_ip,
                                              lcm_user, lcm_pwd,
                                              openstackrc_dir, lcmrc_dir)
        if data:
            return make_response(dict(status=True, message='Success add', data=data, code=200))
        # make_response(body, 400) sets the HTTP status code as well as the
        # payload code (same effect as returning a (response, 400) tuple).
        return make_response(dict(status=False, message='Name duplication', code=400), 400)
    # @dec_auth
    def delete(self, *args, **kwargs):
        """Delete one instance by the ``id`` in the JSON request body.

        :return: {code: 200, status: true, data: deleted_id} on success,
            or a failure payload when the id does not exist.
        """
        dc_info = request.get_json()
        dc_id = dc_info["id"]
        dc_del_id = DataCenterService.delete_instance(dc_id)
        if dc_del_id:
            return make_response(dict(status=True, message="Deletion succeeded by id", data=dc_del_id, code=200))
        # Include `code` for consistency with the other verbs' failure shape.
        return make_response(dict(status=False, message="Data does not exist", code=400))
    def _modified_response(self, instance_id):
        """Build the standard update response: 200 if the row exists, else 400."""
        if DataCenterService.select_instance(instance_id):
            return make_response(dict(status=True, message="modified success", code=200))
        return make_response(dict(status=False, message="The data you modified does not exist", code=400))
    # @dec_auth
    def put(self, *args, **kwargs):
        """Update the info of one instance by ``id`` from the JSON body.

        A falsy ``mode`` triggers the MOF-only update (location/system
        fields); otherwise the full record including LCM credentials is
        updated.

        :return: {code: 200, status: true} on success, 400 otherwise.
        """
        dc_info = request.get_json()
        first_id = dc_info["id"]
        mode = dc_info["mode"]
        if not mode:
            # MOF update: only the descriptive fields, no LCM credentials.
            country = dc_info["country"]
            province = dc_info["province"]
            city = dc_info["city"]
            system_name = dc_info["system_name"]
            cee_version = dc_info['cee_version']
            DataCenterService.update_mof_instance(first_id, mode, country, province, city, system_name, cee_version)
            return self._modified_response(first_id)
        country = dc_info["country"]
        province = dc_info["province"]
        city = dc_info["city"]
        system_name = dc_info["system_name"]
        cee_version = dc_info['cee_version']
        lcm_ip = dc_info['lcm_ip']
        lcm_user = dc_info['lcm_user']
        lcm_pwd = dc_info['lcm_pwd']
        openstackrc_dir = dc_info['openstackrc_dir']
        lcmrc_dir = dc_info['lcmrc_dir']
        DataCenterService.update_instance(first_id, mode, country, province, city, system_name,
                                          cee_version,
                                          lcm_ip,
                                          lcm_user, lcm_pwd,
                                          openstackrc_dir, lcmrc_dir)
        return self._modified_response(first_id)
|
# Uses python3
# Find the minimum number of coins needed to change the input value (an integer) into coins
# with denominations 1, 5, and 10.
# Input Format. The input consists of a single integer m.
# Constraints. 1 <= m <= 10^3.
# Output Format. Output the minimum number of coins with denominations 1, 5, 10 that changes m.
import sys
def get_change(m):
    """Return the minimum number of coins with denominations 10, 5, 1 that sum to m.

    Greedy is optimal here because each denomination divides the next
    larger one.  The loop form uses the `coins` list (previously defined
    but unused) and generalizes to any canonical, descending coin system.
    """
    coins = [10, 5, 1]
    count = 0
    for coin in coins:
        # Take as many of this coin as possible, then change the remainder.
        count += m // coin
        m %= coin
    return count
if __name__ == '__main__':
    # The whole of stdin is a single integer amount to change.
    amount = int(sys.stdin.read())
    print(get_change(amount))
|
from django.urls import path
from trivia_builder.views import (TriviaQuizDeleteView,
TriviaQuizUpdateView,
TriviaQuizCreateView,
TriviaQuizDetailView,
TriviaQuizListView)
# Route table for the trivia_builder app.  Each `name` below is the handle
# used by reverse()/{% url %} elsewhere in the project — do not rename.
urlpatterns = [
    path('id/<int:pk>/', TriviaQuizDetailView.as_view(), name='quiz-detail'),
    path('new/', TriviaQuizCreateView.as_view(), name='quiz-create'),
    path('<int:pk>/update/', TriviaQuizUpdateView.as_view(), name='quiz-update'),
    path('<int:pk>/delete/', TriviaQuizDeleteView.as_view(), name='quiz-delete'),
    path('', TriviaQuizListView.as_view(), name='quiz-list'),
]
|
import pygame
import db.db_service
# Orientation/state constants for a ship (see Ship.mode).
DefenseMode = 0
AttackMode = 1
# Ship type tuples, indexed as:
# (Id, Size, Moverange, Healthpoints, Firerange, Firepower, Cannon sound)
# TODO use a dictionary instead?
Scout = (0, 2, 4, 4, 3, 2, 'cannon_small')
Avenger = (1, 3, 3, 5, 4, 3, 'cannon_big')
QueenMary = (2, 4, 2, 6, 4, 4, 'cannon_big')
class Ship:
    """A player-owned ship on the game grid.

    Gameplay stats come from the `type` tuple (Scout/Avenger/QueenMary):
    (id, size, moverange, healthpoints, firerange, firepower, cannon sound).
    Constructing a ship also registers it on its tile and inserts a row
    into the `Boats` database table.
    """
    def __init__(self, tile, owner, type=Scout, mode=AttackMode):
        self.type = type
        self.id = type[0]
        self.x = tile.x
        self.y = tile.y
        self.tile = tile
        self.font = pygame.font.SysFont("monospace", 30, 1)
        self.image = pygame.image.load('resources/ships/' + str(type[0]) + '.png')
        self.rect = pygame.rect.Rect(self.x * self.tile.width, self.y * self.tile.height, self.tile.width, self.tile.height)
        self.owner = owner
        self.last_special_card_turn = 0
        self.tile.set_ship(self)
        self.fire_count = 0
        # Gameplay attributes of the ship
        self.mode = mode
        self.size = type[1]
        self.moverange = type[2]
        self.health = type[3]
        self.firerange = type[4]
        self.firepower = type[5]
        self.cannon_sound = type[6]
        # Whether this ship is disabled for a turn
        self.disabled = False
        # The amount of tiles the ship has left to move over
        self.remaining_tiles = self.moverange
        # The amount of remaining times a ship can attack other ship
        self.firelimit = 1
        # Card effects
        self.fmj_upgrade = False
        self.rifling = False
        self.emp = False
        self.better_rifling = False
        self.reinforced_hull = False
        self.applied_smokescreen = False
        self.sabotage = False
        self.extra_fuel = False
        self.extra_fuel_two = False
        self.rally = False
        self.adrenaline_rush = False
        self.repair = False
        self.mine_armor = False
        self.far_sight = False
        # NOTE(review): attribute name misspells "aluminium"; unused in this
        # file — confirm no external readers before renaming.
        self.alumininum_hull = False
        # Save the boat info in the database
        x = 0 + self.tile.x
        y = 0 + self.tile.y
        health = 0 + self.health
        BID = self.id
        mode = self.mode # 0 = Defense, 1 = Attack, for further coding.
        firerange = 0 + self.firerange
        firepower = 0 + self.firepower
        # For the coming: False = Negative, True = Applied
        applied_smokescreen = self.applied_smokescreen
        mine_armor = self.mine_armor
        sabotage = self.sabotage
        remaining_tiles = self.remaining_tiles
        # db.db_service.execute("TRUNCATE TABLE boats;")
        # NOTE(review): the INSERT below is built by string concatenation; if
        # db_service supports placeholders, switch to a parameterized query
        # (injection/formatting risk, even though all values here are internal).
        db.db_service.execute("INSERT INTO Boats (XPos, YPos, HP, BID, State, BRange, Attack, ShotDef, MineDef, ReflDef, BoatMovementLeft) VALUES (" + str(x) + "," + str(y) + "," + str(health) + "," + str(BID) + "," + str(mode) + "," + str(firerange) + "," + str(firepower) + "," + str(applied_smokescreen) + "," + str(mine_armor) + "," + str(sabotage) + "," + str(remaining_tiles) + ");")
    # Updates the grid and pixel coordinates of this ship
    def update_pos(self, x, y):
        self.x = x
        self.y = y
        self.rect.x = x * self.tile.width
        self.rect.y = y * self.tile.height
    # Returns a list of tile positions that this ship would occupy if it were in the specified mode.
    # Attack mode lays the ship out vertically; defense mode horizontally.
    def occupied_tile_pos(self, in_attack_mode):
        positions = []
        if in_attack_mode:
            for y_offset in range(self.size):
                y = self.y + y_offset
                positions.append((self.x, y))
        else:
            for x_offset in range(self.size):
                x = self.x + x_offset
                positions.append((x, self.y))
        return positions
    # Returns the appropriate move range (capped by the tiles still remaining this turn).
    def get_moverange(self):
        if self.remaining_tiles > self.moverange:
            return int(self.moverange)
        else:
            return int(self.remaining_tiles)
    # Switches the state of this ship to Attack mode.
    def switch_attack_mode(self):
        if self.mode != AttackMode:
            self.mode = AttackMode
            self.transform(-90)
    # Switches the state of this ship to Defense mode.
    def switch_defense_mode(self):
        if self.mode != DefenseMode:
            self.mode = DefenseMode
            self.transform(90)
    # Transforms the image to be set to the specified angle.
    def transform(self, angle):
        self.image = pygame.transform.rotate(self.image, angle)
    # Returns whether this ship is currently in Attack mode or not.
    def in_attack_mode(self):
        return self.mode == AttackMode
    # Returns whether this ship is currently in Defense mode or not.
    def in_defense_mode(self):
        return self.mode == DefenseMode
    # Returns whether this ship has reached its firing limit of 1.
    def reached_fire_limit(self):
        return self.fire_count == self.firelimit
    # Returns whether this ship has reached its moving limit of 1.
    def reached_move_limit(self):
        return self.remaining_tiles == 0
    # Resets all of its action counters (called at the start of a turn).
    def reset_counts(self):
        self.remaining_tiles = self.moverange
        self.fire_count = 0
    # Resets the deactivation of this ship
    def reset_deactivation(self):
        self.disabled = False
    # Turns the image of this ship into a wreck.
    def wreck(self):
        self.image = pygame.image.load('resources/ships/3.png')
        self.size = 1
    # Apply card effects
    def apply_card_effect(self, card):
        """Apply the effect identified by card.id to this ship's stats/flags."""
        # Normal cards
        if card.id == 'refh': # Refinement Hull, adds a health point to the ship
            self.health += 1
        elif card.id == 'fuel': # Fuel, adds one extra tile to your movement capacity
            self.remaining_tiles += 1
        elif card.id == 'fue2': # Extra fuel, adds two extra tiles to your movement capacity
            self.remaining_tiles += 2
        elif card.id == 'adr': # Adrenaline Rush, adds a second chance to move the ship around
            self.remaining_tiles += self.moverange
        elif card.id == 'rif': # Normal rifling, increases firerange by 1
            self.firerange += 1
        elif card.id == 'arif': # Advanced rifling, increases firerange by 2
            self.firerange += 2
        elif card.id == 'fmj': # FMJ Upgrade, adds one point to the current firepower
            self.firepower += 1
        elif card.id == 'rally': # Rally, adds extra movement to a ship for a single turn
            self.remaining_tiles += 1
        elif card.id == 'sab': # Sabotage, bouncing an attack back to the attacker
            self.sabotage = True
        elif card.id == 'emp': # EMP Upgrade, deactivating an opponent ship for one turn
            self.emp = True
        elif card.id == 'smok': # Smokescreen, dismissing an attack.
            self.applied_smokescreen = True
        # Special cards
        elif card.id == 'rep': # Repairs the ship
            self.restore_health()
        elif card.id == 'alu': # Aluminium Hall, increasing the current moverange
            self.moverange *= 2 # TODO instead make the ship able to move twice?
            self.remaining_tiles = self.moverange
        # NOTE(review): the 'far:' id includes a trailing colon — looks like a
        # typo for 'far'; confirm against the card definitions before fixing.
        elif card.id == 'far:': # Far sight, increasing the current firerange
            self.firerange += 2
        elif card.id == 'flak': # Mine armor, invulnerable against mines
            self.mine_armor = True
        print(card.id)
    # Resets all of the card flags of attack related effects that only last for a single attack.
    def reset_attack_effects(self):
        self.fmj_upgrade = False
        self.rifling = False
        self.better_rifling = False
        # We do not flag `sabotage` and `applied_smokescreen` here as these last until they are activated.
        # Reset all of the stats back to its original state
        self.reset_firepower()
        self.reset_firerange()
    # Restores this ship's health
    def restore_health(self):
        self.health = self.type[3]
    # Resets the firerange back to its original state
    def reset_firerange(self):
        self.firerange = self.type[4]
    # Resets the firepower back to its original state
    def reset_firepower(self):
        self.firepower = self.type[5]
    # Updates the state of this ship per frame.
    def update(self):
        pass
    # Draws this ship onto the given surface, with its health rendered above
    # (or below, for ships on the top row) the sprite.
    def draw(self, surface):
        surface.blit(self.image, self.rect)
        draw_x = self.x
        draw_y = self.y
        if self.y == 0:
            draw_y += 1
        ship_health = self.font.render(str(self.health), 1, (0, 255, 0))
        ship_health_x = draw_x * self.tile.width
        ship_health_y = draw_y * self.tile.height
        surface.blit(ship_health, (ship_health_x + self.tile.width / 4, ship_health_y - 15))
    # Translates the mode id to a reusable name.
    def mode_id_to_name(self):
        if self.mode == AttackMode:
            return "Attack"
        else:
            return "Defense"
import os, time, string, re
import numpy as np
if __name__ == "__main__":
    # Build the vocabulary (array of uppercase word tokens) from the training
    # reviews: strip punctuation, uppercase, and drop leftover HTML "<br>"
    # artifacts.
    vocab = np.asarray([])
    with open("test_train.txt") as fx:
        for review in fx:
            tokens = re.sub(r'[^\w\s]', '', review).upper().split()
            # The tokens were uppercased above, so the HTML line-break tag
            # appears as "BR" (the original lowercase `i != "br"` test could
            # never match and filtered nothing).
            tokens = [t for t in tokens if t != "BR"]
            vocab = np.append(vocab, tokens)
    # print() works on both Python 2 and 3 for a single argument.
    print(vocab)
from django.contrib.admin.sites import AdminSite
from jobadvisor.polls.admin import VariantInline
from jobadvisor.polls.models import Variant
# def test_variant_admin(rf):
# request = rf.get("")
# variant_inline = VariantInline(parent_model=Variant, admin_site=AdminSite())
# assert not variant_inline.has_add_permission(request)
|
# Read a word and report its most frequent letter, case-insensitively:
# print the uppercase letter, or '?' when the top count is shared by
# more than one letter.
word = input()
count_list = [0] * 26
for letter in word:
    code = ord(letter)
    if 64 < code <= 90:        # 'A'..'Z'
        count_list[code - 65] += 1
    elif 96 < code <= 122:     # 'a'..'z'
        count_list[code - 97] += 1
top = max(count_list)
if count_list.count(top) > 1:
    print('?')
else:
    print(chr(count_list.index(top) + 65))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 14 10:28:01 2017
@author: kthomas1
"""
next_week = 3
import warnings
warnings.filterwarnings('ignore')
# import pandas and numpy for
import pandas as pd
import numpy as np
# connect to PostgreSQL
import psycopg2
conn=psycopg2.connect("dbname='nfldb' user='kthomas1' host='localhost' password='' port=5432")
# query game results
game_results=pd.read_sql("""select season_year, week, home_team, home_score, away_team, away_score
from game
where season_type='Regular'""",con=conn)
# replace la with stl
game_results.replace(to_replace='LA', value='STL', inplace=True)
# compute wins and ties
game_results['home_win'] = [1 if x>y else 0 for x,y in zip(game_results['home_score'],game_results['away_score'])]
game_results['away_win'] = [1 if x<y else 0 for x,y in zip(game_results['home_score'],game_results['away_score'])]
game_results['tie'] = [1 if x==y else 0 for x,y in zip(game_results['home_score'],game_results['away_score'])]
# sort the dataframe
game_results=game_results.sort_values(by=['season_year','home_team','week'])
# rename the year
game_results=game_results.rename(columns = {'season_year':'year'})
# print first 10 entries
game_results.head(10)
# total number of games
total_games = len(game_results)
# total number of home wins
home_wins = game_results.home_win.sum()
# total home wins/total number of games
home_win_rate = home_wins/total_games
print("Home Team Win Rate: {:.2f}% ".format(home_win_rate*100))
stats=pd.read_sql("""select drive.pos_team, drive.drive_id, drive.pos_time, drive.first_downs, drive.yards_gained, drive.play_count, drive.result, game.season_year, game.week, game.season_type, game.home_team, game.away_team
from drive
inner join game on drive.gsis_id=game.gsis_id
where season_type='Regular'""",con=conn)
#replace la with stl
stats.replace(to_replace='LA', value='STL', inplace=True)
stats.replace(to_replace='LAC', value='SD', inplace=True)
# encode points results
stats['points'] = [3 if x=="Field Goal" else 7 if x=="Touchdown" else 0 for x in stats['result']]
# encode turnover results
stats['turnover'] = [1 if x==("Interception" or "Fumble" or "Safety" or "Blocked FG" or "Fumble, Safety" or "Blocked Punt" or "Blocked Punt, Downs" or "Blocked FG, Downs") else 0 for x in stats['result']]
# look at the table
stats.head(10)
#add next weeks stats
# 2017 subset of results; used later when selecting the latest completed week
games_17 = game_results[game_results['year']==2017]
# one placeholder row per team (32 teams) for the upcoming week so the rolling
# windows below also produce features for games that have not been played yet
nweek = pd.DataFrame({
    'year': np.array([2017] * 32,dtype='int32'),
    'week': np.array([next_week] * 32,dtype='int32'),
    'team': np.array((list(set(stats.pos_team))),dtype=str)})
# BUG FIX: take real copies. The originals were plain aliases (nweek_d, nweek_o
# and stats_d all pointed at the same underlying objects), so the column
# mutations below leaked into `stats` and into every alias of `nweek`.
nweek_d = nweek.copy()
nweek_o = nweek.copy()
# defense
stats_d = stats.copy()
# the defending team is whichever side does not possess the ball on the drive
stats_d['opp_team'] = np.where(stats_d['pos_team']==stats_d['home_team'], stats_d['away_team'], stats_d['home_team'])
#subset to defensive stats
stats_d = stats_d[['season_year','week','opp_team','yards_gained','points','turnover']]
# rename columns
stats_d.columns = ['year','week','team','yards_allowed','points_allowed','turnovers_forced']
#add next week
stats_d = stats_d.append(nweek_d)
# aggregate rolling 5 week
## sort at year, team, week
stats_d.sort_values(by=['team','year','week'],inplace=True)
## sum across year team week
stats_d=stats_d.groupby(by=['team','year','week'],as_index=False).sum()
## rolling 5-week sum, shifted one row so each week only sees earlier weeks
rolling = stats_d.groupby(['team'],as_index=False)['yards_allowed','points_allowed','turnovers_forced'].rolling(5).sum().shift(1).reset_index()
## join together
stats_d=stats_d.join(rolling,lsuffix='_weekly',rsuffix='_rolling')
stats_d.head(10)
# offense
# BUG FIX: copy instead of aliasing `stats` so the renames and column subsetting
# here cannot leak back into the shared frame used by the defensive pipeline.
stats_o = stats.copy()
stats_o=stats_o.rename(columns = {'pos_team':'team'})
stats_o=stats_o.rename(columns = {'season_year':'year'})
# keep only offensive production columns
stats_o = stats_o[['team','year','week','first_downs','yards_gained','play_count','points','turnover']]
#add next week
stats_o = stats_o.append(nweek_o)
# aggregate rolling 5 week
## sort at year, team, week
stats_o.sort_values(by=['team','year','week'],inplace=True)
## sum across year team week
stats_o=stats_o.groupby(by=['team','year','week'],as_index=False).sum()
## rolling 5-week sum, shifted one row so each week only sees earlier weeks
rolling = stats_o.groupby(['team'],as_index=False)['first_downs','yards_gained','play_count','points','turnover'].rolling(5).sum().shift(1).reset_index()
## join together
stats_o=stats_o.join(rolling,lsuffix='_weekly',rsuffix='_rolling')
stats_o.head(10)
# drop the MultiIndex level columns introduced by reset_index on the rolling sums
stats_o = stats_o.drop(['level_0','level_1'], axis=1)
stats_d = stats_d.drop(['level_0','level_1'], axis=1)
# combine offense and defense stats side by side
stats_od=pd.concat([stats_d,stats_o],axis=1)
# drop the duplicated team/year/week key columns produced by the concat
stats_od=stats_od.T.drop_duplicates().T
# FIX: removed a dead store `x = pd.merge(stats_d, stats_o, ...)` whose result
# was never read anywhere in the script.
# drop the year 2009 because of the blank weeks
stats_od=stats_od[stats_od['year']!=2009]
# drop the weekly stats because we won't be needing them
weekly_stats = [col for col in stats_od if col.endswith('weekly')]
stats_od = stats_od.drop(weekly_stats, axis=1)
# convert to numeric
stats_od=stats_od.apply(pd.to_numeric, errors='ignore')
# create a new games dataframe from game_results
games = game_results
# short names: yards-allowed, points-allowed, turnovers-forced, first-downs,
# yards-gained, play-count, points, turnovers
stats_od.columns=['team','year','week','ya','pa','tf','fd','yg','pc','p','t']
# merge game results with stats; there need to be two merges because both home and away teams need statistics
games=pd.merge(pd.merge(games,stats_od,left_on=['home_team','year','week'],right_on=['team','year','week']),stats_od,left_on=['away_team','year','week'],right_on=['team','year','week'],suffixes=['_home','_away'])
# compute home-minus-away diffs for each variable
diffs=['ya','pa','tf','fd','yg','pc','p','t']
for i in diffs:
    diff_column = i + "_diff"
    home_column = i + "_home"
    away_column = i + "_away"
    games[diff_column] = games[home_column] - games[away_column]
# we only need the diffs, so drop all the home/away specific stats columns
home = [col for col in games if col.endswith('home')]
away = [col for col in games if col.endswith('away')]
games = games.drop(home,axis=1)
games = games.drop(away,axis=1)
import statsmodels.api as sm
# create past games df that will be used to train our model
# FIX: materialize with .copy() -- the boolean-mask slices below are views, and
# the `intercept` column assignments trigger pandas' SettingWithCopyWarning
# (and can silently fail to stick) when made on a view.
# NOTE(review): `games['week']!=max(games_17.week)` excludes that week number in
# *every* season, not just the current one -- confirm this is intended.
past_games = games[(games['year']!=max(games.year)) & (games['week']!=max(games_17.week))].copy()
# create future games df that will be predicted using our trained model
future_games = games[(games['year']==max(games.year)) & (games['week']==next_week)].copy()
# statsmodels does not add an intercept automatically, so add it explicitly
past_games['intercept'] = 1.0
future_games['intercept'] = 1.0
# our training columns will be the diffs
training_cols = [col for col in games if col.endswith('diff')]
# need to add the intercept column
training_cols = training_cols + ["intercept"]
# perform the regression
logit = sm.Logit(past_games['home_win'], past_games[training_cols])
# save the results and print
result = logit.fit()
print(result.summary())
# odds ratios for each predictor
print(np.exp(result.params))
# predict the results
preds=result.predict(future_games[training_cols])
# add probabilities to next week
future_games['win_prob'] = preds
# home team wins if team has greater than 50% chance of winning
future_games['winner'] = np.where(future_games['win_prob']>.5,future_games['home_team'],future_games['away_team'])
# show select columns
future_games[['home_team','away_team','winner','win_prob']]
# import sklearn's logistic regression for a second, independent fit
from sklearn.linear_model import LogisticRegression
# define sklearn logit with default intercept
logit = LogisticRegression(fit_intercept=True)
# fit the logistic regression on the historical games
logit.fit(past_games[training_cols],past_games['home_win'])
# predict the class (0/1 home win) for next week's games
preds=logit.predict(future_games[training_cols])
future_games['prediction'] = preds
# winner is the home team when the predicted class is 1, otherwise the away team
future_games['winner'] = np.where(future_games['prediction']==1,future_games['home_team'],future_games['away_team'])
# probability of the positive class (home win)
future_games['win_prob'] = logit.predict_proba(future_games[training_cols])[:,1]
future_games[['home_team','away_team','winner','win_prob']]
from sklearn import preprocessing
from sklearn import metrics
# FIX: sklearn.cross_validation was deprecated in 0.18 and removed in 0.20; its
# contents live in sklearn.model_selection.
from sklearn.model_selection import cross_val_score
# scale the training features to zero mean / unit variance
past_games_scaled = pd.DataFrame(preprocessing.scale(past_games[training_cols]))
# 10-fold cross-validated accuracy of the sklearn logit defined above
scores = cross_val_score(logit, past_games_scaled, past_games['home_win'], cv=10)
# accuracy
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
# import cross validated logistic regression
from sklearn.linear_model import LogisticRegressionCV
# define sklearn logit with default intercept; regularization strength chosen by CV
logitcv = LogisticRegressionCV()
# fit on the raw (unscaled) diffs, matching the earlier non-CV fit
logitcv.fit(past_games[training_cols],past_games['home_win'])
preds=logitcv.predict(future_games[training_cols])
future_games['prediction'] = preds
future_games['winner'] = np.where(future_games['prediction']==1,future_games['home_team'],future_games['away_team'])
future_games['win_prob'] = logitcv.predict_proba(future_games[training_cols])[:,1]
future_games[['home_team','away_team','winner','win_prob']]
import argparse
import math
import os
import pdb
import pickle
import random
import shutil
import time
from pprint import pprint
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
import torchvision
import torchvision.models as models
import torchvision.transforms as transforms
from cirtorch.datasets.datahelpers import cid2filename, collate_tuples
from cirtorch.datasets.testdataset import configdataset
from cirtorch.datasets.traindataset import TuplesDataset
from cirtorch.examples.attack.myutil.baseline import result as baseline_result
from cirtorch.examples.attack.myutil.mi_sgd import (MI_SGD, SIGN_AdaBound,
SIGN_Adam)
from cirtorch.examples.attack.myutil.sfm_dataset import SfMDataset
from cirtorch.examples.attack.myutil.triplet_dataset import MyTripletDataset
from cirtorch.examples.attack.myutil.utils import (MultiLoss, bcolors,
do_whiten, idcg, inv_gfr,
one_hot, rescale_check)
from cirtorch.layers.loss import ContrastiveLoss
from cirtorch.networks.imageretrievalnet import extract_vectors, init_network
from cirtorch.utils.download import download_test, download_train
from cirtorch.utils.evaluate import compute_map_and_print
from cirtorch.utils.general import get_data_root, htime
from cirtorch.utils.whiten import whitenapply, whitenlearn
# Echo this script's own source to stdout (logs the exact code used in a run).
# FIX: use a context manager so the handle is always closed, and stop reusing
# the name `f` for both the path string and the file object.
with open(os.path.realpath(__file__), "r") as _self_src:
    print(_self_src.read())
# Whitelists used to validate the CLI arguments defined below.
training_dataset_names = ["retrieval-SfM-120k", "Landmarks"]
test_datasets_names = [
    "oxford5k,paris6k",
    "roxford5k,rparis6k",
    "oxford5k,paris6k,roxford5k,rparis6k",
]
test_whiten_names = ["retrieval-SfM-30k", "retrieval-SfM-120k"]
# All lowercase, public, callable entries exported by torchvision.models.
model_names = sorted(
    name
    for name in models.__dict__
    if name.islower() and not name.startswith("__") and callable(models.__dict__[name])
)
base = {}  # on-disk cache of database feature vectors, keyed by dataset then image size
MAX_EPS = 10.0 / 255  # max L-inf magnitude of the perturbation (overwritten in main from --max-eps)
MODE = "bilinear"  # interpolation mode used when resizing the noise to an image
# Command-line interface.
# FIX: several help strings advertised defaults that did not match the actual
# `default=` values (test-datasets, workers, batch-size, print-freq,
# loss-margin); the help text now reflects the real defaults.
parser = argparse.ArgumentParser(description="PyTorch CNN Image Retrieval Training")
# export directory, training and val datasets, test datasets
parser.add_argument(
    "--test-datasets",
    "-td",
    metavar="DATASETS",
    default="oxford5k,paris6k,roxford5k,rparis6k",
    choices=test_datasets_names,
    help="comma separated list of test datasets: "
    + " | ".join(test_datasets_names)
    + " (default: oxford5k,paris6k,roxford5k,rparis6k)",
)
parser.add_argument(
    "--network-path", help="network path, destination where network is saved"
)
parser.add_argument(
    "--image-size",
    default=1024,
    type=int,
    metavar="N",
    help="maximum size of longer image side used for training (default: 1024)",
)
# standard train/val options
parser.add_argument(
    "--gpu-id",
    "-g",
    default="0",
    metavar="N",
    help="gpu id used for training (default: 0)",
)
parser.add_argument(
    "--workers",
    "-j",
    default=1,
    type=int,
    metavar="N",
    help="number of data loading workers (default: 1)",
)
parser.add_argument(
    "--epochs",
    default=100,
    type=int,
    metavar="N",
    help="number of total epochs to run (default: 100)",
)
parser.add_argument(
    "--batch-size",
    "-b",
    default=1,
    type=int,
    metavar="N",
    help="number of (q,p,n1,...,nN) tuples in a mini-batch (default: 1)",
)
parser.add_argument(
    "--print-freq",
    default=500,
    type=int,
    metavar="N",
    help="print frequency (default: 500)",
)
parser.add_argument("--noise-path", type=str, help="noise path")
parser.add_argument(
    "--loss-margin",
    "-lm",
    metavar="LM",
    default=0.8,
    type=float,
    help="loss margin: (default: 0.8)",
)
parser.add_argument(
    "--image-size-L", default=256, type=int, help="min of image size for random"
)
parser.add_argument(
    "--image-size-H", default=1024, type=int, help="max of image size for random"
)
parser.add_argument("--noise-size", default=1024, type=int, help="noise-size")
# loss term toggles; any combination may be enabled at once
parser.add_argument(
    "--point_wise", dest="point_wise", action="store_true", help="point-wise loss"
)
parser.add_argument(
    "--label_wise", dest="label_wise", action="store_true", help="label-wise loss"
)
parser.add_argument(
    "--pair_wise", dest="pair_wise", action="store_true", help="pair-wise loss"
)
parser.add_argument(
    "--list_wise", dest="list_wise", action="store_true", help="list_wise loss"
)
parser.add_argument("--max-eps", default=10, type=int, help="max eps")
args = parser.parse_args()
pprint(args)
def main():
    """Train a universal adversarial perturbation against a retrieval model.

    Loads the victim network from args.network_path, optimizes a single noise
    tensor over a triplet dataset, evaluates the fooling rate on the test
    datasets each epoch, and keeps the best noise (early stop after 5 epochs
    without improvement).
    """
    global base
    global MAX_EPS
    # --max-eps is given in 0-255 pixel units; convert to the [0, 1] scale.
    MAX_EPS = args.max_eps / 255.0
    # load the cached database feature vectors for this network, if present
    fname = args.network_path.replace("/", "_") + ".pkl"
    if os.path.exists(f"base/{fname}"):
        with open(f"base/{fname}", "rb") as f:
            base = pickle.load(f)
    # for saving noise
    os.makedirs(args.noise_path, exist_ok=True)
    # set cuda visible device and seed everything for reproducibility
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    torch.manual_seed(1234)
    torch.cuda.manual_seed_all(1234)
    np.random.seed(1234)
    random.seed(1234)
    torch.backends.cudnn.deterministic = True
    # load retrieval model weights and rebuild the network around them
    state = torch.load(args.network_path)
    model = init_network(
        model=state["meta"]["architecture"],
        pooling=state["meta"]["pooling"],
        whitening=state["meta"]["whitening"],
        mean=state["meta"]["mean"],
        std=state["meta"]["std"],
        pretrained=False,
    )
    model.load_state_dict(state["state_dict"])
    model.meta["Lw"] = state["meta"]["Lw"]
    model.cuda()
    # perturbation to be trained: one RGB noise image, resized per input later
    noise = torch.zeros((3, args.noise_size, args.noise_size)).cuda()
    print(state["meta"]["architecture"])
    print(state["meta"]["pooling"])
    noise.requires_grad = True
    # sign-based momentum SGD; lr is a tenth of the eps budget
    optimizer = MI_SGD(
        [
            {"params": [noise], "lr": MAX_EPS / 10, "momentum": 1, "sign": True},
            # {"params": [noise], "lr": 1e-2, "momentum": 1, "sign": True},
        ],
        max_eps=MAX_EPS,
    )
    print(optimizer)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=math.exp(-0.01))
    # Data loading code; normalization is applied manually inside train(),
    # which is why it is commented out of the transform below.
    normalize = transforms.Normalize(mean=model.meta["mean"], std=model.meta["std"])
    transform = transforms.Compose(
        [
            transforms.ToTensor(),
            # normalize,
        ]
    )
    mean = torch.Tensor(normalize.mean).view(3, 1, 1)
    std = torch.Tensor(normalize.std).view(3, 1, 1)
    # dataloader
    val_dataset = MyTripletDataset(
        imsize=(args.image_size_L, args.image_size_H),
        transform=transform,
        norm=(mean, std),
        filename="base/" + args.network_path.replace("/", "_") + "_triplet",
    )
    val_dataset.create_epoch_tuples(model)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True,
        worker_init_fn=lambda _: random.seed(1234),
    )
    # load classifier model (only needed for the label-wise loss term)
    if args.label_wise:
        classification_model = torch.load(
            "base/" + args.network_path.replace("/", "_") + "_triplet.KMeans_cls.pth"
        )
    else:
        classification_model = None
    noise_best = None
    min_loss = float("inf")
    min_epoch = -1
    for epoch in range(args.epochs):
        # set manual seeds per epoch
        np.random.seed(epoch + 1234)
        torch.manual_seed(epoch + 1234)
        torch.cuda.manual_seed_all(epoch + 1234)
        random.seed(epoch + 1234)
        # train for one epoch on train set (decay lr first)
        scheduler.step()
        begin_time = time.time()
        loss, noise = train(
            val_loader,
            model,
            noise,
            epoch,
            normalize,
            classification_model,
            optimizer,
            None,
        )
        print("epoch time", time.time() - begin_time)
        # evaluate on test datasets; test() returns the inverse fooling metric,
        # so lower is a stronger attack
        loss = test(args.test_datasets, model, noise.cpu(), 1024)
        print(bcolors.str(f"test fgr: {1-loss}", bcolors.OKGREEN))
        # remember best loss and save checkpoint
        is_best = loss < min_loss
        min_loss = min(loss, min_loss)
        save_noise(noise, is_best, epoch)
        if is_best:
            min_epoch = epoch
            noise_best = noise.clone().detach()
        # early stopping: no improvement for more than 5 epochs
        if epoch - min_epoch > 5:
            break
    print("Best")
    loss = test(args.test_datasets, model, noise_best.cpu(), 1024)
    print(bcolors.str(f"test fgr: {1-loss}", bcolors.OKGREEN))
def train(train_loader, model, noise, epoch, normalize, cls, optimizer, multiLoss):
    """Optimize the universal perturbation for one epoch.

    train_loader: data loader yielding batches of query images in [0, 1]
    model: victim retrieval model (kept in eval mode; only `noise` is trained)
    noise: perturbation tensor to be optimized
    epoch: current epoch (for logging)
    normalize: data normalize parameter (mean/std applied manually below)
    cls: classification model (used only for the label-wise loss)
    optimizer: optimizer stepping `noise`
    multiLoss: multi loss (unused here; callers pass None)

    Returns (average loss, noise).
    """
    global args
    noise.requires_grad = True
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    model.eval()
    # normalize: broadcastable (1, 3, 1, 1) mean/std tensors on the GPU
    mean = normalize.mean
    std = normalize.std
    mean = torch.Tensor(mean).view(1, 3, 1, 1).cuda()
    std = torch.Tensor(std).view(1, 3, 1, 1).cuda()
    # whitening parameters learned on retrieval-SfM-120k
    Lw = model.meta["Lw"]["retrieval-SfM-120k"]["ss"]
    Lw_m = torch.from_numpy(Lw["m"]).cuda().float()
    Lw_p = torch.from_numpy(Lw["P"]).cuda().float()
    # cluster centers and the per-cluster pools of base features
    pool_clusters_centers = train_loader.dataset.pool_clusters_centers.cuda().float()
    clustered_pool = train_loader.dataset.clustered_pool
    end = time.time()
    optimizer.zero_grad()
    optimizer.rescale()
    # NOTE: `input` shadows the builtin; kept as-is to preserve the original code.
    for i, (input) in enumerate(train_loader):
        # measure data loading time.
        data_time.update(time.time() - end)
        model.zero_grad()
        input = input.cuda()
        # clean (unperturbed) feature of the batch; no grad needed
        with torch.no_grad():
            norm_output = (input - mean) / std
            feature = model(norm_output)
            feature = do_whiten(feature, Lw_m, Lw_p).detach()
        optimizer.zero_grad()
        # resize the shared noise to this image's spatial size
        current_noise = noise
        current_noise = F.interpolate(
            current_noise.unsqueeze(0),
            mode=MODE,
            size=tuple(input.shape[-2:]),
            align_corners=True,
        ).squeeze()
        # add noise in pixel space, clamp to valid range, then normalize
        perturted_input = torch.clamp(input + current_noise, 0, 1)
        perturbed_input = (perturted_input - mean) / std
        perturbed_feature = model(perturbed_input)
        perturbed_feature = do_whiten(perturbed_feature, Lw_m, Lw_p)
        # pair-wise: triplet loss pulling the perturbed feature toward the most
        # dissimilar cluster center and away from the most similar one
        if args.pair_wise:
            with torch.no_grad():
                scores = torch.mm((pool_clusters_centers), feature)
                scores, ranks = torch.sort(scores, dim=0, descending=True)
                pos_i = ranks[0, 0].item()
                neg_i = ranks[-1, 0].item()
                # neg_feature = torch.from_numpy(
                #     np.concatenate(
                #         (
                #             clustered_pool[neg_i][
                #                 np.random.choice(clustered_pool[neg_i].shape[0]), :
                #             ].reshape(1, -1),
                #         )
                #     )
                # ).cuda()
                # pos_feature = (
                #     torch.from_numpy(
                #         clustered_pool[pos_i][
                #             np.random.choice(clustered_pool[pos_i].shape[0]), :
                #         ]
                #     )
                #     .cuda()
                #     .unsqueeze(0)
                # )
                neg_feature = pool_clusters_centers[neg_i, :].view(1, -1)
                pos_feature = pool_clusters_centers[pos_i, :].view(1, -1)
            perturbed_feature = perturbed_feature.t()
            # neg_feature = torch.cat((neg_feature, -feature.t()))
            # pos_feature = torch.cat((pos_feature, feature.t()))
            # perturbed_feature = torch.cat(
            #     (perturbed_feature.t(), perturbed_feature.t())
            # )
            # scale factor sharpens the margin; presumably tuned empirically
            neg_feature = neg_feature * 10
            pos_feature = pos_feature * 10
            perturbed_feature = perturbed_feature * 10
            # note argument order: `neg_feature` plays the positive role, i.e.
            # the attack pushes the perturbed feature toward the far cluster
            pair_loss = F.triplet_margin_loss(
                perturbed_feature, neg_feature, pos_feature, args.loss_margin
            )
        else:
            pair_loss = torch.zeros(1).cuda()
        # point-wise: cosine similarity between clean and perturbed features,
        # mapped to [0, 1]; minimizing it pushes the features apart
        if args.point_wise:
            point_loss = (
                torch.dot(perturbed_feature.squeeze(), feature.squeeze()) + 1
            ) / 2
        else:
            point_loss = torch.zeros(1).cuda()
        # label-wise: margin loss that drives the classifier away from the
        # label it assigns to the clean feature
        if args.label_wise:
            actual_pred = cls(feature.t())
            perturbed_pred = cls(perturbed_feature.t())
            actual_label = actual_pred.max(1, keepdim=True)[1].item()
            one_hot_actual_label = one_hot(
                perturbed_pred.size(1), torch.LongTensor([actual_label]).cuda()
            ).float()
            label_loss = F.relu(
                (perturbed_pred * one_hot_actual_label).sum()
                - (perturbed_pred * (1 - one_hot_actual_label)).max()
            )
        else:
            label_loss = torch.zeros(1).cuda()
        # list-wise: LambdaRank-style update; gradients are injected directly
        # via scores.backward(lambs), so list_loss stays zero for logging
        if args.list_wise:
            clean_scores = torch.mm((pool_clusters_centers), feature)
            _, clean_ranks = torch.sort(clean_scores, dim=0, descending=True)
            # pos_i = clean_ranks[:256, :].squeeze()
            # neg_i = clean_ranks[256:, :].squeeze()
            pos_i = clean_ranks[:, :].squeeze()
            neg_i = torch.flip(pos_i, (0,))
            scores = -torch.mm((pool_clusters_centers), perturbed_feature)
            _, ranks = torch.sort(scores, dim=0, descending=True)
            doc_ranks = torch.zeros(pool_clusters_centers.size(0)).to(feature.device)
            doc_ranks[ranks] = 1 + torch.arange(pool_clusters_centers.size(0)).to(
                feature.device
            ).float().view((-1, 1))
            doc_ranks = doc_ranks.view((-1, 1))
            score_diffs = scores[pos_i] - scores[neg_i].view(neg_i.size(0))
            exped = score_diffs.exp()
            N = 1 / idcg(pos_i.size(0))
            ndcg_diffs = (1 / (1 + doc_ranks[pos_i])).log2() - (
                1 / (1 + doc_ranks[neg_i])
            ).log2().view(neg_i.size(0))
            lamb_updates = -1 / (1 + exped) * N * ndcg_diffs.abs()
            lambs = torch.zeros((pool_clusters_centers.shape[0], 1)).to(feature.device)
            lambs[pos_i] += lamb_updates.sum(dim=1, keepdim=True)
            lambs[neg_i] -= lamb_updates.sum(dim=0, keepdim=True).t()
            scores.backward(lambs)
            list_loss = torch.zeros(1).cuda()
        else:
            list_loss = torch.zeros(1).cuda()
        label_loss = label_loss.view(1)
        point_loss = point_loss.view(1)
        pair_loss = pair_loss.view(1)
        list_loss = list_loss.view(1)
        loss = label_loss + point_loss + pair_loss
        # the list-wise branch already called backward() on its own gradients
        if not args.list_wise:
            loss.backward()
        losses.update(loss.item())
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            # optimizer.rescale()
            print(
                ">> Train: [{0}][{1}/{2}]\t"
                "Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t"
                "Data {data_time.val:.3f} ({data_time.avg:.3f})\t"
                "Loss {loss.val:.4f} ({loss.avg:.4f})\t"
                "Noise l2: {noise:.4f}".format(
                    epoch + 1,
                    i,
                    len(train_loader),
                    batch_time=batch_time,
                    data_time=data_time,
                    loss=losses,
                    noise=noise.norm(),
                )
            )
    noise.requires_grad = False
    print(bcolors.str(f"Train {epoch}: Loss: {losses.avg}", bcolors.OKGREEN))
    return losses.avg, noise
def test(datasets, net, noise, image_size):
    """Evaluate the attack on the retrieval test datasets.

    Extracts (disk-cached) database vectors and noise-perturbed query vectors,
    ranks database images per query, and returns the inverse gray-box fooling
    rate against the stored baseline for this architecture/pooling.

    datasets: comma separated dataset names (e.g. "oxford5k,paris6k")
    net: victim retrieval network
    noise: universal perturbation tensor, resized to each query image
    image_size: max longer-side size used for feature extraction
    """
    global base
    print(">> Evaluating network on test datasets...")
    net.cuda()
    net.eval()
    normalize = transforms.Normalize(mean=net.meta["mean"], std=net.meta["std"])

    def add_noise(img):
        # Resize the shared noise to this image and add it, clamped to [0, 1].
        n = noise
        n = F.interpolate(
            n.unsqueeze(0), mode=MODE, size=tuple(img.shape[-2:]), align_corners=True
        ).squeeze()
        return torch.clamp(img + n, 0, 1)

    transform_base = transforms.Compose([transforms.ToTensor(), normalize])
    transform_query = transforms.Compose(
        [transforms.ToTensor(), transforms.Lambda(add_noise), normalize]
    )
    if "Lw" in net.meta:
        Lw = net.meta["Lw"]["retrieval-SfM-120k"]["ss"]
    else:
        Lw = None
    # evaluate on test datasets
    # FIX: honor the `datasets` argument instead of re-reading args.test_datasets
    # (all existing callers pass args.test_datasets, so behavior is unchanged).
    datasets = datasets.split(",")
    attack_result = {}
    for dataset in datasets:
        start = time.time()
        print(">> {}: Extracting...".format(dataset))
        cfg = configdataset(dataset, os.path.join(get_data_root(), "test"))
        images = [cfg["im_fname"](cfg, i) for i in range(cfg["n"])]
        qimages = [cfg["qim_fname"](cfg, i) for i in range(cfg["nq"])]
        bbxs = [tuple(cfg["gnd"][i]["bbx"]) for i in range(cfg["nq"])]
        # extract database and query vectors
        print(">> {}: database images...".format(dataset))
        with torch.no_grad():
            # database vectors are attack-independent, so cache them on disk
            if dataset in base and str(image_size) in base[dataset]:
                vecs = base[dataset][str(image_size)]
            else:
                vecs = extract_vectors(net, images, image_size, transform_base)
                if dataset not in base:
                    base[dataset] = {}
                base[dataset][str(image_size)] = vecs
                fname = args.network_path.replace("/", "_") + ".pkl"
                with open(f"base/{fname}", "wb") as f:
                    pickle.dump(base, f)
            print(">> {}: query images...".format(dataset))
            qvecs = extract_vectors(net, qimages, image_size, transform_query, bbxs)
        print(">> {}: Evaluating...".format(dataset))
        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()
        # whiten the vectors
        vecs_lw = whitenapply(vecs, Lw["m"], Lw["P"])
        qvecs_lw = whitenapply(qvecs, Lw["m"], Lw["P"])
        # search, rank, and print
        scores = np.dot(vecs_lw.T, qvecs_lw)
        ranks = np.argsort(-scores, axis=0)
        r = compute_map_and_print(dataset + " + whiten", ranks, cfg["gnd"])
        attack_result[dataset] = r
        print(">> {}: elapsed time: {}".format(dataset, htime(time.time() - start)))
    return inv_gfr(
        attack_result, baseline_result[net.meta["architecture"]][net.meta["pooling"]]
    )
def save_noise(noise, is_best, epoch):
    """Persist this epoch's perturbation as .npy and .png under args.noise_path;
    mirror both files to 'noise_best' when it is the best so far."""
    stem = os.path.join(args.noise_path, "noise_%d" % epoch)
    np.save(stem, noise.cpu().numpy())
    torchvision.utils.save_image(noise, stem + ".png", normalize=True)
    if not is_best:
        return
    best_stem = os.path.join(args.noise_path, "noise_best")
    for ext in (".npy", ".png"):
        shutil.copyfile(stem + ext, best_stem + ext)
class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count, and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics back to zero."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
# Script entry point: run the attack-training loop when executed directly.
if __name__ == "__main__":
    main()
|
try:
import sys
except ImportError:
print("Module sysT not available.")
try:
import xmltodict
except ImportError:
print("Module xmltodict not available.")
try:
import xml.dom.minidom as dom
except ImportError:
print("Module XML.DOM.MINDOM not available.")
pass
try:
import xml.etree.ElementTree as xml
except ImportError:
print("Module XM.ETREE.ELEMENTTREE not available.")
pass
try:
import requests
except ImportError:
print("Module REQUESTS not available.")
pass
try:
import lxml.etree as ET
except ImportError:
print("Module LXML.ETREE as ET not available.")
pass
try:
import ipaddress
except ImportError:
print("Module IPADDRESS is not available.")
pass
try:
import paramiko
except ImportError:
print("Module PARAMIKO is not available.")
pass
try:
import readline
except ImportError:
print("Module READLINE not available.")
pass
try:
import urllib3
except ImportError:
print("Module URLLIB3 not available.")
pass
try:
import warnings
except ImportError:
print("Module WARNING not available.")
pass
try:
import pathlib
except ImportError:
print("Module PATHLIB not available.")
pass
try:
import time
except ImportError:
print("Module TIME not available.")
pass
##################################################################
def disable_paging(remote_conn):
    """Disable CLI paging ('terminal length 0') on the remote shell and
    return whatever the device sends back."""
    remote_conn.send("terminal length 0\n")
    # give the device a moment to respond before draining the buffer
    time.sleep(1)
    return remote_conn.recv(1000)
def paramiko_login(command):
    """Prompt for a device IP, open an interactive SSH shell with paramiko,
    run `command`, and keep re-running it until the user answers "no".

    NOTE(review): the credentials are hard-coded placeholders, and every
    `except paramiko.ssh_exception:` clause below names a *module*, not an
    exception class -- if an error actually reaches one of those handlers,
    Python raises "TypeError: catching classes that do not inherit from
    BaseException". `paramiko.ssh_exception.SSHException` was presumably
    intended; confirm before relying on this error handling. `main()` and
    `device_admin()` are defined elsewhere in this file.
    """
    try:
        ip = input("Please enter a IP address: ")
        username = 'USERNAME'
        password = 'PASSWORD'
        remote_conn_pre = paramiko.SSHClient()
        # auto-accept unknown host keys (convenient, but disables MITM protection)
        remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        remote_conn_pre.connect(ip, username=username, password=password, look_for_keys=False, allow_agent=False)
        print("SSH connection established to %s" % ip)
        remote_conn = remote_conn_pre.invoke_shell()
        print("Interactive SSH session established")
        output = remote_conn.recv(1000)
        print(output)
        disable_paging(remote_conn)
        remote_conn.send(command)
        time.sleep(2)
        output = remote_conn.recv(5000)
        output_str = output.decode('utf-8')
        print(output_str)
        # re-run the same command until the user answers "no"
        while True:
            try:
                repeat_selection = input("Do you want to repeat command? ")
                if repeat_selection == "yes":
                    remote_conn.send(command)
                    time.sleep(2)
                    output = remote_conn.recv(5000)
                    output_str = output.decode('utf-8')
                    print(output_str)
                    continue
                elif repeat_selection == "no":
                    main()
                    break
                else:
                    print("\n")
                    print("Invalid Selection")
                    print("\n")
                    continue
            # NOTE(review): module used as exception class -- see docstring
            except paramiko.ssh_exception:
                print("\n")
                print("Connection Unsuccessful")
                print("\n")
                main()
            except paramiko.ssh_exception.NoValidConnectionsError:
                pass
                main()
    # NOTE(review): module used as exception class -- see docstring
    except paramiko.ssh_exception:
        print("\n")
        print("Connection Unsuccessful")
        print("\n")
        device_admin()
    except paramiko.ssh_exception.NoValidConnectionsError:
        pass
        main()
#################################################################
def select_configuration_file():
    """Interactively list the files in C:\\Python\\ACI and send a chosen one.

    Option 1 lists the directory, prompts for a filename, and hands the full
    path to send_configuration(); option 2 returns to the main menu. This can
    be used to (re)send configuration files that initially failed.
    """
    # FIX: use a raw string and pathlib joins for the Windows path. The original
    # "C:\Python\ACI" + "\\" + name only worked because \P and \A happen not to
    # be escape sequences, which is fragile.
    config_dir = pathlib.Path(r"C:\Python\ACI")
    while True:
        print("\n")
        print(" 1: File Select")
        print(" 2: Main Menu")
        print("\n")
        question_1 = input("Please select an option: ")
        print("\n")
        if question_1 == "1":
            dir_files = []
            for p in config_dir.iterdir():
                if p.is_file():
                    print(p)
                    dir_files.append(p)
            print("\n")
            config_file = input("Please enter a filename: ")
            send_configuration(str(config_dir / config_file))
            break
        elif question_1 == "2":
            main()
            break
        else:
            print("\n")
            print("Invalid Selection")
            print("\n")
################################
# Endpoint tracker: look up a client endpoint (fvCEp) by MAC on the current APIC
# (the old banner incorrectly described this as the tenant query)
################################
def enpoint_tracker():
    """Prompt for an endpoint MAC, query the APIC for the matching fvCEp
    subtree, and print its identity plus every path (tDn) it is learned on.

    Relies on module-level globals set by apic_login(): `session`, `apic`,
    and `get_file`. NOTE(review): the function name has a typo ("enpoint")
    but is kept because callers elsewhere use it.
    """
    endpoint = input("Please enter an endpoint: ")
    headers = {'content-type': 'text/xml'}
    # class query filtered on the MAC, pulling the full endpoint subtree
    uri = "https://%s" % apic + "/api/node/class/fvCEp.xml?rsp-subtree=full&rsp-subtree-class=fvCEp,fvRsCEpToPathEp,fvIp,fvRsHyper,fvRsToNic,fvRsToVm&query-target-filter=eq(fvCEp.mac," + "\"%s\"" % endpoint
    try:
        r = session.get(uri,verify=False, headers=headers)
        print("\n")
        # cache the raw XML reply to disk, then re-read it for parsing
        file= open(get_file, 'w')
        file.write(r.text)
        file.close()
        tree = ET.parse('C:\Python\ACI\Get_ACI.xml')
        root = tree.getroot()
        print("Endpoint: %s " % endpoint)
        print("\n")
        for fvCEp in root.iter("fvCEp"):
            ep_name = fvCEp.get("name")
            ep_mac = fvCEp.get("mac")
            encap = fvCEp.get("encap")
            ep_loc = fvCEp.get("dn")
            ep_ip = fvCEp.get("ip")
            print("Name: {0:20} EP MAC: {1:<20} Encapsulation: {2:<20} Location: {3:<20} IP: {4:<20}".format(ep_name, ep_mac, encap, ep_loc, ep_ip))
        print("\n")
        for fvRsCEpToPathEp in root.iter("fvRsCEpToPathEp"):
            endp_path = fvRsCEpToPathEp.get("tDn")
            print("Path: %s" % endp_path)
    # these fire when the session/apic globals are missing, i.e. not logged in
    except UnboundLocalError:
        print("403 Forbidden - Please log into APIC to vew or push configurations")
        print("\n")
        apic_login()
    except NameError:
        print("403 Forbidden - Please log into APIC to vew or push configurations")
        print("\n")
        apic_login()
def view_tenant():
    """Query the APIC for every fvTenant object and print each tenant name.

    Uses the module-level `session`, `apic`, and `get_file` globals that
    apic_login() establishes.
    """
    xml_headers = {'content-type': 'text/xml'}
    request_url = "https://%s/api/class/fvTenant.xml" % apic
    try:
        response = session.get(request_url, verify=False, headers=xml_headers)
        print("\n")
        # cache the raw XML reply, then re-read it from disk for parsing
        with open(get_file, 'w') as cache:
            cache.write(response.text)
        parsed = ET.parse('C:\Python\ACI\Get_ACI.xml')
        print("Tenants on APIC %s: " % apic)
        print("\n")
        for node in parsed.getroot().iter("fvTenant"):
            print(node.get("name"))
    except (UnboundLocalError, NameError):
        # session/apic globals are missing when not logged in yet
        print("403 Forbidden - Please log into APIC to vew or push configurations")
        print("\n")
        apic_login()
def tenant_vrf(tenant_input):
    #########################
    # Gets the VRFs (fvCtx children) of the selected Tenant and prints their names
    #########################
    headers = {'content-type': 'text/xml'}
    uri = "https://" + apic + "/api/node/mo/uni/tn-" + tenant_input + ".xml?query-target=children&target-subtree-class=fvCtx"
    try:
        r = session.get(uri, verify=False, headers=headers)
        print("\n")
        # NOTE(review): the reply is written to get_vrf_file, but the parse
        # below reads the hard-coded Get_ACI.xml path. Unless get_vrf_file
        # points at that same file, this parses stale data from a previous
        # call -- verify the intent.
        file = open(get_vrf_file, 'w')
        file.write(r.text)
        file.close()
        tree = ET.parse('C:\Python\ACI\Get_ACI.xml')
        root = tree.getroot()
        # NOTE(review): this prints the APIC address, not tenant_input --
        # probably meant "VRFs in Tenant %s" % tenant_input.
        print("VRFs in Tenant %s " % apic)
        print("\n")
        for fvCtx in root.iter("fvCtx"):
            vrf = fvCtx.get("name")
            print(vrf)
    ##########################################################
    # The following two exceptions fire when the APIC session globals are
    # missing, i.e. the user has not logged in yet.
    ##########################################################
    except UnboundLocalError:
        print("403 Forbidden - Please log into APIC to vew/push configurations")
        print("\n")
        apic_login()
    except NameError:
        print("403 Forbidden - Please log into APIC to vew/push configurations")
        print("\n")
        apic_login()
def infr():
    """Print name, model, and serial number for every fabric node in pod-1.

    Uses the module-level `session`, `apic`, and `get_file` globals that
    apic_login() establishes.
    """
    headers = {'content-type': 'text/xml'}
    uri = "https://%s/api/node/mo/topology/pod-1.xml?query-target=children" % apic
    try:
        r = session.get(uri,verify=False, headers=headers)
        print("\n")
        # cache the raw XML reply, then re-read it from disk for parsing
        file= open(get_file, 'w')
        file.write(r.text)
        file.close()
        tree = ET.parse('C:\Python\ACI\Get_ACI.xml')
        root = tree.getroot()
        print("Fabric Nodes %s: " % apic)
        print("\n")
        for fabricNode in root.iter("fabricNode"):
            fabric_node = fabricNode.get("name")
            model_node = fabricNode.get("model")
            serial_node = fabricNode.get("serial")
            # FIX: removed the `device_info` list that was rebuilt and then
            # discarded on every iteration (dead store).
            print ("{0:12} {1:<16} {2:>16}".format(fabric_node, model_node, serial_node))
    # these fire when the session/apic globals are missing, i.e. not logged in
    except UnboundLocalError:
        print("403 Forbidden - Please log into APIC to vew or push configurations")
        print("\n")
        apic_login()
    except NameError:
        print("403 Forbidden - Please log into APIC to vew or push configurations")
        print("\n")
        print("Unknown Error. Relogging into APIC")
        apic_login()
def infr_2():
    """Print each leaf interface profile alongside the port selector names
    found in the fabric's infra subtree.

    NOTE(review): the inner loop iterates *all* infraHPortS nodes for every
    infraAccPortP, so every profile is printed with every selector; confirm
    whether the selectors were meant to be scoped to their parent profile.
    """
    headers = {'content-type': 'text/xml'}
    uri = "https://%s/api/node/mo/uni/infra.xml?query-target=subtree" % apic
    try:
        r = session.get(uri,verify=False, headers=headers)
        print("\n")
        # cache the raw XML reply, then re-read it from disk for parsing
        file= open(get_file, 'w')
        file.write(r.text)
        file.close()
        tree = ET.parse('C:\Python\ACI\Get_ACI.xml')
        root = tree.getroot()
        print("Fabric Ports %s: " % apic)
        print("\n")
        for infraAccPortP in root.iter("infraAccPortP"):
            leaf_pro = infraAccPortP.get("name")
            for infraHPortS in root.iter("infraHPortS"):
                # FIX: removed the duplicate dead assignment `assign_ints`,
                # which read the same attribute as port_range and was unused.
                port_range = infraHPortS.get("name")
                print(leaf_pro, port_range)
    # these fire when the session/apic globals are missing, i.e. not logged in
    except UnboundLocalError:
        print("403 Forbidden - Please log into APIC to vew or push configurations")
        print("\n")
        apic_login()
    except NameError:
        print("403 Forbidden - Please log into APIC to vew or push configurations")
        print("\n")
        print("Unknown Error. Relogging into APIC")
        apic_login()
def find_subnet():
    """List every configured bridge-domain subnet gateway on the APIC, then
    prompt for a gateway (ip/prefix) and print the dn of each fvSubnet whose
    ip contains that string.

    Uses the module-level `session`, `apic`, and `get_file` globals that
    apic_login() establishes.
    """
    headers = {'content-type': 'text/xml'}
    uri_1 = "https://%s/api/class/fvBD.xml?query-target=subtree" % apic
    try:
        r = session.get(uri_1,verify=False, headers=headers)
        # cache the raw XML reply, then re-read it from disk for parsing
        file = open(get_file, 'w')
        file.write(r.text)
        file.close()
        tree = ET.parse('C:\Python\ACI\Get_ACI.xml')
        root = tree.getroot()
        # NOTE(review): `line` is never used -- candidate for removal
        line = 5
        subnet_array = []
        for fvSubnet in root.iter("fvSubnet"):
            subnets = fvSubnet.get("ip")
            subnet_array.append(subnets)
            # NOTE(review): "Gateway: :" has a doubled colon in the output
            print("Gateway: : %s" % subnets)
    # these fire when the session/apic globals are missing, i.e. not logged in
    except UnboundLocalError:
        print("\n")
        print("403 Forbidden - Please log into APIC to vew or push configurations")
        print("\n")
        apic_login()
    except NameError:
        print("\n")
        print("Unknown Error. Relogging into APIC")
        print("\n")
        apic_login()
    print("\n")
    print("Enter gateway IP plus prefix length (ex. 1.1.1.1/24)")
    subnet_id = input("Please enter a gateway IP: ")
    headers = {'content-type': 'text/xml'}
    uri_2 = "https://%s/api/class/fvBD.xml?query-target=subtree" % apic
    try:
        r = session.get(uri_2,verify=False, headers=headers)
        file = open(get_file, 'w')
        file.write(r.text)
        file.close()
        tree = ET.parse('C:\Python\ACI\Get_ACI.xml')
        root = tree.getroot()
        for fvSubnet in root.iter("fvSubnet"):
            location = fvSubnet.get("dn")
            ip = fvSubnet.get("ip")
            # NOTE(review): substring match, so "1.1.1.1/2" also matches
            # "1.1.1.1/24" -- an exact comparison may be intended
            if subnet_id in ip:
                print("\n")
                print (location)
    ##########################################################
    # The following two exceptions fire when the APIC session globals are
    # missing, i.e. the user has not logged in yet.
    ##########################################################
    except UnboundLocalError:
        print("\n")
        print("403 Forbidden - Please log into APIC to vew or push configurations")
        print("\n")
        apic_login()
    except NameError:
        print("\n")
        print("Unknown Error. Relogging into APIC")
        print("\n")
        apic_login()
def view_bd(tenant_input):
    """Print name, unicastRoute and pcTag of every Bridge Domain in *tenant_input*.

    Uses the module globals ``apic``, ``session`` and ``get_file``; falls
    back to ``apic_login()`` when no authenticated session exists yet.
    """
    headers = {'content-type': 'text/xml'}
    uri = ("https://" + apic + "/api/node/mo/uni/tn-" + tenant_input +
           ".xml?query-target=children&target-subtree-class=fvBD")
    try:
        r = session.get(uri, verify=False, headers=headers)
        print("\n")
        # Cache the response and parse it back from disk.
        with open(get_file, 'w') as fh:
            fh.write(r.text)
        tree = ET.parse(get_file)
        root = tree.getroot()
        print("Bridge Domains in %s: " % tenant_input)
        print("\n")
        for fvBD in root.iter("fvBD"):
            bridge_domain = fvBD.get("name")
            UnicastRoute = fvBD.get("unicastRoute")
            pc_tag = fvBD.get("pcTag")
            print("{0:35} {1:<15} {2:<15}".format(bridge_domain, UnicastRoute, "pcTag: %s" % pc_tag))
    # Both exceptions indicate there is no authenticated session yet.
    except UnboundLocalError:
        print("\n")
        print("403 Forbidden - Please log into APIC to view or push configurations")
        print("\n")
        apic_login()
    except NameError:
        print("\n")
        print("Unknown Error. Relogging into APIC")
        print("\n")
        apic_login()
def view_app_profiles(tenant_input):
    """Print the name of every Application Profile (fvAp) in *tenant_input*.

    Uses the module globals ``apic``, ``session`` and ``get_file``; falls
    back to ``apic_login()`` when no authenticated session exists yet.
    """
    headers = {'content-type': 'text/xml'}
    uri = ("https://" + apic + "/api/node/mo/uni/tn-" + tenant_input +
           ".xml?query-target=children&target-subtree-class=fvAp")
    try:
        r = session.get(uri, verify=False, headers=headers)
        print("\n")
        # Cache the response and parse it back from disk.
        with open(get_file, 'w') as fh:
            fh.write(r.text)
        tree = ET.parse(get_file)
        root = tree.getroot()
        print("Tenant %s Application Profiles: " % tenant_input)
        print("\n")
        for fvAp in root.iter("fvAp"):
            app_p = fvAp.get("name")
            print(app_p)
    # Both exceptions indicate there is no authenticated session yet.
    except UnboundLocalError:
        print("\n")
        print("403 Forbidden - Please log into APIC to view or push configurations")
        print("\n")
        apic_login()
    except NameError:
        print("\n")
        print("Unknown Error. Relogging into APIC")
        apic_login()
###################################################################
def view_vlan_pools():
    """Print every configured VLAN pool with encap block ranges.

    NOTE(review): the inner loop iterates *all* fvnsEncapBlk elements in the
    response for every pool, so each pool name is paired with every encap
    block in the fabric (a cross product), not only with its own blocks.
    Whether the blocks come back nested under their pool depends on the APIC
    response shape; confirm before scoping the inner loop to the pool element.
    """
    headers = {'content-type': 'text/xml'}
    uri = "https://" + apic + "/api/class/fvnsVlanInstP.xml?query-target=subtree"
    try:
        r = session.get(uri, verify=False, headers=headers)
        print("\n")
        # Cache the response and parse it back from disk.
        with open(get_file, 'w') as fh:
            fh.write(r.text)
        tree = ET.parse(get_file)
        root = tree.getroot()
        print("Configured VLAN pools: ")
        print("\n")
        for fvnsVlanInstP in root.iter("fvnsVlanInstP"):
            vlan_pool = fvnsVlanInstP.get("name")
            for fvnsEncapBlk in root.iter("fvnsEncapBlk"):
                vlan_start = fvnsEncapBlk.get("from")
                vlan_end = fvnsEncapBlk.get("to")
                alloc_mode = fvnsEncapBlk.get("allocMode")
                print("{0:25} {1:<15} {2:<15} {3:<15}".format(vlan_pool, vlan_start, vlan_end, alloc_mode))
    # Both exceptions indicate there is no authenticated session yet.
    except UnboundLocalError:
        print("\n")
        print("403 Forbidden - Please log into APIC to view or push configurations")
        print("\n")
        apic_login()
    except NameError:
        print("\n")
        print("Unknown Error. Relogging into APIC")
        print("\n")
        apic_login()
##########################################################################
def view_epgs(tenant_input, app_input):
    """Print name and pcTag of every EPG under tenant/application profile.

    Uses the module globals ``apic``, ``session`` and ``get_file``; falls
    back to ``apic_login()`` when no authenticated session exists yet.
    """
    headers = {'content-type': 'text/xml'}
    uri = ("https://" + apic + "/api/node/mo/uni/tn-" + tenant_input +
           "/ap-" + app_input + ".xml?query-target=children&target-subtree-class=fvAEPg")
    try:
        r = session.get(uri, verify=False, headers=headers)
        print("\n")
        # Cache the response and parse it back from disk.
        with open(get_file, 'w') as fh:
            fh.write(r.text)
        tree = ET.parse(get_file)
        root = tree.getroot()
        print("Tenant %s Endpoint Groups: " % tenant_input)
        print("\n")
        for fvAEPg in root.iter("fvAEPg"):
            EPG = fvAEPg.get("name")
            pcTag = fvAEPg.get("pcTag")
            print("{0:35} {1:>15}".format(EPG, "pcTag: %s" % pcTag))
    # Both exceptions indicate there is no authenticated session yet.
    except UnboundLocalError:
        print("\n")
        print("403 Forbidden - Please log into APIC to view or push configurations")
        print("\n")
        apic_login()
    except NameError:
        print("\n")
        print("Unknown Error. Relogging into APIC")
        apic_login()
######################################################
def view_int_profiles():
    """Print the name of every bundle interface policy group (infraAccBndlGrp).

    (The original header comment said "EndPoint Groups", which was a stale
    copy-paste; this function queries the infra subtree.)
    """
    headers = {'content-type': 'text/xml'}
    uri = "https://" + apic + "/api/node/mo/uni/infra.xml?query-target=subtree"
    try:
        r = session.get(uri, verify=False, headers=headers)
        print("\n")
        # Cache the response and parse it back from disk.
        with open(get_file, 'w') as fh:
            fh.write(r.text)
        tree = ET.parse(get_file)
        root = tree.getroot()
        print("Interface Profiles")
        print("\n")
        for infraAccBndlGrp in root.iter("infraAccBndlGrp"):
            int_path = infraAccBndlGrp.get("name")
            print(int_path)
    # Both exceptions indicate there is no authenticated session yet.
    except UnboundLocalError:
        print("\n")
        print("403 Forbidden - Please log into APIC to view or push configurations")
        print("\n")
        apic_login()
    except NameError:
        print("\n")
        print("Unknown Error. Relogging into APIC")
        apic_login()
###################################################################
def apic_login():
    """Prompt for an APIC address, authenticate, and drop into main().

    Sets the module globals ``apic`` and ``session`` used by every other
    request in this program. Re-prompts in a loop on an invalid URL instead
    of recursing (the original recursion grew the stack on every bad input).

    NOTE(review): credentials are hard-coded in the XML payload below --
    consider prompting for them or reading from a secured store.
    """
    raw_data = """<!-- AAA LOGIN -->
    <aaaUser name="USERNAME" pwd="PASSWORD"/>
    """
    global apic, session
    headers = {'content-type': 'text/xml'}
    while True:
        apic = input("Please enter an APIC IP: ")
        uri = "https://%s/api/mo/aaaLogin.xml" % apic
        try:
            session = requests.Session()
            r = session.post(uri, data=raw_data, verify=False, headers=headers)
            print("\n")
            print("Status Code:", r.status_code)
            main()
            break
        except requests.exceptions.InvalidURL:
            print("\n")
            print("Invalid APIC IP")
####################################################################
def view_contracts(tenant_input):
    """Print name, scope and dn of contracts (vzBrCP).

    NOTE(review): the query hits the class-wide /api/class/vzBrCP.xml, so
    this lists contracts from *all* tenants even though the heading names
    only *tenant_input* -- confirm whether it should be tenant-scoped.
    """
    headers = {'content-type': 'text/xml'}
    uri = "https://" + apic + "/api/class/vzBrCP.xml"
    try:
        r = session.get(uri, verify=False, headers=headers)
        print("\n")
        # Cache the response and parse it back from disk.
        with open(get_file, 'w') as fh:
            fh.write(r.text)
        tree = ET.parse(get_file)
        root = tree.getroot()
        print("Tenant %s Contracts: " % tenant_input)
        print("\n")
        for vzBrCP in root.iter("vzBrCP"):
            contract = vzBrCP.get("name")
            scope = vzBrCP.get("scope")
            dn = vzBrCP.get("dn")
            print("{0:35} {1:<20} {2:<20}".format(contract, "Scope: %s" % scope, "Tenant: %s" % dn))
    # Both exceptions indicate there is no authenticated session yet.
    except UnboundLocalError:
        print("\n")
        print("403 Forbidden - Please log into APIC to view or push configurations")
        print("\n")
        apic_login()
    except NameError:
        print("\n")
        print("Unknown Error. Relogging into APIC")
        apic_login()
def send_configuration(file):
    """Pretty-print the XML in *file* and POST it to the APIC.

    On failure the user is sent back through apic_login() and the POST is
    retried once. (The original ran the second POST unconditionally, so a
    successful push was sent to the APIC twice; it also referenced
    ``r.reason`` in the retry's except clause, where ``r`` may be unbound.)
    """
    with open(file) as fh:
        config_file = fh.read()
    print("\n")
    print(dom.parseString(str(config_file)).toprettyxml())
    url = "https://%s/api/mo/uni.xml" % apic
    headers = {'content-type': 'application/xml'}
    try:
        r = session.post(url=url, data=config_file, verify=False, headers=headers)
        warnings.filterwarnings("ignore")
        print("Status Code:", r.status_code)  # 200 means successful
    except Exception:
        # No session / connection problem: re-authenticate, then retry once.
        print("Please log into APIC")
        print("\n")
        apic_login()
        try:
            r = session.post(url=url, data=config_file, verify=False, headers=headers)
            print("Status Code:", r.status_code)  # 200 means successful
        except Exception as err:
            print("Error:", err)
            print("Please log into APIC")
            print("\n")
            main()
######################################################################
def yes_no_answer(text, state):
    """readline completer for the yes/no prompts: returns the *state*-th
    option starting with *text*, or None when exhausted."""
    matches = [option for option in ("yes", "no") if option.startswith(text)]
    return matches[state] if state < len(matches) else None
#######################################################################
def ip_scope(text, state):
    """readline completer for bridge-domain subnet scopes."""
    matches = [option for option in ("public", "private", "shared")
               if option.startswith(text)]
    return matches[state] if state < len(matches) else None
########################################################################
def contract_filter_entry(text, state):
    """readline completer for contract filter-entry port values."""
    matches = [option for option in ("unspecified",) if option.startswith(text)]
    return matches[state] if state < len(matches) else None
def tenant_array(text, state):
    """readline completer over the module-level ``tenants`` value.

    NOTE(review): ``tenant = [tenants]`` wraps ``tenants`` in a one-element
    list. That is only correct if ``tenants`` is a single string; if it is a
    list of tenant names, ``cmd.startswith`` raises AttributeError here --
    confirm what ``tenants`` holds (it is defined elsewhere in this file).
    """
    tenant = [tenants]
    tenant_commands = [cmd for cmd in tenant if cmd.startswith(text)]
    if state < len(tenant_commands):
        return tenant_commands[state]
    else:
        return None
def flood_scope(text, state):
    """readline completer for the unknown-unicast flood mode."""
    matches = [option for option in ("proxy", "flood") if option.startswith(text)]
    return matches[state] if state < len(matches) else None
def contract_scopes(text, state):
    """readline completer for the contract scope prompt."""
    matches = [option for option in
               ("tenant", "vrf", "global", "application profile")
               if option.startswith(text)]
    return matches[state] if state < len(matches) else None
###########################################################################3
########################################################################
def main():
    """Top-level interactive menu; loops until the user enters 'q'."""
    # Map of menu keys to the handler invoked for each selection.
    actions = {
        "1": tenant_configuration,
        "2": app_profile_configuration,
        "3": bridge_domain_configuration,
        "4": contract_configuration,
        "5": view_config,
        "6": select_configuration_file,
        "7": apic_login,
        "8": troubleshooting,
        "9": infr,
    }
    selection = ' '
    while selection != 'q':
        print("\n")
        print("Datacenter Network Programabiltiy and Automation Program")
        print("\n")
        print("\n")
        print(" 1: Tenant")
        print(" 2: Application Profile")
        print(" 3: Bridge Domain")
        print(" 4: Contracts")
        print(" 5: View Config")
        print(" 6: Send Config")
        print(" 7: APIC Login")
        print(" 8: Troubleshooting")
        print(" 9: Infrastructure")
        print("[q] (quit)")
        print("\n")
        selection = input("Please select an option: ")
        handler = actions.get(selection)
        if handler is not None:
            handler()
        elif selection == "q":
            print("Exiting Program")
            print("\n")
        else:
            print("\n")
            print("Invalid Selection")
            print("\n")
    print("Thank you for using the Datacenter Network Programabiltiy and Automation Program")
    quit()
###########################################################################
def troubleshooting():
    """Interactive troubleshooting sub-menu (zoning rules, endpoints, subnets).

    NOTE(review): the loop condition is ``!= '2'``, so choosing option 2 both
    runs the endpoint tracker *and* ends this loop, while options 3 and 4
    leave via main(); confirm the exit condition is intended.
    """
    config_selection = ' '
    while config_selection != '2':
        print("\n")
        print("Troubleshooting: ")
        print("\n")
        print("\n")
        print(" 1: Zoning (Contracts:)")
        print(" 2: Endpoint Tracker")
        print(" 3: Subnet Finder")
        print(" 4: Main")
        print("\n")
        config_selection = input("Please select an option: ")
        if config_selection == "1":
            # Drill down tenant -> app profile -> EPG to pick a pcTag, then
            # show the matching zoning rules on the switch over SSH.
            view_tenant()
            print("\n")
            tenant_input = input("Please select a Tenant: ")
            view_app_profiles(tenant_input)
            print("\n")
            app_input = input("Please select a App profile: ")
            view_epgs(tenant_input, app_input)
            print("\n")
            pcTag = input("Please select a pcTag: ")
            paramiko_login("show zoning-rule src-epg %s\n" % pcTag)
            troubleshooting()
        elif config_selection == "2":
            # NOTE(review): 'enpoint_tracker' looks like a typo of
            # 'endpoint_tracker' -- it must match the definition elsewhere
            # in this file; confirm.
            enpoint_tracker()
            main()
        elif config_selection == "3":
            find_subnet()
            main()
        elif config_selection == "4":
            main()
        else:
            print("\n")
            print("Invalid Selection")
            print("\n")
def view_config():
    """Read-only 'view' menu: dispatch to the chosen show-* helper."""
    print("View Configurations")
    print("\n")
    for entry in (" 1: Tenant", " 2: Application Profile/EPG",
                  " 3: Bridge Domain", " 4: VLAN Pools",
                  " 5: Interface Profiles", " 6: Contracts",
                  " 7: Infrastructure"):
        print(entry)
    selection = input("Please select an option: ")
    if selection == "1":
        view_tenant()
    elif selection == "2":
        view_tenant()
        print("\n")
        chosen_tenant = input("Please select a Tenant: ")
        view_app_profiles(chosen_tenant)
        print("\n")
        chosen_app = input("Please create a Application Profile: ")
        view_epgs(chosen_tenant, chosen_app)
    elif selection == "3":
        view_tenant()
        print("\n")
        chosen_tenant = input("Please select a Tenant: ")
        view_bd(chosen_tenant)
    elif selection == "4":
        view_vlan_pools()
    elif selection == "5":
        view_int_profiles()
    elif selection == "6":
        view_tenant()
        chosen_tenant = input("Please select a Tenant: ")
        view_contracts(chosen_tenant)
    elif selection == "7":
        infr_2()
def tenant_configuration():
    """Prompt for a new Tenant + VRF, write the XML and push it to the APIC."""
    print("\n")
    print("TAB option can be use on some options, this will avoid configuration failures")
    view_tenant()
    tenant_el = xml.Element("fvTenant")
    print("\n")
    tenant_el.set("name", input("Please create a Tenant: "))
    vrf_el = xml.Element("fvCtx")
    vrf_el.set("name", input("Please create a vrf: "))
    tenant_el.append(vrf_el)
    with open(tenant_file, "wb") as fh:
        xml.ElementTree(tenant_el).write(fh)
    send_configuration(tenant_file)
########################################## Displays current app profiles. Reads APIC via URI, saves ouput to file and then iterates file to find Application Profiles
def app_profile_configuration():
    """Build and push Tenant -> Application Profile -> EPG configuration.

    Walks the user through creating an application profile and EPG, binding
    a bridge domain and consumed/provided contracts, then optionally
    attaching a VMM or physical domain before pushing the file to the APIC.
    """
    print("\n")
    print("TAB option can be use on some options, this will avoid configuration failures")
    print("\n")
    view_tenant()
    print("\n")
    tenant_input = input("Please enter a Tenant: ")
    view_app_profiles(tenant_input)
    print("\n")
    root = xml.Element("fvTenant")
    root.set("name", tenant_input)
    # --- Application profile ---
    print("\n")
    app = xml.Element("fvAp")
    app_input = input("Please enter a Application Profile: ")
    root.append(app)
    app.set("name", app_input)
    app_descr = input("Please enter a description: ")
    app.set("descr", app_descr)
    # --- Endpoint group ---
    print("Endpoint Group Configuration")
    view_epgs(tenant_input, app_input)
    print("\n")
    epg = xml.Element("fvAEPg")
    app.append(epg)
    epg_input = input("Please enter a Endpoint Group(EPG): ")
    epg.set("name", epg_input)
    epg_descr = input("Please enter a description: ")
    epg.set("descr", epg_descr)
    # --- Bridge domain binding ---
    view_bd(tenant_input)
    epg_bd = xml.Element("fvRsBd")
    epg.append(epg_bd)
    epg_bd_input = input("Please assign a Bridge Domain: ")
    epg_bd.set("tnFvBDName", epg_bd_input)
    # --- Contracts ---
    view_contracts(tenant_input)
    epg_cons = xml.Element("fvRsCons")
    epg.append(epg_cons)
    con_contract = input("Please enter a consumed contract: ")
    epg_cons.set("tnVzBrCPName", con_contract)
    # Bug fix: the provider relation class is 'fvRsProv' -- the original
    # built the element as "fvRsProv " with a trailing space in the tag.
    epg_prov = xml.Element("fvRsProv")
    epg.append(epg_prov)
    prov_contract = input("Please enter a provided contract: ")
    epg_prov.set("tnVzBrCPName", prov_contract)
    # --- Domain attachment / static path ---
    print("Static PathConfiguration")
    print("View Configurations")
    print("\n")
    # Bug fix: the options were numbered 1, 2, 2, 3 -- renumbered uniquely.
    print(" 1: VMM Domain")
    print(" 2: Physical Domain")
    print(" 3: Other Static Path")
    print(" 4: Main")
    config_selection = input("Please select an option: ")
    if config_selection == "1":
        domain_attach = xml.Element("fvRsDomAtt")
        epg.append(domain_attach)
        vmm_dom = input("Please attach VMM Domian: ")
        domain_attach.set("tDn", "uni/vmmp-VMware/dom-%s" % vmm_dom)
        vla_encap = input("Please assign vlan encapsulation: ")
        domain_attach.set("encap", "vlan-" + vla_encap)
        tree = xml.ElementTree(root)
        with open(app_file, "wb") as fh:
            tree.write(fh)
        # Bug fix: the XML was written to app_file but epg_file was pushed,
        # so the VMM attachment never reached the APIC.
        send_configuration(app_file)
    elif config_selection == "2":
        domain_attach = xml.Element("fvRsDomAtt")
        epg.append(domain_attach)
        phys_domain = input("Please assign physical domain: ")
        domain_attach.set("tDn", "uni/phys-%s" % phys_domain)
        tree = xml.ElementTree(root)
        with open(epg_file, "wb") as fh:
            tree.write(fh)
        send_configuration(epg_file)
def bridge_domain_configuration():
    """Create a Bridge Domain (routing, flood mode, L3Out, subnet, VRF) and push it."""
    print("\n")
    print("TAB option can be use on some options, this will avoid configuration failures")
    print("\n")
    view_tenant()
    tenant_input = input("Please enter a Tenant: ")
    root = xml.Element("fvTenant")
    root.set("name", tenant_input)
    view_bd(tenant_input)
    bd = xml.Element("fvBD")
    root.append(bd)
    print("\n")
    bd_input = input("Please enter a bridge domain: ")
    bd.set("name", bd_input)
    readline.parse_and_bind("tab: complete")
    readline.set_completer(yes_no_answer)
    unicast_route_input = input("Enable unicast route (yes/no): ")
    bd.set("unicastRoute", unicast_route_input)
    readline.parse_and_bind("tab: complete")
    readline.set_completer(flood_scope)
    flood_type_input = input("Hardware Proxy or Flood: ")
    bd.set("unkMacUcastAct", flood_type_input)
    # --- L3Out association ---
    l3out = xml.Element("fvRsBDToOut")
    l3out_asso = input("Associate with L3Out: ")
    l3out.set("tnL3extOutName", l3out_asso)
    # Bug fix: the fvRsBDToOut element was built but never attached to the
    # BD, so the L3Out association was silently dropped from the push.
    bd.append(l3out)
    # --- Subnet configuration ---
    subnet = xml.Element("fvSubnet")
    readline.parse_and_bind("tab: complete")
    readline.set_completer(ip_scope)
    scope_adv = input("Please enter a scope: ")
    subnet.set("scope", scope_adv)
    bd.append(subnet)
    while True:
        try:
            ip = input("Please enter a ip address and mask as CIDR: ")
            ipaddress.IPv4Interface(ip)  # validates before it is pushed
            subnet.set("ip", ip)
            bd_vrf = xml.Element("fvRsCtx")
            bd.append(bd_vrf)
            bd_vrf_input = input("Please assign a VRF: ")
            bd_vrf.set("tnFvCtxName", bd_vrf_input)
            tree = xml.ElementTree(root)
            with open(bd_file, "wb") as fh:
                tree.write(fh)
            send_configuration(bd_file)
            print("\n")
            break
        # ValueError also covers ipaddress.NetmaskValueError (e.g.
        # '1.1.1.1/33'), which the original AddressValueError-only clause
        # let crash the program.
        except ValueError:
            print("\n")
            print("Invalid IP Address")
            print("\n")
def contract_configuration():
    """Create a contract filter, contract and subject, then push to the APIC."""
    print("\n")
    print("TAB option can be use on some options, this will avoid configuration failures")
    # Raw string: plain literals treat sequences like '\A' as invalid escapes.
    contract_file = r"C:\Python\ACI\Create_Contract_ACI.xml"
    root = xml.Element("fvTenant")
    # Bug fix: every other call site invokes view_tenant() with no argument,
    # and 'get_tenant_file' is not defined in this module, so the original
    # view_tenant(get_tenant_file) raised NameError.
    view_tenant()
    print("\n")
    tenant_input = input("Please select a Tenant: ")
    root.set("name", tenant_input)
    vz_filter = xml.Element("vzFilter")
    filter_input = input("Please create a contract filter: ")
    root.append(vz_filter)
    vz_filter.set("name", filter_input)
    # --- Filter entry ---
    print("Set filter parameters: ")
    vz_entry = xml.Element("vzEntry")
    filter_entry_input = input("Please assign filter entry a name: ")
    vz_filter.append(vz_entry)
    vz_entry.set("name", filter_entry_input)
    filter_entry_desc_input = input("Entry description: ")
    vz_entry.set("descr", filter_entry_desc_input)
    readline.parse_and_bind("tab: complete")
    readline.set_completer(yes_no_answer)
    stateful_input = input("Stateful (yes/no): ")
    vz_entry.set("stateful", stateful_input)
    vz_entry.set("etherT", "unspecified")
    readline.parse_and_bind("tab: complete")
    readline.set_completer(contract_filter_entry)
    # NOTE(review): the prompt wording and the sToPort/sFromPort/dToPort/
    # dFromPort attributes below look mismatched; confirm the intended
    # port-range mapping before relying on the pushed filter.
    source_port_input = input("Source port: ")
    vz_entry.set("sToPort", source_port_input)
    dest_input = input("Destination port: ")
    vz_entry.set("sFromPort", dest_input)
    dest_source_input = input("Destination source port: ")
    vz_entry.set("dToPort", dest_source_input)
    dest_dest_input = input("Destination port: ")
    vz_entry.set("dFromPort", dest_dest_input)
    # --- Contract ---
    contract = xml.Element("vzBrCP")
    contract_input = input("Please create a contract: ")
    root.append(contract)
    contract.set("name", contract_input)
    readline.parse_and_bind("tab: complete")
    readline.set_completer(contract_scopes)
    contr_scope = input("Please intput a contract scope: ")
    contract.set("scope", contr_scope)
    # --- Contract subject ---
    subject = xml.Element("vzSubj")
    contract.append(subject)
    subject_name = input("Please enter a subject name: : ")
    subject.set("name", subject_name)
    contr_desc = input("Please input a subject description: ")
    subject.set("descr", contr_desc)
    rev_ports = input("Reverse ports (yes/no:) ")
    subject.set("revFltPorts", rev_ports)
    # Renamed from 'filter' to avoid shadowing the builtin.
    subj_filter = xml.Element("vzRsSubjFiltAtt")
    subject.append(subj_filter)
    assign_filter = input("Please assign a filter to the contract: ")
    # Bug fix: the attribute was set as " tnVzFilterName" (leading space),
    # which the APIC would not recognise.
    subj_filter.set("tnVzFilterName", assign_filter)
    tree = xml.ElementTree(root)
    with open(contract_file, "wb") as fh:
        tree.write(fh)
    send_configuration(contract_file)
if __name__ == '__main__':
    ###########################################################
    # Configuration and view files, reachable anywhere in the program.
    # Raw strings: backslash sequences in these Windows paths (e.g. '\A')
    # are invalid escape sequences inside ordinary string literals and emit
    # DeprecationWarning on Python 3.6+.
    ###########################################################
    tenant_file = r"C:\Python\ACI\Create_Tenant_ACI.xml"
    contract_file = r"C:\Python\ACI\Create_Contract_ACI.xml"
    bd_file = r"C:\Python\ACI\Create_BD_ACI.xml"
    epg_file = r"C:\Python\ACI\Create_EPG_ACI.xml"
    app_file = r"C:\Python\ACI\Create_App_ACI.xml"
    get_vlan_pool = r"C:\Python\ACI\Get_VLAN_Pool_ACI.xml"
    get_file = r"C:\Python\ACI\Get_ACI.xml"
    # Silence urllib3 InsecureRequestWarning (all requests use verify=False).
    warnings.filterwarnings("ignore")
    apic = " "
    apic_login()
|
import os
import random as rd
# Human-visible 3x3 board; " " marks an empty square (indices 0-8).
board = [" "," "," "," "," "," "," "," "," "]
# Squares (as strings '1'-'9') still available to the computer player.
computer_board = ['1', '2', '3', '4', '5', '6', '7', '8','9']
# Input-validation flags set by check_error() and consumed by print_error().
empty_error = False
wrong_place = False
wrong_input = False
player = 'X'  # 'X' always moves first; toggled after every turn in play()
choice = None  # last square chosen, as a string '1'-'9'
def print_header():
    """Print the ASCII-art title banner plus the 1-9 square layout legend."""
    print('''
 _____ _ ____ _____ ____ ____ _____ ____ _____
/__ __\/ \/ _\ /__ __\/ _ \/ _\ /__ __\/ _ \/ __/ 1|2|3
/ \ | || / _____ / \ | / \|| / _____ / \ | / \|| \ 4|5|6
| | | || \_\____\| | | |-||| \_\____\| | | \_/|| /_ 7|8|9
\_/ \_/\____/ \_/ \_/ \|\____/ \_/ \____/\____\
''')
def print_board():
    """Render the 3x3 board to stdout from the module-level ``board`` list."""
    for row in range(3):
        print(" | | ")
        print("|".join(board[3 * row:3 * row + 3]))
        if row < 2:
            print("_|_|_")
    print(" | | ")
def print_error():
    """Print (and clear) whichever input-error flag is currently set."""
    global empty_error, wrong_place, wrong_input
    if empty_error:
        empty_error = False
        print("\nPlease enter a value.\n")
    elif wrong_place:
        wrong_place = False
        print("\nPlease enter a correct place.\n")
    elif wrong_input:
        wrong_input = False
        print("\nPlease enter a correct value.\n")
def print_mode(mode):
    """Print which game mode is active ('p_to_c' = person vs computer)."""
    if mode == "p_to_c":
        # Bug fix: user-facing typo 'Comouter' -> 'Computer'.
        print("\nPerson To Computer Mode\n")
    else:
        print("\nPerson To Person Mode\n")
def repeat_or_exit(status='win'):
    """Announce the result, then either reset both boards (Enter) or exit."""
    print_header()
    print_board()
    if status == "win":
        print("\n" + player + " win !\n")
    elif status == "tie":
        print("\n Tie !! \n")
    while True:
        reply = input("To repeat the game press Enter and to exit enter exit")
        if not reply:
            # Fresh game: clear the board and the computer's free squares.
            global board, computer_board
            board = [" "] * 9
            computer_board = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
            break
        if reply == "exit":
            exit()
def is_winner(player):
    """Return True if *player* holds any of the eight winning lines on ``board``.

    Improvement over the original: the win condition is a data table instead
    of one giant boolean expression, and the function now returns an explicit
    False (the original fell through and returned None, which is falsy but
    surprising). Callers test the result for truthiness, so this is
    backward-compatible.
    """
    global board
    lines = (
        (0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
        (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
        (0, 4, 8), (2, 4, 6),              # diagonals
    )
    return any(all(board[i] == player for i in line) for line in lines)
def check_error():
    """Validate the global ``choice``; set the matching error flag.

    Returns True when the input is invalid (empty, not '1'-'9', or an
    occupied square) and False otherwise. Improvement: the valid case now
    returns an explicit False instead of falling through to None (falsy but
    surprising); callers test for truthiness, so this is backward-compatible.
    """
    global empty_error, wrong_place, wrong_input, choice
    if choice == "":
        empty_error = True
        return True
    elif choice not in ['1', '2', '3', '4', '5', '6', '7', '8', '9']:
        wrong_input = True
        return True
    elif board[int(choice) - 1] != " ":
        wrong_place = True
        return True
    return False
def computer_move():
    """Choose the computer's next square; returns it as a string '1'-'9'.

    Strategy, in priority order: (1) take an immediate win for "X" (the
    computer moves first in p_to_c mode and so always plays "X"),
    (2) block an immediate "O" win, (3) opening-book corner/centre
    heuristics keyed on how many squares the computer still has available,
    (4) otherwise a random free square.
    """
    global computer_board,board
    # 1) If placing "X" anywhere wins, take that square (trial move is
    #    undone before returning).
    for i in range(9):
        if board[i] == " ":
            board[i] = "X"
            if is_winner("X"):
                board[i] = " "
                return str(i+1)
            board[i] = " "
    # 2) If the human ("O") could win next turn, block that square.
    for i in range(9):
        if board[i] == " ":
            board[i] = "O"
            if is_winner("O"):
                board[i] = " "
                return str(i+1)
            board[i] = " "
    # 3) Opening book: len(computer_board) counts squares the computer has
    #    not yet claimed, so 9/7/5 correspond to its 1st/2nd/3rd move.
    if len(computer_board) == 9:
        if board[4] == " ":
            return "5"          # first move: take the centre
    elif len(computer_board) == 7:
        if board[0] == " ":     # second move: take any free corner
            return "1"
        elif board[2] == " ":
            return "3"
        elif board[6] == " ":
            return "7"
        elif board[8] == " ":
            return "9"
    elif len(computer_board) == 5:
        if board[4] == " ":     # third move: centre first, then corners
            return "5"
        else:
            if board[0] == " ":
                return "1"
            elif board[2] == " ":
                return "3"
            elif board[6] == " ":
                return "7"
            elif board[8] == " ":
                return "9"
    # 4) Fallback: random choice among the squares still available.
    choice = rd.choice(computer_board)
    return choice
def play(mode):
    """Run one endless game loop for *mode* ('p_to_p' or 'p_to_c').

    In p_to_c mode the computer takes the first turn (``computer_turn``
    starts True) and the turn flag alternates after every valid move; in
    p_to_p mode both moves come from the keyboard. The loop never returns:
    finished games go through repeat_or_exit(), which resets the boards or
    exits the process.
    """
    computer_turn = True
    global choice,computer_board,player, board,empty_error,wrong_place,wrong_input
    while True:
        if mode == "p_to_p" or computer_turn == False :
            # Human turn: redraw the screen and prompt for a square.
            os.system("clear")
            print_header()
            print_mode(mode)
            print_board()
            print_error()
            choice = input("Enter the place of your "+player+" :")
        elif computer_turn :
            # Computer turn: pick a square and mark it as used immediately.
            choice = computer_move()
            print(choice)
            computer_board.remove(choice)
        # Reject empty/out-of-range/occupied input and re-prompt.
        if(check_error()):
            continue
        if mode == "p_to_c":
            # A human move also consumes one of the computer's candidates.
            if computer_turn == False:
                computer_board.remove(choice)
            computer_turn = not(computer_turn)
        board[int(choice)-1] = player
        # End of game: a win for the current player, or a full board (tie).
        if(is_winner(player)):
            repeat_or_exit()
        elif " " not in board:
            repeat_or_exit("tie")
        # Alternate X <-> O for the next turn.
        player = "O" if player == "X" else "X"
# Main menu: loops until the user chooses option 3 (exit).
while True:
    print_header()
    print(("{:^60}").format("Welcome to Tic-Tac-Toe Game"))
    print(("{:^60}").format("The Menu"))
    print("\n 1- play person vs person.")
    print(" 2- play person vs computer.")
    print(" 3- exit.\n")
    game_mode = input("Enter your choice: ")
    if game_mode == "1":
        play("p_to_p")
    elif game_mode == "2":
        play("p_to_c")
    elif game_mode == "3":
        break
    else:
        # Anything else: redraw the menu and ask again.
        continue
|
from django import forms
from django.db import models
from .models import Persona
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class FormularioPersona(forms.ModelForm) :
    """ModelForm exposing the registration fields of Persona.

    NOTE(review): 'contraseña' and 'contraseñaconfirmar' render as plain
    text inputs unless a PasswordInput widget is configured -- confirm.
    """
    class Meta:
        model = Persona
        fields = ('nombre' ,'apellido', 'email' ,'celular', 'usuario', 'region' , 'contraseña', 'contraseñaconfirmar')
class RegisterForm(UserCreationForm):
    """User-registration form built on Django's UserCreationForm.

    Bug fix: the base class was referenced as ``forms.UserCreationForm``,
    but ``django.forms`` has no such class -- ``UserCreationForm`` lives in
    ``django.contrib.auth.forms`` (already imported at the top of this
    file), so the original raised AttributeError at import time.
    """
    class Meta:
        model = User
        # NOTE(review): django.contrib.auth.models.User has no fields named
        # 'nombre', 'celular', 'region', 'contraseña', ... -- these look
        # like Persona fields; confirm the intended model/fields pairing.
        fields = ["nombre" ,"apellido", "email" ,"celular", "usuario", "region" , "contraseña", "contraseñaconfirmar"]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2019-03-26 18:38
from __future__ import unicode_literals
import apps.store.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: drops Item.isUsed and re-declares the three
    image fields with a placeholder default and a custom upload-path callable."""

    dependencies = [
        ('store', '0002_auto_20190325_2357'),
    ]

    operations = [
        # Remove the boolean 'isUsed' flag from Item.
        migrations.RemoveField(
            model_name='item',
            name='isUsed',
        ),
        # image_one stays required; two and three allow blank uploads.
        migrations.AlterField(
            model_name='item',
            name='image_one',
            field=models.ImageField(default='item_pictures/None/no-img.jpg', upload_to=apps.store.models.path_and_rename),
        ),
        migrations.AlterField(
            model_name='item',
            name='image_three',
            field=models.ImageField(blank=True, default='item_pictures/None/no-img.jpg', upload_to=apps.store.models.path_and_rename),
        ),
        migrations.AlterField(
            model_name='item',
            name='image_two',
            field=models.ImageField(blank=True, default='item_pictures/None/no-img.jpg', upload_to=apps.store.models.path_and_rename),
        ),
    ]
|
#%%
import math
import os
from operator import itemgetter
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from graspy.cluster import GaussianCluster
from graspy.embed import AdjacencySpectralEmbed, OmnibusEmbed
from graspy.models import SBMEstimator
from graspy.plot import heatmap, pairplot
from graspy.utils import binarize, cartprod, pass_to_ranks
from joblib.parallel import Parallel, delayed
from matplotlib.colors import LogNorm
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
from spherecluster import SphericalKMeans
from src.data import load_everything, load_networkx
from src.utils import savefig, meta_to_array
from src.visualization import sankey
from node2vec import Node2Vec
#%%
# Dataset version tags for this analysis run.
MB_VERSION = "mb_2019-09-23"  # NOTE(review): not referenced below -- confirm it is needed
BRAIN_VERSION = "2019-09-18-v2"
# The four directed graph types (axon/dendrite source-target combinations).
GRAPH_TYPES = ["Gad", "Gaa", "Gdd", "Gda"]
GRAPH_TYPE_LABELS = [r"A $\to$ D", r"A $\to$ A", r"D $\to$ D", r"D $\to$ A"]
N_GRAPH_TYPES = len(GRAPH_TYPES)
# Script name without the '.py' suffix, used for output naming.
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
# Load the full adjacency matrix plus per-node class and side labels.
adj, class_labels, side_labels = load_everything(
    "G", version=BRAIN_VERSION, return_class=True, return_side=True
)
graph = load_networkx("G", BRAIN_VERSION)
# Embed with node2vec: 6 dimensions, 20 random walks of length 100 per node.
node2vec = Node2Vec(
    graph, dimensions=6, workers=12, p=0.5, q=0.5, walk_length=100, num_walks=20
)
model = node2vec.fit(window=20, min_count=1, batch_words=4)
# NOTE(review): Graph.node() was removed in networkx >= 2.4 in favour of
# Graph.nodes(); this assumes an older pinned networkx -- confirm.
vecs = [model.wv.get_vector(n) for n in graph.node()]
embedding = np.array(vecs)
# Pairwise scatter of the embedding dimensions, coloured by cell class.
pairplot(embedding, labels=meta_to_array(graph, "Class"), palette="tab20")
|
'''
Python underscore naming conventions demonstrated in this file:
    _name  (single leading underscore)  -> "protected": internal by convention only
    __name (double leading underscore)  -> "private": subject to name mangling
'''
# Module-level examples of each prefix (not referenced elsewhere).
_a = "Protected"
__a = "Private"
class Produto:
    """Product with a name and a price kept in name-mangled ('private') attributes."""

    def __init__(self, nome, valor) -> None:
        self.__nome_item = nome
        self.__preco = valor

    # --- accessors ---------------------------------------------------
    def ler_nome(self):
        """Return the product name."""
        return self.__nome_item

    def ler_valor(self):
        """Return the product price."""
        return self.__preco

    # --- mutators ----------------------------------------------------
    def configurar_nome(self, nome):
        """Replace the product name."""
        self.__nome_item = nome

    def configurar_valor(self, valor):
        """Replace the product price."""
        self.__preco = valor

    # --- visibility demos ---------------------------------------------
    def _falar_algo(self):
        """Single-underscore method: internal by convention, still callable."""
        print("Falei alguma coisa")

    def __falei_um_segredo(self):
        """Double-underscore method: mangled to _Produto__falei_um_segredo."""
        print("N sera possivel ver isso de fora")
# Demonstration of the accessors/mutators and of attribute visibility.
produto = Produto("Refrigerante", 5.80)
print(produto.ler_nome())
print(produto.ler_valor())
produto.configurar_nome("Agua")
produto.configurar_valor(3)
print(produto.ler_nome())
print(produto.ler_valor())
# Single-underscore ("protected") method is still directly callable.
produto._falar_algo()
# NOTE: this last call raises AttributeError -- inside the class the name
# was mangled to _Produto__falei_um_segredo, so '__falei_um_segredo' does
# not exist on the instance. Presumably the crash is the didactic point of
# the demo; wrap it in try/except if the script must finish cleanly.
produto.__falei_um_segredo()
# -*- python -*-
# ex: set syntax=python:
import distroconf
# This is a sample buildmaster config file. It must be installed as
# 'master.cfg' in your buildmaster's base directory (although the filename
# can be changed with the --basedir option to 'mktap buildbot master').
# It has one job: define a dictionary named BuildmasterConfig. This
# dictionary has a variety of keys to control different aspects of the
# buildmaster. They are documented in docs/config.xhtml .
# This is the dictionary that the buildmaster pays attention to. We also use
# a shorter alias to save typing.
# The dictionary the buildmaster reads; 'c' is just a short alias.
c = BuildmasterConfig = {}

####### BUILDSLAVES
# Allowed buildslaves as (bot-name, bot-password) pairs; these must match
# the values given to each buildslave's mktap invocation.
from buildbot.buildslave import BuildSlave
c['slaves'] = [
BuildSlave("pkgbuilderbot", "pkgbuilderbot"),
BuildSlave("gensysbot", "gensysbot")
]
# to limit to two concurrent builds on a slave, use
# c['slaves'] = [BuildSlave("bot1name", "bot1passwd", max_builds=2)]
# 'slavePortnum' defines the TCP port to listen on. This must match the value
# configured into the buildslaves (with their --master option).
c['slavePortnum'] = 9989

####### CHANGESOURCES
# Poll the project's Subversion repository for commits; split_svn_path maps
# repo paths onto (branch, file) pairs for the schedulers.
from buildbot.changes.svnpoller import SVNPoller
from auxsvnutils import split_svn_path
# NOTE(review): plaintext SVN credentials are checked into this config --
# consider moving them out of version control.
c['change_source'] = SVNPoller(distroconf.svn,
pollinterval=distroconf.polling_time,
split_file=split_svn_path,
svnuser="usuario",
svnpasswd="usuario")
# For example, if you had CVSToys installed on your repository, and your
# CVSROOT/freshcfg file had an entry like this:
#pb = ConfigurationSet([
# (None, None, None, PBService(userpass=('foo', 'bar'), port=4519)),
# ])
# then you could use the following buildmaster Change Source to subscribe to
# the FreshCVS daemon and be notified on every commit:
#
#from buildbot.changes.freshcvs import FreshCVSSource
#fc_source = FreshCVSSource("cvs.example.com", 4519, "foo", "bar")
#c['change_source'] = fc_source
# or, use a PBChangeSource, and then have your repository's commit script run
# 'buildbot sendchange', or use contrib/svn_buildbot.py, or
# contrib/arch_buildbot.py :
#
#from buildbot.changes.pb import PBChangeSource
#c['change_source'] = PBChangeSource()
####### SCHEDULERS & BUILDERS
from os import sep
from buildbot.scheduler import AnyBranchScheduler, Scheduler, Nightly, Periodic, Triggerable
from buildbot.process import factory
from buildbot.steps import source, shell, transfer
from buildbot.steps.transfer import FileUpload
from buildbot.steps.source import SVN
from buildbot.steps import trigger
from customsteps import SVNLastTag
from custombuildsteps import RemoveSVN, SetSVNRev, GCSBuild, PBuildPkg, Unittests
from custombuildsteps import Reprepro, SetRepoPerms
from custombuildsteps import LiveHelper
c['schedulers'] = []
c['builders'] = []

### GENSYS
# Build the live system image every night at the configured HH:MM time.
gensys_hour, gensys_minute = distroconf.gensys_time.split(':')
c['schedulers'].append(Nightly(name="gensysSched", builderNames=["gensys"],
                               hour=int(gensys_hour), minute=int(gensys_minute)))
gensysfactory = factory.BuildFactory()
# livehelper: builds the image itself (long running, hence the 40 min timeout)
gensysfactory.addStep(LiveHelper(haltOnFailure=True, env=distroconf.livehelper_env, timeout=40*60))
# TODO: this should be encapsulated in a class
# Publish the finished raw image on the FTP area, world-readable.
# (0o644 replaces the legacy 0644 literal: same value, valid on Python >= 2.6
# and also on Python 3, where 0644 is a syntax error.)
gensysfactory.addStep(transfer.FileUpload(slavesrc=distroconf.rawimage, masterdest=distroconf.ftpimage, mode=0o644))
genisobuilder = {'name': 'gensys',
                 'slavename': 'gensysbot',
                 'builddir': 'gensys',
                 'factory': gensysfactory,
                 }
c['builders'].append(genisobuilder)
### APPS
# One scheduler + one builder per application: watch the app's tags directory
# and rebuild / test / publish the package whenever a new tag stabilizes.
for app in distroconf.apps:
    c['schedulers'].append(AnyBranchScheduler(name=app+"Sched",
                           branches=[distroconf.apps_dir+sep+app+sep+distroconf.tags_dir],
                           treeStableTimer=distroconf.app_timer,
                           builderNames=[app]))
    appfactory = factory.BuildFactory()
    # Export (no .svn metadata) the most recent tag of the app, with retries.
    appfactory.addStep(SVNLastTag(baseURL=distroconf.svn+sep+distroconf.apps_dir+sep+app+sep,
                       defaultBranch=distroconf.tags_dir,
                       mode="export",
                       retry=(60,3)))
    #appfactory.addStep(RemoveSVN(haltOnFailure=True))
    #appfactory.addStep(CheckBuildDeps(haltOnFailure=True))
    appfactory.addStep(PBuildPkg(haltOnFailure=True))     # build the package
    appfactory.addStep(Unittests(haltOnFailure=True))     # run the app's tests
    appfactory.addStep(Reprepro(haltOnFailure=True))      # publish into the apt repo
    appfactory.addStep(SetRepoPerms(haltOnFailure=True))  # fix repository permissions
    #appfactory.addStep(UploadPkg(haltOnFailure=True))
    appbuilder = {'name': app,
                  'slavename': 'pkgbuilderbot',
                  'builddir': app,
                  'factory': appfactory,
                  }
    c['builders'].append(appbuilder)
### METAPKGS
# One scheduler + one builder per meta-package. Unlike the apps above these
# track a plain branch (full SVN checkout, clobbered each build) instead of
# a tags directory, and build with GCSBuild instead of PBuildPkg.
for metapkg in distroconf.metapkgs:
    c['schedulers'].append(AnyBranchScheduler(name=metapkg+"Sched",
                           branches=[distroconf.metapkgs_dir+sep+metapkg],
                           treeStableTimer=distroconf.metapkg_timer,
                           builderNames=[metapkg]))
    metapkgfactory = factory.BuildFactory()
    metapkgfactory.addStep(source.SVN(baseURL=distroconf.svn+sep,
                           mode="clobber",
                           retry=(60,3)))
    metapkgfactory.addStep(SetSVNRev(haltOnFailure=True))   # record the SVN revision
    metapkgfactory.addStep(RemoveSVN(haltOnFailure=True))   # strip .svn metadata
    #metapkgfactory.addStep(CheckBuildDeps(haltOnFailure=True))
    metapkgfactory.addStep(GCSBuild(haltOnFailure=True))    # build the meta-package
    metapkgfactory.addStep(Unittests(haltOnFailure=True))
    metapkgfactory.addStep(Reprepro(haltOnFailure=True))    # publish into the apt repo
    metapkgfactory.addStep(SetRepoPerms(haltOnFailure=True))
    # metapkgfactory.addStep(UploadPkg(haltOnFailure=True))
    metapkgbuilder = {'name': metapkg,
                      'slavename': 'pkgbuilderbot',
                      'builddir': metapkg,
                      'factory': metapkgfactory,
                      }
    c['builders'].append(metapkgbuilder)
####### STATUS TARGETS
# 'status' is a list of Status Targets. The results of each build will be
# pushed to these targets. buildbot/status/*.py has a variety to choose from,
# including web pages, email senders, and IRC bots.
c['status'] = []

from buildbot.status import html
# Two waterfall web UIs: one that allows forcing builds (port 18010) and a
# read-only one (port 18011).
c['status'].append(html.WebStatus(http_port=18010, allowForce=True))
c['status'].append(html.WebStatus(http_port=18011, allowForce=False))
# from buildbot.status import mail
# c['status'].append(mail.MailNotifier(fromaddr="buildbot@localhost",
# extraRecipients=["builds@example.com"],
# sendToInterestedUsers=False))
#
#from buildbot.status import words
#c['status'].append(words.IRC(host="irc.freenode.org", nick="buildbot",
# channels=["#distroconf"]))
#
# from buildbot.status import client
# c['status'].append(client.PBListener(9988))
####### DEBUGGING OPTIONS
# if you set 'debugPassword', then you can connect to the buildmaster with
# the diagnostic tool in contrib/debugclient.py . From this tool, you can
# manually force builds and inject changes, which may be useful for testing
# your buildmaster without actually commiting changes to your repository (or
# before you have a functioning 'sources' set up). The debug tool uses the
# same port number as the slaves do: 'slavePortnum'.
#c['debugPassword'] = "debugpassword"
# if you set 'manhole', you can ssh into the buildmaster and get an
# interactive python shell, which may be useful for debugging buildbot
# internals. It is probably only useful for buildbot developers. You can also
# use an authorized_keys file, or plain telnet.
#from buildbot import manhole
#c['manhole'] = manhole.PasswordManhole("tcp:9999:interface=127.0.0.1",
# "admin", "password")
####### PROJECT IDENTITY
# the 'projectName' string will be used to describe the project that this
# buildbot is working on. For example, it is used as the title of the
# waterfall HTML page. The 'projectURL' string will be used to provide a link
# from buildbot HTML pages to your project's home page.
# Shown as the title of status pages and linked back to the project site.
c['projectName'] = "Guadalinex Guadalinfo V6 (Codename ggv6-karmic)"
c['projectURL'] = "http://svn.emergya.info/svn/ggv6/"

# the 'buildbotURL' string should point to the location where the buildbot's
# internal web server (usually the html.Waterfall page) is visible. This
# typically uses the port number set in the Waterfall 'status' entry, but
# with an externally-visible host name which the buildbot cannot figure out
# without some help.
c['buildbotURL'] = "http://ggv6.emergya.info:18010/"
|
from Camera import Camera
class CameraHubsand(Camera):
    """Concrete Camera for the Hubsand brand."""

    def createCamera(self):
        """Run the base-class creation hook, then return the Hubsand label.

        NOTE(review): whatever super().createCamera() returns is discarded
        here -- confirm that is intentional.
        """
        super().createCamera()
        return "Cámara de Hubsand"
|
from PIL import Image
import PIL
from python.utils.image_helper_utils import image_color_detection, landscape
def append_exif_scoring_comments(model_comments, images1, tech_exif_score, null_count, flag_dim_v1, flag_dim_v2,
                                 flag_iso_v1, flag_iso_v2, flag_res_v1, flag_res_v2, flag_ape_v1, flag_ape_v2, bwhite,
                                 lscape):
    """Append image-quality comments and derive colour/orientation scores.

    Only the black-and-white and landscape checks contribute comments and
    scores. The EXIF coverage counters (null_count and the flag_* values)
    are accepted for interface compatibility but intentionally produce no
    comments: in the original every one of those appends was commented out,
    and the dead ratio computations they fed raised ZeroDivisionError
    whenever images1 was empty -- removing them fixes that crash.

    Args:
        model_comments: list of comment strings; mutated in place.
        images1: list of image file names for the property (may be empty).
        tech_exif_score: overall EXIF score (unused; kept for compatibility).
        null_count, flag_*: EXIF coverage counters (unused; see above).
        bwhite: number of black-and-white images detected.
        lscape: number of non-landscape images detected.

    Returns:
        (model_comments, bwhite_score, lscape_score) where each score is
        10 when no offending image was found and 0 otherwise.
    """
    if bwhite > 0:
        bwhite_score = 0
        model_comments.append(str(bwhite) + ' images are Black and White for this property')
    else:
        bwhite_score = 10
    if lscape > 0:
        lscape_score = 0
        model_comments.append(str(lscape) + ' images are not in landscape mode for this property')
    else:
        lscape_score = 10
    return model_comments, bwhite_score, lscape_score
def exif_scoring(images1, mypath):
    """Score the technical EXIF quality of each image under mypath.

    Each image earns up to 2.5 points per criterion (dimensions, ISO,
    resolution, aperture; 10 max). Also counts black-and-white and
    non-landscape images plus the various "criterion failed" /
    "EXIF missing" counters consumed by append_exif_scoring_comments().
    """
    # Technical Exif scoring logic
    exif_score = []
    null_count = 0
    flag_dim_v1 = 0
    flag_dim_v2 = 0
    flag_iso_v1 = 0
    flag_iso_v2 = 0
    flag_res_v1 = 0
    flag_res_v2 = 0
    flag_ape_v1 = 0
    flag_ape_v2 = 0
    bwhite = 0
    lscape = 0
    for j in range(len(images1)):
        img = Image.open(mypath + images1[j])
        # NOTE(review): the bare excepts below silently swallow any failure
        # of the helpers (including programming errors).
        try:
            bwhite += image_color_detection(img, 40, 22, True)
        except:
            pass
        try:
            lscape += landscape(img)
        except:
            pass
        try:
            # JPEG-style EXIF via the private _getexif() API.
            # NOTE(review): PIL.ExifTags is referenced but only "import PIL"
            # appears at module level; if the submodule is not loaded this
            # raises and control falls through to the TIFF branch below.
            if img._getexif():
                exif = {
                    PIL.ExifTags.TAGS[k]: v
                    for k, v in img._getexif().items()
                    if k in PIL.ExifTags.TAGS
                }
            else:
                exif = {}
        except:
            # TIFF images expose tags through img.tag instead.
            from PIL.TiffTags import TAGS
            exif = {TAGS[key]: img.tag[key] for key in img.tag.keys()}
        ex_score = 0
        if bool(exif) == False:
            null_count += 1
        else:
            # Dimensions: exact match against the expected 2880x1920 size.
            if 'ExifImageHeight' in exif and 'ExifImageWidth' in exif:
                if exif["ExifImageHeight"] == 1920 and exif["ExifImageWidth"] == 2880:
                    ex_score += 2.5
                else:
                    flag_dim_v1 += 1  # Image Dimension creteria is not satisfied
            else:
                flag_dim_v2 += 1  # EXIF details are not present for Dimensions
            # ISO: lower is better, graded in three bands.
            if 'ISOSpeedRatings' in exif:
                if exif["ISOSpeedRatings"] <= 200:
                    ex_score += 2.5 * 1.0
                elif 200 < exif["ISOSpeedRatings"] <= 400:
                    ex_score += 2.5 * 0.7
                elif 400 < exif["ISOSpeedRatings"] <= 800:
                    ex_score += 2.5 * 0.4
                else:
                    flag_iso_v1 += 1  # Image ISO creteria is not satisfied
            else:
                flag_iso_v2 += 1  # EXIF details are not present for ISO
            try:
                # Resolution stored as a (numerator, denominator) rational.
                if 'XResolution' in exif:
                    if exif["XResolution"][0] / exif["XResolution"][1] >= 300:
                        ex_score += 2.5 * 1.0
                    elif 200 <= exif["XResolution"][0] / exif["XResolution"][1] < 300:
                        ex_score += 2.5 * 0.7
                    elif 150 <= exif["XResolution"][0] / exif["XResolution"][1] < 200:
                        ex_score += 2.5 * 0.4
                    else:
                        flag_res_v1 += 1  # Image Resolution creteria is not satisfied
                else:
                    flag_res_v2 += 1  # EXIF details are not present for Resolution
            except:
                # TIFF rationals are nested one level deeper: ((num, den),).
                if 'XResolution' in exif:
                    if exif["XResolution"][0][0] / exif["XResolution"][0][1] >= 300:
                        ex_score += 2.5 * 1.0
                    elif 200 <= exif["XResolution"][0][0] / exif["XResolution"][0][1] < 300:
                        ex_score += 2.5 * 0.7
                    elif 150 <= exif["XResolution"][0][0] / exif["XResolution"][0][1] < 200:
                        ex_score += 2.5 * 0.4
                    else:
                        flag_res_v1 += 1  # Image Resolution creteria is not satisfied
                else:
                    flag_res_v2 += 1  # EXIF details are not present for Resolution
            # Aperture (FNumber rational): mid apertures score highest.
            if 'FNumber' in exif:
                if 4 <= exif["FNumber"][0] / exif["FNumber"][1] < 8:
                    ex_score += 2.5 * 1.0
                elif 8 <= exif["FNumber"][0] / exif["FNumber"][1] < 12:
                    ex_score += 2.5 * 0.7
                elif 12 <= exif["FNumber"][0] / exif["FNumber"][1] < 14:
                    ex_score += 2.5 * 0.4
                else:
                    flag_ape_v1 += 1  # Image Aperture creteria is not satisfied
            else:
                flag_ape_v2 += 1  # EXIF details are not present for Apreture
        # assumes every image (even those without EXIF) contributes a score
        # to the average -- TODO confirm against the original layout
        exif_score.append(ex_score)
    if len(exif_score) > 0:
        tech_exif_score = round(sum(exif_score) / len(exif_score), 4)
    else:
        tech_exif_score = 0
    return exif_score, tech_exif_score, null_count, flag_dim_v1, flag_dim_v2, flag_iso_v1, flag_iso_v2, flag_res_v1, flag_res_v2, flag_ape_v1, flag_ape_v2, bwhite, lscape
import requests
import json

# CapMonster API key; replace the placeholder before running.
apikey = "ENTER YOUR KEY HERE"

# Ask the CapMonster API for the account balance.
response = requests.post("https://api.capmonster.cloud/getBalance", json = {
    "clientKey": apikey
})
result = json.loads(response.text)
errorId = result['errorId']
try:
    balance = result['balance']
except KeyError:
    # No 'balance' field means the key was rejected. (The original printed
    # this message and then crashed with a NameError on the balance print.)
    print("Invalid api key! Error code:" + f"{errorId}")
else:
    print('$'+f'{balance}'+' left')
|
from django.conf.urls import url
from Ufanisi import views
app_name= 'Ufanisi'

# URL routes for the Ufanisi app.
urlpatterns = [
    # NOTE(review): unnamed, so this route cannot be reverse()d.
    url(r'^$', views.HomePageView.as_view()),
    url(r'^about/$', views.AboutPageView.as_view(),name='about'),
    url(r'^projects/$', views.MissionPageView.as_view(),name='projects'),
    url(r'^blog/$', views.BlogPageView.as_view(),name='blog'),
    url(r'^contact/$', views.contact, name='contact'),
    # NOTE(review): lacks the ^...$ anchors the other patterns use, so as a
    # regex it matches any path containing 'portal/' -- confirm intended.
    url(r'portal/', views.MembersArea.as_view(), name='members-home'),
]
#-*- encoding=UTF-8 -*-
"""
Crie uma função que retorna o fatorial de um dado número.
o fatorial é representado pela seguência. 1,1,2,3,5,..n, onde um número k é sempre
a soma dos seus dois anteriores.
"""
def fatorial(n):
    """Return the n-th term of the sequence 1, 1, 2, 3, 5, ... in which each
    term is the sum of the two before it (the Fibonacci sequence, despite
    the exercise's name). The original stub returned 0 and failed every
    doctest below.

    >>> fatorial(1)
    1
    >>> fatorial(2)
    1
    >>> fatorial(3)
    2
    >>> fatorial(5)
    5
    """
    current, following = 1, 1
    for _ in range(n - 1):
        current, following = following, current + following
    return current
import pydojo
pydojo.testmod()
|
import numpy as np
import requests, torch, os, json
import numpy as np
from torch import nn
from config import setting
import tensorflow
class UniversalEncoder():
    """Client for a sentence-encoder model served over TF Serving's REST
    API, plus a small persisted vector index searched by cosine similarity.
    """

    FEATURE_SIZE = 512  # embedding size of the served model
    BATCH_SIZE = 32     # sentences per /predict request
    # default on-disk location for the persisted index
    storage_dir = str(os.path.realpath("."))+"/search_data/faiss.json"

    def __init__(self, host, port):
        # REST prediction endpoint of the TensorFlow Serving instance.
        self.server_url = "http://{host}:{port}/v1/models/model:predict".format(
            host = host,
            port = port
        )

    @staticmethod
    def load_index(dir:str):
        """Load vectors from disk, merging one JSON object per line.

        NOTE(review): reads the hard-coded "test.json" and ignores the
        `dir` argument; also references `io`, which is not imported at
        module level -- confirm before relying on this method.
        """
        file = open("test.json","r",buffering=1)
        vectors = {}
        for line in file:
            vectors.update(json.load(io.StringIO(line)))
        return vectors

    @staticmethod
    def save_index(vectors,dir:str):
        """Append the vectors dict as JSON to disk.

        NOTE(review): appends to the hard-coded "test.json" (the `dir`
        argument is ignored) and never closes the file handle; repeated
        appends rely on the line-oriented reader above.
        """
        json.dump(vectors, open("test.json","a"))

    @staticmethod
    def _standardized_input(sentence:str):
        # Normalize: drop newlines, lowercase, trim, cap at 1000 chars.
        return sentence.replace("\n", "").lower().strip()[:1000]

    def encode(self,data):
        """Encode sentences via the model server.

        Returns a dict mapping each normalized sentence to its embedding
        (a plain list of floats).
        """
        data = [self._standardized_input(sentence=sentence) for sentence in data]
        all_vectors = []
        result = {}
        for i in range(0, len(data), self.BATCH_SIZE):
            batch = data[i:i+self.BATCH_SIZE]
            res = requests.post(
                url=self.server_url,
                json = {"instances":batch}
            )
            if not res.ok:
                # NOTE(review): failures are only printed; execution then
                # continues and will fail on the missing "predictions" key.
                print("FALSE")
            vectors = [list(res.json()["predictions"])]
            all_vectors += torch.transpose(torch.Tensor(vectors),0,1)
        [result.update({data[i]:all_vectors[i].tolist()}) for i in range(len(data))]
        return result

    def build_index(self, data:list, append:bool=True):
        """Persist the shared index to storage_dir.

        NOTE(review): the freshly encoded `new_vectors` are never merged
        into setting.index_on_ram, and append=True resets the index to an
        empty tuple -- this looks unfinished; verify before use.
        """
        new_vectors = self.encode(data)
        if append == True:
            setting.index_on_ram = ()
        try:
            torch.save(setting.index_on_ram,self.storage_dir)
        except:
            # storage directory missing: create it, then retry the save
            os.mkdir(self.storage_dir.split("/")[-2])
            torch.save(setting.index_on_ram,self.storage_dir)
        return setting.index_on_ram

    def search(self,data, query, numb_result:int=1):
        """Return the numb_result entries of `data` whose indexed vectors
        are most cosine-similar to `query`.

        NOTE(review): encode() returns a dict, so self.encode([query])[0]
        is a key lookup with 0, not positional indexing -- confirm.
        """
        query_vector = self.encode([query])[0] #converter data to vectors
        cos = nn.CosineSimilarity(dim=1, eps=1e-6) #init comparse functions
        # lazily load the persisted index the first time it is needed
        if setting.index_on_ram == []:
            setting.index_on_ram = torch.load(self.storage_dir)
        temp_vectors = setting.index_on_ram.copy()
        distances = [] #comparse distances
        for i in range(len(temp_vectors)):
            distances.append(float(cos(temp_vectors[i], query_vector)))
        index_results = [] #get the top n index has closest semantics
        # repeatedly take the argmax, then overwrite it with the minimum so
        # the next iteration finds the runner-up
        min_distance = min(distances)
        for i in range(numb_result):
            index = distances.index(max(distances))
            index_results.append(index)
            distances[index] = min_distance
        result = [] #get top n result
        for i in index_results:
            result.append(data[i])
        return result

    def remove_index(self, query):
        """Delete the indexed vector most similar to `query` and persist
        the shrunken index."""
        query_vector = self.encode([query])[0] #converter data to vectors
        cos = nn.CosineSimilarity(dim=1, eps=1e-6) #init comparse functions
        if setting.index_on_ram == []:
            setting.index_on_ram = torch.load(self.storage_dir)
        temp_vectors = setting.index_on_ram.copy()
        distances = [] #comparse distances
        for i in range(len(temp_vectors)):
            distances.append(float(cos(temp_vectors[i], query_vector)))
        index = distances.index(max(distances)) #get the delete index
        setting.index_on_ram.pop(index)
        torch.save(setting.index_on_ram,self.storage_dir)
        return setting.index_on_ram
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# pesquisa.py
#
# Copyright 2018 20181bsi0121 <20181bsi0121@SR6192>
#
#
def main():
    """Interactive survey: read person records (sex, eye colour, hair
    colour, age) until an age of -1 is entered, then report the highest
    age seen and the percentage of records matching the target profile
    (female, 18-35 years old, green eyes, blond hair).
    """
    sexo = ''; olhos = ''; cabelo = ''; idade = '';
    maior_idade = 0; total = 0; total_filtro = 0; porcentagem = 0;
    # read the first record
    print((total+1),'º) CADASTRO: ')
    sexo = input('Sexo (feminino, masculino): ')
    olhos = input('Cor do olho (azuis, verdes, castanho): ')
    cabelo = input('Cor do cabelo (louros, catanhos, pretos): ')
    idade = float(input('Idade: '))
    while idade != -1:
        # count this record
        total = total+1
        # track the highest age entered
        if idade > maior_idade:
            maior_idade = idade
        # target profile: female, aged 18-35, green eyes, blond hair
        if idade >= 18 and idade <= 35 and sexo == 'feminino' and olhos == 'verdes' and cabelo == 'louros':
            total_filtro = total_filtro + 1
        print('----------------------//----------------------')
        # read the next record
        print((total+1),'º) CADASTRO: ')
        sexo = input('Sexo (femino, masculino): ')
        olhos = input('Cor do olho (azuis, verdes, castanho): ')
        cabelo = input('Cor do cabelo (louros, catanhos, pretos): ')
        idade = float(input('Idade: '))
    # The sentinel record (age -1) was never counted, so `total` is exact.
    # Guard against division by zero when the very first age is -1 (the
    # original crashed with ZeroDivisionError in that case).
    if total > 0:
        porcentagem = (100*total_filtro)/total
    print('\n\nRESULTADO----------------------')
    print('Maior idade: ',maior_idade)
    print('Porcentagem (femino , idade entre 18 e 35 anos , olhos verdes, cabelos loiros): %2.f *porcento'%porcentagem)
    return 0

if __name__ == '__main__':
    main()
|
"""
Compare observed and modeled amplitude and phase for all constituents
at all stations. The goal is to determine optimum factors to use
when modifying the tidal forcing in the model.
"""
import os
import sys
pth = os.path.abspath('../../LiveOcean/alpha')
if pth not in sys.path:
sys.path.append(pth)
import Lfun
Ldir = Lfun.Lstart()
import pandas as pd
import pickle
import numpy as np
import obsfun as ofn
# all tide observation/model output lives under this directory
dir0 = Ldir['parent'] + 'ptools_output/tide/'

# select model run
#for gtagex in ['cas4_v2_lo6biom', 'cas5_v3_lo8', 'cas6_v1_lo8', 'cas6_v2_lo8', 'cas6_v3_lo8b']:
for gtagex in ['cas6_v3_lo8b']:
    year = 2017

    noaa_sn_dict, dfo_sn_dict, sn_dict = ofn.get_sn_dicts()
    sn_list = sn_dict.keys()

    def get_AG(hn, Hobs, Hmod):
        # Extract amplitude (A), phase (g) and frequency (converted from
        # cycles/hour to cycles/day) for constituent hn from the observed
        # and modeled harmonic fits.
        ho = Hobs
        hm = Hmod
        Ao = ho.A[ho.name==hn]
        Am = hm.A[hm.name==hn]
        Go = ho.g[ho.name==hn]
        Gm = hm.g[hm.name==hn]
        Fo = 24*ho.aux.frq[ho.name==hn] # cycles per day
        Fm = 24*hm.aux.frq[hm.name==hn]
        #
        return Ao, Am, Go, Gm, Fo, Fm

    # station groupings used for the summary statistics below
    sn_coast = ['Charleston', 'South Beach', 'Garibaldi', 'Toke Point',
        'Westport', 'La Push', 'Neah Bay', 'Tofino', 'Bamfield']
    sn_salish = ['Port Angeles', 'Friday Harbor', 'Cherry Point', 'Port Townsend',
        'Seattle', 'Tacoma', 'Point Atkinson', 'Vancouver', 'Patricia Bay',
        'Victoria Harbour', 'Campbell River', 'New Westminster']

    df_dict = dict() # each DataFrame has one constituent
    # one row per station, columns: amplitude ratio and phase difference
    df = pd.DataFrame(index=sn_list, columns=['ar', 'dph'])
    for hn in ofn.hn_list:
        df_dict[hn] = df.copy()

    for name in sn_list:
        # load observational data
        obs_dir = dir0 + 'obs_data/'
        sn = sn_dict[name]
        hfn = obs_dir + 'h_' + str(sn) + '_' + str(year) + '.p'
        Hobs = pickle.load(open(hfn, 'rb'))
        # load model data
        mod_dir = dir0 + 'mod_data/' + gtagex + '/'
        sn = sn_dict[name]
        hfn = mod_dir + 'h_' + str(sn) + '_' + str(year) + '.p'
        Hmod = pickle.load(open(hfn, 'rb'))
        # get constituent info
        for hn in ofn.hn_list:
            Ao, Am, Go, Gm, Fo, Fm = get_AG(hn, Hobs, Hmod)
            df_dict[hn].loc[name, 'ar'] = Am/Ao
            # fix phase difference when they straddle 360
            if (Gm - Go) > 180:
                Gm = Gm - 360
            elif (Gm - Go) < -180:
                Gm = Gm + 360
            else:
                pass
            df_dict[hn].loc[name, 'dph'] = Gm - Go

    # report per-constituent statistics for each station group
    print('')
    print(50*'=')
    print(gtagex)
    print(50*'=')
    print('\nCoast Stations: mean (std)')
    for hn in ofn.hn_list:
        df = df_dict[hn]
        dff = df.loc[sn_coast,:]
        print(' %s: Amplitude Ratio = %5.2f (%5.5f), Phase Difference = %5.1f (%5.1f) [deg]' % (hn,
            dff['ar'].mean(), dff['ar'].std(), dff['dph'].mean(), dff['dph'].std()))
    print('\nSalish Stations: mean (std)')
    for hn in ofn.hn_list:
        df = df_dict[hn]
        dff = df.loc[sn_salish,:]
        print(' %s: Amplitude Ratio = %5.2f (%5.5f), Phase Difference = %5.1f (%5.1f) [deg]' % (hn,
            dff['ar'].mean(), dff['ar'].std(), dff['dph'].mean(), dff['dph'].std()))
|
# encoding:utf-8
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from rest_framework.views import APIView
from omv.serializers import OmvSerializer
from rest_framework import status
from rest_framework.permissions import IsAuthenticated, AllowAny
from omv.models import *
from wifibytes.authlib import *
from wifibytes.omv_functions import (
activarBuzonDeVoz, desactivarBuzonDeVoz,
activarRoaming, desactivarRoaming
)
from cliente.models import MobilsClients
class OmvAPIListView(APIView):
    """Read-only listing endpoint for every Omv record."""

    def get(self, request, format=None):
        """Return all Omv rows serialized as JSON with HTTP 200."""
        omv_rows = Omv.objects.all()
        payload = OmvSerializer(omv_rows, many=True).data
        return Response(payload, status=status.HTTP_200_OK)
class SetServiciosView(APIView):
    """Toggle per-line mobile services (voicemail, roaming) for the
    authenticated client's mobile line.
    """

    permission_classes = (AllowAny,)

    # request 'function' value -> (provisioning call, flag attribute, new value)
    _ACTIONS = {
        'activarBuzonDeVoz': (activarBuzonDeVoz, 'buzon_voz', True),
        'desactivarBuzonDeVoz': (desactivarBuzonDeVoz, 'buzon_voz', False),
        'activarRoaming': (activarRoaming, 'roaming', True),
        'desactivarRoaming': (desactivarRoaming, 'roaming', False),
    }

    def post(self, request, format=None):
        """Apply the requested service change.

        Expects 'linea' (the mobile number) and 'function' (one of the
        _ACTIONS keys) in the request body. Returns the current/in-progress
        state of both services, or HTTP 400 with a short message on error.
        """
        received = request.data
        try:
            client = Cliente.objects.get(consumer_user=request.user)
        except Exception as error:
            print(error)
            return Response(
                'Cliente Incorrecto',
                status=status.HTTP_400_BAD_REQUEST
            )
        try:
            mobil = MobilsClients.objects.get(
                codcliente=client,
                mobil=received['linea']
            )
        except Exception as error:
            print(error)
            return Response(
                'Linea Movil Incorrecta',
                status=status.HTTP_400_BAD_REQUEST
            )
        try:
            if 'function' in received:
                action = self._ACTIONS.get(received['function'])
                if action is not None:
                    call, attr, value = action
                    try:
                        # Best effort: the local flag is still updated when
                        # the remote call fails (original behaviour).
                        call(mobil)
                    except Exception as error:
                        print(error)
                    setattr(mobil, attr, value)
                # An unknown function name still saves and returns the
                # current state, matching the original control flow.
                mobil.save()
            else:
                return Response(
                    'Funcion Incorrecta',
                    status=status.HTTP_400_BAD_REQUEST
                )
        except Exception as error:
            return Response(
                'Error procesando la Peticion',
                status=status.HTTP_400_BAD_REQUEST
            )
        return Response(
            {
                'buzon_voz': {
                    'status': mobil.buzon_voz_procesing,
                    'value': mobil.buzon_voz
                },
                # Fixed: the original emitted the malformed key 'status:'
                # and swapped status/value relative to the buzon_voz entry.
                'roaming': {
                    'status': mobil.roaming_procesing,
                    'value': mobil.roaming
                }
            }
        )
|
# coding: utf-8
"""
"""
import torch
import torch.optim as optim
import torch.nn as nn
import os
import time
import copy
import numpy as np
import torch.nn.functional as F
from tensorboardX import SummaryWriter
from sklearn.metrics import confusion_matrix
from visual_confuse_matrix import make_confusion_matrix
from dataset import genDataset
from model import SegClsModule
from sklearn.metrics import cohen_kappa_score
import argparse
import logging
import os
import sys
import torchvision.transforms as transforms
import cv2
import numpy as np
import math
import random
import yaml
from pathlib import Path
from loss import Weighted_Jaccard_loss
from utils import dice_coef, probs2one_hot
def setup_seed(seed):
    """Seed every RNG in use (torch CPU, torch CUDA, numpy, python random)
    and force deterministic cuDNN kernels for reproducible runs."""
    for seeder in (torch.manual_seed, torch.cuda.manual_seed_all,
                   np.random.seed, random.seed):
        seeder(seed)
    torch.backends.cudnn.deterministic = True
def setup_logger(name, save_dir, distributed_rank, filename="log.txt"):
    """Create a DEBUG-level logger that writes to stdout and, optionally,
    to a file.

    name: application information (logger name)
    save_dir: log dir; falsy disables the file handler
    distributed_rank: only host 0 gets handlers attached
    filename: log file name inside save_dir
    """
    log = logging.getLogger(name)
    log.setLevel(logging.DEBUG)
    # don't log results for the non-master process
    if distributed_rank > 0:
        return log
    fmt = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
    console = logging.StreamHandler(stream=sys.stdout)
    console.setLevel(logging.DEBUG)
    console.setFormatter(fmt)
    log.addHandler(console)
    if save_dir:
        file_handler = logging.FileHandler(os.path.join(save_dir, filename))
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(fmt)
        log.addHandler(file_handler)
    return log
def set_visible_gpu(gpu_idex):
    """Expose a single CUDA device to this process.

    Orders devices by PCI bus id and restricts CUDA_VISIBLE_DEVICES to the
    given index, e.g.::

        set_visible_gpu(1)
        print(os.environ["CUDA_DEVICE_ORDER"])
        print(os.environ["CUDA_VISIBLE_DEVICES"])
    """
    os.environ.update({
        "CUDA_DEVICE_ORDER": "PCI_BUS_ID",
        "CUDA_VISIBLE_DEVICES": "{0}".format(gpu_idex),
    })
def get_results(val_labels, val_outs, save_cf_png_dir, save_metric_dir):
    """Compute binary-classification metrics from a confusion matrix, save
    a confusion-matrix plot and append the metrics line to a text file.

    val_labels / val_outs: ground-truth and predicted binary labels.
    save_cf_png_dir: path for the confusion-matrix image.
    save_metric_dir: text file the metrics string is appended to.
    Returns the formatted metrics string.

    NOTE(review): assumes a 2x2 confusion matrix; a denominator of zero
    (e.g. no positives at all) raises ZeroDivisionError.
    """
    CM = confusion_matrix(val_labels, val_outs)
    labels = ['True Neg','False Pos','False Neg','True Pos']
    categories = ['0', '1']
    make_confusion_matrix(CM,
                          group_names=labels,
                          categories=categories,
                          cmap='Blues',save_dir=save_cf_png_dir)
    #make_confusion_matrix(CM, figsize=(8,6), cbar=False)
    TN = CM[0][0]
    FN = CM[1][0]
    TP = CM[1][1]
    FP = CM[0][1]
    # Sensitivity, hit rate, recall, or true positive rate
    TPR = TP/(TP+FN)
    # Specificity or true negative rate
    TNR = TN/(TN+FP)
    # Precision or positive predictive value
    PPV = TP/(TP+FP)
    # Negative predictive value
    NPV = TN/(TN+FN)
    # Fall out or false positive rate
    FPR = FP/(FP+TN)
    # False negative rate
    FNR = FN/(TP+FN)
    # False discovery rate
    FDR = FP/(TP+FP)
    # Overall accuracy
    ACC = (TP+TN)/(TP+FP+FN+TN)
    result_str = "Sensitivity=%.3f, Specificity=%.3f, PPV=%.3f, NPV=%.3f, FPR=%.3f, FNR=%.3f, FDR=%.3f, ACC=%.3f\n" % (TPR, TNR, PPV, NPV, FPR, FNR, FDR, ACC)
    save_dir = save_metric_dir
    with open(save_dir, "a+") as f:
        f.writelines([result_str])
    return result_str
def get_learning_rate(optimizer):
    """Return the learning rate of the optimizer's first param group."""
    first_group = optimizer.param_groups[0]
    return first_group["lr"]
def train_model(model, dataloaders, criterion, optimizer, num_epochs=25, is_inception=False, log_dir="./log/", scheduler=None, writer=None, logger=None, opt=None):
    """Train the joint segmentation + classification model with optional
    LMMD domain-adaptation losses, periodically evaluating lung
    segmentation and infection classification on the target domain.

    dataloaders: dict with 'train', 'tgt_cls_train', 'tgt_cls_val' and
    'tgt_lung_seg_val' loaders. criterion is the segmentation loss;
    classification uses cross-entropy. Checkpoints go to log_dir.
    """
    print(opt)
    since = time.time()
    val_acc_history = []
    best_acc = 0.0
    batch_size = dataloaders['train'].batch_size
    print_step = 5 # print info per 10 batches
    val_losses = []
    # endless-ish iterator over unlabeled target-domain images for MMD
    tgt_cls_train_iter = iter(dataloaders['tgt_cls_train'])
    for epoch in range(num_epochs):
        logger.info('Epoch {}/{}'.format(epoch, num_epochs - 1))
        logger.info('-' * 10)
        learning_rate = get_learning_rate(optimizer)
        writer.add_scalar("lr", learning_rate, epoch)
        epoch_val_preds = []
        epoch_val_y = []
        epoch_train_preds = []
        epoch_train_y = []
        # Each epoch has a training and validation phase
        model.train()  # Set model to training mode
        # NOTE(review): running_loss is never incremented below, so
        # print_loss is always 0 and goes unused.
        running_loss = 0.0
        # Iterate over data.
        for batch_idx, (inputs, labels) in enumerate(dataloaders["train"], 0):
            inputs = inputs.to(device)
            # adjust labels: remap DRR mask values to class ids 1 (lung)
            # and 2 (infection)
            labels[labels==opt.drr_mask_value_dict["lung"]] = 1
            labels[labels==opt.drr_mask_value_dict["infection"]] = 2
            labels = labels[:,-1].to(device)
            # image-level label: 1 if any infection pixel is present
            tag_labels = ((labels == 2).sum(-1).sum(-1) > 0).type(torch.long).to(device) # batch_size, 1
            c_labels = tag_labels if opt.do_cls_mmd else None
            s_labels = labels if opt.do_seg_mmd else None
            if opt.do_cls_mmd or opt.do_seg_mmd:
                # tgt_cls: pull the next target-domain batch, restarting
                # the iterator when exhausted.
                # NOTE(review): .next() is the old DataLoader-iterator API;
                # newer PyTorch requires next(iterator).
                try:
                    tgt_inputs, _ = tgt_cls_train_iter.next()
                except StopIteration:
                    tgt_cls_train_iter = iter(dataloaders['tgt_cls_train'])
                    tgt_inputs, _ = tgt_cls_train_iter.next()
                tgt_inputs = tgt_inputs.to(device)
            else:
                tgt_inputs = None
            # zero the parameter gradients
            optimizer.zero_grad()
            model.zero_grad()
            # forward
            # track history if only in train
            with torch.set_grad_enabled(True):
                src_cls_logits, loss_cls_lmmd, src_seg_logits, loss_seg_lmmd, _ = model(inputs, tgt_img=tgt_inputs, c_label=c_labels, s_label=s_labels)
                # MMD weight ramps from 0 to 1 over training (DANN-style)
                lambd = 2 / (1 + math.exp(-10 * (epoch) / num_epochs)) - 1
                if opt.do_cls and opt.do_cls_mmd:
                    loss_cls_lmmd = lambd * loss_cls_lmmd * opt.lambda_cls_mmd
                    loss_cls_lmmd_item = loss_cls_lmmd.item()
                else:
                    loss_cls_lmmd = 0
                    loss_cls_lmmd_item = 0
                if opt.do_seg and opt.do_seg_mmd:
                    loss_seg_lmmd = lambd * loss_seg_lmmd * opt.lmabda_seg_mmd
                    loss_seg_lmmd_item = loss_seg_lmmd.item()
                else:
                    loss_seg_lmmd = 0
                    loss_seg_lmmd_item = 0
                if opt.do_seg:
                    loss_seg = criterion(labels, src_seg_logits, class_weights=opt.seg_class_weights) * opt.lambda_seg
                    loss_seg_item = loss_seg.item()
                else:
                    loss_seg = 0
                    loss_seg_item = 0
                if opt.do_cls:
                    loss_cls = F.cross_entropy(src_cls_logits, tag_labels) * opt.lambda_cls
                    loss_cls_item = loss_cls.item()
                else:
                    loss_cls = 0
                    loss_cls_item = 0
                loss = loss_seg + loss_cls + loss_seg_lmmd + loss_cls_lmmd
                loss_item = loss.item()
                loss.backward()
                optimizer.step()
                # statistics
                if batch_idx % print_step == 0: # print info
                    print_loss = running_loss / ((batch_idx+1)*batch_size)
                    logger.info("Train E{:>03} B{:>05} LR:{:.8f} Loss: {:.4f} LSeg: {:.4f} SegMmd: {:.4f} LCls: {:.4f} ClsMmd: {:.4f}".format(epoch, batch_idx, learning_rate, loss_item, loss_seg_item, loss_seg_lmmd_item, loss_cls_item, loss_cls_lmmd_item))
        scheduler.step()
        # checkpoint every epoch (overwrites the same file)
        weight_path = os.path.join(log_dir, "latest.pth")
        torch.save(model.state_dict(), weight_path)
        if ((epoch+1) % opt.eval_times == 0 or epoch+1 == num_epochs) and opt.do_seg:
            # eval lung segmentation
            logger.info("-"*8+"eval lung segmentation"+"-"*8)
            model.eval()
            all_dices = []
            for batch_idx, (inputs, labels) in enumerate(dataloaders["tgt_lung_seg_val"], 0):
                # recover the source image name for saving the mask
                annotation = dataloaders["tgt_lung_seg_val"].dataset.annotations[batch_idx]
                img_dir = annotation.strip().split(',')[0]
                img_name = Path(img_dir).name
                inputs = inputs.to(device)
                # adjust labels: X-ray masks use their own lung value
                labels[labels==opt.xray_mask_value_dict["lung"]] = 1
                labels = labels[:,-1].to(device)
                # one-hot over {background, lung}
                labels = torch.stack([labels == c for c in range(2)], dim=1)
                with torch.set_grad_enabled(False):
                    _, _, seg_logits, _, _ = model(inputs)
                seg_probs = torch.softmax(seg_logits, dim=1)
                predicted_mask = probs2one_hot(seg_probs.detach())
                # change the infection to Lung: merge the infection channel
                # into the lung channel so the 2-class GT can be compared
                predicted_mask_lung = predicted_mask[:,:-1]
                predicted_mask_lung[:,-1] += predicted_mask[:,-1]
                dices = dice_coef(predicted_mask_lung, labels.detach().type_as(predicted_mask)).cpu().numpy()
                all_dices.append(dices) # [(B,C)]
                predicted_mask_lung = predicted_mask_lung.squeeze().cpu().numpy() # 3xwxh
                mask_inone = (np.zeros_like(predicted_mask_lung[0])+predicted_mask_lung[1]*255).astype(np.uint8)
                # save dir:
                save_dir = os.path.join(opt.logs, "tgt_lung_seg_val", "ep%03d"%epoch)
                #
                if not os.path.exists(save_dir):
                    os.makedirs(save_dir)
                cv2.imwrite(os.path.join(save_dir, img_name), mask_inone)
            avg_dice = np.mean(np.concatenate(all_dices, 0), 0) #
            logger.info("tgt_lung_seg_val:EP%03d,[%d/%d],dice0:%.03f,dice1:%.03f,dice:%.03f"
                        % (epoch, batch_idx, len(dataloaders['tgt_lung_seg_val'].dataset)//inputs.shape[0],
                           avg_dice[0], avg_dice[1], np.mean(np.concatenate(all_dices, 0))))
        if ((epoch+1) % opt.eval_cls_times == 0 or epoch+1 == num_epochs):
            # eval infection segmentation and cls
            logger.info("-"*8+"eval infection cls"+"-"*8)
            model.eval()
            val_gt = []
            val_cls_pred = []
            val_seg_pred = []
            for batch_idx, (inputs, labels) in enumerate(dataloaders["tgt_cls_val"], 0):
                inputs = inputs.to(device)
                # adjust label
                val_gt.append(labels.cpu().data.numpy())
                with torch.set_grad_enabled(False):
                    annotation = dataloaders["tgt_cls_val"].dataset.annotations[batch_idx]
                    img_dir = annotation.strip().split(',')[0]
                    img_name = Path(img_dir).name
                    cls_logits, _, seg_logits, _, _ = model(inputs)
                    if opt.do_seg:
                        seg_probs = torch.softmax(seg_logits, dim=1)
                        predicted_mask_onehot = probs2one_hot(seg_probs.detach())
                        # for save: lung pixels drawn at 128, infection at 255
                        predicted_mask = predicted_mask_onehot.squeeze().cpu().numpy() # 3xwxh
                        mask_inone = (np.zeros_like(predicted_mask[0])+predicted_mask[1]*128+predicted_mask[2]*255).astype(np.uint8)
                        # save dir:
                        save_dir = os.path.join(opt.logs, "tgt_cls_val", "ep%03d"%epoch)
                        #
                        if not os.path.exists(save_dir):
                            os.makedirs(save_dir)
                        cv2.imwrite(os.path.join(save_dir, img_name), mask_inone)
                        # seg2cls: derive an image-level label from the mask
                        preds_cls_seg = (predicted_mask_onehot[:,-1:].sum(-1).sum(-1) > 0).cpu().numpy().astype(np.uint8)
                        val_seg_pred.append(preds_cls_seg)
                    # cls
                    #print(cls_logits)
                    if opt.do_cls:
                        probs_cls = torch.softmax(cls_logits, dim=1)
                        preds_cls = (probs_cls[...,1:] > 0.5).type(torch.long)
                        val_cls_pred.append(preds_cls.cpu().data.numpy())
            if not os.path.exists(os.path.join(opt.logs, "cf")):
                os.makedirs(os.path.join(opt.logs, "cf"))
            val_gt = np.concatenate(val_gt, axis=0)
            if opt.do_cls:
                val_cls_pred = np.concatenate(val_cls_pred, axis=0)
                save_cf_png_dir = os.path.join(opt.logs, "cf", "ep%03d_cls_cf.png"%epoch)
                save_metric_dir = os.path.join(opt.logs, "metric_cls.txt")
                result_str = get_results(val_gt, val_cls_pred, save_cf_png_dir, save_metric_dir)
                logger.info("tgt_cls_val:EP%03d,[cls]: %s" % (epoch, result_str))
            if opt.do_seg:
                val_seg_pred = np.concatenate(val_seg_pred, axis=0)
                # seg2cls
                save_cf_png_dir = os.path.join(opt.logs, "cf", "ep%03d_seg_cf.png"%epoch)
                save_metric_dir = os.path.join(opt.logs, "metric_seg.txt")
                result_str = get_results(val_gt, val_seg_pred, save_cf_png_dir, save_metric_dir)
                logger.info("tgt_seg_val:EP%03d,[seg2cls]: %s" % (epoch, result_str))
    time_elapsed = time.time() - since
    logger.info("Training complete in {:.0f}m {:.0f}s".format(time_elapsed // 60, time_elapsed % 60))
def get_argument():
    """Parse CLI args, merge the YAML config, and apply experiment overrides.

    Reads ``--config`` (a YAML file), copies every key under ``common`` onto
    the argparse namespace, then hard-codes this experiment's settings
    (seg+cls heads on, MMD off, fixed seed 5050) and prepares the log dir.

    Returns:
        argparse.Namespace: fully populated option object; ``opt.logs`` is the
        created per-experiment log directory.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default="./cfgs/experiment.yaml", type=str)
    opt = parser.parse_args()
    with open(opt.config) as f:
        # safe_load: plain yaml.load without a Loader is unsafe (arbitrary
        # object construction) and raises TypeError on PyYAML >= 6.
        config = yaml.safe_load(f)
    for k, v in config['common'].items():
        setattr(opt, k, v)
    # replace experiment: switch the "only" variant name to the "seg" variant
    opt.experiment = opt.experiment.replace("only", "seg")
    opt.seg_augment = True
    opt.cls_augment = True
    opt.do_cls_mmd = False
    opt.do_seg = True
    opt.do_cls = True
    opt.do_seg_mmd = False
    opt.eval_cls_times = 50
    opt.eval_times = 50
    opt.random_seed = 5050
    # Duplicate the configured dataset index list, then flip the last flag of
    # the two duplicated entries (indexes 2 and 3).
    selected_drr_datasets_indexes = np.array(opt.selected_drr_datasets_indexes+opt.selected_drr_datasets_indexes)
    print(selected_drr_datasets_indexes[-1][-1])
    selected_drr_datasets_indexes[2][-1] = 1
    selected_drr_datasets_indexes[3][-1] = 1
    # back to plain lists so the namespace stays YAML/JSON friendly
    opt.selected_drr_datasets_indexes = [list(_) for _ in list(selected_drr_datasets_indexes)]
    opt.logs = "logs_experiment04_r5050"
    log_dir = "./{}/{}/".format(opt.logs, opt.experiment)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    opt.logs = log_dir
    return opt
if __name__ == "__main__":
    # Entry point: parse options, pin the GPU, seed RNGs, build model/data,
    # then launch training.
    opt = get_argument()
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(opt.gpuid)
    setup_seed(opt.random_seed)
    # This script only supports experiment mode 12.
    assert opt.mode == 12, ("opt.mode is not supported in %s" % __file__)
    log_dir = opt.logs
    logger = setup_logger("{}".format(os.path.basename(__file__).split(".")[0]),
        save_dir=opt.logs, distributed_rank=0, filename="log.txt")
    logger.info(opt)
    batch_size = opt.batch_size
    num_epochs = opt.num_epochs
    use_pretrained = True
    device_name = "cuda" if torch.cuda.is_available() else "cpu"
    device = torch.device(device_name)
    model_ft = SegClsModule(opt)
    # Four splits: source train, target-domain cls train/val, target lung-seg val.
    train_dataset, tgt_cls_train_dataset, tgt_cls_val_dataset, tgt_lung_seg_val_dataset = genDataset(opt)
    logger.info("-"*8+"train:"+"-"*8)
    logger.info(train_dataset.annotations)
    logger.info("-"*8+"tgt_cls_train:"+"-"*8)
    logger.info(tgt_cls_train_dataset.annotations)
    logger.info("-"*8+"tgt_cls_val:"+"-"*8)
    logger.info(tgt_cls_val_dataset.annotations)
    logger.info("-"*8+"tgt_lung_seg_val:"+"-"*8)
    logger.info(tgt_lung_seg_val_dataset.annotations)
    # Validation loaders use batch size 1 and no shuffling so per-sample
    # outputs can be matched back to annotation files.
    image_datasets = {'train': train_dataset, 'tgt_cls_train': tgt_cls_train_dataset, 'tgt_cls_val': tgt_cls_val_dataset, "tgt_lung_seg_val": tgt_lung_seg_val_dataset}
    shuffles = {"train": True,'tgt_cls_train': True, 'tgt_cls_val': False, "tgt_lung_seg_val": False}
    batch_sizes_dict = {"train": batch_size,'tgt_cls_train': batch_size, 'tgt_cls_val': 1, "tgt_lung_seg_val": 1}
    drop_lasts = {"train": True,'tgt_cls_train': True, 'tgt_cls_val': False, "tgt_lung_seg_val": False}
    dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_sizes_dict[x], shuffle=shuffles[x], num_workers=4, drop_last=drop_lasts[x]) for x in ['train', 'tgt_cls_train', 'tgt_cls_val', "tgt_lung_seg_val"]}
    # Send the model to GPU
    model_ft = model_ft.to(device)
    params_to_update = model_ft.parameters()
    logger.info("Params to learn:")
    for name,param in model_ft.named_parameters():
        # every parameter is trainable (no frozen backbone)
        param.requires_grad = True
        logger.info("\t"+name)
    # Observe that all parameters are being optimized
    optimizer_ft = optim.Adam(params_to_update, lr=opt.lr)
    criterion = Weighted_Jaccard_loss#nn.CrossEntropyLoss()
    def lambda_rule(epoch):
        # Linear LR decay over the second half of training
        # (constant for the first half, then decays toward zero).
        lr_l = 1.0 - max(0, epoch + opt.num_epochs//2 - opt.num_epochs) / float(opt.num_epochs//2 + 1)
        return lr_l
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer_ft, lr_lambda=lambda_rule)
    ## summarywriter
    writer = SummaryWriter(log_dir=log_dir)
    train_model(model_ft, dataloaders_dict, criterion, optimizer_ft, num_epochs=num_epochs, is_inception=False, log_dir=log_dir, scheduler=scheduler, writer=writer, logger=logger, opt=opt)
|
import os
import cv2
import sys
from sqlite3 import connect
from PIL import Image
import numpy as np
from PIL.ImageQt import ImageQt
from PySide6.QtCore import QThread, Signal
from PySide6.QtCore import *
from PySide6.QtGui import QIcon, QPixmap
from PySide6.QtWidgets import QApplication, QWidget, QStackedWidget, QDialog, QTableWidgetItem, QVBoxLayout
from PySide6.QtUiTools import QUiLoader
def convertCvImage2QtImage(cv_image):
    """Convert a BGR OpenCV frame into a QPixmap scaled to fit 480x360."""
    as_rgb = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
    pil_frame = Image.fromarray(as_rgb).convert('RGB')
    pixmap = QPixmap.fromImage(ImageQt(pil_frame))
    return pixmap.scaled(480, 360, Qt.KeepAspectRatio)
class Camera(QThread):
    """Worker thread that captures webcam frames and builds a 3x3 filter grid.

    For each successfully captured frame it emits
    ``frame_signal(raw_frame, mask_grid)`` where the grid is produced by
    :meth:`make_masks`.
    """
    # (raw BGR frame, 3x3 filtered collage) emitted per captured frame
    frame_signal = Signal(object, object)

    def __init__(self):
        super(Camera, self).__init__()

    def make_masks(self, image):
        """Return a 3x3 collage of filtered, half-size copies of *image*.

        Row 1: original | pencil-sketch | negative.
        Row 2: equalized | RGB-swapped | sepia tint.
        Row 3: split-threshold | hue-shifted | three-band invert.
        """
        # mask 1: pencil-sketch via color dodge of gray over a blurred inverse
        image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image_gray_inv = cv2.GaussianBlur(255 - image_gray, (21, 21), 0)
        out_1 = cv2.divide(image_gray, 255 - image_gray_inv, scale=256.0)
        out_1 = cv2.resize(out_1, (0, 0), fx=0.5, fy=0.5)  # half size
        out_1 = cv2.cvtColor(out_1, cv2.COLOR_GRAY2BGR)
        # mask 2: global histogram equalization
        image_equal = cv2.equalizeHist(image_gray)
        image_equal = cv2.resize(image_equal, (0, 0), fx=0.5, fy=0.5)  # half size
        image_equal = cv2.cvtColor(image_equal, cv2.COLOR_GRAY2BGR)
        # mask 3: BGR displayed as RGB (channel swap)
        image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image_rgb = cv2.resize(image_rgb, (0, 0), fx=0.5, fy=0.5)
        # mask 4: negative
        image_bt_not = cv2.bitwise_not(image)
        image_bt_not = cv2.resize(image_bt_not, (0, 0), fx=0.5, fy=0.5)
        # mask 5: sepia-like tint blended over the frame
        image_2 = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)
        overlay = np.full((image.shape[0], image.shape[1], 4), (20,66,112,1), dtype='uint8')
        cv2.addWeighted(overlay, 0.6, image_2, 1.0, 0, image_2)
        image_sepia = cv2.cvtColor(image_2, cv2.COLOR_BGRA2BGR)
        image_sepia = cv2.resize(image_sepia, (0, 0), fx=0.5, fy=0.5)
        # mask 6: different threshold per vertical half, stitched back together
        half_gray_1 = image_gray[:round(image.shape[0]/2), :]
        half_gray_2 = image_gray[round(image.shape[0] / 2):, :]
        _, mask1 = cv2.threshold(half_gray_1, 90, 255, cv2.THRESH_TOZERO)
        _, mask2 = cv2.threshold(half_gray_2, 120, 255, cv2.THRESH_TRUNC)
        half_binary = cv2.vconcat([mask1, mask2])
        half_binary = cv2.cvtColor(half_binary, cv2.COLOR_GRAY2BGR)
        half_binary = cv2.resize(half_binary, (0, 0), fx=0.5, fy=0.5)
        # mask 7: hue channel shifted by +50 (uint8 arithmetic wraps)
        image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image_hsv)
        image_h = cv2.merge((h+50, s, v))
        image_h = cv2.cvtColor(image_h, cv2.COLOR_HSV2BGR)
        image_h = cv2.resize(image_h, (0, 0), fx=0.5, fy=0.5)
        # mask 8: invert the top and bottom thirds, keep the middle third
        part_1 = image[:round(image.shape[0]/3), :, :]
        part_1 = cv2.bitwise_not(part_1)
        part_2 = image[round(image.shape[0]/3):round(image.shape[0]/1.5), :, :]
        part_3 = image[round(image.shape[0] / 1.5):, :, :]
        part_3 = cv2.bitwise_not(part_3)
        three_part = cv2.vconcat([part_1, part_2, part_3])
        three_part = cv2.resize(three_part, (0, 0), fx=0.5, fy=0.5)
        image = cv2.resize(image, (0, 0), fx=0.5, fy=0.5)
        return cv2.vconcat([
            cv2.hconcat([image, out_1, image_bt_not]),
            cv2.hconcat([image_equal, image_rgb, image_sepia]),
            cv2.hconcat([half_binary, image_h, three_part])
        ])

    # ----------------------------------------------------------------------------
    def save_pic(self, event, x, y, flags, param):
        """Mouse callback: save the clicked 3x3 grid tile of the masked frame.

        Maps the click position (x, y) to one of the nine tiles of
        ``self.frame_masked`` and writes that tile to 'employee_masked.jpg';
        the top-left (unfiltered) tile is always saved as
        'employee_original.jpg'.
        """
        if event == cv2.EVENT_LBUTTONDOWN:
            img = np.copy(self.frame_masked)
            # Row/column selection: thirds of the collage in each dimension.
            if y < (img.shape[0] / 3):
                if x < img.shape[1] / 3:
                    img = img[:round(img.shape[0]/3), :round(img.shape[1]/3), :]
                elif img.shape[1] / 3 < x < img.shape[1] / 1.5:
                    img = img[:round(img.shape[0]/3), round(img.shape[1]/3):round(img.shape[1]/1.5), :]
                elif img.shape[1] / 1.5 < x:
                    img = img[:round(img.shape[0]/3), round(img.shape[1]/1.5):, :]
            elif (img.shape[0] / 3) < y < (img.shape[0] / 1.5):
                if x < img.shape[1] / 3:
                    img = img[round(img.shape[0]/3):round(img.shape[0]/1.5), :round(img.shape[1]/3), :]
                elif img.shape[1] / 3 < x < img.shape[1] / 1.5:
                    img = img[round(img.shape[0]/3):round(img.shape[0]/1.5), round(img.shape[1]/3):round(img.shape[1]/1.5), :]
                elif img.shape[1] / 1.5 < x:
                    img = img[round(img.shape[0]/3):round(img.shape[0]/1.5), round(img.shape[1]/1.5):, :]
            elif (img.shape[0] / 1.5) < y:
                if x < img.shape[1] / 3:
                    img = img[round(img.shape[0]/1.5):, :round(img.shape[1]/3), :]
                elif img.shape[1] / 3 < x < img.shape[1] / 1.5:
                    img = img[round(img.shape[0]/1.5):, round(img.shape[1]/3):round(img.shape[1]/1.5), :]
                elif img.shape[1] / 1.5 < x:
                    img = img[round(img.shape[0]/1.5):, round(img.shape[1]/1.5):, :]
            cv2.imwrite('employee_masked.jpg', img)
            orig = self.frame_masked[:round(self.frame_masked.shape[0]/3), :round(self.frame_masked.shape[1]/3), :]
            cv2.imwrite('employee_original.jpg', orig)

    # ----------------------------------------------------------------------------
    def run(self):
        """Capture loop: read frames, build the mask grid, emit both via signal."""
        video = cv2.VideoCapture(0)
        while True:
            flag, self.frame = video.read()
            # Wait for 'q' key to stop the program
            if cv2.waitKey(1) == ord('q'):
                break
            if flag:
                # Bug fix: only run make_masks() on frames that were actually
                # captured. The old code filtered before checking `flag`, which
                # crashes in cv2.cvtColor on the None frame of a failed read.
                self.frame_masked = self.make_masks(self.frame)
                self.frame_signal.emit(self.frame, self.frame_masked)
# ****************************************************************************************
class MainWindow(QWidget):
    """Main UI: loads 'stacked.ui' and wires navigation plus employee intake."""

    def __init__(self):
        super(MainWindow, self).__init__()
        loader = QUiLoader()
        self.ui = loader.load('stacked.ui')
        self.ui.show()
        self.ui.stackedWidget.setCurrentWidget(self.ui.home)
        self.ui.blue_btn.clicked.connect(self.show_blue)
        self.ui.red_btn.clicked.connect(self.show_red)
        self.ui.yellow_btn.clicked.connect(self.show_yellow)
        self.ui.menu_btn.clicked.connect(self.back_to_home)
        self.webcam = Camera()
        # NOTE(review): Qt signals only work as *class* attributes; this
        # instance-level Signal() is inert. Kept (unused) for compatibility.
        self.picture_signal = Signal()

    def show_red(self):
        """Switch to the 'red' page and wire its camera/submit buttons."""
        self.ui.stackedWidget.setCurrentWidget(self.ui.red)
        self.ui.camera_btn.clicked.connect(self.camera)
        self.ui.submit_btn.clicked.connect(self.submit_employee)

    def submit_employee(self):
        """Validate the form, detect the face in the snapshot, store the record."""
        if self.ui.fname.text() == '' or self.ui.lname.text() == '' or self.ui.code.text() == '':
            self.ui.lbl_error.setText('please fill all fields')
        elif not os.path.exists('employee_masked.jpg'):
            self.ui.lbl_error.setText('please take a picture')
        else:
            self.ui.lbl_error.setText('ok, please wait...')
            # check database (not duplicated information)
            data_in_database = self.db_fetch()
            # NOTE(review): substring membership against whole DB rows -- a code
            # appearing in any column counts as a duplicate; verify intent.
            duplicate = any(self.ui.code.text() in item for item in data_in_database)
            if duplicate:
                self.ui.lbl_error.setText('this person submitted before.')
            else:
                # face detection on the saved snapshots
                face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
                img = cv2.imread('employee_original.jpg')
                img_masked = cv2.imread('employee_masked.jpg')
                faces = face_detector.detectMultiScale(img, 1.3)
                # NOTE(review): raises IndexError when no face is detected --
                # confirm a face is always guaranteed at this point.
                x, y, w, h = faces[0]
                face_orig = img[y:y + h, x:x + w]
                face_masked = img_masked[y:y + h, x:x + w]
                if not os.path.exists('./image_faces/'):
                    os.makedirs('./image_faces/')
                cv2.imwrite(f'./image_faces/face_{self.ui.fname.text()}_{self.ui.lname.text()}.jpg', face_masked)
                # add to database
                status = self.insert_to_database(self.ui.fname.text(), self.ui.lname.text(), self.ui.code.text(),
                                                 self.ui.birthday.text(),
                                                 f'face_{self.ui.fname.text()}_{self.ui.lname.text()}.jpg')
                if status:
                    self.ui.lbl_error.setText('successfully added to database.')
                else:
                    self.ui.lbl_error.setText('there is a problem with database.')

    def insert_to_database(self, name, family, code, birthday, image_path):
        """Insert one employee row; return True on success, False on DB error."""
        print(name, family, code, birthday, image_path)
        try:
            my_con = connect('employee.db')
            my_cursor = my_con.cursor()
            # Security fix: parameterized query instead of an f-string.
            # The previous version was open to SQL injection via the form fields.
            my_cursor.execute(
                "INSERT INTO employee(fname, lname, code, birthday, image_path) "
                "VALUES(?, ?, ?, ?, ?)",
                (name, family, code, birthday, image_path))
            my_con.commit()
            my_con.close()
            return True
        except Exception:
            # Narrowed from a bare `except:`; failure is still reported as False.
            return False

    def db_fetch(self):
        """Return every row of the `time` table.

        NOTE(review): the duplicate check in submit_employee treats this as
        employee data -- confirm `time` is the intended table.
        """
        my_con = connect('employee.db')
        my_cursor = my_con.cursor()
        my_cursor.execute("SELECT * FROM time")
        result = my_cursor.fetchall()
        my_con.close()
        return result

    def show_yellow(self):
        """Switch to the 'yellow' page."""
        self.ui.stackedWidget.setCurrentWidget(self.ui.yellow)

    def back_to_home(self):
        """Return to the home page."""
        self.ui.stackedWidget.setCurrentWidget(self.ui.home)

    def camera(self):
        """Start the webcam thread and route its frames to show_blue()."""
        self.webcam.frame_signal.connect(self.show_blue)
        self.webcam.start()

    def show_blue(self, frame, frame_masked):
        """Display the latest raw webcam frame on the 'blue' page."""
        self.ui.stackedWidget.setCurrentWidget(self.ui.blue)
        frame = convertCvImage2QtImage(frame)
        self.ui.video_main.setPixmap(frame)
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, build the main window
    # (which loads and shows the .ui itself), then run the event loop.
    app = QApplication(sys.argv)
    window = MainWindow()
    sys.exit(app.exec())
|
#!/usr/bin/env python3
# Copyright (c) 2019 Bitcoin Association
# Distributed under the Open BSV software license, see the accompanying file LICENSE.
from test_framework.key import CECKey
from genesis_upgrade_tests.test_base import GenesisHeightBasedSimpleTestsCase
from test_framework.height_based_test_framework import SimpleTestDefinition
from test_framework.script import CScript, OP_TRUE, OP_HASH160, OP_EQUAL, hash160, OP_FALSE, OP_RETURN, SignatureHashForkId, SIGHASH_ALL, SIGHASH_FORKID, OP_CHECKSIG
def make_key(bytes=b"randombytes"):
    """Return a CECKey seeded with the given secret bytes.

    NOTE(review): the parameter name shadows the ``bytes`` builtin; kept
    unchanged for keyword-call compatibility.
    """
    generated = CECKey()
    generated.set_secretbytes(bytes)
    return generated
def make_unlock_default(tx, tx_to_spend):
    """Build the P2PK unlocking script (signature only) using HandleTxsDefaultNode's key."""
    spent_output = tx_to_spend.vout[0]
    sighash = SignatureHashForkId(spent_output.scriptPubKey, tx, 0,
                                  SIGHASH_ALL | SIGHASH_FORKID, spent_output.nValue)
    hash_type_byte = bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
    sig = HandleTxsDefaultNode.THE_KEY.sign(sighash) + hash_type_byte
    return CScript([sig])
def make_unlock_modified11(tx, tx_to_spend):
    """Build the P2PK unlocking script (signature only) using HandleTxsModified11Node's key."""
    spent_output = tx_to_spend.vout[0]
    sighash = SignatureHashForkId(spent_output.scriptPubKey, tx, 0,
                                  SIGHASH_ALL | SIGHASH_FORKID, spent_output.nValue)
    hash_type_byte = bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
    sig = HandleTxsModified11Node.THE_KEY.sign(sighash) + hash_type_byte
    return CScript([sig])
def make_unlock_modified10(tx, tx_to_spend):
    """Build the P2PK unlocking script (signature only) using HandleTxsModified10Node's key."""
    spent_output = tx_to_spend.vout[0]
    sighash = SignatureHashForkId(spent_output.scriptPubKey, tx, 0,
                                  SIGHASH_ALL | SIGHASH_FORKID, spent_output.nValue)
    hash_type_byte = bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
    sig = HandleTxsModified10Node.THE_KEY.sign(sighash) + hash_type_byte
    return CScript([sig])
def make_unlock_modified00(tx, tx_to_spend):
    """Build the P2PK unlocking script (signature only) using HandleTxsModified00Node's key."""
    spent_output = tx_to_spend.vout[0]
    sighash = SignatureHashForkId(spent_output.scriptPubKey, tx, 0,
                                  SIGHASH_ALL | SIGHASH_FORKID, spent_output.nValue)
    hash_type_byte = bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
    sig = HandleTxsModified00Node.THE_KEY.sign(sighash) + hash_type_byte
    return CScript([sig])
class HandleTxsDefaultNode(GenesisHeightBasedSimpleTestsCase):
    """Default policy node (-acceptnonstdtxn=0 -acceptnonstdoutputs=1).

    Expectation: before Genesis, nonstandard txs are rejected and P2SH
    accepted; after Genesis, nonstandard txs are accepted and P2SH outputs
    rejected (from p2p only, not in blocks).
    """
    ARGS = GenesisHeightBasedSimpleTestsCase.ARGS + ['-banscore=1000000', '-whitelist=127.0.0.1', '-acceptnonstdtxn=0', '-acceptnonstdoutputs=1']
    NAME = "Reject nonstandard transactions and accept p2sh transactions before Genesis. Accept nonstandard and reject p2sh transactions after Genesis"
    # Funding outputs are P2PK so the test key can sign the spends.
    THE_KEY = make_key()
    P2PK_LOCKING_SCRIPT = CScript([THE_KEY.get_pubkey(), OP_CHECKSIG])
    # Standard P2PK spend before Genesis: accepted (no reject reason).
    TEST_PRE_GENESIS_STANDARD_TX = [
        SimpleTestDefinition("PRE-GENESIS", P2PK_LOCKING_SCRIPT,
                             "PRE-GENESIS", make_unlock_default)
    ]
    # Nonstandard output (bare OP_TRUE) before Genesis: rejected by policy.
    TEST_PRE_GENESIS_NONSTANDARD_TX = [
        SimpleTestDefinition("PRE-GENESIS", P2PK_LOCKING_SCRIPT,
                             "PRE-GENESIS", make_unlock_default, test_tx_locking_script=CScript([OP_TRUE]),
                             p2p_reject_reason = b'scriptpubkey')
    ]
    # P2SH output before Genesis: accepted.
    TEST_PRE_GENESIS_P2SH_TX = [
        SimpleTestDefinition("PRE-GENESIS", P2PK_LOCKING_SCRIPT,
                             "PRE-GENESIS", make_unlock_default, test_tx_locking_script=CScript([OP_HASH160, hash160(CScript([OP_TRUE])), OP_EQUAL]))
    ]
    # Standard P2PK spend after Genesis: accepted.
    TEST_GENESIS_STANDARD_TX = [
        SimpleTestDefinition("GENESIS", P2PK_LOCKING_SCRIPT,
                             "GENESIS", make_unlock_default)
    ]
    # Nonstandard output after Genesis: accepted (nonstd outputs allowed).
    TEST_GENESIS_NONSTANDARD_TX = [
        SimpleTestDefinition("GENESIS", P2PK_LOCKING_SCRIPT,
                             "GENESIS", make_unlock_default, test_tx_locking_script=CScript([OP_TRUE]))
    ]
    # P2SH transaction will be rejected from p2p, but not rejected as part of the block
    TEST_GENESIS_P2SH_TX = [
        SimpleTestDefinition("GENESIS", P2PK_LOCKING_SCRIPT,
                             "GENESIS", make_unlock_default, test_tx_locking_script=CScript([OP_HASH160, hash160(CScript([OP_TRUE])), OP_EQUAL]),
                             p2p_reject_reason=b'bad-txns-vout-p2sh')
    ]
    TESTS = TEST_PRE_GENESIS_STANDARD_TX + TEST_PRE_GENESIS_NONSTANDARD_TX + TEST_PRE_GENESIS_P2SH_TX + TEST_GENESIS_STANDARD_TX + TEST_GENESIS_NONSTANDARD_TX + TEST_GENESIS_P2SH_TX
class HandleTxsModified11Node(GenesisHeightBasedSimpleTestsCase):
    """Node with -acceptnonstdtxn=1 -acceptnonstdoutputs=1.

    Expectation: nonstandard and P2SH txs accepted before Genesis; after
    Genesis nonstandard still accepted, P2SH rejected (p2p and block).
    """
    ARGS = GenesisHeightBasedSimpleTestsCase.ARGS + ['-banscore=1000000', '-whitelist=127.0.0.1', '-acceptnonstdtxn=1', '-acceptnonstdoutputs=1']
    NAME = "Accept nonstandard transactions and p2sh transactions before Genesis. Accept nonstandard and reject p2sh transactions after Genesis"
    # Funding outputs are P2PK so the test key can sign the spends.
    THE_KEY = make_key()
    P2PK_LOCKING_SCRIPT = CScript([THE_KEY.get_pubkey(), OP_CHECKSIG])
    # Standard P2PK spend before Genesis: accepted.
    TEST_PRE_GENESIS_STANDARD_TX = [
        SimpleTestDefinition("PRE-GENESIS", P2PK_LOCKING_SCRIPT,
                             "PRE-GENESIS", make_unlock_modified11)
    ]
    # Nonstandard output before Genesis: accepted (nonstd txs allowed).
    TEST_PRE_GENESIS_NONSTANDARD_TX = [
        SimpleTestDefinition("PRE-GENESIS", P2PK_LOCKING_SCRIPT,
                             "PRE-GENESIS", make_unlock_modified11, test_tx_locking_script=CScript([OP_TRUE]))
    ]
    # P2SH output before Genesis: accepted.
    TEST_PRE_GENESIS_P2SH_TX = [
        SimpleTestDefinition("PRE-GENESIS", P2PK_LOCKING_SCRIPT,
                             "PRE-GENESIS", make_unlock_modified11, test_tx_locking_script=CScript([OP_HASH160, hash160(CScript([OP_TRUE])), OP_EQUAL]))
    ]
    # Standard P2PK spend after Genesis: accepted.
    TEST_GENESIS_STANDARD_TX = [
        SimpleTestDefinition("GENESIS", P2PK_LOCKING_SCRIPT,
                             "GENESIS", make_unlock_modified11)
    ]
    # Nonstandard output after Genesis: accepted.
    TEST_GENESIS_NONSTANDARD_TX = [
        SimpleTestDefinition("GENESIS", P2PK_LOCKING_SCRIPT,
                             "GENESIS", make_unlock_modified11, test_tx_locking_script=CScript([OP_TRUE]))
    ]
    # P2SH output after Genesis: rejected both from p2p and inside blocks.
    TEST_GENESIS_P2SH_TX = [
        SimpleTestDefinition("GENESIS", P2PK_LOCKING_SCRIPT,
                             "GENESIS", make_unlock_modified11, test_tx_locking_script=CScript([OP_HASH160, hash160(CScript([OP_TRUE])), OP_EQUAL]),
                             p2p_reject_reason = b'flexible-bad-txns-vout-p2sh',
                             block_reject_reason=b'bad-txns-vout-p2sh')
    ]
    TESTS = TEST_PRE_GENESIS_STANDARD_TX + TEST_PRE_GENESIS_NONSTANDARD_TX + TEST_PRE_GENESIS_P2SH_TX + TEST_GENESIS_STANDARD_TX + TEST_GENESIS_NONSTANDARD_TX + TEST_GENESIS_P2SH_TX
class HandleTxsModified10Node(GenesisHeightBasedSimpleTestsCase):
    """Node with -acceptnonstdtxn=1 -acceptnonstdoutputs=0.

    Expectation: nonstandard and P2SH txs accepted before Genesis; after
    Genesis both nonstandard and P2SH are rejected.
    """
    ARGS = GenesisHeightBasedSimpleTestsCase.ARGS + ['-banscore=1000000', '-whitelist=127.0.0.1', '-acceptnonstdtxn=1', '-acceptnonstdoutputs=0']
    NAME = "Accept nonstandard transactions and p2sh transactions before Genesis. Reject nonstandard and p2sh transactions after Genesis"
    # Funding outputs are P2PK so the test key can sign the spends.
    THE_KEY = make_key()
    P2PK_LOCKING_SCRIPT = CScript([THE_KEY.get_pubkey(), OP_CHECKSIG])
    # Standard P2PK spend before Genesis: accepted.
    TEST_PRE_GENESIS_STANDARD_TX = [
        SimpleTestDefinition("PRE-GENESIS", P2PK_LOCKING_SCRIPT,
                             "PRE-GENESIS", make_unlock_modified10)
    ]
    # Nonstandard output before Genesis: accepted (nonstd txs allowed).
    TEST_PRE_GENESIS_NONSTANDARD_TX = [
        SimpleTestDefinition("PRE-GENESIS", P2PK_LOCKING_SCRIPT,
                             "PRE-GENESIS", make_unlock_modified10, test_tx_locking_script=CScript([OP_TRUE]))
    ]
    # P2SH output before Genesis: accepted.
    TEST_PRE_GENESIS_P2SH_TX = [
        SimpleTestDefinition("PRE-GENESIS", P2PK_LOCKING_SCRIPT,
                             "PRE-GENESIS", make_unlock_modified10, test_tx_locking_script=CScript([OP_HASH160, hash160(CScript([OP_TRUE])), OP_EQUAL]))
    ]
    # Standard P2PK spend after Genesis: accepted.
    TEST_GENESIS_STANDARD_TX = [
        SimpleTestDefinition("GENESIS", P2PK_LOCKING_SCRIPT,
                             "GENESIS", make_unlock_modified10)
    ]
    # Nonstandard output after Genesis: rejected by policy.
    TEST_GENESIS_NONSTANDARD_TX = [
        SimpleTestDefinition("GENESIS", P2PK_LOCKING_SCRIPT,
                             "GENESIS", make_unlock_modified10, test_tx_locking_script=CScript([OP_TRUE]),
                             p2p_reject_reason = b'scriptpubkey')
    ]
    # P2SH output after Genesis: rejected both from p2p and inside blocks.
    TEST_GENESIS_P2SH_TX = [
        SimpleTestDefinition("GENESIS", P2PK_LOCKING_SCRIPT,
                             "GENESIS", make_unlock_modified10, test_tx_locking_script=CScript([OP_HASH160, hash160(CScript([OP_TRUE])), OP_EQUAL]),
                             p2p_reject_reason=b'flexible-bad-txns-vout-p2sh',
                             block_reject_reason=b'bad-txns-vout-p2sh')
    ]
    TESTS = TEST_PRE_GENESIS_STANDARD_TX + TEST_PRE_GENESIS_NONSTANDARD_TX + TEST_PRE_GENESIS_P2SH_TX + TEST_GENESIS_STANDARD_TX + TEST_GENESIS_NONSTANDARD_TX + TEST_GENESIS_P2SH_TX
class HandleTxsModified00Node(GenesisHeightBasedSimpleTestsCase):
    """Node with -acceptnonstdtxn=0 -acceptnonstdoutputs=0.

    Expectation: nonstandard rejected / P2SH accepted before Genesis; after
    Genesis both nonstandard and P2SH are rejected.
    """
    ARGS = GenesisHeightBasedSimpleTestsCase.ARGS + ['-banscore=1000000', '-whitelist=127.0.0.1', '-acceptnonstdtxn=0', '-acceptnonstdoutputs=0']
    NAME = "Reject nonstandard transactions and accept p2sh transactions before Genesis. Reject nonstandard and p2sh transactions after Genesis"
    # Funding outputs are P2PK so the test key can sign the spends.
    THE_KEY = make_key()
    P2PK_LOCKING_SCRIPT = CScript([THE_KEY.get_pubkey(), OP_CHECKSIG])
    # Standard P2PK spend before Genesis: accepted.
    TEST_PRE_GENESIS_STANDARD_TX = [
        SimpleTestDefinition("PRE-GENESIS", P2PK_LOCKING_SCRIPT,
                             "PRE-GENESIS", make_unlock_modified00)
    ]
    # Nonstandard output before Genesis: rejected by policy.
    TEST_PRE_GENESIS_NONSTANDARD_TX = [
        SimpleTestDefinition("PRE-GENESIS", P2PK_LOCKING_SCRIPT,
                             "PRE-GENESIS", make_unlock_modified00, test_tx_locking_script=CScript([OP_TRUE]),
                             p2p_reject_reason = b'scriptpubkey')
    ]
    # P2SH output before Genesis: accepted.
    TEST_PRE_GENESIS_P2SH_TX = [
        SimpleTestDefinition("PRE-GENESIS", P2PK_LOCKING_SCRIPT,
                             "PRE-GENESIS", make_unlock_modified00, test_tx_locking_script=CScript([OP_HASH160, hash160(CScript([OP_TRUE])), OP_EQUAL]))
    ]
    # Standard P2PK spend after Genesis: accepted.
    TEST_GENESIS_STANDARD_TX = [
        SimpleTestDefinition("GENESIS", P2PK_LOCKING_SCRIPT,
                             "GENESIS", make_unlock_modified00)
    ]
    # Nonstandard output after Genesis: rejected by policy.
    TEST_GENESIS_NONSTANDARD_TX = [
        SimpleTestDefinition("GENESIS", P2PK_LOCKING_SCRIPT,
                             "GENESIS", make_unlock_modified00, test_tx_locking_script=CScript([OP_TRUE]),
                             p2p_reject_reason = b'scriptpubkey')
    ]
    # P2SH output after Genesis: rejected both from p2p and inside blocks.
    TEST_GENESIS_P2SH_TX = [
        SimpleTestDefinition("GENESIS", P2PK_LOCKING_SCRIPT,
                             "GENESIS", make_unlock_modified00, test_tx_locking_script=CScript([OP_HASH160, hash160(CScript([OP_TRUE])), OP_EQUAL]),
                             p2p_reject_reason = b'flexible-bad-txns-vout-p2sh',
                             block_reject_reason=b'bad-txns-vout-p2sh')
    ]
    TESTS = TEST_PRE_GENESIS_STANDARD_TX + TEST_PRE_GENESIS_NONSTANDARD_TX + TEST_PRE_GENESIS_P2SH_TX + TEST_GENESIS_STANDARD_TX + TEST_GENESIS_NONSTANDARD_TX + TEST_GENESIS_P2SH_TX
|
# Read integers from standard input, one per line, and after the first
# entered zero print the sum of all received numbers (including the zero,
# which does not change the total).
s = 0
while True:
    a = int(input())
    s += a
    if a == 0:
        break
print(s)
|
#!/usr/bin/env python3
'''
A friend of mine created this version. It's slower (2x), but more pythonic.
A 25% gain was seen by using my file write function; although, that function could be improved.
'''
import collections
import itertools
import sys
#delims = (' ', '\n', '\t')
# Byte values of space, newline and tab -- the input is read in binary mode,
# so individual bytes compare as integers, not one-character strings.
delims = (32, 10, 9)
def strcspn(haystack, needles):
    """Length of the initial span of *haystack* containing no item of *needles*."""
    span = 0
    for element in haystack:
        if element in needles:
            break
        span += 1
    return span
def partition(haystack, needles):
    """Split *haystack* into (head, one-delimiter, tail) at the first delimiter.

    The middle element is empty when no delimiter is present.
    """
    cut = strcspn(haystack, needles)
    head = haystack[:cut]
    sep = haystack[cut:cut + 1]
    tail = haystack[cut + 1:]
    return (head, sep, tail)
def flush(path, index, queue):
    """Concatenate the queued byte chunks into one file 'out.<index>' under *path*."""
    payload = b''.join(queue)
    with open(f'{path}/out.{index}', 'wb') as out:
        out.write(payload)
def main(argv):
    """Split the file argv[1] into ~100-byte chunk files under directory argv[2].

    Tokens (runs of non-delimiter bytes) travel together with their trailing
    delimiter; the current chunk is flushed just before an item would push it
    past 100 bytes.

    NOTE(review): a single item longer than 100 bytes is still appended to one
    chunk, so output files can exceed the limit -- confirm this is intended.
    """
    queue = collections.deque()
    length = 0          # bytes currently buffered in `queue`
    outfileindex = 0    # suffix of the next output file
    with open(argv[1], 'rb') as infile:
        for line in infile:
            left, delim, rest = partition(line, delims)
            while True:
                for item in (left, delim):
                    if length + len(item) > 100:
                        # chunk would overflow: write it out and start fresh
                        flush(argv[2], outfileindex, queue)
                        queue.clear()
                        length = 0
                        outfileindex += 1
                    queue.append(item)
                    length += len(item)
                if rest:
                    left, delim, rest = partition(rest, delims)
                else:
                    break
    # write whatever remains buffered (possibly empty)
    flush(argv[2], outfileindex, queue)
if __name__ == '__main__':
    # Require exactly two positional arguments: input file and output path.
    if len(sys.argv) != 3:
        print(f'Usage: {sys.argv[0]} <infile> <outpath>')
    else:
        main(sys.argv)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import fantaapp.models
import fantaapp.models.auxfun
class Migration(migrations.Migration):
    """Auto-generated Django migration.

    Adds ``IncontroCoppa.indice``, switches ``Lega.codice`` to a random-hash
    default with a unique index, and makes (lega, indice, tipo) unique on
    IncontroCoppa.

    NOTE(review): generated migrations should not be hand-edited once applied.
    """

    dependencies = [
        ('fantaapp', '0021_auto_20150808_1507'),
    ]

    operations = [
        migrations.AddField(
            model_name='incontrocoppa',
            name='indice',
            field=models.PositiveSmallIntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='lega',
            name='codice',
            field=models.CharField(default=fantaapp.models.auxfun.randomHash, unique=True, max_length=20, db_index=True),
        ),
        migrations.AlterUniqueTogether(
            name='incontrocoppa',
            unique_together=set([('lega', 'indice', 'tipo')]),
        ),
    ]
|
import cv2
import numpy as np
# HSV threshold table used by colorDetection(); keys follow the pattern
# '<condition>_<bound>_<color>'. Red needs two hue ranges because it wraps
# around 0/180 in OpenCV's HSV space; the 'bad' variants use looser
# saturation/value bounds for poorly lit images.
lighting = {
    'regular_lower_blue' : np.array([100, 160, 50]),
    'regular_upper_blue' : np.array([135, 255, 255]),
    'regular_lower_red1' : np.array([0,180,120]),
    'regular_upper_red1' : np.array([10,255,255]),
    'regular_lower_red2' : np.array([170,180,120]),
    'regular_upper_red2' : np.array([180,255,255]),
    'bad_lower_blue' : np.array([100, 150, 50]),
    'bad_upper_blue' : np.array([140, 255, 255]),
    'bad_lower_red1' : np.array([0,70,70]),
    'bad_upper_red1' : np.array([10,255,255]),
    'bad_lower_red2' : np.array([170,70,70]),
    'bad_upper_red2' : np.array([180,255,255])
}
def fixBadContrast(image):
    """Boost local contrast by applying CLAHE to the L channel in LAB space."""
    working_copy = image.copy()
    lab = cv2.cvtColor(working_copy, cv2.COLOR_BGR2LAB)
    equalizer = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    lab[..., 0] = equalizer.apply(lab[..., 0])
    return cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
def fixBadLighting(image):
    """Even out brightness by histogram-equalizing the Y channel in YUV space."""
    img_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)
    # equalize the histogram of the Y (luma) channel only
    img_yuv[:,:,0] = cv2.equalizeHist(img_yuv[:,:,0])
    # convert the YUV image back to BGR format.
    # Renamed from `lighting`: the old local shadowed the module-level
    # `lighting` threshold table used by colorDetection().
    balanced = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
    return balanced
# Apply image segmentation, in order to get a result with only blue and red colors
# Apply image segmentation, in order to get a result with only blue and red colors
def colorDetection(image, type):
    """Segment *image* into its red and blue regions via HSV thresholding.

    *type* ('regular' or 'bad') selects a threshold set from the module-level
    ``lighting`` table. The parameter name shadows the builtin but is kept
    for interface compatibility.

    Returns a tuple (combined, blue-only, red-only) of BGR images.
    """
    hsvImg = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    kernel = np.ones((3, 3), np.uint8)
    # red wraps around the hue axis in OpenCV HSV, so combine two ranges
    mask1 = cv2.inRange(hsvImg, lighting[type + '_lower_red1'], lighting[type + '_upper_red1'])
    mask2 = cv2.inRange(hsvImg, lighting[type + '_lower_red2'], lighting[type + '_upper_red2'])
    redMask = mask1 + mask2
    # blue needs only a single hue band
    blueMask = cv2.inRange(hsvImg, lighting[type + '_lower_blue'], lighting[type + '_upper_blue'])
    # open to drop speckles, then dilate to close small gaps
    redMask = cv2.morphologyEx(redMask, cv2.MORPH_OPEN, kernel)
    redMask = cv2.morphologyEx(redMask, cv2.MORPH_DILATE, kernel)
    blueMask = cv2.morphologyEx(blueMask, cv2.MORPH_OPEN, kernel)
    blueMask = cv2.morphologyEx(blueMask, cv2.MORPH_DILATE, kernel)
    # keep only the pixels selected by each mask
    redRes = cv2.bitwise_and(image, image, mask=redMask)
    blueRes = cv2.bitwise_and(image, image, mask=blueMask)
    res = cv2.bitwise_and(image, image, mask=blueMask + redMask)
    return res, blueRes, redRes
|
from vpyp.corpus import Vocabulary
def segmentations(word):
    """Yield every (prefix, suffix) split of *word* with a non-empty prefix."""
    for cut in range(1, len(word) + 1):
        yield word[:cut], word[cut:]
def affixes(words):
    """Build prefix and suffix Vocabularies covering every split of *words*."""
    all_splits = [split for word in words for split in segmentations(word)]
    prefix_parts, suffix_parts = zip(*all_splits)
    prefixes = Vocabulary(start_stop=False, init=set(prefix_parts))
    suffixes = Vocabulary(start_stop=False, init=set(suffix_parts))
    return prefixes, suffixes
def segmentation_mapping(vocab, prefixes, suffixes):
    """Map each word id in *vocab* to its set of (prefix-id, suffix-id) splits."""
    mapping = {}
    for word in vocab:
        encoded = {(prefixes[p], suffixes[s]) for p, s in segmentations(word)}
        mapping[vocab[word]] = encoded
    return mapping
|
#!/usr/bin/env python3
# Read n followed by n integers from stdin (whitespace separated).
n, *a = map(int, open(0).read().split())
# A accumulates a factor of 2 for each even element and 1 for each odd one,
# i.e. A == 2 ** (number of even elements).
A = 1
for i in a:
    A *= 2 - i%2
# Presumably a counting answer: all 3^n cases minus the A excluded ones --
# the exact combinatorial meaning can't be confirmed from this file alone.
print(3**n - A)
#!/usr/bin/python3
import dnstwist
import whois
import sys
import signal
import time
import argparse
import warnings
from os import path, environ
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
import json
import queue
global args
def standardize_json(domains=None,
                     fields=('fuzzer', 'domain-name', 'dns-a', 'dns-aaaa', 'dns-mx', 'dns-ns', 'geoip-country', 'whois-created', 'ssdeep-score')):
    """Normalize dnstwist result dicts so every name in *fields* is present.

    'domain-name' is re-encoded to its IDNA (punycode) ASCII form; a missing
    dns-* field becomes an empty list and any other missing field an empty
    string. The input dicts are modified in place; a new list is returned.

    Fix: the defaults were mutable (list literals) -- replaced with
    None/tuple sentinels per Python best practice; behavior is unchanged.
    """
    domains = list(domains) if domains is not None else []
    for domain in domains:
        for field in fields:
            if field in domain and field == 'domain-name':
                domain[field] = domain[field].encode('idna').decode()
            elif field not in domain:
                domain[field] = [] if field.startswith('dns') else ""
    return domains
# tweaked to output uniformly
# tweaked to output uniformly
def create_csv(domains=None, fields=('fuzzer','domain-name','dns-a','dns-aaaa','dns-mx','dns-ns','geoip-country','whois-created','ssdeep-score')):
    """Render dnstwist result dicts as a CSV string with a fixed header row.

    The domain name is emitted in IDNA (punycode) ASCII form; list-valued
    dns-* fields are joined with ';'. Missing fields become empty strings.

    Fix: the defaults were mutable (list literals) -- replaced with
    None/tuple sentinels per Python best practice; behavior is unchanged.
    """
    domains = domains if domains is not None else []
    csv = [",".join(fields)]
    for domain in domains:
        csv.append(','.join([domain.get('fuzzer',""),
            domain.get('domain-name',"").encode('idna').decode(),
            ';'.join(domain.get('dns-a', [])),
            ';'.join(domain.get('dns-aaaa', [])),
            ';'.join(domain.get('dns-mx', [])),
            ';'.join(domain.get('dns-ns', [])),
            domain.get('geoip-country',""),
            domain.get('whois-created',""),
            str(domain.get('ssdeep-score', ""))]))
    return '\n'.join(csv)
def _exit(code):
    """Reset terminal colors, then terminate the process with *code*."""
    reset_sequence = dnstwist.FG_RST + dnstwist.ST_RST
    print(reset_sequence, end='')
    sys.exit(code)
def p_cli(text):
    """Print *text* without a newline, but only when output format is 'cli'."""
    if args.format != 'cli':
        return
    print(text, end='', flush=True)
def p_err(text):
    """Write *text* to stderr and flush immediately."""
    print(text, flush=True, file=sys.stderr)
def signal_handler(signal, frame):
    """SIGINT/SIGTERM handler: stop all scanner threads, then exit cleanly."""
    print('\nStopping threads... ', file=sys.stderr, end='', flush=True)
    for scanner in threads:
        scanner.stop()
        scanner.join()
    print('Done', file=sys.stderr)
    _exit(0)
def write_log(message, cli=False):
    """Emit *message* via p_cli in CLI mode, otherwise with a plain print."""
    emit = p_cli if cli else print
    emit(message)
def write_warning(warning, cli=False):
    """Surface *warning* on the CLI, or raise it as a Python warning."""
    if not cli:
        warnings.warn(warning)
    else:
        p_cli(warning)
def write_error(error, cli=False):
    """Report *error*: print and hard-exit in CLI mode, otherwise raise it."""
    if not cli:
        raise error
    p_err(error)
    _exit(-1)
#TODO: rework this
def dnstwister(domain,all=False,banners=False,dictionary=None,geoip=False,mxcheck=False,output=None,registered=False,ssdeep=False,ssdeep_url=None,threadcount=dnstwist.THREAD_COUNT_DEFAULT,whois=False,tld=None,nameservers=None,port=53,useragent=None,cli=False,format="cli"):
    """Run a dnstwist scan for *domain* and print or return the results.

    Mirrors dnstwist's CLI main(): generates domain permutations, resolves
    them in worker threads, optionally enriches with GeoIP/WHOIS/ssdeep, and
    either prints (cli=True, in *format*) or returns a list of normalized
    result dicts via standardize_json() (cli=False).

    Mutates the module globals `args` and `threads` (used by the signal
    handler and p_cli).  May call sys.exit via _exit() in CLI mode.
    """
    # When args are parsed in from the cli, they create a Namespace object
    # this object is essentially just strings that are parsed out to objects at time of use
    # most are bool or string, so nbd, but namespaces can take a list... kind of
    # it's expecting a comma separated list, not an actual list() object
    #
    # uses the same params as main() with the exception of format which is assumed to be json
    global args
    global threads
    # Namespace expects a comma-separated string, so join a real list first.
    if isinstance(nameservers, list):
        nameservers = ",".join(nameservers)
    args = argparse.Namespace(**locals())
    threads = []
    # The locals just captured into `args` are reset here; the working copies
    # below are re-derived from `args.*`.
    nameservers = []
    dictionary = []
    tld = []
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    if args.threadcount < 1:
        args.threadcount = dnstwist.THREAD_COUNT_DEFAULT
    if args.nameservers:
        nameservers = args.nameservers.split(',')
        for r in nameservers:
            # Crude IPv4 check: must have exactly four dot-separated parts.
            if len(r.split('.')) != 4:
                # NOTE(review): `cli` is passed into the ValueError constructor
                # instead of as write_error's second argument, so this path
                # always raises (never prints/exits) even in CLI mode.
                write_error(ValueError('Error: Invalid DNS nameserver',cli))
    if args.dictionary:
        if not path.exists(args.dictionary):
            write_error(FileNotFoundError('Error: Dictionary file not found: %s\n' % args.dictionary),cli)
        with open(args.dictionary) as f:
            dictionary = set(f.read().splitlines())
            # Keep only alphanumeric entries (drops blanks and punctuation).
            dictionary = [x for x in dictionary if x.isalnum()]
    if args.tld:
        if not path.exists(args.tld):
            write_error(FileNotFoundError('Error: Dictionary file not found: %s\n' % args.tld),cli)
        with open(args.tld) as f:
            tld = set(f.read().splitlines())
            tld = [x for x in tld if x.isalpha()]
    if args.output:
        # 'x' mode fails when the file already exists — deliberate, to avoid
        # clobbering a previous report. stdout is redirected for the run.
        try:
            sys.stdout = open(args.output, 'x')
        except FileExistsError:
            write_error(FileExistsError('File already exists: %s' % args.output),cli)
            raise
        except FileNotFoundError:
            write_error(FileNotFoundError('No such file or directory: %s' % args.output),cli)
            raise
        except PermissionError:
            write_error(PermissionError('Permission denied: %s' % args.output),cli)
            raise
    if args.ssdeep_url:
        try:
            ssdeep_url = dnstwist.UrlParser(args.ssdeep_url)
        except ValueError:
            write_error(ValueError('Invalid domain name: ' + args.ssdeep_url),cli)
    try:
        url = dnstwist.UrlParser(args.domain)
    except ValueError as err:
        write_error(ValueError('Error: %s\n' % err),cli)
        raise
    fuzz = dnstwist.DomainFuzz(url.domain, dictionary=dictionary, tld_dictionary=tld)
    fuzz.generate()
    domains = fuzz.domains
    # 'list' format short-circuits before any DNS work is done.
    if args.format == 'list' and cli:
        print(dnstwist.create_list(domains))
        _exit(0)
    if not dnstwist.MODULE_DNSPYTHON:
        write_warning('Notice: Missing module DNSPython (DNS features limited)\n',cli)
    if not dnstwist.MODULE_GEOIP and args.geoip:
        write_warning('Notice: Missing module GeoIP (geographical location not available)\n',cli)
    if not dnstwist.MODULE_WHOIS and args.whois:
        write_warning('Notice: Missing module whois (WHOIS database not accessible)\n',cli)
    if not dnstwist.MODULE_SSDEEP and args.ssdeep:
        write_warning('Notice: Missing module ssdeep (fuzzy hashes not available)\n',cli)
    if not dnstwist.MODULE_REQUESTS and args.ssdeep:
        write_warning('Notice: Missing module Requests (webpage downloads not possible)\n',cli)
    if cli:
        # ASCII-art banner, CLI mode only.
        p_cli(dnstwist.FG_RND + dnstwist.ST_BRI +
''' _ _ _ _
__| |_ __ ___| |___ _(_)___| |_
/ _` | '_ \/ __| __\ \ /\ / / / __| __|
| (_| | | | \__ \ |_ \ V V /| \__ \ |_
\__,_|_| |_|___/\__| \_/\_/ |_|___/\__| {%s}
''' % dnstwist.__version__ + dnstwist.FG_RST + dnstwist.ST_RST)
    ssdeep_init = str()
    ssdeep_effective_url = str()
    if args.ssdeep and dnstwist.MODULE_SSDEEP and dnstwist.MODULE_REQUESTS:
        # Fetch the original page once so workers can fuzzy-compare against it.
        request_url = ssdeep_url.full_uri() if ssdeep_url else url.full_uri()
        write_log('Fetching content from: ' + request_url + ' ... ',cli)
        try:
            req = dnstwist.requests.get(request_url, timeout=dnstwist.REQUEST_TIMEOUT_HTTP, headers={'User-Agent': args.useragent})
        except dnstwist.requests.exceptions.ConnectionError:
            # NOTE(review): these write_log calls omit the `cli` argument, so
            # they use plain print() even in CLI mode — likely unintended.
            write_log('Connection error\n')
            args.ssdeep = False
            pass
        except dnstwist.requests.exceptions.HTTPError:
            write_log('Invalid HTTP response\n')
            args.ssdeep = False
            pass
        except dnstwist.requests.exceptions.Timeout:
            write_log('Timeout (%d seconds)\n' % dnstwist.REQUEST_TIMEOUT_HTTP)
            args.ssdeep = False
            pass
        except Exception:
            write_log('Failed!\n')
            args.ssdeep = False
            pass
        else:
            if len(req.history) > 1:
                p_cli('➔ %s ' % req.url.split('?')[0])
            write_log('%d %s (%.1f Kbytes)\n' % (req.status_code, req.reason, float(len(req.text))/1000),cli)
            # NOTE(review): true division means this only matches status 200
            # exactly (200/100 == 2.0 == 2); any other 2xx fails. Integer
            # division (//) was probably intended — confirm upstream.
            if req.status_code / 100 == 2:
                ssdeep_init = dnstwist.ssdeep.hash(''.join(req.text.split()).lower())
                ssdeep_effective_url = req.url.split('?')[0]
            else:
                args.ssdeep = False
    # NOTE(review): `cli` omitted again; also 'premutations' is a typo in a
    # runtime string (left as-is here — fixing it changes output).
    write_log('Processing %d premutations ' % len(domains))
    jobs = queue.Queue()
    for i in range(len(domains)):
        jobs.put(domains[i])
    # Spin up the resolver worker pool; each worker pulls from the queue.
    for i in range(args.threadcount):
        worker = dnstwist.DomainThread(jobs)
        worker.setDaemon(True)
        worker.uri_scheme = url.scheme
        worker.uri_path = url.path
        worker.uri_query = url.query
        worker.domain_init = url.domain
        if dnstwist.MODULE_DNSPYTHON:
            worker.option_extdns = True
        if dnstwist.MODULE_GEOIP and args.geoip:
            worker.option_geoip = True
        if args.banners:
            worker.option_banners = True
        if args.ssdeep and dnstwist.MODULE_REQUESTS and dnstwist.MODULE_SSDEEP and 'ssdeep_init' in locals():
            worker.option_ssdeep = True
            worker.ssdeep_init = ssdeep_init
            worker.ssdeep_effective_url = ssdeep_effective_url
        if args.mxcheck:
            worker.option_mxcheck = True
        if args.nameservers:
            worker.nameservers = nameservers
        worker.useragent = args.useragent
        worker.start()
        threads.append(worker)
    # Poll the queue once a second, logging progress roughly every 20%.
    qperc = 0
    while not jobs.empty():
        if cli:
            p_cli('.')
        qcurr = 100 * (len(domains) - jobs.qsize()) / len(domains)
        if qcurr - 20 >= qperc:
            qperc = qcurr
            write_log('%u%%' % qperc,cli)
        time.sleep(1.0)
    for worker in threads:
        worker.stop()
        worker.join()
    # NOTE(review): "> 1" counts domains with MORE THAN ONE record as hits;
    # confirm whether "> 0" (any record) was intended.
    hits_total = sum(('dns-ns' in d and len(d['dns-ns']) > 1) or ('dns-a' in d and len(d['dns-a']) > 1) for d in domains)
    hits_percent = 100 * hits_total / len(domains)
    write_log(' %d hits (%d%%)\n\n' % (hits_total, hits_percent),cli)
    if args.registered:
        # Keep only permutations that actually resolved to an A record.
        domains[:] = [d for d in domains if 'dns-a' in d and len(d['dns-a']) > 0]
    if dnstwist.MODULE_WHOIS and args.whois and not fuzz.subdomain:
        write_log('Querying WHOIS servers ',cli)
        for domain in domains:
            domain['whois-created'] = str()
            domain['whois-updated'] = str()
            # NOTE(review): the two keys just added mean every record already
            # has len > 2 here, so this guard looks always-true — it was
            # probably meant to run before the whois-* keys were inserted.
            if len(domain) > 2:
                if cli:
                    p_cli('·')
                try:
                    whoisq = whois.query(domain['domain-name'].encode('idna').decode())
                    if whoisq:
                        # Keep only the date part of the timestamps.
                        domain['whois-created'] = str(whoisq.creation_date).split(' ')[0]
                        domain['whois-updated'] = str(whoisq.last_updated).split(' ')[0]
                except Exception:
                    pass
        write_log(' Done\n',cli)
    write_log('\n',cli)
    if not args.all:
        # Trim every DNS record list to its first entry for compact output.
        for i in range(len(domains)):
            for k in ['dns-ns', 'dns-a', 'dns-aaaa', 'dns-mx']:
                if k in domains[i]:
                    domains[i][k] = domains[i][k][:1]
    if domains:
        if not cli:
            # Library mode: hand back normalized dicts instead of printing.
            return standardize_json(domains)
        else:
            if args.format == 'csv':
                print(create_csv(domains))
            elif args.format == 'json':
                print(dnstwist.create_json(domains))
            else:
                print(dnstwist.create_cli(domains))
    _exit(0)
def compare_domains(old, new, keys):
    """Compare two fuzzed-domain records on *keys* and describe what changed.

    Args:
        old: previous record dict for a fuzzed domain.
        new: current record dict for the same fuzzed domain.
        keys: keys that must exist in both dicts and are compared.

    Returns:
        list of human-readable change messages; empty if nothing changed.

    Raises:
        KeyError: if any comparison key is missing from either dict.
    """
    updates = list()
    if old != new:
        for key in keys:
            if key not in old or key not in new:
                # BUG FIX: raising KeyError with a (message, key) tuple made
                # str(err) print as a tuple; format one message instead.
                raise KeyError("Missing key in dictionary: %s" % key)
            if old[key] != new[key]:
                # BUG FIX: the original message stopped at "changed from X",
                # omitting the new value; include it so reports are actionable.
                updates.append(f"{key} changed from {old[key]} to {new[key]}")
    return updates
def set_diff(old, new):
    """Diff two sets.

    Returns a tuple of sorted lists: (added, removed, common) — elements only
    in *new*, only in *old*, and in both, respectively.
    """
    added = sorted(new - old)
    removed = sorted(old - new)
    common = sorted(new & old)
    return added, removed, common
def compareData(old_domains,new_domains,comparison_keys):
    """Diff two runs of fuzzed-domain results.

    Args:
        old_domains: previous run — {origin domain: [fuzzed-domain dicts]}.
        new_domains: current run, same shape.
        comparison_keys: keys passed through to compare_domains().

    Returns:
        {origin domain: [fuzzed dicts]} containing only added/removed/changed
        entries, each tagged with an 'action' key ('added', 'removed', or a
        comma-joined change description).

    NOTE(review): for whole-origin additions/removals the 'action' key is
    written into the ORIGINAL dicts of new_domains/old_domains (shallow
    copies), so the caller's data — and anything later serialized from it —
    is mutated too; the intersection path copies with dict() first. Confirm
    whether that asymmetry is intended.
    """
    # first handle the origin domains that have been added/removed
    report_list = dict()
    new_origins = set([d for d in new_domains.keys()])
    old_origins = set([d for d in old_domains.keys()])
    origin_additions, origin_subtractions, origin_intersection = set_diff(old_origins,new_origins)
    # add additions
    for d in origin_additions:
        report_list[d] = list(new_domains[d])
    # mark as additions
    for origin_domain in origin_additions:
        for fuzzed in new_domains[origin_domain]:
            fuzzed['action'] = 'added'
    # add subtractions
    for d in origin_subtractions:
        report_list[d] = list(old_domains[d])
    # mark as removals
    for origin_domain in origin_subtractions:
        for fuzzed in old_domains[origin_domain]:
            fuzzed['action'] = 'removed'
    # next, handle the intersection
    for origin_domain in origin_intersection:
        # these have been presorted, and sets are unordered so have to create arrays to be able to match index
        # correctly for the dictionary objects that represent the fuzzed domains
        new_domain_names = [d['domain-name'] for d in new_domains[origin_domain]]
        old_domain_names = [d['domain-name'] for d in old_domains[origin_domain]]
        new_fuzz = set(new_domain_names)
        old_fuzz = set(old_domain_names)
        fuzz_additions, fuzz_subtractions, fuzz_intersection = set_diff(old_fuzz,new_fuzz)
        report_list[origin_domain] = list()
        # The diff lists are sorted and the name lists are sorted by the
        # caller, so .index() can resume from the previous hit instead of
        # rescanning from the start each time.
        prev_index_new = 0
        prev_index_old = 0
        prev_i_new_intersect = 0
        prev_i_old_intersect = 0
        # additions
        for d in fuzz_additions:
            # search and add to report_list
            index = new_domain_names.index(d,prev_index_new)
            prev_index_new = index
            fuzzed = dict(new_domains[origin_domain][index])
            fuzzed['action'] = 'added'
            report_list[origin_domain].append(fuzzed)
        # subtractions
        for d in fuzz_subtractions:
            # search and add to report_list
            index = old_domain_names.index(d,prev_index_old)
            prev_index_old = index
            fuzzed = dict(old_domains[origin_domain][index])
            fuzzed['action'] = 'removed'
            report_list[origin_domain].append(fuzzed)
        # handle intersection
        for d in fuzz_intersection:
            # get old dict
            old_index = old_domain_names.index(d,prev_i_old_intersect)
            prev_i_old_intersect = old_index
            old_fuzzed = dict(old_domains[origin_domain][old_index])
            # get new dict
            new_index = new_domain_names.index(d,prev_i_new_intersect)
            prev_i_new_intersect = new_index
            new_fuzzed = dict(new_domains[origin_domain][new_index])
            # compare
            updates = compare_domains(old_fuzzed,new_fuzzed,comparison_keys)
            if len(updates):
                fuzzed = dict(new_fuzzed)
                fuzzed['action'] = ",".join(updates)
                report_list[origin_domain].append(fuzzed)
    return report_list
def send_email_report(from_email, to_emails, html_content, subject="Domain Monitor Report"):
    """Send an HTML report via SendGrid.

    Uses SendGrid's Python library (https://github.com/sendgrid/sendgrid-python)
    and assumes the API key is in the SENDGRID_API_KEY environment variable.

    Args:
        from_email: sender address.
        to_emails: recipient address or list of addresses.
        html_content: HTML body of the message.
        subject: message subject line.
    """
    message = Mail(from_email, to_emails, subject, html_content)
    try:
        sg = SendGridAPIClient(environ.get('SENDGRID_API_KEY'))
        response = sg.send(message)
        print(response.status_code)
        print(response.body)
        print(response.headers)
    except Exception as e:
        # BUG FIX: Python 3 exceptions have no .message attribute, so the
        # original `print(e.message)` raised AttributeError and masked the
        # real send failure; print the exception itself instead.
        print(e)
def generate_email_body(domains):
    """Render the report dict as simple HTML, one bordered table per origin.

    Args:
        domains: {origin domain: [permutation dicts]} — column order is taken
            from the keys of the first permutation of each origin, so every
            permutation under one origin is assumed to share those keys.

    Returns:
        the HTML document as a single string.
    """
    # make it pretty
    html = "<head><style>table, th, td {border: 1px solid black;border-collapse: collapse;}</style></head>"
    # Cleaned up: the original wrapped every loop in enumerate() and threw the
    # index away; iterate the dict/lists directly.
    for origin in domains:
        if not domains[origin]:
            # Robustness fix: an origin with no permutations used to raise
            # IndexError on [0]; emit an empty captioned table instead.
            html += f"<table><caption>{origin}</caption></table>"
            continue
        html += f"<table><caption>{origin}</caption><tr>"
        keys = list(domains[origin][0])
        for key in keys:
            html += f"<th>{key}</th>"
        html += "</tr>"
        for permutation in domains[origin]:
            html += "<tr>"
            for key in keys:
                html += f"<td>{permutation[key]}</td>"
            html += "</tr>"
        html += "</table>"
    return html
def domain_monitor(domain_list=r"./domains.txt", data_file=r"./domainData.json",
        base_options=None, new_origin_options=None, from_email=None, to_emails=None):
    """Monitor domains read from a newline-delimited file.

    Compares against *data_file* if it exists; otherwise the current results
    are written there for future comparison. *base_options* is passed to
    dnstwister() for every domain (it is HIGHLY RECOMMENDED to leave
    "registered" set to True); *new_origin_options* holds extra params passed
    only for base domains not already present in *data_file*.

    Returns:
        dict mapping each entry of *domain_list* to a list of diffed dnstwist
        results — or None when *from_email*/*to_emails* are set, in which case
        the report is emailed via SendGrid instead.
    """
    # BUG FIX: the option dicts were mutable default arguments (shared,
    # module-level state); default to None and resolve fresh copies here.
    if base_options is None:
        base_options = {"registered": True, "geoip": True, "ssdeep": True,
                        "nameservers": ["8.8.8.8", "4.4.4.4"], "threadcount": 25}
    if new_origin_options is None:
        new_origin_options = {}
    fuzzed_domains = dict()
    current_list = dict()
    report_list = dict()
    comparison_keys = ['domain-name', 'dns-a', 'dns-aaaa', 'dns-ns', 'dns-mx']
    try:
        with open(domain_list, "r") as file:
            domains = [d.rstrip() for d in file.readlines()]
    except FileNotFoundError as err:
        print(err)
        raise
    print("Successfully imported domain monitor list.\nMonitoring {0} domains".format(len(domains)))
    # NOTE(review): default nameservers claim "google's DNS" but 4.4.4.4 is
    # not a Google resolver (8.8.4.4 is) — confirm intent.
    # Get all variations of fuzzed domains; if the output file doesn't exist
    # or a domain isn't in it yet, the new-origin options apply.
    if path.exists(data_file):
        with open(data_file, "r") as file:
            current_list = json.load(file)
        print("Successfully loaded previous data for {0} base domains".format(len(current_list.keys())))
    print("Starting domain twisting")
    for domain in domains:
        if domain not in current_list.keys():
            fuzzed_domains[domain] = dnstwister(domain, **new_origin_options, **base_options)
        else:
            fuzzed_domains[domain] = dnstwister(domain, **base_options)
    # Alphabetically sort all the fuzzed results to simplify comparison.
    print("Sorting domain results")
    for _, domain in fuzzed_domains.items():
        domain.sort(key=lambda d: d['domain-name'])
    # If there is no data file everything is new; otherwise diff the two runs.
    if len(current_list.keys()) == 0:
        print("No previous base domains found. Treating all information as new.")
        report_list = dict(fuzzed_domains)
        for _, origin_domain in report_list.items():
            for domain in origin_domain:
                domain['action'] = 'added'
    else:
        print("Comparing new results against data file...")
        report_list = compareData(current_list, fuzzed_domains, comparison_keys)
    # WHOIS only the (much smaller) report subset, single-threaded, to avoid
    # mass lookups and the IP blocking they invite.
    for key, origin_domain in report_list.items():
        print("Checking whois information for {0}".format(key))
        for domain in origin_domain:
            try:
                whoisdb = whois.query(domain['domain-name'])
                domain['whois-created'] = str(whoisdb.creation_date).split(' ')[0]
                domain['whois-updated'] = str(whoisdb.last_updated).split(' ')[0]
            # BUG FIX: was a bare `except:`, which also swallows
            # KeyboardInterrupt/SystemExit; best-effort is preserved but only
            # for ordinary exceptions.
            except Exception:
                domain['whois-created'] = str()
                domain['whois-updated'] = str()
    # Overwrite the data file with the newest results.
    print("Writing new results to data file")
    with open(data_file, "w") as outfile:
        json.dump(fuzzed_domains, outfile)
    # Fire off the report with SendGrid; other transports could slot in here.
    if from_email is not None and to_emails is not None:
        html = generate_email_body(report_list)
        send_email_report(from_email, to_emails, html)
    else:
        return report_list
# Script entry point: run the monitor with its default settings.
if __name__ == "__main__":
    domain_monitor()
import unittest
from exc01 import divide_ten
class TestExc01(unittest.TestCase):
    """Unit tests for exc01.divide_ten."""

    def test_divide(self):
        # Dividing ten by zero must be handled and reported as 'fail!'.
        result = divide_ten(0)
        self.assertEqual('fail!', result)
# Allow running the tests directly with `python <this file>`.
if __name__ == '__main__':
    unittest.main()
|
# Takes x data points as input and predicts the value at step x+1.
# Import the libraries
import math
import pandas_datareader as web
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
# Get the stock quote (ticker 005930.KS via Yahoo Finance)
df = web.DataReader('005930.KS', data_source='yahoo', start='2012-01-01', end='2020-2-14')
# Show the data
print("df: \n",df)
# Get the number of rows and columns in the data set
print("df.shape:\n",df.shape)
# Visualize the closing price history
plt.figure(figsize=(16,8))
plt.title('Close Price History')
plt.plot(df['Close'])
plt.xlabel('Date', fontsize=18)
plt.ylabel('Close Price USD ($)', fontsize=18)
plt.show()
# Create a new dataframe with only the 'Close' column
data = df.filter(['Close'])
# Convert the dataframe to a numpy array
dataset = data.values
# Number of rows to train the model on: first 90% of the series
training_data_len = math.ceil( len(dataset) * .9 )
print("training_data_len:\n",training_data_len)
# Scale the data into the [0, 1] range the LSTM expects; the fitted scaler
# is reused below for the test set and for inverse-transforming predictions.
scaler = MinMaxScaler(feature_range=(0,1))
scaled_data = scaler.fit_transform(dataset)
print("scaled_data:\n",scaled_data)
# Create the training data set
# Create the scaled training data set
train_data = scaled_data[0:training_data_len , :]
# Split the data into x_train and y_train: each sample is a 60-day window of
# scaled closes and its label is the following day's scaled close.
x_train = []
y_train = []
for i in range(60, len(train_data)):
    x_train.append(train_data[i-60:i, 0])
    y_train.append(train_data[i, 0])
    if i<= 61:
        # Debug output for the first two windows only
        print(x_train)
        print(y_train)
        print("-------------------------------------")
# Convert the x_train and y_train to numpy arrays
x_train, y_train = np.array(x_train), np.array(y_train)
# Reshape to (samples, timesteps, features=1) as required by Keras LSTM layers
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
print("x_train.shape:\n",x_train.shape)
# Build the LSTM model: two stacked 50-unit LSTMs feeding two Dense layers
model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape= (x_train.shape[1], 1)))
model.add(LSTM(50, return_sequences= False))
model.add(Dense(25))
model.add(Dense(1))
# Compile the model
model.compile(optimizer='adam', loss='mean_squared_error')
# Train the model (batch_size=1 and a single epoch: slow but simple)
model.fit(x_train, y_train, batch_size=1, epochs=1)
# Create the testing data set: the final 10% of the series plus the 60
# preceding rows needed to seed the first test window.
test_data = scaled_data[training_data_len - 60: , :]
# Create the data sets x_test and y_test (y_test stays in original units)
x_test = []
y_test = dataset[training_data_len:, :]
for i in range(60, len(test_data)):
    x_test.append(test_data[i-60:i, 0])
# Convert the data to a numpy array
x_test = np.array(x_test)
# Reshape to (samples, timesteps, features=1)
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1 ))
# Get the model's predicted price values, back in original units
predictions = model.predict(x_test)
predictions = scaler.inverse_transform(predictions)
# Get the root mean squared error (RMSE)
rmse=np.sqrt(np.mean(((predictions- y_test)**2)))
print("squared error:\n",rmse)
# Plot the data
train = data[:training_data_len]
valid = data[training_data_len:]
# NOTE(review): assigning into a slice of `data` triggers pandas'
# SettingWithCopyWarning; `valid = data[training_data_len:].copy()` would
# silence it — confirm before changing.
valid['Predictions'] = predictions
# Visualize the data
plt.figure(figsize=(16,8))
plt.title('Model')
plt.xlabel('Date', fontsize=18)
plt.ylabel('Close Price USD ($)', fontsize=18)
plt.plot(train['Close'])
plt.plot(valid[['Close', 'Predictions']])
plt.legend(['Train', 'Val', 'Predictions'], loc='lower right')
plt.show()
# Predict the next trading day's close after the quoted range
# Get the quote
targetStock = web.DataReader('005930.KS', data_source='yahoo', start='2012-01-01', end='2020-2-14')
# Create a new dataframe
new_df = targetStock.filter(['Close'])
# Get the last 60 day closing price values and convert the dataframe to an array
last_60_days = new_df[-60:].values
# Scale with the ALREADY-FITTED scaler (do not refit on the new window)
last_60_days_scaled = scaler.transform(last_60_days)
# Create an empty list
X_test = []
# Append the past 60 days
X_test.append(last_60_days_scaled)
# Convert the X_test data set to a numpy array
X_test = np.array(X_test)
print("X_test shape: ",X_test.shape)
print("X_test: ",X_test)
# Reshape to (samples=1, timesteps=60, features=1)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
print("X_test.shape[0]: ",X_test.shape[0])
print(" X_test.shape[1]: ", X_test.shape[1])
# Get the predicted scaled price
pred_price = model.predict(X_test)
# Undo the scaling back to price units
pred_price = scaler.inverse_transform(pred_price)
print("내일가격: ",pred_price)
# To forecast 5 days ahead, append each day's predicted price to the input
# window and repeat this prediction step 5 times.
pred_price = pred_price.astype('float64')
print("pred_price data type: ",pred_price.dtype)
print("pred price[0]: ",pred_price[0])
# BUG FIX: the original called pred_price[0].astype('number') ('number' is
# not a valid NumPy dtype → TypeError) and then last_60_days.append(x)
# (ndarray has no .append method → AttributeError). Roll the window forward
# instead: drop the oldest close and append the day-1 prediction, keeping
# the (60, 1) shape that the fitted scaler and the trained model expect.
last_60_days = np.append(last_60_days[1:], [pred_price[0]], axis=0)
last_60_days_scaled = scaler.transform(last_60_days)
# Create an empty list
X_test = []
# Append the rolled 60-day window (now ending in the day-1 prediction)
X_test.append(last_60_days_scaled)
# Convert the X_test data set to a numpy array
X_test = np.array(X_test)
print("X_test shape: ",X_test.shape)
print("X_test: ",X_test)
# Reshape to (samples=1, timesteps=60, features=1)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
print("X_test.shape[0]: ",X_test.shape[0])
print(" X_test.shape[1]: ", X_test.shape[1])
# Get the predicted scaled price for day 2
pred_price = model.predict(X_test)
# Undo the scaling
pred_price = scaler.inverse_transform(pred_price)
print("내일가격2: ",pred_price)
# Copyright (c) 2013, Pullenti. All rights reserved. Non-Commercial Freeware.
# This class is generated using the converter UniSharping (www.unisharping.ru) from Pullenti C#.NET project (www.pullenti.ru).
# See www.pullenti.ru/downloadpage.aspx.
import typing
from pullenti.unisharp.Utils import Utils
from pullenti.ner.Token import Token
from pullenti.morph.MorphCase import MorphCase
from pullenti.ner.Referent import Referent
from pullenti.ner.ReferentToken import ReferentToken
from pullenti.ner.core.BracketParseAttr import BracketParseAttr
from pullenti.ner.core.GetTextAttr import GetTextAttr
from pullenti.ner.MorphCollection import MorphCollection
from pullenti.ner.person.PersonIdentityReferent import PersonIdentityReferent
from pullenti.ner.TextToken import TextToken
from pullenti.morph.MorphGender import MorphGender
from pullenti.ner.core.NumberHelper import NumberHelper
from pullenti.ner.person.internal.PersonAttrTerminType import PersonAttrTerminType
from pullenti.morph.MorphNumber import MorphNumber
from pullenti.morph.MorphBaseInfo import MorphBaseInfo
from pullenti.ner.person.PersonPropertyReferent import PersonPropertyReferent
from pullenti.ner.mail.internal.MailLine import MailLine
from pullenti.ner.core.MiscHelper import MiscHelper
from pullenti.ner.person.PersonReferent import PersonReferent
from pullenti.ner.core.BracketHelper import BracketHelper
from pullenti.ner.person.internal.PersonItemToken import PersonItemToken
from pullenti.ner.person.PersonAnalyzer import PersonAnalyzer
from pullenti.ner.person.internal.PersonAttrToken import PersonAttrToken
class PersonHelper:
    @staticmethod
    def _createReferentToken(p : 'PersonReferent', begin : 'Token', end : 'Token', morph_ : 'MorphCollection', attrs : typing.List['PersonAttrToken'], ad : 'PersonAnalyzerData', for_attribute : bool, after_be_predicate : bool) -> 'ReferentToken':
        """Wrap a recognized person *p* into a ReferentToken over [begin..end].

        Absorbs surrounding person attributes (titles, positions, age,
        gender), widens the span over nicknames, bracketed surnames and
        trailing contact data (phone/URI/address), identity documents and
        employer organizations, and normalizes the token morphology to
        singular with the person's gender. Returns None when *p* is None.

        NOTE(review): this generated method arrived with its indentation
        stripped; the branch nesting below was reconstructed during review —
        verify against the original Pullenti source before relying on it.
        """
        from pullenti.ner.person.internal.PersonIdentityToken import PersonIdentityToken
        if (p is None):
            return None
        # Fold the pre-collected attribute tokens into the referent's slots
        # and widen the span leftwards over them.
        has_prefix = False
        if (attrs is not None):
            for a in attrs:
                if (a.typ == PersonAttrTerminType.BESTREGARDS):
                    has_prefix = True
                else:
                    if (a.begin_char < begin.begin_char):
                        begin = a.begin_token
                    if (a.typ != PersonAttrTerminType.PREFIX):
                        if (a.age is not None):
                            p.addSlot(PersonReferent.ATTR_AGE, a.age, False, 0)
                        if (a.prop_ref is None):
                            p.addSlot(PersonReferent.ATTR_ATTR, a.value, False, 0)
                        else:
                            p.addSlot(PersonReferent.ATTR_ATTR, a, False, 0)
                    elif (a.gender == MorphGender.FEMINIE and not p.is_female):
                        p.is_female = True
                    elif (a.gender == MorphGender.MASCULINE and not p.is_male):
                        p.is_male = True
        elif ((isinstance(begin.previous, TextToken)) and (begin.whitespaces_before_count < 3)):
            # "ИП" right before the name marks an individual entrepreneur.
            if ((begin.previous).term == "ИП"):
                a = PersonAttrToken(begin.previous, begin.previous)
                a.prop_ref = PersonPropertyReferent()
                a.prop_ref.name = "индивидуальный предприниматель"
                p.addSlot(PersonReferent.ATTR_ATTR, a, False, 0)
                begin = begin.previous
        # Normalize morphology: force singular and fill in a known gender.
        m0 = MorphCollection()
        for it in morph_.items:
            bi = MorphBaseInfo(it)
            bi.number = MorphNumber.SINGULAR
            if (bi.gender == MorphGender.UNDEFINED):
                if (p.is_male and not p.is_female):
                    bi.gender = MorphGender.MASCULINE
                if (not p.is_male and p.is_female):
                    bi.gender = MorphGender.FEMINIE
            m0.addItem(bi)
        morph_ = m0
        if ((attrs is not None and len(attrs) > 0 and not attrs[0].morph.case_.is_undefined) and morph_.case_.is_undefined):
            morph_.case_ = attrs[0].morph.case_
            if (attrs[0].morph.number == MorphNumber.SINGULAR):
                morph_.number = MorphNumber.SINGULAR
            if (p.is_male and not p.is_female):
                morph_.gender = MorphGender.MASCULINE
            elif (p.is_female):
                morph_.gender = MorphGender.FEMINIE
        # "имени X" / "им. X" means the name is used attributively (e.g. an
        # institution named after the person).
        if (begin.previous is not None):
            ttt = begin.previous
            if (ttt.isValue("ИМЕНИ", "ІМЕНІ")):
                for_attribute = True
            else:
                if (ttt.isChar('.') and ttt.previous is not None):
                    ttt = ttt.previous
                    if (ttt.whitespaces_after_count < 3):
                        if (ttt.isValue("ИМ", "ІМ")):
                            for_attribute = True
        if (for_attribute):
            return ReferentToken._new2329(p, begin, end, morph_, p._m_person_identity_typ)
        # Comma-joined list of persons preceded by a plural property
        # ("directors: A, B, C") — share that property with this person too.
        if ((begin.previous is not None and begin.previous.is_comma_and and (isinstance(begin.previous.previous, ReferentToken))) and (isinstance(begin.previous.previous.getReferent(), PersonReferent))):
            rt00 = Utils.asObjectOrNull(begin.previous.previous, ReferentToken)
            ttt = rt00
            while ttt is not None:
                if (ttt.previous is None or not ((isinstance(ttt.previous.previous, ReferentToken)))):
                    break
                if (not ttt.previous.is_comma_and or not ((isinstance(ttt.previous.previous.getReferent(), PersonReferent)))):
                    break
                rt00 = (Utils.asObjectOrNull(ttt.previous.previous, ReferentToken))
                ttt = (rt00)
            if (isinstance(rt00.begin_token.getReferent(), PersonPropertyReferent)):
                ok = False
                if ((rt00.begin_token).end_token.next0_ is not None and (rt00.begin_token).end_token.next0_.isChar(':')):
                    ok = True
                elif (rt00.begin_token.morph.number == MorphNumber.PLURAL):
                    ok = True
                if (ok):
                    p.addSlot(PersonReferent.ATTR_ATTR, rt00.begin_token.getReferent(), False, 0)
        # Recursion guard shared through the analyzer data.
        if (ad is not None):
            if (ad.overflow_level > 10):
                return ReferentToken._new2329(p, begin, end, morph_, p._m_person_identity_typ)
            ad.overflow_level += 1
        # First pass over the tokens AFTER the name: collect trailing
        # attributes (positions etc.), nicknames and bracketed surnames.
        attrs1 = None
        has_position = False
        open_br = False
        t = end.next0_
        first_pass3095 = True
        while True:
            if first_pass3095: first_pass3095 = False
            else: t = t.next0_
            if (not (t is not None)): break
            if (t.is_table_control_char):
                break
            if (t.is_newline_before):
                if (t.newlines_before_count > 2):
                    break
                if (attrs1 is not None and len(attrs1) > 0):
                    break
                ml = MailLine.parse(t, 0)
                if (ml is not None and ml.typ == MailLine.Types.FROM):
                    break
                if (t.chars.is_capital_upper):
                    attr1 = PersonAttrToken.tryAttach(t, (None if ad is None else ad.local_ontology), PersonAttrToken.PersonAttrAttachAttrs.NO)
                    ok1 = False
                    if (attr1 is not None):
                        if (has_prefix or attr1.is_newline_after or ((attr1.end_token.next0_ is not None and attr1.end_token.next0_.is_table_control_char))):
                            ok1 = True
                        else:
                            tt2 = t.next0_
                            while tt2 is not None and tt2.end_char <= attr1.end_char:
                                if (tt2.is_whitespace_before):
                                    ok1 = True
                                tt2 = tt2.next0_
                    else:
                        ttt = PersonHelper.__correctTailAttributes(p, t)
                        if (ttt is not None and ttt != t):
                            t = ttt
                            end = t
                            continue
                    if (not ok1):
                        break
            if (t.is_hiphen or t.isCharOf("_>|")):
                continue
            if (t.isValue("МОДЕЛЬ", None)):
                break
            tt = PersonHelper.__correctTailAttributes(p, t)
            if (tt != t and tt is not None):
                t = tt
                end = t
                continue
            # NOTE(review): is_be is never set True in this pass (see also the
            # unused after_be_predicate parameter).
            is_be = False
            if (t.isChar('(') and t == end.next0_):
                # A lone surname in brackets right after the name.
                open_br = True
                t = t.next0_
                if (t is None):
                    break
                pit1 = PersonItemToken.tryAttach(t, None, PersonItemToken.ParseAttr.NO, None)
                if ((pit1 is not None and t.chars.is_capital_upper and pit1.end_token.next0_ is not None) and (isinstance(t, TextToken)) and pit1.end_token.next0_.isChar(')')):
                    if (pit1.lastname is not None):
                        inf = MorphBaseInfo._new2321(MorphCase.NOMINATIVE)
                        if (p.is_male):
                            inf.gender = Utils.valToEnum((inf.gender) | (MorphGender.MASCULINE), MorphGender)
                        if (p.is_female):
                            inf.gender = Utils.valToEnum((inf.gender) | (MorphGender.FEMINIE), MorphGender)
                        sur = PersonIdentityToken.createLastname(pit1, inf)
                        if (sur is not None):
                            p._addFioIdentity(sur, None, None)
                            t = pit1.end_token.next0_
                            end = t
                            continue
            elif (t.is_comma):
                t = t.next0_
                if ((isinstance(t, TextToken)) and (t).isValue("WHO", None)):
                    continue
            elif ((isinstance(t, TextToken)) and (t).is_verb_be):
                t = t.next0_
            elif (t.is_and and t.is_whitespace_after and not t.is_newline_after):
                if (t == end.next0_):
                    break
                t = t.next0_
            elif (t.is_hiphen and t == end.next0_):
                t = t.next0_
            elif (t.isChar('.') and t == end.next0_ and has_prefix):
                t = t.next0_
            ttt2 = PersonHelper.createNickname(p, t)
            if (ttt2 is not None):
                end = ttt2
                t = end
                continue
            if (t is None):
                break
            attr = None
            attr = PersonAttrToken.tryAttach(t, (None if ad is None else ad.local_ontology), PersonAttrToken.PersonAttrAttachAttrs.NO)
            if (attr is None):
                if ((t is not None and t.getReferent() is not None and t.getReferent().type_name == "GEO") and attrs1 is not None and open_br):
                    continue
                if ((t.chars.is_capital_upper and open_br and t.next0_ is not None) and t.next0_.isChar(')')):
                    if (p.findSlot(PersonReferent.ATTR_LASTNAME, None, True) is None):
                        p.addSlot(PersonReferent.ATTR_LASTNAME, t.getSourceText().upper(), False, 0)
                        t = t.next0_
                        end = t
                # "который/которая" right after the span fixes the gender.
                if (t is not None and t.isValue("КОТОРЫЙ", None) and t.morph.number == MorphNumber.SINGULAR):
                    if (not p.is_female and t.morph.gender == MorphGender.FEMINIE):
                        p.is_female = True
                        p._correctData()
                    elif (not p.is_male and t.morph.gender == MorphGender.MASCULINE):
                        p.is_male = True
                        p._correctData()
                break
            if (attr.morph.number == MorphNumber.PLURAL):
                break
            if (attr.typ == PersonAttrTerminType.BESTREGARDS):
                break
            if (attr.is_doubt):
                if (has_prefix):
                    pass
                elif (t.is_newline_before and attr.is_newline_after):
                    pass
                elif (t.previous is not None and ((t.previous.is_hiphen or t.previous.isChar(':')))):
                    pass
                else:
                    break
            if (not morph_.case_.is_undefined and not attr.morph.case_.is_undefined):
                if (((morph_.case_) & attr.morph.case_).is_undefined and not is_be):
                    break
            if (open_br):
                if (PersonAnalyzer._tryAttachPerson(t, ad, False, 0, True) is not None):
                    break
            if (attrs1 is None):
                if (t.previous.is_comma and t.previous == end.next0_):
                    ttt = attr.end_token.next0_
                    if (ttt is not None):
                        if (ttt.morph.class0_.is_verb):
                            if (MiscHelper.canBeStartOfSentence(begin)):
                                pass
                            else:
                                break
                attrs1 = list()
            attrs1.append(attr)
            if (attr.typ == PersonAttrTerminType.POSITION or attr.typ == PersonAttrTerminType.KING):
                if (not is_be):
                    has_position = True
            elif (attr.typ != PersonAttrTerminType.PREFIX):
                if (attr.typ == PersonAttrTerminType.OTHER and attr.age is not None):
                    pass
                else:
                    attrs1 = (None)
                    break
            t = attr.end_token
        # Decide whether the collected trailing attributes really belong to
        # this person (spacing/punctuation heuristics), else discard them.
        if (attrs1 is not None and has_position and attrs is not None):
            te1 = attrs[len(attrs) - 1].end_token.next0_
            te2 = attrs1[0].begin_token
            if (te1.whitespaces_after_count > te2.whitespaces_before_count and (te2.whitespaces_before_count < 2)):
                pass
            elif (attrs1[0].age is not None):
                pass
            elif (((te1.is_hiphen or te1.isChar(':'))) and not attrs1[0].is_newline_before and ((te2.previous.is_comma or te2.previous == end))):
                pass
            else:
                for a in attrs:
                    if (a.typ == PersonAttrTerminType.POSITION):
                        te = attrs1[len(attrs1) - 1].end_token
                        if (te.next0_ is not None):
                            if (not te.next0_.isChar('.')):
                                attrs1 = (None)
                                break
        if (attrs1 is not None and not has_prefix):
            attr = attrs1[len(attrs1) - 1]
            ok = False
            if (attr.end_token.next0_ is not None and attr.end_token.next0_.chars.is_capital_upper):
                ok = True
            else:
                rt = PersonAnalyzer._tryAttachPerson(attr.begin_token, ad, False, -1, False)
                if (rt is not None and (isinstance(rt.referent, PersonReferent))):
                    ok = True
            if (ok):
                if (attr.begin_token.whitespaces_before_count > attr.end_token.whitespaces_after_count):
                    attrs1 = (None)
                elif (attr.begin_token.whitespaces_before_count == attr.end_token.whitespaces_after_count):
                    rt1 = PersonAnalyzer._tryAttachPerson(attr.begin_token, ad, False, -1, False)
                    if (rt1 is not None):
                        attrs1 = (None)
        # Commit the accepted trailing attributes into the referent.
        if (attrs1 is not None):
            for a in attrs1:
                if (a.typ != PersonAttrTerminType.PREFIX):
                    if (a.age is not None):
                        p.addSlot(PersonReferent.ATTR_AGE, a.age, True, 0)
                    elif (a.prop_ref is None):
                        p.addSlot(PersonReferent.ATTR_ATTR, a.value, False, 0)
                    else:
                        p.addSlot(PersonReferent.ATTR_ATTR, a, False, 0)
                    end = a.end_token
                    if (a.gender != MorphGender.UNDEFINED and not p.is_female and not p.is_male):
                        if (a.gender == MorphGender.MASCULINE and not p.is_male):
                            p.is_male = True
                            p._correctData()
                        elif (a.gender == MorphGender.FEMINIE and not p.is_female):
                            p.is_female = True
                            p._correctData()
        if (open_br):
            if (end.next0_ is not None and end.next0_.isChar(')')):
                end = end.next0_
        # Second pass: absorb signature-style trailers — contacts, identity
        # documents, employer organizations — until something else appears.
        crlf_cou = 0
        t = end.next0_
        first_pass3096 = True
        while True:
            if first_pass3096: first_pass3096 = False
            else: t = t.next0_
            if (not (t is not None)): break
            if (t.is_table_control_char):
                break
            if (t.is_newline_before):
                ml = MailLine.parse(t, 0)
                if (ml is not None and ml.typ == MailLine.Types.FROM):
                    break
                crlf_cou += 1
            if (t.isCharOf(":,(") or t.is_hiphen):
                continue
            if (t.isChar('.') and t == end.next0_):
                continue
            r = t.getReferent()
            if (r is not None):
                if (r.type_name == "PHONE" or r.type_name == "URI" or r.type_name == "ADDRESS"):
                    ty = r.getStringValue("SCHEME")
                    if (r.type_name == "URI"):
                        # Only personal-contact URI schemes are attached.
                        if ((ty != "mailto" and ty != "skype" and ty != "ICQ") and ty != "http"):
                            break
                    p._addContact(r)
                    end = t
                    crlf_cou = 0
                    continue
            if (isinstance(r, PersonIdentityReferent)):
                p.addSlot(PersonReferent.ATTR_IDDOC, r, False, 0)
                end = t
                crlf_cou = 0
                continue
            if (r is not None and r.type_name == "ORGANIZATION"):
                if (t.next0_ is not None and t.next0_.morph.class0_.is_verb):
                    break
                if (begin.previous is not None and begin.previous.morph.class0_.is_verb):
                    break
                if (t.whitespaces_after_count == 1):
                    break
                # Attach the organization as an "employee of" property unless
                # an equivalent reference already exists on the person.
                exist = False
                for s in p.slots:
                    if (s.type_name == PersonReferent.ATTR_ATTR and (isinstance(s.value, PersonPropertyReferent))):
                        pr = Utils.asObjectOrNull(s.value, PersonPropertyReferent)
                        if (pr.findSlot(PersonPropertyReferent.ATTR_REF, r, True) is not None):
                            exist = True
                            break
                    elif (s.type_name == PersonReferent.ATTR_ATTR and (isinstance(s.value, PersonAttrToken))):
                        pr = Utils.asObjectOrNull(s.value, PersonAttrToken)
                        if (pr.referent.findSlot(PersonPropertyReferent.ATTR_REF, r, True) is not None):
                            exist = True
                            break
                if (not exist):
                    pat = PersonAttrToken(t, t)
                    pat.prop_ref = PersonPropertyReferent._new2291("сотрудник")
                    pat.prop_ref.addSlot(PersonPropertyReferent.ATTR_REF, r, False, 0)
                    p.addSlot(PersonReferent.ATTR_ATTR, pat, False, 0)
                continue
            if (r is not None):
                break
            if (not has_prefix or crlf_cou >= 2):
                break
            rt = t.kit.processReferent("PERSON", t)
            if (rt is not None):
                break
        if (ad is not None):
            ad.overflow_level -= 1
        return ReferentToken._new2329(p, begin, end, morph_, p._m_person_identity_typ)
@staticmethod
def createNickname(pr : 'PersonReferent', t : 'Token') -> 'Token':
    """ Extract a nickname (alias, pseudonym or call sign).
    Args:
        pr(PersonReferent): the person the nickname will be attached to
        t(Token): the first token to scan
    Returns:
        Token: if not None, the last token of the nickname (the nickname
        itself is written into pr as ATTR_NICKNAME); None when no
        nickname construct was found
    """
    has_keyw = False
    is_br = False
    first_pass3097 = True
    while True:
        # emulated do-while: advance t on every pass except the first
        if first_pass3097: first_pass3097 = False
        else: t = t.next0_
        if (not (t is not None)): break
        # skip separators between the keyword and the nickname itself
        if (t.is_hiphen or t.is_comma or t.isCharOf(".:;")):
            continue
        if (t.morph.class0_.is_preposition):
            continue
        if (t.isChar('(')):
            # remember we entered parentheses so we can consume the ')'
            is_br = True
            continue
        # keywords meaning nickname / pseudonym / call sign (RU/UA forms)
        if ((t.isValue("ПРОЗВИЩЕ", "ПРІЗВИСЬКО") or t.isValue("КЛИЧКА", None) or t.isValue("ПСЕВДОНИМ", "ПСЕВДОНІМ")) or t.isValue("ПСЕВДО", None) or t.isValue("ПОЗЫВНОЙ", "ПОЗИВНИЙ")):
            has_keyw = True
            continue
        break
    # a nickname is only extracted right after an explicit keyword
    if (not has_keyw or t is None):
        return None
    if (BracketHelper.isBracket(t, True)):
        # quoted form: «nick» — possibly a comma/and-separated list of quotes
        br = BracketHelper.tryParse(t, BracketParseAttr.NO, 100)
        if (br is not None):
            ni = MiscHelper.getTextValue(br.begin_token.next0_, br.end_token.previous, GetTextAttr.NO)
            if (ni is not None):
                pr.addSlot(PersonReferent.ATTR_NICKNAME, ni, False, 0)
                t = br.end_token
                tt = t.next0_
                first_pass3098 = True
                while True:
                    # consume additional quoted nicknames separated by commas/"and"
                    if first_pass3098: first_pass3098 = False
                    else: tt = tt.next0_
                    if (not (tt is not None)): break
                    if (tt.is_comma_and):
                        continue
                    if (not BracketHelper.isBracket(tt, True)):
                        break
                    br = BracketHelper.tryParse(tt, BracketParseAttr.NO, 100)
                    if (br is None):
                        break
                    ni = MiscHelper.getTextValue(br.begin_token.next0_, br.end_token.previous, GetTextAttr.NO)
                    if (ni is not None):
                        pr.addSlot(PersonReferent.ATTR_NICKNAME, ni, False, 0)
                    tt = br.end_token
                    t = tt
                # if the whole construct sat inside '(...)', swallow the ')'
                if (is_br and t.next0_ is not None and t.next0_.isChar(')')):
                    t = t.next0_
                return t
    else:
        # unquoted form: one or two person-item tokens right after the keyword
        pli = PersonItemToken.tryAttachList(t, None, PersonItemToken.ParseAttr.NO, 10)
        if (pli is not None and ((len(pli) == 1 or len(pli) == 2))):
            ni = MiscHelper.getTextValue(pli[0].begin_token, pli[len(pli) - 1].end_token, GetTextAttr.NO)
            if (ni is not None):
                pr.addSlot(PersonReferent.ATTR_NICKNAME, ni, False, 0)
                t = pli[len(pli) - 1].end_token
                if (is_br and t.next0_ is not None and t.next0_.isChar(')')):
                    t = t.next0_
                return t
    return None
@staticmethod
def isPersonSayOrAttrAfter(t : 'Token') -> bool:
    """Check whether the context right after a person mention confirms it is
    a person: a birth/death tail (delegated to __correctTailAttributes),
    an English say/ask/who verb, a pronoun after a period, or an
    attachable person attribute.
    Args:
        t(Token): first token after the person mention (may be None)
    Returns:
        bool: True if any confirming construct follows
    """
    if (t is None):
        return False
    # a birth/death/age tail parsed in dry-run mode (p=None) counts as confirmation
    tt = PersonHelper.__correctTailAttributes(None, t)
    if (tt is not None and tt != t):
        return True
    if (t.is_comma and t.next0_ is not None):
        t = t.next0_
    if (t.chars.is_latin_letter):
        # English reporting verbs right after the name
        if (t.isValue("SAY", None) or t.isValue("ASK", None) or t.isValue("WHO", None)):
            return True
    # ". He/She ..." — a gendered pronoun starting the next sentence
    if (t.isChar('.') and (isinstance(t.next0_, TextToken)) and ((t.next0_.morph.class0_.is_pronoun or t.next0_.morph.class0_.is_personal_pronoun))):
        if (t.next0_.morph.gender == MorphGender.FEMINIE or t.next0_.morph.gender == MorphGender.MASCULINE):
            return True
    if (t.is_comma and t.next0_ is not None):
        t = t.next0_
    # a person attribute (post, title, etc.) can follow a person mention
    if (PersonAttrToken.tryAttach(t, None, PersonAttrToken.PersonAttrAttachAttrs.NO) is not None):
        return True
    return False
@staticmethod
def __correctTailAttributes(p : 'PersonReferent', t0 : 'Token') -> 'Token':
    """Parse trailing attributes after a person mention — birth/death
    phrases ("родился ...", "born ...", "умер ...", "died ..."), an age
    at death, and a life-span or birth date in parentheses — and attach
    them to p as slots.
    Args:
        p(PersonReferent): person to receive the slots, or None for a
            dry run (detection only, nothing is written)
        t0(Token): the token where the tail may start
    Returns:
        Token: the last consumed token, or t0 itself if nothing matched
    """
    res = t0
    t = t0
    if (t is not None and t.isChar(',')):
        t = t.next0_
    born = False
    die = False
    # "родился"/"born" introduces a birth date
    if (t is not None and ((t.isValue("РОДИТЬСЯ", "НАРОДИТИСЯ") or t.isValue("BORN", None)))):
        t = t.next0_
        born = True
    # "умер"/"скончался"/"died" introduces a death date
    elif (t is not None and ((t.isValue("УМЕРЕТЬ", "ПОМЕРТИ") or t.isValue("СКОНЧАТЬСЯ", None) or t.isValue("DIED", None)))):
        t = t.next0_
        die = True
    # "дата рождения" (date of birth)
    elif ((t is not None and t.isValue("ДАТА", None) and t.next0_ is not None) and t.next0_.isValue("РОЖДЕНИЕ", "НАРОДЖЕННЯ")):
        t = t.next0_.next0_
        born = True
    # skip filler between the keyword and the date itself
    while t is not None:
        if (t.morph.class0_.is_preposition or t.is_hiphen or t.isChar(':')):
            t = t.next0_
        else:
            break
    if (t is not None and t.getReferent() is not None):
        r = t.getReferent()
        if (r.type_name == "DATE"):
            t1 = t
            # "<date> р." / "<date> рождения" marks the date as a birth date
            if (t.next0_ is not None and ((t.next0_.isValue("Р", None) or t.next0_.isValue("РОЖДЕНИЕ", "НАРОДЖЕННЯ")))):
                born = True
                t1 = t.next0_
                if (t1.next0_ is not None and t1.next0_.isChar('.')):
                    t1 = t1.next0_
            if (born):
                if (p is not None):
                    p.addSlot(PersonReferent.ATTR_BORN, r, False, 0)
                res = t1
                t = t1
            elif (die):
                if (p is not None):
                    p.addSlot(PersonReferent.ATTR_DIE, r, False, 0)
                res = t1
                t = t1
    # "умер в возрасте N лет" — age at death
    if (die and t is not None):
        ag = NumberHelper.tryParseAge(t.next0_)
        if (ag is not None):
            if (p is not None):
                p.addSlot(PersonReferent.ATTR_AGE, str(ag.value), False, 0)
            t = ag.end_token.next0_
            res = ag.end_token
    if (t is None):
        return res
    # "(1920 - 1985)" life span or "(род. 1920)" birth date in parentheses
    if (t.isChar('(')):
        br = BracketHelper.tryParse(t, BracketParseAttr.NO, 100)
        if (br is not None):
            t1 = t.next0_
            born = False
            # "род." abbreviation inside the brackets
            if (t1.isValue("РОД", None)):
                born = True
                t1 = t1.next0_
                if (t1 is not None and t1.isChar('.')):
                    t1 = t1.next0_
            if (isinstance(t1, ReferentToken)):
                r = t1.getReferent()
                if (r.type_name == "DATERANGE" and t1.next0_ == br.end_token):
                    # a full life span: FROM = birth, TO = death
                    bd = Utils.asObjectOrNull(r.getSlotValue("FROM"), Referent)
                    to = Utils.asObjectOrNull(r.getSlotValue("TO"), Referent)
                    if (bd is not None and to is not None):
                        if (p is not None):
                            p.addSlot(PersonReferent.ATTR_BORN, bd, False, 0)
                            p.addSlot(PersonReferent.ATTR_DIE, to, False, 0)
                        res = br.end_token
                        t = res
                elif (r.type_name == "DATE" and t1.next0_ == br.end_token):
                    if (p is not None):
                        p.addSlot(PersonReferent.ATTR_BORN, r, False, 0)
                    res = br.end_token
                    t = res
    return res
#!/usr/bin/python
# UDP Reflector
#
# Displays and optionally forwards packets to a wireshark receiver
# Listens on a well known port 27000 (ie fixed)
# By default displays all packets but will filter using simple character matching (no wild carding)
#
#
# Command line options
#
# filterstring - display packets containing this string
# IP Address and Port to send to wireshark
import logging
import os
import socket
import sys
import time
import threading
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s',level=logging.INFO)
#logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s',level=logging.DEBUG)
#logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s')
MIN_VERSION_PY3 = 5 # min. 3.x version
if (sys.version_info[0] < 3):
Warning_Message = "ERROR: This script requires a minimum of Python 3." + str(MIN_VERSION_PY3)
print('')
logging.critical(Warning_Message)
print('')
print('Invalid Version of Python running')
print('Running Python earlier than Python 3.0! ' + sys.version)
sys.exit(Warning_Message)
elif (sys.version_info[0] == 3 and sys.version_info[1] < MIN_VERSION_PY3):
Warning_Message = "ERROR: This script requires a minimum of Python 3." + str(MIN_VERSION_PY3)
print('')
logging.critical(Warning_Message)
print('')
print('Invalid Version of Python running')
print('Running Python ' + sys.version)
sys.exit(Warning_Message)
# Used for command line parsing
from optparse import OptionParser
def CleanUpAndExit():
    """Close the listening socket (best effort) and terminate the process.

    Called on Ctrl-C and on fatal setup errors. Always exits with status 0.
    """
    print('')
    print('Exiting')
    print('')
    try:
        # serverSock may never have been created (e.g. bind failed) or may
        # already be closed — treat either as non-fatal.
        serverSock.close()
    except Exception:
        # Was a bare `except:` before, which would also swallow
        # SystemExit/KeyboardInterrupt; narrowed to Exception.
        logging.critical('Unable to close server socket')
    sys.exit(0)
# Global Variables
debugging = False
config_file = 'UDP_Reflector_config.py'
filterString = ''
secondsBetweenKeepAlives = 60
# Initialise keepalive indicator
last_time_display = time.time()
packets_processed = 0
# Address and Port to listen on ("" = all local interfaces)
UDP_IP_Address = ""
UDP_Port = 27000
# Wireshark target address and Port (forwarding is disabled while the address is None)
wireshark_IP_Address = None
wireshark_Port = 27001
try:
    if not os.path.isfile(config_file):
        logging.info('Unable to find ' + config_file)
    else:
        try:
            # Execute the config file in the module namespace so its
            # assignments override the defaults above.
            # BUG FIX: the old `from config_file import *` imported a module
            # literally named "config_file", not the file named by the
            # `config_file` variable, so the config was never loaded.
            with open(config_file) as cfg:
                exec(cfg.read())
        except Exception as other:
            logging.critical("Error in Initialisation: " + str(other))
            print('Unable to open "' + config_file + '"' )
            print('Or variable assignment incorrect - forgot quotes for string?')
            print('Defaults used')
    # The timeout is aggressive
    serverSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    serverSock.settimeout(0.0001)
    serverSock.bind((UDP_IP_Address, UDP_Port))
    wireshark_Sock = socket.socket(socket.AF_INET, # Internet
                                   socket.SOCK_DGRAM) # UDP
    # See if value is assigned. First we checked config file and then
    # command line arguments (command line wins).
    logging.debug("Checking Command Line parameters")
    parser = OptionParser()
    parser.add_option("-w", "--wh", dest="opt_W_Host",
                      help="Wireshark Target IP Address", metavar="opt_W_Host")
    parser.add_option("-u", "--wp", dest="opt_W_Port",
                      help="Wireshark Target Port. 27001 is used if not explicitly specified",
                      metavar="opt_W_Port")
    (options, args) = parser.parse_args()
    logging.debug("options:" + str(options))
    logging.debug("arguments:" + str(args))
    if options.opt_W_Host is not None:
        wireshark_IP_Address = str(options.opt_W_Host)
    if options.opt_W_Port is not None:
        # Keep the port as an int so it is usable directly with sendto().
        wireshark_Port = int(options.opt_W_Port)
    if wireshark_IP_Address is not None:
        logging.info("Wireshark host is : " + wireshark_IP_Address)
        logging.info("Wireshark UDP port is : " + str(wireshark_Port))
    if len(args) != 0:
        filterString = args[0]
        logging.info("Display Filter is :" + str(args[0]))
except KeyboardInterrupt:
    CleanUpAndExit()
except Exception as other:
    logging.critical('Error in Setup: ' + str(other))
def ReceivePacket():
    """Receive UDP datagrams from serverSock forever and pass each payload to
    ProcessReceivedString().

    The socket's aggressive timeout makes recvfrom() return regularly so a
    keepalive log line (with a packets-per-second estimate) can be emitted
    every secondsBetweenKeepAlives seconds even when the line is idle.
    """
    global last_time_display
    global packets_processed
    iterations_Since_Last_Packet = 0
    while True:
        try:
            data, (Source_IP, Source_Port) = serverSock.recvfrom(1500)
            packets_processed = packets_processed + 1
            Source_IP = str(Source_IP)
            Source_Port = str(Source_Port)
            # BUG FIX: str(bytes) on Python 3 yields a "b'...'" repr, not the
            # payload text — decode instead (bad bytes become U+FFFD).
            ReceivedPacket = data.decode('utf-8', errors='replace')
            logging.debug("From: " + Source_IP + " " + Source_Port)
            logging.debug("Message: " + ReceivedPacket)
            ProcessReceivedString(ReceivedPacket, Source_IP, Source_Port)
            logging.debug("Iterations since last packet " + str(iterations_Since_Last_Packet))
            iterations_Since_Last_Packet = 0
        except socket.timeout:
            iterations_Since_Last_Packet = iterations_Since_Last_Packet + 1
            if debugging == True and (iterations_Since_Last_Packet > 10000):
                print("[i] Mid Receive Timeout - " + time.asctime())
                iterations_Since_Last_Packet = 0
            # Throw something on console to show we haven't died
            if time.time() - last_time_display > secondsBetweenKeepAlives:
                # Calculate Packets per Second over the keepalive interval
                if packets_processed != 0:
                    packets_per_Second = packets_processed / secondsBetweenKeepAlives
                else:
                    packets_per_Second = 0
                pps_string = ". " + str(packets_per_Second) + " packets per second."
                logging.info('Keepalive check ' + str(packets_processed)
                             + ' Packets Processed' + pps_string)
                last_time_display = time.time()
                packets_processed = 0
            continue
        except Exception as other:
            logging.critical('Error in ReceivePacket: ' + str(other))
            if time.time() - last_time_display > 5:
                print('Keepalive ' + time.asctime())
                last_time_display = time.time()
def ProcessReceivedString(ReceivedUDPString, Source_IP, Source_Port):
    """Log a received payload, optionally forward it to the configured
    wireshark receiver, and apply the command-line display filter.

    Args:
        ReceivedUDPString: decoded payload text (empty strings are ignored)
        Source_IP: sender address as a string
        Source_Port: sender port as a string
    """
    logging.debug('Processing UDP String')
    global wireshark_Sock, wireshark_IP_Address, wireshark_Port
    try:
        if len(ReceivedUDPString) > 0:
            ReceivedUDPString = str(ReceivedUDPString)
            logging.debug("From: " + Source_IP + " " + Source_Port)
            logging.info('Payload: ' + ReceivedUDPString)
            Send_String = Source_IP + ':' + Source_Port + '---' + ReceivedUDPString
            # Is Wireshark target address set - if so throw a copy of the packet in its direction.
            # BUG FIX: sendto() requires bytes on Python 3; sending the str
            # raised TypeError on every packet (silently logged as critical).
            if wireshark_IP_Address is not None:
                wireshark_Sock.sendto(Send_String.encode('utf-8'),
                                      (wireshark_IP_Address, int(wireshark_Port)))
            # If we have passed a filter string via the command line - then display
            # packets that contain the string (this includes the IP address of the sender)
            if filterString != "":
                if filterString in Send_String:
                    print(Send_String)
    except Exception as other:
        logging.critical('Error in ProcessReceivedString. Error is: ' + str(other))
def Main():
    """Entry point: announce the listening port, then run the blocking
    receive loop until Ctrl-C or a fatal error."""
    print('Listening on port ' + str(UDP_Port))
    try:
        ReceivePacket()
    except KeyboardInterrupt:
        # Ctrl-C: close the socket and exit cleanly.
        CleanUpAndExit()
    except Exception as err:
        logging.critical('Error in Main: ' + str(err))


Main()
|
# Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tableau source module
"""
import json
import traceback
from typing import Iterable, List, Optional
from pydantic import BaseModel, Extra
from requests.utils import urlparse
from tableau_api_lib.utils import extract_pages
from metadata.generated.schema.api.classification.createClassification import (
CreateClassificationRequest,
)
from metadata.generated.schema.api.classification.createTag import CreateTagRequest
from metadata.generated.schema.api.data.createChart import CreateChartRequest
from metadata.generated.schema.api.data.createDashboard import CreateDashboardRequest
from metadata.generated.schema.api.lineage.addLineage import AddLineageRequest
from metadata.generated.schema.entity.classification.tag import Tag
from metadata.generated.schema.entity.data.dashboard import (
Dashboard as LineageDashboard,
)
from metadata.generated.schema.entity.data.table import Table
from metadata.generated.schema.entity.services.connections.dashboard.tableauConnection import (
TableauConnection,
)
from metadata.generated.schema.entity.services.connections.metadata.openMetadataConnection import (
OpenMetadataConnection,
)
from metadata.generated.schema.metadataIngestion.workflow import (
Source as WorkflowSource,
)
from metadata.generated.schema.type.entityReference import EntityReference
from metadata.generated.schema.type.tagLabel import TagLabel
from metadata.ingestion.api.source import InvalidSourceException, SourceStatus
from metadata.ingestion.models.ometa_classification import OMetaTagAndClassification
from metadata.ingestion.source.dashboard.dashboard_service import DashboardServiceSource
from metadata.ingestion.source.dashboard.tableau import (
TABLEAU_GET_VIEWS_PARAM_DICT,
TABLEAU_GET_WORKBOOKS_PARAM_DICT,
)
from metadata.ingestion.source.dashboard.tableau.queries import (
TABLEAU_LINEAGE_GRAPHQL_QUERY,
)
from metadata.utils import fqn
from metadata.utils.filters import filter_by_chart
from metadata.utils.helpers import get_standard_chart_type
from metadata.utils.logger import ingestion_logger
logger = ingestion_logger()
TABLEAU_TAG_CATEGORY = "TableauTags"
class TableauBaseModel(BaseModel):
    """
    Tableau basic configurations: the common `id` + `name` pair shared by
    every Tableau API entity modelled in this module.
    """

    class Config:
        # Tableau API responses carry extra keys we do not model; keep them
        # instead of failing validation.
        extra = Extra.allow

    id: str
    name: str
class TableauOwner(TableauBaseModel):
    """
    Tableau Owner Details: the workbook owner as returned by the REST API,
    extended with the email used to look up the OpenMetadata user.
    """

    email: str
class TableauChart(TableauBaseModel):
    """
    Chart (View) representation from API
    """

    # id of the workbook (dashboard) this view belongs to
    workbook_id: str
    # Tableau sheet type; mapped to a standard chart type at ingestion time
    sheet_type: str
    # URL fragment used when building the chart URL
    view_url_name: str
    # plain tag labels attached to the view
    tags: List[str]
class TableauDashboard(TableauBaseModel):
    """
    Response from Tableau API: a workbook, which is ingested as a dashboard.
    """

    description: Optional[str]
    # plain tag labels attached to the workbook
    tags: List[str]
    owner: TableauOwner
    # populated after the fact by matching views to their workbook_id
    charts: Optional[List[TableauChart]]
    # full Tableau URL; its fragment holds the site-relative path
    webpage_url: Optional[str]
class TableauSource(DashboardServiceSource):
    """
    Tableau Source Class.

    Fetches workbooks (dashboards), views (charts), tags and — when a
    database service is configured — lineage information from the Tableau
    REST and Metadata (GraphQL) APIs. Everything is restructured during
    `prepare` so the `yield_*` methods work from plain pydantic models.
    """

    config: WorkflowSource
    metadata_config: OpenMetadataConnection
    status: SourceStatus

    def __init__(
        self,
        config: WorkflowSource,
        metadata_config: OpenMetadataConnection,
    ):
        super().__init__(config, metadata_config)
        self.workbooks = None  # We will populate this in `prepare`
        self.tags = set()  # To create the tags before yielding final entities
        self.workbook_datasources = {}

    def prepare(self):
        """
        Restructure the API responses: fetch all workbooks and views, attach
        each view to its workbook, collect every tag, and (optionally) pull
        datasource information for lineage via the Metadata GraphQL API.
        """
        # Available fields information:
        # https://help.tableau.com/current/api/rest_api/en-us/REST/rest_api_concepts_fields.htm#query_workbooks_site
        # We can also get project.description as folder
        self.workbooks = [
            TableauDashboard(
                id=workbook["id"],
                name=workbook["name"],
                description=workbook.get("description"),
                tags=[
                    tag["label"] for tag in workbook.get("tags", {}).get("tag") or []
                ],
                owner=TableauOwner(
                    id=workbook.get("owner", {}).get("id"),
                    name=workbook.get("owner", {}).get("name"),
                    email=workbook.get("owner", {}).get("email"),
                ),
                webpage_url=workbook.get("webpageUrl"),
            )
            for workbook in extract_pages(
                self.client.query_workbooks_for_site,
                parameter_dict=TABLEAU_GET_WORKBOOKS_PARAM_DICT,
            )
        ]

        # For charts, we can also pick up usage as a field
        charts = [
            TableauChart(
                id=chart["id"],
                name=chart["name"],
                # workbook.id is always included in the response
                workbook_id=chart["workbook"]["id"],
                sheet_type=chart["sheetType"],
                view_url_name=chart["viewUrlName"],
                tags=[tag["label"] for tag in chart.get("tags", {}).get("tag") or []],
            )
            for chart in extract_pages(
                self.client.query_views_for_site,
                content_id=self.client.site_id,
                parameter_dict=TABLEAU_GET_VIEWS_PARAM_DICT,
            )
        ]

        # Add all the charts (views) from the API to each workbook
        for workbook in self.workbooks:
            workbook.charts = [
                chart for chart in charts if chart.workbook_id == workbook.id
            ]

        # Collecting all view & workbook tags
        for container in [self.workbooks, charts]:
            for elem in container:
                self.tags.update(elem.tags)

        if self.source_config.dbServiceNames:
            try:
                # Fetch Datasource information for lineage
                graphql_query_result = self.client.metadata_graphql_query(
                    query=TABLEAU_LINEAGE_GRAPHQL_QUERY
                )
                self.workbook_datasources = json.loads(graphql_query_result.text)[
                    "data"
                ].get("workbooks")
            except Exception:
                logger.debug(traceback.format_exc())
                logger.warning(
                    "\nSomething went wrong while connecting to Tableau Metadata APIs\n"
                    "Please check if the Tableau Metadata APIs are enabled for your Tableau instance\n"
                    "For more information on enabling the Tableau Metadata APIs follow the link below\n"
                    "https://help.tableau.com/current/api/metadata_api/en-us/docs/meta_api_start.html"
                    "#enable-the-tableau-metadata-api-for-tableau-server\n"
                )

        return super().prepare()

    @classmethod
    def create(cls, config_dict: dict, metadata_config: OpenMetadataConnection):
        """Build a TableauSource from a workflow config dict, validating that
        the service connection really is a Tableau connection."""
        config: WorkflowSource = WorkflowSource.parse_obj(config_dict)
        connection: TableauConnection = config.serviceConnection.__root__.config
        if not isinstance(connection, TableauConnection):
            raise InvalidSourceException(
                f"Expected TableauConnection, but got {connection}"
            )
        return cls(config, metadata_config)

    def get_dashboards_list(self) -> Optional[List[TableauDashboard]]:
        """
        Get List of all dashboards (workbooks collected during `prepare`)
        """
        return self.workbooks

    def get_dashboard_name(self, dashboard: TableauDashboard) -> str:
        """
        Get Dashboard Name
        """
        return dashboard.name

    def get_dashboard_details(self, dashboard: TableauDashboard) -> TableauDashboard:
        """
        Get Dashboard Details. Returning the identity here as we prepare everything
        during the `prepare` stage
        """
        return dashboard

    def get_owner_details(
        self, dashboard_details: TableauDashboard
    ) -> Optional[EntityReference]:
        """Get dashboard owner by matching the Tableau owner's email against
        OpenMetadata users.

        Args:
            dashboard_details: workbook whose owner is looked up
        Returns:
            Optional[EntityReference]: the matching user, or None
        """
        if dashboard_details.owner.email:
            user = self.metadata.get_user_by_email(dashboard_details.owner.email)
            if user:
                return EntityReference(id=user.id.__root__, type="user")
        return None

    def yield_tag(self, *_, **__) -> OMetaTagAndClassification:
        """
        Fetch Dashboard Tags: emit every collected tag under the Tableau
        classification.
        """
        for tag in self.tags:
            classification = OMetaTagAndClassification(
                classification_request=CreateClassificationRequest(
                    name=TABLEAU_TAG_CATEGORY,
                    # typo fix: "associates" -> "associated"
                    description="Tags associated with tableau entities",
                ),
                tag_request=CreateTagRequest(
                    classification=TABLEAU_TAG_CATEGORY,
                    name=tag,
                    description="Tableau Tag",
                ),
            )
            yield classification
            logger.info(f"Classification {TABLEAU_TAG_CATEGORY}, Tag {tag} Ingested")

    def get_tag_labels(self, tags: List[str]) -> Optional[List[TagLabel]]:
        """Map plain tag names to TagLabels under the Tableau classification."""
        return [
            TagLabel(
                tagFQN=fqn.build(
                    self.metadata,
                    Tag,
                    classification_name=TABLEAU_TAG_CATEGORY,
                    tag_name=tag,
                ),
                labelType="Automated",
                state="Suggested",
                source="Tag",
            )
            for tag in tags
        ]

    def yield_dashboard(
        self, dashboard_details: TableauDashboard
    ) -> Iterable[CreateDashboardRequest]:
        """
        Method to Get Dashboard Entity
        """
        # webpage_url carries the site-relative path in its URL fragment
        workbook_url = urlparse(dashboard_details.webpage_url).fragment
        yield CreateDashboardRequest(
            name=dashboard_details.id,
            displayName=dashboard_details.name,
            description=dashboard_details.description,
            owner=self.get_owner_details(dashboard_details),
            charts=[
                EntityReference(id=chart.id.__root__, type="chart")
                for chart in self.context.charts
            ],
            tags=self.get_tag_labels(dashboard_details.tags),
            dashboardUrl=f"#{workbook_url}",
            service=EntityReference(
                id=self.context.dashboard_service.id.__root__, type="dashboardService"
            ),
        )

    def yield_dashboard_lineage_details(
        self, dashboard_details: TableauDashboard, db_service_name: str
    ) -> Optional[Iterable[AddLineageRequest]]:
        """
        Get lineage between dashboard and data sources
        """
        data_source = next(
            (
                data_source
                for data_source in self.workbook_datasources or []
                if data_source.get("luid") == dashboard_details.id
            ),
            None,
        )
        # No GraphQL datasource info for this workbook: nothing to link.
        # (Previously data_source.get(...) raised AttributeError on None and
        # was logged as a misleading lineage error.)
        if not data_source:
            return
        to_fqn = fqn.build(
            self.metadata,
            entity_type=LineageDashboard,
            service_name=self.config.serviceName,
            dashboard_name=dashboard_details.id,
        )
        to_entity = self.metadata.get_by_name(
            entity=LineageDashboard,
            fqn=to_fqn,
        )
        try:
            upstream_tables = data_source.get("upstreamTables") or []
            for upstream_table in upstream_tables:
                database_schema_table = fqn.split_table_name(upstream_table.get("name"))
                from_fqn = fqn.build(
                    self.metadata,
                    entity_type=Table,
                    service_name=db_service_name,
                    schema_name=database_schema_table.get(
                        "database_schema", upstream_table.get("schema")
                    ),
                    table_name=database_schema_table.get("table"),
                    database_name=database_schema_table.get("database"),
                )
                from_entity = self.metadata.get_by_name(
                    entity=Table,
                    fqn=from_fqn,
                )
                yield self._get_add_lineage_request(
                    to_entity=to_entity, from_entity=from_entity
                )
        except Exception as err:  # IndexError is already an Exception subclass
            logger.debug(traceback.format_exc())
            logger.error(
                f"Error to yield dashboard lineage details for DB service name [{db_service_name}]: {err}"
            )

    def yield_dashboard_chart(
        self, dashboard_details: TableauDashboard
    ) -> Optional[Iterable[CreateChartRequest]]:
        """
        Method to fetch charts linked to dashboard
        """
        for chart in dashboard_details.charts or []:
            try:
                if filter_by_chart(self.source_config.chartFilterPattern, chart.name):
                    self.status.filter(chart.name, "Chart Pattern not allowed")
                    continue
                # Tableau view URLs use the workbook name with spaces removed
                workbook_name = dashboard_details.name.replace(" ", "")
                site_url = (
                    f"site/{self.service_connection.siteUrl}/"
                    if self.service_connection.siteUrl
                    else ""
                )
                chart_url = (
                    f"#/{site_url}" f"views/{workbook_name}/" f"{chart.view_url_name}"
                )
                yield CreateChartRequest(
                    name=chart.id,
                    displayName=chart.name,
                    chartType=get_standard_chart_type(chart.sheet_type),
                    chartUrl=chart_url,
                    tags=self.get_tag_labels(chart.tags),
                    service=EntityReference(
                        id=self.context.dashboard_service.id.__root__,
                        type="dashboardService",
                    ),
                )
                self.status.scanned(chart.id)
            except Exception as exc:
                logger.debug(traceback.format_exc())
                logger.warning(f"Error to yield dashboard chart [{chart}]: {exc}")

    def close(self):
        """Sign out of the Tableau session; connection failures are logged
        and otherwise ignored."""
        try:
            self.client.sign_out()
        except ConnectionError as err:
            logger.debug(f"Error closing connection - {err}")
|
from django.contrib.auth.forms import AuthenticationForm
from django import forms
from models import *
from django.db import models
from django.forms import ModelForm
# If you don't do this you cannot use Bootstrap CSS
class LoginForm(AuthenticationForm):
    """Authentication form that re-declares the username/password fields so
    the widgets carry Bootstrap's 'form-control' CSS class."""
    username = forms.CharField(label="Usuario", max_length=30,
                               widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'username'}))
    password = forms.CharField(label="Password", max_length=30,
                               widget=forms.PasswordInput(attrs={'class': 'form-control', 'name': 'password'}))
class FiltrosPersonasForm(forms.Form):
    """Search/filter form for the people list.

    Every field is optional; each filled-in field is expected to narrow the
    result set in the view that consumes this form.
    """
    # English-level choices (stored as '1'..'3'; '' = no filter)
    MY_CHOICES = (
        ('', '-----'),
        ('1', 'Bajo'),
        ('2', 'Medio'),
        ('3', 'Alto'),
    )
    # Sex choices ('' = no filter)
    SEXO_CHOICES = (
        ('', '-----'),
        ('1', 'Hombre'),
        ('2', 'Mujer'),
    )
    #FILTROS
    nombre= forms.CharField(widget=forms.widgets.TextInput(attrs={'class':'form-control'}), required=False)
    apellidos= forms.CharField(widget=forms.widgets.TextInput(attrs={'class':'form-control'}), required=False)
    ingles= forms.CharField(widget=forms.Select(choices=MY_CHOICES, attrs={'class':'form-control'}), required= False)
    sexo= forms.CharField(widget=forms.Select(choices=SEXO_CHOICES, attrs={'class':'form-control'}), required= False)
    # Free-text age filter; the placeholder suggests the view parses
    # expressions like ">25", "<23", "17-23" or "23" — confirm in the view.
    edad= forms.CharField(widget=forms.widgets.TextInput(attrs={'class':'form-control','placeholder':'ejemplos de opciones: >25, <23, 17-23, 23'}), required=False)
    empresa = forms.ModelMultipleChoiceField(label="ETT",queryset=Empresa.objects.all().order_by('nombre'),widget=forms.SelectMultiple(attrs={'class':'form-control'}),required=False)
    activo= forms.BooleanField(required=False,initial=False, label='Activo')
class PersonasForm(ModelForm):
    """Create/edit form for `Persona`, re-declaring the model fields so the
    widgets carry Bootstrap classes and Spanish labels."""
    # English-level choices (stored as '1'..'3')
    MY_CHOICES = (
        ('', '-----'),
        ('1', 'Bajo'),
        ('2', 'Medio'),
        ('3', 'Alto'),
    )
    SEXO_CHOICES = (
        ('', '-----'),
        ('1', 'Hombre'),
        ('2', 'Mujer'),
    )
    #Campos
    nombre= forms.CharField(widget=forms.widgets.TextInput(attrs={'class':'form-control'}), required=True)
    apellidos= forms.CharField(widget=forms.widgets.TextInput(attrs={'class':'form-control'}), required=True)
    telefono= forms.CharField(widget=forms.widgets.TextInput(attrs={'class':'form-control'}), required=True)
    empresa= forms.ModelChoiceField(queryset=Empresa.objects.all().order_by('nombre'), widget=forms.Select(attrs={'class':'form-control'}),required=False)
    english_level= forms.CharField(widget=forms.Select(choices=MY_CHOICES, attrs={'class':'form-control'}), required= True,label='Nivel de Ingles')
    sexo= forms.CharField(widget=forms.Select(choices=SEXO_CHOICES, attrs={'class':'form-control'}), required= True,label='Sexo')
    is_active= forms.BooleanField(widget=forms.CheckboxInput(),required=False,label='Activo')
    date_born= forms.DateField(widget=forms.DateInput(format=('%Y-%m-%d'),attrs={'class':'form-control','placeholder':'Rellena una fecha con formato yyyy-mm-dd'}),required=True)
    observaciones= forms.CharField(widget=forms.widgets.Textarea(attrs={'class':'form-control'}), required=False)
    # hidden pk so the same form can be used for updates
    id = forms.CharField(widget=forms.HiddenInput(), required=False)

    class Meta:
        model = Persona
        fields = ('nombre', 'apellidos', 'telefono', 'empresa', 'english_level', 'sexo','is_active', 'date_born', 'observaciones', 'id')
|
#! /usr/bin/env python3
'''
A module providing a test class that tests the API independent of the actual underlying database,
'''
__author__ = 'Russel Winder'
__date__ = '2012-08-20'
__version__ = '1.2'
__copyright__ = 'Copyright © 2010–2012 Russel Winder'
__licence__ = 'GNU Public Licence (GPL) v3'
from personRecord import PersonRecord
from unittest import TestCase
# These values will be set by the importing code. It's effectively dependency injection so that features
# used here are provided by the importing module.
url = None  # database URL/connection string, injected before the tests run
ConnectionClass = None  # concrete Connection implementation under test, injected likewise
class TestContacts(TestCase):
    '''
    This is an integration test for a Connection class. The functions are not easily amenable to unit
    testing using mocks. The point here is that we want more functionality/requirements-oriented testing
    rather than implementation-oriented testing. This is more behaviour-driven rather than test-driven.

    There is an assumption of the existence of a getConnectionInstance function returning a connection
    instance.
    '''

    # Shared fixture records; tests must not mutate them.
    personA = PersonRecord('Winder', 'Russel', '41 Buckmaster Road, London SW11 1EN', '020 7585 2200', '07770 465 077')
    personB = PersonRecord('Winder', 'Geri', '41 Buckmaster Road, London SW11 1EN', '020 7585 2200', '')

    def setUp(self):
        # Since each test is self-contained, indeed there must not be any data coupling between tests, we
        # can use an in-memory SQLite3 database, thereby speeding up the tests.
        self.connection = ConnectionClass(url)
        self.connection.initializeDatabase()

    def tearDown(self):
        # Close the per-test connection so each test starts from scratch.
        self.connection.close()

    def test_emptyDatabaseHasNoEntries(self):
        self.assertEqual((), self.connection.getAllEntries())

    def test_addOnePerson(self):
        self.connection.addEntry(TestContacts.personA)
        self.assertEqual((TestContacts.personA,), self.connection.getAllEntries())

    def test_addTwoPeople(self):
        self.connection.addEntry(TestContacts.personA)
        self.connection.addEntry(TestContacts.personB)
        self.assertEqual((TestContacts.personA, TestContacts.personB), self.connection.getAllEntries())

    # Deletion can match on lastname only, firstname only, or both.
    def test_addOnePersonAndRemoveThemByLastname(self):
        self.connection.addEntry(TestContacts.personA)
        self.connection.deleteEntry(PersonRecord(TestContacts.personA.lastname,))
        self.assertEqual((), self.connection.getAllEntries())

    def test_addOnePersonAndRemoveThemByFirstname(self):
        self.connection.addEntry(TestContacts.personA)
        self.connection.deleteEntry(PersonRecord('', TestContacts.personA.firstname))
        self.assertEqual((), self.connection.getAllEntries())

    def test_addOnePersonAndRemoveThemByLastnameAndFirstname(self):
        self.connection.addEntry(TestContacts.personA)
        self.connection.deleteEntry(PersonRecord(TestContacts.personA.lastname, TestContacts.personA.firstname))
        self.assertEqual((), self.connection.getAllEntries())

    # Lookup mirrors deletion: lastname, firstname, or both.
    def test_addTwoPeopleAndGetByLastname(self):
        self.connection.addEntry(TestContacts.personA)
        self.connection.addEntry(TestContacts.personB)
        self.assertEqual((TestContacts.personA, TestContacts.personB), self.connection.getEntry(PersonRecord('Winder',)))

    def test_addTwoPeopleAndGetByFirstname(self):
        self.connection.addEntry(TestContacts.personA)
        self.connection.addEntry(TestContacts.personB)
        self.assertEqual((TestContacts.personA,), self.connection.getEntry(PersonRecord('', 'Russel')))

    def test_addTwoPeopleAndGetByLastnameAndFirstname(self):
        self.connection.addEntry(TestContacts.personA)
        self.connection.addEntry(TestContacts.personB)
        self.assertEqual((TestContacts.personA,), self.connection.getEntry(PersonRecord('Winder', 'Russel')))
|
from rest_framework import serializers
from .models import order
from products.models import product
from products.serializer import productSerializer
class oderSerializer(serializers.ModelSerializer):
    """Serializer for the `order` model exposing every model field except
    `seen`.

    NOTE(review): the class name looks like a typo for `orderSerializer`;
    renaming would break existing importers, so it is left as-is.
    """
    #orderProduct=productSerializer(many=True)

    class Meta:
        exclude=('seen',)
        model=order
|
from app1.models import *
from app1.util.utils import *
def delStudent(request):
    '''
    Delete a student by student number.

    URL:
        http://127.0.0.1:8000/app6/delStudent?stid=2019003
    POST body (JSON):
        {"data": {"stid": <student number>}}
    Returns:
        showJsonresult(...) with the delete() result on success,
        showJsonerror(...) with msg/err_num on failure or non-POST.
    '''
    # Local import: the module relies on star imports, so make json explicit.
    import json
    try:
        if request.method == 'POST':
            studata = json.loads(request.body)
            data = studata["data"]
            cid = data['stid']
            # cid=request.GET.get("stid")
            # Delete directly via the queryset; the old code also built an
            # unsaved Student() instance first, which had no effect.
            result = Student.objects.filter(sno=cid).delete()
            return showJsonresult(result)
        # Non-POST requests used to fall through and return None (a server
        # error in Django); report an explicit error payload instead.
        return showJsonerror({'msg': 'POST required', 'err_num': 1})
    except Exception as e:
        response = {}
        response['msg'] = str(e)
        response['err_num'] = 1
        return showJsonerror(response)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.