text stringlengths 38 1.54M |
|---|
import aocread
import numpy as np
# Raw claim lines from the puzzle input, e.g. '#1 @ 1,3: 4x4'.
box_strings = aocread.read_file('input03')
class Box:
    """A rectangular claim from the puzzle input.

    All constructor arguments arrive as strings and are converted to ints.
    Edges are inclusive: ``right``/``bottom`` are the last covered column
    and row.  The one-based input id is stored zero-based.
    """
    def __init__(self, id, left, top, width, height):
        left, top = int(left), int(top)
        width, height = int(width), int(height)
        self.id = int(id) - 1
        self.left = left
        self.top = top
        self.width = width
        self.height = height
        # Inclusive far edges derived from origin + extent.
        self.right = left + width - 1
        self.bottom = top + height - 1
def parse(box_string):
    """Parse one claim line such as ``#1 @ 1,3: 4x4`` into a Box."""
    header, _, rest = box_string.partition('@')
    origin_part, _, dim_part = rest.partition(':')
    id = header.strip().lstrip('#')
    origin = origin_part.strip()
    dimension = dim_part.strip()
    left = origin.split(',')[0]
    top = origin.split(',')[1]
    width = dimension.split('x')[0]
    height = dimension.split('x')[1]
    return Box(id, left, top, width, height)
# Parse every input line into a Box; overlaps[i] will count how many
# other boxes the i-th box intersects.
boxes = []
for box_string in box_strings:
    box = parse(box_string)
    boxes.append(box)
num_boxes = len(boxes)
overlaps = [0] * num_boxes
def is_overlap(boxa, boxb):
    """Return True when the two inclusive rectangles share at least one cell."""
    # Two axis-aligned rectangles intersect unless one lies entirely to
    # one side of the other on either axis.
    horizontal = boxa.left <= boxb.right and boxb.left <= boxa.right
    vertical = boxa.top <= boxb.bottom and boxb.top <= boxa.bottom
    return horizontal and vertical
# Count pairwise overlaps; the answer is the claim overlapping nothing.
# BUG FIX: xrange() exists only on Python 2 and raised NameError under
# Python 3 (the file already uses print() call syntax); range() behaves
# identically here on both versions.
for boxa in range(num_boxes):
    for boxb in range(boxa + 1, num_boxes):
        if is_overlap(boxes[boxa], boxes[boxb]):
            overlaps[boxa] += 1
            overlaps[boxb] += 1
for x in range(num_boxes):
    if overlaps[x] == 0:
        # Ids were stored zero-based; report the original one-based id.
        print(x + 1)
        break
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 9 2015
@author: Oliver Lemke
"""
import numpy as np
import numpy.matlib as M
from numpy.matlib import rand,zeros,ones,empty,eye
import time
import sys
import argparse
import pickle
import os
# Define ismember function
def ismember(a, b):
    """For each element of *a*, return the index of its first occurrence
    in *b*, or -1 when the element does not appear in *b*."""
    first_index = {}
    for idx, val in enumerate(b):
        # setdefault keeps the index of the FIRST occurrence only.
        first_index.setdefault(val, idx)
    return [first_index.get(item, -1) for item in a]
# Read Input from command line with options
# Read input from the command line.
# NOTE(review): the bare ``except`` also intercepts the SystemExit raised
# by argparse's own error handling, replacing its message with this one.
try:
    parser=argparse.ArgumentParser()
    parser.add_argument('-Cut', '--Cutoff', type=float, help='Cutoff criterion (R)')
    parser.add_argument('-Sim', '--Similarity', type=int, help='Criterion of similarity (N)')
    parser.add_argument('-In', '--Input', type=str, help='Input: distance matrix (default=dist.npy)', default='dist.npy')
    parser.add_argument('-Nl', '--Clustersize', type=int, help='Minimal Clustersize (M)(default=2)', default=2)
    parser.add_argument('-Out', '--Output', type=str, help='Output: Summary-file (default=Summary.txt)', default='Summary.txt')
    parser.add_argument('-Clf', '--Clusterfile', type=str, help='Output: Cluster-file (.p-file) (default=Cluster.p)', default='Cluster.p')
    parser.add_argument('-Csf', '--Clustersizefile', type=str, help='Output: Clustersize-file (.p-file) (default=Clustersize.p)', default='Clustersize.p' )
    parser.add_argument('-Spt', '--Softwarepath', type=str, help='Input: Software path (default=.)', default='.' )
    parser.add_argument('-Screen', '--Screen', type=str, help='Clustering Screen (YES or NO) (default=NO)', default='NO' )
    parser.add_argument('-Ntr', '--NumberTra', type=int, help='Number of trajectories (replica) (default=1)', default=1)
    args=parser.parse_args()
except:
    print ('Error: False number or type of input arguments')
    sys.exit(1)
# Fixed identifiers written into the summary file below.
Algo='CNN'
Var='Yes'
# Re-convert and validate each argument; every failure exits with status 1.
try:
    Auto = str(args.Screen)
except:
    print ('Error: Auto missing or type of Auto incorrect')
    sys.exit(1)
try:
    Ntr = int(args.NumberTra)
except:
    # NOTE(review): message copied from the Similarity handler below --
    # probably meant to mention the number of trajectories.
    print ('Error: Similatiry criterion missing or type of similarity criterion incorrect')
    sys.exit(1)
try:
    rc = float(args.Cutoff)
except:
    print ('Error: Cutoff missing or type of cutoff incorrect')
    sys.exit(1)
try:
    nnnc = int(args.Similarity)
except:
    print ('Error: Similatiry criterion missing or type of similarity criterion incorrect')
    sys.exit(1)
# Check for errors
if Auto!='YES' and Auto!='NO':
    print ('Error: Choice of Auto mode incorrect')
    sys.exit(1)
if Ntr <= 0:
    print ('Error: Number of trajectories must be at least 1')
    sys.exit(1)
if rc <=0:
    print ('Error: Definition of the cutoff incorrect')
    sys.exit(1)
if nnnc <= 0:
    print ('Error: Definition of the nnnc incorrect')
    sys.exit(1)
try:
    Nl=int(args.Clustersize)
except:
    # NOTE(review): unlike the handlers above, this one does not call
    # sys.exit(1), so Nl may be undefined further down -- confirm intent.
    print ('Error: Clustersize number must be an integer')
# Directory containing this script; used to locate the algorithm files.
path=str(os.path.dirname(os.path.realpath(__file__)))
# Run cluster algorithm
# NOTE(review): the exec'ed script is expected to define (at least)
# Clust, N, start, startl, endel and distload, all used further down --
# confirm against Algorithms_large/CNN_yes.py.
try:
    exec(open(path+'/Algorithms_large/CNN_yes.py').read())
except:
    print ('Error: Algorithm-file not found or no distance matrix found')
    sys.exit(1)
Software=str(args.Softwarepath)
print ('Clustering finished')
# Stop clustering time II
# NOTE(review): time.clock() was removed in Python 3.8; presumably the
# exec'ed file sets `start` with the same clock -- confirm before porting.
ende=time.clock()
# Time calculation
timet=ende-start
#timepp=endepp-startpp
timel=endel-startl
#timecp=timet+timepp
timetot=timet+timel
print ('Time for the clustering process: %.2f s' %(timet))
# Determine number of clusters and clustersize
# Cs row 0 holds the size of each cluster (>1 members only), row 1 its index.
Cs=np.zeros((2,N))
for a in range(0,N):
    Ca=Clust[a]
    if np.int_(np.shape(Ca)) > 1:
        Cs[0,a]=np.int_(np.shape(Ca))
        Cs[1,a]=a
# Nc: number of non-empty clusters; Cmax: size of the largest one.
(a1,Nc)=np.int_(np.shape(np.nonzero(Cs[0])))
Cmax=np.int_(np.max(Cs[0]))
# Ncl: number of clusters with at least Nl members.
Ncl= np.count_nonzero(np.extract(Cs[0,:]>=Nl,Cs))
# Percentage of the largest cluster
Pc=float(Cmax)/float(N)
# Determine number of noise points
Nn=float(N)-np.sum(Cs[0,:])
Pcn= 1-(np.sum(Cs[0,:])/float(N))
# Print Outcome
Ab='Parameters: rc = %.2f, nnnc = %d' %(rc, nnnc)
#print Ab
print ('%d Cluster(s) found' %(Nc))
print ('%d Cluster(s) found that have at least %d Members' %(Ncl, Nl))
print ('Largest cluster found: %d (Percentage: %.2f)' %(Cmax, Pc))
print ('Number of datapoints declared as noise: %d (Percentage: %.2f)' %(Nn, Pcn))
# Write output-files
summsave=str(args.Output)
Clf=str(args.Clusterfile)
Csf=str(args.Clustersizefile)
pickle.dump(Clust, open(Clf, 'wb')) # Clusterlist
pickle.dump(Cs, open(Csf, 'wb')) # Clustersize
output=open(summsave,'w')
output.write('Summary:\n\n')
output.write('**********************\n\n')
output.write('Settings:\n\n')
output.write('Algorithm used: %s\n' %(Algo))
if Algo=='CNN' or Algo=='JP':
output.write('Special criterium: %s\n' %(Var))
output.write('Number of datapoints %d\n' %(N))
output.write('%s\n\n' %(Ab))
output.write('**********************\n\n')
output.write('Results:\n\n')
output.write('Time for the loading of the distance matrix: %.2f s\n' %(timel))
output.write('Time for the clustering process: %.2f s\n' %(timet))
output.write('Total time: %.2f s\n' %(timetot))
output.write('%d Cluster(s) found\n' %(Nc))
output.write('%d Cluster(s) found that have at least %d Members\n' %(Ncl, Nl))
output.write('largest cluster found: %d (Percentage: %.2f)\n' %(Cmax, Pc))
output.write('Number of datapoints declared as noise: %d (Percentage: %.2f)\n\n' %(Nn, Pcn))
output.write('**********************\n\n')
output.close()
output=open('02_Isolate.sh','w')
output.write('#!/bin/bash \n')
output.write('\n')
output.write('# Input \n')
output.write('\n')
output.write('Path=. \n')
output.write('Software=%s \n' %Software)
output.write('Input=${Path} \n')
output.write('Output=${Path} \n')
output.write('\n')
output.write('Inc=%s \n' %Clf)
output.write('Ins=%s \n' %Csf)
output.write('Nci=%d \n' %Ncl)
output.write('Out=frames.ndx \n')
output.write('\n')
output.write('Plo=NO \n')
output.write('Ind=%s \n' %distload)
output.write('\n')
output.write('# Execution')
output.write('\n')
output.write('python ${Software}/02_Isolate_cluster.py -Inc ${Input}/${Inc} -Ins ${Input}/${Ins} -Nci ${Nci} -Out ${Output}/${Out} -Plo ${Plo} -Ind ${Ind}')
output.close()
output=open('04_Merge_all.sh','w')
output.write('#!/bin/bash \n')
output.write('\n')
output.write('# Input \n')
output.write('\n')
output.write('Path=. \n')
output.write('Software=%s \n' %Software)
output.write('Input=${Path} \n')
if Auto == "YES":
output.write('Input2=${Path}/../../.. \n')
if Auto == "NO":
output.write('Input2=${Path} \n')
output.write('Output=${Path} \n')
output.write('\n')
output.write('Cut=%.6f \n' %rc)
output.write('Sim=%d \n' %nnnc)
output.write('Nc=%d \n' %Ncl)
output.write('\n')
output.write('Fra=frames.ndx\n')
output.write('\n')
output.write('# Execution\n')
output.write('\n')
output.write('for i in {0..%d}\n' %(Ntr-1))
output.write('do\n')
output.write('\tOut=Trace_${i}.npy\n')
output.write('\tMap=dist_comp${i}.npy\n')
output.write('\tpython ${Software}/04_Merge.py -Nc ${Nc} -Cut ${Cut} -Sim ${Sim} -Fra ${Input}/${Fra} -Map ${Input2}/${Map} -Out ${Output}/${Out}\n')
output.write('done')
output.close()
output=open('04_Distmat.sh','w')
output.write('#!/bin/bash \n')
output.write('\n')
output.write('# Input \n')
output.write('\n')
output.write('Path=. \n')
output.write('Software=%s \n' %Software)
output.write('Input=${Path} \n')
if Auto == "YES":
output.write('Input2=${Path}/../../.. \n')
if Auto == "NO":
output.write('Input2=${Path} \n')
output.write('Output=${Path} \n')
output.write('\n')
output.write('Inc=%s \n' %Clf)
output.write('Ins=%s \n' %Csf)
output.write('Nci=1 \n')
output.write('\n')
output.write('Ind=dist.npy \n')
output.write('\n')
output.write('Out=dist \n')
output.write('\n')
output.write('# Execution')
output.write('\n')
output.write('python ${Software}/04_Isolate_distmat.py -Inc ${Input}/${Inc} -Ins ${Input}/${Ins} -Nci ${Nci} -Ind ${Input2}/${Ind} -Out ${Output}/${Out} \n')
output.close()
output=open('02_Translate.sh','w')
output.write('#!/bin/bash \n')
output.write('\n')
output.write('# Input \n')
output.write('\n')
output.write('Path=. \n')
output.write('Software=%s \n' %Software)
output.write('Input=${Path} \n')
output.write('Input2=${Path} \n')
output.write('Output=${Path} \n')
output.write('\n')
output.write('Fra= # Otimized Cluster\n')
output.write('Inf= # Name of new frame-File \n')
output.write('Inr=frames.ndx \n')
output.write('\n')
output.write('Out= # Name of translated File \n')
output.write('\n')
output.write('# Execution')
output.write('\n')
output.write('python ${Software}/02_Translate_Hierarchical.py -Fra ${Fra} -Inf ${Input}/${Inf} -Inr ${Input2}/${Inr} -Out ${Output}/${Out}\n')
output.close()
output=open('03_Evaluate_2D.sh','w')
output.write('#!/bin/bash \n')
output.write('\n')
output.write('# Input \n')
output.write('\n')
output.write('Path=. \n')
output.write('Software=%s \n' %Software)
output.write('Input=${Path} \n')
if Auto == "YES":
output.write('Input2=${Path}/../../.. \n')
if Auto == "NO":
output.write('Input2=${Path} \n')
output.write('Output=${Path} \n')
output.write('\n')
output.write('Inf=frames.ndx \n')
output.write('Ref=reduced_data.npy \n')
output.write('\n')
output.write('Fig=Scatter.png \n')
output.write('Cln=50 \n')
output.write('\n')
output.write('# Execution')
output.write('\n')
output.write('python ${Software}/03_Evaluate_cluster_2D.py -Inf ${Input}/${Inf} -Ref ${Input2}/${Ref} -Fig ${Output}/${Fig} -Cln ${Cln}\n')
output.close()
print ('Output-files written')
|
from django.contrib import admin
from django.core.urlresolvers import reverse
from django import forms
from django.utils.safestring import mark_safe
from utils import utils
import models
class MyThumbnailWidget(forms.TextInput):
    """Form widget that renders fixed placeholder markup instead of an input."""
    def render(self, name, value, attrs=None):
        # mark_safe prevents Django from HTML-escaping the markup.
        return mark_safe(u'<h1>blah</h1>')
class PhotoAdmin(admin.ModelAdmin):
    """Admin configuration for the Photo model.

    NOTE(review): list_display/list_filter names must match fields or
    methods on models.Photo -- not visible from this file.
    """
    list_display = ('photo_filename', 'photo_size')
    list_filter = ('moderation_status', 'region', 'organization', 'taken_season')
    readonly_fields = ('photo', 'thumbnail')
    def formfield_for_dbfield(self, db_field, **kwargs):
        """Swap in the placeholder widget for the thumbnail_url field."""
        #TODO
        #fix thumbnail and photo to show images
        if db_field.name == 'thumbnail_url':
            kwargs['widget'] = MyThumbnailWidget
        return super(PhotoAdmin,self).formfield_for_dbfield(db_field,**kwargs)
# Register all models with the admin; only Photo gets a custom ModelAdmin.
admin.site.register(models.Organization)
admin.site.register(models.Region)
admin.site.register(models.Photo, PhotoAdmin)
admin.site.register(models.UserProfile)
|
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
class Heater(object, metaclass=ABCMeta):
    """Abstract interface for a heater device.

    Subclasses must implement on(), off() and is_hot().

    BUG FIX: the original used the Python-2 style ``__metaclass__ =
    ABCMeta`` class attribute, which Python 3 silently ignores, so the
    abstract methods were never enforced.  The ``metaclass=`` keyword
    restores enforcement (instantiating Heater raises TypeError).
    """

    @abstractmethod
    def on(self):
        """Switch the heater on."""
        pass

    @abstractmethod
    def off(self):
        """Switch the heater off."""
        pass

    @abstractmethod
    def is_hot(self):
        """Return whether the heater is currently hot."""
        pass
|
import json
import redis
import requests
from pymongo import MongoClient
import pymongo
class Data:
    """Fetches an EOS account's action history and mirrors it into MongoDB."""
    def __init__(self, account_name):
        # History API endpoint; '{}' is filled with the API sub-path.
        self.url = "https://proxy.eosnode.tools/v1/{}"
        # self.url = "https://api-kylin.eosasia.one/v1/{}"
        self.s = requests.Session()
        self.s.headers = {
            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"
        }
        self.account_name = account_name
        # NOTE(review): the intermediate `db =` binding is redundant.
        self.m = db = MongoClient()["jacks"]
    def getAction(self, pos=0, offset=500):
        """Fetch up to *offset* actions starting at sequence *pos*.

        Always returns a dict with an 'actions' key ([] when empty/failed).
        """
        print("get ", pos)
        data = self.postData(
            self.url.format("history/get_actions"),
            data={"pos": pos, "offset": offset, "account_name": self.account_name},
        )
        if data.get("actions"):
            return data
        return {"actions": []}
    def getdata(self, lastPos=0, offset=100):
        """Page through the action history, inserting each page into Mongo.

        Stops when a short (final) page or an empty page is returned.
        """
        while True:
            _data = self.getAction(pos=lastPos, offset=offset)["actions"]
            if not _data:
                return
            _lastPos = _data[-1]["account_action_seq"] + 1
            if _lastPos < lastPos + offset:  # lastPos is from the previous round; _lastPos is one past the last action actually received
                self.mongoclient.insert_many(_data)
                return
            else:
                self.mongoclient.insert_many(_data)
                lastPos = _lastPos
    def postData(self, url, data):
        """POST *data* as JSON and return the decoded JSON response."""
        return self.s.post(url, json=data).json()
    def pickleData(self, kname, data):
        # NOTE(review): self.r is never assigned anywhere in this class,
        # so calling this raises AttributeError -- presumably a redis
        # client was meant to be created in __init__ (redis is imported).
        self.r.set(kname, json.dumps(data))
    def run(self, dbclean=False, offset=100, lastpos=0):
        """Resume syncing from the highest stored sequence number.

        NOTE(review): the dbclean and lastpos parameters are effectively
        unused -- lastpos is always overwritten below.
        """
        kname = "{}_alldata".format(self.account_name)
        d = self.m[kname]
        self.mongoclient = d
        # Highest account_action_seq already stored, if any.
        a = (
            self.mongoclient.find({}, {"account_action_seq": 1})
            .sort("account_action_seq", pymongo.DESCENDING)
            .limit(1)
        )
        # NOTE(review): Cursor.count() is deprecated/removed in newer
        # pymongo versions -- confirm the installed version.
        if a.count():
            lastpos = a[0]["account_action_seq"]
        else:
            lastpos = 0
        self.getdata(offset=offset, lastPos=lastpos)
import os, sys, time
def main():
    """Run one sync pass, then re-exec this script so it loops forever."""
    print("AutoRes is starting")
    c = Data("gaojin.game")
    c.run(offset=500)
    executable = sys.executable
    args = sys.argv[:]
    print(args)
    # Prepend the interpreter so execvp restarts this same script.
    args.insert(0, sys.executable)
    time.sleep(1)
    print("Respawning")
    os.execvp(executable, args)
if __name__ == "__main__":
    #
    main()
|
from django.shortcuts import get_object_or_404,render
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseRedirect
from django.urls import reverse
from django.db.models import Q, ProtectedError
from .models import Ingredient, MeasurementUnit
from .forms import IngredientForm
def index(request):
    """Render the full ingredient list ordered by article number."""
    return render(request, 'ingredient/index.html', {'ingredient_list': Ingredient.objects.order_by('article_number')})
def filter(request):
    """Filter ingredients by name substring or exact article number.

    NOTE(review): shadows the builtin ``filter``; the name is likely
    referenced by the URLconf, so renaming would change the interface.
    """
    strfilter = request.GET['ingredient-filter']
    try:
        strfilter = int(strfilter)
        # Numeric query: match the name OR the exact article number.
        # NOTE(review): unlike the text branch below, this result set is
        # not ordered -- confirm whether that is intentional.
        filtered = Ingredient.objects.filter(
            Q(ingredient_name__icontains=strfilter) |
            Q(article_number__exact=strfilter)
        )
    except Exception:
        # Non-numeric query: plain case-insensitive name search.
        filtered = Ingredient.objects.filter(ingredient_name__icontains=strfilter).order_by('article_number')
    return render(request, 'ingredient/index.html', {'ingredient_list': filtered, 'ingredient_filter': strfilter})
def edit(request, article_number):
    """Show the edit form for an ingredient; a blank form for unknown keys."""
    try:
        ingredient = Ingredient.objects.get(pk=article_number)
    except Ingredient.DoesNotExist:
        # Unknown key: present an empty, unsaved ingredient instead.
        ingredient = Ingredient()
    form = IngredientForm(instance=ingredient)
    return render(request, 'ingredient/edit.html',
        {'article_number': article_number,
        'form':form })
def delete(request, article_number):
    """Delete an ingredient, reporting an error when it is still referenced."""
    try:
        Ingredient.objects.get(pk=article_number).delete()
    except ProtectedError:
        # A recipe still references this ingredient; show the list with
        # an explanatory message instead of deleting.
        return render(request, 'ingredient/index.html', {
            'ingredient_list': Ingredient.objects.order_by('article_number'),
            'error_message': 'This ingredient is currently being used, try deleting the recipes first!'
        })
    return HttpResponseRedirect(reverse('ingredient:index', args=()))
def save(request, article_number):
    """Create or update an ingredient from the posted form.

    article_number == 0 means "create new"; otherwise update in place.
    NOTE(review): if the URLconf passes article_number as a string, the
    ``!= 0`` check is always true -- confirm an int converter is used.
    """
    try:
        form = IngredientForm(request.POST)
        if form.is_valid():
            formvalue = form.save(commit=False)
            if article_number != 0:
                # Update: keep the posted values under the existing key.
                ingredient = formvalue
                ingredient.article_number = article_number
            else:
                ingredient = Ingredient(
                    ingredient_name= formvalue.ingredient_name,
                    unit = formvalue.unit,
                    cost_per_unit = formvalue.cost_per_unit,
                    cost_amount = formvalue.cost_amount)
            ingredient.save()
        # NOTE(review): an invalid form falls through to the redirect in
        # the else-clause below without saving or reporting -- confirm.
    except Exception as e:
        return render(request, 'ingredient/edit.html', {
            'article_number': article_number,
            'form': form,
            'error_message': e,
        })
    else:
        # try-else: runs only when no exception was raised above.
        return HttpResponseRedirect(reverse('ingredient:index', args=()))
# Homework Lesson 3

# 1 - 1: start with a single favourite artist.
favorieten = ["Alison Wonderland"]
print(favorieten)
# Output = ['Alison Wonderland']

# 1 - 2: append a second artist.
favorieten.append("Lido")
print(favorieten)
# Output = ['Alison Wonderland', 'Lido']

# 1 - 3: replace the second entry.
favorieten[1] = "Boris Brejcha"
print(favorieten)
# Output = ['Alison Wonderland', 'Boris Brejcha']

# 2 - 1: print the spread |min - max| of each number list.
numberlist1 = [3, 7, -2, 12]
numberlist2 = [54, -32, 3, -88, 66]
for nums in (numberlist1, numberlist2):
    print(abs(min(nums) - max(nums)))
# Output = 14, then 154

# 3 - 1: occurrence count of each letter A, B, C.
letters = ('A', 'C', 'B', 'B', 'C', 'A', 'C', 'C', 'B')
lettersOccurrence = [letters.count(ch) for ch in "ABC"]
print(lettersOccurrence)
from PyQt5.QtCore import QSize, QSizeF, Qt
from PyQt5.QtGui import QImage, QPainter, QTransform
from PyQt5.QtWidgets import QWidget
from QPanda3D.QPanda3D_Keys_Translation import QPanda3D_Key_translation
from QPanda3D.QPanda3D_Modifiers_Translation import QPanda3D_Modifier_translation, QTimer
from direct.showbase.MessengerGlobal import messenger
from direct.task.TaskManagerGlobal import taskMgr
from panda3d.core import Texture
from utils.parsers import sceneGraphAnalyzerMeterParser
class QPanda3DSynchronizer(QTimer):
    """QTimer that drives both the Panda3D task manager and the Qt widget.

    Each timeout steps Panda3D once and schedules a repaint of the owning
    widget, keeping the two event loops in lock-step at roughly *FPS*.
    """
    def __init__(self, qPanda3DWidget, FPS=60):
        QTimer.__init__(self)
        self.qPanda3DWidget = qPanda3DWidget
        # Interval between ticks in milliseconds.
        # NOTE(review): 1000/FPS is a float on Python 3 while
        # QTimer.setInterval takes msec as int -- confirm Qt coerces it.
        dt = 1000 / FPS
        self.setInterval(dt)
        self.timeout.connect(self.tick)
    def tick(self):
        # Advance all Panda3D tasks one frame, then repaint the widget.
        taskMgr.step()
        self.qPanda3DWidget.update()
def get_panda_key_modifiers(evt):
    """Translate the Qt modifier flags on *evt* into Panda3D modifier names."""
    qt_mods = evt.modifiers()
    return [
        panda_mod
        for qt_mod, panda_mod in QPanda3D_Modifier_translation.items()
        # A flag is active when masking leaves it unchanged.
        if (qt_mods & qt_mod) == qt_mod
    ]
def get_panda_key_modifiers_prefix(evt):
    """Build the 'mod1-mod2-' prefix Panda3D expects in event names.

    Returns the empty string when no (non-None) modifiers are active.
    """
    active = [mod for mod in get_panda_key_modifiers(evt) if mod is not None]
    prefix = "-".join(active)
    return prefix + '-' if prefix else prefix
class PandaWidget(QWidget):
    """Qt widget hosting an offscreen Panda3D scene.

    The Panda3D frame is rendered into a texture and blitted onto the
    widget in paintEvent; Qt input events are forwarded to the world's
    camera controller and to Panda3D's messenger.
    """
    def __init__(self, panda3DWorld, label_info, parent=None, FPS=60, debug=False):
        QWidget.__init__(self, parent)
        # set fixed geometry
        self.panda3DWorld = panda3DWorld
        self.panda3DWorld.set_parent(self)
        self.label_info = label_info
        self.setFocusPolicy(Qt.StrongFocus)
        self.paintSurface = QPainter()
        # NOTE(review): self.rotate and self.out_image are never used in
        # this class -- possibly leftovers.
        self.rotate = QTransform()
        self.rotate.rotate(180)
        self.out_image = QImage()
        # Remember the initial film size so resizeEvent can scale it.
        size = self.panda3DWorld.cam.node().get_lens().get_film_size()
        self.initial_film_size = QSizeF(size.x, size.y)
        self.initial_size = self.size()
        # Drive taskMgr stepping + widget repaints at roughly FPS.
        self.synchronizer = QPanda3DSynchronizer(self, FPS)
        self.synchronizer.start()
        self.debug = debug
        self.update_sgam()
    def get_scene_graph_analyzer_meter(self):
        """Return the world's scene-graph-analyzer meter object."""
        return self.panda3DWorld.get_scene_graph_analyzer_meter()
    def update_sgam(self):
        """Refresh the info label from the scene-graph-analyzer meter."""
        text = sceneGraphAnalyzerMeterParser(self.get_scene_graph_analyzer_meter()).toText()
        self.label_info.setText(text)
    def mousePressEvent(self, event):
        # Mouse events are delegated to the world's camera controller.
        self.panda3DWorld.camera_controller.mouse_press(event.button(), event.x(), event.y())
    def mouseReleaseEvent(self, event):
        self.panda3DWorld.camera_controller.mouse_release(event.button(), event.x(), event.y())
    def mouseMoveEvent(self, event):
        # NOTE(review): during a move Qt usually reports button() as
        # NoButton -- confirm the controller expects that.
        self.panda3DWorld.camera_controller.mouse_move(event.button(), event.x(), event.y())
    def wheelEvent(self, evt):
        # Invert the wheel direction before passing it to the zoom handler.
        delta = -evt.angleDelta().y()
        self.panda3DWorld.camera_controller.zoom_event(delta)
    def keyPressEvent(self, evt):
        """Translate the Qt key (+modifiers) and emit it as a Panda3D event."""
        key = evt.key()
        try:
            k = "{}{}".format(get_panda_key_modifiers_prefix(evt), QPanda3D_Key_translation[key])
            if self.debug:
                print(k)
            messenger.send(k)
        except:
            # Key not present in the translation table.
            print("Unimplemented key. Please send an issue on github to fix this problem")
    def keyReleaseEvent(self, evt):
        """Same as keyPressEvent but emits the '-up' event variant."""
        key = evt.key()
        try:
            k = "{}{}-up".format(get_panda_key_modifiers_prefix(evt), QPanda3D_Key_translation[key])
            if self.debug:
                print(k)
            messenger.send(k)
        except:
            print("Unimplemented key. Please send an issue on github to fix this problem")
    def resizeEvent(self, evt):
        # Scale the camera film size proportionally to the widget size,
        # then resize the offscreen buffer to match.
        lens = self.panda3DWorld.cam.node().get_lens()
        lens.set_film_size(self.initial_film_size.width() * evt.size().width() / self.initial_size.width(),
            self.initial_film_size.height() * evt.size().height() / self.initial_size.height())
        self.panda3DWorld.buff.setSize(evt.size().width(), evt.size().height())
    def minimumSizeHint(self):
        return QSize(400, 300)
    # Use the paint event to pull the contents of the panda texture to the widget
    def paintEvent(self, event):
        if self.panda3DWorld.screenTexture.mightHaveRamImage():
            self.panda3DWorld.screenTexture.setFormat(Texture.FRgba32)
            data = self.panda3DWorld.screenTexture.getRamImage().getData()
            # mirrored(): Panda's RAM image is bottom-up relative to Qt.
            img = QImage(data, self.panda3DWorld.screenTexture.getXSize(), self.panda3DWorld.screenTexture.getYSize(),
                QImage.Format_ARGB32).mirrored()
            self.paintSurface.begin(self)
            self.paintSurface.drawImage(0, 0, img)
            self.paintSurface.end()
    def rotateX(self, value):
        # Manual rotation helpers delegate to the camera controller.
        self.panda3DWorld.camera_controller.rotateTheta(value)
    def rotateY(self, value):
        self.panda3DWorld.camera_controller.rotatePhi(value)
|
from flask import Flask
app = Flask(__name__)
# Two routes share one handler: '/aaa' falls back to bbs_id=100.
@app.route('/aaa/<bbs_id>')
@app.route('/aaa', defaults={ 'bbs_id': 100 })
def aaa(bbs_id):
    # Korean message: "This is post number {} of aaa."
    return "aaa의 {}번 글 입니다.".format(bbs_id)
if __name__ == '__main__':
    app.run()
|
import xlrd
from datetime import datetime
from .models import *
def get_or_none(classmodel, **kwargs):
    """Fetch a single *classmodel* row matching **kwargs.

    Returns the instance, or None when no matching row exists.
    @author : Arun Gopi
    @date : 3/4/2016
    """
    try:
        return classmodel.objects.get(**kwargs)
    except classmodel.DoesNotExist:
        # Missing rows are an expected outcome, not an error.
        return None
def date_from_excel(d, dm):
    """Normalise a raw Excel cell value into a date representation.

    Arguments:
        d  -- cell value: an Excel serial number (int/float), a string in
              '%d-%m-%Y %I:%M' format, or a falsy/empty value.
        dm -- workbook datemode passed through to xlrd.xldate_as_tuple().

    Returns a 'YYYY-MM-DD' string for numeric cells, a datetime for
    string cells, None for empty or unparseable input, or the value
    unchanged for unknown types.
    @author : Arun Gopi
    @date : 6/4/2016
    """
    if not d:
        return None
    try:
        # bool is a subclass of int, so boolean cells also take this path
        # (same as the original isinstance checks).
        if isinstance(d, (float, int)):
            # Excel stores dates as serial numbers; xlrd decodes them
            # relative to the workbook's datemode.
            year, month, day, hour, minute, sec = xlrd.xldate_as_tuple(
                d, dm)
            return "%04d-%02d-%02d" % (year, month, day)
        # BUG FIX: the old check also referenced ``unicode``, which does
        # not exist on Python 3 and raised NameError for non-str input.
        if isinstance(d, str):
            return datetime.strptime(d, '%d-%m-%Y %I:%M')
        print ("ERROR : Unknown datatype", type(d))
        return d
    except ValueError:
        # Out-of-range serial number or malformed date string.
        return None
def create_project_info(data):
    """Create a new ProjectInfo row from the given dict.

    *data* must provide 'name', 'description', 'start_date' and
    'end_date'.  Always returns True (a failed save raises).
    @author : Arun Gopi
    @date : 7/4/2016
    """
    project = ProjectInfo()
    project.name = data['name']
    project.description = data['description']
    project.start_date = data['start_date']
    project.end_date = data['end_date']
    project.save()
    print ('Inserted')
    return True
def update_project_info(data):
    """Update an existing ProjectInfo row identified by data['pk'].

    Returns True on success, False when the pk is missing/None or no
    matching row exists.
    @author : Arun Gopi
    @date : 7/4/2016
    """
    # Guard clauses replace the original nested if/else pyramid.
    if 'pk' not in data:
        print("please provide pk for updating")
        return False
    if data['pk'] is None:
        return False
    project = get_or_none(ProjectInfo, pk=data['pk'])
    if not project:
        return False
    project.name = data['name']
    project.description = data['description']
    project.start_date = data['start_date']
    project.end_date = data['end_date']
    project.save()
    print('Updated')
    return True
def insertcolx():
    """
    Excel row/colum style for excel
    @Author : Arun Gopi
    @date : 3/4/2016

    Builds an xlwt cell style: thin borders on all sides, a patterned
    fill, and unlocked/visible cell protection.

    NOTE(review): this module imports ``xlrd`` but never ``xlwt``, so
    calling this function raises NameError -- an ``import xlwt`` is
    presumably missing at the top of the file.
    """
    style = xlwt.XFStyle()
    borders = xlwt.Borders()
    # Same thin border on all four sides.
    borders.bottom = borders.top = borders.left\
        = borders.right = xlwt.Borders.THIN
    borders.top = xlwt.Borders.THIN
    style.borders = borders
    style.pattern.pattern = 26
    style.pattern.pattern_fore_colour = 0x16
    # Leave cells editable and formulas visible.
    style.protection.cell_locked = False
    style.protection.formula_hidden = False
    return style
|
# coding: utf-8
"""
Загрузчик шаблонов (темплейтов) для генерации пользовательского интерфейса
для m3_ext_demo.ui.
Необходимость данного шаблона обусловлена спецификой реализации
template-loaders в django.
Для корректной работы загрузчика в settings.py прикладного приложения
необходимо добавить строку 'm3_ext_demo.ui.js_template_loader.load_template_source'
в tuple TEMPLATE_LOADERS
"""
import os
import sys
from django.conf import settings
from django.template import TemplateDoesNotExist
from django.utils._os import safe_join
from django.core.exceptions import SuspiciousOperation
from m3_django_compat import BaseLoader
# At compile time, cache the directories to search.
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
# Template directories shipped with this package and with 'gears'.
template_dir_ext = os.path.join(
    os.path.dirname(__file__), 'templates')
template_dir_gears = os.path.join(
    os.path.dirname(__file__), '../gears/templates')
# NOTE(review): .decode(fs_encoding) assumes Python-2 byte-string paths;
# on Python 3 os.path.join returns str, which has no decode() -- confirm
# the targeted Python/Django versions.
app_template_dirs = (
    template_dir_ext.decode(fs_encoding),
    template_dir_gears.decode(fs_encoding),
)
def get_template_sources(template_name, template_dirs=None):
    """Yield absolute candidate paths for *template_name*.

    The name is joined onto each directory in *template_dirs* (falling
    back to this package's template directories).  Candidates that would
    escape their template dir are skipped for security reasons.
    """
    for template_dir in (template_dirs or app_template_dirs):
        try:
            yield safe_join(template_dir, template_name)
        except UnicodeDecodeError:
            # The template dir name was a bytestring that wasn't valid UTF-8.
            raise
        except (SuspiciousOperation, ValueError):
            # Django <1.8.x raises ValueError, >=1.8.x SuspiciousOperation
            # when the joined path lands outside template_dir: skip it.
            pass
class Loader(BaseLoader):
    """Class-based wrapper delegating to the module-level load_template_source."""
    # Flag used by old-style Django template-loader discovery.
    is_usable = True
    def load_template_source(self, template_name, template_dirs=None):
        return load_template_source(template_name, template_dirs)
def load_template_source(template_name, template_dirs=None):
    """Read the first matching template file.

    Returns the (source, filepath) tuple Django's loader API expects, or
    raises TemplateDoesNotExist when no candidate can be read.
    NOTE(review): .read().decode(...) assumes Python-2 byte strings; on
    Python 3 read() already returns str -- confirm the target version.
    """
    for filepath in get_template_sources(template_name, template_dirs):
        try:
            return (
                open(filepath).read().decode(settings.FILE_CHARSET), filepath
            )
        except IOError:
            # Candidate missing or unreadable: try the next directory.
            pass
    raise TemplateDoesNotExist(template_name)
# Flag used by old-style Django template-loader discovery.
load_template_source.is_usable = True
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
"""Bot for Webex Teams
- Define decorator for bot
- Show/Create/Delete webhook for Webex Teams
Links:
- user account: https://developer.webex.com/
- webhook api: https://developer.webex.com/docs/api/v1/webhooks
- buttons and cards: https://developer.webex.com/docs/api/guides/cards
"""
import json
import logging
import os
import sys
import requests
# Silence the InsecureRequestWarning spam caused by verify=False below.
requests.packages.urllib3.disable_warnings()
# NOTE(review): MultipartEncoder is not used in the visible code.
from requests_toolbelt.multipart.encoder import MultipartEncoder
logger = logging.getLogger(__name__)
class Bot:
TIMEOUT = (10.0, 30.0) # (connect timeout, read timeout)
    def __init__(self, bot_name=None):
        """Resolve the bot name and auth token; exits the process on failure.

        Keyword Arguments:
            bot_name {str} -- bot account name; falls back to the
                ``bot_name`` environment variable (default: {None})
        """
        bot_name = os.getenv('bot_name') if bot_name is None else bot_name
        if bot_name is None or bot_name.strip() == '':
            sys.exit("please set environment variable 'bot_name' before run this script")
        self.bot_name = bot_name
        self._bot_id = None  # get_bot_id() set this value and returns it
        self.auth_token = self.get_auth_token(bot_name=bot_name)
        if self.auth_token is None:
            sys.exit("failed to get authentication token for {}".format(bot_name))
        # Default headers for every REST call made by this bot.
        self.headers = {
            'Authorization': "Bearer {}".format(self.auth_token),
            'content-type': "application/json"
        }
        # functions with decorator will be stored in this dict object
        self.on_message_functions = {}
        self.on_command_functions = {}
def on_message(self, message_text):
"""Decorator for the on_message
Arguments:
message_text {str} -- the message text correspond to the function
Returns:
[func] -- decorator function
"""
def decorator(func):
self.on_message_functions[message_text] = func
return decorator
def on_command(self, command=None):
"""Decorator for the on_command
Arguments:
command {str} -- the command text correspond to the function
plugin
Returns:
[func] -- decorator function
"""
def decorator(func):
self.on_command_functions[command] = func
return decorator
    @staticmethod
    def get_auth_token(bot_name=None):
        """Get authentication token by bot name.

        first, try to get token from environment variable,
        then read from file ~/.{{ bot name }}

        Keyword Arguments:
            bot_name {str} -- name of the bot (default: {None})
        Returns:
            str -- authentication token if found else None
        """
        # 1st get token from environment: bot_token
        token = os.getenv('bot_token')
        if token:
            return token
        # 2nd get from ~/.{{ bot_name }}
        file_name = '~/.{}'.format(bot_name)
        file_path = os.path.expanduser(file_name)
        if not os.path.isfile(file_path):
            logger.info('%s is not found', file_name)
            return None
        try:
            with open(file_path, mode='r') as f:
                # Token files usually end with a newline; strip whitespace.
                return f.read().strip()
        except IOError as e:
            logger.exception(e)
            return None
    def _requests_get_as_json(self, api_path=None):
        """Send get method to api_path and return json data

        Arguments:
            api_path {str} -- api path, fqdn
        Returns:
            dict -- json data, or None
        """
        get_result = None
        try:
            # NOTE(review): verify=False disables TLS certificate checks
            # (warnings are silenced at import time).
            get_result = requests.get(api_path, headers=self.headers, timeout=self.TIMEOUT, verify=False)
        except requests.exceptions.RequestException as e:
            logger.exception(e)
        if get_result is None:
            # Request never completed (connection error/timeout).
            return None
        if get_result.ok:
            logger.info("get success: %s", api_path)
            return get_result.json()
        logger.error("failed to get: %s", api_path)
        logger.error(get_result.text)
        return None
def _requests_get_pagination_as_items(self, api_path=None, params=None):
"""Get all items with pagination
pagination is not tested well.
see, https://developer.webex.com/docs/api/basics/pagination
in requests module, 'next' url could be retrieved easily
response.links['next']['url']
Arguments:
api_path {str} -- api path fqdn
Keyword Arguments:
params {dict} -- get request params (default: {None})
Returns:
list -- a list contains all items
"""
result_list = []
get_result = None
try:
get_result = requests.get(api_path, headers=self.headers, params=params, timeout=self.TIMEOUT, verify=False)
except requests.exceptions.RequestException as e:
logger.exception(e)
if get_result is None:
return []
if not get_result.ok:
logger.error("failed to get: %s", api_path)
logger.error(get_result.text)
return []
logger.info("get success: %s", api_path)
items = get_result.json().get('items')
if items:
result_list.extend(items)
else:
return []
while 'next' in get_result.links.keys():
get_result = None
try:
get_result = requests.get(api_path, headers=self.headers, params=params, timeout=self.TIMEOUT, verify=False)
except requests.exceptions.RequestException as e:
logger.exception(e)
if get_result is None:
return []
if get_result.ok:
logger.info("get success: %s", api_path)
items = get_result.json().get('items')
if items:
result_list.extend(items)
else:
logger.error("failed to get: %s", api_path)
logger.error(get_result.text)
return []
return result_list
    def _requests_delete_as_bool(self, api_path=None):
        """Send delete method to api_path and return True if success

        Arguments:
            api_path {str} -- api path, fqdn
        Returns:
            bool -- True if success
        """
        delete_result = None
        try:
            delete_result = requests.delete(api_path, headers=self.headers, timeout=self.TIMEOUT, verify=False)
        except requests.exceptions.RequestException as e:
            logger.exception(e)
        if delete_result is None:
            # Request never completed (connection error/timeout).
            return False
        if delete_result.ok:
            logger.info("delete success: %s", api_path)
            return True
        logger.error("failed to delete: %s", api_path)
        logger.error(delete_result.text)
        return False
    def _requests_post_as_json(self, api_path=None, payload=None):
        """Send post method to api_path and return json data

        Arguments:
            api_path {str} -- api path, fqdn
        Keyword Arguments:
            payload {dict} -- JSON body to post (default: {None})
        Returns:
            dict -- json data, or None on any failure
        """
        post_result = None
        try:
            post_result = requests.post(api_path, json=payload, headers=self.headers, timeout=self.TIMEOUT, verify=False)
        except requests.exceptions.RequestException as e:
            logger.exception(e)
        if post_result is None:
            # Request never completed (connection error/timeout).
            return None
        if post_result.ok:
            logger.info("post success: %s", api_path)
            return post_result.json()
        logger.error("failed to post: %s", api_path)
        logger.error(post_result.text)
        return None
    def get_me(self):
        """Get my own details

        GET /v1/people/me
        https://developer.webex.com/docs/api/v1/people/get-my-own-details

        Returns:
            dict -- information about this bot obtained from rest api, or None
        """
        # api.ciscospark.com is the legacy Webex Teams API host.
        api_path = 'https://api.ciscospark.com/v1/people/me'
        return self._requests_get_as_json(api_path=api_path)
def get_bot_id(self):
"""Get a identifier for this bot
Returns:
str -- a unique identifier for this bot
"""
if self._bot_id:
return self._bot_id
me = self.get_me()
if not me:
return None
self._bot_id = me.get('id')
return self._bot_id
def get_people_by_email(self, email=None):
"""Get people in your organization by email attribute
GET /v1/people
https://developer.webex.com/docs/api/v1/people/list-people
Keyword Arguments:
email {str} -- get people with this email address (default: {None})
Returns:
list -- list of person objects
"""
if email is None:
return []
api_path = 'https://api.ciscospark.com/v1/people'
params = {
"email": email
}
return self._requests_get_pagination_as_items(api_path=api_path, params=params)
def get_person_details(self, person_id=None):
    """Fetch details for a single person.

    GET /v1/people/{personId}
    https://developer.webex.com/docs/api/v1/people/get-person-details

    Keyword Arguments:
        person_id {str} -- unique identifier of the person (default: {None})

    Returns:
        dict -- the person object, or None when no id is given or on failure
    """
    if person_id is None:
        return None
    return self._requests_get_as_json(
        api_path='https://api.ciscospark.com/v1/people/{}'.format(person_id))
def get_rooms(self):
    """List the rooms this bot belongs to.

    GET /v1/rooms
    https://developer.webex.com/docs/api/v1/rooms/list-rooms

    Returns:
        list -- room objects
    """
    return self._requests_get_pagination_as_items(
        api_path='https://api.ciscospark.com/v1/rooms')
def get_room_details(self, room_id=None):
    """Fetch details for a single room.

    GET /v1/rooms/{roomId}
    https://developer.webex.com/docs/api/v1/rooms/get-room-details

    Keyword Arguments:
        room_id {str} -- unique identifier of the room (default: {None})

    Returns:
        dict -- the room object, or None when no id is given or on failure
    """
    if room_id is None:
        return None
    return self._requests_get_as_json(
        api_path='https://api.ciscospark.com/v1/rooms/{}'.format(room_id))
def delete_room(self, room_id=None):
    """Delete a room by id.

    DELETE /v1/rooms/{roomId}
    https://developer.webex.com/docs/api/v1/rooms/delete-a-room

    Keyword Arguments:
        room_id {str} -- unique identifier of the room (default: {None})

    Returns:
        bool -- True if successfully deleted
    """
    if room_id is None:
        return False
    return self._requests_delete_as_bool(
        api_path='https://api.ciscospark.com/v1/rooms/{}'.format(room_id))
def get_message_detail(self, message_id=None):
    """Fetch details for a single message.

    GET /v1/messages/{messageId}
    https://developer.webex.com/docs/api/v1/messages/get-message-details

    Keyword Arguments:
        message_id {str} -- unique identifier of the message (default: {None})

    Returns:
        dict -- the message object, or None when no id is given or on failure
    """
    if message_id is None:
        return None
    return self._requests_get_as_json(
        api_path='https://api.ciscospark.com/v1/messages/{}'.format(message_id))
def get_message_text(self, message_id=None):
    """Same as get_message_detail() but returns only the plain text body.

    Keyword Arguments:
        message_id {str} -- unique identifier of the message (default: {None})

    Returns:
        str -- the message text, or None when the lookup failed
    """
    details = self.get_message_detail(message_id=message_id)
    return None if details is None else details.get('text')
def send_message(self, text=None, room_id=None, to_person_id=None, to_person_email=None, attachments=None):
    """Create a message.

    POST /v1/messages
    https://developer.webex.com/docs/api/v1/messages/create-a-message

    Keyword Arguments:
        text {str} -- the message, in plain text (default: {None})
        room_id {str} -- room to post into (default: {None})
        to_person_id {str} -- recipient id for a private 1:1 message (default: {None})
        to_person_email {str} -- recipient email for a private 1:1 message (default: {None})
        attachments {list} -- content attachments for the message (default: {None})

    Returns:
        dict -- post response, or None when no destination was given
    """
    # At least one destination (room or person) is required.
    if not (room_id or to_person_id or to_person_email):
        return None
    payload = {'text': text or "message"}
    destinations = {'roomId': room_id,
                    'toPersonId': to_person_id,
                    'toPersonEmail': to_person_email}
    payload.update({key: value for key, value in destinations.items()
                    if value is not None})
    if isinstance(attachments, list):
        payload['attachments'] = attachments
    return self._requests_post_as_json(
        api_path='https://api.ciscospark.com/v1/messages/', payload=payload)
def send_image(self, text=None, room_id=None, to_person_id=None, to_person_email=None, image_filename=None):
    """Create a message carrying a PNG image attachment.

    SEE
    https://developer.webex.com/docs/api/basics/message-attachments

    Keyword Arguments:
        text {str} -- the message, in plain text (default: {None})
        room_id {str} -- room to post into (default: {None})
        to_person_id {str} -- recipient id for a private 1:1 message (default: {None})
        to_person_email {str} -- recipient email for a private 1:1 message (default: {None})
        image_filename {str} -- path of the PNG image to upload (default: {None})

    Returns:
        dict -- post response, or None on any failure
    """
    if not any([room_id, to_person_id, to_person_email]):
        return None
    if image_filename is None:
        return None
    payload = {'text': text or "image"}
    if room_id is not None:
        payload['roomId'] = room_id
    if to_person_id is not None:
        payload['toPersonId'] = to_person_id
    if to_person_email is not None:
        payload['toPersonEmail'] = to_person_email
    post_result = None
    api_path = 'https://api.ciscospark.com/v1/messages'
    # BUG FIX: the file handle was opened and never closed; keep it open only
    # for the duration of the upload.
    with open(image_filename, 'rb') as image_file:
        payload['files'] = (image_filename, image_file, 'image/png')
        m = MultipartEncoder(payload)
        headers = {
            'Authorization': "Bearer {}".format(self.auth_token),
            'content-type': m.content_type
        }
        logger.info(m.content_type)
        try:
            post_result = requests.post(api_path, data=m, headers=headers, timeout=self.TIMEOUT, verify=False)
        except requests.exceptions.RequestException as e:
            logger.exception(e)
    if post_result is None:
        return None
    if post_result.ok:
        logger.info("post success: %s", api_path)
        return post_result.json()
    logger.error("failed to post: %s", api_path)
    logger.error(post_result.text)
    # BUG FIX: the failure branch previously fell off the end implicitly;
    # make the None return explicit like the sibling helpers.
    return None
def get_attachment(self, attachment_id=None):
    """Fetch attachment action details.

    GET /v1/attachment/actions/{id}
    https://developer.webex.com/docs/api/v1/attachment-actions/get-attachment-action-details

    Keyword Arguments:
        attachment_id {str} -- unique identifier of the attachment action (default: {None})

    Returns:
        dict -- the attachment action object, or None when no id is given
    """
    if attachment_id is None:
        return None
    return self._requests_get_as_json(
        api_path='https://api.ciscospark.com/v1/attachment/actions/{}'.format(attachment_id))
def get_webhooks(self, webhook_name=None):
    """List webhooks, filtered by name (the bot's own name when None).

    GET /v1/webhooks
    https://developer.webex.com/docs/api/v1/webhooks/list-webhooks

    Returns:
        list -- matching webhook objects; empty on any failure
    """
    name = self.bot_name if webhook_name is None else webhook_name
    api_path = 'https://api.ciscospark.com/v1/webhooks'
    try:
        response = requests.get(api_path, headers=self.headers,
                                timeout=self.TIMEOUT, verify=False)
    except requests.exceptions.RequestException as err:
        logger.exception(err)
        return []
    if not response.ok:
        logger.error("failed to get: %s", api_path)
        logger.error(response.text)
        return []
    data = response.json()
    webhooks = data.get('items') if data else []
    if name is None:
        # Only reachable when self.bot_name is itself None.
        return webhooks
    return [w for w in webhooks if w.get('name') == name]
def has_webhooks(self, webhook_name=None):
    """Return True when at least one webhook with the given (or bot) name exists."""
    name = self.bot_name if webhook_name is None else webhook_name
    return bool(self.get_webhooks(name))
def show_webhooks(self, webhook_name=None):
    """Pretty-print every webhook matching the given (or bot) name."""
    name = self.bot_name if webhook_name is None else webhook_name
    for webhook in self.get_webhooks(webhook_name=name):
        print(json.dumps(webhook, ensure_ascii=False, indent=2))
def delete_webhook(self, webhook_id=None):
    """Delete webhook by id.

    DELETE /v1/webhooks/{webhookId}
    https://developer.webex.com/docs/api/v1/webhooks/delete-a-webhook

    Keyword Arguments:
        webhook_id {str} -- id to be deleted (default: {None})

    Returns:
        bool -- True if successfully deleted
    """
    # BUG FIX: the guard previously tested the builtin ``id`` (always truthy),
    # so a missing webhook_id slipped through and produced a malformed URL.
    if not webhook_id:
        return False
    api_path = 'https://api.ciscospark.com/v1/webhooks/{}'.format(webhook_id)
    return self._requests_delete_as_bool(api_path=api_path)
def delete_webhooks(self):
    """Delete every webhook registered under this bot's own name."""
    self.delete_webhooks_by_name(webhook_name=self.bot_name)
def delete_webhooks_by_name(self, webhook_name=None):
    """Delete all webhooks whose name matches (the bot's own name when None)."""
    name = self.bot_name if webhook_name is None else webhook_name
    for webhook in self.get_webhooks(webhook_name=name):
        self.delete_webhook(webhook_id=webhook.get('id'))
def regist_webhook(self, webhook_name=None, target_url=None):
    """Register message and attachmentActions webhooks at target_url.

    POST /v1/webhooks
    https://developer.webex.com/docs/api/v1/webhooks/create-a-webhook
    Any pre-existing webhooks with the same name are removed first; the
    second webhook is only created when the first succeeded.
    """
    name = self.bot_name if webhook_name is None else webhook_name
    # delete same name webhooks, if any
    self.delete_webhooks_by_name(webhook_name=name)
    api_path = 'https://api.ciscospark.com/v1/webhooks'
    registrations = (
        ("messages",
         'Success to regist webhook for message',
         'Failed to regist webhook for message'),
        ("attachmentActions",
         'Success to regist webhook for attachment action',
         'Failed to regist webhook for attachment action'),
    )
    for resource, ok_msg, fail_msg in registrations:
        payload = {
            'resource': resource,
            'event': "all",
            'targetUrl': target_url,
            'name': name
        }
        if self._requests_post_as_json(api_path=api_path, payload=payload) is None:
            logger.error(fail_msg)
            return
        logger.info(ok_msg)
def update_webhook(self, webhook_id=None, webhook_name=None, target_url=None):
    """Update an existing webhook's name/target and re-activate it.

    PUT /v1/webhooks/{webhookId}
    https://developer.webex.com/docs/api/v1/webhooks/update-a-webhook

    Returns:
        dict -- the updated webhook object, or None on failure
    """
    api_path = 'https://api.ciscospark.com/v1/webhooks/{}'.format(webhook_id)
    payload = {
        'name': webhook_name,
        'targetUrl': target_url,
        'status': 'active'
    }
    try:
        response = requests.put(api_path, json=payload, headers=self.headers,
                                timeout=self.TIMEOUT, verify=False)
    except requests.exceptions.RequestException as err:
        logger.exception(err)
        return None
    if response.ok:
        logger.info('Webhook update successfuly')
        return response.json()
    logger.error("failed to put: %s", api_path)
    logger.error(response.text)
    return None
if __name__ == '__main__':
    import argparse
    logging.basicConfig(level=logging.INFO)

    def test_decorator():
        """Smoke-test the on_message decorator registry (not run by default)."""
        # pylint: disable=unused-variable
        bot = Bot()

        @bot.on_message('hi')
        def on_message_hi(room_id=None):
            print('My room_id is {}'.format(room_id))

        @bot.on_message('*')
        def on_message_default(room_id=None):
            print(room_id)

        # At interpreter start-up, functions carrying the decorator are
        # collected and stored in a dict; the two functions above should
        # therefore be present in it.
        print(bot.on_message_functions.keys())
        # Using that dict, a decorated function can be invoked at any time.
        message = 'hi'
        if message in bot.on_message_functions:
            func = bot.on_message_functions.get('hi')
            func(room_id="1")
        return 0
    # sys.exit(test_decorator())
def main():
parser = argparse.ArgumentParser(description='webex teams bot related operations.')
parser.add_argument('bot_name', help='name of the bot')
parser.add_argument('-d', '--delete', action='store_true', default=False, help='Delete all webhooks')
parser.add_argument('-l', '--list', action='store_true', default=False, help='List all webhooks')
parser.add_argument('-r', '--room', action='store_true', default=False, help='List rooms')
parser.add_argument('-m', '--me', action='store_true', default=False, help='show my info')
args = parser.parse_args()
bot = Bot(bot_name=args.bot_name)
if args.list:
bot.show_webhooks()
elif args.delete:
webhooks = bot.get_webhooks()
for webhook_id in [w.get('id') for w in webhooks]:
result = bot.delete_webhook(webhook_id=webhook_id)
if result is not None and result.ok:
print("{} : {}".format(webhook_id, "successfuly deleted"))
else:
print("{} : {}".format(webhook_id, "delete failed"))
elif args.room:
rooms = bot.get_rooms()
print(json.dumps(rooms, ensure_ascii=False, indent=2))
elif args.me:
me = bot.get_me()
print(json.dumps(me, ensure_ascii=False, indent=2))
return 0
sys.exit(main())
|
import turtle
import random

# Draw three random strokes: each step picks one of three pen colours at
# random, then either draws a straight segment or a circle, and finally
# turns 45 degrees to the right.
PEN_COLOURS = ('yellow', 'blue', 'red')
for _ in range(3):
    turtle.pencolor(PEN_COLOURS[random.randint(0, 2)])
    if random.randint(0, 1) == 0:
        turtle.forward(100)
    else:
        turtle.circle(50)
    turtle.right(45)
|
def get_csv_and_pred(i=42):
    """Write a CSV of random per-element "chemistry" vectors.

    Every element symbol gets a row of 1200 standard-normal samples; the
    resulting table is saved as ``random_1200.csv`` with an ``element``
    index column.

    Keyword Arguments:
        i {int} -- seed for numpy's RNG, for reproducibility (default: {42})
    """
    elements = ['Pr', 'Ni', 'Ru', 'Ne', 'Rb', 'Pt', 'La', 'Na', 'Nb', 'Nd',
                'Mg', 'Li', 'Pb', 'Re', 'Tl', 'Lu', 'Pd', 'Ti', 'Te', 'Rh',
                'Tc', 'Sr', 'Ta', 'Be', 'Ba', 'Tb', 'Yb', 'Si', 'Bi', 'W',
                'Gd', 'Fe', 'Br', 'Dy', 'Hf', 'Hg', 'Y', 'He', 'C', 'B', 'P',
                'F', 'I', 'H', 'K', 'Mn', 'O', 'N', 'Kr', 'S', 'U', 'Sn', 'Sm',
                'V', 'Sc', 'Sb', 'Mo', 'Os', 'Se', 'Th', 'Zn', 'Co', 'Ge',
                'Ag', 'Cl', 'Ca', 'Ir', 'Al', 'Ce', 'Cd', 'Ho', 'As', 'Ar',
                'Au', 'Zr', 'Ga', 'In', 'Cs', 'Cr', 'Tm', 'Cu', 'Er']
    np.random.seed(seed=i)
    size = 1200
    # Dict comprehension preserves element order, so the RNG draw sequence
    # is identical to a plain loop.
    samples = {symbol: np.random.normal(size=size) for symbol in elements}
    frame = pd.DataFrame(samples).T
    frame.to_csv('random_{:0.0f}.csv'.format(size), index_label='element')


if __name__ == '__main__':
    get_csv_and_pred(i=42)
import time
class PID:
    """A simple PID controller with output saturation and anti-windup.

    The integral term is accumulated only while the output lies strictly
    inside the saturation band, and it is clamped to be non-negative because
    the plant is assumed to have passive cooling only.
    """

    def __init__(self, Kp=0, Ki=0, Kd=0, sat=1023):
        # Gains and saturation limit
        self.Kp = Kp
        self.Ki = Ki
        self.Kd = Kd
        self.sat = sat
        # State carried between iterations
        self.cur_time = time.time()
        self.prev_time = self.cur_time
        self.prev_error = 0
        # Per-term contributions (after gains applied) and the last output
        self.Cp = 0
        self.Ci = 0
        self.Cd = 0
        self.output = 0

    def SetKp(self, Kp):
        """Set the proportional gain."""
        self.Kp = Kp

    def SetKi(self, Ki):
        """Set the integral gain."""
        self.Ki = Ki

    def SetKd(self, Kd):
        """Set the derivative gain."""
        self.Kd = Kd

    def SetSat(self, sat):
        """Set the saturation value, outside of which output will be clipped."""
        self.sat = sat

    def Clip(self, value):
        """Clamp ``value`` into the closed interval [-sat, sat]."""
        return min(max(value, -self.sat), self.sat)

    def Control_Output(self, error):
        """Run one iteration of the PID loop and return the clipped output."""
        now = time.time()
        self.cur_time = now
        dt = now - self.prev_time
        de = error - self.prev_error
        self.Cp = self.Clip(error * self.Kp)
        # Guard against a zero time step (e.g. clock resolution limits).
        self.Cd = self.Clip(self.Kd * de / dt) if dt > 0 else 0
        self.output = self.Clip(self.Cp + self.Ci + self.Cd)
        # Integrate only inside the controllable band to prevent windup.
        if -self.sat < self.output < self.sat:
            self.Ci += self.Ki * error * dt
            # Passive cooling only: never allow a negative integral term.
            self.Ci = max(self.Ci, 0)
            self.output = self.Clip(self.Cp + self.Ci + self.Cd)
        self.prev_time = now
        self.prev_error = error
        return self.output
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# The first line tells Linux/OS X that this is an executable Python program
# (Windows ignores it); the second tells the interpreter to read the source
# as UTF-8 so the Chinese text below does not come out garbled.

# A few literals of different kinds: int, float, plain / escaped / raw /
# raw-multiline strings.
n = 32
f = 4325.564
s1 = 'hello, world'
s2 = 'hello, \'Jin\''
s3 = r'hello, "Jin"'
s4 = r'''hello,
Jin!
das'''

for value in (n, f, s1, s2, s3, s4):
    print(value)
print("你好啊")
# %-style formatting: width/zero-padding and fixed decimals.
print('%2d-%02d' % (3, 1))
print('%.2f' % 3.1415926)
#1
print('Task 1')
# Multiplication tables for 0..12: a header per x, then every product x*y.
for x in range(13):
    print(x, 'Results')
    for y in range(13):
        z = x * y
        print(x, '*', y, '=', z)
|
from PIL.ExifTags import TAGS
from PIL import Image
def testForExif(imgFileName):
try:
exifData = {}
imgFile = Image.open(imgFileName)
info = imgFile._getexif() # Extrae metadatos de una imagen
if info:
for (tag,value) in info.items():
decoded = TAGS.get(tag,tag)
exifData[decoded] = value
exifGPS = exifData['GPSInfo']
if exifGPS:
print '[+] ' + imgFileName + ' GPS Data: ' + exifGPS
except:
pass
testForExif('<RUta de la imagen>')
|
# https://leetcode.com/problems/n-th-tribonacci-number/
"""
The Tribonacci sequence Tn is defined as follows:
T0 = 0, T1 = 1, T2 = 1, and Tn+3 = Tn + Tn+1 + Tn+2 for n >= 0.
Given n, return the value of Tn.
Example 1:
Input: n = 4
Output: 4
Explanation:
T_3 = 0 + 1 + 1 = 2
T_4 = 1 + 1 + 2 = 4
Example 2:
Input: n = 25
Output: 1389537
Constraints:
0 <= n <= 37
The answer is guaranteed to fit within a 32-bit integer, ie. answer <= 2^31 - 1.
"""
from functools import cache
@cache
def tribonacci(n: int) -> int:
    """Return the n-th Tribonacci number via memoized recursion.

    T0 = 0, T1 = T2 = 1, Tn = T(n-1) + T(n-2) + T(n-3).
    """
    base_cases = {0: 0, 1: 1, 2: 1}
    if n in base_cases:
        return base_cases[n]
    return tribonacci(n - 1) + tribonacci(n - 2) + tribonacci(n - 3)
def tribonacci(n: int) -> int:
    """Return the n-th Tribonacci number via bottom-up tabulation."""
    table = {0: 0, 1: 1, 2: 1}
    for i in range(3, n + 1):
        table[i] = table[i - 1] + table[i - 2] + table[i - 3]
    return table[n]
def tribonacci(n: int) -> int:
    """Return the n-th Tribonacci number using a rolling three-value window."""
    def step(x, y, z, remaining):
        # Each recursive call shifts the window one position along the sequence.
        return x if remaining == 0 else step(y, z, x + y + z, remaining - 1)
    return step(0, 1, 1, n)
|
#!/usr/bin/env python
# encoding: utf-8
from pyroute2 import IPDB
import time
class RtTable:
    """RIP routing table layered over the kernel routing table (pyroute2 IPDB)."""

    def __init__(self):
        """
        rip_entry = [
            {'dst': '172.16.1.0/24',
             'metric': 1,  # 1-16
             'gateway': '172.16.1.2',
             'interface': 4,
             'timer': 0,
             }
        ]
        """
        self.rip_table = []     # RIP-learned entries (dicts, shape above)
        self.direct_table = []  # directly connected IPv4 routes
        self.ipdb = IPDB()      # handle to the kernel routing database
        self.get_direct_table()

    def get_direct_table(self):
        """Refresh direct_table with IPv4 kernel routes that have a prefsrc."""
        # family == 2 is AF_INET
        self.direct_table = [x for x in self.ipdb.routes if x['family'] == 2 if x['prefsrc']]

    def apply_rip_route(self, route):
        """Install a route into the kernel table.

        route = {'dst': '172.16.1.0/24',
                 'oif': 4,
                 'gateway': '172.16.1.2',
                 }
        """
        self.ipdb.routes.add(route).commit()

    def remove_rip_route(self, route):
        """Remove a RIP-style entry (our dict shape) from the kernel table."""
        r = {'dst': route['dst'],
             'oif': route['interface'],
             'gateway': route['gateway']}
        self.ipdb.routes.remove(r).commit()

    def get_route(self, route):
        """Return the stored entry matching route['dst'], or None."""
        for r in self.rip_table:
            if r['dst'] == route['dst']:
                return r

    def add_route(self, route):
        """Append a new entry to the RIP table."""
        self.rip_table.append(route)

    def update_route(self, route):
        """Replace any entry with the same destination by ``route``."""
        self.remove_route(route)
        self.add_route(route)

    def remove_route(self, route):
        """Drop every entry whose 'dst' matches route['dst'].

        BUG FIX: the original removed items from the list while iterating
        over it, which skips the element following each removal.
        """
        self.rip_table = [r for r in self.rip_table if r['dst'] != route['dst']]

    def get_all_route(self):
        """Return the full RIP table (live list, not a copy)."""
        return self.rip_table

    def update_route_time(self, route):
        """Reset the age timer of the entry matching route['dst']."""
        for r in self.rip_table:
            if r['dst'] == route['dst']:
                r['timer'] = time.time()
|
from World import Narrative
from World.Types.Log import Message
from Grammar.tree import Node
from Grammar.actions import Terminals as T
from Grammar.plot import export_grammar_plot
from copy import copy
class Progress:
    """Tracks a player's progress through a quest grammar tree.

    Keeps the currently active node, the indices of completed nodes, and
    the narrative semantics computed for each branch index.
    """

    # Class-level defaults; __init__ rebinds fresh objects per instance,
    # so these mutable class attributes are never actually shared.
    quest: Node = None
    semantics_indices = {}
    semantics_parsed_for_branches = []
    current_node: Node = None
    completed_indices = []

    def __init__(self, quest: Node):
        # NOTE(review): copy() is shallow, so set_indices() below likely
        # mutates nodes shared with the caller's tree -- confirm intended.
        local_quest = copy(quest)
        # local_quest.clean_nulls()
        local_quest.set_indices()
        self.quest = local_quest
        self.semantics_indices = {}
        self.semantics_parsed_for_branches = []
        self.current_node = local_quest
        self.completed_indices = []
        export_grammar_plot(local_quest)
        # The root node (index 0) starts with empty pre-semantics.
        self.semantics_indices[0] = []
        self.get_narratives(root=local_quest, pre_semantics=[])

    def get_narratives(self, root: Node, pre_semantics: list):
        """
        Run narration functions for Non-terminal nodes that weren't parsed already
        :param root:
        :param pre_semantics:
        :return:
        """
        if root.index in self.semantics_parsed_for_branches:
            return
        if root.branches:
            # Parse only Non-terminals
            children_pre_semantics = Narrative.find(root)(*pre_semantics)
            for i, branch in enumerate(root.branches):
                if branch.action == T.null:
                    continue
                # The i-th child's semantics belong to the i-th branch.
                self.semantics_indices[branch.index] = children_pre_semantics[i]
            self.semantics_parsed_for_branches.append(root.index)

    def get_current_semantics(self):
        """Return the semantics recorded for the active node, or [] if none."""
        if self.current_node and self.current_node.index in self.semantics_indices:
            return self.semantics_indices[self.current_node.index]
        return []

    def print_progress(self, full: bool=False):
        """Log the active node; with full=True dump the entire semantics map."""
        Message.debug("level: %i, current-node: %s" % (self.current_node.index, self.current_node.action))
        if full:
            Message.debug("semantics: %s" % self.semantics_indices)
        else:
            Message.debug("semantics: %s" % self.get_current_semantics())

    def find_next_active_level(self, node: Node=None):
        """Depth-first search for the next incomplete terminal.

        Returns True when the whole subtree under ``node`` is complete,
        False when some terminal is still waiting to be completed (that
        terminal's branch becomes the active node on the way down).
        """
        if node is None:
            node = self.current_node
        if node.index not in self.completed_indices and node.action != T.null:
            if node.branches:
                # go down the tree, player should first complete parent's branches
                for branch in node.branches:
                    if branch.index not in self.completed_indices:
                        # if completed skip
                        if branch.index not in self.semantics_parsed_for_branches:
                            # don't parse branches which already parsed
                            self.update_active_level(branch)
                        if not self.find_next_active_level(branch):
                            return False
            else:
                # terminal waiting to be completed
                return False
        self.completed_indices.append(node.index)
        return True

    def update_active_level(self, new_node: Node):
        """Make ``new_node`` the active node, narrating the previous one first."""
        # using previous node
        # only non-terminal actions
        self.get_narratives(self.current_node, pre_semantics=self.get_current_semantics())
        # update active level
        self.current_node = new_node

    def check_action_proceed(self, action: T, args: list) -> bool:
        """Advance progress if (action, args) matches the active node.

        Returns True and marks the node complete on a match; otherwise the
        active level is merely re-evaluated and False is returned.
        """
        if action == self.current_node.action and args == self.get_current_semantics():
            self.completed_indices.append(self.current_node.index)
            self.find_next_active_level(self.quest)
            return True
        self.find_next_active_level()
        return False
|
import datetime as dt
import matplotlib.pyplot as plt
from matplotlib import style
import pandas_datareader.data as web
import pandas as pd
df = web.DataReader('TSLA', 'yahoo', '2018-01-01', '2018-01-05')
# BUG FIX: ``exit(df)`` raised SystemExit with the DataFrame as the exit
# status, dumping its repr to stderr and terminating with a non-zero code;
# displaying the frame is the evident intent.
print(df)
|
# -*- coding: utf-8 -*-
from django.test.client import Client
from networkapi.test.test_case import NetworkApiTestCase
from networkapi.util.geral import mount_url
class RouteMapPostSuccessTestCase(NetworkApiTestCase):
    """Sanity tests for the POST /api/v4/route-map/ success path."""

    route_map_uri = '/api/v4/route-map/'

    fixtures = [
        'networkapi/config/fixtures/initial_config.json',
        'networkapi/system/fixtures/initial_variables.json',
        'networkapi/usuario/fixtures/initial_usuario.json',
        'networkapi/grupo/fixtures/initial_ugrupo.json',
        'networkapi/usuario/fixtures/initial_usuariogrupo.json',
        'networkapi/api_ogp/fixtures/initial_objecttype.json',
        'networkapi/api_ogp/fixtures/initial_objectgrouppermissiongeneral.json',
        'networkapi/grupo/fixtures/initial_permissions.json',
        'networkapi/grupo/fixtures/initial_permissoes_administrativas.json',
    ]

    json_path = 'api_route_map/v4/tests/route_map/sanity/json/post/{}'

    def setUp(self):
        """Build an authenticated JSON test client."""
        self.client = Client()
        self.authorization = self.get_http_authorization('test')
        self.content_type = 'application/json'
        self.fields = ['name']

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def test_post_route_maps(self):
        """Test POST RouteMaps."""
        payload_path = self.json_path.format('two_route_maps.json')
        post_response = self.client.post(
            self.route_map_uri,
            data=self.load_json(payload_path),
            content_type=self.content_type,
            HTTP_AUTHORIZATION=self.authorization)
        self.compare_status(201, post_response.status_code)
        # Read the created objects back and compare against the payload.
        created_ids = [item['id'] for item in post_response.data]
        get_uri = mount_url(self.route_map_uri,
                            created_ids,
                            kind=['basic'],
                            fields=self.fields)
        get_response = self.client.get(
            get_uri,
            HTTP_AUTHORIZATION=self.authorization)
        self.compare_status(200, get_response.status_code)
        self.compare_json(payload_path, get_response.data)
|
import os
import subprocess as sp

# Map of offered software names to the remote launch command.  A dict
# dispatch replaces the repeated if/elif chain; unknown names simply do
# nothing, exactly as before.
REMOTE_COMMANDS = {
    'firefox': "ssh -X -l root 172.17.0.2 firefox",
    'atom': "ssh -X -l root 172.17.0.2 atom",
    'gedit': "ssh -X -l root 172.17.0.2 gedit",
    'vlc': "ssh -X -l root 172.17.0.2 vlc",
}

print("Hello, what is your name: ", end='')
name = input()
print("{}, which software service you need: ".format(name), end='')
sw = input()

cmd = REMOTE_COMMANDS.get(sw)
if cmd is not None:
    sp.getoutput(cmd)
|
import ex3
import point_cloud
import numpy as np
from matplotlib import pyplot
from matplotlib import patches
#
# an attempt at constructing the
# boundary operators of the graph
# from the given threshold in ex3.py.
#
# SCRATCH THAT
# First, just try to manually visualize
# all 2-simplices
# (complete graphs on 3 vertices).
#
# Adjacency: for each source node j, the list of its neighbours.
node_list = {j: list(ex3.pairs[ex3.pairs[:, 0] == j, 1]) for j in np.unique(ex3.pairs[:, 0])}

# Enumerate all 2-simplices: for every node k, any ordered pair (u, w) of
# its neighbours that is itself an edge closes a triangle [k, u, w].
loops = []
for k, neighbours in node_list.items():
    for a in range(len(neighbours)):
        u = neighbours[a]
        for w in neighbours[a + 1:]:
            if u in node_list and w in node_list[u]:
                loops.append([k, u, w])
#
if __name__=="__main__":
    # Show the point cloud and highlight the chosen ball.
    ex3.pc.visualize()
    ex3.pc.draw_ball(ex3.which, ex3.thresh/2., c=[0.5,0,0.5])
    # Draw every edge (1-simplex) below the threshold, cycling colours.
    for j,o in enumerate(ex3.D2.order[:len(ex3.pairs)]):
        iijj = ex3.D2.coords[o]
        xxyy = ex3.X[iijj]
        middle = np.mean(xxyy, axis=0)
        ex3.pc.ax.plot(xxyy[:,0], xxyy[:,1], c=pyplot.cm.tab10(j%10), lw=2)
    #
    # Shade each detected 2-simplex as a translucent triangle patch.
    loop_coords = [ex3.X[l] for l in loops]
    polys = []
    for lc in loop_coords:
        triangle = patches.Polygon(lc, facecolor=[0,0,0,0.5], edgecolor='k')
        tripatch = ex3.pc.ax.add_patch(triangle)
        polys.append(tripatch)
    #
    if False:
        # Disabled: annotate edges with their distances and points with indices.
        for j,o in enumerate(ex3.D2.order[:len(ex3.pairs)]):
            iijj = ex3.D2.coords[o]
            xxyy = ex3.X[iijj]
            middle = np.mean(xxyy, axis=0)
            ex3.pc.ax.text(middle[0],middle[1], '%.2f'%ex3.D2.distances[o], c='w', fontsize=11, ha='center', va='center')
        for j,co in enumerate(ex3.pc.pts):
            ex3.pc.ax.text(co[0], co[1], j, c='w', fontsize=10, ha='center', va='center', bbox={'facecolor':'r', 'alpha':0.5})
    ex3.pc.ax.set_title('Threshold %.2f'%ex3.thresh, c='k', fontsize=18)
    ex3.pc.fig.tight_layout()
    ex3.pc.fig.subplots_adjust(top=0.9)
    #
    ex3.pc.ax.axis('square')
    pyplot.ion()
|
from core.base.parser import StaticParser
from core.utils import parse_hexstring
from core.base.parser import STR_TEMPLATE
class SetProgressStatusParser(StaticParser):
    """Static parser for the MSG_Set_Progress_Status event (code 1003)."""

    name = 'MSG_Set_Progress_Status'
    event_code = '1003'

    def _make_str(self, container):
        """Render the container as a log line, appending the first data byte."""
        payload = parse_hexstring(container.src_data.data)
        fields = container.src_data._asdict()
        fields['message'] = '{}({})'.format(self.name, payload[0])
        return STR_TEMPLATE.format(**fields)
|
from core.models.Employee import Employee
from django.db.utils import IntegrityError
import pytest
@pytest.mark.django_db
class TestEmployee:
    """Test for employee models."""

    def test_register_employee(self):
        """test registering employee."""
        created = Employee.objects.create(name='Test',
                                          email='test2@email.com',
                                          department='M')
        fetched = Employee.objects.get(email='test2@email.com')
        assert fetched == created
import numpy as np
import pytest
import tensorflow as tf
from sklearn.metrics import mean_squared_error
from ml_intuition.evaluation.performance_metrics import overall_rms_abundance_angle_distance, \
cnn_rmse, per_class_rmse, dcae_rmse, average_angle_spectral_mapper
# Shared TF1 graph-mode session; the tests below evaluate tensors against it
# via .eval(session=sess).
sess = tf.Session()
def softmax(x: np.ndarray) -> np.ndarray:
    """Row-wise softmax of a 2-D array.

    Improvement: subtracts each row's maximum before exponentiating -- the
    standard trick to avoid overflow for large logits; mathematically the
    result is unchanged.

    Arguments:
        x -- 2-D array of logits, shape (samples, classes)

    Returns:
        2-D array of the same shape whose rows sum to 1
    """
    shifted = x - np.max(x, axis=1, keepdims=True)
    exp_x = np.exp(shifted)
    result = exp_x / np.sum(exp_x, axis=1, keepdims=True)
    # Sanity check retained from the original: every row must sum to 1.
    np.testing.assert_array_equal(np.around(result.sum(axis=1), 5).astype(int),
                                  np.ones(shape=(result.shape[0])).astype(int))
    return result
class TestRMSAbundanceAngleDistance:
    """Checks overall_rms_abundance_angle_distance against a NumPy reference."""

    @pytest.mark.parametrize(
        'y_true, y_pred',
        [
            (np.random.uniform(0, 1, (10, 10)), np.random.uniform(0, 1, (10, 10))),
            (np.random.uniform(0, 1, (10, 2)), np.random.uniform(0, 1, (10, 2))),
            (np.random.uniform(0, 1, (10, 1)), np.random.uniform(0, 1, (10, 1)))
        ])
    def test_rmsaad_with_external_implementation(self, y_true: np.ndarray, y_pred: np.ndarray):
        # Softmax both arrays so each row is a valid abundance vector.
        y_true, y_pred = softmax(y_true), softmax(y_pred)
        tf_y_true, tf_y_pred = tf.convert_to_tensor(y_true), tf.convert_to_tensor(y_pred)
        # Evaluate the graph op under the module-level TF1 session.
        tf_error = overall_rms_abundance_angle_distance(y_true=tf_y_true, y_pred=tf_y_pred).eval(session=sess)
        # Reference: root-mean-square of the per-sample spectral angles.
        error = 0
        for i in range(y_true.shape[0]):
            error += (np.arccos(np.dot(y_true[i], y_pred[i]) /
                                (np.linalg.norm(y_true[i]) * np.linalg.norm(y_pred[i])))) ** 2
        error /= y_true.shape[0]
        error = np.sqrt(error)
        assert round(tf_error, 3) == round(error, 3)
class TestRMSE:
    """Checks the RMSE metric variants against NumPy and scikit-learn."""

    @pytest.mark.parametrize(
        'y_true, y_pred',
        [
            (np.random.uniform(0, 1, (10, 10)), np.random.uniform(0, 1, (10, 10))),
            (np.random.uniform(0, 1, (10, 2)), np.random.uniform(0, 1, (10, 2))),
            (np.random.uniform(0, 1, (10, 1)), np.random.uniform(0, 1, (10, 1)))
        ])
    def test_cnn_rmse_with_external_implementations(self, y_true: np.ndarray, y_pred: np.ndarray):
        # Softmax both arrays so each row is a valid probability vector.
        y_true, y_pred = softmax(y_true), softmax(y_pred)
        tf_y_true, tf_y_pred = tf.convert_to_tensor(y_true), tf.convert_to_tensor(y_pred)
        # Overall RMSE must agree with both NumPy and sklearn references.
        tf_overall_error = cnn_rmse(y_true=tf_y_true, y_pred=tf_y_pred).eval(session=sess)
        error = np.sqrt(np.mean((y_true - y_pred) ** 2))
        assert round(tf_overall_error, 3) == round(error, 3) == \
            round(np.sqrt(mean_squared_error(y_true=y_true, y_pred=y_pred)), 3)
        # Per-class RMSE: averaged over samples (axis 0), one value per class.
        tf_per_class_error = per_class_rmse(y_true=tf_y_true, y_pred=tf_y_pred).eval(session=sess)
        per_class_error = np.sqrt(np.mean((y_true - y_pred) ** 2, axis=0))
        assert np.array_equal(np.round(tf_per_class_error, 3), np.round(per_class_error, 3))

    @pytest.mark.parametrize(
        'y_true, y_pred',
        [
            (np.random.uniform(0, 1, (10, 10)), np.random.uniform(0, 1, (10, 10))),
            (np.random.uniform(0, 1, (10, 2)), np.random.uniform(0, 1, (10, 2))),
            (np.random.uniform(0, 1, (10, 1)), np.random.uniform(0, 1, (10, 1)))
        ])
    def test_dcae_rmse_with_external_implementations(self, y_true: np.ndarray, y_pred: np.ndarray):
        y_true, y_pred = softmax(y_true), softmax(y_pred)
        tf_y_true, tf_y_pred = tf.convert_to_tensor(y_true), tf.convert_to_tensor(y_pred)
        tf_overall_error = dcae_rmse(y_true=tf_y_true, y_pred=tf_y_pred).eval(session=sess)
        # DCAE variant: per-sample RMSE first (axis 1), then mean over samples.
        error = np.mean(np.sqrt(np.mean(np.square(y_pred - y_true), axis=1)))
        assert round(tf_overall_error, 3) == round(error, 3)
class TestAverageAngleSpectralMapper:
    """Checks average_angle_spectral_mapper against a NumPy reference."""

    @pytest.mark.parametrize(
        'y_true, y_pred',
        [
            (np.random.uniform(0, 1, (10, 10)), np.random.uniform(0, 1, (10, 10))),
            (np.random.uniform(0, 1, (10, 2)), np.random.uniform(0, 1, (10, 2))),
            (np.random.uniform(0, 1, (10, 1)), np.random.uniform(0, 1, (10, 1)))
        ])
    def test_aSAM_with_external_implementations(self, y_true: np.ndarray, y_pred: np.ndarray):
        # Softmax both arrays so each row is a valid abundance vector.
        y_true, y_pred = softmax(y_true), softmax(y_pred)
        tf_y_true, tf_y_pred = tf.convert_to_tensor(y_true), tf.convert_to_tensor(y_pred)
        tf_error = average_angle_spectral_mapper(y_true=tf_y_true, y_pred=tf_y_pred).eval(session=sess)
        # Reference: arithmetic mean of the per-sample spectral angles.
        error = 0
        for i in range(y_true.shape[0]):
            error += np.arccos(np.dot(y_true[i], y_pred[i]) / (np.linalg.norm(y_true[i]) * np.linalg.norm(y_pred[i])))
        error /= y_true.shape[0]
        assert round(tf_error, 3) == round(error, 3)
|
from socket import *

# Minimal one-shot TCP sink: accept a single connection on port 8899,
# print the peer address and whatever arrives, then shut everything down.
listener = socket(AF_INET, SOCK_STREAM)
listener.bind(("", 8899))
listener.listen(5)
conn, peer = listener.accept()
data = conn.recv(1024)
print("%s:%s" % (str(peer), data))
conn.close()
listener.close()
|
"""Data manipulation tools for cognate detection."""
import numpy as np
import itertools as it
import collections
import csv
import lingpy
from . import distances
from . import ipa2asjp
# Single-character edits folded into one translate() table: the first group
# is deleted outright, the second is case-normalised.  One C-level pass
# replaces the former chain of 26 str.replace() calls.
_CLEAN_TABLE = str.maketrans(
    {**{c: None for c in "- %~*$\"|.+·?’][=_<>‐ᶢ"},
     'C': 'c', 'K': 'k', 'L': 'l', 'W': 'w', 'T': 't'})


def clean_word(w):
    """Clean a string to reduce non-IPA noise.

    Deletes punctuation/markup characters, lower-cases a handful of
    consonant letters, and normalises two affricate spellings to 'd͡ʒ'.
    """
    w = w.translate(_CLEAN_TABLE)
    w = w.replace('dʒ͡', 'd͡ʒ')
    w = w.replace('ʤ', 'd͡ʒ')
    w = w.replace('Ɂ', 'Ɂ')
    return w
def read_data_ielex_type(datafile, char_list=None,
                         cogids_are_cross_semantically_unique=False,
                         data='ASJP'):
    """Read an IELex style TSV file.

    Arguments:
        datafile -- open text file; the first (header) line is skipped
        char_list -- optional set accumulating every character of every word
            form read.  BUG FIX: this used to be a mutable default argument
            (``set()``), so the set silently leaked between separate calls.
        cogids_are_cross_semantically_unique -- key cognate classes globally
            by cogid when True, per (cogid, concept) pair when False
        data -- transcription system to read; only 'ASJP' is implemented

    Returns:
        tuple -- (data_dict, list of cognate-class sets, words_dict,
            langs_list, char_list)
    """
    if char_list is None:
        char_list = set()
    line_id = 0
    data_dict = collections.defaultdict(lambda: collections.defaultdict())
    cogid_dict = {}
    words_dict = collections.defaultdict(lambda: collections.defaultdict(list))
    langs_list = []
    # Ignore the header line of the data file.
    datafile.readline()
    for line in datafile:
        line = line.strip()
        arr = line.split("\t")
        lang = arr[0]
        concept = arr[2]
        # Strip uncertainty markers from the cognate id.
        cogid = arr[6]
        cogid = cogid.replace("-", "")
        cogid = cogid.replace("?", "")
        if data == 'ASJP':
            # Take only the first variant when several are comma-separated.
            asjp_word = clean_word(arr[5].split(", ")[0])
        else:
            raise NotImplementedError
        for ch in asjp_word:
            if ch not in char_list:
                char_list.add(ch)
        if len(asjp_word) < 1:
            continue
        data_dict[concept][line_id, lang] = asjp_word
        cogid_dict.setdefault(cogid
                              if cogids_are_cross_semantically_unique
                              else (cogid, concept), set()).add(
                                  (lang, concept, asjp_word))
        words_dict[concept][lang].append(asjp_word)
        if lang not in langs_list:
            langs_list.append(lang)
        line_id += 1
    return (data_dict,
            list(cogid_dict.values()),
            words_dict,
            langs_list,
            char_list)
def read_data_cldf(datafile, sep="\t", char_list=set(),
                   cogids_are_cross_semantically_unique=True,
                   data='ASJP'):
    """Read a CLDF file in TSV or CSV format.

    Parameters
    ----------
    datafile : file-like
        Open handle on the CLDF word list.
    sep : str
        Field separator; ``', '`` selects the comma ('excel') dialect,
        anything else the tab ('excel-tab') dialect.
    char_list : set, optional
        Set collecting every character/token seen in the word forms.
        NOTE: this is a mutable default argument shared across calls;
        pass an explicit set to avoid cross-call accumulation.
    cogids_are_cross_semantically_unique : bool
        If True, cognate classes key the cognate sets on their own;
        otherwise sets are keyed by ``(cognate class, concept)``.
    data : str
        ``'ASJP'`` (cleaned ASJP column, falling back to converting the
        IPA column), ``'IPA'`` (tokenized IPA), or the name of any other
        column to use verbatim.

    Returns
    -------
    tuple
        ``(data_dict, cognate_sets, words_dict, languages, char_list)``
    """
    reader = csv.DictReader(
        datafile,
        # NOTE(review): the comma dialect is only selected for the exact
        # separator ', ' (comma + space) — confirm this is intended.
        dialect='excel' if sep == ', ' else 'excel-tab')
    langs = set()
    data_dict = collections.defaultdict(lambda: collections.defaultdict())
    # Plain dict: only setdefault is used, matching the other readers.
    cogid_dict = {}
    words_dict = collections.defaultdict(lambda: collections.defaultdict(list))
    for line, row in enumerate(reader):
        lang = row["Language ID"]
        langs.add(lang)
        if data == 'ASJP':
            try:
                asjp_word = clean_word(row["ASJP"])
            except KeyError:
                # No ASJP column: convert the IPA transcription instead.
                asjp_word = ipa2asjp.ipa2asjp(row["IPA"])
        elif data == 'IPA':
            asjp_word = tuple(
                lingpy.ipa2tokens(row["IPA"], merge_vowels=False))
        else:
            asjp_word = row[data]
        if not asjp_word:
            continue
        char_list.update(asjp_word)
        concept = row["Feature ID"]
        cogid = row["Cognate Class"]
        data_dict[concept][line, lang] = asjp_word
        cogid_dict.setdefault(cogid
                              if cogids_are_cross_semantically_unique
                              else (cogid, concept), set()).add(
                                  (lang, concept, asjp_word))
        words_dict[concept].setdefault(lang, []).append(asjp_word)
    return (data_dict,
            list(cogid_dict.values()),
            words_dict,
            list(langs),
            char_list)
def read_data_lingpy(datafile, sep="\t", char_list=None,
                     cogids_are_cross_semantically_unique=True,
                     data='ASJP'):
    """Read a Lingpy file in TSV or CSV format.

    Parameters
    ----------
    datafile : file-like
        Open handle on the word list; columns DOCULECT (or DOCULECT_ID),
        CONCEPT, COGID plus the transcription column are used.
    char_list : set, optional
        Set collecting every character/token seen.  A fresh set is
        created when omitted.  (Bug fix: the original used a mutable
        default argument, so characters silently accumulated across
        separate calls.)
    cogids_are_cross_semantically_unique : bool
        If True, COGIDs key the cognate sets on their own; otherwise
        sets are keyed by ``(COGID, concept)``.
    data : str
        ``'ASJP'`` (cleaned ASJP column, falling back to converting the
        IPA column), ``'IPA'`` (tokenized IPA), or the name of any other
        column to use verbatim.

    Returns
    -------
    tuple
        ``(data_dict, cognate_sets, words_dict, languages, char_list)``
    """
    if char_list is None:
        char_list = set()
    reader = csv.DictReader(
        datafile,
        dialect='excel' if sep == ', ' else 'excel-tab')
    langs = set()
    data_dict = collections.defaultdict(lambda: collections.defaultdict())
    cogid_dict = {}
    words_dict = collections.defaultdict(lambda: collections.defaultdict(list))
    for line, row in enumerate(reader):
        # Prefer the explicit doculect id when the column exists.
        lang = row.get("DOCULECT_ID", row["DOCULECT"])
        langs.add(lang)
        if data == 'ASJP':
            try:
                asjp_word = clean_word(row["ASJP"])
            except KeyError:
                asjp_word = ipa2asjp.ipa2asjp(row["IPA"])
        elif data == 'IPA':
            asjp_word = tuple(ipa2asjp.tokenize_word_reversibly(
                clean_word(row["IPA"])))
        else:
            asjp_word = row[data]
        if not asjp_word:
            continue
        char_list.update(asjp_word)
        concept = row["CONCEPT"]
        cogid = row["COGID"]
        data_dict[concept][line, lang] = asjp_word
        cogid_dict.setdefault(cogid
                              if cogids_are_cross_semantically_unique
                              else (cogid, concept), set()).add(
                                  (lang, concept, asjp_word))
        words_dict[concept].setdefault(lang, []).append(asjp_word)
    return (data_dict,
            list(cogid_dict.values()),
            words_dict,
            list(langs),
            char_list)
def calc_pmi(alignments, scores=None):
    """Calculate a pointwise mutual information dictionary from alignments.

    Given a sequence of pairwise alignments and their relative weights,
    calculate the logarithmic pairwise mutual information encoded for the
    character pairs in the alignments.  Gap positions (an empty string on
    either side) are ignored.
    """
    if scores is None:
        # Unweighted: every alignment counts once.
        scores = it.cycle([1])
    sound_counts = collections.defaultdict(float)
    pair_counts = collections.defaultdict(float)
    for alignment, weight in zip(alignments, scores):
        for left, right in alignment:
            if left == "" or right == "":
                continue
            # Symmetric pair counts; each sound participates twice.
            pair_counts[left, right] += 1.0 * weight
            pair_counts[right, left] += 1.0 * weight
            sound_counts[left] += 2.0 * weight
            sound_counts[right] += 2.0 * weight
    total_sounds = sum(sound_counts.values())
    total_pairs = sum(pair_counts.values())
    # Normalisation term shared by every pair.
    log_weight = 2 * np.log(total_sounds) - np.log(total_pairs)
    # Convert raw counts to log-PMI values in place.
    for pair in pair_counts:
        joint = np.log(pair_counts[pair])
        marginal = np.log(sound_counts[pair[0]]) + np.log(sound_counts[pair[1]])
        pair_counts[pair] = joint - marginal + log_weight
    return pair_counts
class OnlinePMITrainer:
    """Train a PMI scorer step-by-step on always improving alignments."""

    def __init__(self, margin=1.0, alpha=0.75, gop=-2.5, gep=-1.75):
        """Create a persistent aligner object.

        margin: scaling factor for scores
        alpha: decay in update weight (must be between 0.5 and 1)
        gop, gep: gap opening and extending penalty; gop=None selects
            character-dependent penalties.
        """
        self.margin = margin
        self.alpha = alpha
        self.n_updates = 0
        self.pmidict = collections.defaultdict(float)
        self.gop = gop
        self.gep = gep

    def align_pairs(self, word_pairs, local=False):
        """Align a list of word pairs, removing those that align badly.

        Mutates ``word_pairs`` in place (bad pairs are popped) and folds
        the surviving alignments into the PMI dictionary.  Returns the
        list of kept alignments and the number of discarded pairs.
        """
        kept_alignments = []
        kept_scores = []
        n_zero = 0
        # Walk backwards so that popping does not shift unvisited indices.
        for index in reversed(range(len(word_pairs))):
            (_, _, first_word), (_, _, second_word) = word_pairs[index]
            score, alignment = distances.needleman_wunsch(
                first_word, second_word, gop=self.gop, gep=self.gep,
                lodict=self.pmidict, local=local)
            if score <= self.margin:
                n_zero += 1
                word_pairs.pop(index)
            else:
                kept_alignments.append(alignment)
                kept_scores.append(score)
        self.update_pmi_dict(kept_alignments, scores=kept_scores)
        return kept_alignments, n_zero

    def update_pmi_dict(self, algn_list, scores=None):
        """Blend fresh PMI estimates into the running dictionary.

        Uses a decaying learning rate eta = (n_updates + 2) ** -alpha.
        """
        eta = (self.n_updates + 2) ** (-self.alpha)
        for pair, value in calc_pmi(algn_list, scores).items():
            previous = self.pmidict.get(pair, 0.0)
            self.pmidict[pair] = eta * value + (1.0 - eta) * previous
        self.n_updates += 1
class MaxPairDict(dict):
    """A maximum-of-pairs lookup dictionary.

    Either side of the key pair may be a single key or a tuple of
    alternative keys; lookup returns the maximum value over all
    combinations.  Alternatives must be given in a tuple, for historical
    reasons.  (A set would be much nicer, because it only contains
    hashables but isn't itself hashable, so there would be no danger of
    confusion.)

    >>> m = MaxPairDict({(1, 1): 2, (1, 0): 1, (0, 0): 0})
    >>> m[(1, 1)]
    2
    >>> m[((0, 1), 0)]
    1
    >>> m[((0, 1), (0, 1))]
    Traceback (most recent call last):
    ...
    KeyError: (0, 1)
    """
    def __getitem__(self, key):
        """Return the maximum value among all pairs given.

        x.__getitem__(y) <=> x[y]
        """
        left, right = key
        # Normalise both sides to tuples of alternatives.
        if type(left) != tuple:
            left = (left,)
        if type(right) != tuple:
            right = (right,)
        # Raises KeyError on the first missing combination, like a dict.
        return max(dict.__getitem__(self, (lhs, rhs))
                   for lhs in left for rhs in right)

    def get(self, key, default=None):
        """Return m[k] if k in m, otherwise default (None).

        Ideally, at some point, this will work:

        >>> m = MaxPairDict({(1, 1): 2, (1, 0): 1, (0, 0): 0})
        >>> m.get(((0, 1), (0, 1)))
        2

        Currently, this returns None, because one of the values cannot
        be found.
        """
        try:
            return self[key]
        except KeyError:
            return default
def multi_align(
        similarity_sets, guide_tree, pairwise=distances.needleman_wunsch,
        **kwargs):
    """Align multiple sequences according to a given guide tree.

    similarity_sets: iterable of sets of (language, concept, form)
        triples; each set is one group to be multi-aligned.
    guide_tree: tree whose leaves are named after languages and which
        supports ``get_leaves()``, ``walk('postorder')`` and
        ``descendants``; alignments are built bottom-up.
    pairwise: pairwise alignment function with the needleman_wunsch
        signature, used to merge two partial alignments.

    Returns the ``alignment`` dict attached to the last node of the
    post-order walk (the root): group index -> (languages, concepts,
    aligned columns).
    """
    languages = {leaf.name: leaf for leaf in guide_tree.get_leaves()}
    # Attach each form to its language's leaf, keyed by group index.
    for s, similarityset in enumerate(similarity_sets):
        for (language, concept, form) in similarityset:
            try:
                leaf = languages[language]
            except KeyError:
                # Language not present in the guide tree: skip the form.
                continue
            try:
                leaf.forms
            except AttributeError:
                leaf.forms = {}
            # Store as 1-language "alignments": tuples of 1-tuples.
            leaf.forms.setdefault(s, []).append(
                ((language, ), (concept, ), tuple((x, ) for x in form)))
    for node in guide_tree.walk('postorder'):
        print(node.name)  # progress/debug output left in place
        try:
            # NOTE: for leaves this aliases node.forms, which is then
            # extended in place with the children's alignments below.
            entries_by_group = node.forms
        except AttributeError:
            entries_by_group = {}
        for child in node.descendants:
            try:
                for group, alignment in child.alignment.items():
                    entries_by_group.setdefault(group, []).append(alignment)
            except AttributeError:
                pass
        aligned_groups = {}
        for group in entries_by_group:
            forms = entries_by_group[group]
            already_aligned = None
            # Fold the partial alignments together pairwise.
            for (new_languages, new_concepts, new_alignment) in forms:
                if not already_aligned:
                    languages = new_languages
                    concepts = new_concepts
                    already_aligned = new_alignment
                else:
                    # Gap columns sized to each side's language count —
                    # presumably len(new_alignment[0]) equals the number
                    # of languages in the new alignment; TODO confirm.
                    gap1 = ('', ) * len(languages)
                    gap2 = ('', ) * len(new_alignment[0])
                    languages += new_languages
                    concepts += new_concepts
                    print("Aligning:")
                    print(already_aligned)
                    print(new_alignment)
                    s, combined_alignment = pairwise(
                        already_aligned, new_alignment, **kwargs)
                    print(combined_alignment)
                    # Concatenate columns, padding gaps on either side.
                    already_aligned = tuple(
                        (x if x else gap1) + (y if y else gap2)
                        for x, y in combined_alignment)
                    print(already_aligned)
            aligned_groups[group] = languages, concepts, already_aligned
        node.alignment = aligned_groups
    return node.alignment
|
# Generated by Django 2.0 on 2018-07-30 19:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-adds an ``image`` field to the blog ``Post`` model.

    The preceding migration (0003) removed the field; this restores it.
    """

    dependencies = [
        ('blog', '0003_remove_post_image'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='image',
            # default=1 only back-fills existing rows during the migration;
            # preserve_default=False drops it from the field afterwards.
            field=models.ImageField(default=1, upload_to='img'),
            preserve_default=False,
        ),
    ]
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'enter_ques.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
from st_entry import Ui_MainWindowStent
import urllib2
import urllib
# pyuic4-generated Qt4/Python-2 compatibility shims: fall back to plain
# pass-through functions when QString.fromUtf8 / UnicodeUTF8 are not
# available (e.g. under PyQt API version 2 or Python 3).
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindowEques(object):
    """Question-entry window of the quiz teacher UI (pyuic4-generated).

    Collects one question at a time (text, four options, marks, correct
    option) and POSTs each to a local PHP backend.

    WARNING(review): data is sent over plain HTTP to localhost and relies
    on module-level ``weibo``-style globals set by the caller; the
    credentials in ``setdta`` are kept in memory unencrypted.
    """
    ##############################init enter ques###############################
    def setdta(self,oid,opas,tid,ques,typ):
        # Stash session context handed over by the previous window:
        # oid/opas look like owner id/password, tid is the test id,
        # ques the total number of questions, typ the quiz type
        # (0 appears to mean "enter students next") — TODO confirm.
        self.oid = oid
        self.opas = opas
        self.tid = tid
        self.ques = ques
        self.qno =1
        self.typ=typ
    ##########################################################################
    ################################enter questions###########################
    def entques(self):
        # Read the current form fields for one question.
        ques = self.textEdit.toPlainText()
        op1 = self.textEdit_2.toPlainText()
        op2 = self.textEdit_3.toPlainText()
        op3 = self.textEdit_4.toPlainText()
        op4 = self.textEdit_5.toPlainText()
        mrks = self.spinBox.value()
        ans = self.spinBox_2.value()
        tid = self.tid
        if self.qno<=self.ques:
            # More questions expected: save this one and advance.
            url = 'http://localhost/quiz/enterques.php'
            values = {'tid':tid,
                      'qno':self.qno,
                      'ques':ques,
                      'op1':op1,
                      'op2':op2,
                      'op3':op3,
                      'op4':op4,
                      'ans':ans,
                      'mrks':mrks}
            data = urllib.urlencode(values)
            req = urllib2.Request(url,data)
            x = urllib2.urlopen(req)
            print "Saved Question",self.qno
            self.qno+=1
        else :
            # All questions collected: create the result table server-side.
            print "Questions completed"
            styp = self.typ
            if self.typ==0:
                print "enter students"
                url = 'http://localhost/quiz/createtable.php'
                values = {'tid':tid,
                          'ques':self.ques}
                data = urllib.urlencode(values)
                req = urllib2.Request(url,data)
                x = urllib2.urlopen(req)
                print x.read()
                app = QtGui.QApplication.instance()
                app.closeAllWindows()
                # Open the student-entry window next.
                self.stwin = QtGui.QMainWindow()
                self.ui = Ui_MainWindowStent()
                self.ui.setupUiStent(self.stwin)
                self.ui.ldta(tid,styp)
                self.stwin.show()
            else:
                url = 'http://localhost/quiz/createtable.php'
                # NOTE(review): this branch posts the question *text*
                # ('ques' from the edit box) while the branch above posts
                # self.ques (the question count) — confirm which is intended.
                values = {'tid':tid,
                          'ques':ques}
                data = urllib.urlencode(values)
                req = urllib2.Request(url,data)
                x = urllib2.urlopen(req)
                print x.read()
                app = QtGui.QApplication.instance()
                app.closeAllWindows()
    ##########################################################################
    def setupUiEques(self, MainWindow):
        # Generated widget construction — prefer editing enter_ques.ui and
        # regenerating over changing this method by hand.
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(800, 500)
        MainWindow.setMinimumSize(QtCore.QSize(800, 500))
        MainWindow.setMaximumSize(QtCore.QSize(800, 500))
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        self.label = QtGui.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(350, 110, 191, 21))
        font = QtGui.QFont()
        font.setPointSize(12)
        font.setBold(True)
        font.setWeight(75)
        self.label.setFont(font)
        self.label.setObjectName(_fromUtf8("label"))
        self.label_5 = QtGui.QLabel(self.centralwidget)
        self.label_5.setGeometry(QtCore.QRect(10, 10, 781, 91))
        self.label_5.setText(_fromUtf8(""))
        self.label_5.setPixmap(QtGui.QPixmap(_fromUtf8("logo.png")))
        self.label_5.setObjectName(_fromUtf8("label_5"))
        self.label_2 = QtGui.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(20, 170, 81, 21))
        font = QtGui.QFont()
        font.setPointSize(10)
        font.setBold(True)
        font.setWeight(75)
        self.label_2.setFont(font)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.label_3 = QtGui.QLabel(self.centralwidget)
        self.label_3.setGeometry(QtCore.QRect(20, 260, 81, 21))
        font = QtGui.QFont()
        font.setPointSize(10)
        font.setBold(True)
        font.setWeight(75)
        self.label_3.setFont(font)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.label_4 = QtGui.QLabel(self.centralwidget)
        self.label_4.setGeometry(QtCore.QRect(410, 260, 81, 21))
        font = QtGui.QFont()
        font.setPointSize(10)
        font.setBold(True)
        font.setWeight(75)
        self.label_4.setFont(font)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.label_6 = QtGui.QLabel(self.centralwidget)
        self.label_6.setGeometry(QtCore.QRect(20, 330, 81, 21))
        font = QtGui.QFont()
        font.setPointSize(10)
        font.setBold(True)
        font.setWeight(75)
        self.label_6.setFont(font)
        self.label_6.setObjectName(_fromUtf8("label_6"))
        self.label_7 = QtGui.QLabel(self.centralwidget)
        self.label_7.setGeometry(QtCore.QRect(410, 330, 81, 21))
        font = QtGui.QFont()
        font.setPointSize(10)
        font.setBold(True)
        font.setWeight(75)
        self.label_7.setFont(font)
        self.label_7.setObjectName(_fromUtf8("label_7"))
        self.label_8 = QtGui.QLabel(self.centralwidget)
        self.label_8.setGeometry(QtCore.QRect(20, 400, 81, 21))
        font = QtGui.QFont()
        font.setPointSize(10)
        font.setBold(True)
        font.setWeight(75)
        self.label_8.setFont(font)
        self.label_8.setObjectName(_fromUtf8("label_8"))
        self.label_9 = QtGui.QLabel(self.centralwidget)
        self.label_9.setGeometry(QtCore.QRect(410, 400, 101, 21))
        font = QtGui.QFont()
        font.setPointSize(10)
        font.setBold(True)
        font.setWeight(75)
        self.label_9.setFont(font)
        self.label_9.setObjectName(_fromUtf8("label_9"))
        self.pushButton = QtGui.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(370, 430, 75, 23))
        self.pushButton.setObjectName(_fromUtf8("pushButton"))
        ###########################save ques##################################
        self.pushButton.clicked.connect(self.entques)
        ######################################################################
        self.textEdit = QtGui.QTextEdit(self.centralwidget)
        self.textEdit.setGeometry(QtCore.QRect(90, 150, 681, 81))
        self.textEdit.setObjectName(_fromUtf8("textEdit"))
        self.textEdit_2 = QtGui.QTextEdit(self.centralwidget)
        self.textEdit_2.setGeometry(QtCore.QRect(90, 250, 301, 51))
        self.textEdit_2.setObjectName(_fromUtf8("textEdit_2"))
        self.textEdit_3 = QtGui.QTextEdit(self.centralwidget)
        self.textEdit_3.setGeometry(QtCore.QRect(470, 250, 301, 51))
        self.textEdit_3.setObjectName(_fromUtf8("textEdit_3"))
        self.textEdit_4 = QtGui.QTextEdit(self.centralwidget)
        self.textEdit_4.setGeometry(QtCore.QRect(90, 320, 301, 51))
        self.textEdit_4.setObjectName(_fromUtf8("textEdit_4"))
        self.textEdit_5 = QtGui.QTextEdit(self.centralwidget)
        self.textEdit_5.setGeometry(QtCore.QRect(470, 320, 301, 51))
        self.textEdit_5.setObjectName(_fromUtf8("textEdit_5"))
        self.spinBox = QtGui.QSpinBox(self.centralwidget)
        self.spinBox.setGeometry(QtCore.QRect(90, 400, 42, 22))
        self.spinBox.setObjectName(_fromUtf8("spinBox"))
        self.spinBox_2 = QtGui.QSpinBox(self.centralwidget)
        self.spinBox_2.setGeometry(QtCore.QRect(530, 400, 42, 22))
        self.spinBox_2.setObjectName(_fromUtf8("spinBox_2"))
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        # Generated: assigns every user-visible string.
        MainWindow.setWindowTitle(_translate("MainWindow", "Vision - Enter Questions", None))
        self.label.setText(_translate("MainWindow", "Enter Questions", None))
        self.label_2.setText(_translate("MainWindow", "Question", None))
        self.label_3.setText(_translate("MainWindow", "Option 1", None))
        self.label_4.setText(_translate("MainWindow", "Option 2", None))
        self.label_6.setText(_translate("MainWindow", "Option 3", None))
        self.label_7.setText(_translate("MainWindow", "Option 4", None))
        self.label_8.setText(_translate("MainWindow", "Marks", None))
        self.label_9.setText(_translate("MainWindow", "Correct Option", None))
        self.pushButton.setText(_translate("MainWindow", "Next", None))
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    MainWindow = QtGui.QMainWindow()
    # Bug fix: this module defines Ui_MainWindowEques / setupUiEques; the
    # generated stub referenced a non-existent Ui_MainWindow / setupUi and
    # would crash with NameError when run directly.
    ui = Ui_MainWindowEques()
    ui.setupUiEques(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import statsmodels.api as sm
import warnings
warnings.filterwarnings('ignore')
# OLS regression of GPA on SAT plus an attendance dummy variable.
df = pd.read_csv('../csv_files/1.03. Dummies.csv')
# print(df.drop(['Attendance'], axis=1))
f = lambda x: 1 if x == 'Yes' else 0  # unused alternative to the map() below
# df['Attendance_dummy'] = df['Attendance'].apply(f)
df['Attendance_dummy'] = df['Attendance'].map({'Yes': 1, 'No': 0})
print(df)
# print(df.describe())
print(df.columns)
y = df.loc[:, 'GPA']
x1 = df.loc[:, ['SAT', 'Attendance_dummy']]
x = sm.add_constant(x1)
results = sm.OLS(y, x).fit().summary()
print(results)
# Colour points by attendance and overlay one fitted line per group.
plt.scatter(df['SAT'], y, c=df['Attendance_dummy'], cmap='jet')
scatter = sns.regplot(df['SAT'], y, scatter=False)
print([scatter.get_children()[3]])
# print(plt.gca().patches)
# NOTE(review): intercepts/slope below are hard-coded — presumably copied
# from the OLS summary printed above; re-derive if the data changes.
yhat_yes = 0.8665 + 0.0014 * df['SAT']
yhat_no = 0.6439 + 0.0014 * df['SAT']
plt.plot(df['SAT'], yhat_yes, c='green')
plt.plot(df['SAT'], yhat_no, c='red')
plt.xlabel('SAT')
plt.ylabel('GPA')
plt.show()
|
# Square-wave Fourier-series experiment: build the spectrum of a square
# wave, invert it with an IFFT, then randomise the phases and compare the
# value histograms of the two signals.
# NOTE(review): this script uses np, plt and dumb_plt without importing
# them — it appears to rely on an interactive/execfile session that
# already defines them; `reload` is also Python-2 only.
import p44_random_tools as RT
reload(RT)
Nx = 100
k = np.arange(Nx,dtype='complex')
x = np.arange(0,1,1./Nx)
Ak = np.zeros(Nx,dtype='complex')
odds = k.real%2 == 1
# -Nx keeps the normalization right, so F(x) = 1,0
# 1/np.pi is for the actual series.
# 1j makes it a sign series.
# Ak[0]=50 keeps the zero-point right (otherwise it's +- 1/2)
Ak[odds] = -Nx/np.pi/k[odds]*1j #1/2 + 2./np.pi/k[odds]
Ak[0] += 50
Ak = RT.symmetric(Ak)  # presumably enforces Hermitian symmetry so the IFFT is real — TODO confirm
ax = np.fft.ifft(Ak)
#tp = np.abs(ax)
plt.clf()
dumb_plt(plt,None,ax.real,'x','square','p44_square_test.pdf',c='r')
dumb_plt(plt,None,ax.imag,'x','square','p44_square_test.pdf',c='g') #should be zero.
# Same spectrum with randomised phases.
Ak_phase = Ak*np.exp((np.pi/2+np.random.random(Nx))*1j)
Ak_phase = RT.symmetric(Ak_phase)
ax_phase = np.fft.ifft(Ak_phase)
plt.clf()
dumb_plt(plt,None,ax_phase.real,'x','square','p44_square_test_phase.pdf',c='r')
dumb_plt(plt,None,ax_phase.imag,'x','square','p44_square_test_phase.pdf',c='g') #should be zero.
# Histogram comparison of the original vs phase-randomised signal.
plt.clf()
plt.hist(ax.real,histtype='step',color='r')
plt.hist(ax_phase.real,histtype='step',color='g')
outname='p44_square_test_hist.pdf'
plt.savefig(outname)
print(outname)
plt.clf()
dumb_plt(plt,None,np.abs(Ak_phase[odds]),'k','Ampl(r)angle(g)','p44_square_test_phase_ampl.pdf',c='r')
dumb_plt(plt,None,np.angle(Ak_phase[odds]),'k','Ampl(r)angle(g)','p44_square_test_phase_ampl.pdf',c='g')
|
from time import sleep
def analisar(*val):
    """Print every value received, then report how many there were and
    which was the largest.  With no arguments the "largest" stays the
    placeholder text 'nenhum'."""
    maior = 'nenhum'  # placeholder shown only when no values are given
    print('~'*40)
    print('Analisando os valores passados...')
    primeiro = True
    for valor in val:
        print(valor, end=' ')
        # The first value always becomes the current maximum, so the
        # string placeholder is never compared against a number.
        if primeiro or valor > maior:
            maior = valor
        primeiro = False
        sleep(0.3)
    print(f'- Foram informados {len(val)} valores ao todo')
    print(f'O maior valor informado foi {maior}')
    input('Aperte enter para continuar')
# Main
# Exercise the routine with varying numbers of arguments, including none.
analisar(2, 9, 4, 5, 7, 1)
analisar(4, 7, 0)
analisar(1, 2)
analisar(6)
analisar()
|
#!/usr/bin/env python
from setuptools import setup
# Let's make sure we have the correct modules installed before continuing.
# OpenSSL is required.
def readme():
    """Return the contents of README.rst for use as the long description."""
    with open('README.rst') as readme_file:
        return readme_file.read()
# Package metadata for pykeytool.
setup(
    name="pykeytool",
    version="0.2",
    description="pykeytool - IoT SC PKI Tool for Generating Keys and submitting csr's to IoT SC",
    long_description=readme(),
    url='http://github.com/wilsonstuart/pykeytool',
    packages=['pykeytool', ],
    zip_safe=False,
    # NOTE(review): 'argparse' has been in the standard library since
    # Python 2.7/3.2 and could be dropped from install_requires.
    install_requires=['pyopenssl', 'argparse', 'pyyaml', 'Crypto', 'PyKCS11', 'cryptography', 'asn1crypto', 'requests', 'pycryptodomex', 'cython'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Programming Language :: Python :: 3.6',
        # Bug fix: a missing trailing comma used to merge the next two
        # entries into the single classifier 'Environment :: ConsoleTopic :: PKI'.
        'Environment :: Console',
        'Topic :: PKI',
    ],
    scripts=['scripts/generatekeyandcsr', 'scripts/generatepkcs12', 'scripts/generatecert'],
    data_files=[('config', ['config/config.ini', 'config/logging.ini']),
                ('raapi', []),
                ]
)
|
import datetime
import logging
import time
from urllib.parse import quote
import uuid
import requests
from django.core.cache import cache
from django.shortcuts import render, redirect
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect
from django.utils.encoding import force_text
from django.utils.http import urlsafe_base64_decode
from django.contrib.auth import login, logout, get_user_model, authenticate
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.urls import reverse
from django.views import View
from accounts.token_creator import TokenGenerator
from .models import User
from .forms import EditUserForm, UserRegistrationForm, UserAuthForm, UserPasswordSetupForm
from .tasks import send_confirmation_email
@login_required
def personal_information(request):
    """Render the signed-in user's profile details page."""
    return render(request, 'accounts/personal_information.html',
                  {'user': request.user})
@login_required
def edit_personal_information(request):
    """Edit the signed-in user's profile.

    If the email address changed during the update, a fresh confirmation
    email is sent and the user is warned to re-confirm it.
    """
    user = request.user
    # Capture the address *before* form.save() mutates the user instance,
    # so an email change can be detected afterwards.
    email = user.email
    if request.method == 'POST':
        form = EditUserForm(instance=user, data=request.POST, files=request.FILES)
        if form.is_valid():
            form.save()
            if email == user.email:
                messages.success(request, "Information were successfully updated")
            else:
                # send_confirmation_email.delay(user.id)
                send_confirmation_email(user.id)
                messages.warning(request, "Information were successfully updated. Please confirm your email address "
                                          "to complete your registration. It's easy "
                                          "- just check your email and click on the confirmation link.")
            return redirect(reverse('personal_information'))
    else:
        form = EditUserForm(instance=user)
    context = {
        'form': form
    }
    return render(request, 'accounts/edit_personal_information.html', context)
def login_view(request):
    """Show the combined login/registration page and log users in.

    Authenticated users are sent straight to the homepage; the ``next``
    parameter is only inspected to detect redirect loops — it is not
    followed after a successful login.  (Removed the unused local
    ``do_redirect`` left over from an earlier revision.)
    """
    redirect_to = request.POST.get('next', request.GET.get('next', ''))
    if request.user.is_authenticated:
        if redirect_to == request.path:
            raise ValueError('Redirection loop for authenticated user detected.')
        return redirect(reverse('homepage'))
    elif request.method == 'POST':
        login_form = UserAuthForm(request, data=request.POST)
        if login_form.is_valid():
            login(request, login_form.get_user())
            return redirect(reverse('homepage'))
        # Invalid credentials: fall through and re-render the bound form.
    else:
        login_form = UserAuthForm(request)
    register_form = UserRegistrationForm()
    context = {
        'register_form': register_form,
        'login_form': login_form
    }
    return render(request, 'accounts/login.html', context)
def register(request):
    """Create a new account from the registration form and log the user in.

    On success the user is logged in immediately, a confirmation email is
    sent, and they are redirected to the homepage with a warning to
    confirm their address.  (Removed a redundant function-level
    ``from .forms import UserRegistrationForm`` — the form is already
    imported at module level.)
    """
    if request.user.is_authenticated:
        return redirect(reverse('homepage'))
    if request.method == 'POST':
        register_form = UserRegistrationForm(request.POST)
        if register_form.is_valid():
            user = register_form.save()
            # Set the backend explicitly so login() accepts the fresh
            # instance without re-authenticating.
            user.backend = 'django.contrib.auth.backends.ModelBackend'
            login(request, user)
            # send_confirmation_email.delay(user.id)
            send_confirmation_email(user.id)
            messages.warning(request, "Thanks for joining Smart Contract! Please confirm your email address to complete your "
                                      "registration. It's easy - just check your email and click on the confirmation "
                                      "link.")
            return redirect(reverse('homepage'))
    else:
        register_form = UserRegistrationForm()
    login_form = UserAuthForm(request)
    context = {
        'register_form': register_form,
        'login_form': login_form
    }
    return render(request, 'accounts/login.html', context)
@login_required
def create_password(request):
    """Let the signed-in user set a password (e.g. after a social signup)."""
    if request.method == 'POST':
        form = UserPasswordSetupForm(request.POST)
        if form.is_valid():
            account = User.objects.get(pk=request.user.id)
            account.set_password(form.cleaned_data['password1'])
            messages.success(request, "Password was successfully set. Please, log in again")
            account.save()
            return redirect(reverse('personal_information'))
        # Invalid input: fall through and re-render the bound form.
    else:
        form = UserPasswordSetupForm()
    return render(request, 'accounts/password_create_form.html', {'form': form})
def logout_view(request):
    """Log the user out and send them to ``next`` or the configured default."""
    destination = request.GET.get('next')
    logout(request)
    return redirect(destination or settings.LOGOUT_REDIRECT_URL)
def activate_view(request, uidb64, token):
    """Confirm a user's email address from an activation link.

    uidb64: urlsafe-base64-encoded user primary key from the email link.
    token: value produced by TokenGenerator for that user.
    """
    UserModel = get_user_model()
    try:
        uid = force_text(urlsafe_base64_decode(uidb64))
        user = UserModel.objects.get(pk=uid)
    except(TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
        # Malformed uid or unknown user: treated the same as a bad token.
        user = None
    if user is not None and TokenGenerator().check_token(user, token):
        user.is_verified = True
        user.save()
        messages.success(request, "Thank you for confirming your email!")
        # NOTE(review): other views here redirect to 'homepage'; confirm
        # that 'index' is the intended target.
        return redirect(reverse('index'))
    return HttpResponse('Activation link is invalid!')
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Creates the ``message`` model linking two GlideProfiles.

    Generated migration; the ``b''`` related_name / verbose_name values
    are bytestring artefacts of running makemigrations under Python 2.
    """

    dependencies = [
        ('profiles', '0012_auto_20141030_2158'),
    ]

    operations = [
        migrations.CreateModel(
            name='message',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('sent_at', models.DateTimeField(auto_now_add=True)),
                # NOTE(review): NullBooleanField with default=False — a plain
                # BooleanField may have been intended; kept as generated.
                ('seen', models.NullBooleanField(default=False)),
                ('recipient', models.ForeignKey(related_name=b'recieved_messages', verbose_name=b'Reciever', to='profiles.GlideProfile')),
                ('sender', models.ForeignKey(related_name=b'sent_messages', verbose_name=b'Sender', to='profiles.GlideProfile')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
|
import tarfile
import os
# Extract every .tar.gz archive in the current directory into a sibling
# "<archive>_files" directory (mirrors the commented un_tar reference
# implementation below).
for filename in os.listdir():
    if filename.endswith(".tar.gz"):
        tar = tarfile.open(filename)
        names = tar.getnames()
        # Bug fix: the original referenced an undefined ``file_name``
        # (NameError) and passed the whole name list to ``tar.extract``;
        # create the destination once and extract each member into it.
        dest = filename + "_files"
        if not os.path.isdir(dest):
            os.mkdir(dest)
        for name in names:
            tar.extract(name, dest)
        tar.close()
# for root,dir,files in os.walk('/path/to/dir/'):
# for file in files:
# fullpath=os.path.join(root,file)
# tar.add(fullpath,arcname=file)
# tar.close()
# t = tarfile.open("/path/to/your.tar", "r:")
# t.extractall(path = '/path/to/extractdir/')
# t.close()
# def un_tar(file_name):
# """untar zip file"""
# tar = tarfile.open(file_name)
# names = tar.getnames()
# # 后面新建文件夹,先判断是否存在.
# if os.path.isdir(file_name + "_files"):
# pass
# else:
# os.mkdir(file_name + "_files")
# #由于解压后是许多文件,预先建立同名文件夹
# for name in names:
# tar.extract(name, file_name + "_files/")
# tar.close()
# # print(os.path.join(directory, filename))
|
#!/usr/bin/env python3
import sys
import random
VOCABFILE = 'hogwarts-houses-vocab.tsv'

# Map vocabulary URIs to house names from the two-column TSV file.
houses = {}
with open(VOCABFILE) as vocab:
    for row in vocab:
        uri, name = row.strip().split('\t')
        houses[uri] = name

# Collect "<student name>\t<house uri>" lines from each house's file
# (e.g. gryffindor.txt for the house named Gryffindor).
namedata = []
for uri, house_name in houses.items():
    with open(house_name.lower() + ".txt") as names:
        namedata.extend("{}\t{}".format(name.strip(), uri) for name in names)

# Emit the labelled names in random order.
random.shuffle(namedata)
for nameline in namedata:
    print(nameline)
|
import selenium.webdriver as webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
class taobao_infos:
    """Drives a Chrome instance to log in to Taobao through Weibo.

    Relies on the module-level globals ``weibo_user`` / ``weibo_pwd``
    being set before ``login()`` is called.
    """
    # Object initialisation
    def __init__(self):
        self.url = "https://login.taobao.com/member/login.jhtml"
        options = webdriver.ChromeOptions()
        options.add_experimental_option("prefs", {"profile.managed_default_content_settings.images":2})# do not load images, to speed up page access
        options.add_experimental_option("excludeSwitches", ['enable-automation'])# enable developer mode so sites cannot detect that Selenium is in use
        chrome_driver = r'/home/huizi/chromedriver'
        self.browser = webdriver.Chrome(executable_path=chrome_driver,options=options)
        self.wait = WebDriverWait(self.browser, 10)
    # Log in to Taobao via the Weibo login option
    def login(self):
        self.browser.get(self.url)
        print(self.browser.page_source)
        # filePath = r'/home/huizi/文档/weibo.html'
        # file = open(filePath,'w')
        # file.write(self.browser.page_source)
        # file.close()
        # Click the "log in with Weibo" button, then fill in the
        # credentials and submit.
        weibo_login = self.wait.until(EC.presence_of_element_located((
            By.CSS_SELECTOR, '.weibo-login')
        ))
        weibo_login.click()
        weibo_username = self.wait.until(EC.presence_of_element_located(
            (By.NAME, 'username')
        ))
        weibo_username.send_keys(weibo_user)
        weibo_password = self.wait.until(EC.presence_of_element_located(
            (By.NAME, 'password')
        ))
        weibo_password.send_keys(weibo_pwd)
        weibo_success = self.wait.until(EC.presence_of_element_located(
            (By.CSS_SELECTOR, '.W_btn_g>span')
        ))
        weibo_success.click()
# NOTE: Taobao's account-security checks block this automated login, so
# no further steps can be performed after this point.
if __name__ == '__main__':
    # WARNING(review): real-looking credentials are hard-coded in source
    # control; move them to environment variables or a config file.
    weibo_user = '15668095291'
    weibo_pwd = 'youzi123'
    a = taobao_infos()
    a.login()
|
#!/usr/bin/python
import copy
import sys
sys.setrecursionlimit(1000000)
def check(word, arrs, count):
    """Count how many complete copies of ``word`` the letter counts in
    ``arrs`` can supply, starting from ``count``.

    Letters of ``word`` missing from the caller's dict are inserted with
    a count of zero as a side effect (the calling code relies on this).
    Implemented iteratively instead of by deep recursion.
    """
    remaining = copy.copy(arrs)
    while True:
        for letter in word:
            if letter not in arrs:
                arrs[letter] = 0
                remaining[letter] = 0
            if remaining[letter] == 0:
                # Cannot assemble another copy of the word.
                return count
            remaining[letter] -= 1
        arrs = copy.copy(remaining)
        count += 1
# Reconstruct the digits of a number from the shuffled letters of its
# spelled-out English digit names (Python 2 script).  Digits appear to be
# peeled off greedily in an order chosen so each name is identifiable
# among those not yet removed (ZERO first, then TWO, FOUR, SIX, EIGHT,
# THREE, FIVE, ONE, SEVEN, NINE) — TODO confirm against the problem spec.
f = open('sample.txt', 'r')
tc = int(f.readline())
#print tc
for index in range(tc):
    result = ""
    strs = f.readline().rstrip()
    # Build a letter-frequency table for this test case.
    arr = list(strs)
    arrs = {}
    for c in arr:
        if c not in arrs:
            arrs[c] = 0
        arrs[c] += 1
    cnt = check("ZERO", arrs, 0)
    for k in range(cnt):
        result += "0"
    # NOTE(review): only ONE copy of the word's letters is removed here
    # even when cnt > 1 (same pattern for every digit below) — verify
    # this matches the expected input format.
    for c in list("ZERO"):
        if c in arrs and arrs[c] > 0:
            arrs[c] = arrs[c] -1
    cnt = check("TWO", arrs, 0)
    for k in range(cnt):
        result += "2"
    for c in list("TWO"):
        if c in arrs and arrs[c] > 0:
            arrs[c] = arrs[c] -1
    cnt = check("FOUR", arrs, 0)
    for k in range(cnt):
        result += "4"
    for c in list("FOUR"):
        if c in arrs and arrs[c] > 0:
            arrs[c] = arrs[c] -1
    cnt = check("SIX", arrs, 0)
    for k in range(cnt):
        result += "6"
    for c in list("SIX"):
        if c in arrs and arrs[c] > 0:
            arrs[c] = arrs[c] -1
    cnt = check("EIGHT", arrs, 0)
    for k in range(cnt):
        result += "8"
    for c in list("EIGHT"):
        if c in arrs and arrs[c] > 0:
            arrs[c] = arrs[c] -1
    cnt = check("THREE", arrs, 0)
    for k in range(cnt):
        result += "3"
    for c in list("THREE"):
        if c in arrs and arrs[c] > 0:
            arrs[c] = arrs[c] -1
    cnt = check("FIVE", arrs, 0)
    for k in range(cnt):
        result += "5"
    for c in list("FIVE"):
        if c in arrs and arrs[c] > 0:
            arrs[c] = arrs[c] -1
    cnt = check("ONE", arrs, 0)
    for k in range(cnt):
        result += "1"
    for c in list("ONE"):
        if c in arrs and arrs[c] > 0:
            arrs[c] = arrs[c] -1
    cnt = check("SEVEN", arrs, 0)
    for k in range(cnt):
        result += "7"
    for c in list("SEVEN"):
        if c in arrs and arrs[c] > 0:
            arrs[c] = arrs[c] -1
    cnt = check("NINE", arrs, 0)
    for k in range(cnt):
        result += "9"
    for c in list("NINE"):
        if c in arrs and arrs[c] > 0:
            arrs[c] = arrs[c] -1
    # Emit the recovered digits in ascending order.
    res = list(result)
    res.sort()
    result = ''.join(res)
    print "CASE #" + str(index+1) + ": " + result
|
# Generated by Django 2.2 on 2019-04-10 12:41
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
    """Initial schema for the accounts app.

    Creates User, District, Parish, Session, Village, Subcounty, Report,
    Officer and Farmer models (generated by makemigrations).
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('email', models.EmailField(max_length=255, unique=True)),
                ('active', models.BooleanField(default=True)),
                ('officer', models.BooleanField(default=False)),
                ('admin', models.BooleanField(default=False)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='District',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Parish',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Session',
            fields=[
                # NOTE(review): choices on an AutoField primary key have no
                # effect on generated values; kept as produced by the model.
                ('session_id', models.AutoField(choices=[('2018A', '2018A'), ('2018B', '2018B'), ('2018C', '2018C')], primary_key=True, serialize=False)),
                ('area', models.IntegerField(verbose_name='eg 2ac')),
                ('harvest', models.IntegerField(verbose_name='eg 5000kg')),
            ],
        ),
        migrations.CreateModel(
            name='Village',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='village name', max_length=40)),
                ('parish', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Parish')),
            ],
        ),
        migrations.CreateModel(
            name='Subcounty',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('district', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='accounts.District')),
            ],
        ),
        migrations.CreateModel(
            name='Report',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): both choices share the stored value '2.0 AC',
                # so '1-5 AC' can never be distinguished — fix in the model
                # and generate a follow-up migration rather than editing here.
                ('farm_area', models.CharField(choices=[('2.0 AC', '1-5 AC'), ('2.0 AC', '2.0 AC')], max_length=20)),
                ('rice_type', models.CharField(choices=[('K', 'Kaiso'), ('S', 'Super'), ('T', 'Tilda')], max_length=1)),
                ('sowing_type', models.CharField(max_length=100)),
                ('planting_type', models.CharField(max_length=100)),
                ('levelling', models.CharField(choices=[('B', 'Bad'), ('F', 'Fair'), ('G', 'Good')], max_length=1)),
                ('rigde', models.CharField(choices=[('B', 'Bad'), ('F', 'Fair'), ('G', 'Good')], max_length=1)),
                ('watercourse_state', models.CharField(choices=[('B', 'Bad'), ('F', 'Fair'), ('G', 'Good')], max_length=1)),
                ('fertilizer', models.BooleanField(choices=[(True, 'Yes'), (False, 'No')])),
                ('fertilizer_1_type', models.CharField(max_length=80)),
                ('fertilizer_1_amount', models.IntegerField()),
                ('fertilizer_2_type', models.CharField(max_length=80)),
                ('fertilizer_2_amount', models.IntegerField()),
                ('weed_condition', models.CharField(choices=[('B', 'Bad'), ('F', 'Fair'), ('G', 'Good')], max_length=1)),
                ('harvest_date', models.DateField()),
                ('session_id', models.ForeignKey(choices=[('2018A', '2018A'), ('2018B', '2018B'), ('2018C', '2018C')], on_delete=django.db.models.deletion.CASCADE, to='accounts.Session')),
            ],
        ),
        migrations.AddField(
            model_name='parish',
            name='sub_county',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='accounts.Subcounty'),
        ),
        migrations.CreateModel(
            name='Officer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='Enter your full name', max_length=40)),
                ('login_id', models.CharField(help_text='Unique ID for every officer', max_length=255, unique=True)),
                # WARNING(review): an 8-char CharField named 'password'
                # suggests plaintext storage — should use Django's hashed
                # auth instead; flagged, not changed, in this migration.
                ('password', models.CharField(max_length=8)),
                ('phone_number', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region=None, unique=True)),
                ('district_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.District')),
                ('subcounty_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Subcounty')),
            ],
        ),
        migrations.CreateModel(
            name='Farmer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='enter your full name', max_length=40)),
                ('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=1)),
                ('birth_year', models.DateField(help_text='date of birth')),
                ('marriage', models.CharField(blank=True, choices=[('M', 'Married'), ('S', 'Single'), ('D', 'Divorced')], max_length=1)),
                ('language', models.CharField(blank=True, choices=[('LUGANDA', 'Luganda'), ('LUSOGA', 'Lusoga'), ('ACHOLI', 'Acholi'), ('RUNYANKOLE', 'Runyankole')], max_length=50)),
                ('phone_number', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region=None, unique=True)),
                ('photo', models.FileField(upload_to='images/')),
                ('community_status', models.CharField(blank=True, choices=[('MOBILIZER', 'Mobilizer'), ('CHAIRMAN', 'Chairman'), ('OTHER_ROLES', 'Other_roles')], max_length=60)),
                ('instructor_possibility', models.BooleanField(choices=[(True, 'Yes'), (False, 'No')])),
                ('crop_type', models.CharField(choices=[('POTATO', 'Potato'), ('MAIZE', 'Maize'), ('BEANS', 'Bean'), ('RICE', 'Rice')], max_length=40)),
                ('district_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.District')),
                ('parish', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Parish')),
                ('subcounty_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Subcounty')),
                ('village', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Village')),
            ],
        ),
    ]
|
from abc import ABC, abstractmethod
class SpikeElement(ABC):
    """Abstract base for pipeline elements wrapping a SpiF class.

    Subclasses must provide the discovery/naming static methods, a ``run``
    implementation, and the two display-metadata properties. The concrete
    part of the class just stores the SpiF class and a mutable parameter
    list.
    """

    def __init__(self, spif_class):
        self._spif_class = spif_class
        self._param_list = None

    # --- abstract interface -------------------------------------------

    @staticmethod
    @abstractmethod
    def get_installed_spif_cls_list():
        """Return the list of installed SpiF classes."""

    @staticmethod
    @abstractmethod
    def get_display_name_from_spif_class(spif_class):
        """Return a human-readable name for *spif_class*."""

    @abstractmethod
    def run(self, payload, next_elem):
        """Process *payload* and pass the result on to *next_elem*."""

    @property
    @abstractmethod
    def display_name(self):
        """Name shown for this element in the UI."""

    @property
    @abstractmethod
    def display_icon(self):
        """Icon shown for this element in the UI."""

    # --- concrete accessors -------------------------------------------

    @property
    def spif_class(self):
        return self._spif_class

    @property
    def param_list(self):
        return self._param_list

    @param_list.setter
    def param_list(self, param_list):
        self._param_list = param_list
|
#!/usr/bin/python
# coding:utf-8
import os
import cv2
import random
import numpy as np
from ctools.basic_func import get_all_files
# Crop fixed-size patches around screw-hole centroids from inspection images.
# For each side (left/right) and each of 5 board areas: read the area mask,
# find screw-hole blobs, then crop a 128x128 patch at each blob centroid from
# up to 50 randomly chosen images and save the patches per-ROI.
for j, left_right in enumerate(["left", "right"]):
    for area in range(5):
        image_dir = "/home/cobot/Desktop/temp/" + left_right + ""
        output_dir = "/home/cobot/Desktop/temp"
        # Collect ROI top-left corners from the screw mask for this side/area.
        roi_length = 128
        rois = []
        mask = cv2.imread(
            "/home/cobot/caid2.0/data/deploy/mobile_phone_screw/templates/miss_screw/masks_" + left_right + "/mask" + str(
                area) + ".bmp", 0)
        bool_img = cv2.threshold(mask, 254, 255, cv2.THRESH_BINARY)
        _, labels, stats, centroids = cv2.connectedComponentsWithStatsWithAlgorithm(bool_img[1], 8, cv2.CV_16U,
                                                                                    cv2.CCL_WU)
        for centers, stat in zip(centroids, stats):
            # Skip the huge background component; keep the screw-hole blobs.
            if stat[4] < 500000:
                # centroid is (x, y); flip to (row, col) and centre the ROI
                rois = rois + [centers[::-1] - roi_length / 2]
        files = get_all_files(image_dir)
        random.shuffle(files)
        files = files[0:50]
        for k, file_name in enumerate(files):
            print(str(k) + ":" + file_name)
            # Slice out the 1520-wide column band belonging to this area.
            image = cv2.imread(os.path.join(image_dir, file_name))[:, (4 - area) * 1550:((4 - area) * 1550 + 1520), :]
            for k, roi in enumerate(rois):
                # np.int was removed in NumPy 1.24; builtin int is equivalent
                roi = roi.astype(int)
                patch = image[roi[0]:roi[0] + roi_length, roi[1]:roi[1] + roi_length]
                full_path = os.path.join(output_dir,
                                         str(j) + str(area) + "_" + str(k) + os.sep + file_name.split(os.sep)[-1])
                # makedirs creates missing parent directories as well
                if not os.path.exists(os.path.dirname(full_path)):
                    os.makedirs(os.path.dirname(full_path))
                cv2.imwrite(full_path, patch)
|
#!/usr/bin/env python
# coding: utf-8
# In[17]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Jupyter-export artifact: enable inline rendering (only valid inside IPython)
get_ipython().run_line_magic('matplotlib', 'inline')
# In[25]:
# Load the student performance dataset; expects student-mat.csv in the cwd
df = pd.read_csv('student-mat.csv')
df.head()
# In[20]:
# Regression plot: alcohol consumption vs age, split by sex
sns.set(style = 'white')
g = sns.lmplot(y='Walc', x='age', hue='sex', data = df)
g.set_ylabels('Weekly Alcohol Consumption')
g.set_xlabels('Age')
plt.show()
# In[27]:
# Bar plot of the same relationship
sns.set(style = 'dark')
g = sns.catplot(x = 'age', y = 'Walc', hue = 'sex', data = df, kind= 'bar')
plt.show()
# In[32]:
# Box plot with offset, trimmed spines
sns.set(style = 'darkgrid')
g = sns.boxplot(x = 'age', y = 'Walc', hue = 'sex', data = df)
sns.despine(offset=10, trim=True)
plt.show()
# In[33]:
# Point plot with 95% confidence intervals.
# NOTE(review): 'kind' is a catplot parameter, not a pointplot one, and
# ci=/join= are deprecated in newer seaborn — confirm the intended version.
sns.set(style="whitegrid")
g = sns.pointplot(x="age", y="Walc", hue="sex", data=df,
                  kind="strip", palette="pastel",ci=95,dodge=True,join=False)
plt.show()
# In[ ]:
|
"""
Features
1. if title contain special
2. if title contain number (usually phone number)
3.
"""
from digoie.domain.docs.parser import doc_url
from digoie.domain.docs.parser import doc_title
from digoie.domain.docs.parser import doc_body
class Doc(object):
    """A crawled document (url, title, body) with content-parsing hooks."""

    def __init__(self, url, title, body):
        self.url = url
        self.title = title
        self.body = body
        # derive the processed content needed by later stages
        self.initialize()

    def initialize(self):
        """Parse the main content of the doc (currently only the title)."""
        doc_title.parse(self.title)

    def parse_title(self, title):
        """Placeholder for title-specific feature extraction."""
        pass

    def parse_body(self, body):
        """Placeholder for body-specific feature extraction."""
        pass
|
import uuid
from rest_framework import status
from rest_framework.decorators import api_view, parser_classes
from rest_framework.response import Response
from rest_framework.parsers import FormParser, MultiPartParser
from .models import products, attributes
from .serializers import productsSerializers, attributesSerializers
from django.core.cache import cache
@api_view(['GET'])
def overview(request):
    """Return a map of the available product API endpoints."""
    routes = {
        'List': 'products/list/',
        'Detail View': 'products/detail/<str:pk>/',
        'Create': 'products/create/',
        'Create From List': 'products/create_list/',
        'Update': 'products/update/<str:pk>/',
        'Delete': 'products/delete/<str:pk>/',
    }
    return Response(routes)
@api_view(['GET'])
def get(request):
    """List every product."""
    serializer = productsSerializers(products.objects.all(), many=True)
    return Response(serializer.data)
@api_view(['GET'])
def get_one(request, key):
    """Return a single product by primary key."""
    serializer = productsSerializers(products.objects.get(id=key), many=False)
    return Response(serializer.data)
@api_view(['POST'])
def post(request):
    """Create a product with a fresh UUID; the request body becomes its attributes."""
    created = products.if_not_exists().create(id=uuid.uuid4(), attributes=request.data)
    return Response(created, status=status.HTTP_201_CREATED)
@api_view(['POST'])
def post_list(request):
    """Create one product whose attributes map a fresh UUID to each posted item.

    Returns 204 with an explanatory message when the posted list is empty.
    """
    # Guard clause: reject an empty payload up front.
    if not request.data:
        # fixed typo in the response message ("Noy" -> "Not")
        return Response("Empty List Not Allowed", status=status.HTTP_204_NO_CONTENT)
    attrs = {str(uuid.uuid4()): data for data in request.data}
    sObj = products.if_not_exists().create(id=uuid.uuid4(), attributes=attrs)
    return Response(sObj, status=status.HTTP_201_CREATED)
@api_view(['POST'])
def update(request, key):
    """Replace the attributes of an existing product (LWT: fails if absent)."""
    updated = products.objects(id=key).if_exists().update(attributes=request.data)
    return Response(updated, status=status.HTTP_201_CREATED)
@api_view(['DELETE'])
def delete(request, key):
    """Delete a product by primary key."""
    products.objects.get(id=key).delete()
    # fixed typos in the response message ("succsesfully delete")
    return Response('Item successfully deleted!')
@api_view(['GET'])
def attributes_overview(request):
    """Return a map of the available attribute API endpoints."""
    routes = {
        'List': 'attributes/list/',
        'Detail View': 'attributes/detail/<str:pk>/',
        'Create': 'attributes/create/',
        'Update': 'attributes/update/<str:pk>/',
        'Delete': 'attributes/delete/<str:pk>/',
    }
    return Response(routes)
@api_view(['GET'])
def attributes_get(request):
    """List every attribute."""
    serializer = attributesSerializers(attributes.objects.all(), many=True)
    return Response(serializer.data)
@api_view(['GET'])
def attributes_get_one(request, key):
    """Return a single attribute by primary key (demonstrates a cache read)."""
    print(cache.get(key))  # demo: read the cached name for this key
    serializer = attributesSerializers(attributes.objects.get(id=key), many=False)
    return Response(serializer.data)
@api_view(['POST'])
@parser_classes([FormParser, MultiPartParser])
def attributes_post(request):
    """Create an attribute and prime the cache with its name."""
    created = attributes.if_not_exists().create(id=uuid.uuid4(), name=request.data['name'])
    cache.set(created.id, created.name)  # prime the cache
    return Response(created, status=status.HTTP_201_CREATED)
@api_view(['POST'])
@parser_classes([FormParser, MultiPartParser])
def attributes_update(request, key):
    """Update an attribute's name (LWT) and refresh the cache entry."""
    new_name = request.data['name']
    attributes.objects(id=key).if_exists().update(name=new_name)
    cache.set(key, new_name)  # keep the cache in step with the DB
    return Response('Updated', status=status.HTTP_202_ACCEPTED)
@api_view(['DELETE'])
def attributes_delete(request, key):
    """Delete an attribute by primary key."""
    attributes.objects.get(id=key).delete()
    # fixed typos in the response message ("Succsesfully Deleteted")
    return Response('Item Successfully Deleted!')
|
import chess
import encode
import copy
import util
import numpy as np
import scoring
# wrapper for chess board
class Game:
    """Wrapper around a ``chess.Board`` with lazily cached derived values.

    The caches (``key``, ``encoded``, ``valid_moves``, ``canon_val``,
    ``state``) are computed on first access and cleared by ``reset_params``
    whenever the position changes. ``state_map`` translates python-chess
    result strings to ints: 2 = in progress, 1 = white win, -1 = black win,
    0 = draw.
    """
    def __init__(self, board=None):
        # Wrap the supplied board, or start from the initial position.
        if board is None:
            self.board = chess.Board()
        else:
            self.board = board
        self.key = None
        self.encoded = None
        self.valid_moves = None
        self.canon_val = None
        self.state = None
        self.state_map = {'*': 2, '1-0': 1, '0-1': -1, '1/2-1/2': 0}
    def get_encoded(self):
        """Return (and cache) the encoded representation of the board."""
        if self.encoded is None:
            self.encoded = encode.encode_board(self.board)
        return self.encoded
    def get_key(self):
        """Return (and cache) a hashable bytes key for the position."""
        if self.key is None:
            self.key = self.get_encoded().tobytes()
        return self.key
    def get_valid_moves(self):
        """Return (and cache) the set of legal moves in UCI notation."""
        if self.valid_moves is None:
            self.valid_moves = set(map(lambda x: x.uci(), self.board.legal_moves))
        return self.valid_moves
    def get_full_moves(self):
        """Return the full-move counter of the underlying board."""
        return self.board.fullmove_number
    def get_mask(self):
        """Return the probability mask covering the current legal moves."""
        return encode.get_prob_mask(self.get_valid_moves())
    def get_canon_val(self):
        """Return (and cache) the static evaluation of the board."""
        if self.canon_val is None:
            self.canon_val = scoring.eval_board(self.board)
        return self.canon_val
    def reset_params(self):
        """Drop every cached value; called whenever the position changes."""
        self.encoded = None
        self.state = None
        self.valid_moves = None
        self.key = None
        self.canon_val = None
    def make_move(self, uci):
        """Play a move given in UCI notation."""
        self.reset_params()
        self.board.push_uci(uci)
    def make_san_move(self, san):
        """Play a move given in SAN notation."""
        self.reset_params()
        self.board.push_san(san)
    def coord_to_move(self, ind):
        """Translate a flat policy index into a UCI move string.

        *ind* indexes an (8, 8, 76) grid: (rank, file, move-plane). When the
        decoded direction's second component is numeric the move is a plain
        displacement; otherwise the trailing character is appended to the
        move (presumably a promotion piece letter — TODO confirm against
        ``encode.decode_dir``).
        """
        tup = np.unravel_index(ind, (8, 8, 76))
        starting = (tup[0], tup[1])
        starting_alg = encode.get_alg(starting)
        plane = tup[2]
        diff = encode.decode_dir(plane)
        if util.represents_int(diff[1]):
            final_char = ''
            diff1 = diff[0]
            diff2 = diff[1]
        else:
            # non-numeric component: advance one rank toward the mover's side
            final_char = diff[1]
            diff1 = 1 if self.board.turn else -1
            diff2 = diff[0]
        ending = (tup[0] + diff1, tup[1] + diff2)
        ending_alg = encode.get_alg(ending)
        return starting_alg + ending_alg + final_char
    def get_game_state(self, cd=True):
        """Return (and cache) the mapped game result; see ``state_map``."""
        if self.state is None:
            self.state = self.state_map[self.board.result(claim_draw=cd)]
        return self.state
    def copy_game(self):
        """Return a deep copy of this Game (board and caches included)."""
        return copy.deepcopy(self)
    def normalize(self):
        """Return a Game from the side-to-move perspective (mirror if black)."""
        if not self.board.turn:
            return Game(self.board.mirror())
        return self
    def move_and_normalize(self, uci):
        """Copy the game, play *uci*, and return the normalized copy."""
        game = self.copy_game()
        game.make_move(uci)
        game = game.normalize()
        return game
    def copy_and_normalize(self):
        """Return a normalized deep copy without making a move."""
        return self.copy_game().normalize()
    def early_rollout(self, value=None):
        """Map a static evaluation to a provisional rollout outcome.

        Returns 0 for near-equal positions, otherwise +/- the side to move.
        """
        if value is None:
            value = self.get_canon_val()
        player = 1 if self.board.turn else -1
        if abs(value) < 0.01:
            return 0
        elif value >= 0.01:
            return player
        else:
            return -player
    def check_resign(self, value=None, thresh=-0.8):
        """Return the presumed winner if the side to move should resign.

        Returns None when the evaluation is not below *thresh*.
        """
        if value is None:
            value = self.get_canon_val()
        player = 1 if self.board.turn else -1
        if value < thresh:
            return -player
        else:
            return None
#!/usr/bin/env python3
"""
Advent of Code 2017: Day #
"""
import os
from shared.readdayinput import readdayinput
def first_half(d):
    """First-half solver: walk the hex-grid steps and print progress counters.

    Prints the largest vert+horzr sum seen during the walk and the final
    axis counters (twice, as the original did); always returns None.
    """
    moves = {'n': ('vert', 1), 's': ('vert', -1),
             'ne': ('horzr', 1), 'sw': ('horzr', -1),
             'nw': ('horzl', 1), 'se': ('horzl', -1)}
    counts = {'vert': 0, 'horzr': 0, 'horzl': 0}
    away = 0
    for step in d.split(','):
        if step in moves:
            axis, delta = moves[step]
            counts[axis] += delta
        away = max(away, counts['vert'] + counts['horzr'])
    print(away)
    print("vert, horzr, horzl", counts['vert'], counts['horzr'], counts['horzl'])
    print("vert, horzr, horzl", counts['vert'], counts['horzr'], counts['horzl'])
    return None
def second_half(dayinput):
    """Second-half solver: not yet implemented, always returns None."""
    _ = dayinput.split('\n')
    return None
def app():
    """Load the day's input and print both puzzle answers."""
    dayinput = readdayinput()
    print(first_half(dayinput), second_half(dayinput))

if __name__ == "__main__":
    app()
|
from django.contrib import admin
from .models import User, Profile, Review, UserRecommendation
# Register your models here.
class UserAdmin(admin.ModelAdmin):
    # slug is derived automatically; show it but keep it immutable in admin
    readonly_fields = ('slug',)
class ProfileAdmin(admin.ModelAdmin):
    # profile ownership is fixed at creation time
    readonly_fields = ('user',)
class ReviewAdmin(admin.ModelAdmin):
    # fields = ('author',)
    readonly_fields = ('slug',)
# Hook the models into the Django admin with their customised options.
admin.site.register(User, UserAdmin)
admin.site.register(Profile, ProfileAdmin)
admin.site.register(Review, ReviewAdmin)
admin.site.register(UserRecommendation)
import os
import omise
def create_token():
    """Create an Omise card token using the public key from OMISE_PKEY.

    Uses Omise's test card number; returns the created Token object.
    NOTE(review): expiration_year 2022 is in the past — update before use.
    """
    omise.api_public = os.getenv("OMISE_PKEY")
    token = omise.Token.create(
        name="Somchai Prasert",
        number="4242424242424242",
        expiration_month=10,
        expiration_year=2022,
        # city="Bangkok",
        # postal_code="10320",
        # security_code=123,
    )
    return token
if __name__ == '__main__':
    # print() call form works under both Python 2 and 3; the bare
    # "print create_token()" statement is a SyntaxError on Python 3.
    print(create_token())
|
# Version 8.1.1
#
############################################################################
# OVERVIEW
############################################################################
# This file contains descriptions of the settings that you can use to
# configure the alert action for PagerDuty. The TA writes to the PagerDuty
# events API (v2). Details for values passed are described in
# https://developer.pagerduty.com/docs/events-api-v2/trigger-events/
# Note that the TA will use the values passed:
# first as tags to extract from the message
# then as static values
# and finally if neither yields a result it will fall back to the values
# defined in the alert_actions.conf
[pagerduty_events]
param.pd_url = <string>
* (Optional) The url called by the TA. To execute an update/resolve specify
...v2/change/enqueue
* Default: https://events.pagerduty.com/v2/enqueue
param.pd_key = <string>
* (Required) The integration key for either the service or the
* account wide value. Default behaviour is to create suppressed alert
* when the account wide key is used
* Default: pd_key
param.pd_dedup_key = <string>
* (Optional) Used to deduplicate on the PagerDuty side. Repeated
* alerts carrying this value will not create new incidents
* Default: pd_dedup_key
param.pd_source = <string>
* (Required) The object about which this alert is being raised
*
* Default: pd_source
param.pd_summary = <string>
* (Required) The short description of the problem, < 1024 chars
*
* Default: pd_summary
param.pd_severity = <string>
* (Required) One of the key PagerDuty values
* critical, error, warning or info
*
* Default: pd_severity
param.pd_link_text = <string>
* (Optional) Ordered list of names for the link hrefs supplied
* These will be displayed as hyperlinks in PagerDuty
* Must match the number of entries in pd_link_href
* Default: pd_link_text
param.pd_link_href = <string>
* (Optional) Ordered list of hyperlinks for this event
* See link_text
* Default: pd_link_href
param.pd_class = <string>
* (Optional) Class of the event
* Default: pd_class
param.pd_component = <string>
* (Optional) Component is the parameter, eg CPU on host X
* CPU is the component to the source 'host X'
* Default: pd_component
param.pd_group = <string>
* (Optional) Major grouping, ie application/service
* Default: pd_group
|
from configs import *
import speech_recognition as sr
from random import choice
import pyttsx3
import datetime
import sys
from time import sleep as wait
import webbrowser as wb
def intro():
    """Print the startup banner, speak the intro phrase, then start listening."""
    print('=============================================================================================')
    print('= version: ' + version, '          ' + name, 'assistant' ' made by' + creator, '          =')
    print('=============================================================================================')
    frase_intro = ('{} assistente, versão {} feito por {}'.format(name_to_talk, version, creator_to_talk) )
    say(frase_intro)
    start()
def restart():
    """Print a progress dot, pause briefly, and resume the listen loop."""
    print('.')
    wait(0.2)
    start()
def desligar():
    """Terminate the assistant process (equivalent to sys.exit())."""
    raise SystemExit
def reboot():
    """Pause briefly, then replay the intro and restart listening."""
    wait(0.2)
    intro()
def say(tosay):
    """Speak *tosay* aloud through a freshly initialised pyttsx3 engine."""
    tts = pyttsx3.init()
    tts.say(tosay)
    tts.runAndWait()
def start():
    """Main listen loop: capture a phrase, recognise it, dispatch a command.

    NOTE(review): speech recognition happens outside the try block, so
    recognition errors propagate uncaught; error handling restarts via
    recursion (restart() -> start()), growing the call stack over long
    sessions — TODO confirm both are acceptable.
    """
    while True:
        r = sr.Recognizer()
        with sr.Microphone() as fonte:
            print('ouvindo...')
            audio = r.listen(fonte)
            textc = r.recognize_google(audio, language='pt-br')
            text = textc.lower()
            print(text)
        try:
            engine = pyttsx3.init()
            # greeting: reacts to hearing its own name with a welcome phrase
            if text == str(name):
                msg_boas_vindas = choice(l_boas_vindas)
                say(msg_boas_vindas)
            # music: open the user's playlist (pre-defined in configs)
            elif 'playlist' in text:
                wb.open(playlist, new=2)
            # date: speak the current day
            elif 'dia' in text:
                print(dia)
                say(dia)
            # time: speak the current time
            elif 'horas' in text:
                print(hr)
                say(hr)
            # jokes: pick and tell a random joke
            elif 'piada' in text:
                joke = (choice(l_piadas))
                joke = choice(l_piadas)
                print (joke)
                say(joke)
            # shutdown: announce a countdown, then exit the process
            elif 'desligar' in text:
                desligando = str('desligando em 3, 2, 1')
                print (desligando)
                engine.say(desligando)
                engine.runAndWait()
                desligar()
            # restart: announce a countdown, then replay the intro
            elif 'reiniciar' in text:
                reiniciando = str('reiniciando em 3, 2, 1')
                print (reiniciando)
                engine.say(reiniciando)
                engine.runAndWait()
                reboot()
            # echo: speak back whatever follows the keyword 'fale'
            elif 'fale' in text:
                texto_falar = text.replace('fale', '')
                say(texto_falar)
            # web search: strip the keyword and Google the remainder
            elif 'pesquis' in text:
                site_pesquisar = text.replace('pesquis', '')
                say('pesquisando ' + site_pesquisar)
                wb.open('https://www.google.com/search?client=opera-gx&hs=5GZ&sxsrf=ALeKk02LWQxX_lhfnlTF6lCi_LYm0x5kqg%3A1601686367378&ei=X8t3X_LeFpPA5OUP0e6-WA&q={}&oq={}&gs_lcp=CgZwc3ktYWIQAzIHCAAQChDLATIECAAQHjoHCCMQ6gIQJzoECCMQJzoFCAAQsQM6CAguELEDEIMBOgIIADoFCC4QsQM6BAgAEAo6BggAEAoQHlD_EVjVH2COImgBcAB4AIABsgKIAdMJkgEHMC41LjAuMZgBAKABAaoBB2d3cy13aXqwAQrAAQE&sclient=psy-ab&ved=0ahUKEwiyiuDXmpfsAhUTILkGHVG3DwsQ4dUDCAw&uact=5'.format(site_pesquisar, site_pesquisar), new=2)
            # unknown command: go back to listening
            elif text not in comandos:
                restart()
        except:
            # NOTE(review): bare except swallows every error and restarts —
            # TODO narrow to the expected exception types.
            restart()
# Entry point: greet the user, then loop listening for voice commands.
intro()
|
# Print the number of days in month n (February treated as non-leap).
n = int(input())
if n in (4, 6, 9, 11):
    print(30)
elif n in (1, 3, 5, 7, 8, 10, 12):
    print(31)
else:
    print(28)
from __future__ import print_function
import os
import sys
import time
import boto
from boto.s3.key import Key
from pymongo import MongoClient
from pymongo.cursor import Cursor
from pymongo.errors import AutoReconnect
# Monkey-patch PyMongo to avoid throwing AutoReconnect
# errors. We try to reconnect a couple times before giving up.
def reconnect(f):
    """Decorator retrying *f* a few times on pymongo AutoReconnect.

    Sleeps one second between attempts; raises RuntimeError when all
    attempts fail. Based on https://gist.github.com/aliang/1393029
    """
    def f_retry(*args, **kwargs):
        N_RECONNECT_TRIALS = 3
        # Keep a reference to the exception: under Python 3 the
        # 'except ... as e' name is unbound when the handler exits,
        # so the final raise below could not reference it.
        last_exc = None
        for i in range(N_RECONNECT_TRIALS):
            try:
                return f(*args, **kwargs)
            except AutoReconnect as e:
                last_exc = e
                print('Fail to execute %s [%s] (attempt %d/%d)' % (
                    f.__name__, e, i, N_RECONNECT_TRIALS),
                    file=sys.stderr)
                time.sleep(1)
        raise RuntimeError('AutoReconnect failed. Fail to '
                           'execute %s [%s]' % (f.__name__, last_exc))
    return f_retry
# Wrap PyMongo's private message-sending entry points with the retry
# decorator so transient AutoReconnect errors are retried transparently.
# NOTE(review): these name-mangled attributes are internal to old PyMongo
# releases and do not exist in modern versions — confirm the pinned version.
Cursor._Cursor__send_message = reconnect(Cursor._Cursor__send_message)
MongoClient._send_message = reconnect(MongoClient._send_message)
MongoClient._send_message_with_response = reconnect(MongoClient._send_message_with_response)
MongoClient._MongoClient__try_node = reconnect(MongoClient._MongoClient__try_node)
def connect_mongo():
    """Connect using the MONGO_* environment variables and return the collection."""
    uri = 'mongodb://%s:%s@%s' % (os.environ['MONGO_USER'],
                                  os.environ['MONGO_PASSWORD'],
                                  os.environ['MONGO_URL'])
    client = MongoClient(uri, safe=True)
    database = getattr(client, os.environ['MONGO_DATABASE'])
    return getattr(database, os.environ['MONGO_COLLECTION'])
def upload_s3(prefix, filenames):
    """Upload each existing file to the configured S3 bucket under *prefix*."""
    conn = boto.connect_s3(os.environ['AWS_ACCESS_KEY_ID'],
                           os.environ['AWS_SECRET_ACCESS_KEY'])
    bucket = conn.get_bucket(os.environ['AWS_S3_BUCKET_NAME'])
    for fname in filenames:
        if not os.path.exists(fname):
            print('%s does not exist!' % fname)
            continue
        entry = Key(bucket)
        entry.key = os.path.join(prefix, fname)
        entry.set_contents_from_filename(fname)
|
from ledger import messaging
import coloredlogs, logging
from errors.errors import ApiBadRequest, ApiInternalError
coloredlogs.install()
from addressing import addresser
from protocompiled import payload_pb2
from transactions.common import make_header_and_batch
async def send_float_account(**in_data):
    """Build, sign and submit a CREATE_FLOAT_ACCOUNT transaction.

    Expected keys in ``in_data`` (mirroring the CreateFloatAccount protobuf):
        txn_key (Signer): signer from the user's zeroth public key.
        batch_key (Signer): signer from the platform mnemonic zero key.
        parent_zero_pub (str): parent organization zero public key.
        parent_idx: key index appended to the parent account flt_key_inds.
        child_zero_pub (str): optional child zero public key ('' / falsy skips).
        config: messaging configuration for the REST submission.
        ...plus the remaining CreateFloatAccount fields (pancard, gst_number,
        tan_number, phone_number, email, claimed, claimed_by,
        create_asset_idxs, time, indian_time, roles, nonce fields, etc.).

    Returns:
        (transaction_ids, batch_id) once the batch reaches a terminal status.

    Raises:
        ApiBadRequest / ApiInternalError when the validator rejects the batch.
    """
    # The parent organization account and the new float account are both
    # read and written, so the same addresses serve as inputs and outputs.
    addresses = [
        addresser.create_organization_account_address(
            account_id=in_data["parent_zero_pub"],
            index=0),
        addresser.float_account_address(
            account_id=in_data["txn_key"].get_public_key().as_hex(),
            index=in_data["parent_idx"]
        )
    ]
    inputs = list(addresses)
    outputs = list(addresses)
    logging.info(f"THe account address for the parent on blockchain {inputs[0]}")
    logging.info(f"THe float account address for the user {inputs[1]}")
    if in_data["child_zero_pub"]:
        child_address = addresser.child_account_address(
            account_id=in_data["child_zero_pub"],
            index=0
        )
        logging.info(f"CHILD address is {child_address}")
        inputs.append(child_address)
        outputs.append(child_address)
    logging.info(f"INPUTS ADDRESSES --<{inputs}>--")
    logging.info(f"OUTPUTS ADDRESSES --<{outputs}>--")
    float_account = payload_pb2.CreateFloatAccount(
        claimed_on=in_data["claimed_on"],
        org_name=in_data["org_name"],
        pancard=in_data["pancard"],
        gst_number=in_data["gst_number"],
        tan_number=in_data["tan_number"],
        phone_number=in_data["phone_number"],
        email=in_data["email"],
        claimed=in_data["claimed"],
        claimed_by=in_data["claimed_by"],
        create_asset_idxs=in_data["create_asset_idxs"],
        parent_idx=in_data["parent_idx"],
        time=in_data["time"],
        indian_time=in_data["indian_time"],
        parent_role=in_data["parent_role"],
        role=in_data["role"],
        parent_zero_pub=in_data["parent_zero_pub"],
        nonce=in_data["nonce"],
        nonce_hash=in_data["nonce_hash"],
        signed_nonce=in_data["signed_nonce"],
        child_zero_pub=in_data["child_zero_pub"]
    )
    logging.info(float_account)
    logging.info(f"THe serialized protobuf for float_account is {float_account}")
    payload = payload_pb2.TransactionPayload(
        payload_type=payload_pb2.TransactionPayload.CREATE_FLOAT_ACCOUNT,
        create_float_account=float_account)
    transaction_ids, batches, batch_id, batch_list_bytes = make_header_and_batch(
        payload=payload,
        inputs=inputs,
        outputs=outputs,
        txn_key=in_data["txn_key"],
        batch_key=in_data["batch_key"])
    logging.info(f"This is the batch_id {batch_id}")
    rest_api_response = await messaging.send(
        batch_list_bytes,
        in_data["config"])
    try:
        # Block until the validator reports a terminal status for the batch.
        result = await messaging.wait_for_status(batch_id, in_data["config"])
    except (ApiBadRequest, ApiInternalError) as err:
        # Surface validator rejections to the caller. (The original also
        # had an unreachable `return False, False` after this raise.)
        raise err
    return transaction_ids, batch_id
|
'''
Created on 24/03/2013
@author: artavares
This scripts writes a .csv file with the average values of user-selected fields
from a sequence of SUMO output files.
Example: duaIterate.py generated 100 tripinfo files: tripinfo_000.xml to
tripinfo_099.xml, and the user wants to know the average travel time of each iteration
'''
import xml.etree.ElementTree as ET
import sys
import os
import csv
from optparse import OptionParser
import collections
def average_results(options):
    '''
    Averages the results of the given attributes of a xml file.
    The options object is configured according to parse_args()

    Writes one CSV row per iteration containing the running averages of the
    requested fields over the vehicles that completed a full trip inside the
    configured time window. (Python 2 only: uses print statements and
    'string-escape' decoding.)
    '''
    # -i/--iterations has no default, so its absence is a hard error.
    if not options.iterations:
        print 'parameter -i/--iterations required.'
        exit()
    # allow escaped separators such as '\t' to be passed on the command line
    sep = options.separator.decode('string-escape')
    # opens output file and writes header
    # NOTE(review): outfile is never closed — rows may be lost on crash.
    outfile = open(options.output, 'w')
    fields = options.fields.split(',')
    # adds the first column (iteration) to the fields
    fields = ['it'] + fields
    outfile.write('#' + sep.join(fields) + '\n')
    for i in range(options.iterations):
        sys.stdout.write('Generating averages for iteration %d' % (i + 1) )
        tripinfo_file = '%s%s.xml' % (options.prefix, str(i).zfill(3))
        # parses the i-th tripinfo file
        tree = ET.parse(tripinfo_file)
        # initializes average data and sets the iteration number
        data = {f:0 for f in fields}
        data['it'] = i + 1
        ftrips = full_trips_in_window(options.begin, options.end, tripinfo_file)
        print ': %d full trips in window (%d,%d)' % (len(ftrips), options.begin, options.end)
        # traverses the xml file, averaging the values of the fields
        parsed_elements = 0
        for element in tree.getroot():
            # skips vehicles who haven't completed trips within the time window
            if element.get('id') not in ftrips:
                continue
            for f in fields[1:]:
                # incremental mean: avoids summing then dividing
                data[f] = new_average(data[f], element.get(f), parsed_elements)
            parsed_elements += 1
        # writes one line in the output
        places = ['%s' for f in fields]
        variables = [data[f] for f in fields]
        outfile.write((sep.join(places) + '\n') % tuple(variables))
    print "Output file '%s' written." % options.output
def new_average(old_avg, new_value, stepnum):
    '''
    Incrementally folds new_value into a running mean.

    old_avg is the mean of the first stepnum values; returns the mean of
    the first stepnum+1 values. Inputs may be numeric strings (the XML
    attribute values), hence the float() coercion.
    '''
    return ((float(new_value) - float(old_avg)) / (stepnum + 1)) + old_avg
def full_trips_in_window(begin, finish, tripinfo_file):
'''
Returns a list with the ID of the drivers that
made a full trip between begin and finish
'''
if finish == 0:
finish = sys.maxint
rinfo_tree = ET.parse(tripinfo_file)
fulltrips = []
#handles edge case:
if finish == 0:
finish = sys.maxint
for vdata in rinfo_tree.getroot():
try:
if float(vdata.get('depart')) >= begin and float(vdata.get('arrival')) <= finish:
fulltrips.append(vdata.get('id'))
except ValueError:
print 'Warning: %s has no depart or arrival in %s, skipping...' % (vdata.get('id'), tripinfo_file)
return fulltrips
def parse_args(argv=None):
    '''
    Builds the option parser and parses *argv*.

    argv defaults to None, which makes optparse read sys.argv[1:]. The
    original passed the whole sys.argv, so the program name itself leaked
    into the returned positional-argument list.
    '''
    parser = OptionParser()
    parser.add_option(
        '-o', '--output', type=str, default='avgresults.csv',
        help='output file'
    )
    parser.add_option(
        '-b', '--begin',
        help='the start time of the time window',
        type=int, default=0
    )
    parser.add_option(
        '-e', '--end',
        help='the end time of the time window, 0=unlimited',
        type=int, default=0
    )
    parser.add_option(
        '-f', '--fields', default=None, type='str',
        help='the fields you want to output in the .csv file (field1,field2,field3,...).'
    )
    parser.add_option(
        '-p', '--prefix',
        help='the prefix for the SUMO result files: [prefix]i.xml, where i is the iteration',
        type=str, default=None
    )
    parser.add_option(
        '-i', '--iterations',
        help='the number of iterations to be read from inputs',
        type=int
    )
    parser.add_option(
        '-s', '--separator',
        help='the separator for the output file',
        type=str, default=','
    )
    return parser.parse_args(argv)
if __name__ == '__main__':
    # Parse command-line options, then average the SUMO outputs into a CSV.
    (options, args) = parse_args()
    average_results(options)
|
import requests
def _print_endpoint(url):
    """GET *url* and print the JSON body, or a fallback message on failure."""
    respuesta = requests.get(url=url)
    if respuesta.ok:
        print(respuesta.json())
    else:
        print('No hay respuesta')

# Movie types (the duplicated request/print logic is now shared)
_print_endpoint('http://127.0.0.1:5000/type/all')
# Actor list
_print_endpoint('http://127.0.0.1:5000/actors/all')
# Some utility methods for sets and dictionaries. In a better world these
# could all be replaced by fold().
import copy
import itertools
def iterlen(iterator):
    """Count the items an iterator yields (consumes the iterator)."""
    return sum(1 for _ in iterator)
def intersection(sets):
    """Return the intersection of all given sets as a new set (no mutation)."""
    if not sets:
        return set()
    result = copy.copy(sets[0])
    for other in sets[1:]:
        result &= other
    return result
def intersection_update(sets):
    """Intersect every set into sets[0] in place and return it."""
    if not sets:
        return set()
    target = sets[0]
    for other in sets[1:]:
        target &= other
    return target
def union(sets):
    """Return a new set holding the union of all given sets."""
    merged = set()
    for members in sets:
        merged |= members
    return merged
def union_update(sets):
    """Union every set into sets[0] in place and return it."""
    if not sets:
        return set()
    target = sets[0]
    for other in sets[1:]:
        target |= other
    return target
def addAll(dicts):
    """Merge all dicts into a new dict; later dicts win on duplicate keys.

    The original's empty-input branch was redundant (an empty iterable
    already yields {}), so it was removed; this also generalizes the
    function to accept any iterable of dicts, not just sequences.
    """
    merged = {}
    for d in dicts:
        merged.update(d)
    return merged
def addAll_update(dicts):
    """Fold every dict into dicts[0] in place and return it."""
    if not dicts:
        return {}
    target = dicts[0]
    for extra in dicts[1:]:
        target.update(extra)
    return target
from scisoftpy.dictutils import DataHolder
from scisoftpy.nexus.nxclasses import NXroot
import scisoftpy as dnp
import logging
logger = logging.getLogger(__name__)
def determineScannableContainingField(targetFieldname, scannables):
    """Return the scannable owning *targetFieldname*.

    Tries, in order: an exact match against each scannable's input+extra
    field names; a match against the field name with its leading
    'scnname.' prefix stripped; finally, a single-'value'-field scannable
    whose name contains the target. Raises KeyError when nothing matches.
    """
    def _fields(scn):
        return list(scn.getInputNames()) + list(scn.getExtraNames())

    for scn in scannables:
        if targetFieldname in _fields(scn):
            return scn
    abbrevtarget = ".".join(targetFieldname.split(".")[1:])
    for scn in scannables:
        if abbrevtarget in _fields(scn):
            return scn
    for scn in scannables:
        if list(scn.getInputNames()) == [u'value'] and targetFieldname in scn.getName():
            return scn
    raise KeyError('Neither targetFieldname "%s" nor abbrevtarget "%s" found in scannables: %s' % (targetFieldname, abbrevtarget,
                   ['%r:%r+%r' % (scn.getName(), list(scn.getInputNames()), list(scn.getExtraNames())) for scn in scannables]))
def getDatasetFromLoadedFile(loadedFile, fieldName, scanDataPointCache=None):
    '''
    Gets a dataset called fieldName from an already loaded file see loadScanFile(scanOb)
    returns dataset

    loadedFile may be an NXroot (NeXus) or a DataHolder; when a
    scanDataPointCache is supplied the file is bypassed entirely.
    NOTE(review): for an unsupported file type this prints a message and
    implicitly returns None — callers must handle that.
    '''
    logger.debug('Getting data for %s, from %s (with cache=%s)', fieldName, loadedFile, scanDataPointCache)
    # Check if the field names are the full local names if so get just the last part of
    # the field names as this should be the node name. Keep original fieldname, it might
    # be useful later
    if '.' in fieldName:
        # with scnname.fieldname strip off scnname
        strippedFieldName = fieldName.split('.')[-1]
    else: # fieldname doesn't require stripping
        strippedFieldName = fieldName
    # If we have a scanDataPointCache use it for performance
    if(scanDataPointCache):
        positionsDataset = scanDataPointCache.getPositionsFor(strippedFieldName)
        # fail fast on incomplete cached columns rather than returning bad data
        if None in positionsDataset:
            raise KeyError("positionsDataset for field {} contains None/null entries".format(strippedFieldName))
        return dnp.asarray(positionsDataset)
    # Check if its a NeXus file
    if isinstance(loadedFile, NXroot):
        # Note: Using first node returned, this might fail if there are multiple nodes with the same name!
        # Might be possible to disambiguate this using the original fieldname?
        loadedNodes = loadedFile.getnodes(strippedFieldName, group=False, data=True)
        if len(loadedNodes) == 0:
            raise KeyError("%s not found in data file" % strippedFieldName)
        # Find nodes which have a local_name
        probableNodes = [loadedNodes[_n] for _n in xrange(len(loadedNodes))
                         if 'local_name' in loadedNodes[_n].attrs]
        # Use the first local_name which matches the fieldName or fall back on using the first node
        for node in probableNodes:
            if node.attrs['local_name'] == fieldName:
                lazyDataset = node
                break
        else:
            lazyDataset = loadedNodes[0]
        # Use slicing to load the whole lazy dataset into a array i.e. non-lazy dataset
        dataset = lazyDataset[...]
        return dataset
    elif isinstance(loadedFile, DataHolder):
        datasetList = loadedFile[strippedFieldName]
        # Convert the dataset as a list into the array
        dataset = dnp.asarray(datasetList)
        return dataset
    # Not a supported file type
    else:
        print "The file format is not supported"
        print loadedFile.__class__
class Struct(object):
    """A tiny attribute bag that remembers the insertion order of its attributes.

    Attributes added via addAttribute are listed, one per line, by repr/str.
    """
    def __init__(self):
        self.attrnames = []
    def addAttribute(self, attrname, value):
        """Store *value* under *attrname* and record it for repr ordering."""
        self.__dict__[attrname] = value
        self.attrnames.append(attrname)
    def __repr__(self):
        # repr() replaces the Python-2-only backtick syntax (removed in
        # Python 3); the produced text is identical.
        result = ''
        for attrname in self.attrnames:
            result += attrname + ' = ' + repr(self.__dict__[attrname]) + '\n'
        return result
    def __str__(self):
        return self.__repr__()
    def __getitem__(self, key):
        return self.__dict__[key]
class ScanDataProcessorResult(object):
    """When viewed as a string this returns nice wordy results, otherwise it
    is a structure of the form:
        processor.abscissa = 'x'
        processor.ordinate = 'e1'
        processor.scn.x = 1
        processor.scn.mie: i1, e1 = 3, 4
        processor.field.x = 1
        processor.field.i1 = 3
        processor.field.e1 = 4
        processor.result.fwhm = 2
        processor.result.peak = 1
    """
    log = logger.getChild('ScanDataProcessorResult')
    def __init__(self, dataSetResult, lastScanFile, allscannables, xfieldname, yfieldname, scanDataPointCache=None):
        self.name = dataSetResult.processorName
        self.labelValuePairs = dataSetResult.resultsDict
        self.datasetProcessorReport = dataSetResult.report
        self.scanDataPointCache = scanDataPointCache
        xvalue = self.labelValuePairs[dataSetResult.keyxlabel]
        self.scn = Struct()
        self.field = Struct()
        self.result = Struct()
        # Give up here if there was a problem with the processor
        for val in self.labelValuePairs.values():
            if val is None:
                self.scannableValues = None
                self.report = self.datasetProcessorReport
                self.str = self.datasetProcessorReport
                return
        # generate short report and determine scannable values at feature
        self.scannableValues = self.determineScannableValuesAtFeature(allscannables, lastScanFile, xfieldname, xvalue)
        self.str = '' # accumulated text for __str__ and __repr__
        # abscissa and ordinate fields:
        self.abscissa = xfieldname
        self.ordinate = yfieldname
        self.str += ' ' + self.name + ".abscissa = '%s'\n" % self.abscissa
        self.str += ' ' + self.name + ".ordinate = '%s'\n" % self.ordinate
        # abscissa and ordinate scannables:
        self.abscissa_scannable = determineScannableContainingField(xfieldname, allscannables)
        self.ordinate_scannable = determineScannableContainingField(yfieldname, allscannables)
        # feature location by scannable:
        for scn, value in self.scannableValues.items():
            self.scn.addAttribute(scn.getName(), value)
        for scn in allscannables:
            try:
                self.str += ' ' + self.name + ".scn." + self.reportScannablePositionAtFeature(scn) + '\n'
            except:
                # Best-effort reporting: scannables whose fields could not be
                # read are silently skipped.
                pass
        # feature location by field
        for scn, value in self.scannableValues.items():
            fieldnames = list(scn.getInputNames()) + list(scn.getExtraNames())
            scnpos = self.getScannableValueAtFeature(scn)
            try:
                scnpos = list(scnpos)
            except TypeError: # not a list
                scnpos = [scnpos]
            # `fmt` rather than `format` to avoid shadowing the builtin
            for fieldname, pos, fmt in zip(fieldnames, scnpos, scn.getOutputFormat()):
                self.field.addAttribute(fieldname, pos)
                # BUGFIX: identity comparison with None (was: pos == None)
                formattedpos = 'None' if pos is None else fmt % pos
                self.str += ' ' + self.name + ".field." + fieldname + " = " + formattedpos + '\n'
        # results
        for label in dataSetResult.labelList:
            val = dataSetResult.resultsDict[label]
            self.result.addAttribute(label, val)
            self.str += ' ' + self.name + ".result." + label + " = " + str(val) + '\n'
        self.report = self.generateShortReport(dataSetResult)
    def generateShortReport(self, dataSetResult):
        """One-line summary of where the feature was found.

        Uses input names only on purpose.
        NOTE(review): assumes the abscissa scannable has at least one input
        name; with zero inputs `scnlabel`/`pos` would be unbound.
        """
        scn = self.abscissa_scannable
        if len(scn.getInputNames())==1:
            scnlabel = scn.getName()
            pos = self.scannableValues[scn]
        elif len(scn.getInputNames())>1:
            scnlabel = scn.getName() + "("
            pos="("
            for fieldname, fieldval in zip(scn.getInputNames(), self.scannableValues[scn]):
                scnlabel += fieldname + ","
                pos += str(fieldval) + ","
            scnlabel = scnlabel[:-1] + ")"
            pos = pos[:-1] + ")"
        valuesString = ""
        for label, value in self.labelValuePairs.items():
            if label != dataSetResult.keyxlabel:
                valuesString += label + " = " + str(value) + ", "
        valuesString = valuesString[:-2]
        return "At %s = %s (%s), %s %s. " % (scnlabel, pos, dataSetResult.keyxlabel, self.ordinate, valuesString)
    def reportScannablePositionAtFeature(self, scn):
        """Return e.g. "scn = 1.00" or "scn: f1, f2 = 1.00, 2.00" at the feature."""
        result = scn.name
        fieldnames = list(scn.getInputNames()) + list(scn.getExtraNames())
        # Get scannable position as a list
        scnpos = self.getScannableValueAtFeature(scn)
        try:
            scnpos = list(scnpos)
        except TypeError: # not a list
            scnpos = [scnpos]
        # if single fieldname matches scannable name, format is "processor.scn = value"
        if len(fieldnames)==1:
            if fieldnames[0]==scn.name:
                return result + ' = ' + scn.getOutputFormat()[0] % scnpos[0]
        # Otherwise format is "processor.scn: f1, f2 = v1, v2"
        result += ': '
        for name in fieldnames:
            result += name + ', '
        result = result[:-2]
        result += ' = '
        for fmt, pos in zip(scn.getOutputFormat(), scnpos):
            # BUGFIX: parenthesised the conditional -- previously ', ' bound
            # only to the non-None branch, so a None value produced no
            # separator and the trailing [:-2] below chopped 'None' to 'No'.
            result += ('None' if pos is None else fmt % pos) + ', '
        result = result[:-2]
        return result
    def go(self):
        """Move the abscissa scannable to the located feature position."""
        scn = self.abscissa_scannable
        val = self.getScannableValueAtFeature(scn)
        try:
            len(val)
        except TypeError:
            val = [val]
        # Only the driveable (input) fields are used for the move
        val = val[:len(scn.getInputNames())]
        if len(val)==0:
            raise Exception("The scannable %s has no input fields"%scn.getName())
        if len(val)==1:
            val = val[0]
        # Modernised: print() call and repr() instead of the Python 2-only
        # print statement and backticks (same output).
        print("Moving %s to %s" % (scn.getName(), repr(val)))
        scn.moveTo(val)
    def __findValueInDataSet(self, value, dataset):
        # Index of the first exact occurrence of value in the dataset
        return list(dataset.doubleArray()).index(value)
    def determineScannableValuesAtFeature(self, scannables, lastScanFile, xname, xvalue):
        '''
        This find the values of all scannables at the position given by xvalue. It uses linear interpolation to
        find the return values.
        '''
        self.log.debug('Determining scannable (%s) values at %s', xname, xvalue)
        # Get the x dataset from the file
        dsx = getDatasetFromLoadedFile(lastScanFile, xname, self.scanDataPointCache)
        decreasing = dsx[0] > dsx[-1]
        #dnp.interp only works on increasing datasets
        dsx = dnp.array(dsx[::-1,...]) if decreasing else dsx
        # Check if the x value is inside the x range of the scan
        feature_inside_scan_data = dsx.min() <= xvalue <= dsx.max()
        result = {}
        for scn in scannables:
            try:
                pos = []
                fieldnames = list(scn.getInputNames()) + list(scn.getExtraNames())
                for fieldname, fmt in zip(fieldnames, scn.getOutputFormat()):
                    scn_fieldname = scn.name + '.' + fieldname
                    if fmt == '%s':
                        # Cannot get filenames from SRS files!
                        value = float('nan')
                    else:
                        dsfield = getDatasetFromLoadedFile(lastScanFile, scn_fieldname, self.scanDataPointCache)
                        if feature_inside_scan_data:
                            # Find the value of the scannable by interpolation (only valid if feature is inside scan!).
                            # xvalue is the position of the feature against the x scannable
                            # dsx is the array of the x values
                            # dsfield is the array of the other scannable positions
                            if decreasing:
                                dsfield = dsfield[::-1,...]
                            value = dnp.interp(xvalue, dsx, dsfield)
                        else: # feature not inside scan: clamp to an end value
                            if scn_fieldname == xname:
                                value = xvalue
                            else:
                                # BUGFIX: call dsx.min() -- the original
                                # compared xvalue against the *bound method*
                                # dsx.min, never a meaningful comparison.
                                if xvalue <= dsx.min():
                                    value = dsfield[0]
                                else: # xvalue >= dsx.max()
                                    value = dsfield[-1]
                    pos.append(value)
                if len(pos)==1:
                    result[scn] = pos[0]
                else:
                    result[scn] = pos
            except:
                # Best effort: scannables whose data is missing from the file
                # are simply left out of the result.
                pass
        return result
    def getScannableValueAtFeature(self, scn):
        """Return the (interpolated) value of scn at the located feature."""
        return self.scannableValues[scn]
    def __repr__(self):
        return self.str
    def __str__(self):
        return self.__repr__()
|
# Standard library
from argparse import ArgumentParser
# Third party
import climate_learn as cl
import pytorch_lightning as pl
from pytorch_lightning.callbacks import (
EarlyStopping,
ModelCheckpoint,
RichModelSummary,
RichProgressBar,
)
from pytorch_lightning.loggers.tensorboard import TensorBoardLogger
# Command-line interface of the ClimateBench training/evaluation script.
parser = ArgumentParser()
parser.add_argument("climatebench_dir")
parser.add_argument("model", choices=["resnet", "unet", "vit"])
parser.add_argument(
    "variable",
    choices=["tas", "diurnal_temperature_range", "pr", "pr90"],
    help="The variable to predict.",
)
# Integer flags, registered in the original order so --help output is unchanged.
for flag, default in (
    ("--summary_depth", 1),
    ("--max_epochs", 50),
    ("--patience", 10),
    ("--gpu", -1),
):
    parser.add_argument(flag, type=int, default=default)
parser.add_argument("--checkpoint", default=None)
args = parser.parse_args()
# Set up data
# ClimateBench forcing inputs (emissions) used as predictors.
variables = ["CO2", "SO2", "CH4", "BC"]
out_variables = args.variable
data_kwargs = dict(
    variables=variables,
    out_variables=out_variables,
    train_ratio=0.9,
    history=10,
    batch_size=16,
    num_workers=1,
)
dm = cl.data.ClimateBenchDataModule(args.climatebench_dir, **data_kwargs)
# Set up deep learning model
# Per-architecture overrides of climate_learn's default hyperparameters.
if args.model == "resnet":
    model_kwargs = {  # override some of the defaults
        "in_channels": 4,  # one channel per forcing variable
        "out_channels": 1,
        "history": 10,
        "n_blocks": 28,
    }
elif args.model == "unet":
    model_kwargs = {  # override some of the defaults
        "in_channels": 4,
        "out_channels": 1,
        "history": 10,
        "ch_mults": (1, 2, 2),
        "is_attn": (False, False, False),
    }
elif args.model == "vit":
    model_kwargs = {  # override some of the defaults
        "img_size": (32, 64),
        "in_channels": 4,
        "out_channels": 1,
        "history": 10,
        "patch_size": 2,
        "embed_dim": 128,
        "depth": 8,
        "decoder_depth": 2,
        "learn_pos_emb": True,
        "num_heads": 4,
    }
optim_kwargs = {"lr": 5e-4, "weight_decay": 1e-5, "betas": (0.9, 0.99)}
sched_kwargs = {
    "warmup_epochs": 5,
    # BUGFIX: keep the cosine-annealing horizon in sync with the trainer's
    # epoch budget; it was hard-coded to 50, so passing --max_epochs desynced
    # the LR schedule from training length.
    "max_epochs": args.max_epochs,
    "warmup_start_lr": 1e-8,
    "eta_min": 1e-8,
}
model = cl.load_climatebench_module(
    data_module=dm,
    model=args.model,
    model_kwargs=model_kwargs,
    optim="adamw",
    optim_kwargs=optim_kwargs,
    sched="linear-warmup-cosine-annealing",
    sched_kwargs=sched_kwargs,
)
# Set up trainer
pl.seed_everything(0)
default_root_dir = f"{args.model}_climatebench_{args.variable}"
logger = TensorBoardLogger(save_dir=f"{default_root_dir}/logs")
# Both early stopping and checkpoint selection track aggregate validation MSE.
monitored_metric = "val/mse:aggregate"
use_gpu = args.gpu != -1
callbacks = [
    RichProgressBar(),
    RichModelSummary(max_depth=args.summary_depth),
    EarlyStopping(monitor=monitored_metric, patience=args.patience),
    ModelCheckpoint(
        dirpath=f"{default_root_dir}/checkpoints",
        monitor=monitored_metric,
        filename="epoch_{epoch:03d}",
        auto_insert_metric_name=False,
    ),
]
trainer = pl.Trainer(
    logger=logger,
    callbacks=callbacks,
    default_root_dir=default_root_dir,
    accelerator="gpu" if use_gpu else None,
    devices=[args.gpu] if use_gpu else None,
    max_epochs=args.max_epochs,
    strategy="ddp",
    precision="16",
    log_every_n_steps=1,
)
# Either evaluate a saved checkpoint, or train from scratch and test the best
# checkpoint found during training.
if args.checkpoint is not None:
    model = cl.LitModule.load_from_checkpoint(
        args.checkpoint,
        net=model.net,
        optimizer=model.optimizer,
        lr_scheduler=None,
        train_loss=None,
        val_loss=None,
        test_loss=model.test_loss,
        test_target_transforms=model.test_target_transforms,
    )
    trainer.test(model, datamodule=dm)
else:
    trainer.fit(model, datamodule=dm)
    trainer.test(model, datamodule=dm, ckpt_path="best")
|
# -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..model.education_contact_relationship import EducationContactRelationship
from ..one_drive_object_base import OneDriveObjectBase
class EducationRelatedContact(OneDriveObjectBase):
    """Generated model wrapping an education related-contact property dict.

    Exposes Graph API dictionary entries as Python properties.
    """
    def __init__(self, prop_dict=None):
        # BUGFIX: the original default `prop_dict={}` is a mutable default
        # argument -- one shared dict across every instance created without
        # an explicit argument, so setters on one instance leaked into all
        # others. Create a fresh dict per instance instead.
        self._prop_dict = {} if prop_dict is None else prop_dict
    @property
    def id(self):
        """Gets and sets the id
        Returns:
            str:
                The id
        """
        if "id" in self._prop_dict:
            return self._prop_dict["id"]
        else:
            return None
    @id.setter
    def id(self, val):
        self._prop_dict["id"] = val
    @property
    def display_name(self):
        """Gets and sets the displayName
        Returns:
            str:
                The displayName
        """
        if "displayName" in self._prop_dict:
            return self._prop_dict["displayName"]
        else:
            return None
    @display_name.setter
    def display_name(self, val):
        self._prop_dict["displayName"] = val
    @property
    def email_address(self):
        """Gets and sets the emailAddress
        Returns:
            str:
                The emailAddress
        """
        if "emailAddress" in self._prop_dict:
            return self._prop_dict["emailAddress"]
        else:
            return None
    @email_address.setter
    def email_address(self, val):
        self._prop_dict["emailAddress"] = val
    @property
    def mobile_phone(self):
        """Gets and sets the mobilePhone
        Returns:
            str:
                The mobilePhone
        """
        if "mobilePhone" in self._prop_dict:
            return self._prop_dict["mobilePhone"]
        else:
            return None
    @mobile_phone.setter
    def mobile_phone(self, val):
        self._prop_dict["mobilePhone"] = val
    @property
    def relationship(self):
        """
        Gets and sets the relationship
        Returns:
            :class:`EducationContactRelationship<onedrivesdk.model.education_contact_relationship.EducationContactRelationship>`:
                The relationship
        """
        if "relationship" in self._prop_dict:
            if isinstance(self._prop_dict["relationship"], OneDriveObjectBase):
                return self._prop_dict["relationship"]
            else :
                # Lazily wrap the raw dict in the typed model and cache it.
                self._prop_dict["relationship"] = EducationContactRelationship(self._prop_dict["relationship"])
                return self._prop_dict["relationship"]
        return None
    @relationship.setter
    def relationship(self, val):
        self._prop_dict["relationship"] = val
    @property
    def access_consent(self):
        """Gets and sets the accessConsent
        Returns:
            bool:
                The accessConsent
        """
        if "accessConsent" in self._prop_dict:
            return self._prop_dict["accessConsent"]
        else:
            return None
    @access_consent.setter
    def access_consent(self, val):
        self._prop_dict["accessConsent"] = val
|
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# --- Gmail / virtual-classroom credentials ---------------------------------
# SECURITY NOTE(review): credentials are hard-coded in source. They should be
# read from environment variables or a secrets store rather than committed.
gmail = "nahoj1992@gmail.com"
contrasena = "dragonball2010"
gmailAulavirtual = "jcalderonva@unsa.edu.pe"
# Create a Chrome instance pointed at the virtual classroom.
# Raw string so the Windows path backslashes are not treated as escapes.
driver = webdriver.Chrome(r"C:\Python\Lib\site-packages\selenium\webdriver\chrome\chromedriver.exe")
print("Iniciando bot")
# Log in to the Google account (the spreadsheet URL triggers the login flow).
driver.get("https://docs.google.com/spreadsheets/d/1ADLUu_4Jz1OxYdBRcZfBi3lUS0DjK0F_nFE21-NCGBo/edit#gid=986616540")
email = driver.find_element_by_id('identifierId')
email.send_keys(gmail)
driver.find_element_by_id('identifierNext').click()
time.sleep(5)
contra = driver.find_element_by_xpath("//*[@id='password']/div[1]/div/div[1]/input")
# BUGFIX: use the `contrasena` variable instead of duplicating the literal.
contra.send_keys(contrasena)
driver.find_element_by_id('passwordNext').click()
time.sleep(5)
print("Ingresando al aula virtual")
# Log in to the virtual classroom.
driver.get("http://dutic.unsa.edu.pe/aulavirtual/login/index.php")
login_form_sesion_dutic = driver.find_element_by_id('login')
username = driver.find_element_by_id('username')
password = driver.find_element_by_id('password')
username.send_keys("dutic")
password.send_keys("Qwert18*")
driver.find_element_by_id("loginbtn").click()
time.sleep(5)
# User administration page: filter users by e-mail address.
driver.get("http://dutic.unsa.edu.pe/aulavirtual/admin/user.php")
driver.find_element_by_class_name("moreless-toggler").click()
email_usuario = driver.find_element_by_id('id_email')
email_usuario.send_keys(gmailAulavirtual)
time.sleep(5)
driver.find_element_by_id("id_addfilter").click()
# BUGFIX: the locator is an XPath expression, so it must be resolved with
# find_element_by_xpath; find_element_by_id would never match it.
# NOTE(review): the id looks auto-generated by YUI and is unlikely to be
# stable across page loads -- consider a structural locator.
driver.find_element_by_xpath("//*[@id='yui_3_17_2_1_1554146167123_524']").click()
|
from django.contrib import admin
from pelicula.models import Cliente, Genero, Pelicula, Prestamo
# Register every movie-rental model with the default admin site, in the same
# order as before.
for model in (Cliente, Genero, Pelicula, Prestamo):
    admin.site.register(model)
|
# Generated by Django 2.2.9 on 2020-01-25 15:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: redefines the `curratt` and `headman`
    # foreign keys on `group` so that both allow blank/null values and
    # cascade on deletion of the referenced Teacher/Student.
    dependencies = [
        ('humans', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='group',
            name='curratt',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='humans.Teacher'),
        ),
        migrations.AlterField(
            model_name='group',
            name='headman',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='humans.Student'),
        ),
    ]
|
#coding: utf-8
from django import forms
from UserManagement.models import Person
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class UploadFileForm(forms.Form):
    """Simple form for uploading a single file together with a short title."""
    title = forms.CharField(max_length=50)
    file = forms.FileField()
class MyRegistrationForm(forms.ModelForm):
    """Registration form for Person with Bootstrap ("form-control") widgets.

    Placeholders are user-facing and intentionally left in Russian.
    """
    username = forms.CharField(max_length=32, widget=forms.TextInput(attrs={"type": "text", "class": "form-control", "placeholder": "Username"}))
    last_name = forms.CharField(max_length=32, widget=forms.TextInput(attrs={"type": "text", "class": "form-control", "placeholder": "Фамилия"}))
    first_name = forms.CharField(max_length=32, widget=forms.TextInput(attrs={"type": "text", "class": "form-control", "placeholder": "Имя"}))
    login = forms.CharField(max_length=32, widget=forms.TextInput(attrs={"type": "text", "class": "form-control", "placeholder": "логин"}))
    password = forms.CharField(max_length=32, widget=forms.PasswordInput(attrs={"type": "password", "class": "form-control", "placeholder": "пароль"}))
    confirmpassword = forms.CharField(max_length=32, widget=forms.PasswordInput(attrs={"type": "password", "class": "form-control", "placeholder": "Пожалуйста, подтвердите..."}))
    email = forms.CharField(max_length=32, widget=forms.EmailInput(attrs={"type": "email", "class": "form-control", "placeholder": "e-mail"}))
    phone_number = forms.CharField(max_length=32, widget=forms.NumberInput(attrs={"type": "number", "class": "form-control", "placeholder": "phone_number"}))
    birthdate = forms.CharField(widget=forms.DateInput(attrs={"type": "date", "id": "birthdate", "class": "form-control"}))
    status = forms.CharField(widget=forms.Select(attrs={"id": "status", "class": "form-control"}))
    # BUGFIX: forms.ImageField is a form *field*, not a widget -- a widget
    # must implement render()/value_from_datadict(), so passing a field here
    # broke form rendering/binding (the commented-out value_from_datadict
    # hint suggests this was hit). Use the standard file-input widget.
    photo = forms.FileField(widget=forms.ClearableFileInput())
    rules = forms.BooleanField(widget=forms.CheckboxInput(attrs={"type": "checkbox"}))
    class Meta:
        model = Person
        fields = '__all__'
        exclude = ('rules',)
class UserForm(forms.ModelForm):
    """ModelForm over the built-in auth User model, exposing all fields."""
    class Meta:
        model = User
        fields = ('__all__')
        # NOTE(review): User has no 'rules' field; this exclude looks copied
        # from MyRegistrationForm -- confirm it is intentional.
        exclude = ('rules',)
class UserChangeForm(forms.ModelForm):
    """
    Form for updating user data. Exists only so we do not keep seeing
    "password field is required" errors when updating a user's data.
    """
    class Meta:
        model = Person
        fields = ('__all__')
        exclude = ('rules',)
|
import numpy as np
from operator import itemgetter
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score
import csv
def _load_sparse_dataset(path, num_features=16244):
    """Load a libSVM-style sparse file ("<label> <idx>:<val> ...") as dense arrays.

    Returns (X, Y): X is a (n_samples, num_features) dense matrix, Y the
    label vector, with row i of X aligned with Y[i].
    """
    with open(path) as f:
        content = f.readlines()
    X = np.zeros((len(content), num_features))
    Y = np.zeros(len(content))
    for i, line in enumerate(content):
        parts = line.split()
        Y[i] = int(parts[0])  # spam classification label
        for token in parts[1:]:  # "feature:value" pairs
            feat, fval = token.split(":")
            # BUGFIX: the original indexed X[i-1], which wrote row 0's
            # features into the *last* row and shifted every row by one,
            # misaligning the features X with the labels Y.
            X[i][int(feat)] = float(fval)
    return X, Y

# training and cross validation
X, Y = _load_sparse_dataset('data/processed_tfidf/ALLYEARS/SPARSE2016.dat')
print(np.array(Y).shape)
print(np.matrix(X).shape)
svm_sm = LinearSVC(C=0.5, random_state=42, loss='hinge')
svm_sm.fit(X, Y)
scores = cross_val_score(svm_sm, X, Y, cv=5)
print(scores.mean())

# testing on a held-out year
Xtest, Ytest = _load_sparse_dataset('data/processed_tfidf/ALLYEARS/SPARSE2008.dat')
print(np.array(Ytest).shape)
print(np.matrix(Xtest).shape)
feature_predict = svm_sm.predict(Xtest)
# simple accuracy: fraction of matching labels
ncorrect = 0
for i in range(0, len(Ytest)):
    if Ytest[i] == feature_predict[i]:
        ncorrect += 1
accuracy = float(ncorrect) / len(Ytest)
print(accuracy)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from stestr.commands.failing import failing as failing_command
from stestr.commands.history import history_list as history_list_command
from stestr.commands.history import history_remove as history_remove_command
from stestr.commands.history import history_show as history_show_command
from stestr.commands.init import init as init_command
from stestr.commands.last import last as last_command
from stestr.commands.list import list_command
from stestr.commands.load import load as load_command
from stestr.commands.run import run_command
from stestr.commands.slowest import slowest as slowest_command
# Public command entry points re-exported for programmatic use of stestr.
__all__ = [
    "failing_command",
    "init_command",
    "last_command",
    "list_command",
    "load_command",
    "run_command",
    "slowest_command",
    "history_show_command",
    "history_list_command",
    "history_remove_command",
]
|
from datetime import datetime
from ward import test
from repository import BookRepository
@test('test find by author should return list of books')
def _():
    """find_by_author should keep only the requested author's books."""
    catalogue = [
        {'name': 'The Gatuk book', 'author': 'Gatuk'},
        {'name': 'The Human book', 'author': 'Human'},
    ]
    repository = BookRepository(catalogue)
    found = repository.find_by_author(name='Gatuk')
    assert len(found) == 1
    assert found[0]['author'] == 'Gatuk'
@test('test find by hardest should return list of books')
def _():
    """find_by_hardest should keep only books of the requested length."""
    catalogue = [
        {'name': 'The Gatuk book', 'length': 300},
        {'name': 'The Human book', 'length': 280},
    ]
    repository = BookRepository(catalogue)
    found = repository.find_by_hardest(length=300)
    assert len(found) == 1
    assert found[0]['length'] == 300
@test('test find by recent should return list of books')
def _():
    """find_by_recent should keep only books published within `year` years."""
    catalogue = [
        {'name': 'The Gatuk book', 'publication_date': datetime(2020, 10, 25).date()},
        {'name': 'The Human book', 'publication_date': datetime(2017, 10, 25).date()},
    ]
    repository = BookRepository(catalogue)
    found = repository.find_by_recent(year=2)
    assert len(found) == 1
    assert found[0]['publication_date'] == datetime(2020, 10, 25).date()
|
import re
def read_passports():
    """Parse day04/input into a list of {field: value} dicts.

    Passports span multiple lines and are separated by blank lines; each
    field is a "key:value" token.
    """
    def to_passport(text):
        # Split the accumulated text into key:value pairs.
        return {tok.split(':')[0]: tok.split(':')[1] for tok in text.split()}

    passports = []
    buffered = ""
    with open('day04/input') as f:
        for line in f:
            if line != '\n':
                buffered = ' '.join([buffered, line])
            else:  # blank line terminates the current passport
                passports.append(to_passport(buffered))
                buffered = ""
    # The final passport has no trailing blank line.
    passports.append(to_passport(buffered))
    return passports
def part1():
    """Count passports that have all 8 fields, or 7 fields missing only cid."""
    valid = 0
    for pp in read_passports():
        if len(pp) == 8 or (len(pp) == 7 and 'cid' not in pp):
            valid += 1
    print(f'Part 1 answer: {valid}')
def valid_pp(pp):
    """Validate a passport dict against the day 4, part 2 field rules."""
    # Must have all 8 fields, or exactly the 7 non-cid fields.
    if len(pp) < 7 or (len(pp) == 7 and 'cid' in pp.keys()):
        return False
    x = int(pp['byr'])
    if x < 1920 or x > 2002:
        return False
    x = int(pp['iyr'])
    if x < 2010 or x > 2020:
        return False
    x = int(pp['eyr'])
    if x < 2020 or x > 2030:
        return False
    x = pp['ecl']
    if x not in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']:
        return False
    x = pp['pid']
    # BUGFIX: pid is "a nine-digit number" -- the original checked only the
    # length, so e.g. '12345678a' was accepted.
    if len(x) != 9 or not x.isdigit():
        return False
    # Height: "<number><unit>" where unit is 'in' or 'cm'.
    m = re.search(r'(\d+)(\w+)', pp['hgt'])
    # BUGFIX: guard the match -- the original dereferenced m.group without
    # checking and crashed on heights containing no digits at all.
    if m is None:
        return False
    x = int(m.group(1))
    if m.group(2) == 'in':
        if x < 59 or x > 76:
            return False
    elif m.group(2) == 'cm':
        if x < 150 or x > 193:
            return False
    else:
        return False
    # Hair colour: '#' followed by exactly six hex digits.
    # BUGFIX: re.match anchors only the start, so trailing junk (e.g. a 7th
    # hex digit) was accepted; fullmatch requires the whole string to match.
    if not re.fullmatch(r'#[0-9a-f]{6}', pp['hcl']):
        return False
    # All conditions passed
    return True
def part2():
    """Count passports passing full field validation and print the answer."""
    count = sum(1 for pp in read_passports() if valid_pp(pp))
    print(f'Part 2 answer: {count}')
if __name__ == '__main__':
    # Run both puzzle parts when executed as a script.
    part1()
    part2()
|
"""youtubeの操作を行う"""
import json
import traceback
from datetime import datetime
from datetime import timedelta
import requests
from bs4 import BeautifulSoup
def get_video_info(conn, api_key, video_id):
    """Fetch snippet/statistics for one video via the YouTube Data API v3.

    `conn` is an already-open http.client-style connection to the API host.
    Raises Exception(status, reason) on any non-200 response; otherwise
    returns the decoded JSON response as a dict.
    """
    conn_str = '/youtube/v3/videos?id={0}&key={1}&fields=items(id,snippet(channelId,title,categoryId,publishedAt),statistics)&part=snippet,statistics'.format(video_id, api_key)
    conn.request('GET', conn_str)
    res = conn.getresponse()
    if res.status != 200:
        raise Exception(res.status, res.reason)
    data = res.read().decode("utf-8")
    return json.loads(data)
def get_channel_video_list(conn, api_key, channel_id, published_before=None, published_after=None):
    """Fetch one page (up to 50) of a channel's videos, newest first.

    `published_before`/`published_after` are RFC3339 timestamp strings used to
    page through the channel's history. Returns a list of dicts with
    'videoId', 'title' and 'publishedAt'; raises Exception(status, reason) on
    a non-200 response.
    """
    # References:
    # https://developers.google.com/youtube/v3/docs/search/list?hl=ja
    # https://support.google.com/youtube/forum/AAAAiuErobU8A1V4NqExIE/?hl=en&gpf=%23!topic%2Fyoutube%2F8A1V4NqExIE
    # https://qiita.com/yuji_saito/items/8f472dcd785c1fadf666
    result = []
    # NOTE(review): search.list has no 'kind' request parameter (type
    # filtering uses 'type=video'); the client-side kind check below is what
    # actually filters out non-videos -- confirm before changing this URL.
    conn_str = '/youtube/v3/search?part=snippet&channelId={0}&key={1}&maxResults=50&order=date&kind=video'.format(channel_id, api_key)
    print (conn_str)
    if published_before:
        conn_str = conn_str + '&publishedBefore=' + published_before
    if published_after:
        conn_str = conn_str + '&publishedAfter=' + published_after
    conn.request('GET', conn_str)
    res = conn.getresponse()
    if res.status != 200:
        raise Exception(res.status, res.reason)
    data = res.read().decode("utf-8")
    data = json.loads(data)
    # Keep only actual videos (search can also return channels/playlists).
    for item in data['items']:
        if item['id']['kind'] == 'youtube#video':
            result.append({'videoId' : item['id']['videoId'], 'title':item['snippet']['title'], 'publishedAt':item['snippet']['publishedAt']})
    return result
def convert_string_to_datetime(str_dt):
    """Parse a YouTube-style UTC timestamp string into a naive datetime."""
    return datetime.strptime(str_dt, '%Y-%m-%dT%H:%M:%S.000Z')
def convert_datetime_to_string(date_time):
    """Format a datetime back into the YouTube-style UTC timestamp string."""
    return '{:%Y-%m-%dT%H:%M:%S}.000Z'.format(date_time)
def get_all_channel_video_list(conn, api_key, channel_id, published_after=None):
    """Page backwards through a channel until every video has been collected.

    Repeatedly calls get_channel_video_list, moving the publishedBefore
    cursor one second before the oldest video seen so far, until a page
    comes back empty.
    """
    result = get_channel_video_list(conn, api_key, channel_id, None, published_after)
    if not result:
        return result
    while True:
        oldest = convert_string_to_datetime(result[-1]['publishedAt'])
        cursor = convert_datetime_to_string(oldest - timedelta(seconds=1))
        page = get_channel_video_list(conn, api_key, channel_id, cursor, published_after)
        if not page:
            return result
        result.extend(page)
def get_message_data(video_id, item):
    """Parse one entry of ["continuationContents"]["liveChatContinuation"]["actions"][1:].

    Returns a dict with video_id / message / author / offset_time_msec /
    purchase_amount, or None for placeholder or unrecognised items.
    """
    message = ''
    purchase_amount = ''
    data = None
    # Probably not every payload pattern is covered here.
    if 'addLiveChatTickerItemAction' in item["replayChatItemAction"]["actions"][0]:
        # NOTE(review): this branch tests for 'liveChatTextMessageRenderer'
        # but then reads 'liveChatTickerPaidMessageItemRenderer' -- looks
        # like a copy/paste slip that would raise KeyError on a real text
        # renderer; confirm against live payloads before changing.
        if 'liveChatTextMessageRenderer' in item["replayChatItemAction"]["actions"][0]['addLiveChatTickerItemAction']['item']:
            data = item["replayChatItemAction"]["actions"][0]['addLiveChatTickerItemAction']['item']['liveChatTickerPaidMessageItemRenderer']['showItemEndpoint']['showLiveChatItemEndpoint']['renderer']
        if 'liveChatTickerPaidMessageItemRenderer' in item["replayChatItemAction"]["actions"][0]['addLiveChatTickerItemAction']['item']:
            data = item["replayChatItemAction"]["actions"][0]['addLiveChatTickerItemAction']['item']['liveChatTickerPaidMessageItemRenderer']['showItemEndpoint']['showLiveChatItemEndpoint']['renderer']['liveChatPaidMessageRenderer']
            purchase_amount = data['purchaseAmountText']['simpleText']
        if 'liveChatPlaceholderItemRenderer' in item["replayChatItemAction"]["actions"][0]['addLiveChatTickerItemAction']['item']:
            print(item)
            return None
    if 'addChatItemAction' in item["replayChatItemAction"]["actions"][0]:
        if 'liveChatTextMessageRenderer' in item["replayChatItemAction"]["actions"][0]['addChatItemAction']['item']:
            data = item["replayChatItemAction"]["actions"][0]['addChatItemAction']['item']['liveChatTextMessageRenderer']
        if 'liveChatPaidMessageRenderer' in item["replayChatItemAction"]["actions"][0]['addChatItemAction']['item']:
            data = item["replayChatItemAction"]["actions"][0]['addChatItemAction']['item']['liveChatPaidMessageRenderer']
            purchase_amount = data['purchaseAmountText']['simpleText']
        if 'liveChatPlaceholderItemRenderer' in item["replayChatItemAction"]["actions"][0]['addChatItemAction']['item']:
            print(item)
            return None
    if data is None:
        print(item)
        return None
    if 'message' in data:
        for msg in data['message']['runs']:
            if 'text' in msg:
                # Runs without a 'text' key may be emoji.
                message = message + msg['text']
            else:
                print(data)
    author = data['authorName']['simpleText']
    offset_time_msec = item["replayChatItemAction"]["videoOffsetTimeMsec"]
    return {'video_id': video_id, 'message': message, 'author': author, 'offset_time_msec': offset_time_msec, 'purchase_amount': purchase_amount}
def get_archive_live_chat(video_id, callback):
    """Fetch the replayed live chat of an archived broadcast.

    Scrapes the watch page for the first live_chat_replay URL, then follows
    the continuation chain, collecting parsed message dicts.

    NOTE(review): the success paths exit with `return` *before*
    `callback(result)` is ever reached -- callback only runs after the
    except/break at the bottom. Confirm whether that is intended.
    """
    target_url = 'https://www.youtube.com/watch?v=' + video_id
    result = []
    session = requests.Session()
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'}
    # First request the video page HTML to find the initial live_chat_replay URL.
    # NOTE(review): this first request uses plain requests.get, without the
    # session or the custom user-agent prepared above -- confirm intent.
    html = requests.get(target_url)
    soup = BeautifulSoup(html.text, "html.parser")
    item = None
    next_url = None
    for iframe in soup.find_all("iframe"):
        if "live_chat_replay" in iframe["src"]:
            next_url = iframe["src"]
    if next_url is None:
        # Chat replay is disabled for this video, etc.
        # Example: JRfVSFJhcLw -- a failed broadcast.
        print ('iframe src not found.')
        return
    while 1:
        try:
            html = session.get(next_url, headers=headers)
            soup = BeautifulSoup(html.text, "lxml")
            # Locate the script element holding the next URL via find_all,
            # then shape the text with split.
            for scrp in soup.find_all("script"):
                if "window[\"ytInitialData\"]" in scrp.text:
                    dict_str = scrp.text.split(" = ")[1]
                    # Parsing is easy once this is recognised as JSON, but
                    # strip the trailing junk first (two spaces + \n + ';').
                    dict_str = dict_str.rstrip(" \n;")
                    dics = json.loads(dict_str)
                    if not 'liveChatReplayContinuationData' in dics["continuationContents"]["liveChatContinuation"]["continuations"][0]:
                        # No further data exists.
                        return
                    # "https://www.youtube.com/live_chat_replay?continuation=" + continue_url is the next live_chat_replay URL
                    continue_url = dics["continuationContents"]["liveChatContinuation"]["continuations"][0]["liveChatReplayContinuationData"]["continuation"]
                    next_url = "https://www.youtube.com/live_chat_replay?continuation=" + continue_url
                    # dics[...]["actions"] is the list of comment data; the
                    # head entry is noise, so only [1:] is saved.
                    for item in dics["continuationContents"]["liveChatContinuation"]["actions"][1:]:
                        rec = get_message_data(video_id, item)
                        if not rec is None:
                            result.append(rec)
            # The loop ends once next_url can no longer be obtained.
        except:
            # NOTE(review): bare except -- swallows every exception type,
            # prints the traceback, and breaks the loop; consider narrowing.
            traceback.print_exc()
            print(item)
            break
    callback(result)
    result = []
|
import numpy as np
from itertools import chain
class Lattice:
def __init__(self, N, conc):
self.matrix_size = N
self.spin = np.random.choice([1, -1], p = [conc, 1-conc], size = (N, N))
self.decision_times = []
self.last_changed = np.zeros((N, N))
self.time = 0
self.vert = lambda x,y : [((x,y),(x%N, (y-1)%N)), ((x,y),(x%N, (y+2)%N))]
self.hori = lambda x,y : [((x,y),((x-1)%N, y%N)), ((x,y),((x+2)%N, y%N))]
def stauffer_2d_step(self, media, up, down):
states = [np.copy(self.spin)]
N = self.matrix_size
for spin_update in range(self.matrix_size*self.matrix_size):
# select random site
x = np.random.choice(np.arange(0, self.matrix_size-1))
y = np.random.choice(np.arange(0, self.matrix_size-1))
rows = np.arange(x-1, x+3)%self.matrix_size
cols = np.arange(y-1, y+3)%self.matrix_size
M = self.spin[np.ix_(rows, cols)] # mini matrix of 4x4
array_list = [M[1, :], M[2, :], M[:, 1], M[:, 2]]
old_spins = np.copy(self.spin)
small_m = M[np.ix_([1, 2], [1, 2])] # mini matrix of 2x2
if(small_m[0,0]==small_m[0,1]==small_m[1,0]==small_m[1,1]):
for array in array_list:
assert(len(array)==4)
array[0] = array[1]
array[3] = array[1]
# effect of media on neighboring 8 positions
else:
for array in array_list:
assert(len(array)==4)
array[0] = np.random.choice([1, -1, array[0]], p = [media*up, media*down, 1-media])
array[3] = np.random.choice([1, -1, array[3]], p = [media*up, media*down, 1-media])
self.spin[np.ix_(rows, cols)] = M
changed = self.spin != old_spins
self.decision_times.append(list(self.time - self.last_changed[changed]))
self.last_changed[changed] = self.time
self.time = self.time + 1
return changed, states
def stauffer_2d_step_modified(self, method_version, media, up, down):
N = self.matrix_size
# states = [np.copy(self.spin)]
for spin_update in range(self.matrix_size*self.matrix_size):
# select random site
x = np.random.choice(np.arange(0, self.matrix_size-1))
y = np.random.choice(np.arange(0, self.matrix_size-1))
rows = np.arange(x-1, x+3)%self.matrix_size
cols = np.arange(y-1, y+3)%self.matrix_size
M = self.spin[np.ix_(rows, cols)]
array_list = [M[1, :], M[2, :], M[:, 1], M[:, 2]]
old_spins = np.copy(self.spin)
small_m = M[np.ix_([1, 2], [1, 2])]
if(small_m[0,0]==small_m[0,1]==small_m[1,0]==small_m[1,1]):
for array in array_list:
assert(len(array)==4)
array[0] = array[1]
array[3] = array[1]
else:
for array in array_list:
assert(len(array)==4)
if(method_version==0):
# modified : take neighbor's neighbor's position
array[0] = np.random.choice([1, -1, array[2]], p = [media*up, media*down, 1-media])
array[3] = np.random.choice([1, -1, array[1]], p = [media*up, media*down, 1-media])
else:
# modified : take neighbor's position
array[0] = np.random.choice([1, -1, array[1]], p = [media*up, media*down, 1-media])
array[3] = np.random.choice([1, -1, array[2]], p = [media*up, media*down, 1-media])
self.spin[np.ix_(rows, cols)] = M
changed = self.spin != old_spins
self.decision_times.append(list(self.time - self.last_changed[changed]))
self.last_changed[changed] = self.time
self.time = self.time + 1
return changed
# Sweep over lattice until steady state is reached
def metropolis(self, media = 0, up = 1, down = 0, method = "default", display = False):
    """Iterate sweeps until consensus, stalemate, or the sweep budget ends.

    media/up/down are forwarded to the chosen step function; 'method'
    selects the update rule ("default", "modified1" or "modified2").
    Returns (final magnetization, elapsed time, decision times, snapshots).
    NOTE(review): the 'display' argument and the 'threshold' local are
    never used.
    """
    self.time = 0
    tmax = 500
    epsilon = 0.001
    # time_of_last_change = 0
    threshold = self.matrix_size
    total = self.matrix_size*self.matrix_size
    states = [np.copy(self.spin)]
    while(self.time < tmax*total):
        if(method == "default"):
            # NOTE(review): stauffer_2d_step returns a (changed, states)
            # tuple (see its return statement above), so 'changed' here is
            # a tuple, unlike the modified branches -- confirm intended
            # ('changed' is currently unused, so this is latent only).
            changed = self.stauffer_2d_step(media, up, down)
        elif(method == "modified1"):
            changed = self.stauffer_2d_step_modified(0, media, up, down)
        elif(method == "modified2"):
            changed = self.stauffer_2d_step_modified(1, media, up, down)
        m = self.magnetization()
        staggered_m = self.staggered_magnetization()
        states.append(np.copy(self.spin))
        # steady state consensus --> exit
        if(abs(m) == 1 or 1-abs(m) <= epsilon):
            break
        # steady state stalemate
        elif(staggered_m == 1):
            break
    return m, self.time, self.decision_times, states
def magnetization(self):
    """Mean spin over the whole lattice (the order parameter m)."""
    return self.spin.mean()
def staggered_magnetization(self):
    """Checkerboard (staggered) order parameter; equals 1 for a stalemate.

    Averages spins on the two sub-lattices of even / odd (row + col)
    parity and returns half the absolute difference.
    """
    n = self.matrix_size
    even_parity = (np.indices((n, n)).sum(axis=0) % 2) == 0
    m_even = np.mean(self.spin[even_parity])
    m_odd = np.mean(self.spin[~even_parity])
    return abs(0.5 * (m_even - m_odd))
def display(self):
    """Render the current spin lattice as a small greyscale image.

    NOTE(review): relies on 'plt' (matplotlib.pyplot) being imported at
    module level -- the import is not visible in this part of the file.
    """
    fig, ax = plt.subplots(figsize=(3,3))
    img = ax.imshow(self.spin, cmap='Greys')
    plt.colorbar(img)
    plt.show()
|
import sys
import os
abs_file_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(abs_file_path, "..", ".."))  # add project root to path
import torch
import torch.nn as nn
import collections
from torch.utils.serialization import load_lua
from model import SCNN

# Convert the legacy Torch7 SCNN checkpoint to a PyTorch state dict, then
# round-trip it through the real SCNN module and save a training checkpoint.
model1 = load_lua('experiments/vgg_SCNN_DULR_w9/vgg_SCNN_DULR_w9.t7', unknown_classes=True)
model2 = collections.OrderedDict()

# VGG backbone conv/batch-norm units, as (conv idx, bn idx) in the PyTorch
# state dict.  The torch7 module list uses the same indices up to 31; from
# state-dict index 34 onward the torch7 graph is offset by one
# (torch7 index = state-dict index - 1), exactly as in the original
# hand-written mapping.
_VGG_UNITS = [
    (0, 1), (3, 4), (7, 8), (10, 11), (14, 15), (17, 18), (20, 21),
    (24, 25), (27, 28), (30, 31), (34, 35), (37, 38), (40, 41),
]
_BN_ATTRS = ('weight', 'bias', 'running_mean', 'running_var')

for conv, bn in _VGG_UNITS:
    off = 1 if conv >= 34 else 0  # torch7 index offset for the last stage
    model2['backbone.%d.weight' % conv] = model1.modules[conv - off].weight
    bn_mod = model1.modules[bn - off]
    for attr in _BN_ATTRS:
        model2['backbone.%d.%s' % (bn, attr)] = getattr(bn_mod, attr)

# SCNN head: layer1's conv+bn pairs live inside torch7 module 42.
scnn = model1.modules[42]
for conv, bn in ((0, 1), (3, 4)):
    model2['layer1.%d.weight' % conv] = scnn.modules[conv].weight
    bn_mod = scnn.modules[bn]
    for attr in _BN_ATTRS:
        model2['layer1.%d.%s' % (bn, attr)] = getattr(bn_mod, attr)

# Directional message-passing convolutions; each deep module path mirrors
# the nested torch7 graph and differs per direction, so they stay explicit.
mp = scnn.modules[6]
model2['message_passing.up_down.weight'] = \
    mp.modules[0].modules[0].modules[2].modules[0].modules[1].modules[1].modules[0].weight
model2['message_passing.down_up.weight'] = \
    mp.modules[0].modules[0].modules[140].modules[1].modules[2].modules[0].modules[0].weight
model2['message_passing.left_right.weight'] = \
    mp.modules[1].modules[0].modules[2].modules[0].modules[1].modules[1].modules[0].weight
model2['message_passing.right_left.weight'] = \
    mp.modules[1].modules[0].modules[396].modules[1].modules[2].modules[0].modules[0].weight

model2['layer2.1.weight'] = scnn.modules[8].weight
model2['layer2.1.bias'] = scnn.modules[8].bias

fc = model1.modules[43].modules[1]
model2['fc.0.weight'] = fc.modules[3].weight
model2['fc.0.bias'] = fc.modules[3].bias
model2['fc.2.weight'] = fc.modules[5].weight
model2['fc.2.bias'] = fc.modules[5].bias

# Create the output directory *before* the first save -- the original only
# created it after torch.save() had already been asked to write into it.
out_dir = os.path.join('experiments', 'vgg_SCNN_DULR_w9')
os.makedirs(out_dir, exist_ok=True)
save_name = os.path.join(out_dir, 'vgg_SCNN_DULR_w9.pth')
torch.save(model2, save_name)

# Load into the real SCNN module and save a full training checkpoint.
# Conv biases are zeroed (the converted dict provides none -- presumably
# the t7 model had bias-free convs; TODO confirm).
net = SCNN(input_size=(800, 288), pretrained=False)
d = torch.load(save_name)
net.load_state_dict(d, strict=False)
for m in net.backbone.modules():
    if isinstance(m, nn.Conv2d):
        if m.bias is not None:
            m.bias.data.zero_()
save_dict = {
    "epoch": 0,
    "net": net.state_dict(),
    "optim": None,
    "lr_scheduler": None
}
torch.save(save_dict, save_name)
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from blog.models import Post, Comment, BlogUser
# Register your models here.
class BlogUserInline(admin.StackedInline):
    """Edit the BlogUser profile inline on the User admin page."""
    model = BlogUser
    can_delete = False
    verbose_name_plural = 'blog_users'


class UserAdmin(BaseUserAdmin):
    """Stock user admin extended with the BlogUser inline."""
    inlines = (BlogUserInline, )


# Re-register User so the extended admin replaces Django's default one.
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(Post)
admin.site.register(Comment)
from multiprocessing import Process
import time


def f(name):
    """Sleep briefly, then greet *name* (runs in a child process)."""
    time.sleep(2)
    print("hello, {0}".format(name))


if __name__ == '__main__':
    # Spawn two workers and wait for both.  The original never join()ed,
    # so the parent gave no guarantee of outliving/reaping its children.
    workers = [Process(target=f, args=("zys",)) for _ in range(2)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
|
"""
This script is used for trimming SGS data
"""
#!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
from processores.file_processor import FilePorcessor
from processores.cmd_processor import CmdProcessor
def fozu():
    """Print an ASCII-art Buddha banner followed by a few Zen-of-Python
    lines (a traditional "bless this code" joke header)."""
    print(" _ooOoo_ ")
    print(" o8888888o ")
    print(" 88 . 88 ")
    print(" (| -_- |) ")
    print(" O\\ = /O ")
    print(" ____/`---'\\____ ")
    print(" . ' \\| |// `. ")
    print(" / \\||| : |||// \\ ")
    print(" / _||||| -:- |||||- \\ ")
    print(" | | \\\\\\ - /// | | ")
    print(" | \\_| ''\\---/'' | | ")
    print(" \\ .-\\__ `-` ___/-. / ")
    print(" ___`. .' /--.--\\ `. . __ ")
    print(" ."" '< `.___\\_<|>_/___.' >'"". ")
    print(" | | : `- \\`.;`\\ _ /`;.`/ - ` : | | ")
    print(" \\ \\ `-. \\_ __\\ /__ _/ .-` / / ")
    print(" ======`-.____`-.___\\_____/___.-`____.-'====== ")
    print(" `=---=' ")
    print(" ")
    print(" ............................................. ")
    print(" Prey for no bug ")
    print(" Zen of python: ")
    print(" Beautiful is better than ugly.; ")
    print(" Explicit is better than implicit. ")
    print(" Simple is better than complex. ")
    print(" Complex is better than complicated. ")
    print(" Flat is better than nested. ")
    print(" Sparse is better than dense. ")
    print(" Readability counts. ")
    print(" Now is better than never. ")
def main():
    """Parse command-line options and run the chosen trimming program.

    -d/--directory: directory containing the paired sequencing files.
    -p/--program:   'btrim' (default) or 'fqtrim'.
    -g/--is_genome: 'T' (no poly-A / genome data, default) or 'F'
                    (poly-A / transcriptome data); only used by fqtrim.
    """
    fozu()
    parser = argparse.ArgumentParser(description="Trim the sequencing data in the directory.")
    parser.add_argument('-d', '--directory', help='The directory where you put the sequencing files.', default='')
    # help-text typo fixes: "chose" -> "choose", "wang" -> "want"
    parser.add_argument('-p', '--program', help='choose the program you want to use (btrim or fqtrim, default is btrim)',
                        default='btrim')
    parser.add_argument('-g', '--is_genome', help='"T" for sequencing data without poly A (genome sequencing data, default),\
                        "F" for sequencing data with poly A (transcriptome sequencing data).\
                        This option is only useful if you use fqtrim.',
                        default='T')
    args = parser.parse_args()
    # discover paired sequencing files, then build and run the commands
    f_proc = FilePorcessor()
    f_proc = f_proc.fit(args.directory)
    paired_seq_files = f_proc.get_paired_seq_fpaths()
    cmd_proc = CmdProcessor()
    cmd_proc.fit(paired_seq_files, args.is_genome)
    program = args.program.lower()  # hoisted: compared twice below
    if program == 'btrim':
        cmd_proc.cmd_btrim()
    elif program == 'fqtrim':
        cmd_proc.cmd_fqtrim()
    else:
        print("Wrong program, please choose 'btrim', or 'fqtrim'.")


if __name__ == '__main__':
    main()
|
from Crypto.Cipher import AES
from collections import OrderedDict
def pad(data):
    """PKCS#7-pad *data* to a multiple of 16 bytes.

    Always appends at least one byte: already-aligned input gains a full
    16-byte pad block, as PKCS#7 requires.
    """
    fill = -len(data) % 16 or 16
    return data + bytearray([fill]) * fill
def unpad(data):
    """Strip the PKCS#7 padding appended by pad().

    Fixes two defects in the original: a pad length of exactly 16 (which
    pad() produces for aligned input) was never stripped, and a final byte
    of 0 would have emptied the whole buffer via data[:-0].
    Note: pad bytes are not verified to all match (fine for this exercise).
    """
    pad_length = data[-1]
    if 1 <= pad_length <= 16:
        data = data[:-pad_length]
    return data
def parse(qs):
    """Parse a query string 'k1=v1&k2=v2' into an ordered key->value map."""
    return OrderedDict(pair.split('=') for pair in qs.split('&'))
def escape(value):
    """Neutralise query-string metacharacters: '&' -> 'and', '=' -> 'is'."""
    for ch, word in (('&', 'and'), ('=', 'is')):
        value = value.replace(ch, word)
    return value
def unparse(values):
    """Encode a mapping back into a query string, escaping keys and values."""
    return '&'.join(
        escape(key) + '=' + escape(str(value)) for key, value in values.items()
    )
def profile_for(email):
    """Build the (ordered) profile query string for *email*.

    user_id and role are fixed; metacharacters in the email are escaped
    by unparse, so the caller cannot inject '&role=admin' directly.
    """
    profile = OrderedDict()
    profile['email'] = email
    profile['user_id'] = 10
    profile['role'] = 'user'
    return unparse(profile)
def encrypted_profile_for(email):
    """Return the AES-ECB ciphertext of the profile string for *email*."""
    return encrypt(profile_for(email))
def encrypt(data):
    """AES-ECB-encrypt *data* (str) under the 16-byte key in key.txt.

    ECB mode is deliberately (mis)used here -- this file implements the
    cryptopals ECB cut-and-paste exercise.
    """
    data = bytearray(data, 'utf-8')
    with open('key.txt', 'rb') as fp:
        key = fp.read()
    assert len(key) == 16
    padded = pad(data)
    return AES.new(key, AES.MODE_ECB).encrypt(bytes(padded))
def decrypt(data):
    """Decrypt AES-ECB ciphertext with the key from key.txt, then unpad."""
    with open('key.txt', 'rb') as fp:
        key = fp.read()
    assert len(key) == 16
    return unpad(AES.new(key, AES.MODE_ECB).decrypt(data))
def decrypt_profile(encrypted):
    """Decrypt a profile ciphertext and parse it back into an OrderedDict."""
    return parse(decrypt(encrypted).decode('utf-8', 'ignore'))
# Demo: round-trip parsing/encoding, then the ECB cut-and-paste attack.
# ECB encrypts every 16-byte block independently, so blocks taken from
# different ciphertexts can be recombined into a valid ciphertext.
qs = 'foo=bar&baz=qux&zap=zazzle'
print(parse(qs))
print(profile_for("foo@bar.com"))
print(profile_for("foo@bar.com&admin=yes"))
print(decrypt_profile(encrypted_profile_for('test@foo.com')))
print(profile_for('hello@world.com'))
# email=h@old.com&user_id=10&role=user
# ================================
# 'h@old.com' is sized so that the first two blocks (32 bytes) end exactly
# after "role=" -- i.e. right before the role value.
ends_in_role_is = encrypted_profile_for('h@old.com')[0:32]
# email=foofoofoofadmin&user_id=10&role=user
# ================
# 'foofoofoofadmin' pads "email=" to a block boundary, aligning the text
# "admin&user_id=10" into the second ciphertext block.
has_admin = encrypted_profile_for('foofoofoofadmin')[16:32]
# Splice: ...role= + admin&user_id=10 -> the parsed profile has role=admin.
print(decrypt_profile(ends_in_role_is + has_admin))
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy import Request
from recruitment.items import TongchengItem
class TongchengSpider(scrapy.Spider):
    """Crawl tech job postings from the 58.com city sub-domains."""
    handle_httpstatus_list = [301, 302]
    name = 'tongcheng'
    allowed_domains = ['58.com']
    # phase-1 city codes to crawl
    didian_list={'bj','cd','cq','cs','cc','dl','dg','fz'}
    # phase-2 city codes
    # NOTE(review): the literal repeats 'hz', 'sz', 'wh' and 'yc'; a set
    # silently de-duplicates them, so fewer cities are covered than listed.
    didian_list2 = {'fs','gz',
    'gy','gl','hz','hz','heb','hf','hhht','hk','jn','km',
    'lz','ls','nj','nb','nn','nc','qd','sh','sz',
    'sy','sjz','sz','tj','ty','wh','wx','wlmq','wh',
    'xa','xm','xn','yc','yc','yt','zz','zh'}
    # tongcheng_url='http://jn.58.com/tech/pn1/?utm_source=market&spm=b-31580022738699-me-f-824.bdpz_biaoti&PGTID=0d303655-0010-915b-ca53-cb17de8b2ef6&ClickID=3'
    tongcheng_url='http://{dd}.58.com/tech/'
    num = 0

    def start_requests(self):
        """Seed one listing-page request per phase-1 city."""
        for didian in self.didian_list:
            yield Request(self.tongcheng_url.format(dd=didian), self.parse_per_page)

    def parse_per_page(self, response):
        """Extract job-detail links from a listing page and follow pagination."""
        # two page layouts exist in the wild: try the old one, then the new one
        url_list = response.css('.main .leftbar .infolist dt a::attr(href)').extract()
        if not url_list:
            url_list = response.css('.main .leftCon li .job_name a::attr(href)').extract()
        if not url_list:
            print('url list 为空')
        for tongcheng_per_message_url in url_list:
            yield Request(url=tongcheng_per_message_url, callback=self.parse_per_message)
        # link to the next results page (again: old layout, then new layout)
        tongcheng_next_page = response.css('.main .leftbar .pagerout .next::attr(href) ').extract_first()
        if not tongcheng_next_page:
            tongcheng_next_page = response.css('.con .main .leftCon .pagesout .next::attr(href) ').extract_first()
        if tongcheng_next_page:
            yield Request(url=tongcheng_next_page, callback=self.parse_per_page)

    def parse_per_message(self, response):
        """Scrape one job-detail page into a TongchengItem."""
        if response.css('.con'):
            # company location
            gsdd = response.css('.con .leftCon .pos_info .pos-area .pos_area_item::text ').extract_first().strip()
            # company name
            gsmc= response.css('.con .rightCon .item_con .company_baseInfo .comp_baseInfo_title .baseInfo_link a::text').extract_first().strip()
            # job description (paragraphs joined into one string)
            gwms_arr=response.css('.con .leftCon .item_con .pos_description .posDes .des::text').extract()
            gwms= ''.join(gwms_arr).strip().replace('/t', '')
            # job category
            zwlb = response.css('.con .leftCon .item_con .pos_base_info .pos_title::text').extract_first().strip()
            # monthly salary
            zwyx = response.css('.con .leftCon .item_con .pos_base_info .pos_salary::text').extract_first().strip()
            # job title
            zwmc = response.css('.con .leftCon .item_con .pos_name::text').extract_first().strip()
            condition = response.css('.con .leftCon .item_con .pos_base_condition .item_condition::text').extract()
            # number of openings
            zprs = condition[0].strip()
            # education requirement
            xlyq = condition[1].strip()
            # experience requirement
            jyyq = condition[2].strip()
            item=TongchengItem()
            item['gsmc'] = gsmc
            item['zwmc'] = zwmc
            item['zprs'] = zprs
            item['zwyx'] = zwyx
            item['gsdd'] = gsdd
            item['xlyq'] = xlyq
            item['jyyq'] = jyyq
            item['gwms'] = gwms
            item['zwlb'] = zwlb
            item['xxly'] = '58同城'
            yield item
|
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import make_interp_spline as spline

# Measured forward V/A characteristic of a diode.
U = np.array([0, 0.15, 0.24, 0.3, 0.41, 0.51, 0.6, 0.7, 0.75, 0.76, 0.78])  # voltage [V]
I = np.array([0, 0, 0, 0, 0, 0.02, 1.9, 13.9, 46.7, 59.1, 98.1])  # current [mA]
# U_smooth = np.linspace(U.min(), I.max(), 300)
# I_smooth = spline(U, I)(U_smooth)
# dashed line through the samples plus red dots on the data points
plt.plot(U, I, '--', U, I, 'r.', label = "V/A")
plt.title("Diode's Positive V/A", fontsize = 18)
plt.xlabel("Voltage/V", fontsize = 16)
plt.ylabel("Current/mA", fontsize = 16)
plt.legend()
# for u, i in zip(U, Iv):
# plt.text(u, i, (u,i), ha='right', va='bottom', fontsize = 5)
plt.grid(True)
plt.xticks(np.arange(0, 0.9, 0.1))
plt.yticks(np.arange(0, 105, 5))
plt.show()
# Map path-shape regexes to the object type found at such a path inside a
# portage repository.
# NOTE(review): Profile/Category/CatPkg/PkgAtom are referenced before the
# class definitions below (Profile is not defined in this part of the file
# at all), so evaluating this dict at import time raises NameError -- this
# module looks like work in progress.
repo = {
    r"profiles/[\w-]+" : Profile,
    r"[^/]+" : Category,
    r"[^/]+/[^/]+" : CatPkg,
    r"[^/]+/[^/]+/[^_]+" : PkgAtom,
    }
# Same mapping for an on-disk (non-repository) tree; no profiles here.
# The closing brace was missing in the original (syntax error).
disk = {
    r"[^/]+" : Category,
    r"[^/]+/[^/]+" : CatPkg,
    r"[^/]+/[^/]+/[^_]+" : PkgAtom,
    }
class Root(object):
    """Anchors a tree of Path objects at an absolute filesystem path."""

    def __init__(self, path):
        self.path = path

    def __hash__(self):
        # Fixed: the original hashed the bare name 'path' (NameError at
        # call time); hash on the instance attribute, matching __eq__.
        return hash(self.path)

    def __eq__(self, other):
        return self.path == other.path
class Path(object):
    """A path relative to a Root; hashes/compares by its full path."""

    def __init__(self, root, path):
        self.root = root
        self.path = path

    def __hash__(self):
        return hash(self.fullpath)

    def __eq__(self, other):
        # Added to pair with __hash__: without __eq__, equal-valued Path
        # objects compared by identity, so the set de-duplication that
        # contents() relies on never worked.
        return self.fullpath == other.fullpath

    def adjpath(self, change):
        # This returns a new path -
        # foo.adjpath("..") would return the previous directory.
        # foo.adjpath("foo") would return a path to the current path plus "/foo"
        # foo.adjpath("/foo") would return an absolute path "/foo".
        # The path root for the new path is the same as this path.
        if os.path.isabs(change):
            return Path(root=self.root, path=change)
        return Path(root=self.root, path=os.path.normpath(os.path.join(self.path, change)))

    def contents(self):
        """Return a set of Paths, one per directory entry.

        Fixed: the original called os.path.listdir, which does not exist;
        the listing function is os.listdir.
        """
        objs = set()
        for entry in os.listdir(self.fullpath):
            objs.add(self.adjpath(entry))
        return objs

    @property
    def fullpath(self):
        return os.path.join(self.root.path, self.path)

    def exists(self):
        return os.path.exists(self.fullpath)

    def open(self, mode):
        return open(self.fullpath, mode)
class PortageRepository(Path):
    """A Path that is the root of a portage ebuild repository."""

    def contents(self):
        """Return Category objects for each on-disk directory that is also
        listed in profiles/categories.

        Fixes from the original line: os.listdir (os.path.listdir does not
        exist), a missing closing parenthesis, and the bare name 'valid'
        (the whitelist lives on the 'valid' property of self).
        """
        cats = set(os.listdir(self.fullpath)) & self.valid
        objs = set()
        for cat in cats:
            objs.add(Category(root=self.root, path=cat))
        return objs

    @property
    def valid(self):
        # self is already a Path, so call adjpath on self -- the original
        # called it on the raw string self.path.  NOTE(review): grabfile()
        # is not defined on Path in this part of the file; confirm it
        # exists elsewhere.
        return set(self.adjpath("profiles/categories").grabfile())
class Category(Path):
    """A single category directory (e.g. "sys-apps") in the tree."""

    def contents(self):
        """Return CatPkg children, one per package subdirectory.

        Fixed: os.listdir (the original's os.path.listdir does not exist).
        """
        objs = set()
        for catpkg in os.listdir(self.fullpath):
            objs.add(CatPkg(root=self.root, path="%s/%s" % (self.path, catpkg)))
        return objs
class CatPkg(Path):
    """A category/package directory holding .ebuild files."""

    def contents(self):
        """Return PkgAtom children, one per .ebuild file.

        Fixed: os.listdir (the original's os.path.listdir does not exist);
        the suffix test now uses endswith for clarity (same behaviour as
        the original slice comparison).
        """
        objs = set()
        for pkgatom in os.listdir(self.fullpath):
            if not pkgatom.endswith(".ebuild"):
                continue
            # NOTE(review): slicing the bare filename with len(self.path)
            # ("cat/pkg") looks wrong for a name like "pkg-1.0.ebuild";
            # kept as-is pending confirmation of the intended offset.
            vers = pkgatom[len(self.path):-7]
            objs.add(PkgAtom(root=self.root, path="%s/%s" % (self.path, vers)))
        return objs
class PkgAtom(Path):
    """A single package version (atom).  Work in progress."""

    def exists(self):
        # The original body was empty (a syntax error); stubbed until the
        # ebuild-path reconstruction is implemented.
        raise NotImplementedError("PkgAtom.exists is not implemented yet")


class GitPortageRoot(Root):
    # Empty in the original (syntax error without a body); placeholder.
    pass


class ProfileRoot(Root):
    # Empty in the original (syntax error without a body); placeholder.
    pass
|
# coding: utf-8
from django.views.generic import FormView
from .classes import Keeper
from .forms import PaymentForm
from .mixins import AjaxFormMixin
from .models import AppUser
class HomepageView(AjaxFormMixin, FormView):
    """Homepage: renders the payment form and splits payments.

    NOTE(review): xrange and the old-style super() call imply Python 2.
    """
    template_name = 'homepage.html'
    form_class = PaymentForm

    @staticmethod
    def get_class_children():
        # Instantiate three Keeper objects (the loop index is unused),
        # then return whatever instances Keeper tracks.
        for i in xrange(1, 4):
            Keeper()
        return Keeper.get_instances()

    def get_context_data(self, **kwargs):
        """Add the Keeper instances to the template context."""
        context = super(HomepageView, self).get_context_data(**kwargs)
        context['class_children'] = self.get_class_children()
        return context

    def process_data(self, data):
        """Debit 'amount' from 'user' and split it evenly among 'receivers'.

        NOTE(review): the per-receiver share is computed from the length of
        the *requested* receiver list, but credited only to receivers that
        match an AppUser by inn -- if some inns are unknown, part of the
        debited amount is credited to nobody.  Confirm this is intended.
        """
        user, receivers, amount = data['user'], data['receivers'], data['amount']
        if receivers:
            part = amount / float(len(receivers))
            user.change_balance(-amount)
            receivers = AppUser.objects.filter(inn__in=receivers)
            for receiver in receivers:
                receiver.change_balance(part)
|
#!/usr/bin/python env
# coding: utf-8
from helper.model import DBConnection
from config import finance_dict
import time
"""
MySQL table migration (Python 2): copy rows from CpcFinanceDetail into
CpcFinanceDetail1, resuming after the largest id already present in the
destination table.  The batch loop below is currently commented out, so
only the first batch of up to 1000 rows is fetched and printed.
"""
if __name__ == "__main__":
    # model = Model()
    # model.select(select_param=["id", "name", "gender"], where_param={"name = ": "jack", "gender = ": "M"})
    # model.order_by(["name desc", "gender desc"])
    # model.limit(1, 4)
    # result = model.execute()
    # print result
    # model.insert(result)
    # configuration
    print finance_dict
    # source connection
    model = DBConnection(**finance_dict)
    # table to migrate from: CpcFinanceDetail
    model.table_name = "CpcFinanceDetail"
    model_v2 = DBConnection(**finance_dict)
    # destination table: CpcFinanceDetail1
    model_v2.table_name = "CpcFinanceDetail1"
    # find the id of the last row already synced into the destination
    model_v2.select(select_param=["max(id) as max_id"])
    mid = model_v2.execute()
    max_id = mid[0]["max_id"]
    if max_id is None:
        max_id = 0
    # sync loop (disabled below: only the first batch is fetched)
    status = True
    # while status:
    model.select(select_param=["*"], where_param={"id > ": max_id})
    model.limit(0, 1000)
    item_list = model.execute()
    print item_list
    # item_list = model.execute()
    # if not item_list:
    # status = False
    # break
    # count = len(item_list)
    # max_id = item_list[count - 1]["id"]
    #
    # print "正在向表 " + table + " 表中插入" + str(count) + " 条数据 当前id为 " + str(max_id)
    # # insert into the offline ad database
    # model_v2.insert(item_list)
    # model_v2.exe()
    # model_v2.commit()
    # # sleep 0.1 s
    # time.sleep(0.1)
|
import os
from flask import Flask, render_template

app = Flask(__name__)
# disable caching so the freshly generated plot is always re-served
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0


@app.route("/")
@app.route("/index")
def show_image():
    """Serve the index page pointing at the static CO2 graph image."""
    image_path = os.sep.join(["static", "co2.png"])
    return render_template("index.html", co2_graph_path=image_path)
|
#!/usr/bin/env python
#
# test_state.py -
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
import os.path as op
from fsl.data.image import Image
from fsl.data.vtk import VTKMesh
import fsleyes.state as state
from fsleyes.views.orthopanel import OrthoPanel
from fsleyes.views.histogrampanel import HistogramPanel
from . import run_with_fsleyes, realYield
datadir = op.join(op.dirname(__file__), 'testdata')
def _test_state(frame, overlayList, displayCtx):
    """Build a two-panel FSLeyes scene, snapshot its state, destroy the
    scene, restore the snapshot, and assert every tweaked setting survived
    the round trip.
    """
    # load a reference image and a mesh overlay, and tweak their options
    rfile = op.join(datadir, 'mesh_ref.nii.gz')
    mfile = op.join(datadir, 'mesh_l_thal.vtk')
    overlayList.append(Image( rfile))
    overlayList.append(VTKMesh(mfile))
    ref, mesh = overlayList
    ropts = displayCtx.getOpts(ref)
    mopts = displayCtx.getOpts(mesh)
    ropts.cmap = 'hot'
    mopts.refImage = ref
    mopts.outline = True
    # open two view panels and set non-default options on each
    frame.addViewPanel(OrthoPanel)
    frame.addViewPanel(HistogramPanel)
    oopts = frame.viewPanels[0].sceneOpts
    oopts.showXCanvas = False
    oopts.zzoom = 2000
    frame.viewPanels[1].smooth = True
    frame.viewPanels[1].histType = 'count'
    realYield(200)
    # snapshot the state, then tear the whole scene down
    st = state.getState(frame)
    frame.removeAllViewPanels()
    del overlayList[:]
    realYield(200)
    # restore from the snapshot
    state.setState(frame, st)
    realYield(200)
    # verify panels, panel options, overlays and overlay options all match
    ortho, hist = frame.viewPanels
    assert isinstance(ortho, OrthoPanel)
    assert isinstance(hist, HistogramPanel)
    assert not ortho.sceneOpts.showXCanvas
    assert ortho.sceneOpts.zzoom == 2000
    assert hist.smooth
    assert hist.histType == 'count'
    ref, mesh = overlayList
    assert isinstance(ref, Image)
    assert isinstance(mesh, VTKMesh)
    ropts = displayCtx.getOpts(ref)
    mopts = displayCtx.getOpts(mesh)
    assert ropts.cmap.name == 'hot'
    assert mopts.refImage is ref
    assert mopts.outline
def test_state():
    """Entry point: run the state round-trip check inside a live FSLeyes."""
    run_with_fsleyes(_test_state)
|
import unittest
#from django.http import HttpResponse
#from django.test.client import RequestFactory
from django.test import Client
from django.contrib.auth.models import User
from pip._vendor.requests import session
class MyTestCase(unittest.TestCase):
    """Integration-style tests for the hospital app views.

    NOTE(review): many tests issue real HTTP requests to
    http://127.0.0.1:8000 via the 'requests' library, so they need a
    running dev server; asserting that requests.post(...) is not None is
    always true and does not check the response status.
    NOTE(review): 'print i' below implies Python 2.
    """
    def test_FactoryUserStatus(self): # Running
        from views import FactoryUserStatus
        self.assertEquals("receptionist", FactoryUserStatus("rachel"))

    def test_get_boolean(self):
        from views import get_boolean
        self.assertEqual(get_boolean("yes"), True)

    def test_sign_up_success(self):
        # registering a fresh username should succeed
        from views import signup_page
        from django.test import Client
        c = Client()
        response = c.post('/register/',{'username':'Monika','password':'123456','confirm_password':'123456','fname':'testName',
        'lname':'testLastName','gender':'Female','dob':'testDate','insurance':'yes',
        'mobileno':'testmobno','address':'testAddress','zipcode':'dfgdgd','state':'testState'})
        #print response.status_code
        self.assertIsNotNone(response.content)

    def test_sign_up_fail(self):
        # registering an already-taken username ('rachel')
        from views import signup_page
        from django.test import Client
        c = Client()
        response = c.post('/register/', {'username': 'rachel', 'password': '123456', 'confirm_password': '123456',
        'fname': 'testName',
        'lname': 'testLastName', 'gender': 'Female', 'dob': 'testDate',
        'insurance': 'yes',
        'mobileno': 'testmobno', 'address': 'testAddress', 'zipcode': 'dfgdgd',
        'state': 'testState'})
        #self.assertIsNotNone(response.url)
        #self.assertTemplateUsed(response,'register.html')
        #self.assertContains(response, 'This username is already being taken!')

    def test_getDoctors(self):
        from views import show_doctors
        from django.http import HttpRequest
        get_req = HttpRequest()
        names = show_doctors(get_req)
        self.assertIsNotNone(names)
        #print names.content

    def test_checkBeds(self):
        from django.http import HttpRequest
        from views import check_beds
        get_req = HttpRequest()
        get_req.user=User()
        get_req.user.username="aferguson0"
        get_req.user.password="1234567890"
        from django.test import Client
        c = Client()
        response= c.post('/check_beds/')
        self.assertIsNotNone(check_beds(get_req,'aferguson0'))

    def test_check_login_page_POST(self):
        import requests
        from views import admit_patient
        import json
        headers = {'content-type': 'application/json'}
        url = 'http://127.0.0.1:8000/login/'
        data = {"data": {"username": "dcole0","password":"1234567890"}}
        self.assertIsNotNone(requests.post(url, params={}, data=json.dumps(data), headers=None))

    # Test that ShowDoctors does not return None
    def test_show_doctors(self):
        from django.http import HttpRequest
        from views import show_doctors
        testrequest = HttpRequest()
        self.assertIsNotNone(show_doctors(testrequest))

    # View appointments for Doctors
    def test_view_appointments_doctors(self):
        from django.http import HttpRequest
        from views import view_appointments_doctors
        testrequest = HttpRequest()
        testuser = User()
        testuser.username='dcole0'
        testuser.password='1234567890'
        testuser.is_active = True
        testrequest.user = testuser
        self.assertIsNotNone(view_appointments_doctors(testrequest,testrequest.user.username))

    # View appointments for Patients
    def test_view_appointments_patients(self):
        from django.http import HttpRequest
        from views import view_appointments_patients
        testrequest = HttpRequest()
        testuser = User()
        testuser.username = 'sscott0'
        testuser.password = '1234567890'
        testuser.is_active = True
        testrequest.user = testuser
        self.assertIsNotNone(view_appointments_patients(testrequest, testrequest.user.username))

    # View Time for patients
    def test_view_time(self):
        from django.http import HttpRequest
        from views import view_time
        testrequest = HttpRequest()
        testuser = User()
        testuser.username = 'dcole0'
        testuser.password = '1234567890'
        testuser.is_active = True
        testrequest.user = testuser
        self.assertIsNotNone(view_time(testrequest, testrequest.user.username))

    # Test Admit Patient
    def test_admit_patient(self):
        import requests
        from views import admit_patient
        import json
        headers = {'content-type': 'application/json'}
        url = 'http://127.0.0.1:8000/admit/aferguson0/'
        data = {"data": {"patientID": None}}
        params = {'sessionKey': '9ebbd0b25760557393a43064a92bae539d962103', 'format': 'xml', 'platformId': 1}
        self.assertIsNotNone(requests.post(url, params={}, data=json.dumps(data), headers=None))

    # Test Add Doctors
    def test_doctor_add(self):
        from views import doctor_add
        import requests
        import json
        url = 'http://127.0.0.1:8000/doctoradd/aferguson0/'
        data = {"data": {"patientID": None}}
        params = {'sessionKey': '9ebbd0b25760557393a43064a92bae539d962103', 'format': 'xml', 'platformId': 1}
        self.assertIsNotNone(requests.post(url, params={}, data=json.dumps(data), headers=None))

    def test_patient_discharge(self):
        import requests
        from views import admit_patient
        import json
        headers = {'content-type': 'application/json'}
        url = 'http://127.0.0.1:8000/discharge/aferguson0/'
        data = {"data": {"patientID": None}}
        params = {'sessionKey': '9ebbd0b25760557393a43064a92bae539d962103', 'format': 'xml', 'platformId': 1}
        self.assertIsNotNone(requests.post(url, params={}, data=json.dumps(data), headers=None))

    def test_set_office_hours(self):
        import requests
        from views import set_office_hours
        import json
        headers = {'content-type': 'application/json'}
        url = 'http://127.0.0.1:8000/timings/dcole0/monday'
        data = {"data": {"starttime": '2.30pm',"endtime": '3.30pm','starttime2':'5.30pm',
        'endtime2':'8.30pm'}}
        params = {'sessionKey': '9ebbd0b25760557393a43064a92bae539d962103', 'format': 'xml', 'platformId': 1}
        self.assertIsNotNone(requests.post(url, params={}, data=json.dumps(data), headers=None))

    def test_option_maker(self):
        from views import option_maker
        retVal = option_maker('holiday', "dcole0", "Sunday")
        for i in retVal:
            print i
        self.assertIsNotNone(retVal)

    def test_get_clean_timings_array(self):
        from views import get_clean_timings_array
        retVal = get_clean_timings_array('holiday')
        self.assertIsNotNone(retVal)

    def test_transfer_patient(self):
        import requests
        import json
        url = 'http://127.0.0.1:8000/transfer/rachel/sscott0'
        data = {"data": {"doctorID": 'dcole0', "description": 'Needs eye surgery'}}
        self.assertIsNotNone(requests.post(url, params={}, data=json.dumps(data), headers=None))

    def test_view_transfer_consents(self):
        import requests
        url = 'http://127.0.0.1:8000/tconsent/dcole0'
        self.assertIsNotNone(requests.post(url, params={}, headers=None))

    def test_approve_transfer_consent(self):
        from django.http import HttpRequest
        from views import approve_transfer_consent
        testrequest = HttpRequest()
        testuser = User()
        testuser.username = 'dcole0'
        testuser.password = '123456789'
        testuser.is_active = True
        testrequest.user = testuser
        self.assertIsNotNone(approve_transfer_consent(testrequest, testrequest.user.username, "sscott0"))
        import requests
        url = 'http://127.0.0.1:8000/transferapprove/dcole0/sscott0'
        self.assertIsNotNone(requests.post(url, params={}, headers=None))

    def test_reject_transfer_consent(self):
        from django.http import HttpRequest
        from views import reject_transfer_consent
        testrequest = HttpRequest()
        testuser = User()
        testuser.username = 'dcole0'
        testuser.password = '1234567890'
        testuser.is_active = True
        testrequest.user = testuser
        self.assertIsNotNone(reject_transfer_consent(testrequest, testrequest.user.username, "sscott0"))
        import requests
        url = 'http://127.0.0.1:8000/transferreject/dcole0/sscott0'
        self.assertIsNotNone(requests.post(url, params={}, headers=None))
if __name__ == '__main__':
    # allow running this test module directly
    unittest.main()
# filename : jutil.py
# author : Jinho D. Choi
# last update: 4/19/2010
import math
def bits(i, n):
    """Return the n-bit big-endian binary expansion of integer i as a
    tuple of 0/1 ints (most significant bit first)."""
    return tuple((i >> shift) & 1 for shift in range(n - 1, -1, -1))
def getSub(L, t):
    """Return the sublist of L whose positions are flagged 1 in tuple t."""
    return [L[pos] for pos in range(len(L)) if t[pos] == 1]
def permutations(L):
    """Return every non-empty sublist of L, in bitmask order.

    NOTE(review): despite the name, this enumerates subsets (with original
    element order preserved), not permutations.
    """
    size = len(L)
    # 1 << size replaces int(math.pow(2, size)): exact integer arithmetic,
    # immune to float rounding for large inputs.
    return [getSub(L, bits(i, size)) for i in range(1, 1 << size)]
|
# Write personal data to a text file.  'with' guarantees the handle is
# closed even on error, and an explicit UTF-8 encoding makes the non-ASCII
# name writable regardless of the platform's default codec.
with open('PersonalData.txt', 'w', encoding='utf-8') as out_file:
    out_file.write("Sebastian Bętkowski\n")
    out_file.write("Uniwersytet Ekonomiczny\n")
    out_file.write("Informatyka Stosowana")
#!/usr/bin/env python
# encoding: utf-8
"""
Created by Nick DeVeaux on 2014-08-07.
to run: python counting_nucleotides.py < dna.txt
Sample Dataset
AGCTTTTCATTCTGACTGCAACGGGCAATATGTCTCTGTGTGGATTAAAAAAAGAGTGTCTGATAGCAGC
Sample Output
20 12 17 21
"""
import sys
import os
import utils
import fileinput
def nucleiotide_incidence(seq):
    """Count occurrences of A, C, G and T in *seq*, in that order."""
    return [seq.count(base) for base in ('A', 'C', 'G', 'T')]
def main():
    """Read input lines, count the bases of the first one, and print them."""
    lines = [raw.rstrip() for raw in fileinput.input()]
    utils.print_array(nucleiotide_incidence(lines[0]))


if __name__ == '__main__':
    main()
from sys import argv, exit
script, sstfile, imgName = argv
import matplotlib
matplotlib.use('Agg')
#import pandas as pd
import numpy as np
from netCDF4 import Dataset, num2date
import datetime
#from itertools import compress
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from mpl_toolkits.axes_grid1 import make_axes_locatable
sstnc=Dataset(sstfile,'r')
lon=sstnc.variables['lon'][:]
lat=sstnc.variables['lat'][:]
sst=np.squeeze(sstnc.variables['sst'][:])
sst[sst==-999]=np.nan
sstt=sstnc.variables['time']
t=num2date(sstt[0],units=sstt.units,calendar=sstt.calendar)
LAND = cfeature.NaturalEarthFeature(
'physical', 'land', '10m',
edgecolor='face',
facecolor='tan')
state_lines = cfeature.NaturalEarthFeature(
category='cultural',
name='admin_1_states_provinces_lines',
scale='50m',
facecolor='none')
def plotMap(figname,figtitle,data):
    """Render *data* on a PlateCarree map and save it to *figname*.

    Uses the module-level lon/lat arrays for the map extent and the
    pre-built LAND / state_lines Natural Earth features for context.
    """
    plt.close('all')
    fig, ax = plt.subplots(figsize=(11, 8),subplot_kw=dict(projection=ccrs.PlateCarree()))
    # Dashed gray graticule, labeled on the left and bottom only.
    gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=2, color='gray', alpha=0.5, linestyle='--')
    gl.xlabels_top = False
    gl.ylabels_right = False
    plt.title(figtitle)
    # Reserve a slim column on the right of the map for the colorbar.
    divider = make_axes_locatable(ax)
    cax = divider.new_horizontal(size='5%', pad=0.05, axes_class=plt.Axes)
    fig.add_axes(cax)
    ax.set_extent([np.min(lon),np.max(lon),np.min(lat),np.max(lat)])
    ax.add_feature(LAND, zorder=0, edgecolor='black')
    ax.add_feature(cfeature.LAKES, facecolor='white')
    ax.add_feature(cfeature.BORDERS)
    ax.add_feature(state_lines, edgecolor='black')
    # Fixed 0-30 color scale — presumably degrees Celsius; confirm units.
    h=ax.pcolor(lon,lat,data,vmin=0,vmax=30,cmap='jet')
    cb=plt.colorbar(h,cax=cax)
    plt.savefig(figname, dpi=300)
# Render the composite map titled with the file's date, then exit.
plotMap(imgName,'1-day Coldest Pixel AVHRR Composite '+t.strftime('%Y-%m-%d'),sst)
plt.close('all')
exit()
|
from django_cron import cronScheduler, Job
from ioestu.endofday import *
class CheckMail(Job):
    """
    Cron Job that checks the lgr users mailbox and adds any
    approved senders' attachments to the db

    NOTE(review): job() actually calls backupDatabase(), which does not
    match this description — confirm whether the docstring or the body
    is out of date.
    """
    # run every 300 seconds (5 minutes)
    run_every = 300
    def job(self):
        # Invoked by django_cron on every scheduled run.
        backupDatabase()
# Register the job with the scheduler at import time.
cronScheduler.register(CheckMail)
s = input()
# NOTE(review): 'and' binds tighter than 'or', so this evaluates as
# (isdigit and isupper) or islower. A digit-only string contains no cased
# characters, so isupper() is False whenever isdigit() is True — the first
# clause can never fire; confirm the intended condition. Behavior kept as-is.
if (s.isdigit() and s.isupper()) or s.islower():
    print("yes")
else:
    print("no")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2019-07-04 09:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Makes the restaurant opening/closing hours optional: nullable in the
    # database and allowed to be blank in forms.
    # NOTE: the model name 'restraunt' is misspelled but must stay as-is
    # to match the existing schema and earlier migrations.
    dependencies = [
        ('main_app', '0008_auto_20190628_1447'),
    ]
    operations = [
        migrations.AlterField(
            model_name='restraunt',
            name='closing_hour',
            field=models.TimeField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='restraunt',
            name='opening_hour',
            field=models.TimeField(blank=True, null=True),
        ),
    ]
|
# Bioinformatics
# Normalize raw dataset using MEAN or MEDIAN value
# Spike-in control normalization
# 4/7/2018, Bongsoo Park, Johns Hopkins
import operator
import numpy as np
# Read the raw miRNA table (tab-separated; columns 0-1 are identifiers,
# columns 2+ are sample values) and compute per-sample normalization
# factors from the "UniSp6 CP" spike-in control row.
# NOTE: this is Python 2 code (print statements).
f = open("raw_data_before_normalization.txt","r")
line_cnt = 0
id_list = []             # sample ids from the header row
norm_factor_mean = []    # spike-in value / mean(spike-in), per sample
norm_factor_median = []  # spike-in value / median(spike-in), per sample
mirna_dataset = {}       # row id -> list of raw sample values (as strings)
for line in f:
    line = line.strip()
    data = line.split("\t")
    #print len(data)
    # treatment of header
    if line_cnt == 0:
        cnt = 0
        for ele in data:
            if cnt > 1:
                id_list.append(ele)
            cnt += 1
    else:
        tmp = []
        cnt = 0
        for ele in data:
            if cnt > 1:
                tmp.append(ele)
            cnt += 1
        mirna_dataset.update({data[0]:tmp})
    # calculate normalization factors (Spike-in control)
    if data[0] == "UniSp6 CP":
        #print data
        x = []
        cnt = 0
        for ele in data:
            if cnt > 1:
                x.append(float(ele))
            cnt += 1
        x = np.array(x)
        y = np.mean(x)    # mean of the spike-in row across samples
        z = np.median(x)  # median of the spike-in row across samples
        print "MEAN:", np.mean(x)
        print "MEDIAN:", np.median(x)
        cnt = 0
        for ele in data:
            if cnt > 1:
                norm_factor_mean.append(float(ele)/y)
                norm_factor_median.append(float(ele)/z)
            cnt += 1
        print "processing normalized factor for MEAN and MEDIAN..."
    line_cnt += 1
f.close()
#print len(id_list)
#print len(norm_factor_mean)
#print len(norm_factor_median)
# Emit the MEAN-normalized table, rows sorted by id; non-numeric cells
# (and any division failure) are written as "ND".
# NOTE(review): only the MEAN factors are applied here; the MEDIAN
# factors are computed but never used — confirm that is intentional.
normalized_mirna_dataset = {}
for the_key, the_value in sorted(mirna_dataset.items(), key=operator.itemgetter(0)):
    norm_array = ""
    cnt = 0
    for ele in mirna_dataset[the_key]:
        try:
            norm_array += "\t" + str(float(ele)/norm_factor_mean[cnt])
        except:
            norm_array += "\t" + "ND"
        cnt += 1
    print the_key+norm_array
|
"""
.. module:: verzamelend.tests
:platform: Unix
:synopsis:
.. moduleauthor:: Pedro Salgado <steenzout@ymail.com>
"""
import os
import verzamelend.config
import verzamelend.logging
import logging
import unittest
# Paths to test fixtures, resolved relative to the current working
# directory (assumes the tests are launched from the project root).
LOGGING_CONFIG_FILE = '%s/tests/logging.conf' % os.curdir
PACKAGE_CONFIG_FILE = '%s/tests/verzamelend.cfg' % os.curdir
class Basic(object):
    """
    Shared setup helpers mixed into the package's test cases.
    """
    def setup_configuration(self):
        """
        Reset and (re)load the package test configuration, keeping a
        reference on the instance as ``self.configuration``.
        """
        tracker = logging.getLogger('%s.%s' % (__name__, 'Basic'))
        tracker.info('setup_configuration()')
        verzamelend.config.reset()
        verzamelend.config.load_configuration(PACKAGE_CONFIG_FILE)
        self.configuration = verzamelend.config.get()
    def setup_logger(self):
        """
        Load the test logging configuration and create this instance's
        logger as ``self.logger``, named after the concrete class.
        """
        tracker = logging.getLogger('%s.%s' % (__name__, 'Basic'))
        tracker.info('setup_logger()')
        verzamelend.logging.load_configuration(LOGGING_CONFIG_FILE)
        self.logger = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
class BaseTestCase(unittest.TestCase, Basic):
    """
    Common base for package test cases: wires up the shared logger and
    configuration before any test method runs.
    """
    __slots__ = ('configuration', 'logger')
    def __init__(self, methodName):
        """
        Build the test case and run the shared Basic setup helpers.

        :param methodName: the test method to be executed.
        :type methodName: str
        """
        super(BaseTestCase, self).__init__(methodName)
        self.setup_logger()
        self.setup_configuration()
    def setUp(self):
        """
        Log entry into per-test setup.
        """
        self.logger.info('setUp()')
    def tearDown(self):
        """
        Log entry into per-test teardown.
        """
        self.logger.info('tearDown()')
class MockConfig(object):
    """
    Stand-in for the collectd configuration tree used in tests.
    """
    def __init__(self, children):
        """
        Keep the supplied configuration items on the instance.

        :param children: list of configuration items.
        :type children: list
        """
        self.children = children
|
#!/usr/bin/env python
"""
A simple echo server
"""
import socket
host = ''      # bind on all interfaces
port = 12345
backlog = 5    # max queued pending connections
size = 1024    # recv buffer size, bytes
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host,port))
s.listen(backlog)
# Accept clients forever; echo each received chunk back until the peer
# closes (recv returns '') or an error occurs.
# NOTE: this is Python 2 code ("except Exception, e", print statements).
while 1:
    client, address = s.accept()
    amountsent = 0
    while True:
        try:
            data = client.recv(size)
        except Exception, e:
            print 'recv',e
            data = ''
        # NOTE(review): despite its name, this counts bytes *received*;
        # send() may also transmit fewer bytes than requested — confirm.
        amountsent = amountsent + len(data)
        if data:
            try:
                client.send(data)
            except Exception, e:
                print 'send',e
                data = ''
        # Empty data (peer closed, or a recv/send error above) ends the
        # per-client loop.
        if not data:
            print "closing with", amountsent
            client.close()
            break
|
from django.urls import path
from . import views
app_name = 'my_admin'
# FIX: routes passed to path() must NOT begin with '/': Django matches
# patterns against the request path with the leading slash already
# stripped, so routes like '/index' could never match.
urlpatterns = [
    path('index', views.index, name='index'),
    path('manage', views.ManageUser, name='manage'),
    path('<int:id>/info', views.Info, name='info'),
    path('<int:id>/change', views.Change, name='change'),
    path('saveinfo', views.SaveInfo, name='saveinfo'),
    path('<int:id>/delete', views.Delete, name='delete')
]
|
import numpy as np
import struct
import sys
import pdb
# Print full arrays instead of truncated summaries.
np.set_printoptions(threshold=np.inf)
import scipy.misc
caffe_root = '/home/mjhuria/caffe/' # this file is expected to be in {caffe_root}/examples
sys.path.insert(0, caffe_root + 'python')
import caffe
import matplotlib.pyplot as plt
# %matplotlib inline
caffe.set_mode_cpu()
# Network definition plus two weight snapshots (early vs. later iteration)
# so their learned parameters can be compared.
model_def = '/media/mjhuria/New Volume/Vikram/EC551/Final/27-04/Test.prototxt'
# model_def = '/home/mjhuria/caffe/to_train/caffe_alexnet_train_iter_10000.caffemodel'
model_weights = '/media/mjhuria/New Volume/Vikram/EC551/Final/27-04/solver_iter_1785.caffemodel'
model_weights_new = '/media/mjhuria/New Volume/Vikram/EC551/Final/27-04/solver_iter_50000.caffemodel'
net = caffe.Net(model_def, # defines the structure of the model
                model_weights, # contains the trained weights
                caffe.TEST) # use test mode (e.g., don't perfor
new_net = caffe.Net(model_def, # defines the structure of the model
                model_weights_new, # contains the trained weights
                caffe.TEST) # use test mode (e.g., don't perfor
# print [(k, v.data.shape) for k, v in net.blobs.items()]
# print [(k, v[0].data.shape, v[0].data.min(), v[0].data.max()) for k, v in net.params.items()]
# Extract learned parameters: params[layer][0] = weights, [1] = biases.
conv1_f = net.params['conv1'][0].data # conv kernels
conv1_fb = net.params['conv1'][1].data # conv bias
conv2_f = net.params['conv2'][0].data # conv kernels
conv2_fb = net.params['conv2'][1].data # conv bias
ip1_f = net.params['fc6'][0].data # inner product weights
ip1_fb = net.params['fc6'][1].data # inner product bias
ip2_f = net.params['fc8'][0].data # inner product weights
ip2_fb = net.params['fc8'][1].data # inner product bias
ip2_fb_new = new_net.params['fc8'][1].data # inner product bias
print("conv1")
print(conv1_f)
print("conv1_bias")
print(conv1_fb)
# NOTE(review): the headings below do not match what is printed — these
# are new_net's *conv1* weights and bias, printed under "conv2" and
# "conv1_bias". Confirm whether the labels or the tensors are wrong.
print("conv2")
print(new_net.params['conv1'][0].data)
print("conv1_bias")
print(new_net.params['conv1'][1].data)
import os
import platform
import hashlib
def clear():
    """Clear the terminal: 'cls' on Windows, 'clear' everywhere else."""
    command = 'cls' if platform.system() == "Windows" else 'clear'
    return os.system(command)
def hashmd5(password):
    """Return the hex MD5 digest of *password*, encoded as UTF-8."""
    digest = hashlib.md5()
    digest.update(password.encode('utf-8'))
    return digest.hexdigest()
import math
import os
from random import randint
from collections import deque
import pygame
from pygame.locals import *
# Constants
fps = 60 # game clock tick rate, frames per second
Animation_speed = 0.2 # horizontal scroll speed, pixels per millisecond
window_width = 284*2 # window width in pixels (two background tiles wide)
window_height = 512 # window height in pixels
class Bird(pygame.sprite.Sprite):
    """The player-controlled bird: climbs on a cosine easing curve while
    ms_to_climb is positive, otherwise falls at a constant rate."""
    bird_image_width = bird_image_height = 50  # sprite size in pixels
    speed_up = 0.3      # climb speed, px per ms (peak of the easing curve)
    speed_down = 0.15   # fall speed, px per ms
    flying_duration_time = 300  # length of one climb, in ms
    def __init__(self,x,y,ms_to_climb,images): # construct function
        """Create the bird.

        :param x, y: initial top-left position in pixels.
        :param ms_to_climb: milliseconds of climb remaining at start.
        :param images: (wing-up surface, wing-down surface) pair.
        """
        super(Bird,self).__init__() # pass the Bird class to its inherited class and initialize
        self.x, self.y = x, y
        self.ms_to_climb = ms_to_climb # construct at the level of Bird class
        self._image_wingup, self._image_wingdown = images # construct at the level of the father class
        self._mask_wingup = pygame.mask.from_surface(self._image_wingup) # mask for later collision detection
        self._mask_wingdown = pygame.mask.from_surface(self._image_wingdown)
    def update(self, delta_frame = 1): # update the bird's position, delat_frame indicates the number of frames
        # since this method was last called
        if self.ms_to_climb > 0 : # if the bird is flying up
            # Cosine easing: climb speed ramps 0 -> 2*speed_up -> 0 over
            # the climb, so the motion starts and ends smoothly.
            percentage_motion_done = 1 - self.ms_to_climb/Bird.flying_duration_time # percentage of the whole flying up motion
            self.y -= (Bird.speed_up * frame_to_ms(delta_frame) * (1 - math.cos(percentage_motion_done * math.pi)))
            # updating the bird position with the defined consine function
            self.ms_to_climb -= frame_to_ms(delta_frame) # update the ms_to_climb accordingly
        else:
            self.y += Bird.speed_down * frame_to_ms(delta_frame) # if the bird is flying down, just appling speed_down
    @property
    def image(self): # deciding when to show wing_up or wing_down
        # Alternate wing frames every 150 ms of wall-clock time.
        if pygame.time.get_ticks() % 300>=150: # returns ms after pygame init()
            return self._image_wingdown
        else:
            return self._image_wingup
    @property
    def mask(self): # for detecting the collision
        # Mask must match whichever wing frame is currently displayed.
        if pygame.time.get_ticks() % 300>=150:
            return self._mask_wingdown
        else:
            return self._mask_wingup
    @property # return Rect object for pygame to store and manipulate rectangle ares
    def rect(self):
        return Rect(self.x,self.y,Bird.bird_image_width,Bird.bird_image_height)
class Pipe(pygame.sprite.Sprite):
    """One obstacle column: a bottom pipe and a top pipe with a bird-sized
    gap between them, pre-rendered onto a single window-height Surface."""
    pipe_width = 80 # width and height of a pipe body, same size with the image of pipe body
    pipe_height = 32
    interval_generating_pipe = 2000 # time interval adding new pipes, in ms
    def __init__(self, pipe_end_img, pipe_body_img):
        """Pre-render the pipe column with a randomized gap position.

        :param pipe_end_img: image for the flared end piece of a pipe.
        :param pipe_body_img: image for one body segment of a pipe.
        """
        # FIX: the original never initialized the Sprite base class (Bird
        # does), leaving pygame's Group bookkeeping unset on Pipe objects.
        super(Pipe, self).__init__()
        self.x = float(window_width - 1) # new pipe will be added at the right edge
        self.score_counted = False       # has the bird been credited for passing this pipe?
        self.image = pygame.Surface((Pipe.pipe_width,window_height), SRCALPHA) # per-pixel alpha keeps the gap transparent
        self.image.convert()
        self.image.fill((0,0,0,0))
        # Number of body tiles available once the bird-sized gap and the
        # end pieces are reserved out of the window height.
        total_num_pipe_body = int((window_height - 3 * Bird.bird_image_height - 3 * Pipe.pipe_height)/Pipe.pipe_height)
        # Randomly split the tiles between the bottom and top pipes.
        self.num_pipe_bottom = randint(1, total_num_pipe_body)
        self.num_pipe_top = total_num_pipe_body - self.num_pipe_bottom
        # Draw the bottom pipe's body tiles, from the bottom edge upwards.
        for i in range(1, self.num_pipe_bottom+1):
            pos = (0, window_height - i * Pipe.pipe_height)
            self.image.blit(pipe_body_img, pos)
        # Cap the bottom pipe with its end piece.
        bottom_end_y = window_height - self.bottom_height_x
        bottom_end_piece_pos = (0, bottom_end_y - Pipe.pipe_height)
        self.image.blit(pipe_end_img, bottom_end_piece_pos)
        # Draw the top pipe's body tiles, from the top edge downwards,
        # then cap it with its end piece.
        for i in range(self.num_pipe_top):
            self.image.blit(pipe_body_img,(0, i * Pipe.pipe_height))
        top_end_y = self.top_height_x
        self.image.blit(pipe_end_img, (0, top_end_y))
        # The end pieces count towards the pipe heights used elsewhere.
        self.num_pipe_top += 1
        self.num_pipe_bottom += 1
        # Pixel mask for precise collision detection.
        self.mask = pygame.mask.from_surface(self.image)
    @property
    def top_height_x(self):
        """Height of the top pipe, in pixels."""
        return self.num_pipe_top * Pipe.pipe_height
    @property
    def bottom_height_x(self):
        """Height of the bottom pipe, in pixels."""
        return self.num_pipe_bottom * Pipe.pipe_height
    @property
    def visible(self):
        """True while any part of the pipe is on screen."""
        return -Pipe.pipe_width < self.x < window_width
    @property
    def rect(self):
        # NOTE(review): the height here is pipe_height even though the
        # rendered image is window_height tall; blit and collide_mask only
        # use the top-left position, so behavior is unaffected — confirm.
        return Rect(self.x, 0, Pipe.pipe_width, Pipe.pipe_height)
    def update(self, delta_frames=1):
        """Scroll left; delta_frames = frames since this was last called."""
        self.x -= Animation_speed * frame_to_ms(delta_frames)
    def collides_with(self, bird):
        """Pixel-perfect collision test against *bird*."""
        return pygame.sprite.collide_mask(self, bird)
def load_images():
    """Load every sprite the game needs and return them keyed by name."""
    def load_image(image_name):
        # Images live in ./images relative to the working directory.
        filename = os.path.join('.','images',image_name)
        image = pygame.image.load(filename)
        image.convert()
        return image
    # Animated GIFs are not supported in pygame, so the flapping bird is
    # drawn from two separate wing frames.
    filenames = {
        'background': 'background.png',
        'pipe-end': 'pipe_end.png',
        'pipe-body': 'pipe_body.png',
        'bird-wingup': 'bird_wing_up.png',
        'bird-wingdown': 'bird_wing_down.png',
    }
    return {key: load_image(fname) for key, fname in filenames.items()}
# Convert a frame count to milliseconds at the specified framerate.
def frame_to_ms(frame, fps=fps):
    """Return how many milliseconds *frame* frames last at *fps*."""
    return frame * 1000.0 / fps
# Convert a duration in milliseconds to frames at the specified framerate.
def ms_to_frame(ms, fps=fps):
    """Return how many frames elapse in *ms* milliseconds at *fps*."""
    return (ms * fps) / 1000.0
def main():
    """Run the Flappy Bird game loop until the player quits or crashes."""
    # initialize the game.
    pygame.init()
    # set the size of the window and caption of the game
    display_game = pygame.display.set_mode((window_width,window_height))
    pygame.display.set_caption('Flappy Bird')
    # track the amount of time
    clock = pygame.time.Clock()
    score_font = pygame.font.SysFont('arial',64,bold=True)
    images = load_images()
    # Start the bird vertically centered near the left edge.
    bird_instance = Bird(50,int(window_height/2-Bird.bird_image_height/2),1,(images['bird-wingup'],images['bird-wingdown']))
    pipes = deque()
    score = 0
    frame_clock = 0
    done = False
    while not done:
        # cap the frame rate
        clock.tick(fps)
        # spawn a new pipe at a fixed time interval
        if not frame_clock % ms_to_frame(Pipe.interval_generating_pipe):
            pp = Pipe(images['pipe-end'], images['pipe-body'])
            pipes.append(pp)
        # handle input: escape/quit ends the game, space makes the bird climb
        for event in pygame.event.get():
            if event.type == QUIT or (event.type == KEYUP and event.key == K_ESCAPE):
                done = True
                break
            elif event.type == KEYUP and event.key == K_SPACE:
                bird_instance.ms_to_climb = Bird.flying_duration_time
        pipe_collision = any(p.collides_with(bird_instance) for p in pipes)
        # FIX: the bottom-edge check used window_width (568) instead of
        # window_height (512), so the bird could fall off the bottom of the
        # screen without ending the game.
        if pipe_collision or 0 >= bird_instance.y or bird_instance.y >= window_height - Bird.bird_image_height:
            done = True
        # tile the background image twice across the window
        for x in (0, window_width / 2):
            display_game.blit(images['background'], (x, 0))
        # drop pipes that have scrolled off the left edge
        while pipes and not pipes[0].visible:
            pipes.popleft()
        # updating the pipes
        for p in pipes:
            p.update()
            display_game.blit(p.image, p.rect)
        # updating the bird
        bird_instance.update()
        display_game.blit(bird_instance.image, bird_instance.rect)
        # award one point per pipe fully passed
        for p in pipes:
            if p.x + Pipe.pipe_width < bird_instance.x and not p.score_counted:
                score += 1
                p.score_counted = True
        # showing the score, centered horizontally
        score_surface = score_font.render(str(score), True, (255, 255, 255))
        score_x = window_width / 2 - score_surface.get_width() / 2
        display_game.blit(score_surface, (score_x, Pipe.pipe_height))
        # update the full display surface to the screen
        pygame.display.flip()
        frame_clock += 1
    print('Game over! Score: %i' % score)
    pygame.quit()
if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.