index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
6,600 | 7d25a8eb61b6fb9069616745c2b68fd3ceeca9fb | # Generated by Django 2.2.2 on 2019-10-19 14:09
from django.db import migrations, models
class Migration(migrations.Migration):
    # Schema migration: widen/redefine Account.phone_number as a
    # CharField (max 15 chars) with a human-readable verbose name.

    dependencies = [
        ('account', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='account',
            name='phone_number',
            field=models.CharField(max_length=15, verbose_name='phone number'),
        ),
    ]
|
6,601 | a757bbb9ad2f6f5bf04cdf4091b97841b8e40432 | import os
import pickle
import collections
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython import embed
from optimizers.utils_1 import Model_1, Architecture_1
from optimizers.utils import Model, Architecture
# Plot colour assigned to each optimiser/method name.
colors = {
    'BOHB-PC-DARTS': 'darkorange',
    'BOHB-DARTS': 'dodgerblue',
    'BOHB-GDAS': 'forestgreen',
    'RE': 'crimson',
    'RS': 'darkorchid',
    'RL': 'sienna',
    'TPE': 'deepskyblue',
    'SMAC': 'violet',
    'HB': 'darkgray',
    'BOHB': 'gold'
}
# Matplotlib marker assigned to each optimiser/method name.
markers = {
    'BOHB-DARTS': '^',
    'BOHB-PC-DARTS': 'v',
    'BOHB-GDAS': 'x',
    'RS': 'D',
    'RE': 'o',
    'RL': 's',
    'SMAC': 'h',
    'HB': '>',
    'BOHB': '*',
    'TPE': '<'
}
def get_incumbent(losses, time_stamps):
    """Compute the running-best (incumbent) loss trajectory.

    Parameters
    ----------
    losses : sequence of float
        Observed losses, in recording order.
    time_stamps : sequence of float
        Timestamp of each observation (same length as ``losses``).

    Returns
    -------
    (time_stamps, losses) : tuple of two lists
        ``losses[i]`` is the best loss observed up to ``time_stamps[i]``.
    """
    incumbent_times = []
    incumbent_losses = []
    current_incumbent = float('inf')
    # The original also tracked an unused `incumbent_budget` and returned
    # dict.values(), relying on dict insertion order; return an explicit
    # tuple instead.
    for loss, ts in zip(losses, time_stamps):
        if loss < current_incumbent:
            current_incumbent = loss
        # Step function: carry the best-so-far value forward at every step.
        incumbent_losses.append(current_incumbent)
        incumbent_times.append(ts)
    return incumbent_times, incumbent_losses
def get_trajectories(args, global_min, path='regularized_evolution',
                     methods=['RE', 'RS']):
    """Load per-seed optimiser runs from pickle files and build regret
    trajectories.

    args: namespace with a ``space`` attribute (search-space id used in
        the pickle file names) -- assumed from usage; confirm at caller.
    global_min: best known test error, subtracted so losses become regret.
    path: root directory containing one sub-directory per method.
    methods: method names (sub-directory / file-name prefixes) to load.

    Returns ``{method: {'time_stamps': ndarray, 'losses': 2-D ndarray}}``
    with one row per seed in ``losses``.
    """
    all_trajectories = {}
    for m in methods:
        dfs = []
        # Seeds are assumed contiguous from 0; stop at the first missing file.
        for seed in range(500):
            filename = os.path.join(path, m,
                                    'algo_{}_0_ssp_{}_seed_{}.obj'.format(m, args.space,
                                                                          seed))
            try:
                with open(filename, 'rb') as f:
                    data = pickle.load(f)
                # Items expose .test_accuracy / .training_time (and .budget
                # for multi-fidelity methods) -- presumably the Model/
                # Architecture types from optimizers.utils; confirm.
                losses = [1 - x.test_accuracy - global_min for x in data]
                times = np.array([x.training_time for x in data])
                # Cumulative wall-clock time after each evaluation.
                times = [np.sum(times[:i+1]) for i in range(len(times))]
                if m in ['HB', 'BOHB']:
                    # Multi-fidelity methods: truncate at a cumulative
                    # budget of 280*108 (presumably 280 evaluations of 108
                    # epochs each -- TODO confirm).
                    costs = np.array([x.budget for x in data])
                    costs = np.array(
                        [np.sum(costs[:i+1]) for i in range(len(costs))]
                    )
                    n = len(np.where(costs <= 280*108)[0])
                    times, losses = get_incumbent(losses[:n], times[:n])
                else:
                    times, losses = get_incumbent(losses, times)
                print(seed, ' MIN: ', min(losses))
                df = pd.DataFrame({str(seed): losses}, index=times)
                #embed()
                dfs.append(df)
            except FileNotFoundError:
                break
        # Align all seeds on a common set of time stamps.
        df = merge_and_fill_trajectories(dfs, default_value=None)
        if df.empty:
            continue
        print(m, df.shape)
        all_trajectories[m] = {
            'time_stamps': np.array(df.index),
            'losses': np.array(df.T)
        }
    return all_trajectories
def merge_and_fill_trajectories(pandas_data_frames, default_value=None):
# merge all tracjectories keeping all time steps
df = pd.DataFrame().join(pandas_data_frames, how='outer')
# forward fill to make it a propper step function
df=df.fillna(method='ffill')
if default_value is None:
# backward fill to replace the NaNs for the early times by
# the performance of a random configuration
df=df.fillna(method='bfill')
else:
df=df.fillna(default_value)
return(df)
def plot_losses(fig, ax, axins, incumbent_trajectories, regret=True,
                incumbent=None, show=True, linewidth=3, marker_size=10,
                xscale='log', xlabel='wall clock time [s]', yscale='log',
                ylabel=None, legend_loc='best', xlim=None, ylim=None,
                plot_mean=True, labels={}, markers=markers, colors=colors,
                figsize=(16, 9)):
    """Plot mean/median incumbent trajectories with +-2 SEM bands.

    incumbent_trajectories: dict as produced by get_trajectories().
    axins: optional inset axes; the same curves are drawn into it.
    NOTE(review): several parameters (show, xscale, xlabel, yscale,
    ylabel, legend_loc, xlim, ylim, figsize) are accepted but not used
    inside this function; mutable default labels={} is shared across
    calls but never mutated here.
    """
    if regret:
        if ylabel is None: ylabel = 'regret'
        # find lowest performance in the data to update incumbent
        if incumbent is None:
            incumbent = np.inf
            for tr in incumbent_trajectories.values():
                incumbent = min(tr['losses'][:, -1].min(), incumbent)
            print('incumbent value: ', incumbent)
    for m, tr in incumbent_trajectories.items():
        trajectory = np.copy(tr['losses'])
        if (trajectory.shape[0] == 0): continue
        if regret: trajectory -= incumbent
        # Standard error of the mean across seeds.
        sem = np.sqrt(trajectory.var(axis=0, ddof=1) / tr['losses'].shape[0])
        if plot_mean:
            mean = trajectory.mean(axis=0)
        else:
            mean = np.median(trajectory, axis=0)
            # 1.253 approximates the SE of the median relative to the SEM.
            sem *= 1.253
        # Shaded uncertainty band only for the DARTS/GDAS variants.
        if 'DARTS' in m or 'GDAS' in m:
            ax.fill_between(tr['time_stamps'], mean - 2 * sem, mean + 2 * sem,
                            color=colors[m], alpha=0.2)
        ax.plot(tr['time_stamps'], mean,
                label=labels.get(m, m), color=colors.get(m, None), linewidth=linewidth,
                marker=markers.get(m, None), markersize=marker_size, markevery=(0.1, 0.1))
        if axins is not None:
            axins.plot(tr['time_stamps'], mean,
                       label=labels.get(m, m), color=colors.get(m, None), linewidth=linewidth,
                       marker=markers.get(m, None), markersize=marker_size, markevery=(0.1, 0.1))
    return (fig, ax)
|
6,602 | e5e7856d752f14e0671bae8d8b7997207c667ae1 | from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
from django.views.generic.base import View
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
# from com_search.get_info import Search as Search_1
# from com_search.models import CompanyType
import json
# Create your views here.
class SearchSuggest(View):
    """Company-name autocomplete endpoint.

    GET ?s=<keywords> -> JSON array of matching company names from the
    ``zntg_2`` Elasticsearch index.
    """

    def get(self, request):
        key_words = request.GET.get('s', '')
        re_datas = []
        if key_words:
            es = Elasticsearch(hosts=["127.0.0.1"])
            s = Search(index='zntg_2').using(es).query('match', company_name=key_words)
            # Iterating the Search object executes the query and yields hits.
            # (The original also called s.execute() afterwards and collected
            # an unused com_datas list; both removed.)
            for hit in s:
                re_datas.append(hit.company_name)
        return HttpResponse(json.dumps(re_datas), content_type="application/json")
class SearchDetail(View):
    """Company detail endpoint.

    GET ?q=<company name> -> JSON object with the fields of the best
    matching company document (empty object when no query is given).
    """

    def get(self, request):
        key_words = request.GET.get('q', '')
        data = {}
        if key_words:
            es = Elasticsearch(hosts=["127.0.0.1"])
            s = Search(index='zntg_2').using(es).query('match', company_name=key_words)
            # s[0] limits the search to the single top-ranked hit.
            for i in s[0]:
                data['company_name'] = i.company_name
                data['crn'] = i.crn
                try:
                    data['former_name'] = str(i.former_name)
                except AttributeError:
                    # Some documents have no former_name field; the original
                    # used a bare `except:` which hid unrelated errors.
                    pass
                data['organization_type'] = i.organization_type
                data['faren'] = i.faren
                data['registered_capital'] = i.registered_capital
                data['company_type'] = i.company_type
                data['registration_state'] = i.registration_state
                data['searched_by'] = str(i.searched_by)
                data['data_count'] = i.data_count
        return HttpResponse(json.dumps(data), content_type="application/json")
class CodeSearch(View):
    # Company lookup by credit code: GET ?code=<crn> -> JSON.
    # NOTE(review): Search_1 is undefined at runtime -- its import
    # ("from com_search.get_info import Search as Search_1") is commented
    # out at the top of this file, so hitting this view raises NameError.
    # Re-enable that import (or this view) before deploying.
    def get(self, request):
        key_words = request.GET.get('code', '')
        print(key_words)
        search = Search_1()
        data = search.get_data(key_words)
        # text = {'code': key_words}
        # print(json.dumps(text))
        print(data)
        return JsonResponse(data)
if __name__ == '__main__':
    # Ad-hoc smoke test of the Elasticsearch connection (not used by Django).
    # client = Elasticsearch(hosts=["127.0.0.1"])
    # Create the client instance.
    es = Elasticsearch(hosts=["127.0.0.1"])
    # `using` selects the Elasticsearch client; `index` narrows the scope and
    # accepts a list of indices or wildcard patterns, e.g.
    # index=["bank", "banner", "country"] or index=["b*"] (the latter matches
    # every index starting with "b"); a doc-type can also be given in search.
    # s = Search(using=client, index="zntg_5")
    # q = {"query": {"match": {"name": "easy"}}}
    # res = es.Search(body=q)
    # print(res)
    s = Search(index='zntg_2').using(es).query('match', company_name='延安一正启源科技发展股份有限公司')
    # for i in s[0]:
    #     print(i.company_name)
    #     print(i.company_type)
    res = s.execute()
    # print(res)
    # res = es.get(index="zntg_5", doc_type="company", id='AWQ7fKZzZ2odEMYJXOY0')
    # print(res["hits"]["hits"][0]['_source'])
    # Pull the raw _source of the first hit from the response payload.
    a = res["hits"]["hits"][0]['_source']
    print(a['former_name'])
|
6,603 | b52807a15cef8f07f685f8761a470d4a24d9c3dc | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/6/20 下午4:00
# @Author : Huang HUi
# @Site :
# @File : query_parse.py
# @Software: PyCharm
from mysqlConnection import mysqlConnection
import yaml
import copy
import time
import csv
import json
from collections import OrderedDict
import ast
#
# GIVEN_QUERY = {'days': [10,14], 'countries': [{'country_id': 28, 'day': None}],
# 'regions': [{'region_id': 2, 'day': None}, {'region_id': 27, 'day': None}, {'region_id': 69, 'day': None}], 'pois': [],
# 'regionNotGo': [], 'poiNotGo': [], 'regionSorted': [135, 131], 'availableMonths': [],
# 'price': [5000, 15000], 'hotelRating': None, 'arrivalRegionId': 27, 'departRegionId': None}
# GIVEN_QUERY={'days': [10,13], 'countries': [{'country_id': 11, 'day': None}], 'regions': [{'region_id': 266, 'day': None},
# {'region_id': 220, 'day': None}], 'pois': [795, 800,878,1536]}
# GIVEN_QUERY={'days': [12], 'countries': [{'country_id': 28, 'day': None}],
# 'regions': [{'region_id': 2, 'day': None}, {'region_id': 70, 'day': None}],
# 'pois': [1361, 1380, 1382, 1385, 1386, 1413, 1512, 1700, 1701, 1712, 1713]}
def query_parse(GIVEN_QUERY):
    """Parse a trip-query dict and fetch matching start/end tour parts.

    Missing keys fall back to the same defaults the original try/except
    ladder produced (None for scalars, [] for lists).  The original also
    raised KeyError when 'countries' was absent at the SQL stage; a
    missing/empty 'countries' now falls through to the unfiltered query.

    Returns the tuple
        (countryIds, days, regions, regionDic, pois, regionNotGo,
         poiNotGo, regionSorted, availableMonths, price, hotelRating,
         arrivalRegionId, departRegionId, startParts, endParts)
    where startParts/endParts are lists of tidy_parts ids.
    """
    # Scalar filters default to None, list filters to [].
    days_query = GIVEN_QUERY.get('days')
    regions_query = GIVEN_QUERY.get('regions', [])
    pois_query = GIVEN_QUERY.get('pois', [])
    regionNotGo_query = GIVEN_QUERY.get('regionNotGo', [])
    poiNotGo_query = GIVEN_QUERY.get('poiNotGo', [])
    regionSorted_query = GIVEN_QUERY.get('regionSorted', [])
    availableMonths_query = GIVEN_QUERY.get('availableMonths', [])
    price_query = GIVEN_QUERY.get('price')
    hotelRating_query = GIVEN_QUERY.get('hotelRating')
    arrivalRegionId_query = GIVEN_QUERY.get('arrivalRegionId')
    departRegionId_query = GIVEN_QUERY.get('departRegionId')

    countries = GIVEN_QUERY.get('countries')
    try:
        countryIds_query = [c['country_id'] for c in countries]
    except (TypeError, KeyError):
        # 'countries' missing or malformed -> no country filter.
        countryIds_query = None
    try:
        regionDic_query = [{r['region_id']: r['day']} for r in regions_query]
    except (TypeError, KeyError):
        regionDic_query = []

    connection = mysqlConnection()
    # NOTE(review): the SQL below is assembled by string interpolation, as
    # in the original.  The values come from an internal query dict, but
    # parameterised queries would be safer if this is ever fed user input.
    try:
        with connection.cursor() as cursor:
            if countries:
                # country condition (optionally pinned to arrival region)
                if arrivalRegionId_query:
                    sql = "SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)" % (arrivalRegionId_query, str(countryIds_query)[1:-1])
                else:
                    sql = "SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)" % str(countryIds_query)[1:-1]
            else:
                # no country filter: all valid start parts
                sql = "SELECT id FROM tidy_parts WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null "
            cursor.execute(sql)
            startParts = cursor.fetchall()

            if countries:
                if departRegionId_query:
                    sql = "SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)" % (departRegionId_query, str(countryIds_query)[1:-1])
                else:
                    sql = "SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)" % str(countryIds_query)[1:-1]
            else:
                sql = "SELECT id FROM tidy_parts WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null "
            cursor.execute(sql)
            endParts = cursor.fetchall()
    finally:
        connection.close()

    # Keep only the ids (the original shadowed the builtin `dict` here).
    startParts = [row['id'] for row in startParts]
    endParts = [row['id'] for row in endParts]
    return (countryIds_query, days_query, regions_query, regionDic_query,
            pois_query, regionNotGo_query, poiNotGo_query, regionSorted_query,
            availableMonths_query, price_query, hotelRating_query,
            arrivalRegionId_query, departRegionId_query, startParts, endParts)
|
6,604 | 51ed99a68486bd52499bbc28e68ff2312e02ea1f | from django.db import models
from datetime import datetime
# Create your models here.
def _default_datetime():
    """Return the current timestamp as a string (evaluated per row)."""
    return str(datetime.now())


class Notifications(models.Model):
    """A notification request to deliver to a user via some channel."""

    username = models.CharField(max_length=20)
    phone_number = models.BigIntegerField(default=0)
    email = models.EmailField()
    firstname = models.CharField(max_length=20)
    app_name = models.CharField(max_length=50)
    service = models.CharField(max_length=50)
    # BUG FIX: the original used default=str(datetime.now()), which is
    # evaluated ONCE at import time, stamping every row with the server
    # start time.  A callable default is evaluated at each save.
    datetime = models.CharField(default=_default_datetime, max_length=50)
    # NOTE(review): default=0 on a CharField is odd (int default for a
    # text column) -- kept for migration compatibility; confirm intent.
    message = models.CharField(default=0, max_length=300)
    notify_type = models.CharField(default='email', max_length=20)
|
6,605 | dd053da45d2577772414b1373ba324b0bfdc0d94 | from pyrogram import Client, filters
from pyrogram.errors import MessageNotModified
from db.models import *
@Client.on_callback_query(filters.regex('^change_lg_'))
async def on_change_language(_, callback):
    # Callback data format: "change_lg_<settings_id>".
    settings_id = int(callback.data.split('_')[2])
    with db_session:
        settings = SettingsInstance.get(id=settings_id)
        # Only users allowed to edit these settings may open the menu.
        if not settings or not settings.can_edit(callback.db_user):
            await callback.answer(callback.db_user.get_message('not_admin'), show_alert=True)
            return
        await callback.answer()
        # Swap the message for the language-selection keyboard.
        await callback.edit_message_text(**languages.create_message_data(callback.db_user, settings.chat, settings))
@Client.on_callback_query(filters.regex('^language_g_'))
async def on_language_selected(_, callback):
    # Callback data format: "language_g_<settings_id>_<language_code>".
    # The language code itself may contain underscores, so re-join the tail.
    data = callback.data.split('_')[2:]
    settings_id = int(data[0])
    language = '_'.join(data[1:])
    with db_session:
        settings = SettingsInstance.get(id=settings_id)
        if not settings or not settings.can_edit(callback.db_user):
            await callback.answer(callback.db_user.get_message('not_admin'), show_alert=True)
            return
        # Persist the new chat language, then confirm with a localized alert.
        settings.chat.language = language
        await callback.answer(settings.chat.get_message('language_selected', flag=settings.chat.get_message('flag')),
                              show_alert=True)
        try:
            await callback.edit_message_text(**languages.create_message_data(callback.db_user, settings.chat, settings))
        except MessageNotModified:  # If the user selects the same language he already had
            pass
|
6,606 | 965db2523f60d83bd338bcc62ab8e5705550aa89 | import csv
import json,os
# Accumulator for parsed menu entries (populated elsewhere).
mylist = []
# Supported cuisine categories.
clist = [
    "North Indian",
    "Italian",
    "Continental",
    "Chinese",
    "Mexican",
    "South Indian",
]
# Print every menu file found in the data directory.
for filename in os.listdir("/home/asket/Desktop/DBMS/menu"):
    print(filename)
6,607 | 0b36bf9ac7887101be5503a0edce19e1111e5ca0 | # Import other modules
from zelda_utilities.constants import *
# Helps establish the current frame for sprite animation/image changing
# Helps establish the current frame for sprite animation/image changing
class Animation:
    """Tracks the current frame index for time-based sprite animation."""

    def __init__(self):
        # Animation clock: tick count (ms) at which the next frame is due.
        self.next_frame = pygame.time.get_ticks()
        # Starting frame index.
        self.frame = 0
        # ~12 frames/sec (1000ms // 12) -- assumes ANIMATION_RATE == 12;
        # actual value lives in zelda_utilities.constants.
        self.frame_time = 1000 // ANIMATION_RATE

    def anim_sprite(self):
        """Advance and return the frame index if enough time has elapsed."""
        if pygame.time.get_ticks() > self.next_frame:
            # Wrap the counter so it never grows unbounded.
            self.frame = (self.frame + 1) % (24 * ANIMATION_RATE)  # reset > 20 sec
            self.next_frame += self.frame_time
        return self.frame
|
6,608 | 9706b9ba81f41b131c364a16bb17a0c1e31e3a04 | import numpy as np
import skimage
def preprocess_img(img, size):
    """Convert a channel-first RGB frame to a resized grayscale image.

    Parameters
    ----------
    img : ndarray
        Image in (channels, height, width) order -- e.g. (3, 640, 480);
        the original comment claimed the result "becomes (640, 480, 3)",
        but moving axis 0 to the end yields (height, width, channels).
    size : tuple
        Target (rows, cols) for skimage.transform.resize.

    Returns
    -------
    ndarray
        2-D grayscale image of shape ``size``.
    """
    # np.moveaxis is the documented replacement for np.rollaxis:
    # (C, H, W) -> (H, W, C), the layout skimage expects.
    img = np.moveaxis(img, 0, -1)
    img = skimage.transform.resize(img, size)
    img = skimage.color.rgb2gray(img)
    return img
# data = minerl.data.make("MineRLNavigateDense-v0", data_dir="../dataset/navigate")
#
# # Iterate through a single epoch gathering sequences of at most 32 steps
# for current_state, action, reward, next_state, done in data.sarsd_iter(num_epochs=1, max_sequence_len=32):
# # Print the POV @ the first step of the sequence
# print(current_state['pov'][0])
#
# # Print the final reward pf the sequence!
# print(reward[-1])
#
# # Check if final (next_state) is terminal.
# print(done[-1])
#
# # ... do something with the data.
# print("At the end of trajectories the length can be < max_sequence_len", len(reward))
|
6,609 | 46bf5866d5353c58e130b20ffa4d95df8abf986b | import os
import pickle
import PySimpleGUI as sg
from typing import Dict
sg.ChangeLookAndFeel('Black')
import string
from nltk.tokenize import word_tokenize
from itertools import chain
from glob import glob
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
import re
import nltk
from nltk.stem import LancasterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
# Lemmatise every word of a test song, write the result to a scratch file,
# copy it back over the song, then delete the scratch file.
# (Paths are hard-coded test fixtures from the original author.)

# FIX: the original opened the song without ever closing the handle and
# reopened/closed the output file once per word; use context managers and
# open the output a single time.  The unused LancasterStemmer and the
# duplicate WordNetLemmatizer/`import os` were removed.
with open("/Users/lorenzostigliano/Desktop/teststem/Paradise_Coldplay.txt", "r") as song:
    words = song.read().split()

# WordNet lemmatizer in verb mode -- the variant actually used below.
lmtzr = WordNetLemmatizer()

with open('filteredtext.txt', 'a') as out:
    for parole in words:
        out.write(" " + str(lmtzr.lemmatize(parole, 'v')))

# Copy the lemmatised text back over the original song file.
# NOTE(review): the scratch file is written via a relative path but read
# back via an absolute one -- this only works when the CWD matches.
with open("/Users/lorenzostigliano/Desktop/Progetto Gestione dell'Informazione/filteredtext.txt", "r") as f:
    with open("/Users/lorenzostigliano/Desktop/teststem/Paradise_Coldplay.txt", "w") as f1:
        for line in f:
            f1.write(line)

os.remove("/Users/lorenzostigliano/Desktop/Progetto Gestione dell'Informazione/filteredtext.txt")
|
6,610 | 02cd99f0a265fe01835a6adc211e750a58d993fd | import Pyro4
from Pyro4 import Daemon, Proxy
from threading import Thread
import thread
import pickle
import socket
Pyro4.config.REQUIRE_EXPOSE = False
def register(obj):
    ''' Register an object with daemon and return its Pyro URI. '''
    # NOTE(review): a fresh daemon is created (and served on its own
    # background thread, see serve_daemon) for every registered object.
    daemon = Pyro4.Daemon(host="localhost")
    uri = daemon.register(obj)  # Scheduler
    serve_daemon(daemon)
    return uri
def serve_daemon(daemon):
    ''' Serve the daemon's request loop in a separate background thread.

    The thread is marked as a daemon thread so it will not keep the
    process alive at shutdown.
    '''
    # The original wrapped requestLoop in a pointless lambda and used the
    # deprecated setDaemon(); pass the bound method and set the attribute.
    t = Thread(target=daemon.requestLoop)
    t.daemon = True
    t.start()
def proxy(uri):
    ''' Return a Pyro4 proxy object for the given uri. '''
    return Pyro4.Proxy(uri)
class SharedObject(object):
    ''' Shared object that is distributed across nodes.

    On construction the instance registers itself with a Pyro daemon and
    keeps the URI under which peers can reach it.  Attribute access is
    intercepted so method calls can accept an extra 'async' flag
    (promise-style calls) that is stripped before dispatch.
    '''

    def __init__(self):
        ''' Register the child object with the daemon and record the URI. '''
        self.name = register(self)
        # print() form works in both Python 2 and 3 (original used a
        # py2-only print statement).
        print(proxy(self.name))

    def __getattribute__(self, name):
        """ Intercept calls to any of the methods in the child object """
        attr = object.__getattribute__(self, name)
        if hasattr(attr, '__call__'):
            def newfunc(*args, **kwargs):
                # Allow async calls to methods (promises): strip the flag
                # before invoking the real method.
                if 'async' in kwargs:
                    del kwargs['async']
                # BUG FIX: the original never invoked the wrapped method
                # (the call was commented out and referenced an undefined
                # name `func`), so every method call returned None.
                return attr(*args, **kwargs)
            return newfunc
        else:
            return attr
6,611 | 8d8df517ca5486e62cc1b5ac23bbcfa65ed9c1ff | from numba import jit
@jit
def _sum_of_multiples(N):
    """Return sum over n in [1, N] of all multiples of n up to N."""
    ans = 0
    for n in range(1, N + 1):
        for m in range(n, N + 1, n):
            ans += m
    return ans


def resolve():
    """Read N from stdin and print the summed-multiples answer.

    I/O is kept outside the jitted function: numba cannot compile
    input()/print(), so the original @jit on a function doing I/O forced
    a silent object-mode fallback with no speedup.
    """
    N = int(input())
    print(_sum_of_multiples(N))


if __name__ == "__main__":
    resolve()
6,612 | b3a07107ef64bb50f4768954cbb579d8e66bd003 | #!/usr/bin/python
# The program was written by YU Ho Yung and LEUNG Kin Tung in group 24.
# The program is written with stubs for the phase 3
#
import pymongo
from scrapy.selector import Selector
import pandas as pd
# import numpy as np
import datetime
import re
import pprint
import subprocess
import scrapy
from pymongo import MongoClient
import model4
import model5
def isNonNegativeFloat(floatNumberInput):
    """Return True iff the input parses as a float that is >= 0."""
    try:
        value = float(floatNumberInput)
    except ValueError as err:
        # Not parseable as a number at all.
        print('Your input is not a non-negative number: ', err)
        return False
    return value >= 0
def isCourseCode(corseCode):
    """Return True iff the input looks like a course code, e.g. COMP1942.

    Accepts 2-4 capital letters, exactly 4 digits, and an optional
    trailing capital letter (COMP4332, RMBI4310, COMP1021A, ...).

    BUG FIX: the original pattern made every element optional and used
    re.match, so it matched the empty prefix of ANY input and the
    function always returned True.  re.fullmatch anchors the whole
    string against a non-optional core pattern.
    """
    return re.fullmatch(r'[A-Z]{2,4}\d{4}[A-Z]?', str(corseCode)) is not None
def isIntNumber(intNumberInput):
    """Return True iff the input can be converted with int()."""
    try:
        int(intNumberInput)
    except ValueError as err:
        # Not an integer literal (e.g. "3.5" or "abc").
        print('Your number is not an integer: ', err)
        return False
    return True
'''
5.1 Collection Dropping and Empty Collection Creating
(This feature will be used for the demonstration purpose.
The detailed implementation of this feature will be completed by you in Phase 3.)
Input:
none
Output:
Display a message “Collection dropping and empty collection creating are successful”
(after the collection(s) is(are) removed and the new empty collection(s) is(are) created).
'''
# Menu handler for feature 5.1 "Collection Dropping and Empty Collection
# Creating": no user input to gather, so simply delegate.
def collectionDroppingAndEmptyCollectionCreatingHandler(db):
    # to execute the collection dropping / recreation feature
    collectionDroppingAndEmptyCollectionCreating(db)
def collectionDroppingAndEmptyCollectionCreating(db):
    """Drop the 'course' collection (feature 5.1).

    MongoDB recreates a dropped collection lazily on the next insert, so
    dropping is sufficient to leave an "empty collection".

    Parameters: db -- the pymongo database handle.
    """
    try:
        print("Dropping Collection...")
        print(" Dropping collection \'course\'...")
        db.course.drop()
    except pymongo.errors.ConnectionFailure as error:
        print("Collection Dropping Failed! Error Message: \"{}\"".format(error))
    else:
        # BUG FIX: the original printed the success message unconditionally,
        # even after the drop had failed.
        print("Collection dropping and empty collection creating are successful")
'''
5.2 Data Crawling
(The detailed implementation of this feature will be completed by you in Phase 3.)
Input:
a URL (e.g., “http://course.cse.ust.hk/comp4332/index.html”) or
a special keyword (i.e., “default”)
Output:
If the input is “default”, display a message “Data Crawling is successful and all data are inserted into the database”
(after all data are crawled from the default URL given in the project webpage and are inserted into the database).
Otherwise, do the same prompt operation but the URL used is the URL typed in the input.
'''
def dataCrawlingHandler():
    # Prompt for a URL (or the special keyword "default") and crawl it.
    url = input("Please input the URL for Data Crawling: ")
    dataCrawling(url)
def dataCrawling(url):
    """Crawl course data from *url* (feature 5.2) via the scrapy spider.

    The target URL is written to url.txt, which ustWebpageSpider reads;
    the spider is then launched as a subprocess.  The special input
    "default" (or an empty string) crawls the default project site.
    """
    print("Data Crawling started")
    is_default = str(url).lower() == "default" or url == ""
    if is_default:
        # Default crawl target given in the project webpage.
        url = 'http://comp4332.com/realistic'
        # testing
        # url = 'http://comp4332.com/trial'
    # Hand the URL to the spider via url.txt (its configured input file).
    # The original duplicated this write+run logic in both branches.
    with open('url.txt', 'w') as f:
        f.write(str(url))
    # argv-list form avoids shell=True (no shell quoting surprises).
    subprocess.run(["scrapy", "crawl", "ustWebpageSpider"])  # see ustWebpageSpider.py
    if is_default:
        print("Data Crawling is successful and all data from default are inserted into the database")
    else:
        print("Data Crawling is successful and all data are inserted into the database from: ", str(url))
'''
5.3 Course Search
(The detailed implementation of this feature will be completed by you in Phase 4.
But, the design of this feature will be completed in Phase 2.)
We have the following two operations for a course search.
1. Course Search by Keyword
2. Course Search by Waiting List Size
Note: Although there are some missing data in this project (which may require “prediction”),
in this part/feature, you just perform these operations for a course search only based on the data given to you.
There is no need to perform any “prediction” in this part.
'''
#def courseSearch():
#This is just an abstraction here, not even a stub.
#The detailed implementation of this feature will be completed in Phase 4.
'''
5.3.1 Course Search by Keyword
Input:
a text (k) where this text is called “keyword(s)”
Output:
A list of courses which course titles, course description or course remarks match the given text k.
In the output, for each course, please show “Course Code”, “Course Title”, “No. of Units/Credits”,
a list of sections of the course each with “Section”, “Date & Time”, “Quota”, “Enrol”, “Avail” and “Wait”.
Please sort the list of courses in ascending order of “Course Code”.
(Within a single course, please sort in ascending order of “Sections”)
We say that a phrase P matches text k if at least one of the words in phrase P is equal to one of words in k.
For example, if P = “Big Data Mining and Management” and k = “Mining”, then P matches k.
If P = “Big Data Mining and Management” and k = “Risk Mining”, then P matches k too.
If P = “Big Data Mining and Management” and k = “Mining Management”, then P matches k.
'''
# "lectureSection" is optional
# "satisfied" is optional
def outputCourseDetails(courseCode, lectureSection=0, satisfied=""):
    """Print and return a stub course-details DataFrame (Phase-2 stand-in).

    Parameters
    ----------
    courseCode : str
        Course code searched for (unused by this stub).
    lectureSection : int, optional
        Lecture number (unused by this stub).
    satisfied : str, optional
        When non-empty, a "Satisfied" column is appended to the output.

    Returns
    -------
    pd.DataFrame
        Hard-coded sample rows mimicking the real query output.
    """
    # Shared hard-coded sample data; the original duplicated the entire
    # DataFrame construction in both branches.
    data = OrderedDict([
        ("Course Code", ["COMP1001", "COMP1021"]),
        ("Course Title", ["Exploring Multimedia and Internet Computing",
                          "Introduction to Computer Science"]),
        ("No. of Units/Credits", [3, 3]),
        ("Section", ["L1,L2", "L1"]),
        ("Date & Time", ["Th 03:00PM - 04:50PM", "TuTh 04:30PM - 05:20PM"]),
        ("Quota", [67, 80]),
        ("Enrol", [19, 75]),
        ("Avail", [48, 5]),
        ("Wait", [0, 26]),
    ])
    if satisfied != "":
        data["Satisfied"] = ["Yes", "No"]
    df = pd.DataFrame(data, columns=list(data.keys()))
    print(df)
    # return df.values.tolist()
    return df
def courseSearchByKeywordHandler(db):
    # Gather the keyword(s) from the user, then run search 5.3.1.
    keyword = input("Please input a keyword for searching : ")
    courseSearchByKeyword(db, keyword)
def courseSearchByKeyword(db, keyword):
    """Course Search by Keyword (feature 5.3.1).

    Splits *keyword* into words and OR-joins them into a regex, then
    matches it against course title, description and colistWith.  For
    each matching course the latest snapshot of every section is
    collected and the results are printed (nothing is returned).
    """
    # "a b c" -> "a|b|c": any word matching counts as a match.
    keyword = keyword.split()
    keyword = "|".join(keyword)
    try:
        print("Querying Documents...")
        print(" Finding a list of course which title....")
        # listOfCourse = db.course.find()
        listOfCourse = db.course.aggregate([
            # Stage 1: keep courses whose text fields match the keyword regex.
            {
                "$match": {
                    "$or": [
                        {"title": {'$regex': keyword}},
                        {"description": {'$regex': keyword}},
                        {"colistWith": {'$regex': keyword}}
                    ]
                }
            },
            # Stage 2: one document per section snapshot.
            {
                "$unwind": "$sections"
            },
            # Stages 3-4: per (course, section), keep only the latest
            # recordTime snapshot ($last after sorting by recordTime).
            {
                "$sort": {"sections.recordTime": 1}
            },
            {
                "$group": {
                    "_id": {"sid": "$sections.sectionId", "code": "$code"},
                    "code": {"$last": "$code"},
                    "title": {"$last": "$title"},
                    "credits": {"$last": "$credits"},
                    "sections": {"$last": "$sections"},
                    "description": {"$last": "$description"}
                }
            },
            # Stages 5-6: regroup by course, pushing sections in
            # ascending sectionId order and computing avail = quota - enrol.
            {
                "$sort": {"sections.sectionId": 1}
            },
            {
                "$group": {
                    "_id": {"code": "$code"},
                    "code": {"$last": "$code"},
                    "title": {"$last": "$title"},
                    "credits": {"$last": "$credits"},
                    "sections": {
                        "$push": {
                            "sectionId": "$sections.sectionId",
                            "dateAndTime": "$sections.offerings.dateAndTime",
                            "quota": "$sections.quota",
                            "enrol": "$sections.enrol",
                            "avail": {"$subtract": ["$sections.quota", "$sections.enrol"]},
                            "wait": "$sections.wait"
                        }
                    },
                    "description": {"$last": "$description"}
                }
            },
            {
                "$project": {"_id": 0, "code": 1, "title": 1, "credits": 1, "sections": 1, "description": 1}
            }
        ])
        # Pretty-print each matching course and its sections.
        recordNo = 0
        for oneCourse in listOfCourse:
            recordNo = recordNo + 1
            print("Record {:d}:".format(recordNo))
            # NOTE(review): the summary line prints the quota/enrol/... of
            # the FIRST section only.
            print("code: {:s}\ntitle: {:s}\ncredits: {:0.2f}\nquota: {:d}\nenrol: {:d}\navail: {:d}\nwait: {:d}".format(oneCourse["code"], oneCourse["title"], oneCourse["credits"], oneCourse["sections"][0]["quota"], oneCourse["sections"][0]["enrol"], oneCourse["sections"][0]["avail"], oneCourse["sections"][0]["wait"]))
            for oneSection in oneCourse["sections"]:
                print("sections: {:s}, Date & Time: {:s}".format(oneSection["sectionId"], ' '.join(oneSection["dateAndTime"])))
            print("description: {:s}".format(oneCourse["description"]))
            # pprint.pprint(oneCourse)
    except pymongo.errors.ConnectionFailure as error:
        print("Document Querying Failed! Error Message: \"{}\"".format(error))
    # courseCode = "COMP1001"
    # return outputCourseDetails(courseCode)
'''
5.3.2 Course Search by Waiting List Size
Input:
A non-negative real number f
Starting Time Slot (start_ts)
Ending Time Slot (end_ts)
Output:
A list of courses each of which has a lecture section (e.g., “L1” and “L2”) in a time slot,
says match_ts,between start_ts (inclusively) and end_ts (inclusively)
where
the number of students in the waiting list of this lecture section is
greater than or equal to
f multiplied by the number of students enrolled in this lecture section in that timeslot.
In the output, for each “distinct” course, please show
“Course Code”,
Course Title”,
“No. of Units/Credits”,
“Matched Time Slot”,
a list of sections (including both lecture 9/17 COMP4332/RMBI4310 Project (Spring 2018) Course Registration Data Analytics
sections and non-lecture sections)
of the course each with “Section”,
“Date & Time”,
“Quota”,
“Enrol”,
“Avail”,
“Wait” and
“Satisfied”
(all shown with the content/values recorded in the time slot match_ts).
Note that “Matched Time Slot” is a new attribute in this query and it is equal to match_ts.
If a single course satisfies the required condition in multiple time slots
(no matter which lecture section of this course satisfies the required condition),
we just show the latest time slot among all these time slots in which this course satisfies the required condition.
Thus, each course should appear at most once in the output.
Note that “Satisfied” is another new attribute in this query.
It is equal to “Yes”
if the number of students in the waiting list of this section
is greater than or equal to
f multiplied by the number ofstudents enrolled in this section in that time slot.
It is equal to “No” otherwise.
Attribute “Satisfied” is not needed to be considered in Phase 2.
Please sort the list of courses in ascending order of “Course Code”.
(Within a single course, please sort in ascending order of “Sections”)
'''
def courseSearchByWaitingListSizeHandler(db):
    # Prompt until a valid non-negative number is supplied.
    correctInput = False
    while(correctInput == False):
        f = input("Please input a non-negative real number: ")
        correctInput = isNonNegativeFloat(f)
    # NOTE(review): the two time slots are not validated here; malformed
    # input surfaces later as a strptime ValueError in the query function.
    start_ts = input("Please input a Starting Time Slot: ")
    end_ts = input("Please input a Ending Time Slot : ")
    courseSearchByWaitingListSize(db, f, start_ts, end_ts)
# A non-negative real number f
# Starting Time Slot (start_ts)
# Ending Time Slot (end_ts)
# A non-negative real number f
# Starting Time Slot (start_ts)
# Ending Time Slot (end_ts)
def courseSearchByWaitingListSize(db, f, start_ts, end_ts):
    """Course Search by Waiting List Size (feature 5.3.2).

    A course qualifies when, in some record-time slot between start_ts
    and end_ts (inclusive, format "%Y-%m-%d %H:%M"), one of its LECTURE
    sections ("L...") has wait >= f * enrol.  Only the latest matching
    slot per course is kept, and every section of that slot carries a
    computed "satisfied" flag.  Results are printed; nothing is returned.
    """
    # satisfied = "Yes"
    # f = 0.01
    try:
        print("Querying Documents...")
        listOfCourseWithWaitingListSize = db.course.aggregate([
            # Stage 1: one document per (course, section snapshot).
            {"$unwind": "$sections"},
            # Stage 2: keep only snapshots inside the requested window.
            {"$match":
                {"$and": [
                    {"sections.recordTime": {"$gte": datetime.datetime.strptime(start_ts, "%Y-%m-%d %H:%M")}},
                    {"sections.recordTime": {"$lte": datetime.datetime.strptime(end_ts, "%Y-%m-%d %H:%M")}}
                ]
                }
            },
            # Stage 3: flag each snapshot -- satisfied: wait >= f*enrol;
            # lecSatisfied: 1 when additionally the section is a lecture
            # (sectionId starts with "L"), else 0.
            {"$project":
                {"code": 1,
                 "title": 1,
                 "credits": 1,
                 "sections": 1,
                 "satisfied": {"$gte": ["$sections.wait", {"$multiply": ["$sections.enrol", float(f)]}]},
                 "lecSatisfied": {
                     "$cond": [{
                         "$and": [
                             {
                                 "$gte": ["$sections.wait", {"$multiply": ["$sections.enrol", float(f)]}]
                             },
                             {
                                 "$eq": [{"$substr": ["$sections.sectionId", 0, 1]}, "L"]
                             }
                         ]
                     }, 1, 0]
                 }
                 },
             },
            {
                "$sort": {"sections.sectionId": 1}
            },
            # Stage 5: regroup by (course, time slot), pushing all sections
            # (in sectionId order) and counting satisfying lecture sections.
            {
                "$group": {
                    "_id": {"code": "$code", "recordTime": "$sections.recordTime"},
                    "code": {"$last": "$code"},
                    "title": {"$last": "$title"},
                    "credits": {"$last": "$credits"},
                    "recordTime": {"$last": "$sections.recordTime"},
                    "sections": {
                        "$push": {
                            "sectionId": "$sections.sectionId",
                            "dateAndTime": "$sections.offerings.dateAndTime",
                            "quota": "$sections.quota",
                            "enrol": "$sections.enrol",
                            "avail": {"$subtract": ["$sections.quota", "$sections.enrol"]},
                            "wait": "$sections.wait",
                            "satisfied": "$satisfied",
                        }
                    },
                    "lecSatisfiedCount": {"$sum": "$lecSatisfied"}
                }
            },
            # Stage 6: drop time slots with no satisfying lecture section.
            {"$match": {"lecSatisfiedCount": {"$gt": 0}}
             },
            # Stages 7-8: keep only the latest matching slot per course.
            {
                "$sort": {"recordTime": 1}
            },
            {
                "$group": {
                    "_id": {"code": "$code"},
                    "code": {"$last": "$code"},
                    "title": {"$last": "$title"},
                    "credits": {"$last": "$credits"},
                    "recordTime": {"$last": "$recordTime"},
                    "sections": {"$last": "$sections"}
                }
            },
            {
                "$project": {
                    "_id": 0,
                    "code": 1,
                    "title": 1,
                    "credits": 1,
                    "recordTime": 1,
                    "sections": 1
                }
            }
        ]
        )
        # Dump each qualifying course (raw dict form).
        recordNo = 0
        for oneCourse in listOfCourseWithWaitingListSize:
            recordNo = recordNo + 1
            print("Record {:d}:".format(recordNo))
            pprint.pprint(oneCourse)
    except pymongo.errors.ConnectionFailure as error:
        print("Document Querying Failed! Error Message: \"{}\"".format(error))
    # return outputCourseDetails(courseCode, lectureSection, satisfied)
'''
5.4 Waiting List Size Prediction
(The detailed implementation of this feature will be completed by you in Phase 6.
But, the design of this feature will be completed in Phase 5.)
Input:
Course Code (cc)
Lecture number (ln) (e.g., the input should be “1” denoting “L1”)
Time Slot (ts)
Output:
“N1,N2,N3,N4,N5”
where Ni denotes the number of students in the waiting list of the lecture number (ln) (if any) of the course cc
in the given time slot (ts) predicted by Model i for each i in [1, 5]
(Note that these 5 numbers are integers.)
Note: Since we know that training a model may take some time, in general, “cc” could be any course code.
However, in our demonstration, we will test with the course code “cc” starting from “COMP1942”, “COMP42”, “COMP43” or “RMBI” only
(i.e., (1) the COMP course (“COMP1942”), (2) any COMP course with starting course digits equal to “42” or “43” and (3) any RMBI course).
Thus, you just need to train your model with the data from the course with the course code “cc” starting from
these course code prefixes described above before our demonstration.
When we use this feature in our demonstration, you just need to load the trained model and perform the prediction of this feature based on the trained model.
If there is no lecture section of the course (cc) specified in the input or if the lecture number entered (ln) is not offered for the course (cc) specified in the input,
we just need to show “There is no lecture section and thus there is no prediction result.”
Although it is possible that we could use 5 models for one course and we could also use 5 “different” models for another course,
for the sake of simplicity, please use the same 5 models for any course needed.
Of course, even if the 5 models are the same (with the same set of “input” parameter values) for any two courses,
we know that each of the 5 models could be trained with different enrollment data from different courses, resulting in different “model” parameter values
(e.g., the weight values between neurons in a neural network which should be found from the data).
'''
def waitingListSizePredictionHandler(db):
    """Prompt for a course code, lecture number and time slot, then show
    the waiting-list sizes predicted by the five models."""
    # Keep asking until the course code passes validation.
    while True:
        cc = str(input("Please input a Course Code: ")).upper()
        if isCourseCode(cc):
            break
    # Keep asking until the lecture number parses as an integer.
    while True:
        ln = input("Please input a Lecture number: ")
        if isIntNumber(ln):
            break
    ts = input("Please input a Time Slot : ")
    N1, N2, N3, N4, N5 = waitingListSizePrediction(cc, ln, ts)
    print("The prediction result \"N1, N2, N3, N4, N5\" from 5 Models:")
    print(N1, ",", N2, ",", N3, ",", N4, ",", N5)
'''
5.5 Waiting List Size Training
(This feature will be used for your “own” training purpose before we have the real feature from Section 5.4.
The detailed implementation of this feature will be completed by you in Phase 6.
But, the design of this feature will be completed in Phase 5.)
Input:
none
Output:
Display a message “Waiting list size training is successful” (after the training process on the waiting list size finishes).
'''
def waitingListSizeTraining():
    """Train the waiting-list-size models.

    TODO: implement the actual training; currently this only reports
    success.
    """
    # BUG FIX: the original ended with `return courseData`, but no name
    # `courseData` exists in this scope, so every call raised NameError.
    # The (commented-out) caller ignores the result, so return None.
    print("Waiting list size training is successful")
    return None
# Course Code (cc)
# Lecture number (ln) (e.g., the input should be “1” denoting “L1”)
# Time Slot (ts)
def waitingListSizePrediction(courseCode, lectureNumber, timeslot):
    """Predict the waiting-list size of lecture `lectureNumber` of course
    `courseCode` at wall-clock `timeslot` ("%Y-%m-%d %H:%M"), returning
    one integer per model (N1..N5)."""
    # Convert the time slot into a half-hour index counted from the
    # earliest crawl time, 2018-01-25 09:00.
    origin = datetime.datetime.strptime("2018-01-25T09:00Z", "%Y-%m-%dT%H:%MZ").timestamp()
    slot_ts = datetime.datetime.strptime(timeslot, "%Y-%m-%d %H:%M").timestamp()
    slot_index = int((slot_ts - origin) / 1800)
    # Normalise inputs: last digit of the lecture number, upper-case code.
    lecture = int(str(lectureNumber)[-1])
    code = str(courseCode).upper()
    # N1..N4 are offsets of model 4's prediction; N5 comes from model 5.
    n1 = model4.predictionHandler(code, lecture, slot_index) + 1
    n2 = model4.predictionHandler(code, lecture, slot_index) - 1
    n3 = model4.predictionHandler(code, lecture, slot_index)
    n4 = model4.predictionHandler(code, lecture, slot_index) + 2
    n5 = model5.predictionHandler(code, lecture, slot_index)
    return int(n1), int(n2), int(n3), int(n4), int(n5)
# to display the system interface with stubs
def main():
    """Entry point: connect to the local MongoDB and run the text-menu
    loop until the user chooses Exit."""
    try:
        print("Making a MongoDB connection...")
        client = MongoClient("mongodb://localhost:27017")
        print("Getting a database named \"course\"")
        db = client["hkust"]
        # Dispatch table: menu choice -> handler ("6" exits the loop).
        handlers = {
            "1": lambda: collectionDroppingAndEmptyCollectionCreatingHandler(db),
            "2": lambda: dataCrawlingHandler(),
            "3": lambda: courseSearchByKeywordHandler(db),
            "4": lambda: courseSearchByWaitingListSizeHandler(db),
            "5": lambda: waitingListSizePredictionHandler(db),
        }
        choice = "0"
        while choice != "6":
            print("")
            print(" Main Menu")
            print("=========================")
            print("1. Collection Dropping and Empty Collection Creating")
            print("2. Data Crawling")
            print("3. Course Search by Keyword")
            print("4. Course Search by Waiting List Size")
            print("5. Waiting List Size Prediction")
            print("6. Exit")
            print("")
            choice = input("Please input your choice (1-6): ")
            print("")
            if choice in handlers:
                handlers[choice]()
            elif choice == "6":
                print("")
            else:
                print("Invalid Input!")
        client.close()
    except pymongo.errors.ConnectionFailure as error:
        print("DB Connection Failed! Error Message: \"{}\"".format(error))


main()
|
6,613 | c7558486fc50623f6e64b58668153b75bb6149b9 | # Generated by Django 2.2.10 on 2020-05-06 14:43
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the `coefficient` field
    # (plan coefficient, default 100, capped at 200 by a validator)
    # to the `Employee` model.

    dependencies = [
        ('planner', '0023_auto_20191226_1330'),
    ]

    operations = [
        migrations.AddField(
            model_name='employee',
            name='coefficient',
            field=models.PositiveSmallIntegerField(default=100, validators=[django.core.validators.MaxValueValidator(200)], verbose_name='Коєфіцієнт плану'),
        ),
    ]
|
6,614 | d983cb4ae79d8370ed0809b86762c1e2ea125320 | import pandas as pd
def ranked(country, variety, price, num):
    """Return the `num` highest-rated wines from wine_final.csv.

    Args:
        country, variety, price: filter values; an empty string means
            "do not filter on this column".
        num: maximum number of rows to return; 0 returns None.

    Returns:
        None when num == 0, otherwise a list of dicts with keys "Name",
        "Varitey" (original key spelling kept for backward compatibility),
        "Price" and "Points", sorted by points descending.
    """
    data = pd.read_csv("wine_final.csv")
    if num == 0:
        return None
    if country != '':
        data = data.query("country==\"{}\"".format(country))
    if variety != '':
        data = data.query("variety==\"{}\"".format(variety))
    if price != '':
        data = data.query("price=={}".format(price))
    data = data.sort_values(by='points', ascending=False)
    result = []
    count = 0
    for index, row in data.iterrows():
        if count >= num:
            break
        # BUG FIX: the original wrapped title/variety in {...}, producing
        # one-element *sets* instead of plain values like Price/Points.
        result.append({
            "Name": row["title"],
            "Varitey": row["variety"],
            "Price": row["price"],
            "Points": row["points"],
        })
        count += 1
    return result
|
6,615 | a12fe733e607b1ce4cf0f3f4adc3ea85d082e769 | from pyftpdlib.authorizers import DummyAuthorizer # Autorizaciones
from pyftpdlib.handlers import FTPHandler # Comandos del usuario
from pyftpdlib.servers import FTPServer # Creacion del servidor
import logging
import os
def main():
    """Configure and start a local FTP server on 127.0.0.1:2121.

    Blocks forever in serve_forever(); stop with Ctrl-C.
    """
    # Dummy authorizer managing "virtual" users (no OS accounts).
    authorizer = DummyAuthorizer()
    # One full-permission user plus anonymous read-only access to the
    # current working directory.
    authorizer.add_user('user', '12345', '.', perm='elradfmwMT')
    authorizer.add_anonymous(os.getcwd())
    # FTP protocol handler wired to the authorizer above.
    handler = FTPHandler
    handler.authorizer = authorizer
    # Greeting string sent to every client on connect.
    handler.banner = 'pyftpdlib basado en FTP, listo'
    # Log connections/actions to stdout (switch to filename= for a file).
    logging.basicConfig(level=logging.INFO, format='(ServidorTCP) %(message)s',)
    # Listen address; the standard FTP port would be 21.
    address = ('127.0.0.1', 2121)
    server = FTPServer(address, handler)
    server.max_cons = 10        # max simultaneous connections
    server.max_cons_per_ip = 5  # per-IP limit (0 would mean unlimited)
    server.serve_forever()
if __name__ == '__main__':
print("Servidor a la escucha")
main() |
6,616 | e9fff1fb0a79493d4d7f3417c7d554eb10a978a0 | def fun1(fun):
    # Prepend a fixed greeting to the given message string.
    return "Hai!!!! "+fun
def message():
    """Return the follow-up question shown after the greeting."""
    text = "How are you"
    return text
res = fun1(message())
print(res)
|
6,617 | 056636e2220e529d3f66872a4a48c0984cda1ce4 | def sort(L):
    """Recursive merge sort: return a sorted list of L's items.

    Lists of length 0 or 1 are returned unchanged (same object).
    """
    n = len(L)
    if n < 2:
        return L
    # Split in half and merge the recursively sorted halves.
    L1, L2 = L[:n // 2], L[n // 2:]
    return merge(sort(L1), sort(L2))
def merge(L1, L2):
    """Merge two sorted lists into one sorted list.

    Stable: on ties the element from L1 comes first (<= comparison).

    Rewritten iteratively — the original recursed once per element and
    re-sliced both lists each call, so it raised RecursionError on inputs
    longer than the interpreter's recursion limit and cost O(n^2) copies.
    """
    merged = []
    i, j = 0, 0
    while i < len(L1) and j < len(L2):
        if L1[i] <= L2[j]:
            merged.append(L1[i])
            i += 1
        else:
            merged.append(L2[j])
            j += 1
    # One of the two tails is empty; append whatever remains.
    merged.extend(L1[i:])
    merged.extend(L2[j:])
    return merged
print(sort([9, 7, 8, 0, 5, 6, 4, 1, 2, 3])) |
6,618 | 79c7a2f2e5f0301c15efe1b26a7839a12098f793 | # coding: utf-8
'''
Precision, Recall, F1で評価する
Leave-one-outの結果
K-foldの結果
'''
import sys
import os.path
import snlocest.util as util
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.metrics import precision_recall_fscore_support, classification_report
def precision_recall_fscore(nodes, y_true, y_pred):
    """Compute precision/recall/F1 for location-estimation results.

    A node counts as "predicted" when its prediction is non-zero (0 is
    treated as "no estimate"), and as correct when the prediction equals
    the true label.

    Args:
        nodes: node IDs, used as the index of the result frame.
        y_true: true labels aligned with `nodes`.
        y_pred: predicted labels aligned with `nodes`.

    Returns:
        (precision, recall, f1, n_predicted_nodes, n_corrects, n_test).
    """
    df = pd.DataFrame({'true': y_true, 'pred': y_pred}, index=nodes)
    n_predicted_nodes = len(df[df['pred'] != 0])
    n_corrects = len(df[df['pred'] == df['true']])
    n_test = len(nodes)
    # BUG FIX: guard the degenerate cases that raised ZeroDivisionError
    # in the original (no predictions, empty test set, or p + r == 0).
    p = n_corrects / n_predicted_nodes if n_predicted_nodes else 0.0
    r = n_corrects / n_test if n_test else 0.0
    f = 2 * p * r / (p + r) if (p + r) else 0.0
    return (p, r, f, n_predicted_nodes, n_corrects, n_test)
def main(args):
    """Evaluate prediction result files against the labelled node list.

    With --random-state given, evaluates each fold of a K-fold run (the
    fold split is reproduced locally, so the same seed must be supplied);
    otherwise evaluates a single result file ('-' reads stdin).
    """
    # Labelled node IDs and their corresponding labels.
    labeled_nodes, labels = util.load_labellist(args.labelfile)

    # BUG FIX: the original tested `if args.random_state:`, which treated
    # the perfectly valid seed 0 as "option not given".
    if args.random_state is not None:
        random_state = args.random_state
        n_splits = args.n_splits if args.n_splits else 10
        cv = KFold(n_splits=n_splits, shuffle=True, random_state=random_state)
        for i, (train, test) in enumerate(cv.split(labeled_nodes)):
            # Result files are named <stem>_<fold>.<ext> inside the dir.
            filepath = os.path.join(args.resultfile_or_dir, '{}_{}.{}'.format(args.stem, i, args.ext))
            test_nodes, y_pred = util.load_result(filepath)
            y_test = labels[test]
            # Message: "node ID sets differ after splitting; is the
            # random_state different?"
            assert np.all(labeled_nodes[test] == test_nodes), '分割後のノードID集合が異なる。random_stateが違うのでは?'
            print(args.resultfile_or_dir, i, *precision_recall_fscore(test_nodes, y_test, y_pred), sep='\t')
    else:
        resultfile = args.resultfile_or_dir
        # '-' means read the result from standard input.
        if args.resultfile_or_dir == '-':
            resultfile = sys.stdin
        test_nodes, y_pred = util.load_result(resultfile)
        assert np.all(labeled_nodes == test_nodes)
        print(args.resultfile_or_dir, *precision_recall_fscore(test_nodes, labels, y_pred), sep='\t')
def parse_args():
    """Build the CLI parser and parse sys.argv for this script."""
    import argparse

    ap = argparse.ArgumentParser()
    # Positional: the label list and the result file (or fold directory).
    ap.add_argument('labelfile')
    ap.add_argument('resultfile_or_dir')
    # K-fold options; --random-state also switches on per-fold evaluation.
    ap.add_argument('--random-state', type=int)
    ap.add_argument('--n-splits', type=int, default=10)
    ap.add_argument('--stem', default='result', help='結果ファイル名の文字列')
    ap.add_argument('--ext', default='tsv', help='結果ファイルの拡張子')
    return ap.parse_args()
if __name__ == '__main__':
args = parse_args()
df = main(args)
|
6,619 | 3378ce72ae67d09258554048138b7f9023000922 | from django.shortcuts import render
import datetime
from django.http import*
from django.core.files.storage import FileSystemStorage
import uuid
import os
import cv2
import numpy as np
from pathlib import Path
def index(request):
    """Render the landing page with today's date."""
    print(request.session)
    today = datetime.datetime.now()
    # BUG FIX: the format string was "%d-%m=%Y" (typo '=' instead of '-'),
    # producing dates like "19-10=2019".
    return render(request, 'index.html', {
        "today": today.strftime("%d-%m-%Y")})
def isFileOpen(request):
    """Return True when a file is open in this session: a non-empty edit
    stack plus a stored user name and email."""
    stack = request.session['stack']
    # BUG FIX: the original compared a list with an int (`stack > 0`
    # raises TypeError on Python 3) and returned the undefined names
    # `true` / `false` instead of True / False.
    if len(stack) > 0 and request.session.get('name') is not None and request.session.get('email') is not None:
        return True
    else:
        return False
def getState(request):
    """Report the session's edit state as JSON: whether a file is open
    plus the user name, email and current file name."""
    # BUG FIX: the original tested the function object itself
    # (`if(isFileOpen)` is always truthy) instead of calling it, used the
    # undefined name `none`, and used the bare name `email` as a dict key
    # (NameError) in the closed branch.
    if isFileOpen(request):
        fileName = request.session['stack'][0]
        email = request.session['email']
        name = request.session['name']
        return JsonResponse({'state': 'open', 'name': name, 'email': email, 'fileName': fileName})
    else:
        return JsonResponse({'state': None, 'name': '', 'email': '', 'fileName': ''})
def openFile(request):
if request.method=='POST' and request.FILES['fileName']:
imageFile=request.FILES['fileName']
fs=FileSystemStorage()
imageFileName=fs.save(imageFile.name,imageFile)
stack=[]
redostack=[]
imgpath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%imageFileName))
img=cv2.imread(imgpath)
(h, w) = img.shape[:2]
r = 500 / float(h)
dim = (int(w * r),500)
stdimg=cv2.resize(img,dim,interpolation=cv2.INTER_AREA)
stdimgPath=str(Path(imgpath).with_suffix(''))+str(uuid.uuid4())[-3:]+'.png'
print(stdimgPath)
cv2.imwrite(stdimgPath,stdimg)
stdFileName=stdimgPath.split('/')[-1];
stack.append(stdFileName)
request.session['stack']=stack
print(img.shape)
request.session['size']=()
request.session['redo']=True
request.session['oriImg']=imageFileName
request.session['borderSize']=0;
request.session['email']=request.POST['email']
request.session['name']=request.POST.get('name')
request.session['redostack']=redostack
return JsonResponse({'fileName':imageFileName})
def getImage(request):
    """Stream the image currently at the top of the session's edit stack.

    Returns an empty response when no file is open.
    NOTE(review): `has_key` is a Python-2-era API; presumably this
    session object still supports it — confirm under the running Django
    version.
    """
    if request.method=="GET" and request.session.has_key('stack'):
        stack=request.session['stack']
        if len(stack)>0:
            # Image files live in ../filestore relative to this module.
            fileToServer=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]))
            return FileResponse(open(fileToServer,'rb'))
    return HttpResponse('')
def showOrignal(request):
if request.method=="GET" and request.session.has_key('oriImg'):
stack=request.session['stack']
for file in stack:
fileDelete=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%file))
os.remove(fileDelete);
request.session.pop('stack')
stack=[]
stack.insert(0,request.session['oriImg'])
request.session['stack']=stack
return JsonResponse({'response':'orignal'})
else:
return HttpResponse('')
def closeFile(request):
    """Close the session's open file: delete every temp image on the edit
    stack, then clear the stack and the stored user name and email."""
    if request.method=="GET" and request.session.has_key('stack'):
        stack=request.session['stack']
        for file in stack:
            # Each stack entry is a file name under ../filestore.
            fileDelete=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%file))
            os.remove(fileDelete);
        request.session.pop('stack')
        request.session.pop('email')
        request.session.pop('name')
        return JsonResponse({'response':'closed'})
    else:
        return HttpResponse('');
def undo(request):
    """Revert the last edit: remove (and delete from disk) the newest temp
    image so the previous stack entry becomes the visible image again.

    Requires at least two stack entries so the base image is never
    removed.
    """
    if request.method=="GET" and request.session.has_key('stack') and len(request.session['stack'])>1:
        stack=request.session['stack']
        # stack[0] is always the most recent edit result.
        fileDelete=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack.pop(0)))
        os.remove(fileDelete);
        request.session['stack']=stack;
        return JsonResponse({"response":"undid"})
    else:
        return HttpResponse('')
def redo(request):
    """Re-apply the most recently undone operation.

    Pops an operation tag from the session's redo stack and re-invokes
    the matching edit view.  The session's 'redo' flag is cleared first so
    the re-applied operation does not push itself back onto the redo
    stack.

    NOTE(review): the GET branch of scaleit() pushes the tag 'scaleit'
    (lower-case i) while this dispatch matches only 'scaleIt' — a redo of
    that path appears to be silently dropped; confirm intended.
    """
    if request.method=="GET" and request.session.has_key('redostack') and len(request.session['redostack'])>0:
        redoStack=request.session['redostack']
        request.session['redo']=False;
        value=redoStack.pop()
        if(value=='grayscale'):
            toGrayscale(request)
        if(value=='cool'):
            cool(request)
        if(value=='scaleIt'):
            scaleit(request)
        if(value=='setBorder'):
            setBorder(request);
        request.session['redostack']=redoStack;
        return JsonResponse({'response':'redo'})
def toGrayscale(request):
if request.method=="GET" and request.session.has_key('stack'):
stack=request.session['stack']
redostack=request.session['redostack']
if len(stack)>0:
fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));
grayscalefilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding......
grayImage=cv2.imread(fileAbsPath)
grayImage=cv2.cvtColor(grayImage,cv2.COLOR_BGR2GRAY)
cv2.imwrite(grayscalefilepath,grayImage)
gfilename=grayscalefilepath.split('/')[-1];
stack.insert(0,gfilename)
if request.session['redo']:
redostack.insert(0,'grayscale')
request.session['redo']=True
request.session['stack']=stack
request.session['redostack']=redostack
return JsonResponse({'response':'convertedToGrayscale'})
else:
return HttpResponse()
def scaleit(request):
if request.method=="POST" and request.session.has_key('stack'):
newX=int(request.POST['newX'])
newY=int(request.POST['newY'])
request.session['size']=(newX,newY)
stack=request.session['stack']
redostack=request.session['redostack']
fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));
scalefilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...
oriimg=cv2.imread(fileAbsPath)
newimg=cv2.resize(oriimg,(newX,newY),interpolation=cv2.INTER_AREA)
request.session['size']=newimg.shape;
cv2.imwrite(scalefilepath,newimg);
scalefilename=scalefilepath.split('/')[-1]
stack.insert(0,scalefilename)
redostack.insert(0,'scaleIt')
request.session['redostack']=redostack
request.session['stack']=stack;
return JsonResponse({'response':'scaled'})
if request.method=="GET" and request.session.has_key('size'):
newX=request.session['size'][0]
newY=request.session['size'][1]
stack=request.session['stack']
redostack=request.session['redostack']
fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));
scalefilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...
oriimg=cv2.imread(fileAbsPath)
newimg=cv2.resize(oriimg,(int(newX),int(newY)))
request.session['size']=newimg.shape;
cv2.imwrite(scalefilepath,newimg);
scalefilename=scalefilepath.split('/')[-1]
stack.insert(0,scalefilename)
redostack.insert(0,'scaleit')
request.session['redostack']=redostack
request.session['stack']=stack;
return JsonResponse({'response':'scaled'})
else:
return HttpResponse('')
def cropIt(request):
if request.method=="POST" and request.session.has_key('stack'):
x=int(request.POST['X']);
y=int(request.POST['Y']);
h=int(request.POST['h'])
w=int(request.POST['w'])
stack=request.session['stack']
redostack=request.session['redostack']
fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));
cropfilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...
oriimg=cv2.imread(fileAbsPath)
crop_img = oriimg[y:h, x:w]
cv2.imwrite(cropfilepath,crop_img);
cropfilename=cropfilepath.split('/')[-1]
stack.insert(0,cropfilename)
request.session['redostack']=redostack;
request.session['stack']=stack;
return JsonResponse({'response':'croped'})
else:
return HttpResponse('')
def setBorder(request):
if request.method=="POST" and request.session.has_key('stack'):
bordersize=int(request.POST['size']);
stack=request.session['stack']
redostack=request.session['redostack']
request.session['borderSize']=bordersize
fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));
borderfilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...
oriimg=cv2.imread(fileAbsPath)
row,col=oriimg.shape[:2]
bottom=oriimg[row-2:row,0:col]
mean=cv2.mean(bottom)[0]
border=cv2.copyMakeBorder(oriimg, top=bordersize, bottom=bordersize, left=bordersize, right=bordersize, borderType= cv2.BORDER_CONSTANT, value=[mean,mean,mean])
cv2.imwrite(borderfilepath,border);
borderfilename=borderfilepath.split('/')[-1]
stack.insert(0,borderfilename)
if request.session['redo']:
redostack.insert(0,'setBorder')
request.session['redo']=True
request.session['redostack']=redostack
request.session['stack']=stack;
return JsonResponse({'response':'croped'})
if request.method=="GET" and request.session.has_key('borderSize'):
bordersize=request.session['borderSize'];
stack=request.session['stack']
fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));
borderfilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...
oriimg=cv2.imread(fileAbsPath)
row,col=oriimg.shape[:2]
bottom=oriimg[row-2:row,0:col]
mean=cv2.mean(bottom)[0]
border=cv2.copyMakeBorder(oriimg, top=bordersize, bottom=bordersize, left=bordersize, right=bordersize, borderType= cv2.BORDER_CONSTANT, value=[mean,mean,mean])
cv2.imwrite(borderfilepath,border);
borderfilename=borderfilepath.split('/')[-1]
stack.insert(0,borderfilename)
request.session['stack']=stack;
return JsonResponse({'response':'croped'})
else:
return HttpResponse('')
def cool(request):
if request.method=="GET" and request.session.has_key('stack'):
stack=request.session['stack']
redostack=request.session['redostack']
if len(stack)>0:
fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));
grayscalefilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding......
grayImage=cv2.imread(fileAbsPath)
grayImage=cv2.applyColorMap(grayImage,cv2.COLORMAP_PARULA)
cv2.imwrite(grayscalefilepath,grayImage)
gfilename=grayscalefilepath.split('/')[-1];
stack.insert(0,gfilename)
if request.session['redo']:
redostack.insert(0,'cool')
request.session['redo']=True
request.session['stack']=stack
request.session['redostack']=redostack
return JsonResponse({'response':'convertedToGrayscale'})
else:
return HttpResponse()
def addWatermark(request):
if request.method=="POST" and request.session.has_key('stack'):
text=request.POST['t']
print(text);
stack=request.session['stack']
redostack=request.session['redostack']
request.session['text']=text
fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));
textimgPath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...
oriimg=cv2.imread(fileAbsPath)
overlay=oriimg.copy()
output=oriimg.copy()
cv2.putText(overlay,text.format(0.5),(10,30),cv2. cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 3)
cv2.addWeighted(overlay,0.5,output,1-0.5,0,output)
cv2.imwrite(textimgPath,output);
textimgName=textimgPath.split('/')[-1]
stack.insert(0,textimgName)
if request.session['redo']:
redostack.insert(0,'addWatermark')
request.session['redo']=True
request.session['redostack']=redostack
request.session['stack']=stack;
return JsonResponse({'response':'croped'})
if request.method=="GET" and request.session.has_key('borderSize'):
bordersize=request.session['borderSize'];
stack=request.session['stack']
fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));
borderfilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...
oriimg=cv2.imread(fileAbsPath)
row,col=oriimg.shape[:2]
bottom=oriimg[row-2:row,0:col]
def rotateRight(request):
    """Rotate the current image by 90 degrees and push the result onto the
    edit stack as a new temp file.

    NOTE(review): getRotationMatrix2D with a positive angle rotates
    counter-clockwise in OpenCV, and the local name `rotated180` does not
    match the 90-degree angle — confirm the intended direction.
    """
    if request.method=="GET" and request.session.has_key('stack'):
        stack=request.session['stack']
        redostack=request.session['redostack']
        if len(stack)>0:
            fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));
            # New temp file name: original stem plus a random UUID suffix.
            rotatefilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png'
            rotateImage=cv2.imread(fileAbsPath)
            (h,w)=rotateImage.shape[:2]
            center=(w/2,h/2)
            angle90=90
            scale=1.0
            M=cv2.getRotationMatrix2D(center,angle90,scale)
            # Output size (h, w) swaps the dimensions for the 90° turn.
            rotated180=cv2.warpAffine(rotateImage,M,(h,w))
            cv2.imwrite(rotatefilepath,rotated180)
            gfilename=rotatefilepath.split('/')[-1];
            stack.insert(0,gfilename)
            # Only record for redo when not currently replaying a redo.
            if request.session['redo']:
                redostack.insert(0,'rotateRight')
            request.session['redo']=True
            request.session['stack']=stack
            request.session['redostack']=redostack
            return JsonResponse({'response':'rotated'})
    else:
        return HttpResponse()
def overlay(request):
if request.method=="POST" and request.session.has_key('stack'):
stack=request.session['stack']
if len(stack)>0:
imageFile=request.FILES['fileName']
fs=FileSystemStorage()
imageFileName=fs.save(imageFile.name,imageFile)
imgpath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%imageFileName))
img=cv2.imread(imgpath)
oriimgpath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]))
oriimg=cv2.imread(oriimgpath)
h,w=oriimg.shape[:2]
print(h,w);
tsa='large_white_square.png';
transImgPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%tsa))
tsa=cv2.imread(transImgPath);
tsa=cv2.resize(tsa,(w,h))
h,w=tsa.shape[:2]
print(h,w)
x_offset=y_offset=50
tsa[y_offset:y_offset+img.shape[0], x_offset:x_offset+img.shape[1]] = img
h,w=tsa.shape[:2]
print(h,w)
dst=cv2.addWeighted(oriimg,0.7,tsa,0.3,0);
uui=str(uuid.uuid4())
print(uui)
print(uui[-3:])
overlayfilepath=str(Path(oriimgpath).with_suffix(''))+uui[-3:]+'.png' #here dirty coding......
cv2.imwrite(overlayfilepath,dst);
overlayfilename=overlayfilepath.split('/')[-1]
stack.insert(0,overlayfilename)
print(stack[0]);
if request.session['redo']:
#redostack.insert(0,'overlayed')
request.session['redo']=True
request.session['stack']=stack
#request.session['redostack']=redostack
return JsonResponse({'response':'rotated'})
else:
return HttpResponse()
|
6,620 | 5707e24596dfe2d85e9a7caa93aa3e253a41ae40 | # -*- encoding: utf-8 -*-
from django.conf.urls import patterns, url
# URL routes for the profiles app (legacy string-based view references
# via the pre-Django-1.8 patterns() helper).
urlpatterns = patterns('apps.profiles.views',
    url(r'^$', 'index', name='profiles'),

    # Show a specific profile.
    url(r'^view/(?P<username>[a-zA-Z0-9_-]+)/$', 'view_profile', name='profiles_view'),

    url(r'^edit/$', 'edit_profile', name='profile_edit'),
    url(r'^privacy/$', 'privacy', name='profile_privacy'),
    url(r'^connected_apps/$', 'connected_apps', name='profile_connected_apps'),
    url(r'^password/$', 'password', name='profile_password'),
    url(r'^position/$', 'position', name='profile_position'),
    url(r'^email/$', 'add_email', name='profile_add_email'),

    # Ajax views
    url(r'^deleteposition/$', 'delete_position', name='profile_delete_position'),
    url(r'^email/delete_email/$', 'delete_email', name='profile_delete_email'),
    url(r'^email/set_primary/$', 'set_primary', name='profile_set_primary'),
    url(r'^email/verify_email/$', 'verify_email', name='profile_verify_email'),
    url(r'^email/toggle_infomail/$', 'toggle_infomail', name='profile_toggle_infomail'),
    url(r'^email/toggle_jobmail/$', 'toggle_jobmail', name='profile_toggle_jobmail'),
    url(r'^marks/update_mark_rules/$', 'update_mark_rules', name='profile_update_mark_rules'),

    # Endpoint that exposes a json lump of all users but only id and name.
    url(r'^api_plain_user_search/$', 'api_plain_user_search', name='profiles_api_plain_user_search'),

    # Endpoint that exposes a json lump of all users which have set their profile to public.
    url(r'^api_user_search/$', 'api_user_search', name='profiles_api_user_search'),

    url(r'^user_search/$', 'user_search', name='profiles_user_search'),

    # Profile index with active tab.  NOTE: this catch-all must stay last
    # or it would shadow the named routes above.
    url(r'^(?P<active_tab>\w+)/$', 'index', name='profiles_active'),
)
|
6,621 | 24635989ccdb0f35f1e618dd8dc07f2cf84faddb | a = [1, 1, 2, 3, 4, 4, 5, 7, 12, 30, 49]
# Print the values of `a` that are below 5, one per line with a trailing
# space (output format preserved from the original).
# Idiomatic rewrite: iterate values directly; the original's manual
# `i += 1` inside the `for` loop was dead code — `range` drives the index.
for value in a:
    if value < 5:
        print(str(value) + " ")
|
6,622 | 5dc17db0aca109720d1ba62d65b86d9b81714063 | import os
import flask_sqlalchemy as sqlalchemy
from flask import Flask, jsonify, request,render_template,redirect,url_for,json,flash
from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class
from flask_cors import CORS
import datetime
from flask_bootstrap import Bootstrap
from flask_login import LoginManager,current_user, login_user,logout_user, login_required
from flask_login import UserMixin
from hashlib import md5
from database.models import *
#from sqlalchemy_imageattach.entity import Image, image_attachment
app = Flask(__name__,static_url_path='/static')
app.debug = True
CORS(app)
login_manager = LoginManager()
login_manager.init_app(app)
#UPLOAD_FOLDER = '../static/templates'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///sqlalchemy-demo.db'
app.config['SECRET_KEY'] = 'Thisissupposedtobesecret!'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
#app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
bootstrap = Bootstrap(app)
app.config.update(DEBUG=True)
db = sqlalchemy.SQLAlchemy(app)
base_url = '/api/'
@login_manager.user_loader
def load_user(id):
    """Flask-Login user loader: look the id up among students first, then
    instructors (ids are kept unique across both tables at signup)."""
    student = Student.query.get(int(id))
    if student is not None:
        return student
    return Instructor.query.get(int(id))
@app.route(base_url, methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('studenthome'))
form = LoginForm()
if form.validate_on_submit():
user = Student.query.filter_by(email=form.email.data).first()
# Login Student
if user is not None and user.check_password(form.password.data):
login_user(user, remember=form.remember_me.data)
return redirect(url_for('apply'))
user = Instructor.query.filter_by(email=form.email.data).first()
# Login Instructor
if user is not None and user.check_password(form.password.data):
login_user(user, remember=form.remember_me.data)
return redirect(url_for('post'))
# Login failed
flash('Invalid username or password')
return redirect(url_for('login'))
return render_template('mainpage.html', title='Sign In', form=form)
# Route to student Profile
@app.route(base_url + 'studentProfile', methods=['GET'])
def studenthome():
return render_template('student_Profile.html')
# Route to Instructor Profile
@app.route(base_url + 'instructorProfile', methods=['GET'])
def instructorhome():
return render_template('Instructor_Profile.html')
# Route to create a student account and main page
@app.route(base_url + 'Register', methods=['POST','GET'])
def createAccount():
if request.method == 'POST':
# Student option is checked
if request.form['options'] == 'STUDENT':
new_user = Student(request.form['first-name'],request.form['last-name'],request.form['email'],request.form['pwd'])
db.session.add(new_user)
db.session.commit()
db.session.refresh(new_user)
# Make sure id is unique
while Instructor.query.filter_by(id=new_user.id).first() is not None:
new_user.id = new_user.id + 1
db.session.commit()
db.session.refresh(new_user)
login_user(new_user)
return redirect(url_for('studenthome'))
# Instructor option is checked
elif request.form['options'] == 'INSTRUCTOR':
new_user = Instructor(request.form['first-name'],request.form['last-name'],request.form['email'],request.form['pwd'])
db.session.add(new_user)
db.session.commit()
db.session.refresh(new_user)
# Make sure id is unique
while Student.query.filter_by(id=new_user.id).first() is not None:
new_user.id = new_user.id + 1
db.session.commit()
db.session.refresh(new_user)
login_user(new_user)
return redirect(url_for('instructorhome'))
return redirect(url_for('login'))
#return render_template('studenPortal.html', Jobs = Jobs.query.all())
# Route to create a instructor account
@app.route(base_url + 'instructors', methods=['POST'])
def createInstructor():
instructor = Instructor(**request.json)
db.session.add(instructor)
db.session.commit()
db.session.refresh(instructor)
return jsonify({"status": 1, "instructor": instructor_to_obj(instructor)}), 200
# Route to post a job for Instructors
@app.route(base_url + 'post', methods=['POST','GET'])
#@login_required
def post():
if request.method == 'POST':
new_job = Jobs(request.form['position'],request.form['Semester'],request.form['pay'],request.form['gpa_required'])
db.session.add(new_job)
db.session.commit()
db.session.refresh(new_job)
#,applicates = Job_Application.query.all()
return render_template('instructorPortal.html',applicates = Job_Application.query.all())
# Route to Display jobs for students
@app.route(base_url + 'apply', methods=['POST','GET'])
@login_required
def apply():
    """On POST, create a Job_Application owned by the logged-in student,
    then (re)render the student portal with all jobs and the user's
    applications. On GET, just render the portal."""
    if request.method == 'POST':
        #temp_student = Student(first_name=current_user.first_name,last_name=current_user.last_name,email=current_user.email,password=current_user.password)
        #db.session.add(temp_student)
        #db.session.commit()
        # NOTE(review): 'Avalialability'/'grade_recieved'/'Submited' typos are
        # shared with the form/model field names -- do not "fix" one side only.
        new_app = Job_Application(grade_recieved=request.form['Grade'],Avalialability=request.form['Avalialability'],bio=request.form['bio'],gpa_overall=request.form['gpa_overall'],job_status=request.form['job_status'],owner=current_user)
        # Status is forced regardless of what the form submitted.
        new_app.job_status = "Submited"
        #new_app = Job_Application(owner=temp_student)
        db.session.add(new_app)
        db.session.commit()
        db.session.refresh(new_app)
        flash("Job Application successfully Submited")
    # NOTE(review): filter_by(id=current_user.id) filters applications by the
    # application primary key, not the owner -- presumably owner_id was meant;
    # confirm against the Job_Application model.
    return render_template('studenPortal.html', Jobs = Jobs.query.all(),Appliedjobs = Job_Application.query.filter_by(id=current_user.id))
# Route to edit info in a student account
# Edit ONLY major, gpa and grad_date
@app.route(base_url + 'students_edit', methods=['GET', 'POST'])
@login_required
def editStudent():
    """Let the logged-in student update their GPA and major, then show the profile."""
    if request.method == 'POST':
        # Apply the submitted edits to the logged-in user and persist them.
        current_user.gpa = request.form['editGpa']
        current_user.major = request.form['editMajor']
        db.session.add(current_user)
        db.session.commit()
        db.session.refresh(current_user)
    # Both GET and POST end up on the profile page.
    return render_template('student_Profile.html', current_user=current_user)
# Route to edit info in an Instructor account
# Edit ONLY email, office, and phone
@app.route(base_url + 'instructors_edit', methods=['GET', 'POST'])
@login_required
def editInstructor():
    """Let the logged-in instructor update email/phone/office, then show the profile."""
    if request.method == 'POST':
        # Apply the submitted edits to the logged-in user and persist them.
        current_user.email = request.form['editEmail']
        current_user.phone = request.form['editPhone']
        current_user.office = request.form['editOffice']
        db.session.add(current_user)
        db.session.commit()
        db.session.refresh(current_user)
    # Both GET and POST end up on the profile page.
    return render_template('Instructor_Profile.html', current_user=current_user)
# Route to update Student Application
@app.route(base_url + 'updateApplication', methods=['POST'])
@login_required
def update_application(applicate):
    """Mark the given applicant's application as rejected.

    NOTE(review): the route declares no URL parameter, so Flask can never
    supply ``applicate`` -- confirm how this endpoint is meant to be invoked.
    """
    if request.method == 'POST':
        # .first() resolves the query to a single Student row (was missing,
        # leaving ``student`` as a Query object).
        student = Student.query.filter_by(id=applicate.owner_id).first()
        # NOTE(review): Job_Application looks like a relationship attribute;
        # confirm it is a single object, not a collection, on the model.
        student.Job_Application.job_status = "Rejected"
        # Fixed typo: was ``db,session.add(student)`` (comma instead of dot),
        # which raised NameError at runtime instead of staging the row.
        db.session.add(student)
        db.session.commit()
        db.session.refresh(student)
    return render_template('instructorPortal.html',applicates = Job_Application.query.all())
# Route to Delete student Application
@app.route(base_url + 'cancel_Application', methods=['DELETE'])
@login_required
def cancel_application():
    """Delete the logged-in user's job for the posted position name."""
    if request.method == 'DELETE':
        job_position = request.form['job_name']
        # .first() resolves the query to one row; session.delete() requires a
        # mapped instance and previously received the Query object itself.
        job_pos = current_user.jobs.filter_by(position=job_position).first()
        db.session.delete(job_pos)
        db.session.commit()
        # Removed ``db.session.refresh()``: refresh() requires an instance
        # argument, and a just-deleted row cannot be refreshed anyway.
    return render_template('studenPortal.html', Jobs = Jobs.query.all(),Appliedjobs = Job_Application.query.filter_by(id=current_user.id),applied=Jobs.query.filter_by())
# Route to Login out User
@app.route('/logout')
def logout():
    """End the current session and send the visitor back to the login page."""
    logout_user()
    return redirect(url_for('login'))
def main():
    """Create any missing database tables, then start the Flask dev server."""
    db.create_all() # creates the tables you've provided
    app.run(debug=True) # runs the Flask application
if __name__ == '__main__':
    main()
|
6,623 | 4d7e30714ae209e1d09d895dadf7a19928fe253f | # coding: utf-8
# 02. 「パトカー」+「タクシー」=「パタトクカシーー」
# 「パトカー」+「タクシー」の文字を先頭から交互に連結して文字列「パタトクカシーー」を得よ.
s1 = "パトカー"
s2 = "タクシー"
# Interleave the two words character by character.
ans = "".join(c1 + c2 for c1, c2 in zip(s1, s2))
print(ans)
#パタトクカシーー
|
6,624 | 1e929bc3c97de859a16a4ac8d5ac2ebadefd0516 | from __future__ import absolute_import
import numpy as np
import matplotlib.pyplot as pl
import seaborn as sb
sb.set_color_codes('muted')
import scipy.optimize as op
from scipy import stats
def errorbar(t, f, s, fp=None, **kwargs):
    """Scatter the flux time series ``f(t)`` with error bars ``s``.

    Saves the figure to ``fp`` when given, then closes it.
    """
    with sb.axes_style('white'):
        figure, axis = pl.subplots(1, 1, figsize=(10,3))
        axis.errorbar(t, f, s, marker='o', color='b', linestyle='none', **kwargs)
        pl.setp(axis, xlim=[t.min(), t.max()], xlabel='Time [BJD]',
                ylabel='Normalized flux')
        figure.tight_layout()
        if fp:
            figure.savefig(fp)
        pl.close()
def pixels(pix, fp=None):
    """Show a pixel stamp ``pix`` as an image with both axes hidden.

    Saves the figure to ``fp`` when given, then closes it.
    """
    with sb.axes_style('white'):
        figure, axis = pl.subplots(1, 1, figsize=(5,5))
        axis.imshow(pix, interpolation='none')
        axis.xaxis.set_visible(False)
        axis.yaxis.set_visible(False)
        figure.tight_layout()
        if fp:
            figure.savefig(fp)
        pl.close()
def centroids(t, x, y, fp=None):
    """Plot the x (blue) and y (red) centroid series against time.

    Saves the figure to ``fp`` when given, then closes it.
    """
    with sb.axes_style('white'):
        figure, axis = pl.subplots(1, 1, figsize=(10,3))
        axis.plot(t, x, label='x', color='b')
        axis.plot(t, y, label='y', color='r')
        axis.legend()
        pl.setp(axis, xlim=[t.min(), t.max()], xlabel='Time [BJD]',
                ylabel='Centroid')
        figure.tight_layout()
        if fp:
            figure.savefig(fp)
        pl.close()
def simple_ts(t, f, fp=None, **kwargs):
    """Plot the flux time series ``f(t)`` as blue points.

    Saves the figure to ``fp`` when given, then closes it.
    """
    with sb.axes_style('white'):
        figure, axis = pl.subplots(1, 1, figsize=(10,3))
        axis.plot(t, f, 'bo', **kwargs)
        pl.setp(axis, xlim=[t.min(), t.max()], xlabel='Time [BJD]',
                ylabel='Normalized flux')
        figure.tight_layout()
        if fp:
            figure.savefig(fp)
        pl.close()
# def corrected_ts(t, f, f_cor, mod_full, mod_ma, resid, fp=None):
# with sb.axes_style('white'):
# fig, axs = pl.subplots(1, 3, figsize=(10,3), sharex=True, sharey=False)
# axs.flat[0].plot(t, f, 'k.')
# axs.flat[0].plot(t, mod_full, '-', lw=2)
# axs.flat[1].plot(t, f_cor, 'k.')
# axs.flat[1].plot(t, mod_ma, '-', lw=5)
# axs.flat[2].plot(t, resid, 'k.')
# pl.setp(axs, xlim=[t.min(), t.max()], xticks=[], yticks=[])
# fig.tight_layout()
# if fp:
# fig.savefig(fp)
# pl.close()
def corrected_ts(t, f, f_cor, mod_full, mod_ma, resid, fp=None):
    """Three stacked panels sharing the time axis: raw flux + full model,
    corrected flux + transit model, and residuals. Saves to ``fp`` if given."""
    # Inward major/minor ticks on all panels.
    rc = {'xtick.direction': 'in',
          'ytick.direction': 'in',
          'xtick.major.size': 5,
          'ytick.major.size': 5,
          'xtick.minor.size': 2,
          'ytick.minor.size': 2}
    # t_offset = int(t[0])
    # t_offset = 2450000
    # t -= t_offset
    with sb.axes_style('white', rc):
        fig, axs = pl.subplots(3, 1, figsize=(6,6), sharex=True, sharey=False)
        # Panel 0: raw data overlaid with the combined model.
        axs.flat[0].plot(t, f, 'ko', ms=5, alpha=0.6)
        # axs.flat[0].plot(t, mod_full, 'r-', lw=1, label='Transit + Systematics')
        axs.flat[0].plot(t, mod_full, 'r-', lw=1.5, label='Model')
        # axs.flat[0].legend()
        # Panel 1: systematics-corrected data with the transit-only model.
        axs.flat[1].plot(t, f_cor, 'ko', ms=5, alpha=0.6)
        axs.flat[1].plot(t, mod_ma, 'r-', lw=3, label='Transit')
        # axs.flat[1].legend()
        # Panel 2: residuals.
        axs.flat[2].plot(t, resid, 'ko', ms=5, alpha=0.6)
        # Show absolute tick values (disable matplotlib's offset notation).
        axs.flat[0].yaxis.get_major_formatter().set_useOffset(False)
        axs.flat[1].yaxis.get_major_formatter().set_useOffset(False)
        axs.flat[2].xaxis.get_major_formatter().set_useOffset(False)
        axs.flat[0].minorticks_on()
        axs.flat[1].minorticks_on()
        axs.flat[2].minorticks_on()
        pl.setp(axs.flat[2].xaxis.get_majorticklabels(), rotation=20)
        pl.setp(axs.flat[0], title='Raw data', ylabel='Normalized flux')
        pl.setp(axs.flat[1], title='Corrected', ylabel='Normalized flux')
        # pl.setp(axs.flat[2], title='Precision: {0:.0f} ppm'.format(resid.std()*1e6), ylabel='Residuals')
        pl.setp(axs.flat[2], title='Residuals')
        # pl.setp(axs.flat[2], xlim=[t.min(), t.max()], xlabel='T-{} [BJD]'.format(t_offset))
        pl.setp(axs.flat[2], xlim=[t.min(), t.max()], xlabel='Time [BJD]')
        fig.tight_layout()
        if fp:
            fig.savefig(fp)
        pl.close()
|
6,625 | 23160c2f030b0bd862360e944fbbc283c6cb45b2 | from rest_framework import serializers
class BillBaseSerializer(serializers.Serializer):
    """Fields shared by every bill serializer."""
    vendor = serializers.CharField(required=False)
    amount = serializers.FloatField()
    bill_date = serializers.DateField()
    due_date = serializers.DateField()
class BillListSerializer(BillBaseSerializer):
    """List-view serializer: base fields plus the datastore entity id."""
    id = serializers.SerializerMethodField()

    def get_id(self, obj):
        """Return the numeric id of the entity's datastore key."""
        # Parameter renamed from ``object``, which shadowed the builtin.
        return obj.key.id()
class BillCreateSerializer(BillBaseSerializer):
    """Payload accepted when creating a bill."""
    line_items = serializers.JSONField(default=None)
    company = serializers.CharField()
    branch = serializers.CharField()
    status = serializers.IntegerField()
    date_of_payment = serializers.DateField(default=None)
    notes = serializers.CharField(max_length=500, default=None)
class BillPaymentSerializer(serializers.Serializer):
    """Payload for recording a payment against an existing bill."""
    status = serializers.IntegerField()
    date_of_payment = serializers.DateField(required=True)
    notes = serializers.CharField(max_length=500, required=False)
class BillDetailSerializer(BillCreateSerializer, BillListSerializer):
    """Full bill representation: create fields + id + audit metadata."""
    created_by = serializers.CharField(default=None)
    created_on = serializers.DateField(default=None)
    updated_by = serializers.CharField(default=None)
    updated_on = serializers.DateField(default=None)
|
6,626 | 8c1718f56a73fdd962154abfaedc7c0c3cb0d9ba | from django.urls import path
from . import views
# URL namespace used for reversing, e.g. {% url 'adverts:list' %}.
app_name = 'adverts'
urlpatterns = [
    path('', views.AdvertListView.as_view(), name="list"),
    path('create/', views.AdvertFormView.as_view(), name='adverts-create'),
    # Catch-all category filter; kept last so it cannot shadow 'create/'.
    path('<str:category>/', views.AdvertListView.as_view(), name="adverts-list-categories"),
]
|
6,627 | d1d293a5d2c394e69d93488605f27b5468220286 | import pymongo
import time
client = pymongo.MongoClient('localhost', 27017);
db = client['zhihu']; # dict-like access; the database is created on first use
# client.drop_database('zhihu') # drop the whole database
collection = db['zhihu']; # created on first use if it does not already exist
# db.drop_collection('zhihu') # drop the collection
# Insert a probe document stamped with the current local time.
document_test = {'name': 'test', 'time': time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))}
test_id = collection.insert(document_test);
# collection.find_one({'name': 'test'})
# collection.find({'name': 'test'}) # returns a cursor; supports further find, count, etc.
# collection.update({'name': 'test'}, {'$set': {'name': 'test_update'}})
print(test_id)
|
6,628 | c3c82b9ba198b7818cc8e63710140bbb6e28a9ea |
"""
クリップボードのamazonのURLから不要な部分を削除する
"""
# -*- coding: utf-8 -*-
import re
import pyperclip as clip
from urllib.parse import urlparse
#print(clip.paste())
def urlShortner():
    """Shorten an Amazon book URL found on the clipboard.

    Reads the clipboard, validates it holds an http(s) URL, keeps only the
    ``/dp/<ASIN>`` segment of the product URL, strips the ``www.`` host
    prefix, prints the result and copies it back to the clipboard.
    Returns 1 when the clipboard text is not a URL.
    """
    if clip.paste():
        text = clip.paste()
        o = urlparse(text)
        if not (o.scheme == 'http' or o.scheme == 'https') :
            print("This is not url.")
            return 1
        matchObjDp = re.search(r'/dp/', text)
        matchObjRef = re.search(r'/ref', text)
        if matchObjDp and matchObjRef:
            # Keep only the '/dp/<ASIN>/' part. A slice replaces the previous
            # O(n^2) character-by-character while loop.
            newUrl = "https://www.amazon.co.jp" + text[matchObjDp.start():matchObjRef.start()]
            # Drop the 'www.' prefix. Previously only 'www' was removed,
            # yielding the malformed host 'https://.amazon.co.jp'.
            shortUrl = newUrl.replace("www.", "")
            print ("shortUrl:" + shortUrl)
            clip.copy(shortUrl)
        else:
            print ("This url is not an introduction page of books on the amazon website.")
urlShortner()
|
6,629 | 3e305cee2f814698729c008320e326c4bd42640d | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Drop grid.gameNumber and grid.gameType; add grid.active (default True)."""
    dependencies = [
        ('grid2', '0003_auto_20161231_2329'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='grid',
            name='gameNumber',
        ),
        migrations.RemoveField(
            model_name='grid',
            name='gameType',
        ),
        migrations.AddField(
            model_name='grid',
            name='active',
            field=models.BooleanField(default=True),
        ),
    ]
|
6,630 | 2ca40a53291a62bbdb4386decc5a2dfa84431836 | #https://www.geeksforgeeks.org/count-of-substrings-of-length-k-with-exactly-k-distinct-characters/
#https://www.geeksforgeeks.org/count-number-of-substrings-with-exactly-k-distinct-characters/
|
6,631 | 70373c74e459efb2a310d94ae906910423e8bfd4 | import re
from mapa import graficar_lista, graficar_matriz
class nodo:
    """A single matrix cell: grid position plus its label and colour."""

    def __init__(self, x, y, n, c):
        # Grid coordinates (x = column, y = row) and display attributes.
        self.nombre = n
        self.color = c
        self.columna = x
        self.fila = y
# Case-insensitive line matchers for the .lfp grammar (written as explicit
# [A|a]-style character classes instead of re.IGNORECASE).
pattern_matriz = r"[M|m][A|a][T|t][R|r][I|i][Z|z]\s*\(.*,.*,.*,.*,.*\)\{"
pattern_fila = r"[F|f][I|i][L|l][A|a]\s*\(.*\)\s*.*;"
pattern_nodo = r"[N|n][O|o][D|d][O|o]\s*\(.*,.*,.*\).*;"
pattern_defecto = r"\}\s*[D|d][E|e][F|f][E|e][C|c][T|t][O|o]\s*\(.*\).*"
# Matrix-wide properties parsed from the MATRIZ(...) header line.
propiedades = {
    'fila' : '',
    'columna' : '',
    'nombre_matriz' : '',
    'forma_nodo' : '',
    'matriz_doble': '',
}
# All nodes declared by FILA/NODO lines, in file order.
nodos = []
# Defaults applied to '#' placeholders by the DEFECTO(...) line.
nombre_def = ""
color_def = ""
def leer_archivo_matriz(path):
    """Parse an .lfp matrix description file and render it via graficar_matriz.

    Recognised line kinds (matched with the module-level patterns):
      MATRIZ(rows, cols, name, shape, double){  -> fills ``propiedades``
      FILA(n1, n2, ...) colour;                 -> one full row of nodes
      NODO(col, row, name) colour;              -> one single-cell override
      } DEFECTO(name) colour                    -> defaults for '#' placeholders
    """
    with open(path, 'r', encoding='utf-8') as f:
        lineas = f.readlines()
    num_fila = 0
    estado = ""
    for i in lineas:
        if re.search(pattern_matriz, i):
            # MATRIZ header: extract the five comma-separated arguments.
            separado = re.findall(r"\(.*,.*,.*,.*,.*\)",i)
            separados = separado[0].replace("(","")
            separados = separados.replace(")","")
            separados = re.split(r",",separados)
            separados[0] = separados[0].replace(" ","")
            separados[1] = separados[1].replace(" ","")
            separados[2] = separados[2].replace("'","")
            separados[2] = separados[2].replace(" ","")
            separados[3] = separados[3].replace(" ","")
            separados[4] = separados[4].replace(" ","")
            # Assign the parsed values into the properties dict.
            propiedades['fila'] = separados[0]
            propiedades['columna'] = separados[1]
            propiedades['nombre_matriz'] = separados[2]
            propiedades['forma_nodo'] = separados[3]
            propiedades['matriz_doble'] = separados[4]
        elif re.search(pattern_fila, i):
            # The text after ')' is the colour applied to the whole row.
            separado2 = re.findall(r"\).*",i)
            separados2 = separado2[0].replace(")"," ")
            separados2 = separados2.replace(";","")
            separados2 = separados2.replace(" ","")
            separado = re.findall(r"\(.*\)",i)
            separados = separado[0].replace("(","")
            separados = separados.replace(")","")
            separados = separados.replace(";","")
            separados = separados.replace(" ","")
            separados = re.split(r",",separados)
            num = 0
            for nom in separados:
                nom = nom.replace("'", "")
                nom = nom.replace(" ", "")
                nodos.append(nodo(num, num_fila, nom, separados2))
                num = num+1
            num_fila = num_fila + 1
        elif re.search(pattern_nodo, i):
            separado = re.findall(r"\(.*,.*,.*\).*;",i)
            separados = separado[0].replace("(","")
            separados = separados.replace(")",",")
            separados = separados.replace(";","")
            separados = re.split(r",",separados)
            separados[0] = separados[0].replace(" ","")
            separados[1] = separados[1].replace(" ","")
            separados[2] = separados[2].replace("'","")
            separados[2] = separados[2].replace(" ","")
            separados[3] = separados[3].replace(" ","")
            # NODO coordinates are 1-based in the file, 0-based internally.
            nodos.append(nodo(int(separados[0])-1, int(separados[1])-1, separados[2], separados[3]))
        elif re.search(pattern_defecto, i):
            separado = re.findall(r"\(.*\).*",i)
            separados = separado[0].replace("(","")
            separados = separados.replace(")",",")
            separados = separados.replace(";","")
            separados = re.split(r",",separados)
            separados[0] = separados[0].replace("'","")
            separados[0] = separados[0].replace(" ","")
            separados[1] = separados[1].replace(" ","")
            # Replace '#' placeholders with the default name/colour.
            for nod in nodos:
                if nod.nombre == "#":
                    nod.nombre = separados[0]
                    nombre_def = separados[0]
                if nod.color == "#":
                    nod.color = separados[1]
                    color_def = separados[1]
    # NOTE(review): nombre_def/color_def are function locals here (assigned in
    # the DEFECTO branch), so if the file lacks a DEFECTO line or no '#'
    # placeholder exists, the grid fill below raises UnboundLocalError --
    # confirm inputs always carry a DEFECTO line.
    mat = []
    for i in range(0,int(propiedades["columna"])):
        mat.append([])
        for j in range(0, int(propiedades["fila"])):
            # Start every cell as a default node; coordinates kept as strings.
            mat[i].append(nodo(str(j),str(i),nombre_def, color_def))
    # Overlay the explicitly declared nodes onto the default-filled grid.
    for i in range(0,int(propiedades["columna"])):
        for j in range(0, int(propiedades["fila"])):
            for k in nodos:
                if mat[i][j].fila == str(int(k.fila)) and mat[i][j].columna == str(int(k.columna)):
                    mat[i][j] = k
    # for i in range(0,int(propiedades["columna"])):
    #     for j in range(0, int(propiedades["fila"])):
    #         print(mat[i][j].fila, mat[i][j].columna,mat[i][j].nombre, mat[i][j].color)
    # print(mat)
    matriz = (propiedades, mat)
    # for i in nodos:
    #     print(i.nombre, i.color, i.columna, i.fila)
    graficar_matriz(matriz)
# leer_archivo_matriz("Matriz.lfp") |
6,632 | aae280e049c00e70e2214662a07eee8bfa29227e | import sys
import pygame
import pygame.camera
from pygame.locals import *
from PIL import Image
pygame.init()
pygame.camera.init()  # camera subsystem must be initialised before enumeration
# List the device identifiers of all attached cameras.
camlist = pygame.camera.list_cameras()
print(camlist)
# images = map(Image.open, ['Test1.jpg', 'Test2.jpg', 'Test3.jpg'])
# widths, heights = zip(*(i.size for i in images))
# total_width = sum(widths)
# max_height = max(heights)
# new_im = Image.new('RGB', (total_width, max_height))
# x_offset = 0
# for im in images:
# new_im.paste(im, (x_offset,0))
# x_offset += im.size[0]
# new_im.save('test.jpg') |
6,633 | d0bd08bea65878f5fccfc4affecdf53cc36179df | import cv2 as cv
#! THESE ARE IMAGES THAT AREN'T DOWNSIZED
#original_image_1 = cv.imread("hamburger_face.JPG")
#original_image_2 = cv.imread("hammock_reading.JPG")
#original_image_3 = cv.imread("sofa_face.JPG")
#original_image_4 = cv.imread("frisbee_team.JPG")
# Load the test photo (cv.imread returns None if the file is missing).
original_image_5 = cv.imread("mans_face.JPG")
# TO PRINT OUT ARRAY AND DIMENSIONS
# print(original_image)
# print(original_image.shape)
#grayscale_image = cv.cvtColor(original_image_1, cv.COLOR_BGR2GRAY)
#grayscale_image = cv.cvtColor(original_image_2, cv.COLOR_BGR2GRAY)
#grayscale_image = cv.cvtColor(original_image_3, cv.COLOR_BGR2GRAY)
#grayscale_image = cv.cvtColor(original_image_4, cv.COLOR_BGR2GRAY)
# Haar cascades operate on single-channel input, so convert to grayscale.
grayscale_image = cv.cvtColor(original_image_5, cv.COLOR_BGR2GRAY)
# TO PRINT OUT GRAYSCALE IMG
#cv.imshow("gray_img", grayscale_image)
#cv.waitKey(0)
#cv.destroyAllWindows()
face_cascade = cv.CascadeClassifier('haar_cascade_front.xml')
# Each detection is an (x, y, w, h) bounding box.
detected_faces = face_cascade.detectMultiScale(grayscale_image)
# PRINTS COORDINATES OF FACES
#print(detected_faces)
# Draw a 2px green rectangle around every detected face.
for face in detected_faces:
    x , y , w , h = face
    cv.rectangle(original_image_5, (x, y), (x + w , y + h ), (0 , 255 , 0), 2)
cv.imshow("orig_img", original_image_5)
cv.waitKey(0)
cv.destroyAllWindows()
6,634 | fd04f6f4a03fdbe40e400d04e5759ef9ef30f974 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 2 16:07:25 2018
@author: Yigao
"""
import re
from nltk.tokenize import TweetTokenizer
from nltk.corpus import stopwords
from wordcloud import WordCloud
import matplotlib.pyplot as plt
## create a tokenizer
hfilename = "file.txt"
linecount=0
hashcount=0
wordcount=0
BagOfWords=[]
BagOfHashes=[]
BagOfLinks=[]
# Sort every token of every line into hashtags, links, or plain words.
with open(hfilename, "r") as file:
    for line in file:
        #print(line,"\n")
        tweetSplitter = TweetTokenizer(strip_handles=True, reduce_len=True)
        WordList=tweetSplitter.tokenize(line)
        #WordList2=word_tokenize(line)
        #linecount=linecount+1
        #print(WordList)
        #print(len(WordList))
        #print(WordList[0])
        #print(WordList2)
        #print(len(WordList2))
        #print(WordList2[3:6])
        #print("NEXT..........\n")
        regex1=re.compile('^#.+')   # hashtag: starts with '#'
        regex2=re.compile('[^\W\d]') #no numbers
        regex3=re.compile('^http*') # link-like token
        regex4=re.compile('.+\..+') # dotted token, e.g. a bare domain
        for item in WordList:
            if(len(item)>2):
                if((re.match(regex1,item))):
                    #print(item)
                    newitem=item[1:] #remove the hash
                    BagOfHashes.append(newitem)
                    hashcount=hashcount+1
                elif(re.match(regex2,item)):
                    if(re.match(regex3,item) or re.match(regex4,item)):
                        BagOfLinks.append(item)
                    else:
                        BagOfWords.append(item)
                        wordcount=wordcount+1
                else:
                    pass
            else:
                pass
#print(linecount)
#print(BagOfWords)
#print(BagOfHashes)
#print(BagOfLinks)
BigBag=BagOfWords+BagOfHashes
## create Word Cloud
IgnoreThese=[] #other irrelevant words
filtered_words = [] #list of words ready for wordcloud
# Drop stopwords (and any manual ignore list), lower-casing everything.
for word in BigBag:
    if (word.lower() not in stopwords.words()) and (word.lower() not in IgnoreThese):
        filtered_words.append(word.lower())
word_string = " ".join(filtered_words)
# Persist the filtered words for the cloud (one string) and Tableau (one per line).
with open("wordcloud.txt", "w") as f:
    f.write(word_string)
with open("tableau.txt", "w") as f:
    for s in filtered_words:
        f.write("%s\n" % s)
TwitterWordCloud = WordCloud(width = 800, height = 800, background_color = "white", stopwords = None,
                min_font_size = 10).generate(word_string)
plt.figure(figsize = (8,8), facecolor = None)
plt.imshow(TwitterWordCloud)
plt.axis("off")
plt.tight_layout(pad = 0)
plt.show()
6,635 | f2c96b3133137019dc6bd462f096f3b4c5f12648 | # Generated by Django 3.1.1 on 2020-10-29 13:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registered_user', '0005_auto_20201029_1710'),
]
operations = [
migrations.AlterField(
model_name='user_details',
name='dateofbirth',
field=models.DateField(null=True),
),
]
|
6,636 | 3eca3066a6c6484257ca17164d35654812a87b80 | n, imp = list(map(int, input().split()))
villagers = {}
peoples = []
susList = set()
for i in range(n):
peeps = set(list(map(int, input().split()))[1:])
# Initialize the set
villagers[i+1] = villagers.get(i+1, set())
for p in peeps:
if i+1 in peeps:
susList.add(i+1)
break
villagers[p] = villagers.get(p, set()) | {i+1}
peoples.append(peeps)
# Confirmed imposters
queue = [s for s in susList]
while queue:
# Everyone that voted for them is an imposter
s = queue.pop()
queue.extend(list(villagers[s]))
susList |= set(villagers[s])
villagers[s] = set()
# Discredit all imposter votes
for s in susList:
for p in peoples[s-1]:
try:
villagers[p].remove(s)
except:
pass
for k, v in sorted(villagers.items(), key=lambda x: x[0]):
if imp - len(susList) >= (n- len(susList)) // 2:
print(0)
elif k in susList:
print(0)
elif len(v) >= imp - len(susList):
print(1)
else:
print(0)
|
6,637 | 15134d7e4036c102bc9d2ba4d321fadd0467100f | from django.contrib import admin
from django.db import models
from tinymce.widgets import TinyMCE
from .models import UserInfo
# Register your models here.
class UserInfoAdmin(admin.ModelAdmin):
    """Admin configuration for UserInfo.

    Fix: 'facebook_link' was listed twice in list_display, search_fields and
    list_display_links; each field now appears exactly once. Dead
    commented-out entries were removed for readability.
    """
    # Columns shown on the change list.
    list_display = [
        'user_name',
        'user_profession',
        'user_phone',
        'user_email',
        'user_address',
        'facebook_link',
        'instagram_link',
        'telegram_link',
        'whatsup_link',
        'linkedin_link',
        'github_link',
        'stackoverflow_link',
    ]
    # Fields matched by the admin search box.
    search_fields = [
        'user_name',
        'user_profession',
        'user_phone',
        'user_email',
        'user_address',
        'facebook_link',
        'instagram_link',
        'telegram_link',
        'whatsup_link',
        'linkedin_link',
        'github_link',
        'stackoverflow_link',
    ]
    # Columns that link to the change form. Contact fields are excluded here
    # because they are edited in place via list_editable below.
    list_display_links = [
        'user_name',
        'facebook_link',
        'instagram_link',
        'telegram_link',
        'whatsup_link',
        'linkedin_link',
        'github_link',
        'stackoverflow_link',
    ]
    # Columns editable directly on the change list.
    list_editable = [
        'user_profession',
        'user_phone',
        'user_email',
        'user_address',
    ]
    # Change-form layout, grouped by topic.
    fieldsets = (
        ('Basic Info', {
            'fields': [
                'user_image',
                'user_name',
                'user_profession',
            ],
        }),
        ('Contact Info', {
            'fields': [
                'user_phone',
                'user_email',
                'user_address',
            ],
        }),
        ('Social Links', {
            'fields': [
                'facebook_link',
                'instagram_link',
                'telegram_link',
                'whatsup_link',
                'linkedin_link',
                'github_link',
                'stackoverflow_link',
            ],
        }),
        ('Core Info', {
            'fields': [
                'user_info',
                'user_experience',
                'user_edu',
            ],
        }),
    )
    # Use the TinyMCE rich-text editor for all TextField model fields.
    formfield_overrides = {
        models.TextField: {'widget': TinyMCE}
    }


admin.site.register(UserInfo, UserInfoAdmin)
6,638 | 70c084dab8469ca34b0e3e5174101111e695f1ca | class TflearnDataSourceExtraTemplate(object):
"""
Base class for TFLearn's DataSource (if we use wrapping).
Parameters:
----------
rewrite_data_aug : bool
use wrapper for data augmentation
"""
def __init__(self, rewrite_data_aug=False):
self.rewrite_data_aug = rewrite_data_aug
|
6,639 | d988cfebeec37df700f46bbb027a4980ba624d30 | import numpy as np
# data I/O
# data I/O
data = open('input.txt', 'r').read() # should be simple plain text file
chars = list(set(data))  # vocabulary = distinct characters in the corpus
data_size, vocab_size = len(data), len(chars)
print("chars: ", chars)
#one-hot encoding
char_to_ix = { ch:i for i,ch in enumerate(chars) }
ix_to_char = { i:ch for i,ch in enumerate(chars) }
# hyperparameters
iteration=50000
hidden_size = 100
seq_length = 25  # number of time steps the RNN is unrolled for
learning_rate = 1e-1
# model parameters
U = np.random.randn(hidden_size, vocab_size)*0.01 # input to hidden
W = np.random.randn(hidden_size, hidden_size)*0.01 # hidden to hidden
V = np.random.randn(vocab_size, hidden_size)*0.01 # hidden to output
bh = np.zeros((hidden_size, 1)) # hidden bias
by = np.zeros((vocab_size, 1)) # output bias
def lossFun(inputs, targets, hprev):
    """One forward/backward pass of the vanilla RNN over a character sequence.

    inputs, targets -- lists of int character indices (targets shifted by one)
    hprev           -- (hidden_size, 1) initial hidden state
    Returns (loss, dU, dW, dV, dbh, dby, last hidden state).
    Reads the module-level parameters U, W, V, bh, by and sizes.
    """
    x, h, yprime = {}, {}, {}
    h[-1] = np.copy(hprev)
    loss = 0
    # forward pass
    for t in range(len(inputs)):
        x[t] = np.zeros((vocab_size,1))
        x[t][inputs[t]] = 1 # encode-1ofk representation
        h[t] = np.tanh(np.dot(U, x[t]) + np.dot(W, h[t-1]) + bh)
        temp=np.dot(V, h[t]) + by
        # Softmax over the output logits.
        yprime[t] = np.exp(temp) / np.sum(np.exp(temp))
        loss += -np.log(yprime[t][targets[t],0]) # softmax (cross-entropy loss) for 1-of-k representaiton
    # backprop
    dU, dW, dV = np.zeros_like(U), np.zeros_like(W), np.zeros_like(V)
    dbh, dby = np.zeros_like(bh), np.zeros_like(by)
    dhnext = np.zeros_like(h[0])
    for t in reversed(range(len(inputs))):
        dy = np.copy(yprime[t])
        dy[targets[t]] -= 1 # backprop into y. http://cs231n.github.io/neural-networks-case-study/#grad
        dV += np.dot(dy, h[t].T)
        dby += dy
        dh = np.dot(V.T, dy) + dhnext # backprop into h
        dhraw = (1 - h[t] * h[t]) * dh # backprop through tanh nonlinearity
        dbh += dhraw
        dU += np.dot(dhraw, x[t].T)
        dW += np.dot(dhraw, h[t-1].T)
        dhnext = np.dot(W.T, dhraw)
    for dparam in [dU, dW, dV, dbh, dby]:
        np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients
    return loss, dU, dW, dV, dbh, dby, h[len(inputs)-1]
n, p = 0, 0  # iteration counter and data pointer
mU, mW, mV = np.zeros_like(U), np.zeros_like(W), np.zeros_like(V)
mbh, mby = np.zeros_like(bh), np.zeros_like(by) # memory variables for Adagrad
smooth_loss = -np.log(1.0/vocab_size)*seq_length # loss at iteration 0
for n in range(iteration):
    # Reset the hidden state and rewind the pointer at the start of the data.
    if p+seq_length+1 >= len(data) or n == 0:
        hprev = np.zeros((hidden_size,1)) # reset RNN memory
        p = 0
    # Inputs and targets are the same window shifted by one character.
    inputs = [char_to_ix[ch] for ch in data[p:p+seq_length]]
    targets = [char_to_ix[ch] for ch in data[p+1:p+seq_length+1]]
    loss, dU, dW, dV, dbh, dby, hprev = lossFun(inputs, targets, hprev)
    # Exponentially smoothed loss for progress reporting.
    smooth_loss = smooth_loss * 0.999 + loss * 0.001
    if n % 100 == 0:
        print (n,smooth_loss)
    # perform parameter update with Adagrad
    # for param, dparam, mem in zip([U, W, V, bh, by],
    #                               [dU, dW, dV, dbh, dby],
    #                               [mU, mW, mV, mbh, mby]):
    #     mem += dparam * dparam
    #     param += -learning_rate * dparam / np.sqrt(mem + 1e-8) # adagrad update
    param=[U, W, V, bh, by]
    dparam=[dU, dW, dV, dbh, dby]
    mem=[mU, mW, mV, mbh, mby]
    for i in range(len(param)):
        # In-place += mutates the arrays the module-level names refer to.
        mem[i] += dparam[i] * dparam[i]
        param[i] += -learning_rate * dparam[i] / np.sqrt(mem[i] + 1e-8) # adagrad update
    p += seq_length # move data pointer
# n += 1 # iteration counter
# if n>iteration:
#     print("done")
#     sys.exit(0)
6,640 | b6527a09f346ee1b7dd446a0ff21995a995481a8 | import argparse
def parse_args():
    """
    Parse command-line arguments to train and evaluate a baseline Mask R-CNN.

    (The previous docstring described an unrelated MM-Fit multimodal
    activity-recognition network -- copied from another project.)
    :return: Populated namespace.
    """
    parser = argparse.ArgumentParser(description='baseline Mask R-CNN')
    parser.add_argument('--dataset', required=True,
                        metavar="/path/to/dataset/",
                        help='Directory of the dataset')
    parser.add_argument('--continue_train', type=str, required=False, default='None',
                        metavar="/path/to/latest/weights.h5", help="Path to latest training weights .h5 file")
    parser.add_argument('--weight', required=False,
                        metavar='/path/to/pretrained/weight.h5', help="Path to trained weight")
    parser.add_argument('--image', required=False,
                        metavar='/path/to/testing/image/directory', help="Path to testing image directory")
    # Fixed copy-pasted help/metavar: --video previously described an image directory.
    parser.add_argument('--video', required=False,
                        metavar='/path/to/testing/video/directory', help="Path to testing video directory")
    return parser.parse_args()
6,641 | 05144338cc9c0c65010e0b8a3dd6fb50f6343214 | def climb_ways(n, k): |
6,642 | 1d1f1c9b70ca487b48593c85c3e0b5afc10f0b07 | import os
import sys
import random
import pygame
import time
from pygame import locals
SCREEN_WIDTH = 1280
SCREEN_HEIGHT = 1024
class Moto(pygame.sprite.Sprite):
    """Bike sprite: holds the image, its on-screen rect and a heading code."""

    def __init__(self, player_num, start_direction):
        pygame.sprite.Sprite.__init__(self)
        # Each player has their own bike image, e.g. "motor0.png".
        self.image = pygame.image.load("motor" + str(player_num) + ".png").convert()
        self.orig_image = self.image
        # Fetch the rectangle object that has the dimensions of the image.
        # Position is updated through rect.x / rect.y.
        self.rect = self.image.get_rect()
        self.direction = start_direction

    def move_single_axis(self, dx, dy):
        """Shift the sprite's rect along one axis."""
        self.rect.x += dx
        self.rect.y += dy

    def _turn(self, direction, angle):
        """Record the heading code and re-rotate from the pristine image."""
        self.direction = direction
        self.image = pygame.transform.rotate(self.orig_image, angle)

    # Heading codes: 0=right, 1=left, 2=up, 3=down. Horizontal headings use
    # the unrotated image; vertical headings use a 90-degree rotation.
    def moveRight(self):
        self._turn(0, 0)

    def moveLeft(self):
        self._turn(1, 0)

    def moveUp(self):
        self._turn(2, 90)

    def moveDown(self):
        self._turn(3, 90)
# Class for the orange dude
class Player(object):
    """A player: a collision rect plus its Moto sprite moving on a heading."""

    # Heading code -> (dx, dy) step applied every frame by moveOn().
    _STEPS = {0: (2, 0), 1: (-2, 0), 2: (0, -2), 3: (0, 2)}

    def __init__(self, player_num, px, py, sx, sy, start_direction):
        self.player_num = player_num
        self.rect = pygame.Rect(px, py, sx, sy)
        self.direction = start_direction
        self.moto = Moto(player_num, start_direction)
        self.moto.rect.x = px
        self.moto.rect.y = py

    # Turning: a 180-degree reversal (running into your own trail head-on)
    # is disallowed for each axis.
    def moveRight(self):
        if self.direction != 1:
            self.direction = 0
            self.moto.moveRight()

    def moveLeft(self):
        if self.direction != 0:
            self.direction = 1
            self.moto.moveLeft()

    def moveUp(self):
        if self.direction != 3:
            self.direction = 2
            self.moto.moveUp()

    def moveDown(self):
        if self.direction != 2:
            self.direction = 3
            self.moto.moveDown()

    def moveOn(self):
        """Advance one fixed-size step along the current heading."""
        dx, dy = self._STEPS[self.direction]
        self.move(dx, dy)

    def move(self, dx, dy):
        """Move each axis separately (a wall segment is laid per axis)."""
        if dx != 0:
            self.move_single_axis(dx, 0)
            self.moto.move_single_axis(dx, 0)
        if dy != 0:
            self.move_single_axis(0, dy)
            self.moto.move_single_axis(0, dy)

    def move_single_axis(self, dx, dy):
        self.rect.x += dx
        self.rect.y += dy
        # Leave a wall segment behind at the new centre (after the movement).
        Wall(self.player_num, (self.rect.centerx, self.rect.centery))
# Nice class to hold a wall rect
class Wall(object):
    """A 3x3 wall segment; registers itself in Game.walls for its player."""

    def __init__(self, player_num, pos):
        self.rect = pygame.Rect(pos[0], pos[1], 3, 3)
        Game.walls[player_num].append(self)
# MAIN
class Game:
    """Two-player Tron-style light-cycle round.

    walls is a class-level pair of lists (one per player) that Wall
    instances append themselves to; it is reset at the start of each round.
    """
    walls = [[], []]
    def main(self):
        """Run one round: 3-2-1 countdown, game loop, game-over screen.

        Returns True when a player presses fire/Enter on the game-over
        screen (back to menu).
        """
        winner = 0
        screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
        clock = pygame.time.Clock()
        # walls for 2 players: lists in list
        Game.walls = [[], []]
        # starting positions
        player = Player(0, SCREEN_WIDTH - 80, int(SCREEN_HEIGHT / 2), 2, 16, 1)
        player2 = Player(1, 80, int(SCREEN_HEIGHT / 2), 2, 16, 0)
        # JOYSTICK
        try:
            pygame.joystick.init()
            joysticks = [pygame.joystick.Joystick(x) for x in range(pygame.joystick.get_count())]
            joysticks[0].init()
            joysticks[1].init()
            player1_joystick = joysticks[0]
            player2_joystick = joysticks[1]
        except IndexError:
            # Fewer than two joysticks attached: keyboard-only play.
            player1_joystick = None
            player2_joystick = None
        # 3-2-1 countdown splash screens, one second each.
        end = pygame.image.load('number3.png')
        screen.fill((0, 0, 0))
        screen.blit(end, ((0.5 * SCREEN_WIDTH) - (0.5 * 500), (0.5 * SCREEN_HEIGHT) - (0.5 * 500)))
        pygame.display.flip()
        pygame.time.wait(1000)
        end = pygame.image.load('number2.png')
        screen.fill((0, 0, 0))
        screen.blit(end, ((0.5 * SCREEN_WIDTH) - (0.5 * 500), (0.5 * SCREEN_HEIGHT) - (0.5 * 500)))
        pygame.display.flip()
        pygame.time.wait(1000)
        end = pygame.image.load('number1.png')
        screen.fill((0, 0, 0))
        screen.blit(end, ((0.5 * SCREEN_WIDTH) - (0.5 * 500), (0.5 * SCREEN_HEIGHT) - (0.5 * 500)))
        pygame.display.flip()
        pygame.time.wait(1000)
        # end = pygame.image.load('arcade.jpg').convert()
        # screen.blit(end, ((0.5 * SCREEN_WIDTH) - (0.5 * 500), (0.5 * SCREEN_HEIGHT) - (0.5 * 500)))
        pygame.display.flip()
        # background_image = pygame.transform.scale(pygame.image.load('arcade.jpg').convert(), (1280, 1024))
        # screen.blit(background_image, [0, 0])
        running = True
        while running:
            clock.tick(60)
            for e in pygame.event.get():
                if e.type == pygame.QUIT:
                    running = False
                if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:
                    running = False
                # JOYSTICK
                # NOTE(review): joystick 0 steers player2 and joystick 1
                # steers player -- confirm the crossed mapping is intended.
                try:
                    if e.type == pygame.locals.JOYAXISMOTION:
                        player1jx, player1jy = player1_joystick.get_axis(0), player1_joystick.get_axis(1)
                        if player1jx < 0:
                            player2.moveLeft()
                        if player1jx > 0:
                            player2.moveRight()
                        if player1jy < 0:
                            player2.moveUp()
                        if player1jy > 0:
                            player2.moveDown()
                        player2jx, player2jy = player2_joystick.get_axis(0), player2_joystick.get_axis(1)
                        if player2jx < 0:
                            player.moveLeft()
                        if player2jx > 0:
                            player.moveRight()
                        if player2jy < 0:
                            player.moveUp()
                        if player2jy > 0:
                            player.moveDown()
                except:
                    # No joysticks attached: ignore and rely on the keyboard.
                    pass
            # PLAYER 1
            # Move the player if an arrow key is pressed
            key = pygame.key.get_pressed()
            if key[pygame.K_LEFT]:
                player.moveLeft()
            if key[pygame.K_RIGHT]:
                player.moveRight()
            if key[pygame.K_UP]:
                player.moveUp()
            if key[pygame.K_DOWN]:
                player.moveDown()
            player.moveOn()
            # PLAYER 2
            key = pygame.key.get_pressed()
            if key[pygame.K_a]:
                player2.moveLeft()
            if key[pygame.K_d]:
                player2.moveRight()
            if key[pygame.K_w]:
                player2.moveUp()
            if key[pygame.K_s]:
                player2.moveDown()
            player2.moveOn()
            # check borders
            if player.moto.rect.x < 0 or player.moto.rect.x > SCREEN_WIDTH:
                winner = 2
                running = False
            if player2.moto.rect.x < 0 or player2.moto.rect.x > SCREEN_WIDTH:
                winner = 1
                running = False
            if player.moto.rect.y < 0 or player.moto.rect.y > SCREEN_HEIGHT:
                winner = 2
                running = False
            if player2.moto.rect.y < 0 or player2.moto.rect.y > SCREEN_HEIGHT:
                winner = 1
                running = False
            # Draw the scene
            # screen.blit(background_image, [0, 0])
            # pygame.display.flip()
            screen.fill((0, 0, 0))
            # Player 1 walls
            counter1 = 0
            counter2 = 0
            # Exclude the newest segments right behind each bike so a rider
            # does not instantly collide with their own fresh trail.
            coll_range = len(Game.walls[0]) - (player.moto.rect.width / 2 + 10)
            coll_range_2 = len(Game.walls[1]) - (player2.moto.rect.width / 2 + 10)
            for wall in Game.walls[0]:
                if player2.moto.rect.colliderect(wall.rect):
                    winner = 1
                    running = False
                if (counter1 < coll_range) and player.moto.rect.colliderect(wall.rect):
                    winner = 2
                    running = False
                counter1 += 1
                pygame.draw.rect(screen, (255, 0, 0), wall.rect)
            # Player 2 walls
            for wall in Game.walls[1]:
                if player.moto.rect.colliderect(wall.rect):
                    winner = 2
                    running = False
                if (counter2 < coll_range_2) and player2.moto.rect.colliderect(wall.rect):
                    winner = 1
                    running = False
                counter2 += 1
                pygame.draw.rect(screen, (0, 0, 255), wall.rect)
            # Player 1
            pygame.draw.rect(screen, (255, 200, 0), player.rect)
            screen.blit(player.moto.image, (player.moto.rect.x, player.moto.rect.y))
            # Player 2
            pygame.draw.rect(screen, (255, 200, 0), player2.rect)
            screen.blit(player2.moto.image, (player2.moto.rect.x, player2.moto.rect.y))
            pygame.display.flip()
        # GAME OVER
        print("Winner: ", winner)
        running = True
        clock = pygame.time.Clock()
        sound = pygame.mixer.Sound('blast.wav')
        sound.play(loops=0, maxtime=0, fade_ms=0)
        # Game-over screen: wait for fire button / Enter to return to menu.
        while running:
            clock.tick(60)
            for e in pygame.event.get():
                if e.type == pygame.JOYBUTTONDOWN:
                    player1Button = player1_joystick.get_button(0)
                    if (player1Button > 0):
                        running = False
                        print("BACK TO MENU")
                        return True
                    player2Button = player2_joystick.get_button(0)
                    if (player2Button > 0):
                        running = False
                        print("BACK TO MENU")
                        return True
                if e.type == pygame.KEYDOWN and (e.key == pygame.K_KP_ENTER or e.key == pygame.K_RETURN):
                    running = False
                    print("BACK TO MENU")
                    return True
            end = pygame.image.load('gameover.png')
            screen.blit(end, ((0.5 * SCREEN_WIDTH) - (0.5 * 1024), (0.5 * SCREEN_HEIGHT) - (0.5 * 768)))
            screen.fill((0, 0, 0))
            screen.blit(end, (10, 10))
            if winner == 2:
                myfont = pygame.font.SysFont("monospace", 72)
                label = myfont.render('Blue won!', 1, (0, 0, 225))
                screen.blit(label, ((0.5 * SCREEN_WIDTH) - (0.5 * 500), (0.5 * SCREEN_HEIGHT) - (0.5 * 750)))
            else:
                myfont = pygame.font.SysFont("monospace", 72)
                label = myfont.render('Red won!', 1, (255, 0, 0))
                screen.blit(label, ((0.5 * SCREEN_WIDTH) - (0.5 * 500), (0.5 * SCREEN_HEIGHT) - (0.5 * 750)))
            pygame.display.flip()
|
6,643 | 2238345a69c2d7a1958a23a470dcb2be6469caeb | import math3d
import math
import pygame
import random
class PBody(object):
    """ A physics-enabled circular object with position, velocity and mass. """
    def __init__(self, pos, mass=1, rad=10, vel=(0,0), color=(255,255,255)):
        self.pos = math3d.VectorN(pos)
        self.vel = math3d.VectorN(vel)
        self.rad = rad # in pixels (bug fix: was hard-coded to 10, ignoring the parameter)
        self.color = color
        self.mass = mass
    def render(self, surf):
        """ Draw this body as a filled circle on surf. """
        pygame.draw.circle(surf, self.color, \
                self.pos.toIntTuple(), self.rad)
    def update(self, dT):
        """ Updates our object:
              1. Changes position due to current velocity.
              2. (optional) Apply friction
              3. (optional) Enforce a terminal velocity
        """
        self.pos += self.vel * dT
    def applyForce(self, F, dT):
        """ Modify velocity such that we apply the FORCE
        F (a VectorN) for dT seconds.  From F = m*a: a = F / m,
        so the velocity change is dv = (F / m) * dT.
        (Bug fix: this was a no-op stub, so forces - e.g. the gravity
        applied every frame by the demo loop - had no effect.) """
        self.vel += F * (dT / self.mass)
class Player(PBody):
    """ Player-controlled body: adds an image, a facing angle and a bullet
    list on top of the basic physics object.  Several methods are
    intentionally left as exercise stubs. """
    def loadImage(self, file_name):
        """ file_name is a string (of the image path / filename)
        e.g. 'b4_top.png' or 'imgs\\b4_top.png' """
        # NOTE: this also initializes draw_angle and bullets, so it must be
        # called before update()/render() are used on a Player.
        self.surf = pygame.image.load(file_name)
        self.draw_angle = 0 # In degrees
        self.bullets = []
    def moveTowards(self, mx, my):
        """ Makes the player accelerate towards the mouse. Note:
        this will call self.applyForce. """
        # Exercise stub - intentionally unimplemented.
        pass
    def fire(self):
        """ Create a new PBody in self.bullets. Give it the position of
        the front of the snow-mobile and a velocity based on the draw-
        direction you might be able to use math3d.polar_to_cartesian here). """
        # Exercise stub - body is just the docstring.
    def update(self, dT):
        # Call the base-class (PBody) update first
        PBody.update(self, dT)
        # Do the player-specific updates here. e.g. update all bullets and remove
        # "dead" ones.
    def render(self, surf):
        PBody.render(self, surf) # Call base-class render
        # Draw the snowmobile image (or some other image),
        # rotated (and centered) appropriately.
        # NOTE(review): tempS is computed but never blitted to surf, so the
        # rotated image is not actually drawn - confirm this is an intended stub.
        tempS = pygame.transform.rotate(self.surf, self.draw_angle)
# Demo driver: drops a single PBody under a constant downward force until
# ESC is pressed.
pygame.display.init()
screen = pygame.display.set_mode((800,600))
clock = pygame.time.Clock()
done = False
B = PBody((400,300), 1)  # one unit-mass body at the screen center
while not done:
    # Update
    deltaTime = clock.tick() / 1000.0  # frame time in seconds
    B.applyForce(math3d.VectorN(0,10), deltaTime) # Called ONCE per frame to apply gravity.
    B.update(deltaTime) # Called ONCE per frame
    # Input
    pygame.event.get()  # pump the event queue so key/mouse state is fresh
    kPress = pygame.key.get_pressed()
    mPress = pygame.mouse.get_pressed()
    mPos = pygame.mouse.get_pos()
    if kPress[pygame.K_ESCAPE]:
        done = True
    # Draw
    screen.fill((0,0,0))
    B.render(screen)
    pygame.display.flip()
pygame.display.quit()
|
6,644 | aad3c104432a1a028d96263236133e495536ee69 | from locations.storefinders.storelocatorwidgets import StoreLocatorWidgetsSpider
class Pharmacy4LessAUSpider(StoreLocatorWidgetsSpider):
    """Spider for Pharmacy 4 Less (Australia) store locations; all crawling
    is driven by the shared StoreLocatorWidgets base class."""
    name = "pharmacy_4_less_au"
    item_attributes = {"brand": "Pharmacy 4 Less", "brand_wikidata": "Q63367608"}
    # StoreLocatorWidgets account key the base spider uses to fetch the data feed.
    key = "6c0hBJeL5yk8cmaKJGNjTu0JhWNaMQpX"
|
6,645 | 65b90fccd0ee74b369475aa9fe33f159881c8b82 | class cal4:
def setdata(self,n1):
self.n1 = n1
def display(self):
return n1*n1
n1 = int(input("Enter number: "))
c = cal4()
print(c.display()) |
6,646 | 36bdd6f7c130914856ddf495c50f928405c345aa | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-11-23 19:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a nullable, non-editable ``last_in_log`` timestamp to both the
    StageCollection and StageSource models (auto-generated migration)."""
    dependencies = [
        ('mocbackend', '0034_auto_20181122_1903'),
    ]
    operations = [
        migrations.AddField(
            model_name='stagecollection',
            name='last_in_log',
            field=models.DateTimeField(blank=True, default=None, editable=False, null=True),
        ),
        migrations.AddField(
            model_name='stagesource',
            name='last_in_log',
            field=models.DateTimeField(blank=True, default=None, editable=False, null=True),
        ),
    ]
|
6,647 | b1530c664fa236e61ff50bca502bf79730c3386c | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# All about users.
#
# author: ze.apollo@gmail.com
#
from client import ClientHelper
from mongodb import MongoDBClient
class FixedData:
    """Cache-aside lookup: consult MongoDB first, fall back to Douban and
    write the fetched record back to the cache.

    The fetch/upsert helpers (``get_data_from_mongodb``,
    ``get_data_from_douban``, ``upsert_data_into_mongo``) are expected to
    be provided elsewhere - TODO confirm against the rest of the module.
    """

    def get_data(self, id):
        """Return the data for *id*.

        On a cache hit the MongoDB record is returned directly.  On a miss
        the record is fetched from Douban, upserted into MongoDB and then
        returned.  (Bug fix: the fetched data was previously upserted but
        never returned, so every cache miss yielded ``None``.)
        """
        data = self.get_data_from_mongodb(id)
        if data:
            return data
        data = self.get_data_from_douban(id)
        self.upsert_data_into_mongo(data)
        return data
6,648 | 1dd235ecfe577b508d0777e8c70026114aeb154f | from tdm.lib.device import DddDevice, DeviceAction, DeviceWHQuery, Validity
class CallJohnDevice(DddDevice):
    """Phone-call DDD device: a small contact book plus the actions and
    queries needed to look up and dial a contact's number."""

    class MakeCall(DeviceAction):
        """Action that places a call to the selected contact/number type."""
        def perform(self, select_contact, select_number):
            # Actual dialing is out of scope; the lookups mirror the original
            # behaviour (both resolved values are currently unused).
            contact = self.device.CONTACTS.get(select_contact)
            number_type = self.device.CONTACTS.get(select_number)
            return True

    class contact_lookup(DeviceWHQuery):
        """WH-query: resolve a contact's phone number of the given type."""
        def perform(self, select_contact, select_number):
            # NOTE(review): assumes PhoneNumberAvailable has validated the
            # contact, otherwise .get(select_contact) is None and this raises.
            number = self.device.PHONE_NUMBERS.get(select_contact).get(select_number)
            return [number]

    class PhoneNumberAvailable(Validity):
        """Valid only when the contact has a phone-number entry."""
        def is_valid(self, select_contact):
            # Idiom fix: compare against None with `is`, not `==`.
            return self.device.PHONE_NUMBERS.get(select_contact) is not None

    # Contact identifiers (ontology individuals).
    JOHN = "contact_john"
    LISA = "contact_lisa"
    MARY = "contact_mary"
    ANDY = "contact_andy"

    # Number-type keys.
    MOBILE = "mobile"
    WORK = "work"
    HOME = "home"

    # Contact -> {number type -> number}; Andy deliberately has no numbers.
    PHONE_NUMBERS = {
        JOHN: {
            MOBILE: "0701234567",
            WORK: "0736582934",
            HOME: "031122363"
        },
        LISA: {
            MOBILE: "0709876543",
            WORK: "0763559230",
            HOME: "031749205"
        },
        MARY: {
            MOBILE: "0706574839",
            WORK: "0784736475",
            HOME: "031847528"
        },
        ANDY: None
    }

    # Display name -> contact identifier.
    CONTACTS = {
        "John": JOHN,
        "Lisa": LISA,
        "Mary": MARY,
        "Andy": ANDY,
    }
6,649 | a382edb861a43ac3065a781ea996a8d1dd819954 | def game_manager(info_list):
dictionary = {}
for piece_info in info_list:
piece_info = piece_info.split('||')
piece_info[2] = int(piece_info[2])
if piece_info[2] not in dictionary:
dictionary[piece_info[2]] = {(piece_info[1],piece_info[0])}
dictionary[piece_info[2]].add((piece_info[1],piece_info[0]))
print(dictionary)
info = ['Final Fantasy VII||SCEA||1997','Mirror’s Edge||Electronic Arts||2008','GTA 4||Rockstar Games||2008','Grandia||SCEA||1997', \
'Half Life 2||Valve||2004']
game_manager(info)
|
6,650 | d18c0fa29ccdabdd9e11622e8aaec91ff96117df | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Alonso Vidales"
__email__ = "alonso.vidales@tras2.es"
__date__ = "2013-11-11"
class ConnectedSets:
    """
    This is a classic percolation problem, the algorithms uses an array
    of integer to represent tees, each tree will be a set of connected elements
    """
    # NOTE: Python 2 module (print statements, raw_input, integer `/`).
    # Union-find over the flattened matrix: index = row * size + col,
    # parent pointers stored in self.__percolation, roots marked with -1.
    __debug = False
    def link_nodes(self, left, right):
        """
        For two given nodes, search for the root node of each tree, after
        obtaine the two root nodes, point the right root node to the left root
        node connecting the two trees
        To represent the trees, an array is used, the root nodes are -1 and the
        value on each positions points to the position of the parent node
        """
        if self.__debug:
            print "Linking: %s - %s" % (left, right)
        # Flatten (row, col) to a linear index, then walk up to the left root.
        root_left = (left[0] * len(self.__matrix)) + left[1]
        parent_left = self.__percolation[root_left]
        while parent_left != -1:
            root_left = parent_left
            parent_left = self.__percolation[parent_left]
        # Same walk for the right tree.
        root_right = (right[0] * len(self.__matrix)) + right[1]
        parent_right = self.__percolation[root_right]
        while parent_right != -1:
            root_right = parent_right
            parent_right = self.__percolation[parent_right]
        # Union: point the right root at the left root.  No union-by-rank or
        # path compression, so lookups can degrade to O(n) in the worst case.
        if root_right != root_left:
            self.__percolation[root_right] = root_left
            if self.__debug:
                print "Link: %d - %d - %s" % (root_left, root_right, self.__percolation)
    def resolve(self):
        # Link every cell with its equal-valued neighbours (left, top-right,
        # top-left, top - 8-connectivity; right/bottom pairs are covered when
        # the later cell is visited), then count the roots whose cell is 1.
        size = len(self.__matrix)
        for rowPos in range(size):
            for colPos in range(size):
                # Check left connection
                if colPos > 0:
                    if self.__matrix[rowPos][colPos - 1] == self.__matrix[rowPos][colPos]:
                        # Link pos with left
                        self.link_nodes((rowPos, colPos - 1), (rowPos, colPos))
                # Check top-right connection
                if (colPos + 1) < size and rowPos > 0:
                    if self.__matrix[rowPos - 1][colPos + 1] == self.__matrix[rowPos][colPos]:
                        # Link pos with top-right
                        self.link_nodes((rowPos - 1, colPos + 1), (rowPos, colPos))
                # Check top-left connection
                if colPos > 0 and rowPos > 0:
                    if self.__matrix[rowPos - 1][colPos - 1] == self.__matrix[rowPos][colPos]:
                        # Link pos with top-left
                        self.link_nodes((rowPos - 1, colPos - 1), (rowPos, colPos))
                # Check top connection
                if rowPos > 0:
                    if self.__matrix[rowPos - 1][colPos] == self.__matrix[rowPos][colPos]:
                        # Link pos with top
                        self.link_nodes((rowPos - 1, colPos), (rowPos, colPos))
        if self.__debug:
            print self.__percolation
        components = 0
        # Get all the root nodes of the trees (nodes with -1 as parent), and
        # check if the root node on the original matrix contains a 1
        for pos in range(len(self.__percolation)):
            if self.__percolation[pos] == -1 and self.__matrix[pos / size][pos % size] == 1:
                components += 1
        return components
    def __init__(self, matrix):
        # One parent slot per cell; -1 means "root of its own tree".
        self.__percolation = [-1] * (len(matrix) ** 2)
        self.__matrix = matrix
if __name__ == "__main__":
    # Input format (stdin): number of problems; then, per problem, the
    # matrix size followed by that many rows of space-separated cell values.
    for problem in range(int(raw_input())):
        matrix = []
        for row in range(int(raw_input())):
            matrix.append(map(int, raw_input().split()))
        print ConnectedSets(matrix).resolve()
|
6,651 | 1947bd280234189ed35277c449cd708a204ea7a4 | '''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def associate_node(ServerName=None, NodeName=None, EngineAttributes=None):
    """Associate a node with a configuration-management server.

    See also: AWS API Documentation.

    :type ServerName: string
    :param ServerName: [REQUIRED] The name of the server.
    :type NodeName: string
    :param NodeName: [REQUIRED] The name of the node to associate.
    :type EngineAttributes: list
    :param EngineAttributes: Engine-specific name/value pairs; each item is
        a dict with string entries ``'Name'`` and ``'Value'``.
    :rtype: dict
    :return: ``{'NodeAssociationStatusToken': 'string'}`` - a token that can
        be passed to ``describe_node_association_status``.
    """
    pass
def can_paginate(operation_name=None):
    """Check whether a client operation supports pagination.

    :type operation_name: string
    :param operation_name: The operation name - the same snake_case name as
        the client method (e.g. ``create_foo`` for
        ``client.create_foo(**kwargs)``).  Paginatable operations can then
        be driven via ``client.get_paginator('create_foo')``.
    """
    pass
def create_backup(ServerName=None, Description=None):
    """Create an application-level backup of a server (asynchronous).

    While the server is BACKING_UP it cannot be modified and no additional
    backup can be started.  Backups may be created for servers in the
    RUNNING, HEALTHY and UNHEALTHY states; by default at most 50 manual
    backups can exist.

    Raises ``LimitExceededException`` when the manual-backup limit is
    reached, ``InvalidStateException`` when the server state disallows
    backups, ``ResourceNotFoundException`` when the server is unknown, and
    ``ValidationException`` for invalid request parameters.

    See also: AWS API Documentation.

    :type ServerName: string
    :param ServerName: [REQUIRED] The name of the server that you want to back up.
    :type Description: string
    :param Description: A user-defined description of the backup.
    :rtype: dict
    :return: ``{'Backup': {...}}`` - metadata for the new backup: ARN, id,
        type (``'AUTOMATED'`` | ``'MANUAL'``), creation time, description,
        engine name/model/version, instance profile/type, key pair,
        preferred backup & maintenance windows, S3 data size and data/log
        URLs, security-group ids, server name, service-role ARN, status
        (``'IN_PROGRESS'`` | ``'OK'`` | ``'FAILED'`` | ``'DELETING'``) with
        description, subnet ids, tools version and user ARN.
    """
    pass
def create_server(DisableAutomatedBackup=None, Engine=None, EngineModel=None, EngineVersion=None, EngineAttributes=None, BackupRetentionCount=None, ServerName=None, InstanceProfileArn=None, InstanceType=None, KeyPair=None, PreferredMaintenanceWindow=None, PreferredBackupWindow=None, SecurityGroupIds=None, ServiceRoleArn=None, SubnetIds=None, BackupId=None):
    """Create and immediately start a new server (asynchronous).

    The server is usable once it reaches the HEALTHY state.  By default 10
    servers can be created.  Raises ``LimitExceededException`` when the
    server limit is exceeded, ``ResourceAlreadyExistsException`` when a
    server with the same name exists, ``ResourceNotFoundException`` when a
    given ``BackupId`` does not exist, and ``ValidationException`` for
    invalid parameters.

    When no ``SecurityGroupIds`` are supplied, AWS OpsWorks creates a new
    security group that opens the Chef server to the world on TCP 443 (and
    SSH on TCP 22 if a key pair is present).  Restricting those rules to
    known address ranges after creation is recommended.

    See also: AWS API Documentation.

    :type DisableAutomatedBackup: boolean
    :param DisableAutomatedBackup: Enable/disable scheduled backups (default True).
    :type Engine: string
    :param Engine: Configuration-management engine, e.g. ``'Chef'``.
    :type EngineModel: string
    :param EngineModel: Engine model/option, e.g. ``'Single'``.
    :type EngineVersion: string
    :param EngineVersion: Major engine release version.
    :type EngineAttributes: list
    :param EngineAttributes: Engine name/value pairs (dicts with ``'Name'``
        and ``'Value'``).  ``CHEF_PIVOTAL_KEY`` (a base64-encoded RSA
        private key, not stored by AWS) is accepted here and is required to
        access the Chef API.
    :type BackupRetentionCount: integer
    :param BackupRetentionCount: Number of automated backups to keep (default 1);
        the oldest is deleted when the count is exceeded.
    :type ServerName: string
    :param ServerName: [REQUIRED] Unique per account and region; starts with
        a letter, then letters/digits/hyphens, up to 32 characters.
    :type InstanceProfileArn: string
    :param InstanceProfileArn: [REQUIRED] ARN of the EC2 instance profile
        (can be created via the service-role-creation.yaml CloudFormation template).
    :type InstanceType: string
    :param InstanceType: EC2 instance type matching ``^([cm][34]|t2).*``, e.g. ``c3.large``.
    :type KeyPair: string
    :param KeyPair: EC2 key pair enabling SSH access to the instance.
    :type PreferredMaintenanceWindow: string
    :param PreferredMaintenanceWindow: Weekly one-hour window ``DDD:HH:MM``
        (UTC); defaults to a random hour on Tue/Wed/Fri.
    :type PreferredBackupWindow: string
    :param PreferredBackupWindow: ``HH:MM`` (daily) or ``DDD:HH:MM`` (weekly)
        in UTC; defaults to a random daily start time.
    :type SecurityGroupIds: list
    :param SecurityGroupIds: Security groups (within the ``SubnetIds`` VPC)
        to attach; when omitted a permissive group is created (see above).
    :type ServiceRoleArn: string
    :param ServiceRoleArn: [REQUIRED] Service role used by the OpsWorks for
        Chef Automate backend.
    :type SubnetIds: list
    :param SubnetIds: Subnets in which to launch the instance; required for
        EC2-Classic accounts, optional for EC2-VPC.  'Auto Assign Public IP'
        must be enabled.
    :type BackupId: string
    :param BackupId: When set, the server is created from this backup.
    :rtype: dict
    :return: ``{'Server': {...}}`` - the new server's settings and state,
        including name, timestamps, engine details, ``EngineAttributes``
        (which may contain the generated ``CHEF_PIVOTAL_KEY`` and the
        ``CHEF_STARTER_KIT`` base64 ZIP), instance settings, windows,
        security groups, service role, status
        (``'BACKING_UP'`` ... ``'UNHEALTHY'``) and the server ARN.
    """
    pass
def delete_backup(BackupId=None):
    """Delete a manual or automated backup (asynchronous).

    Raises ``InvalidStateException`` when the backup is already deleting,
    ``ResourceNotFoundException`` when it does not exist, and
    ``ValidationException`` for invalid parameters.

    See also: AWS API Documentation.

    :type BackupId: string
    :param BackupId: [REQUIRED] The ID of the backup to delete (format
        ``ServerName-yyyyMMddHHmmssSSS``; list IDs via ``describe_backups``).
    :rtype: dict
    :return: ``{}``
    """
    pass
def delete_server(ServerName=None):
    """Delete a server and its underlying CloudFormation stack (asynchronous).

    The server status moves to DELETING and, once deleted, it is no longer
    returned by ``describe_servers``.  If the CloudFormation stack cannot
    be deleted, neither can the server.  Raises ``InvalidStateException``
    when the server is already deleting, ``ResourceNotFoundException`` when
    it does not exist, and ``ValidationException`` for invalid parameters.

    See also: AWS API Documentation.

    :type ServerName: string
    :param ServerName: [REQUIRED] The ID of the server to delete.
    :rtype: dict
    :return: ``{}``
    """
    pass
def describe_account_attributes():
    """Describe account attributes and limit usage (synchronous).

    See also: AWS API Documentation.

    :rtype: dict
    :return: ``{'Attributes': [{'Name': 'string', 'Maximum': 123,
        'Used': 123}, ...]}`` - one entry per account attribute, with its
        maximum allowed value and current usage.
    """
    pass
def describe_backups(BackupId=None, ServerName=None, NextToken=None, MaxResults=None):
    """Describe backups, newest first (synchronous).

    Without ``BackupId`` or ``ServerName``, all backups are returned.
    Raises ``ResourceNotFoundException`` when the backup does not exist and
    ``ValidationException`` for invalid parameters.

    See also: AWS API Documentation.

    :type BackupId: string
    :param BackupId: Describe a single backup.
    :type ServerName: string
    :param ServerName: Return backups for the named server only.
    :type NextToken: string
    :param NextToken: Continuation token from a previous response; pass it
        back to fetch the remaining items.  A token that was not returned
        previously causes ``InvalidNextTokenException``.
    :type MaxResults: integer
    :param MaxResults: Maximum number of results per call; when more are
        available the response includes a ``NextToken``.
    :rtype: dict
    :return: ``{'Backups': [{...}, ...], 'NextToken': 'string'}`` - each
        backup entry carries ARN, id, type (``'AUTOMATED'`` | ``'MANUAL'``),
        creation time, description, engine details, instance settings,
        windows, S3 data/log info, security groups, server name, service
        role, status (``'IN_PROGRESS'`` | ``'OK'`` | ``'FAILED'`` |
        ``'DELETING'``), subnet ids, tools version and user ARN.
    """
    pass
def describe_events(ServerName=None, NextToken=None, MaxResults=None):
    """Describe a server's events, newest first (synchronous).

    Raises ``ResourceNotFoundException`` when the server does not exist and
    ``ValidationException`` for invalid parameters.

    See also: AWS API Documentation.

    :type ServerName: string
    :param ServerName: [REQUIRED] The name of the server whose events to view.
    :type NextToken: string
    :param NextToken: Continuation token from a previous response; pass it
        back to fetch the remaining items.  An unknown token causes
        ``InvalidNextTokenException``.
    :type MaxResults: integer
    :param MaxResults: Maximum number of results per call; when more are
        available the response includes a ``NextToken``.
    :rtype: dict
    :return: ``{'ServerEvents': [{'CreatedAt': datetime, 'ServerName':
        'string', 'Message': 'string', 'LogUrl': 'string'}, ...],
        'NextToken': 'string'}``
    """
    pass
def describe_node_association_status(NodeAssociationStatusToken=None, ServerName=None):
    """Describe the status of a node association request.

    See also: AWS API Documentation.

    :type NodeAssociationStatusToken: string
    :param NodeAssociationStatusToken: [REQUIRED] Token returned by
        ``associate_node`` / ``disassociate_node``.
    :type ServerName: string
    :param ServerName: [REQUIRED] The name of the server.
    :rtype: dict
    :return: ``{'NodeAssociationStatus': 'SUCCESS'|'FAILED'|'IN_PROGRESS'}``
    """
    pass
def describe_servers(ServerName=None, NextToken=None, MaxResults=None):
    """List configuration-management servers for the account (synchronous).

    Only the results stored in Amazon DynamoDB are returned; no other
    services are queried.  Raises ``ResourceNotFoundException`` when the
    server does not exist and ``ValidationException`` for invalid
    parameters.

    See also: AWS API Documentation.

    :type ServerName: string
    :param ServerName: Describe only the named server.
    :type NextToken: string
    :param NextToken: Continuation token from a previous response; pass it
        back to fetch the remaining items.  An unknown token causes
        ``InvalidNextTokenException``.
    :type MaxResults: integer
    :param MaxResults: Maximum number of results per call; when more are
        available the response includes a ``NextToken``.
    :rtype: dict
    :return: ``{'Servers': [{...}, ...], 'NextToken': 'string'}`` - each
        server entry carries retention count, name, creation time, backup
        flag, endpoint, engine name/model/version, ``EngineAttributes``
        (may include the generated ``CHEF_PIVOTAL_KEY`` and the
        ``CHEF_STARTER_KIT`` base64 ZIP), instance settings, maintenance
        status, windows, security groups, service role, status
        (``'BACKING_UP'`` ... ``'UNHEALTHY'``) with reason, subnet ids and
        the server ARN.
    """
    pass
def disassociate_node(ServerName=None, NodeName=None, EngineAttributes=None):
    """Disassociate a node from a configuration-management server.

    See also: AWS API Documentation.

    :type ServerName: string
    :param ServerName: [REQUIRED] The name of the server.
    :type NodeName: string
    :param NodeName: [REQUIRED] The name of the node to disassociate.
    :type EngineAttributes: list
    :param EngineAttributes: Engine-specific name/value pairs; each item is
        a dict with string entries ``'Name'`` and ``'Value'``.
    :rtype: dict
    :return: ``{'NodeAssociationStatusToken': 'string'}`` - a token that can
        be passed to ``describe_node_association_status``.
    """
    pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
    """Generate a presigned URL for a client method and its arguments.

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for.
    :type Params: dict
    :param Params: The parameters normally passed to ``ClientMethod``.
    :type ExpiresIn: int
    :param ExpiresIn: Seconds the presigned URL stays valid; defaults to
        one hour (3600 seconds).
    :type HttpMethod: string
    :param HttpMethod: HTTP method for the generated URL; defaults to the
        method used in the operation's model.
    """
    pass
def get_paginator(operation_name=None):
    """
    Create a paginator for an operation.
    :type operation_name: string
    :param operation_name: The operation name. This is the same name
    as the method name on the client. For example, if the
    method name is create_foo, and you'd normally invoke the
    operation as client.create_foo(**kwargs), if the
    create_foo operation can be paginated, you can use the
    call client.get_paginator('create_foo').
    :rtype: L{botocore.paginate.Paginator}
    :return: A paginator object for the named operation.
    """
    # Auto-generated service stub: the real implementation is bound by
    # botocore from the service model when the client is constructed.
    pass
def get_waiter():
    """
    Return a waiter object for this client.
    (The original auto-generated docstring was empty; see the botocore
    `Client.get_waiter` documentation for the runtime behavior.)
    """
    # Auto-generated service stub: the real implementation is bound by
    # botocore from the service model when the client is constructed.
    pass
def restore_server(BackupId=None, ServerName=None, InstanceType=None, KeyPair=None):
    """
    Restores a backup to a server that is in a RUNNING , FAILED , or HEALTHY state. When you run RestoreServer, the server's EC2 instance is deleted, and a new EC2 instance is configured. RestoreServer maintains the existing server endpoint, so configuration management of all of the server's client devices should continue to work.
    This operation is asynchronous.
    A InvalidStateException is thrown when the server is not in a valid state. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.
    See also: AWS API Documentation
    :example: response = client.restore_server(
        BackupId='string',
        ServerName='string',
        InstanceType='string',
        KeyPair='string'
    )
    :type BackupId: string
    :param BackupId: [REQUIRED]
    The ID of the backup that you want to use to restore a server.
    :type ServerName: string
    :param ServerName: [REQUIRED]
    The name of the server that you want to restore.
    :type InstanceType: string
    :param InstanceType: The type of the instance to create. Valid values must be specified in the following format: ^([cm][34]|t2).* For example, c3.large . If you do not specify this parameter, RestoreServer uses the instance type from the specified backup.
    :type KeyPair: string
    :param KeyPair: The name of the key pair to set on the new EC2 instance. This can be helpful if any of the administrators who manage the server no longer have the SSH key.
    :rtype: dict
    :return: {}
    :returns:
    (dict) --
    """
    # Auto-generated service stub: the real implementation is bound by
    # botocore from the service model when the client is constructed.
    pass
def start_maintenance(ServerName=None):
    """
    Manually starts server maintenance. This command can be useful if an earlier maintenance attempt failed, and the underlying cause of maintenance failure has been resolved. The server will switch to UNDER_MAINTENANCE state, while maintenance is in progress.
    Maintenance can only be started for HEALTHY and UNHEALTHY servers. A InvalidStateException is thrown otherwise. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.
    See also: AWS API Documentation
    :example: response = client.start_maintenance(
        ServerName='string'
    )
    :type ServerName: string
    :param ServerName: [REQUIRED]
    The name of the server on which to run maintenance.
    :rtype: dict
    :return: {
        'Server': {
            'BackupRetentionCount': 123,
            'ServerName': 'string',
            'CreatedAt': datetime(2015, 1, 1),
            'DisableAutomatedBackup': True|False,
            'Endpoint': 'string',
            'Engine': 'string',
            'EngineModel': 'string',
            'EngineAttributes': [
                {
                    'Name': 'string',
                    'Value': 'string'
                },
            ],
            'EngineVersion': 'string',
            'InstanceProfileArn': 'string',
            'InstanceType': 'string',
            'KeyPair': 'string',
            'MaintenanceStatus': 'SUCCESS'|'FAILED',
            'PreferredMaintenanceWindow': 'string',
            'PreferredBackupWindow': 'string',
            'SecurityGroupIds': [
                'string',
            ],
            'ServiceRoleArn': 'string',
            'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',
            'StatusReason': 'string',
            'SubnetIds': [
                'string',
            ],
            'ServerArn': 'string'
        }
    }
    :returns:
    (string) --
    """
    # Auto-generated service stub: the real implementation is bound by
    # botocore from the service model when the client is constructed.
    pass
def update_server(DisableAutomatedBackup=None, BackupRetentionCount=None, ServerName=None, PreferredMaintenanceWindow=None, PreferredBackupWindow=None):
    """
    Updates settings for a server.
    This operation is synchronous.
    See also: AWS API Documentation
    :example: response = client.update_server(
        DisableAutomatedBackup=True|False,
        BackupRetentionCount=123,
        ServerName='string',
        PreferredMaintenanceWindow='string',
        PreferredBackupWindow='string'
    )
    :type DisableAutomatedBackup: boolean
    :param DisableAutomatedBackup: Setting DisableAutomatedBackup to true disables automated or scheduled backups. Automated backups are enabled by default.
    :type BackupRetentionCount: integer
    :param BackupRetentionCount: Sets the number of automated backups that you want to keep.
    :type ServerName: string
    :param ServerName: [REQUIRED]
    The name of the server to update.
    :type PreferredMaintenanceWindow: string
    :param PreferredMaintenanceWindow:
    DDD:HH:MM (weekly start time) or HH:MM (daily start time).
    Time windows always use coordinated universal time (UTC).
    Valid strings for day of week (DDD ) are: Mon, Tue, Wed, Thr, Fri, Sat, Sun.
    :type PreferredBackupWindow: string
    :param PreferredBackupWindow:
    DDD:HH:MM (weekly start time) or HH:MM (daily start time).
    Time windows always use coordinated universal time (UTC).
    Valid strings for day of week (DDD ) are: Mon, Tue, Wed, Thr, Fri, Sat, Sun.
    :rtype: dict
    :return: {
        'Server': {
            'BackupRetentionCount': 123,
            'ServerName': 'string',
            'CreatedAt': datetime(2015, 1, 1),
            'DisableAutomatedBackup': True|False,
            'Endpoint': 'string',
            'Engine': 'string',
            'EngineModel': 'string',
            'EngineAttributes': [
                {
                    'Name': 'string',
                    'Value': 'string'
                },
            ],
            'EngineVersion': 'string',
            'InstanceProfileArn': 'string',
            'InstanceType': 'string',
            'KeyPair': 'string',
            'MaintenanceStatus': 'SUCCESS'|'FAILED',
            'PreferredMaintenanceWindow': 'string',
            'PreferredBackupWindow': 'string',
            'SecurityGroupIds': [
                'string',
            ],
            'ServiceRoleArn': 'string',
            'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',
            'StatusReason': 'string',
            'SubnetIds': [
                'string',
            ],
            'ServerArn': 'string'
        }
    }
    :returns:
    (string) --
    """
    # Auto-generated service stub: the real implementation is bound by
    # botocore from the service model when the client is constructed.
    # (The original :returns: section repeated CHEF_PIVOTAL_KEY / CHEF_STARTER_KIT
    # text copy-pasted from create_server; it does not apply to this response.)
    pass
def update_server_engine_attributes(ServerName=None, AttributeName=None, AttributeValue=None):
    """
    Updates engine specific attributes on a specified server. Server will enter the MODIFYING state when this operation is in progress. Only one update can take place at a time.
    This operation can be use to reset Chef Server main API key (CHEF_PIVOTAL_KEY ).
    This operation is asynchronous.
    This operation can only be called for HEALTHY and UNHEALTHY servers. Otherwise a InvalidStateException is raised. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.
    See also: AWS API Documentation
    :example: response = client.update_server_engine_attributes(
        ServerName='string',
        AttributeName='string',
        AttributeValue='string'
    )
    :type ServerName: string
    :param ServerName: [REQUIRED]
    The name of the server to update.
    :type AttributeName: string
    :param AttributeName: [REQUIRED]
    The name of the engine attribute to update.
    :type AttributeValue: string
    :param AttributeValue: The value to set for the attribute.
    :rtype: dict
    :return: {
        'Server': {
            'BackupRetentionCount': 123,
            'ServerName': 'string',
            'CreatedAt': datetime(2015, 1, 1),
            'DisableAutomatedBackup': True|False,
            'Endpoint': 'string',
            'Engine': 'string',
            'EngineModel': 'string',
            'EngineAttributes': [
                {
                    'Name': 'string',
                    'Value': 'string'
                },
            ],
            'EngineVersion': 'string',
            'InstanceProfileArn': 'string',
            'InstanceType': 'string',
            'KeyPair': 'string',
            'MaintenanceStatus': 'SUCCESS'|'FAILED',
            'PreferredMaintenanceWindow': 'string',
            'PreferredBackupWindow': 'string',
            'SecurityGroupIds': [
                'string',
            ],
            'ServiceRoleArn': 'string',
            'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',
            'StatusReason': 'string',
            'SubnetIds': [
                'string',
            ],
            'ServerArn': 'string'
        }
    }
    :returns:
    (string) --
    """
    # Auto-generated service stub: the real implementation is bound by
    # botocore from the service model when the client is constructed.
    # (The original :returns: section repeated CHEF_PIVOTAL_KEY / CHEF_STARTER_KIT
    # text copy-pasted from create_server; it does not apply to this response.)
    pass
|
6,652 | 87d1c28819d187944a3cf99b35b1d41eab11b139 | # 1.Create a list of 10 elements of four different data types like int, string, complex and float.
# One 10-element example list per data type. Fix: the exercise asks for 10
# elements of each type, but the original string and complex lists had only 9,
# so a tenth element was appended to each.
i = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]                               # int
f = [10.5, 12.2, 13.7, 14.9, 14.9, 18.8, 19.7, 23.6, 90.9, 25.7]  # float
s = ['Arpi', 'world', 'Hello', 'Python', 'Consultadd', 'job', 'c++', 'Concepts', 'interesting', 'lists']  # string
c = [1+2j, 2+3j, 4+5j, 5+6j, 56+7j, 8+9j, 7+8j, 3+6j, 7+9j, 9+1j]  # complex
print(c)
6,653 | 21172985bf36302f6b0b2101e353d9fbcafb0673 | from typing import Dict, List, Sequence, Iterable, Tuple
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.common.file_utils import cached_path
import logging
from overrides import overrides
import itertools
from allennlp.data.tokenizers import Token
from allennlp.data.fields import ListField, TextField, SequenceLabelField, Field, MetadataField, SpanField, LabelField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
logger = logging.getLogger(__name__)
# Maps the raw polarity label read from the data file to a display string.
# Fix: the original mapped "1" to the misspelling "Ture" instead of "True".
polar_dict = {
    "1": "True",
    "0": "False"
}
@DatasetReader.register("bertclassification")
class ClassificationReader(DatasetReader):
    """Reads lines of the form ``<polar>,<sentence>`` and yields instances whose
    tokens are the individual characters of the sentence and whose label is the
    raw polarity string."""
    def __init__(
        self,
        token_indexers: Dict[str, TokenIndexer] = None,
        lazy: bool = False
    ) -> None:
        # Fall back to plain single-id indexing when no indexers are supplied.
        super().__init__(lazy)
        self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
    @overrides
    def _read(self, file_path: str) -> Iterable[Instance]:
        # cached_path resolves URLs to a locally cached copy before opening.
        file_path = cached_path(file_path)
        with open(file_path, "r") as data_file:
            logger.info("Reading instances from lines in file at: %s", file_path)
            for line in data_file:
                # Each line is "<polar>,<sentence>"; iterating the sentence
                # string tokenizes it one character at a time.
                polar, sent = line.strip().split(",")
                tokens = [Token(token) for token in sent]
                yield self.text_to_instance(tokens, polar)
    def text_to_instance(
        self,
        tokens: List[Token],
        polar
    ) -> Instance:
        """Build an Instance with a 'tokens' TextField and a 'label' LabelField."""
        sequence = TextField(tokens, self._token_indexers)
        instance_fields: Dict[str, Field] = {'tokens': sequence}
        instance_fields['label'] = LabelField(polar)
        return Instance(instance_fields)
|
6,654 | 51ff1181f0ddac3a8f7cbd9f9d2eedae29a6c559 | from django.db import models
from django.contrib.auth.models import User
from django.db.models.deletion import CASCADE
class Profile(models.Model):
    """One-to-one profile extension of Django's built-in User model."""
    # Deleting the User cascades and removes this Profile as well.
    user = models.OneToOneField(User, on_delete=CASCADE)
    # portfolio = models.ManyToOneRel(User, on_delete=)
    def __str__(self):
        return f"{self.user.username} Profile"
6,655 | 54c1b294d826deb43978591cad590c5e969bebd7 | __author__ = 'Or'
|
6,656 | 2458b8169029b3af501b650d548925770b0da74e |
from django.contrib import admin
from django.urls import path
from petsApp import views
urlpatterns = [
    # User account / asset endpoints.
    path('user/<int:id>/', views.getUser),
    path('user/addImage/', views.addImage),
    path('user/getImage/<int:id>/', views.getImage),
    path('user/signup/', views.signUp),
    path('user/login/', views.logIn),
    path('user/logout/', views.logOut),
    path('user/addInvoice/', views.addInvoice),
    # Pet endpoints.
    path('pets/', views.pets),  # TODO ("toto" in the original looks like a typo for "todo")
    path('pets/search/', views.searchPet),  # TODO ("toto" in the original looks like a typo for "todo")
    path('pets/addFond/<int:id>', views.addFond),
    path('pets/fond/<pet>/', views.getFond),
    path('pets/delete/', views.delPet),
    # Invoice endpoint.
    path('invoice/', views.invoice),
]
|
6,657 | c0376d94b34ea43e562e68cd65d4e5d2c5b04fb3 | for name in ["Madan", "Mohan", "Reddy", "Govindu"]:
print("My name includes "+name)
# Tables
# for i in range(1, 11):
# for j in range(1, 11):
# print("{0} * {1} = {2}".format(i,j, i*j))
# print("\n")
for i in range(1, 3):
for j in range(4, 7):
if j==5:
break
print(j) |
6,658 | 555646a5d57152034b467cbce16b6c183bcfbb37 | import copy
from basics.binary_tree.binary_tree import TreeNode
from basics.binary_tree.traversals import level_order_traversal
def max_depth_bottom_up(root):
    """Return the depth of the deepest leaf (0 for an empty tree), bottom-up."""
    if not root:
        return 0

    def deepest(node, depth):
        # Leaves report their own depth; internal nodes report the best child depth.
        if not node.left and not node.right:
            return depth
        best = 0
        if node.left:
            best = max(best, deepest(node.left, depth + 1))
        if node.right:
            best = max(best, deepest(node.right, depth + 1))
        return best

    return deepest(root, 1)
def max_depth_top_down(root):
    """Return the height of the tree (0 for an empty tree), level by level."""
    depth = 0
    frontier = [root] if root else []
    while frontier:
        depth += 1
        frontier = [child for node in frontier
                    for child in (node.left, node.right) if child]
    return depth
def is_symmetric(root):
    """Return True when the tree is a mirror image of itself around its root."""
    pairs = [(root, root)]
    while pairs:
        a, b = pairs.pop()
        if a is None and b is None:
            continue
        if a is None or b is None or a.val != b.val:
            return False
        # Outer children must mirror each other, and likewise the inner ones.
        pairs.append((a.left, b.right))
        pairs.append((a.right, b.left))
    return True
def has_path_sum(root, target_sum):
    """Return True if some root-to-leaf path sums exactly to target_sum."""
    if root is None:
        return False
    remaining = target_sum - root.val
    if root.left is None and root.right is None:
        # Leaf: the path matches iff nothing is left to consume.
        return remaining == 0
    return has_path_sum(root.left, remaining) or has_path_sum(root.right, remaining)
def build_tree_from_inorder_preorder(inorder, preorder):
    """Rebuild a binary tree (distinct values) from its inorder and preorder lists.

    Returns the root TreeNode, or None for empty input.

    Fixes over the original: the preorder sequence is consumed via an index
    instead of ``preorder.pop(0)``, which was O(n) per call (O(n^2) overall)
    and destructively emptied the caller's list as a side effect.
    """
    if not inorder or not preorder:
        return None
    # Value -> inorder position, for O(1) root-splitting lookups.
    inorder_map = {val: i for i, val in enumerate(inorder)}
    next_pre = 0  # index of the next preorder value to place

    def build(lo, hi):
        nonlocal next_pre
        if lo > hi:
            return None
        val = preorder[next_pre]
        next_pre += 1
        node = TreeNode(val)
        mid = inorder_map[val]
        node.left = build(lo, mid - 1)
        node.right = build(mid + 1, hi)
        return node

    return build(0, len(inorder) - 1)
def build_tree_from_inorder_postorder(inorder, postorder):
    """Rebuild a binary tree (distinct values) from its inorder and postorder lists.

    Returns the root TreeNode, or None for empty input.

    Fix over the original: the postorder sequence is read back-to-front via an
    index instead of ``postorder.pop()``, so the caller's list is no longer
    destructively emptied as a side effect.
    """
    if not inorder or not postorder:
        return None
    # Value -> inorder position, for O(1) root-splitting lookups.
    inorder_map = {val: i for i, val in enumerate(inorder)}
    next_post = len(postorder) - 1  # index of the next postorder value to place

    def build(lo, hi):
        nonlocal next_post
        if lo > hi:
            return None
        val = postorder[next_post]
        next_post -= 1
        node = TreeNode(val)
        mid = inorder_map[val]
        # Right subtree first: postorder lists left, right, then root.
        node.right = build(mid + 1, hi)
        node.left = build(lo, mid - 1)
        return node

    return build(0, len(inorder) - 1)
def next_right_pointer(root):
    """Wire every node's `next` pointer to its right-hand neighbour on the same
    level (None for the rightmost node) and return the root.

    Expected node shape:
    class TreeNode:
        def __init__(self, val=0, left=None, right=None):
            self.val = val
            self.left = left
            self.right = right
            self.next = None
    """
    frontier = [root] if root is not None else []
    while frontier:
        # Link neighbours left-to-right; the last node's next is None.
        for left_node, right_node in zip(frontier, frontier[1:]):
            left_node.next = right_node
        frontier[-1].next = None
        frontier = [child for node in frontier
                    for child in (node.left, node.right) if child is not None]
    return root
def lowest_common_ancestor(root, p, q):
    """Return the lowest common ancestor of p and q via a post-order sweep."""
    result = [None]

    def contains(node):
        # True when the subtree rooted at `node` holds p or q; records the
        # deepest node where two of {self-match, left, right} coincide.
        if node is None:
            return False
        in_left = contains(node.left)
        in_right = contains(node.right)
        here = node == p or node == q
        if (here and (in_left or in_right)) or (in_left and in_right):
            result[0] = node
        return here or in_left or in_right

    contains(root)
    return result[0]
def lowest_common_ancestor_2(root, p, q):
    """Recursive LCA: a node is the answer when p and q land in different
    subtrees; otherwise propagate whichever side found one of them."""
    if root == p or root == q:
        return root
    found_left = lowest_common_ancestor_2(root.left, p, q) if root.left else None
    found_right = lowest_common_ancestor_2(root.right, p, q) if root.right else None
    return root if (found_left and found_right) else (found_left or found_right)
def lowest_common_ancestor_3(root, p, q):
    """Iterative LCA using a parent map: walk the tree until both targets are
    discovered, collect p's ancestor chain, then climb from q until it meets it."""
    parent_of = {root: None}
    pending = [root]
    # Depth-first expansion, stopping once both p and q have known parents.
    while p not in parent_of or q not in parent_of:
        current = pending.pop()
        for child in (current.left, current.right):
            if child:
                parent_of[child] = current
                pending.append(child)
    p_ancestors = set()
    node = p
    while node is not None:
        p_ancestors.add(node)
        node = parent_of[node]
    node = q
    while node not in p_ancestors:
        node = parent_of[node]
    return node
def serialize_tree(root):
    """Serialize a tree as its list of levels, delegating to level_order_traversal.

    NOTE(review): the exact per-level layout (including None placeholders for
    missing children) is defined by level_order_traversal and is not visible
    here; deserialize_tree assumes each level is a full width-doubling layer.
    """
    levels = level_order_traversal(root)
    return levels
def deserialize_tree(serialized):
    """Rebuild a tree from the level list produced by serialize_tree.

    Assumes each level is a full width-doubling layer in which missing children
    are recorded as None (so level k has 2 * len(level k-1) slots).

    Fix over the original: child presence was tested with ``if level[2*i]``
    (truthiness), which silently dropped any node whose value is falsy, e.g. 0;
    the check is now an explicit ``is not None``.
    """
    if not serialized:
        return None
    # Deep-copy so popping levels does not mutate the caller's structure.
    levels = copy.deepcopy(serialized)
    root = TreeNode(levels.pop(0)[0])
    nodes = [root]
    while levels:
        level = levels.pop(0)
        next_nodes = []
        for i, node in enumerate(nodes):
            if node:
                left_val = level[2 * i]
                right_val = level[2 * i + 1]
                node.left = TreeNode(left_val) if left_val is not None else None
                node.right = TreeNode(right_val) if right_val is not None else None
                next_nodes += [node.left, node.right]
            else:
                # Absent parent: keep placeholder slots aligned for the next level.
                next_nodes += [None, None]
        nodes = next_nodes
    return root
def equal(root1, root2):
    """Return True when both trees have identical structure and node values."""
    if not root1 or not root2:
        # Trees agree here only if both sides are empty.
        return (not root1) and (not root2)
    if root1.val != root2.val:
        return False
    return equal(root1.left, root2.left) and equal(root1.right, root2.right)
6,659 | fbba928d51ccd08dbac25fcf2098be3a0d494d34 | ii = [('CoolWHM.py', 1), ('SoutRD.py', 1), ('BrewDTO.py', 2), ('FitzRNS2.py', 1), ('LyelCPG3.py', 1), ('TaylIF.py', 2)] |
6,660 | ad170f67e5b9f54d950ead91dd60cd4f3b753eca | from Config_paar import *
from Envelopefkt import *
from Kinematik import *
def A_m_n(M,N,x_plus,p_el,p_pos,k_photon,k_laser):
    """Laser-dressed amplitude term A_{M,N} in the slowly varying envelope
    approximation (SVEA).

    M, N       -- integer amplitude indices; M == 0 selects the boundary term
                  built from the full phase f instead of an envelope power.
    x_plus     -- light-front time; the laser phase is phi = w_laser * x_plus.
    p_el/p_pos -- electron / positron four-momenta (support * products and .minus()).
    k_photon   -- emitted photon four-momentum.
    k_laser    -- laser four-momentum.
    Relies on module-level configuration imported from Config_paar / Envelopefkt
    (m, a0, ksi, sigma, Envelope, w_laser, eps_laser, g, Int_g_2).
    """
    # Linear (f1) and quadratic (f2) parts of the instantaneous Volkov phase.
    def f1(p):
        return -(m*a0)/(pk(p)) * g(phi,sigma,Envelope) *( pe(1,p) * cos(ksi) * cos(phi) + pe(2,p) * sin(ksi) * sin(phi) )
    def f2(p):
        return -(m*a0)**2/(2.*pk(p))*g(phi,sigma,Envelope)**2*((cos(ksi)*cos(phi))**2+(sin(ksi)*sin(phi))**2)
    def f(p):
        return f1(p)+f2(p)
    # SVEA-integrated counterparts entering the exponent H_plus.
    def f1_SVEA(p):
        return -(m*a0)/(pk(p))*g(phi,sigma,Envelope)*(pe(1,p)*cos(ksi)*sin(phi)-pe(2,p)*sin(ksi)*cos(phi))
    def f2_SVEA(p):
        return -(m*a0)**2/(4.*pk(p))*(Int_g_2(phi,sigma,Envelope)+g(phi,sigma,Envelope)**2*cos(phi)*sin(phi)*(cos(ksi)**2-sin(ksi)**2))
    def f_SVEA(p):
        return f1_SVEA(p)+f2_SVEA(p)
    # Four-product shorthands: pk(p) = p.k_laser, pe(l, p) = p.eps_laser(l).
    pk = lambda imp: (imp * k_laser)
    pe = lambda l,imp: (imp * eps_laser(l))
    # Light-front momentum balance and the resulting phase multiplier s.
    P_ = p_pos.minus() + p_el.minus() - k_photon.minus()
    s = P_/k_laser.minus()
    phi = w_laser * x_plus
    H_plus = s*phi - f_SVEA(p_el) + f_SVEA(-p_pos)
    if M == 0:
        # Boundary term: difference of full phases replaces the envelope power.
        A = -1./s * (f(-p_pos) - f(p_el)) * exp(1j * H_plus)
    else:
        A = g(phi,sigma,Envelope)**M *exp( 1j* ( H_plus + N*phi))
    return A
def A_m_n_nSVEA(M,N,x_plus,p_el,p_pos,k_photon,k_laser):
    """Amplitude term A_{M,N} WITHOUT the slowly varying envelope approximation.

    Same arguments as A_m_n. The phase integrals f1/f2 are evaluated with the
    closed-form antiderivatives available for the 'cos^2' and 'cos^4' envelopes;
    for any other envelope an IOError is raised directing the caller to the
    SVEA variant. (Python 2 `raise IOError, '...'` syntax.)
    """
    # Closed-form integral of the linear (in a0) part of the Volkov phase.
    def f1(p):
        if Envelope == 'cos^2':
            fakt_a = sigma/(sigma-pi)
            fakt_b = sigma/(sigma+pi)
            Int_sin = -0.25 *( fakt_a * cos( phi/fakt_a ) + fakt_b * cos( phi/fakt_b ) +2.*cos(phi) )
            Int_cos = 0.25 *( fakt_a * sin( phi/fakt_a ) + fakt_b * sin( phi/fakt_b ) +2.*sin(phi) )
            return -(m*a0)/(pk(p)) *( pe(1,p) * cos(ksi) * Int_cos + pe(2,p) * sin(ksi) * Int_sin )
        elif Envelope == 'cos^4':
            fakt_a = lambda n: ( 1. + n*pi/sigma )
            fakt_b = lambda n: ( -1. + n*pi/sigma )
            Int_sin = 0.25 *( ( - cos( fakt_a(2.)*phi ) / fakt_a(2.) + cos( fakt_b(2.)*phi ) / fakt_b(2.) ) * 0.25 \
                              - cos( fakt_a(1.)*phi ) / fakt_a(1.) + cos( fakt_b(1.)*phi ) / fakt_b(1.) - 3./2. * cos(phi) )
            Int_cos = 0.25 *( ( sin( fakt_a(2.)*phi ) / fakt_a(2.) + sin( fakt_b(2.)*phi ) / fakt_b(2.) ) * 0.25 \
                              + sin( fakt_a(1.)*phi ) / fakt_a(1.) + sin( fakt_b(1.)*phi ) / fakt_b(1.) - 3./2. * sin(phi) )
            return -(m*a0)/(pk(p)) * ( pe(1,p) * cos(ksi) * Int_cos + pe(2,p) * sin(ksi) * Int_sin )
        elif Envelope == 'cosh':
            raise IOError,'cosh noch nicht implementiert -> benutze SEVA'
        else:
            raise IOError,'Nicht analytisch loesbar -> benutze SEVA'
    # Closed-form integral of the quadratic (in a0^2) part of the Volkov phase.
    def f2(p):
        if Envelope == 'cos^2':
            a = pi/sigma/2.
            F = lambda l,n: ( l + n*a )
            Int_cos = 1./8. *( 1.5*phi + 0.75*sin(2.*phi) + sin(F(0,4.)*phi)/F(0,8.) + sin(F(0,2.)*phi)/a \
                               + sin(F(-2.,4.)*phi)/F(-2.,4.)/4. + sin(F(2.,4.)*phi)/F(2.,4.)/4. \
                               + sin(F(-2.,2.)*phi)/F(-2.,2.) + sin(F(2.,2.)*phi)/F(2.,2.) )
            Int_sin = 1./8. *( 1.5*phi - 0.75*sin(2.*phi) + sin(F(0,4.)*phi)/F(0,8.) + sin(F(0,2.)*phi)/a \
                               - sin(F(-2.,4.)*phi)/F(-2.,4.)/4. - sin(F(2.,4.)*phi)/F(2.,4.)/4. \
                               - sin(F(-2.,2.)*phi)/F(-2.,2.) - sin(F(2.,2.)*phi)/F(2.,2.) )
            return -( m*a0 )**2 / (2.*pk(p)) * ( cos(ksi)**2 * Int_cos + sin(ksi)**2 * Int_sin )
        elif Envelope == 'cos^4':
            Faktor = lambda l,n: ( l + n*pi/sigma )
            Int_sin = 1./64. *( (- sin( Faktor(-2.,4.)*phi ) / Faktor(-2.,4.) - sin( Faktor(2.,4.)*phi ) / Faktor(2.,4.) ) / 8. \
                                - sin( Faktor(-2.,3.)*phi ) / Faktor(-2.,3.) - sin( Faktor(2.,3.)*phi ) / Faktor(2.,3.) \
                                -( sin( Faktor(-2.,2.)*phi ) / Faktor(-2.,2.) + sin( Faktor(2.,2.)*phi ) / Faktor(2.,2.) ) * 3.5 \
                                -( sin( Faktor(-2.,1.)*phi ) / Faktor(-2.,1.) + sin( Faktor(2.,1.)*phi ) / Faktor(2.,1.) ) * 7. \
                                + sin( Faktor( 0.,4.)*phi ) / Faktor(0.,16.) + sin( Faktor(0.,3.)*phi ) / Faktor(0.,1.5) \
                                +( sin( Faktor( 0.,2.)*phi ) / Faktor(0.,2.) + sin( Faktor(0.,1.)*phi ) / Faktor(0.,0.5)) * 7. \
                                + 35./4. * phi - 35./8. * sin( 2*phi ) )
            Int_cos = 1./64. *( ( sin( Faktor(-2.,4.)*phi ) / Faktor(-2.,4.) + sin( Faktor(2.,4.)*phi ) / Faktor(2.,4.) ) / 8. \
                                + sin( Faktor(-2.,3.)*phi ) / Faktor(-2.,3.) + sin( Faktor(2.,3.)*phi ) / Faktor(2.,3.) \
                                +( sin( Faktor(-2.,2.)*phi ) / Faktor(-2.,2.) + sin( Faktor(2.,2.)*phi ) / Faktor(2.,2.) ) * 3.5 \
                                +( sin( Faktor(-2.,1.)*phi ) / Faktor(-2.,1.) + sin( Faktor(2.,1.)*phi ) / Faktor(2.,1.) ) * 7. \
                                + sin( Faktor( 0.,4.)*phi ) / Faktor(0.,16.) + sin( Faktor(0.,3.)*phi ) / Faktor(0.,1.5) \
                                +( sin( Faktor( 0.,2.)*phi ) / Faktor(0.,2.) + sin( Faktor(0.,1.)*phi ) / Faktor(0.,0.5)) * 7. \
                                + 35./4. * phi - 35./8. * sin( 2*phi ) )
            return -( m*a0 )**2 / (2.*pk(p)) * ( cos(ksi)**2 * Int_cos + sin(ksi)**2 * Int_sin )
        elif Envelope == 'cosh':
            raise IOError,'cosh noch nicht implementiert -> benutze SEVA'
        else:
            raise IOError,'Nicht analytisch loesbar -> benutze SEVA'
    def f(p):
        return f1(p)+f2(p)
    # Four-product shorthands: pk(p) = p.k_laser, pe(l, p) = p.eps_laser(l).
    pk = lambda imp: (imp * k_laser)
    pe = lambda l,imp: (imp * eps_laser(l))
    # Light-front momentum balance and the resulting phase multiplier s.
    P_ = p_pos.minus() + p_el.minus() - k_photon.minus()
    s = P_/k_laser.minus()
    phi = w_laser * x_plus
    H_plus = s*phi - f(p_el) + f(-p_pos)
    A = g(phi,sigma,Envelope)**M *exp( 1j* ( H_plus + N*phi))
    return A
def A_0_0 (A11,A1_1,A20,A22,A2_2):
    """Combine precomputed A_{M,N} terms into the regularized A_{0,0} amplitude.

    A11, A1_1, A20, A22, A2_2 -- the A_{1,1}, A_{1,-1}, A_{2,0}, A_{2,2} and
    A_{2,-2} amplitude values. Kinematics (momenta, polarization vectors) come
    from kinematik(); m, a0 and ksi come from the Config_paar star import.
    """
    p_pos,p_el,k_laser,k_photon,q_pos,eps_m,eps_p = kinematik()
    # Four-product with the laser momentum and the a0-dependent prefactor d_p.
    pk = lambda p: (p * k_laser)
    d_p = lambda p: m*a0 / ( 4.* pk(p) )
    # Light-front momentum balance and the phase multiplier s.
    P_ = p_pos.minus() + p_el.minus() - k_photon.minus()
    s = P_/k_laser.minus()
    Wert = 2./s * ( ( d_p(p_pos)*p_pos*eps_m - d_p(p_el)*p_el*eps_m ) * A11 \
                    + ( d_p(p_pos)*p_pos*eps_p - d_p(p_el)*p_el*eps_p ) * A1_1 \
                    - k_laser*k_photon*d_p(p_pos)*d_p(p_el) \
                    * ( 2.*A20 + (cos(ksi)**2 - sin(ksi)**2) * (A22 + A2_2) ) )
    return Wert
|
6,661 | 619d2df45d0823930484f030a9a78e71ec718cb7 | # -*- coding: utf-8 -*-
from django.apps import AppConfig
class AcademyConfig(AppConfig):
    """Django application configuration for the 'academy' app."""
    name = 'academy'
    # Human-readable app title shown in the admin (Russian for "Academy").
    verbose_name = u"Академия"
|
6,662 | 23d15c719cd26ea67a032a91a3e73f0d8d3bcfd1 | from django.views import generic
from .models import Project
class IndexView(generic.ListView):
    """List view over every Project, rendered with projects/index.html."""
    template_name = "projects/index.html"
    # Name under which the object list is exposed to the template context.
    context_object_name = "projectz"
    def get_queryset(self):
        """Return all projects."""
        return Project.objects.all()
class DetailView(generic.DetailView):
    """Detail view for a single Project, rendered with projects/detail.html."""
    model = Project
    template_name = "projects/detail.html"
6,663 | a3e655350fb5fe7999bea4a87fb62c7698fb63f1 | from typing import List
import tensorflow as tf
from tensorflow.keras.layers import Dense
"""Possible agent network structures implemented as Tensorflow Modules"""
class QNetwork:
    """Fully-connected Q-value network for a DQN agent.

    Builds a Keras Sequential MLP mapping a state vector of size ``state_dim``
    to one Q-value per action (``action_dim``), with hidden layers sized by
    ``hidden_layer_sizes`` and a linear output head.
    """
    def __init__(
        self,
        state_dim: int,
        action_dim: int = 3,  # Default: agents can hold=0, buy=1, or sell=2.
        hidden_layer_sizes: List = [128, 256, 256, 128],
        activation: str = "relu",
    ):
        self._state_dim = state_dim
        self._action_dim = action_dim
        # Copy so instances never alias (or mutate) the shared default list.
        self._hidden_layer_sizes = list(hidden_layer_sizes)
        self._activation = activation
        self._model = tf.keras.Sequential()
        # First hidden layer also fixes the network's input dimensionality.
        self._model.add(
            Dense(
                units=self._hidden_layer_sizes[0],
                input_dim=self._state_dim,
                activation=self._activation,
            )
        )
        # Remaining hidden layers. BUG FIX: the original loop was
        # `range(2, len(self._hidden_layer_sizes))`, which silently skipped
        # hidden_layer_sizes[1]; every requested hidden layer is now added.
        for layer_size in self._hidden_layer_sizes[1:]:
            self._model.add(Dense(layer_size, activation=self._activation))
        # Linear head: one unbounded Q-value per action.
        self._model.add(Dense(self._action_dim, activation="linear"))
    def get_model(self) -> tf.keras.Model:
        """Return the underlying Keras model."""
        return self._model
|
6,664 | 2e1ad83bcd16f59338032f8ad5ca8ebd74e92200 | from typing import Any, List
__all__: List[str]
record: Any
recarray: Any
format_parser: Any
fromarrays: Any
fromrecords: Any
fromstring: Any
fromfile: Any
array: Any
|
6,665 | 8c6b7f29b8dca61a5218b51c85149c9642af5649 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from dynamic_rest import viewsets
from django.shortcuts import render
import os
from rest_framework import permissions
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from rest_framework.decorators import api_view, permission_classes, detail_route, list_route
from rest_framework import mixins
from rdkit import Chem
import random
from Bio.PDB.PDBParser import PDBParser
import time
from Bio.PDB.PDBIO import PDBIO
import residues_scanning_command
import prep_dock
import threading
from rosetta_workflow_all_scripts import rosetta_protein_prep, get_cst_file, change_pos, design_analysis
import rosetta_workflow_all_scripts
from django.utils.datastructures import MultiValueDictKeyError
import multiprocessing as mul
import time
from models import SubmitParamter, Onlinedock
from django.core.files import File
from email.mime.text import MIMEText
from email.header import Header
from smtplib import SMTP_SSL
from email.mime.multipart import MIMEMultipart
from email import encoders
from email.message import Message
from email.mime.base import MIMEBase
from dynamic_rest import viewsets
import serializers
from rest_framework.parsers import JSONParser
from polls.serializers import SubmitParamsSerializer, OnlinedockSerializer
from django.http import JsonResponse
import zipfile
import tempfile
def send_file_zipped(the_file, recipients, email_content, sender='1032847174@qq.com'):
    """Zip `the_file`, attach the archive to an HTML e-mail, and send it via SMTP-SSL.

    :param the_file: path of the file to compress and attach
    :param recipients: list of destination addresses
    :param email_content: HTML body of the message
    :param sender: envelope/From address
    """
    # Build the zip archive in an anonymous temp file, then rewind for reading.
    zf = tempfile.TemporaryFile(prefix='mail', suffix='zip')
    archive = zipfile.ZipFile(zf, 'w')  # renamed: `zip` shadowed the builtin
    archive.write(the_file)
    archive.close()
    zf.seek(0)
    ### Create the message
    themsg = MIMEMultipart()
    themsg['Subject'] = 'File %s' % the_file
    themsg['To'] = ', '.join(recipients)
    themsg['From'] = sender
    themsg.attach(MIMEText(email_content, 'html', 'utf-8'))
    themsg.preamble = 'I am not using a MIME-aware mail reader.\n'
    # Attach the zipped payload base64-encoded.
    msg = MIMEBase('application', 'zip')
    msg.set_payload(zf.read())
    encoders.encode_base64(msg)
    msg.add_header('Content-Disposition', 'attachment', filename=the_file)
    themsg.attach(msg)
    themsg = themsg.as_string()
    ### Send the message
    # SECURITY: SMTP credentials are hard-coded in source; move them to
    # configuration or environment variables.
    host_server = 'smtp.qq.com'
    sender_mail_addr = '1032847174@qq.com'
    pwd = 'utxfxpzcpsnzbbcc'
    smtp = SMTP_SSL(host_server)
    smtp.set_debuglevel(1)
    smtp.ehlo(host_server)
    smtp.login(sender_mail_addr, pwd)
    smtp.sendmail(sender, recipients, themsg)
    smtp.quit()
def get_pov_value(file):
    """Return the float in the second '|'-separated column of the first line of
    `file` that starts with '1'; None when no such line exists.

    Improvements over the original: the file is opened with a context manager
    (closed even if parsing raises), lines are streamed instead of read into a
    list, and the no-match case returns None explicitly.
    """
    with open(file) as f:
        for line in f:
            if line.startswith('1'):
                return float(line.split('|')[1].strip())
    return None
def main(job_name, mutation_radius, pov_radius, pH, mutation_info_list, protein, ligand_name, ligand_resseq, chain_id, email_addr):
    """
    Full pipeline: save the uploaded protein under log/<job_name>, prepare it at
    the given pH, apply the requested mutations, compute POV scores for both the
    prepared and the mutated structure, persist everything on an Onlinedock row,
    and e-mail a zip of the result files to the user.
    :param job_name: job identifier; also used as the working-directory name
    :param mutation_radius: radius forwarded to the mutation step
    :param pov_radius: radius used for the POV calculations
    :param pH: pH used during protein preparation
    :param mutation_info_list: mutation description (per the caller, items are [chain, position, residue, ...])
    :param protein: uploaded file-like object (provides .name and .read())
    :param ligand_name: residue name of the ligand
    :param ligand_resseq: residue sequence number of the ligand
    :param chain_id: chain that holds the ligand
    :param email_addr: recipient address(es) for the result e-mail
    :return: None; results are stored in the DB and sent by e-mail
    """
    current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
    print current_time
    log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'log')
    wild_protein_name = protein.name
    # job_dir = os.path.join(log_dir, job_name + '_' + current_time)
    job_dir = os.path.join(log_dir, job_name)
    if not os.path.exists(job_dir):
        os.mkdir(job_dir)
    # Persist the uploaded structure to disk before any processing.
    wild_protein_file = os.path.join(job_dir, wild_protein_name)
    protein_str = protein.read()
    prep_dock.save_to_file(wild_protein_file, protein_str)
    prepare_protein_name = wild_protein_name.split('.')[0] + '_prep.pdb'
    ### Prepare protein
    prep_dock.prep_protein(wild_protein_name, prepare_protein_name, job_dir, pH)
    ### make mutation
    prep_dock.get_mut_protein(job_dir, job_name, mut_info_list=mutation_info_list, mutation_radius=mutation_radius,
                              prepare_protein_name=prepare_protein_name)
    prepare_protein = os.path.join(job_dir, prepare_protein_name)
    mutation_protein_name = job_name + '_mut-2.pdb'
    mutation_protein = os.path.join(job_dir, mutation_protein_name)
    ### prep_pov
    prep_dock.get_pro_lig_povin((prepare_protein_name, prepare_protein, chain_id, ligand_resseq, ligand_name), pov_radius, protein_type='prep')
    prep_pov = os.path.join(job_dir, 'pov', 'prep', 'prep.log')
    ### mut_pov
    prep_dock.get_pro_lig_povin((mutation_protein_name, mutation_protein, chain_id, ligand_resseq, ligand_name), pov_radius, protein_type='mut')
    mut_pov = os.path.join(job_dir, 'pov', 'mut', 'mut.log')
    ### plip
    # prep_dock.get_plip_file(prepare_protein, mutation_protein)
    ### TMalign
    # prep_dock.TMalign(prepare_protein, mutation_protein)
    # Store all produced artifacts and scores on the job's Onlinedock record.
    onlinedock, create = Onlinedock.objects.get_or_create(job_name=job_name)
    prep_protein_file = File(open(prepare_protein))
    mut_protein_file = File(open(mutation_protein))
    prep_pov_file = File(open(prep_pov))
    mut_pov_file = File(open(mut_pov))
    prep_pov_value = get_pov_value(prep_pov)
    mut_pov_value = get_pov_value(mut_pov)
    onlinedock.prep_protein.save(prepare_protein_name, prep_protein_file)
    onlinedock.mut_protein.save(mutation_protein_name, mut_protein_file)
    onlinedock.prep_pov.save('prep.log', prep_pov_file)
    onlinedock.mut_pov.save('mut.log', mut_pov_file)
    onlinedock.prep_pov_value = prep_pov_value
    onlinedock.mut_pov_value = mut_pov_value
    onlinedock.save()
    # Zip the result files and mail them to the user.
    os.chdir(job_dir)
    os.system('zip related_info ' + prepare_protein + ' ' + mutation_protein + ' ' + prep_pov + ' ' + mut_pov)
    email_content = "Wellcome to Jianping Lin Group server~~"
    print(os.path.curdir)
    related_info = os.path.join(os.path.curdir, 'related_info.zip')
    send_file_zipped(related_info, email_addr, email_content=email_content)
def test():
    """Leftover debugging stub: binds a local results path and does nothing."""
    job_dir = '/home/jianping/django_test/longge/polls/log/1111/pov'
    # job_id = 0
@api_view(['POST'])
@permission_classes([permissions.AllowAny])
def online_docking(request):
    """POST endpoint: unpack the submission parameters and launch the
    long-running `main` pipeline on a background thread so the HTTP response
    returns immediately."""
    # current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
    # print current_time
    # log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'log')
    job_name = request.data['job_name']
    mutation_radius = request.data['mutation_radius'] ### mutation radius
    pov_radius = str(request.data['pov_radius']) ### pov radius (as string)
    pH = request.data['pH']
    mutation_info_list = request.data['mutation_info_list'] ### [chain, position, residue, ]
    protein = request.data['protein_file']
    ligand_name = request.data['ligand_name']
    ligand_resseq = int(request.data['ligand_resseq'])
    chain_id = request.data['chain_id']
    email_addr = request.data['email_addr']
    # main(job_name, mutation_radius, pov_radius, pH, mutation_info_list, protein, ligand_name, ligand_resseq, chain_id, email_addr)
    # Non-daemon thread: the pipeline keeps running after the response is sent.
    t = threading.Thread(target=main, args=(job_name, mutation_radius, pov_radius, pH, mutation_info_list, protein, ligand_name, ligand_resseq, chain_id, email_addr))
    t.setDaemon(False)
    t.start()
    return Response('Conguratulations, you have submitted successfully!!!')
@api_view(['POST'])
@permission_classes([permissions.AllowAny])
def prepare_protein(request):
    """POST endpoint: save the uploaded protein under Enzyme_design/<job_name>,
    renumber its residues with PDB_renumber.py, persist both files on a
    SubmitParamter row, and return the serialized record."""
    job_name = request.data['job_name']
    protein = request.data['protein']
    job_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Enzyme_design')
    work_dir = os.path.join(job_dir, job_name)
    if not os.path.exists(work_dir):
        os.mkdir(work_dir)
    protein_name, protein_name_pure = protein.name, protein.name.split('.')[0]
    # Write the uploaded bytes to the job's working directory.
    local_protein_file = os.path.join(work_dir, protein_name)
    protein_str = protein.read()
    prep_dock.save_to_file(local_protein_file, protein_str)
    os.chdir(work_dir)
    # Renumber residues via the external helper script (output redirected to file).
    protein_renumber_name, protein_renumber = protein_name_pure + '_renumber', protein_name_pure + '_renumber.pdb'
    os.system(
        'python ../../rosetta_workflow_all_scripts/PDB_renumber.py -i ' + protein_name + ' -a -r > ' + protein_renumber_name + '.pdb')
    # Attach both the original and renumbered structures to the job record.
    params, create = SubmitParamter.objects.get_or_create(job_name=job_name)
    prt = File(open(local_protein_file))
    prt_renumber = File(open(protein_renumber))
    params.protein_file.save(protein_name, prt)
    params.protein_renumber_file.save(protein_renumber, prt_renumber)
    params.save()
    # return Response(params)
    serializer = SubmitParamsSerializer(params)
    return JsonResponse(serializer.data, safe=False)
    # return Response('Successfully')
@api_view(['POST'])
@permission_classes([permissions.AllowAny])
def first_step(request):
    """Prepare all matching inputs for a job: parameterize the design ligand,
    extract/retain the requested chains and co-ligands, build the combined
    PDB, and generate the Rosetta matcher grid files.

    POST data:
      'job_name'        -- job identifier created by prepare_protein().
      'ligand'          -- uploaded design-ligand mol2 file.
      'other_ligands'   -- bracketed flat list of chain/resseq/resname triples
                           to keep, e.g. "['A','215','MG','A','218','HOH']".
      'res_chain', 'res_ligand_chain', 'res_ligand_ID', 'res_ligand_name'
                        -- chain/ligand selection for the matcher grid.
    Returns the updated SubmitParamter record as JSON.
    """
    job_name = request.data['job_name']
    ligand = request.data['ligand']
    other_ligands = request.data['other_ligands']  # e.g. ['A','215','MG','A','218','HOH','A','217','ATP']
    # Strip the surrounding brackets, leaving a flat list of strings.
    other_ligands = other_ligands.split('[')[1].split(']')[0].split(',')
    other_ligands = [str(i) for i in other_ligands]
    res_chain = request.data['res_chain']  # e.g. 'A'
    res_ligand_chain = request.data['res_ligand_chain']  # e.g. 'A'
    res_ligand_ID = request.data['res_ligand_ID']  # e.g. '216'
    res_ligand_name = request.data['res_ligand_name']  # e.g. 'PRP'
    current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
    print(current_time)
    job_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Enzyme_design')
    work_dir = os.path.join(job_dir, job_name)
    if not os.path.exists(work_dir):
        os.mkdir(work_dir)
    ligand_name, ligand_name_pure = ligand.name, ligand.name.split('.')[0]
    local_ligand_file = os.path.join(work_dir, ligand_name)
    ligand_str = ligand.read()
    prep_dock.save_to_file(local_ligand_file, ligand_str)
    os.chdir(work_dir)
    # Generate the Rosetta .params file for the design ligand.
    os.system('python ../../rosetta_workflow_all_scripts/design_ligand_prep.py ' + ligand_name)
    # NOTE(review): busy-waits forever if params generation fails -- consider
    # adding a timeout/sleep.
    while True:
        if os.path.exists(ligand_name_pure+'.params'):
            break
    os.system('cp ../../rosetta_workflow_all_scripts/match.flags ./')
    os.system('cp ../../rosetta_workflow_all_scripts/match_grid.flags ./')
    # Locate the renumbered protein produced by prepare_protein().
    for filename in os.listdir(work_dir):
        if filename.endswith('renumber.pdb'):
            protein_renumber_name = filename.split('.pdb')[0]
            protein_renumber = filename
            break
    prep_pdb, prep_pdb_pure = protein_renumber_name + '_prep.pdb', protein_renumber_name + '_prep'
    rosetta_protein_prep.prep_protein(protein_renumber, prep_pdb, res_chain, './')
    rosetta_protein_prep.get_ligand(prep_pdb, res_ligand_chain, res_ligand_ID, res_ligand_name)
    # Regroup the flat chain/resseq/resname list into triples.
    step = 3
    other_ligands_class_list = [other_ligands[i: i+step] for i in range(0, len(other_ligands), step)]
    os.system('cp ' + protein_renumber_name + '_chain' + res_chain + '.pdb combi_ligands.pdb')
    if len(other_ligands) < 3:
        print('There are no ligands that need to be retained')
    else:
        i = 0
        for cls in other_ligands_class_list:
            combi_name = '_'.join(cls)
            print(combi_name)
            rosetta_protein_prep.get_ligand(protein_renumber, cls[0], cls[1], cls[2])
            last_out_name = protein_renumber_name + '_chain' + combi_name + '.pdb'
            last_out_name_mol2 = protein_renumber_name + '_chain' + combi_name + '.mol2'
            rosetta_protein_prep.combi_pdb('combi_ligands.pdb', last_out_name)
            # Waters and non-3-letter residues are skipped; real hetero
            # ligands each get an LG<i> params file.
            if cls[2] != 'HOH' and len(cls[2]) == 3:
                i += 1
                os.system('obabel -ipdb ' + last_out_name + ' -omol2 -O ' + last_out_name_mol2)
                # BUG FIX: a space was missing between the mol2 filename and
                # the '-n' flag, so molfile_to_params.py received one bogus
                # argument ('<file>.mol2-n') and never got the LG<i> name.
                os.system('python /home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/source/scripts/python/public/molfile_to_params.py ' + last_out_name_mol2 + ' -n LG' + str(i))
    os.system("sed -i '/^TER/c'TER'' combi_ligands.pdb")
    rosetta_protein_prep.get_grid('../../rosetta_workflow_all_scripts/match_grid.flags', prep_pdb_pure, res_chain, res_ligand_chain, res_ligand_ID, res_ligand_name)
    rosetta_protein_prep.get_match_flags('../../rosetta_workflow_all_scripts/match.flags', res_chain, 'ABC', prep_pdb_pure, ligand_name_pure)
    os.system('/home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/source/bin/gen_lig_grids.linuxgccrelease -database /home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/database @match_grid_out.flags')
    os.system('cp ' + protein_renumber + ' ./renumber.pdb')
    # Persist the uploaded/derived artifacts on the job record.
    params = SubmitParamter.objects.get(job_name=job_name)
    params.other_ligands = other_ligands
    params.res_chain = res_chain
    params.res_ligand_chain = res_ligand_chain
    params.res_ligand_name = res_ligand_name
    lgd = File(open(local_ligand_file))
    ligand_params_file = File(open(ligand_name_pure+'.params'))
    pos_file = os.path.join('./inputs', prep_pdb_pure+'_chain'+res_chain+'.pdb_0.pos')
    pos_file_name = prep_pdb_pure+'_chain'+res_chain+'.pdb_0.pos'
    inputs_pos_file = File(open(pos_file))
    params.ligand_file.save(ligand_name, lgd)
    params.ligand_params_file.save(ligand_name_pure+'.params', ligand_params_file)
    params.inputs_pos_file.save(pos_file_name, inputs_pos_file)
    params.save()
    serializer = SubmitParamsSerializer(params)
    return JsonResponse(serializer.data, safe=False)
@api_view(['POST'])
@permission_classes([permissions.AllowAny])
def second_step(request):
    """Build the geometric constraint file (match.cst) from the user's
    constraint description and rewrite the matcher .pos file to the chosen
    catalytic positions.

    POST data:
      'job_name'       -- job identifier created in the previous steps.
      'constrain_info' -- comma-separated constraint records, each a '-'
                          separated block, e.g.
                          'A:216:PRP:O2B:PB:O3A:A:131:ASP:OD2:CG:CB-O2B:PB:O3A-...'
                          or the short form '...-type:OH'.
      'cat_ID'         -- catalytic residue IDs used to filter the .pos file.
    """
    job_name = request.data['job_name']
    constrain_info = request.data['constrain_info']
    cat_ID = request.data['cat_ID']
    renumber_pdb = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Enzyme_design', job_name, 'renumber.pdb')
    work_dir = os.path.dirname(renumber_pdb)
    os.chdir(work_dir)
    # BUG FIX: the identity test "cst is not ''" only worked by accident of
    # CPython string interning; use a real inequality to drop empty entries.
    constrain_info_list = [cst.split('-') for cst in constrain_info.split(',') if cst != '']
    parse = PDBParser(PERMISSIVE=1)
    structure = parse.get_structure('renumber.pdb', renumber_pdb)
    w = open('match.cst', 'w')
    w.write('# cst constraint descriptior for renumber.pdb' + '\n\n\n')
    w.write('# NOTE\n\n\n')
    # One CST block per user-supplied constraint record, measured on the
    # renumbered structure.
    for idx, cst_info in enumerate(constrain_info_list):
        cst_result = get_cst_file.measure_dist_angle_dihe_new(structure, idx, cst_info)
        w.writelines(cst_result)
    w.close()
    os.system('cp match.cst ./inputs')
    inputs_dir = os.path.join(work_dir, 'inputs')
    os.chdir(inputs_dir)
    # Back up the generated .pos file, then restrict it to the catalytic IDs.
    for filename in os.listdir(inputs_dir):
        if filename.endswith('_0.pos'):
            pos = os.path.join(inputs_dir, filename)
            os.system('cp ' + pos + ' ./pos.bk')
            change_pos.change_pos(filename, cat_ID)
    params = SubmitParamter.objects.get(job_name=job_name)
    params.constrain_info = constrain_info
    params.cat_ID = cat_ID
    match_cst_file = File(open('match.cst'))
    params.match_cst_file.save('match.cst', match_cst_file)
    params.save()
    return Response('Successful')
@api_view(['POST'])
@permission_classes([permissions.AllowAny])
def third_step(request):
    """Optionally accept a user-supplied constraint file, then run the Rosetta
    matcher and record how many UM_*.pdb matches it produced.

    POST data: 'job_name' and, optionally, 'cst_file' (an uploaded match.cst
    overriding the one generated in second_step).
    """
    job_name = request.data['job_name']
    job_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Enzyme_design')
    work_dir = os.path.join(job_dir, job_name)
    os.chdir(work_dir)
    # BUG FIX: 'params' used to be bound only inside the optional-upload
    # branch, so requests without 'cst_file' hit a NameError further down
    # (masked by the broad except as a bogus constraint-file failure).
    # Fetch the job record unconditionally.
    params = SubmitParamter.objects.get(job_name=job_name)
    try:
        cst_file = request.data['cst_file']
        cst_str = cst_file.read()
        user_defined_cst_file = os.path.join(work_dir, 'inputs', 'match.cst')
        prep_dock.save_to_file(user_defined_cst_file, cst_str)
        new_cst_file = File(open(user_defined_cst_file))
        params.user_defined_cst_file.save('match.cst', new_cst_file)
        params.save()
    except MultiValueDictKeyError:
        # No override uploaded -- keep the generated match.cst.
        pass
    try:
        os.system('/home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/source/bin/match.linuxgccrelease @match_out.flags -database /home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/database')
        # The matcher drops UM_*.pdb hit files into inputs/; count them.
        UM_pdb_list = [filename for filename in os.listdir(os.path.join(work_dir, 'inputs')) if filename.startswith('UM')]
        params.UM_pdb_count = len(UM_pdb_list)
        params.save()
        serializer = SubmitParamsSerializer(params)
        return JsonResponse(serializer.data, safe=False)
    except Exception:
        # Matching is best-effort from the API's point of view; surface a
        # user-actionable message instead of a 500 (narrowed from a bare
        # 'except:' so KeyboardInterrupt/SystemExit are no longer swallowed).
        return Response('Failed, please check the constraint file and submit again !!!')
from functools import wraps
def timethis(func):
    """Decorator that prints the wrapped callable's name and wall-clock
    runtime in seconds after every invocation, passing the result through."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        started = time.time()
        result = func(*args, **kwargs)
        elapsed = time.time() - started
        print(func.__name__, elapsed)
        return result
    return wrapper
@timethis
def design_comand(match_file):
    # Run Rosetta enzyme_design on a single matched pose (match_file),
    # writing scores to <match_file>_DE.out and the log to
    # <match_file>_design.log. Blocks until the shell command finishes;
    # the @timethis wrapper prints the elapsed time.
    command = "/home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/source/bin/enzyme_design.linuxgccrelease @design_out.flags -database /home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/database -s "+ match_file + " -out:file:o " + match_file + "_DE.out > " + match_file + "_design.log"
    os.system(command)
# def get_design_params(ligand_name, params=('6.0', '8.0', '10.0', '12.0', '5')):
# # """
# # :param ligand_name:
# # :param params: (6.0, 8.0, 10, 12.0, 5)
# # :return:
# # """
# # command = ''
# # os.system(command)
from functools import partial  # retained import; no longer used below
def get_design_params(ligand_name, params=None):
    """Render design_out.flags from the design.flags template for a ligand.

    ligand_name: stem of the design ligand's .params file (not the LG* ones).
    params: optional 5-tuple of strings (cut1, cut2, cut3, cut4, nstruct);
            defaults to ('6.0', '8.0', '10.0', '12.0', '5').

    BUG FIX: the original re-invoked itself through functools.partial merely
    to apply the default tuple -- a pointless extra call; the default is now
    applied in place and execution falls straight through to the sed command.
    """
    if params is None:
        params = ('6.0', '8.0', '10.0', '12.0', '5')
    # Substitute the ligand name and the cut/nstruct values into the template,
    # writing the result to design_out.flags in the current directory.
    command = "sed -e 's/res_ligand_params_file/design_" + ligand_name + ".params/g' -e 's/enz_score.out/enz_score_" + ligand_name + ".out/g' -e 's/-cut1 6.0/-cut1 " + params[0] + "/g' -e 's/-cut2 10.0/-cut2 " + params[1] + "/g' -e 's/-cut3 15.0/-cut3 " + params[2] + "/g' -e 's/-cut4 20.0/-cut4 " + params[3] + "/g' -e 's/-nstruct 5/-nstruct " + params[4] + "/g' design.flags > design_out.flags"
    os.system(command)
def send_email(email_addr, email_content, result_file):
    # Email the given result file to the user via QQ SMTP over SSL, attached
    # under the fixed name "match_design.tar.gz".
    # SECURITY NOTE(review): sender credentials are hard-coded here; move
    # them to configuration/secret storage.
    host_server = 'smtp.qq.com'
    sender_mail_addr = '1032847174@qq.com'
    pwd = 'utxfxpzcpsnzbbcc'
    receiver_mail_addr = email_addr
    mail_content = email_content
    mail_title = "JianpingLin's email"
    # Multipart message: HTML body + binary attachment.
    msg = MIMEMultipart()
    msg['Subject'] = Header(mail_title, 'utf-8')
    msg['From'] = sender_mail_addr
    msg['To'] = Header('Receiver', 'utf-8')
    msg.attach(MIMEText(mail_content, 'html', 'utf-8'))
    # att1 = MIMEText(open(result_file).read(), 'base64', 'utf-8')
    att1 = MIMEText(open(result_file).read(), 'base64')
    # import zipfile
    # att1 = MIMEText(zipfile.ZipFile(result_file), 'base64', 'utf-8')
    att1['Content-Type'] = 'application/octet-stream'
    att1['Content-Disposition'] = 'attachment; filename="match_design.tar.gz"'
    msg.attach(att1)
    smtp = SMTP_SSL(host_server)
    smtp.set_debuglevel(1)
    smtp.ehlo(host_server)
    smtp.login(sender_mail_addr, pwd)
    smtp.sendmail(sender_mail_addr, receiver_mail_addr, msg.as_string())
    smtp.quit()
@api_view(['POST'])
@permission_classes([permissions.AllowAny])
def fourth_step(request):
    """Run the enzyme-design stage: stage the matched poses, render the design
    flags, score the designs, zip the results, and email them to the user.

    POST data: 'job_name', 'design_mini_range' (';'-separated
    'cut1;cut2;cut3;cut4;nstruct', empty string for defaults), 'user_email',
    and optionally 'design_cst' (uploaded design constraint file).
    """
    job_name = request.data['job_name']
    design_mini_range = request.data['design_mini_range']###
    user_email = request.data['user_email']
    # design_cst = request.data['design_cst']
    job_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Enzyme_design')
    work_dir = os.path.join(job_dir, job_name)
    os.chdir(work_dir)
    for filename in os.listdir(work_dir): ### ligand_name must come from the submitted mol2, never from LG*.mol2
        if filename.endswith('params') and not filename.startswith('LG'):
            ligand_name = filename.split('.params')[0]
            break
    match_design_dir = os.path.join(work_dir, 'match_design')
    if not os.path.exists(match_design_dir):
        os.mkdir(match_design_dir)
    # if design_cst != '':
    #     cst_str = design_cst.read()
    #     user_design_cst_file = os.path.join(work_dir, 'match_design', 'design.cst')
    #     prep_dock.save_to_file(user_design_cst_file, cst_str)
    # else:
    #
    # Use the uploaded design constraints if present; otherwise fall back to
    # the matcher constraints from step two. Either way, stage the matched
    # poses and the flags template into match_design/.
    try:
        design_cst = request.data['design_cst']
        cst_str = design_cst.read()
        user_design_cst_file = os.path.join(work_dir, 'match_design', 'design.cst')
        prep_dock.save_to_file(user_design_cst_file, cst_str)
    except MultiValueDictKeyError:
        os.system('cp ./inputs/match.cst ./match_design/design.cst')
    finally:
        os.system('mv UM*match*.pdb ./match_design')
        os.system('cp ../../rosetta_workflow_all_scripts/design.flags ./')
    ###To DO###
    # command = "sed -e 's/res_ligand_params_file/design_" + ligand_name + ".params/g' -e 's/enz_score.out/enz_score_" + ligand_name + ".out/g' design.flags > design_out.flags"
    # get_design_params(ligand_name, tuple(design_mini_range.split(';')))
    ####TO DO###
    # os.system(command)
    # Render design_out.flags, either with the user's 5 values or defaults.
    if design_mini_range != '':
        #design_mini_range = req0uest.data['design_mini_range']
        tpl_mini_range = tuple(design_mini_range.split(';'))
        if len(tpl_mini_range) != 5:
            return Response('Please check that the "Designable Range, Repackable Range and Number of Outputs" exists.')
        else:
            get_design_params(ligand_name, tpl_mini_range)
    else:
        get_design_params(ligand_name)
    # Strip PDB_ROTAMERS lines from the ligand params for the design stage.
    os.system("sed -r '/^PDB_ROTAMERS/d' " + ligand_name + ".params > match_design/design_" + ligand_name + ".params")
    os.system('cp design_out.flags ./match_design')
    match_dir = os.path.join(work_dir, 'match_design')
    os.chdir(match_dir)
    match_file_list = [filename for filename in os.listdir(match_dir) if filename.startswith('UM')]
    # design_comand(match_file_list[0])
    ###Post user###
    # pool = mul.Pool(5)
    # pool.map(design_comand, match_file_list)
    # pool.close()
    # pool.join()
    # Aggregate per-pose scores into design_<ligand>.out.
    design_analysis.design_score(ligand_name, './')
    params = SubmitParamter.objects.get(job_name=job_name)
    params.user_email = user_email
    design_ligandname_out = 'design_' + ligand_name.split('.')[0] + '.out'
    file = File(open(design_ligandname_out))
    params.design_ligand_name_out.save(design_ligandname_out, file)
    params.save()
    # os.chdir(work_dir)
    # os.system('zip -r match_design.zip match_design')
    # os.system('tar czvf match_design.tar.gz UM*DE*.pdb')
    # Archive the designed structures plus the score table, then email it.
    os.system('zip match_design UM*DE*.pdb ' + design_ligandname_out)
    email_content = "Welcome to Jianping Lin's group"
    match_design_file = os.path.join('./', 'match_design.zip')
    # send_email(email_addr=user_email, email_content=email_content, result_file=design_ligandname_out)
    # send_email(email_addr=user_email, email_content=email_content, result_file=match_design_file)
    # send_file_zipped(design_ligandname_out, ['1032847174@qq.com'])
    send_file_zipped(match_design_file, user_email, email_content=email_content)
    serializer = SubmitParamsSerializer(params)
    return JsonResponse(serializer.data, safe=False)
    # return Response('Successfully, this process needs ')
def get_analysis_params_dic(params):
    """Parse a 'name:value,name:value,...' string into a dict of strings.

    Each entry is split on the FIRST ':' only (str.partition), so values that
    themselves contain ':' no longer raise ValueError as the old
    "name, value = param.split(':')" did; an entry without ':' maps to ''.
    """
    dic = {}
    for param in params.split(','):
        name, _, value = param.partition(':')
        dic[name] = value
    return dic
@api_view(['POST'])
@permission_classes([permissions.AllowAny])
def fifth_step(request):
    """Filter the scored designs with Rosetta's DesignSelect.pl.

    POST data: 'job_name' and 'analysis_params' (filter criteria text that
    design_filter() writes into design_<ligand>.filter).
    """
    job_name = request.data['job_name']
    analysis_params = request.data['analysis_params'] ### e.g. "all_cst value < 0.9\nSR_2_interf_E_1_5:-9"
    # analysis_dict = get_analysis_params_dic(analysis_params) ### {all_cst:0.9, SR_2_interf_E_1_5:-9}
    job_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Enzyme_design')
    work_dir = os.path.join(job_dir, job_name)
    os.chdir(work_dir)
    # The design ligand's params file identifies the ligand stem (LG* files
    # belong to retained co-factors, not the design ligand).
    for filename in os.listdir(work_dir):
        if filename.endswith('params') and not filename.startswith('LG'):
            ligand_name = filename.split('.params')[0]
            break
    match_dir = os.path.join(work_dir, 'match_design')
    os.chdir(match_dir)
    # Write the user's criteria into design_<ligand>.filter.
    design_analysis.design_filter(ligand_name, analysis_params.strip())
    # design_analysis.design_score(ligand_name, './')
    # DesignSelect.pl selects rows of design_<ligand>.out that pass the
    # filter and writes them to filtered_designs_<ligand>.out.
    analysis_command = 'perl /home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/source/src/apps/public/enzdes/DesignSelect.pl -d ' + 'design_'+ligand_name+'.out' + ' -c ' + 'design_'+ligand_name+'.filter' + ' -tag_column last > filtered_designs_' + ligand_name +'.out'
    print analysis_command
    os.system(analysis_command)
    # serializer = SubmitParamsSerializer(params)
    # return JsonResponse(serializer.data, safe=False)
    return Response('Successfully')
class SubmitParamsViewSet(viewsets.DynamicModelViewSet):
    """REST CRUD endpoint exposing all SubmitParamter job records."""
    queryset = SubmitParamter.objects.all()
    serializer_class = serializers.SubmitParamsSerializer
class OnlinedockViewSet(viewsets.DynamicModelViewSet):
    """REST CRUD endpoint exposing all Onlinedock records."""
    queryset = Onlinedock.objects.all()
    serializer_class = serializers.OnlinedockSerializer
|
from models import Person
from models import Skeleton
from models import Base_dolni
from models import Dolen_vrata
# Build two base-cabinet skeletons and render two bottom modules.
# NOTE(review): naming is confusing -- 'st' holds Person("Stoian") while the
# variable named 'Stoian' holds Person("Ivanov"); confirm which owner each
# skeleton is meant to use. Also 'dolni' is created but never used.
st = Person("Stoian")
Stoian = Person("Ivanov")
# Skeleton(owner, 900, 600/590, 2, 18, 28, 40) -- the numeric arguments are
# presumably dimensions/panel thicknesses; verify against models.Skeleton.
dolni = Skeleton(st, 900, 600, 2, 18, 28, 40)
dolni_st = Skeleton(Stoian, 900, 590, 2, 18, 28, 40)
# 550-wide base unit ("dolen do mivkata" = bottom unit by the sink).
dol_001 = Base_dolni(dolni_st, 550)
dol_001.set_description("dolen do mivkata")
dol_001.rendModul()
# 400-wide bottom unit with 2 doors (description is Bulgarian:
# "bottom second with 2 doors").
dol_002 = Dolen_vrata(dolni_st, 400, 2)
dol_002.set_description("долен втори с 2 врати")
dol_002.rendModul()
|
from . import UbuntuPackageManager, RedHatPackageManager, SolarisPackageManager, RpmMixin
from infi import unittest
from infi.run_as_root import RootPermissions
from contextlib import contextmanager
from infi import pkgmgr
from mock import patch
import distro
# pylint: disable-all
class TestOnUbuntu(unittest.TestCase):
    """Integration test of UbuntuPackageManager against the real apt/dpkg.

    Skipped unless running on Ubuntu as root, because it genuinely installs
    or removes the sg3-utils package on the host.
    """
    def _running_on_ubuntu(self):
        return distro.id() == "ubuntu"
    def setUp(self):
        super(TestOnUbuntu, self).setUp()
        self._should_skip()
    def _should_skip(self):
        # Guard: real package operations require Ubuntu and root privileges.
        if not self._running_on_ubuntu():
            raise self.skipTest("This test runs only on ubuntu")
        if not RootPermissions().is_root():
            raise self.skipTest("This test must run with root permissions")
    def test_sg3_utils(self):
        from infi.execute import execute
        execute('apt-get update'.split())
        self._check_package("sg3-utils", "/usr/bin/sg_inq")
    def _check_package(self, package_name, executable_name):
        # Verify is_package_installed agrees with the filesystem heuristic,
        # then flip the installation state and check the manager notices.
        pkgmgr = UbuntuPackageManager()
        is_installed_before = self._is_package_seems_to_be_installed(package_name, executable_name)
        self.assertEqual(pkgmgr.is_package_installed(package_name), is_installed_before)
        # Do the opposite
        pkgmgr.install_package(package_name) if not is_installed_before else pkgmgr.remove_package(package_name)
        self.assertNotEqual(pkgmgr.is_package_installed(package_name), is_installed_before)
    def _is_package_seems_to_be_installed(self, package_name, executable_name):
        # Heuristic: the package counts as installed if its binary exists.
        from os.path import exists
        return exists(executable_name)
    def test_check_unknown_package(self):
        pkgmgr = UbuntuPackageManager()
        self.assertFalse(pkgmgr.is_package_installed('blablabla9988ok'))
class TestOnRedHat(unittest.TestCase):
    """Integration test of RedHatPackageManager against the real rpm/yum.

    Skipped unless running on RHEL as root, because it genuinely installs
    or removes the sg3_utils package on the host.
    """
    def _running_on_redhat(self):
        return distro.id() == "rhel"
    def setUp(self):
        super(TestOnRedHat, self).setUp()
        self._should_skip()
    def _should_skip(self):
        # Guard: real package operations require RHEL and root privileges.
        if not self._running_on_redhat():
            raise self.skipTest("This test runs only on red hat")
        if not RootPermissions().is_root():
            raise self.skipTest("This test must run with root permissions")
    def test_sg3_utils(self):
        self._check_package("sg3_utils", "/usr/bin/sg_inq")
    def _check_package(self, package_name, executable_name):
        # Same flip-the-state strategy as the Ubuntu variant above.
        pkgmgr = RedHatPackageManager()
        is_installed_before = self._is_package_seems_to_be_installed(package_name, executable_name)
        self.assertEqual(pkgmgr.is_package_installed(package_name), is_installed_before)
        # Do the opposite
        pkgmgr.install_package(package_name) if not is_installed_before else pkgmgr.remove_package(package_name)
        self.assertNotEqual(pkgmgr.is_package_installed(package_name), is_installed_before)
    def _is_package_seems_to_be_installed(self, package_name, executable_name):
        # Heuristic: the package counts as installed if its binary exists.
        from os.path import exists
        return exists(executable_name)
class Output(object):
    """Lightweight stand-in for an infi.execute command result, exposing the
    same accessor API (stdout / stderr / returncode / wait) that the package
    managers consume."""
    def __init__(self, returncode=0, stdout='', stderr=''):
        super(Output, self).__init__()
        self._returncode, self._stdout, self._stderr = returncode, stdout, stderr
    def get_stdout(self):
        """Captured standard output, exactly as supplied to the constructor."""
        return self._stdout
    def get_stderr(self):
        """Captured standard error, exactly as supplied to the constructor."""
        return self._stderr
    def get_returncode(self):
        """Process exit status (defaults to 0 = success)."""
        return self._returncode
    def wait(self, timeout=None):
        """No-op: the fake result is already complete."""
        pass
class TestUbuntuMock(TestOnUbuntu):
    """Re-runs the Ubuntu tests with infi.execute.execute patched, simulating
    dpkg/apt responses so the suite works on any host without root."""
    def _should_skip(self):
        # Never skip: no real package operations happen under the mock.
        pass
    def _dpkg_query_s(self):
        # Fake 'dpkg-query -s' output for installed vs. missing sg3-utils.
        from textwrap import dedent
        if self._installed:
            return Output(stdout=dedent("""
            Package: sg3-utils
            Status: installed ok installed
            Priority: optional
            Version: 1.30-1
            Section: admin
            """).encode("ascii"))
        else:
            return Output(stdout=dedent("""
            dpkg-query: package sg3-utils is not installed and no information is available
            Use dpkg --info (= dpkg-deb --info) to examine archive files,
            and dpkg --contents (= dpkg-deb --contents) to list their contents.
            """).encode("ascii"), returncode=1)
    def _dpkg_query_l(self):
        # Fake 'dpkg-query -l' table; state column is 'ii' when installed,
        # 'un' otherwise.
        from textwrap import dedent
        return Output(stdout=dedent("""
        Desired=Unknown/Install/Remove/Purge/Hold
        | Status=Not/Inst/Conf-files/Unpacked/halF-conf/Half-inst/trig-aWait/Trig-pend
        |/ Err?=(none)/Reinst-required (Status,Err: uppercase=bad)
        ||/ Name Version Architecture Description
        +++-===========================-==================-==================-===========================================================
        {} sg3-utils 1.30-1 i386 utilities for devices using the SCSI command set
        """.format("ii" if self._installed else "un")).encode("ascii"))
    def _apt_get_install(self):
        # Simulated install flips the tracked state.
        self._installed = True
        return Output()
    def _apt_get_update(self):
        return Output()
    @contextmanager
    def _apply_patches(self):
        # Route every execute() call to the matching fake response based on
        # the command line; anything unexpected fails loudly.
        with patch("infi.execute.execute") as execute:
            def side_effect(*args, **kwargs):
                command = args[0]
                if "dpkg-query" in command:
                    if "-s" in command:
                        return self._dpkg_query_s()
                    if "-l" in command:
                        return self._dpkg_query_l()
                elif "apt-get install" in ' '.join(command):
                    return self._apt_get_install()
                elif "apt-get update" in ' '.join(command):
                    return self._apt_get_update()
                raise NotImplementedError()
            execute.side_effect = side_effect
            yield
    def test_sg3_utils(self):
        with self._apply_patches():
            super(TestUbuntuMock, self).test_sg3_utils()
    def test_check_unknown_package(self):
        with self._apply_patches():
            super(TestUbuntuMock, self).test_check_unknown_package()
    def setUp(self):
        # Intentionally does NOT call super().setUp(): skipping is disabled
        # and the simulated state starts as "not installed".
        self._installed = False
    def _is_package_seems_to_be_installed(self, package_name, executable_name):
        # Mocked heuristic: report the simulated state, not the filesystem.
        return self._installed
class TestRedHatMock(TestOnRedHat):
    """Re-runs the RedHat tests with infi.execute.execute patched, simulating
    rpm/yum responses so the suite works on any host without root."""
    def _should_skip(self):
        # Never skip: no real package operations happen under the mock.
        pass
    def _rpm_query(self):
        # Fake 'rpm -q' output: package NEVR when installed, error text and
        # returncode 1 otherwise.
        return Output(stdout=b'sg3_utils-1.25-5.el5' if self._installed else b'package sg3_utils is not installed',
                      returncode=0 if self._installed else 1)
    def _yum_install(self):
        # Simulated install flips the tracked state.
        self._installed = True
        return Output()
    @contextmanager
    def _apply_patches(self):
        # Route every execute() call to the matching fake response based on
        # the command line; anything unexpected fails loudly.
        with patch("infi.execute.execute") as execute:
            def side_effect(*args, **kwargs):
                command = args[0]
                if "-q" in command:
                    return self._rpm_query()
                elif "install" in command:
                    return self._yum_install()
                raise NotImplementedError()
            execute.side_effect = side_effect
            yield
    def test_sg3_utils(self):
        with self._apply_patches():
            super(TestRedHatMock, self).test_sg3_utils()
            pass
    def setUp(self):
        # Intentionally does NOT call super().setUp(): skipping is disabled
        # and the simulated state starts as "not installed".
        self._installed = False
    def _is_package_seems_to_be_installed(self, package_name, executable_name):
        # Mocked heuristic: report the simulated state, not the filesystem.
        return self._installed
class test_package_versioning(unittest.TestCase):
    """Parsing tests for get_installed_version() across package back-ends,
    with execute_command patched to return canned pkginfo/dpkg/rpm output."""
    # Canned command outputs (bytes, as the real execute layer returns them).
    Solaris_v1 = b""" VERSION: 6.0.100.000,REV=08.01.2012.09.00"""
    Solaris_v2 = b""" VERSION: 5.14.2.5"""
    Ubuntu_v1 = b"""Version: 0.4.9-3ubuntu7.2"""
    Ubuntu_v2 = b"""Version: 1:1.2.8.dfsg-1ubuntu1"""
    rpm_v1 = b"""4.8-7.el7"""
    rpm_v2 = b"""18.168.6.1-34.el7"""
    def test_solaris_versioning_v1(self):
        # Solaris output with a ,REV= suffix splits into version + revision.
        with patch.object(pkgmgr, 'execute_command') as patched:
            patched().get_stdout.return_value = self.Solaris_v1
            patched().get_returncode.return_value = 0
            result = SolarisPackageManager().get_installed_version(self.Solaris_v1)
            self.assertEqual(result, {'version': '6.0.100.000', 'revision': '08.01.2012.09.00'})
    def test_solaris_versioning_v2(self):
        # Solaris output without a revision yields only 'version'.
        with patch.object(pkgmgr, 'execute_command') as patched:
            patched().get_stdout.return_value = self.Solaris_v2
            patched().get_returncode.return_value = 0
            result = SolarisPackageManager().get_installed_version(self.Solaris_v2)
            self.assertEqual(result, {'version': '5.14.2.5'})
    def test_ubuntu_versioning_v1(self):
        with patch.object(pkgmgr, 'execute_command') as patched:
            patched().get_stdout.return_value = self.Ubuntu_v1
            patched().get_returncode.return_value = 0
            result = UbuntuPackageManager().get_installed_version(self.Ubuntu_v1)
            self.assertEqual(result, {'version': '0.4.9-3ubuntu7.2'})
    def test_ubuntu_versioning_v2(self):
        # Epoch-prefixed Debian version strings must be kept intact.
        with patch.object(pkgmgr, 'execute_command') as patched:
            patched().get_stdout.return_value = self.Ubuntu_v2
            patched().get_returncode.return_value = 0
            result = UbuntuPackageManager().get_installed_version(self.Ubuntu_v2)
            self.assertEqual(result, {'version': '1:1.2.8.dfsg-1ubuntu1'})
    def test_rpm_versioning_v1(self):
        with patch.object(pkgmgr, 'execute_command') as patched:
            patched().get_stdout.return_value = self.rpm_v1
            patched().get_returncode.return_value = 0
            result = RpmMixin().get_installed_version(self.rpm_v1)
            self.assertEqual(result, {'version': '4.8-7.el7'})
    def test_rpm_versioning_v2(self):
        with patch.object(pkgmgr, 'execute_command') as patched:
            patched().get_stdout.return_value = self.rpm_v2
            patched().get_returncode.return_value = 0
            result = RpmMixin().get_installed_version(self.rpm_v2)
            self.assertEqual(result, {'version': '18.168.6.1-34.el7'})
class GeneralTest(unittest.TestCase):
    """Smoke test that get_package_manager() returns a working manager for
    the current platform."""
    def _is_solaris(self):
        from infi.os_info import get_platform_string
        return get_platform_string().split('-')[0] == 'solaris'
    def test_get_package_manager(self):
        package_manager = pkgmgr.get_package_manager()
        # 'python' must be installed wherever this test runs; Solaris uses
        # the OpenCSW naming convention (CSW prefix).
        package_to_check = 'python'
        if self._is_solaris():
            package_to_check = 'CSW' + package_to_check
        self.assertTrue(package_manager.is_package_installed(package_to_check))
|
import random
import copy
random.seed(42)
import csv
import torch
import time
import statistics
import wandb
from model import Net, LinearRegression, LogisticRegression
def byGuide(data, val=None, test=None):
    """Split guide/target pairs into train/val/test sets by guide sequence.

    val/test: optional lists of guide sequences; when omitted, the original
    hard-coded validation (4 guides) and test (7 guides) splits are used.
    Every pair is tagged in place with an 'off' one-hot label tensor
    ([1., 0.] = "not off-target"). Returns [train_set, val_set, test_set].
    """
    # 'is None' instead of '== None': identity is the correct idiom and
    # avoids invoking __eq__ on caller-supplied objects.
    val_guides = val if val is not None else [
        "GGGTGGGGGGAGTTTGCTCCTGG",
        "GACCCCCTCCACCCCGCCTCCGG",
        "GGCCTCCCCAAAGCCTGGCCAGG",
        "GAACACAAAGCATAGACTGCGGG",
    ]
    test_guides = test if test is not None else [
        "GCAAAACTCAACCCTACCCCAGG",
        "GGCCCAGACTGAGCACGTGATGG",
        "GGGAAAGACCCAGCATCCGTGGG",
        "GGAATCCCTTCTGCAGCACCTGG",
        "GTGAGTGAGTGTGTGCGTGTGGG",
        "GATGATGATGCCCCGGGCGTTGG",
        "GCCGGAGGGGTTTGCACAGAAGG",
    ]
    train_set = []
    val_set = []
    test_set = []
    for pair in data:
        pair['off'] = torch.tensor([1., 0.])
        if pair['grna_target_sequence'] in val_guides:
            val_set.append(pair)
        elif pair['grna_target_sequence'] in test_guides:
            test_set.append(pair)
        else:
            train_set.append(pair)
    return [train_set, val_set, test_set]
def byTarget(data, train=.7, val=.1, test=.2):
random.shuffle(data)
train_set = []
val_set = []
test_set = []
for i in range(len(data)):
if i <= len(data) * train:
train_set.append(data[i])
elif i <= len(data) * (train + val):
val_set.append(data[i])
else:
test_set.append(data[i])
return [train_set, val_set, test_set]
def byStudy(data, val=None, test=None):
    """Split guide/target pairs into train/val/test sets by originating study.

    val/test: optional lists of study names; when omitted, the original
    hard-coded split is used (val: Anderson, Ran; test: Kim, Tsai, Cho).
    Every pair is tagged in place with an 'off' one-hot label tensor
    ([1., 0.] = "not off-target"). Returns [train_set, val_set, test_set].
    """
    # 'is None' instead of '== None': identity is the correct idiom and
    # avoids invoking __eq__ on caller-supplied objects.
    val_studies = val if val is not None else [
        'Anderson',
        'Ran',
    ]
    test_studies = test if test is not None else [
        'Kim',
        'Tsai',
        'Cho',
    ]
    train_set = []
    val_set = []
    test_set = []
    for pair in data:
        pair['off'] = torch.tensor([1., 0.])
        if pair['study_name'] in val_studies:
            val_set.append(pair)
        elif pair['study_name'] in test_studies:
            test_set.append(pair)
        else:
            train_set.append(pair)
    return [train_set, val_set, test_set]
def one_hot(data, sign='+'):
    """One-hot encode a nucleotide string into a (23, 4) float tensor.

    Channels are ordered A, C, G, T (case-insensitive input); any other
    character (e.g. 'n') produces an all-zero row. Sequences shorter than 23
    are zero-padded at the end, longer ones truncated to 23. For sign '-'
    the channel axis is reversed (torch.flip over dim 1), exactly as in the
    original implementation.
    """
    channel = {'a': 0, 'c': 1, 'g': 2, 't': 3}
    rows = []
    for base in data.lower():
        row = torch.zeros((1, 4))
        col = channel.get(base)
        if col is not None:
            row[0][col] = 1
        rows.append(row)
    sequence = torch.cat(rows, dim=0)
    # Normalize to exactly 23 positions: zero-pad or truncate.
    if sequence.size(0) < 23:
        pad = torch.zeros((23 - sequence.size(0), 4))
        sequence = torch.cat((sequence, pad), dim=0)
    elif sequence.size(0) > 23:
        sequence = sequence[:23]
    if sign == '-':
        sequence = torch.flip(sequence, [1])
    return sequence
# import numpy as np
def dataLoader(file="crisprsql.csv", batch=64, mode="target"):
    """Load guide/target pairs from a crisprSQL-style CSV and build batches.

    mode selects the split strategy: 'study' -> byStudy, 'guide' -> byGuide,
    anything else -> random byTarget split. Rows with an empty or negative
    cleavage_freq are dropped. Returns [data, dl] where dl is a list of three
    torch DataLoaders (train shuffled, val/test not) and data is a list of
    three lists of manually pre-batched [[guide, target], cfd] tensors.
    """
    ftime = time.monotonic()
    with open(file) as f:
        d = list(csv.DictReader(f))
    if mode == "study":
        loadData = byStudy(d)
    elif mode == "guide":
        loadData = byGuide(d)
    else:
        loadData = byTarget(d)
    data = list()
    dl = list()
    train = True
    # t = 0, 1, 2 -> train, validation, test splits.
    for t in range(3):
        average_value = list()
        thisdata = list()
        for line in loadData[t]:
            # Keep only rows with a usable (non-empty, non-negative) label.
            if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:
                thisdata.append([
                    [one_hot(line['grna_target_sequence'], line['grna_target_strand']),
                    one_hot(line['target_sequence'], line["target_strand"])],
                    torch.tensor([float(line['cleavage_freq'])])])
                average_value.append(float(line['cleavage_freq']))
        # if line
        # mode = 0
        # zero = 0
        # for p in average_value:
        #     if p == statistics.mode(average_value):
        #         mode+=1
        #     if p <0:
        #         zero+=1
        # print(f"average CFD of {len(average_value)} datapoints in set {t + 1}: {sum(average_value)/len(average_value)}.\nMedian: {statistics.median(average_value)}.\nMode: {statistics.mode(average_value)} with {mode} datapoint.\nstandard deviation: {statistics.pstdev(average_value)}.\nlowest value: {min(average_value)}.\nHighest value: {max(average_value)}\n{zero} datapoints below zero\n\n")
        # Only the first (train) loader shuffles.
        # NOTE(review): num_workers is 4 on both branches of the conditional.
        if train == True:
            dl.append(torch.utils.data.DataLoader(thisdata, batch, True, num_workers=(4 if torch.cuda.is_available() else 4)))
            print(thisdata[0][0][0].size())
            train = False
        else:
            dl.append(torch.utils.data.DataLoader(thisdata, batch, False, num_workers=(4 if torch.cuda.is_available() else 4)))
        # Manually pre-batch full batches (the trailing partial batch is
        # dropped) by concatenating along a new leading batch dimension.
        # NOTE(review): unsqueeze_ mutates the tensors stored in thisdata
        # in place.
        thisdata1 = list()
        for i in range(int(len(thisdata)/batch)):
            ones = None
            twos = None
            threes = None
            for j in range(batch):
                if ones == None:
                    ones = thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)
                    twos = thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)
                    threes = thisdata[(i * batch) + j][1].unsqueeze_(0)
                else:
                    ones = torch.cat((ones, thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)), dim=0)
                    twos = torch.cat((twos, thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)), dim=0)
                    threes = torch.cat((threes, thisdata[(i * batch) + j][1].unsqueeze_(0)), dim=0)
            thisdata1.append([[ones, twos], threes])
        data.append(thisdata1)
    print('time to load data: ', time.monotonic() - ftime, 'seconds')
    return [data, dl]
# from scipy.stats import rankdata
class CRISPRDataset(torch.utils.data.Dataset):
    """Map-style dataset over pre-encoded (guide, target, cleavage) triples.

    Each stored item is ``[[guide_tensor, target_tensor], cfd_tensor]``;
    ``__getitem__`` normalizes every tensor to carry exactly one leading
    batch-like dimension before handing it to the collate function.
    """

    def __init__(self, thisdata):
        self.thisdata = thisdata

    def __len__(self):
        return len(self.thisdata)

    def __getitem__(self, idx):
        (guide_seq, target_seq), cfd = self.thisdata[idx]
        # squeeze away any singleton dims, then re-add a single leading one
        return {
            # (1, 23, 4) one-hot encodings
            'target': torch.squeeze(target_seq).unsqueeze_(dim=0),
            'guide': torch.squeeze(guide_seq).unsqueeze_(dim=0),
            # (1,) regression target
            'cfd': torch.squeeze(cfd).unsqueeze_(dim=0),
        }
def collate_fn(batch):
    """Stack a list of sample dicts into batched tensors, per key.

    Samples where any of 'cfd'/'target'/'guide' has an empty shape are
    skipped (with diagnostic prints). A key with no surviving samples maps
    to an empty tensor instead of raising inside ``torch.stack``.
    """
    buckets = {key: [] for key in batch[0].keys()}
    for sample in batch:
        usable = (sum(list(sample['cfd'].shape)) > 0
                  and sum(list(sample['target'].shape)) > 0
                  and sum(list(sample['guide'].shape)) > 0)
        if usable:
            for key in sample.keys():
                buckets[key].append(sample[key])
        else:
            # diagnostics for malformed samples that slipped through loading
            print('1', sum(list(sample['cfd'].shape)), sample['cfd'])
            print('2', sum(list(sample['target'].shape)), len(sample['target'].shape), sample['target'].tolist())
            print('3', sum(list(sample['guide'].shape)), len(sample['guide'].shape))
    output = {}
    for key, values in buckets.items():
        output[key] = torch.stack(values) if len(values) > 0 else torch.tensor([])
    return output
import pandas as pd
def rankDataLoader(file="crisprsql.csv", batch=64, mode="target", ):
    """Load crisprsql.csv and build train/val/test DataLoaders whose labels are
    the *rank* (position / total) of each cleavage frequency over the whole file.

    The CSV is split into 3 folds by ``mode`` (byStudy / byGuide / byTarget);
    the first fold becomes the shuffled training loader. Returns
    ``[data, dl]`` where ``data`` holds manually pre-batched tensors and
    ``dl`` holds the three DataLoaders.
    """
    ftime = time.monotonic()
    with open(file) as f:
        d = list(csv.DictReader(f))
    if mode == "study":
        loadData = byStudy(d)
    elif mode == "guide":
        loadData = byGuide(d)
    else:
        loadData = byTarget(d)
    data = list()
    dl = list()
    train = True
    # sorted list of every valid cleavage frequency; a row's label is its
    # normalized position in this list (empirical CDF of the value)
    ranks = list()
    for line in d:
        if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:
            ranks.append(float(line['cleavage_freq']))
    ranks.sort()
    for t in range(3):
        df = pd.DataFrame(loadData[t])
        # df.drop(df.columns.difference(['cleavage_freq']), 1, inplace=True)
        # pd.to_numeric(df['cleavage_freq']
        # NOTE(review): the converted series is discarded (not assigned back),
        # so dropna below sees the original strings; the explicit '' check in
        # the loop is what actually filters missing values — confirm intent.
        pd.to_numeric(df.cleavage_freq, errors='coerce')
        # cleave = df.cleavage_freq
        # df_ = pd.DataFrame(loadData[t]).drop(['cleavage_freq'], 1, inplace=True)
        # df_.join(cleave)
        df.dropna(subset=['cleavage_freq'], inplace=True)
        print(df.head())
        average_value = list()
        thisdata = list()
        for line in df.to_dict("records"):
            if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:
                # [[guide one-hot, target one-hot], rank-normalized label]
                thisdata.append([
                    [one_hot(line['grna_target_sequence'], line['grna_target_strand']),
                    one_hot(line['target_sequence'], line["target_strand"])],
                    torch.tensor(ranks.index(float(line['cleavage_freq'])) / len(ranks))])
                average_value.append(float(line['cleavage_freq']))
            # if line
        # mode = 0
        # zero = 0
        # for p in average_value:
        #     if p == statistics.mode(average_value):
        #         mode+=1
        #     if p <0:
        #         zero+=1
        # print(f"average CFD of {len(average_value)} datapoints in set {t + 1}: {sum(average_value)/len(average_value)}.\nMedian: {statistics.median(average_value)}.\nMode: {statistics.mode(average_value)} with {mode} datapoint.\nstandard deviation: {statistics.pstdev(average_value)}.\nlowest value: {min(average_value)}.\nHighest value: {max(average_value)}\n{zero} datapoints below zero\n\n")
        # only the first fold is treated as training data (shuffle=True)
        if train == True:
            # dl.append(torch.utils.data.DataLoader(thisdata, batch, True, num_workers=(1 if torch.cuda.is_available() else 0)))
            dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata), batch, True, collate_fn=collate_fn, num_workers=(1 if torch.cuda.is_available() else 0)))
            # print(thisdata[0][0][0])
            train = False
        else:
            # dl.append(torch.utils.data.DataLoader(thisdata, batch, False, num_workers=(1 if torch.cuda.is_available() else 0)))
            dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata), batch, False, collate_fn=collate_fn, num_workers=(1 if torch.cuda.is_available() else 0)))
        # import pdb; pdb.set_trace()
        # manual pre-batching: concatenates `batch` samples into
        # [[guides, targets], labels]; any trailing partial batch is dropped
        thisdata1 = list()
        for i in range(int(len(thisdata)/batch)):
            ones = None
            twos = None
            threes = None
            for j in range(batch):
                if ones == None:
                    ones = thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)
                    twos = thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)
                    threes = thisdata[(i * batch) + j][1].unsqueeze_(0)
                else:
                    ones = torch.cat((ones, thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)), dim=0)
                    twos = torch.cat((twos, thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)), dim=0)
                    threes = torch.cat((threes, thisdata[(i * batch) + j][1].unsqueeze_(0)), dim=0)
            thisdata1.append([[ones, twos], threes])
        data.append(thisdata1)
    print('time to load data: ', time.monotonic() - ftime, 'seconds')
    return [data, dl]
def fullDataLoader(file="augmentcrisprsql.csv", batch=64, mode="target", target='rank'):
    """Load the augmented CSV and build train/val/test DataLoaders.

    ``target`` selects the label column: 'regular' uses the raw
    cleavage_freq (a scalar), 'rank' uses the pre-computed ranked value
    (a 1-element list), anything else produces a 2-class one-hot from the
    thresholded column. Returns ``[data, dl]`` like ``rankDataLoader``.
    """
    ftime = time.monotonic()
    with open(file) as f:
        d = list(csv.DictReader(f))
    random.shuffle(d)
    if mode == "study":
        loadData = byStudy(d)
    elif mode == "guide":
        loadData = byGuide(d)
    else:
        loadData = byTarget(d)
    data = list()
    dl = list()
    train = True
    for t in range(3):
        average_value = list()
        thisdata = list()
        # q counts rows rejected by the shape sanity check below
        q = 0
        for line in loadData[t]:
            if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:
                # NOTE(review): 'regular' yields a bare float while the other
                # modes yield lists, so torch.tensor(label) differs in rank
                # between modes — confirm downstream handles both.
                if target == 'regular':
                    label = float(line['cleavage_freq'])
                elif target == 'rank':
                    label = [float(line['ranked_cleavage_freq'])]
                else:
                    label = [0, 1] if float(line['threshhold_cleavage_freq']) == 0 else [1, 0]
                # drop rows whose encodings collapsed to empty tensors
                if sum(list(torch.tensor([label]).shape)) > 0 and sum(list(one_hot(line['grna_target_sequence'], line['grna_target_strand']).shape)) > 0 and sum(list(one_hot(line['target_sequence'], line["target_strand"]).shape)) > 0:
                    thisdata.append([
                        [one_hot(line['grna_target_sequence'], line['grna_target_strand']),
                        one_hot(line['target_sequence'], line["target_strand"])],
                        torch.tensor(label)])
                    average_value.append(label)
                    # print(sum(list(torch.tensor([label]).shape)), sum(list(one_hot(line['grna_target_sequence'], line['grna_target_strand']).shape)), sum(list(one_hot(line['target_sequence'], line["target_strand"]).shape)))
                else:
                    q+=1
                    print(sum(list(torch.tensor([label]).shape)), sum(list(one_hot(line['grna_target_sequence'], line['grna_target_strand']).shape)), sum(list(one_hot(line['target_sequence'], line["target_strand"]).shape)))
                # print(torch.tensor([label), len(torch.tensor([label]).shape))
        print(q)
        # if line
        # mode = 0
        # zero = 0
        # for p in average_value:
        #     if p == statistics.mode(average_value):
        #         mode+=1
        #     if p <0:
        #         zero+=1
        # print(f"average CFD of {len(average_value)} datapoints in set {t + 1}: {sum(average_value)/len(average_value)}.\nMedian: {statistics.median(average_value)}.\nMode: {statistics.mode(average_value)} with {mode} datapoint.\nstandard deviation: {statistics.pstdev(average_value)}.\nlowest value: {min(average_value)}.\nHighest value: {max(average_value)}\n{zero} datapoints below zero\n\n")
        # first fold = shuffled training loader, the rest are eval loaders
        if train == True:
            # dl.append(torch.utils.data.DataLoader(thisdata, batch, True, num_workers=(1 if torch.cuda.is_available() else 0)))
            dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata), batch, True, collate_fn=collate_fn, num_workers=4))
            # print(thisdata[0][0][0])
            train = False
        else:
            # dl.append(torch.utils.data.DataLoader(thisdata, batch, False, num_workers=(1 if torch.cuda.is_available() else 0)))
            dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata), batch, False, collate_fn=collate_fn, num_workers=4))
        # import pdb; pdb.set_trace()
        # manual pre-batching (trailing partial batch is dropped)
        thisdata1 = list()
        for i in range(int(len(thisdata)/batch)):
            ones = None
            twos = None
            threes = None
            for j in range(batch):
                if ones == None:
                    ones = thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)
                    twos = thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)
                    threes = thisdata[(i * batch) + j][1].unsqueeze_(0)
                else:
                    ones = torch.cat((ones, thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)), dim=0)
                    twos = torch.cat((twos, thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)), dim=0)
                    threes = torch.cat((threes, thisdata[(i * batch) + j][1].unsqueeze_(0)), dim=0)
            thisdata1.append([[ones, twos], threes])
        data.append(thisdata1)
    print('time to load data: ', time.monotonic() - ftime, 'seconds')
    return [data, dl]
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import f1_score
from sklearn.metrics import auc
def roc(labels, outputs):
    """Binarize regression labels/outputs at a threshold and compute metrics.

    Both tensors are flattened; values <= threshold become class 0, others
    class 1. Returns ``{threshold: [ROC_AUC, PR_AUC, plain accuracy]}``.
    Currently only one threshold (0.01) is evaluated; the range is kept so
    more thresholds can be re-enabled.
    """
    llabels = labels.flatten().tolist()
    loutputs = outputs.flatten().tolist()
    average_values = dict()
    for i in range(1, 2):
        threshold = .01 / i
        thislabel = [0 if value <= threshold else 1 for value in llabels]
        thisoutput = [0 if value <= threshold else 1 for value in loutputs]
        pres = sum(1 for lab, out in zip(thislabel, thisoutput) if lab == out)
        totalpres = len(thislabel)
        lr_precision, lr_recall, _ = precision_recall_curve(thislabel, thisoutput)
        # bug fix: the key used to be ``.1 / i`` while the threshold actually
        # applied above was ``.01 / i``; the key now reports the real threshold
        average_values[threshold] = [roc_auc_score(thislabel, thisoutput),
                                     auc(lr_recall, lr_precision),
                                     pres / totalpres]
    return average_values
def accuracy(labels, outputs, percent=.10):
    """Fraction of predictions that fall within ±``percent`` of their label.

    Both tensors are flattened and compared element-wise; a prediction is
    "correct" when ``label*(1-percent) <= prediction <= label*(1+percent)``.
    Returns 0.0 for an empty batch (previously raised ZeroDivisionError).
    """
    llabels = labels.flatten().tolist()
    loutputs = outputs.flatten().tolist()
    if not llabels:
        # empty batch: nothing to score, avoid dividing by zero
        return 0.0
    correct = sum(
        1 for lab, out in zip(llabels, loutputs)
        if lab * (1 - percent) <= out <= lab * (1 + percent)
    )
    return correct / len(llabels)
def percentError(outputs, labels):
    """Mean absolute error of ``outputs`` relative to ``labels`` (element-wise)."""
    relative_error = torch.abs(labels - outputs) / labels
    return torch.mean(relative_error)
def Test(net, dataset, device, crit, logpath=None):
    """Evaluate ``net`` on ``dataset`` with gradients disabled.

    Accumulates argmax accuracy and the mean of ``crit`` per batch, prints
    the summary (and appends it to ``logpath`` when given), and returns the
    accuracy as a percentage.
    """
    net.eval()
    correct = 0
    total = 0
    totalloss = 0
    loss = 0
    with torch.no_grad():
        for _, batch in enumerate(dataset, 0):
            inputs, labels = batch[0], batch[1].to(device)
            outputs = net(inputs)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            totalloss += 1
            correct += (predicted == labels).sum().item()
            loss += crit(outputs, labels)
    if logpath != None:
        f = open(logpath, 'w')
        f.write('Accuracy of the network on the 10000 test images: %d %%' % (
            100 * correct / total))
        f.write(f"total: {total} correct: {correct}")
        f.write(f'loss: {loss/totalloss}')
        f.close()
    print('Accuracy of the network on the 10000 test images: %d %%' % (
        100 * correct / total))
    print(f"total: {total} correct: {correct}")
    print(f'loss: {loss/totalloss}')
    return 100 * correct / total
def getAllStudy():
    """Group the distinct guide sequences in crisprsql.csv by study and print them.

    Single-letter "sequences" (C/G/A/T placeholder rows) are skipped.
    Returns the mapping ``{study_name: set_of_guide_sequences}`` — previously
    this returned None; callers that ignore the return value are unaffected.
    """
    alls = dict()
    with open("crisprsql.csv") as f:
        for row in csv.DictReader(f):
            if row['grna_target_sequence'] not in ["C", 'G', 'A', "T"]:
                # bug fix: the old code initialized with set(sequence), which
                # built a set of the sequence's *characters*, silently losing
                # the first sequence seen for each study
                alls.setdefault(row['study_name'], set()).add(row['grna_target_sequence'])
    for study in alls:
        print(study)
        print(alls[study])
        print(len(alls[study]))
    return alls
def getallGuide():
    """Group the distinct target sequences in crisprsql.csv by guide and print them.

    Single-letter guide "sequences" (C/G/A/T placeholder rows) are skipped.
    Returns ``{grna_target_sequence: set_of_target_sequences}`` — previously
    this returned None; callers that ignore the return value are unaffected.
    """
    alls = dict()
    with open("crisprsql.csv") as f:
        for row in csv.DictReader(f):
            if row['grna_target_sequence'] not in ["C", 'G', 'A', "T"]:
                # bug fix: set(sequence) split the first target sequence into
                # a set of single characters instead of storing it whole
                alls.setdefault(row['grna_target_sequence'], set()).add(row['target_sequence'])
    for guide in alls:
        print(guide)
        print(alls[guide])
        print(len(alls[guide]))
    return alls
def aboveandbelow(threshold):
    """Report the fraction of usable cleavage frequencies above ``threshold``.

    Rows with a single-letter guide sequence or an empty cleavage_freq are
    ignored. Prints the split and returns ``(above_frac, below_frac)`` —
    previously returned None; the added return value is backward-compatible.
    """
    with open("crisprsql.csv") as f:
        data = csv.DictReader(f)
        above = 0
        total = 0
        for row in data:
            if row['grna_target_sequence'] not in ["C", 'G', 'A', "T"] and row['cleavage_freq'] != '':
                if float(row['cleavage_freq']) > threshold:
                    above += 1
                total += 1
    if total == 0:
        # bug fix: the original divided by zero when the file held no usable rows
        print('Above: 0%. Below: 0')
        return 0.0, 0.0
    print(f'Above: {above / total}%. Below: {(total - above) / total}')
    return above / total, (total - above) / total
def NewTrain(epochs, optim, crit, batch_per, train_data, val_data, net, device, optim_time=None, logpath=None):
    """Training loop with wandb logging, periodic checkpointing and a staged
    optimizer schedule.

    Args:
        epochs: unused; per-stage epoch counts come from ``optim_time``.
        optim: initial optimizer (immediately replaced per ``optim_time`` stage).
        crit: loss criterion applied as ``crit(outputs, labels)``.
        batch_per: number of mini-batches between mid-epoch metric logs.
        train_data: list of ``(inputs, labels)`` batches (shuffled in place
            each epoch).
        val_data: iterable of ``(inputs, labels)`` validation batches.
        net: model to train; moved onto ``device``.
        device: torch device labels are moved to.
        optim_time: list of ``(num_epochs, optimizer)`` stages run in order.
        logpath: optional path; timing/summary lines are written there.
    """
    net.to(device)
    #def optim, loss, and init graph data
    criterion = crit
    optimizer = optim
    # get all labels for ROC
    full_full_labels = None
    for i, data in enumerate(train_data, 0):
        if full_full_labels == None:
            full_full_labels = data[1].to(device)
        else:
            full_full_labels = torch.cat((full_full_labels, data[1].to(device)), 0)
    # same accumulation for the validation labels
    full_val_labels = None
    for i, data in enumerate(val_data, 0):
        if full_val_labels == None:
            full_val_labels = data[1].to(device)
        else:
            full_val_labels = torch.cat((full_val_labels, data[1].to(device)), 0)
    print("begin training")
    if logpath!= None:
        f = open(logpath, 'w')
    #these go down, and random loss is ~2.303 so 15 will be replaced
    best = 15
    bestval = 15
    bestepoch = 0
    # e is the global epoch counter across all optimizer stages
    e = 0
    # begin training loop, larget loop is for lr scedule
    times = list()
    # bestnet = LogisticRegression()
    # bestnet.load_state_dict(copy.deepcopy(net.state_dict()))
    for q in optim_time:
        # q = (num_epochs, optimizer) for this stage.
        # NOTE(review): the inner ``for q in wl`` loops below shadow this q;
        # harmless only because q[0] is read once when building the range.
        optimizer = q[1]
        print(q[0])
        # net.load_state_dict(copy.deepcopy(bestnet.state_dict())
        # print(
        #     'params', [p for p in net.parameters()],
        #     '\ngrads', [p.grad for p in net.parameters()]
        # )
        # epoch loop
        for epoch in range(q[0]): # loop over the dataset multiple times
            ftime = time.monotonic()
            random.shuffle(train_data)
            correct = 0
            total = 0
            running_loss = 0.0
            # train mode
            net.train()
            # full_output/full_labels: since the last mid-epoch log;
            # full_full_output: the whole epoch's outputs, for epoch-level ROC
            full_output = None
            full_labels = None
            full_full_output = None
            for i, data in enumerate(train_data, 0):
                # train step
                inputs, labels = data[0], data[1].to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward + backward + optimize
                # t = time.monotonic()
                outputs = net(inputs)
                # print(time.monotonic - t, " seconds for 512 outputs")
                loss = criterion(outputs, labels)
                loss.backward()
                # import pdb; pdb.set_trace()
                # things to look at:
                # - loss
                # - parameters
                # - inputs
                # - grads
                # if e % 300 == 299:
                #     print(
                #         'loss', loss,
                #         # '\ninputs', inputs,
                #         '\nlabels', labels,
                #         '\noutputs', outputs
                #     )
                optimizer.step()
                _, predicted = torch.max(outputs.data, 1)
                total+= labels.size(0)
                correct += (predicted == labels).sum().item()
                # print()
                running_loss += loss.item()
                if full_output == None:
                    full_output = outputs
                else:
                    full_output = torch.cat((full_output, outputs), 0)
                if full_labels == None:
                    full_labels = labels
                else:
                    full_labels = torch.cat((full_labels, labels), 0)
                # w = {f'output {i}': outputs.flatten()[i] for i in range(outputs.flatten().size(0))}
                # w.update({
                #     f'label {i}': labels.flatten()[i] for i in range(labels.flatten().size(0))
                # })
                # per-batch metrics pushed to wandb
                w = ({'loss': loss.item(),
                    'accuracy': accuracy(labels, outputs),
                    'percent error': percentError(outputs, labels)})
                wandb.log(
                    # {
                    #     'loss': loss.item(),
                    #     # 'params': [p for p in net.parameters()],
                    #     # 'grads': [p.grad for p in net.parameters()],
                    #     # 'inputs': inputs,
                    #     f'label {i}': labels.flatten()[i] for i in len(labels.flatten().size(0)),
                    #     f'output {i}': outputs.flatten()[i] for i in len(outputs.flatten().size(0)),
                    #     'accuracy': accuracy(labels, outputs)
                    # }
                    w
                )
                # print statistics
                if i % batch_per == batch_per - 1:    # print every 2000 mini-batches
                    print('[%d, %5d] loss: %.3f' %
                        (e + 1, i + 1, running_loss / batch_per))
                    # best = min(best, running_loss / batch_per)
                    # print('Accuracy of the network on the ' + str(batch_per) + 'th update: %d %%' % (
                    #     100 * correct / total))
                    # mid-epoch ROC/PR over outputs gathered since last log
                    wl = roc(full_labels, full_output)
                    wandlog = {}
                    for q in wl:
                        wandlog[f"midepoch ROC_AUC"] = wl[q][0]
                        wandlog[f"midepoch PR_AUC"] = wl[q][1]
                        wandlog[f"midepoch threshhold accuracy"] = wl[q][2]
                    # wandlog.update({
                    #     "LOSS": running_loss / batch_per,
                    #     "TYPE": "TRAIN",
                    #     'EPOCH': e+1,
                    #     'UPDATE': (e*len(train_data)) + i + 1})
                    w.update({'midepoch loss': loss.item(),
                        'midepoch accuracy': accuracy(labels, outputs),
                        'midepoch percent error': percentError(outputs, labels)})
                    wandb.log(
                        # {
                        #     'loss': loss.item(),
                        #     # 'params': [p for p in net.parameters()],
                        #     # 'grads': [p.grad for p in net.parameters()],
                        #     # 'inputs': inputs,
                        #     f'label {i}': labels.flatten()[i] for i in len(labels.flatten().size(0)),
                        #     f'output {i}': outputs.flatten()[i] for i in len(outputs.flatten().size(0)),
                        #     'accuracy': accuracy(labels, outputs)
                        # }
                        w
                    )
                    wandb.log(wandlog)
                    # roll the mid-epoch buffers into the epoch buffer and reset
                    if full_full_output == None:
                        full_full_output = full_output
                    else:
                        full_full_output = torch.cat((full_full_output, full_output), 0)
                    full_output = None
                    full_labels = None
                    running_loss = 0
                    correct = 0
                    total = 0
            # print('[%d] loss: %.20f' %
            #     (epoch + 1, running_loss / total))
            # if logpath != None:
            #     f.write('[%d] loss: %.20f' %
            #         (epoch + 1, running_loss / total))
            # flush any remaining partial-window outputs into the epoch buffer
            if full_full_output == None:
                full_full_output = full_output
            else:
                full_full_output = torch.cat((full_full_output, full_output), 0)
            # ROC is commented out when training on 10 samples
            wl = roc(full_full_labels, full_full_output)
            w = {}
            for q in wl:
                w[f"epoch ROC_AUC"] = wl[q][0]
                w[f"epoch PR_AUC"] = wl[q][1]
                w[f"epoch threshhold accuracy"] = wl[q][2]
            # wandlog.update({
            #     "LOSS": running_loss / batch_per,
            #     "TYPE": "TRAIN",
            #     'EPOCH': e+1,
            #     'UPDATE': (e + 1) *len(train_data)})
            w.update({'epoch loss': loss.item(),
                'epoch accuracy': accuracy(full_full_labels, full_full_output),
                'epoch percent error': percentError(full_full_output, full_full_labels),
                'label': labels.flatten()[0],
                'output': outputs.flatten()[0]})
            wandb.log(
                # {
                #     'loss': loss.item(),
                #     # 'params': [p for p in net.parameters()],
                #     # 'grads': [p.grad for p in net.parameters()],
                #     # 'inputs': inputs,
                #     f'label {i}': labels.flatten()[i] for i in len(labels.flatten().size(0)),
                #     f'output {i}': outputs.flatten()[i] for i in len(outputs.flatten().size(0)),
                #     'accuracy': accuracy(labels, outputs)
                # }
                w
            )
            # snapshot the model whenever a metric saturates
            if w['epoch accuracy'] == 1:
                PATH = f'.accuracynet.pth'
                torch.save(net.state_dict(), PATH)
            if w['epoch PR_AUC'] == 1:
                PATH = f'.PRnet.pth'
                torch.save(net.state_dict(), PATH)
            if w['epoch ROC_AUC'] == 1:
                PATH = f'.ROCnet.pth'
                torch.save(net.state_dict(), PATH)
            # wandb.log(wandlog)
            full_output = None
            full_full_output = None
            running_loss = 0
            correct = 0
            total = 0
            running_loss = 0
            net.eval()
            correct = 0
            total = 0
            # periodic checkpoint every 10th global epoch
            if e % 10 == 9:
                PATH = f'.net.pth'
                torch.save(net.state_dict(), PATH)
            #check val set
            for i, data in enumerate(val_data, 0):
                inputs, labels = data[0], data[1].to(device)
                outputs = net(inputs)
                loss = criterion(outputs, labels)
                # NOTE(review): backward() during validation accumulates
                # gradients without an optimizer step — likely unintended;
                # confirm before removing.
                loss.backward()
                running_loss += loss.item()
                total+= labels.size(0)
                if full_output == None:
                    full_output = outputs
                else:
                    full_output = torch.cat((full_output, outputs), 0)
            # if e % 300 == 299:
            print(f'Validation loss for Epoch [{e +1}]: {running_loss/total}')
            # if logpath != None:
            #     f.write(f'Validation loss for Epoch [{epoch}]: {running_loss/total}')
            # wl = roc(full_val_labels, full_output)
            wandlog = {}
            # for q in wl:
            #     wandlog[f"{q} ROC_AUC"] = wl[q][0]
            #     wandlog[f"{q} PR_AUC"] = wl[q][1]
            #     wandlog[f"{q} ACCURACY"] = wl[q][2]
            # wandlog.update({
            #     "LOSS": running_loss / len(val_data),
            #     "TYPE": "VAL",
            #     'EPOCH': e+1,
            #     'UPDATE': (e + 1)*len(train_data)})
            # wandb.log(wandlog)
            # best = min(best, running_loss / total)
            # early stop just goes to the next lr change checkpoint
            if bestval <= running_loss / total:
                # if epoch >= 5:
                # print('Early Stop')
                # print(f"Best Validation loss: {bestval}")
                # print(f"Current Validation loss: {running_loss / total}")
                # no-op placeholder: early stopping is currently disabled
                e = e
                # break
                # continue
                # return
            else:
                # bestnet.load_state_dict(copy.deepcopy(net.state_dict()))
                bestepoch = e
                bestval = running_loss / total
            running_loss = 0
            correct = 0
            total = 0
            times.append(time.monotonic() - ftime)
            PATH = f'.net.pth'
            torch.save(net.state_dict(), PATH)
            # if e % 300 == 299:
            print('time for epoch: ', times[-1], 'seconds')
            if logpath != None:
                f.write(f'time for epoch: {times[-1]}, seconds')
            e+=1
    # finish training. in future dont plot and save here just return them
    print('Finished Training')
    print('average time per epoch: ', sum(times)/len(times), 'seconds')
    if logpath != None:
        f.write('Finished Training')
        f.write(f'average time per epoch: {sum(times)/len(times)} seconds')
        f.close()
    return
# def compute_dataframe(df: pd.DataFrame, checkpoint_path: str):
# model = LogisticRegression().load_state_dict(torch.load(checkpoint_path, map_location=torch.device("cuda:0" if torch.cuda.is_available() else "cpu")))
# targets, targets_s, guides, guides_s = df.target_sequence.tolist(), df.target_strand.tolist(), df.grna_target_sequence.tolist(), df.grna_target_strand.tolist()
# preds = []
# for guide, target, guide_s, target_s in zip(guides, targets, guides_s, targets_s):
# pred = model([one_hot(guide, guide_s), one_hot(target, target_s)])
# preds.append(pred.item())
# df['pred'] = preds
# return df
def compute_dataframe(df: pd.DataFrame, checkpoint_path):
    """Run the model over every row of ``df`` and append a 'pred' column.

    Despite the name, ``checkpoint_path`` is the already-loaded model
    callable (see the commented-out loader above). Returns the mutated df.
    """
    model = checkpoint_path
    guides = df.grna_target_sequence.tolist()
    targets = df.target_sequence.tolist()
    guides_s = df.grna_target_strand.tolist()
    targets_s = df.target_strand.tolist()
    preds = []
    for guide, target, guide_s, target_s in zip(guides, targets, guides_s, targets_s):
        prediction = model([one_hot(guide, guide_s), one_hot(target, target_s)])
        preds.append(prediction.item())
    df['pred'] = preds
    return df
6,669 | 777dc2056443f0404ccb75d570f2ddc3a3aa747b | import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions
class TestTaniHub():
    """Selenium UI scenarios.

    Fixes over the original:
    - The WebDriver is now created inside each test instead of as a class
      attribute: the class-level driver launched a browser at import time,
      and test 1's ``quit()`` left test 2 running against a dead session.
    - Bare ``except:`` clauses narrowed to ``except Exception:`` so
      KeyboardInterrupt/SystemExit are not swallowed.
    """

    def test_tanihub_number_1(self):
        """Log into tanihub.com, search for a product, add it to the cart and checkout."""
        self.driver = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver')
        self.driver.get('https://tanihub.com/')
        jabodetabek_option = self.driver.find_element_by_xpath("//p[text()='Jabodetabek']")
        user_image_button = self.driver.find_element_by_xpath("//img[@alt='profile']")
        time.sleep(5)
        jabodetabek_option.click()
        time.sleep(2)
        assert user_image_button.is_displayed()
        user_image_button.click()
        email_text_box = self.driver.find_element_by_xpath("//input[@type='email' and @id='input-icon-3']")
        assert email_text_box.is_displayed()
        email_text_box.send_keys('testinguser@mailinator.com')
        selanjutnya_msk_btn = self.driver.find_element_by_xpath("//button[@type='submit' and @id='Button-2']")
        assert selanjutnya_msk_btn.is_enabled()
        selanjutnya_msk_btn.click()
        time.sleep(2)
        password_txt_box = self.driver.find_element_by_xpath("//input[@type='password' and @id='input-password-4']")
        assert password_txt_box.is_displayed()
        password_txt_box.send_keys('admin123')
        selanjutnya_msk_btn.click()
        search_text_box = self.driver.find_element_by_xpath("//input[@id='input-icon-3' and @type='text']")
        assert search_text_box.is_displayed()
        search_text_box.send_keys('Minyak Goreng Rose Brand 2 L Karton')
        search_text_box.send_keys(Keys.ENTER)
        time.sleep(5)
        search_result_first_cart_button = self.driver.find_element_by_xpath("//button[@id='CardProduct-1601' and @type='button']")
        # assert search_result_first_cart_button.is_displayed()
        search_result_first_cart_button.click()
        keranjang_btn = self.driver.find_element_by_xpath("//button[@id='Button-2' and @type='button']/span")
        assert keranjang_btn.is_displayed()
        keranjang_btn.click()
        time.sleep(5)
        checkout_btn = self.driver.find_element_by_xpath("//button[text()='Checkout' and @type='button']")
        assert checkout_btn.is_displayed()
        checkout_btn.click()
        time.sleep(5)
        self.driver.quit()

    def test_tanihub_number_2(self):
        """Solve the timvroom.com selenium playground quiz page."""
        self.driver = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver')
        self.driver.get('http://timvroom.com/selenium/playground/')
        title_page = self.driver.title
        answer_box_1 = self.driver.find_element_by_id("answer1")
        answer_box_1.send_keys(title_page)
        name_txt_box = self.driver.find_element_by_id("name")
        name_txt_box.send_keys('Kilgore Trout')
        occupation_dropdown = self.driver.find_element_by_id("occupation")
        Select(occupation_dropdown).select_by_value('scifiauthor')
        list_blue_box = self.driver.find_elements_by_class_name("bluebox")
        answer_box_4 = self.driver.find_element_by_id("answer4")
        answer_box_4.send_keys(str(len(list_blue_box)))
        click_me_link = self.driver.find_element_by_xpath("//a[text()='click me']")
        click_me_link.click()
        red_box_element = self.driver.find_element_by_id("redbox")
        answer_box_6 = self.driver.find_element_by_id("answer6")
        answer_box_6.send_keys(str(red_box_element.get_attribute("class")))
        self.driver.execute_script('return ran_this_js_function()')
        value_script = self.driver.execute_script('return got_return_from_js_function()')
        answer_box_8 = self.driver.find_element_by_id("answer8")
        answer_box_8.send_keys(str(value_script))
        wrote_book_rdbtn = self.driver.find_element_by_xpath("//input[@type='radio' and @name='wrotebook']")
        wrote_book_rdbtn.click()
        answer_box_10 = self.driver.find_element_by_id("answer10")
        orange_box = self.driver.find_element_by_id("orangebox").location
        green_box = self.driver.find_element_by_id("greenbox").location
        answer_box_11 = self.driver.find_element_by_id("answer11")
        # the box with the smaller y coordinate sits higher on the page
        if green_box['y'] > orange_box['y']:
            answer_box_11.send_keys('orange')
        else:
            answer_box_11.send_keys('green')
        answer_box_10.send_keys(str(red_box_element.text))
        self.driver.set_window_size(850, 650)
        answer_box_13 = self.driver.find_element_by_id("answer13")
        answer_box_14 = self.driver.find_element_by_id("answer14")
        try:
            is_here_element = self.driver.find_element_by_id("ishere")
            if is_here_element.is_displayed():
                answer_box_13.send_keys('yes')
            else:
                answer_box_13.send_keys('no')
        except Exception:
            # element not present at all counts as "no"
            answer_box_13.send_keys('no')
        try:
            purple_box = self.driver.find_element_by_id("purplebox")
            if purple_box.is_displayed():
                answer_box_14.send_keys('yes')
            else:
                answer_box_14.send_keys('no')
        except Exception:
            answer_box_14.send_keys('no')
        click_then_wait_link = self.driver.find_element_by_xpath("//a[text()='click then wait']")
        click_then_wait_link.click()
        WebDriverWait(self.driver, 20).until(expected_conditions.element_to_be_clickable((By.XPATH, "//a[text()='click after wait']")))
        click_after_wait_link = self.driver.find_element_by_xpath("//a[text()='click after wait']")
        click_after_wait_link.click()
        self.driver.switch_to.alert.accept()
        submit_button = self.driver.find_element_by_id("submitbutton")
        submit_button.click()
        check_results = self.driver.find_element_by_id("checkresults")
        check_results.click()
        self.driver.quit()
def test_selenium_number_1():
    """Entry point for the TaniHub checkout scenario."""
    suite = TestTaniHub()
    suite.test_tanihub_number_1()
def test_selenium_number_2():
    """Entry point for the selenium playground scenario."""
    suite = TestTaniHub()
    suite.test_tanihub_number_2()
6,670 | 8cd54362680aa3a96babe100b9231f6f16b3f577 | import librosa
import soundfile
import os, glob
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
emotionsRavdessData = {
'01': 'neutral',
'02': 'calm',
'03': 'happy',
'04': 'sad',
'05': 'angry',
'06': 'fearful',
'07': 'disgust',
'08': 'surprised'
}
observed_emotions = ['neutral', 'calm', 'happy', 'disgust', 'sad', 'angry']
def extract_feature(file_name, mfcc, chroma, mel):
    """Extract a 1-D feature vector from a WAV file.

    Concatenates, in order, the time-averaged MFCC, chromagram and
    mel-spectrogram features for each flag that is truthy.
    """
    with soundfile.SoundFile(file_name) as sound_file:
        X = sound_file.read(dtype="float32")
        sample_rate = sound_file.samplerate
        if chroma:
            # chroma features are derived from the STFT magnitude
            stft = np.abs(librosa.stft(X))
        result = np.array([])
        if mfcc:
            mfcc_feat = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)
            result = np.hstack((result, mfcc_feat))
        if chroma:
            chroma_feat = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T, axis=0)
            result = np.hstack((result, chroma_feat))
        if mel:
            mel_feat = np.mean(librosa.feature.melspectrogram(X, sr=sample_rate).T, axis=0)
            result = np.hstack((result, mel_feat))
        return result
def load_dataset(test_size=0.15):
    """Build feature/label arrays from the RAVDESS and TESS corpora.

    Only emotions listed in ``observed_emotions`` are kept. Returns the
    (x_train, x_test, y_train, y_test) split with a fixed random state.
    """
    features, labels = [], []
    # RAVDESS: the emotion code is the third dash-separated filename field
    for path in glob.glob("DataSets/ravdessData/Actor_*/*.wav"):
        code = os.path.basename(path).split("-")[2]
        emotion = emotionsRavdessData[code]
        if emotion in observed_emotions:
            features.append(extract_feature(path, mfcc=True, chroma=True, mel=True))
            labels.append(emotion)
    # TESS: the emotion is the final underscore-separated token before ".wav"
    for path in glob.glob("DataSets/TESS_Toronto_emotional_speech_set_data/OAF_*/*.wav"):
        emotion = os.path.basename(path).split("_")[2].split(".")[0]
        if emotion in observed_emotions:
            features.append(extract_feature(path, mfcc=True, chroma=True, mel=True))
            labels.append(emotion)
    return train_test_split(np.array(features), labels, test_size=test_size, random_state=9)
# Model is trained at import time so that ``emotionRecognize`` below can use
# the fitted classifier immediately.
x_train, x_test, y_train, y_test = load_dataset(test_size=0.15)
model = MLPClassifier(alpha=0.01, batch_size=256, epsilon=1e-08, hidden_layer_sizes=(300,), learning_rate='adaptive',
                      max_iter=500)
model.fit(x_train, y_train)
# held-out accuracy, kept for the diagnostic prints below
y_pred = model.predict(x_test)
accuracy = accuracy_score(y_true=y_test, y_pred=y_pred)
## Train info
# print((x_train.shape[0], x_test.shape[0]))
# print(f'Features extracted: {x_train.shape[1]}')
# print("Accuracy: {:.2f}%".format(accuracy*100))
def emotionRecognize(file):
    """Predict the emotion label of a single audio file.

    Returns the predicted emotion string, or None when feature extraction
    or prediction fails (unreadable/corrupt file, wrong format, ...).
    """
    try:
        features = extract_feature(file, mfcc=True, chroma=True, mel=True)
        # the classifier expects a 2-D array: one row per sample
        sample = np.array([features.tolist()])
        return model.predict(sample)[0]
    except Exception:
        # bug fix: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit
        return None
|
6,671 | db309283137383cd698f235e7326c6e5c50f6cf3 | from django.urls import path, include
from rest_framework.routers import SimpleRouter
from board_api.views import PostViewSet, UpvoteView, CommentViewSet
router = SimpleRouter()
router.register(r"post", PostViewSet)
router.register(r"post_upvote", UpvoteView)
router.register(r"comment", CommentViewSet)
urlpatterns = [
path("", include(router.urls)),
]
|
6,672 | 113572682ca83408b7c22e0e178f29945d741142 | from ..lib import read_input, write_output
def link_bits(num: str = None, bit_i: str = '0', bit_j: str = '0', write: bool = True) -> int:
    """Combine the top ``bit_i`` bits and the bottom ``bit_j`` bits of ``num``.

    ``num`` is a binary string; the top slice is shifted so it sits directly
    above the bottom slice, and the two are OR-ed. When the arguments are not
    supplied they are read via ``read_input``; when ``write`` is true the
    result is also emitted via ``write_output``.
    """
    if num is None:
        num, bit_i, bit_j = read_input()
    value = int(num, 2)
    width = value.bit_length()
    mask = 2 ** width - 1
    i, j = int(bit_i), int(bit_j)
    top_bits = (value >> (width - i)) << j
    low_bits = ((value << (width - j)) & mask) >> (width - j)
    result = top_bits | low_bits
    if write:
        write_output(f'{result}')
    return result
def get_bits_between(num: str = None, bit_i: str = '0', bit_j: str = '0', write: bool = True) -> int:
    """Extract a bit field from the binary string ``num``.

    Drops the low ``bit_j`` bits, masks off the high end, and clears the low
    ``bit_i`` bits of the remainder. Arguments default to ``read_input`` when
    not supplied; the result is emitted via ``write_output`` when ``write``.
    """
    if num is None:
        num, bit_i, bit_j = read_input()
    value = int(num, 2)
    i, j = int(bit_i), int(bit_j)
    mask = 2 ** (value.bit_length() - j) - 1
    result = (((value >> j) << i) & mask) >> i
    if write:
        write_output(f'{result}')
    return result
|
6,673 | b8219c21dc2cdd497d3de48c59c146a1fd1509ec | # leetcode 718 最长重复子数组
# 给两个整数数组 A 和 B ,返回两个数组中公共的、长度最长的子数组的长度。
#
# 示例 1:
# 输入:
# A: [1,2,3,2,1]
# B: [3,2,1,4,7]
# 输出: 3
# 解释:
# 长度最长的公共子数组是 [3, 2, 1]。
#
# 说明:
# 1 <= len(A), len(B) <= 1000
# 0 <= A[i], B[i] < 100
class Solution:
    def findLength(self, A: [int], B: [int])->int:
        """
        Length of the longest common contiguous subarray of A and B.

        Dynamic programming: dp[i][j] is the length of the common subarray
        ending at A[i-1] and B[j-1]. With one row/column of zero padding the
        boundary branch disappears:
            A[i-1] == B[j-1]  ->  dp[i][j] = dp[i-1][j-1] + 1
            otherwise         ->  dp[i][j] = 0 (left as initialized)
        Time O(m*n), space O(m*n).
        :param A: first integer array
        :param B: second integer array
        :return: length of the longest common subarray (0 if either is empty)
        """
        na = len(A)
        nb = len(B)
        if na == 0 or nb == 0:
            # bug fix: the original crashed (max() of empty sequence) here
            return 0
        dp = [[0] * (nb + 1) for _ in range(na + 1)]
        best = 0
        for i in range(1, na + 1):
            for j in range(1, nb + 1):
                if A[i - 1] == B[j - 1]:
                    dp[i][j] = dp[i - 1][j - 1] + 1
                    # track the maximum inline instead of a second full scan
                    if dp[i][j] > best:
                        best = dp[i][j]
        return best
# Quick manual check: the longest common subarray of la/lb is [0, 0, 0, 0],
# so this prints 4.
sol = Solution()
la = [0,0,0,0,1]
lb = [1,0,0,0,0]
print(sol.findLength(la, lb))
6,674 | 7864138459caf469a0148420718b2282598141de | #!/usr/bin/env python
# coding: utf-8
import sys
sys.path.insert(0, "/code/huggingface/transformers-fair-wmt/src")
import logging
logging.disable(logging.INFO) # disable INFO and DEBUG logger everywhere
from transformers.tokenization_fsmt import FSMTTokenizer
from transformers.modeling_fsmt import FSMTForConditionalGeneration
def translate(src, tgt, text):
    """Translate ``text`` from language ``src`` to ``tgt`` with a WMT19 FSMT model.

    Downloads (or reuses) the ``stas/wmt19-{src}-{tgt}`` checkpoint, beam
    searches with 5 beams, and returns the decoded translation string.
    """
    # to switch to local model
    #mname = "/code/huggingface/transformers-fair-wmt/data/wmt19-{src}-{tgt}"
    # s3 uploaded model
    mname = f"stas/wmt19-{src}-{tgt}"
    tokenizer = FSMTTokenizer.from_pretrained(mname)
    model = FSMTForConditionalGeneration.from_pretrained(mname)
    encoded = tokenizer.encode(text, return_tensors='pt')
    generated = model.generate(encoded, num_beams=5, early_stopping=True)
    # generate() returns a batch; we sent one sentence, so take the first
    return tokenizer.decode(generated[0], skip_special_tokens=True)
def paraphrase(src, tgt, text):
    """Paraphrase ``text`` by round-tripping it through language ``tgt``."""
    forward = translate(src, tgt, text)
    return translate(tgt, src, forward)
#text = """Here's a little song I wrote. You might want to sing it note for note. Don't worry, be happy. In every life we have some trouble. But when you worry you make it double. Don't worry, be happy. Don't worry, be happy now."""
text = "Every morning when I wake up, I experience an exquisite joy - the joy of being Salvador Dalí - and I ask myself in rapture: What wonderful things is this Salvador Dalí going to accomplish today?"
en_ru = paraphrase('en', 'ru', text)
en_de = paraphrase('en', 'de', text)
# print together to avoid the logger noise :(
print("Paraphrasing:")
print(f"en : {text}")
print(f"en-ru-en: {en_ru}")
print(f"en-de-en: {en_de}")
# Paraphrasing:
# en : Every morning when I wake up, I experience an exquisite joy - the joy of being Salvador Dalí - and I ask myself in rapture: What wonderful things is this Salvador Dalí going to accomplish today?
# en-ru-en: Every morning when I wake up, I have a delightful joy - the joy of being Salvador Dali - and I ask myself in delight: What wonderful things is this Salvador Dali going to do today?
# en-de-en: Every morning when I wake up, I experience an extraordinary joy - the joy of being Salvador Dalí - and I wonder with delight: what wonderful things will this Salvador Dalí do today?
|
6,675 | e79e4eb1640d5ad6e360dfb18430fbf261cf9d3b | # encoding=utf-8
######
# 遗传算法应用于旅行商问题(TSP)
# Python 3.6
# https://morvanzhou.github.io/tutorials/machine-learning/evolutionary-algorithm/2-03-genetic-algorithm-travel-sales-problem/
######
|
6,676 | 287d4c2d490c9dcdd7be7e86fe577139a3d30f54 | a=list(input("enter the string or sentence to perform caesar cipher : "))
# Caesar cipher over the string in `a` (read above); shifts each letter by
# `b` positions, one step per inner-loop iteration. Non-letters pass through.
b=int(input('enter the frequency to perform ceasar cipher '))
e=[]  # output characters
#print(a)
#print (a[4])
c=len(a)
#print(c)
# NOTE(review): when b == 0 neither branch below runs, so `e` stays empty and
# the final print loop raises IndexError — confirm intended input range.
for i in range (0,c):
    d=ord(a[i])  # working code point for the current character
    #print(d)
    if b> 0:
        # Shift forward one step at a time, wrapping Z->A and z->a.
        for j in range (1,b+1):
            if a[i] >='a' and a[i] <='z' or a[i] >= 'A' and a[i] <='Z':
                # 90 ('Z') and 122 ('z') are excluded here so the elif
                # branches can wrap them around instead.
                if d>= 65 and d< 90 or d>=97 and d<122:
                    d+=1
                elif d==90:
                    d=65
                elif d==122:
                    d=97
            else :
                pass
        f=chr(d)
        e.append(f)
    if b<0:
        # Shift backward |b| steps, wrapping A->Z and a->z.
        g=abs(b)
        for j in range (1,g+1):
            if a[i] >='a' and a[i] <='z' or a[i] >= 'A' and a[i] <='Z':
                if d> 65 and d<= 90 or d>97 and d<=122:
                    d-=1
                elif d==97:
                    d=122
                elif d==65:
                    d=90
            else :
                pass
        f=chr(d)
        e.append(f)
#print (e)
# Emit the ciphered text without separators.
for k in range (0,c):
    print(e[k],end='')
'''65-90 A-Z
97-122 a-z'''
|
6,677 | 4744d594c0599f1aa807eefa0cb40a2a2a3c7926 | import io
import json
import sys
import time
from coord_tools import get_elevation
# Usage: <script> <infile> <outfile>
# infile rows: "node_id lat lon"; outfile: JSON {node_id: [lat, lon, elev]}.
if len(sys.argv) != 3:
    # Bug fix: the original only printed the message and then crashed on
    # sys.argv[1]; exit explicitly with the diagnostic instead.
    sys.exit('Wrong number of arguments! Exiting.')
infile_name = sys.argv[1]
outfile_name = sys.argv[2]
# Declare dict to hold coordinates: node_id -> [lat, lon, elev]
node_coords = {}
fail_count = 0        # consecutive elevation-lookup failures
nodes_processed = 0
# Read in each node from a file; `with` guarantees the handle is closed.
with open(infile_name, 'r') as infile:
    for line in infile:
        fields = line.split()
        node_id = int(fields[0])
        lat = float(fields[1])
        lon = float(fields[2])
        elev = get_elevation(lat, lon)
        if elev < 0:
            # Negative value signals a lookup failure — TODO confirm this
            # contract against coord_tools.get_elevation.
            print('Warning: bad elevation result')
            fail_count += 1
        else:
            fail_count = 0
            node_coords[node_id] = [lat, lon, elev]
            nodes_processed += 1
            if nodes_processed % 1000 == 0:
                # Periodic progress report plus a pause (presumably to rate-limit
                # the elevation service — verify).
                print(f'Processed {nodes_processed} nodes so far...')
                time.sleep(10)
        if fail_count > 100:
            print('Aborting due to 100 consecutive failures')
            break
        #time.sleep(.5)
# Print the 3-coord nodes to the outfile
with open(outfile_name, 'w') as outfile:
    json.dump(node_coords, outfile)
print(f'Wrote {nodes_processed} nodes to file {outfile_name}.')
6,678 | f4ae34be2be2b47b3394e6da751c53c51a1c3174 | #!/usr/local/bin/python3
"""
Copyright (c) 2015-2019 Ad Schellevis <ad@opnsense.org>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
returns system activity (top)
"""
import collections
import tempfile
import subprocess
import os
import sys
import ujson
if __name__ == '__main__':
    fieldnames = None            # column names parsed from top's header row
    field_max_width = dict()     # per-column max width, for plain-text output
    result = {'headers': [], 'details': []}
    is_header = True
    tidpid = dict()              # thread id -> process id, from procstat
    # Build the TID->PID map so top's per-thread rows can be tagged with a PID.
    for line in subprocess.run(['/usr/bin/procstat','-ath'], capture_output=True, text=True).stdout.split('\n'):
        parts = line.split(maxsplit=2)
        if len(parts) > 1:
            tidpid[parts[1]] = parts[0]
    # grab second display so that CPU time data appears
    sp = subprocess.run(['/usr/bin/top','-aHSTn','-d2','999999'], capture_output=True, text=True)
    topData = sp.stdout.strip().split('\n\n',2)[-1]
    for line in topData.split('\n'):
        # end of header, start of top detection
        if line.find('USERNAME') > -1 and line.find('COMMAND') > -1:
            is_header = False
        if is_header:
            # parse headers from top command, add to result
            if len(line.strip()) > 0:
                result['headers'].append(line)
        else:
            # parse details including fieldnames (leave original)
            if fieldnames is None:
                # First non-header line is the column row; prepend PID since
                # top shows thread ids and we map them back to processes.
                fieldnames = ['PID'] + line.split()
            else:
                tmp = line.split(maxsplit=10)
                record = {'C': '0'}
                for field_id in range(len(fieldnames)):
                    fieldname = fieldnames[field_id]
                    if field_id == 0: # PID
                        record[fieldname] = tidpid[tmp[0]] if tmp[0] in tidpid else ''
                    else:
                        record[fieldname] = tmp[field_id - 1]
                    # Track widest value per column for the fixed-width printer.
                    if fieldname not in field_max_width or field_max_width[fieldname] < len(record[fieldname]):
                        field_max_width[fieldname] = len(record[fieldname])
                result['details'].append(record)
    if len(sys.argv) > 1 and sys.argv[1] == 'json':
        # output as json
        print(ujson.dumps(result))
    else:
        # output plain (reconstruct data)
        for header_line in result['headers']:
            print (header_line)
        print ("\n")
        if fieldnames is not None:
            # Build one %-format string with left-aligned, padded columns.
            format_str = ""
            header_fields = {}
            for fieldname in fieldnames:
                format_str = '%s %%(%s)-%ds'%(format_str,fieldname, field_max_width[fieldname]+1)
                header_fields[fieldname] = fieldname
            print (format_str % header_fields)
            for detail_line in result['details']:
                print (format_str % detail_line)
6,679 | be5147efda879165107378527ebf44890c03be75 | import numpy as np
# NumPy cheat-sheet / scratch notes. "# OUT:" comments show expected results.
z = np.linspace(2, 10, 5)  # from 2 to 10 inclusive, 5 elements
# OUT: array([ 2.,  4.,  6.,  8., 10.])
np.random.seed(0)
z1 = np.random.randint(10, size=6)
# OUT: array([5, 0, 3, 3, 7, 9])
z = np.array([1, 2, 3, 4, 5])
z < 3
# OUT: array([T, T, F, F, F])
z[z < 3]
# OUT: array([1, 2])
a = np.array([1, 2, 3, 4, 5])
b = np.array([6, 7, 8, 9, 10])
a + b  # also - * /
# OUT: array([7, 9, 11, 13, 15])
a + 30  # also - * /
# OUT: array([31, 32, 33, 34, 35])
a = np.array([[1, 2, 3], [4, 5, 6]])
print(a)
# OUT: [[1 2 3]
#       [4 5 6]]
a.shape   # bug fix: shape is an attribute, not a method
# OUT: (2, 3)
a.ndim    # bug fix: ndim is an attribute, not a method
# OUT: 2
a[0, 2]
# OUT: 3
a[0, :]
# OUT: array([1, 2, 3])
a[:, 1]
# OUT: array([2, 5])  (second column; the old note wrongly said [2, 4])
np.min(a)  # or np.max / np.sum
# OUT: 1
np.zeros(5)
# OUT: array([0., 0., 0., 0., 0.])
np.zeros_like([[10, 10], [1, 1]])
# OUT: array([[0, 0], [0, 0]])
np.ones((3, 2))  # bug fix: the shape must be one tuple argument, not ones(3, 2)
# OUT: array([[1., 1.],
#             [1., 1.],
#             [1., 1.]])
np.full((2, 2), 100)
# OUT: array([[100, 100],
#             [100, 100]])
np.full_like([[2, 2], [2, 2]], 10, dtype=int)  # fixed: np.int was removed; 2x2 template matches the OUT
# OUT: array([[10, 10], [10, 10]])
np.random.rand(2, 4)
# OUT: array([[x, x, x, x],
#             [x, x, x, x]])  random floats in [0, 1)
np.random.randint(10)
# OUT: x  # random int from 0 to 10 (non-inclusive)
np.random.randint(5, 10, size=(2, 2))  # from 5 to 10 (non-inclusive)
# OUT: array([[x, x],
#             [x, x]])
a = [np.pi, -np.pi, 0]
np.cos(a)
# OUT: array([-1., -1., 1.])
np.arange(10)
# OUT: array([0, 1, ..., 9])
v1 = np.array([1, 2, 3])
v2 = np.array([4, 5, 6])
np.vstack([v1, v2, v1])
# 1 2 3
# 4 5 6
# 1 2 3
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
# a[[1, 2, 8]]
# OUT: array([2, 3, 9])
filedata = np.genfromtxt("name.txt", delimiter=",")  # NOTE: requires name.txt to exist
filedata = filedata.astype("type")  # placeholder: replace "type" with a real dtype, e.g. "int32"
# filedata[filedata > 50]
# ((filedata > 50) & (filedata < 100))
# bool     Boolean (True or False) stored as a bit
# inti     Platform integer (normally either int32 or int64)
# int8     Byte (-128 to 127)
# int16    Integer (-32768 to 32767)
# int32    Integer (-2 ** 31 to 2 ** 31 - 1)
# int64    Integer (-2 ** 63 to 2 ** 63 - 1)
# uint8    Unsigned integer (0 to 255)
# uint16   Unsigned integer (0 to 65535)
# uint32   Unsigned integer (0 to 2 ** 32 - 1)
# uint64   Unsigned integer (0 to 2 ** 64 - 1)
# float16  Half precision float: sign bit, 5 bits exponent, 10 bits mantissa
# float32  Single precision float: sign bit, 8 bits exponent, 23 bits mantissa
# float64  Double precision float: sign bit, 11 bits exponent, 52 bits mantissa
a = np.arange(7, dtype='f')
# Single-character dtype codes:
# Integer i | Unsigned integer u | Single precision float f
# Double precision float d | Boolean b | Complex D
# String S | Unicode U | Void V
x = np.arange(0, 10, 2)  # x = array([0, 2, 4, 6, 8])
y = np.arange(5)         # y = array([0, 1, 2, 3, 4])
m = np.vstack([x, y])    # m = array([[0, 2, 4, 6, 8],
                         #            [0, 1, 2, 3, 4]])
xy = np.hstack([x, y])   # xy = array([0, 2, 4, 6, 8, 0, 1, 2, 3, 4])
6,680 | 47f6c4b3c279a065b8f21dab2faa71271db8d6ab | from django.db import models
from datetime import datetime
# Message model for testing purposes
class Message(models.Model):
    """Typed text message; exists for testing purposes."""
    type = models.CharField(max_length=10)
    body = models.CharField(max_length=50)

    def __str__(self):
        return "{0}:{1}".format(self.type, self.body)
# Company model
class Company(models.Model):
    """A company whose messages are fetched and classified."""
    name = models.CharField(max_length=10)

    @classmethod
    def create(cls, name):
        """Alternate constructor; builds (but does not save) a Company."""
        return cls(name=name)

    def __str__(self):
        return self.name
# model for storing message and its prediction
class Entry(models.Model):
    """A fetched message together with its model prediction."""
    fetched_date = models.DateTimeField()
    message = models.CharField(max_length=200)
    prediction = models.CharField(max_length=10)
    parent_company = models.ForeignKey(Company, on_delete=models.CASCADE)

    @classmethod
    def create(cls, message, prediction, company):
        """Build (but do not save) an Entry stamped with the current time."""
        return cls(
            message=message,
            prediction=prediction,
            parent_company=company,
            fetched_date=datetime.now(),
        )

    def __str__(self):
        stamp = self.fetched_date.strftime("%m/%d/%Y, %H:%M:%S")
        return stamp + " " + self.prediction + ":" + self.message
|
6,681 | a6617934c5e6527cf59225a5d159d1ce8a33db50 | import http.client
import json
# Fetch 2016-season statistics for team 768 in league 4 from api-sports.
conn = http.client.HTTPSConnection("v3.football.api-sports.io")
headers = {
    'x-rapidapi-host': "v3.football.api-sports.io",
    'x-rapidapi-key': ""  # NOTE: API key intentionally blank — fill in before use
    }
conn.request("GET", "/teams/statistics?season=2016&team=768&league=4", headers=headers)
res = conn.getresponse()
data = res.read()
# Parsed JSON payload (dict); named "pretty" but not pretty-printed here.
pretty = json.loads(data)
6,682 | 2f714ed54a19ec26d7ecb1979e79366721b3d0fe | import matplotlib.pyplot as plt
import numpy as np
# 描画用サンプルデータ
#x= np.array([0,1,2,3,4])
y = np.array([2, 2, 3, 4, 5])
print(y)
#print(range(y))
# Wide, short figure: one bar per element of y, saved to disk (no display).
plt.figure(figsize=(10,1))
plt.bar(range(len(y)), y)
plt.savefig('test.png')
plt.clf()
6,683 | 9cff227eeeaffda777668aa3b90e3839426da811 | import tkinter as tk
import classejogo
class Tabuleiro():
    """Tkinter front end for the tic-tac-toe ("Jogo da Velha") logic in classejogo.

    Layout: row 0 is the score bar, rows 1-3 hold the 3x3 board of buttons,
    row 4 holds the turn label, the restart button and the winner label.
    """

    def __init__(self):
        self.jogo = classejogo.Jogo()
        self.window = tk.Tk()
        self.window.title("Jogo da Velha")
        self.window.geometry("300x360+100+100")
        # Grid geometry: thin top/bottom rows, 100px board rows and columns.
        for linha, altura in ((0, 30), (1, 100), (2, 100), (3, 100), (4, 30)):
            self.window.rowconfigure(linha, minsize=altura, weight=1)
        for coluna in range(4):
            self.window.columnconfigure(coluna, minsize=100, weight=1)
        # Score bar (row 0): created once, only its text is refreshed on restart.
        self.vitorias_X = tk.Label(self.window)
        self.vitorias_X.grid(row=0, column=0, sticky="nsew")
        self.vitorias_X.configure(text="Vitórias de X: {0} ".format(self.jogo.vitórias_x), font='Arial 10', bg='Blue', fg='White')
        self.placar = tk.Label(self.window)
        self.placar.grid(row=0, column=1, sticky="nsew")
        self.placar.configure(text="<- PLACAR ->", font='Arial 10', bg='Black', fg='Green')
        self.vitorias_O = tk.Label(self.window)
        self.vitorias_O.grid(row=0, column=2, sticky="nsew")
        self.vitorias_O.configure(text="Vitórias de O: {0} ".format(self.jogo.vitórias_o), font='Arial 10', bg='Yellow', fg='Black')
        self._montar_tabuleiro()

    ###############################################################################
    def _montar_tabuleiro(self):
        """(Re)create the nine board buttons and the bottom control row.

        Factored out of the former duplicated __init__/restart bodies; like the
        original, it places fresh widgets over any previous ones.
        """
        for i in range(3):
            for j in range(3):
                botao = tk.Button(self.window)
                botao.grid(row=i + 1, column=j, sticky="nsew")
                botao.configure(command=getattr(self, 'botão{0}x{1}_clicado'.format(i, j)))
                # Keep the historical attribute names botão0x0 ... botão2x2.
                setattr(self, 'botão{0}x{1}'.format(i, j), botao)
        # Turn label:
        self.label_turno = tk.Label(self.window)
        self.label_turno.grid(row=4, column=0, columnspan=1, sticky="nsew")
        self.label_turno.configure(text="Turno de : {0}".format(self.jogo.player), bg='Black', fg='Green', font='Arial 9',)
        # Restart button (font normalized to 'Arial 18'; the old restart path
        # inconsistently used 'Arial 24'):
        self.reiniciar = tk.Button(self.window)
        self.reiniciar.grid(row=4, column=1, columnspan=1, sticky="nsew")
        self.reiniciar.configure(text="Reiniciar", font='Arial 18', activeforeground='Green', fg='Red', command=self.restart)
        # Winner label:
        self.label_ganhador = tk.Label(self.window)
        self.label_ganhador.grid(row=4, column=2, columnspan=1, sticky="nsew")
        self.label_ganhador.configure(text="Ganhador: {0}".format(self.jogo.ganhador), bg='Black', fg='Green', font='Arial 9',)

    ###############################################################################
    def clicou(self, i, j):
        """Log a click on cell (i, j) for debugging."""
        print("Turno de: {0} ".format(self.jogo.player))
        print("Botão {0} x {1} clicado".format(i, j))

    def _jogar(self, i, j):
        """Handle a click on cell (i, j): mark it, forward the move, refresh labels."""
        self.clicou(i, j)
        botao = getattr(self, 'botão{0}x{1}'.format(i, j))
        # Mark with the CURRENT player before the game object flips the turn.
        botao.configure(text=self.jogo.player, state="disabled", font='Arial 100 ')
        self.jogo.recebe_jogada(i, j)
        self.label_turno.configure(text="Turno de: {0}".format(self.jogo.player))
        self.label_ganhador.configure(text="Ganhador: {0}".format(self.jogo.ganhador), bg='Black', fg='Green', font='Arial 9',)

    ###############################################################################
    # Per-button handlers kept under their original names (they are the
    # button callbacks); each delegates to the shared _jogar helper.
    def botão0x0_clicado(self):
        self._jogar(0, 0)

    def botão0x1_clicado(self):
        self._jogar(0, 1)

    def botão0x2_clicado(self):
        self._jogar(0, 2)

    def botão1x0_clicado(self):
        self._jogar(1, 0)

    def botão1x1_clicado(self):
        self._jogar(1, 1)

    def botão1x2_clicado(self):
        self._jogar(1, 2)

    def botão2x0_clicado(self):
        self._jogar(2, 0)

    def botão2x1_clicado(self):
        self._jogar(2, 1)

    def botão2x2_clicado(self):
        self._jogar(2, 2)

    def iniciar(self):
        """Enter the Tk main loop."""
        self.window.mainloop()

    ###############################################################################
    def restart(self):
        """Reset the game, rebuild the board widgets and refresh the score bar."""
        self.jogo.limpa_jogadas()
        self._montar_tabuleiro()
        self.vitorias_X.configure(text="Vitórias de X: {0} ".format(self.jogo.vitórias_x), bg='Blue', fg='White')
        self.vitorias_O.configure(text="Vitórias de O: {0} ".format(self.jogo.vitórias_o), bg='Yellow', fg='Black')
|
6,684 | 4a118f9081a8b3baf0b074c8dc14eaeef4559c08 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
class Migration(migrations.Migration):
    """Auto-generated initial migration for the `preaches` app.

    Creates Author, Preach, Social_media and Tags plus their M2M links.
    Generated code — do not hand-edit field order.
    """
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
                ('name', models.CharField(max_length=60)),
                ('email', models.EmailField(max_length=100)),
                ('telephone', models.CharField(max_length=12)),
                ('cellphone', models.CharField(max_length=12)),
                ('img', models.ImageField(upload_to='')),
                ('role', models.IntegerField(default=5, choices=[(0, 'Pastor titular'), (1, 'Pastor/a'), (2, 'Diacono/a'), (3, 'Editor/a')])),
            ],
        ),
        migrations.CreateModel(
            name='Preach',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
                ('title', models.CharField(max_length=60)),
                ('summary', models.CharField(blank=True, max_length=500)),
                ('date', models.DateField()),
                # NOTE(review): default is a timestamp frozen at makemigrations
                # time, not "now" — likely should be a callable in the model.
                ('published_date', models.DateField(default=datetime.datetime(2017, 5, 7, 2, 3, 52, 71419))),
                ('url', models.URLField()),
                ('img', models.ImageField(verbose_name='Imagen', upload_to='images')),
                ('author', models.ForeignKey(to='preaches.Author')),
            ],
        ),
        migrations.CreateModel(
            name='Social_media',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
                ('name', models.IntegerField(default=0, verbose_name='Nombre de la red social', choices=[(0, 'Facebook'), (1, 'Instagram'), (2, 'Twitter')])),
                ('url', models.URLField()),
            ],
        ),
        migrations.CreateModel(
            name='Tags',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
                ('name', models.CharField(verbose_name='Categoria', max_length=80)),
            ],
        ),
        migrations.AddField(
            model_name='preach',
            name='tags',
            field=models.ManyToManyField(to='preaches.Tags'),
        ),
        migrations.AddField(
            model_name='author',
            name='social_media',
            field=models.ManyToManyField(to='preaches.Social_media'),
        ),
    ]
6,685 | cc5b22a0246fcc9feaed6a0663095a6003e6cef1 | import json, re, bcrypt, jwt
from datetime import datetime, timedelta
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Avg
from django.http import JsonResponse
from django.views import View
from room.models import Room, Category, RoomAmenity,Image,Amenity,WishList,DisableDate,AbleTime
from reservation.check import check, check_in, check_out
from user.models import User, Host, Review
from user.utils import LoginRequired
class RoomListView(View):
    def get(self, request):
        """Return a paginated JSON list of room thumbnails matching the filters.

        Query params: city, checkin/checkout, adult/child, min_price/max_price,
        is_refund/is_super ('true' flags), room_type (repeatable), amenity
        (repeatable), page. Responds 400 when nothing matches.
        """
        try:
            city      = request.GET.get('city', '')
            checkin   = request.GET.get('checkin', None)
            checkout  = request.GET.get('checkout', None)
            adult     = int(request.GET.get('adult', '0'))
            child     = int(request.GET.get('child', '0'))
            min_price = request.GET.get('min_price', 0)
            max_price = request.GET.get('max_price', 100000000)
            # Simplified: the comparison already yields a bool.
            is_refund  = request.GET.get('is_refund', None) == 'true'
            is_super   = request.GET.get('is_super', None) == 'true'
            room_types = request.GET.getlist('room_type', None)
            amenities  = request.GET.getlist('amenity', None)
            page       = int(request.GET.get('page', '1'))
            # ORM-level filters
            list_criteria = {
                'city__contains': city,
                'price__range'  : [min_price, max_price],
                'capacity__gte' : adult + child
            }
            if room_types:
                list_criteria['category__name__in'] = room_types
            if amenities:
                list_criteria['amenity__name__in'] = amenities
            if is_super:
                list_criteria['host__is_super'] = is_super
            if is_refund:
                list_criteria['is_refund'] = is_refund
            # paginator
            size = 10
            offset = (page - 1) * size
            limit = page * size
            room_list = Room.objects.filter(**list_criteria)
            # Availability filter. Fix: when both dates were given, the old code
            # ran all three checks back-to-back; now exactly one branch applies.
            if checkin and checkout:
                room_list = [room for room in room_list if check(room, checkin, checkout)]
            elif checkin:
                room_list = [room for room in room_list if check_in(room, checkin)]
            elif checkout:
                room_list = [room for room in room_list if check_out(room, checkout)]
            if not room_list:
                return JsonResponse({'message': 'NO_ROOM_AVAILABLE'}, status=400)
            # Review rating categories = all Review fields except bookkeeping ones.
            rating_list = [field.name for field in Review._meta.get_fields() if field.name not in ['id', 'review_user', 'review_room', 'comment']]
            room_thumbnail = [{
                'room_id'  : room.id,
                'room_name': room.name,
                'price'    : room.price,
                'address'  : room.city,
                'room_type': room.category.name,
                'lat'      : room.latitude,
                'lng'      : room.longtitude,  # field name is misspelled in the model
                'image'    : [image.url for image in room.image.all()],
                'is_super' : room.host.is_super,
                'capacity' : int(room.capacity),
                'amenity'  : [roomamenity.amenity.name for roomamenity in room.roomamenity_set.all()],
                'rating'   : [{
                    'category'       : category,
                    'category_rating': Review.objects.filter(review_room=room).aggregate(rate_avg=Avg(category))['rate_avg']
                } for category in rating_list
                ]
            } for room in room_list[offset:limit]
            ]
            common_data = len(room_list)
            return JsonResponse({'thumbnail': room_thumbnail, 'common': common_data}, status=200)
        except KeyError:
            return JsonResponse({'message': 'KeyError'}, status=400)
class RoomView(View):
    def get(self, request, room_id):
        """Return detail JSON for one room, including averaged review ratings.

        Responds 400 with NOT_FOUND_ROOM_ID when the room does not exist.
        """
        try:
            room = Room.objects.get(id=room_id)
            # Review rating categories = all Review fields except bookkeeping ones.
            rating_list = [field.name for field in Review._meta.get_fields() if field.name not in ['id', 'review_user', 'review_room', 'comment']]
            review_qs = Review.objects.filter(review_room=room)
            ratings = []
            for category in rating_list:
                avg = review_qs.aggregate(Avg(category)).get(category + '__avg')
                # Robustness fix: aggregate() yields None when the room has no
                # reviews; int(None) previously raised TypeError here.
                ratings.append({
                    'category'       : category,
                    'category_rating': int(avg) if avg is not None else 0
                })
            room_detail = {
                'room_name': room.name,
                'address'  : room.city,
                'price'    : room.price,
                'room_type': room.category.name,
                'image'    : [image.url for image in room.image.all()][0],
                'is_super' : room.host.is_super,
                'host'     : room.host.user.last_name + room.host.user.first_name,
                'capacity' : room.capacity,
                'amenity'  : [{
                    'id'         : roomamenity.amenity.id,
                    # strip the surrounding <i class="..."></i> markup
                    'icon'       : re.sub('<i class=\\"|\\"></i>', '', roomamenity.amenity.image),
                    'description': roomamenity.amenity.name
                } for roomamenity in room.roomamenity_set.all()
                ],
                'rating'   : ratings
            }
            return JsonResponse({'detail': room_detail}, status=200)
        except KeyError:
            return JsonResponse({'message': 'KeyError'}, status=400)
        except Room.DoesNotExist:
            return JsonResponse({'message': 'NOT_FOUND_ROOM_ID'}, status=400)
class WishListView(View):
    @LoginRequired
    def post(self, request, room_id):
        """Add room_id to the authenticated user's wish list."""
        user = request.user
        try:
            if WishList.objects.filter(wish_user=user, wish_room_id=room_id).exists():
                return JsonResponse({'MESSAGE': 'Already Choosen'}, status=400)
            # Bug fix: the original created the row with wish_user_id=1
            # (hard-coded), attributing every wish to user 1 instead of the
            # authenticated caller.
            WishList.objects.create(
                wish_user    = user,
                wish_room_id = room_id
            )
            return JsonResponse({'MESSAGE': 'SUCCESS'}, status=200)
        except KeyError:
            return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)

    @LoginRequired
    def delete(self, request, room_id):
        """Remove room_id from the authenticated user's wish list."""
        try:
            user = request.user
            wish = WishList.objects.get(wish_user=user, wish_room_id=room_id)
            wish.delete()
            return JsonResponse({'MESSAGE': 'Delete Success'}, status=200)
        except KeyError:
            return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)
        except WishList.DoesNotExist:
            return JsonResponse({'MESSAGE': 'Already not Exist in list'}, status=400)

    @LoginRequired
    def get(self, request):
        """List the user's wished rooms with thumbnail data and rating averages."""
        try:
            user = request.user
            wishlists = WishList.objects.filter(wish_user=user)
            rating_list = [field.name for field in Review._meta.get_fields() if field.name not in ['id', 'review_user', 'review_room', 'comment']]
            if not wishlists:
                return JsonResponse({'MESSAGE': 'nothing in cart'}, status=400)
            result = [{
                'room_id'  : wishlist.wish_room.id,
                'room_name': wishlist.wish_room.name,
                'address'  : wishlist.wish_room.city,
                'price'    : wishlist.wish_room.price,
                'room_type': wishlist.wish_room.category.name,
                'image'    : [image.url for image in wishlist.wish_room.image.all()],
                'is_super' : wishlist.wish_room.host.is_super,
                'capacity' : wishlist.wish_room.capacity,
                'lat'      : wishlist.wish_room.latitude,
                'lng'      : wishlist.wish_room.longtitude,  # field name is misspelled in the model
                'amenity'  : [roomamenity.amenity.name for roomamenity in wishlist.wish_room.roomamenity_set.all()],
                'rating'   : [{
                    'category'       : category,
                    'category_rating': Review.objects.filter(review_room=wishlist.wish_room).aggregate(Avg(category)).get(category + '__avg')
                } for category in rating_list
                ]
            } for wishlist in wishlists]
            return JsonResponse({'result': result}, status=200)
        except KeyError:
            return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)
|
6,686 | 041f1d7c482fe4f65e8cc5a508da62ee6ccf59ff | from ...routes import Route
from .providers import SQSProvider
from .message_translators import SQSMessageTranslator, SNSMessageTranslator
class SQSRoute(Route):
    """Route backed by an SQS queue, defaulting to the SQS message translator."""

    def __init__(self, provider_queue, provider_options=None, *args, **kwargs):
        options = provider_options if provider_options is not None else {}
        kwargs['provider'] = SQSProvider(provider_queue, **options)
        if 'message_translator' not in kwargs:
            kwargs['message_translator'] = SQSMessageTranslator()
        # Default the route name to the queue name.
        kwargs.setdefault('name', provider_queue)
        super().__init__(*args, **kwargs)
class SNSQueueRoute(Route):
    """Route for an SQS queue fed by SNS, defaulting to the SNS message translator."""

    def __init__(self, provider_queue, provider_options=None, *args, **kwargs):
        options = provider_options if provider_options is not None else {}
        kwargs['provider'] = SQSProvider(provider_queue, **options)
        if 'message_translator' not in kwargs:
            kwargs['message_translator'] = SNSMessageTranslator()
        # Default the route name to the queue name.
        kwargs.setdefault('name', provider_queue)
        super().__init__(*args, **kwargs)
|
6,687 | 20e5220ce23aaaedbfafe599b352f5d3a220e82e | from django.db import models
class faculdades(models.Model):
    """College/university record (maps the pre-existing 'faculdades' table)."""
    codigo = models.IntegerField(primary_key = True)  # institution code (natural PK)
    nome = models.CharField(max_length=50)
    cidade = models.CharField(max_length=30)
    estado = models.CharField(max_length=20)
    pais = models.CharField(max_length=20)
    def __str__(self):
        return self.nome
    class Meta:
        managed = False  # table schema is maintained outside Django
        db_table = 'faculdades'
        verbose_name = 'Cad.Faculdade'
class cursos(models.Model):
    """Course offered by a college (maps the pre-existing 'cursos' table)."""
    codigo = models.AutoField(primary_key = True)
    nome = models.CharField(max_length=50)
    departamento = models.CharField(max_length=30)
    faculdade = models.ForeignKey('faculdades', db_column='faculdade', on_delete=models.CASCADE)
    def __str__(self):
        return self.nome
    class Meta:
        managed = False  # table schema is maintained outside Django
        db_table = 'cursos'
        verbose_name = 'Cad.Curso'
class profcoorest(models.Model):
    """Internship-coordinating professor (maps the 'profcoorest' table)."""
    masp = models.IntegerField(primary_key = True)  # staff registration number
    nome = models.CharField(max_length=50)
    curso = models.ForeignKey('cursos', db_column='curso', on_delete=models.CASCADE)
    def __str__(self):
        return self.nome
    class Meta:
        managed = False  # table schema is maintained outside Django
        db_table = 'profcoorest'
        verbose_name = 'Cad.Profcoorest'
class alunos(models.Model):
    """Student enrolled in a course (maps the pre-existing 'alunos' table)."""
    matricula = models.IntegerField(primary_key = True)  # enrollment number (natural PK)
    nome = models.CharField(max_length=100)
    sexo = models.CharField(max_length=1)
    datanasc = models.DateField()  # date of birth
    periodo = models.IntegerField()  # current term/semester
    curso = models.ForeignKey('cursos', db_column='curso', on_delete=models.CASCADE)
    def __str__(self):
        return self.nome
    class Meta:
        managed = False  # table schema is maintained outside Django
        db_table = 'alunos'
        verbose_name = 'Cad.Aluno'
class estagio(models.Model):
    """Internship linking a student to a supervising professor (table 'estagio')."""
    codigo = models.AutoField(primary_key = True)
    aluno = models.ForeignKey('alunos', db_column='aluno', on_delete=models.CASCADE)
    profest = models.ForeignKey('profcoorest', db_column='profest', on_delete=models.CASCADE)
    remunerado = models.CharField(max_length=1)  # paid flag — presumably 'S'/'N'; confirm
    valor = models.DecimalField(max_digits=6, decimal_places=2)  # stipend value
    empresa = models.CharField(max_length=30)
    cargahr = models.IntegerField()  # weekly workload in hours — TODO confirm unit
    descr_est = models.CharField(max_length=256)  # internship description
    resp_est = models.CharField(max_length=50)  # responsible person at the company
    def __str__(self):
        return '%s' % (self.codigo)
    class Meta:
        managed = False  # table schema is maintained outside Django
        db_table = 'estagio'
        verbose_name = 'Cad.Estagio'
6,688 | 9f40162348d33d70639692dac87777a2799999e9 | #!/usr/bin/env python
from ks_auth import sess
from ks_auth import trust_auth
from ks_auth import ks
from ks_auth import utils
from novaclient import client
import novaclient.exceptions
from time import sleep
from uuid import uuid4
import sys
# RDTIBCC-1042
VERSION = '2'
nova = client.Client(VERSION, session=sess)
username = 'standard_user'
search_opts = {
'all_tenants': True
}
class PollingLimitException(Exception):
pass
def poll_server(server, interval=2, limit=4, *args, **kwargs):
    """Yield a freshly fetched copy of *server* up to *limit* times.

    Sleeps *interval* seconds after each poll; raises
    PollingLimitException once the polling budget is exhausted.
    """
    polls_done = 0
    while polls_done < limit:
        yield nova.servers.get(server.id)
        sleep(interval)
        polls_done += 1
    raise PollingLimitException()
def is_active(server):
    """True when the server's status string is exactly 'ACTIVE'."""
    return server.status == 'ACTIVE'
def is_shutoff(server):
    """True when the server's status string is exactly 'SHUTOFF'."""
    return server.status == 'SHUTOFF'
if __name__ == '__main__':
    # Usage: script.py <instance_id> -- delete the given Nova server,
    # polling until it disappears, with a small retry budget.
    try:
        instance_id = sys.argv[1]
    except IndexError:
        sys.exit('Specify an instance_id')
    # Print some info
    print('Initial Auth Info:')
    for authtype, params in utils.initial_auth_info(ks.auth.client.session):
        print(' %s' % authtype)
        print(' %s' % params)
    print('Access Info:')
    # .iteritems() (and `except Exception, e` below) make this Python-2-only.
    for k, v in utils.access_info_vars(sess).iteritems():
        print('* {}: {}'.format(k, v))
    retry_count = 3
    try:
        server = nova.servers.get(instance_id)
        print('* Deleting %s' % server.name)
        for i in range(1, retry_count+1):
            print('** Attempt %d' % i)
            server.delete()
            try:
                # poll_server yields Server objects until NotFound
                # (server gone) or the polling limit trips.
                for state in poll_server(server):
                    # NOTE(review): `state` is a Server object, so comparing
                    # it to the string 'DELETING' can never be true; likely
                    # meant `state.status == 'DELETING'` -- confirm.
                    if state == 'DELETING':
                        print('** still deleting')
            except novaclient.exceptions.NotFound:
                print('*** done deleting')
                break
    except Exception, e:
        # Broad catch keeps the CLI from tracebacking; just report.
        print(e)
|
6,689 | 8e9db58488f6ee8aa0d521a19d9d89504d119076 | tp = 1, 2, 3
print(tp + (4,))
|
6,690 | 28cdb59e97f3052dd80f8437574f9ffe09fc1e84 | from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import classification_report
from BlogTutorials.pyimagesearch.preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor
from BlogTutorials.pyimagesearch.preprocessing.ROIpreprocessor import ROIPreprocessor
from BlogTutorials.pyimagesearch.datasets.simpledatasetloader import SimpleDatasetLoader
from BlogTutorials.pyimagesearch.nn.conv.minivggnet import MiniVGGNet
from keras.optimizers import SGD
from keras.utils import np_utils
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import os
"""
VGG16 net trained on depth maps for mushrooms/end tool in frame. Attempts binary classification of a depth map,
with output softmax to classify a prediction of the pick will be successful.
Outline of training approach (dogs_vs_cats classifier as template)
320 x 240 input pixels
(add mask layer of target mushroom)
labels org: /pick_confidence_net/pick_fail_depth & /pick_success_depth
/config/pick_confidence_config.py
file paths
num classes, training/val/test ratio of data
HDF5 data locations
outputs: model, normalization distance values, charts/training data
/build_pick_conf_dataset.py
# Get image paths
# Sep training, test, and validation data
# datasets list
# Preprocessors images (crop to ROI (160:320,0:120), normalize distance points)
"""
dataset_path = r"/home/matthewlefort/Documents/gitRepos/bella_training/pick_confidence_net/Depth"
# get class labels
imagePaths = list(paths.list_images(dataset_path))
classNames = [pt.split(os.path.sep)[-2] for pt in imagePaths]
classNames = [str(x) for x in np.unique(classNames)]
# init preprocessors
roip = ROIPreprocessor(xmin=0, xmax=120, ymin=160, ymax=320)
iap = ImageToArrayPreprocessor()
# load the dataset from disk then scale the raw pixels
sdl = SimpleDatasetLoader(preprocessors=[roip, iap], img_load="mat")
(data, labels) = sdl.load(imagePaths=imagePaths, verbose=250)
labels = np.array(labels)
le = LabelEncoder()
le.fit(labels)
labels = np_utils.to_categorical(le.transform(labels), 2)
# accouunt for the skew in data labels. Used to amplifiy training weights of "smiling" case given the ratio of
# [9475 to 3690] (non smiling to smiling)
classTotals = labels.sum(axis=0)
classWeight = float(classTotals.max()) / classTotals
# split data (stratify sampling samples at same ratio of data set for test to train split. i.e the 9475:3690 ratio
# is kept in both the test and training set
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=.2, stratify=labels, random_state=42)
# partision data
# split_data = StratifiedShuffleSplit(1, test_size=0.2, random_state=42)
# for train_idx, test_idx in split_data.split(data, labels):
# trainX = data[train_idx]
# testX = data[test_idx]
# trainY = labels[train_idx]
# testY = labels[test_idx]
# initialize optimizer
print("[info] compile model...")
opt = SGD(lr=0.05)
model = MiniVGGNet.build(width=64, height=64, depth=3, classes=len(classNames))
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
epochs = 100
print("[info] Training model")
H = model.fit(trainX, trainY, validation_data=(testX, testY), batch_size=32, epochs=epochs, verbose=1)
predictions = model.predict(testX, batch_size=32)
print(classification_report(testY.argmax(axis=1), predictions.argmax(axis=1), target_names=classNames))
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, epochs), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, epochs), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, epochs), H.history["acc"], label="train_acc")
plt.plot(np.arange(0, epochs), H.history["val_acc"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.show() |
6,691 | d340ac979f57cf4650131665e4fa5b9923f22a3e | from collections.abc import Iterator
import json
import click
def print_json(obj, err=False):
    """Pretty-print *obj* as JSON via click (stderr when *err* is True).

    Iterators are drained into a list first so they serialize.
    """
    if isinstance(obj, Iterator):
        obj = list(obj)
    rendered = json.dumps(obj, sort_keys=True, indent=4, ensure_ascii=False)
    click.echo(rendered, err=err)
def show_fields(*fields):
    """Build a summarizer for dict-like API payloads.

    Each entry in *fields* is either a key name, or a tuple
    ``(name, step, step, ...)`` where each step is a sub-key or a
    callable applied to the value. Missing keys are skipped; a None
    value stops sub-path traversal; list values map the step over
    their elements (None elements stay None).
    """
    def _step(value, sp):
        # Apply one sub-path step to *value*.
        if callable(sp):
            return sp(value)
        if isinstance(value, list):
            return [item and item[sp] for item in value]
        return value[sp]

    def show(obj, verbose=False):
        if verbose:
            return obj
        summary = {}
        for entry in fields:
            if isinstance(entry, str):
                entry = (entry,)
            name, *subpath = entry
            try:
                value = obj[name]
            except KeyError:
                continue
            for sp in subpath:
                if value is None:
                    break
                value = _step(value, sp)
            summary[name] = value
        return summary
    return show
repo_info = show_fields(
("owner", "login"),
"name",
"url",
"html_url",
"clone_url",
"git_url",
"ssh_url",
"full_name",
"description",
"homepage",
"private",
"default_branch",
"created_at",
"updated_at",
"pushed_at",
"fork",
"forks_count",
"watchers_count",
"size",
"subscribers_count",
"stargazers_count",
"id",
"language",
"network_count",
"open_issues_count",
("parent", "full_name"),
("source", "full_name"),
)
gist_info = show_fields(
"id",
"url",
"git_push_url",
("files", lambda files: {
fname: {k:v for k,v in about.items() if k != 'content'}
for fname, about in files.items()
}),
"public",
"html_url",
("owner", "login"),
"description",
"created_at",
"updated_at",
"comments",
("fork_of", "id"),
("forks", "id"),
)
issue_info = show_fields(
("assignees", "login"),
"closed_at",
("closed_by", "login"),
"comments",
"created_at",
"html_url",
"id",
("labels", "name"),
"locked",
("milestone", "title"),
"number",
"state",
"title",
"updated_at",
"url",
("user", "login"),
"repository_url",
### pull_request
)
|
6,692 | 792f62c72f1667f651567314b062d862abbc9aa5 | # created by Angus Clark 9/2/17 updated 27/2/17
# ToDo impliment traceroute function into this
# Perhaps get rid of unnecessary itemediate temp file
import socket
import os
import json
import my_traceroute
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = '130.56.253.43'
#print host
port = 5201 # Change port (must enable security settigns of server)
s.bind((host,port))
s.listen(5)
MAX_HOPS = 30 # max hops for traceroute
while True:
    # Accept one upload per connection: the client streams a JSON
    # document describing itself; we annotate it with the sender IP and
    # a traceroute back to the sender, then archive it per user.
    c, addr = s.accept()
    f = open('temp.json','wb')  # scratch file for the incoming payload
    l = c.recv(1024)
    while(l):
        # Dump data into temp file and get next chunk of data
        f.write(l)
        l = c.recv(1024)
    f.close()
    c.close()
    tempfile = open('temp.json','rb')
    info = json.load(tempfile)
    tempfile.close()  # BUG FIX: handle was previously leaked every loop
    info["UserInfo"]["ip"] = addr[0]  # store ip address of sender
    last_addr = '0.0.0.0'  # placeholder for first iteration
    for hop in range(1, MAX_HOPS):
        result = my_traceroute.traceroute(hop, info["UserInfo"]["ip"])
        if result == -1:
            break
        if result[1] == last_addr:
            # Same node twice in a row: destination reached.
            break
        info["TRACEROUTE"][str(result[0])] = {'node': result[1], 'rtt': result[2]}
        last_addr = result[1]
    id = info["UserInfo"]["user id"]
    timestamp = info["UserInfo"]["timestamp"]
    # SECURITY FIX: `id` comes straight from the client's JSON; the old
    # os.system('mkdir ... ' + id) let a crafted id inject arbitrary
    # shell commands. Create the directory without a shell instead.
    path = "/home/ubuntu/data/" + id + "/"
    if not os.path.isdir(path):
        os.makedirs(path)
    filename = timestamp + '.json'
    savefile = open(path + filename, 'w+')
    savefile.write(json.dumps(info))
    savefile.close()
6,693 | 3d16f2da03c067d410bec7bfe96d874322533d30 | import numpy
d1 = numpy.array([1.,0.,0.])
d2 = numpy.array([0.,1.,0.])
d3 = numpy.array([0.,0.,1.])
s0 = numpy.array([0.,0.,1.])
m2 = numpy.array([1.,0.,0.])
print "x y zeta"
for x in xrange(-100, 101):
for y in xrange(-100, 101):
s = x*d1 + y*d2 + 100*d3
e1 = numpy.cross(s, s0)
e1 /= numpy.linalg.norm(e1)
zeta = abs(numpy.dot(e1, m2))
print x,y,zeta
|
6,694 | 5fa91a5061a5e87a4a2b8fece0378299e87e5a48 | # Interprets the AST
class Program:
    """Root AST node; evaluating a program evaluates its code block."""
    def __init__(self, code):
        self.code = code
    def eval(self, binding):
        return self.code.eval(binding)

class Code:
    """A statement sequence; evaluates to the last statement's value
    (0 for an empty sequence)."""
    def __init__(self, statements):
        self.statements = statements
    def eval(self, binding):
        result = 0
        for stmt in self.statements:
            result = stmt.eval(binding)
        return result

class Statement:
    """Thin wrapper delegating evaluation to the wrapped statement."""
    def __init__(self, statement):
        self.statement = statement
    def eval(self, binding):
        return self.statement.eval(binding)

class Expr:
    """Thin wrapper delegating evaluation to the wrapped expression."""
    def __init__(self, expression):
        self.expression = expression
    def eval(self, binding):
        return self.expression.eval(binding)

class Integer:
    """Integer literal; the stored token is converted on each eval."""
    def __init__(self, value):
        self.value = value
    def eval(self, binding):
        return int(self.value)
class BinaryOp:
    """Shared constructor for binary operator nodes."""
    def __init__(self, left, right):
        self.left = left
        self.right = right

class Sum(BinaryOp):
    """Addition node."""
    def eval(self, binding):
        lhs = self.left.eval(binding)
        rhs = self.right.eval(binding)
        return lhs + rhs

class Sub(BinaryOp):
    """Subtraction node."""
    def eval(self, binding):
        lhs = self.left.eval(binding)
        rhs = self.right.eval(binding)
        return lhs - rhs

class Mult(BinaryOp):
    """Multiplication node."""
    def eval(self, binding):
        lhs = self.left.eval(binding)
        rhs = self.right.eval(binding)
        return lhs * rhs

class Div(BinaryOp):
    """Division node: true division truncated toward zero by int()."""
    def eval(self, binding):
        lhs = self.left.eval(binding)
        rhs = self.right.eval(binding)
        return int(lhs / rhs)
class BuiltInFunction:
    # built-in functions are print, and ()
    def __init__(self, func_name, call_args):
        self.func_name = func_name
        self.call_args = call_args
    def eval(self, binding):
        """Evaluate the arguments, then dispatch on the builtin's name.

        'print' writes the value(s) to stdout and returns None; any
        other builtin (the grouping form) returns the first argument.
        """
        args = self.call_args.eval(binding)
        if self.func_name == "print":
            # A bare int prints directly; otherwise args is a list of
            # values printed '|'-separated on one line.
            if type(args) == int:
                print(args)
            else:
                print("Print: ")
                count = 0
                for arg in args:
                    out = str(arg)
                    # every value but the last is followed by '|'
                    if count != len(args) - 1:
                        print(out, end="|")
                    else:
                        print(out)
                    count += 1
            return
        else:
            return args[0]
# binding class used to store variables and functions
class Binding:
    """Lexically scoped symbol table.

    Holds this scope's name->value dict plus a link to the enclosing
    (parent) Binding; the top-level scope uses a falsy parent.
    """
    def __init__(self, parent, binding):
        self.parent = parent    # enclosing Binding, or falsy at top level
        self.binding = binding  # name -> value for this scope
    def get(self, name):
        """Look *name* up in this scope, then in enclosing scopes.

        Unchanged behavior: a name missing at the top level surfaces as
        an AttributeError on the falsy parent; callers are expected to
        check contains() first.
        """
        if name in self.binding:
            return self.binding[name]
        return self.parent.get(name)
    def add(self, var_name, value):
        """Bind (or rebind) *var_name* in the current scope."""
        self.binding[var_name] = value
    def contains(self, name):
        """Return True if *name* is visible from this scope."""
        # IDIOM FIX: previous code scanned the dict with a manual loop;
        # an O(1) membership test is equivalent and clearer.
        if name in self.binding:
            return True
        if self.parent:
            return self.parent.contains(name)
        return False
class FunctionCall:
    """AST node for invoking a user-defined function.

    Functions are stored in the binding as a
    (param_list_node, code_block, closure_binding) tuple (see
    FunctionDefinition.eval).
    """
    def __init__(self, func_name, call_args):
        self.func_name = func_name   # node naming the function
        # NOTE(review): assumed to be either a VarReference (no-arg
        # call) or an indexable (param_ref, arg_list) pair -- confirm
        # against the parser's contract.
        self.call_args = call_args
    def eval(self, binding):
        # if function has no parameters
        if type(self.call_args) == VarReference:
            # args is the stored (params, code, closure) tuple;
            # index 1 is the function body.
            args = self.call_args.eval(binding)
            return args[1].eval(binding)
        # else
        # creates a new function binding that is a child of the binding when the function was created
        func_binding = Binding(binding.get(self.func_name.value)[2], {})
        # sets parameters and arguments and adds them to the function binding
        parameters = self.call_args[0].eval(func_binding)[0]
        # checks to see if the arg values for param_list are in the function binding. This is for recursion.
        if func_binding.contains(parameters[0]):
            args = self.call_args[1].eval(func_binding)
        # if not, checks if the arg values are in the global binding
        else:
            args = self.call_args[1].eval(binding)
        # assigns the arg values to the parameters and adds it to the function binding
        for i in range(len(parameters)):
            func_binding.add(parameters[i], args[i])
        # returns the evaluated code using the function binding
        code = func_binding.get(self.func_name.value)[1]
        return code.eval(func_binding)
class FunctionDefinition:
    """AST node for a function literal.

    `eval` does not run the body; it packages the parameter list, the
    body, and a fresh child binding (the closure scope) as a tuple,
    which the enclosing Decl/Assignment stores under the function name.
    """
    def __init__(self, param_list, code_block):
        self.param_list = param_list
        self.code_block = code_block
    def eval(self, binding):
        closure_scope = Binding(binding, {})
        return self.param_list, self.code_block, closure_scope
class CallArguments:
    """Argument-list node: evaluates each argument expression in order
    and returns the resulting values as a list."""
    def __init__(self, arguments):
        self.arguments = arguments
    def eval(self, binding):
        return [arg.eval(binding) for arg in self.arguments]
class VariableDecl:
    """A declaration statement holding one or more Decl nodes."""
    def __init__(self, declarations):
        self.declarations = declarations
    def eval(self, binding):
        # NOTE(review): returns the value of the LAST declaration; if
        # declarations is empty this raises NameError on `temp`.
        # Confirm whether the original return sat inside the loop
        # (returning after the first declaration) -- the source paste's
        # indentation is ambiguous.
        for decl in self.declarations:
            temp = decl.eval(binding)
        return temp
class Decl:
    """One `name = expr` declaration: evaluates the initializer, binds
    it in the given scope, and yields the bound value."""
    def __init__(self, var_name, val):
        self.var_name = var_name
        self.val = val
    def eval(self, binding):
        bound_value = self.val.eval(binding)
        binding.add(self.var_name, bound_value)
        return bound_value
class Assignment:
    """`name = expr` assignment: evaluates the right-hand side, stores
    it under the name, and yields the stored value."""
    def __init__(self, var_name, val):
        self.var_name = var_name
        self.val = val
    def eval(self, binding):
        new_value = self.val.eval(binding)
        binding.add(self.var_name, new_value)
        return new_value
class VarReference:
    """Reads a variable's current value from the binding chain."""
    def __init__(self, var_name):
        self.var_name = var_name
    def eval(self, binding):
        return binding.get(self.var_name)
class EqualTo(BinaryOp):
    """left == right -> 1, else 0."""
    def eval(self, binding):
        return int(self.left.eval(binding) == self.right.eval(binding))

class NotEqualTo(BinaryOp):
    """left != right -> 1, else 0."""
    def eval(self, binding):
        return int(self.left.eval(binding) != self.right.eval(binding))

class LessThan(BinaryOp):
    """left < right -> 1, else 0."""
    def eval(self, binding):
        return int(self.left.eval(binding) < self.right.eval(binding))

class LessThanOrEqualTo(BinaryOp):
    """left <= right -> 1, else 0."""
    def eval(self, binding):
        return int(self.left.eval(binding) <= self.right.eval(binding))

class GreaterThan(BinaryOp):
    """left > right -> 1, else 0."""
    def eval(self, binding):
        return int(self.left.eval(binding) > self.right.eval(binding))

class GreaterThanOrEqualTo(BinaryOp):
    """left >= right -> 1, else 0."""
    def eval(self, binding):
        return int(self.left.eval(binding) >= self.right.eval(binding))
class IfExpression:
    """`if` without `else`: evaluates to the branch value when the
    condition is 1, otherwise to 0."""
    def __init__(self, bool_expr, if_block):
        self.bool_expr = bool_expr
        self.if_block = if_block
    def eval(self, binding):
        cond = self.bool_expr.eval(binding)
        # Conditions normally evaluate to an int flag; some nodes yield
        # a tuple whose first element is the flag.
        if type(cond) is not int:
            cond = cond[0]
        return self.if_block.eval(binding) if cond == 1 else 0
class IfElseExpression:
    """`if/else`: evaluates to the if-branch value when the condition
    is 1, otherwise to the else-branch value."""
    def __init__(self, bool_expr, if_block, else_block):
        self.bool_expr = bool_expr
        self.if_block = if_block
        self.else_block = else_block
    def eval(self, binding):
        cond = self.bool_expr.eval(binding)
        # Unwrap tuple-shaped condition results to their int flag.
        if type(cond) is not int:
            cond = cond[0]
        branch = self.if_block if cond == 1 else self.else_block
        return branch.eval(binding)
|
6,695 | 3457a7c080da041ad279239bd6a3d214a3b8e49f | """
Routes and views for the flask application.
"""
from datetime import datetime
from flask import render_template, redirect, url_for, request, jsonify
from athena_App import app
from athena_App.formClass import QuestionForm
import time
#attention:
#this module include large word vector which need a lot of time to load
#turn it off when when you debugging other module
#
#from athena_App.data_process.es_QAsearch import *
#
#from athena_App.data_process.keywordCompare import Keyword_Compare, Answer
#from athena_App.data_process.word2vecCompareModel import *
#from athena_App.data_process.graph_query import *
#from athena_App.openlaw.graphOfcase_query_echart import *
#reconstruct series
from athena_App.layer_frontInteracting.qa_module import answerFinder
from athena_App.layer_frontInteracting.kg_module import knowledgeSearch
from athena_App.layer_frontInteracting.case_module import caseQuery
@app.route('/QAsearch', methods=['POST','GET'])
def QAsearch():
"""Renders the QAsearch page."""
question = ''
form = QuestionForm()
question = form.question.data
if form.validate_on_submit():
return redirect(url_for('answer',word=question))
return render_template(
'QAsearch.html',
title = 'QAsearch Page',
year = datetime.now().year,
form = form,
question = question
)
@app.route('/instruction')
def instruction():
"""Renders the instruction page."""
return render_template(
'instruction.html',
title='说明',
year=datetime.now().year,
message='Instruction'
)
@app.route('/about')
def about():
"""Renders the about page."""
return render_template(
'about.html',
title='About',
year=datetime.now().year,
message='Your application description page.'
)
@app.route('/answer/<word>')
def answer(word):
"""Renders the answer page"""
print(word)
start=time.clock()
finder=answerFinder()
answer=finder.findANDpack(word)
end=time.clock()
print(str(end-start))
return render_template(
'answer.html',
title='Answer',
answer=answer
)
@app.route('/main')
@app.route('/')
def main():
return render_template(
'newMain.html',
title = 'Welcome Page',
year = datetime.now().year
)
@app.route('/graph_search',methods=['get','post'])
def graph_search():
return render_template(
'graph_search.html',
title = 'Graph search page',
year = datetime.now().year)
@app.route('/knowledge_search',methods=['get','post'])
def knowledge_search():
#initialize graph search object
searchKnowledge=knowledgeSearch()
des=request.args.get('description')
json_data=searchKnowledge.getTotalData_forKnowledgeSearch(des)
print(json_data)
return jsonify(json_data)
@app.route('/case_search_Test',methods=['get','post'])
def case_search_Test():
return render_template(
'case_search_Test.html',
title = 'Case search page',
year = datetime.now().year)
@app.route('/case_graph_search',methods=['get','post'])
def case_graph_search():
caseDes=request.args.get('caseDes')
#initialize graph search object
case_graph_result=caseQuery(caseDes)
pre_json_data=case_graph_result.getData()
print(pre_json_data)
return jsonify(pre_json_data)
@app.route('/knife',methods=['get','post'])
def knife():
return render_template(
'knife.html',
title = 'KNIFE SEARCH',
year = datetime.now().year
)
@app.route('/searchAll',methods=['get','post'])
def searchAll():
pass |
6,696 | afecbb46a98fbf6b5c26f5b6c8026aec035fadf1 | from typing import List, Tuple
from unittest import TestCase
from solutions.python.common.timing import decompose, parse_decomposed_duration, format_duration
class TestTiming(TestCase):
def test_decompose_ns(self):
# Given
duration: int = 234
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(234, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_us(self):
# Given
duration: int = 23456
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(23, 'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_ms(self):
# Given
duration: int = 1023456
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_s(self):
# Given
duration: int = 45001023456
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_min(self):
# Given
duration: int = 65001023456
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(1, 'min'), (5, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_h(self):
# Given
duration: int = 7995125885088
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15, 's'),
(125, 'ms'), (885, 'μs'), (88, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_parse_decomposed_duration_ns(self):
# Given
decomposition: List[Tuple[int, str]] = [(234, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '234 ns'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_us(self):
# Given
decomposition: List[Tuple[int, str]] = [(23, 'μs'), (456, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '23.456 μs'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_ms(self):
# Given
decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '1.023 ms'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_s(self):
# Given
decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '45.001 s'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_min(self):
# Given
decomposition: List[Tuple[int, str]] = [(1, 'min'), (5, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '1 min 5 s'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_h(self):
# Given
decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15, 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '2 h 13 min'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_format_duration_h(self):
# Given
duration_ns: int = 7995125885088
# When
formatted_duration: str = format_duration(duration_ns)
# Then
expected_formatted_duration: str = '2 h 13 min'
self.assertEqual(expected_formatted_duration, formatted_duration)
def test_format_duration_us(self):
# Given
duration_ns: int = 23456
# When
formatted_duration: str = format_duration(duration_ns)
# Then
expected_formatted_duration: str = '23.456 μs'
self.assertEqual(expected_formatted_duration, formatted_duration)
|
6,697 | be69a9981fe6b53c3b9c4d2893913e4f9f7efb26 | from google.cloud import vision
from google.cloud.vision import types
from google.oauth2 import service_account
import os
# import re
import io
import pdf2image
import tempfile
import datetime
# Google API
credentials = service_account.Credentials.from_service_account_file("APIKey.json")
client = vision.ImageAnnotatorClient(credentials=credentials)
def OCRscan(self, imgfile):
print("Performing OCR Scan on the image ", imgfile)
with io.open(imgfile, "rb") as image_file:
content = image_file.read()
image = types.Image(content=content)
response_with_text = client.document_text_detection(image=image)
document = response_with_text.full_text_annotation
return document
def boxes_to_obj(self,bound):
return {'x1': bound.vertices[0].x ,'x2':bound.vertices[1].x ,
'y1':bound.vertices[0].y ,'y2':bound.vertices[2].y }
def generateTempFolder(self, prifx, src):
"Creating temp directory.."
print("Creating temp directory.. with src and prefix .. ", prifx, src)
# temp_dir = tempfile.mkdtemp(("-"+str(datetime.datetime.now()).replace(":", "-")), "PMR_Claims", self.cwd+os.sep
# + "GENERATED"+os.sep+"CLAIMS")
temp_dir = tempfile.mkdtemp(
("-"+str(datetime.datetime.now()).replace(":", "-")), prifx, src)
print("Temp directory created", temp_dir)
return temp_dir
def createSubDir(self, src, subDirNameList):
print("Creating a subdirectory..")
for subfolder_name in subDirNameList:
os.makedirs(os.path.join(src, subfolder_name))
def getFilesindir(self, dire):
print('Fetching the file in the directory')
print(dire)
return os.listdir(dire)
|
6,698 | ad1ec5dd8fae290ab6cb73b17c5522e062261359 | import os
import inspect
import pytest
from ._common import copy_default_profile_collection, patch_first_startup_file
from bluesky_queueserver.manager.profile_tools import global_user_namespace, load_devices_from_happi
from bluesky_queueserver.manager.profile_ops import load_profile_collection
def create_local_imports_files(tmp_path):
path_dir = os.path.join(tmp_path, "dir_local_imports")
fln_func = os.path.join(path_dir, "file_func.py")
fln_gen = os.path.join(path_dir, "file_gen.py")
os.makedirs(path_dir, exist_ok=True)
# Create file1
code1 = """
from bluesky_queueserver.manager.profile_tools import set_user_ns
# Function that has the parameter 'ipython'
@set_user_ns
def f1(some_value, user_ns, ipython):
user_ns["func_was_called"] = "func_was_called"
return (some_value, user_ns["v_from_namespace"], bool(ipython))
# Function that has no parameter 'ipython'
@set_user_ns
def f1a(some_value, user_ns):
user_ns["func_A_was_called"] = "func_was_called"
return (some_value, user_ns["v_from_namespace"])
"""
with open(fln_func, "w") as f:
f.writelines(code1)
# Create file2
code2 = """
from bluesky_queueserver.manager.profile_tools import set_user_ns
# Function that has the parameter 'ipython'
@set_user_ns
def f2(some_value, user_ns, ipython):
user_ns["gen_was_called"] = "gen_was_called"
yield (some_value, user_ns["v_from_namespace"], bool(ipython))
# Function that has no parameter 'ipython'
@set_user_ns
def f2a(some_value, user_ns):
user_ns["gen_A_was_called"] = "gen_was_called"
yield (some_value, user_ns["v_from_namespace"])
@set_user_ns
def f3(some_value, user_ns, ipython):
user_ns["value_f3"] = some_value
f3(91)
"""
with open(fln_gen, "w") as f:
f.writelines(code2)
patch_code = """
from dir_local_imports.file_func import f1, f1a
from dir_local_imports.file_gen import f2, f2a
from bluesky_queueserver.manager.profile_tools import set_user_ns
@set_user_ns
def f4(some_value, user_ns, ipython):
user_ns["value_f4"] = some_value
f4(90)
"""
def test_set_user_ns_1(tmp_path):
"""
Tests for ``set_user_ns`` decorator. The functionality of the decorator
is fully tested (only without IPython):
- using ``global_user_namespace`` to pass values in and out of the function
defined in the imported module (emulation of ``get_ipython().user_ns``).
- checking if the function is executed from IPython (only for the function
defined in the imported module).
"""
pc_path = copy_default_profile_collection(tmp_path)
create_local_imports_files(pc_path)
patch_first_startup_file(pc_path, patch_code)
nspace = load_profile_collection(pc_path)
assert len(nspace) > 0, "Failed to load the profile collection"
assert "f1" in nspace, "Test for local imports failed"
assert "f2" in nspace, "Test for local imports failed"
# Test if the decorator `set_user_ns` does not change function type
assert inspect.isgeneratorfunction(nspace["f1"]) is False
assert inspect.isgeneratorfunction(nspace["f2"]) is True
# Check if the extra arguments are removed from the function signature
def check_signature(func):
params = inspect.signature(func).parameters
assert "user_ns" not in params
assert "ipython" not in params
check_signature(nspace["f1"])
check_signature(nspace["f1a"])
check_signature(nspace["f2"])
check_signature(nspace["f2a"])
assert nspace["value_f3"] == 91
assert nspace["value_f4"] == 90
# Test function
global_user_namespace.set_user_namespace(user_ns=nspace, use_ipython=False)
global_user_namespace.user_ns["v_from_namespace"] = "value-sent-to-func"
assert nspace["v_from_namespace"] == "value-sent-to-func"
result_func = nspace["f1"](60)
assert nspace["func_was_called"] == "func_was_called"
assert result_func[0] == 60
assert result_func[1] == "value-sent-to-func"
assert result_func[2] is False
result_func = nspace["f1a"](65)
assert nspace["func_A_was_called"] == "func_was_called"
assert result_func[0] == 65
assert result_func[1] == "value-sent-to-func"
# Test generator
global_user_namespace.user_ns["v_from_namespace"] = "value-sent-to-gen"
result_func = list(nspace["f2"](110))[0]
assert nspace["gen_was_called"] == "gen_was_called"
assert result_func[0] == 110
assert result_func[1] == "value-sent-to-gen"
assert result_func[2] is False
result_func = list(nspace["f2a"](115))[0]
assert nspace["gen_A_was_called"] == "gen_was_called"
assert result_func[0] == 115
assert result_func[1] == "value-sent-to-gen"
def test_global_user_namespace():
"""
Basic test for ``global_user_namespace``.
"""
ns = {"ab": 1, "cd": 2}
global_user_namespace.set_user_namespace(user_ns=ns)
assert global_user_namespace.user_ns == ns
assert global_user_namespace.use_ipython is False
global_user_namespace.set_user_namespace(user_ns={}, use_ipython=True)
assert global_user_namespace.user_ns == {}
assert global_user_namespace.use_ipython is True
global_user_namespace.set_user_namespace(user_ns=ns, use_ipython=False)
assert global_user_namespace.user_ns == ns
assert global_user_namespace.use_ipython is False
_happi_json_db_1 = """
{
"det": {
"_id": "det",
"active": true,
"args": [],
"device_class": "ophyd.sim.DetWithCountTime",
"documentation": null,
"kwargs": {
"name": "{{name}}"
},
"name": "det",
"type": "OphydItem"
},
"motor": {
"_id": "motor",
"active": true,
"args": [],
"device_class": "ophyd.sim.SynAxisNoPosition",
"documentation": null,
"kwargs": {
"name": "{{name}}"
},
"name": "motor",
"type": "OphydItem"
},
"motor1": {
"_id": "motor1",
"active": true,
"args": [],
"device_class": "ophyd.sim.SynAxisNoHints",
"documentation": null,
"kwargs": {
"name": "{{name}}"
},
"name": "motor1",
"type": "OphydItem"
},
"tst_motor2": {
"_id": "tst_motor2",
"active": true,
"args": [],
"device_class": "ophyd.sim.SynAxisNoHints",
"documentation": null,
"kwargs": {
"name": "{{name}}"
},
"name": "tst_motor2",
"type": "OphydItem"
},
"motor3": {
"_id": "motor3",
"active": true,
"args": [],
"device_class": "ophyd.sim.SynAxis",
"documentation": null,
"kwargs": {
"name": "{{name}}"
},
"name": "motor3",
"type": "OphydItem"
},
"motor3_duplicate_error": {
"_id": "motor3",
"active": false,
"args": [],
"device_class": "ophyd.sim.SynAxis",
"documentation": null,
"kwargs": {
"name": "{{name}}"
},
"name": "motor3",
"type": "OphydItem"
}
}
"""
def _configure_happi(tmp_path, monkeypatch, json_devices):
path_json = os.path.join(tmp_path, "sim_devices.json")
path_ini = os.path.join(tmp_path, "happi.ini")
happi_ini_text = f"[DEFAULT]\nbackend=json\npath={path_json}"
with open(path_ini, "w") as f:
f.write(happi_ini_text)
with open(path_json, "w") as f:
f.write(json_devices)
monkeypatch.setenv("HAPPI_CFG", path_ini)
# fmt: off
@pytest.mark.parametrize("device_names, loaded_names, kw_args, success, errmsg", [
    ([], [], {}, True, ""),  # No devices are loaded if the list of devices is empty
    (("det", "motor"), ("det", "motor"), {}, True, ""),
    (["det", "motor"], ("det", "motor"), {}, True, ""),
    ((("det", ""), ["motor", ""]), ("det", "motor"), {}, True, ""),
    (("det", ["motor", ""]), ("det", "motor"), {}, True, ""),
    (("det", ("motor", ""), ("tst_motor2", "motor2")), ("det", "motor", "motor2"), {}, True, ""),
    # This is not typical use case, but the same device may be loaded multiple times
    # with different names if needed.
    ((("motor1", "motor1_copy1"), ("motor1", "motor1_copy2")), ("motor1_copy1", "motor1_copy2"), {}, True, ""),
    # Incorrect type of the device list
    (10, ("det", "motor"), {}, False, "Parameter 'device_names' value must be a tuple or a list"),
    ("string", ("det", "motor"), {}, False, "Parameter 'device_names' value must be a tuple or a list"),
    # Incorrecty type or form of a device list element
    (("det", 10), ("det", "motor"), {}, False, "Parameter 'device_names': element .* must be str, tuple or list"),
    ((10, "motor"), ("det", "motor"), {}, False,
     "Parameter 'device_names': element .* must be str, tuple or list"),
    (("det", (10, "motor2")), ("det", "motor"), {}, False, "element .* is expected to be in the form"),
    (("det", ("tst_motor2", 10)), ("det", "motor"), {}, False, "element .* is expected to be in the form"),
    (("det", ("tst_motor2", "motor2", 10)), ("det", "motor"), {}, False,
     "element .* is expected to be in the form"),
    # No device found
    (("det", "motor10"), ("det", "motor10"), {}, False, "No devices with name"),
    # Multiple devices found (search for "motor3" yields multile devices, this is database issue)
    (("det", "motor3"), ("det", "motor3"), {}, False, "Multiple devices with name"),
    # Use additional search parameters. (Two entries for "motor3" differ in the value of `active` field.
    # A single entry for `det` has `active==True`.)
    (("det", "motor3"), ("det", "motor3"), {"active": True}, True, ""),
    (("det", "motor3"), ("det", "motor3"), {"active": False}, False,
     "No devices with name 'det' were found in Happi database."),
    (("motor3",), ("motor3",), {"active": False}, True, ""),
    # Verify that valid device names are accepted
    (("det", ["motor", "motor3_new"]), ("det", "motor3_new"), {}, True, ""),
    # Invalid new device name
    (("det", ["motor", "Motor"]), ("det", "motor"), {}, False, "may consist of lowercase letters, numbers"),
    (("det", ["motor", "moTor"]), ("det", "motor"), {}, False, "may consist of lowercase letters, numbers"),
    (("det", ["motor", "_motor"]), ("det", "motor"), {}, False, "may consist of lowercase letters, numbers"),
    (("det", ["motor", " motor"]), ("det", "motor"), {}, False, "may consist of lowercase letters, numbers"),
    (("det", ["motor", "motor "]), ("det", "motor"), {}, False, "may consist of lowercase letters, numbers"),
    (("det", ["motor", "motor new"]), ("det", "motor"), {}, False, "may consist of lowercase letters, numbers"),
    (("det", ["motor", "motor_$new"]), ("det", "motor"), {}, False, "may consist of lowercase letters, numbers"),
    (("det", ["motor", "2motor_$new"]), ("det", "motor"), {}, False, "may consist of lowercase letters, numbers"),
])
# fmt: on
def test_load_devices_from_happi_1(tmp_path, monkeypatch, device_names, loaded_names, kw_args, success, errmsg):
    """
    Tests for ``load_devices_from_happi``.

    Each case loads ``device_names`` from a temporary Happi database twice:
    once into an explicit dictionary namespace and once into ``locals()``.
    On success the namespace must contain exactly ``loaded_names``; on
    failure the call must raise an exception matching the ``errmsg`` regex.
    ``kw_args`` are forwarded as additional Happi search parameters.
    """
    _configure_happi(tmp_path, monkeypatch, json_devices=_happi_json_db_1)
    # Load as a dictionary
    if success:
        ns = {}
        dlist = load_devices_from_happi(device_names, namespace=ns, **kw_args)
        # The namespace must contain exactly the expected device names ...
        assert len(ns) == len(loaded_names), str(ns)
        for d in loaded_names:
            assert d in ns
        # ... and the returned list of loaded names must match as a set.
        assert set(dlist) == set(loaded_names)
    else:
        with pytest.raises(Exception, match=errmsg):
            ns = {}
            load_devices_from_happi(device_names, namespace=ns, **kw_args)
    # Load in local namespace
    def _test_loading(device_names, loaded_names):
        # Helper: repeat the same load/verify sequence against locals().
        if success:
            load_devices_from_happi(device_names, namespace=locals(), **kw_args)
            for d in loaded_names:
                assert d in locals()
        else:
            with pytest.raises(Exception, match=errmsg):
                load_devices_from_happi(device_names, namespace=locals(), **kw_args)
    _test_loading(device_names=device_names, loaded_names=loaded_names)
def test_load_devices_from_happi_2_fail(tmp_path, monkeypatch):
    """
    Function ``load_devices_from_happi``: parameter ``namespace`` is required
    and must be of type ``dict``.
    """
    _configure_happi(tmp_path, monkeypatch, json_devices=_happi_json_db_1)
    devices = ["det", "motor"]
    # Omitting the keyword-only 'namespace' parameter must raise TypeError.
    with pytest.raises(TypeError, match="missing 1 required keyword-only argument: 'namespace'"):
        load_devices_from_happi(devices)
    # Passing a non-dict 'namespace' must be rejected with TypeError as well.
    with pytest.raises(TypeError, match="Parameter 'namespace' must be a dictionary"):
        load_devices_from_happi(devices, namespace=[1, 2, 3])
|
# Load every review from reviews.txt into memory and report basic statistics.
# (Fix: stray dataset metadata fused into the first line made it a syntax error.)
data = []   # holds one review per element, read from reviews.txt
count = 0   # number of lines read so far
with open('reviews.txt', 'r') as f:
    for line in f:
        data.append(line)
        count += 1
        if count % 1000 == 0:
            # progress indicator: report how many reviews are loaded so far
            print(len(data))
total = len(data)
print('檔案讀取完了,總共有', len(data), '筆資料')
print(len(data))            # number of items in the data list
if data:                    # guard: an empty file would make data[0] raise IndexError
    print(len(data[0]))     # character count of the first review
    print(data[0])          # the first review itself
sum_len = 0
for d in data:
    sum_len += len(d)
print(sum_len)              # total character count across all reviews
if count:                   # guard: avoid ZeroDivisionError on an empty file
    print("留言平均長度為", sum_len / count)
# Reviews shorter than 100 characters.
new = [d for d in data if len(d) < 100]
print('一共有', len(new), '比留言長度小於100')
print(new[0])
# Reviews that mention the word "good" -- explicit loop version.
good = []
for review in data:
    if 'good' in review:
        good.append(review)
print('一共有', len(good), '提到good')
print(good[0])
####################
# The same filter expressed as a list comprehension.
good = [review for review in data if 'good' in review]
print(good)
####################
# Boolean flags: does each review mention "bad"? (comprehension form)
bad = ['bad' in review for review in data]
print(bad)
# Equivalent explicit-loop version.
bad = []
for review in data:
    bad.append('bad' in review)
print(bad)
# Word-frequency count over all reviews.
wc = {}  # word -> number of occurrences across every review (word_count)
for d in data:
    for word in d.split():
        # dict.get with a default replaces the manual membership check
        wc[word] = wc.get(word, 0) + 1
# Report only extremely common words (over one million occurrences).
for word in wc:
    if wc[word] > 1000000:
        print(word, wc[word])
print(len(wc))  # vocabulary size (number of distinct words)
# Fix: .get avoids a KeyError when 'Allen' never appears in the data.
print(wc.get('Allen', 0))
# Interactive lookup: keep prompting until the user enters 'q'.
while True:
    word = input('請問你想查甚麼字: ')
    if word == 'q':
        print('感謝使用')
        break
    if word in wc:
        print(word, '出現過的次數: ', wc[word])
    else:
        print('沒這個字')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.