text stringlengths 38 1.54M |
|---|
from . import admin
from flask import render_template,redirect,url_for,flash,session,request
from app.admin.forms import LoginForm,TagForm
from app.models import Admin,Tag
from functools import wraps
from app import db
def admin_login_req(f):
    """Decorator: require an admin session, else bounce to the login page."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        # Authenticated admins pass straight through to the wrapped view.
        if "admin" in session:
            return f(*args, **kwargs)
        # Remember the originally requested URL so login can return via ?next=.
        return redirect(url_for("admin.login", next=request.url))
    return wrapper
@admin.route("/")
@admin_login_req
def index():
    # Admin dashboard home page (login required).
    return render_template("admin/index.html")
@admin.route("/login/", methods=["POST", "GET"])
def login():
    """Admin login view.

    GET renders the form; POST validates the credentials and, on success,
    stores the account name in the session and honours the optional
    ``?next=`` redirect target.
    """
    form = LoginForm()
    if form.validate_on_submit():
        data = form.data
        # Renamed from `admin`: the original local shadowed the imported
        # `admin` blueprint inside this function.
        admin_obj = Admin.query.filter_by(name=data['account']).first()
        # Bug fix: guard against an unknown account — the original called
        # check_pwd() on None and crashed with AttributeError.
        if admin_obj is None or not admin_obj.check_pwd(data['pwd']):
            flash("password error")
            return redirect(url_for("admin.login"))
        session['admin'] = data['account']
        return redirect(request.args.get("next") or url_for("admin.index"))
    return render_template("admin/login.html", form=form)
@admin.route("/logout/")
def logout():
    # Drop the admin session key (no error if absent) and return to login.
    session.pop('admin',None)
    return redirect(url_for("admin.login"))
@admin.route("/pwd/")
def pwd():
    # Password-change page: template only, no form handling wired up yet.
    # NOTE(review): unlike the tag views this route is not protected by
    # @admin_login_req — confirm whether that is intentional.
    return render_template("admin/pwd.html")
@admin.route("/tag/add/", methods=["GET", "POST"])
@admin_login_req
def tag_add():
    """Create a new movie tag; duplicate names are rejected with a flash."""
    form = TagForm()
    if form.validate_on_submit():
        data = form.data
        tag_count = Tag.query.filter_by(name=data['name']).count()
        if tag_count == 1:
            flash("tag name exist!", "err")
            return redirect(url_for("admin.tag_add"))
        tag = Tag(
            name=data['name']
        )
        db.session.add(tag)
        db.session.commit()
        flash("add tag successfully", "ok")
        # Bug fix: the redirect result was built but never returned, so the
        # POST fell through and re-rendered the form instead of performing
        # the POST/redirect/GET pattern.
        return redirect(url_for("admin.tag_add"))
    return render_template("admin/tag_add.html", form=form)
@admin.route("/tag/list/<int:page>/",methods=["GET"])
@admin_login_req
def tag_list(page=None):
    # Paginated tag listing: newest first, 10 entries per page.
    if page is None:
        page = 1
    page_data = Tag.query.order_by(
        Tag.addtime.desc()
    ).paginate(page=page,per_page=10)
    return render_template("admin/tag_list.html",page_data=page_data)
@admin.route("/tag/del/<int:id>/",methods=["GET"])
@admin_login_req
def tag_del(id=None):
    # Delete one tag, 404-ing when the id is unknown, then return to page 1
    # of the listing. NOTE(review): `id` shadows the builtin; kept because it
    # must match the <int:id> route converter name.
    tag = Tag.query.filter_by(id=id).first_or_404()
    db.session.delete(tag)
    db.session.commit()
    flash("Delete successfully","ok")
    return redirect(url_for("admin.tag_list",page=1))
@admin.route("/tag/edit/<int:id>/", methods=["GET", "POST"])
@admin_login_req
def tag_edit(id):
    """Edit an existing tag's name; a name already used by another tag is rejected."""
    form = TagForm()
    tag = Tag.query.get_or_404(id)
    if form.validate_on_submit():
        data = form.data
        tag_count = Tag.query.filter_by(name=data['name']).count()
        # Only reject when the colliding name belongs to a DIFFERENT tag.
        if tag_count == 1 and tag.name != data['name']:
            flash("tag name exist!", "err")
            return redirect(url_for("admin.tag_edit", id=id))
        tag.name = data['name']
        db.session.add(tag)
        db.session.commit()
        flash("edit tag successfully", "ok")
        # Bug fix: the redirect was built but never returned, so the POST
        # re-rendered the page instead of redirecting.
        return redirect(url_for("admin.tag_edit", id=id))
    return render_template("admin/tag_edit.html", form=form, tag=tag)
# --- Placeholder admin pages ---------------------------------------------
# Each view below only renders its template; no data handling is wired up
# yet. NOTE(review): none of these are protected by @admin_login_req,
# unlike the tag views above — presumably an oversight to confirm.
@admin.route("/movie/add/")
def movie_add():
    return render_template("admin/movie_add.html")
@admin.route("/movie/list/")
def movie_list():
    return render_template("admin/movie_list.html")
@admin.route("/preview/add/")
def preview_add():
    return render_template("admin/preview_add.html")
@admin.route("/preview/list/")
def preview_list():
    return render_template("admin/preview_list.html")
@admin.route("/user/view/")
def user_view():
    return render_template("admin/user_view.html")
@admin.route("/user/list/")
def user_list():
    return render_template("admin/user_list.html")
@admin.route("/comment/list/")
def comment_list():
    return render_template("admin/comment_list.html")
@admin.route("/moviecol/list/")
def moviecol_list():
    return render_template("admin/moviecol_list.html")
@admin.route("/oplog/list/")
def oplog_list():
    return render_template("admin/oplog_list.html")
@admin.route("/adminloginlog/list/")
def adminloginlog_list():
    return render_template("admin/adminloginlog_list.html")
@admin.route("/userloginlog/list/")
def userloginlog_list():
    return render_template("admin/userloginlog_list.html")
@admin.route("/role/list/")
def role_list():
    return render_template("admin/role_list.html")
@admin.route("/role/add/")
def role_add():
    return render_template("admin/role_add.html")
@admin.route("/auth/list/")
def auth_list():
    return render_template("admin/auth_list.html")
@admin.route("/auth/add/")
def auth_add():
    return render_template("admin/auth_add.html")
@admin.route("/admin/list/")
def admin_list():
    return render_template("admin/admin_list.html")
@admin.route("/admin/add/")
def admin_add():
    return render_template("admin/admin_add.html")
|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
#TODO
#from sklearn.datasets import load_iris

# Load dataset
traindata = pd.read_csv('./Datasets/iris/iris.csv')
# Change string labels to numeric class codes.
# Bug fix: DataFrame.set_value() was deprecated in pandas 0.21 and removed
# in pandas 1.0 — boolean .loc assignment is the supported equivalent.
traindata.loc[traindata['species'] == 'Iris-setosa', 'species'] = 0
traindata.loc[traindata['species'] == 'Iris-versicolor', 'species'] = 1
traindata.loc[traindata['species'] == 'Iris-virginica', 'species'] = 2
traindata = traindata.apply(pd.to_numeric)
# Change dataframe to array.
# Bug fix: DataFrame.as_matrix() was removed in pandas 1.0 — use to_numpy().
data_array = traindata.to_numpy()
# Split x and y (feature and target): first 4 columns are features,
# column 4 is the class label.
X_train, X_test, y_train, y_test = train_test_split(data_array[:, :4],
                                                    data_array[:, 4],
                                                    test_size=0.2)
"""
SECTION 2 : Build and Train Model
Multilayer perceptron model, with one hidden layer.
input layer : 4 neuron, represents the feature of Iris
hidden layer : 10 neuron, activation using ReLU
output layer : 3 neuron, represents the class of Iris, Softmax Layer
optimizer = stochastic gradient descent with no batch-size
loss function = categorical cross entropy
learning rate = 0.01
maximum iterations = 1000
"""
# (10,) is the explicit one-hidden-layer tuple; a bare (10) is just the int 10.
mlp = MLPClassifier(hidden_layer_sizes=(10,), solver='sgd',
                    learning_rate_init=0.01, max_iter=1000)
# Train the model
mlp.fit(X_train, y_train)
# Test the model
# Changed since earlier type was deprecated
print(f"{mlp.score(X_test,y_test)}")
# Predict a single hand-written sample (sepal length/width, petal length/width).
sl = 5.9
sw = 3.0
pl = 5.1
pw = 1.8
data = [[sl,sw,pl,pw]]
# Changed since earlier type was deprecated
print(f"{mlp.predict(data)}")
|
from django.shortcuts import render
from django.http import HttpResponse
import requests
# Create your views here.
def test(request):
    # Fetches JSON from a Docker Engine remote API endpoint and renders two
    # of its fields. NOTE(review): GET /images/json normally returns a JSON
    # *list* of images, so data['Id'] would raise TypeError on a list —
    # presumably the target here returns a single object; confirm against
    # the actual server.
    response = requests.get('http://192.168.198.140:5555/images/json')
    data = response.json()
    return render(request, 'ssh/base.html', {
        'Id': data['Id'],
        'RepoTags': data['RepoTags']
    })
def index(request):
    # Runs `ifconfig` on a remote host over SSH, prints its output to the
    # server log, and renders the command's exit status.
    import paramiko
    ssh = paramiko.SSHClient()
    # Auto add host to known hosts
    # SECURITY(review): AutoAddPolicy blindly trusts unknown host keys
    # (man-in-the-middle risk), and the credentials below are hard-coded —
    # move them to settings / environment variables.
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # Connect to server
    ssh.connect("192.168.198.140", username="tbv", password="iamTR")
    # Do command
    (ssh_stdin, ssh_stdout, ssh_stderr) = ssh.exec_command("ifconfig")
    # Get status code of command (blocks until the remote command finishes)
    exit_status = ssh_stdout.channel.recv_exit_status()
    # Print status code
    print ("exit status: %s" % exit_status)
    # Print content
    for line in ssh_stdout.readlines():
        print(line.rstrip())
    # Close ssh connect
    ssh.close()
    return render(request, 'ssh/base.html', {'exit_status' : exit_status})
|
from logging import debug
import os
from flask_socketio import SocketIO
from app import create_app, wss
from flask_admin import Admin
config_name = os.getenv('FLASK_CONFIG')  # selects the config profile for the app factory
app = create_app(config_name)
wss.init_app(app)
if __name__ == '__main__':
    # wss is presumably a SocketIO instance: SocketIO.run() wraps app.run()
    # with websocket support — confirm. NOTE(review): the `debug` and
    # flask_admin `Admin` imports above appear unused.
    wss.run(app, debug=True)
# 1 To solve the problem in '01_problem_demo.py', run read_ints in a new thread.
import threading
import time
from multithreading.count_three_sum import count_three_sum, read_ints
if __name__ == '__main__':
    print('Started main.')
    ints = read_ints('../data/1Kints.txt')
    t1 = threading.Thread(target=count_three_sum, args=(ints,), daemon=True)  # 1 Create the thread object
    t1.start()
    time.sleep(3)  # With daemon=True the triple search only runs during this window!
    # t1 = threading.Thread(target=count_three_sum, args=(ints,))  # 1 Create the thread object
    # t1.start()
    # This way the main thread is not blocked:
    # the print calls below execute while the triple search keeps running.
    print('What are we waiting for?')
    t1.join()  # Block the main thread until t1 finishes
    print('Ended main.')
|
from django.contrib import admin
from django.conf import settings
from api.models import Comment, Follower, Like, Post, PostMeta, Relation, RelationMeta, User, UserMeta
class CommentAdmin(admin.ModelAdmin):
    # Bulk publish/unpublish admin actions for comments.
    actions = ['make_inactive', 'make_active']
    def make_inactive(self, request, queryset):
        queryset.update(active=False)
    make_inactive.short_description = "Mark selected stories as unpublished"
    make_inactive.allowed_permissions = ('change',)
    def make_active(self, request, queryset):
        queryset.update(active=True)
    make_active.short_description = "Mark selected stories as published"
    make_active.allowed_permissions = ('change',)
class UserAdminModel(admin.ModelAdmin):
    # Base admin that forbids deleting users from the admin site.
    def has_delete_permission(self, request, obj=None):
        return False
# Inline editors embedded into their parent model's change page.
class CommentInline(admin.TabularInline):
    model = Comment
class MetaInline(admin.TabularInline):
    model = UserMeta
class RelationInline(admin.TabularInline):
    model = Relation
class PostInline(admin.TabularInline):
    model = Post
class PostMetaInline(admin.TabularInline):
    model = PostMeta
class RelationMetaInline(admin.TabularInline):
    model = RelationMeta
class PostAdmin(admin.ModelAdmin):
    inlines = [CommentInline, PostMetaInline]
    list_display = ('__str__', 'user', 'image', 'description',
                    'likes', 'created', 'updated')
    list_filter = ('likes', 'created', 'user')
    date_hierarchy = 'created'
    ordering = ('likes', 'created')
class UserAdmin(UserAdminModel):
    def save_model(self, request, obj, form, change):
        # Hash the password on creation, or on edit only when the submitted
        # value differs from the stored one. NOTE(review): the stored value
        # is the *hash*, so this relies on the form echoing the hash back
        # unchanged — resubmitting it verbatim skips re-hashing.
        if obj.pk:
            orig_obj = User.objects.get(pk=obj.pk)
            if obj.password != orig_obj.password:
                obj.set_password(obj.password)
        else:
            obj.set_password(obj.password)
        obj.save()
    inlines = [MetaInline, PostInline, CommentInline, RelationInline]
    list_display = ('email', 'username', 'last_name', 'first_name', 'is_staff')
class CommentAdmin(CommentAdmin):
    # NOTE(review): rebinds the name CommentAdmin to a subclass of the class
    # defined above — it works, but a distinct name would be clearer.
    list_display = ('__str__', 'body', 'active')
class LikeAdmin(admin.ModelAdmin):
    list_display = ('__str__', 'user', 'post', 'created')
class FollowerAdmin(admin.ModelAdmin):
    list_display = ('__str__', 'user', 'user_being_followed',
                    'started_following')
class RelationAdmin(admin.ModelAdmin):
    inlines = [RelationMetaInline, ]
    list_display = ('__str__', 'user')
class UserMetaAdmin(admin.ModelAdmin):
    list_display = ('__str__', )
class PostMetaAdmin(admin.ModelAdmin):
    list_display = ('__str__', )
class RelationMetaAdmin(admin.ModelAdmin):
    list_display = ('__str__', )
from django.contrib.admin import AdminSite
class MyAdminSite(AdminSite):
    # Custom branded admin site with its own login template; site_url points
    # "View site" at the front-end host from settings.
    login_template = "api/templates/admin/login.html"
    site_title = "PhotoApp Admin Portal"
    site_header = "PhotoApp Admin"
    index_title = "Welcome to PhotoApp Admin Portal"
    site_url = settings.FRONT_URL
# Register every model against the custom site (not the default admin.site).
site = MyAdminSite()
site.register(Post, PostAdmin)
site.register(User, UserAdmin)
site.register(Comment, CommentAdmin)
site.register(Like, LikeAdmin)
site.register(Follower, FollowerAdmin)
site.register(Relation, RelationAdmin)
site.register(UserMeta, UserMetaAdmin)
site.register(PostMeta, PostMetaAdmin)
site.register(RelationMeta, RelationMetaAdmin)
|
def pred_clean(string):
    """Classify one feedback string as 'Negativo'/'Positivo' with a saved SVC model.

    Returns (classe, proba0, proba1) where the probabilities are formatted
    percentage strings. Requires 'palavras_corr.csv', 'vectorizer_tfidf.joblib'
    and 'model_svc.joblib' in the working directory.
    """
    # standard / NLP / model-loading libraries (unused imports from the
    # original — numpy, sklearn estimators, metrics — were dropped)
    import pandas as pd
    from nltk.stem import WordNetLemmatizer
    import re
    from unidecode import unidecode
    from joblib import load

    # One-row DataFrame holding the feedback text (index label 'Feedback',
    # matching the original construction).
    dados = pd.DataFrame({'Feedback': {'Feedback': string}})

    # Cleaning: lower-case and strip accents.
    dados.Feedback = dados.Feedback.str.lower()
    dados['Feedback'] = dados['Feedback'].apply(lambda x: unidecode(x))

    def _remove_punctuation(feedback):
        # Drop digits/hyphens first, then turn punctuation into spaces.
        feedback = re.sub(r"[-|0-9]", "", feedback).lower()
        feedback = re.sub(r'[-./?!,":;()\']', ' ', feedback).lower()
        return feedback

    dados.Feedback = [_remove_punctuation(f) for f in dados.Feedback]
    dados['Feedback'] = dados['Feedback'].apply(lambda x: x.strip())
    # Bug fix: the original replaced a single space with a single space
    # (a no-op) — the intent was clearly to collapse doubled spaces left
    # by the punctuation pass.
    dados['Feedback'] = dados['Feedback'].apply(lambda x: x.replace("  ", " "))

    # Word-count feature (kept from the original pipeline; unused downstream).
    dados['Contagem_palavras'] = dados['Feedback'].apply(lambda x: len(str(x).split(" ")))

    # Stopword list produced at training time.
    corr_palavras = list(pd.read_csv('palavras_corr.csv').Palavras_corr.values)

    def _remove_stop_words(feedback):
        return " ".join(w for w in feedback.split() if w not in corr_palavras)

    dados['Feedback_palavras_irrelevantes'] = [
        _remove_stop_words(f) for f in dados.Feedback
    ]

    lemmatizer = WordNetLemmatizer()

    def _lemmatize(feedback):
        return " ".join(lemmatizer.lemmatize(w) for w in feedback.split())

    # TF-IDF transform using the vectorizer fitted at training time.
    tfidf = load('vectorizer_tfidf.joblib')

    def _bow(feedbacks):
        mx = tfidf.transform(feedbacks).todense()
        # NOTE(review): get_feature_names() was removed in scikit-learn 1.2;
        # on newer versions switch to get_feature_names_out().
        terms = tfidf.get_feature_names()
        return pd.DataFrame(mx, columns=terms, index=feedbacks)

    feedback_lem = dados['Feedback_palavras_irrelevantes'].apply(_lemmatize)
    dados_tfidf = _bow(feedback_lem)

    svc = load('model_svc.joblib')
    classe = svc.predict(dados_tfidf)
    # Bug fix: the original condition `if classe in classe == 0` was an
    # accidental chained comparison that only worked by coincidence for
    # one-element arrays; test the single prediction explicitly.
    classe = 'Negativo' if classe[0] == 0 else 'Positivo'
    proba = svc.predict_proba(dados_tfidf)
    proba0 = str(round(proba[0][0] * 100, 2)) + " %"
    proba1 = str(round(proba[0][1] * 100, 2)) + " %"
    return (classe, proba0, proba1)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from bin.base.sys import PR
from bin.base.db.MongoEng import MongoEng
from bin.base.sys.Bean import Bean
from mongoengine import Q
from bin import init
# 查询默认最大值
DEFAULT_SEARCH_MAX_SIZE = 10000
class SingleTableOpt(object):
    """Generic single-collection CRUD helper for MongoEngine documents.

    `BO` is the document class, `data` holds the request parameters and
    `ds` names the data source used to select the Mongo connection.
    All operations return a PR result object (code + msg + result).
    """

    def __init__(self, ds, bo, data):
        self.BO = None if bo is None else bo
        self.data = {} if data is None else data
        self.ds = ds
        self.__session__ = None

    def set_session(self, session):
        # Stores the session id inside the query data (fluent API).
        self.data['__sessionId__'] = session
        return self

    def setBO(self, Bo):
        self.BO = Bo
        return self

    def setData(self, data):
        self.data = data
        return self

    def setDataSource(self, ds):
        self.ds = ds
        return self

    # 通用删除 (generic delete by data['_id'])
    def delete(self):
        """Delete the document whose id is data['_id']."""
        _PR = PR.getInstance()
        if self.BO is None:
            return _PR.setResult(self.data).setCode(PR.Code_ERROR).setMsg("通用删除,未设置修改对象")
        if self.data is None:
            return _PR.setResult(self.data).setCode(PR.Code_ERROR).setMsg("通用删除,未给出参数")
        if self.ds is None:
            return _PR.setResult(self.data).setCode(PR.Code_ERROR).setMsg("通用删除,未设置数据源")
        else:
            try:
                if self.data.get('_id') is None:
                    return _PR.setResult(self.data).setCode(PR.Code_PARERROR).setMsg('通用删除,参数缺失')
                MongoEng(self.ds).getCollection()  # switch to the right connection
                x = {
                    'id': self.data.get('_id')
                }
                f = Q(**Bean().getSearchBean(x))
                res = self.BO.objects.filter(f).delete()
                if res == 1:
                    return _PR.setResult(res).setCode(PR.Code_OK).setMsg('通用删除,删除成功')
                else:
                    return _PR.setResult(res).setCode(PR.Code_WARNING).setMsg('通用删除,未找到删除数据')
            except Exception as e:
                # Bug fix: '…' + e raised TypeError (str + Exception) and
                # masked the real error; convert explicitly like the other
                # handlers in this class.
                return _PR.setResult(None).setCode(PR.Code_ERROR).setMsg('通用删除,删除异常:' + str(e))

    # 通用新增 (generic insert)
    def insert(self):
        """Insert a new document built from self.data."""
        _PR = PR.getInstance()
        if self.BO is None:
            return _PR.setResult(self.data).setCode(PR.Code_ERROR).setMsg("通用新增,未设置修改对象")
        if self.data is None:
            return _PR.setResult(self.data).setCode(PR.Code_ERROR).setMsg("通用新增,未给出参数")
        if self.ds is None:
            return _PR.setResult(self.data).setCode(PR.Code_ERROR).setMsg("通用新增,未设置数据源")
        else:
            try:
                insert_bean = Bean().getInsertBean(self.data, self.BO)
                MongoEng(self.ds).getCollection()
                bo = self.BO(**insert_bean)
                bo.save()
                # Bug fix: the success message said 通用查询,查询成功 — a
                # copy-paste from search(); corrected to the insert wording.
                return _PR.setResult(bo).setCode(PR.Code_OK).setMsg('通用新增,新增成功')
            except Exception as e:
                return _PR.setResult(None).setCode(PR.Code_ERROR).setMsg('通用新增,新增异常:' + str(e))

    # 通用更新 (generic update of one document by data['_id'])
    def update(self):
        """Update the document identified by data['_id'] with the remaining fields."""
        _PR = PR.getInstance()
        if self.BO is None:
            return _PR.setResult(self.data).setCode(PR.Code_ERROR).setMsg("通用更新,未设置修改对象")
        if self.data is None:
            return _PR.setResult(self.data).setCode(PR.Code_ERROR).setMsg("通用更新,未给出参数")
        if self.ds is None:
            return _PR.setResult(self.data).setCode(PR.Code_ERROR).setMsg("通用更新,未设置数据源")
        else:
            try:
                _id, update_bean = Bean().getUpdateBean(self.data, self.BO, ['_id'])
                MongoEng(self.ds).getCollection()
                res = self.BO.objects(id=_id).update_one(**update_bean)
                if res == 1:
                    return _PR.setResult(self.data).setCode(PR.Code_OK).setMsg('通用更新,更新成功')
                else:
                    return _PR.setResult(self.data).setCode(PR.Code_ERROR).setMsg('通用更新,更新失败')
            except Exception as e:
                return _PR.setResult(None).setCode(PR.Code_ERROR).setMsg('通用更新,更新异常:' + str(e))

    # 通用查询 (generic search)
    # Without `filters` a single document is fetched by data['_id'];
    # with `filters` a (paged, ordered) list is returned. Example:
    # filters = {
    #     'toolCode': data.get('toolCode'),
    #     'toolName__contains': data.get('toolName'),
    # }
    # par = {'pageNum': 1, 'pageSize': 2, 'order': '+_id'}
    def search(self, filters=None, par=None, only=None):
        """Query documents with optional filters, field projection and paging."""
        _PR = PR.getInstance()
        # Bug fix: `only=[]` was a shared mutable default argument;
        # normalizing a None default is backward compatible.
        only = [] if only is None else only
        page_num = 1
        page_size = DEFAULT_SEARCH_MAX_SIZE
        order = '+_id'
        if self.BO is None:
            return _PR.setResult(self.data).setCode(PR.Code_ERROR).setMsg("通用查询,未设置修改对象")
        if self.data is None and filters is None:
            return _PR.setResult(self.data).setCode(PR.Code_ERROR).setMsg("通用查询,未给出参数")
        if self.ds is None:
            return _PR.setResult(self.data).setCode(PR.Code_ERROR).setMsg("通用查询,未设置数据源")
        else:
            if par is not None:
                page_num = par.get('pageNum', 1)
                page_size = par.get('pageSize', DEFAULT_SEARCH_MAX_SIZE)
                order = par.get('order', '+_id')
            try:
                MongoEng(self.ds).getCollection()
                if filters is None:
                    if len(only) > 0:
                        res = self.BO.objects.filter(id=self.data.get('_id')).only(*only).first()
                    else:
                        res = self.BO.objects.filter(id=self.data.get('_id')).first()
                else:
                    f = Q(**Bean().getSearchBean(filters))
                    skip = (page_num - 1) * page_size
                    if len(only) > 0:
                        res = self.BO.objects.filter(f).only(*only)[skip: skip + page_size].order_by(order)
                    else:
                        res = self.BO.objects.filter(f)[skip: skip + page_size].order_by(order)
                return _PR.setCode(PR.Code_OK).setPageNum(page_num).setPageSize(page_size).setResult(res).setMsg('通用查询,查询成功')
            except Exception as e:
                return _PR.setResult(None).setCode(PR.Code_ERROR).setMsg('通用查询,查询异常:%s' % e)
def getInstance(ds=init.ROOT_DB_DS, bo=None, data=None):
    # Factory shortcut; defaults to the root data source.
    return SingleTableOpt(ds=ds, bo=bo, data=data)
|
# coding=UTF-8
def read_dict(dict_file):
    """Read *dict_file* (one entry per line, trailing whitespace stripped) into a set."""
    set_dict = set()
    # Bug fix: the file handle was never closed; `with` guarantees cleanup.
    with open(dict_file, 'r') as in_file:
        for line in in_file:
            set_dict.add(line.rstrip())
    return set_dict
def read_dict_map(dict_file):
    """Read a 4-column tab-separated file into {col0: col3}.

    Lines without exactly 4 fields, or whose second field is numerically
    below 100, are reported and skipped.
    """
    kv_dict = dict()
    # Bug fix: close the file deterministically (was left open).
    with open(dict_file, 'r') as in_file:
        for line in in_file:
            toks = line.rstrip().split('\t')
            if len(toks) != 4:
                print("invalid line:" + line)
                continue
            # Bug fix: toks[1] is a string, so `toks[1] < 100` never compared
            # numerically (and raises on Python 3); convert first.
            if int(toks[1]) < 100:
                print("unexpected state:" + line)
                continue
            kv_dict[toks[0]] = toks[3]
    return kv_dict
def match(dict, target_file, match_file, unmatch_file):
    """Split *target_file* lines by membership of their first tab field in *dict*.

    Matching lines go to *match_file*, the rest to *unmatch_file*.
    (The parameter name `dict` shadows the builtin; kept for caller
    compatibility.)
    """
    # Bug fix: all three handles were left open, so output could remain
    # unflushed; close them explicitly.
    in_file = open(target_file, 'r')
    match_out = open(match_file, "w")
    unmatch_out = open(unmatch_file, "w")
    try:
        for line in in_file:
            line = line.rstrip()
            toks = line.split('\t')
            digest = toks[0]
            if digest in dict:
                match_out.write(line + '\n')
            else:
                unmatch_out.write(line + '\n')
    finally:
        in_file.close()
        match_out.close()
        unmatch_out.close()
if __name__ == "__main__":
    # Split the `todo` file into match.out / unmatch.out based on whether
    # each line's first tab-separated field appears in the `dict` file.
    dict_file = "dict"
    target_file = "todo"
    match_file = "match.out"
    unmatch_file = "unmatch.out"
    dict1 = read_dict(dict_file)
    match(dict1, target_file, match_file, unmatch_file)
|
from abc import ABC, abstractmethod
class StateManager(ABC):
    """
    Abstract base for game state managers.

    Internally a state is a pair ([int], bool): the board cells plus a flag
    that is True when it is player 1's turn. Everything crossing the class
    boundary uses the string form ´<c1,c2,...>:<1|2>´.
    """

    @staticmethod
    @abstractmethod
    def generate_child_states(state: str) -> [str]:
        """Return the list of string states reachable from *state* in one move."""

    @staticmethod
    @abstractmethod
    def init_game_state(**kwargs) -> str:
        """Build the initial state string from game-specific keyword parameters."""

    @staticmethod
    @abstractmethod
    def is_end_state(state: str) -> str:
        """Return whether *state* is a terminal state."""

    @staticmethod
    @abstractmethod
    def pretty_state_string(state: str, **kwargs) -> str:
        """Game-specific human-readable rendering of *state*."""

    @staticmethod
    @abstractmethod
    def get_move_string(prev_state: str, state: str) -> str:
        """Describe, for verbose output, the move from *prev_state* to *state*."""

    @staticmethod
    def _get_internal_state_rep(state: str) -> ([int], bool):
        """Parse an external state string into ([cells], is_player_1)."""
        board_part, player_part = state.split(":")
        cells = [int(cell) for cell in board_part.split(",")]
        return cells, player_part == "1"

    @staticmethod
    def _get_external_state_rep(state: ([int], bool)) -> str:
        """Serialize an internal state to the ´<state/board>:<player nr.>´ form."""
        cells, player_one = state
        suffix = ":1" if player_one else ":2"
        return ",".join(str(cell) for cell in cells) + suffix

    @staticmethod
    def is_player_1(state: str) -> bool:
        """True when the trailing player marker of the state string is '1'."""
        return state[-1] == "1"

    @staticmethod
    def graph_label(state: str) -> str:
        """Board-only label (cell list) used when plotting the game tree."""
        cells, _ = StateManager._get_internal_state_rep(state)
        return str(cells)
class Nim(StateManager):
    """State manager for Nim: internal board is [remaining_pieces, max_per_move]."""

    @staticmethod
    def init_game_state(**kwargs) -> str:
        """Initial state from N (pieces), K (max removable), P (player 1 starts)."""
        internal = ([kwargs.get("N"), kwargs.get("K")], kwargs.get("P"))
        return StateManager._get_external_state_rep(internal)

    @staticmethod
    def is_end_state(state: str) -> bool:
        """The game is over once no pieces remain."""
        (remaining, _max_take), _player = StateManager._get_internal_state_rep(state)
        return remaining == 0

    @staticmethod
    def get_move_string(prev_state: str, state: str) -> str:
        """Describe a move as the number of pieces taken between the two states."""
        (prev_remaining, _), _ = StateManager._get_internal_state_rep(prev_state)
        (curr_remaining, _), _ = StateManager._get_internal_state_rep(state)
        return f"removed {prev_remaining - curr_remaining} pieces"

    @staticmethod
    def generate_child_states(state: str) -> [str]:
        """All states reachable by removing 1..K pieces (never going below zero)."""
        if Nim.is_end_state(state):
            return []
        (remaining, max_take), player = StateManager._get_internal_state_rep(state)
        floor = max(remaining - max_take, 0)
        children = [
            ([left, max_take], not player)
            for left in range(remaining - 1, floor - 1, -1)
        ]
        return [StateManager._get_external_state_rep(child) for child in children]

    @staticmethod
    def pretty_state_string(state: str, **kwargs) -> str:
        """Human-readable remaining-piece count, optionally with the per-move cap."""
        (remaining, max_take), _player = StateManager._get_internal_state_rep(state)
        text = f"Remaining pieces: {remaining}"
        if kwargs.get("include_max", False):
            text += f" (Max number of removed pieces per move: {max_take}) "
        return text
class Ledge(StateManager):
    """State manager for Ledge: cells hold 0 (empty), 1 (copper) or 2 (gold)."""

    @staticmethod
    def get_move_string(prev_state: str, state: str) -> str:
        """Describe the move between two consecutive states."""
        prev_board, prev_player = StateManager._get_internal_state_rep(prev_state)
        current_board, current_player = StateManager._get_internal_state_rep(state)
        # Cell 0 shrinking by 1/2 means a coin was taken off the ledge.
        if prev_board[0] - current_board[0] == 1:
            return "picks up copper"
        if prev_board[0] - current_board[0] == 2:
            return "picks up gold"
        # Otherwise exactly two cells changed: destination (left) and source.
        to_cell_index, from_cell_index = [
            i for i in range(len(current_board)) if current_board[i] != prev_board[i]
        ]
        # Determine type of piece that was moved.
        moved_piece_string = "gold" if prev_board[from_cell_index] == 2 else "copper"
        return (
            f"moves {moved_piece_string} from cell {from_cell_index} to {to_cell_index}"
        )

    @staticmethod
    def generate_child_states(state: str) -> [str]:
        """All states reachable by picking up from the ledge or sliding a coin left."""
        if Ledge.is_end_state(state):
            return []
        states = []
        board, player = StateManager._get_internal_state_rep(state)
        if board[0] > 0:
            # Picking up the coin sitting on the ledge (cell 0).
            states.append(([0] + board[1:], not player))
        for j in range(len(board) - 1, 0, -1):
            if board[j] == 0:
                continue
            i = j - 1
            # Bug fix: test `i >= 0` BEFORE indexing. The original evaluated
            # board[i] first, so at i == -1 it silently read board[-1] (the
            # last cell) via negative indexing before the bounds check.
            while i >= 0 and board[i] == 0:
                copy_list_state = board.copy()
                copy_list_state[i] = copy_list_state[j]
                copy_list_state[j] = 0
                states.append((copy_list_state, not player))
                i -= 1
        return [StateManager._get_external_state_rep(in_state) for in_state in states]

    @staticmethod
    def init_game_state(**kwargs):
        """Initial state from B_init (starting board) and p (player 1 starts)."""
        return StateManager._get_external_state_rep(
            (kwargs.get("B_init"), kwargs.get("p"))
        )

    @staticmethod
    def is_end_state(state: str) -> bool:
        """The game ends once the gold coin (2) has left the board."""
        board, player = StateManager._get_internal_state_rep(state)
        return 2 not in board

    @staticmethod
    def pretty_state_string(state: str, **kwargs) -> str:
        """Render the raw board list."""
        board, player = StateManager._get_internal_state_rep(state)
        return str(board)
|
import pandas as pd
from valid_headers import column_names
def check_columns(df, filename):
    """Return True when df's headers exactly match the expected list for *filename*."""
    expected = column_names[filename]
    observed = [str(key) for key in df.keys()]
    return expected == observed
def check_filename(filename):
    """Return True if *filename* has a registered header list in column_names."""
    try:
        column_names[filename]
    except KeyError:
        return False
    return True
def validate(filename):
    """Load the file into a DataFrame based on its extension.

    Returns the DataFrame, or the string "invalid file extension" when the
    extension is not one of .csv/.xls/.xlsx.
    """
    # checks if extension type is valid
    if filename.endswith(".csv"):
        return pd.read_csv(filename)
    if filename.endswith((".xls", ".xlsx")):
        return pd.read_excel(filename)
    return "invalid file extension"
def perform_checks(filename, local_file):
    """Run all validations on *local_file* (registered as *filename*); return a status message."""
    if not check_filename(filename):
        return "filename does not exist in our records"
    df = validate(local_file)
    # validate() signals an unsupported extension by returning a string.
    if isinstance(df, str):
        return "file does not have an extension this program can work with, please use .xlsx, .xls, or .csv"
    if not check_columns(df, filename):
        return "columns do not match our records for this file"
    return "no error"
|
from django.forms import ModelForm
from django.forms.models import fields_for_model
from .models import *
class ProjectForm(ModelForm):
    # Create/edit form for a Project (name + description).
    class Meta:
        model = Project
        fields = ['name','descrp']
class TaskForm(ModelForm):
    # Create/edit form for a Task.
    # NOTE(review): Project uses 'descrp' while Task uses 'descr' — confirm
    # both spellings match the actual model field names.
    class Meta:
        model = Task
        fields = ['name','descr']
"""Defines URL patterns for ians_py_page."""
from django.urls import path
from . import views
app_name = 'ians_py_page'  # URL namespace used by reverse()/{% url %} lookups
urlpatterns = [
    # Home page
    path('', views.index, name='index'),
]
from unittest import mock
import pytest
from sqlalchemy import create_engine
from sqlalchemy import func
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import Table
from ....fakes import FakeAdapter
from ....fakes import FakeEntryPoint
from shillelagh.backends.apsw.dialects.base import APSWDialect
from shillelagh.exceptions import ProgrammingError
def test_create_engine(mocker):
    # Register a fake adapter entry point and exercise the full round trip:
    # engine -> reflected virtual table -> SUM over the fake adapter's rows.
    entry_points = [FakeEntryPoint("dummy", FakeAdapter)]
    mocker.patch(
        "shillelagh.backends.apsw.db.iter_entry_points",
        return_value=entry_points,
    )
    engine = create_engine("shillelagh://")
    table = Table("dummy://", MetaData(bind=engine), autoload=True)
    query = select([func.sum(table.columns.pets)], from_obj=table)
    assert query.scalar() == 3
def test_create_engine_no_adapters(mocker):
    # With no adapters registered, reflecting the virtual table must fail
    # with the dialect's "Unsupported table" error.
    engine = create_engine("shillelagh://")
    with pytest.raises(ProgrammingError) as excinfo:
        Table("dummy://", MetaData(bind=engine), autoload=True)
    assert str(excinfo.value) == "Unsupported table: dummy://"
def test_dialect_ping():
    # do_ping should report healthy for any connection object.
    mock_dbapi_connection = mock.MagicMock()
    dialect = APSWDialect()
    assert dialect.do_ping(mock_dbapi_connection) is True
|
import mxnet as mx
if __name__ == '__main__':
    # Build a tiny 2-layer MLP symbol: data -> fc1(128) -> relu -> fc2(10) -> softmax,
    # bind it for inference only, and run one forward pass on a dummy batch.
    data = mx.sym.Variable('data')
    fc1 = mx.sym.FullyConnected(data, name='fc1', num_hidden=128)
    act1 = mx.sym.Activation(fc1, name='relu1', act_type="relu")
    fc2 = mx.sym.FullyConnected(act1, name='fc2', num_hidden=10)
    out = mx.sym.SoftmaxOutput(fc2, name='softmax')
    mod = mx.mod.Module(out, context=mx.cpu(0)) # create a module by given a Symbol
    batch_size = 16
    num_in = 200
    mod.bind(data_shapes=[('data', (batch_size, num_in))], for_training=False) # create memory by given input shapes
    mod.init_params() # initial parameters with the default random initializer
    # All-ones dummy batch of shape (batch_size, num_in).
    tmp_data_batch = mx.io.DataBatch(data=[mx.nd.ones((batch_size, num_in))], provide_data=[('data', (batch_size, num_in))])
    mod.forward(tmp_data_batch)
    tmp = mod.get_outputs()
    # Expect one output of shape (batch_size, 10).
    print(len(tmp), tmp[0].shape)
|
import numpy as np
import pygame as pg
class Graph(dict):
    """Directed adjacency-list graph stored as dict: vertex key -> list of neighbour keys."""

    # NOTE(review): this class attribute is unused by any method; kept only
    # for backward compatibility with code that may read Graph.graph.
    graph = {}

    def __init__(self):
        # Bug fix: the original rebound the local name `self` to a fresh
        # dict, which was a no-op; delegate to dict's initializer instead.
        super().__init__()

    def add_vertex(self, key, edges=None):
        """Add a vertex with an optional list of outgoing neighbour keys."""
        if edges is None:
            edges = []
        self[key] = edges

    def add_edge(self, key, edge=None):
        """Append a single outgoing edge to an existing vertex."""
        self[key].append(edge)

    def get_edges(self, key):
        """Return the neighbour list of *key*."""
        return self[key]

    def show_vertex(self, vertex):
        """Return a 'v->n1->n2' style string of the vertex and its neighbours."""
        return str(vertex) + '->' + '->'.join(self[vertex])

    def get_matrix(self):
        """Print the adjacency matrix over sorted vertex keys and return it."""
        keys = sorted(self)  # vertices in deterministic order
        n = len(keys)  # number of vertices
        matrix = np.zeros((n, n), dtype=int)
        for i in range(n):
            for j in range(n):
                matrix[i][j] = self.is_edge(keys[i], keys[j])
        print(matrix)
        # Backward-compatible improvement: also return the matrix so callers
        # can use it (the original always returned None).
        return matrix

    def is_edge(self, v1, v2):
        """Return 1 when an edge v1 -> v2 exists, else 0."""
        # Bug fix: self.get(v1) returned None for an unknown vertex and
        # `v2 in None` raised TypeError; default to an empty neighbour list.
        return 1 if v2 in self.get(v1, []) else 0

    def draw(self):
        """Open a pygame window with a placeholder drawing until closed."""
        pg.init()
        screen = pg.display.set_mode([500, 500])
        pg.display.set_caption('Grapher')
        running = True
        while running:
            for event in pg.event.get():
                if event.type == pg.QUIT:
                    running = False
            screen.fill((30, 33, 39))
            pg.draw.circle(screen, (0, 0, 0), (250, 250), 75)
            pg.display.flip()
        pg.quit()
if __name__ == '__main__':
    # Demo: build a small graph, print its adjacency matrix, then open the
    # pygame window (blocks until the window is closed).
    graph = Graph()
    graph.add_vertex('a', edges=['b','c'])
    graph.add_vertex('b', edges=['a','c'])
    graph.add_vertex('c', edges=['a','b'])
    graph.add_vertex('d')
    graph.add_edge('a','d')
    graph.add_edge('d','a')
    graph.add_vertex('e')
    graph.get_matrix()
    graph.draw()
|
from django.conf import settings
from storages.backends.s3boto import S3BotoStorage
class S3MediaStorage(S3BotoStorage):
    # S3 storage whose default bucket prefix is derived from MEDIA_ROOT.
    def __init__(self, **kwargs):
        # NOTE(review): .replace('/', '') strips EVERY slash from the path
        # (e.g. '/var/media' -> 'varmedia') — confirm this yields the
        # intended bucket prefix; strip('/') may have been meant.
        kwargs['location'] = kwargs.get('location',
                                        settings.MEDIA_ROOT.replace('/', ''))
        super(S3MediaStorage, self).__init__(**kwargs)
class S3StaticStorage(S3BotoStorage):
    # Same pattern for static files, rooted at STATIC_ROOT.
    def __init__(self, **kwargs):
        kwargs['location'] = kwargs.get('location',
                                        settings.STATIC_ROOT.replace('/', ''))
        super(S3StaticStorage, self).__init__(**kwargs)
from pkg_ml_prod.preprocessing import drop_na, train_test_split
from pkg_ml_prod import get_data, preprocessing, pipeline, model
# Load the raw data, clean it and split into train/test sets.
# NOTE(review): the `from pkg_ml_prod.preprocessing import drop_na, ...`
# line above is unused — the module-qualified calls below are used instead.
df = get_data.get_data()
df = preprocessing.drop_na(df)
X_train, X_test, y_train, y_test = preprocessing.split(df)
# Fit the pipeline and predict one observation. The positional arguments
# look like penguin features (island, bill length/depth, flipper length,
# body mass, sex) — confirm against model.prediction's signature.
pipe = pipeline.create_pipeline()
best_model = model.create_model(pipe,X_train,y_train)
prediction = model.prediction('Reunion',44,18.2,196,3750,'MALE', best_model)
print(prediction)
# 셀프 넘버
# for i in range(1,100):
# for j in range(1,100):
# A = i
# B = j
# # AB = 10 * A + B
# str_AB = str(A)+str(B)
# AB = int(str_AB)
# if 100 - (A+B) == AB:
# print(A)
# print(B)
# NOTE(review): indentation below was reconstructed from a flattened
# source — verify the loop bodies against the original file.
# First attempt at printing "self numbers" below 100: numbers that are
# never produced by g(n) = n + digit-sum(n).
li = list(range(1,100))  # NOTE(review): unused
n_list = []
for n in range(1,100):
    if n < 10:
        # For a single digit, n//1 + n%1 == n + 0, so this is n + digit-sum(n).
        n = n + n//1 + n%1
    else:
        n = n + n//10 + n%10
    if n > 100:
        break
    n_list.append(n)
# print(n_list)
# final = []
for i in range(1,100):
    # i is a self number when it was never generated above.
    final= n_list.count(i)
    if final == 0:
        print(i)
# 제출 -> 맞았ㄴ..ㅔ..
def function(n):
    """Return n plus the sum of its decimal digits (the self-number generator)."""
    return n + sum(int(digit) for digit in str(n))
def self_num(length):
    # Draft of a self-number printer up to *length*.
    # NOTE(review): indentation reconstructed from a flattened source; as
    # written, counting inside the same loop that builds `li` looks like an
    # unfinished draft (superseded by self_num1 below) — verify.
    li = []
    for i in range(length):
        li.append(function(i))
        final = li.count(i)
        if final == 0:
            print(i)
def self_num1():
    # Prints all self numbers below 10000: values never produced by
    # n + digit-sum(n).
    li = []
    for n in range(1,10000):
        str_n = list(map(int,str(n)))
        for i in range(len(str_n)):
            n = n + str_n[i]
        li.append(n)
    # NOTE(review): li.count() inside the loop makes this O(n^2); the set
    # difference version below does the same in linear time.
    for i in range(1,10000):
        final = li.count(i)
        if final == 0:
            print(i)
self_num1()
# 밍듀풀이
# set은 차집합 가능
def d(n):
    """Self-number generator: n plus the sum of its decimal digits."""
    total = n
    for digit_char in str(n):
        total += int(digit_char)
    return total
# Set-based solution: self numbers are candidates minus generated numbers.
list_a = []  # every generated value d(i)
for i in range(10000):
    list_a.append(d(i))
list_b = []  # all candidates 0..9999
for i in range(10000):
    list_b.append(i)
s1 = set(list_a)
s2 = set(list_b)
# Set difference removes everything that was generated at least once.
self_numbers = list(s2-s1)
self_numbers = sorted(self_numbers)
for i in self_numbers:
    print(i)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 3 09:08:33 2017
@author: Charles
"""
from bisip.models import mcmcinv |
# Generated by Django 3.2.7 on 2021-11-10 02:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add help_text and unique=True to three existing fields."""

    dependencies = [
        ('helloworld', '0003_auto_20211109_2253'),
    ]

    operations = [
        # disciplina.nome: now unique, with admin help text.
        migrations.AlterField(
            model_name='disciplina',
            name='nome',
            field=models.CharField(help_text='Digite o nome da disciplina', max_length=100, unique=True),
        ),
        # topico.nome: now unique, with admin help text.
        migrations.AlterField(
            model_name='topico',
            name='nome',
            field=models.CharField(help_text='Digite o nome do tópico', max_length=100, unique=True),
        ),
        # usuario.email: now unique, with admin help text.
        migrations.AlterField(
            model_name='usuario',
            name='email',
            field=models.EmailField(help_text='Digite o email do usuário', max_length=254, unique=True),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
列表基本操作知识点(五大基本操作):
1、访问列表元素
2、 添加列表元素
2、补充知识点:从空列表构建新列表
3、修改列表元素
4、删除列表元素
5、列表排序及其他
"""
# Tutorial script: the five basic list operations (access, add, build from
# empty, modify, delete, sort).  All user-facing strings are intentionally
# kept in Chinese; only comments were translated.

# Define the demo list -- members of the "Python in action" circle.
names_python_pc = ['陈升','刘德华','杨幂','TFboys']

# 1. Accessing list elements
# Access by index -- fetch the third member.
yangmi = names_python_pc[2]
print('Python实战圈成员列表种第三个是:',yangmi)
# Two ways to fetch the last element.
print()
first_last = names_python_pc[-1]
print('第一种方法获取最后一个元素',first_last)
second_last = names_python_pc[3]
# BUG FIX: this line printed first_last; report the value fetched by the
# second method.  (Both hold the same element, so the output is unchanged.)
print('第二种方法获取最后一个元素',second_last)

# 2. Adding elements
# Method 1: insert(index, x)
print()
names_python_pc.insert(0, '魏璎珞')
print(f'插入新元素后的列表是:{names_python_pc}')
# Method 2: append(x)
names_python_pc.append('傅恒')
print(f'第二种方法插入新元素后的列表是:{names_python_pc}')

# 2b. Supplementary: building a new list from an empty one
yan_xi_gong_luo = [ ]
# Dynamically append elements to the empty list.
yan_xi_gong_luo.append('魏璎珞')
yan_xi_gong_luo.append('皇后')
yan_xi_gong_luo.append('纯妃')
print('构建空的列表 [ ]: ',yan_xi_gong_luo)

# 3. Modifying elements by index
print()
print('修改前的列表元素:',names_python_pc)
names_python_pc[2] = '扶摇'
print('修改后的列表元素:',names_python_pc)

# 4. Deleting elements
print()
# Method 1: del / pop
del names_python_pc[0]
print('del删除元素后的列表元素有:',names_python_pc)
# pop() with no argument removes (and returns) the last element.
print()
pop_name = names_python_pc.pop()
print(pop_name)
print()
# pop(index) removes by position.
pop_name_1 = names_python_pc.pop(1)
print(f'根据位置信息删除: {pop_name_1}')
print('删除后的列表元素有:',names_python_pc)
# Method 2: remove(value)
print()
names_python_pc.remove('TFboys')
print('remove 删除后的列表元素有:',names_python_pc)

# 5. Sorting and other helpers
list_1 = ['p','f','b','a','d','e','f','g']
# copy() makes an independent shallow copy.
list_2 = list_1.copy()
print()
print('5、列表排序及其他')
print()
print('复制列表:',list_2)
# count() tallies occurrences of a value.
print('统计列表中 f 出现的次数', list_1.count('f'))
print('统计列表中 b 出现的次数', list_1.count('b'))
# index() returns the first position of a value.
print('查找 b 元素所在的位置:', list_1.index('b'))
# reverse() flips the list in place.
print('原来的元素顺序:', list_1)
list_1.reverse()
print('颠倒后的元素顺序:', list_1)

# 5.1 Sorting
print()
print('5.1 、列表排序')
print()
# sort() orders the list permanently (in place).
list_1.sort()
print(' sort :', list_1)
# Descending order.
list_1.sort(reverse=True)
print('降序排列元素', list_1)
# len() gives the number of elements.
print()
print('list_1长度为:' , len(list_1))
print()
# sorted() returns a temporarily sorted copy, leaving the original alone.
temp_list = sorted(list_1)
print('临时排序', temp_list)
print('原来的列表元素顺序',list_1)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from subprocess import Popen
from super_spider import app
"""
@author: peimingyuan
created on 2017/9/9 下午5:53
"""
config = app.config


def runserver(debug, port):
    """Run the Flask dev server (debug) or spawn gunicorn (production).

    :param debug: force debug mode; falls back to the app's DEBUG config.
    :param port: TCP port to bind on 0.0.0.0.
    """
    debug = debug or config.get("DEBUG")
    if debug:
        app.run(
            host="0.0.0.0",
            port=port,
            debug=True
        )
    else:
        # BUG FIX: gunicorn's -w flag expects a worker *count*; the worker
        # class (gevent) is selected with -k/--worker-class.  "-w gevent"
        # made gunicorn fail at startup.
        cmd = (
            "gunicorn "
            "-k gevent "
            "-b 0.0.0.0:{port} "
            "super_spider:app".format(port=port)
        )
        # Blocks until gunicorn exits.
        Popen(cmd, shell=True).wait()
|
def returnFloat():
    """Prompt the user for a number and return it as a float."""
    raw = input("Please enter an number")
    return float(raw)
def returnInt():
    # NOTE(review): despite the name, this prints the truthiness of the raw
    # input string (bool('') is False, any other text is True) and returns
    # None — it never converts to int.  Confirm intent before changing.
    x=input()
    print(bool(x))
def PrintFlote():
    """Prompt for a number and print it rounded to the nearest integer.

    BUG FIX: the original passed the *type* `float` as the input() prompt
    (displaying "<class 'float'>") and then called round() on the returned
    string, which always raised TypeError.  Convert the input to float
    before rounding.
    """
    x = float(input("Please enter a float"))
    print(round(x))
# Interactive script entry point.
PrintFlote()
|
import yaml
import collections # For creating ordered dictionary
import json # For creating json data
import os
from pathlib import Path
from datetime import datetime, date
from pytz import timezone
import calendar
import random
import names
import Database_Ble
#----------newly added----
import pymysql
from pprint import pprint
#import datetime
from pyfcm import FCMNotification
import requests
# SECURITY(review): the FCM server key is hard-coded and committed here —
# move it to an environment variable / secrets store and rotate this key.
push_service = FCMNotification(api_key="AAAAL-M_qjo:APA91bFDT028Itamu_P5o_qSw61l7t-5mwHvMP1Cri85wuRwkeeP8plh25JfDaBUVvAyeWvkQpB3ZJ3uuDVye9z9jgoVdB6NTsde3XDnPIlYygkAqWMLoKITz5IyMcukEFv8q9L5tdic")
#--------------------------
def Notification_for_outside_class(student_id, room_id):
    """Fetch an FCM device token from the token service and push an
    "Outside Class" notification mentioning the student, room and time.

    :param student_id: id included in the notification body.
    :param room_id: room id included in the notification body.
    """
    headers = {'User-Agent': 'Mozilla/5.0'}  # NOTE(review): built but never passed to requests.post
    payload = {'user_name':'Shanta','secret_key':'hfjKricAdD'}
    r = requests.post('http://182.163.112.219:9193/fcmtokenget', data = payload)
    print(r.text)
    print(r.json())
    r_json = r.json()
    print(type(r_json))
    description = r_json['description']
    error = r_json['error']
    # Use the first registered device token.
    fcm_token = r_json['fcm_token'][0]
    registration_id = fcm_token
    registration_ids = [registration_id]
    message_title = "Outside Class"
    # BUG FIX: the module does `from datetime import datetime`, so the
    # original `datetime.datetime.now()` raised AttributeError at runtime.
    message_body = 'student id: ' + str(student_id) + ', room id: '+ str(room_id) +', Date & Time: ' + datetime.now().strftime('%B %d, %Y - %H:%M')
    result = push_service.notify_multiple_devices(registration_ids=registration_ids, message_title=message_title, message_body=message_body)
class Query(Database_Ble.Query):
    """Local alias of Database_Ble.Query so call sites can use a short name."""
    pass
#Query.create_connection()
class Processing:
    """Decodes one BLE-gateway payload and records student attendance in MySQL.

    NOTE(review): the indentation of this file was mangled in transit; the
    nesting below is a best-effort reconstruction — verify the control flow
    against the upstream source.
    """

    def device_data_process(self, client_data):
        """Parse a client payload, resolve beacon -> student -> room -> class,
        then insert or update a row in tbl_attends_students_list.

        Returns "Hello" after processing; on any error the broad except
        prints the exception and the method returns None.
        """
        # Try-catch block
        try:
            # SECURITY(review): yaml.load on client-supplied data without an
            # explicit Loader is unsafe — prefer yaml.safe_load.
            dictionary_data = yaml.load(client_data)
            Query.create_connection()
            print("Data end---------------------------------------------------")
            #############################################################################
            # (legacy commented-out gmac/obj parsing code removed for readability)
            gateway_address = ''
            # The payload is a list of device dicts: one 'Gateway' entry plus
            # beacon advertisements reported with type 'Unknown'.
            for k in dictionary_data:
                if k["type"] == 'Gateway':
                    gateway_address = k["mac"]
                    print("Gateway ID is --> ", gateway_address)
                if k["type"] == 'Unknown':
                    beacon_mac_address = k['mac']
                    data_load = k['rawData']   # raw advertisement bytes (currently unused)
                    beacon_rssi = k['rssi']
                    if 1:   # NOTE(review): placeholder for a beacon-prefix filter (e.g. 'AC233F29')
                        print("++++++++++++++Beacon matche Found+++++++++++++")
                        print("Gateway address--->", gateway_address)
                        print("beacon_mac_address is-->", beacon_mac_address)
                        print("RSSI--->", beacon_rssi)
                        # Timestamp in local (Dhaka) time; the variable name is a
                        # leftover from a US/Eastern experiment.
                        florida = timezone('Asia/Dhaka')
                        florida_time = datetime.now(florida)
                        time_stamp = florida_time.strftime('%Y-%m-%d %H:%M:%S')
                        print("Time is-->", time_stamp)
                        curr_date = time_stamp.split(' ')[0]
                        curr_time = str(time_stamp.split(' ')[1])
                        print("Current DATE is-->", curr_date)
                        print("Current TIME is-->", curr_time)
                        #-------------Find Day from Date------------------------
                        my_date = date.today()
                        day = calendar.day_name[my_date.weekday()] #'Wednesday'
                        print("day is --->", day)
                        #----------------Retrieve Student_id using Beacon_address------------------
                        # SECURITY(review): all queries below interpolate values
                        # with % — use parameterized queries to prevent SQL injection.
                        student_id_query = ("SELECT student_id from tbl_entry_beacon where beacon_id = '%s'" %(beacon_mac_address))
                        student_id_data = Query.get_a_record(student_id_query)
                        print("student_data --> ",student_id_data)
                        if(str(student_id_data)=='None'):
                            # Unregistered beacon: ignored.  (Commented-out code that
                            # registered a random student/name here was removed.)
                            pass
                        else:
                            student_id = student_id_data[0]
                            print("student_id is------>", student_id)
                            print("gateway_address is------>", gateway_address)
                            #-----------------Get student name and branch_id using student_id---------------
                            student_query = ("SELECT student_first_name, branch_id from tbl_student_info where id= '%s'" %(student_id))
                            student_data = Query.get_a_record(student_query)
                            print("student data is---> ", student_data)
                            if(str(student_data)=='None'):
                                pass
                            else:
                                print("PROBLEM Student data is--->", student_data)
                                student_name = student_data[0]
                                print("student_name is------->", student_name)
                                branch_id = student_data[1]
                                print("branch_id is-------->", branch_id)
                                #-----------------Get Room_ID from the gateway-----------------------------
                                get_room_id_query = "SELECT room_id from tbl_entry_gateway where gateway_id = '%s'" %(gateway_address)
                                get_room_id_arr = Query.get_all_record(get_room_id_query)
                                room_id = get_room_id_arr
                                # Unpack result rows; room_id ends up holding the last room found.
                                for i in get_room_id_arr:
                                    room = list(i)
                                    for room_id in room:
                                        print("\n \n\n \n\n \n Room id is--------------->", room_id)
                                #-----------------Get Class_ID scheduled now in this room------------------
                                get_class_id_query = "SELECT tbl_student_class.class_id from tbl_student_class JOIN tbl_class_time ON tbl_student_class.class_id = tbl_class_time.class_id AND '%s'>=start_time and '%s'<=end_time and student_id='%s' and day = '%s' and room_id = '%s'" %(curr_time, curr_time, student_id, day, room_id)
                                get_class_id_arr = Query.get_a_record(get_class_id_query)
                                print("\n \n \n \nThis is the class id array",get_class_id_arr)
                                if(str(get_class_id_arr)=='None'):
                                    # No class scheduled for this student/room/time.
                                    # (Commented-out insert/update fallback removed.)
                                    pass
                                else:
                                    class_id = get_class_id_arr[0]
                                    print("Class id is--->", class_id)
                                    print("Class id is --->", class_id)
                                    #--------INSERT/UPDATE student in attendance table---------------------------------
                                    # Insert if the student has no attendance row yet, otherwise update.
                                    id_class_exist_query = ("SELECT student_id from tbl_attends_students_list where student_id = (%s)" %(student_id))
                                    print(id_class_exist_query)
                                    id_class_exist_data = Query.get_a_record(id_class_exist_query)
                                    print("id_class_exist_data, -->", id_class_exist_data)
                                    if(str(id_class_exist_data)=='None'):
                                        #-----------------Get branch_id from room_id---------------
                                        branch_query = ("SELECT `branch_id` FROM `tbl_floor` WHERE `floor_id` in (SELECT `floor_id` FROM `tbl_room` WHERE `room_id` = '%s')" %(room_id))
                                        branch_data = Query.get_a_record(branch_query)
                                        print("-------------------------branch ID---------------\n\n\n",branch_data)
                                        branch_id = branch_data[0]
                                        print("Inside of insert query ")
                                        insert_query = 'INSERT INTO tbl_attends_students_list(class_id, student_id, student_name, updated_time, rssi_value, branch_id, room_id) VALUES (%s,%s,%s,%s,%s,%s,%s)', (class_id, student_id, student_name, time_stamp, beacon_rssi, branch_id, room_id)
                                        result = Query.commit(insert_query)
                                        print("Data Inserted Successfully in Attendance Table!!!!")
                                    else:
                                        #-----------------Get branch_id from room_id---------------
                                        branch_query = ("SELECT `branch_id` FROM `tbl_floor` WHERE `floor_id` in (SELECT `floor_id` FROM `tbl_room` WHERE `room_id` = '%s')" %(room_id))
                                        branch_data = Query.get_a_record(branch_query)
                                        print("-------------------------branch ID---------------\n\n\n",branch_data)
                                        branch_id = branch_data[0]
                                        # Update the previous record's class/time/rssi/location.
                                        update_query = ('UPDATE tbl_attends_students_list SET class_id= (%s), student_name = (%s), updated_time = (%s), rssi_value = (%s), branch_id = (%s), room_id = (%s) where student_id = (%s)',(class_id, student_name, time_stamp,beacon_rssi,branch_id, room_id,student_id))
                                        result = Query.commit(update_query)
                                        print("Data updated in Attendance Table!!")
            # (large block of commented-out washroom/notification decision code
            #  removed for readability — see Notification_for_outside_class)
            return "Hello"
            #############################################################################
        except Exception as e:
            # NOTE(review): this broad catch swallows every error, including the
            # NameErrors possible above when lookups return None — log and narrow it.
            print ("Caught exception socket.error : %s \n" % e)
        #############################################################################
|
from django.urls import include, path
# from django.conf.urls import url
from . import views

# Account routes: registration, profile editing/viewing, and session handling.
urlpatterns = [
    path('create_user/', views.CreateUserView.as_view(), name='create_user'),              # sign-up
    path('change_user/<int:user_id>/', views.ChangeUserView.as_view(), name='change_user'), # edit profile
    path('users/<int:user_id>/', views.ShowUserView.as_view(), name='user_page'),           # public profile page
    path('login/', views.LoginView.as_view(), name='login'),
    path('logout/', views.LogoutView.as_view(), name='logout'),
]
|
#!/usr/bin/env python
##-----------------------------
"""Loops over events in the data file,
gets calibrated n-d array for cspad,
evaluates n-d arrays for averaged and maximum values
Usage::
python ex_nda_average.py
bsub -q psfehq -o log-r0092.log python ex_nda_average.py
"""
from __future__ import print_function
from __future__ import division
##-----------------------------
import sys
import psana
import numpy as np
from time import time
#from Detector.AreaDetector import AreaDetector
from ImgAlgos.PyAlgos import reshape_nda_to_2d, reshape_nda_to_3d, print_arr_attr, print_arr
##-----------------------------
def example_01():
    """Average and maximize calibrated CSPAD frames over a psana dataset.

    Loops over events, accumulates the per-pixel sum and running maximum,
    optionally plots the result, and writes both arrays as 2-D text files.
    """
    # control parameters
    SKIP = 0
    EVENTS = 100 + SKIP
    DO_PLOT = False
    DO_PLOT = True   # NOTE(review): overrides the line above; flip to False to disable plotting

    dsname = 'exp=cxif5315:run=169'
    src = 'CxiDs2.0:Cspad.0'
    print('Example of the detector calibrated data average for\n dataset: %s\n source : %s' % (dsname, src))

    ds = psana.DataSource(dsname)
    # Peek at the first event to learn the run number and detector shape.
    evt = next(ds.events())
    env = ds.env()
    rnum = evt.run()

    det = psana.Detector(src, env)
    shape = det.shape(rnum)
    print(' det.shape() = ', shape)

    t0_sec = time()
    counter = 0
    arr_sum = np.zeros(shape, dtype=np.double)
    arr_max = np.zeros(shape, dtype=np.double)
    for i, evt in enumerate(ds.events()) :
        if i<SKIP : continue
        if not i<EVENTS : break
        cdata = det.calib(evt)
        if cdata is None : continue
        if not i%10 : print(' Event: %d' % i)
        counter += 1
        arr_sum += cdata
        arr_max = np.maximum(arr_max, cdata)
    print(' Detector data found in %d events' % counter)
    print(' Total consumed time = %f sec' % (time()-t0_sec))
    # Guard against a run with no usable events.
    arr_ave = arr_sum/counter if counter>0 else arr_sum

    # Plot the maximum image (switch nda to arr_ave for the average).
    if DO_PLOT :
        import pyimgalgos.GlobalGraphics as gg
        nda = arr_max
        img = det.image(rnum, nda)
        if img is None : sys.exit('Image is not available. FURTHER TEST IS TERMINATED')
        ave, rms = nda.mean(), nda.std()
        gg.plotImageLarge(img, amp_range=(ave-1*rms, ave+3*rms))
        gg.show()

    # Save n-d arrays in 2-d text files.
    arr_ave = reshape_nda_to_2d(arr_ave)
    ofname_ave = 'nda-ave-%s-r%04d.txt' % (env.experiment(), evt.run())
    print('Save averaged array in file %s' % ofname_ave)
    np.savetxt(ofname_ave, arr_ave, fmt='%8.1f', delimiter=' ', newline='\n')

    arr_max = reshape_nda_to_2d(arr_max)
    ofname_max = 'nda-max-%s-r%04d.txt' % (env.experiment(), evt.run())
    print('Save maximum array in file %s' % ofname_max)
    # BUG FIX: the original wrote arr_ave into the *max* output file.
    np.savetxt(ofname_max, arr_max, fmt='%8.1f', delimiter=' ', newline='\n')
##-----------------------------
if __name__ == "__main__" :
    # Optional CLI argument selects the example number (only #1 exists today).
    ntest = int(sys.argv[1]) if len(sys.argv)>1 else 1
    print('%s\nExample # %d' % (80*'_', ntest))
    example_01()
    sys.exit(0)
|
# Generated by Django 2.0 on 2019-08-13 10:15
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial auto-generated schema: departments1, employees1, Product, Student."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='departments1',
            fields=[
                ('department_id', models.IntegerField(primary_key=True, serialize=False)),
                ('department_name', models.CharField(max_length=30)),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('update_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('created_by', models.CharField(max_length=120, null=True)),
                ('updated_by', models.CharField(max_length=120, null=True)),
            ],
            options={
                'db_table': 'departments1',
            },
        ),
        migrations.CreateModel(
            name='employees1',
            fields=[
                ('employee_id', models.AutoField(primary_key=True, serialize=False)),
                ('emp_uid', models.CharField(max_length=120, null=True, unique=True)),
                ('first_name', models.CharField(max_length=20, null=True)),
                ('last_name', models.CharField(max_length=25)),
                ('Gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female'), ('O', 'Others')], max_length=3, null=True)),
                ('Date_of_birth', models.DateField(blank=True, null=True)),
                ('date_of_join', models.DateField(blank=True, default=django.utils.timezone.now, null=True)),
                ('emp_age', models.IntegerField(blank=True, null=True)),
                # NOTE(review): email as CharField(25) — likely intended to be an
                # EmailField with a larger max_length; needs a follow-up migration.
                ('email', models.CharField(max_length=25)),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('update_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('created_by', models.CharField(max_length=120, null=True)),
                ('updated_by', models.CharField(max_length=120, null=True)),
            ],
            options={
                'db_table': 'employees1',
            },
            managers=[
                # Default manager exposed as `all_employees` instead of `objects`.
                ('all_employees', django.db.models.manager.Manager()),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=120)),
                ('description', models.TextField(blank=True, null=True)),
                # NOTE(review): max_digits=10000 is almost certainly a typo for a
                # small value (e.g. 10); fix via a follow-up migration.
                ('price', models.DecimalField(decimal_places=2, max_digits=10000)),
                ('summary', models.TextField(blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sid', models.IntegerField()),
                ('sname', models.CharField(max_length=120)),
                ('dob', models.DateTimeField()),
            ],
        ),
        # Each department references its manager (an employee); PROTECT blocks
        # deleting an employee who still manages a department.
        migrations.AddField(
            model_name='departments1',
            name='manager',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='myapp.employees1'),
        ),
    ]
|
from sklearn.ensemble import RandomForestRegressor
import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
# Random-forest movie-rating regression: train on 5 features and on a 2-feature
# subset, report errors, and plot predictions against the actual ratings.
# BUG FIX: the original mixed Python-2 `print y_pred` statements with py3-style
# calls, which is a SyntaxError on Python 3; all prints are now function calls.
# The unclosed file handles are also wrapped in `with` blocks.
with open("train_data_f_pro.csv") as f:
    data = np.loadtxt(f, delimiter=",")
with open("test_data_f_pro.csv") as f2:
    data2 = np.loadtxt(f2, delimiter=",")

# Columns 0-4 are features, column 5 is the target rating.
X = data[:, 0:5]
X_two = data[:, 3:5]     # last two features only
y = data[:, 5]
x2 = data2[:, 0:5]
x2_two = data2[:, 3:5]

# NOTE(review): criterion='mse' was renamed to 'squared_error' in newer
# scikit-learn releases — confirm the pinned version before upgrading.
rf = RandomForestRegressor(n_estimators=12, criterion='mse', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='auto', max_leaf_nodes=None, bootstrap=True, oob_score=False, n_jobs=-1, random_state=None, verbose=0, warm_start=False)

# Fit/evaluate on all five features.
rf.fit(X, y)
y_pred = rf.predict(x2)
err_mse = mean_squared_error(data2[:, 5], y_pred)
err_mean = mean_absolute_error(data2[:, 5], y_pred)
r2 = r2_score(data2[:, 5], y_pred)

# Refit the same estimator on the two-feature subset.
rf.fit(X_two, y)
y_pred2 = rf.predict(x2_two)
err_mse2 = mean_squared_error(data2[:, 5], y_pred2)
err_mean2 = mean_absolute_error(data2[:, 5], y_pred2)
r2_2 = r2_score(data2[:, 5], y_pred2)

print("All features")
print(y_pred)
print("All features: mse_error", err_mse)
print("All features: rms_error:", (err_mse**0.5))
print("All features: r2:", r2)
print("All features: mean_error:", err_mean)
print("Two features")
print(y_pred2)
print("two features: mse_error", err_mse2)
print("two features: rms_error:", (err_mse2**0.5))
print("two features: r2:", r2_2)
print("two features: mean_error:", err_mean2)

plt.figure(1)
plt.plot(data2[:,5],data2[:,5],color = 'g',linestyle = "-", label ="data")
plt.plot(data2[:,5], y_pred,color = 'b' ,linestyle ="-", label="Random forest Regression (5 features)")
plt.legend(loc='upper right',prop={'size':6})
plt.xlim(5,10)
plt.ylim(1,10)
plt.title('Movie Prediction')
plt.xlabel('Predicted Rating')
plt.ylabel('Actual Rating')

plt.figure(2)
plt.plot(data2[:,5],data2[:,5],color = 'g',linestyle = "-", label ="data")
plt.plot(data2[:,5], y_pred2, linestyle ="-", color = 'b',label="Random forest Regression (2 features)")
plt.legend(loc='upper right',prop={'size':6})
plt.xlim(5,10)
plt.ylim(1,10)
plt.title('Movie Prediction')
plt.xlabel('Predicted Rating')
plt.ylabel('Actual Rating')
plt.show()
|
# Generated by Django 2.1 on 2018-08-12 19:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated schema for the EData model (per-user timestamped values)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='EData',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('userId', models.CharField(max_length=100)),
                # 15 total digits, 10 after the decimal point.
                ('value', models.DecimalField(decimal_places=10, max_digits=15)),
                ('date', models.DateField()),
                ('time', models.TimeField()),
            ],
        ),
    ]
|
# Grocery price dictionary: print the sum and mean of the values, then the
# three largest and three smallest entries, and the elapsed CPU time.
import time

start = time.process_time()

slownik = {}
slownik['Masło'] = 2.3
slownik['Mleko'] = 1.5
slownik['Chleb'] = 5.2
slownik['Jajka'] = 3.0
slownik['Bulki'] = 0.3
slownik['Banany'] = 2.3
slownik['Jablka'] = 1.4
slownik['Zioło'] = 30.5
slownik['Kasztelan'] = 2.3
slownik['Ksiazece'] = 4.5

# Sum of the values.
s = sum(slownik.values())
print(s)

# Mean value.
n = len(slownik.values())
print(s/n)

# Extreme values.
# BUG FIX: the original "copies" (slownik2 = slownik) were mere aliases, so
# popping the extremes destroyed the source dict.  Working on a real copy
# leaves `slownik` intact; the printed output is unchanged (removing the
# three largest entries does not affect which three are smallest).
for n in range(2):
    working = dict(slownik)
    if n == 0:
        print("\nOto trzy największe wartości:\n")
    else:
        print("\nOto trzy najmniejsze wartości:\n")
    for i in range(3):
        maxval = max(working.values())
        minval = min(working.values())
        # First key (in insertion order) holding the extreme value.
        maksimum = [k for k, v in working.items() if v == maxval]
        minimum = [k for k, v in working.items() if v == minval]
        if n == 0:
            print(f"Wartość nr.{i+1}", maksimum[0], working[maksimum[0]])
            working.pop(maksimum[0])
        else:
            print(f"Wartość nr.{i+1}", minimum[0], working[minimum[0]])
            working.pop(minimum[0])

duration = time.process_time() - start
print("{0:02f}s".format(duration))
|
# Interactive FASTA/GenBank utility: reverse a sequence, reverse-complement
# it, or convert a GenBank record to FASTA, writing 70-column output lines.
fasta_file = input("Enter input file: ")
fasta_output = input("Enter output file: ")
print("Option-1) Read a FASTA format DNA sequence file and make a reverse sequence file.")
print("Option-2) Read a FASTA format DNA sequence file and make a reverse complement sequence file.")
print("Option-3) Convert GenBank format file to FASTA format file.")
option = int(input("Select the option: "))

# Read the FASTA: keep the last ">" header line, concatenate sequence lines.
# NOTE(review): for option 3 the input is GenBank, so header/seq read here are
# meaningless and recomputed below — confirm this double-read is intended.
seq = []
with open(fasta_file,"r") as handle:
    for line in handle:
        if line.startswith(">"):
            header = line
        else:
            seq.append(line.strip())
seq = "".join(seq)
rev_seq = seq[::-1]

if option == 1:
    # Write the header plus the reversed sequence, wrapped at 70 characters.
    with open(fasta_output,"w") as handle:
        handle.write(header)
        for i in range(0,len(rev_seq),70):
            handle.write(rev_seq[i:i+70])
            handle.write("\n")
elif option == 2:
    # Complement map: lowercase input bases map to UPPERCASE complements, so
    # the sequential replace() calls cannot re-replace already-mapped bases.
    d_comp = {"a":"T","g":"C","c":"G","t":"A"}
    cmp_rev_seq = rev_seq.lower()
    for k,v in d_comp.items():
        cmp_rev_seq = cmp_rev_seq.replace(k,v)
    with open(fasta_output,"w") as handle:
        handle.write(header)
        for i in range(0,len(cmp_rev_seq),70):
            handle.write(cmp_rev_seq[i:i+70])
            handle.write('\n')
elif option == 3:
    # GenBank -> FASTA: first line becomes the title; sequence lines follow
    # the ORIGIN keyword as "<offset> <10-base chunks...>".
    with open(fasta_file,"r") as handle:
        content = handle.readlines()
    for i in range(len(content)):
        if i == 0:
            title = content[i]
        elif content[i].startswith("ORIGIN"):
            # NOTE(review): the comprehension variable shadows the outer loop
            # index `i`; it works but is fragile.
            seq = [i.lstrip() for i in content[i+1:]]
            seq = "".join(seq)
            seq_line = seq.split("\n")
            seq_result = ""
            for line in seq_line:
                seq_list = line.split(" ")
                # Skip lines without "<offset> <bases>" structure (e.g. "//").
                if len(seq_list) < 2:
                    continue
                seq_result += "".join(seq_list[1:])
            with open(fasta_output,"w") as handle:
                handle.write(title)
                for i in range(0,len(seq_result),70):
                    handle.write(seq_result[i:i+70])
                    handle.write('\n')
|
def change(amount):
    """Return a list of 5- and 7-unit coins summing to `amount` (amount >= 24).

    Amounts 24-28 are hard base cases; larger amounts recurse on amount-5
    and append a 5, so every integer >= 24 is representable.
    """
    base_cases = {
        24: [5, 5, 7, 7],
        25: [5, 5, 5, 5, 5],
        26: [5, 7, 7, 7],
        27: [5, 5, 5, 5, 7],
        28: [7, 7, 7, 7],
    }
    if amount in base_cases:
        # Copy so callers (and the recursion) never mutate the shared table.
        return list(base_cases[amount])
    coins = change(amount - 5)
    coins.append(5)
    return coins
# Sanity check: every amount 24..999 must be exactly representable;
# any amount whose coins don't sum correctly is printed (expected: none).
for i in range(24, 1000):
    if i != sum(change(i)):
        print(i)
|
# -*- coding: utf-8 -*-
import json
import re
import scrapy
from urllib.parse import urlencode
from copy import deepcopy
from scrapy.http import HtmlResponse
from instagram_parser.items import UserItem, FollowingItem
class InstagramSpider(scrapy.Spider):
    """Authenticated Instagram crawler collecting users and their follow graph.

    (Class body continues beyond this excerpt.)
    """
    name = 'instagram'
    allowed_domains = ['instagram.com']
    start_urls = ['https://www.instagram.com/']
    # AJAX endpoint used for form-based login.
    auth_url = 'https://www.instagram.com/accounts/login/ajax/'
    # todo: add authentications data
    user_data = {
        "username": "",
        "password": "",
    }
    # Seed accounts to crawl from, and the set of accounts already processed.
    users_list = {"haikson"}
    users_parsed = set()
    graphql_url = 'https://www.instagram.com/graphql/query/?{params}'
    # Persisted query hashes for the GraphQL endpoints (captured from the web
    # client; the commented JSON shows each endpoint's expected variables).
    graphql_hashes = {
        "user": "c9100bf9110dd6361671f113dd02e7d6",
        # {"user_id":"624689305","include_chaining":true,"include_reel":true,"include_suggested_users":true,"include_logged_out_extras":false,"include_highlight_reels":false,"include_related_profiles":false}
        "followers": "c76146de99bb02f6415203be841dd25a",
        # {"id":"624689305","include_reel":true,"fetch_mutual":false,"first":17,"after":"QVFBc3I0X0U5OW9OUHRjS1NRWHl5aml2ZDZHcGg4Yi0tSTdkb0p3WkE4ZWhsNWd3R2kwYXlJZ0NUZFJBQVhXQjE1Y0lCbHd3Ty1UY1BrVm1QLWhqZU5iMA=="}
        "following": "d04b0a864b4b54837c0d870b0e77e076"
        # {"id":"624689305","include_reel":true,"fetch_mutual":false,"first":24}
    }
    # Stop expanding the crawl once this many users have been parsed.
    parse_limit = 200
def parse(self, response):
re_csrf = re.compile(r"\"csrf_token\":\"(\w+?)\"")
csrf_token = re_csrf.findall(response.text)[0]
form_data = deepcopy(self.user_data)
headers = {
"X-CSRFToken": csrf_token,
}
yield scrapy.FormRequest(
self.auth_url,
method='POST',
callback=self.user_parse,
formdata=form_data,
headers=headers
)
def user_parse(self, response: HtmlResponse):
auth_response = json.loads(response.text)
if auth_response.get("authenticated"):
for ig_user in self.users_list:
yield response.follow(f"/{ig_user}/", callback=self.parse_user_data, cb_kwargs={'username': ig_user})
def parse_user_data(self, response: HtmlResponse, username: str, user_id: int=None):
user_id = user_id or self.get_user_id(response.text, username)
user_data = self.get_user_data(response.text)
full_name = user_data.get("entry_data", {}).get("ProfilePage", [{}])[0].get("graphql", {}).get("user", {}).get("full_name", {})
yield UserItem(user_id=user_id, username=username, full_name=full_name)
if len(self.users_parsed) <= self.parse_limit:
followers_url = self.get_graphql_url(
query_hash_name="followers",
variables=self.followers_variables(user_id=user_id)
)
yield response.follow(followers_url, callback=self.parse_followers, cb_kwargs={"username": username, "user_id": int(user_id)})
following_url = self.get_graphql_url(
query_hash_name="following",
variables=self.followers_variables(user_id=user_id)
)
yield response.follow(following_url, callback=self.parse_following, cb_kwargs={"username": username, "user_id": int(user_id)})
def parse_followers(self, response: HtmlResponse, username: str, user_id: int):
self.users_parsed.add(username)
data = json.loads(response.text)
after = data.get("data", {}).get("user", {}).get("edge_followed_by", {}).get("page_info", {}).get("end_cursor")
has_next_page = data.get("data", {}).get("user", {}).get("edge_followed_by", {}).get("page_info", {}).get("has_next_page")
followers = data.get("data", {}).get("user", {}).get("edge_followed_by", {}).get("edges", [])
for follower in followers:
follower = follower.get("node", {})
if follower.get("username") not in self.users_parsed:
if len(self.users_parsed) <= self.parse_limit:
yield response.follow(
"/{}/".format(follower.get("username")),
callback=self.parse_user_data,
cb_kwargs={"username": follower.get("username"), "user_id": follower.get("id")}
)
yield FollowingItem(
user_id=int(follower.get('id')),
followers=[int(user_id)]
)
if has_next_page:
followers_url = self.get_graphql_url(
query_hash_name="followers",
variables=self.followers_variables(user_id=user_id, after=after)
)
yield response.follow(
followers_url,
callback=self.parse_followers,
cb_kwargs={"username": username, "user_id": user_id}
)
def parse_following(self, response: HtmlResponse, username: str, user_id: int):
self.users_parsed.add(username)
data = json.loads(response.text)
after = data.get("data", {}).get("user", {}).get("edge_follow", {}).get("page_info", {}).get("end_cursor")
has_next_page = data.get("data", {}).get("user", {}).get("edge_followed_by", {}).get("page_info", {}).get("has_next_page")
followers = data.get("data", {}).get("user", {}).get("edge_follow", {}).get("edges", [])
for follower in followers:
follower = follower.get("node", {})
if follower.get("username") not in self.users_parsed:
yield response.follow(
"/{}/".format(follower.get("username")),
callback=self.parse_user_data,
cb_kwargs={"username": follower.get("username"), "user_id": follower.get("id")}
)
yield FollowingItem(
user_id=int(user_id),
followers=[int(follower.get('id'))]
)
if has_next_page:
followers_url = self.get_graphql_url(
query_hash_name="following",
variables=self.followers_variables(user_id=user_id, after=after)
)
yield response.follow(
followers_url,
callback=self.parse_followers,
cb_kwargs={"username": username, "user_id": user_id}
)
def get_user_id(self, text, username):
matched = re.search("{\"id\":\"\\d+\",\"username\":\"%s\"}" % username, text).group()
return json.loads(matched).get("id")
def get_user_data(self, text):
regex = r"window._sharedData.*?({.*}?);";
matches = re.findall(regex, text)
if len(matches):
return json.loads(matches[0])
return {}
def get_graphql_url(self, query_hash_name, variables):
if isinstance(variables, dict):
variables = json.dumps(variables)
return self.graphql_url.format(
params=urlencode({"variables": variables, "query_hash":self.graphql_hashes[query_hash_name]})
)
@staticmethod
def followers_variables(user_id, after=None):
if isinstance(user_id, str):
user_id = int(user_id)
variables = {
"id": "{}".format(user_id),
"include_reel": True,
"fetch_mutual": False,
"first": 17,
}
if after:
variables.update({"after": after})
return variables |
import cv2
import numpy as np
# Load the photo, shrink it to 960x540, and display the centre 50% crop.
source = cv2.imread('..\\MasterOpenCV\\images\\IMG_7539.jpg')
image = cv2.resize(source, (960, 540))
height, width = image.shape[:2]
start_row, start_col = int(height * .25), int(width * .25)
end_row, end_col = int(height * .75), int(width * .75)
cropped = image[start_row:end_row, start_col:end_col]
cv2.imshow("original", image)
cv2.waitKey()
cv2.imshow("cropped", cropped)
cv2.waitKey()
cv2.destroyAllWindows()
import numpy as np
import matplotlib.pyplot as plt
def convolve(x, h):
    """Full linear convolution of two 1-D sequences.

    Parameters: x, h -- numeric sequences.
    Returns: float64 ndarray of length len(x)+len(h)-1 (matching the
    original O(n*m) Python loop, but computed in C via np.convolve).
    """
    x = np.asarray(x, dtype=float)
    h = np.asarray(h, dtype=float)
    if x.size == 0 or h.size == 0:
        # Degenerate input: mirror the zero output the naive loop produced.
        return np.zeros(max(x.size + h.size - 1, 0))
    return np.convolve(x, h)
def timerev(x):
    """Time-reverse a 1-D sequence.

    Returns a new float64 ndarray with the elements of *x* in reverse order
    (same output as the original index loop, via a slice).
    """
    # np.array (not asarray) forces a copy so the result never aliases *x*.
    return np.array(x, dtype=float)[::-1]
# ---- interactive demo: convolution / correlation using convolve & timerev ----
# Read x[n] from the user.
m = int(input("enter number of samples for x[n]:"))
x = np.array([int(input("enter samples for x[n]:")) for _ in range(m)], dtype=float)
print(x)
# Read h[n] from the user.
p = int(input("enter number of samples for h[n]:"))
h = np.array([int(input("enter samples for h[n]:")) for _ in range(p)], dtype=float)
print(h)
# Convolution, then cross-correlation via convolution with the reversed result.
result = convolve(x, h)
print("convolution of x[n] and h[n] is:", result)
f = timerev(result)
print("time reversal of convolution result is:", f)
rxy = convolve(x, f)
print("cross correlation:", rxy)
# Two sampled sinusoids.
f1 = int(input("enter signal 1 frequency:"))
f2 = int(input("enter signal 2 frequency:"))
fs = int(input("enter sampling frequency:"))
x1 = np.arange(0, 10, 0.1)
y1 = np.sin(2 * np.pi * (float(f1) / float(fs)) * x1)
y2 = np.sin(2 * np.pi * (float(f2) / float(fs)) * x1)
r = convolve(y1, y2)
q = timerev(r)
y3 = convolve(y1, q)
# Noisy copy of signal 1 smoothed by a 3-point moving-average system.
N = np.random.rand(y1.shape[0])
xN = y1 + N
h = [1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0]
y4 = convolve(h, xN)
v = timerev(xN)
ac = convolve(v, xN)
# Plot everything on one six-panel figure.
panels = (
    (611, y1, "signal 1"),
    (612, y2, "signal 2"),
    (613, r, "convolution of signal 1 and signal 2"),
    (614, y3, "cross correlation"),
    (615, y4, "convolution of impulse response of MA system and xN"),
    (616, ac, "auto correlation"),
)
for slot, data, caption in panels:
    plt.subplot(slot)
    plt.plot(data)
    plt.title(caption)
plt.show()
# Game-loop bot (CodinGame-style): read target (a, b) and start (x, y), then
# each turn step one cell toward the target and print the compass move.
# NOTE(review): the loop is intentionally endless -- presumably a game harness
# terminates the process; confirm before reusing outside such a harness
# (after arrival it prints empty lines forever).
a,b,x,y=[int(i)for i in input().split()]
while 1:
    p=""
    if y<b:y+=1;p="S"    # y below target -> increase y, report "S"
    if y>b:y-=1;p="N"    # y above target -> decrease y, report "N"
    if x<a:x+=1;p+="E"   # x left of target -> increase x, append "E"
    if x>a:x-=1;p+="W"   # x right of target -> decrease x, append "W"
    print(p)
from .template import Element, element
@element()
class h1(Element):
    """Marker subclass registering the 'h1' element (behavior inherited from Element)."""
    ...
@element()
class b(Element):
    """Marker subclass registering the 'b' element (behavior inherited from Element)."""
    ...
@element()
class i(Element):
    """Marker subclass registering the 'i' element (behavior inherited from Element)."""
    ...
@element()
class span(Element):
    """Marker subclass registering the 'span' element (behavior inherited from Element)."""
    ...
|
import multiprocessing
import cv2
import dlib
import time
import threading
import numpy as np
from skimage import io
from sklearn.externals import joblib
import datetime
import glob
import os
import time
import MySQLdb
import redis
# --- module-level service handles and face-recognition models ---
redisdb = redis.Redis(host='localhost', port=6379, db=1)  # NOTE(review): never used in this file
mysqldb = MySQLdb.connect("localhost","root","root","zm" )
cursor = mysqldb.cursor()  # shared cursor used by appenddict()/predictions()
detector = dlib.get_frontal_face_detector()
filepath = os.getcwd()
clf = joblib.load(filepath+'/trained_classifier.pkl')  # sklearn classifier over dlib face descriptors
detector = dlib.get_frontal_face_detector()  # NOTE(review): duplicate of the assignment above
sp = dlib.shape_predictor(filepath+"/old_shape_predictor_68_face_landmarks.dat")
facerec = dlib.face_recognition_model_v1(filepath+"/dlib_face_recognition_resnet_model_v1.dat")
#EVENTSTOVIDEO()
cam_queue = multiprocessing.Queue()  # frames flow from the camera process to the recogniser process
def calculatetime(time1):
    # Minutes elapsed between timestamp string *time1* ('%Y-%m-%d %H:%M:%S')
    # and now.
    fmt='%Y-%m-%d %H:%M:%S'
    d1 = datetime.datetime.strptime(time1, fmt)
    d2_current = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    d2 = datetime.datetime.strptime(d2_current, fmt)
    d1_ts = time.mktime(d1.timetuple())
    d2_ts = time.mktime(d2.timetuple())
    # NOTE(review): int(...) / 60 is integer division on Python 2 (this file
    # uses Python-2 print statements elsewhere) but float division on
    # Python 3; callers only compare "> 2", which works either way.
    restime=int(d2_ts-d1_ts) / 60
    return restime
# Per-person timestamp of the last DB insert, used to rate-limit inserts.
timedict = {}
def appenddict(name, imgpath, imgname, crop_img):
    """Record a recognised face: write the cropped image to disk and insert a
    VideoCaptured row, unless *name* was already recorded within ~2 minutes.

    name     -- classifier label (also used as uuid in the DB row)
    imgpath  -- web-relative path stored in the DB
    imgname  -- filename stem for the saved crop
    crop_img -- image array written via cv2.imwrite
    """
    current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    if name in timedict:
        if calculatetime(timedict[name]) <= 2:
            # Seen too recently: skip the disk write and DB insert.
            return
    else:
        # First sighting of this person.
        print (name)
    timedict[name] = current_time
    cv2.imwrite(filepath + "/static/media/detected/" + str(imgname) + ".jpg", crop_img)
    # Parameterised query -- the previous revision interpolated values straight
    # into the SQL string (quoting bugs + SQL-injection risk).
    cursor.execute(
        "INSERT INTO VideoCaptured(uuid,image, status,name,occupation,created_at,updated_at)"
        " VALUES (%s, %s, %s, %s, %s, %s, %s)",
        (name, imgpath, "Known", name, '', current_time, current_time),
    )
    mysqldb.commit()
def EVENTS_CREATION(image):
    # Save *image* into today's events directory, creating the directory on
    # first use.
    # NOTE(review): ``todaystr`` is not defined anywhere in this file --
    # calling this function as-is raises NameError; confirm where it was
    # meant to be set (perhaps in the commented-out EVENTSTOVIDEO()).
    if (os.path.isdir("/home/zoom/Desktop/TASKS/events/"+todaystr) == True):
        cv2.imwrite("/home/zoom/Desktop/TASKS/events/"+todaystr+"/"+datetime.datetime.now().strftime('%Hh%Mm%Ss%f') + '.jpg', image)
    else :
        os.mkdir("/home/zoom/Desktop/TASKS/events/"+todaystr)
        cv2.imwrite("/home/zoom/Desktop/TASKS/events/"+todaystr+"/"+datetime.datetime.now().strftime('%Hh%Mm%Ss%f') + '.jpg', image)
def diffImg(t0, t1, t2):
    """Motion mask from three consecutive grey frames: keep only pixels that
    changed in BOTH steps (|t2-t1| AND |t1-t0|)."""
    newer_diff = cv2.absdiff(t2, t1)
    older_diff = cv2.absdiff(t1, t0)
    return cv2.bitwise_and(newer_diff, older_diff)
def cam_feed(cam_queue):
    # Producer process: read the RTSP stream and push a frame onto cam_queue
    # whenever enough pixels changed between consecutive frames.
    cap = cv2.VideoCapture("rtsp://192.168.0.16/user=admin&password=&channel=1&stream=0.sdp?")
    #cap = cv2.VideoCapture(0)
    cap1 = cap  # alias: cap and cap1 are the SAME capture object
    cap.set(3,1920)  # property 3 = frame width
    cap.set(4,1080)  # property 4 = frame height
    cap.set(5, 10)   # property 5 = FPS
    # Keep a sliding window of three consecutive greyscale frames.
    t_minus = cv2.cvtColor(cap1.read()[1], cv2.COLOR_RGB2GRAY)
    t = cv2.cvtColor(cap1.read()[1], cv2.COLOR_RGB2GRAY)
    t_plus = cv2.cvtColor(cap1.read()[1], cv2.COLOR_RGB2GRAY)
    while(cap.isOpened()):
        ret,image = cap1.read()
        motion = diffImg(t_minus, t, t_plus)
        # Slide the three-frame window forward.
        t_minus = t
        t = t_plus
        t_plus = cv2.cvtColor(cap1.read()[1], cv2.COLOR_RGB2GRAY)
        print (cv2.countNonZero(motion))
        if cv2.countNonZero(motion) > 400000: # MOTION DETECTION VALUE
            ret,frame = cap.read()
            # fm = cv2.Laplacian(frame, cv2.CV_64F).var() # BLUR DETECTION
            print "BEFORE MOTION DETECTION"
            # if fm > 500 :
            # print "AFTER MOTION DETECTION",fm
            cam_queue.put(frame)  # hand the frame to the recogniser process
            # EVENTS_CREATION(frame)
def croping(img):
    # First pass with a lowered threshold (-1) to obtain confidence scores;
    # only run full detection + cropping when the best score is high enough.
    dets1, scores, idx = detector.run(img, 1, -1)
    # NOTE(review): scores is empty when no face is detected at all, so
    # scores[0] raises IndexError -- the caller (from_queue) has no guard.
    print scores[0]
    if scores[0] > 1.6 :
        print " IN IF ------------------------------------------"
        dets = detector(img, 1)
        print "dets"
        for k, d in enumerate(dets):
            print "in for"
            # Crop each detected face rectangle and classify it.
            crop_img = img[(d.top()):(d.bottom()),(d.left()):(d.right())]
            predictions(crop_img)
def predictions(img1):
    # Classify a cropped face image: compute the dlib 128-d descriptor, run
    # the sklearn classifier, and record the result (image + MySQL row).
    imgname=(((str(datetime.datetime.now())).split(" ")[1]).replace(":","")).replace(".","")  # time-of-day string used as filename
    print "PREDECTION"
    try:
        dets = detector(img1, 1)
        for k, d in enumerate(dets):
            cv2.imwrite("test.jpg",img1)  # NOTE(review): debug artifact overwritten every call
            shape = sp(img1, d)
            face_descriptor = facerec.compute_face_descriptor(img1, shape)
            a = face_descriptor
            vector21 = np.array(a)
            y_pred = clf.predict(vector21)
            predictions = clf.predict_proba(vector21).ravel()  # NOTE(review): shadows this function's name
            imgpath="/static/media/detected/"+str(imgname)+".jpg"
            print predictions
            # Accept when any of the first three class probabilities > 70%.
            # NOTE(review): assumes the classifier has exactly three known
            # classes -- confirm against the trained model.
            if ((predictions[0]*100) > 70):
                print (y_pred[0])
                appenddict(y_pred[0],imgpath,imgname,img1)
            elif ((predictions[1]*100) > 70):
                print ("same face")
                print (y_pred[0])
                appenddict(y_pred[0],imgpath,imgname,img1)
            elif ((predictions[2]*100) > 70):
                print (y_pred[0])
                appenddict(y_pred[0],imgpath,imgname,img1)
            else:
                # Unknown face: store under "undetected" with status UnTrained.
                print ("unknown")
                current_time=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                imgpath="/static/media/undetected/"+str(imgname)+".jpg"
                cv2.imwrite(filepath+"/static/media/undetected/"+str(imgname)+".jpg",img1)
                # NOTE(review): values are interpolated directly into the SQL
                # string -- quoting/injection risk; prefer parameterised queries.
                cursor.execute("INSERT INTO VideoCaptured(uuid,image, status,name,occupation,created_at,updated_at) VALUES ('%s', '%s','%s', '%s', '%s','%s','%s')" % (imgname, imgpath, "UnTrained","",'',current_time,current_time))
                mysqldb.commit()
    except Exception as e:
        print e.message
def from_queue():
    """Consumer loop: pull camera frames pushed by cam_feed and run detection."""
    while True:
        frame = cam_queue.get()
        croping(frame)
if __name__ == "__main__":
    # Producer (camera reader) and consumer (face recogniser) processes.
    producer = multiprocessing.Process(target=cam_feed, args=(cam_queue,))
    producer.start()
    consumer = multiprocessing.Process(target=from_queue)
    consumer.start()
from django.conf.urls import url
from . import views
# Static pages plus FAQ detail pages 1..14, which map one-to-one onto
# views.faq_detail1 .. views.faq_detail14.
urlpatterns = [
    url(r'^faq/$', views.faq, name='faq'),
    url(r'^start/$', views.start, name='start'),
    url(r'^profile/(?P<user_pk>\d+)/$', views.profile, name='profile'),
] + [
    url(r'^faq/{}/$'.format(n),
        getattr(views, 'faq_detail{}'.format(n)),
        name='faq_detail{}'.format(n))
    for n in range(1, 15)
]
|
"""Generator Comprehensions
* Similar syntax to list comprehensions
* Create a generator object
* Concise
* Lazy evaluation
"""
from utils import *
def is_odd(x):
    """True when the integer *x* is odd."""
    return bool(x % 2)
def main():
    """Demonstrate generator-comprehension behaviour step by step."""
    hrule("Create a generator comprehension")
    squares_gen = (n*n for n in range(1, 1000001))
    print(squares_gen)
    hrule("Create a list from generator comprehension (almost 40MB of RAM required)")
    materialized = list(squares_gen)
    print(type(materialized))
    hrule("The generator is now empty! Creating another list from this generator will return an EMPTY LIST")
    drained = list(squares_gen)
    print(drained)
    hrule("Use the sum() to produce the SUM of 1.000.000 square numbers, with generator comprehension")
    total = sum(n*n for n in range(1, 1000001))
    print(total)
    hrule("You can use generator comprehension with filter clause")
    total = sum(n for n in range(1000000) if is_odd(n))
    print(total)
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    main()
import newspaper
from newspaper import Article
import time
import nltk
import json
import requests
from bs4 import BeautifulSoup as soup
from requests_oauthlib import OAuth1
import secrets
CACHE_FILENAME = "twitter_cache.json"  # cache file for Twitter API responses
CACHE_DICT = {}
# Credentials come from a project-local secrets.py module.
# NOTE(review): that module name shadows the stdlib ``secrets`` module.
client_key = secrets.TWITTER_API_KEY
client_secret = secrets.TWITTER_API_SECRET
access_token = secrets.TWITTER_ACCESS_TOKEN
access_token_secret = secrets.TWITTER_ACCESS_TOKEN_SECRET
bearer_token = secrets.BEARER_TOKEN
# OAuth1 signer used by make_request() for all v1.1 API calls.
oauth = OAuth1(client_key,
                client_secret=client_secret,
                resource_owner_key=access_token,
                resource_owner_secret=access_token_secret)
def test_oauth():
    '''Diagnostic helper: call verify_credentials to test the supplied
    user credentials.

    Returns the parsed JSON: a representation of the requesting user on
    HTTP 200, or an error payload on 401.  Not used for the assignment.'''
    endpoint = "https://api.twitter.com/1.1/account/verify_credentials.json"
    signer = OAuth1(client_key, client_secret, access_token, access_token_secret)
    return requests.get(endpoint, auth=signer).json()
def open_cache():
    ''' Opens the cache file if it exists and loads the JSON into
    a dictionary; returns an empty dictionary when the file is missing
    or unreadable.

    Returns
    -------
    The opened cache: dict
    '''
    try:
        # ``with`` guarantees the file is closed even if json parsing fails
        # (the previous revision leaked the handle on a parse error and used
        # a bare ``except:`` that swallowed every exception type).
        with open(CACHE_FILENAME, 'r') as cache_file:
            return json.loads(cache_file.read())
    except (OSError, ValueError):
        # Missing file or corrupt JSON: start with an empty cache.
        return {}
def save_cache(cache_dict):
    ''' Saves the current state of the cache to disk.

    Parameters
    ----------
    cache_dict: dict
        The dictionary to save

    Returns
    -------
    None
    '''
    # ``with`` flushes and closes the file even if the write raises
    # (the previous revision used explicit open/close without a guard).
    with open(CACHE_FILENAME, "w") as fw:
        fw.write(json.dumps(cache_dict))
def construct_unique_key(baseurl, params):
    ''' Build a key that uniquely and repeatably identifies an API request.

    The base URL and every key/value pair from *params* are joined with
    underscores, e.g. baseurl_key1_value1_key2_value2.

    Parameters
    ----------
    baseurl: string
        The URL for the API endpoint
    params: dict
        A dictionary of param:value pairs

    Returns
    -------
    string
        the unique key as a string
    '''
    segments = ['{}_{}'.format(key, value) for key, value in params.items()]
    return baseurl + '_' + '_'.join(segments)
def make_request(baseurl, params):
    '''GET *baseurl* with *params*, signed with the module-level OAuth1
    credentials, and return the parsed JSON response as a dict.

    Parameters
    ----------
    baseurl: string
        The URL for the API endpoint
    params: dictionary
        A dictionary of param:value pairs

    Returns
    -------
    dict
        the data returned from making the request
    '''
    return requests.get(baseurl, params, auth=oauth).json()
def make_request_with_cache(baseurl, query, count):
    '''Check the cache for a saved result for this baseurl+params combo.
    If found, return it; otherwise send a new request, save it, return it.

    Prints "fetching cached data" on a cache hit and "making new request"
    on a miss (the docstring of the previous revision required these prints
    but the code never emitted them), and now persists new results with
    save_cache() so later runs actually hit the cache.

    Parameters
    ----------
    baseurl: string
        The URL for the API endpoint
    query: string
        The search term to pass as the 'q' parameter
    count: integer
        The number of results you request from Twitter

    Returns
    -------
    dict
        the results of the query, from cache or fresh
    '''
    params = {'q': query,
              'count': count
              }
    CACHE_DICT = open_cache()
    key_for_request = construct_unique_key(baseurl, params)
    if key_for_request in CACHE_DICT:
        print("fetching cached data")
        return CACHE_DICT[key_for_request]
    print("making new request")
    CACHE_DICT[key_for_request] = make_request(baseurl, params)
    save_cache(CACHE_DICT)  # persist so the next run can reuse this response
    return CACHE_DICT[key_for_request]
# Scrape target for the news section.
my_url = "https://www.nytimes.com/section/technology"
CACHE_FILE_NAME = "news_cache.json"  # NOTE(review): unused -- open_cache()/save_cache() read CACHE_FILENAME instead
CACHE_DICT = {}
def make_request_using_cache(url):
    """Return the page text for *url*, serving from the JSON cache when
    possible; otherwise fetch (with a polite 1s delay), cache, and return."""
    cache = open_cache()
    if url in cache:  # the url itself is the unique cache key
        print("Using cache")
        return cache[url]
    print("Fetching")
    time.sleep(1)
    cache[url] = requests.get(url).text
    save_cache(cache)
    return cache[url]
def get_content_string(url):
    '''
    Purpose:
    --------
    This method extracts a content string from the HTML outline of the NY Times Tech Section.
    Parameter:
    --------
    the url of the ny times tech section
    Returns:
    -------
    the raw JSON-LD text following the "itemListElement" key
    '''
    cache_dict_response = make_request_using_cache(url)
    page_soup = soup(cache_dict_response, 'html.parser')
    # Use the below statement as a visualizer of the HTML outline.
    # print(page_soup)
    # The page embeds its article metadata in JSON-LD <script> blocks.
    containers = page_soup.find_all("script", {"type": "application/ld+json"})
    article_list = []
    for container in containers:
        for dictionary in container:
            article_list.append(dictionary)
    # Merge the first two script payloads into a single string.
    article_list[0:2] = [''.join(article_list[0:2])]
    content_string = article_list[0]
    # NOTE(review): the +18 offset assumes a fixed amount of JSON punctuation
    # after "itemListElement" -- brittle against NYT markup changes; confirm
    # when the site layout updates.
    article_index = content_string.index("itemListElement")
    content_string = content_string[article_index + 18:]
    return content_string
def find_occurrences(content_string):
    '''
    Purpose:
    ----------
    finds the start and end of all correct article hyperlinks in the
    previously extracted content string
    Parameters:
    -----------
    the content string from the NY times tech section html outline
    Returns:
    --------
    two parallel lists: starting indices and (exclusive) ending indices of
    the hyperlinks in the content string, truncated to equal length
    '''
    start_indices = [i for i in range(len(content_string))
                     if content_string.startswith('https://www.nytimes.com/2021', i)]
    # +5 moves each end index past the '.html' suffix.
    end_indices = [i + 5 for i in range(len(content_string))
                   if content_string.startswith('.html', i)]
    # Keep only as many pairs as both lists can supply.  The previous revision
    # truncated start_indices to the *difference* of the lengths, which kept
    # the wrong number of entries whenever starts outnumbered ends (and made
    # get_all_urls raise IndexError).
    n_pairs = min(len(start_indices), len(end_indices))
    return start_indices[:n_pairs], end_indices[:n_pairs]
def get_all_urls(start_indices, end_indices, content_string):
    '''
    Purpose:
    --------
    Slice every article hyperlink out of the content string.
    Parameters:
    ----------
    The starting and ending indices of the hyperlinks in the content string,
    plus the content string itself.
    Returns:
    ----------
    list of urls
    '''
    # Indexing end_indices by position (rather than zipping) preserves the
    # original behaviour when the two lists have different lengths.
    return [content_string[start_indices[i]:end_indices[i]]
            for i in range(len(start_indices))]
def summarize_article(url):
    '''
    Purpose:
    --------
    Summarizes the article and provides valuable info including images and attritbutions
    Parameters:
    -----------
    url
    Returns:
    ---------
    article summary
    '''
    article = Article(url)
    article.download()
    article.parse()
    # Punkt is a sentence tokenizer which is useful for extracting and detecting text.
    # NOTE(review): Article.download() takes HTML input, not a tokenizer name --
    # this call looks like it was meant to be nltk.download('punkt'); confirm
    # (run command python -m nltk.downloader 'punkt' in terminal to download punkt).
    article.download('punkt')
    article.nlp()
    # Gets the author or authors of the article
    author_string = "Author(s): "
    for author in article.authors:
        author_string += author # adds all authors (if more than 1) to the author string.
    print(author_string)
    # Gets the publish date of the article
    date = article.publish_date
    # strftime() converts a tuple or struct_time representing a time to a string as specified by the format argument.
    # Here, it is used to mark the month, day, and year of the date in a readable format.
    # NOTE(review): raises AttributeError when publish_date is None -- confirm
    # every scraped article carries a date.
    print("Publish Date: " + str(date.strftime("%m/%d/%Y")))
    # Gets the top image of the article
    print("Top Image Url: " + str(article.top_image))
    # Gets the article images
    image_string = "All Images: "
    for image in article.images:
        image_string += "\n\t" + image # adds a newline and a tab before each image is printed
    print(image_string)
    print()
    # Gets the article summary
    print("A Quick Article Summary")
    print("----------------------------------------")
    print(article.summary)
    return article.summary
def search_twitter(query, tweet_fields, bearer_token):
    """Query Twitter's v2 recent-search endpoint and return the JSON payload.

    Prints the HTTP status code; raises Exception on any non-200 response.
    """
    endpoint = f"https://api.twitter.com/2/tweets/search/recent?query={query}&{tweet_fields}"
    auth_headers = {"Authorization": f"Bearer {bearer_token}"}
    response = requests.request("GET", endpoint, headers=auth_headers)
    print(response.status_code)
    if response.status_code != 200:
        raise Exception(response.status_code, response.text)
    return response.json()
if __name__ == "__main__":
    # Welcome Messages and Introduction
    print()
    print("Welcome to the Newspaper Scrape Project. \nIn seconds, you will have access to the latest articles "
          "in the technology section of the New York Times. \nIn addition, you will also be able to know whether the "
          "article is positive or negative and the extent of the writer's bias.")
    print()
    # Getting the user input; adding an element of personalization!
    name = input("Enter your name to get started: ")
    # Console Display
    print("Welcome " + name + "! \nYou will now see the latest technology articles in the New York Times...")
    print("Extracting article hyperlinks...")
    time.sleep(2)
    print("Retrieving summaries...")
    print()
    time.sleep(2)
    # Gets all the latest URL's from the NY Times Technology Section. (see news_extract.py for more detail)
    my_url = "https://www.nytimes.com/section/technology"
    content_string = get_content_string(my_url)
    starts, ends = find_occurrences(content_string)
    url_list = get_all_urls(starts, ends, content_string)
    # Gets the article summary and performs sentiment analysis on the chosen URL.
    for url in url_list:
        print("Article URL: " + str(url))
        article_summary = summarize_article(url)
        print("------------------------------------------------")
        time.sleep(7) # Allows user to get through all the text.
    print()
    print("The articles have been successfully extracted!")
    print("In total, we were able to extract " + str(len(url_list)) + " different articles!")
    #Retrieves the latest 10 tweets related to the keyword or phrase entered by the user
    query = input("Enter a keyword for the topic you are interested in from these articles to get their latest tweets: ")
    # Bail out early when any credential placeholder is still empty.
    if not client_key or not client_secret:
        print("You need to fill in CLIENT_KEY and CLIENT_SECRET in secrets.py.")
        exit()
    if not access_token or not access_token_secret:
        print("You need to fill in ACCESS_TOKEN and ACCESS_TOKEN_SECRET in secrets.py.")
        exit()
    CACHE_DICT = open_cache()
    # v1.1 search endpoint used by the cached request helper.
    baseurl = "https://api.twitter.com/1.1/search/tweets.json"
    tweet_fields = "tweet.fields=text"
    count = 10
    make_request_with_cache(baseurl, query, count)
    # For each article URL containing the keyword, show recent tweets about it.
    for url in url_list:
        if query in url:
            tweet_data = search_twitter(query=query, tweet_fields=tweet_fields, bearer_token=bearer_token)
            print(tweet_data)
    print("Thanks for participating, " + name + "!")
|
# https://leetcode.com/problems/binary-tree-preorder-traversal
# Definition for a binary tree node.
class TreeNode:
    """Binary tree node: a value plus optional left/right children."""
    def __init__(self, x):
        # x is the node's payload value; children start empty.
        self.val = x
        self.left = None
        self.right = None
class Solution:
    """Iterative preorder (root, left, right) traversal of a binary tree."""
    def preorderTraversal(self, root):
        """Return the node values of the tree rooted at *root* in preorder.

        Uses an explicit stack instead of recursion; the right child is
        pushed first so the left subtree is visited first.  Returns [] for
        an empty tree.
        """
        if root is None:  # identity check, not == (idiomatic for None)
            return []
        result = []
        stack = [root]
        while stack:
            node = stack.pop()
            result.append(node.val)
            if node.right:
                stack.append(node.right)
            if node.left:
                stack.append(node.left)
        return result
|
#encoding=UTF-8
'''
Anonymous functions:
    lambda arg1, arg2, arg3: expression
Key points:
    - lambda creates a function object with no name
    - the part after the colon must be exactly one EXPRESSION (not a statement)
    - the expression's value is returned implicitly (no explicit return needed)
'''
def sum(x, y):
    """Return x + y.

    NOTE: this deliberately keeps the original name, which shadows the
    built-in ``sum`` -- the calls below depend on it, but a different name
    (e.g. ``add``) would be safer in new code.
    """
    # The unreachable ``pass`` after the return in the previous revision
    # has been removed.
    return x + y
print(sum(10,2))
# The same function written as a lambda.
M = lambda x,y:x+y
print(M(1,2))
age = 20
print('可以yp' if age > 18 else '不可以yp') # conditional expression replaces a traditional if/else statement
# Lambda returning the larger of its two arguments.
result = lambda x,y:x if x>y else y
print(result(10,2))
result = (lambda x,y:x if x>y else y)(12,20) # define the lambda and call it immediately with arguments
print(result)
|
import requests
import re
# Sample pages for manual testing (the real inputs are read from stdin below).
# NOTE(review): url_1 and url_2 are never used.
url_1 = 'https://stepic.org/media/attachments/lesson/24472/sample0.html'
url_2 = 'https://stepic.org/media/attachments/lesson/24472/sample2.html'
# Extracts the href attribute value from <a> tags.
pattern = r'<a\s+(?:[^>]*?\s+)?href="([^"]*)"'
urls = []
answer = 'No'
# Read two URLs A and B; answer "Yes" when B is linked from any page A links to.
for x in range(2):
    urls.append(input().rstrip())
res = requests.get(urls[0])
first_step_text = res.text  # NOTE(review): unused
all_inclusions = re.findall(pattern, res.text)
# print(all_inclusions)
for link in all_inclusions:
    temp_res = requests.get(link)
    links_in_str = re.findall(pattern, temp_res.text)
    # print(links_in_str)
    for link_2 in links_in_str:
        if link_2 == urls[1]:
            answer = 'Yes'
            break  # NOTE(review): breaks only the inner loop; remaining pages are still fetched
print(answer)
'''
https://stepic.org/media/attachments/lesson/24472/sample0.html
https://stepic.org/media/attachments/lesson/24472/sample2.html
Sample Output 1:
Yes
'''
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for generated bridges
"""
import sys
from attr.setters import convert
sys.path.insert(0, '../..')
import pytest
from sim import Simulator, CliArgs, path_join, parent_dir
import corsair
TEST_DIR = parent_dir(__file__)
def gen_bridge(tmpdir, bridge, reset):
    """Generate a <bridge>-to-LocalBus RTL file in *tmpdir*.

    Returns (path_to_generated_verilog, corsair configuration used).
    """
    cfg = corsair.Configuration()
    cfg['lb_bridge']['type'].value = bridge
    cfg['register_reset'].value = reset
    out_path = path_join(tmpdir, '%s2lb.v' % bridge)
    corsair.LbBridgeWriter()(out_path, cfg)
    return (out_path, cfg)
@pytest.fixture()
def simtool():
    """Simulator backend used for every test in this module."""
    return 'modelsim'
@pytest.fixture(params=['apb', 'axil', 'amm', 'spi'])
def bridge(request):
    """Parametrised bridge type; each test runs once per bus protocol."""
    return request.param
@pytest.fixture(params=['sync_pos', 'sync_neg', 'async_pos', 'async_neg'])
def reset(request):
    """Parametrised register reset style (sync/async x pos/neg polarity)."""
    return request.param
def test(tmpdir, bridge, reset, simtool, defines=None, gui=False, pytest_run=True):
    """Generate a <bridge> bridge with the given reset style and simulate its
    testbench.

    defines: optional extra compile-time defines (None -> none).  The previous
    revision used a mutable default ``defines=[]``; replaced with the None
    sentinel idiom.
    pytest_run: when True, assert the simulation passed (pytest mode).
    """
    defines = [] if defines is None else defines
    # create sim
    tb_dir = path_join(TEST_DIR, 'test_lb_bridge')
    beh_dir = path_join(TEST_DIR, 'beh')
    sim = Simulator(name=simtool, gui=gui, cwd=tmpdir)
    sim.incdirs += [tmpdir, tb_dir, beh_dir]
    sim.sources += [path_join(tb_dir, 'tb.sv')]
    sim.sources += beh_dir.glob('*.sv')
    sim.defines += defines
    sim.top = 'tb'
    sim.setup()
    # prepare test: generate the DUT and feed its parameters to the testbench
    dut_src, dut_config = gen_bridge(tmpdir, bridge, reset)
    sim.sources += [dut_src]
    sim.defines += [
        'DUT_DATA_W=%d' % dut_config['data_width'].value,
        'DUT_ADDR_W=%d' % dut_config['address_width'].value,
        'DUT_%s' % bridge.upper(),
        # RESET_ACTIVE=1 for active-positive resets, 0 for active-negative
        'RESET_ACTIVE=%d' % ('pos' in reset),
    ]
    # run sim
    sim.run()
    if pytest_run:
        assert sim.is_passed
if __name__ == '__main__':
    # run script with key -h to see help
    # CLI entry point: dispatch to a test function in this module by name.
    cli = CliArgs(default_test='test')
    cli.args_parser.add_argument('--bridge', default='apb', metavar='<bridge>', dest='bridge',
                                 help="bridge <bridge> to LocalBus; default is 'apb'")
    cli.args_parser.add_argument('--reset', default='sync_pos', metavar='<reset>', dest='reset',
                                 help="reset <reset> for bridge registers; default is 'sync_pos'")
    args = cli.parse()
    try:
        # Look the requested test up by name and run it outside pytest.
        globals()[args.test](tmpdir='work',
                             bridge=args.bridge,
                             reset=args.reset,
                             simtool=args.simtool,
                             gui=args.gui,
                             defines=args.defines,
                             pytest_run=False)
    except KeyError:
        print("There is no test with name '%s'!" % args.test)
|
import json
from subprocess import PIPE,Popen
# Shell out to a Ruby script that dumps the AST of test1.rb as JSON, then
# print the root node type.
# NOTE(review): Python-2 print statement and a hard-coded absolute path --
# this will not run under Python 3 or on another machine as-is.
s = Popen("ruby ast.rb /home/aiyanxu/study/diff++/test/test1.rb", shell=True, stdout=PIPE).stdout.read()
h = json.loads(s)
print h.get('type')
|
from unityagents import UnityEnvironment
import numpy as np
class EnvWrapper():
    """
    Adapter exposing a Unity ML-Agents environment through the familiar
    OpenAI-style reset/step/close interface.
    """
    def __init__(self, filename):
        self.env = UnityEnvironment(file_name=filename)
        self.brain_name = self.env.brain_names[0]
        default_brain = self.env.brains[self.brain_name]
        info = self.env.reset(train_mode=True)[self.brain_name]
        # Action-space size reported by the default brain.
        self.nA = default_brain.vector_action_space_size
        print('Number of actions:', self.nA)
        # number of agents in the environment
        print('Number of agents:', len(info.agents))
        # examine the state space
        first_state = info.vector_observations[0]
        print('States look like:', first_state)
        self.nS = len(first_state)
        print('State space dimension:', self.nS)
    def reset(self):
        """
        Reset the environment and return the initial observations,
        as an OpenAI env would.
        """
        info = self.env.reset(train_mode=True)[self.brain_name]
        return np.array(info.vector_observations)
    def step(self, action):
        """
        Apply *action* and return (next_state, reward, done) arrays.
        When any agent reports done, the environment is reset and the
        fresh initial state is returned instead.
        """
        info = self.env.step(action)[self.brain_name]
        next_state = np.array(info.vector_observations)
        reward = np.array(info.rewards)
        done = np.array(info.local_done)
        if np.any(done):
            next_state = self.reset()
        return next_state, reward, done
    def close(self):
        """Shut down the underlying Unity environment."""
        self.env.close()
import pytest
from werkzeug.exceptions import Forbidden
from flask_allows import Allows, Permission
def test_Permission_provide_ident(app, member, ismember):
    """An explicitly supplied identity is stored on the Permission."""
    Allows(app=app)
    perm = Permission(ismember, identity=member)
    assert perm.identity is member
def test_Permission_as_bool(app, member, always):
    """Truth-testing a Permission runs its requirement with the loaded identity."""
    Allows(app=app, identity_loader=lambda: member)
    perm = Permission(always)
    with app.app_context():
        outcome = bool(perm)
    assert outcome
    assert always.called_with["user"] is member
def test_Permission_bool_doesnt_raise(app, member, never):
    """A failing requirement makes bool() False instead of raising."""
    Allows(app=app, identity_loader=lambda: member)
    perm = Permission(never)
    with app.app_context():
        outcome = bool(perm)
    assert not outcome
    assert never.called_with["user"] is member
def test_Permission_allowed_context(app, member, always):
    """Entering a passing Permission as a context manager succeeds."""
    Allows(app=app, identity_loader=lambda: member)
    perm = Permission(always)
    entered = False
    with app.app_context(), perm:
        entered = True
    assert entered
def test_Permission_forbidden_context(app, member, never):
    """Entering a failing Permission raises 403 Forbidden."""
    Allows(app=app, identity_loader=lambda: member)
    perm = Permission(never)
    with app.app_context():
        with pytest.raises(Forbidden) as excinfo:
            with perm:
                pass
    assert excinfo.value.code == 403
def test_Permission_on_fail(app, member, never):
    """A custom on_fail callback is invoked when the permission is denied."""
    Allows(app=app, identity_loader=lambda: member)
    def record_failure(*args, **kwargs):
        record_failure.failed = True
    record_failure.failed = False
    perm = Permission(never, on_fail=record_failure)
    with app.app_context():
        with pytest.raises(Forbidden):
            with perm:
                pass
    assert record_failure.failed
|
#!/usr/bin/env python3
import argparse
import re
import sys
from typing import List
# TODO extract into utils module
def yes_no_prompt(prompt_msg: str) -> bool:
    """Ask a yes/no question on stdin; plain Enter defaults to yes."""
    # Robustness: tolerate surrounding whitespace and any case, so " Y " and
    # "y" both count as yes (the original only matched exact "Y"/"y"/"").
    return input(prompt_msg + " [Y/n] ").strip().lower() in ("y", "")
# TODO extract into utils module
def is_timecode_line(line: str) -> bool:
    """Return True if *line* starts with an SRT timecode
    ("HH:MM:SS,mmm --> HH:MM:SS,mmm")."""
    # BUG FIX: the annotation promises bool but a Match/None was returned;
    # also use a raw string so "\d" is not an invalid escape sequence.
    timecode_line_pattern = re.compile(
        r"\d\d:\d\d:\d\d,\d\d\d --> \d\d:\d\d:\d\d,\d\d\d\s*")
    return timecode_line_pattern.match(line) is not None
def inject_timecodes(source: List[str], target: List[str]) -> bool:
    """Copy the timecode lines of *source* into *target*, in order, mutating
    *target* in place. Returns False if the counts differ and the user
    declines to continue; True otherwise."""
    src_timecodes = [ln for ln in source if is_timecode_line(ln)]
    tgt_timecodes = [ln for ln in target if is_timecode_line(ln)]
    msg = "The source and target file don't seem to have the same number of text groups. Continue and replace as many timecode lines in the target file as possible?"
    if (len(src_timecodes) != len(tgt_timecodes)
            and not yes_no_prompt(msg)):
        return False
    next_idx = 0
    for pos, line in enumerate(target):
        if is_timecode_line(line):
            target[pos] = src_timecodes[next_idx]
            next_idx += 1
            # Stop once every source timecode has been consumed.
            if next_idx >= len(src_timecodes):
                break
    return True
def main():
    """Read source and target SRT files, inject the source's timecodes into
    the target's text groups, and write the result to the output path."""
    if sys.argv[1:]:
        ap = argparse.ArgumentParser(
            description="Inject timecodes from one srt file to another")
        ap.add_argument("-s", "--source",
                        help="source file")
        ap.add_argument("-t", "--target",
                        help="target file")
        ap.add_argument("-o", "--output",
                        required=False,
                        help="output file (if not given, overwrites the target)")
        args = ap.parse_args()
        source_path = args.source
        target_path = args.target
        # BUG FIX: the lines written below are the (modified) TARGET lines;
        # defaulting the output to source_path destroyed the source file.
        out_path = args.output if args.output else target_path
    else:  # ask for input interactively
        source_path = input('source file: ')
        target_path = input('target file: ')
        out_path = (input('output file (hit Enter to overwrite the original): ')
                    or target_path)
    with open(source_path) as source:
        source_lines = source.readlines()
    with open(target_path) as target:
        target_lines = target.readlines()
    # Only write if the injection was not aborted by the user.
    if inject_timecodes(source_lines, target_lines):
        with open(out_path, 'w') as out:
            out.writelines(target_lines)
# Script entry point.
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
from rest_framework import serializers
from . import models
class JobSerializer(serializers.ModelSerializer):
    """Serialize Job instances; `state` is server-controlled and read-only."""
    class Meta:
        model = models.Job
        # BUG FIX: DRF >= 3.3 requires an explicit `fields` or `exclude` on
        # ModelSerializer.Meta; without it the serializer raises an
        # AssertionError the first time it is instantiated.
        fields = '__all__'
        read_only_fields = ('state',)
class ImageInfoSerializer(serializers.ModelSerializer):
    """Serialize ImageInfo rows; the back-reference to the owning job is hidden."""
    # NOTE(review): explicitly declaring these fields overrides the
    # model-derived ones (making them required and dropping model validators)
    # — confirm that is intended.
    width = serializers.IntegerField()
    height = serializers.IntegerField()
    size = serializers.IntegerField()
    filename = serializers.CharField()
    path = serializers.CharField()
    mimetype = serializers.CharField()
    class Meta:
        model = models.ImageInfo
        exclude = ('job', )
|
import sys
import os
from PyQt5 import Qt
from settingsController import SettingsController
from gitController import GitController
from projectController import ProjectController
def main(argv):
    """Load preferences, decrypt the stored password, and — when a project
    name is passed as argv[1] — create a git repository for it."""
    settings = SettingsController()
    prefs = settings.loadPrefs()
    prefs['credentials']['password'] = settings.decrypt(
        prefs['credentials']['password'])
    if len(argv) > 1 and argv[1] is not None:
        GitController().create_repo(prefs, argv[1])
if __name__ == "__main__":
    app = Qt.QApplication(sys.argv)
    # No project name on the command line: ask for one via the project dialog.
    if len(sys.argv) < 2:
        project_dialog = ProjectController()
        project_dialog.exec_()
        sys.argv.append(project_dialog.getProjectName())
    main(sys.argv)
|
#dp[i] = min(dp[i-1], dp[i-2]) + cost[i-2]
#dp[i] is the minimum cost given that stair i must be stepped on;
#the answer is the cheaper of finishing from the last or second-to-last stair.
from typing import List
class Solution:
    def minCostClimbingStairs(self, cost: List[int]) -> int:
        """Minimum total cost to climb past the last stair, starting from
        step 0 or 1 and paying cost[i] when leaving stair i (LeetCode 746).
        Rolling two-variable DP: O(n) time, O(1) space."""
        below, two_below = 0, 0
        for step_cost in cost:
            below, two_below = min(below, two_below) + step_cost, below
        return min(below, two_below)
|
import numpy as np
# initialize parameters of LDA model
#
# input:
# - num_words: number of words
# - num_docs: number of documents
# - num_topics: number of topics
#
# output (tuple of length 2):
# [0] (array shape(num_topics, num_docs)): topic distribution for each doc
# [1] (array shape(num_words, num_topics)): word distribution for each topic
def init_params(num_words, num_docs, num_topics, alpha=1., beta=1.):
    """Draw initial LDA parameters from symmetric Dirichlet priors.

    Generalization: the previously hard-coded concentration constants are now
    the keyword parameters alpha (topic prior) and beta (word prior), with
    defaults matching the old behavior.

    Returns a tuple:
      [0] log topic distribution per document, shape (num_topics, num_docs)
      [1] log word distribution per topic, shape (num_words, num_topics)
    """
    log_p = np.zeros((num_topics, num_docs))
    log_theta = np.zeros((num_words, num_topics))
    for d in range(num_docs):
        log_p[:, d] = np.log(np.random.dirichlet(alpha * np.ones(num_topics)))
    for t in range(num_topics):
        log_theta[:, t] = np.log(np.random.dirichlet(beta * np.ones(num_words)))
    return log_p, log_theta
# compute topic probability for each word/doc
#
# Note: overwrites the topic_probs parameter
#
# input:
# - topic_probs (array shape (num_words, num_docs, num_topics)): current topic probabilities for each word/doc
# - x (array shape (num_words, num_docs)): word/doc occurrence counts
# - log_p (array shape(num_topic, num_docs)): topic distribution for each doc (in log space)
# - log_theta (array shape(num_words, num_topics)): word distribution for each topic (in log space)
def compute_topic_probs(topic_probs, x, log_p, log_theta):
    """Overwrite topic_probs[w, d, :] for every nonzero (w, d) cell of x with
    the elementwise product of the doc's log topic vector and the word's log
    theta vector. Cells where x is zero are left untouched."""
    # NOTE(review): the two log-vectors are multiplied, not added — a joint
    # log-probability would normally be log_p + log_theta; confirm intent.
    for word_idx, doc_idx in zip(*x.nonzero()):
        topic_probs[word_idx, doc_idx, :] = log_p[:, doc_idx] * log_theta[word_idx, :]
# reassign word occurrences across topics
#
# Note: this should rewrite delta matrix
#
# input:
# - delta (array (num_words, num_docs, num_topics)): current sample of word/doc/topic counts
# - x (array (num_words, num_docs)): word/doc occurrence count matrix
# - topic_probs (array (num_words, num_docs, num_topics)): topic probability for each word/doc
#
# output:
# NONE - overwrites delta parameter
def reassign_words(delta, x, topic_probs):
    """Overwrite delta[w, d, :] for every nonzero (w, d) cell of x with the
    cell's occurrence count spread across topics by topic_probs[w, d, :]."""
    words, docs = x.nonzero()
    for doc_idx, word_idx in zip(docs, words):
        delta[word_idx, doc_idx, :] = x[word_idx, doc_idx] * topic_probs[word_idx, doc_idx, :]
# resample topic distributions for each document
#
# input:
# - delta: sample of word/doc/topic counts
# - alpha: parameter of the Dirichlet prior
#
# output:
# (array shape (num_topics, num_docs)): topic distribution for each document
def resample_p(delta, alpha):
    """Resample the per-document topic distribution (in log space) from a
    Dirichlet posterior with prior concentration *alpha*.

    Returns an array of shape (num_topics, num_docs).
    """
    _, num_docs, num_topics = delta.shape
    log_p = np.zeros((num_topics, num_docs))
    # Hoisted loop invariants: the per-document topic counts and the prior
    # vector do not change inside the loop (the original recomputed
    # np.sum(delta, axis=0) for every document).
    doc_topic_counts = np.sum(delta, axis=0)  # shape (num_docs, num_topics)
    prior = alpha * np.ones(num_topics)
    for d in range(num_docs):
        log_p[:, d] = np.log(np.random.dirichlet(doc_topic_counts[d] + prior))
    return log_p
# resample word distributions for each topic
#
# input:
# - delta: sample of word/doc/topic counts
# - beta: parameter of the Dirichlet prior
#
# output:
# (matrix shape (num_words, num_topics)): sample of word distribution for each topic
def resample_theta(delta, beta):
    """Resample the per-topic word distribution (in log space) from a
    Dirichlet posterior with prior concentration *beta*.

    Returns an array of shape (num_words, num_topics).
    """
    num_words, _, num_topics = delta.shape
    log_theta = np.zeros((num_words, num_topics))
    # Hoisted loop invariants: the per-topic word counts and the prior vector
    # (the original recomputed and transposed np.sum(delta, axis=1) per topic).
    topic_word_counts = np.sum(delta, axis=1).T  # (num_topics, num_words)
    prior = beta * np.ones(num_words)
    for t in range(num_topics):
        log_theta[:, t] = np.log(np.random.dirichlet(topic_word_counts[t] + prior))
    return log_theta
# Gibbs sampler for the LDA topic model
#
# input
# - x (array shape (num_words, num_docs)): number of word occurrences per document
# - num_topics (int): number of topics in LDA model
# - num_rounds (int): number of total rounds of sampling
# - burnin_fraction (float): fraction of sampling rounds used for burn in
# - alpha (float): parameter in Dirichlet prior for p (topic distirbution per document)
# - beta (float): parameter in Dirichlet prior of theta (word distribution per topic)
# - verbose (bool): print iteration count
#
# output (tuple of length 2):
# [0] (array shape (num_topics, num_docs)): topic distribution for each document
# [1] (array shape (num_words, num_topics)): word distribution for each topic
def lda_gibbs(x, num_topics=8, num_rounds=200, burnin_fraction=.2, alpha=1., beta=1., verbose=False):
    """Gibbs sampler for the LDA topic model.

    Parameters:
      x: (num_words, num_docs) word occurrence counts per document
      num_topics: number of topics
      num_rounds: total sampling rounds
      burnin_fraction: fraction of rounds discarded as burn-in
      alpha: Dirichlet prior parameter for p (topics per document)
      beta: Dirichlet prior parameter for theta (words per topic)
      verbose: print progress every 10 iterations

    Returns (p_hat, theta_hat):
      p_hat: (num_topics, num_docs) topic distribution per document
      theta_hat: (num_words, num_topics) word distribution per topic
    """
    num_words, num_docs = x.shape
    # Figure out how many post-burn-in samples contribute to the average.
    num_burnin = int(num_rounds * burnin_fraction)
    num_samples = num_rounds - num_burnin
    # Initialize parameters from the priors.
    log_p, log_theta = init_params(num_words, num_docs, num_topics)
    # Word/doc/topic probabilities, count samples, and their running sum.
    topic_probs = np.zeros((num_words, num_docs, num_topics))
    delta = np.zeros((num_words, num_docs, num_topics))
    delta_sum = np.zeros((num_words, num_docs, num_topics))
    for i in range(num_rounds):
        # Update word/doc/topic probabilities (overwrites topic_probs).
        compute_topic_probs(topic_probs, x, log_p, log_theta)
        # Reassign word occurrences to topics (overwrites delta).
        reassign_words(delta, x, topic_probs)
        # BUG FIX: was `i > num_burnin`, which accumulated only
        # num_samples - 1 samples while still dividing by num_samples below.
        if i >= num_burnin:
            delta_sum += delta
        # Resample topic distributions for each document.
        log_p = resample_p(delta, alpha)
        # BUG FIX: theta's Dirichlet prior parameter is beta, not alpha
        # (the original passed alpha, silently ignoring the beta argument).
        log_theta = resample_theta(delta, beta)
        if verbose and i % 10 == 0:
            print("Iteration {}".format(i))
    # Mean word/doc/topic counts over the retained samples.
    delta_hat = delta_sum / num_samples
    # Final topic distributions for each document.
    p_hat = np.zeros((num_topics, num_docs))
    for d in range(num_docs):
        numer = np.sum(delta_hat, axis=0)[d] + (alpha * np.ones(num_topics))
        # NOTE(review): a Dirichlet posterior mean would normalise by
        # (total + num_topics * alpha); only + alpha is added here — confirm.
        denom = np.transpose(np.sum(np.sum(delta_hat, axis=0), axis=1))[d] + alpha
        p_hat[:, d] = numer / denom
    # Final word distributions for each topic.
    theta_hat = np.zeros((num_words, num_topics))
    for t in range(num_topics):
        numer = np.transpose(np.sum(delta_hat, axis=1))[t] + (beta * np.ones(num_words))
        denom = np.sum(np.sum(delta_hat, axis=1), axis=0)[t] + beta
        theta_hat[:, t] = numer / denom
    return p_hat, theta_hat
import os, sys, pygame
BASE_DIR = os.path.abspath('')
IMAGE_PATH = 'assets/1x/bomb50.png'
class Enemy(pygame.sprite.Sprite):
    """
    Bomb enemy sprite that drifts horizontally across the screen.

    # use os.path.join to load files from subdirectories for portability
    # image.load returns a surface object
    """
    def __init__(self, screen_dims):
        # BUG FIX: pygame requires Sprite.__init__ to run in subclasses,
        # otherwise Group.add()/kill() and collision helpers break.
        super().__init__()
        self.surface = pygame.image.load(os.path.join(BASE_DIR, IMAGE_PATH))
        # NOTE(review): pygame's Group.draw expects the attribute `image`,
        # not `surface` — presumably drawing is done manually; confirm.
        self.rect = self.surface.get_rect(center=(25, screen_dims[1] / 1.5))
        self.speed = [4, 0]  # pixels per frame: rightward drift only

    def move(self):
        """Advance the sprite's rect by its per-frame speed vector."""
        self.rect = self.rect.move(self.speed)
#!/usr/bin/env python
from glob import glob
import os
from setuptools import setup
PACKAGE_NAME = "osm_cartography"
# Install prefix for this package's data files (share/osm_cartography).
SHARE_DIR = os.path.join("share", PACKAGE_NAME)
# ament/colcon (ROS 2) Python package manifest.
setup(
    name=PACKAGE_NAME,
    version='0.2.5',
    packages=["osm_cartography", "osm_cartography.nodes"],
    # Ament resource index registration plus launch/config/test/rviz data.
    data_files=[
        ('share/ament_index/resource_index/packages', ['resource/' + PACKAGE_NAME]),
        ('share/' + PACKAGE_NAME, ['package.xml']),
        (os.path.join(SHARE_DIR, "launch"), glob(os.path.join("launch", "*.launch.py"))),
        (os.path.join(SHARE_DIR, "config"), glob(os.path.join("config", "*.yaml"))),
        (os.path.join(SHARE_DIR, "tests"), glob(os.path.join("tests", "*.osm"))),
        (os.path.join(SHARE_DIR, "rviz"), glob(os.path.join("rviz", "*.rviz")))
    ],
    package_dir={'': 'src'},
    py_modules=[],
    zip_safe=True,
    install_requires=['setuptools',
                      'pyproj'],
    author="Jack O'Quin",
    maintainer="Jack O'Quin, Bence Magyar",
    keywords=['ROS2'],
    description='Geographic mapping using Open Street Map data.',
    license='BSD',
    tests_require=['pytest'],
    # Node executables exposed on the PATH after installation.
    entry_points={
        'console_scripts': ['osm_client = osm_cartography.nodes.osm_client:main',
                            'osm_server = osm_cartography.nodes.osm_server:main',
                            'viz_osm = osm_cartography.nodes.viz_osm:main'
                            ],
    }
)
|
from PIL import Image
import re
src_x = 800
src_y = 800
img = Image.new('RGB', (src_x, src_y), 'black')
pixels = img.load()
color = (255, 255, 255)
# Load the .obj file, whose "v x y z" lines carry vertex coordinates.
with open('model.obj', 'r') as f:
    lines = f.read().splitlines()
for line in lines:
    # BUG FIX: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # only a wrong field count (unpack failure) should skip the line.
    try:
        v, x, y, z = line.split()
    except ValueError:
        continue
    # Plot only vertex records, scaled and offset to fit the canvas.
    # NOTE(review): the +10/*35 scaling is model-specific — confirm it still
    # fits other .obj inputs.
    if v == 'v':
        x = int((float(x) + 10) * 35)
        y = src_y - int((float(y) + 1) * 35)
        pixels[x, y] = color
img.show()
# MIT License
#
# Copyright (c) 2021 The Anvil Extras project team members listed at
# https://github.com/anvilistas/anvil-extras/graphs/contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# This software is published at https://github.com/anvilistas/anvil-extras
from .. import session
from ._anvil_designer import MessagePillTemplate
__version__ = "1.1.0"
css = """
.anvil-role-message-pill {
padding-left: 1em;
border-radius: 2em;
}
"""
session.style_injector.inject(css)
class MessagePill(MessagePillTemplate):
    """Pill-shaped status label; its colours and icon follow the message level."""

    backgrounds = {
        "info": "#bde5f8",
        "success": "#dff2bf",
        "warning": "#feefb3",
        "error": "#ffd2d2",
    }
    foregrounds = {
        "info": "#00529b",
        "success": "#4f8a10",
        "warning": "#9f6000",
        "error": "#d8000c",
    }
    icons = {
        "info": "fa:info-circle",
        "success": "fa:check",
        "warning": "fa:warning",
        "error": "fa:times-circle",
    }

    def __init__(self, **properties):
        self.label.role = "message-pill"
        self.init_components(**properties)

    @property
    def level(self):
        """One of 'info', 'success', 'warning', 'error'."""
        return self.item["level"]

    @level.setter
    def level(self, value):
        if value not in ("info", "success", "warning", "error"):
            raise ValueError(
                "level must be one of 'info', 'success', 'warning' or 'error'"
            )
        self.item["level"] = value
        self.label.background = self.backgrounds[value]
        self.label.foreground = self.foregrounds[value]
        self.label.icon = self.icons[value]

    @property
    def message(self):
        """The text shown inside the pill."""
        return self.item["message"]

    @message.setter
    def message(self, value):
        self.item["message"] = value
        self.label.text = value
|
import re
# s = 'A8C3721D86'
#
#
# def convert(value):
# matched = value.group()
# if int(matched) >= 6:
# return '9'
# else:
# return '0'
#
#
# r = re.sub('\d', convert, s)
# print(r)
s = 'A83C72D1D8E67'


def convert(value):
    """Substitution callback: a digit run becomes '100' if >= 50, else '0'."""
    matched = value.group()
    return '100' if int(matched) >= 50 else '0'


r = re.sub(r'\d+', convert, s)
print(r)
|
from tdasm import Runtime
import renmas3.osl
from renmas3.osl import create_shader, create_argument, create_struct_argument
from renmas3.core import Vector3
# Demo/driver script: build two shaders with typed arguments and run them.
# Scalar, float, vector and struct shader arguments.
a1 = create_argument('p1', 2)
a2 = create_argument('p2', 1)
a3 = create_argument('p3', 5.0)
#a4 = create_argument('p4', Vector3(2,5,6))
a4 = create_argument('p4', [2,5,6])
a5 = create_struct_argument(typ="point", name="ps", fields=[('x', 5.5), ('y', (4,5,6))])
a6 = create_argument('p6', 99)
a7 = create_argument('p7', 8.3)
a8 = create_argument('p8', (4,5,6))
args = {a1.name:a1, a2.name: a2, a3.name: a3, a4.name: a4, a5.name: a5}
# Shader source: mutate the struct fields and copy the input arg into p2.
code = """
ps.x = 8.33
ps.y = (9,9,8)
p2 = p6
"""
# func=True: compiled as a callable function taking a6 ('p6') as input.
shader = create_shader("test", code, args, input_args=[a6], func=True)
runtimes = [Runtime()]
shader.prepare(runtimes)
m1 = create_argument('m1', 2)
# Second shader calls the first one as a function with a = 44.
code2 = """
a = 44
test(a)
"""
args2 = {m1.name:m1}
shader2 = create_shader("test2", code2, args2, shaders=[shader], func=False)
shader2.prepare(runtimes)
#shader.execute()
# Executing shader2 runs test(44), so p2 should end up as 44.
shader2.execute()
print(shader.get_value('p1'))
print(shader.get_value('p2'))
print(shader.get_value('p3'))
print(shader.get_value('p4'))
print(shader.get_value('ps.x'))
print(shader.get_value('ps.y'))
|
### MODELS
import numpy as np
from keras.layers import Dense, GlobalMaxPool2D, BatchNormalization, Dropout
from keras.applications import ResNet50, VGG16
from keras.models import Model, Sequential
def R50_mod(seed=0):
    """
    Build a classifier on top of pretrained ResNet50: layers before index 86
    are frozen; the "activation_25" output is global-max-pooled, batch
    normalised, passed through a 1024-unit ReLU dense layer (with dropout)
    and a 5-way softmax head.
    """
    np.random.seed(seed)
    base = ResNet50(weights="imagenet", include_top=False)
    # Freeze the bottom of the network, fine-tune the rest.
    for idx, layer in enumerate(base.layers):
        layer.trainable = idx >= 86
    features = base.get_layer("activation_25").output
    net = GlobalMaxPool2D()(features)
    net = BatchNormalization()(net)
    net = Dropout(.5)(net)
    net = Dense(1024, activation='relu', name="dense_1024")(net)
    net = BatchNormalization()(net)
    net = Dropout(.5)(net)
    outputs = Dense(5, activation='softmax')(net)
    return Model(inputs=base.input, outputs=outputs)
def VGG_mod(seed=0):
    """
    Build a classifier on top of pretrained VGG16: layers before index 13
    are frozen; the "block4_conv3" output is global-max-pooled, batch
    normalised, passed through a 1024-unit ReLU dense layer (with dropout)
    and a 5-way softmax head.
    """
    np.random.seed(seed)
    base = VGG16(weights="imagenet", include_top=False)
    # Freeze the bottom of the network, fine-tune the rest.
    for idx, layer in enumerate(base.layers):
        layer.trainable = idx >= 13
    net = GlobalMaxPool2D()(base.get_layer("block4_conv3").output)
    net = BatchNormalization()(net)
    net = Dropout(.5)(net)
    net = Dense(1024, activation='relu', name="dense_1024")(net)
    net = BatchNormalization()(net)
    net = Dropout(.5)(net)
    outputs = Dense(5, activation='softmax')(net)
    return Model(inputs=base.input, outputs=outputs)
# -*- coding: utf-8 -*-
# Copyright notice
# --------------------------------------------------------------------
# Copyright (C) 2019 Deltares
# Rob Rikken
# Rob.Rikken@deltares.nl
# #
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# #
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# #
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------
# #
# This tool is part of <a href="http://www.OpenEarth.eu">OpenEarthTools</a>.
# OpenEarthTools is an online collaboration to share and manage data and
# programming tools in an open source, version controlled environment.
# Sign up to receive regular updates of this function, and to contribute
# your own tools.
# #
#
from threading import Thread
from typing import Dict, List
from detectors.Detector import Detector
# This class holds the functions to detect errors in the weirs.
class StuwDetector(Detector):
    """Detector that flags suspicious weir ('stuw') records in the database."""
    # Maximum distance (meters) a weir may lie from a hydroobject; set in __init__.
    __hydroobject_distance = None
    # Suggestion payloads (numeric code + human-readable text); set in __init__.
    __has_hydroobject_suggestion = {
        'code': None,
        'suggestion': None
    }
    __flow_height_difference_suggestion = {
        'code': None,
        'suggestion': None
    }
    def __init__(self):
        super().__init__()
        self.set_table_name('stuw')
        self.set_hydroobject_distance(1)
        self.set_has_hydroobject_suggestion(
            2101,
            'The weir needs to lie within a distance of ' + str(self.get_hydroobject_distance()) + ' meters of a '
            'hydroobject.'
        )
        self.set_flow_height_difference_suggestion(
            2102,
            'The lowest flow height needs to be lower than the highest flow height.'
        )
    def get_has_hydroobject_suggestion(self) -> Dict:
        return self.__has_hydroobject_suggestion
    def set_has_hydroobject_suggestion(self, code: int, suggestion: str) -> None:
        self.__has_hydroobject_suggestion = {
            'code': code,
            'suggestion': suggestion
        }
    def get_flow_height_difference_suggestion(self) -> Dict:
        return self.__flow_height_difference_suggestion
    def set_flow_height_difference_suggestion(self, code: int, suggestion: str) -> None:
        self.__flow_height_difference_suggestion = {
            'code': code,
            'suggestion': suggestion
        }
    def get_hydroobject_distance(self) -> float:
        return self.__hydroobject_distance
    def set_hydroobject_distance(self, object_range) -> None:
        self.__hydroobject_distance = object_range
    # This functions checks if a weir is close to hydroobject.
    def check_hydroobject_distance(self) -> None:
        # Get the connection and select the weirs that do not lie close to the hydroobjects
        # NOTE(review): the distance is interpolated with str.format; safe as
        # long as it remains the internally-set integer above, but confirm it
        # can never come from user input (SQL injection risk).
        with self.get_datasource().get_connection() as connection:
            results = connection.execute('''
            SELECT DISTINCT *
            FROM stuw
            LEFT JOIN hydroobject ON st_dwithin(stuw.geometriepunt, hydroobject.geometrielijn, {distance})
            WHERE hydroobject.hydroobjectid IS NULL
            '''.format(distance=self.get_hydroobject_distance()))
            # Insert the weirs into the suggestion table for the weirs.
            self.insert_suggestions_records(results, self.get_has_hydroobject_suggestion())
    def check_flow_height_difference(self) -> None:
        # Flag weirs whose lowest flow height exceeds the highest flow height.
        with self.get_datasource().get_connection() as connection:
            erroneous_heights = connection.execute('''
            SELECT DISTINCT stuw.*
            FROM stuw
            WHERE laagstedoorstroomhoogte > hoogstedoorstroomhoogte
            ''')
            self.insert_suggestions_records(erroneous_heights, self.get_flow_height_difference_suggestion())
    def build_threads(self) -> List[Thread]:
        # One worker thread per independent check.
        hydroobject_thread = Thread(target=self.check_hydroobject_distance)
        flow_height_thread = Thread(target=self.check_flow_height_difference)
        return [hydroobject_thread, flow_height_thread]
|
from flask import Flask, jsonify
from app.newspaper import Newspaper
from os.path import abspath
import json
import time
app = Flask(__name__)
languages = ['pt', 'en']
categories = ['last_news', 'sports', 'economy', 'health', 'tech']
# BUG FIX: the original called `f.close` without parentheses, which only
# references the method and never closes the handle; `with` closes it even
# if json parsing raises.
with open(abspath('./app/sources.json'), 'r') as f:
    sources = json.loads(f.read())
# One Newspaper scraper per configured source, keyed "<language>_<category>".
srcs = {}
for src in sources['sources']:
    print(src)
    srcs[src['language'] + '_' + src['category']] = Newspaper(src['language'],
                                                              src['category'],
                                                              src['link'],
                                                              src['keyword'])
@app.route('/')
def hello_world():
    # Simple liveness check endpoint.
    return 'Hello, World!'
@app.route('/api/news/languages')
def get_languages():
    # Supported language codes as a JSON array.
    return jsonify(languages)
@app.route('/api/news/categories')
def get_categories():
    """Return the category list as JSON: [{'category': name}, ...]."""
    return jsonify([{'category': name} for name in categories])
@app.route('/api/news/sources_generate')
def generate_json():
    """Create (touch) an empty cache file for every language/category pair."""
    for lang in languages:
        for cat in categories:
            # `with` guarantees each handle is closed.
            with open('./app/news/' + lang + '_' + cat + '.json', 'w+'):
                pass
    # BUG FIX: a Flask view must return a response; returning None raised
    # "View function did not return a response" (HTTP 500).
    return 'Generated!'
@app.route('/api/news/<language>/<category>')
def top_news(language, category):
    # Serve the cached articles of one language/category scraper.
    # NOTE(review): an unknown pair raises KeyError (HTTP 500) — confirm
    # whether a 404 would be preferable.
    return srcs[language + "_" + category].getArticles()
@app.route('/api/news/update_news/<language>/<category>')
def update_news(language, category):
    # Force one language/category scraper to re-fetch its articles.
    srcs[language + "_" + category].refreshArticles()
    return('Updated!')
@app.route('/api/news/update_all_news/<passwd>')
def update_all_news(passwd):
    """Refresh every configured source once, then report completion."""
    # NOTE(review): a hard-coded password in the URL path is not real
    # protection — confirm whether this endpoint belongs in production.
    if passwd != 'updateNews':
        return('Sorry, wrong password!')
    # BUG FIX: the original looped forever with time.sleep(600) inside the
    # request handler, so the HTTP request never returned and a worker was
    # blocked permanently. Periodic refresh belongs in a scheduler/cron;
    # this handler now does a single refresh pass and returns.
    for src in sources['sources']:
        srcs[src['language'] + '_' + src['category']].refreshArticles()
    print('Articles up-to-date!')
    return('Articles up-to-date!')
|
from config import Key
from pong_game.paddle import State as PState
from pong_game.pong_game import PongGame, State
from system_manager import SystemManager
sys_manager = SystemManager.get_instance()
class SoloMode(PongGame):
    """Single-player pong: one action (key press + release) is allowed per
    ball approach; acting a second time before returning the ball ends the
    game, and a successful return scores and shrinks the paddle."""
    # Score displayed at the start of a game.
    init_score = 0
    def __init__(self):
        super().__init__()
        self.update_score()
        # Number of key releases since the paddle last returned the ball.
        self.action_count = 0
    # <--- SETUP --->
    # <--- GAME FLOW --->
    def loop_playing(self):
        # One frame of the `playing` state.
        ball_dir = self.ball.direction()
        # Only the paddle on the ball's side is active; its state encodes
        # whether this approach's single allowed move was already used.
        if self.action_count == 0:
            self.paddles[ball_dir.value].set_state(PState.moving)
            self.paddles[ball_dir.opposite().value].set_state(PState.inactive)
        else:
            self.paddles[ball_dir.value].set_state(PState.done_moving)
            self.paddles[ball_dir.opposite().value].set_state(PState.inactive)
        if self.ball_hits_boundary():
            self.ball.reflect_y()
        elif self.ball_hits_paddle():
            paddle = self.paddles[ball_dir.value]
            self.ball.reflect_x(paddle.y(), paddle.rect().height())
            # Completed move: award points, shrink the paddle, and reset the
            # action budget for the next approach.
            if paddle.state == PState.done_moving:
                self.score += 1000
                paddle.reduce_height()
                self.action_count = 0
                self.update_score()
        elif self.ball_missed() is not None:
            self.game_over()
        else:
            self.ball.move()
        self.paddles["left"].move()
        self.paddles["right"].move()
    # <--- ACTION METHODS --->
    def reset(self):
        # Restore paddles and the per-approach action budget.
        super(SoloMode, self).reset()
        self.action_count = 0
        self.paddles["right"].reset()
        self.paddles["left"].reset()
    # <--- HELPER METHODS --->
    def update_score(self):
        # Push the current score into the on-screen score text box.
        message = str(self.score)
        self.text_boxes["score"].set_text(message)
    def handle_key_press(self, event):
        key = event.key()
        if self.state == State.playing:
            ball_dir = self.ball.direction()
            if key == Key.solo_up:
                sys_manager.current_action = "arm/{}".format(ball_dir)
                # A second action during one approach ends the game.
                if self.action_count > 0:
                    self.game_over()
                self.paddles[ball_dir.value].start_up()
            elif key == Key.solo_down:
                sys_manager.current_action = "foot/{}".format(ball_dir)
                if self.action_count > 0:
                    self.game_over()
                self.paddles[ball_dir.value].start_down()
        elif self.state == State.game_over:
            if key == Key.replay:
                self.start()
            elif key == Key.exit:
                self.finished_signal.emit()
    def handle_key_release(self, event):
        key = event.key()
        if self.state == State.playing:
            # Releasing a movement key consumes one action and stops both paddles.
            if key in [Key.solo_up, Key.solo_down]:
                sys_manager.current_action = None
                self.action_count += 1
                self.paddles["left"].stop()
                self.paddles["right"].stop()
|
from django.shortcuts import render, redirect, HttpResponse
from django.contrib import messages
from django.contrib.auth.forms import UserCreationForm
import json
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
from Apps.Notes import views as views_notas
# Create your views here.
def index_login(request):
    """Home page: users with an active session go straight to their notes."""
    if 'usuario' in request.session:
        return redirect(views_notas.index_notes)
    return render(request, "home.html")
def RegistroUsuarios(request):
    # Render the user registration page.
    return render(request, "registro.html")
def iniciarSesion(request):
    """Authenticate the posted credentials; respond "1" on success or
    "Datos incorrectos" on any failure (AJAX-style plain-text contract)."""
    # BUG FIX: direct indexing (request.POST['usuario']) raises a 500 when a
    # field is missing; .get() lets it fall through to the normal failure path.
    usuario = request.POST.get('usuario')
    contraseña = request.POST.get('contraseña')
    user = authenticate(username=usuario, password=contraseña)
    if user is not None and user.is_active:
        login(request, user)
        request.session['usuario'] = usuario
        return HttpResponse("1")
    # Bad credentials and inactive account produce the same message.
    return HttpResponse("Datos incorrectos")
def registrarUsuario(request):
    """Register a new user from the JSON array posted as "datosUsuario[]"
    ([usuario, contraseña, nombre, apellido, correo]); returns a plain-text
    status message."""
    datos = json.loads(request.POST.get("datosUsuario[]"))
    usuario = datos[0]
    contraseña = datos[1]
    nombre = datos[2]
    apellido = datos[3]
    correo = datos[4]
    # BUG FIX: the original used User.objects.get() in a try/except to test
    # for duplicates, which raises MultipleObjectsReturned when several
    # accounts share an email and swallowed every other error. .exists() is
    # the intended check.
    if User.objects.filter(email=correo).exists():
        return HttpResponse("Registro fallido: ya existe un usuario con ese correo")
    try:
        user = User.objects.create_user(usuario, correo, contraseña)
        user.first_name = nombre
        user.last_name = apellido
        user.save()
        respuesta = "Registro exitoso"
    except Exception as e:
        # Most likely a duplicate username (unique constraint).
        print(e)
        respuesta = "Registro fallido: el usuario ya existe"
    return HttpResponse(respuesta)
def cerrar_sesion(request):
    """Drop the session marker and redirect to the home page."""
    # BUG FIX: `del request.session['usuario']` raised KeyError when the key
    # was already gone (e.g. expired session); pop() with a default is safe.
    print(request.session.get('usuario'))
    request.session.pop('usuario', None)
    return redirect('/')
#=============================================================================
#
# ALLSorts v2 - Feature Creation Stage
# Author: Breon Schmidt
# License: MIT
#
#=============================================================================
''' --------------------------------------------------------------------------
Imports
---------------------------------------------------------------------------'''
''' Internal '''
from ALLSorts.common import message, root_dir
''' External '''
from sklearn.base import BaseEstimator, TransformerMixin
from scipy.ndimage import median_filter
from joblib import Parallel, delayed
import numpy as np
import pandas as pd
import time
''' --------------------------------------------------------------------------
Classes
---------------------------------------------------------------------------'''
class FeatureCreation(BaseEstimator, TransformerMixin):
"""
A class that represents a feature creation stage
...
Attributes
__________
kernel_div : int (5)
# genes in a chromosome / kernel_div is a maximum size of a sliding
window that the median iterative filter uses.
n_jobs: int (1)
Number of concurrent tasks to be run in parallel. Dependent on system.
fusions: list (False)
List of fusions with genes seperated with "_". The log difference will
be taken in order to create a new feature, i.e. "GENE1_GENE2"
Methods
-------
fit(counts, y)
Get the median absolute devation of the training set.
transform(counts)
Create iAMP21_RATIO, chr1:22+X/Y, B_ALL features.
fit_transform(counts, y=False)
Apply fit and then transform.
"""
    def __init__(self, kernel_div=5, n_jobs=1, fusions=False, fusion_feature=False,
                 iamp21_feature=True, chrom_feature=True):
        """Store the feature-creation options (see the class docstring for
        kernel_div, n_jobs and fusions; the *_feature flags toggle the
        corresponding engineered feature groups on or off)."""
        self.kernel_div = kernel_div
        self.n_jobs = n_jobs
        self.fusions = fusions
        self.fusion_feature = fusion_feature
        self.iamp21_feature = iamp21_feature
        self.chrom_feature = chrom_feature
def _loadChrom(self):
chrom_ref_path = str(root_dir())+"/data/chrom_refs.txt"
self.chrom_ref = pd.read_csv(chrom_ref_path, sep="\t", header=None)
self.chrom_ref.drop([1, 2, 5, 6, 7], axis=1, inplace=True)
self.chrom_ref.columns = ["chrom", "start", "end", "meta"]
self.chrom_ref["length"] = (self.chrom_ref["start"] -
self.chrom_ref["end"]).abs()
# Extract gene names
gene_names = []
for line, meta in self.chrom_ref["meta"].iteritems():
gene_name = meta.split(";")[2].split('"')[1]
gene_names.append(gene_name)
self.chrom_ref["gene"] = gene_names
self.chrom_ref.index = self.chrom_ref["chrom"]
self.chrom_ref.drop(["chrom", "meta"], axis=1, inplace=True)
self.chrom_ref["start"] = pd.to_numeric(self.chrom_ref["start"])
self.chrom_ref["end"] = pd.to_numeric(self.chrom_ref["end"])
# Create dictionary of genes per chromosome
self.chrom_dict = {}
for chrom, info in self.chrom_ref.iterrows():
if chrom in self.chrom_dict:
self.chrom_dict[chrom].append(info["gene"])
else:
self.chrom_dict[chrom] = [info["gene"]]
self.chroms = list(range(1, 23)) + ["X", "Y"]
def _median_filter(self, sample, size=5):
filtered = median_filter(sample, mode="constant", size=size)
return pd.Series(filtered)
    def _chromSmoothing(self, chrom, counts_norm):
        # Smooth one chromosome's MAD-scaled gene values (rows = samples):
        # clip outliers to the per-sample Tukey fences (1.5 * IQR), then apply
        # a median filter with an iteratively widening kernel.
        chrom_counts = counts_norm.reindex(self.chrom_dict[str(chrom)], axis=1).dropna(axis=1)
        q1 = chrom_counts.quantile(0.25, axis=1)
        q3 = chrom_counts.quantile(0.75, axis=1)
        iqr = q3 - q1
        upper = q3 + iqr * 1.5
        lower = q1 - iqr * 1.5
        # NOTE(review): np.matrix is deprecated in NumPy; np.asarray would be
        # the modern replacement, but the transpose/clip broadcasting below
        # depends on the current shapes — confirm before changing.
        c = np.matrix(chrom_counts)
        c_clip = c.transpose().clip(min=np.array(lower), max=np.array(upper))
        chrom_counts = pd.DataFrame(c_clip.transpose(), columns=chrom_counts.columns, index=chrom_counts.index)
        filt_columns = list(chrom_counts.columns)
        kernel_size = 5
        break_count = 0
        filtered = chrom_counts.copy() # For first iteration
        # Kernel grows ~3x per pass (kept odd) up to len(genes) / kernel_div,
        # with a hard stop after a few passes.
        while kernel_size <= len(filt_columns) / self.kernel_div:
            filtered = filtered.apply(self._median_filter, size=kernel_size, axis=1)
            filtered.columns = filt_columns
            kernel_size = int(kernel_size * 3)
            kernel_size = kernel_size + 1 if kernel_size % 2 == 0 else kernel_size
            if break_count > 3:
                break
            else:
                break_count += 1
        return filtered
    def _mads(self, counts):
        # Scale counts gene-wise: (value - per-gene median of *counts*) /
        # stored per-gene MAD. Genes with a stored MAD of zero are dropped to
        # avoid division by zero.
        # NOTE(review): self.mad is presumably the per-gene median absolute
        # deviation learned during fit (set outside this view) — confirm.
        mad_sub = self.mad.loc[counts.columns]
        mad_sub = mad_sub[mad_sub != 0]
        fcounts = counts[mad_sub.index]
        mads = np.subtract(np.matrix(fcounts), np.median(np.matrix(fcounts), axis=0))
        mads = pd.DataFrame(np.divide(mads, np.array(mad_sub)), index=counts.index, columns=mad_sub.index)
        return mads
    def _smoothSamples(self, counts):
        # MAD-scale the counts, smooth every chromosome (in parallel threads),
        # then reassemble one samples-by-genes frame.
        # Scale mads
        mads = self._mads(counts)
        # Smooth chromosomes
        smooth_chroms = Parallel(n_jobs=self.n_jobs, prefer="threads")(delayed(self._chromSmoothing)
                                                        (chrom, mads)
                                                        for chrom in self.chroms)
        # Aggregate results
        smooth_samples = pd.DataFrame(index=counts.index)
        for smooth_chrom in smooth_chroms:
            smooth_samples = pd.concat([smooth_samples, smooth_chrom], axis=1, join="inner")
        return smooth_samples
def _chromFeatures(self, scounts):
    """Build per-chromosome median features plus summary columns.

    Adds one "chrN" column per chromosome (median over that chromosome's
    genes, or 0.0 when no genes are present), then "med", "chr_high",
    "chr_low" and "chr_abs" summaries.
    """
    chrom_features = pd.DataFrame(index=scounts.index)
    for chrom in self.chroms:
        chrom_ = str(chrom)
        genes = list(set(self.chrom_dict[chrom_]).intersection(
            set(scounts.columns)))
        if len(genes) != 0:
            chrom_features["chr" + chrom_] = list(scounts.loc[:, genes].median(axis=1))
        else:
            # Chromosome absent from the input: neutral value keeps a fixed schema.
            chrom_features["chr" + chrom_] = 0.0
    # For general
    chrom_high = []
    chrom_low = []
    for sample, chrom_meds in chrom_features.iterrows():
        # iloc[:-1] excludes the last chromosome column (chrY) from the sums —
        # presumably to ignore sex-chromosome signal; TODO confirm intent.
        chrom_high.append(chrom_meds.iloc[:-1][chrom_meds.iloc[:-1] > 0].sum())
        chrom_low.append(chrom_meds.iloc[:-1][chrom_meds.iloc[:-1] < 0].sum())
    chrom_features["med"] = chrom_features.median(axis=1)
    chrom_features["chr_high"] = chrom_high
    chrom_features["chr_low"] = chrom_low
    chrom_features["chr_abs"] = chrom_features["chr_high"].abs() + chrom_features["chr_low"].abs()
    return chrom_features
def _iamp21Feature(self, counts):
    """Median MAD-scaled expression over three chromosome-21 start-position
    bins, plus an "IAMP21_ratio" column (bin2 - bin3)."""
    bins = [15000000, 31514667, 43700713]
    bin_medians = pd.DataFrame(index=counts.index)
    chrom21 = self.chrom_ref[self.chrom_ref.index == "21"]
    mads = self._mads(counts)
    for i in range(0, len(bins)):
        # Get counts for genes in region (last bin is open-ended)
        if i == len(bins) - 1:
            bin_ = chrom21[(chrom21["start"] >= bins[i])]
        else:
            bin_ = chrom21[(chrom21["start"] >= bins[i]) &
                           (chrom21["start"] < bins[i + 1])]
        overlap = bin_["gene"].isin(list(mads.columns))
        bin_genes = list(bin_[overlap]["gene"])
        bin_counts = mads.loc[:, bin_genes]
        if bin_counts.shape[1] != 0:  # Input contains genes for this bin of chrom21
            ''' Smooth region '''
            bin_scounts = bin_counts.apply(self._median_filter, size=11, axis=1)
            bin_scounts.columns = bin_counts.columns
            ''' Get region median and add to growing list of features'''
            bin_median = bin_scounts.median(axis=1)
            bin_median.name = "IAMP21_bin" + str(i + 1)
            bin_medians = pd.concat([bin_medians, bin_median],
                                    axis=1,
                                    join="inner")
        else:
            # No genes in this bin: neutral value keeps the column schema fixed.
            bin_medians["IAMP21_bin" + str(i + 1)] = 0.0
    # Ratio of the second to the third bin's median (columns 1 and 2).
    iamp21_ratio = bin_medians.iloc[:, 1].sub(bin_medians.iloc[:, 2])
    bin_medians["IAMP21_ratio"] = iamp21_ratio
    return bin_medians
def _immunoFeature(self, counts):
all_genes = ["CD19", "CD34", "CD22", "DNTT", "CD79A"]
all_immuno = pd.DataFrame(counts[all_genes].sum(axis=1), columns=["B-ALL"], index=counts.index)
return all_immuno
def _fusions(self, counts):
fusions = pd.DataFrame(index=counts.index)
for partners in self.fusions:
gene_1 = partners.split("_")[0]
gene_2 = partners.split("_")[1]
try:
fusions[partners] = counts[gene_1].sub(counts[gene_2])
except:
fusions[partners] = 0.0
return fusions
def _scale(self, counts):
    """Standardise features to zero mean / unit variance, fitted on `counts`."""
    return StandardScaler().fit_transform(counts)
def fit(self, counts, y=False):
    """Load chromosome annotations and store per-gene scaled MADs; returns self.

    The 1.4826 factor makes the MAD a consistent estimator of the standard
    deviation for normally distributed data.
    """
    self._loadChrom()
    deviations = counts.sub(counts.median()).abs()
    self.mad = 1.4826 * deviations.median()
    return self
def transform(self, counts, y=False):
    """Append engineered feature columns to the counts matrix.

    Always adds the immunophenotype feature; fusion, iAMP21 and chromosome
    features are gated by the corresponding self.*_feature flags. All
    concatenations inner-join on the sample index.

    NOTE(review): the derived features are computed from the NaN-filled copy
    (counts_orig) but are concatenated onto the original, possibly
    NaN-containing `counts` — confirm this asymmetry is intended.
    """
    scounts = self._smoothSamples(counts.fillna(0.0))
    counts_orig = counts.fillna(0.0)
    counts = pd.concat([counts,
                        self._immunoFeature(counts_orig)],
                       axis=1, join="inner")
    if self.fusion_feature:
        counts = pd.concat([counts,
                            self._fusions(counts_orig)],
                           join="inner",
                           axis=1)
    if self.iamp21_feature:
        counts = pd.concat([counts,
                            self._iamp21Feature(counts_orig)],
                           join="inner",
                           axis=1)
    if self.chrom_feature:
        counts = pd.concat([counts,
                            self._chromFeatures(scounts)],
                           join="inner",
                           axis=1)
    return counts
def fit_transform(self, counts, y=False):
    """Equivalent to fit(counts, y) followed by transform(counts)."""
    return self.fit(counts, y).transform(counts)
|
# Created by Dayu Wang (dwang@stchas.edu) on 01-30-19.
# Last updated by Dayu Wang (dwang@stchas.edu) on 01-30-19.


def rectangle_area(width, height):
    """Return the area of a width-by-height rectangle."""
    return width * height


def main():
    """Read the rectangle's dimensions from the user and print its area."""
    # Let the user enter the dimensions of the rectangle.
    width = int(input('Width: '))
    height = int(input('Height: '))
    print('The area is:', rectangle_area(width, height))


# Guard so importing this module does not trigger the interactive prompts.
if __name__ == '__main__':
    main()
def div_mod(x, y):
    """Return (remainder, quotient) of integer division of x by y.

    Returns None (implicitly) when either argument is None, preserving the
    original behavior. Uses divmod() instead of `int(x / y)`: float division
    loses precision for large integers and, for negative operands, produced a
    quotient inconsistent with the `%` remainder.
    """
    if x is not None and y is not None:
        # divmod yields a (quotient, remainder) pair consistent with // and %.
        quotient, remainder = divmod(x, y)
        # A comma-separated list of values is a tuple; parentheses optional —
        # this is how a function "returns multiple values".
        return remainder, quotient
# Demonstrations of tuple creation, return values and unpacking.
print((div_mod(20, 8)))  # 4, 2
print(type(div_mod(20, 8)))
# declaring a tuple
t = 20, 8
# invalid syntax. why? (keyword-style assignments are not allowed inside a tuple display)
# a_t = (a = 30, b = 9)
# print(type(a_t))
# unpacking the tuple (t) into the function's positional parameters
print(div_mod(*t))
import os
# we don't care about the first part of the pair; _ is the conventional throwaway name
_, file_name = os.path.split('/home/demo/other_demo/more_demo/202.py')
print(file_name)
# other essentially will be a list containing everything after the 3rd element
first, second, three, *other = range(7)
# 0 1 2 [3 4 5 6]
print(first, second, three, other)
person_attributes = ('x', 'y@gmail.com', 10, 'nid card')
# the starred "rest" variable is always a list, even when the source is a tuple
name, email, *other_attributes = person_attributes
print(name, email, other_attributes)
# exception will occur because unpacking requires the number of unpacked
# elements to match the number of elements expected
# name, email = ('a', (45.56, 8), 'other_text', 89)
# using the starred "rest" on the first attributes; we want the last one only
*other_attributes, nid = person_attributes
print(other_attributes, nid)  # ['x', 'y@gmail.com', 10] nid card
# Nested-tuple unpacking and namedtuple demonstrations.
metro_areas = [
    ('Tokyo', 'JP', 36.33, (35.68772, 139.98)),
    ('Delhi NCR', 'India', -36.33, (-35.68772, -139.98)),
    ('Mexico City', 'Mexico', -36.33, (35.68772, -139.98))
]
fmt = "{:15} | {:9.4f} | {:9.4f}"
# The (lat, longi) pattern unpacks the nested coordinate tuple in place.
for city, country, whatever, (lat, longi) in metro_areas:
    if whatever < 0:
        print(fmt.format(city, lat, longi))
from collections import namedtuple
LatLong = namedtuple("lat_long", ['lat', 'lon'])
# no need to use a list: a space-separated string also works, where each
# space-separated token becomes a field name.
# "city_description" (the typename argument) is what appears when the tuple
# is printed, not the variable name CityDescription.
CityDescription = namedtuple("city_description", "city country i_dont_know_what lat_long")
for cd in metro_areas:
    # unpacking the plain tuple into the namedtuple's fields
    city_description = CityDescription(*cd)
    if(city_description.i_dont_know_what > 0):
        print(city_description)  # city_description(city='Tokyo', country='JP', i_dont_know_what=36.33, lat_long=(35.68772, 139.98))
        # unpacking the nested tuple into a strongly typed (named) tuple
        print(LatLong(*city_description.lat_long))
print()
print('--------------------- metadata -------------------')
print()
print(CityDescription._fields)  # ('city', 'country', 'i_dont_know_what', 'lat_long')
print(CityDescription._make(('Mexico City', 'Mexico', -36.33, (35.68772, -139.98))))
print(CityDescription._make(('Mexico City', 'Mexico', -36.33, (35.68772, -139.98)))._asdict())
|
from unittest import TestCase
from mock import MagicMock
from pushbullet import PushBullet
from graphitepager.description import Description
from graphitepager.notifiers.pushbullet_notifier import PushBulletNotifier
from graphitepager.redis_storage import RedisStorage
from graphitepager.alerts import Alert
from graphitepager.config import Config
from graphitepager.level import Level
class TestPushBulletNotifier(TestCase):
    """Tests for PushBulletNotifier: lock handling per alert level and the
    optional device/contact push targets configured via Config."""

    def setUp(self):
        """Build mock description, Redis storage, PushBullet client and alert."""
        self.alert_key = 'ALERT KEY'
        self.description = MagicMock(Description)
        self.description.__str__.return_value = 'ALERT DESCRIPTION'
        self.description.graphite_url = 'GRAPGITE URL'
        self.mock_redis_storage = MagicMock(RedisStorage)
        self.mock_pushbullet_client = MagicMock(PushBullet)
        self.mock_pushbullet_client.devices = [
            MagicMock(device_iden="device1"),
            MagicMock(device_iden="device2")
        ]
        self.mock_pushbullet_client.contacts = [
            MagicMock(email="contact1"),
            MagicMock(email="contact2")
        ]
        self.mock_alert = MagicMock(Alert)
        self.mock_alert.get.return_value = "name"

    def default_notifier(self):
        """Create self.pbn configured with only an API key (no targets)."""
        def mock_get(key, default=None):
            if key == 'PUSHBULLET_KEY':
                return 'PUSHBULLET_KEY'
            return default
        mock_config = MagicMock(Config)
        mock_config.get = mock_get
        self.pbn = PushBulletNotifier(
            self.mock_redis_storage, mock_config,
            client=self.mock_pushbullet_client
        )

    def devices_notifier(self):
        """Create self.pbn configured to push to specific devices.

        "device3" does not exist in the mocked client; only "device1" should
        actually receive pushes.
        """
        def mock_get(key, default=None):
            if key == 'PUSHBULLET_KEY':
                return 'PUSHBULLET_KEY'
            if key == 'PUSHBULLET_DEVICES':
                return 'device3, device1'
            return default
        mock_config = MagicMock(Config)
        mock_config.get = mock_get
        self.pbn = PushBulletNotifier(
            self.mock_redis_storage, mock_config,
            client=self.mock_pushbullet_client
        )

    def contacts_notifier(self):
        """Create self.pbn configured to push to specific contacts.

        "contact3" does not exist in the mocked client; only "contact1"
        should actually receive pushes.
        """
        def mock_get(key, default=None):
            if key == 'PUSHBULLET_KEY':
                return 'PUSHBULLET_KEY'
            if key == 'PUSHBULLET_CONTACTS':
                return 'contact3, contact1'
            return default
        mock_config = MagicMock(Config)
        mock_config.get = mock_get
        self.pbn = PushBulletNotifier(
            self.mock_redis_storage, mock_config,
            client=self.mock_pushbullet_client
        )

    def test_should_not_notify_pb_if_warning_and_already_notified(self):
        """A WARNING alert that is already locked must not push again."""
        self.default_notifier()
        self.mock_redis_storage.is_locked_for_domain_and_key.\
            return_value = True
        self.pbn.notify(
            self.mock_alert,
            self.alert_key,
            Level.WARNING,
            self.description)
        self.assertEqual(self.mock_pushbullet_client.mock_calls, [])

    def test_should_notify_pb_resolved_if_nominal_and_had_notified(self):
        """Returning to NOMINAL after a notification pushes and clears the lock."""
        self.default_notifier()
        self.mock_redis_storage.is_locked_for_domain_and_key.\
            return_value = True
        self.pbn.notify(
            self.mock_alert,
            self.alert_key,
            Level.NOMINAL,
            self.description)
        self.mock_redis_storage.is_locked_for_domain_and_key.\
            assert_called_once_with('PushBullet', self.alert_key)
        self.mock_pushbullet_client.push_link.assert_called_once_with(
            "[%s]: %s" % (Level.NOMINAL, "name"),
            self.description.graphite_url,
            body=str(self.description)
        )
        self.mock_redis_storage.remove_lock_for_domain_and_key.\
            assert_called_once_with('PushBullet', self.alert_key)

    def test_should_notify_pb_of_warning_if_had_not_notified_before(self):
        """A first-time WARNING pushes a link and sets the lock."""
        self.default_notifier()
        self.mock_redis_storage.is_locked_for_domain_and_key.\
            return_value = False
        self.pbn.notify(
            self.mock_alert,
            self.alert_key,
            Level.WARNING,
            self.description)
        self.mock_pushbullet_client.push_link.assert_called_once_with(
            "[%s]: %s" % (Level.WARNING, "name"),
            self.description.graphite_url,
            body=str(self.description)
        )
        self.mock_redis_storage.set_lock_for_domain_and_key.\
            assert_called_once_with('PushBullet', self.alert_key)

    def test_should_notify_pbn_of_critical_if_had_not_notified_before(self):
        """A first-time CRITICAL pushes a link and sets the lock."""
        self.default_notifier()
        self.mock_redis_storage.is_locked_for_domain_and_key.\
            return_value = False
        self.pbn.notify(
            self.mock_alert,
            self.alert_key,
            Level.CRITICAL,
            self.description)
        self.mock_pushbullet_client.push_link.assert_called_once_with(
            "[%s]: %s" % (Level.CRITICAL, "name"),
            self.description.graphite_url,
            body=str(self.description)
        )
        self.mock_redis_storage.set_lock_for_domain_and_key.\
            assert_called_once_with('PushBullet', self.alert_key)

    def test_should_notify_pb_of_no_data_if_had_not_notified_before(self):
        """A first-time NO_DATA pushes a link and sets the lock."""
        self.default_notifier()
        self.mock_redis_storage.is_locked_for_domain_and_key.\
            return_value = False
        self.pbn.notify(
            self.mock_alert,
            self.alert_key,
            Level.NO_DATA,
            self.description)
        self.mock_pushbullet_client.push_link.assert_called_once_with(
            "[%s]: %s" % (Level.NO_DATA, "name"),
            self.description.graphite_url,
            body=str(self.description)
        )
        self.mock_redis_storage.set_lock_for_domain_and_key.\
            assert_called_once_with('PushBullet', self.alert_key)

    def test_should_notify_devices(self):
        """Only configured devices that exist on the client receive the push."""
        self.devices_notifier()
        self.mock_redis_storage.is_locked_for_domain_and_key.\
            return_value = False
        self.pbn.notify(
            self.mock_alert,
            self.alert_key,
            Level.WARNING,
            self.description)
        devices = self.mock_pushbullet_client.devices
        devices[0].push_link.assert_called_once_with(
            "[%s]: %s" % (Level.WARNING, "name"),
            self.description.graphite_url,
            body=str(self.description)
        )
        self.assertEqual(devices[1].mock_calls, [])
        self.mock_redis_storage.set_lock_for_domain_and_key.\
            assert_called_once_with('PushBullet', self.alert_key)

    def test_should_notify_contacts(self):
        """Only configured contacts that exist on the client receive the push."""
        self.contacts_notifier()
        self.mock_redis_storage.is_locked_for_domain_and_key.\
            return_value = False
        self.pbn.notify(
            self.mock_alert,
            self.alert_key,
            Level.WARNING,
            self.description)
        contacts = self.mock_pushbullet_client.contacts
        contacts[0].push_link.assert_called_once_with(
            "[%s]: %s" % (Level.WARNING, "name"),
            self.description.graphite_url,
            body=str(self.description)
        )
        self.assertEqual(contacts[1].mock_calls, [])
        self.mock_redis_storage.set_lock_for_domain_and_key.\
            assert_called_once_with('PushBullet', self.alert_key)
|
from . import db
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from . import login_manager
from datetime import datetime
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: fetch the patient for the id stored in the session."""
    uid = int(user_id)
    return PatientUser.query.get(uid)
class Admin(UserMixin, db.Model):
    """Administrator account backed by the 'admin' table.

    The plaintext password is write-only: setting `password` stores a hash in
    `passwr_secure`; reading it raises AttributeError.
    """
    __tablename__ = 'admin'
    id = db.Column(db.Integer, primary_key=True)
    adminname = db.Column(db.String(255))
    adminemail = db.Column(db.String(255))
    # NOTE(review): `passwr` is never written by this class (only
    # passwr_secure is) — confirm whether the column is still needed.
    passwr = db.Column(db.String(255))
    passwr_secure = db.Column(db.String(255))

    @property
    def password(self):
        raise AttributeError('You cannot read the password attribute')

    @password.setter
    def password(self, password):
        self.passwr_secure = generate_password_hash(password)

    def verify_password(self, password):
        """Return True when `password` matches the stored hash."""
        return check_password_hash(self.passwr_secure, password)

    def __repr__(self):
        # Bug fix: this model has no `username` attribute (that raised
        # AttributeError); use the `adminname` column instead.
        return f'Admin {self.adminname}'
class PatientUser(UserMixin, db.Model):
    """Patient account; this is the model Flask-Login loads via load_user().

    The plaintext password is write-only: setting `password` stores a hash in
    `pass_secure`; reading it raises AttributeError.
    """
    __tablename__='patientuser'
    id= db.Column(db.Integer, primary_key=True)
    username=db.Column(db.String(255))
    first_name=db.Column(db.String(255))
    second_name=db.Column(db.String(255))
    pass_secure=db.Column(db.String(255))
    # NOTE(review): `password_hash` is never written by this class (only
    # pass_secure is) — confirm whether the column is still needed.
    password_hash=db.Column(db.String(255))
    email=db.Column(db.String(255))
    feedback_patient=db.Column(db.String(255))
    patient_medical_prof=db.Column(db.String(255))

    @property
    def password(self):
        raise AttributeError('You cannot read the password attribute')

    @password.setter
    def password(self, password):
        self.pass_secure = generate_password_hash(password)

    def verify_password(self, password):
        """Return True when `password` matches the stored hash."""
        return check_password_hash(self.pass_secure, password)

    def __repr__(self):
        return f'PatientUser {self.username}'
class DoctorUser(UserMixin, db.Model):
    """Doctor account backed by the 'doctoruser' table.

    The plaintext password is write-only: setting `password` stores a hash in
    `passw_secure`; reading it raises AttributeError.
    """
    __tablename__='doctoruser'
    id=db.Column(db.Integer, primary_key=True)
    medical_id=db.Column(db.String(255))
    doctorname=db.Column(db.String(255))
    passw_secure=db.Column(db.String(255))
    # NOTE(review): `pass_hash` is never written by this class (only
    # passw_secure is) — confirm whether the column is still needed.
    pass_hash=db.Column(db.String(255))
    docemail=db.Column(db.String(255))
    feedback_doctor=db.Column(db.String(255))

    @property
    def password(self):
        raise AttributeError('You cannot read the password attribute')

    @password.setter
    def password(self, password):
        self.passw_secure = generate_password_hash(password)

    def verify_password(self, password):
        """Return True when `password` matches the stored hash."""
        return check_password_hash(self.passw_secure, password)

    def __repr__(self):
        return f'DoctorUser {self.doctorname}'
|
# PyTorch
import torch
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
import torchvision.transforms as tr
import torch.nn.functional as F
from torch.nn import Sequential
# Models
from unet import Unet
from siamunet_conc import SiamUnet_conc
from SiamUnet_conc import SiamUnet_conc
from siamunet_diff import SiamUnet_diff
from fresunet import FresUNet
# Other
import os
import numpy as np
import random
from skimage import io
from scipy.ndimage import zoom
import matplotlib.pyplot as plt
from tqdm import tqdm as tqdm
from IPython import display
from eval import eval
from accloss import accloss
from spearman import Spear
import time
import warnings
print('IMPORTS OK')
# Global Variables' Definitions
PATH_TO_DATASET = r'D:\NTIRE Workshop and Challenges @ CVPR 2021\dataset'
# Experiment tag used to build checkpoint directory paths.
conf_type = "img(mean_std_norm)_label(mean_std_norm)_maxpool_1e-5_type_covcat_pretrain_epoch-134_loss-0.432_mse1_pr0.5_spr0.5"
# Pretrained sorter weights for the differentiable Spearman loss (Spear).
sorter_checkpoint_path = r"D:\NTIRE Workshop and Challenges @ CVPR 2021\codes\FC-Siam-diff\fully_convolutional_change_detection-master\best_model0.00463445740044117.pth.tar"
BATCH_SIZE = 30
NUM_WORKER = 4
scale_co_test = 1  # multiplier on BATCH_SIZE for the validation/test loaders
epoch_start_ = 21  # fallback starting epoch when resuming from a bare state_dict
N_EPOCHS = 200
NORMALISE_IMGS = True
NORMALISE_LABELS = True
TYPE = 2  # 0-RGB | 1-RGBIr | 2-All bands s.t. resolution <= 20m | 3-All bands
apply_spearman = True  # NOTE(review): appears unused in this script
LOAD_TRAINED = True
conf_type_pretrain = r"2080server/11"
# ch_path only exists when LOAD_TRAINED is True; it must not be referenced otherwise.
if LOAD_TRAINED:
    ch_path = rf'./checkpoint/{conf_type}/ch_net-best_epoch-134_loss-0.4324578046798706.pth.tar'
    # ch_path = rf'./checkpoint/{conf_type}/ch_net-best_epoch-52_loss-1.37092924118042.pth.tar'
    # ch_path = rf'./checkpoint/{conf_type_pretrain}/ch_net-best_epoch-70_loss-0.6888124346733093.pth.tar'
DATA_AUG = True
print('DEFINITIONS OK')
def reshape_for_torch(I):
    """Move the channel axis of an H x W x C image array to the front (C x H x W)
    and return the result as a torch tensor.

    Multiplying by 1.0 promotes integer arrays to float and materialises the
    transposed view as a new array.
    """
    channels_first = np.transpose(I, (2, 0, 1))
    return torch.from_numpy(1.0 * channels_first)
class NTIR(Dataset):
    """Change Detection dataset class, used for both training and test data.

    Expects two listing files under `path`: one with "img1_path,img2_path"
    per line and one with a float score per line. Samples are dicts with
    keys 'I1', 'I2' (CxHxW float tensors) and 'label' (1-element tensor).
    """

    def __init__(self, path, train=True, transform=None):
        self.transform = transform
        self.path = path
        # names[0] / names[1]: paths of the first/second image of each pair;
        # names[2]: the float label of each pair.
        self.names = [[], [], []]
        self.train_m = train
        # The train and test splits differ only in the listing-file names;
        # the parsing logic (previously duplicated per branch) is shared.
        if self.train_m:
            img_12 = 'path_img_train.txt'
            label = 'label_data_train.txt'
        else:
            img_12 = 'path_img_test.txt'
            label = 'label_data_test.txt'
        with open(os.path.join(self.path, img_12), "r") as img_file:
            # split("\n")[:-1] drops the empty string after the final newline.
            all_data = img_file.read().split("\n")[:-1]
        self.names[0] = [img.split(",")[0] for img in all_data]
        self.names[1] = [img.split(",")[1] for img in all_data]
        with open(os.path.join(self.path, label), "r") as gt_file:
            all_scores = gt_file.read().split("\n")[:-1]
        self.names[2] = [float(score) for score in all_scores]

    def __len__(self):
        return len(self.names[0])

    def __getitem__(self, idx):
        I1_path = self.names[0][idx]
        I2_path = self.names[1][idx]
        I1_ = io.imread(I1_path)
        I2_ = io.imread(I2_path)
        if NORMALISE_IMGS:
            # Per-image standardisation (zero mean, unit std).
            I1_m = (I1_ - I1_.mean()) / I1_.std()
            I2_m = (I2_ - I2_.mean()) / I2_.std()
        else:
            I1_m = I1_
            I2_m = I2_
        I1 = reshape_for_torch(I1_m)
        I2 = reshape_for_torch(I2_m)
        label = np.array([self.names[2][idx]])
        if NORMALISE_LABELS:
            # Standardise the label against the whole split's statistics.
            label_ = (label - np.array(self.names[2]).mean()) / np.array(self.names[2]).std()
        else:
            label_ = label
        label = torch.from_numpy(1.0 * label_).float()
        sample = {'I1': I1, 'I2': I2, 'label': label}
        if self.transform:
            sample = self.transform(sample)
        return sample
class RandomFlip(object):
    """With probability 0.5, flip both images of a sample along the width axis."""

    def __call__(self, sample):
        I1, I2, label = sample['I1'], sample['I2'], sample['label']
        if random.random() > 0.5:
            # Reverse the last (width) axis; .copy() makes the reversed view
            # contiguous so torch.from_numpy accepts it.
            I1 = torch.from_numpy(I1.numpy()[:, :, ::-1].copy())
            I2 = torch.from_numpy(I2.numpy()[:, :, ::-1].copy())
        return {'I1': I1, 'I2': I2, 'label': label}
class RandomRot(object):
    """Rotate both images of a sample by a random multiple of 90 degrees."""

    def __call__(self, sample):
        I1, I2, label = sample['I1'], sample['I2'], sample['label']
        quarter_turns = random.randint(0, 3)
        if quarter_turns:
            # Rotate in the (H, W) plane; .copy() re-materialises the rotated
            # view so torch.from_numpy accepts it.
            I1 = torch.from_numpy(np.rot90(I1.numpy(), quarter_turns, axes=(1, 2)).copy())
            I2 = torch.from_numpy(np.rot90(I2.numpy(), quarter_turns, axes=(1, 2)).copy())
        return {'I1': I1, 'I2': I2, 'label': label}
# Import-time marker confirming the dataset/transform helpers above loaded.
print('UTILS OK')
def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in `model`."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
# net.load_state_dict(torch.load('net-best_epoch-1_fm-0.7394933126157746.pth.tar'))
def train(epock_start, best_lss, best_acc, n_epochs=N_EPOCHS, save=True):
    """Run the training loop from epoch `epock_start` up to `n_epochs`.

    Relies on module-level globals created in the __main__ block: net,
    optimizer, scheduler, the criterion_* / coef_loss_* objects and the
    train/validation/test data loaders. A full checkpoint (model, loss head,
    optimizer, scheduler, curves) is written whenever test accuracy or test
    loss improves on `best_acc` / `best_lss`. Returns a dict with the final
    train/test loss and accuracy.
    """
    t = np.linspace(1, n_epochs, n_epochs)
    epoch_train_loss = 0 * t
    epoch_train_accuracy = 0 * t
    epoch_test_loss = 0 * t
    epoch_test_accuracy = 0 * t
    plt.figure(num=1)
    plt.figure(num=2)
    try:
        # Resume the plotted curves from previously loaded checkpoint data.
        # NOTE(review): Train_loss_curve etc. are module-level names that may
        # not exist; the (bare) except below then zero-fills the history.
        epoch_train_loss[:epock_start] = Train_loss_curve[1]
        epoch_test_loss[:epock_start] = Test_loss_curve[1]
        epoch_train_accuracy[:epock_start] = Train_accuracy_curve[1]
        epoch_test_accuracy[:epock_start] = Test_accuracy_curve[1]
    except:
        epoch_train_loss[:epock_start] = 0 * np.array(list(range(epock_start)))
        epoch_test_loss[:epock_start] = 0 * np.array(list(range(epock_start)))
        epoch_train_accuracy[:epock_start] = 0 * np.array(list(range(epock_start)))
        epoch_test_accuracy[:epock_start] = 0 * np.array(list(range(epock_start)))
    for epoch_index in tqdm(range(epock_start, n_epochs)):
        net.train()
        print('Epoch: ' + str(epoch_index + 1) + ' of ' + str(N_EPOCHS))
        for index, batch in enumerate(train_loader):
            # im1 = batch['I1'][2, :, :, :].numpy().transpose(1, 2, 0).astype("uint8")
            # im2 = batch['I2'][2, :, :, :].numpy().transpose(1, 2, 0).astype("uint8")
            I1 = Variable(batch['I1'].float().cuda())
            I2 = Variable(batch['I2'].float().cuda())
            label = Variable(batch['label'].cuda())
            optimizer.zero_grad()
            output = net(I1, I2)
            # loss = coef_loss_mae * criterion_mae(output, label) + coef_loss_pr * (
            #     1 - (criterion_pr(output, label)) ** 2)
            # zipp_sort_ind = zip(np.argsort(batch['label'].numpy())[::-1], range(BATCH_SIZE))
            # ranks = [((y[1] + 1) / float(BATCH_SIZE)) for y in sorted(zipp_sort_ind, key=lambda x: x[0])]
            # label_spr = torch.FloatTensor(ranks).cuda()
            # Weighted combination of MSE, (1 - pearson), spearman-head and MAE terms.
            loss = coef_loss_mse * criterion_mse(output, label) + coef_loss_pr * (1-criterion_pr(output,
                                                                                                label)) + coef_loss_spr * criterion_spr(
                output, label) + coef_loss_mae * criterion_mae(output, label)
            with torch.no_grad():
                # Per-batch breakdown of the individual loss terms.
                print("@@@@",criterion_mse(output, label)," ", 1-criterion_pr(output,label), " ", criterion_spr(output, label))
            # loss = criterion_mse(output, label)
            loss.backward()
            optimizer.step()
            print(
                "\ntrain : " + f"epoch : {epoch_index + 1} -- " + f"{index + 1}" + " / " + f"{len(train_loader)}" + " ----->" + "loss : "
                + f"{loss.detach().cpu().numpy():.04f}")
        scheduler.step()
        with torch.no_grad():
            # Per-epoch evaluation on (a loader over) the train set and the test set.
            epoch_train_loss[epoch_index], epoch_train_accuracy[epoch_index] = test(train_loader_val)
            epoch_test_loss[epoch_index], epoch_test_accuracy[epoch_index] = test(test_loader)
        # Figure 1: loss curves.
        plt.figure(num=1)
        plt.clf()
        l1_1, = plt.plot(t[:epoch_index + 1], epoch_train_loss[:epoch_index + 1],
                         label='Train loss')
        l1_2, = plt.plot(t[:epoch_index + 1], epoch_test_loss[:epoch_index + 1],
                         label='Test loss')
        plt.legend(handles=[l1_1, l1_2])
        plt.grid()
        plt.gcf().gca().set_xlim(left=0)
        plt.title('Loss')
        display.clear_output(wait=True)
        display.display(plt.gcf())
        # Figure 2: accuracy curves.
        plt.figure(num=2)
        plt.clf()
        l2_1, = plt.plot(t[:epoch_index + 1], epoch_train_accuracy[:epoch_index + 1],
                         label='Train accuracy')
        l2_2, = plt.plot(t[:epoch_index + 1], epoch_test_accuracy[:epoch_index + 1],
                         label='Test accuracy')
        plt.legend(handles=[l2_1, l2_2])
        plt.grid()
        plt.gcf().gca().set_xlim(left=0)
        plt.title('Accuracy')
        display.clear_output(wait=True)
        display.display(plt.gcf())
        # Model selection is driven by the TEST metrics.
        # lss = epoch_train_loss[epoch_index]
        # accu = epoch_train_accuracy[epoch_index]
        lss = epoch_test_loss[epoch_index]
        accu = epoch_test_accuracy[epoch_index]
        if accu > best_acc:
            best_acc = accu
            save_str = fr'./checkpoint/{conf_type}/ch_net-best_epoch-' + str(epoch_index + 1) + '_accu-' + str(
                accu) + '.pth.tar'
            # torch.save(net.state_dict(), save_str)
            torch.save({
                'epoch': epoch_index,
                'model_state_dict': net.state_dict(),
                'model_state_dict_head': criterion_spr.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'scheduler_state_dict': scheduler.state_dict(),
                "Train loss": [t[:epoch_index + 1], epoch_train_loss[:epoch_index + 1]],
                "Test loss": [t[:epoch_index + 1], epoch_test_loss[:epoch_index + 1]],
                "Train accuracy": [t[:epoch_index + 1],
                                   epoch_train_accuracy[:epoch_index + 1]],
                "Test accuracy": [t[:epoch_index + 1], epoch_test_accuracy[:epoch_index + 1]],
                'loss': lss,
                'acc': accu
            }, save_str)
        if lss < best_lss:
            best_lss = lss
            save_str = rf'./checkpoint/{conf_type}/ch_net-best_epoch-' + str(epoch_index + 1) + '_loss-' + str(
                lss) + '.pth.tar'
            torch.save({
                'epoch': epoch_index,
                'model_state_dict': net.state_dict(),
                'model_state_dict_head': criterion_spr.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'scheduler_state_dict': scheduler.state_dict(),
                "Train loss": [t[:epoch_index + 1], epoch_train_loss[:epoch_index + 1]],
                "Test loss": [t[:epoch_index + 1], epoch_test_loss[:epoch_index + 1]],
                "Train accuracy": [t[:epoch_index + 1],
                                   epoch_train_accuracy[:epoch_index + 1]],
                "Test accuracy": [t[:epoch_index + 1], epoch_test_accuracy[:epoch_index + 1]],
                'loss': lss,
                'acc': accu
            }, save_str)
        print(
            f"\n ################## \n epock : {epoch_index + 1} \n avg_loss_train : {lss} \n avg_acc_train : {accu} \n ################## \n")
        accu_val = epoch_test_accuracy[epoch_index]
        lss_val = epoch_test_loss[epoch_index]
        print(
            f"\n ################## \n epock : {epoch_index + 1} \n avg_loss_test : {lss_val} \n avg_acc_test : {accu_val} \n ################## \n")
        if save:
            im_format = 'png'
            # im_format = 'eps'
            plt.figure(num=1)
            plt.savefig(net_name + '-01-loss.' + im_format)
            plt.figure(num=2)
            plt.savefig(net_name + '-02-accuracy.' + im_format)
    out = {'train_loss': epoch_train_loss[-1],
           'train_accuracy': epoch_train_accuracy[-1],
           'test_loss': epoch_test_loss[-1],
           'test_accuracy': epoch_test_accuracy[-1]}
    return out
# NOTE(review): L and N appear unused in this script — presumably leftovers
# from the original change-detection project; confirm before removing.
L = 1024
N = 2
def test(dset):
    """Evaluate `net` on the batches of `dset`.

    Returns (size-weighted mean loss, accuracy). Uses the same composite
    criterion as training; accuracy comes from the project-level eval()
    helper (which shadows the builtin eval here) applied to all
    concatenated predictions and ground truths.
    """
    net.eval()
    tot_loss = 0
    tot_count = 0
    all_predicted = []
    all_gt = []
    for index, batch in enumerate(dset):
        I1 = Variable(batch['I1'].float().cuda())
        I2 = Variable(batch['I2'].float().cuda())
        cm = Variable(batch['label'].cuda())
        output = net(I1, I2)
        # loss = coef_loss_mae * criterion_mae(output, label) + coef_loss_pr * (
        #     1 - (criterion_pr(output, label)) ** 2)
        # zipp_sort_ind = zip(np.argsort(batch['label'].numpy())[::-1], range(BATCH_SIZE))
        # ranks = [((y[1] + 1) / float(BATCH_SIZE)) for y in sorted(zipp_sort_ind, key=lambda x: x[0])]
        # label_spr_cm = torch.FloatTensor(ranks).cuda()
        # Same weighted loss combination as in train().
        loss = coef_loss_mse * criterion_mse(output, cm) + coef_loss_pr * (1-criterion_pr(output,
                                                                                          cm)) + coef_loss_spr * criterion_spr(
            output, cm) + coef_loss_mae * criterion_mae(output, cm)
        # loss = criterion_mse(output, cm)
        print(
            "\n val : " + f"{index + 1}" + " / " + f"{len(dset)}" + " ----->" + "loss : "
            + f"{loss.detach().cpu().numpy():.04f}")
        # Weight each batch's loss by the number of label elements.
        tot_loss += loss.data * np.prod(cm.size())
        tot_count += np.prod(cm.size())
        all_predicted.extend(list(torch.squeeze(output).detach().cpu().numpy()))
        all_gt.extend(list(torch.squeeze(cm).detach().cpu().numpy()))
    net_loss = tot_loss / tot_count
    accuracy, _, _ = eval(np.array(all_predicted), np.array(all_gt))
    return net_loss, accuracy
def save_test_results(dset):
    """Write magenta/green comparison PNGs of predictions vs ground truth.

    NOTE(review): `warnings.catch_warnings()` without a filter change
    suppresses nothing — add warnings.simplefilter('ignore') inside the
    context if suppression was the intent. Also assumes `dset` exposes
    get_img(), which the NTIR class above does not define — confirm before
    re-enabling the commented call sites.
    """
    for name in tqdm(dset.names):
        with warnings.catch_warnings():
            I1, I2, cm = dset.get_img(name)
            I1 = Variable(torch.unsqueeze(I1, 0).float()).cuda()
            I2 = Variable(torch.unsqueeze(I2, 0).float()).cuda()
            out = net(I1, I2)
            _, predicted = torch.max(out.data, 1)
            I = np.stack((255 * cm, 255 * np.squeeze(predicted.cpu().numpy()), 255 * cm), 2)
            io.imsave(f'{net_name}-{name}.png', I)
if __name__ == '__main__':
    # ---- Datasets and loaders -------------------------------------------
    if DATA_AUG:
        data_transform = tr.Compose([RandomFlip(), RandomRot()])
    else:
        data_transform = None
    train_dataset = NTIR(PATH_TO_DATASET, train=True, transform=data_transform)
    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKER,
                              drop_last=True)
    train_loader_val = DataLoader(train_dataset, batch_size=int(BATCH_SIZE * scale_co_test), shuffle=True,
                                  num_workers=NUM_WORKER,
                                  drop_last=True)
    test_dataset = NTIR(PATH_TO_DATASET, train=False)
    test_loader = DataLoader(test_dataset, batch_size=int(BATCH_SIZE * scale_co_test), shuffle=True,
                             num_workers=NUM_WORKER,
                             drop_last=True)
    print('DATASETS OK')
    # ---- Network selection by input TYPE --------------------------------
    if TYPE == 0:
        # net, net_name = Unet(2*3, 2), 'FC-EF'
        # net, net_name = SiamUnet_conc(3, 2), 'FC-Siam-conc'
        # net, net_name = SiamUnet_diff(3, 2), 'FC-Siam-diff'
        net, net_name = FresUNet(2 * 3, 2), 'FresUNet'
    elif TYPE == 1:
        # net, net_name = Unet(2*4, 2), 'FC-EF'
        # net, net_name = SiamUnet_conc(4, 2), 'FC-Siam-conc'
        # net, net_name = SiamUnet_diff(4, 2), 'FC-Siam-diff'
        net, net_name = FresUNet(2 * 4, 2), 'FresUNet'
    elif TYPE == 2:
        # net, net_name = Unet(2*10, 2), 'FC-EF'
        net, net_name = SiamUnet_conc(3, 1), rf'./checkpoint/{conf_type}/FC-Siam-diff'
        # net, net_name = SiamUnet_diff(10, 2), 'FC-Siam-diff'
        # net, net_name = FresUNet(2 * 10, 2), 'FresUNet'
    elif TYPE == 3:
        # net, net_name = Unet(2*13, 2), 'FC-EF'
        # net, net_name = SiamUnet_conc(13, 2), 'FC-Siam-conc'
        net, net_name = SiamUnet_diff(3, 1), rf'./checkpoint/{conf_type}/FC-Siam-diff'
        # net, net_name = FresUNet(2 * 13, 2), 'FresUNet'
    net.cuda()
    criterion_mse = F.mse_loss
    criterion_mae = F.l1_loss
    criterion_pr = accloss
    coef_loss_mse = 1
    coef_loss_mae = 0
    coef_loss_pr = 0.5
    coef_loss_spr = 0.5
    print('Number of trainable parameters:', count_parameters(net))
    print('NETWORK OK')
    # criterion_spr is needed by train() on EVERY path below; previously it
    # was only created inside the try branch and the other paths raised
    # NameError.
    criterion_spr = Spear(sorter_checkpoint_path)
    if LOAD_TRAINED:
        checkpoint = torch.load(ch_path)
        # checkpoint['optimizer_state_dict']['param_groups'][0]["lr"] = 1e-5
        try:
            # Full checkpoint dict (model + loss head + optimizer + curves).
            net.load_state_dict(checkpoint['model_state_dict'])
            optimizer = torch.optim.Adam(list(net.parameters()) + list(criterion_spr.parameters()), lr=1e-5,
                                         weight_decay=1e-4)
            # optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.95)
            # scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
            # Train_loss_curve = checkpoint["Train loss"]
            # Test_loss_curve = checkpoint["Test loss"]
            # Train_accuracy_curve = checkpoint["Train accuracy"]
            # Test_accuracy_curve = checkpoint["Test accuracy"]
            # epoch_input = checkpoint['epoch'] + 1
            epoch_input = 0
            best_acc_ = 0
            best_lss_ = 1000
        except (KeyError, RuntimeError):
            # The checkpoint is a bare state_dict (no wrapper keys) —
            # previously a bare except; the optimizer was also undefined on
            # this path, which raised NameError at the scheduler line.
            epoch_input = epoch_start_
            net.load_state_dict(checkpoint)
            optimizer = torch.optim.Adam(list(net.parameters()) + list(criterion_spr.parameters()), lr=1e-5,
                                         weight_decay=1e-4)
            scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.95)
            best_acc_ = 0
            best_lss_ = 1000
    else:
        epoch_input = 0
        # Fresh training: previously no optimizer was created on this path
        # (NameError); use the original default learning rate.
        optimizer = torch.optim.Adam(net.parameters(), lr=0.0005)
        scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.95)
        best_acc_ = 0
        best_lss_ = 1000
    print('LOAD OK')
    t_start = time.time()
    out_dic = train(epoch_input, best_lss_, best_acc_)
    t_end = time.time()
    print(out_dic)
    print('Elapsed time:')
    print(t_end - t_start)
    if not LOAD_TRAINED:
        torch.save(net.state_dict(), rf'./checkpoint/{conf_type}/net_final.pth.tar')
        print('SAVE OK')
    # Removed leftover debugger residue: `import pdb; pdb.tra` raised
    # AttributeError (pdb has no attribute `tra`) at the end of every run.
    # t_start = time.time()
    # # save_test_results(train_dataset)
    # save_test_results(test_dataset)
    # t_end = time.time()
    # print('Elapsed time: {}'.format(t_end - t_start))
|
def factorial(n):
    """Return n! (the product 1 * 2 * ... * n); factorial(0) == 1."""
    product = 1
    for i in range(1, n + 1):
        product *= i
    return product


def main():
    """Read a natural number and print its factorial."""
    n = int(input("Moi ban 1 so tu nhien bat ky: "))
    # NOTE(review): "Tong" means "sum" but the loop computes the product
    # (factorial); the printed text is kept unchanged to preserve output.
    print("Tong la", factorial(n))


# Guard so importing this module does not trigger the interactive prompt.
if __name__ == '__main__':
    main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class FQExtendParams(object):
    """Extension parameters (fq_number, fq_seller_percent) for Alipay requests."""

    def __init__(self):
        self._fq_number = None
        self._fq_seller_percent = None

    @property
    def fq_number(self):
        return self._fq_number

    @fq_number.setter
    def fq_number(self, value):
        self._fq_number = value

    @property
    def fq_seller_percent(self):
        return self._fq_seller_percent

    @fq_seller_percent.setter
    def fq_seller_percent(self, value):
        self._fq_seller_percent = value

    def to_alipay_dict(self):
        """Serialize truthy fields into a plain dict for the gateway payload."""
        params = dict()
        for key in ('fq_number', 'fq_seller_percent'):
            value = getattr(self, key)
            if value:
                # Nested API objects know how to serialize themselves.
                params[key] = value.to_alipay_dict() if hasattr(value, 'to_alipay_dict') else value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an FQExtendParams from a response dict; None for empty input."""
        if not d:
            return None
        obj = FQExtendParams()
        for key in ('fq_number', 'fq_seller_percent'):
            if key in d:
                setattr(obj, key, d[key])
        return obj
|
import datetime
from datetime import date
from dateutil.relativedelta import relativedelta
def lifetime():
    """Return a human-readable description of how long ago 2011-07-09 was.

    NOTE(review): the first two branches return strings while the later ones
    return tuples, and a zero-day delta falls through returning None —
    callers should confirm which shape they expect before this is unified.
    """
    now = datetime.datetime.now()
    # Bug fix: `date(2011,07,9)` used a leading-zero integer literal, which
    # is a SyntaxError in Python 3 (Python 2 octal notation) and prevented
    # this module from being imported at all.
    end_date = date(2011, 7, 9)
    rdelta = relativedelta(now, end_date)
    if rdelta.years > 0 and rdelta.months > 0 and rdelta.days > 0:
        return str(rdelta.years) + ' years ' + str(rdelta.months) + ' months ' + str(rdelta.days) + ' days ago'
    elif rdelta.years > 5:
        return str(rdelta.years) + ' years ago'
    elif rdelta.months > 0 and rdelta.days > 0:
        return rdelta.months, 'months', rdelta.days, 'days ago'
    elif rdelta.days > 1:
        return rdelta.days, 'days ago'
    elif rdelta.days:
        return rdelta.days, 'days ago'
# coding:utf-8
'''
@Copyright:LintCode
@Author: ultimate010
@Problem: http://www.lintcode.com/problem/reverse-linked-list
@Language: Python
@Datetime: 16-06-10 10:06
'''
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
    """
    @param head: The first node of the linked list.
    @return: You should return the head of the reversed linked list.
                  Reverse it in-place.
    """
    def reverse(self, head):
        # Standard pointer-walk reversal: repeatedly detach the front node of
        # the remaining list and push it onto the already-reversed prefix.
        # Handles the empty and single-node lists without a special case.
        reversed_head = None
        node = head
        while node:
            remaining = node.next
            node.next = reversed_head
            reversed_head = node
            node = remaining
        return reversed_head
# Copyright (c) 2020 the original author or authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
from collections import defaultdict
from importlib import import_module
from typing import List, Type, Dict, Tuple
from types import ModuleType
from cached_property import cached_property
from src import constants
from src.services.facescan.plugins import base, mixins
ML_MODEL_SEPARATOR = '@'
def import_classes(class_path: str):
    """Resolve a dotted ``module.ClassName`` path and return the class object."""
    module_path, class_name = class_path.rsplit('.', 1)
    module = import_module(module_path, __package__)
    return getattr(module, class_name)
class PluginManager:
    """Imports and instantiates the face-scan plugins named in the environment.

    Plugin names are dotted paths relative to this package; a name may carry a
    '@<ml_model_name>' suffix (ML_MODEL_SEPARATOR) selecting a specific model.
    """
    # Maps each imported plugin module to the plugin names it provides.
    plugins_modules: Dict[ModuleType, List[str]]
    def __init__(self):
        self.plugins_modules = defaultdict(list)
        for plugin_name in self.get_plugins_names():
            # Import the containing module (everything before the last dot),
            # then normalize the plugin name to its last two path segments.
            module = import_module(f'{__package__}.{plugin_name.rsplit(".", 1)[0]}')
            plugin_name = plugin_name.split('.')[-2] + '.' + plugin_name.split('.')[-1]
            self.plugins_modules[module].append(plugin_name)
    @property
    def requirements(self):
        """Union of the ``requirements`` sets declared by all plugin modules."""
        requirements = set()
        for module in self.plugins_modules:
            requirements |= set(module.requirements)
        return requirements
    def get_plugins_names(self):
        """Configured plugin names from the environment; empty entries dropped."""
        return list(filter(None, [
            constants.ENV.FACE_DETECTION_PLUGIN,
            constants.ENV.CALCULATION_PLUGIN,
            *constants.ENV.EXTRA_PLUGINS
        ]))
    @cached_property
    def plugins(self):
        """Instantiate every configured plugin (computed once, then cached)."""
        plugins = []
        for module, plugins_names in self.plugins_modules.items():
            for pl_name in plugins_names:
                mlmodel_name = None
                # 'name@model' selects a specific ML model for the plugin.
                if ML_MODEL_SEPARATOR in pl_name:
                    pl_name, mlmodel_name = pl_name.split(ML_MODEL_SEPARATOR)
                pl_path = f'{module.__package__}.{pl_name}'
                pl_class = import_classes(pl_path)
                plugin = pl_class(ml_model_name=mlmodel_name)
                plugins.append(plugin)
        return plugins
    @cached_property
    def detector(self) -> mixins.FaceDetectorMixin:
        # The first (presumably only) configured face-detector plugin;
        # raises IndexError if none is configured.
        return [pl for pl in self.plugins
                if isinstance(pl, mixins.FaceDetectorMixin)][0]
    @cached_property
    def calculator(self) -> mixins.CalculatorMixin:
        # The first configured embedding-calculator plugin.
        return [pl for pl in self.plugins
                if isinstance(pl, mixins.CalculatorMixin)][0]
    @cached_property
    def face_plugins(self) -> List[base.BasePlugin]:
        # Every plugin except the face detector(s).
        return [pl for pl in self.plugins
                if not isinstance(pl, mixins.FaceDetectorMixin)]
    def filter_face_plugins(self, slugs: List[str]) -> List[base.BasePlugin]:
        """Face plugins whose slug is in *slugs*; all of them when slugs is None."""
        return [pl for pl in self.face_plugins
                if slugs is None or pl.slug in slugs]
    def get_plugin_by_class(self, plugin_class: Type):
        """First plugin that is an instance of *plugin_class*, or None."""
        for plugin in self.plugins:
            if isinstance(plugin, plugin_class):
                return plugin
plugin_manager = PluginManager()
|
#%% Part 1
# Looks like Advent of Code 2020 day 14, part 1: a 36-bit mask is applied to
# each *value* before it is written to memory; the answer is the sum of all
# stored values.
import re
mask_regex = re.compile(r"^mask\s+=\s+([X10]+)")
mem_regex = re.compile(r"^mem\[(\d+)\]\s=\s(\d+)")
result = {}
with open("day_14_input.txt") as input_data:
    for line in input_data:
        if mask_match := mask_regex.match(line):
            # AND pattern: 1 where the mask is 'X' (bit passes through),
            # 0 where the mask forces the bit.
            and_pattern = int(mask_match.group(1).replace("1", "0").replace("X", "1"), 2)
            # OR pattern: 1 exactly where the mask forces a 1.
            or_pattern = int(mask_match.group(1).replace("X", "0"), 2)
        else:
            position, value = map(int, mem_regex.match(line).groups())
            result[position] = value & and_pattern | or_pattern
print(f"Sum of remaining values: {sum(result.values())}")
#%% Part 2
# Part 2: the mask is applied to the *address* instead: '1' forces the bit to
# 1, 'X' is a floating bit taking every combination, '0' leaves the bit as-is.
# Reuses mask_regex / mem_regex compiled in Part 1 above.
result = {}
with open("day_14_input.txt") as input_data:
    for line in input_data:
        if mask_match := mask_regex.match(line):
            # Keep address bits everywhere except the floating ('X') positions.
            and_pattern = int(mask_match.group(1).replace("0", "1").replace("X", "0"), 2)
            or_template = mask_match.group(1)
            or_patterns = []
            # Fix: count floating bits in the captured mask template, not the
            # whole input line — any stray 'X' after the mask field would have
            # inflated the count and produced 2**k duplicate patterns.
            x_count = or_template.count("X")
            # Enumerate every assignment of the floating bits.
            for x in range(2**x_count):
                or_pattern = or_template
                for c in bin(x)[2:].zfill(x_count):
                    or_pattern = or_pattern.replace("X", c, 1)
                or_patterns.append(int(or_pattern, 2))
        else:
            position, value = map(int, mem_regex.match(line).groups())
            for or_pattern in or_patterns:
                result[position & and_pattern | or_pattern] = value
print(f"Sum of remaining values: {sum(result.values())}")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Test entry point: loads .env configuration, then runs every unittest
# discoverable from the imported test module(s).
__author__ = 'Edoardo Lenzi'
__version__ = '1.0'
__license__ = 'WTFPL2.0'
# load .env configs
from knights_tour.utils.env import Env
Env.load()
# load tests/
import unittest
from tests import CliTest  # imported so unittest.main() can discover its cases
'''Run all tests in tests/'''
# NOTE(review): unittest.main() runs at import time — consider guarding with
# `if __name__ == "__main__":` so importing this module doesn't run the suite.
unittest.main()
import datetime
import os
import random
import sys
import tempfile
import numpy as np
import pytest
import ray
from ray import cloudpickle as pickle
from ray._private import ray_constants
from ray._private.test_utils import (
client_test_enabled,
wait_for_condition,
wait_for_pid_to_exit,
)
from ray.actor import ActorClassInheritanceException
from ray.tests.client_test_utils import create_remote_signal_actor
from ray._private.test_utils import SignalActor
# NOTE: We have to import setproctitle after ray because we bundle setproctitle
# with ray.
import setproctitle # noqa
try:
import pytest_timeout
except ImportError:
pytest_timeout = None
@pytest.mark.parametrize("set_enable_auto_connect", ["1", "0"], indirect=True)
def test_caching_actors(shutdown_only, set_enable_auto_connect):
    """Actor classes defined before ray.init() must still work: creation fails
    without auto-connect, succeeds once connected (or with auto-connect on)."""
    # Test defining actors before ray.init() has been called.
    @ray.remote
    class Foo:
        def __init__(self):
            pass
        def get_val(self):
            return 3
    if set_enable_auto_connect == "0":
        # Check that we can't actually create actors before ray.init() has
        # been called.
        with pytest.raises(Exception):
            f = Foo.remote()
        ray.init(num_cpus=1)
    else:
        # Actor creation should succeed here because ray.init() auto connection
        # is (by default) enabled.
        f = Foo.remote()
    f = Foo.remote()
    assert ray.get(f.get_val.remote()) == 3
# https://github.com/ray-project/ray/issues/20554
def test_not_reusing_task_workers(shutdown_only):
    """Regression test: a task worker that owns an object must not be reused
    as an actor and then killed, or the object's owner dies with it."""
    @ray.remote
    def create_ref():
        # The worker running this task becomes the owner of the inner ref.
        ref = ray.put(np.zeros(100_000_000))
        return ref
    @ray.remote
    class Actor:
        def __init__(self):
            return
        def foo(self):
            return
    ray.init(num_cpus=1, object_store_memory=1000_000_000)
    wrapped_ref = create_ref.remote()
    print(ray.get(ray.get(wrapped_ref)))
    # create_ref worker gets reused as an actor.
    a = Actor.remote()
    ray.get(a.foo.remote())
    # Actor will get force-killed.
    del a
    # Flush the object store.
    for _ in range(10):
        ray.put(np.zeros(100_000_000))
    # Object has been evicted and owner has died. Throws OwnerDiedError.
    # NOTE(review): nothing here catches that error — if the comment is right,
    # the raise fails the test itself; verify against upstream whether a
    # pytest.raises(OwnerDiedError) wrapper is missing.
    print(ray.get(ray.get(wrapped_ref)))
def test_remote_function_within_actor(ray_start_10_cpus):
    """Remote functions (and their closures) are usable inside actor methods."""
    # Make sure we can use remote functions within actors.
    # Create some values to close over.
    val1 = 1
    val2 = 2
    @ray.remote
    def f(x):
        return val1 + x
    @ray.remote
    def g(x):
        return ray.get(f.remote(x))
    @ray.remote
    class Actor:
        def __init__(self, x):
            self.x = x
            self.y = val2
            self.object_refs = [f.remote(i) for i in range(5)]
            self.values2 = ray.get([f.remote(i) for i in range(5)])
        def get_values(self):
            return self.x, self.y, self.object_refs, self.values2
        def f(self):
            # Inside the body, bare `f` resolves to the closed-over remote
            # function, not this method.
            return [f.remote(i) for i in range(5)]
        def g(self):
            return ray.get([g.remote(i) for i in range(5)])
        def h(self, object_refs):
            return ray.get(object_refs)
    actor = Actor.remote(1)
    values = ray.get(actor.get_values.remote())
    assert values[0] == 1
    assert values[1] == val2
    assert ray.get(values[2]) == list(range(1, 6))
    assert values[3] == list(range(1, 6))
    assert ray.get(ray.get(actor.f.remote())) == list(range(1, 6))
    assert ray.get(actor.g.remote()) == list(range(1, 6))
    assert ray.get(actor.h.remote([f.remote(i) for i in range(5)])) == list(range(1, 6))
def test_define_actor_within_actor(ray_start_10_cpus):
    # Make sure we can define and create a nested actor from inside another
    # actor's method.
    @ray.remote
    class Actor1:
        def __init__(self, x):
            self.x = x
        def new_actor(self, z):
            @ray.remote
            class Actor2:
                def __init__(self, x):
                    self.x = x
                def get_value(self):
                    return self.x
            # Keep a handle on self so the nested actor stays alive.
            self.actor2 = Actor2.remote(z)
        def get_values(self, z):
            self.new_actor(z)
            return self.x, ray.get(self.actor2.get_value.remote())
    actor1 = Actor1.remote(3)
    assert ray.get(actor1.get_values.remote(5)) == (3, 5)
def test_use_actor_within_actor(ray_start_10_cpus):
# Make sure we can use actors within actors.
@ray.remote
class Actor1:
def __init__(self, x):
self.x = x
def get_val(self):
return self.x
@ray.remote
class Actor2:
def __init__(self, x, y):
self.x = x
self.actor1 = Actor1.remote(y)
def get_values(self, z):
return self.x, ray.get(self.actor1.get_val.remote())
actor2 = Actor2.remote(3, 4)
assert ray.get(actor2.get_values.remote(5)) == (3, 4)
def test_use_actor_twice(ray_start_10_cpus):
# Make sure we can call the same actor using different refs.
@ray.remote
class Actor1:
def __init__(self):
self.count = 0
def inc(self):
self.count += 1
return self.count
@ray.remote
class Actor2:
def __init__(self):
pass
def inc(self, handle):
return ray.get(handle.inc.remote())
a = Actor1.remote()
a2 = Actor2.remote()
assert ray.get(a2.inc.remote(a)) == 1
assert ray.get(a2.inc.remote(a)) == 2
def test_define_actor_within_remote_function(ray_start_10_cpus):
    # Make sure we can define and use actors within remote functions.
    @ray.remote
    def f(x, n):
        @ray.remote
        class Actor1:
            def __init__(self, x):
                self.x = x
            def get_value(self):
                return self.x
        actor = Actor1.remote(x)
        # Call the nested actor n times; every call should return x.
        return ray.get([actor.get_value.remote() for _ in range(n)])
    assert ray.get(f.remote(3, 1)) == [3]
    assert ray.get([f.remote(i, 20) for i in range(10)]) == [
        20 * [i] for i in range(10)
    ]
def test_use_actor_within_remote_function(ray_start_10_cpus):
# Make sure we can create and use actors within remote funtions.
@ray.remote
class Actor1:
def __init__(self, x):
self.x = x
def get_values(self):
return self.x
@ray.remote
def f(x):
actor = Actor1.remote(x)
return ray.get(actor.get_values.remote())
assert ray.get(f.remote(3)) == 3
def test_actor_import_counter(ray_start_10_cpus):
# This is mostly a test of the export counters to make sure that when
# an actor is imported, all of the necessary remote functions have been
# imported.
# Export a bunch of remote functions.
num_remote_functions = 50
for i in range(num_remote_functions):
@ray.remote
def f():
return i
@ray.remote
def g():
@ray.remote
class Actor:
def __init__(self):
# This should use the last version of f.
self.x = ray.get(f.remote())
def get_val(self):
return self.x
actor = Actor.remote()
return ray.get(actor.get_val.remote())
assert ray.get(g.remote()) == num_remote_functions - 1
@pytest.mark.skipif(client_test_enabled(), reason="internal api")
def test_actor_method_metadata_cache(ray_start_regular):
class Actor(object):
pass
# The cache of _ActorClassMethodMetadata.
cache = ray.actor._ActorClassMethodMetadata._cache
cache.clear()
# Check cache hit during ActorHandle deserialization.
A1 = ray.remote(Actor)
a = A1.remote()
assert len(cache) == 1
cached_data_id = [id(x) for x in list(cache.items())[0]]
for x in range(10):
a = pickle.loads(pickle.dumps(a))
assert len(ray.actor._ActorClassMethodMetadata._cache) == 1
assert [id(x) for x in list(cache.items())[0]] == cached_data_id
@pytest.mark.skipif(client_test_enabled(), reason="internal api")
def test_actor_class_name(ray_start_regular):
@ray.remote
class Foo:
def __init__(self):
pass
Foo.remote()
g = ray._private.worker.global_worker.gcs_client
actor_keys = g.internal_kv_keys(
b"ActorClass", ray_constants.KV_NAMESPACE_FUNCTION_TABLE
)
assert len(actor_keys) == 1
actor_class_info = pickle.loads(
g.internal_kv_get(actor_keys[0], ray_constants.KV_NAMESPACE_FUNCTION_TABLE)
)
assert actor_class_info["class_name"] == "Foo"
assert "test_actor" in actor_class_info["module"]
def test_actor_exit_from_task(ray_start_regular_shared):
@ray.remote
class Actor:
def __init__(self):
print("Actor created")
def f(self):
return 0
@ray.remote
def f():
a = Actor.remote()
x_id = a.f.remote()
return [x_id]
x_id = ray.get(f.remote())[0]
print(ray.get(x_id)) # This should not hang.
def test_actor_init_error_propagated(ray_start_regular_shared):
@ray.remote
class Actor:
def __init__(self, error=False):
if error:
raise Exception("oops")
def foo(self):
return "OK"
actor = Actor.remote(error=False)
ray.get(actor.foo.remote())
actor = Actor.remote(error=True)
with pytest.raises(Exception, match=".*oops.*"):
ray.get(actor.foo.remote())
def test_keyword_args(ray_start_regular_shared):
@ray.remote
class Actor:
def __init__(self, arg0, arg1=1, arg2="a"):
self.arg0 = arg0
self.arg1 = arg1
self.arg2 = arg2
def get_values(self, arg0, arg1=2, arg2="b"):
return self.arg0 + arg0, self.arg1 + arg1, self.arg2 + arg2
actor = Actor.remote(0)
assert ray.get(actor.get_values.remote(1)) == (1, 3, "ab")
actor = Actor.remote(1, 2)
assert ray.get(actor.get_values.remote(2, 3)) == (3, 5, "ab")
actor = Actor.remote(1, 2, "c")
assert ray.get(actor.get_values.remote(2, 3, "d")) == (3, 5, "cd")
actor = Actor.remote(1, arg2="c")
assert ray.get(actor.get_values.remote(0, arg2="d")) == (1, 3, "cd")
assert ray.get(actor.get_values.remote(0, arg2="d", arg1=0)) == (1, 1, "cd")
actor = Actor.remote(1, arg2="c", arg1=2)
assert ray.get(actor.get_values.remote(0, arg2="d")) == (1, 4, "cd")
assert ray.get(actor.get_values.remote(0, arg2="d", arg1=0)) == (1, 2, "cd")
assert ray.get(actor.get_values.remote(arg2="d", arg1=0, arg0=2)) == (3, 2, "cd")
# Make sure we get an exception if the constructor is called
# incorrectly.
with pytest.raises(TypeError):
actor = Actor.remote()
with pytest.raises(TypeError):
actor = Actor.remote(0, 1, 2, arg3=3)
with pytest.raises(TypeError):
actor = Actor.remote(0, arg0=1)
# Make sure we get an exception if the method is called incorrectly.
actor = Actor.remote(1)
with pytest.raises(Exception):
ray.get(actor.get_values.remote())
def test_actor_name_conflict(ray_start_regular_shared):
@ray.remote
class A(object):
def foo(self):
return 100000
a = A.remote()
r = a.foo.remote()
results = [r]
for x in range(10):
@ray.remote
class A(object):
def foo(self):
return x
a = A.remote()
r = a.foo.remote()
results.append(r)
assert ray.get(results) == [100000, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
def test_variable_number_of_args(ray_start_regular_shared):
@ray.remote
class Actor:
def __init__(self, arg0, arg1=1, *args):
self.arg0 = arg0
self.arg1 = arg1
self.args = args
def get_values(self, arg0, arg1=2, *args):
return self.arg0 + arg0, self.arg1 + arg1, self.args, args
actor = Actor.remote(0)
assert ray.get(actor.get_values.remote(1)) == (1, 3, (), ())
actor = Actor.remote(1, 2)
assert ray.get(actor.get_values.remote(2, 3)) == (3, 5, (), ())
actor = Actor.remote(1, 2, "c")
assert ray.get(actor.get_values.remote(2, 3, "d")) == (3, 5, ("c",), ("d",))
actor = Actor.remote(1, 2, "a", "b", "c", "d")
assert ray.get(actor.get_values.remote(2, 3, 1, 2, 3, 4)) == (
3,
5,
("a", "b", "c", "d"),
(1, 2, 3, 4),
)
@ray.remote
class Actor:
def __init__(self, *args):
self.args = args
def get_values(self, *args):
return self.args, args
a = Actor.remote()
assert ray.get(a.get_values.remote()) == ((), ())
a = Actor.remote(1)
assert ray.get(a.get_values.remote(2)) == ((1,), (2,))
a = Actor.remote(1, 2)
assert ray.get(a.get_values.remote(3, 4)) == ((1, 2), (3, 4))
def test_no_args(ray_start_regular_shared):
@ray.remote
class Actor:
def __init__(self):
pass
def get_values(self):
pass
actor = Actor.remote()
assert ray.get(actor.get_values.remote()) is None
def test_no_constructor(ray_start_regular_shared):
@ray.remote
class Actor:
def get_values(self):
pass
actor = Actor.remote()
assert ray.get(actor.get_values.remote()) is None
def test_custom_classes(ray_start_regular_shared):
class Foo:
def __init__(self, x):
self.x = x
@ray.remote
class Actor:
def __init__(self, f2):
self.f1 = Foo(1)
self.f2 = f2
def get_values1(self):
return self.f1, self.f2
def get_values2(self, f3):
return self.f1, self.f2, f3
actor = Actor.remote(Foo(2))
results1 = ray.get(actor.get_values1.remote())
assert results1[0].x == 1
assert results1[1].x == 2
results2 = ray.get(actor.get_values2.remote(Foo(3)))
assert results2[0].x == 1
assert results2[1].x == 2
assert results2[2].x == 3
def test_actor_class_attributes(ray_start_regular_shared):
class Grandparent:
GRANDPARENT = 2
class Parent1(Grandparent):
PARENT1 = 6
class Parent2:
PARENT2 = 7
@ray.remote
class TestActor(Parent1, Parent2):
X = 3
@classmethod
def f(cls):
assert TestActor.GRANDPARENT == 2
assert TestActor.PARENT1 == 6
assert TestActor.PARENT2 == 7
assert TestActor.X == 3
return 4
def g(self):
assert TestActor.GRANDPARENT == 2
assert TestActor.PARENT1 == 6
assert TestActor.PARENT2 == 7
assert TestActor.f() == 4
return TestActor.X
t = TestActor.remote()
assert ray.get(t.g.remote()) == 3
def test_actor_static_attributes(ray_start_regular_shared):
class Grandparent:
GRANDPARENT = 2
@staticmethod
def grandparent_static():
assert Grandparent.GRANDPARENT == 2
return 1
class Parent1(Grandparent):
PARENT1 = 6
@staticmethod
def parent1_static():
assert Parent1.PARENT1 == 6
return 2
def parent1(self):
assert Parent1.PARENT1 == 6
class Parent2:
PARENT2 = 7
def parent2(self):
assert Parent2.PARENT2 == 7
@ray.remote
class TestActor(Parent1, Parent2):
X = 3
@staticmethod
def f():
assert TestActor.GRANDPARENT == 2
assert TestActor.PARENT1 == 6
assert TestActor.PARENT2 == 7
assert TestActor.X == 3
return 4
def g(self):
assert TestActor.GRANDPARENT == 2
assert TestActor.PARENT1 == 6
assert TestActor.PARENT2 == 7
assert TestActor.f() == 4
return TestActor.X
t = TestActor.remote()
assert ray.get(t.g.remote()) == 3
def test_decorator_args(ray_start_regular_shared):
# This is an invalid way of using the actor decorator.
with pytest.raises(Exception):
@ray.remote()
class Actor:
def __init__(self):
pass
# This is an invalid way of using the actor decorator.
with pytest.raises(Exception):
@ray.remote(invalid_kwarg=0) # noqa: F811
class Actor: # noqa: F811
def __init__(self):
pass
# This is an invalid way of using the actor decorator.
with pytest.raises(Exception):
@ray.remote(num_cpus=0, invalid_kwarg=0) # noqa: F811
class Actor: # noqa: F811
def __init__(self):
pass
# This is a valid way of using the decorator.
@ray.remote(num_cpus=1) # noqa: F811
class Actor: # noqa: F811
def __init__(self):
pass
# This is a valid way of using the decorator.
@ray.remote(num_gpus=1) # noqa: F811
class Actor: # noqa: F811
def __init__(self):
pass
# This is a valid way of using the decorator.
@ray.remote(num_cpus=1, num_gpus=1) # noqa: F811
class Actor: # noqa: F811
def __init__(self):
pass
def test_random_id_generation(ray_start_regular_shared):
@ray.remote
class Foo:
def __init__(self):
pass
# Make sure that seeding numpy does not interfere with the generation
# of actor IDs.
np.random.seed(1234)
random.seed(1234)
f1 = Foo.remote()
np.random.seed(1234)
random.seed(1234)
f2 = Foo.remote()
assert f1._actor_id != f2._actor_id
# Fix: "inheritence" -> "inheritance" in the skip reason shown in test reports.
@pytest.mark.skipif(client_test_enabled(), reason="differing inheritance structure")
def test_actor_inheritance(ray_start_regular_shared):
    """Actor classes may inherit from plain classes, but cannot be instantiated
    directly and cannot themselves be subclassed."""
    class NonActorBase:
        def __init__(self):
            pass
    # Test that an actor class can inherit from a non-actor class.
    @ray.remote
    class ActorBase(NonActorBase):
        def __init__(self):
            pass
    # Test that you can't instantiate an actor class directly.
    with pytest.raises(Exception, match="cannot be instantiated directly"):
        ActorBase()
    # Test that you can't inherit from an actor class.
    with pytest.raises(
        ActorClassInheritanceException,
        match="Inheriting from actor classes is not currently supported.",
    ):
        class Derived(ActorBase):
            def __init__(self):
                pass
def test_multiple_return_values(ray_start_regular_shared):
@ray.remote
class Foo:
def method0(self):
return 1
@ray.method(num_returns=1)
def method1(self):
return 1
@ray.method(num_returns=2)
def method2(self):
return 1, 2
@ray.method(num_returns=3)
def method3(self):
return 1, 2, 3
f = Foo.remote()
id0 = f.method0.remote()
assert ray.get(id0) == 1
id1 = f.method1.remote()
assert ray.get(id1) == 1
id2a, id2b = f.method2.remote()
assert ray.get([id2a, id2b]) == [1, 2]
id3a, id3b, id3c = f.method3.remote()
assert ray.get([id3a, id3b, id3c]) == [1, 2, 3]
def test_options_num_returns(ray_start_regular_shared):
@ray.remote
class Foo:
def method(self):
return 1, 2
f = Foo.remote()
obj = f.method.remote()
assert ray.get(obj) == (1, 2)
obj1, obj2 = f.method.options(num_returns=2).remote()
assert ray.get([obj1, obj2]) == [1, 2]
def test_options_name(ray_start_regular_shared):
@ray.remote
class Foo:
def method(self, name):
assert setproctitle.getproctitle() == f"ray::{name}"
f = Foo.remote()
ray.get(f.method.options(name="foo").remote("foo"))
ray.get(f.method.options(name="bar").remote("bar"))
def test_define_actor(ray_start_regular_shared):
@ray.remote
class Test:
def __init__(self, x):
self.x = x
def f(self, y):
return self.x + y
t = Test.remote(2)
assert ray.get(t.f.remote(1)) == 3
# Make sure that calling an actor method directly raises an exception.
with pytest.raises(Exception):
t.f(1)
def test_actor_deletion(ray_start_regular_shared):
# Make sure that when an actor handles goes out of scope, the actor
# destructor is called.
@ray.remote
class Actor:
def getpid(self):
return os.getpid()
a = Actor.remote()
pid = ray.get(a.getpid.remote())
a = None
wait_for_pid_to_exit(pid)
actors = [Actor.remote() for _ in range(10)]
pids = ray.get([a.getpid.remote() for a in actors])
a = None
actors = None
[wait_for_pid_to_exit(pid) for pid in pids]
def test_actor_method_deletion(ray_start_regular_shared):
@ray.remote
class Actor:
def method(self):
return 1
# Make sure that if we create an actor and call a method on it
# immediately, the actor doesn't get killed before the method is
# called.
assert ray.get(Actor.remote().method.remote()) == 1
def test_distributed_actor_handle_deletion(ray_start_regular_shared):
@ray.remote
class Actor:
def method(self):
return 1
def getpid(self):
return os.getpid()
@ray.remote
def f(actor, signal):
ray.get(signal.wait.remote())
return ray.get(actor.method.remote())
SignalActor = create_remote_signal_actor(ray)
signal = SignalActor.remote()
a = Actor.remote()
pid = ray.get(a.getpid.remote())
# Pass the handle to another task that cannot run yet.
x_id = f.remote(a, signal)
# Delete the original handle. The actor should not get killed yet.
del a
# Once the task finishes, the actor process should get killed.
ray.get(signal.send.remote())
assert ray.get(x_id) == 1
wait_for_pid_to_exit(pid)
def test_multiple_actors(ray_start_regular_shared):
@ray.remote
class Counter:
def __init__(self, value):
self.value = value
def increase(self):
self.value += 1
return self.value
def reset(self):
self.value = 0
num_actors = 5
num_increases = 50
# Create multiple actors.
actors = [Counter.remote(i) for i in range(num_actors)]
results = []
# Call each actor's method a bunch of times.
for i in range(num_actors):
results += [actors[i].increase.remote() for _ in range(num_increases)]
result_values = ray.get(results)
for i in range(num_actors):
v = result_values[(num_increases * i) : (num_increases * (i + 1))]
assert v == list(range(i + 1, num_increases + i + 1))
# Reset the actor values.
[actor.reset.remote() for actor in actors]
# Interweave the method calls on the different actors.
results = []
for j in range(num_increases):
results += [actor.increase.remote() for actor in actors]
result_values = ray.get(results)
for j in range(num_increases):
v = result_values[(num_actors * j) : (num_actors * (j + 1))]
assert v == num_actors * [j + 1]
def test_inherit_actor_from_class(ray_start_regular_shared):
# Make sure we can define an actor by inheriting from a regular class.
# Note that actors cannot inherit from other actors.
class Foo:
def __init__(self, x):
self.x = x
def f(self):
return self.x
def g(self, y):
return self.x + y
@ray.remote
class Actor(Foo):
def __init__(self, x):
Foo.__init__(self, x)
def get_value(self):
return self.f()
actor = Actor.remote(1)
assert ray.get(actor.get_value.remote()) == 1
assert ray.get(actor.g.remote(5)) == 6
def test_get_non_existing_named_actor(ray_start_regular_shared):
with pytest.raises(ValueError):
_ = ray.get_actor("non_existing_actor")
# https://github.com/ray-project/ray/issues/17843
def test_actor_namespace(ray_start_regular_shared):
@ray.remote
class Actor:
def f(self):
return "ok"
a = Actor.options(name="foo", namespace="f1").remote()
with pytest.raises(ValueError):
ray.get_actor(name="foo", namespace="f2")
a1 = ray.get_actor(name="foo", namespace="f1")
assert ray.get(a1.f.remote()) == "ok"
del a
def test_named_actor_cache(ray_start_regular_shared):
"""Verify that named actor cache works well."""
@ray.remote(max_restarts=-1)
class Counter:
def __init__(self):
self.count = 0
def inc_and_get(self):
self.count += 1
return self.count
a = Counter.options(name="hi").remote()
first_get = ray.get_actor("hi")
assert ray.get(first_get.inc_and_get.remote()) == 1
second_get = ray.get_actor("hi")
assert ray.get(second_get.inc_and_get.remote()) == 2
ray.kill(a, no_restart=True)
def actor_removed():
try:
ray.get_actor("hi")
return False
except ValueError:
return True
wait_for_condition(actor_removed)
get_after_restart = Counter.options(name="hi").remote()
assert ray.get(get_after_restart.inc_and_get.remote()) == 1
get_by_name = ray.get_actor("hi")
assert ray.get(get_by_name.inc_and_get.remote()) == 2
def test_named_actor_cache_via_another_actor(ray_start_regular_shared):
"""Verify that named actor cache works well with another actor."""
@ray.remote(max_restarts=0)
class Counter:
def __init__(self):
self.count = 0
def inc_and_get(self):
self.count += 1
return self.count
# The third actor to get named actor. To indicates this cache doesn't
# break getting from the third party.
@ray.remote(max_restarts=0)
class ActorGetter:
def get_actor_count(self, name):
actor = ray.get_actor(name)
return ray.get(actor.inc_and_get.remote())
# Start a actor and get it by name in driver.
a = Counter.options(name="foo").remote()
first_get = ray.get_actor("foo")
assert ray.get(first_get.inc_and_get.remote()) == 1
# Start another actor as the third actor to get named actor.
actor_getter = ActorGetter.remote()
assert ray.get(actor_getter.get_actor_count.remote("foo")) == 2
ray.kill(a, no_restart=True)
def actor_removed():
try:
ray.get_actor("foo")
return False
except ValueError:
return True
wait_for_condition(actor_removed)
# Restart the named actor.
get_after_restart = Counter.options(name="foo").remote()
assert ray.get(get_after_restart.inc_and_get.remote()) == 1
# Get the named actor from the third actor again.
assert ray.get(actor_getter.get_actor_count.remote("foo")) == 2
# Get the named actor by name in driver again.
get_by_name = ray.get_actor("foo")
assert ray.get(get_by_name.inc_and_get.remote()) == 3
def test_wrapped_actor_handle(ray_start_regular_shared):
@ray.remote
class B:
def doit(self):
return 2
@ray.remote
class A:
def __init__(self):
self.b = B.remote()
def get_actor_ref(self):
return [self.b]
a = A.remote()
b_list = ray.get(a.get_actor_ref.remote())
assert ray.get(b_list[0].doit.remote()) == 2
@pytest.mark.skip("This test is just used to print the latency of creating 100 actors.")
def test_actor_creation_latency(ray_start_regular_shared):
# This test is just used to test the latency of actor creation.
@ray.remote
class Actor:
def get_value(self):
return 1
start = datetime.datetime.now()
actor_handles = [Actor.remote() for _ in range(100)]
actor_create_time = datetime.datetime.now()
for actor_handle in actor_handles:
ray.get(actor_handle.get_value.remote())
end = datetime.datetime.now()
print(
"actor_create_time_consume = {}, total_time_consume = {}".format(
actor_create_time - start, end - start
)
)
@pytest.mark.parametrize(
"exit_condition",
[
# "out_of_scope", TODO(edoakes): enable this once fixed.
"__ray_terminate__",
"ray.actor.exit_actor",
"ray.kill",
],
)
def test_atexit_handler(ray_start_regular_shared, exit_condition):
@ray.remote
class A:
def __init__(self, tmpfile, data):
import atexit
def f(*args, **kwargs):
with open(tmpfile, "w") as f:
f.write(data)
f.flush()
atexit.register(f)
def ready(self):
pass
def exit(self):
ray.actor.exit_actor()
data = "hello"
tmpfile = tempfile.NamedTemporaryFile("w+", suffix=".tmp", delete=False)
tmpfile.close()
a = A.remote(tmpfile.name, data)
ray.get(a.ready.remote())
if exit_condition == "out_of_scope":
del a
elif exit_condition == "__ray_terminate__":
ray.wait([a.__ray_terminate__.remote()])
elif exit_condition == "ray.actor.exit_actor":
ray.wait([a.exit.remote()])
elif exit_condition == "ray.kill":
ray.kill(a)
else:
assert False, "Unrecognized condition"
def check_file_written():
with open(tmpfile.name, "r") as f:
if f.read() == data:
return True
return False
# ray.kill() should not trigger atexit handlers, all other methods should.
if exit_condition == "ray.kill":
assert not check_file_written()
else:
wait_for_condition(check_file_written)
os.unlink(tmpfile.name)
def test_actor_ready(ray_start_regular_shared):
@ray.remote
class Actor:
pass
actor = Actor.remote()
with pytest.raises(TypeError):
# Method can't be called directly
actor.__ray_ready__()
assert ray.get(actor.__ray_ready__.remote())
def test_return_actor_handle_from_actor(ray_start_regular_shared):
@ray.remote
class Inner:
def ping(self):
return "pong"
@ray.remote
class Outer:
def __init__(self):
self.inner = Inner.remote()
def get_ref(self):
return self.inner
outer = Outer.remote()
inner = ray.get(outer.get_ref.remote())
assert ray.get(inner.ping.remote()) == "pong"
def test_actor_autocomplete(ray_start_regular_shared):
"""
Test that autocomplete works with actors by checking that the builtin dir()
function works as expected.
"""
@ray.remote
class Foo:
def method_one(self) -> None:
pass
class_calls = [fn for fn in dir(Foo) if not fn.startswith("_")]
assert set(class_calls) == {"method_one", "options", "remote", "bind"}
f = Foo.remote()
methods = [fn for fn in dir(f) if not fn.startswith("_")]
assert methods == ["method_one"]
all_methods = set(dir(f))
assert all_methods == {
"__init__",
"method_one",
"__ray_ready__",
"__ray_terminate__",
}
method_options = [fn for fn in dir(f.method_one) if not fn.startswith("_")]
assert set(method_options) == {"options", "remote"}
def test_actor_mro(ray_start_regular_shared):
@ray.remote
class Foo:
def __init__(self, x):
self.x = x
@classmethod
def factory_f(cls, x):
return cls(x)
def get_x(self):
return self.x
obj = Foo.factory_f(1)
assert obj.get_x() == 1
@pytest.mark.skipif(client_test_enabled(), reason="differing deletion behaviors")
def test_keep_calling_get_actor(ray_start_regular_shared):
"""
Test keep calling get_actor.
"""
@ray.remote
class Actor:
def hello(self):
return "hello"
actor = Actor.options(name="ABC").remote()
assert ray.get(actor.hello.remote()) == "hello"
for _ in range(10):
actor = ray.get_actor("ABC")
assert ray.get(actor.hello.remote()) == "hello"
del actor
# Verify the actor is killed
def actor_removed():
try:
ray.get_actor("ABC")
return False
except ValueError:
return True
wait_for_condition(actor_removed)
@pytest.mark.skipif(client_test_enabled(), reason="internal api")
@pytest.mark.parametrize(
    "actor_type",
    [
        "actor",
        "threaded_actor",
        "async_actor",
    ],
)
def test_actor_parent_task_correct(shutdown_only, actor_type):
    """Verify the parent task id is correct for all actors."""
    @ray.remote
    def child():
        pass
    @ray.remote
    class ChildActor:
        def child(self):
            pass
    def parent_func(child_actor):
        # Launch one actor task and one normal task, then ask the core worker
        # which child task ids are pending under the current task.
        core_worker = ray._private.worker.global_worker.core_worker
        refs = [child_actor.child.remote(), child.remote()]
        expected = {ref.task_id().hex() for ref in refs}
        task_id = ray.get_runtime_context().task_id
        children_task_ids = core_worker.get_pending_children_task_ids(task_id)
        actual = {task_id.hex() for task_id in children_task_ids}
        ray.get(refs)
        return expected, actual
    if actor_type == "actor":
        @ray.remote
        class Actor:
            def parent(self, child_actor):
                return parent_func(child_actor)
        @ray.remote
        class GeneratorActor:
            def parent(self, child_actor):
                yield parent_func(child_actor)
    if actor_type == "threaded_actor":
        @ray.remote(max_concurrency=5)
        class Actor:  # noqa
            def parent(self, child_actor):
                return parent_func(child_actor)
        @ray.remote(max_concurrency=5)
        class GeneratorActor:  # noqa
            def parent(self, child_actor):
                yield parent_func(child_actor)
    if actor_type == "async_actor":
        @ray.remote
        class Actor:  # noqa
            async def parent(self, child_actor):
                return parent_func(child_actor)
        @ray.remote
        class GeneratorActor:  # noqa
            async def parent(self, child_actor):
                yield parent_func(child_actor)
    # Verify a regular actor.
    actor = Actor.remote()
    child_actor = ChildActor.remote()
    # Fix: unpack in the order parent_func returns (expected, actual); the
    # original swapped the names — harmless only because the == assert is
    # symmetric, but misleading to read.
    expected, actual = ray.get(actor.parent.remote(child_actor))
    assert actual == expected
    # Verify a generator actor.
    actor = GeneratorActor.remote()
    child_actor = ChildActor.remote()
    gen = actor.parent.options(num_returns="streaming").remote(child_actor)
    for ref in gen:
        expected, actual = ray.get(ref)
        assert actual == expected
@pytest.mark.skipif(client_test_enabled(), reason="internal api")
def test_parent_task_correct_concurrent_async_actor(shutdown_only):
    """Make sure when there are concurrent async tasks
    the parent -> children task ids are properly mapped.
    """
    sig = SignalActor.remote()

    @ray.remote
    def child(sig):
        # Block until the signal fires so tasks stay pending concurrently.
        ray.get(sig.wait.remote())

    @ray.remote
    class AsyncActor:
        async def f(self, sig):
            refs = [child.remote(sig) for _ in range(2)]
            core_worker = ray._private.worker.global_worker.core_worker
            expected = {ref.task_id().hex() for ref in refs}
            task_id = ray.get_runtime_context().task_id
            children_task_ids = core_worker.get_pending_children_task_ids(task_id)
            actual = {task_id.hex() for task_id in children_task_ids}
            await sig.wait.remote()
            ray.get(refs)
            return actual, expected

    a = AsyncActor.remote()
    # Run 20 concurrent tasks.
    refs = [a.f.remote(sig) for _ in range(20)]
    # Fire the signal so the blocked children (and thus all tasks) finish.
    ray.get(sig.send.remote())
    # Verify children task mapping is correct.
    result = ray.get(refs)
    for actual, expected in result:
        # Bug fix: the original `assert actual, expected` only tested the
        # truthiness of `actual` (with `expected` as the failure message).
        assert actual == expected
if __name__ == "__main__":
    # Run tests in parallel workers when PARALLEL_CI is set; serially otherwise.
    if os.environ.get("PARALLEL_CI"):
        sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
    else:
        sys.exit(pytest.main(["-sv", __file__]))
|
# ---- Practica 05: simple interactive calculator ----
print("----------------------------------")
print("Practica 05_calculator.py")
print("----------------------------------")
print("Introduce el primer Número: ")
num1 = int(input())
print("Introduce el Segundo Número: ")
num2 = int(input())


def suma():
    """Return num1 + num2."""
    return num1 + num2


def resta():
    """Return num1 - num2."""
    return num1 - num2


def multiplicacion():
    """Return num1 * num2 (name typo 'multiplicacon' fixed; it was
    referenced as 'multiplicacion' below, raising NameError)."""
    return num1 * num2


def division():
    """Return the floor division num1 // num2.

    NOTE(review): '//' truncates; use '/' if a decimal result is wanted.
    Raises ZeroDivisionError when num2 == 0.
    """
    return num1 // num2


def exponencial():
    """Return num1 ** num2."""
    return num1 ** num2


print("¿Que operación quiere hacer?")
print(" *Presione 1 para suma ")
print(" *Presione 2 para resta ")
print(" *Presione 3 para multiplicación ")
print(" *Presione 4 para división ")
print(" *Presione 5 para Exponencial ")
elec = int(input())
print("----------------------------------")
# Bug fix: the original concatenated str(funcion) -- the function object's
# repr -- instead of calling it; every branch now invokes the operation.
if elec == 1:
    print("La suma total es: " + str(suma()))
elif elec == 2:
    print("La resta Total es: " + str(resta()))
elif elec == 3:
    print("La multriplicación es: " + str(multiplicacion()))
elif elec == 4:
    print("El resultado de la division es: " + str(division()))
elif elec == 5:
    print("El resultado exponencial es: " + str(exponencial()))
else:
    print("No valido")
print("----------------------------------")
# Store computed results (the original stored function objects and used the
# undefined misspelled name).
Resultado = {"suma": suma(), "resta": resta(), "multiplicacion": multiplicacion()}
|
from rest_framework import status, viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticatedOrReadOnly, IsAuthenticated
from profiles_api import permissions
from . import serializers
from .models import ProfileFeedItem, UserProfile
class HelloApiView(APIView):
    """Test API View demonstrating the raw APIView handler methods."""

    serializer_class = serializers.HelloSerializer

    def get(self, request, format=None):
        """Return a static list describing APIView features."""
        features = [
            'Uses HTTPS as function',
            'Is similar to a traditional Django View',
            'Tets 1',
            'Test 2',
        ]
        # Response payloads must be JSON-serializable (list or dict).
        payload = {'message': 'Hello', 'an_apiview': features}
        return Response(data=payload, status=status.HTTP_200_OK)

    def post(self, request):
        """Create a hello message for our name"""
        serializer = self.serializer_class(data=request.data)
        if not serializer.is_valid():
            # Guard clause: report validation problems up front.
            return Response(
                data={
                    'error': serializer.errors,
                    'mess': serializer.error_messages,
                },
                status=status.HTTP_400_BAD_REQUEST,
            )
        greeting = f"Hello {serializer.validated_data.get('name')}"
        return Response(data={'message': greeting}, status=status.HTTP_200_OK)

    def put(self, request, pk=None):
        """Handle updating an object."""
        return Response({'method': 'PUT'})

    def patch(self, request, pk=None):
        """Handle updating a partial of an object."""
        return Response({'method': 'PATCH'})

    def delete(self, request, pk=None):
        """Handle deleting an object."""
        return Response({'method': 'DELETE'})
class HelloViewSet(viewsets.ViewSet):
    """Text API ViewSet demonstrating the ViewSet action methods."""

    serializer_class = serializers.HelloSerializer

    def list(self, request):
        """Return a static list describing ViewSet features."""
        a_viewset = [
            'Uses HTTPS as function',
            'Is similar to a traditional Django View',
            'Tets 1',
            'Test 2'
        ]
        return Response({'message': 'Hello', 'viewset': a_viewset})

    def create(self, request):
        """Create a hello message from the validated name."""
        serializer = self.serializer_class(data=request.data)
        if serializer.is_valid():
            name = serializer.validated_data.get('name')
            mess = f"Hello {name}"
            return Response({'message': mess})
        else:
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST
            )

    def retrieve(self, request, pk=None):
        """Handle getting an object by its ID."""
        return Response({'method': 'GET'})

    def update(self, request, pk=None):
        """Handle updating an object."""
        return Response({'method': 'PUT'})

    def partial_update(self, request, pk=None):
        """Handle partially updating an object."""
        return Response({'method': 'PATCH'})

    def destroy(self, request, pk=None):
        """Handle removing an object.

        Bug fix: previously echoed 'PATCH'; destroy maps to HTTP DELETE.
        """
        return Response({'method': 'DELETE'})
class UserProfileViewSet(viewsets.ModelViewSet):
    """Handle creating and updating profile"""

    serializer_class = serializers.UserProfileSerializer
    queryset = UserProfile.objects.all()
    # Token auth; only the profile's owner may update it.
    authentication_classes = (TokenAuthentication,)
    permission_classes = (permissions.UpdateOwnProfile,)
    # Allow ?search= lookups against name and email.
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name', 'email')
class UserLoginApiView(ObtainAuthToken):
    """Handle creating user authentication tokens"""

    # Reuse DRF's configured default renderers so the endpoint is browsable
    # (ObtainAuthToken does not set renderer_classes itself).
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class UserProfileFeedViewSet(viewsets.ModelViewSet):
    """Handle CRUD Feed Item"""

    authentication_classes = (TokenAuthentication,)
    serializer_class = serializers.ProfileFeedItemSerializer
    queryset = ProfileFeedItem.objects.all()
    # Must be authenticated; writes further restricted by UpdateOwnStatus.
    permission_classes = (permissions.UpdateOwnStatus,
                          IsAuthenticated)

    def perform_create(self, serializer):
        """Set the user profile to the logged in user"""
        serializer.save(user_profile=self.request.user)
|
import datetime
from django.db import models
try:
from datatype_tools.lib import * # noqa
datatype_tools = True
except ImportError as e:
datatype_tools = False
from django_dunder import app_settings
from django_dunder.mixins import DunderModel
from django_dunder._formatter import _get_one_invoke
from django_dunder._register import PY3
from django_fake_model import models as f
# Expected rendering of numeric fields varies with the Python version and
# with whether datatype_tools is installed (it supplies a .round() helper).
if PY3:
    round_expected_id_alt = '1'
    if datatype_tools:
        round_expected_float = '5.13'
    else:
        round_expected_float = '5'
else:
    round_expected_id_alt = '1.0'  # FIXME!
    round_expected_float = '5.0'
def test_invoke_parse_brackets():
    """Bracket-style invocation specs parse to (name, args, remainder)."""
    cases = [
        ('one()', ('one', (), None)),
        ('one', ('one', (), None)),
        ('one(a)', ('one', ('a',), None)),
        ('one(1)', ('one', (1,), None)),
        ('one(a, 1)', ('one', ('a', 1), None)),
        ('one().two()', ('one', (), 'two()')),
        ('one(a).two(b)', ('one', ('a',), 'two(b)')),
        ('one(a). two(b)', ('one', ('a',), 'two(b)')),
        ('one(a), two(b)', ('one', ('a',), 'two(b)')),
        ('one(a)__two(b)', ('one', ('a',), 'two(b)')),
    ]
    for spec, expected in cases:
        name, args, remainder = _get_one_invoke(spec)
        assert (name, tuple(args), remainder) == expected
def test_invoke_parse_underscore():
    """Underscore-style invocation specs parse to (name, args, remainder)."""
    cases = [
        ('one_a', ('one', ('a',), None)),
        ('one__two', ('one', (), 'two')),
        ('one__two', ('one', (), 'two')),
        ('one_a__two_b', ('one', ('a',), 'two_b')),
        ('one_a_1__two_b', ('one', ('a', 1), 'two_b')),
    ]
    for spec, expected in cases:
        name, args, remainder = _get_one_invoke(spec)
        assert (name, tuple(args), remainder) == expected
def test_invoke_parse_mixed():
    """Mixed bracket/underscore specs parse to (name, args, remainder)."""
    cases = [
        ('one_a__two(b)', ('one', ('a',), 'two(b)')),
        ('one(a)__two_b', ('one', ('a',), 'two_b')),
    ]
    for spec, expected in cases:
        name, args, remainder = _get_one_invoke(spec)
        assert (name, tuple(args), remainder) == expected
class FmtDefault(DunderModel, f.FakeModel):
    """Fake model used to exercise the dunder formatting settings."""

    name1 = models.TextField(null=True, blank=True)
    name2 = models.TextField(null=True, blank=True)
    decimal1 = models.DecimalField(null=True, blank=True, decimal_places=3)
    date1 = models.DateField(null=True, blank=True)
@FmtDefault.fake_me
def test_fmt_str_item():
    """STR_ATTR_FMT can index/slice the value with {value[i]} syntax."""
    item = FmtDefault.objects.create(name1='1234567890')
    app_settings.STR_ATTR_FMT = '{name}={value[4]}'
    assert str(item) == '<FmtDefault: id=1, name1=5>'
    app_settings.STR_ATTR_FMT = '{name}={value[0:5]}'
    try:
        assert str(item) == '<FmtDefault: id=1, name1=12345>'
    except ValueError:
        # Slice syntax inside a format spec is only supported on PY3.
        if PY3:
            raise
    finally:
        # Always restore the global default so later tests are unaffected.
        app_settings.STR_ATTR_FMT = '{name}={value}'
    # TODO: Also try value[(0, 5)] and explicit slice to get it working on PY2
@FmtDefault.fake_me
def test_fmt_str_round():
    """{value.round} / {value.round(1)} round numeric fields in __str__."""
    item = FmtDefault.objects.create(name1='abc', decimal1=5.129)
    app_settings.STR_ATTR_FMT = '{name}={value.round}'
    try:
        assert str(item) == '<FmtDefault: id=1, name1=abc, decimal1={}>'.format(
            round_expected_float)
    finally:
        # Restore the global default for subsequent tests.
        app_settings.STR_ATTR_FMT = '{name}={value}'
    app_settings.STR_ATTR_FMT = '{name}={value.round(1)}'
    try:
        assert str(item) == '<FmtDefault: id=1, name1=abc, decimal1=5.1>'
    finally:
        app_settings.STR_ATTR_FMT = '{name}={value}'
@FmtDefault.fake_me
def test_fmt_str_title():
    """{value.title} applies title-casing to the field value in __str__."""
    item = FmtDefault.objects.create(name1='abc')
    app_settings.STR_ATTR_FMT = '{name}={value.title}'
    try:
        assert str(item) == '<FmtDefault: id=1, name1=Abc>'
    finally:
        # Restore the global default for subsequent tests.
        app_settings.STR_ATTR_FMT = '{name}={value}'
@FmtDefault.fake_me
def test_fmt_str_title_func():
    """Call syntax {value.title()} behaves the same as {value.title}."""
    item = FmtDefault.objects.create(name1='abc')
    app_settings.STR_ATTR_FMT = '{name}={value.title()}'
    try:
        assert str(item) == '<FmtDefault: id=1, name1=Abc>'
    finally:
        # Restore the global default for subsequent tests.
        app_settings.STR_ATTR_FMT = '{name}={value}'
@FmtDefault.fake_me
def test_fmt_str_multi():
    """Chained transforms combine via '__', '()' chains and ', ' lists."""
    item = FmtDefault.objects.create(name1='abc', decimal1=5.129)
    app_settings.STR_ATTR_FMT = '{name}={value.title__round}'
    try:
        assert str(item) == '<FmtDefault: id=1, name1=Abc, decimal1={}>'.format(
            round_expected_float)
    finally:
        app_settings.STR_ATTR_FMT = '{name}={value}'
    # With round applied first, the integer id column is affected too.
    app_settings.STR_ATTR_FMT = '{name}={value.round__title}'
    try:
        assert str(item) == '<FmtDefault: id={}, name1=Abc, decimal1={}>'.format(
            round_expected_id_alt, round_expected_float)
    finally:
        app_settings.STR_ATTR_FMT = '{name}={value}'
    app_settings.STR_ATTR_FMT = '{name}={value.round()__title()}'
    try:
        assert str(item) == '<FmtDefault: id={}, name1=Abc, decimal1={}>'.format(
            round_expected_id_alt, round_expected_float)
    finally:
        app_settings.STR_ATTR_FMT = '{name}={value}'
    app_settings.STR_ATTR_FMT = '{name}={value.round(), title()}'
    try:
        assert str(item) == '<FmtDefault: id={}, name1=Abc, decimal1={}>'.format(
            round_expected_id_alt, round_expected_float)
    finally:
        app_settings.STR_ATTR_FMT = '{name}={value}'
@FmtDefault.fake_me
def test_fmt_str_multi_dot():
    """Dot-chained call syntax {value.round().title()} also works."""
    item = FmtDefault.objects.create(name1='abc', decimal1=5.129)
    app_settings.STR_ATTR_FMT = '{name}={value.round().title()}'
    try:
        assert str(item) == '<FmtDefault: id=1, name1=Abc, decimal1={}>'.format(
            round_expected_float)
    finally:
        # Restore the global default for subsequent tests.
        app_settings.STR_ATTR_FMT = '{name}={value}'
@FmtDefault.fake_me
def test_fmt_str_chain():
    """Three-step chains apply left to right; argument case matters."""
    item = FmtDefault.objects.create(name1='abcdefghij', decimal1=5.129)
    # title first, so replace_a_f finds no lowercase 'a' to replace.
    app_settings.STR_ATTR_FMT = '{name}={value.title__round__replace_a_f}'
    try:
        assert str(item) == '<FmtDefault: id={}, name1=Abcdefghij, decimal1={}>'.format(
            round_expected_id_alt, round_expected_float)
    finally:
        app_settings.STR_ATTR_FMT = '{name}={value}'
    # replace_A_f replaces the title-cased 'A' with 'f'.
    app_settings.STR_ATTR_FMT = '{name}={value.title__round__replace_A_f}'
    try:
        assert str(item) == '<FmtDefault: id={}, name1=fbcdefghij, decimal1={}>'.format(
            round_expected_id_alt, round_expected_float)
    finally:
        app_settings.STR_ATTR_FMT = '{name}={value}'
    # replace first, then title-case the result.
    app_settings.STR_ATTR_FMT = '{name}={value.replace_a_f__round__title}'
    try:
        assert str(item) == '<FmtDefault: id={}, name1=Fbcdefghij, decimal1={}>'.format(
            round_expected_id_alt, round_expected_float)
    finally:
        app_settings.STR_ATTR_FMT = '{name}={value}'
@FmtDefault.fake_me
def test_fmt_str_ellipsis():
    """ellipsis_10 truncates long values to 7 chars plus '...'."""
    item = FmtDefault.objects.create(name1='a' * 200, decimal1=5.129)
    app_settings.STR_ATTR_FMT = '{name}={value.ellipsis_10__round}'
    try:
        assert str(item) == (
            '<FmtDefault: id=1, name1={}..., decimal1={}>'.format(
                'a' * 7, round_expected_float))
    finally:
        app_settings.STR_ATTR_FMT = '{name}={value}'
    # round first also reformats the integer id column.
    app_settings.STR_ATTR_FMT = '{name}={value.round__ellipsis_10}'
    try:
        assert str(item) == (
            '<FmtDefault: id={}, name1={}..., decimal1={}>'.format(
                round_expected_id_alt, 'a' * 7, round_expected_float))
    finally:
        app_settings.STR_ATTR_FMT = '{name}={value}'
@FmtDefault.fake_me
def test_fmt_repr_ellipsis():
    """Same ellipsis/round chains work for __repr__ via REPR_ATTR_FMT."""
    item = FmtDefault.objects.create(name1='a' * 200, decimal1=5.129)
    app_settings.REPR_ATTR_FMT = '{name}={value.ellipsis_10__round!r}'
    try:
        assert repr(item) == (
            "FmtDefault(id=1, name1='{}...', decimal1={})".format(
                'a' * 7, round_expected_float))
    finally:
        app_settings.REPR_ATTR_FMT = '{name}={value!r}'
    app_settings.REPR_ATTR_FMT = '{name}={value.round__ellipsis_10!r}'
    try:
        assert repr(item) == (
            "FmtDefault(id={}, name1='{}...', decimal1={})".format(
                round_expected_id_alt, 'a' * 7, round_expected_float))
    finally:
        app_settings.REPR_ATTR_FMT = '{name}={value!r}'
if datatype_tools:
    # Only defined when datatype_tools is installed; it supplies format_date().
    @FmtDefault.fake_me
    def test_fmt_str_datatype_tools():
        """Chained title + format_date(yyyymmdd) on a DateField."""
        item = FmtDefault.objects.create(name1='a', date1=datetime.date.today())
        app_settings.STR_ATTR_FMT = '{name}={value.title__format_date(yyyymmdd)}'
        try:
            assert str(item) == (
                '<FmtDefault: id=1, name1=A, date1={year}-{month:02d}-{day:02d}>'.format(
                    day=item.date1.day, month=item.date1.month, year=item.date1.year))
        finally:
            # Restore the global default for subsequent tests.
            app_settings.STR_ATTR_FMT = '{name}={value}'
|
# -*- coding: ascii -*-
"""Exercise 94: random password generator."""
import random
import os, sys


def generate_password():
    """Return a random password of 7-10 characters.

    The length is chosen uniformly from 7..10 and each character is drawn
    from ASCII codes 33..126 inclusive, as the exercise requires.
    """
    length = random.randrange(7, 11)
    return "".join(chr(random.randrange(33, 127)) for _ in range(length))


if __name__ == "__main__":
    # The exercise requires the main program to run only when this file is
    # executed directly, not when imported (the original always ran).
    print("Exercise 94:Random Password")
    print("Write a function that generates a random password. The password should have a random length of between 7 and 10 characters. Each character should be randomly selected from positions 33 to 126 in the ASCII table. Your function will not take any parameters. It will return the randomly generated password as its only result. Display the randomly generated password in your file's main program. Your main program should only run when your solution has not been imported into another file")
    print("The random Password is: ", generate_password())
|
import random
def generateAgent():
    """Return a randomly assembled browser user-agent string.

    Weighting: Mozilla/5.0 over Mozilla/4.0 (4:2); Windows over Mac over
    Linux (8:1:1); Chrome over Safari over Firefox (6:2:1).
    """
    m5 = ["Mozilla/5.0"]
    m4 = ["Mozilla/4.0"]
    mozillaList = m5 * 4 + m4 * 2
    mozillaFinal = random.choice(mozillaList)
    win = "(Windows NT " + str(random.randint(3, 10)) + "." + str(random.randint(3, 10)) + "; Win64; x64)"
    mac = "(Macintosh; Intel Mac OS X " + str(random.randint(3, 10)) + "_" + str(random.randint(3, 10)) + "_" + str(random.randint(3, 10)) + ")"
    linux = "(X11; Linux x86_64)"
    osList = [win] * 8 + [mac] + [linux]
    osFinal = random.choice(osList)
    chromeList = ["Chrome/70.0.3538.77 Safari/537.36", "Chrome/44.0.2403.155 Safari/537.36", "Chrome/41.0.2228.0 Safari/537.36",
                  "Chrome/41.0.2227.1 Safari/537.36", "Chrome/41.0.2227.0 Safari/537.36", "Chrome/41.0.2226.0 Safari/537.36",
                  "Chrome/41.0.2225.0 Safari/537.36", "Chrome/41.0.2224.3 Safari/537.36", "Chrome/40.0.2214.93 Safari/537.36", "Chrome/37.0.2062.124 Safari/537.36"]
    chrome = random.choice(chromeList)
    safariList = ["Safari/7046A194A", "Safari/8536.25", "Safari/534.57.2", "Safari/534.53.10", "Safari/7534.48.3", "Safari/533.21.1"]
    safari = random.choice(safariList)
    firefox = "Firefox/" + str(random.randint(24, 65)) + ".0"
    # Fix: '[chrome] * 0' made the chrome branch dead code while still
    # computing it; restore the 6:2:1 weighting from the original line.
    browserList = [chrome] * 6 + [safari] * 2 + [firefox]
    browserFinal = random.choice(browserList)
    finalUser = mozillaFinal + " " + osFinal + " (KHTML, like Gecko) " + browserFinal
    return finalUser
def main():
    """Print ten sample user-agent strings."""
    for _ in range(10):
        print(generateAgent())


if __name__ == '__main__':
    main()
|
from django.db import models
from alumnos.models import AlumnoCurso
# Create your models here.
class Asistencia(models.Model):
    """Attendance record for a student enrolled in a course (AlumnoCurso)."""

    # Date of the class session.
    fecha = models.DateField(blank=True)
    # Attendance value; a float, so partial attendance can be recorded.
    asistio = models.FloatField(blank=True)
    descripcion = models.CharField(max_length=150, blank=True, null=True)
    fecha_creacion = models.DateTimeField(auto_now_add=True, blank=True)
    # Deleting the enrollment cascades to its attendance records.
    alumno_curso = models.ForeignKey(
        to=AlumnoCurso, on_delete=models.CASCADE, blank=True
    )

    def __str__(self):
        # "<student name> <date>" -- used in the admin and shell listings.
        return self.alumno_curso.alumno.nombre + " " + str(self.fecha)

    class Meta:
        # Custom permissions consumed by the API's viewset actions.
        permissions = [
            ("list_asistencia", "Puede listar asistencias"),
            (
                "create_multiple_asistencia",
                "Puede crear multiples asistencias",
            ),
            (
                "destroy_curso_dia_asistencia",
                "Puede borrar multiples asistencias",
            ),
            (
                "porcentaje_asistencia",
                "Puede obtener el porcentaje de asistencias",
            ),
        ]
|
#CSci 127 Teaching Staff
#January 2021
#A template for a program that draws nested polygons
#Modified by: --- Your Name Here! ---
#Email: --- Your Email Here! ---
import turtle
def setUp(t, dist, col):
    """
    Takes three parameters, a turtle, t, the distance, dist,
    to move the turtle and a color, col, to set the turtle's color.
    DO NOT CHANGE THE CODE IN THIS FUNCTION
    """
    t.penup()          # move without drawing
    t.forward(dist)
    t.pendown()        # resume drawing
    t.color(col)
def nestedPolygon(t, length, sides):
    """
    Takes three parameters: a turtle a side length and the number of sides.
    The function does the following: if the length is greater than 10,
    it repeats sides times: moves forward that length, turns 360/sides degrees.
    When that is completed, it calls nestedPolygon(t, length/2, sides).
    """
    # Template body filled in per the docstring above.
    if length > 10:
        # Draw one complete polygon at the current side length.
        for _ in range(sides):
            t.forward(length)
            t.left(360 / sides)
        # Then draw the next, smaller polygon; recursion stops at length <= 10.
        nestedPolygon(t, length / 2, sides)
def fractalPolygon(t, length, sides):
    """
    Takes three parameters: a turtle a side length and the number of sides.
    The function does the following: if the length is greater than 10,
    it repeats sides times: moves forward that length, turns 360/sides degrees,
    and calls fractalPolygon(t, length/2, sides).
    """
    # Template body filled in per the docstring above: unlike nestedPolygon,
    # the recursive call happens inside the loop, once per side.
    if length > 10:
        for _ in range(sides):
            t.forward(length)
            t.left(360 / sides)
            fractalPolygon(t, length / 2, sides)
def main():
    """Prompt for a side length and side count, then draw both figures."""
    length = int(input('Enter length: '))
    sides = int(input('Enter number of sides: '))
    # A polygon needs at least three sides; bail out early otherwise.
    if sides < 3:
        print("A polygon must have at least 3 sides.")
        return
    tom = turtle.Turtle()
    setUp(tom, -100, "darkgreen")
    nestedPolygon(tom, length, sides)
    tess = turtle.Turtle()
    setUp(tess, 100, "steelblue")
    fractalPolygon(tess, length, sides)
# Run only when executed directly, not when imported.
if __name__ == "__main__":
    main()
|
#_*_ coding: utf-8 _*_
import re
import lxml.html
def parse_login_form(page_src):
    """
    Parse the first form on the login page source.

    Returns the form's action URL and its current field/value pairs.
    """
    document = lxml.html.fromstring(page_src)
    login_form = document.forms[0]
    return login_form.action, login_form.form_values()
def update_form_values(values, email, psw):
    """
    Update form (field, value) pairs with the login email and password.

    Builds a new list explicitly (the original called ``.append`` on the
    result of ``map``, which is only a list on Python 2; on Python 3 it is
    an iterator and the call raises AttributeError) and appends the submit
    field, without which the login fails.
    """
    def _updater(value):
        if value[0] == 'mobile':
            return value[0], email
        elif value[0].startswith('password'):
            return value[0], psw
        else:
            # NOTE(review): assumes value[1] is a unicode string (py2
            # lxml text) -- on py3 this produces bytes; confirm callers.
            return value[0], value[1].encode('utf8')
    values = [_updater(value) for value in values]
    values.append(('submit', '登录'))  # need this, otherwise, login failed
    return values
def parse_gsid(success_page):
    """
    Extract the gsid token from the post-login page, or None if absent.
    """
    match = re.search(r'gsid=(\w+)&', success_page)
    if match:
        return match.group(1)
    return None
def clear_contents(e):
    """
    Helper: strip every text node under element `e` and drop empty results.
    """
    return filter(None, (text.strip() for text in e.xpath('.//text()')))
def parse_weibo_page(src):
    """
    #IMPORTANT:
    On the logged-in user's own weibo list page the number of entries
    fetched is always 10; on another user's list page fewer than 10
    entries may be present.

    Structure of the mobile weibo page:
    <div class="c" id="M_xxxx">
    </div>
    Original post without image
    <div><span class="ctt">content</span>....<span class="ct">time</span></div>
    Original post with image
    <div><span class="ctt">content</span></div>
    <div><a> <img></a> ... <span class="ct">time</span></div>
    Repost without image
    <div><span class="cmt">rt_user</span><span class="ctt">rt_content</span>...</div>
    <div><span class="cmt">...</span>....<span class="ct">time</span></div>
    Repost with image
    <div><span class="cmt">...</span></span>
    <div><a><img></a></div>
    <div><span class="cmt">...</span> ... <span class="ct">...</span></div>
    Strategy:
    With two or more divs, check for an image first: if the second div has
    an <img>, record its src and remove that div.
    If only one div remains, the entry must be an original post.
    If two divs remain, it must be a repost.
    """
    page = lxml.html.fromstring(src)
    weibos = page.xpath('.//div[starts-with(@id, "M_")]')
    weibo_list = []
    for wb in weibos:
        has_img = False
        src = ""  # must be initialised: it is emitted below even with no image
        content = ""
        rt_content = ""
        rt_user = ""
        mid = wb.xpath("@id")[0][2:]
        # TODO: relative timestamps like "a few minutes ago" are not handled
        time_and_source = wb.xpath('.//span[@class="ct"]//text()')[0].split()
        time = " ".join((time_and_source[0], time_and_source[1]))
        source = time_and_source[2]
        divs = wb.xpath('.//div')
        # Check for an image first; if present, record it and drop that div
        if len(divs) > 1:
            xp = divs[1].xpath('.//img/@src')
            if xp:
                src = xp[0]
                has_img = True
                divs.pop(1)
        # Parse the weibo content
        if len(divs) == 1:  # a single div means an original post
            contents = clear_contents(divs[0])
            content = has_img and " ".join(contents) or " ".join(contents[:-6])
        else:  # otherwise it is a repost
            contents = clear_contents(divs[1])  # repost commentary body
            content = "".join(contents[1:-6])
            # TODO BUG: reposts whose original content was deleted are not handled
            rt_contents = clear_contents(divs[0])  # reposted content body
            rt_user = rt_contents[1]
            rt_content = "".join(rt_contents[3:-2])
        weibo_list.append({
            'mid': mid,
            'content': content,
            'rt_user': rt_user,
            'rt_content': rt_content,
            'img': src,
            'time': time,
            'source': source,
        })
    return weibo_list
if __name__ == "__main__":
    # Parse a locally saved sample page; `with` guarantees the file handle
    # is closed even if reading or parsing raises.
    with open('weibo_page.html', 'r') as f:
        src = f.read()
    weibo_list = parse_weibo_page(src)
|
#!/usr/bin/env python
# by Cameron Po-Hsuan Chen @ Princeton
import numpy as np, scipy, random, sys, math, os
import scipy.io
from scipy import stats
sys.path.append('/Users/ChimatChen/anaconda/python.app/Contents/lib/python2.7/site-packages/')
from libsvm.svmutil import *
from scikits.learn.svm import NuSVC
import numpy as np
import matplotlib.pyplot as plt
import sys
# load experiment parameters
# NOTE(review): written for Python 2 -- `niter/niter_unit` below relies on
# integer division; confirm before running on Python 3.
para = {'niter' : int(sys.argv[1]),
        'nvoxel' : int(sys.argv[2]),
        'nTR' : int(sys.argv[3]),
        'nrand' : int(sys.argv[4]),
        'nsubjs' : 10,
        'niter_unit': 1 }

niter = para['niter']
nvoxel = para['nvoxel']
nTR = para['nTR']
nsubjs = para['nsubjs']
niter_unit = para['niter_unit']
nrand = para['nrand']

# load experiment options
# rondo options
options = {'input_path' : '/jukebox/ramadge/pohsuan/pHA/data/input/',
           'working_path': '/fastscratch/pohsuan/pHA/data/working/'+str(para['nTR'])+'TR/',
           'output_path' : '/jukebox/ramadge/pohsuan/pHA/data/output/'+str(para['nTR'])+'TR/'}
# local options
#options = {'input_path' : '/Volumes/ramadge/pohsuan/pHA/data/input/', \
#           'working_path': '/Volumes/ramadge/pohsuan/pHA/data/working/'+str(para['nTR'])+'TR/',\
#           'output_path' : '/Volumes/ramadge/pohsuan/pHA/data/output/'+str(para['nTR'])+'TR/'}

nfeature = [10,50,100]
# Accuracy tensor: (subject x random draw, iteration, feature-count index).
acc_spHA_VI_all = np.zeros((nsubjs*nrand, niter/niter_unit, len(nfeature)))

# Load per-(feature, draw, iteration) accuracy files produced upstream.
# Iteration 0 is deliberately skipped here and back-filled with the
# no-alignment baseline below.
for i in range(1,niter/niter_unit):
    for k in range(len(nfeature)):
        for rand in range(nrand):
            ws_spha_vi = np.load(options['working_path']+ 'lowrank'+str(nfeature[k]) +'/rand'+str(rand)+'/acc_spHA_VI_'+str(para['nvoxel'])+'vx_'+str(i)+'.npz')
            acc_spHA_VI_all[range(rand*nsubjs,(rand+1)*nsubjs),i,k] = ws_spha_vi['accu']
            ws_spha_vi.close()

# Baseline (no alignment) accuracy: mean and standard error over subjects.
ws_none = np.load(options['working_path']+'acc_None_'+str(para['nvoxel'])+'vx_0.npz')
acc_None_mean = ws_none['accu'].mean(axis = 0)
acc_None_se = ws_none['accu'].std(axis = 0)/math.sqrt(nsubjs)

iter_range = range(niter/niter_unit)

# set font size
font = {#'family' : 'normal',
        'size' : 10
       }
plt.rc('font', **font)
aspectratio=8

# plot accuracy
plt.figure()
#sys.exit()
color_code = 'cbgkmycbgkmy'
marker_code ='......******'

# Horizontal dashed reference line: previously reported "Neuron HA" accuracy.
plt.errorbar(iter_range ,len(iter_range)*[0.639],
             len(iter_range)*[0.022] , label="Neuron HA " , markevery=2, linewidth=2, color='k',linestyle='--')

for k in range(len(nfeature)):
    acc_spHA_VI_mean = acc_spHA_VI_all[:,:,k].mean(axis = 0)
    acc_spHA_VI_se = acc_spHA_VI_all[:,:,k].std(axis = 0)/math.sqrt(nsubjs)
    # Iteration 0 was never filled above; seed each curve with the baseline.
    acc_spHA_VI_mean[0] = acc_None_mean
    acc_spHA_VI_se[0] = acc_None_se
    plt.errorbar(iter_range ,acc_spHA_VI_mean,acc_spHA_VI_se , label='spHA VI '+str(nfeature[k]) , linewidth=1, color=color_code[k], marker=marker_code[k])

plt.xlabel('Iterations')
plt.ylabel('Accuracy')
plt.ylim([0,0.8])
plt.axes().set_aspect(aspectratio)
plt.legend(loc=4)
plt.text(.12, .05, 'Image Classification', horizontalalignment='left', verticalalignment='bottom')
plt.text(.12, .01, 'Skinny Random Matrices', horizontalalignment='left', verticalalignment='bottom')
plt.savefig(options['output_path']+'accuracy_spha_lowrank_rand_'+str(para['nvoxel'])+'vx.eps', format='eps', dpi=1000,bbox_inches='tight')
|
import os
import json
import numpy as np
import pandas as pd
import tensorflow as tf
#from tf.keras.models import Sequential
#from keras.layers import LSTM, Dropout, TimeDistributed, Dense, Activation, Embedding
#from keras.callbacks import ModelCheckpoint
#from keras.utils import *
from music21 import *
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
data_directory = "/home/sachi/Documents/data"
data_file = "Data_Tunes.txt"
charIndex_json = "char_to_index.json"
BATCH_SIZE = 16
SEQ_LENGTH = 64
def preprocess(data):
    """Strip ABC metadata lines from the corpus and insert start tokens.

    Pass 1 removes header lines (X:, T:, M:, S:, K:, P:) and '%' comment
    lines; pass 2 replaces each triple-newline tune separator with the
    start token 'Z'. Returns the cleaned corpus as a list of characters.
    """
    list1=list(data)
    list2=['\n','\n','\n']  # seed newlines so a leading header deletes cleanly
    ignore=['X','T','M','S','K','P']
    i=0
    # Pass 1: drop metadata/comment lines.
    # NOTE(review): list1[i+1] and the inner scan assume every line ends in
    # '\n'; a corpus not ending with a newline could raise IndexError --
    # confirm the input format.
    while(i<len(list1)):
        if(((list1[i] in ignore) and (list1[i+1]==":"))or list1[i]=='%' ):
            del list2[-1]  # also drop the newline preceding the removed line
            while(list1[i]!='\n'):
                i=i+1
        list2.append(list1[i])
        i=i+1
    i=0
    # Pass 2: replace each "\n\n\n" run with the start token 'Z'.
    preprocess_data=[]
    while(i<len(list2)):
        if(list2[i]=='\n'and list2[i+1]=='\n' and list2[i+2]=='\n'):
            preprocess_data.append('Z')
            i=i+3
        else:
            preprocess_data.append(list2[i])
            i=i+1
    return preprocess_data
def read_data(preprocess_data):
    """Build the char/index vocabulary and encode the corpus.

    Persists char_to_index as JSON to <data_directory>/<charIndex_json> so
    generation can reuse the same mapping, then returns the corpus encoded
    as an int32 index array together with the vocabulary size.
    """
    # sorted(set(...)) gives a deterministic char -> index assignment.
    char_to_index = {ch: i for (i, ch) in enumerate(sorted(set(preprocess_data)))}
    with open(os.path.join(data_directory, charIndex_json), mode = "w") as f:
        json.dump(char_to_index, f)
    num_unique_chars = len(char_to_index)
    all_characters_as_indices = np.asarray(
        [char_to_index[c] for c in preprocess_data], dtype = np.int32)
    # (The original also built an unused index_to_char dict here; removed.)
    return all_characters_as_indices, num_unique_chars
def input_output(all_chars_as_indices, num_unique_chars):
    """Build (X, Y) training arrays from the encoded corpus.

    X[i, j] is a character index; Y[i, j] one-hot encodes the character
    that FOLLOWS X[i, j]. Because of that one-step lookahead, the number
    of examples is computed from total_length - 1: the original
    int(total_length / SEQ_LENGTH) made the index i*SEQ_LENGTH + j + 1
    run one past the end (IndexError) whenever the corpus length was an
    exact multiple of SEQ_LENGTH.
    """
    total_length = all_chars_as_indices.shape[0]
    num_examples = (total_length - 1) // SEQ_LENGTH
    X = np.zeros((num_examples, SEQ_LENGTH))
    Y = np.zeros((num_examples, SEQ_LENGTH, num_unique_chars))
    for i in range(num_examples):
        for j in range(SEQ_LENGTH):
            X[i, j] = all_chars_as_indices[i * SEQ_LENGTH + j]
            # One-hot target: the next character in the corpus.
            Y[i, j, all_chars_as_indices[i * SEQ_LENGTH + j + 1]] = 1
    return X, Y
def build_model(seq_length, num_unique_chars):
    """Training model: embedding, three LSTM+dropout stages, softmax head."""
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.Embedding(input_dim = num_unique_chars,
                                        output_dim = 512,
                                        input_shape = (seq_length,)))
    # Three identical recurrent stages, each regularized with dropout.
    for _ in range(3):
        model.add(tf.keras.layers.LSTM(256, return_sequences = True))
        model.add(tf.keras.layers.Dropout(0.2))
    # Per-timestep projection onto the vocabulary, then softmax.
    model.add(tf.keras.layers.TimeDistributed(
        tf.keras.layers.Dense(num_unique_chars)))
    model.add(tf.keras.layers.Activation("softmax"))
    return model
def make_model(num_unique_chars):
    """Sampling model: batch-size-1, stateful twin of build_model."""
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.Embedding(input_dim = num_unique_chars,
                                        output_dim = 512,
                                        batch_input_shape = (1, 1)))
    # stateful: If True, the last state for each sample at index i in a batch
    # will be used as initial state for the sample of index i in the
    # following batch -- required for one-character-at-a-time sampling.
    for _ in range(3):
        model.add(tf.keras.layers.LSTM(256, return_sequences = True,
                                       stateful = True))
        model.add(tf.keras.layers.Dropout(0.2))
    model.add((tf.keras.layers.Dense(num_unique_chars)))
    model.add(tf.keras.layers.Activation("softmax"))
    return model
def generate_sequence(gen_seq_length):
    """Sample `gen_seq_length` characters from the trained model.

    Reloads the char/index mapping saved by read_data, rebuilds the
    batch-size-1 stateful sampling model, seeds it with the start token
    'Z', and samples one character at a time from the softmax output.
    Returns the generated tune prefixed with an 'M:6/8' meter header.
    """
    with open(os.path.join(data_directory, charIndex_json)) as f:
        char_to_index = json.load(f)
    index_to_char = {i:ch for ch, i in char_to_index.items()}
    num_unique_chars = len(index_to_char)
    model = make_model(num_unique_chars)
    # NOTE(review): hard-coded absolute checkpoint path -- confirm it matches
    # the checkpoint files written during training.
    model.load_weights("/home/sachi/Documents/weights.80.hdf5")
    sequence_index = [char_to_index['Z']]
    for _ in range(gen_seq_length):
        batch = np.zeros((1, 1))
        # Feed back the most recently sampled character.
        batch[0, 0] = sequence_index[-1]
        predicted_probs = model.predict_on_batch(batch).ravel()
        # Sample (rather than argmax) to keep the output varied.
        sample = np.random.choice(range(num_unique_chars), size = 1, p = predicted_probs)
        sequence_index.append(sample[0])
    seq = ''.join(index_to_char[c] for c in sequence_index)
    seq='M:6/8\n'+str(seq)
    return seq
def convert_to_midi(abc):
    """Convert an ABC-notation string to MIDI and write it to demos1.mid."""
    c = converter.subConverters.ConverterABC()
    c.registerOutputExtensions = ("midi", )
    c.parseData(abc)
    s = c.stream
    # Writes into the current working directory.
    s.write('midi', fp='demos1.mid')
# ---- Script entry point: load corpus, train, then sample a tune. ----
# Context manager guarantees the corpus file is closed even on error.
with open(os.path.join(data_directory, data_file), mode = 'r') as file:
    data = file.read()
preprocess_data = preprocess(data)
all_characters_as_indices, num_unique_chars = read_data(preprocess_data)
X, Y = input_output(all_characters_as_indices, num_unique_chars)
print("length of preprocess_data-{}".format(len(preprocess_data)))
print("vocab_size={}".format(num_unique_chars))
print("all_characters={}".format(all_characters_as_indices))
print("length of all_characters-{}".format(len(all_characters_as_indices)))
print("shape of X={}".format(X.shape))
print("shape of Y={}".format(Y.shape))
model = build_model(SEQ_LENGTH, num_unique_chars)
model.summary()
model.compile(loss = "categorical_crossentropy", optimizer = "adam", metrics = ["accuracy"])
# Checkpoint weights every 10 epochs, keeping only the lowest-loss set.
checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath='weights.{epoch:02d}.hdf5', monitor='loss', save_best_only=True, save_weights_only=True, period=10)
# Use the BATCH_SIZE constant (same value 16) instead of a magic number.
model.fit(X, Y, batch_size=BATCH_SIZE, epochs=80, callbacks=[checkpoint])
music = generate_sequence(192)
print("\nMUSIC SEQUENCE GENERATED: \n{}".format(music))
convert_to_midi(music)
"""Celery background task to start workflow run."""
from _io import TextIOWrapper
import logging
import re
import subprocess
from typing import (Dict, List, Optional, Tuple)
from pro_wes.celery_worker import celery
# Get logger instance
logger = logging.getLogger(__name__)
@celery.task(
    name='tasks.run_workflow',
    bind=True,
    ignore_result=True,
    track_started=True
)
def task__run_workflow(
    self,
    command_list: List,
    tmp_dir: str
) -> Tuple[int, List[str], List[str]]:
    """Adds workflow run to task queue.

    Spawns the workflow command in `tmp_dir`, streams its combined
    STDOUT/STDERR through the cwl-tes log parser, and returns
    (exit code, log lines, TES task IDs seen).
    """
    # Launch the workflow engine as a child process, merging STDERR
    # into STDOUT so a single stream can be parsed in real time.
    process = subprocess.Popen(
        command_list,
        cwd=tmp_dir,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
    )
    captured_log, task_ids = __process_cwl_logs(self, stream=process.stdout)
    exit_code = process.wait()
    return (exit_code, captured_log, task_ids)
def __process_cwl_logs(
    task: celery.Task,
    stream: TextIOWrapper
) -> Tuple[List, List]:
    """Parses combined cwl-tes STDOUT/STDERR and sends TES task IDs and state
    updates to broker.

    Returns a tuple of (collected log lines, list of TES task IDs seen).
    """
    stream_container: List = list()
    tes_states: Dict = dict()
    # Iterate over STDOUT/STDERR stream
    for line in iter(stream.readline, ''):
        line = line.rstrip()
        # Replace single quote characters to avoid `literal_eval()` errors
        line = line.replace("'", '"')
        # Handle special cases
        special_lines = __handle_cwl_tes_log_irregularities(line)
        if special_lines:
            # BUG FIX: the original `for`/`continue` only continued the
            # inner loop, so the loop variable `line` was rebound to the
            # last special-case line which then fell through and was
            # appended and logged a second time below. Continue the
            # *outer* loop once the special case is fully handled.
            for special_line in special_lines:
                stream_container.append(special_line)
                logger.info(special_line)
            continue
        # Detect TES task state changes
        (tes_id, tes_state) = __extract_tes_task_state_from_cwl_tes_log(line)
        if tes_id:
            # Handle new task
            if tes_id not in tes_states:
                tes_states[tes_id] = tes_state
                __send_event_tes_task_update(
                    task,
                    tes_id=tes_id,
                )
            # Handle state change
            elif tes_states[tes_id] != tes_state:
                tes_states[tes_id] = tes_state
                __send_event_tes_task_update(
                    task,
                    tes_id=tes_id,
                    tes_state=tes_state,
                )
            # NOTE: polling lines are logged but deliberately not appended
            # to the returned log, matching the original behavior.
            logger.info(line)
            continue
        stream_container.append(line)
        logger.info(line)
    return (stream_container, list(tes_states.keys()))
def __handle_cwl_tes_log_irregularities(line: str) -> List[str]:
    """Handles irregularities arising from log parsing.

    When an FTP `*cmd*` log line and a cwl-tes `[step ...]` line end up
    glued together on one physical line, split them apart and return both
    parts; otherwise return an empty list.
    """
    lines: List = list()
    # Handle special case where FTP and cwl-tes logs are on same line
    re_ftp_cwl_tes = re.compile(
        r'^(\*cmd\* .*)(\[step \w*\] produced output \{)$'
    )
    m = re_ftp_cwl_tes.match(line)
    if m:
        lines.append(m.group(1))
        # BUG FIX: the second capture group (the cwl-tes fragment) was
        # captured but never returned, silently dropping that log line.
        lines.append(m.group(2))
    return lines
def __extract_tes_task_state_from_cwl_tes_log(
    line: str
) -> Tuple[Optional[str], Optional[str]]:
    """Extracts task ID and state from cwl-tes log.

    Returns (task_id, task_state); either element is None when the line
    does not carry that piece of information.
    """
    task_id: Optional[str] = None
    task_state: Optional[str] = None
    # New-task announcement, e.g. '[job x] task id: <id>'
    match_new_task = re.match(r"^\[job \w*\] task id: (\S*)$", line)
    if match_new_task:
        task_id = match_new_task.group(1)
    # Polling line carries both the task ID and its current state
    match_poll = re.match(
        r'^\[job \w*\] POLLING "(\S*)", result: (\w*)', line
    )
    if match_poll:
        task_id, task_state = match_poll.group(1), match_poll.group(2)
    return (task_id, task_state)
def __send_event_tes_task_update(
    task: celery.Task,
    tes_id: str,
    tes_state: Optional[str] = None
) -> None:
    """Sends custom event to inform about TES task state change."""
    # Publish a custom Celery event; consumers key on 'task-tes-task-update'.
    event_payload = {'tes_id': tes_id, 'tes_state': tes_state}
    task.send_event('task-tes-task-update', **event_payload)
    return None
|
import cv2
import numpy as np

# Load the source image and shrink it to a fixed working size.
# BUG FIX: the original drew on an undefined name `myImage` (NameError)
# and never imported numpy; draw on the resized image the circles were
# actually detected in.
image = cv2.imread("C:\\sample.jpg")
img = cv2.resize(image, (640, 480))
# Hough circle detection operates on a single-channel image.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# BUG FIX: cv2.cv.CV_HOUGH_GRADIENT is the removed OpenCV 1.x constant;
# the modern cv2 API exposes it as cv2.HOUGH_GRADIENT.
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 10,
                           param1=50, param2=35, minRadius=0, maxRadius=0)
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
    # draw the outer circle
    cv2.circle(img, (i[0], i[1]), i[2], (0, 255, 0), 2)
    # draw the center of the circle
    cv2.circle(img, (i[0], i[1]), 2, (0, 0, 255), 3)
cv2.imshow('detected circles', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
import os
import sys
import datetime
import whois
import requests
def load_urls4check(path):
    """Read *path* (UTF-8) and return its whitespace-separated URLs as a list."""
    with open(path, "r", encoding="utf-8") as urls_file:
        return urls_file.read().split()
def is_server_respond_ok(url):
    """GET *url* and report its health.

    Returns True for a 2xx/3xx status, False otherwise, and None when
    the host cannot be reached at all.
    """
    try:
        return requests.get(url).ok
    except requests.ConnectionError:
        return None
def is_domains_paid(url, paid_days):
    """Check whether the domain stays paid for at least *paid_days* more days.

    Returns None when no expiration date could be determined.
    """
    expiration_date = get_domain_expiration_date(url)
    if expiration_date is None:
        return None
    remaining = expiration_date - datetime.datetime.today()
    return remaining >= datetime.timedelta(paid_days)
def create_output_generator(url_list, paid_days):
    """Lazily yield (url, response_ok, domains_paid) for every URL."""
    for url in url_list:
        # WHOIS lookup first, then HTTP check — same order as before.
        domains_paid = is_domains_paid(url, paid_days)
        response_ok = is_server_respond_ok(url)
        yield url, response_ok, domains_paid
def get_domain_expiration_date(url):
    """Return the domain's expiration date from WHOIS.

    python-whois may return a single datetime or a list of datetimes
    (e.g. registrar vs. registry records); normalize to the first one.
    """
    domain = whois.whois(url)
    expiration_date = domain.expiration_date
    # isinstance() instead of `type(...) == list`: idiomatic and also
    # accepts list subclasses.
    if isinstance(expiration_date, list):
        return expiration_date[0]
    return expiration_date
def print_site_health(url_response_ok_and_domains_paid):
    """Print a health report for each (url, response_ok, domains_paid) triple.

    BUG FIX: create_output_generator yields tuples in the order
    (url, response_ok, domains_paid), but this function unpacked them as
    (url, site_paid, server_respond) — i.e. swapped — so the "server 200"
    line showed the payment status and vice versa.
    """
    for url, response_ok, domains_paid in url_response_ok_and_domains_paid:
        is_paid = "Да" if domains_paid else "Нет"
        is_respond_ok = "Да" if response_ok else "Нет"
        print("Сайт: ", url)
        print("Код состояния сервера 200: ", is_respond_ok)
        print("Проплачено на месяц вперед: ", is_paid)
if __name__ == "__main__":
    # First CLI argument: path to a file with whitespace-separated URLs.
    if len(sys.argv) > 1:
        filepath = sys.argv[1]
    else:
        exit("Путь не введен")
    if not(os.path.exists(filepath)):
        exit("Файла нет в директории")
    # Domains must remain paid for at least this many days ahead.
    paid_days = 30
    url_list = load_urls4check(filepath)
    # Generator of (url, response_ok, domains_paid) triples, consumed lazily.
    url_response_ok_and_domains_paid = create_output_generator(url_list, paid_days)
    print_site_health(url_response_ok_and_domains_paid)
|
# -*- coding: utf-8 -*-
from datetime import datetime
from django.contrib.contenttypes import generic
from taggit.managers import TaggableManager
from proj.core.models import User
from proj.core.comment.models import Comment
from .managers import *
# Lifecycle states a Project can be in (value, display label).
# NOTE(review): `models` is not among this module's visible imports;
# presumably it arrives via `from .managers import *` — confirm.
PROJECT_STATUSES = (
    ('open', u'Открыт'),
    ('closed', u'Закрыт'),
)
# Visibility levels controlling who may see a project.
PUBLIC_STATUSES = (
    ('public', u'Доступен всем'),
    ('friends', u'Доступен только подписчикам'),
    ('community', u'Доступен сообществам'),
    ('private', u'Приватный'),
)
class Project(models.Model):
    """A client's project request with status, visibility, tags and deadline."""
    reporter = models.ForeignKey(User, related_name='projects', verbose_name=u'Заказчик')
    title = models.CharField(max_length=255, verbose_name=u'Заголовок')
    description = models.TextField(max_length=1000, blank=True, null=True, verbose_name=u'Описание')
    status = models.CharField(max_length=10, choices=PROJECT_STATUSES, default='open', verbose_name=u'Статус')
    visible = models.CharField(max_length=10, choices=PUBLIC_STATUSES, default='public', verbose_name=u'Видимость')
    date_created = models.DateTimeField(auto_now_add=True, verbose_name=u'Дата создания')
    date_due = models.DateTimeField(blank=True, null=True, verbose_name=u'Срок выполнения')
    date_updated = models.DateTimeField(auto_now=True, verbose_name=u'Дата обновления')
    tags = TaggableManager(blank=True)
    comments = generic.GenericRelation(Comment)

    def __unicode__(self):
        return self.title

    @property
    def is_past_due(self):
        """True when a due date is set and has already passed.

        BUG FIX: `date_due` is nullable (blank=True, null=True); the
        original compared `datetime.now() > None` for projects without a
        deadline. Such projects now report False instead of raising.
        """
        return bool(self.date_due and datetime.now() > self.date_due)
class Milestone(models.Model):
    """A project milestone grouping tasks, with a deadline and progress metric."""
    project = models.ForeignKey(Project, related_name='milestones', verbose_name=u'Проект')
    title = models.CharField(max_length=255, verbose_name=u'Заголовок')
    description = models.TextField(max_length=1000, blank=True, null=True, verbose_name=u'Описание')
    date_created = models.DateTimeField(auto_now_add=True, verbose_name=u'Дата создания')
    date_due = models.DateTimeField(blank=True, null=True, verbose_name=u'Срок выполнения')
    date_updated = models.DateTimeField(auto_now=True, verbose_name=u'Дата обновления')
    objects = MilestoneManager()

    def __unicode__(self):
        return self.title

    @property
    def is_past_due(self):
        """True when a due date is set and has already passed.

        BUG FIX: `date_due` is nullable; the original compared
        `datetime.now() > None` for milestones without a deadline.
        """
        return bool(self.date_due and datetime.now() > self.date_due)

    @property
    def progress(self):
        """Percentage (0-100) of this milestone's tasks that are resolved/closed."""
        completed = self.tasks.filter(status__in=['resolved', 'closed']).count()
        total = self.tasks.count()
        if total:
            # BUG FIX: multiply before dividing. This module is Python 2
            # era (`__unicode__`, u'' literals), where the original
            # `completed / total * 100` floor-divided to 0 for any
            # partially complete milestone.
            return completed * 100 / total
        return 0
|
# Generated by Django 3.2.3 on 2021-05-28 18:05
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the Amenity model (name + price)."""
    dependencies = [
        ('web_app', '0003_room_marked_for_housekeep'),
    ]
    operations = [
        migrations.CreateModel(
            name='Amenity',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                # Monetary value: up to 20 digits, 2 decimal places.
                ('price', models.DecimalField(decimal_places=2, max_digits=20)),
            ],
        ),
    ]
|
class Human:
    """Console-driven player agent: prints every game event it receives and
    prompts the user via input() for contract and card decisions."""
    def __init__(self, name, params):
        # NOTE(review): `params` is accepted but never stored or used here.
        self.name = name
    def do_action(self, observation):
        """Dispatch on observation['event_name'].

        Returns an action dict for 'ChooseContrat' and 'PlayTrick'; for all
        other events it only prints the observation and returns None.
        """
        if observation['event_name'] == 'GameStart':
            print(observation)
        elif observation['event_name'] == 'NewRound':
            print(observation)
        elif observation['event_name'] == 'ChooseContrat':
            print(observation)
            # NOTE(review): `former_value` is captured but never used.
            former_value = observation["data"]["contrat"]
            # Start from the engine's current contract; pressing enter
            # twice below keeps it (a "pass").
            contrat_dict = {"suit": int(observation["data"]["suit"]),
                            "value": int(observation["data"]["contrat"]),
                            "newContrat": False}
            print('current contrat: ', contrat_dict)
            print("To pass, just press twice 'enter'")
            suit = input('suit of the new contrat: ')
            contrat = input('value of the new contrat: ')
            # Any non-empty suit means the player raises the contract.
            if suit != "":
                contrat_dict["newContrat"]=True
                contrat_dict["suit"] = int(suit)
                contrat_dict["value"] = int(contrat)
            print(contrat_dict)
            return {
                "event_name" : "ChooseContratAction",
                "data" : {
                    'playerName': self.name,
                    'action': contrat_dict
                }
            }
        elif observation['event_name'] == 'ShowPlayerHand':
            print(observation)
        elif observation['event_name'] == 'PlayTrick':
            print(observation)
            hand = observation['data']['hand']
            # if '2c' in hand:
            #     choose_card = '2c'
            # else:
            choose_card = input('choose card: ')
            playtrick_action = {
                "event_name" : "PlayTrick_Action",
                "data" : {
                    'playerName': self.name,
                    'action': {'card': choose_card}
                }
            }
            print("playtrick_action: ", playtrick_action)
            return playtrick_action
        elif observation['event_name'] == 'ShowTrickAction':
            print(observation)
        elif observation['event_name'] == 'ShowTrickEnd':
            print(observation)
        elif observation['event_name'] == 'RoundEnd':
            print(observation)
        elif observation['event_name'] == 'GameOver':
            print(observation)
|
from aiogram import Bot, Dispatcher, executor, types
# --- Reply and inline keyboards used by the bot ---
# Main menu: balance / click / withdraw buttons.
menu = types.ReplyKeyboardMarkup(resize_keyboard=True)
menu.add(
    types.KeyboardButton('👤 Баланс'),
    types.KeyboardButton('💸 Клик'),
    types.KeyboardButton('🎰 Вывод')
)
# One-shot payment keyboard (hidden after a single use).
pay = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
pay.add(
    types.KeyboardButton('Оплатить')
)
# Terms-acceptance inline button.
accept = types.InlineKeyboardMarkup(row_width=3)
accept.add(
    types.InlineKeyboardButton(text='✅ Принимаю', callback_data='accept')
)
# Purchase flow: verify payment or go back.
buy1 = types.InlineKeyboardMarkup(row_width=3)
buy1.add(
    types.InlineKeyboardButton(text='Проверить оплату', callback_data='check'),
    types.InlineKeyboardButton(text='Назад', callback_data='back')
)
# Admin panel: statistics button.
apanel = types.InlineKeyboardMarkup(row_width=3)
apanel.add(
    types.InlineKeyboardButton(text='Статистика', callback_data='stats')
)
from django.shortcuts import render
# Create your views here.
from .models import CharApp
from web.chargen.forms import AppForm
from django.http import HttpResponseRedirect
from datetime import datetime
from evennia.objects.models import ObjectDB
from django.conf import settings
from evennia.utils import create
def index(request):
    """List the character applications this account has submitted."""
    account_id = request.user.id  # the logged-in account's id
    # submitted Characters by this account
    submitted_apps = CharApp.objects.filter(account_id=account_id, submitted=True)
    # make the variables in the context available to the web page template
    return render(request, 'chargen/index.html', {'sub_apps': submitted_apps})
def detail(request, app_id):
    """Show a single character application identified by *app_id*."""
    application = CharApp.objects.get(app_id=app_id)
    context = {
        'name': application.char_name,
        'background': application.background,
        'p_id': request.user.id,
        'submitted': application.submitted,
    }
    return render(request, 'chargen/detail.html', context)
# NOTE(review): an identical `creating` view was defined twice in this
# module; Python silently keeps only the later definition, so this first
# copy was dead code. It has been removed — the surviving, unchanged
# definition below is the one actually served.
def creating(request):
    """Create or submit a character application.

    On a valid POST: saves the application; if it was submitted (not just
    saved as a draft via the 'save' button), also creates the in-game
    character object and binds it to the account.
    """
    user = request.user
    if request.method == 'POST':
        form = AppForm(request.POST)
        if form.is_valid():
            name = form.cleaned_data['name']
            background = form.cleaned_data['background']
            # NOTE(review): naive datetime; Django's timezone.now() would
            # be timezone-aware — confirm against project settings.
            applied_date = datetime.now()
            submitted = True
            # 'save' button means "store a draft", not a submission.
            if 'save' in request.POST:
                submitted = False
            app = CharApp(char_name=name, background=background,
                          date_applied=applied_date, account_id=user.id,
                          submitted=submitted)
            app.save()
            if submitted:
                # Create the actual character object
                typeclass = settings.BASE_CHARACTER_TYPECLASS
                home = ObjectDB.objects.get_id(settings.GUEST_HOME)
                # turn the permissionhandler to a string
                perms = str(user.permissions)
                # create the character
                char = create.create_object(typeclass=typeclass, key=name,
                                            home=home, permissions=perms)
                user.db._playable_characters.append(char)
                # add the right locks for the character so the account can
                # puppet it
                char.locks.add("puppet:id(%i) or pid(%i) or perm(Developers) "
                               "or pperm(Developers)" % (char.id, user.id))
                char.db.background = background # set the character background
                return HttpResponseRedirect('/chargen')
    else:
        form = AppForm()
    return render(request, 'chargen/create.html', {'form': form})
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # The settings file path is set from the virtual environment variables, so the following line is not needed.
    # When deployed to Heroku, a corresponding environment variable on Heroku should be set.
    # os.environ.setdefault("DJANGO_SETTINGS_MODULE", "snie_host.settings")
    # Delegate to Django's command-line interface (runserver, migrate, ...).
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
import os
from rsf.proj import *
########################################################################
# RETRIEVE DATA
########################################################################
# Define SLIM FTP server information
FTPserver = {
    'server': 'ftp.slim.gatech.edu',
    'login': 'ftp',
    'password':''}
# Remote directory holding the time-jittered acquisition results.
loc = os.path.join('SoftwareRelease','Acquisition','2DTimeJitteredOBS-LR','results','TimeJitAcq_1boat_2array_LR')
# Result files to download (logs, RSF data cubes, MATLAB metadata).
files = ['comptime_SNR.mat','TimeJitAcq_1boat_2array_LR.log','TimeJitAcq_1boat_2array_LRdiff.rsf','TimeJitAcq_1boat_2array_LRrecov.rsf','TimeJitAcq_1boat_2array_LR_jitdata.rsf','TimeJitAcq_1boat_2array_LR_params.mat','TimeJitAcq_1boat_2array_LR_adjrecov.rsf']
# Fetch data from FTP server
for elm in files:
    Fetch(elm,loc,FTPserver)
# Finalize the Madagascar/SCons build script.
End()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2016 Peter Magnusson <peter@birchroad.net>
import time
import logging
import hashlib
import os
import serial
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2016 Peter Magnusson <peter@birchroad.net>
from .utils import default_port
from .luacode import DOWNLOAD_FILE, SAVE_LUA, LUA_FUNCTIONS, LIST_FILES, UART_SETUP
log = logging.getLogger(__name__)
__all__ = ['Uploader', 'default_port']
class Uploader(object):
    """Uploader is the class for communicating with the nodemcu and
    that will allow various tasks like uploading files, formating the filesystem etc.

    NOTE(review): several spots use `str.encode('hex')`, which exists only
    on Python 2 — this module appears to target Python 2; confirm before
    running under Python 3.
    """
    BAUD = 9600            # NodeMCU default console baud rate
    TIMEOUT = 5            # default serial read timeout, in seconds
    PORT = default_port()  # platform-dependent default serial port
    def __init__(self, port=PORT, baud=BAUD):
        """Open the serial port and get in sync with the Lua prompt,
        optionally switching to a faster baud rate afterwards."""
        log.info('opening port %s', port)
        if port == 'loop://':
            # Loopback URL used for testing without real hardware.
            self._port = serial.serial_for_url(port, baud, timeout=Uploader.TIMEOUT)
        else:
            self._port = serial.Serial(port, baud, timeout=Uploader.TIMEOUT)
        # Keeps things working, if following conections are made:
        ## RTS = CH_PD (i.e reset)
        ## DTR = GPIO0
        self._port.setRTS(False)
        self._port.setDTR(False)
        # Closure over `self`: re-synchronizes with the Lua REPL by echoing
        # a marker string and waiting for it to come back.
        def sync():
            # Get in sync with LUA (this assumes that NodeMCU gets reset by the previous two lines)
            log.debug('getting in sync with LUA');
            self.exchange(';') # Get a defined state
            self.writeln('print("%sync%");')
            self.expect('%sync%\r\n> ')
        sync()
        if baud != Uploader.BAUD:
            log.info('Changing communication to %s baud', baud)
            self.writeln(UART_SETUP.format(baud=baud))
            # Wait for the string to be sent before switching baud
            time.sleep(0.1)
            self.set_baudrate(baud)
            # Get in sync again
            sync()
        self.line_number = 0
    def set_baudrate(self, baud):
        """Change the local serial baud rate, supporting both the old
        (setBaudrate) and new (baudrate attribute) pyserial APIs."""
        try:
            self._port.setBaudrate(baud)
        except AttributeError:
            self._port.baudrate = baud
    def expect(self, exp='> ', timeout=TIMEOUT):
        """will wait for exp to be returned from nodemcu or timeout"""
        # Remember the configured timeout so it can be restored afterwards.
        timer = self._port.timeout
        # Checking for new data every 100us is fast enough
        lt = 0.0001
        if self._port.timeout != lt:
            self._port.timeout = lt
        end = time.time() + timeout
        # Finish as soon as either exp matches or we run out of time (work like dump, but faster on success)
        data = ''
        while not data.endswith(exp) and time.time() <= end:
            data += self._port.read()
        self._port.timeout = timer
        log.debug('expect return: %s', data)
        return data
    def write(self, output, binary=False):
        """write data on the nodemcu port. If 'binary' is True the debug log
        will show the intended output as hex, otherwise as string"""
        if not binary:
            log.debug('write: %s', output)
        else:
            # NOTE(review): encode('hex') is Python 2 only.
            log.debug('write binary: %s', ':'.join(x.encode('hex') for x in output))
        self._port.write(output)
        self._port.flush()
    def writeln(self, output):
        """write, with linefeed"""
        self.write(output + '\n')
    def exchange(self, output):
        """Send one line and return everything read up to the next prompt."""
        self.writeln(output)
        return self.expect()
    def close(self):
        """restores the nodemcu to default baudrate and then closes the port"""
        self.writeln(UART_SETUP.format(baud=Uploader.BAUD))
        self._port.close()
    def prepare(self):
        """
        This uploads the protocol functions nessecary to do binary
        chunked transfer
        """
        log.info('Preparing esp for transfer.')
        # Probe for each helper; for/else: only skip upload if *all* exist.
        for fn in LUA_FUNCTIONS:
            d = self.exchange('print({0})'.format(fn))
            if d.find('function:') == -1:
                break
        else:
            log.debug('Found all required lua functions, no need to upload them')
            return
        data = SAVE_LUA.format(baud=self._port.baudrate)
        ##change any \r\n to just \n and split on that
        lines = data.replace('\r', '').split('\n')
        #remove some unneccesary spaces to conserve some bytes
        for line in lines:
            line = line.strip().replace(', ', ',').replace(' = ', '=')
            if len(line) == 0:
                continue
            d = self.exchange(line)
            #do some basic test of the result
            if 'unexpected' in d or len(d) > len(SAVE_LUA)+10:
                log.error('error in save_lua "%s"', d)
                return
    def download_file(self, filename):
        """Download *filename* from the device in 256-byte chunks and
        return its contents, truncated to the size the device reports."""
        chunk_size = 256
        bytes_read = 0
        data = ""
        while True:
            d = self.exchange(DOWNLOAD_FILE.format(filename=filename, bytes_read=bytes_read, chunk_size=chunk_size))
            # Response layout: command echo, file size, then the payload.
            cmd, size, tmp_data = d.split('\n', 2)
            data = data + tmp_data[0:chunk_size]
            bytes_read = bytes_read + chunk_size
            if bytes_read > int(size):
                break
        data = data[0:int(size)]
        return data
    def read_file(self, filename, destination=''):
        """Download *filename* from the device and save it locally
        (defaulting to the same name)."""
        if not destination:
            destination = filename
        log.info('Transfering %s to %s', filename, destination)
        data = self.download_file(filename)
        with open(destination, 'w') as f:
            f.write(data)
    def write_file(self, path, destination='', verify='none'):
        """Upload the local file *path* to the device as *destination*
        using the chunked ACK protocol; verify is 'none', 'standard'
        (download and compare) or 'sha1' (compare remote/local hashes)."""
        filename = os.path.basename(path)
        if not destination:
            destination = filename
        log.info('Transfering %s as %s', path, destination)
        self.writeln("recv()")
        res = self.expect('C> ')
        if not res.endswith('C> '):
            log.error('Error waiting for esp "%s"', res)
            return
        log.debug('sending destination filename "%s"', destination)
        self.write(destination + '\x00', True)
        if not self.got_ack():
            log.error('did not ack destination filename')
            return
        f = open(path, 'rb')
        content = f.read()
        f.close()
        log.debug('sending %d bytes in %s', len(content), filename)
        pos = 0
        chunk_size = 128
        while pos < len(content):
            rest = len(content) - pos
            if rest > chunk_size:
                rest = chunk_size
            data = content[pos:pos+rest]
            if not self.write_chunk(data):
                d = self.expect()
                # NOTE(review): encode('hex') is Python 2 only.
                log.error('Bad chunk response "%s" %s', d, ':'.join(x.encode('hex') for x in d))
                return
            pos += chunk_size
        log.debug('sending zero block')
        #zero size block
        self.write_chunk('')
        if verify == 'standard':
            log.info('Verifying...')
            data = self.download_file(destination)
            if content != data:
                log.error('Verification failed.')
        elif verify == 'sha1':
            #Calculate SHA1 on remote file. Extract just hash from result
            data = self.exchange('shafile("'+destination+'")').splitlines()[1]
            log.info('Remote SHA1: %s', data)
            #Calculate hash of local data
            filehashhex = hashlib.sha1(content.encode()).hexdigest()
            log.info('Local SHA1: %s', filehashhex)
            if data != filehashhex:
                log.error('Verification failed.')
    def exec_file(self, path):
        """Send the lines of a local Lua file to the device one at a time,
        logging each response."""
        filename = os.path.basename(path)
        log.info('Execute %s', filename)
        f = open(path, 'rt')
        res = '> '
        for line in f:
            line = line.rstrip('\r\n')
            retlines = (res + self.exchange(line)).splitlines()
            # Log all but the last line
            res = retlines.pop()
            for lin in retlines:
                log.info(lin)
        # last line
        log.info(res)
        f.close()
    def got_ack(self):
        """Read one byte and return True if it is the ACK control byte."""
        log.debug('waiting for ack')
        res = self._port.read(1)
        # NOTE(review): encode('hex') is Python 2 only.
        log.debug('ack read %s', res.encode('hex'))
        return res == '\x06' #ACK
    def write_lines(self, data):
        """Send *data* line by line through exchange(), ignoring replies."""
        lines = data.replace('\r', '').split('\n')
        for line in lines:
            self.exchange(line)
        return
    def write_chunk(self, chunk):
        """Send one protocol chunk (SOH + length byte + payload, padded to
        128 bytes) and return whether the device ACKed it."""
        log.debug('writing %d bytes chunk', len(chunk))
        data = '\x01' + chr(len(chunk)) + chunk
        if len(chunk) < 128:
            padding = 128 - len(chunk)
            log.debug('pad with %d characters', padding)
            data = data + (' ' * padding)
        log.debug("packet size %d", len(data))
        self.write(data)
        return self.got_ack()
    def file_list(self):
        """List the files on the device's filesystem."""
        log.info('Listing files')
        res = self.exchange(LIST_FILES)
        log.info(res)
        return res
    def file_do(self, f):
        """Run a Lua file on the device via dofile()."""
        log.info('Executing '+f)
        res = self.exchange('dofile("'+f+'")')
        log.info(res)
        return res
    def file_format(self):
        """Format the device's filesystem."""
        log.info('Formating...')
        res = self.exchange('file.format()')
        if 'format done' not in res:
            log.error(res)
        else:
            log.info(res)
        return res
    def node_heap(self):
        """Query and return the device's free heap size."""
        log.info('Heap')
        res = self.exchange('print(node.heap())')
        log.info(res)
        return res
    def node_restart(self):
        """Restart the device."""
        log.info('Restart')
        res = self.exchange('node.restart()')
        log.info(res)
        return res
    def file_compile(self, path):
        """Compile a Lua source file on the device to bytecode (.lc)."""
        log.info('Compile '+path)
        cmd = 'node.compile("%s")' % path
        res = self.exchange(cmd)
        log.info(res)
        return res
    def file_remove(self, path):
        """Delete a file from the device's filesystem."""
        log.info('Remove '+path)
        cmd = 'file.remove("%s")' % path
        res = self.exchange(cmd)
        log.info(res)
        return res
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.