index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
983,200 | bf0114a7cf55cc7aac4f260f8cefa278cd1ff60c | from django.shortcuts import render
from django.http import JsonResponse
from message.models import Message
from tools.login_check import login_check, get_user_by_request
import json
from .models import Topic
from user.models import UserProfile
# Create your views here.
@login_check('POST', 'DELETE')
def topics(request, author_id):
    """Blog topic collection view.

    GET    /v1/topics/<author_id>                       list the author's topics
    GET    /v1/topics/<author_id>?t_id=<id>             one topic's detail page
    GET    /v1/topics/<author_id>?category=tec|no-tec   list filtered by category
    POST   create a topic for the authenticated user (JSON body)
    DELETE /v1/topics/<author_id>?topic_id=<id>         delete the author's topic

    The author sees all of their own topics; other visitors only see
    topics whose limit is 'public'.
    """
    if request.method == 'GET':
        # Resolve the blog owner; author_id is a username.
        authors = UserProfile.objects.filter(username=author_id)
        if not authors:
            result = {'code': 308, 'error': 'no author'}
            return JsonResponse(result)
        author = authors[0]
        # The visitor, if authenticated (None for anonymous visitors).
        visitor = get_user_by_request(request)
        visitor_name = None
        if visitor:
            visitor_name = visitor.username
        t_id = request.GET.get('t_id')
        if t_id:
            # Detail page for a single topic.
            is_self = False
            t_id = int(t_id)
            if author_id == visitor_name:
                # The author is browsing their own blog: no limit filter.
                is_self = True
                try:
                    author_topic = Topic.objects.get(id=t_id)
                except Exception:
                    result = {'code': 312, 'error': 'Without this topic'}
                    return JsonResponse(result)
            else:
                # Visitors may only open public topics.
                try:
                    author_topic = Topic.objects.get(id=t_id, limit='public')
                except Exception:
                    result = {'code': 313, 'error': 'Without this topic!'}
                    return JsonResponse(result)
            res = make_topic_res(author, author_topic, is_self)
            return JsonResponse(res)
        else:
            # Topic list, optionally restricted to one category.
            category = request.GET.get('category')
            if category in ['tec', 'no-tec']:
                # v1/topics/<author_id>?category=[tec | no-tec]
                if author_id == visitor_name:
                    topics = Topic.objects.filter(author_id=author_id, category=category)
                else:
                    topics = Topic.objects.filter(author_id=author_id, limit='public', category=category)
            else:
                # v1/topics/<author_id>  -- the full list
                if author_id == visitor_name:
                    topics = Topic.objects.filter(author_id=author_id)
                else:
                    topics = Topic.objects.filter(author_id=author_id, limit='public')
            res = make_topics_res(author, topics)
            return JsonResponse(res)
    elif request.method == 'POST':
        # Create a topic. The frontend sends JSON (not a form), so the
        # payload must be read from request.body rather than request.POST.
        json_str = request.body.decode()
        if not json_str:
            result = {'code': 301, 'error': 'Without json data'}
            return JsonResponse(result)
        json_obj = json.loads(json_str)
        title = json_obj.get('title')
        # BUGFIX: validate before escaping -- html.escape(None) raised
        # AttributeError when the title was missing.
        if not title:
            result = {'code': 302, 'error': 'Please enter title'}
            return JsonResponse(result)
        # Neutralise script tags typed into the title input (XSS).
        import html
        title = html.escape(title)
        content = json_obj.get('content')
        if not content:
            result = {'code': 303, 'error': 'Please enter the content'}
            return JsonResponse(result)
        # Plain-text body, used only to cut the introduction blurb.
        content_text = json_obj.get('content_text')
        if not content_text:
            result = {'code': 304, 'error': 'Please enter content_text'}
            return JsonResponse(result)
        introduce = content_text[:30]
        limit = json_obj.get('limit')
        if limit not in ['public', 'private']:
            result = {'code': 305, 'error': 'Your limit is wrong'}
            return JsonResponse(result)
        category = json_obj.get('category')
        if category not in ['tec', 'no-tec']:
            result = {'code': 303, 'error': 'Please choose category'}
            return JsonResponse(result)
        Topic.objects.create(title=title, category=category,
                             limit=limit, content=content,
                             introduce=introduce,
                             author=request.user)
        result = {'code': 200, 'username': request.user.username}
        return JsonResponse(result)
    elif request.method == 'DELETE':
        # The username in the URL must match the token's user.
        author = request.user
        token_author_id = author.username
        if author_id != token_author_id:
            result = {'code': 309, 'error': "You can't delete it"}
            return JsonResponse(result)
        topic_id = request.GET.get('topic_id')
        try:
            topic = Topic.objects.get(id=topic_id)
        except Exception:
            result = {'code': 310, 'error': 'You can not delete it!'}
            return JsonResponse(result)
        # Double-check ownership before deleting.
        if topic.author.username != author_id:
            result = {'code': 311, 'error': "You can't delete it!!"}
            return JsonResponse(result)
        topic.delete()
        res = {'code': 200}
        return JsonResponse(res)
def make_topics_res(author, topics):
    """Build the topic-list JSON payload for an author's blog page."""
    topic_items = [
        {
            'id': topic.id,
            'title': topic.title,
            'category': topic.category,
            'introduce': topic.introduce,
            'author': author.nickname,
            'created_time': topic.created_time.strftime('%Y-%m-%d %H:%M:%S'),
        }
        for topic in topics
    ]
    return {
        'code': 200,
        'data': {'nickname': author.nickname, 'topics': topic_items},
    }
def make_topic_res(author, author_topic, is_self):
    """Build the topic-detail JSON payload, including prev/next links and
    the nested message/reply tree.

    :param author: UserProfile of the blog owner
    :param author_topic: the Topic being viewed
    :param is_self: True when the author is viewing their own topic, so
        private neighbours may be linked
    :return: dict ready for JsonResponse
    """
    if is_self:
        # The author sees all of their own topics as prev/next neighbours.
        next_topic = Topic.objects.filter(id__gt=author_topic.id, author=author).first()
        last_topic = Topic.objects.filter(id__lt=author_topic.id, author=author).last()
    else:
        # Visitors only get public neighbours.
        next_topic = Topic.objects.filter(id__gt=author_topic.id, author=author, limit='public').first()
        last_topic = Topic.objects.filter(id__lt=author_topic.id, author=author, limit='public').last()
    next_id = next_topic.id if next_topic else None
    next_title = next_topic.title if next_topic else None
    last_id = last_topic.id if last_topic else None
    last_title = last_topic.title if last_topic else None
    all_messages = Message.objects.filter(topic=author_topic).order_by('-created_time')
    msg_list = []      # top-level messages
    reply_dict = {}    # parent message id -> list of replies
    msg_count = 0
    for msg in all_messages:
        msg_count += 1
        if msg.parent_message == 0:
            # parent_message == 0 marks a top-level message
            msg_list.append({'id': msg.id, 'content': msg.content,
                             'publisher': msg.publisher.nickname,
                             'publisher_avatar': str(msg.publisher.avatar),
                             'created_time': msg.created_time.strftime('%Y-%m-%d %H:%M:%S'),
                             'reply': []
                             })
        else:
            # A reply: bucket it under its parent message.
            reply_dict.setdefault(msg.parent_message, [])
            reply_dict[msg.parent_message].append({
                'msg_id': msg.id, 'content': msg.content,
                'publisher': msg.publisher.nickname,
                'publisher_avatar': str(msg.publisher.avatar),
                'created_time': msg.created_time.strftime('%Y-%m-%d %H:%M:%S'),
            })
    # Attach replies to their parent messages.
    for _msg in msg_list:
        if _msg['id'] in reply_dict:
            _msg['reply'] = reply_dict[_msg['id']]
    res = {'code': 200, 'data': {}}
    res['data']['nickname'] = author.nickname
    res['data']['title'] = author_topic.title
    res['data']['category'] = author_topic.category
    # BUGFIX: format string was the malformed '%Y-h%-%d %H:%M:%S'.
    res['data']['created_time'] = author_topic.created_time.strftime('%Y-%m-%d %H:%M:%S')
    res['data']['content'] = author_topic.content
    res['data']['introduce'] = author_topic.introduce
    res['data']['author'] = author.nickname
    res['data']['next_id'] = next_id
    res['data']['next_title'] = next_title
    res['data']['last_id'] = last_id
    res['data']['last_title'] = last_title
    res['data']['messages'] = msg_list
    res['data']['messages_count'] = msg_count
    return res
|
983,201 | af2ca6599935ccdc226fa624e2fbebfa671dfeeb | ../session2/flower.py |
def dobro(preco, show_cifrao):
    """Return double the price; formatted as currency when show_cifrao."""
    dobrado = preco * 2
    return cifrao(dobrado) if show_cifrao else dobrado
def metade(preco, show_cifrao):
    """Return half the price; formatted as currency when show_cifrao."""
    meio = preco / 2
    return cifrao(meio) if show_cifrao else meio
def porcentagem(preco, porcentagem, reduzir=False, show_cifrao=False):
    """Raise (or lower, when reduzir=True) `preco` by `porcentagem` percent."""
    delta = preco * (porcentagem / 100)
    resultado = preco - delta if reduzir else preco + delta
    return cifrao(resultado) if show_cifrao else resultado
def cifrao(numero):
    """Format a number as Brazilian currency, e.g. 'R$ 3.50'."""
    return 'R$ {:.2f}'.format(numero)
def resumo(numero, aumento, reducao):
    """Print a formatted summary: the price, its double, and the price
    raised by `aumento`% and lowered by `reducao`%."""
    linha = '-' * 40
    print(linha)
    print('Resumo do valor'.center(40))
    print(linha)
    print(f'Preço analisado: {cifrao(numero)}')
    print(f'Dobro do preço: {dobro(numero, True)}')
    print(f'{aumento}% do preço: {porcentagem(numero, aumento, show_cifrao=True)}')
    print(f'{reducao}% do preço: {porcentagem(numero, reducao, reduzir=True, show_cifrao=True)}')
    print(linha)
983,203 | c363ed0bf81741555bd7e41bfef6a4789b92c7e5 | import math
def graph(function):
for y in range (10, -11, -1):
for x in range(-10, 11):
val = (round(eval(function)), x)
if y == val[0] and x == val[1]:
print('o', end='')
elif y == 0 and x == 0:
print('+', end='')
elif y == 0:
print('-', end='')
elif x == 0:
print('|', end='')
else:
print(' ', end='')
print()
graph(input('Enter a function f(x):\n')) |
983,204 | 9d391ee7458b313c46630213127452241b0429b8 | #!/usr/bin/env python
# -*- coding: utf-8 -*-"
import ipcalc
import sqlite3
import sys
import os
# SQLite database mapping the 192.168.100.0/24 address pool to VPN users.
# NOTE(review): 'ovpn.bd' looks like a typo for 'ovpn.db' -- confirm before renaming.
conn = sqlite3.connect('ovpn.bd')
c = conn.cursor()
def pars():
    """Sync /etc/openvpn/ccd client configs into the `net` table.

    Each ccd file is named after a user; its whitespace-separated content
    holds the pushed addresses (fields 1 and 2). The user name is written
    to the row whose column `a` equals field 1.
    """
    files = os.listdir('/etc/openvpn/ccd')
    for x in files:
        # BUGFIX: the original called `f.close` without parentheses, so the
        # file handle was never closed; a context manager closes it reliably.
        with open('/etc/openvpn/ccd/%s' % x) as f:
            fi = f.read().strip().split()
        c.execute("UPDATE net \
                   SET user = ? \
                   WHERE a = ?", (x, fi[1]))
        conn.commit()
        print(x + " " + fi[1] + "-" + fi[2])
    # Row 1 is reserved for the server's own address.
    c.execute("UPDATE net SET user = 'system' WHERE id = 1")
    conn.commit()
# If the `net` table already exists, only refresh user assignments and exit.
c.execute("SELECT count(*) FROM sqlite_master WHERE type='table'\
    AND name='net';")
if c.fetchone()[0] == 1:
    print("БД существует!!!")
    pars()
    conn.close()
    sys.exit()
# First run: create the table, then populate it with the /24 address pool.
c.execute("CREATE TABLE net ('id' INTEGER PRIMARY KEY AUTOINCREMENT ,\
    'user' TEXT,\
    'a' TEXT,\
    'b' TEXT,\
    'dostup' TEXT)")
# Walk the subnet in groups of 4 addresses (/30 blocks): the first two
# addresses of each block are stored as columns a and b; the remaining
# two are skipped (empty print keeps the console output aligned).
d = 1
for x in ipcalc.Network('192.168.100.0/24'):
    if d == 1:
        print("d == 1 ", str(x))
        c.execute("insert into net (a) values ('%s')" % str(x))
        conn.commit()
        # Remember this row's key (column a) for the next address.
        idi = str(x)
        d += 1
    elif d == 2:
        print("d == 2", str(x))
        c.execute("update net \
            set b = ? \
            where a = ?", (str(x), idi))
        conn.commit()
        d += 1
    elif d == 3:
        print("")
        d += 1
    elif d == 4:
        print("")
        d = 1
pars()
print("OK!!")
conn.close()
|
983,205 | 8074e664e09856b26d3c486ee6737f275de988a6 | import numpy as np
import pandas as pd
import h5py
# nucleosynth
from nucleosynth import paths, network, tools
from nucleosynth.tracers import extract_hdf5
from nucleosynth.printing import printv
from nucleosynth.config import tables_config
"""
Functions for loading/saving tracer data
"""
# ===============================================================
# Loading/extracting tables
# ===============================================================
def load_files(tracer_id, model, tracer_steps,
               tracer_files=None, verbose=True):
    """Load the skynet tracer file for every step in `tracer_steps`.

    An already-populated `tracer_files` dict is returned as-is
    (mirrors load_file's pass-through behaviour).

    parameters
    ----------
    tracer_id : int
    tracer_steps : [int]
    model : str
    tracer_files : {tracer_step: h5py.File}
    verbose : bool
    """
    if tracer_files is None:
        tracer_files = {
            step: load_file(tracer_id, tracer_step=step,
                            model=model, verbose=verbose)
            for step in tracer_steps
        }
    return tracer_files
def load_file(tracer_id, tracer_step, model, tracer_file=None, verbose=True):
    """Open one skynet tracer hdf5 file (read-only).

    An already-open `tracer_file` is returned untouched.

    parameters
    ----------
    tracer_id : int
    tracer_step : 1 or 2
    model : str
    tracer_file : h5py.File
    verbose : bool
    """
    if tracer_file is not None:
        return tracer_file
    filepath = paths.tracer_filepath(tracer_id, tracer_step, model=model)
    printv(f'Loading tracer file: {filepath}', verbose=verbose)
    return h5py.File(filepath, 'r')
def load_table(tracer_id, model, table_name, tracer_steps,
               columns=None, tracer_files=None, tracer_network=None,
               y_table=None, reload=False, save=True, verbose=True):
    """Wrapper function for loading various tracer tables

    Main steps:
        1. Try to load from cache
        2. If no cache, re-extract from file
        3. Save new table to cache (if save=True)

    Returns : pd.DataFrame

    parameters
    ----------
    tracer_id : int
    model : str
    table_name : one of ('columns', 'X', 'Y', 'network')
    tracer_steps : [int]
        Load multiple skynet files for joining
    columns : [str]
        list of columns to extract
    tracer_files : {h5py.File}
        raw tracer files to load and join, as returned by load_file()
        dict keys must correspond to tracer_steps
    tracer_network : pd.DataFrame
    y_table : pd.DataFrame
    reload : bool
        Force reload from raw skynet file
    save : bool
        save extracted table to cache
    verbose : bool
    """
    printv(f'Loading {table_name} table', verbose=verbose)
    table = None
    if table_name not in ['columns', 'network', 'X', 'Y']:
        # BUGFIX: message now lists every accepted name (original omitted
        # 'network' even though it is accepted above).
        raise ValueError('table_name must be one of: columns, network, X, Y')
    if not reload:
        try:
            table = load_table_cache(tracer_id, model, table_name, verbose=verbose)
        except FileNotFoundError:
            printv('cache not found', verbose)
    if table is None:
        printv(f'Reloading and joining {table_name} tables', verbose)
        table = extract_table(tracer_id, tracer_steps=tracer_steps, model=model,
                              table_name=table_name, columns=columns,
                              tracer_network=tracer_network, y_table=y_table,
                              tracer_files=tracer_files, verbose=verbose)
        if save:
            save_table_cache(table, tracer_id, model, table_name, verbose=verbose)
    return table
def extract_table(tracer_id, tracer_steps, model, table_name, columns=None,
                  tracer_files=None, tracer_network=None, y_table=None,
                  verbose=True):
    """Wrapper for various table extract functions

    Returns : pd.DataFrame

    parameters
    ----------
    tracer_id : int
    tracer_steps : [int]
    model : str
    table_name : str
    columns : [str]
    tracer_files : {h5py.File}
    tracer_network : pd.DataFrame
    y_table : pd.DataFrame
    verbose : bool
    """
    step_tables = []
    if columns is None:
        columns = tables_config.columns
    tracer_files = load_files(tracer_id, model=model, tracer_steps=tracer_steps,
                              tracer_files=tracer_files, verbose=verbose)
    # The isotope network is shared by all steps; take it from the first
    # step's file when not supplied by the caller.
    if tracer_network is None:
        tracer_network = extract_hdf5.extract_network(tracer_files[tracer_steps[0]])
    if table_name == 'network':
        return tracer_network
    if table_name == 'X':
        # Mass fractions (X) are derived from the abundance table (Y),
        # extracted recursively when not provided.
        if y_table is None:
            y_table = extract_table(tracer_id, tracer_steps=tracer_steps,
                                    model=model, table_name='Y',
                                    tracer_files=tracer_files,
                                    tracer_network=tracer_network, verbose=verbose)
        return network.get_x(y_table, tracer_network=tracer_network)
    # 'columns' and 'Y' are extracted per step, then concatenated end-to-end.
    for step in tracer_steps:
        tracer_file = tracer_files[step]
        if table_name == 'columns':
            table = extract_hdf5.extract_columns(tracer_file, columns=columns)
        elif table_name == 'Y':
            table = extract_hdf5.extract_y(tracer_file, tracer_network=tracer_network)
        else:
            raise ValueError('table_name must be one of (network, columns, X, Y)')
        step_tables += [table]
    return pd.concat(step_tables, ignore_index=True)
# ===============================================================
# Composition
# ===============================================================
def load_composition(tracer_id, tracer_steps, model,
                     tracer_files=None, tracer_network=None,
                     reload=False, save=True, verbose=True):
    """Load both composition tables: mass fraction (X) and abundance (Y).

    Returns : {abu_var: pd.DataFrame} keyed by 'X' and 'Y'

    parameters
    ----------
    tracer_id : int
    tracer_steps : [int]
    model : str
    tracer_files : {h5py.File}
    tracer_network : pd.DataFrame
    reload : bool
    save : bool
    verbose : bool
    """
    return {
        abu_var: load_table(tracer_id,
                            tracer_steps=tracer_steps,
                            model=model,
                            tracer_files=tracer_files,
                            table_name=abu_var,
                            tracer_network=tracer_network,
                            save=save, reload=reload,
                            verbose=verbose)
        for abu_var in ('X', 'Y')
    }
def load_sums(tracer_id, tracer_steps, model,
              tracer_files=None, tracer_network=None, composition=None,
              reload=False, save=True, verbose=True):
    """Wrapper function to load all composition sum tables

    Returns : {iso_group: {abu_var: pd.DataFrame}}

    parameters
    ----------
    tracer_id : int
    tracer_steps : [int]
    model : str
    tracer_files : {tracer_step: h5py.File}
    tracer_network : pd.DataFrame
    composition : {abu_var: pd.DataFrame}
    reload : bool
        force recalculation instead of reading the cache
    save : bool
        persist freshly calculated tables to the cache
    verbose : bool
    """
    printv(f'Loading composition sum tables', verbose=verbose)
    sums = None
    # 1. Try the cache first (unless a reload is forced).
    if not reload:
        try:
            sums = load_sums_cache(tracer_id, model=model, verbose=verbose)
        except FileNotFoundError:
            printv('cache not found', verbose)
    # 2. Cache miss: recalculate from the composition tables.
    if sums is None:
        printv(f'Calculating sums', verbose)
        tracer_files = load_files(tracer_id, tracer_steps=tracer_steps, model=model,
                                  tracer_files=tracer_files, verbose=verbose)
        if composition is None:
            composition = load_composition(tracer_id, tracer_steps=tracer_steps,
                                           model=model, tracer_files=tracer_files,
                                           tracer_network=tracer_network,
                                           reload=reload, save=save, verbose=verbose)
        if tracer_network is None:
            tracer_network = load_table(tracer_id, tracer_steps=tracer_steps,
                                        model=model, table_name='network',
                                        tracer_files=tracer_files, reload=reload,
                                        save=save, verbose=verbose)
        sums = network.get_all_sums(composition, tracer_network=tracer_network)
        # 3. Persist the freshly calculated tables.
        if save:
            save_sums_cache(tracer_id, model=model,
                            sums=sums, verbose=verbose)
    return sums
def save_sums_cache(tracer_id, model, sums, verbose=True):
    """Write every composition-sum table in `sums` to the cache.

    parameters
    ----------
    tracer_id : int
    model : str
    sums : {iso_group: {abu_var: pd.DataFrame}}
    verbose : bool
    """
    for iso_group in sums:
        for composition_type, table in sums[iso_group].items():
            name = network.sums_table_name(composition_type, iso_group=iso_group)
            save_table_cache(table, tracer_id=tracer_id, model=model,
                             table_name=name, verbose=verbose)
def load_sums_cache(tracer_id, model, verbose=True):
    """Read all composition-sum tables back from the cache.

    Returns : {iso_group: {abu_var: pd.DataFrame}}

    parameters
    ----------
    tracer_id : int
    model : str
    verbose : bool
    """
    sums = {}
    for iso_group in ('A', 'Z'):
        sums[iso_group] = {}
        for abu_var in ('X', 'Y'):
            name = network.sums_table_name(abu_var, iso_group=iso_group)
            sums[iso_group][abu_var] = load_table_cache(tracer_id=tracer_id,
                                                        model=model,
                                                        table_name=name,
                                                        verbose=verbose)
    return sums
# ===============================================================
# STIR files
# ===============================================================
def load_stir_tracer(tracer_id, model):
    """Load the STIR model profile used as SkyNet input.

    Returns : pd.DataFrame with columns from tables_config.stir_columns

    parameters
    ----------
    tracer_id : int
    model : str
    """
    filepath = paths.stir_filepath(tracer_id, model=model)
    # sep=r'\s+' replaces delim_whitespace=True, which is deprecated in
    # pandas 2.1 and removed in 3.0; behaviour is identical.
    table = pd.read_csv(filepath, header=None, skiprows=2, sep=r'\s+')
    table.columns = tables_config.stir_columns
    return table
def get_stir_mass_grid(tracer_ids, model, verbose=True):
    """Collect the mass element of each STIR tracer into a numpy array.

    parameters
    ----------
    tracer_ids : int or [int]
    model : str
    verbose : bool
    """
    printv('Loading mass grid', verbose=verbose)
    ids = tools.expand_sequence(tracer_ids)
    return np.array([get_stir_mass_element(tid, model) for tid in ids])
def get_stir_mass_element(tracer_id, model):
    """Read the mass element (Msun) from a STIR tracer file's header line.

    parameters
    ----------
    tracer_id : int
    model : str
    """
    filepath = paths.stir_filepath(tracer_id, model)
    with open(filepath, 'r') as f:
        header = f.readline()
    # The mass element is the 4th whitespace-separated field.
    return float(header.split()[3])
# ===============================================================
# Cache
# ===============================================================
def save_table_cache(table, tracer_id, model, table_name, verbose=True):
    """Pickle a tracer table into the model's cache directory.

    parameters
    ----------
    table : pd.DataFrame
    tracer_id : int
    model : str
    table_name : one of ('columns', 'X', 'Y', 'network')
    verbose : bool
    """
    # Ensure the cache directory exists before writing.
    check_cache_path(model, verbose=verbose)
    cache_file = paths.tracer_cache_filepath(tracer_id, model, table_name=table_name)
    printv(f'Saving table to cache: {cache_file}', verbose)
    table.to_pickle(cache_file)
def load_table_cache(tracer_id, model, table_name, verbose=True):
    """Unpickle a cached tracer table; raises FileNotFoundError if absent.

    parameters
    ----------
    tracer_id : int
    model : str
    table_name : one of ('columns', 'X', 'Y', 'network')
    verbose : bool
    """
    cache_file = paths.tracer_cache_filepath(tracer_id, model, table_name=table_name)
    printv(f'Loading table from cache: {cache_file}', verbose)
    return pd.read_pickle(cache_file)
def check_cache_path(model, verbose=True):
    """Ensure the model's tracer cache directory exists, creating it if needed."""
    paths.try_mkdir(paths.tracer_cache_path(model), skip=True, verbose=verbose)
|
983,206 | c93196aa47bc23fb37dbf5eb393ba2619d0a1ef5 | import os
from xsms.server import Servers
from xsms.server import Server
from xsms.config import conf
root_dir = os.path.dirname(os.path.abspath(__file__))
def test_server_object():
    """A Server keeps the name it was constructed with."""
    srv = Server(name='insta',
                 exec='./all run dedicated +serverconfig vanilla.cfg',
                 title='My server')
    assert srv.name == 'insta'
def test_servers_object():
    """A Servers collection preserves member order and attributes."""
    members = [
        Server(name='vanilla', exec='./all run dedicated +serverconfig vanilla.cfg', title='My server 1'),
        Server(name='insta', exec='./all run dedicated +serverconfig insta.cfg', title='My server 2'),
    ]
    collection = Servers(name='Xonotic Server Collection', servers=members)
    assert collection.servers[0].name == 'vanilla'
    assert collection.servers[1].title == 'My server 2'
|
983,207 | c12e4395b3b66ebe9a02d41d69e7b3cd09280d8d | import numpy as np
from . import statistics as stat
from ._util import unzip
default_size = 10**4
def resample(data): return np.random.choice(data, len(data))
def resample_pairs(x, y):
    """Bootstrap-resample (x, y) jointly, preserving the pairing."""
    # Coerce to plain arrays so positional indexing works even for inputs
    # with a non-default index (e.g. pandas Series).
    ax = np.array(x)
    ay = np.array(y)
    picked = resample(np.arange(0, len(ax)))
    return unzip([(ax[i], ay[i]) for i in picked])
def replicate(data, calc_statistic, size=default_size):
    """Bootstrap distribution of `calc_statistic` over `size` resamples."""
    reps = [calc_statistic(resample(data)) for _ in range(size)]
    return np.array(reps)
def replicate2(data1, data2, calc_statistic, size=default_size):
    """Bootstrap distribution of a two-sample statistic (independent resamples)."""
    reps = [calc_statistic(resample(data1), resample(data2))
            for _ in range(size)]
    return np.array(reps)
def replicate_pairs(x, y, f, size=default_size):
    """Bootstrap distribution of `f` applied to jointly-resampled pairs."""
    reps = [f(*resample_pairs(x, y)) for _ in range(size)]
    return np.array(reps)
def lin_fit(x, y, size=default_size):
    """Bootstrap distributions of the linear-fit parameters for paired data."""
    fits = [stat.lin_fit(*resample_pairs(x, y)) for _ in range(size)]
    return unzip(fits)
|
983,208 | 05ff59b795e7b76c1296a7ebe82a447b2db54bd9 | from main import ma
from models.Thread import Thread
from schemas.User_Schema import user_schema
from schemas.Post_Schema import posts_schema
from schemas.Category_Schema import categories_schema
from marshmallow import validate, fields
class ThreadSchema(ma.SQLAlchemyAutoSchema):
    """Marshmallow auto-schema for the Thread model, with nested author
    and category representations."""
    class Meta:
        model = Thread
    # Title is mandatory, 1-150 characters.
    title = fields.Str(required=True, validate=validate.Length(min=1, max=150))
    # Status is a small enum-like integer in [0, 2].
    status = fields.Integer(required=True, validate=validate.Range(min=0, max=2))
    # Only expose safe user fields -- never password hashes etc.
    thread_author = ma.Nested(user_schema, only=("user_id", "email", "fname", "lname", "role"))
    categories = ma.Nested(categories_schema, only=("category_id", "name"))
# Single-thread schema: time_created is server-set, so dump-only.
thread_schema = ThreadSchema(dump_only=("time_created",))
# Collection schema for serialising lists of threads.
threads_schema = ThreadSchema(many=True)
983,209 | 67430d62b9ebf66d2d2e827e3487392efaa7808a | # String maketrans method in python
# Example-1: translate vowels to digits using str.maketrans.
intab = "aeiou"
outtab = "12345"
trnstab = str.maketrans(intab, outtab)
# BUGFIX: the variable was named `str`, shadowing the builtin -- any later
# call to str(...) or str.maketrans(...) would have failed.
text = "this is string example .... wow!!!"
print(text.translate(trnstab))
983,210 | e0d0fc77802560045248a363659077f395e31dff | import abc
from typing import Tuple
class Adapter(abc.ABC):
    """Callable converter: subclasses implement `convert`, and calling the
    instance delegates to it."""

    @abc.abstractmethod
    def convert(self, *args: Tuple) -> Tuple:
        pass

    def __call__(self, *args: Tuple) -> Tuple:
        return self.convert(*args)
class InAdapter(Adapter):
    """Identity adapter for stage inputs: passes the input tuple through."""
    def convert(self, stage_input: Tuple) -> Tuple:
        return stage_input
class OutAdapter(Adapter):
    """Adapter for stage outputs: ignores the input and forwards the output tuple."""
    def convert(self, stage_input: Tuple, stage_output: Tuple) -> Tuple:
        return stage_output
|
983,211 | 7ef5374dd34d04a933210b9a7a7e2bab7023f08c | from http.server import BaseHTTPRequestHandler, HTTPServer
import importlib
# module = importlib.import_module('controlers')
import controleurs
# Page template shared by every response; controllers supply the {body} part.
base_html = """<!DOCTYPE html>
<html>
<head>
<title>Mini-Serveur</title>
<style>
input {
padding: 4px 12px;
border: 1px solid #ddd;
border-radius: 4px;
margin-bottom: 10px;
}
</style>
</head>
<body>
<nav>
<a href="/">Home</a> |
<a href="/login">Login</a> |
<a href="/register">Register</a>
</nav>
{body}
</body>
</html>
"""
# HTTPRequestHandler class
class MiniHTTPServerRequestHandler(BaseHTTPRequestHandler):
    """Route GET/POST requests to controller classes from `controleurs`.

    Refactor: the response-building code that was duplicated between
    do_GET and do_POST now lives in the private helpers below.
    """

    # URL path -> controller class name in the `controleurs` module.
    paths = {
        "/": "HomeController",
        "/login": "LoginController",
        "/register": "RegisterController"
    }

    def _not_found(self):
        """Reply 404 for paths with no registered controller."""
        self.send_response(404)
        self.end_headers()

    def _controller(self):
        """Instantiate the controller registered for the current path."""
        controller_class = self.paths[self.path]
        print(controller_class)
        class_ = getattr(controleurs, controller_class)
        return class_()

    def _send_html(self, html):
        """Wrap `html` in the base page template and send it with status 200."""
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        message = base_html.replace("{body}", html)
        # Write content as utf-8 data
        self.wfile.write(bytes(message, "utf8"))

    def do_GET(self):
        if self.path not in self.paths:
            self._not_found()
            return
        self._send_html(self._controller().do_GET())

    def do_POST(self):
        if self.path not in self.paths:
            self._not_found()
            return
        # Content-Length is required so we know how many body bytes to read;
        # get_all() returns a list even when there is a single header.
        content_len_headers = self.headers.get_all('content-length')
        if not content_len_headers:
            # Bad Request: cannot determine how much to read.
            self.send_response(400)
            self.end_headers()
            return
        content_len = int(content_len_headers[0])
        post_body = self.rfile.read(content_len)
        body_str = str(post_body, 'utf-8')
        self._send_html(self._controller().do_POST(body_str))
def run():
    """Start the HTTP server on 127.0.0.1:8081 and serve until interrupted."""
    print('starting server...')
    # Port 8081: binding the canonical HTTP port 80 would require root.
    server_address = ('127.0.0.1', 8081)
    httpd = HTTPServer(server_address, MiniHTTPServerRequestHandler)
    print('running server...')
    httpd.serve_forever()

run()
983,212 | 33d98ebd534db4aec047e94f7d71259bd7b3fb3d |
class OutputRow(object):
    """A single result row of a RANSAC benchmark run.

    Holds the input/output image file names, the threshold settings, the
    elapsed time and the nearest-neighbour distance statistic.

    Fixes over the original: every backing field is initialised in
    __init__ (getters no longer raise AttributeError before the setter is
    called, which also broke __repr__), and the backing names use one
    consistent single-underscore convention instead of mixing name-mangled
    `__inputfilename` with `_outputimagefile`.
    """

    def __init__(self):
        self._imagefile = None
        self._outputimagefile = None
        self._actualthreshold = None
        self._thresholdfactor = None
        self._elapsed_time = None
        self._nearest_neighbour_distance_statistic = None

    @property
    def imagefile(self):
        """The name of the input image file."""
        return self._imagefile

    @imagefile.setter
    def imagefile(self, value):
        self._imagefile = value

    @property
    def outputimagefile(self):
        """The name of the image file where the RANSAC results were saved."""
        return self._outputimagefile

    @outputimagefile.setter
    def outputimagefile(self, value):
        self._outputimagefile = value

    @property
    def actualthreshold(self):
        """The actual threshold value used for the RANSAC calculations."""
        return self._actualthreshold

    @actualthreshold.setter
    def actualthreshold(self, value):
        self._actualthreshold = value

    @property
    def thresholdfactor(self):
        """The threshold factor that was used to generate this RANSAC output."""
        return self._thresholdfactor

    @thresholdfactor.setter
    def thresholdfactor(self, value):
        self._thresholdfactor = value

    @property
    def elapsed_time(self):
        """The time it took for the algorithm to produce this result."""
        return self._elapsed_time

    @elapsed_time.setter
    def elapsed_time(self, value):
        self._elapsed_time = value

    @property
    def nearest_neighbour_distance_statistic(self):
        """The nearest-neighbour distance statistic of this run."""
        return self._nearest_neighbour_distance_statistic

    @nearest_neighbour_distance_statistic.setter
    def nearest_neighbour_distance_statistic(self, value):
        self._nearest_neighbour_distance_statistic = value

    def __repr__(self):
        return f'input imagefile={self.imagefile}, outputimagefile={self.outputimagefile} , threshold factor={self.thresholdfactor}, actual threshold ={self.actualthreshold}'
|
983,213 | 52db879e45a57bc427d56d9b62e4b573b3c4035d | import abc as _abc
import autograder as _autograder
class StatsReporter(_autograder.Reporter):
    """Reporter that accumulates one value per successful item for each
    named operation, then prints aggregated statistics at the end."""

    requirements = {}

    class Operation(metaclass=_abc.ABCMeta):
        """One named statistic: `read` extracts a value per item and
        `accumulate` reduces the collected values."""

        def __init__(self, name):
            self.name = name

        @_abc.abstractmethod
        def read(self, data, global_data):
            pass

        @_abc.abstractmethod
        def accumulate(self, accumulator):
            pass

    def __init__(self, operations):
        self.operations = operations
        self.accumulators = {}
        for op in operations:
            self.accumulators[op.name] = []
        self.item_count = 0

    def on_individual_completion(self, id, success, data, global_data):
        # Failed items contribute nothing to the statistics.
        if not success:
            return
        self.item_count += 1
        for op in self.operations:
            self.accumulators[op.name].append(op.read(data, global_data))

    def on_completion(self, data):
        print('Statistics (of {} successful items):'.format(self.item_count))
        for op in self.operations:
            value = op.accumulate(self.accumulators[op.name])
            print('{name}: {value}'.format(name=op.name, value=value))
|
983,214 | e436f0b152c39723bb0a42c10652a355f8bc05ad | from itertools import islice
import pytest
@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
    """Autouse fixture: expose `islice` inside every doctest's namespace."""
    doctest_namespace["islice"] = islice
|
983,215 | ce588f461ac1385d35b75c7b7c31ca87aa306117 | class Solution(object):
def countPrefixes(self, words, s):
"""
:type words: List[str]
:type s: str
:rtype: int
"""
ans = 0
for w in words:
if s.startswith(w):
ans += 1
return ans |
983,216 | 694644c5e927145b981cd47f470968232ae22de9 | /Users/jonathongaff/MDF/mdf-harvesters/mdf_indexers/ingester/search_client.py |
983,217 | 9300077356fb62c6e342e56695d36d20d34ff5be | """
Twitter'dan gerekli API izinlerini alamadığım için, twitter verileri
kişisel arşivin istenmesiyle elde edilmiştir. Javascript kodu içerisinde
liste içerisindeki dictionary'lerde elde edilen twitter verileri bir
python dosyasına atılmış,
handleTwitter class'ı yardımıyla emoji gibi karakterlerden arındırılmış,
aynı zamanda sadece tweet içeriğinin bulunduğu txt dosyaya dönüştürülmüştür.
Verinin ilk hali aşağıdaki örnekteki gibidir.
data = [ {
"tweet" : {
"retweeted" : False,
"source" : "<a href=\"http://twitter.com/download/android\" rel=\"nofollow\">Twitter for Android</a>",
"entities" : {
"hashtags" : [ ],
"symbols" : [ ],
"user_mentions" : [ ],
"urls" : [ ]
},
"display_text_range" : [ "0", "52" ],
"favorite_count" : "6",
"id_str" : "1268833353513517063",
"truncated" : False,
"retweet_count" : "0",
"id" : "1268833353513517063",
"created_at" : "Fri Jun 05 09:13:39 +0000 2020",
"favorited" : False,
"full_text" : "Ksksksksksksk aniden karşına çıkan yasak iptali şoku",
"lang" : "tr"
}
}]
"""
from selindata import data
import unicodedata
from unidecode import unidecode
import numpy as np
import os
from keras.models import Sequential
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.layers import LSTM
from keras.optimizers import RMSprop
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import pairwise_distances
def sample(preds, diversity):
    """Draw one class index from `preds` after temperature scaling.

    The log-probabilities are divided by `diversity` (the temperature),
    re-normalised via softmax, and a single multinomial draw decides the
    returned index.
    """
    scaled = np.asarray(preds).astype('float64')
    scaled = np.exp(np.log(scaled) / diversity)
    scaled = scaled / np.sum(scaled)
    draw = np.random.multinomial(1, scaled, 1)
    return np.argmax(draw)
class handleTwitter():
    """Clean raw archive tweets (module-level `data` from selindata) and
    write the surviving plain text, one tweet per line, to `filename`.

    Pipeline: drop tweets containing links/markup/mentions, transliterate
    non-ASCII characters to ASCII, lower-case, then write to disk.
    """
    def __init__(self,filename):
        # full_text of every tweet without links, markup or mentions
        self.data = self.createData()
        # the same tweets after transliteration and lower-casing
        self.no_emoji_data = self.noEmoji()
        self.writedata(filename)
    def createData(self):
        """Collect tweet texts, skipping any with URLs, HTML tags or @mentions."""
        tweets = []
        for dt in data:
            if('http' not in dt['tweet']['full_text'] and '<'not in dt['tweet']['full_text'] \
            and '@' not in dt['tweet']['full_text']):
                tweets.append(dt['tweet']['full_text'])
        return tweets
    def deEmojify(self,inputString):
        """Return `inputString` with every non-ASCII character transliterated.

        Characters unidecode cannot map become '[<UNICODE NAME>]' or, when
        even the name lookup fails, '[x]'. noEmoji() later discards any
        tweet still containing '[' (i.e. with untranslatable characters).
        """
        returnString = ""
        for character in inputString:
            try:
                # ASCII characters pass through untouched.
                character.encode("ascii")
                returnString += character
            except UnicodeEncodeError:
                # Non-ASCII: try an ASCII transliteration first.
                replaced = unidecode(str(character))
                if replaced != '':
                    returnString += replaced
                else:
                    try:
                        returnString += "[" + unicodedata.name(character) + "]"
                    except ValueError:
                        # Character has no Unicode name (e.g. some controls).
                        returnString += "[x]"
        return returnString
    def noEmoji(self):
        """Lower-case tweets; drop any that still contain a '[' placeholder."""
        emojifree = []
        for t in self.data:
            no_emj= self.deEmojify(t)
            if '[' not in no_emj:
                no_emj = no_emj.lower()
                emojifree.append(no_emj)
        return emojifree
    def writedata(self,filename):
        """Write the cleaned tweets to `filename`, one per line."""
        with open(filename, "w") as f:
            for s in self.no_emoji_data:
                f.write(str(s) +"\n")
class preProcessor():
    """Turn a tweets text file into one-hot LSTM training tensors.

    Builds a single space-joined corpus string, character index maps,
    overlapping character windows of length MAX_LEN (stride SEQ_JUMP),
    and the one-hot encoded training arrays dataX / dataY.
    """
    def __init__(self, filename):
        self.NUM_OF_SEQ = None
        self.MAX_LEN = 40     # characters per training sequence
        self.SEQ_JUMP = 3     # stride between consecutive windows
        # (sic) attribute name kept for interface compatibility
        self.CORPUS_LENGHT = None
        self.corpus = self.createCorpus(filename)
        self.chars = sorted(list(set(self.corpus)))
        self.NUM_OF_CHARS = len(self.chars)
        self.char_to_idx, self.idx_to_char = self.createIndices()
        self.sequences, self.next_chars = self.createSequences()
        self.dataX, self.dataY = self.one_hot()

    def getTweets(self, filename):
        """Read tweets, one per line, stripping surrounding whitespace."""
        tweets = []
        with open(filename, "r") as f:
            for line in f:
                tweets.append(line.strip())
        return tweets

    def createCorpus(self, filename):
        """Join all tweets into one space-separated corpus string."""
        tweets = self.getTweets(filename)
        corpus = u' '.join(tweets)
        self.CORPUS_LENGHT = len(corpus)
        return corpus

    def createIndices(self):
        """Build the char->index and index->char lookup tables."""
        char_to_idx = {}
        idx_to_char = {}
        for i, c in enumerate(self.chars):
            char_to_idx[c] = i
            idx_to_char[i] = c
        return char_to_idx, idx_to_char

    def createSequences(self):
        """Slice the corpus into MAX_LEN windows plus the following character."""
        sequences = []
        next_chars = []
        for i in range(0, self.CORPUS_LENGHT - self.MAX_LEN, self.SEQ_JUMP):
            sequences.append(self.corpus[i: i + self.MAX_LEN])
            next_chars.append(self.corpus[i + self.MAX_LEN])
        self.NUM_OF_SEQ = len(sequences)
        return sequences, next_chars

    def one_hot(self):
        """One-hot encode the windows (dataX) and their next-chars (dataY).

        BUGFIX: uses the builtin `bool` dtype. The original `np.bool`
        alias was deprecated in NumPy 1.20 and removed in 1.24, so this
        code crashed on modern NumPy.
        """
        dataX = np.zeros((self.NUM_OF_SEQ, self.MAX_LEN, self.NUM_OF_CHARS), dtype=bool)
        dataY = np.zeros((self.NUM_OF_SEQ, self.NUM_OF_CHARS), dtype=bool)
        for i, seq in enumerate(self.sequences):
            for j, c in enumerate(seq):
                dataX[i, j, self.char_to_idx[c]] = 1
            dataY[i, self.char_to_idx[self.next_chars[i]]] = 1
        return dataX, dataY
class LSTModel():
    """Two-layer character-level LSTM that trains on and generates tweets.

    NOTE(review): written against an old Keras API -- ``nb_epoch`` and
    ``RMSprop(lr=...)`` were renamed to ``epochs`` and ``learning_rate``
    in modern Keras; confirm the installed version before running.
    """
    def __init__(self, max_len, num_of_chars, preprocessor):
        self.max_len = max_len          # input sequence length (chars)
        self.num_of_chars = num_of_chars  # vocabulary size
        self.model = self.createModel()
        self.preprocessor = preprocessor  # supplies corpus and char index maps
    def createModel(self, layer_size=128, dropout=0.2, learning_rate=0.01, verbose=1):
        """Build and compile the stacked LSTM; returns the compiled model."""
        model = Sequential()
        model.add(LSTM(layer_size, return_sequences=True, input_shape=(self.max_len, self.num_of_chars)))
        model.add(Dropout(dropout))
        model.add(LSTM(layer_size, return_sequences=False))
        model.add(Dropout(dropout))
        # Softmax over the vocabulary: predicts the next character.
        model.add(Dense(self.num_of_chars, activation='softmax'))
        model.compile(loss='categorical_crossentropy', optimizer=RMSprop(lr=learning_rate))
        if verbose:
            print('Model Summary:')
            model.summary()
        return model
    def trainModel(self, X, y, batch_size=128, nb_epoch=60, verbose=0):
        """Fit on one-hot data, checkpointing the lowest-loss weights to weights.hdf5."""
        checkpointer = ModelCheckpoint(filepath="weights.hdf5", monitor='loss', verbose=verbose, save_best_only=True, mode='min')
        history = self.model.fit(X, y, batch_size=batch_size, nb_epoch=nb_epoch, verbose=verbose, callbacks=[checkpointer])
        return history
    def createTweets(self, num_of_tweets=10, tweet_length=70):
        """Generate tweets from saved weights, appending them to produced_tweets.txt.

        For each diversity value, seeds from a random word boundary in the
        corpus and samples ``tweet_length`` characters one at a time.
        Returns the list of generated tweet strings.
        """
        f = open("produced_tweets.txt", "a+")
        self.model.load_weights('weights.hdf5')
        tweets = []
        seq_starts = []
        diversities = [0.2, 0.5, 0.1]
        # Record every word boundary so seeds start on a space.
        for i, char in enumerate(self.preprocessor.corpus):
            if char == ' ':
                seq_starts.append(i)
        for div in diversities:
            f.write("---- diversity : %f\n" % div)
            for i in range(num_of_tweets):
                f.write("---- Tweet %d:\n" % i)
                begin = np.random.choice(seq_starts)
                tweet = u''
                sequence = self.preprocessor.corpus[begin:begin + self.preprocessor.MAX_LEN]
                tweet += sequence
                f.write("---Random Sequence beginning: %s\n" % tweet)
                for _ in range(tweet_length):
                    # NOTE(review): np.bool was removed in NumPy 1.24; this
                    # line needs dtype=bool on modern NumPy.
                    input_data = np.zeros((1, self.preprocessor.MAX_LEN, self.preprocessor.NUM_OF_CHARS), dtype=np.bool)
                    for t, char in enumerate(sequence):
                        input_data[0, t, self.preprocessor.char_to_idx[char]] = True
                    predictions = self.model.predict(input_data)[0]
                    next_idx = sample(predictions, div)
                    next_char = self.preprocessor.idx_to_char[next_idx]
                    tweet += next_char
                    # Slide the window one character forward.
                    sequence = sequence[1:] + next_char
                f.write("Generated using LSTM: %s\n" % tweet)
                #print(tweet)
                tweets.append(tweet)
        f.close()
        return tweets
if __name__ == "__main__":
    # Build the cleaned tweet file only if it does not exist yet, then
    # preprocess it and generate tweets from previously-saved weights.
    # NOTE(review): ``os`` is used here but not imported in this section;
    # confirm it is imported earlier in the file.
    cwd = os.getcwd()
    filename = "deneme.txt"
    path = os.path.join(cwd, filename)
    if not os.path.exists(path):
        handler = handleTwitter(filename)
    preprocessor = preProcessor(filename)
    dataX = preprocessor.dataX
    dataY = preprocessor.dataY
    max_len = preprocessor.MAX_LEN
    num_of_chars = preprocessor.NUM_OF_CHARS
    lstm = LSTModel(max_len, num_of_chars, preprocessor)
    # Training is disabled; createTweets() loads weights.hdf5 from disk.
    #history = lstm.trainModel(dataX,dataY,verbose=1,nb_epoch=120)
    tweets = lstm.createTweets()
    # f = open("loss.txt","w")
    # for i,loss_data in enumerate(history.history['loss']):
    #     msg_annotated = "{0}\t{1}\n".format(i, loss_data)
    #     f.write(msg_annotated)
    # f.close()
    # Measure novelty: cosine distance of generated tweets vs training sequences.
    vectorizer = TfidfVectorizer()
    tfidf = vectorizer.fit_transform(preprocessor.sequences)
    Xval = vectorizer.transform(tweets)
|
983,218 | 0313096942360a1ed726fd3bc5f75922b26c694c | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 2 18:43:55 2018
@author: abhij
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the training set
# NOTE(review): hard-coded absolute Windows path; '\R' is not an escape so
# it survives literally, but the path is machine-specific -- parameterize.
dataset_train = pd.read_csv('C:\\python files\\machine learning\\Machine Learning A-Z Template Folder\\Part 8 - Deep Learning\Recurrent_Neural_Networks\\TATASTEEL.csv')
training_set = dataset_train.iloc[:, 1:2].values
# Feature Scaling
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
training_set_scaled = sc.fit_transform(training_set)
# Predict price at t+1 from price at t: X is rows 0..4510, y is rows 1..4511.
X_train = training_set_scaled[0:4511]
y_train = training_set_scaled[1:4512]
# Keras LSTM input shape: (samples, timesteps, features).
X_train = np.reshape(X_train, (4511, 1, 1))
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
# Initialising the RNN
regressor = Sequential()
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 4, activation = 'sigmoid', input_shape = (None, 1)))
# Adding the output layer
regressor.add(Dense(units = 1))
# Compiling the RNN
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
# Fitting the RNN to the Training set
regressor.fit(X_train, y_train, epochs = 200, batch_size = 32)
#test_set = pd.read_csv('Google_Stock_Price_Test.csv')
dataset_train = pd.read_csv('C:\\python files\\machine learning\\Machine Learning A-Z Template Folder\\Part 8 - Deep Learning\Recurrent_Neural_Networks\\TATASTEEL.csv')
real_stock_price = dataset_train.iloc[:, 1:2].values
#prediction
inputs = real_stock_price
inputs = sc.transform(inputs)
# NOTE(review): reshape hard-codes 4507 rows while training used 4511 --
# confirm the CSV row count; this breaks if the file changes.
inputs = np.reshape(inputs,(4507,1,1))
predicted_stock_price = regressor.predict(inputs)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
plt.plot(real_stock_price, color = 'red', label = 'Real Google Stock Price')
plt.plot(predicted_stock_price, color = 'blue', label = 'Predicted Google Stock Price')
plt.title('Google Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel('Google Stock Price')
plt.legend()
plt.show()
'''
forecast = predicted_stock_price
forecast_pred = []
for i in range(100):
inputs_pred = forecast
inputs_pred = sc.transform(inputs_pred)
inputs_pred = inputs_pred[-1:,:]
inputs_pred = np.reshape(inputs_pred,(1,1,1))
predicted = regressor.predict(inputs_pred)
predicted = sc.inverse_transform(predicted)
forecast = np.append(forecast ,predicted, axis = 0)
forecast_pred.append(predicted)
'''
'''
real_stock_price_train = pd.read_csv('Google_Stock_Price_Train.csv')
real_stock_price_train = real_stock_price_train.iloc[:, 1:2].values
predicted_stock_price_train = regressor.predict(X_train)
predicted_stock_price_train = sc.inverse_transform(predicted_stock_price_train)
plt.plot(real_stock_price_train, color = 'red', label = 'Real Google Stock Price')
plt.plot(predicted_stock_price_train, color = 'blue', label = 'Predicted Google Stock Price')
plt.title('Google Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel('Google Stock Price')
plt.legend()
plt.show()
'''
|
983,219 | 66872d9ef0ec9fe5be63aee991caf9d9ec5baba2 | # -*- coding: utf-8 -*-
"""
conclusion:
play repeat.
"""
import sympy as sym
import numpy as np
import pandas as pd
# Symbolic scan over all binary opponent strategies (q1..q4 in {0,1}):
# for each, build the 4x4 determinant matrix and print the simplified
# Cramer's-rule combination of the four column-replaced determinants.
p1, p2, p3, p4 = sym.symbols('p1 p2 p3 p4')
# q1, q2, q3, q4 = sym.symbols('q1 q2 q3 q4')
for q1 in range(2):
    for q2 in range(2):
        for q3 in range(2):
            for q4 in range(2):
                D = sym.Matrix( [ [ p1-1, p2-1, p3, p4], [ q1-1, q3, q2-1, q4], [ p1*q1-1, p2*q3, p3*q2, p4*q4 ], [ 1, 1, 1, 1] ]);
                # D1..D4: D with column k replaced by the unit vector e4.
                D1 = D.copy()
                D2 = D.copy()
                D3 = D.copy()
                D4 = D.copy()
                D1[:,0] = [[0],[0],[0],[1]]
                D2[:,1] = [[0],[0],[0],[1]]
                D3[:,2] = [[0],[0],[0],[1]]
                D4[:,3] = [[0],[0],[0],[1]]
                sub = - sym.det(D1) + sym.det(D3) + sym.det(D2) + sym.det(D4)
                sub = sym.simplify(sub)
                print([q1,q2,q3,q4],sub)
983,220 | 366b673ce30da0f65dd18b8611b2a10d1a8b5f2b | from django import forms
from .validators import validate_uuid4
class FeedbackForm(forms.Form):
    """Free-text feedback form; ``referrer`` carries the originating page."""
    message = forms.CharField(widget=forms.Textarea)
    referrer = forms.CharField(widget=forms.HiddenInput)
class EmailForm(forms.Form):
    """Email-capture form; ``id`` must be a valid UUID4 string."""
    id = forms.CharField(validators=[validate_uuid4])
    email = forms.EmailField()
|
983,221 | ba22f5ad0e5598e4055a09dfc335d8bb49c9ae41 | import numpy as np
import matplotlib.pyplot as plt
import glob
import re
import decimal
import pylab
# Python 2 script: compares logged ground-truth paths against estimated
# paths for four localization methods and plots their error CDFs.
# NOTE(review): the path strings contain unescaped backslashes ('\U', '\p');
# they happen to survive in Python 2 byte strings but are fragile -- use
# raw strings. Paths are machine-specific.
src = 'C:\Users\Ramakanth\Dropbox\Thesis-Docs\Experiments'
vp = '\pfahvemp'
gp = '\pfahmgmp'
g = '\pfahmg'
v = '\pfahve'
h = '\pfPathLog.2014-05-14-'
b = '\path_g_17_13_47.csv'
westmg2 = glob.glob('C:\Users\Ramakanth\Dropbox\Thesis-Docs\Experiments\pfahmg\path_m2_15*.csv')
westmg3 = glob.glob('C:\Users\Ramakanth\Dropbox\Thesis-Docs\Experiments\pfahmg\path_m3_15*.csv')
westmga = glob.glob('C:\Users\Ramakanth\Dropbox\Thesis-Docs\Experiments\pfahmg\path_m3_14*.csv')
westmg = westmg3 + westmga + westmg2
westmgmp2 = glob.glob('C:\Users\Ramakanth\Dropbox\Thesis-Docs\Experiments\pfahmgmp\path_m2*.csv')
westmgmp3 = glob.glob('C:\Users\Ramakanth\Dropbox\Thesis-Docs\Experiments\pfahmgmp\path_m3*.csv')
westmgmp = westmgmp3 + westmgmp2
westve2 = glob.glob('C:\Users\Ramakanth\Dropbox\Thesis-Docs\Experiments\pfahve\path_m2*.csv')
westve3 = glob.glob('C:\Users\Ramakanth\Dropbox\Thesis-Docs\Experiments\pfahve\path_m3*.csv')
westve = westve3 + westve2
westvemp2 = glob.glob('C:\Users\Ramakanth\Dropbox\Thesis-Docs\Experiments\pfahvemp\path_m2*.csv')
westvemp3 = glob.glob('C:\Users\Ramakanth\Dropbox\Thesis-Docs\Experiments\pfahvemp\path_m3*.csv')
westvemp = westvemp3 + westvemp2
east = []
# One entry per method; color/label arrays are index-aligned with it.
west = [westmg,westmgmp,westve,westvemp]
color = ['k-','ko-','k--','k+-']
fol = [g,gp,v,vp]
hl = [h,h,h,h]
labels = ['MagMagnitude','MagMagnitude + Indoor map' , 'MagVector','MagVector + Indoor map']
totearr = [[],[],[],[]]
l = ""
for j in range(4):
    for i in range(len(west[j])):
        # Recover the timestamp from the estimate filename to locate the
        # matching ground-truth log.
        a = re.split('\D+',west[j][i])
        l = fol[j] + h + a[2] + '-' + a[3] + '-' + a[4] + '.csv'
        xx,yy = np.loadtxt(src + l ,delimiter=',', usecols=(0,1), unpack=True)
        xa,ya = np.loadtxt(west[j][i],delimiter=',', usecols=(0,1), unpack=True)
        #plt.subplot(1,2,1)
        #plt.plot(xx,yy,'ro-',linewidth=2)
        #plt.plot(xa,ya,'bo-',linewidth=2)
        #lt.xlabel('x')
        #plt.ylabel('y')
        #plt.xlim(0,14)
        #plt.ylim(0,26)
        #plt.title(l)
        if(len(xx) != len(xa)):
            print src + l
        # 0.56: grid-cell-to-metre conversion factor (presumably; confirm).
        xx = np.array(xx)*0.56
        yy = np.array(yy)*0.56
        xa = np.array(xa)*0.56
        ya = np.array(ya)*0.56
        # Per-step Euclidean error between truth and estimate.
        earr = np.sqrt((xx-xa)**2 + (yy-ya)**2)
        totearr[j] = totearr[j] + earr.tolist()
        err = np.mean(earr)
        #plt.subplot(1,2,2)
        #plt.plot(earr,'b-')
        #plt.xlabel('steps')
        #plt.ylabel('Error')
        #plt.title('Mean Average Error :' + str(err))
        #plt.show()
    #err = np.mean(np.sqrt(totearr))
    #num_bins = 100
    #n,bins,patches = plt.hist(totearr,num_bins,normed = 1,facecolor='green',alpha = 0.5)
    #plt.xlabel('Error (in m)',size = 15)
    #plt.ylabel('Probability', size = 15)
    #plt.title('Localization Error Histogram,Avg Mean Error' + str(np.mean(totearr)), size = 18)
    #plt.show()
    # Normalize the cumulative histogram into an empirical CDF.
    n_counts,bin_edges = np.histogram(totearr[j],bins=50,normed=True)
    cdf = np.cumsum(n_counts) # cdf not normalized, despite above
    scale = 1.0/cdf[-1]
    ncdf = scale * cdf
    pylab.plot(bin_edges[1:],ncdf, color[j], label = labels[j] ,linewidth = 2)
plt.xlabel('Error (in m)',size = 18)
plt.ylabel('Cumulative Distribution Function' ,size =18)
plt.title('Localization Error CDF' ,size =20)
legend = plt.legend(loc='best', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
|
983,222 | bc026a1ef3b535800c03ca018b7b7e7f8687c812 | import numpy as np
from smo_sparse import *
from parse_file import svm_read_problem_sparse
def drive_smo_sparse( train_filename , test_filename,kernel_type_in=None,C_in=None,eps_in=None ):
    """Train a sparse SMO SVM on *train_filename* and report accuracies.

    Python 2 script. Defaults: linear kernel, C=1, eps=1e-5. Side effects:
    initializes the global smo_sparse state and prints train/test accuracy.
    """
    train_y,train_x=svm_read_problem_sparse(train_filename)
    # Fill in defaults for any unspecified hyperparameters.
    if C_in is None:
        C_in=1
    if eps_in is None:
        eps_in=1e-5
    if kernel_type_in is None:
        kernel_type_in='linear'
    test_y,test_x=svm_read_problem_sparse(test_filename)
    init(train_x,train_y,kernel_type_in,C_in,eps_in)
    driver()
    print "Training Accuracy:\n"
    print get_training_accuracy()
    print "Testing Accuracy:\n"
    print get_test_accuracy(test_x,test_y)
if __name__ == '__main__':
    drive_smo_sparse("../data/leu","../data/leu.t")
983,223 | 04ed490c9006316ea659cba7cdafad9d61c6bdeb | # By submitting this assignment, I agree to the following:
# “Aggies do not lie, cheat, or steal, or tolerate those who do”
# “I have not given or received any unauthorized aid on this assignment”
#
# Name: Arya Ramchandani
# Section: 021
# Assignment: lab1-5
# Date: 2/9/18
import math
from math import *
# Prints the results of a series of engineering formulas with fixed inputs.
print("Arya Ramchandani, 627007018, 021")
print("I have been playing the drums for 6 years")
print("")
# Ohm's law: V = I * R
Current = 5
Resistance = 20
print(Current*Resistance)
print("")
# Kinetic energy: 0.5 * m * v^2
mass = 100
velocity = 21
print(0.5*(mass*(velocity**2)))
print("")
# Reynolds number: v * L / nu
viscosity = 1.2
velocity = 100
dimension = 2.5
print((velocity*dimension)/viscosity)
print("")
# Stefan-Boltzmann radiation: sigma * T^4
temp = 2200
boltzman = (10**-8)
print(((5.67)*boltzman)*(temp**4))
print("")
# Hyperbolic production decline (Arps equation).
time=20
prod_rate=100
decline_rate=2
constant=0.8
print(prod_rate/((1+((constant*decline_rate*time))**(1/constant))))
print("")
# Queueing utilization: rho^2 / (1 - rho)
arrival_rate=20
service_rate=35
print(((arrival_rate/service_rate)**2)/(1-(arrival_rate/service_rate)))
print("")
# Mohr-Coulomb shear strength: c + sigma * tan(phi)
normal_stress=20
cohesion=2
angle_friction=35
print(cohesion+(normal_stress*tan(math.radians(angle_friction))))
print("")
# Diffraction angle.
# NOTE(review): this computes asin(lambda)/(2d); the diffraction formula
# is usually asin(lambda/(2d)) -- the parenthesis placement looks wrong,
# confirm against the assignment.
wavelength=7.5*(10**-7)
distance=1*(10**-6)
print(asin(wavelength)/(2*distance))
983,224 | 70ba140b14ccc047284e2238446ad595fc7283b2 | from typing import Iterable, Union
HTMLContent = Union['HTMLElement', str]
_escaped_attrs = ('id', 'class', 'type')
class HTMLElement(object):
    """Base class for a lazily-rendered HTML element tree.

    Subclasses override ``tag`` and ``render_compact``; rendering is done
    by joining the string fragments yielded by :meth:`lazy_render`.
    Keyword attributes named with a leading underscore (``_id``, ``_class``,
    ``_type``) are rewritten to their Python-keyword-colliding HTML names.
    """
    tag = 'div'  # type: str
    # When True, the element renders on a single line with no indentation.
    render_compact = False  # type: bool
    def __init__(self, *content: HTMLContent, **attributes: str) -> None:
        self.content = list(content)
        self.attributes = attributes
        # Strip the escaping underscore from _id/_class/_type attributes.
        for a in _escaped_attrs:
            if '_' + a in self.attributes:
                self.attributes[a] = self.attributes.pop('_' + a)
    def append(self, *items: HTMLContent) -> 'HTMLElement':
        """Append children and return self (fluent)."""
        self.content += items
        return self
    def __call__(self, *items: HTMLContent) -> 'HTMLElement':
        # Calling an element appends children: div()('a', 'b').
        return self.append(*items)
    def subelement(self, item: 'HTMLElement') -> 'HTMLElement':
        """Append *item* and return the CHILD (unlike append, which returns self)."""
        self.content.append(item)
        return item
    def lazy_render_attributes(self) -> Iterable[str]:
        """Yield ' key="value"' fragments for each attribute (values are NOT escaped)."""
        if self.attributes:
            for k, v in self.attributes.items():
                yield ' '
                yield str(k)
                yield '="'
                yield str(v)
                yield '"'
    def lazy_render(self, indent: str = '', add_indent: str = '') \
            -> Iterable[str]:
        """Yield string fragments of the rendered element.

        Children are indented by *add_indent* per level unless the element
        is compact or empty; an ``html`` root also emits a DOCTYPE.
        """
        is_doc_root = self.tag.lower() == 'html'
        if is_doc_root:
            yield '<!DOCTYPE HTML>\n'
        # Non-compact elements with children get one child per line.
        do_linebreak = not self.render_compact and self.content
        yield indent
        yield '<'
        yield self.tag
        yield from self.lazy_render_attributes()
        yield '>'
        if do_linebreak:
            yield '\n'
        child_indent = indent + add_indent if do_linebreak else ''
        if not do_linebreak:
            add_indent = ''
        for child in self.content:
            if isinstance(child, HTMLElement):
                yield from child.lazy_render(child_indent, add_indent)
            else:
                # Plain strings are emitted verbatim (no HTML escaping).
                yield '{}{}'.format(child_indent, child)
            if do_linebreak:
                yield '\n'
        if do_linebreak:
            yield indent
        yield '</'
        yield self.tag
        yield '>'
        if is_doc_root:
            yield '\n'
    def __str__(self) -> str:
        '''Render element to string.
        >>> str(a('Somewhere', href="#"))
        '<a href="#">Somewhere</a>'
        >>> str(p())
        '<p></p>'
        >>> str(div('Hello World'))
        '<div>\\n Hello World\\n</div>'
        >>> str(table())
        '<table></table>'
        '''
        return ''.join(self.lazy_render(add_indent=' '))
    def write(self, fname: str) -> None:
        """Stream the rendered element to *fname* without building one big string."""
        with open(fname, 'w') as f:
            for s in self.lazy_render(add_indent=' '):
                f.write(s)
# TAGS
# One thin HTMLElement subclass per HTML tag; each only fixes the tag name
# and whether it renders inline (render_compact) or with indented children.
class a (HTMLElement):
    tag = 'a'
    render_compact = True
class article (HTMLElement):
    tag = 'article'
    render_compact = False
class body (HTMLElement):
    tag = 'body'
    render_compact = False
class button (HTMLElement):
    tag = 'button'
    render_compact = True
class div (HTMLElement):
    tag = 'div'
    render_compact = False
class footer (HTMLElement):
    tag = 'footer'
    render_compact = False
class form (HTMLElement):
    tag = 'form'
    render_compact = False
class h1 (HTMLElement):
    tag = 'h1'
    render_compact = True
class h2 (HTMLElement):
    tag = 'h2'
    render_compact = True
class h3 (HTMLElement):
    tag = 'h3'
    render_compact = True
class h4 (HTMLElement):
    tag = 'h4'
    render_compact = True
class head (HTMLElement):
    tag = 'head'
    render_compact = False
class header (HTMLElement):
    tag = 'header'
    render_compact = False
class hr (HTMLElement):
    tag = 'hr'
    render_compact = False
class html (HTMLElement):
    tag = 'html'
    render_compact = False
class img (HTMLElement):
    tag = 'img'
    render_compact = False
class li (HTMLElement):
    tag = 'li'
    render_compact = True
class link (HTMLElement):
    tag = 'link'
    render_compact = False
class meta (HTMLElement):
    tag = 'meta'
    render_compact = False
class nav (HTMLElement):
    tag = 'nav'
    render_compact = False
class ol (HTMLElement):
    tag = 'ol'
    render_compact = False
class p (HTMLElement):
    tag = 'p'
    render_compact = True
class small (HTMLElement):
    tag = 'small'
    render_compact = True
class span (HTMLElement):
    tag = 'span'
    render_compact = True
class style (HTMLElement):
    tag = 'style'
    render_compact = False
class table (HTMLElement):
    tag = 'table'
    render_compact = False
class tbody (HTMLElement):
    tag = 'tbody'
    render_compact = False
class td (HTMLElement):
    tag = 'td'
    render_compact = True
class th (HTMLElement):
    tag = 'th'
    render_compact = False
class thead (HTMLElement):
    tag = 'thead'
    render_compact = False
class title (HTMLElement):
    tag = 'title'
    render_compact = True
class tr (HTMLElement):
    tag = 'tr'
    render_compact = False
class ul (HTMLElement):
    tag = 'ul'
    render_compact = False
|
983,225 | 61b2e31afc33ac85382c0ff1d2b4f7754b28a3f3 | #!/usr/bin/env python3
#series 1
# Interactive exercises on list basics: append, indexing, concatenation,
# insertion, and filtering.
fruit = ['Apples', 'Pears', 'Oranges', 'Peaches']
print(fruit)
#append new input item to list
new = input('Enter a fruit: ')
fruit.append(new)
print(fruit)
#ask user for a number and display corresponding item (1-based index)
num = int(input('Enter a number: '))
print(num, fruit[num-1])
#add new item with '+'
fruit = ['Cherries'] + fruit
print(fruit)
#add new item with 'insert'
fruit.insert(0, 'Guava')
print(fruit)
#display all items that start with 'P'
for i in fruit:
    if i.startswith('P'):
        print(i)
#series 2
fruit_list = fruit.copy()
print(fruit_list)
#remove last fruit
fruit_list.pop()
print(fruit_list)
def delete_fruits(fruit_list):
delete_fruit = str(input('Delete a fruit: '))
for i in fruit_list:
if delete_fruit in i:
fruit_list.remove(i)
return fruit_list
delete_fruits(fruit_list)
#bonus
print(fruit_list + fruit_list)
#series 3
# NOTE(review): this loop removes from fruit_list_2 while iterating it,
# so a removal causes the next element to be skipped.
fruit_list_2 = fruit.copy()
print(fruit_list_2)
for i in fruit_list_2:
    answer = input('Do you like ' + i.lower() + '?')
    while answer.lower() not in ('yes', 'no'):
        answer = input("Please enter 'yes' or 'no' only: ")
    if answer.lower() == 'no':
        fruit_list_2.remove(i)
    else:
        continue
print(fruit_list_2)
#series 4
# Build a list of reversed names, then drop the last original item.
fruit_list_3 = fruit.copy()
fruit_list_4 = []
for i in fruit_list_3:
    fruit_list_4.append(i[::-1])
fruit_list_3.pop()
print(fruit_list_3, fruit_list_4)
# for i in fruit_list_3:
#     answer = input('Do you like ' + i.lower() + '?')
#     while answer.lower() not in ('yes', 'no'):
#         print("Please enter only yes or no")
#         answer = input("Do you like " + i.lower() + "? Enter yes or no only?")
#     if answer.lower() == "no":
#         fruit_list_3.remove(i)
# print(fruit_list_3)
#options to turn list into dict
#1) fruit_dicty = {i:j for i,j in enumerate(fruit)}
#2) enum = enumerate(fruit)
#    for i,j in enum:
#        fruit_dicty = {i,j}
#    fruit_dict = dict((i,j) for i,j in enum)
983,226 | dd78ab535469e85f0a0427da45fe9ef7b358252d | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name="mailmerge",
description = "A simple, command line mail merge tool",
version="1.7.2",
author="Andrew DeOrio",
author_email="awdeorio@umich.edu",
url="https://github.com/awdeorio/mailmerge/",
download_url = "https://github.com/awdeorio/mailmerge/tarball/1.7.2",
license="MIT",
packages = ["mailmerge"],
keywords=["mail merge", "mailmerge", "email"],
install_requires=[
"click",
"configparser",
"jinja2",
"nose2",
"sh",
],
test_suite='nose2.collector.collector',
entry_points="""
[console_scripts]
mailmerge=mailmerge.main:main
"""
)
|
983,227 | 6b5b8d47bd57773fd6d31a82ccbb89cd78fdaf48 | from .interface import Session
from .config import Config
|
983,228 | a6859c023f291d337045ef967bc5425edde41038 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 2 22:30:13 2020
@author: frank
"""
import unittest
K_range = range(0,51)
CD_range = range(1,51)
def solution(K, C, D):
    """Return the number of wearable sock pairs.

    K is the washing-machine capacity, C the colours of clean socks and
    D the colours of dirty socks. Matching a dirty sock with a leftover
    clean single costs one wash; completing a dirty-dirty pair costs two,
    so clean singles are matched first (greedy).
    """
    unmatched_clean = set()
    pairs = 0
    # Pair up the clean socks; leftovers stay in unmatched_clean.
    for colour in C:
        if colour in unmatched_clean:
            unmatched_clean.discard(colour)
            pairs += 1
        else:
            unmatched_clean.add(colour)
    unmatched_dirty = set()
    dirty_pairs = 0
    washes_left = K
    for colour in D:
        if washes_left == 0:
            break
        if colour in unmatched_clean:
            # One wash completes a pair with a leftover clean single.
            unmatched_clean.discard(colour)
            washes_left -= 1
            pairs += 1
        elif colour in unmatched_dirty:
            unmatched_dirty.discard(colour)
            dirty_pairs += 1
        else:
            unmatched_dirty.add(colour)
    if washes_left:
        # Two washes are needed per dirty-dirty pair.
        pairs += min(washes_left // 2, dirty_pairs)
    return pairs
class testsolution(unittest.TestCase):
    """Unit test for solution(): the sample case from the problem statement."""
    def test_1(self):
        K, C, D = 2, [1, 2, 1, 1], [1, 4, 3, 2, 4]
        self.assertEqual(solution(K, C, D), 3)
if __name__ == '__main__':
    unittest.main()
|
983,229 | 47ec41d424071d50082f3d1f43db4f1ee13deeae | f=open("sample.txt","a")
text=input("Enter text:")
f.write(text)
f.close()
print("Text write to file")
|
983,230 | 0bc0376606582466fba7b9d3d2dc68fb9b4a490f | # -*- coding: UTF-8 -*-
import re, numpy, sys, pickle
from NGS.BasicUtil import *
import NGS.BasicUtil.Util
from itertools import combinations
'''
Created on 2013-6-30
@author: rui
'''
if len(sys.argv) < 7:
print("python CaculateFst.py [vcf1] [vcf2] [vcf3]....[globe_Fst(G)/reletivepaire_Fsts(R)] [winwidth] [slidesize] [fastway]")
exit(-1)
class Fst():
    """Computes windowed Fst values between two populations' VCF SNP maps."""
    def __init__(self):
        super().__init__()
        # SNP records present in both populations, keyed by chromosome.
        self.doubleVcfMap = {}
        self.FstMapByChrom = {} # {chr:[(first_snp_pos,last_snp_pos,fst),(),()],chr:[],chr:[]}
        self.distMap = {}
    def alin2PopSnpPos(self, vcfMap1, vcfMap2):
        """
        Align SNPs shared by both populations into self.doubleVcfMap.
        Input maps look like:
        {chrNo:[(pos,REF,ALT,INFO),(pos,REF,ALT,INFO),,,,,],chrNo:[],,,,,,}
        Assumes vcfMap2's per-chromosome lists are sorted by position
        (binary search below).
        """
        for currentChrom in vcfMap1.keys():
            # self.FstMapByChrom[currentChrom] = []
            self.doubleVcfMap[currentChrom] = []
            for SNPrec in vcfMap1[currentChrom]:
                low = 0
                if currentChrom not in vcfMap2:
                    break
                high = len(vcfMap2[currentChrom]) - 1
                posInPop1 = SNPrec[0]
                RefInPop1 = SNPrec[1]
                AltInPop1 = SNPrec[2]
                # Skip multi-allelic sites (comma-separated ALT).
                if re.search(r"[A-Za-z]+,[A-Za-z]+", AltInPop1) != None: # multiple allels
                    continue
                dp4 = re.search(r"DP4=(\d*),(\d*),(\d*),(\d*)", SNPrec[3])
                # print(dp4.group(0))
                # Binary search for the same position/ALT in population 2.
                while low < high:
                    mid = int((low + high) / 2)
                    if posInPop1 == vcfMap2[currentChrom][mid][0]:
                        if AltInPop1 == vcfMap2[currentChrom][mid][2]:
                            self.doubleVcfMap[currentChrom].append(SNPrec + vcfMap2[currentChrom][mid])
                        break
                    elif posInPop1 < vcfMap2[currentChrom][mid][0]:
                        high = mid - 1
                    else:
                        low = mid + 1
                else:
                    pass
                    # self.doubleVcfMap[currentChrom].append(SNPrec+)
    def caculateFst(self, vcfMap1_ref, vcfMap2, caculator, winwidth, slideSize):
        """Slide a window of *winwidth* (step *slideSize*) over the aligned
        SNPs of each chromosome, applying *caculator* to fill FstMapByChrom."""
        win = Util.Window()
        self.alin2PopSnpPos(vcfMap1_ref, vcfMap2)#produce self.doubleVcfMap{}
        for currentChrom in self.doubleVcfMap.keys():
            # self.FstMapByChrom[currentChrom]=[]
            win.winValueL = []
            print("caculateFst value in "+currentChrom)
            win.slidWindowOverlap(self.doubleVcfMap[currentChrom], winwidth, slideSize, caculator)
            self.FstMapByChrom[currentChrom] = win.winValueL
if __name__ == '__main__':
    # Mode 'R': pairwise Fst between every pair of input VCFs.
    if sys.argv[-4]=='R' or sys.argv[-4]=='r':
        allkindofpaire = list(combinations(sys.argv[1:-4], 2))
        alldistMap={}
        for fstpaire in allkindofpaire:
            fstpaire2name = re.search(r"[^/]*$", fstpaire[1]).group(0) # for linux
            outfile = open(fstpaire[0] + fstpaire2name + ".fst", 'w')
            # win = Util.Window()
            fst_caculator = Caculators.Caculate_Fst()
            pop1 = VCFutil.VCF_Data() # new a class
            pop2 = VCFutil.VCF_Data() # new a class
            fst = Fst()
            # "slowway": iterate per chromosome using on-disk VCF indexes
            # (lower memory); indexes are built on first use.
            if sys.argv[-1] == "slowway":
                try:
                    vcf_1_idx = pickle.load(open(fstpaire[0] + ".myindex", 'rb'))
                    vcf_2_idx = pickle.load(open(fstpaire[1] + ".myindex", 'rb'))
                except IOError:
                    pop1.indexVCF(fstpaire[0], fstpaire[0] + ".myindex")
                    pop2.indexVCF(fstpaire[1], fstpaire[1] + ".myindex")
                    vcf_1_idx = pickle.load(open(fstpaire[0] + ".myindex", 'rb'))
                    vcf_2_idx = pickle.load(open(fstpaire[1] + ".myindex", 'rb'))
                tmppopmap1 = {}
                tmppopmap2 = {}
                for chrom in vcf_1_idx.keys():
                    if chrom == "title":
                        continue
                    pop1.getVcfMapByChrom(fstpaire[0], chrom, vcf_1_idx)
                    if pop2.getVcfMapByChrom(fstpaire[1], chrom, vcf_2_idx) == -1:
                        continue
                    tmppopmap1[chrom] = pop1.VcfList_A_Chrom
                    tmppopmap2[chrom] = pop2.VcfList_A_Chrom
                    fst.caculateFst(tmppopmap1, tmppopmap2, fst_caculator,int(sys.argv[-3]),int(sys.argv[-2]))
                    for e in fst.FstMapByChrom[chrom]:
                        print(chrom, e[0], e[1], e[2], sep='\t', file=outfile)
                    # Free per-chromosome maps to keep memory bounded.
                    del tmppopmap1[chrom]
                    del tmppopmap2[chrom]
            # "fastway": load whole VCFs into memory at once.
            elif sys.argv[-1] == "fastway":
                pop1.getVcfMap(fstpaire[0])
                pop2.getVcfMap(fstpaire[1])
                print("startcaculatefst", fstpaire[0], fstpaire[1])
                fst.caculateFst(pop1.VcfMap_AllChrom, pop2.VcfMap_AllChrom, fst_caculator,int(sys.argv[-3]),int(sys.argv[-2]))
                # for chrom in fst.FstMapByChrom.keys():
                #     for e in fst.FstMapByChrom[chrom]:
                #         print(chrom,e[0],e[1],e[2],sep='\t',file=outfile)
            # Z-transform the windowed Fst values across the whole genome.
            winCrossGenome = []
            for chrom in fst.FstMapByChrom.keys():
                for i in range(len(fst.FstMapByChrom[chrom])):
                    if fst.FstMapByChrom[chrom][i][2] != "NA":
                        winCrossGenome.append(fst.FstMapByChrom[chrom][i][2])
            exception = numpy.mean(winCrossGenome)
            std0 = numpy.std(winCrossGenome, ddof=0)
            std1 = numpy.std(winCrossGenome, ddof=1)
            del winCrossGenome
            for chrom in sorted(fst.FstMapByChrom.keys()):
                for i in range(len(fst.FstMapByChrom[chrom])):
                    if fst.FstMapByChrom[chrom][i][2] != "NA":
                        zFst = (fst.FstMapByChrom[chrom][i][2] - exception) / std1
                    else:
                        zFst = "NA"
                    print(chrom + "\t" + str(i) + "\t" + str(fst.FstMapByChrom[chrom][i][0]) + "\t" + str(fst.FstMapByChrom[chrom][i][1]) + "\t" + str(fst.FstMapByChrom[chrom][i][2]) + "\t" + str(zFst), file=outfile)
            # Mean Fst over all valid windows = pairwise distance for this pair.
            # NOTE(review): `sum` shadows the builtin here.
            sum = 0
            Number = 0
            for chrom in sorted(fst.FstMapByChrom.keys()):
                for i in range(len(fst.FstMapByChrom[chrom])):
                    if fst.FstMapByChrom[chrom][i][2] != 'NA':
                        Number += 1
                        sum += fst.FstMapByChrom[chrom][i][2]
            alldistMap[re.search(r"[^/]*$", fstpaire[0]).group(0) + fstpaire2name] = sum / Number
            outfile.close()
        for n in alldistMap.keys():
            print(n + "\t" + str(alldistMap[n]), file=open("testdist.txt", 'a'))
    # Mode 'G': per population, average its Fst against every other
    # population window-by-window ("global" Fst), then Z-transform.
    elif sys.argv[-4] == 'G' or sys.argv[-4] == 'g':
        globalFstMapByChrom={}
        fst_caculator = Caculators.Caculate_Fst()
        # fst = Fst()
        for majorpop in sys.argv[1:-4]:
            pop1 = VCFutil.VCF_Data() # new a class
            pop1.getVcfMap(majorpop)
            fstlist=[]
            # outfile=open(majorpop+'.gfst','w')
            # if len(fstlist) != 0:
            #     for chrom in fstlist[0].FstMapByChrom.keys():
            #         for winNo in fstlist[0].FstMapByChrom[chrom]:
            #             sumFstInAWin=0
            #             Number=0
            #             for i in fstlist:
            #                 if fstlist[0].FstMapByChrom[chrom][winNo][0] != fstlist[i].FstMapByChrom[chrom][winNo][0] or fstlist[0].FstMapByChrom[chrom][winNo][1] != fstlist[i].FstMapByChrom[chrom][winNo][1]:
            #                     print(majorpop+"de shang yi ge"+chrom+)
            #                     exit(-1)
            #                 if fstlist[i].FstMapByChrom[chrom][winNo]!= 'NA':
            #                     Number+=1
            #                     sumFstInAWin+=fstlist[i].FstMapByChrom[chrom][winNo]
            #             gfst=sumFstInAWin/Number
            #             print(chrom + "\t" + str(winNo) + "\t" + str(fstlist[0].FstMapByChrom[chrom][winNo][0]) + "\t" + str(fstlist[0].FstMapByChrom[chrom][winNo][1]) + "\t" + str(gfst), file=outfile)
            # fstlist=[]
            for othrpop in sys.argv[1:-4]:
                if majorpop == othrpop:
                    continue
                pop2 = VCFutil.VCF_Data() # new a class
                pop2.getVcfMap(othrpop)
                print("startcaculatefst", majorpop, othrpop)
                fstlist.append(Fst())
                fstlist[-1].caculateFst(pop1.VcfMap_AllChrom, pop2.VcfMap_AllChrom, fst_caculator,int(sys.argv[-3]),int(sys.argv[-2]))
            outfile=open(majorpop+'.gfst','w')
            if len(fstlist) != 0:
                for chrom in fstlist[0].FstMapByChrom.keys():
                    globalFstMapByChrom[chrom]=[]
                    for winNo in range(0,len(fstlist[0].FstMapByChrom[chrom])):
                        # Average this window's Fst over all other populations.
                        sumFstInAWin=0
                        Number=0
                        for i in range(0,len(fstlist)):
                            try:
                                if fstlist[i].FstMapByChrom[chrom][winNo][2]!= 'NA':
                                    Number+=1
                                    sumFstInAWin+=fstlist[i].FstMapByChrom[chrom][winNo][2]
                            except IndexError:
                                for j in range(0,len(fstlist)):
                                    print(str(j),sys.argv[1+j],chrom,str(winNo),str(len(fstlist[j].FstMapByChrom[chrom])))
                                continue# always in the last position,and the value is caculate any way,so can't mispostion.
                        try:
                            gfst=sumFstInAWin/Number
                        except ZeroDivisionError:
                            gfst="NA"
                        globalFstMapByChrom[chrom].append((fstlist[0].FstMapByChrom[chrom][winNo][0],fstlist[0].FstMapByChrom[chrom][winNo][1],gfst))
                        # print(chrom + "\t" + str(winNo) + "\t" + str(fstlist[0].FstMapByChrom[chrom][winNo][0]) + "\t" + str(fstlist[0].FstMapByChrom[chrom][winNo][1]) + "\t" + str(gfst), file=outfile)
                # Z-transform the averaged windows and write per-population output.
                winCrossGenome = []
                for chrom in globalFstMapByChrom.keys():
                    for i in range(len(globalFstMapByChrom[chrom])):
                        if globalFstMapByChrom[chrom][i][2] != "NA":
                            winCrossGenome.append(globalFstMapByChrom[chrom][i][2])
                exception = numpy.mean(winCrossGenome)
                std0 = numpy.std(winCrossGenome, ddof=0)
                std1 = numpy.std(winCrossGenome, ddof=1)
                del winCrossGenome
                for chrom in sorted(globalFstMapByChrom.keys()):
                    for i in range(len(globalFstMapByChrom[chrom])):
                        if globalFstMapByChrom[chrom][i][2] != "NA":
                            zgFst = (globalFstMapByChrom[chrom][i][2] - exception) / std1
                        else:
                            zgFst = "NA"
                        print(chrom + "\t" + str(i) + "\t" + str(globalFstMapByChrom[chrom][i][0]) + "\t" + str(globalFstMapByChrom[chrom][i][1]) + "\t" + str(globalFstMapByChrom[chrom][i][2]) + "\t" + str(zgFst), file=outfile)
|
983,231 | 0c7f0e2fcf0666aa9313debb4f12ba3627c77791 | import discord
from discord.ext import commands
from bot import COMMAND_PREFIX
class Config(commands.Cog):
    """Discord cog: lets a user change their profile description/background.

    NOTE(review): the TimeoutError handler below references ``asyncio``,
    which is not imported in this section -- confirm it is imported at the
    top of the file, otherwise a timeout raises NameError.
    """
    def __init__(self, bot):
        self.bot = bot
        self.description_length = 70   # max chars allowed in a description
        self.formats = [".jpg", ".png", ".jpeg", ".webp"]  # accepted image formats
    @commands.command()
    @commands.guild_only()
    async def config(self, ctx, option: str = None):
        """Prompt the invoking user for a new description or background,
        validate the reply, and persist it to the users table."""
        if option == "description":
            await ctx.channel.send(
                "Please enter a **description**. You have only 90sec to enter. If you want to cancel your action, send `cancel`:"
            )
        elif option == "background":
            await ctx.channel.send(
                f"Please enter a **background** link or send a picture right here. Supported formats: {' '.join(map(str, self.formats))}. You have only 90sec to enter. If you want to cancel your action, send `cancel`:"
            )
        else:
            return await ctx.channel.send(
                f":x: Choose what you want to change. To change the description, enter `{COMMAND_PREFIX}config description`. To change the background, enter `{COMMAND_PREFIX}config background`. Before entering both commands, please have a look at `{COMMAND_PREFIX}help config` command"
            )
        def check_author(message):
            # Only accept the reply from the user who invoked the command.
            if ctx.author == message.author:
                return True
        try:
            value = await self.bot.wait_for("message", check=check_author, timeout=90.0)
        except asyncio.TimeoutError:
            # Silently give up after 90 seconds with no reply.
            return
        if value:
            if value.content.lower() == "cancel":
                return await ctx.channel.send(
                    f":x: {option.capitalize()} change canceled"
                )
            if option == "description" and len(value.content) > self.description_length:
                return await ctx.channel.send(
                    f":x: Your description is too long! The maximum description length is {self.description_length}. Enter `{COMMAND_PREFIX}config description` to try again"
                )
            elif option == "background":
                # Accept only links/filenames containing a supported extension.
                check = False
                for form in self.formats:
                    if form in value.content:
                        check = True
                if not check:
                    return await ctx.channel.send(
                        f":x: You entered an unsupported format. Supported formats: {' '.join(map(str, self.formats))}. Enter `{COMMAND_PREFIX}config background` to try again"
                    )
            # NOTE(review): `option` is interpolated into the SQL column name;
            # it is constrained to 'description'/'background' by the branches
            # above, which is what makes this safe.
            await self.bot.pg_con.execute(
                f"""
                UPDATE users
                SET {option} = $1
                WHERE user_id = $2
                AND guild_id = $3
                """,
                value.content,
                ctx.author.id,
                ctx.guild.id,
            )
            await ctx.channel.send(
                f"Your {option} has been changed! You can view your profile by entering `{COMMAND_PREFIX}profile` command"
            )
def setup(bot):
    """Discord.py extension entry point: register the Config cog."""
    bot.add_cog(Config(bot))
|
983,232 | 06d00a7a7406bf484bc9a78b310eab338bca5eb1 | # -----------------------------------------------------------------------------
# From Numpy to Python
# Copyright (2017) Nicolas P. Rougier - BSD license
# More information at https://github.com/rougier/numpy-book
# -----------------------------------------------------------------------------
import numpy as np
import itertools as it
def solution_1():
    # Author: Tucker Balch
    # Brute force: visit every cell of the 11^4 grid and keep the
    # coordinate quadruples that sum to 10.
    # 14641 (=11*11*11*11) iterations & tests
    found = []
    for w in range(11):
        for x in range(11):
            for y in range(11):
                for z in range(11):
                    if w + x + y + z != 10:
                        continue
                    found.append((w, x, y, z))
    return found
def solution_2():
    # Author: Daniel Vinegrad
    # Itertools
    # 14641 (=11*11*11*11) iterations & tests
    quadruples = []
    for quad in it.product(range(11), repeat=4):
        if sum(quad) == 10:
            quadruples.append(quad)
    return quadruples
def solution_3():
    # Author: Nick Poplas
    # Intricated iterations: bound each loop so the fourth value is
    # determined, never tested.
    # 486 iterations, no test
    results = []
    for a in range(11):
        for b in range(11 - a):
            for c in range(11 - a - b):
                results.append((a, b, c, 10 - a - b - c))
    return results
def solution_3_bis():
    # Iterator using intricated iterations
    # 486 iterations, no test
    def _generate():
        for a in range(11):
            for b in range(11 - a):
                for c in range(11 - a - b):
                    yield (a, b, c, 10 - a - b - c)
    return _generate()
def solution_4():
    # Author: Yaser Martinez
    # Numpy indices
    # No iterations, 1331 (= 11*11*11) tests
    first_three = np.indices((11, 11, 11)).reshape(3, -1)
    fourth = 10 - first_three.sum(axis=0)
    grid = np.vstack((first_three, fourth)).T
    return grid[fourth > -1]
if __name__ == '__main__':
    # `tools.timeit` is the numpy-book repo's benchmark helper; it times
    # each expression in the current global namespace.
    from tools import timeit
    timeit("solution_1()", globals())
    timeit("solution_2()", globals())
    timeit("solution_3()", globals())
    timeit("solution_4()", globals())
|
983,233 | 9389ef4ad11a1df48e687bbaded7f4c81713e561 | import numpy as np
import matplotlib.pyplot as plt
# Load the trajectory dump; columns are position, velocity, time.
archivo = np.loadtxt('tray.txt')
x = archivo[:, 0]
v = archivo[:, 1]
t = archivo[:, 2]


def _save_plot(abscissa, ordinate, title, xlabel, ylabel, filename):
    """Draw one labelled curve and save it to *filename*.

    Refactors the previously triplicated
    plot/title/xlabel/ylabel/savefig/close sequence into a single helper.
    """
    plt.plot(abscissa, ordinate)
    plt.title(title, fontsize=25)
    plt.xlabel(xlabel, fontsize=25)
    plt.ylabel(ylabel, fontsize=25)
    plt.savefig(filename)
    plt.close()


_save_plot(t, x, 'x VS t', 't', 'x', 'pos.png')    # position vs time
_save_plot(t, v, 'v VS t', 't', 'v', 'vel.png')    # velocity vs time
_save_plot(x, v, 'v VS x', 'x', 'v', 'phase.png')  # phase portrait
|
983,234 | 752ab4bbf9ffad6d19950152647aadccaa370bf3 | from bitcoin_forecast import GDAXRate
from sklearn.svm import SVR
from sklearn import preprocessing
from sklearn.pipeline import make_pipeline
import numpy as np
import logging
class BTCForecast(object):
    """Bitcoin rate forecaster built on scikit-learn regression.

    Disclaimer:
        This is another just-for-fun project.
        Please don't trade currencies based on this forecast.
        The risk of loss in trading or holding Digital Currency can be
        substantial.

    The current implementation uses Support Vector Regression (SVR).
    """

    DEFAULT_MODEL_TYPE = 'SVR'
    DEFAULT_SVR_MODEL_PARAMS = {'kernel': 'rbf', 'epsilon': 0.01, 'c': 100, 'gamma': 100}

    def __init__(self, model_type=DEFAULT_MODEL_TYPE):
        """Set up the model and the scaler->SVR pipeline.

        :param model_type: only 'SVR' model is supported for now
        """
        assert (model_type == 'SVR'), "Model '{}' is not supported. " \
                                      "We support only SVR for now.".format(model_type)
        self._model_type = model_type
        params = BTCForecast.DEFAULT_SVR_MODEL_PARAMS
        self._model_params = params
        # Pipeline: standardize timestamps, then fit the SVR on top.
        self._scaler = preprocessing.StandardScaler(copy=True, with_mean=True, with_std=True)
        self._model = SVR(kernel=params['kernel'],
                          epsilon=params['epsilon'],
                          C=params['c'],
                          gamma=params['gamma'])
        self._pipeline = make_pipeline(self._scaler, self._model)
        self.has_learned = False

    def _transform_training_set(self, gdax_rates):
        """Convert GDAXRate records into training arrays.

        :param gdax_rates: list of GDAXRate's
        :return: x, y training vectors (x is a column vector of timestamps)
        """
        y_train = [rate.closing_price for rate in gdax_rates]
        seconds = [rate.end_time.timestamp() for rate in gdax_rates]
        x_train = np.reshape(seconds, (len(seconds), 1))
        return x_train, y_train

    def learn(self, gdax_rates):
        """Fit the pipeline on past rates and return the training score.

        :param gdax_rates: list of GDAXRate's
        :return: current score after training
        """
        log = logging.getLogger('BTCForecast')
        log.debug('learning...')
        x_train, y_train = self._transform_training_set(gdax_rates)
        # LEARN!
        self._pipeline.fit(x_train, y_train)
        score = self._pipeline.score(x_train, y_train)
        self.has_learned = True
        log.debug('score: {}'.format(score))
        return score

    def predict(self, timestamps):
        """Predict one value per timestamp.

        :param timestamps: a list of timestamps
        :return: a list of predictions
        :raises TypeError: if called before learn()
        """
        if not self.has_learned:
            raise TypeError('Learning is required before any predictions')
        x_test = np.reshape(timestamps, (len(timestamps), 1))
        return self._pipeline.predict(x_test)
|
983,235 | d79613d12bfeb0db7126cd0897916efff5ffc5fd | from db import db
class MaidPlanSchedule(db.Model):
    """A named cleaning schedule on a given date, linked to its cleaning plans."""

    __tablename__ = "maidplanschedule"

    id = db.Column(db.Integer, primary_key=True)
    schedule_name = db.Column(db.String(80), nullable=False)
    schedule_date = db.Column(db.DateTime, nullable=False)
    start_time = db.Column(db.Time, nullable=False, default=0)
    end_time = db.Column(db.Time, nullable=False, default=0)
    post_clean_buffer = db.Column(db.Integer, nullable=False, default=0)
    # Many-to-many link to MaidPlanModel through the association table.
    plans = db.relationship('MaidPlanModel', secondary="maidplanscheduleplan", backref='maidplanschedule',
                            lazy='dynamic')

    def __init__(self, schedule_name, schedule_date, start_time, end_time, post_clean_buffer):
        self.schedule_name = schedule_name
        self.schedule_date = schedule_date
        self.start_time = start_time
        self.end_time = end_time
        # BUG FIX: a stray trailing comma here previously stored the 1-tuple
        # (post_clean_buffer,) instead of the integer itself.
        self.post_clean_buffer = post_clean_buffer

    def json(self):
        """Serialize the schedule (including its plans) to a JSON-ready dict."""
        return {
            "id": self.id,
            "schedule_name": self.schedule_name,
            "schedule_date": self.schedule_date,
            "start_time": self.start_time,
            "end_time": self.end_time,
            "post_clean_buffer": self.post_clean_buffer,
            # BUG FIX: the relationship is named 'plans' (there is no
            # attribute 'plan'), and iterating the result of .first() raised
            # at runtime; iterate the dynamic relationship itself instead.
            "plan": [plan.json() for plan in self.plans],
        }

    @classmethod
    def find_by_id(cls, id):
        """Return the schedule with the given primary key, or None."""
        return cls.query.filter_by(id=id).first()

    def save_to_db(self):
        db.session.add(self)
        db.session.commit()

    def delete_from_db(self):
        db.session.delete(self)
        db.session.commit()
|
983,236 | f1a56b2664ab156f3fd9fbee1fe866b47260d157 | import atexit
import time
from flask import Flask
from sqlalchemy_api_handler.utils import logger
from utils.jobs import get_all_jobs, \
remove_oldest_jobs_file, \
write_jobs_to_file
from utils.setup import setup
# Flask app whose only job here is to host the background job scheduler
# attached by setup(..., with_jobs=True).
CLOCK_APP = Flask(__name__)
setup(CLOCK_APP, with_jobs=True)

if __name__ == '__main__':
    # CLOCK_APP.async_scheduler.start()
    CLOCK_APP.background_scheduler.start()
    # Make sure the scheduler is stopped even on abnormal interpreter exit.
    # atexit.register(lambda: CLOCK_APP.async_scheduler.shutdown())
    atexit.register(CLOCK_APP.background_scheduler.shutdown)
    print_jobs = True
    try:
        while True:
            if print_jobs:
                # Once a minute: snapshot the scheduled jobs to a file and
                # prune the oldest snapshot.
                jobs = get_all_jobs(CLOCK_APP)
                write_jobs_to_file(jobs)
                remove_oldest_jobs_file()
            time.sleep(60)
    except (KeyboardInterrupt, SystemExit):
        # NOTE(review): "interupted" typo kept — it is runtime log text.
        logger.warning('Scheduler interupted')
        print_jobs = False
        # CLOCK_APP.async_scheduler.shutdown()
        CLOCK_APP.background_scheduler.shutdown()
|
983,237 | 263f00725ba691f6a9a9dab70d22b3f79ccf3147 | import random
class Luck:
    """Tiny two-digit lottery: register players, then draw winning digits."""

    def __init__(self):
        # Each entry is [name, low_digit, high_digit].
        self.persons = []

    def put_person(self, name, n1, n2):
        """Register *name* with the order-insensitive digit pair (n1, n2)."""
        pair = sorted([n1, n2])
        entry = [name] + pair
        self.persons.append(entry)
        return entry

    def generate(self):
        """Draw two digits; return them (sorted) plus the matching entries."""
        draw = sorted([random.randint(0, 9) for _ in range(2)])
        winners = [p for p in self.persons if [p[1], p[2]] == draw]
        return draw, winners
|
983,238 | 87b62afa09c9d6df3d4ab359a744b101479a317e | import importlib
import logging
from inspect import getfullargspec, isclass
from ufo2ft.constants import FEATURE_WRITERS_KEY
from ufo2ft.util import _loadPluginFromString
from .baseFeatureWriter import BaseFeatureWriter
from .cursFeatureWriter import CursFeatureWriter
from .gdefFeatureWriter import GdefFeatureWriter
from .kernFeatureWriter import KernFeatureWriter
from .markFeatureWriter import MarkFeatureWriter
# Public API of the featureWriters package: the built-in writer classes
# plus the loader helper.
__all__ = [
    "BaseFeatureWriter",
    "CursFeatureWriter",
    "GdefFeatureWriter",
    "KernFeatureWriter",
    "MarkFeatureWriter",
    "loadFeatureWriters",
]

logger = logging.getLogger(__name__)
def isValidFeatureWriter(klass):
    """Return True if 'klass' is a valid feature writer class.

    A valid feature writer class is a class (of type 'type') with two
    required attributes:

    1) 'tableTag' (str), which can be "GSUB", "GPOS", or other similar tags.
    2) 'write' (bound method), with the signature matching the same method
       from the BaseFeatureWriter class:

           def write(self, font, feaFile, compiler=None)
    """
    if not isclass(klass):
        logger.error("%r is not a class", klass)
        return False
    required = (
        ("tableTag", "%r does not have required 'tableTag' attribute"),
        ("write", "%r does not have a required 'write' method"),
    )
    for attr, message in required:
        if not hasattr(klass, attr):
            logger.error(message, klass)
            return False
    expected_args = getfullargspec(BaseFeatureWriter.write).args
    if getfullargspec(klass.write).args != expected_args:
        logger.error("%r 'write' method has incorrect signature", klass)
        return False
    return True
def loadFeatureWriters(ufo, ignoreErrors=True):
    """Check UFO lib for key "com.github.googlei18n.ufo2ft.featureWriters",
    containing a list of dicts, each having the following key/value pairs.
    For example:

        {
            "module": "myTools.featureWriters",  # default: ufo2ft.featureWriters
            "class": "MyKernFeatureWriter",      # required
            "options": {"doThis": False, "doThat": True},
        }

    Import each feature writer class from the specified module (default is
    the built-in ufo2ft.featureWriters), and instantiate it with the given
    'options' dict.

    Return the list of feature writer objects.
    If the 'featureWriters' key is missing from the UFO lib, return None.

    If an exception occurs and 'ignoreErrors' is True, the exception message
    is logged and the invalid writer is skipped, otherwise it's propagated.
    """
    if FEATURE_WRITERS_KEY not in ufo.lib:
        return None
    writers = []
    for wdict in ufo.lib[FEATURE_WRITERS_KEY]:
        try:
            moduleName = wdict.get("module", __name__)
            className = wdict["class"]
            options = wdict.get("options", {})
            if not isinstance(options, dict):
                raise TypeError(type(options))
            module = importlib.import_module(moduleName)
            klass = getattr(module, className)
            # Validate the interface before instantiating, so a bad entry
            # fails here rather than deep inside compilation.
            if not isValidFeatureWriter(klass):
                raise TypeError(klass)
            writer = klass(**options)
        except Exception:
            if ignoreErrors:
                logger.exception("failed to load feature writer: %r", wdict)
                continue
            raise
        writers.append(writer)
    return writers
def loadFeatureWriterFromString(spec):
    """Take a string specifying a feature writer class to load (either a
    built-in writer or one defined in an external, user-defined module),
    initialize it with the given options and return the writer object.

    The string must conform to the following notation:
    - an optional python module, followed by '::'
    - a required class name; the class must have a method called 'write'
      with the same signature as the BaseFeatureWriter.
    - an optional list of keyword-only arguments enclosed by parentheses

    Raises ValueError if the string doesn't conform to this specification;
    TypeError if the imported name is not a feature writer class; and
    ImportError if the user-defined module cannot be imported.

    Examples:

    >>> loadFeatureWriterFromString("KernFeatureWriter")
    <ufo2ft.featureWriters.kernFeatureWriter.KernFeatureWriter object at ...>
    >>> w = loadFeatureWriterFromString("KernFeatureWriter(ignoreMarks=False)")
    >>> w.options.ignoreMarks
    False
    >>> w = loadFeatureWriterFromString("MarkFeatureWriter(features=['mkmk'])")
    >>> w.features == frozenset(['mkmk'])
    True
    >>> loadFeatureWriterFromString("ufo2ft.featureWriters::KernFeatureWriter")
    <ufo2ft.featureWriters.kernFeatureWriter.KernFeatureWriter object at ...>
    """
    # All parsing/validation is delegated to the shared plugin loader.
    return _loadPluginFromString(spec, "ufo2ft.featureWriters", isValidFeatureWriter)
|
983,239 | 4e3ef142dd2d4a59def4dd98dc2f5316aec7d958 | # CRAETE local.py file by renaming/copying default.local.py
# User should update the VPC details below in local.py
# VPC details for the deployment; update these before running.
VPC = {
    "ID": "vpc-1",
    "CIDR_BLOCKS": ["10.0.0.0/16"],
    "SUBNETS": ["subnet-1", "subnet-2"]
}

# System reads below data from user if not updated here
AWS_ACCESS_KEY = ""
AWS_SECRET_KEY = ""
AWS_REGION = ""

MAKE_ALB_INTERNAL = True

# MAIL Server configuration
# BUG FIX: MAIL_SERVER was assigned twice ("localhost.local" earlier in the
# file was a dead store, immediately shadowed by "localhost" here); only the
# effective value is kept.
MAIL_SERVER = "localhost"
MAIL_SERVER_PORT = 587
MAIL_PROTOCOL = "smtp"
MAIL_SERVER_USER = ""
MAIL_SERVER_PWD = ""
MAIL_SMTP_AUTH = ""
MAIL_SMTP_SSL_ENABLE = "true"
MAIL_SMTP_SSL_TEST_CONNECTION = "false"
|
983,240 | 54c66e2aef2a865107865e53101226bf388035c5 | # Generated by Django 2.0.6 on 2018-07-20 01:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the AnonymousCart model: a Cart subclass tagged with an anonymous user name."""

    dependencies = [
        ('shopping_cart', '0003_auto_20180719_0924'),
    ]

    operations = [
        migrations.CreateModel(
            name='AnonymousCart',
            fields=[
                # Multi-table-inheritance link back to the concrete Cart row.
                ('cart_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='shopping_cart.Cart')),
                ('anon_user', models.CharField(db_index=True, help_text='Nama Anon', max_length=200)),
            ],
            bases=('shopping_cart.cart',),
        ),
    ]
|
983,241 | 7bf844bb7dab3a68fba4114ca3a5ad2a1a9deb60 | import numpy as np
import pandas as pd
from honeycomb_io import fetch_environment_id, fetch_person_tag_info
def pose_data_with_body_centroid(environment, start, end, df_3d_pose_data):
    """Add a chest-centroid position and wearer device_id to 3D pose data.

    The body position is the mean of the two shoulder and two hip keypoints
    (per axis), with per-track interpolation/backfill to scrub NaN keypoints.

    NOTE(review): assumes 'keypoint_coordinates_3d' holds, per row, an array
    of 17 (x, y, z) keypoints in the COCO order listed below — confirm with
    the upstream pose pipeline.
    """
    # filter by person_id (note nan, perhaps we filter our known tracks w/ person_id?)
    # convert 'keypoint_coordinates_3d' to 'position_x/y/z'
    # [
    #     0: 'nose',
    #     1: 'left_eye',
    #     2: 'right_eye',
    #     3: 'left_ear',
    #     4: 'right_ear',
    #     5: 'left_shoulder',
    #     6: 'right_shoulder',
    #     7: 'left_elbow',
    #     8: 'right_elbow',
    #     9: 'left_wrist',
    #     10: 'right_wrist',
    #     11: 'left_hip',
    #     12: 'right_hip',
    #     13: 'left_knee',
    #     14: 'right_knee',
    #     15: 'left_ankle',
    #     16: 'right_ankle'
    # ]
    # Work on a copy so the caller's frame is not mutated.
    df_3d_pose_data = df_3d_pose_data.copy()
    # Chest keypoints used for the centroid.
    keypoints = [
        {"idx": 5, "name": "left_shoulder"},
        {"idx": 6, "name": "right_shoulder"},
        {"idx": 11, "name": "left_hip"},
        {"idx": 12, "name": "right_hip"},
    ]
    environment_id = fetch_environment_id(environment_name=environment)
    # Column names like "left_shoulder_x", ..., one per keypoint per axis.
    cols = []
    for k in keypoints:
        cols.extend(list(map(lambda c: f"{k['name']}_{c}", list("xyz"))))
    np_flattened_poses = np.array(df_3d_pose_data["keypoint_coordinates_3d"].to_list())
    # Select only the 4 chest keypoints and flatten to (rows, 4*3).
    np_flattened_chest_keypoints = np_flattened_poses[:, list(map(lambda x: x["idx"], keypoints)), :]
    df_flattened_chest_keypoints = pd.DataFrame(
        np_flattened_chest_keypoints.reshape(-1, 4 * 3), index=df_3d_pose_data.index, columns=cols
    )
    df_flattened_chest_keypoints["pose_track_3d_id"] = df_3d_pose_data["pose_track_3d_id"]
    # Scrub missing keypoints per track: interpolate gaps, backfill leading NaNs.
    chest_keypoints_scrubbed = []
    for track in pd.unique(df_flattened_chest_keypoints["pose_track_3d_id"]):
        df_track = df_flattened_chest_keypoints[df_flattened_chest_keypoints["pose_track_3d_id"] == track]
        chest_keypoints_scrubbed.append(df_track.interpolate().fillna(method="bfill"))
    df_flattened_chest_keypoints = pd.concat(chest_keypoints_scrubbed)
    # Chest centroid per axis = mean of shoulders and hips.
    df_3d_pose_data["x_position"] = df_flattened_chest_keypoints[
        ["left_shoulder_x", "right_shoulder_x", "left_hip_x", "right_hip_x"]
    ].mean(axis=1)
    df_3d_pose_data["y_position"] = df_flattened_chest_keypoints[
        ["left_shoulder_y", "right_shoulder_y", "left_hip_y", "right_hip_y"]
    ].mean(axis=1)
    df_3d_pose_data["z_position"] = df_flattened_chest_keypoints[
        ["left_shoulder_z", "right_shoulder_z", "left_hip_z", "right_hip_z"]
    ].mean(axis=1)
    # Map each identified person to the tag device they wore in this window.
    df_person_tag_info = fetch_person_tag_info(start=start, end=end, environment_id=environment_id)
    df_3d_pose_data.index = df_3d_pose_data["timestamp"]
    df_3d_pose_data["device_id"] = float("nan")
    for person_id in pd.unique(df_3d_pose_data["person_id"]):
        # Unidentified tracks carry NaN person_ids; leave their device_id NaN.
        if isinstance(person_id, float) and np.isnan(person_id):
            continue
        person_details = df_person_tag_info[df_person_tag_info["person_id"] == person_id].iloc[0]
        df_3d_pose_data.loc[df_3d_pose_data["person_id"] == person_id, "device_id"] = person_details["device_id"]
    return df_3d_pose_data
|
983,242 | d451a8aacc40b5c63f0dbe74c6c03fee4dfa78eb | import math
import numpy
import pylab
import grid_plot_util as gpu
# plot a simple finite-difference grid (with one ghost point on each side)
# -----------------------------------------------------------------------------
nzones = 9

# data that lives on the grid
# a = numpy.array([0.3, 1.0, 0.9, 0.8, 0.25, 0.15, 0.5, 0.55])
a = numpy.array([0.55, 0.3, 1.0, 0.9, 0.8, 0.25, 0.1, 0.5, 0.55])

gr = gpu.grid(nzones, ng=1, fd=1)

pylab.clf()
gpu.drawGrid(gr, drawGhost=1)

# index labels under the grid points (blank entries are unlabeled)
labels = ["-1", "0", "1", "", "i-1", "i", "i+1", "", "N-2", "N-1", "N"]
i = gr.ilo-gr.ng
while (i < gr.ng+gr.nx+1):
    if not labels[i] == "":
        gpu.labelCenter(gr, i, r"$%s$" % (labels[i]), fontsize="medium")
    i += 1

# draw the data
i = gr.ilo
while i < gr.ihi+1:
    gpu.drawFDData(gr, i, a[i-gr.ng], color="r")
    i += 1

gpu.labelFD(gr, gr.ilo+4, a[gr.ilo+4-gr.ng], r"$a_i$", color="r")

# label dx
# BUG FIX: use integer division (//). Under Python 3, nzones/2 is a float
# and cannot be used as an array index; // is identical for ints in Python 2.
pylab.plot([gr.xc[gr.ng+nzones//2-1], gr.xc[gr.ng+nzones//2-1]], [-0.35,-0.25], color="k")
pylab.plot([gr.xc[gr.ng+nzones//2], gr.xc[gr.ng+nzones//2]], [-0.35,-0.25], color="k")
pylab.plot([gr.xc[gr.ng+nzones//2-1], gr.xc[gr.ng+nzones//2]], [-0.3,-0.3], color="k")
pylab.text(0.5*(gr.xc[gr.ng+nzones//2-1] + gr.xc[gr.ng+nzones//2]), -0.45,
           r"$\Delta x$",
           horizontalalignment="center", fontsize=16)

pylab.axis([gr.xmin-1.1*gr.dx,gr.xmax+1.1*gr.dx, -0.5, 1.3])
pylab.axis("off")

pylab.subplots_adjust(left=0.05,right=0.95,bottom=0.05,top=0.95)

f = pylab.gcf()
f.set_size_inches(10.0,3.0)

pylab.savefig("fd_ghost.png")
pylab.savefig("fd_ghost.eps")
|
983,243 | 9df0180ae38511e09eeab714e398d81ca372c1bc | from __future__ import absolute_import
from django.db import models
class Agency_Jun30(models.Model):
    """Snapshot (as of Jun 30) of an agency record.

    NOTE(review): field names mirror an upstream source verbatim
    (camelCase), presumably the Nikshay system — confirm with the ETL that
    populates this table.
    """

    # 'agencyId' is the primary key; 'id' is a separate nullable/unique
    # integer, presumably the upstream row id.
    id = models.IntegerField(null=True, unique=True)
    agencyId = models.IntegerField(primary_key=True)
    agencyName = models.CharField(max_length=256, null=True)
    agencyStatus = models.CharField(max_length=256, null=True)
    agencySubTypeId = models.CharField(max_length=256, null=True)
    agencyTypeId = models.CharField(max_length=256)
    associatedFOId = models.CharField(max_length=256, null=True)
    attachedToAgency = models.CharField(max_length=256, null=True)
    creationDate = models.DateTimeField()
    creator = models.CharField(max_length=256, null=True)
    dateOfRegn = models.DateTimeField()
    labOrLcc = models.CharField(max_length=256, null=True)
    modificationDate = models.DateTimeField()
    modifiedBy = models.CharField(max_length=256, null=True)
    nikshayId = models.CharField(max_length=256, null=True)
    # Single-character flag field.
    nikshayProcessedFlag = models.CharField(max_length=1, null=True)
    onBehalfOf = models.CharField(max_length=256, null=True)
    organisationId = models.IntegerField()
    owner = models.CharField(max_length=256, null=True)
    parentAgencyId = models.IntegerField()
    parentAgencyType = models.CharField(max_length=256, null=True)
    payToParentAgency = models.CharField(max_length=256, null=True)
    pendingApproval = models.CharField(max_length=256, null=True)
    regnIssueAuthId = models.CharField(max_length=256, null=True)
    regnNumber = models.CharField(max_length=256, null=True)
    sendAlert = models.CharField(max_length=256, null=True)
    subOrganisationId = models.IntegerField()
    tbDrugInStock = models.CharField(max_length=256, null=True)
    tbTests = models.CharField(max_length=256, null=True)
    trainingAttended = models.CharField(max_length=256, null=True)
    # Single-character flag field.
    tbCorner = models.CharField(max_length=1, null=True)
class UserDetail_Jun30(models.Model):
    """Snapshot (as of Jun 30) of a user/provider detail record.

    NOTE(review): camelCase field names mirror the upstream source verbatim;
    confirm the populating ETL before renaming anything.
    """

    id = models.IntegerField(primary_key=True)
    accountTypeId = models.CharField(max_length=256, null=True)
    addressLineOne = models.CharField(max_length=256, null=True)
    addressLineTwo = models.CharField(max_length=256, null=True)
    agencyId = models.IntegerField()
    alternateMobileNumber = models.CharField(max_length=256, null=True)
    alternateMobileNumber1 = models.CharField(max_length=256, null=True)
    alternateMobileNumber2 = models.CharField(max_length=256, null=True)
    bankAccountName = models.CharField(max_length=256, null=True)
    bankAccountNumber = models.CharField(max_length=256, null=True)
    bankBranch = models.CharField(max_length=256, null=True)
    bankIFSCCode = models.CharField(max_length=256, null=True)
    bankName = models.CharField(max_length=256, null=True)
    blockOrHealthPostId = models.CharField(max_length=256, null=True)
    creationDate = models.DateTimeField(null=True)
    creator = models.CharField(max_length=256, null=True)
    districtId = models.CharField(max_length=256, null=True)
    dob = models.DateTimeField(null=True)
    email = models.CharField(max_length=256, null=True)
    firstName = models.CharField(max_length=256, null=True)
    gender = models.CharField(max_length=256, null=True)
    isPasswordResetFlag = models.NullBooleanField()
    isPrimary = models.BooleanField()
    landLineNumber = models.CharField(max_length=256, null=True)
    lastName = models.CharField(max_length=256, null=True)
    latitude = models.CharField(max_length=256, null=True)
    longitude = models.CharField(max_length=256, null=True)
    micrCode = models.IntegerField(null=True)
    middleName = models.CharField(max_length=256, null=True)
    mobileNumber = models.CharField(max_length=256, null=True)
    modificationDate = models.DateTimeField(null=True)
    modifiedBy = models.CharField(max_length=256, null=True)
    # Unique login name — the only uniqueness constraint on this table.
    motechUserName = models.CharField(max_length=256, unique=True)
    organisationId = models.IntegerField()
    owner = models.CharField(max_length=256, null=True)
    passwordResetFlag = models.BooleanField()
    pincode = models.IntegerField()
    stateId = models.CharField(max_length=256, null=True)
    status = models.CharField(max_length=256, null=True)
    subOrganisationId = models.IntegerField()
    tuId = models.CharField(max_length=256, null=True)
    uniqIDNo = models.CharField(max_length=256, null=True)
    uniqIDType = models.CharField(max_length=256, null=True)
    userId = models.IntegerField()
    userName = models.CharField(max_length=256, null=True)
    valid = models.BooleanField()
    villageTownCity = models.CharField(max_length=256, null=True)
    wardId = models.CharField(max_length=256, null=True)
|
983,244 | 1683f720394b207e24ac053fb6f3b89ee88a8678 | import numpy as np
from numba import njit
@njit(cache=True)
def f(p, U_ij, gamma, idens, ixmom, iymom, iener):
    """
    Residual whose root in the pressure p is sought by the
    conservative-to-primitive inversion (zero when p is consistent).
    """
    D = U_ij[idens]
    tau = U_ij[iener]
    if abs(tau + p) < 1.e-6:
        # Degenerate total energy: use the momenta as velocities directly.
        u = U_ij[ixmom]
        v = U_ij[iymom]
    else:
        denom = tau + p + D
        u = U_ij[ixmom] / denom
        v = U_ij[iymom] / denom
    # Lorentz factor
    W = 1.0 / np.sqrt(1.0 - u**2 - v**2)
    return (gamma - 1.0) * (tau + D*(1.0-W) + p*(1.0-W**2)) / W**2 - p
@njit(cache=True)
def brentq(x1, b, U, gamma, idens, ixmom, iymom, iener,
           TOL=1.e-6, ITMAX=100):
    """
    Root finder using Brent's method.

    Finds a root of f(p, U, ...) in the bracket [x1, b].  Returns the root
    estimate, or x1 unchanged if the bracket is invalid or ITMAX iterations
    are exhausted.
    """
    # initialize variables
    a = x1
    c = 0.0
    d = 0.0
    fa = f(a, U, gamma, idens, ixmom, iymom, iener)
    fb = f(b, U, gamma, idens, ixmom, iymom, iener)
    fc = 0.0
    # root not bracketed (f has the same sign at both ends): bail out
    if fa * fb >= 0.0:
        return x1
    # switch variables so |f(b)| <= |f(a)| (b is the better guess)
    if abs(fa) < abs(fb):
        a, b = b, a
        fa, fb = fb, fa
    c = a
    fc = fa
    mflag = True
    for _ in range(ITMAX):
        # inverse quadratic interpolation when all three residuals differ,
        # otherwise fall back to the secant step
        if fa != fc and fb != fc:  # pylint: disable=consider-using-in
            s = a*fb*fc / ((fa-fb) * (fa-fc)) + b*fa*fc / ((fb-fa)*(fb-fc)) + \
                c*fa*fb / ((fc-fa)*(fc-fb))
        else:
            s = b - fb * (b-a) / (fb-fa)
        # test conditions and store in con1-con5; any of them forces a
        # safe bisection step instead of the interpolated s
        con1 = False
        if 0.25 * (3.0 * a + b) < b:
            if s < 0.25 * (3.0 * a + b) or s > b:
                con1 = True
        elif s < b or s > 0.25 * (3.0 * a + b):
            con1 = True
        con2 = mflag and abs(s-b) >= 0.5 * abs(b-c)
        con3 = (not mflag) and abs(s-b) >= 0.5 * abs(c-d)
        con4 = mflag and abs(b-c) < TOL
        con5 = (not mflag) and abs(c-d) < TOL
        if con1 or con2 or con3 or con4 or con5:
            s = 0.5 * (a + b)
            mflag = True
        else:
            mflag = False
        # evaluate at the new point and set new limits
        fs = f(s, U, gamma, idens, ixmom, iymom, iener)
        if abs(fa) < abs(fb):
            a, b = b, a
            fa, fb = fb, fa
        d = c
        c = b
        fc = fb
        # keep the sub-interval that still brackets the root
        if fa * fs < 0.0:
            b = s
            fb = fs
        else:
            a = s
            fa = fs
        # found solution to required tolerance
        if fb == 0.0 or fs == 0.0 or abs(b-a) < TOL:
            return b
    return x1
@njit(cache=True)
def cons_to_prim(U,
                 irho, iu, iv, ip, ix, irhox,
                 idens, ixmom, iymom, iener,
                 naux, gamma, q, smallp=1.e-6):
    """
    Convert an input vector of conserved variables to primitive variables.

    U is the (qx, qy, nvar) conserved state; q is filled in place with the
    primitive state.  The i* arguments are component indices into the last
    axis; naux is the number of auxiliary (passively advected) quantities;
    smallp is the pressure floor.
    """
    qx, qy, _ = U.shape
    for j in range(qy):
        for i in range(qx):
            # bracket the pressure root; the tiny multiplier nudges pmax
            # strictly above the Newtonian estimate
            pmax = max((gamma-1.0)*U[i, j, iener]*1.0000000001, smallp)
            pmin = max(min(1.0e-6*pmax, smallp), np.sqrt(U[i, j, ixmom] **
                       2+U[i, j, iymom]**2) - U[i, j, iener] - U[i, j, idens])
            fmin = f(pmin, U[i, j, :], gamma, idens, ixmom, iymom, iener)
            fmax = f(pmax, U[i, j, :], gamma, idens, ixmom, iymom, iener)
            # widen the bracket (twice) if it does not straddle the root
            if fmin * fmax > 0.0:
                pmin = pmin * 1.0e-2
                fmin = f(pmin, U[i, j, :], gamma, idens, ixmom, iymom, iener)
            if fmin * fmax > 0.0:
                pmax = min(pmax*1.0e2, 1.0)
            if fmin * fmax > 0.0:
                # still no bracket: fall back to the Newtonian pressure
                q[i, j, ip] = max((gamma-1.0)*U[i, j, iener], smallp)
            else:
                q[i, j, ip] = brentq(pmin, pmax, U[i, j, :], gamma, idens, ixmom, iymom, iener)
            if (q[i, j, ip] != q[i, j, ip]) or \
                    (q[i, j, ip]-1.0 == q[i, j, ip]) or \
                    (abs(q[i, j, ip]) > 1.0e10):  # nan or infty alert
                q[i, j, ip] = max((gamma-1.0)*U[i, j, iener], smallp)
            # enforce the pressure floor
            q[i, j, ip] = max(q[i, j, ip], smallp)
            # recover velocities; guard the degenerate denominator case
            if abs(U[i, j, iener] + U[i, j, idens] + q[i, j, ip]) < 1.0e-5:
                q[i, j, iu] = U[i, j, ixmom]
                q[i, j, iv] = U[i, j, iymom]
            else:
                q[i, j, iu] = U[i, j, ixmom]/(U[i, j, iener] + U[i, j, idens] + q[i, j, ip])
                q[i, j, iv] = U[i, j, iymom]/(U[i, j, iener] + U[i, j, idens] + q[i, j, ip])
            # nan check (x != x is only true for NaN)
            if (q[i, j, iu] != q[i, j, iu]):
                q[i, j, iu] = 0.0
            if (q[i, j, iv] != q[i, j, iv]):
                q[i, j, iv] = 0.0
    # Lorentz factor from the recovered velocities, then rest-mass density
    W = 1.0/np.sqrt(1.0 - q[:, :, iu]**2 - q[:, :, iv]**2)
    q[:, :, irho] = U[:, :, idens] / W
    if naux > 0:
        for i in range(naux):
            q[:, :, ix+i] = U[:, :, irhox+i]/(q[:, :, irho] * W)
|
983,245 | 52e208d83f15c72927a7179064aa9861c1550769 | #Programa 03
# Step-by-step demo of assignment and arithmetic operators.
a = 8 - 3          # a = 5
print(a)
c = 7 - a          # c = 7 - 5 = 2
print(a, c)
b = c % a          # b = 2 % 5 = 2 (remainder)
print(a, c, b)
a = a + b - c      # a = 5 + 2 - 2 = 5 (rebinding a)
print(a, c, b) |
983,246 | 3431cc1e09e0d5479fda30c624f89c9b53ef770e | import threading
import time
# Shared account balance mutated concurrently by every worker thread.
balance = 0
# Create one lock instance shared by all threads.
my_lock = threading.Lock()
# Multithreading demo: subclass threading.Thread and override the
# __init__ and run methods.
class CustomThread(threading.Thread):
    """Worker that repeatedly adds and then removes self.n from the shared balance."""

    def __init__(self, n, *args, **kwargs):
        threading.Thread.__init__(self, *args, **kwargs)
        # Amount this thread deposits and withdraws each cycle.
        self.n = n

    def change_it(self):
        """
        Normally `balance` always ends up 0, but with concurrent threads it
        can be observed at other values: one thread adds its amount and,
        before it manages to subtract it again, another thread adds a new
        amount.  Holding a lock around the whole cycle solves this.
        """
        global balance
        # The lock guarantees correctness: with multiple threads, later ones
        # wait until the current holder finishes and releases it before
        # they execute.  A thread arriving while the lock is held blocks.
        with my_lock:
            balance += self.n
            time.sleep(1.5)
            balance -= self.n
            time.sleep(1)
            print("n is {}, balance is {}".format(self.n, balance))

    def run(self):
        # Each thread repeats the guarded update many times.
        for i in range(10000):
            self.change_it()
if __name__ == '__main__':
    # Two workers hammer the shared balance concurrently; the lock inside
    # change_it keeps every printed balance consistent.
    t = CustomThread(5)
    t2 = CustomThread(8)
    t.start()
    t2.start()
    t.join()
    t2.join()
|
983,247 | c3e55f9e50100cf9488efcab8ebe0ee51b32dcab | from django.http import HttpResponseNotAllowed
from django.shortcuts import render, redirect, get_object_or_404
from appTodolist.models import Task, TaskList
def get_tasks(request):
    """Render every task list, highest priority first (GET only)."""
    if request.method != 'GET':
        return HttpResponseNotAllowed(['GET'])
    lists = TaskList.objects.order_by("-priority")
    context = {'task_lists': lists}
    return render(request, 'appTodoList/new_tasks.html', context)
def add_task(request):
    """Create a Task named via POST 'task_name' inside the list 'list_id'."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['POST'])
    name = request.POST.get('task_name')
    list_pk = request.POST.get('list_id')
    # Silently skip creation when either field is missing/empty.
    if name and list_pk:
        parent = TaskList.objects.get(pk=list_pk)
        Task.objects.create(name=name, task_list=parent)
    return redirect('tasks:get_tasks')
def add_list(request):
    """Create a new TaskList named via POST 'name'."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['POST'])
    name = request.POST.get('name')
    # Skip creation when the name is missing/empty.
    if name:
        TaskList.objects.create(name=name)
    return redirect('tasks:get_tasks')
def _mutate_and_redirect(request, model, mutate, save=True):
    """POST-only helper shared by the single-object task/list views.

    Looks up *model* by the POSTed 'id', applies *mutate* to the instance,
    optionally saves it, then redirects to the task overview.  Mirrors the
    behaviour the nine individual views previously duplicated: non-POST
    requests get a 405, a missing 'id' just redirects with no action, and
    an unknown id raises Http404 (via get_object_or_404).
    """
    if request.method == 'POST':
        obj_id = request.POST.get('id')
        if obj_id is not None:
            obj = get_object_or_404(model, id=obj_id)
            mutate(obj)
            if save:
                # delete() callers pass save=False: saving a deleted row
                # would re-insert it.
                obj.save()
        return redirect('tasks:get_tasks')
    return HttpResponseNotAllowed(['POST'])


def change_state_task(request):
    """Toggle the completion state of a task (POST 'id')."""
    return _mutate_and_redirect(request, Task, lambda task: task.change_state())


def delete_task(request):
    """Delete a task (POST 'id')."""
    return _mutate_and_redirect(request, Task, lambda task: task.delete(), save=False)


def edit_name(request):
    """Rename a task (POST 'id' and 'name')."""
    return _mutate_and_redirect(
        request, Task, lambda task: setattr(task, 'name', request.POST.get('name')))


def increase_priority_task(request):
    """Raise a task's priority (POST 'id')."""
    return _mutate_and_redirect(request, Task, lambda task: task.increase_priority())


def decrease_priority_task(request):
    """Lower a task's priority (POST 'id')."""
    return _mutate_and_redirect(request, Task, lambda task: task.decrease_priority())


def increase_priority_list(request):
    """Raise a task list's priority (POST 'id')."""
    return _mutate_and_redirect(request, TaskList, lambda tl: tl.increase_priority())


def decrease_priority_list(request):
    """Lower a task list's priority (POST 'id')."""
    return _mutate_and_redirect(request, TaskList, lambda tl: tl.decrease_priority())


def delete_list(request):
    """Delete a task list (POST 'id')."""
    return _mutate_and_redirect(request, TaskList, lambda tl: tl.delete(), save=False)


def edit_list(request):
    """Rename a task list (POST 'id' and 'name')."""
    return _mutate_and_redirect(
        request, TaskList, lambda tl: setattr(tl, 'name', request.POST.get('name')))
983,248 | 86e2007323931b3493af545d1288401145453860 | # Generated by Django 2.0.dev20170813003239 on 2017-12-27 19:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Switch NewPost.text to a plain (unconstrained) TextField."""

    dependencies = [
        ('news', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='newpost',
            name='text',
            field=models.TextField(),
        ),
    ]
|
983,249 | 0526d7a71fc0052f0f3483a26b82e89b0555b865 | mylist = [6,2,9,7,1,5]
sortList = mylist.sort()
print(sortList)
# The output is 'None' because list.sort() sorts in place and returns None.
# To get at the sorted list, sort first and then read the list itself:
mylist.sort()
my_sorted_list = mylist
print(mylist)
# Alternatively, use the built-in sorted(), which returns a new sorted list
sortedList = sorted(mylist)
print(sortedList)
#The function sorted returns the list unlike the function sort |
983,250 | 3d84c5461e4800685aa6de0248c92a783ac1adca | #!/usr/bin/env python
class Color:
    """ANSI terminal escape sequences for colored/styled output."""

    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    # Resets all attributes back to the terminal default.
    END = '\033[0m'
    # Foreground colors only (styles excluded), handy for iteration.
    colors = [PURPLE, CYAN, DARKCYAN, BLUE, GREEN, YELLOW, RED]
def cprint(*args, sep=' ', end='\n', file=None, color=None):
if color is not None:
args = [color + str(i) + Color.END for i in args]
print(*args, sep=sep, end=end, file=file)
# Print the demo message once in each available foreground color.
for color in Color.colors:
    # A message from developers community
    cprint('GitHub is for everyone!', color=color)
|
983,251 | 485d95b481c15fdd4cb94ea0f2c8a78dbf8bac9d | """Configuration utilities."""
import yaml
import os
def load_config(fpath):
    """
    Load configuration file as dict.

    :param fpath: file path to config.yml
    :type fpath: string
    :return: dictionary with config settings
    :rtype: dict
    :raises FileNotFoundError: if fpath does not point to an existing file
    """
    # BUG FIX: validation previously used `assert`, which is silently
    # stripped when Python runs with -O; raise an explicit exception instead.
    if not os.path.isfile(fpath):
        raise FileNotFoundError('File does not exist: {}'.format(fpath))
    with open(fpath, 'r') as file:
        # NOTE(review): FullLoader still constructs arbitrary Python-tagged
        # objects; prefer yaml.safe_load if the config can be untrusted.
        cfg = yaml.load(file, Loader=yaml.FullLoader)
    return cfg
|
983,252 | 32d4591210177f6fd38d79f0a9b4f9c2d02af3c3 | # IRL algorith developed for the toy car obstacle avoidance problem for testing.
import numpy as np
import logging
import playing #get the RL Test agent, gives out feature expectations after 2000 frames
from nn import neural_net #construct the nn and send to playing
from cvxopt import matrix #convex optimization library
from cvxopt import solvers #convex optimization library
from learning import HRL_helper # get the Reinforcement learner
from flat_game import carmunk
# Size of the RL state/feature vector.
NUM_STATES = 8
# Target behavior to learn: yellow/brown/red/bumping.
BEHAVIOR = 'red'
# Number of RL training frames per iteration of H-IRL.
FRAMES = 3000
class hrlAgent:
    """Apprenticeship-learning (projection IRL) agent for the toy car task.

    Keeps a dictionary mapping hyperdistance t -> policy feature
    expectations, and alternates between a QP weight optimization and RL
    training until a policy within ``epsilon`` of the expert is found.
    """

    def __init__(self, randomFE, expertFE, epsilon, num_states, num_frames, behavior, reward_error=None):
        # reward_error now defaults to None: the __main__ block of this
        # module constructs hrlAgent without it, which previously raised
        # TypeError (missing required positional argument).
        self.randomPolicy = randomFE
        self.expertPolicy = expertFE
        self.num_states = num_states
        self.num_frames = num_frames
        self.behavior = behavior
        self.epsilon = epsilon # termination when t<0.1
        # norm of the difference in expert and random feature expectations
        self.randomT = np.linalg.norm(np.asarray(self.expertPolicy)-np.asarray(self.randomPolicy))
        # storing the policies and their respective t values in a dictionary
        self.policiesFE = {self.randomT:self.randomPolicy}
        print("Expert - Random at the Start (t) :: " , self.randomT)
        self.currentT = self.randomT
        self.minimumT = self.randomT
        self.reward_error = reward_error

    # DAP ################################
    def getRLAgentFE(self, W, i , x , y , angle , car_distance):
        """Train an RL agent under reward weights W and return the feature
        expectations of the learned policy (executed for 2000 frames)."""
        HRL_helper(W, self.behavior, self.num_frames, i) # train the agent and save the model in a file used below
        # use the saved model to get the FE
        saved_model = 'saved-models_'+self.behavior+'/evaluatedPolicies/'+str(i)+'-164-150-100-50000-'+str(self.num_frames)+'.h5'
        model = neural_net(self.num_states, [164, 150], saved_model)
        return playing.play(model, W, x , y , angle, car_distance) #return feature expectations by executing the learned policy

    # DAP ################################
    def policyListUpdater(self, W, i , x , y , angle , car_distance):
        """Add the new policy's FE to the dictionary and return its t value."""
        tempFE = self.getRLAgentFE(W, i , x , y , angle, car_distance) # get feature expectations of a new policy respective to the input weights
        hyperDistance = np.abs(np.dot(W, np.asarray(self.expertPolicy)-np.asarray(tempFE))) #hyperdistance = t
        self.policiesFE[hyperDistance] = tempFE
        return hyperDistance # t = (weights.transpose)*(expert-newPolicy)

    # DAP ################################
    def optimalWeightFinder(self, x , y , angle, car_distance):
        """Iterate QP optimization + RL training until t <= epsilon.

        Appends each weight vector to weights-<BEHAVIOR>.txt and returns the
        final weights.
        """
        f = open('weights-'+BEHAVIOR+'.txt', 'w')
        i = 1
        while True:
            W = self.optimization() # optimize to find new weights in the list of policies
            print ("weights ::", W )
            f.write( str(W) )
            f.write('\n')
            print ("the distances ::", self.policiesFE.keys())
            self.currentT = self.policyListUpdater(W, i , x , y , angle, car_distance)
            print ("Current distance (t) is:: ", self.currentT )
            if self.currentT <= self.epsilon: # terminate if the point reached close enough
                break
            i += 1
        f.close()
        return W

    def optimization(self):
        """Solve the SVM-like QP: minimize ||w|| subject to the expert's FE
        being separated from every stored policy's FE by margin 1.

        :return: the L2-normalized weight vector.
        """
        m = len(self.expertPolicy)
        P = matrix(2.0*np.eye(m), tc='d') # min ||w||
        q = matrix(np.zeros(m), tc='d')
        policyList = [self.expertPolicy]
        h_list = [1]
        for i in self.policiesFE.keys():
            policyList.append(self.policiesFE[i])
            h_list.append(1)
        policyMat = np.matrix(policyList)
        # Expert row is negated so the constraint reads w.expert >= 1,
        # w.policy <= -1 in cvxopt's G x <= h form.
        policyMat[0] = -1*policyMat[0]
        G = matrix(policyMat, tc='d')
        h = matrix(-np.array(h_list), tc='d')
        sol = solvers.qp(P,q,G,h)
        weights = np.squeeze(np.asarray(sol['x']))
        norm = np.linalg.norm(weights)
        weights = weights/norm
        return weights # return the normalized weights
if __name__ == '__main__':
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    randomPolicyFE = [ 7.74363107 , 4.83296402 , 6.1289194 , 0.39292849 , 2.0488831 , 0.65611318 , 6.90207523 , 2.46475348]
    # ^the random policy feature expectations
    expertPolicyYellowFE = [7.5366e+00, 4.6350e+00 , 7.4421e+00, 3.1817e-01, 8.3398e+00, 1.3710e-08, 1.3419e+00 , 0.0000e+00]
    # ^feature expectations for the "follow Yellow obstacles" behavior
    expertPolicyRedFE = [7.9100e+00, 5.3745e-01, 5.2363e+00, 2.8652e+00, 3.3120e+00, 3.6478e-06, 3.82276074e+00 , 1.0219e-17]
    # ^feature expectations for the follow Red obstacles behavior
    expertPolicyBrownFE = [5.2210e+00, 5.6980e+00, 7.7984e+00, 4.8440e-01, 2.0885e-04, 9.2215e+00, 2.9386e-01 , 4.8498e-17]
    # ^feature expectations for the "follow Brown obstacles" behavior
    expertPolicyBumpingFE = [ 7.5313e+00, 8.2716e+00, 8.0021e+00, 2.5849e-03 ,2.4300e+01 ,9.5962e+01 ,1.5814e+01 ,1.5538e+03]
    # ^feature expectations for the "nasty bumping" behavior
    # DAP ################################
    # Starting pose of the car in the carmunk simulation.
    epsilon = 0.1
    x = 150
    y = 20
    car_distance = 0
    angle = 1.4
    # NOTE(review): hrlAgent.__init__ declares a reward_error parameter that
    # this call does not supply; as written this raises TypeError unless
    # __init__ gives it a default. Confirm the intended value.
    hrlearner = hrlAgent(randomPolicyFE, expertPolicyRedFE, epsilon, NUM_STATES, FRAMES, BEHAVIOR)
    print (hrlearner.optimalWeightFinder(x , y , angle, car_distance))
|
983,253 | 2f90dcf4ee24b42b22ac566c83597c0cbb7f5e38 | import cv2
# Path to the OpenCV Haar-cascade model (eyes, including eyeglasses).
classification = "haarcascade_eye_tree_eyeglasses.xml"
# Build the cascade detector from the XML model.
faceCascade = cv2.CascadeClassifier(classification)
# Load the target image and convert it to grayscale for detection.
image = cv2.imread("2.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Run multi-scale detection on the grayscale frame.
faces = faceCascade.detectMultiScale(gray,
                                     scaleFactor=1.1,
                                     minNeighbors=5,
                                     minSize=(30, 30))
# Outline every detection with a 2 px yellow-ish rectangle.
for (left, top, width, height) in faces:
    cv2.rectangle(image, (left, top), (left + width, top + height),
                  (0, 255, 210), 2)
# Show the annotated image until any key is pressed.
cv2.imshow("Titulo da imagem", image)
cv2.waitKey(0)
|
983,254 | d2dd27b6c6e3758ec68e8ee500cede9149122eae | import pandas_datareader.data as web
import datetime
import pandas as pd
if __name__ == '__main__':
    # Compare A-share vs H-share prices for dual-listed companies and
    # compute the A-share premium over the RMB-converted H price.
    # Please set the exchange rate today before run
    exchange_rate = 0.8846
    # loading stock list
    filepath = '/Users/yanghui/Documents/yahoo/A股H股代码对照表.csv'
    A_H_stock_df = pd.read_csv(filepath)
    # Normalize A-share codes to Yahoo suffixes (.SS Shanghai, .SZ Shenzhen).
    A_H_stock_df['A_code2'] = A_H_stock_df['A_code'].map(
        lambda x: x[0:6] + '.SS' if x[7:11] == 'XSHG' else x[0:6] + '.SZ')
    A_H_stock_df.drop(['A_code'], inplace=True, axis=1, errors='ignore')
    A_H_stock_df.rename(columns={'A_code2': 'A_code'}, inplace=True)
    print(A_H_stock_df)
    # inquiry H stock prices
    # Fix: DataFrame.append was deprecated in pandas 1.4 and removed in
    # 2.0 -- collect per-stock rows in a list and concat once instead.
    H_rows = []
    for H_code in A_H_stock_df.H_code:
        H_price_row = web.get_data_yahoo(H_code).tail(1)
        H_price_row['H_code'] = H_code
        print(H_price_row)
        H_rows.append(H_price_row)
    H_price = pd.concat(H_rows) if H_rows else pd.DataFrame([])
    H_price.drop(['Open', 'High', 'Low', 'Volume', 'Adj Close'], inplace=True, axis=1, errors='ignore')
    H_price.rename(columns={'Close': 'H_price_HKD'}, inplace=True)
    # Convert HKD quotes to RMB using today's exchange rate.
    H_price['H_price_RMB'] = H_price['H_price_HKD'].map(
        lambda x: x * exchange_rate)
    print(H_price)
    H_price.to_csv('/Users/yanghui/Documents/yahoo/H_price.csv')
    # inquiry A stock prices
    A_rows = []
    for A_code in A_H_stock_df.A_code:
        A_price_row = web.get_data_yahoo(A_code).tail(1)
        A_price_row['A_code'] = A_code
        print(A_price_row)
        A_rows.append(A_price_row)
    A_price = pd.concat(A_rows) if A_rows else pd.DataFrame([])
    A_price.drop(['Open', 'High', 'Low', 'Volume', 'Adj Close'], inplace=True, axis=1, errors='ignore')
    A_price.rename(columns={'Close': 'A_price'}, inplace=True)
    print(A_price)
    A_price.to_csv('/Users/yanghui/Documents/yahoo/A_price.csv')
    # join the A+H prices
    A_H_price_compare_result = pd.merge(A_H_stock_df, H_price, how='left', on=['H_code'])
    A_H_price_compare_result = pd.merge(A_H_price_compare_result, A_price, how='left', on=['A_code'])
    # A-share premium in percent relative to the RMB-converted H price.
    A_H_price_compare_result['A股溢价率'] = 100 * (A_H_price_compare_result['A_price'] -
                                               A_H_price_compare_result['H_price_RMB']) \
                                         / A_H_price_compare_result['H_price_RMB']
    print(A_H_price_compare_result)
    A_H_price_compare_result.to_csv('/Users/yanghui/Documents/yahoo/A_H_price_compare_result.csv', encoding='GBK')
|
983,255 | 8efdcb5ee611be92984f0cafab1223f95b1c448a | """
37 36 35 34 33 32 31
38 17 16 15 14 13 30
39 18 5 4 3 12 29
40 19 6 1 2 11 28
41 20 7 8 9 10 27
42 21 22 23 24 25 26
43 44 45 46 47 48 49
1, 3,5,7,9, 13,17,21,25, 31,37,43,49
4n^2+6n+1, 4n^2+6n+5
"""
from helpers import analytics, primes
analytics.monitor()
def poly(n):
    """Return the four spiral-diagonal values for ring *n*, in the original
    order [4n^2-2n+1, 4n^2-4n+1, 4n^2+2n+1, 4n^2+1]."""
    sq = 4 * n * n
    return [sq - 2 * n + 1, sq - 4 * n + 1, sq + 2 * n + 1, sq + 1]
def main():
    """Grow the number spiral ring by ring until primes fall below 10% of
    all diagonal values.

    Returns (side_length, prime_ratio) when the threshold is crossed, or
    the final ratio if it never is within 20000 rings.
    """
    diagonal_total = 1  # the centre cell counts as a diagonal value
    prime_total = 0
    for ring in range(1, 20000):
        prime_total += sum(1 for value in poly(ring) if primes.isPrime(value))
        diagonal_total += 4
        ratio = prime_total / diagonal_total
        if ratio < 0.1:
            return 2 * ring + 1, ratio
    return prime_total / diagonal_total
print(main(), analytics.lap(), analytics.maxMem())
# 26241
# time: 9.41 |
983,256 | 53ac472172775c5c471b2d8fef5c4276671ddc59 | from config import config_by_name
from flask import Flask
from flask_mail import Mail
from flask_migrate import Migrate
from .models import db
from .scheduler import scheduler
migrate = Migrate()
mail = Mail()
def create_app(config_name):
    """Application factory: build and configure the Flask app.

    :param config_name: key into ``config_by_name`` selecting the config class.
    :return: the configured Flask application.
    """
    app = Flask(__name__, instance_relative_config=False)
    app.config.from_object(config_by_name[config_name])
    # Bind the shared extension instances to this app.
    db.init_app(app)
    migrate.init_app(app, db)
    mail.init_app(app)
    scheduler.init_app(app)
    with app.app_context():
        '''
        Todo: Enable on heroku later when app fully available
        '''
        # Only start the background scheduler outside debug/test runs.
        if not (app.config.get('FLASK_DEBUG') or app.config.get('TESTING')):
            # Imported for its side effect of registering the scheduled job.
            from . import check_slots
            scheduler.start()
            print(' * Scheduled job started')
        # Imported here (not at module top) so the blueprint module sees a
        # fully configured app context.
        from . import index
        app.register_blueprint(index.index_bp)
    return app
|
983,257 | 006a9fa0e947976203363f2bee616a1b06407c7d | from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from .models import Category
from .forms import CategoryForm
from django.views.generic import (
CreateView,
UpdateView,
DetailView,
DeleteView,
ListView
)
class CategoryListView(ListView):
    """List every Category (template: categories/category_list.html)."""
    template_name = 'categories/category_list.html'
    queryset = Category.objects.all()
class CategoryDetailView(DetailView):
    """Display a single Category resolved from the ``id`` URL kwarg."""

    template_name = 'categories/category_detail.html'

    def get_object(self):
        # 404 rather than a server error when the id is missing or unknown.
        return get_object_or_404(Category, id=self.kwargs.get("id"))
class CategoryCreateView(CreateView):
    """Create a Category via CategoryForm; redirect to the list on success."""

    template_name = 'categories/category_create.html'
    form_class = CategoryForm
    queryset = Category.objects.all()

    def form_valid(self, form):
        # No extra processing beyond the default save.
        return super().form_valid(form)

    def get_success_url(self):
        return reverse('categories:category-list')
class CategoryUpdateView(UpdateView):
    """Edit an existing Category resolved from the ``id`` URL kwarg."""

    template_name = 'categories/category_create.html'
    form_class = CategoryForm
    queryset = Category.objects.all()

    def get_object(self):
        return get_object_or_404(Category, id=self.kwargs.get("id"))
class CategoryDeleteView(DeleteView):
    """Confirm and delete a Category, then return to the list page."""

    template_name = 'categories/category_delete.html'

    def get_object(self):
        return get_object_or_404(Category, id=self.kwargs.get("id"))

    def get_success_url(self):
        return reverse('categories:category-list')
|
983,258 | 3fceb590f724540ed4a208de0beb834112afa85f | from flask_login import UserMixin
from . import db
class User(UserMixin, db.Model):
    """Application user; UserMixin supplies the flask-login session hooks."""
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Unique login name.
    login = db.Column(db.String(100), unique=True)
    # NOTE(review): plain string column; no hashing is visible here --
    # confirm callers store a password hash, not plaintext.
    password = db.Column(db.String(100))
|
983,259 | a04ab2f6d94541080c85ba9281a639dfbc54360f | #import for RegOnline piece
from pysimplesoap.client import SoapClient
from pysimplesoap.simplexml import SimpleXMLElement
import re
import xml.etree.cElementTree as ET
#added out of place to make sure that later datetime imports work
import datetime
#more imports for RegOnline
from datetime import datetime
from datetime import timedelta
import pymssql
import array
import sys
import xmltodict
#import for SharePoint piece
from suds.client import Client
from suds.transport.https import WindowsHttpAuthenticated
from suds.sax.element import Element
from suds.sax.attribute import Attribute
from suds.sax.text import Raw
def GetNow():
    """Return the current local timestamp as a string."""
    now = datetime.now()
    return str(now)
def initializeReg(apitoken):
    """Build a pysimplesoap client for the RegOnline API with the given
    API token attached as the TokenHeader SOAP header."""
    client = SoapClient(
        wsdl = "https://www.regonline.com/api/default.asmx?WSDL"
        , trace = False)
    # Construct <TokenHeader xmlns="..."><APIToken>...</APIToken></TokenHeader>
    header = SimpleXMLElement("<Headers/>")
    MakeHeader = header.add_child("TokenHeader")
    MakeHeader.add_attribute('xmlns','http://www.regonline.com/api')
    MakeHeader.marshall('APIToken', apitoken)
    # Registering the header on the client sends it with every call.
    client['TokenHeader']=MakeHeader
    return client
def LastSixMonths():
    """Build a RegOnline filter matching records modified in the last 180 days.

    RegOnline filter expressions expect dates as DateTime(YYYY,MM,DD).
    """
    cutoff = datetime.now() - timedelta(days=180)
    return "ModDate >= DateTime(" + cutoff.strftime("%Y,%m,%d") + ")"
def ProcessRegData(ResponseString):
    """Cut the <Data>...</Data> section out of a RegOnline SOAP response.

    Returns the section including both tags, with every xsi:nil="true"
    attribute removed so downstream XML parsing sees plain empty elements.
    """
    start = ResponseString.find("<Data>")
    end = ResponseString.find("</Data>") + len("</Data>")
    section = ResponseString[start:end]
    return section.replace('xsi:nil="true"', "")
def MakeRootDict(root):
    """Parse the RegOnline XML fragment into a nested dictionary."""
    return xmltodict.parse(root)
def initializeSP(SPurl, SPusername, SPpassword):
    """Create a suds SOAP client for the SharePoint Lists web service,
    authenticating over NTLM with the supplied credentials."""
    wsdl_url = SPurl + '_vti_bin/lists.asmx?WSDL'
    transport = WindowsHttpAuthenticated(username=SPusername, password=SPpassword)
    return Client(wsdl_url, transport=transport)
def writeUsers(User_Info, client, StudentInfo):
    """Upsert RegOnline registrations into a SharePoint list (Python 2).

    For each APIRegistration in User_Info, copies the StudentInfo fields,
    renames ID -> RegOnlineID, then queries the list by RegOnlineID and
    issues an Update or New batch via UpdateListItems.

    Returns a (UpdatedRecords, NewRecords) tally tuple.
    """
    #set tallies to zero
    UpdatedRecords = 0
    NewRecords = 0
    for CurrentItem in User_Info['Data']['APIRegistration']:
        item_data = {}
        for DataPoint in StudentInfo:
            item_data.update({DataPoint:CurrentItem[DataPoint]})
        #set the RegOnlineID to the same thing as the "id" that you get from RegOnline, then delete the "ID"
        #can't set the ID column in SharePoint through SOAP services. idk, I guess because it's a system value?
        item_data["RegOnlineID"] = item_data['ID']
        del item_data['ID']
        #Set a blank variable for the GetListItems request
        blank = ""
        #setup the xml query for checking for the ID number
        Eq = Element('Eq')
        Eq.append(Element('FieldRef').append(Attribute('Name','RegOnlineID')))
        Eq.append(Element('Value').append(Attribute('Type','Text')).setText(item_data["RegOnlineID"]))
        Where = Element('Where')
        Where.append(Eq)
        Query = Element('Query')
        Query.append(Where)
        query = Query
        responseExist = client.service.GetListItems('{60848478-FFC6-4897-81BE-C956C55A9B10}', blank, Raw(query))
        #print responseExist
        # NOTE(review): _ItemCount comparisons below are string comparisons
        # ("1", > "1"), which only behave numerically for single digits.
        if responseExist.listitems.data._ItemCount == "1":
            #set item id to returned id from GetListItems
            #print responseExist.listitems.data.row
            item_data["ID"] = responseExist.listitems.data.row._ows_ID
            #Begin creating the updates item by defining a batch
            batch = Element( 'Batch' )
            batch.append(Attribute('OnError','Continue')).append(Attribute('ListVersion','1'))
            #second level element needed to update. notice the Update attribute for the Cmd
            #left all the options in here just in case I wanted to use them in the future.
            method = Element( 'Method')
            method.append(Attribute('ID','1')).append(Attribute('Cmd','Update'))
            #method.append(Attribute('ID','1')).append(Attribute('Cmd','New'))
            #method.append(Attribute('ID','1')).append(Attribute('Cmd','Delete'))
            #method.append(Attribute('ID','1')).append(Attribute('Cmd','Move'))
            #add a field for every dictionary item
            for key in item_data:
                val = item_data[ key ]
                #get rid of spaces in column names
                key = key.replace(' ','_x0020_')
                #correct date to format
                #if isinstance( val, datetime.datetime):
                # NOTE(review): hasattr(val, "datetime") looks intended as a
                # datetime type check; with the module's imports,
                # datetime.datetime.strftime would not resolve as written --
                # confirm this branch is ever taken.
                if hasattr(val,"datetime"):
                    val = datetime.datetime.strftime(val, '%Y-%m-%d %H:%M:%S')
                method.append( Element('Field').append(Attribute('Name', key)).setText(val))
            #add method object into the batch object
            batch.append(method)
            #set the name as updates for the way suds formats the xml
            updates = batch
            try:
                response = client.service.UpdateListItems('{60848478-FFC6-4897-81BE-C956C55A9B10}', Raw(updates) )
            except Exception as e:
                print str(e)
            # NOTE(review): unreachable -- `except Exception` above already
            # catches everything, and the bare `suds` module is not imported.
            except suds.webfault as e:
                print str(e)
            else:
                #print response.Results.Result.ErrorCode
                #print sys.exc_info()
                print "Record " + item_data["ID"] + " updated"
                UpdatedRecords += 1
        elif responseExist.listitems.data._ItemCount > "1":
            #had to add this in case a record made it into the list more than once... might error check it someday... :/
            continue
        else:
            #add this record as a new item to the list
            #Begin creating the updates item by defining a batch
            batch = Element( 'Batch' )
            batch.append(Attribute('OnError','Continue')).append(Attribute('ListVersion','1'))
            #second level element needed to update. notice the 'New' attribute for the Cmd
            method = Element( 'Method')
            method.append(Attribute('ID','1')).append(Attribute('Cmd','New'))
            #add a field for every dictionary item
            for key in item_data:
                val = item_data[ key ]
                #get rid of spaces in column names
                key = key.replace(' ','_x0020_')
                #correct date to format
                #if isinstance( val, datetime.datetime):
                #    val = datetime.datetime.strftime(val, '%Y-%m-%d %H:%M:%S')
                if (key == 'StartDate') or (key == 'EndDate'):
                    if not val is None:
                        val = val.replace('T', ' ')
                method.append( Element('Field').append(Attribute('Name', key)).setText(val))
            #add method object into the batch object
            batch.append(method)
            #set the name as updates for the way suds formats the xml
            updates = batch
            try:
                response = client.service.UpdateListItems('{60848478-FFC6-4897-81BE-C956C55A9B10}', Raw(updates) )
            except Exception as e:
                print str(e)
            # NOTE(review): unreachable, same as above.
            except suds.webfault as e:
                print str(e)
            else:
                #print response.Results.Result.ErrorCode
                #print sys.exc_info()
                print "Record " + item_data["ID"] + " added to the List"
                NewRecords += 1
    return (UpdatedRecords,NewRecords)
def writeEvents(Event_Info, client, EventInfo):
    """Upsert RegOnline events into a SharePoint list (Python 2).

    Mirrors writeUsers but keys on EventID and targets the events list
    GUID. Returns a (UpdatedRecords, NewRecords) tally tuple.
    """
    #set tallies to zero
    UpdatedRecords = 0
    NewRecords = 0
    for CurrentItem in Event_Info['Data']['APIEvent']:
        item_data = {}
        for DataPoint in EventInfo:
            item_data.update({DataPoint:CurrentItem[DataPoint]})
        #set the RegOnlineID to the same thing as the "id" that you get from RegOnline
        item_data["EventID"] = item_data['ID']
        del item_data['ID']
        #Set a blank variable for the GetListItems request
        blank = ""
        #setup the xml query for checking for the ID number
        Eq = Element('Eq')
        Eq.append(Element('FieldRef').append(Attribute('Name','EventID')))
        Eq.append(Element('Value').append(Attribute('Type','Text')).setText(item_data["EventID"]))
        Where = Element('Where')
        Where.append(Eq)
        Query = Element('Query')
        Query.append(Where)
        query = Query
        responseExist = client.service.GetListItems('{12C63117-18E2-4D92-9C0D-38202F86337C}', blank, Raw(query))
        #print responseExist[0][0][1]
        # NOTE(review): string comparison with "0"; unlike writeUsers there
        # is no >1 duplicate guard here.
        if responseExist.listitems.data._ItemCount != "0":
            #set item id to returned id from GetListItems
            item_data["ID"] = responseExist.listitems.data.row._ows_ID
            #Begin creating the updates item by defining a batch
            batch = Element( 'Batch' )
            batch.append(Attribute('OnError','Continue')).append(Attribute('ListVersion','1'))
            #second level element needed to update. notice the Update attribute for the Cmd
            method = Element( 'Method')
            method.append(Attribute('ID','1')).append(Attribute('Cmd','Update'))
            #add a field for every dictionary item
            for key in item_data:
                val = item_data[ key ]
                #get rid of spaces in column names
                key = key.replace(' ','_x0020_')
                #correct date to format
                #if isinstance( val, datetime.datetime):
                if (key == 'StartDate') or (key == 'EndDate'):
                    if not val is None:
                        # ISO "T" separator -> SharePoint's space-separated format.
                        val = val.replace('T', ' ')
                method.append( Element('Field').append(Attribute('Name', key)).setText(val))
            #add method object into the batch object
            batch.append(method)
            #set the name as updates for the way suds formats the xml
            updates = batch
            try:
                response = client.service.UpdateListItems('{12C63117-18E2-4D92-9C0D-38202F86337C}', Raw(updates) )
                #print response
            except Exception as e:
                print str(e)
            # NOTE(review): unreachable -- `except Exception` above already
            # catches everything, and the bare `suds` module is not imported.
            except suds.webfault as e:
                print str(e)
            else:
                #print response #for troubleshooting
                #print sys.exc_info()
                print "Event " + item_data["ID"] + " updated"
                UpdatedRecords += 1
        else:
            #add this record as a new item to the list
            #Begin creating the updates item by defining a batch
            batch = Element( 'Batch' )
            batch.append(Attribute('OnError','Continue')).append(Attribute('ListVersion','1'))
            #second level element needed to update. notice the 'Update' attribute for the Cmd
            method = Element( 'Method')
            method.append(Attribute('ID','1')).append(Attribute('Cmd','New'))
            #add a field for every dictionary item
            for key in item_data:
                val = item_data[ key ]
                #get rid of spaces in column names
                key = key.replace(' ','_x0020_')
                #correct date to format
                if (key == 'StartDate') or (key == 'EndDate'):
                    if not val is None:
                        val = val.replace('T', ' ')
                method.append( Element('Field').append(Attribute('Name', key)).setText(val))
            #add method object into the batch object
            batch.append(method)
            #set the name as updates for the way suds formats the xml
            updates = batch
            print updates
            try:
                response = client.service.UpdateListItems('{12C63117-18E2-4D92-9C0D-38202F86337C}', Raw(updates) )
            except Exception as e:
                print str(e)
            # NOTE(review): unreachable, same as above.
            except suds.webfault as e:
                print str(e)
            else:
                #print response #for troubleshooting
                #print sys.exc_info()
                print "Event " + item_data["ID"] + " added to the List"
                NewRecords += 1
    return (UpdatedRecords,NewRecords)
|
983,260 | fa35d0091695cef2c34402ded8a40dfbe6956b22 | #!/usr/bin/env python3
import timeit
NUMBER = 1
def join(n):
    """Build a string of *n* 'a' characters by accumulating a list and
    joining it once (the benchmark's baseline strategy)."""
    pieces = []
    for _ in range(n):
        pieces.append('a')
    return ''.join(pieces)
def time_for_join(n):
    # Seconds for a single (NUMBER=1) run of join(n).
    return timeit.timeit(lambda: join(n), number=NUMBER)
def concat(n):
    """Build a string of *n* 'a's by repeated right-hand concatenation."""
    result = ''
    for _ in range(n):
        result = result + 'a'
    return result
def time_for_concat(n):
    # Seconds for a single (NUMBER=1) run of concat(n).
    return timeit.timeit(lambda: concat(n), number=NUMBER)
def concat_left(n):
    """Build a string of *n* 'a's by repeatedly prepending on the left
    (defeats CPython's in-place += optimization)."""
    result = ''
    for _ in range(n):
        result = 'a' + result
    return result
def time_for_concat_left(n):
    # Seconds for a single (NUMBER=1) run of concat_left(n).
    return timeit.timeit(lambda: concat_left(n), number=NUMBER)
# Benchmark sweep: for growing n, print concat and left-concat times as
# ratios of the join baseline. Runs up to n≈1M, so this takes a while.
for i in range(1, 1000000, 1000):
    baseline = time_for_join(i)
    a = time_for_concat(i) / baseline
    b = time_for_concat_left(i) / baseline
    print(f"{a}\t{b}")
983,261 | d749060ed04d36f718489583b041a42c09cef827 |
def pizza(*toppings):
    """Print up to the first three toppings, one per line.

    The original indexed toppings[0], [1], [2] directly, which raised
    IndexError when fewer than three toppings were supplied; slicing keeps
    the exact same output for three or more while degrading gracefully.
    """
    for topping in toppings[:3]:
        print(topping)
pizza("Ham","Pineapple","Onion","Cheese","Bacon","Pepperoni")
|
983,262 | 31a6c87e68b3fb3e40e25b328b7e72dc30fe77a7 | """LAMMPS calculator for preparing and parsing single-point LAMMPS \
calculations."""
import subprocess
import numpy as np
# TODO: split LAMMPS input and data files into separate classes
def run_lammps(lammps_executable, input_file, output_file):
    """Runs a single point LAMMPS calculation.

    :param lammps_executable: LAMMPS executable file.
    :type lammps_executable: str
    :param input_file: LAMMPS input file.
    :type input_file: str
    :param output_file: Desired LAMMPS output file (receives stdout).
    :type output_file: str
    """
    # run lammps
    lammps_command = f"{lammps_executable} -in {input_file} "
    print("run command:", lammps_command)
    # Bug fix: stdout previously went to a hard-coded "tmp2False.out",
    # silently ignoring the output_file argument.
    with open(output_file, "w+") as fout:
        subprocess.call(lammps_command.split(), stdout=fout)
def lammps_parser(dump_file, std=False):
    """Parses LAMMPS dump file. Assumes the forces are the final quantities \
to get dumped.

    :param dump_file: Dump file to be parsed.
    :type dump_file: str
    :param std: If True, the last dumped column is a per-atom uncertainty
        and the three columns before it are the force components.
    :type std: bool
    :return: Tuple of (forces, stds) numpy arrays; stds is empty when
        ``std`` is False.
    :rtype: Tuple[np.ndarray, np.ndarray]
    :raises ValueError: if the file contains no "ITEM: ATOMS" section
        (previously this surfaced as an UnboundLocalError).
    """
    forces = []
    stds = []
    with open(dump_file, "r") as outf:
        lines = outf.readlines()

    # Remember the LAST "ITEM: ATOMS" header so only the final snapshot's
    # atom lines are parsed.
    force_start = None
    for count, line in enumerate(lines):
        if line.startswith("ITEM: ATOMS"):
            force_start = count
    if force_start is None:
        raise ValueError(f"No 'ITEM: ATOMS' section found in {dump_file}")

    for line in lines[force_start + 1 :]:
        fline = line.split()
        if std:
            forces.append([float(fline[-4]), float(fline[-3]), float(fline[-2])])
            stds.append(float(fline[-1]))
        else:
            forces.append([float(fline[-3]), float(fline[-2]), float(fline[-1])])

    return np.array(forces), np.array(stds)
# -----------------------------------------------------------------------------
# data functions
# -----------------------------------------------------------------------------
def lammps_dat(structure, atom_types, atom_masses, species):
    """Create LAMMPS data file for an uncharged material.

    :param structure: Structure object containing coordinates and cell.
    :type structure: struc.Structure
    :param atom_types: Atom types ranging from 1 to N.
    :type atom_types: List[int]
    :param atom_masses: Atomic masses of the atom types.
    :type atom_masses: List[int]
    :param species: Type of each atom.
    :type species: List[int]
    """
    text = f"""Header of the LAMMPS data file
{structure.nat} atoms
{len(atom_types)} atom types
"""
    text += lammps_cell_text(structure)
    text += """
Masses
"""
    # One "type mass" line per atom type.
    for atom_type, atom_mass in zip(atom_types, atom_masses):
        text += f"{atom_type} {atom_mass}\n"
    text += """
Atoms
"""
    text += lammps_pos_text(structure, species)
    return text
def lammps_dat_charged(structure, atom_types, atom_charges, atom_masses, species):
    """Create LAMMPS data file for a charged material.

    :param structure: Structure object containing coordinates and cell.
    :type structure: struc.Structure
    :param atom_types: List of atom types.
    :type atom_types: List[int]
    :param atom_charges: Charge of each atom.
    :type atom_charges: List[float]
    :param atom_masses: Mass of each atom type.
    :type atom_masses: List[float]
    :param species: Type of each atom.
    :type species: List[int]
    """
    sections = [f"""Header of the LAMMPS data file
{structure.nat} atoms
{len(atom_types)} atom types
"""]
    sections.append(lammps_cell_text(structure))
    sections.append("""
Masses
""")
    # One "type mass" line per atom type.
    for atom_type, atom_mass in zip(atom_types, atom_masses):
        sections.append(f"{atom_type} {atom_mass}\n")
    sections.append("""
Atoms
""")
    sections.append(lammps_pos_text_charged(structure, atom_charges, species))
    return "".join(sections)
def lammps_cell_text(structure):
    """Return the (triclinic) simulation-cell section of a LAMMPS data file,
    taken from structure.cell with the origin at 0."""
    return f"""
0.0 {structure.cell[0, 0]} xlo xhi
0.0 {structure.cell[1, 1]} ylo yhi
0.0 {structure.cell[2, 2]} zlo zhi
{structure.cell[1, 0]} {structure.cell[2, 0]} {structure.cell[2, 1]} xy xz yz
"""
def lammps_pos_text(structure, species):
    """Return the Atoms section (id, type, x y z) for uncharged particles."""
    lines = ["\n"]
    for atom_id, (coords, atom_type) in enumerate(zip(structure.positions, species), start=1):
        lines.append(f"{atom_id} {atom_type} {coords[0]} {coords[1]} {coords[2]}\n")
    return "".join(lines)
def lammps_pos_text_charged(structure, charges, species):
    """Return the Atoms section (id, type, charge, x y z) for charged
    particles."""
    lines = ["\n"]
    atom_id = 0
    for coords, charge, atom_type in zip(structure.positions, charges, species):
        atom_id += 1
        lines.append(f"{atom_id} {atom_type} {charge} {coords[0]} {coords[1]} {coords[2]}\n")
    return "".join(lines)
def write_text(file, text):
    """Write *text* to *file*, replacing any existing contents."""
    with open(file, "w") as handle:
        handle.write(text)
# -----------------------------------------------------------------------------
# input functions
# -----------------------------------------------------------------------------
def generic_lammps_input(
    dat_file,
    style_string,
    coeff_string,
    dump_file,
    newton=False,
    std_string="",
    std_style=None,
):
    """Create text for a generic single-point LAMMPS input file.

    When both std_string and std_style are given, a per-atom uncertainty
    compute is inserted and its value ("c_std") is appended to the dump
    columns. std_style must be "flare" or "flare_pp".
    """
    ntn = "on" if newton else "off"

    compute_cmd = ""
    c_std = ""
    if std_string != "" and std_style is not None:
        if std_style == "flare":
            compute_cmd = f"compute std all uncertainty/atom {std_string}"
        elif std_style == "flare_pp":
            compute_cmd = f"compute std all flare/std/atom {std_string}"
        else:
            raise NotImplementedError
        c_std = "c_std"

    return f"""# generic lammps input file
units metal
atom_style atomic
dimension 3
boundary p p p
newton {ntn}
read_data {dat_file}
pair_style {style_string}
pair_coeff {coeff_string}
thermo_style one
{compute_cmd}
dump 1 all custom 1 {dump_file} id type x y z fx fy fz {c_std}
dump_modify 1 sort id
run 0
"""
def ewald_input(dat_file, short_cut, kspace_accuracy, dump_file, newton=True):
    """Create text for an Ewald (long-range Coulomb) LAMMPS input file.

    Note: newton must be exactly True (not merely truthy) to emit
    "newton on", matching the original's ``is True`` comparison.
    """
    ntn = "on" if newton is True else "off"
    return f"""# Ewald input file
newton {ntn}
units metal
atom_style charge
dimension 3
boundary p p p
read_data {dat_file}
pair_style coul/long {short_cut}
pair_coeff * *
kspace_style ewald {kspace_accuracy}
thermo_style one
dump 1 all custom 1 {dump_file} id type x y z fx fy fz
dump_modify 1 sort id
run 0
"""
|
983,263 | aa723c774f318e64440fa1cf37b0f9aceda1a3ba | import os.path
from django.contrib.auth import authenticate
from django.shortcuts import render
from qmpy.models import Entry, Task, Calculation, Formation, MetaData
from .tools import get_globals
def home_page(request):
    """Render the landing page with global stats plus the count of
    standard-fit formation energies (comma-formatted as "done")."""
    data = get_globals()
    data.update(
        {
            "done": "{:,}".format(Formation.objects.filter(fit="standard").count()),
        }
    )
    # Sets the test cookie so a later view can verify cookie support.
    request.session.set_test_cookie()
    return render(request, "index.html", data)
def construction_page(request):
    """Placeholder page for sections still under construction."""
    return render(request, "construction.html", {})
def faq_view(request):
    """Render the static FAQ page."""
    return render(request, "faq.html")
def play_view(request):
    """Render the static 'play' page."""
    return render(request, "play.html")
def login(request):
    """Authenticate a POSTed username/password and start a session.

    Bug fix: the original called ``login(request, user)`` -- this view
    itself, which shadows ``django.contrib.auth.login`` and raises
    TypeError. The auth function is imported under an alias instead.

    NOTE(review): both failure branches still fall through without
    returning an HttpResponse -- confirm the intended redirect behavior.
    """
    # Local alias import avoids the name collision with this view.
    from django.contrib.auth import login as auth_login

    if request.method == "POST":
        username = request.POST["username"]
        password = request.POST["password"]
        user = authenticate(username=username, password=password)
        if user is not None:
            if user.is_active:
                auth_login(request, user)
            else:
                # Inactive account: intentionally no session is created.
                pass
        else:
            # Bad credentials: intentionally no session is created.
            pass
def logout(request):
    """End the current user's session.

    Bug fix: the original called ``logout(request)`` recursively (this
    view shadows ``django.contrib.auth.logout``), causing infinite
    recursion; the auth function is imported under an alias instead.
    """
    from django.contrib.auth import logout as auth_logout

    auth_logout(request)
    # redirect to success
|
983,264 | 0fac48830fd2c0be9079a637cb88953c9fd02187 | v1 = ['foo', 'bar', 'baz']
v2 = 'abc'
# map with a two-argument lambda pairs v1 and v2 elementwise:
# 'foo'+'a', 'bar'+'b', 'baz'+'c'.
result = map(lambda x,y: x+y, v1, v2)
# Prints the lazy map object itself (e.g. <map object at 0x...>), not its
# contents; the iterator is not consumed by this.
print(result)
# Materializing the iterator prints ['fooa', 'barb', 'bazc'].
print( list(result) )
|
983,265 | e51e64bc977fb681fb884be961f39fbe3b784d88 | #coding:gbk
import pandas as pd
import numpy as np
import json
from urllib2 import urlopen, quote
import csv
import traceback
import os
# Helper that queries the Baidu geocoder for an address (Python 2).
def getlnglat(address):
    """Call the Baidu geocoder v2 API for *address* and return the parsed
    JSON response as a dict."""
    url = 'http://api.map.baidu.com/geocoder/v2/?address='
    output = 'json'
    ak = '[*Your Key]'
    add = quote(address) # URL-encode the (typically Chinese) address to avoid mojibake
    url2 = url + add + '&output=' + output + "&ak=" + ak
    req = urlopen(url2)
    res = req.read().decode()
    temp = json.loads(res)
    return temp
# Read addresses from the input CSV, geocode each row via Baidu, and write
# "id,address,precise,confidence,level" lines to the output file (Python 2).
out = open('[*Output File Name]','wb')
#out = open("test.csv", 'wb')
#writer = csv.writer(out, dialect='excel')
input = pd.read_csv("[*Input File Name]",low_memory=False)
for i in input.values:
    try:
        row = []
        id = i[0]
        b = i[3].strip()
        #lng = getlnglat(b)['result']['location']['lng'] # longitude
        #lat = getlnglat(b)['result']['location']['lat'] # latitude
        # NOTE(review): three separate getlnglat calls below mean three HTTP
        # round-trips per row; one call with the dict reused would suffice.
        pre = getlnglat(b)['result']['precise'] # exact match flag
        con = getlnglat(b)['result']['confidence'] # confidence score
        lev = getlnglat(b)['result']['level'] # address type Baidu resolved
        str_temp = str(id) + ',' + str(b) + ',' + str(pre) + ',' + str(con) + ',' + str(lev) + '\n'
        #str_temp = '{"id":' + str(id) + ',"address":' + str(b) + ',"precise":' + str(pre) + ',"confidence":' + str(con) +',"level":'+str(lev) +'},'
        out.write(str_temp)
        #row.append([id, b, pre, con, lev])
        #writer.writerow(row)
    except:
        # NOTE(review): bare except logs the traceback and skips the row;
        # this also swallows KeyboardInterrupt.
        f = open("异常日志.txt", 'a')
        traceback.print_exc(file=f)
        f.flush()
        f.close()
out.close()
983,266 | a12942862fafbbd57baf08559dfbd91d84c39f68 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 28 18:40:37 2019
@author: Tristan O'Hanlon
"""
import time
import sys
import numpy as np
import os
from netCDF4 import Dataset
import matplotlib.pyplot as plt
import h5py
import math
from scipy import integrate
########################################---get variables---########################################
# Flat reduction script for CESM1-CAM5 AMIP monthly means (1979-01..2005-12):
# loads cloud / pressure / temperature fields, averages the 2001-2005 period,
# converts hybrid sigma-pressure levels to altitude, and dumps reduced arrays
# to an HDF5 file. Relies on names imported earlier in the file: os, sys,
# math, np (numpy), Dataset (netCDF4), integrate (scipy), h5py, plt.
os.chdir('//synthesis/e/University/University/MSc/Models/Data/CMIP5/cesm1_cam5_amip') #Home PC
#os.chdir('D:/MSc/Models/Data/CMIP6/cesm2.1_cam6') #ext HDD
f = Dataset('clt_Amon_CESM1-CAM5_amip_r1i1p1_197901-200512.nc', 'r')
#get latitude
lat = np.array(f.variables['lat'][:])
#get total cloud cover keyed to latitude
# time index 264 = Jan 2001 (264 months after Jan 1979)
tcc = np.array(f.variables['clt'][264:])
tcc = tcc[:] # get values from 01.2001 to 12.2005
tcc = np.mean(tcc, axis = 0)
# zonal mean; clt is a percentage, so /100 converts to a 0-1 fraction
tcc = np.mean(tcc, axis = -1) / 100
#get cloud fraction
f = Dataset('cl_Amon_CESM1-CAM5_amip_r1i1p1_197901-200512.nc', 'r')
cf = np.array(f.variables['cl'][264:])
cf = cf[:] # get values from 01.2001 to 12.2005
cf = np.mean(cf, axis = 0)
#get hybrid pressure levels
plev = np.array(f.variables['lev'][:]) #in hPa
a = np.array(f.variables['a'][:]) #in hPa
b = np.array(f.variables['b'][:]) #in hPa
p0 = np.array(f.variables['p0'][:]) #in hPa
f = Dataset('ps_Amon_CESM1-CAM5_amip_r1i1p1_197901-200512.nc', 'r')
ps = np.array(f.variables['ps'][264:]) #in hPa
#Convert the hybrid pressure levels to Pa
# collapse time, latitude and longitude into a single mean surface pressure
ps = np.mean(ps, axis = 0)
ps = np.mean(ps, axis = 0)
ps = np.mean(ps, axis = 0)
# hybrid sigma-pressure formula: p = a*p0 + b*ps
p = a*p0 + b*ps
p = np.array(p)
#get cloud liquid content
f = Dataset('clw_Amon_CESM1-CAM5_amip_r1i1p1_197901-200512.nc', 'r')
lw = np.array(f.variables['clw'][264:])
lw = lw[:] # get values from 01.2001 to 12.2005
lw = np.mean(lw, axis = 0)
#get cloud ice content
f = Dataset('cli_Amon_CESM1-CAM5_amip_r1i1p1_197901-200512.nc', 'r')
iw = np.array(f.variables['cli'][264:])
iw = iw[:] # get values from 01.2001 to 12.2005
iw = np.mean(iw, axis = 0)
#get temperature
f = Dataset('ta_Amon_CESM1-CAM5_amip_r1i1p1_197901-200512.nc', 'r')
T = np.array(f.variables['ta'][264:])
T = T[:] # get values from 01.2001 to 12.2005
# mask fill values (>400 K is unphysical); None becomes NaN in a float array
T[T>400] = None
T = np.nanmean(T, axis = 0)
###############################################################################
#---convert pressure levels to altitude---#
#https://www.mide.com/pages/air-pressure-at-altitude-calculator
#https://www.grc.nasa.gov/www/k-12/airplane/atmosmet.html
alt_t = np.empty((p.size,1),dtype=float)
alt_p = np.empty((p.size,1),dtype=float)
alt_ts = np.empty((p.size,1),dtype=float)
# Iterate through all of the temp elements (troposphere h < 11km)
i = 0
for item in p:
    newalt = (288.19 - 288.08*((item/101290)**(1/5.256)))/6.49
    alt_t[i] = [newalt]
    i+=1
# Iterate through all of the pressure elements (lower stratosphere 11km < h <25km)
i = 0
for item in p:
    newalt = (1.73 - math.log(item/22650))/0.157
    alt_p[i] = [newalt]
    i+=1
# Iterate through all of the temp elements (upper stratosphere h > 25km)
i = 0
for item in p:
    newalt = (216.6*((item/2488)**(1/-11.388)) - 141.94)/2.99
    alt_ts[i] = [newalt]
    i+=1
# NOTE(review): this exit terminates the script here, so everything below
# (array combination, HDF5 export, plotting) is currently unreachable.
# Presumably left in to inspect alt_t/alt_p/alt_ts manually — TODO confirm
# before removing.
sys.exit(0)
#manually adjust alt and alt_so arrays using alt_p and alt_ts
alt = alt_t
# linear temperature/altitude profiles used for the temperature-keyed datasets
alt_temp = 288.14 - 6.49 * alt
alt_ts = 141.89 + 2.99 * alt
###############################################################################
#---combine arrays---#
# since lw and iw are in kg/kg - need to convert to LWP and IWC in kgm^-2
# Get density levels
# Integrate density with altitude to get air path AP
# multiply lw and iw by AP
alt_m = np.hstack(alt*1000)
p = p/100
p = np.vstack(p)
pressure = np.hstack((alt, p))
temp_g = np.hstack((alt, alt_temp))
air_density = [] #create empty list
#calculate air density at each pressure layer
# ideal gas law: rho = p / (R_specific * T) with R_specific = 286.9 J/(kg K)
air_density = (pressure[:,1] * 100) / (286.9 * temp_g[:,1])
ap = integrate.trapz(air_density, alt_m)
tclw = np.mean(lw , axis = 0)
tclw = np.mean(tclw , axis = -1) * ap
tciw = np.mean(iw , axis = 0)
tciw = np.mean(tciw , axis = -1) * ap
tcc = np.vstack((lat, tcc)).T # Join the two lists as if they were two columns side by side, into a list of two elements each
tclw = np.vstack((lat, tclw)).T
tciw = np.vstack((lat, tciw)).T
#----------------------------#
# global-mean vertical profiles keyed to altitude
alt = np.hstack(alt)
cf_g = np.mean(cf, axis = -1)
cf_g = np.mean(cf_g, axis = -1)
cf_g = np.vstack((alt, cf_g)).T
lw_g = np.mean(lw, axis = -1)
lw_g = np.mean(lw_g, axis = -1)
lw_g = np.vstack((alt, lw_g)).T
iw_g = np.mean(iw, axis = -1)
iw_g = np.mean(iw_g, axis = -1)
iw_g = np.vstack((alt, iw_g)).T
# zonal-mean (level x latitude) sections
temp_alt_lat = np.mean(T, axis = -1)
cf_alt_lat = np.mean(cf, axis = -1)
lw_alt_lat = np.mean(lw, axis = -1)
iw_alt_lat = np.mean(iw, axis = -1)
#Select Southern ocean Latitudes
# (-70 to -50 degrees) and average each profile over that band
cf_so = np.mean(cf, axis = -1)
cf_so = np.transpose(cf_so)
cf_so = np.hstack((np.vstack(lat), cf_so)) #creates a (180,34) array
cf_so = cf_so[cf_so[:,0]>=-70]
cf_so = cf_so[cf_so[:,0]<=-50]
cf_so = cf_so[:,1:] #Split the combined array into just the tccf data, eliminating the first coloumn of latitude
cf_so = np.mean(cf_so, axis = 0)
cf_so = np.vstack((alt, cf_so)).T
lw_so = np.mean(lw, axis = -1)
lw_so = np.transpose(lw_so)
lw_so = np.hstack((np.vstack(lat), lw_so)) #creates a (180,34) array
lw_so = lw_so[lw_so[:,0]>=-70]
lw_so = lw_so[lw_so[:,0]<=-50]
lw_so = lw_so[:,1:] #Split the combined array into just the tclw data, eliminating the first coloumn of latitude
lw_so = np.mean(lw_so, axis = 0)
lw_so = np.vstack((alt, lw_so)).T
iw_so = np.mean(iw, axis = -1)
iw_so = np.transpose(iw_so)
iw_so = np.hstack((np.vstack(lat), iw_so)) #creates a (180,34) array
iw_so = iw_so[iw_so[:,0]>=-70]
iw_so = iw_so[iw_so[:,0]<=-50]
iw_so = iw_so[:,1:] #Split the combined array into just the tclw data, eliminating the first coloumn of latitude
iw_so = np.mean(iw_so, axis = 0)
iw_so = np.vstack((alt, iw_so)).T
# same profiles keyed to the linear temperature profile instead of altitude
cf_t = np.vstack((temp_g[:,1], cf_g[:,1])).T
cf_t_so = np.vstack((temp_g[:,1], cf_so[:,1])).T
lw_t = np.vstack((temp_g[:,1], lw_g[:,1])).T
lw_t_so = np.vstack((temp_g[:,1], lw_so[:,1])).T
iw_t = np.vstack((temp_g[:,1], iw_g[:,1])).T
iw_t_so = np.vstack((temp_g[:,1], iw_so[:,1])).T
# liquid / ice phase fractions of total cloud cover, per latitude
lw = np.mean(lw, axis=0)
iw = np.mean(iw, axis=0)
lw = np.mean(lw, axis=-1)
iw = np.mean(iw, axis=-1)
lw_frac = (lw/(lw+iw))
iw_frac = (iw/(lw+iw))
tclw_frac = lw_frac * tcc[:,1]
tciw_frac = iw_frac * tcc[:,1]
os.chdir('c:/Users/tristan/University/University/MSc/Models/climate-analysis/CESM1-CAM5-AMIP/reduced_datasets') #Home PC
with h5py.File('2001_2005_CAM5.h5', 'w') as p:
    p.create_dataset('alt', data=alt)
    p.create_dataset('lat', data=lat)
    p.create_dataset('air_density', data=air_density)
    p.create_dataset('tcc', data=tcc)
    p.create_dataset('tclw', data=tclw)
    p.create_dataset('tciw', data=tciw)
    p.create_dataset('tclw_frac', data=tclw_frac)
    p.create_dataset('tciw_frac', data=tciw_frac)
    p.create_dataset('cf', data=cf_g)
    p.create_dataset('lw', data=lw_g)
    p.create_dataset('iw', data=iw_g)
    p.create_dataset('temp', data=temp_g)
    p.create_dataset('cf_so', data=cf_so)
    p.create_dataset('lw_so', data=lw_so)
    p.create_dataset('iw_so', data=iw_so)
    p.create_dataset('pressure', data=pressure)
    p.create_dataset('temp_alt_lat', data=temp_alt_lat)
    p.create_dataset('cf_alt_lat', data=cf_alt_lat)
    p.create_dataset('lw_alt_lat', data=lw_alt_lat)
    p.create_dataset('iw_alt_lat', data=iw_alt_lat)
    p.create_dataset('cf_t', data=cf_t)
    p.create_dataset('cf_t_so', data=cf_t_so)
    p.create_dataset('lw_t', data=lw_t)
    p.create_dataset('lw_t_so', data=lw_t_so)
    p.create_dataset('iw_t', data=iw_t)
    p.create_dataset('iw_t_so', data=iw_t_so)
    # NOTE(review): redundant — the `with` block already closes the file
    p.close()
plt.figure()
fig, ax1 = plt.subplots()
#ax2 = ax1.twinx()
ax1.contourf(lat, alt ,temp_alt_lat)
#ax2.plot(tclw[:,0],tclw[:,1], '-b', label='Liquid Water Content')
#ax2.plot(tciw[:,0],tciw[:,1], '--b', label='Ice Water Content')
#ax.axis('equal')
#ax1.legend(loc='lower center', bbox_to_anchor=(0.5, -0.3),
#           ncol=4, fancybox=True, shadow=True);
#ax2.legend(loc='lower center', bbox_to_anchor=(0.5, -0.4),
#           ncol=4, fancybox=True, shadow=True);
ax1.set_xlabel('Latitude')
ax1.set_ylabel('Cloud Fraction')
#ax2.set_ylabel('Liquid and Ice Water Content ($kgm^{-2}$)')
# NOTE(review): the title mentions GFDL.AM4 / 2006-2011, but this script
# processes CESM1-CAM5 2001-2005 — likely copied from a sibling script; verify.
plt.title('Cloud Fraction and Phase Content vs Latitude - GFDL.AM4 - July 2006 to April 2011')
plt.grid(True)
plt.show()
|
983,267 | 965a0a84b06978dea16928552340b81c94c29b94 | from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
def hello_world(request):
    """Render the hello_world page, echoing which HTTP method was used."""
    method_label = "POST METHOD" if request.method == "POST" else "GET METHOD"
    return render(request, 'accountapp/hello_world.html',
                  context={'text': method_label})
|
983,268 | ee7c49eb9566f955d7c99a9f78a3e44e2dcbf15d | import struct
def int_to_float(num_bits: int, value: int) -> float:
    """
    Convert an integer to the equivalent floating point value.

    The integer's bit pattern is reinterpreted as an IEEE-754 float:
    32 bits -> single precision, 64 bits -> double precision.

    Raises:
        ValueError: if num_bits is not 32 or 64.
    """
    if num_bits == 32:
        unpack_fmt = '>f'
    elif num_bits == 64:
        unpack_fmt = '>d'
    else:
        # ValueError (not a bare Exception) for an invalid argument, matching
        # the error style of get_bit_size below.
        raise ValueError(f"Unhandled bit size: {num_bits}")
    return struct.unpack(unpack_fmt, value.to_bytes(num_bits // 8, 'big'))[0]
def get_bit_size(_type: str) -> int:
    """Return the bit width (32 or 64) for a numeric type tag like 'i32'."""
    sizes = {'i32': 32, 'f32': 32, 'i64': 64, 'f64': 64}
    if _type not in sizes:
        raise ValueError(f"Unsupported type: {_type}")
    return sizes[_type]
|
983,269 | 77d64424bbe11de75bb6dd21f931fec1aef6b833 | from scipy.stats import loguniform, uniform
import numpy as np
import argparse
import os
import sys
import time
import json
import pandas as pd
from IPython import embed
def convert(o):
    """json.dumps `default` hook: widen a numpy int64 to a plain Python int."""
    if not isinstance(o, np.int64):
        raise TypeError
    return int(o)
def select_hyperparams(config, output_name, model, is_arc, score_key='f_macro'):
    """Run random-search hyperparameter selection via external training scripts.

    For each trial: sample parameter values satisfying the CON* constraints,
    write a per-trial config file, shell out to the chosen model's training
    script, and collect the reported score / best epoch. All trial results
    are saved to a CSV under data/model_results/.

    NOTE(review): `is_arc` is accepted but never used in this body — confirm
    whether it should influence the script invocation.
    NOTE(review): the config and result file handles opened below are never
    closed explicitly.
    """
    ### make directories
    config_path, checkpoint_path, result_path = make_dirs(config)
    # keys that configure the search itself, not the model being trained
    setup_params = ['tune_params', 'num_search_trials', 'dir_name']
    model_params = set()
    for p in config:
        # skip search-control keys plus the per-parameter range/algo/type
        # entries and constraint (CON*) definitions
        if p in setup_params or ('range' in p or 'algo' in p or 'type' in p or p.startswith('CON')): continue
        model_params.add(p)
    print("[model params] {}".format(model_params))
    score_lst = []
    time_lst = []
    best_epoch_lst = []
    tn2vals = dict()  # trial number -> sampled parameter values
    for trial_num in range(int(config['num_search_trials'])):
        ### sample values
        print("[trial {}] Starting...".format(trial_num))
        print("[trial {}] sampling parameters in {}".format(trial_num, config['tune_params']))
        # rejection-sample until the constraint set is satisfied
        constraints_OK = False
        while not constraints_OK:
            p2v = sample_values(trial_num)
            constraints_OK = check_constraints(config, p2v)
        tn2vals[trial_num] = p2v
        ### construct the appropriate config file
        config_file_name = config_path + 'config-{}.txt'.format(trial_num)
        print("[trial {}] writing configuration to {}".format(trial_num, config_file_name))
        print("[trial {}] checkpoints to {}".format(trial_num, checkpoint_path))
        print("[trial {}] results to {}".format(trial_num, result_path))
        f = open(config_file_name, 'w')
        model_name = '{}_t{}'.format(config['name'], trial_num)
        f.write('name:{}\n'.format(model_name)) # include trial number in name
        f.write('ckp_path:{}\n'.format(checkpoint_path)) # checkpoint save location
        f.write('res_path:{}\n'.format(result_path)) # results save location
        for p in model_params:
            if p == 'name': continue
            f.write('{}:{}\n'.format(p, config[p]))
        for p in p2v:
            f.write('{}:{}\n'.format(p, p2v[p]))
        f.flush()
        ### run the script
        print("[trial {}] running cross validation".format(trial_num))
        start_time = time.time()
        if model == 'adv':
            os.system("./adv_train.sh 1 {} 0 {} > {}log_t{}.txt".format(config_file_name, score_key, result_path, trial_num))
        elif model == 'bicond':
            os.system("./bicond.sh {} {} > {}log_t{}.txt".format(config_file_name, score_key, result_path, trial_num))
        else:
            print("ERROR: model {} is not supported".format(model))
            sys.exit(1)
        script_time = (time.time() - start_time) / 60.
        print("[trial {}] running on ARC took {:.4f} minutes".format(trial_num, script_time))
        ### process the result and update information on best
        if model == 'adv':
            res_f = open('{}{}_t{}-{}.top5_{}.txt'.format(result_path, config['name'], trial_num, config['enc'], score_key), 'r')
        else:
            res_f = open('{}{}_t{}.top5_{}.txt'.format(result_path, config['name'], trial_num, score_key), 'r')
        res_lines = res_f.readlines()
        # result file layout: third-from-last line is "label:best_epoch",
        # second-from-last is "label:score"
        score_lst.append(res_lines[-2].strip().split(':')[1])
        time_lst.append(script_time)
        best_epoch_lst.append(res_lines[-3].strip().split(':')[1])
        print("[trial {}] Done.".format(trial_num))
        print()
    ### save the resulting scores and times, for calculating the expected validation f1
    data = []
    for ti in tn2vals:
        data.append([ti, score_lst[ti], time_lst[ti], best_epoch_lst[ti], json.dumps(tn2vals[ti], default=convert)])
    df = pd.DataFrame(data, columns=['trial_num', 'avg_score', 'time', 'best_epoch', 'param_vals'])
    df.to_csv('data/model_results/{}-{}trials/{}'.format(config['dir_name'], config['num_search_trials'],
                                                         output_name), index=False)
    print("results to {}".format(output_name))
def parse_config(fname):
    """Parse a colon-delimited config file into a dict.

    Each non-blank line is ``name:value``. ``tune_params`` is split on
    commas, and for every tuned parameter ``p`` the ``p_range`` entry is
    split on '-' and cast according to ``p_type`` ('int', 'float', or left
    as a string).
    """
    n2info = dict()
    # with-block: the file handle is closed even on error (was left open)
    with open(fname, 'r') as f:
        for l in f:
            l = l.strip()
            if not l:
                continue  # tolerate blank/trailing lines (used to crash)
            # split on the FIRST colon only, so values may themselves
            # contain ':' (e.g. Windows paths like C:/...)
            n, info = l.split(':', 1)
            n2info[n] = info
    n2info['tune_params'] = n2info['tune_params'].split(',')
    for p in n2info['tune_params']:
        t = n2info['{}_type'.format(p)]
        n2info['{}_range'.format(p)] = list(map(lambda x: int(x) if t == 'int' else
                                                float(x) if t == 'float' else x,
                                                n2info['{}_range'.format(p)].split('-')))
    return n2info
def sample_values(trial_num):
    """Draw one value per tuned hyperparameter (reads the global `config`).

    Per-parameter algorithms: 'selection' (indexed by trial number),
    'choice' (random pick from a list), or a range-based draw via
    'loguniform', 'uniform-integer' or 'uniform-float'.
    """
    sampled = dict()
    for param in config['tune_params']:
        algo = config['{}_algo'.format(param)]
        if algo == 'selection':  # deterministic: walk the list in trial order
            sampled[param] = config['{}_range'.format(param)][trial_num]
        elif algo == 'choice':  # uniform pick from an explicit value list
            sampled[param] = np.random.choice(config['{}_range'.format(param)])
        else:  # range-based sampling between [min_v, max_v]
            min_v, max_v = config['{}_range'.format(param)]
            if algo == 'loguniform':
                sampled[param] = loguniform.rvs(min_v, max_v)
            elif algo == 'uniform-integer':
                sampled[param] = np.random.randint(min_v, max_v + 1)
            elif algo == 'uniform-float':
                sampled[param] = uniform.rvs(min_v, max_v)
            else:
                # unknown algorithm: warn and leave this parameter unset
                print("ERROR: sampling method specified as {}".format(algo))
    return sampled
def check_constraints(n2info, p2v):
    """Return True iff every CON* constraint in `n2info` holds for values `p2v`.

    Constraints are '#'-separated. Three parts mean ``param1 # op # param2``;
    four parts mean one side is ``scalar * param`` (the scalar's position is
    detected by checking which name is a sampled parameter).
    """
    all_ok = True
    for key in n2info:
        if not key.startswith('CON'):
            continue
        eq = n2info[key].split('#')  # e.g. "lr#<#dropout" or "2#lr#<#decay"
        if len(eq) == 3:
            ok = parse_equation(p2v[eq[0]], eq[1], p2v[eq[2]])
        elif len(eq) == 4:
            if eq[0] in p2v:
                # param # op # scalar # param  ->  v1 op (scalar * v2)
                lhs = p2v[eq[0]]
                op = eq[1]
                rhs = float(eq[2]) * p2v[eq[3]]
            else:
                # scalar # param # op # param  ->  (scalar * v1) op v2
                lhs = float(eq[0]) * p2v[eq[1]]
                op = eq[2]
                rhs = p2v[eq[3]]
            ok = parse_equation(lhs, op, rhs)
        else:
            print("ERROR: equation not parsable {}".format(eq))
            sys.exit(1)
        all_ok = ok and all_ok
    return all_ok
def parse_equation(v1, s, v2):
    """Evaluate the binary comparison ``v1 s v2`` for a small operator set.

    Exits the program on an unrecognised symbol.
    """
    comparisons = {
        '<': lambda a, b: a < b,
        '<=': lambda a, b: a <= b,
        '=': lambda a, b: a == b,
        '!=': lambda a, b: a != b,
        '>': lambda a, b: a > b,
        '>=': lambda a, b: a >= b,
    }
    if s not in comparisons:
        print("ERROR: symbol {} not recognized".format(s))
        sys.exit(1)
    return comparisons[s](v1, v2)
def make_dirs(config):
    """Create the per-run config/checkpoint/result directories.

    Exits the program if any of them already exists (protects a previous
    run's artifacts). Returns the three paths in that order.
    """
    run_tag = '{}-{}trials'.format(config['dir_name'], config['num_search_trials'])
    config_path = 'data/config/{}/'.format(run_tag)
    checkpoint_path = 'data/checkpoints/{}/'.format(run_tag)
    result_path = 'data/model_results/{}/'.format(run_tag)
    named_paths = [('config_path', config_path),
                   ('ckp_path', checkpoint_path),
                   ('result_path', result_path)]
    for p_name, p_path in named_paths:
        if os.path.exists(p_path):
            print("[{}] Directory {} already exists!".format(p_name, p_path))
            sys.exit(1)
        os.makedirs(p_path)
    return config_path, checkpoint_path, result_path
def remove_dirs(config):
    """Delete the per-run config/checkpoint/result directories and their files.

    Directories that do not exist are skipped with a notice.
    """
    run_tag = '{}-{}trials'.format(config['dir_name'], config['num_search_trials'])
    named_paths = [('config_path', 'data/config/{}/'.format(run_tag)),
                   ('ckp_path', 'data/checkpoints/{}/'.format(run_tag)),
                   ('result_path', 'data/model_results/{}/'.format(run_tag))]
    for p_name, p_path in named_paths:
        if not os.path.exists(p_path):
            print("[{}] directory {} doesn't exist".format(p_name, p_path))
            continue
        print("[{}] removing all files from {}".format(p_name, p_path))
        for fname in os.listdir(p_path):
            os.remove(os.path.join(p_path, fname))
        print("[{}] removing empty directory".format(p_name))
        os.rmdir(p_path)
if __name__ == '__main__':
    # CLI entry point: mode 1 = clear stale run dirs then run the search,
    # mode 2 = only remove run dirs.
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--mode', help='What to do', required=True)
    parser.add_argument('-s', '--settings', help='Name of the file containing hyperparam info', required=True)
    # model_name should be bert-text-level or adv or bicond currently and is to be specified when is_arc is True.
    parser.add_argument('-n', '--model', help='Name of the model to run', required=False, default='adv')
    parser.add_argument('-o', '--output', help='Name of the output file (full path)', required=False,
                        default='trial_results.csv')
    parser.add_argument('-k', '--score_key', help='Score key for optimization', required=False, default='f_macro')
    args = vars(parser.parse_args())
    config = parse_config(args['settings'])
    if args['mode'] == '1':
        ## run hyperparam search
        # remove first so make_dirs inside select_hyperparams cannot bail out
        remove_dirs(config)
        # is_arc is inferred from the settings filename ('arc' or 'twitter')
        select_hyperparams(config, args['output'], args['model'], is_arc=('arc' in args['settings'] or 'twitter' in args['settings']), score_key=args['score_key'])
    elif args['mode'] == '2':
        ## remove directories
        remove_dirs(config)
    else:
        print("ERROR. exiting")
983,270 | 5333ed95deb7aac6d48f07fbb82dafca8f439d35 | import logging
import os
import pymysql as pymysql
# Runtime configuration, sourced from environment variables with dev defaults.
environment = os.getenv("APP_ENVIRONMENT", "dev")
db_host = os.getenv("DB_HOST", "localhost")
db_port = int(os.getenv("DB_PORT", 3306))
db_user = os.getenv("DB_USER", "root")
db_pass = os.getenv("DB_PASS", "devPassword")
db_name = os.getenv("DB_SCHEMA", "python-products-api")
db_conn_timeout = int(os.getenv("DB_TIMEOUT", 30))
aws_cognito_region = os.getenv("AWS_COGNITO_REGION", "us-east-1")
aws_cognito_user_pool_id = os.getenv("AWS_COGNITO_USER_POOL_ID", "us-east-1_PowfEWN7p")
# BUG FIX: bool() on any non-empty string is True, so setting
# AWS_COGNITO_ENABLED=false used to still enable Cognito. Parse the common
# falsy spellings explicitly; the default (True) still maps to True.
aws_cognito_enabled = str(os.getenv("AWS_COGNITO_ENABLED", True)).strip().lower() not in (
    "0", "false", "no", "off", ""
)
logging.info("db_host: {}".format(db_host))
logging.info("db_port: {}".format(db_port))
logging.debug("db_user: {}".format(db_user))
# NOTE(review): logging the DB password, even at DEBUG level, is a
# credential-leak risk — consider masking or removing.
logging.debug("db_pass: {}".format(db_pass))
logging.info("db_name: {}".format(db_name))
logging.info("db_conn_timeout: {}".format(db_conn_timeout))
def get_db_config() -> dict:
    """Assemble pymysql connection kwargs from the module-level settings."""
    settings = {
        'host': db_host,
        'port': db_port,
        'db': db_name,
        'user': db_user,
        'passwd': db_pass,
        'charset': 'utf8mb4',
        'cursorclass': pymysql.cursors.DictCursor,
        'connect_timeout': db_conn_timeout,
    }
    return settings
|
983,271 | 4d0aa731a94b06255d7003985439358974bb6259 | # Read csv in spark
import twint
from pprint import pprint
from warnings import warn
from os import path
def createCorpusForUser(username, filePath="Resources/tweets", tweetLimit=5000):
    """Scrape up to `tweetLimit` tweets for `username` into a .corpus file.

    The download is skipped when the target file already exists; accounts
    that twint rejects with ValueError are reported as deleted.
    """
    filePath += "/"+username + ".corpus"
    if path.exists(filePath):
        # guard clause: don't re-download an existing corpus
        warn(filePath + " exist already")
        return
    print("Getting tweet for user:" + username)
    try:
        scrape_cfg = twint.Config()
        # scrape_cfg.Store_csv = True
        scrape_cfg.Username = username
        scrape_cfg.Custom["tweet"] = "<|startoftext|>{tweet}<|endoftext|>"
        scrape_cfg.Limit = str(tweetLimit)
        scrape_cfg.Format = "<|startoftext|>{tweet}<|endoftext|>"
        scrape_cfg.Hide_output = True
        scrape_cfg.Output = filePath
        # scrape_cfg.Store_object = True
        twint.run.Search(scrape_cfg)
    except ValueError:
        warn(username + " has been deleted ")
|
983,272 | a5417c9aaccd025b63512ff16888dee1d82336f8 | #!/usr/bin/env python
# coding: utf-8
import pandas as pd
from fbprophet import Prophet
import json
from flask import Flask, request, jsonify
# Module-level Flask application; the routes below register against it.
app = Flask(__name__)
@app.route('/sendjson2/', methods=['POST','GET'])
def sendjson2():
    """POST: fit a Prophet model on the posted list of {'ds','y'} points and
    return a 90-day forecast aggregated to monthly sums as {"YYYY-MM": yhat}.
    GET: trivial liveness response."""
    if request.method == 'POST':
        data = json.loads(request.get_data())
        # rebuild the time series as a Prophet-style dataframe (ds, y)
        df = pd.DataFrame()
        index=list()
        value=list()
        for i in data:
            index.append(i['ds'])
            value.append(i['y'])
        df['ds'] = index
        df['y'] = value
        #print(df)
        m = Prophet()
        m.fit(df)
        # extend 90 days past the end of the history and predict
        future = m.make_future_dataframe(periods=90)
        forecast = m.predict(future)
        # resample the point forecasts to month-end sums
        temp=pd.DataFrame(forecast[['ds','yhat']])
        temp['ds'] = pd.to_datetime(temp['ds'])
        temp.index = temp['ds']
        temp=temp.resample('M').sum()
        date=temp['yhat'].index.date
        value=list(temp['yhat'])
        output={}
        for i,v in enumerate(date):
            # str(date) is 'YYYY-MM-DD'; [:-3] strips the day -> 'YYYY-MM'
            output[str(v)[:-3]]=value[i]
        #output=json.dumps(output, sort_keys=True)
        #print(jsonify(output))
        return jsonify(output)
    else:
        print("get")
        return "see"
# A welcome message to test our server
@app.route('/')
def index():
    """Landing page used as a quick liveness check."""
    return "<h1>Welcome to our server !!</h1>"
if __name__ == '__main__':
    # Threaded option to enable multiple instances for multiple user access support
    # NOTE(review): this is Flask's development server — front it with a real
    # WSGI server in production.
    app.run(threaded=True, port=5000)
|
983,273 | 4b0a5db284a0bb6db0785be7c99bece6a31c5045 | import zip_file
from typing import Set
import xmltodict
from typing import Set
import tokenizer
# FictionBook 2.0 XML namespace map.
# NOTE(review): not referenced in the visible code — confirm whether the
# xmltodict parsing in Book.read_headers should use it.
NAMESPACES = {'fb2': 'http://www.gribuser.ru/xml/fictionbook/2.0'}
def dict_to_str(v, exclude: Set = frozenset()):
    """Recursively flatten a str / list / dict structure into one string.

    Strings are returned with line feed, tab and carriage-return characters
    removed; list items and dict values are flattened recursively and joined
    with single spaces. Dict entries whose key is in `exclude` are skipped.
    Any other type yields "".
    """
    # frozenset() default instead of the original mutable set([]): the shared
    # default object can now never be mutated between calls.
    ret = ""
    if isinstance(v, str):
        ret = v.replace("\x0a", "").replace("\x09", "").replace("\x0d", "")
    elif isinstance(v, list):
        ret = " ".join(map(lambda x: dict_to_str(x, exclude), v))
    elif isinstance(v, dict):
        ret = " ".join(map(lambda x: dict_to_str(x, exclude),
                           map(lambda x: x[1], filter(
                               lambda y: y[0] not in exclude, v.items()))
                           ))
    return ret
def guess_book_language(book):
    """Return the tokenizer language code for `book`.

    Uses the book's declared `lang` when the tokenizer's LANG_MAP knows it;
    otherwise guesses from title + authors, including the annotation when
    one is present (the placeholder annotation is a single space).
    """
    ret_lang = tokenizer.LANG_MAP.get(book.lang)
    # was `if tokenizer.LANG_MAP.get(book.lang) == None:` — use `is None`
    # and reuse the value already looked up instead of a second dict access
    if ret_lang is None:
        if book.annotation != " ":
            ret_lang = tokenizer.guess_language(
                book.title + " " + book.authors + " " + book.annotation)
        else:
            ret_lang = tokenizer.guess_language(
                book.title + " " + book.authors)
    return ret_lang
class Book:
    """Metadata wrapper around a single FB2 book stored inside a zip archive.

    Holds title/author/annotation/genre/language plus a tokenized `words`
    set derived from that text for searching.
    """

    def __init__(self, **kwargs):
        self.words = None                     # token set, filled by __get_words
        self.zip_file = zip_file.ZipFile(kwargs.get('zip_file'))
        self.book_name = kwargs.get('book_name') or ""
        self.annotation = kwargs.get('annotation') or ""
        self.title = kwargs.get('title') or ""
        self.genre = kwargs.get('genre') or ""
        self.lang = kwargs.get('lang') or ""
        self.authors = kwargs.get('authors') or ""
        self.__get_words()

    def open(self):
        """Open the FB2 entry inside the archive; the caller closes it."""
        return self.zip_file.open(self.book_name)

    def read_headers(self):
        """Parse the FB2 title-info block and refresh the token set."""
        with self.open() as b:
            book = xmltodict.parse(b)
            book_description = book["FictionBook"]["description"]["title-info"]
            # " " placeholders keep downstream string concatenation safe
            self.title = (dict_to_str(
                book_description.get('book-title')) or " ")
            self.annotation = (dict_to_str(
                book_description.get("annotation")) or " ")
            self.annotation = self.annotation.replace(
                "\n", "").replace("\r", "")
            # exclude "id" so author element ids don't leak into the text
            self.authors = (dict_to_str(
                book_description.get("author"), set(["id"])) or " ")
            self.authors = self.authors.replace("\n", "").replace("\r", "")
            self.lang = book_description.get("lang")
            self.lang = guess_book_language(self)
            self.genre = book_description.get("genre")
            self.__get_words()

    def __repr__(self):
        return f"zip: {self.zip_file.__repr__()} book:{self.book_name} language:{self.lang} authors:{self.authors} title:{self.title} "

    def __get_words(self):
        # Tokenize the searchable text (authors + title + annotation).
        text = self.authors + " " + self.title + " " + self.annotation
        self.words = tokenizer.word_tokenize(text, self.lang)
        if self.words is None:  # was `== None`; tokenizer may return None
            self.words = set()
|
class Solution(object):
    def topKFrequent(self, nums, k):
        """Return the k most frequent values in nums, most frequent first.

        :type nums: List[int]
        :type k: int
        :rtype: List[int]
        """
        # BUG FIX: the original referenced `collections` without importing it
        # anywhere, which raised NameError at runtime. Local import keeps the
        # snippet self-contained.
        from collections import Counter
        # most_common(k) already limits the result to the top k, ordered by
        # descending count — no need to build the full list and slice.
        return [value for value, _ in Counter(nums).most_common(k)]
983,275 | f2fe7005718f7797a6fab6caea2ffb4068aedc23 | import time
from datetime import datetime
import requests
# Local chat-server endpoint polled for new messages.
url = 'http://127.0.0.1:5000/messages'
# id of the most recently displayed message; -1 requests the full history
after_id = -1
def pretty_print(message):
    """Print one chat message: 'dd.mm.YYYY HH:MM:SS <name>', text, blank line."""
    stamp = datetime.fromtimestamp(message['timestamp']).strftime('%d.%m.%Y %H:%M:%S')
    header = '{} {}'.format(stamp, message['name'])
    print(header)
    print(message['text'])
    print()
# Poll the server forever, printing any messages newer than the last one seen.
while True:
    # BUG FIX: the original passed `self.after_id`, but there is no `self`
    # at module level — it raised NameError on the first request. Use the
    # module-level `after_id` cursor instead.
    response = requests.get(url, params={'after_id': after_id})
    messages = response.json()['messages']
    for message in messages:
        pretty_print(message)
        after_id = message['id']  # advance the cursor past what we printed
    if not messages:
        # nothing new: back off briefly before polling again
        time.sleep(1)
983,276 | 21dfbbbc92da3979221798d413ea860d35bf8c1c | from _base_model import BaseModel
|
983,277 | 4cf33c30ae21183c3936a83b2e350ca8bcc705ed | import numpy as np
import cv2
import os
import math
from scipy import interpolate
from matplotlib import pyplot as plt
#______________________________________________________________________________
# Sample enrolment / query iris image pair, used by the commented-out
# demo calls further down the file.
im1 = cv2.imread("001_1_1.jpg")
im2 = cv2.imread("001_1_2.jpg")
#______________________________________________________________________________
"""
Loading Images from folder
"""
def load_images(folder):
    """Read every loadable image in `folder`; unreadable files are skipped."""
    images = []
    for filename in os.listdir(folder):
        loaded = cv2.imread(os.path.join(folder, filename))
        if loaded is not None:
            images.append(loaded)
    #print ("Number of photos in the folder: ")
    #print (len(images))
    return images
#______________________________________________________________________________
"""
Processing
"""
def processing(image):
    """Detect the pupil/iris via Hough circles, then unwrap the iris annulus
    into a rectangular polar array (rubber-sheet normalisation) sampled with
    a bivariate spline. Shows two diagnostic plots along the way.

    NOTE(review): if no circle is detected (`circles is None`) the
    pupil_X/iris_R etc. variables are never assigned and the code below
    raises NameError — confirm inputs always contain a detectable pupil.
    """
    c_image = image.copy()
    image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    result = image.copy()  # untouched grayscale copy, sampled at the end
    image = cv2.medianBlur(image,19)
    circles = cv2.HoughCircles(image,cv2.HOUGH_GRADIENT,1.4,10,param1=50,param2=120,minRadius=0,maxRadius=0)
    height=20
    width=240
    r=0
    mask = np.zeros((height,width),np.uint8)
    if circles is not None:
        for i in circles[0,:]:
            # draw the detected pupil circle and the estimated iris circle
            cv2.circle(c_image,(i[0],i[1]),i[2],(0,255,0),2)
            cv2.circle(c_image,(i[0],i[1]),int(i[2]+(2400/i[2])),(0,255,0),2)
            cv2.circle(mask,(i[0],i[1]),i[2],(255,255,255),thickness=0)
            r=i[2]
            pupil_X=i[0]
            pupil_Y=i[1]
            pupil_R=i[2]
            # iris assumed concentric with the pupil; the radius offset
            # 2400/r shrinks as the pupil grows (empirical relation)
            iris_X=i[0]
            iris_Y=i[1]
            iris_R=i[2]+(2400/i[2])
    plt.title("Iris Detection")
    plt.imshow(c_image,cmap='gray')
    plt.show()
    angledivisions=239
    radiuspixels=22
    r=range(0,(radiuspixels-1),1)
    theta=np.linspace(0,360,num=240)
    theta=list(theta)
    # offset of the pupil centre relative to the iris centre (zero here,
    # since both use the same detected centre)
    ox=float(pupil_X-iris_X)
    oy=float(pupil_Y-iris_Y)
    # NOTE(review): sgn is computed but never used below
    if ox<=0:
        sgn=-1
    elif ox>0:
        sgn=1
    if ox==0 and oy>0:
        sgn=1
    ap=np.ones([1,240])
    ap=list(ap[0])
    a=[i* ((ox**2)+(oy**2)) for i in ap]
    if ox==0:
        phi=90
    else:
        phi=math.degrees(math.atan(float(oy/ox)))
    b=[(math.cos(math.pi-math.radians(phi)-math.radians(i))) for i in theta]
    # per-angle radial distance from the pupil boundary to the iris boundary
    term1=[(math.sqrt(i)*j) for i,j in zip(a,b)]
    term2=[i*(j**2) for i,j in zip(a,b)]
    term3=[i-(iris_R**2) for i in a]
    rk=[i + math.sqrt(j-k) for i,j,k in zip(term1,term2,term3)]
    r=[i-pupil_R for i in rk]
    r=np.asmatrix(r)
    term1=np.ones([1,radiuspixels])
    term1=np.asmatrix(term1)
    term1=term1.transpose()
    rmat2=np.matmul(term1,r)
    term1=np.ones(((angledivisions+1),1))
    term1=np.asmatrix(term1)
    # radial sampling positions, linearly spaced from pupil to iris boundary
    term2= np.linspace(0,1,(radiuspixels))
    term2=np.asmatrix(term2)
    term3=np.matmul(term1,term2)
    term3=np.asmatrix(term3)
    term3=term3.transpose()
    rmat3=np.multiply(rmat2,term3)
    rmat4=rmat3+pupil_R
    # drop the innermost and outermost rings (boundary artefacts)
    rmat=rmat4[1:radiuspixels-1]
    term1=np.ones(((radiuspixels-2),1))
    term2=[math.cos(math.radians(i)) for i in theta]
    term2=np.asmatrix(term2)
    term3=[math.sin(math.radians(i)) for i in theta]
    term3=np.asmatrix(term3)
    xcosmat=np.matmul(term1,term2)
    xsinmat=np.matmul(term1,term3)
    # cartesian sample coordinates for every (radius, angle) pair
    xot=np.multiply(rmat,xcosmat)
    yot=np.multiply(rmat,xsinmat)
    xo=pupil_X+xot
    yo=pupil_Y-yot
    xt=np.linspace(0,c_image.shape[0]-1,c_image.shape[0])
    yt=np.linspace(0,c_image.shape[1]-1,c_image.shape[1])
    x,y=np.meshgrid(xt,yt)
    # sample the untouched grayscale image at the polar coordinates
    ip=interpolate.RectBivariateSpline(xt,yt,result)
    polar_array=ip.ev(yo,xo)
    #polar_array = np.asarray(polar_array,dtype=np.uint8)
    plt.title("Normalised")
    plt.imshow(polar_array,cmap='gray')
    plt.show()
    return polar_array
#______________________________________________________________________________
#p1 = processing(im1)
#p2 = processing(im2)
#______________________________________________________________________________
"""
Template Generation
"""
def temp_gen(polar_array):
    """Encode a normalised iris image into a 20x480 binary phase template.

    A Gabor kernel is applied in the frequency domain; every complex
    response pixel is quantised to two bits from the signs of its real and
    imaginary parts (quadrant coding).
    """
    kernel = cv2.getGaborKernel((240, 20), 0.05, 20, 18, 1, 0, cv2.CV_64F)
    h, w = kernel.shape[:2]
    g_kernel = cv2.resize(kernel, (240, 20), interpolation=cv2.INTER_CUBIC)
    # convolve via pointwise multiplication in the frequency domain
    filtered = np.fft.ifft2(np.fft.fft2(g_kernel) * np.fft.fft2(polar_array))
    bits = []
    for sample in filtered.ravel():
        re_nonneg = np.real(sample) >= 0
        im_nonneg = np.imag(sample) >= 0
        if re_nonneg and im_nonneg:
            bits.append('11')
        elif re_nonneg:
            bits.append('10')   # re >= 0, im < 0
        elif im_nonneg:
            bits.append('01')   # re < 0, im >= 0
        else:
            bits.append('00')   # re < 0, im < 0
    template = np.asarray(list(''.join(bits)))
    template = np.reshape(template, [20, 480])
    return template.astype(int)
#______________________________________________________________________________
#en_Template = temp_gen(p1)
#qu_Template = temp_gen(p2)
#______________________________________________________________________________
"""
Mask Generation
"""
def mask_gen(polar_array):
    """Build a 20x480 binary noise mask marking usable iris pixels.

    Adaptive thresholding selects iris pixels; every surviving pixel is
    encoded as '11' and every masked pixel as '00', mirroring the template
    layout produced by temp_gen.
    """
    polar_array = np.asarray(polar_array, dtype=np.uint8)
    #clahe = cv2.createCLAHE(clipLimit=50.0, tileGridSize=(2,2))
    #cl1 = clahe.apply(polar_array)
    ad_th = cv2.adaptiveThreshold(polar_array, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 17)
    _, ot = cv2.threshold(polar_array, 100, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    #ad_th = cv2.medianBlur(ad_th,1)
    #plt.imshow(cl1,cmap="gray")
    kept = cv2.bitwise_and(ad_th, polar_array, mask=None)
    plt.title("Iris pixels")
    plt.imshow(kept, cmap="gray")
    plt.show()
    flags = (kept > 0).ravel()
    encoded = ''.join('11' if flag else '00' for flag in flags)
    noise_mask = np.asarray(list(encoded))
    noise_mask = np.reshape(noise_mask, [20, 480])  # same shape as the normalised image
    return noise_mask.astype(int)
#______________________________________________________________________________
#en_Mask = mask_gen(p1)
#qu_Mask = mask_gen(p2)
#______________________________________________________________________________
"""
Score Calculation
"""
def CalculateScore3(en_Template, en_Mask, qu_Template, qu_Mask):
    """Masked fractional Hamming distance between two iris templates.

    Counts the bit positions where the templates disagree AND both masks
    mark the bit as valid, divided by the number of jointly-valid bits.
    0 means identical irises over the valid region; higher means less alike.
    """
    disagreements = np.bitwise_and(
        np.bitwise_and(np.bitwise_xor(en_Template, qu_Template), en_Mask),
        qu_Mask)
    jointly_valid = np.bitwise_and(en_Mask, qu_Mask)
    return float(np.count_nonzero(disagreements)) / float(np.count_nonzero(jointly_valid))
#______________________________________________________________________________
#result = CalculateScore3(en_Template,en_Mask,qu_Template,qu_Mask)
#print (result)
#______________________________________________________________________________
"""
Generating tuples of images
"""
def make_tuple():
    """Pair the enrolment ('a') and query ('b') image sets positionally."""
    enrol_images = load_images(folder='C:/Users/Tewari\'s/Documents/Database/a')
    query_images = load_images(folder='C:/Users/Tewari\'s/Documents/Database/b')
    return list(zip(enrol_images, query_images))
#______________________________________________________________________________
def main(zipp):
    """Interactively walk (enrol, query) image pairs, score each pair and
    report the running average matching score.

    NOTE(review): if the user quits before any pair is scored, im_count is 0
    and `avg = total/im_count` raises ZeroDivisionError; scores in
    (0.15, 0.25] print no verdict at all — confirm both are intended.
    """
    total = 0
    im_count = 0
    for tup in zipp:
        im1,im2 = tup
        # any input other than 1 aborts the whole loop
        c = int(input("Press 1 to process: "))
        if c == 1:
            p1 = processing(im1)
            p2 = processing(im2)
        else: break
        d = int(input("Press 1 to check score: "))
        if d==1:
            en_Template = temp_gen(p1)
            qu_Template = temp_gen(p2)
            en_Mask = mask_gen(p1)
            qu_Mask = mask_gen(p2)
            score_de = CalculateScore3(en_Template,en_Mask,qu_Template,qu_Mask)
            print ("The score is: ", round(score_de,3))
            # lower distance = better match
            if score_de>0 and score_de<=0.15:
                print ("Accurate Match")
            elif score_de>0.25:
                print ("Inaccurate Match")
        else: break
        im_count +=1
        total+=score_de
    avg = total/im_count
    print("The average score is: ",round(avg,3))
    print("**End of Iteration**")
main(make_tuple())
#______________________________________________________________________________
|
983,278 | 1054be8f04852149ccc2243dfc45ac1242e88b4d | import joblib
import warnings
import pandas as pd
import numpy as np
import torch
from sklearn.base import BaseEstimator
from cdqa.retriever import TfidfRetriever, BM25Retriever
from cdqa.utils.converters import generate_squad_examples
from cdqa.reader import BertProcessor, BertQA
# Maps the `retriever` constructor argument of QAPipeline to the class
# implementing it.
RETRIEVERS = {"bm25": BM25Retriever, "tfidf": TfidfRetriever}
class QAPipeline(BaseEstimator):
"""
A scikit-learn implementation of the whole cdQA pipeline
Parameters
----------
reader: str (path to .joblib) or .joblib object of an instance of BertQA (BERT model with sklearn wrapper), optional
retriever: "bm25" or "tfidf"
The type of retriever
retrieve_by_doc: bool (default: True). If Retriever will rank by documents
or by paragraphs.
kwargs: kwargs for BertQA(), BertProcessor(), TfidfRetriever() and BM25Retriever()
Please check documentation for these classes
Examples
--------
>>> from cdqa.pipeline import QAPipeline
>>> qa_pipeline = QAPipeline(reader='bert_qa_squad_vCPU-sklearn.joblib')
>>> qa_pipeline.fit_retriever(df=df)
>>> prediction = qa_pipeline.predict(query='When BNP Paribas was created?')
>>> from cdqa.pipeline import QAPipeline
>>> qa_pipeline = QAPipeline()
>>> qa_pipeline.fit_reader('train-v1.1.json')
>>> qa_pipeline.fit_retriever(df=df)
>>> prediction = qa_pipeline.predict(X='When BNP Paribas was created?')
"""
    def __init__(self, reader=None, retriever="bm25", retrieve_by_doc=False, **kwargs):
        # Validate the retriever choice against the supported registry.
        if retriever not in RETRIEVERS:
            raise ValueError(
                "You provided a type of retriever that is not supported. "
                + "Please provide a retriver in the following list: "
                + str(list(RETRIEVERS.keys()))
            )
        retriever_class = RETRIEVERS[retriever]
        # Separating kwargs
        # Each component receives only the kwargs its own __init__ declares,
        # discovered by inspecting the constructor's co_varnames.
        kwargs_bertqa = {
            key: value
            for key, value in kwargs.items()
            if key in BertQA.__init__.__code__.co_varnames
        }
        kwargs_processor = {
            key: value
            for key, value in kwargs.items()
            if key in BertProcessor.__init__.__code__.co_varnames
        }
        kwargs_retriever = {
            key: value
            for key, value in kwargs.items()
            if key in retriever_class.__init__.__code__.co_varnames
        }
        # `reader` may be an instance, a path to a .joblib dump, or omitted
        # (in which case a fresh BertQA is built).
        if not reader:
            self.reader = BertQA(**kwargs_bertqa)
        elif type(reader) == str:
            self.reader = joblib.load(reader)
        else:
            self.reader = reader
        self.processor_train = BertProcessor(is_training=True, **kwargs_processor)
        self.processor_predict = BertProcessor(is_training=False, **kwargs_processor)
        self.retriever = retriever_class(**kwargs_retriever)
        self.retrieve_by_doc = retrieve_by_doc
        if torch.cuda.is_available():
            # self.cuda() is defined outside this view — presumably moves the
            # reader model to GPU; TODO confirm.
            self.cuda()
    def fit_retriever(self, df: pd.DataFrame = None):
        """ Fit the QAPipeline retriever to a list of documents in a dataframe.

        Parameters
        ----------
        df: pandas.Dataframe
            Dataframe with the following columns: "title", "paragraphs"

        Notes
        -----
        When ``retrieve_by_doc`` is True, the caller's ``df`` is stored and
        mutated in place (a joined "content" column is added).
        """
        if self.retrieve_by_doc:
            # one retrievable unit per document: join its paragraphs
            self.metadata = df
            self.metadata["content"] = self.metadata["paragraphs"].apply(
                lambda x: " ".join(x)
            )
        else:
            # one retrievable unit per paragraph
            # NOTE(review): _expand_paragraphs is defined outside this view.
            self.metadata = self._expand_paragraphs(df)
        self.retriever.fit(self.metadata)
        return self
    def fit_reader(self, data=None):
        """ Fit the QAPipeline reader on an annotated question-answering dataset.

        Parameters
        ----------
        data: dict str-path to json file
            Annotated dataset in squad-like for Reader training
        """
        # Convert the raw dataset into BERT examples/features, then train.
        train_examples, train_features = self.processor_train.fit_transform(data)
        self.reader.fit(X=(train_examples, train_features))
        return self
def predict(
    self,
    query: str = None,
    n_predictions: int = None,
    retriever_score_weight: float = 0.35,
    return_all_preds: bool = False,
):
    """ Compute prediction of an answer to a question

    Parameters
    ----------
    query: str
        Sample (question) to perform a prediction on
    n_predictions: int or None (default: None).
        Number of returned predictions. If None, only one prediction is return
    retriever_score_weight: float (default: 0.35).
        The weight of retriever score in the final score used for prediction.
        Given retriever score and reader average of start and end logits, the final score used for ranking is:
        final_score = retriever_score_weight * retriever_score + (1 - retriever_score_weight) * (reader_avg_logit)
    return_all_preds: boolean (default: False)
        whether to return a list of all predictions done by the Reader or not

    Returns
    -------
    if return_all_preds is False:
        prediction: tuple (answer, title, paragraph, score/logit)
    if return_all_preds is True:
        List of dictionnaries with all metadada of all answers outputted by the Reader
        given the question.
    """
    if not isinstance(query, str):
        raise TypeError(
            "The input is not a string. Please provide a string as input."
        )
    # Fix: the original check `isinstance(...) or n is None or n < 1` let
    # 0/negative ints through and could itself raise while comparing a
    # non-int with 1. Validate explicitly instead.
    if n_predictions is not None and (
        not isinstance(n_predictions, int) or n_predictions < 1
    ):
        raise TypeError("n_predictions should be a positive Integer or None")

    # Retrieve candidate paragraphs/documents, build SQuAD-style examples,
    # then let the reader extract and rank answers.
    best_idx_scores = self.retriever.predict(query)
    squad_examples = generate_squad_examples(
        question=query,
        best_idx_scores=best_idx_scores,
        metadata=self.metadata,
        retrieve_by_doc=self.retrieve_by_doc,
    )
    examples, features = self.processor_predict.fit_transform(X=squad_examples)
    prediction = self.reader.predict(
        X=(examples, features),
        n_predictions=n_predictions,
        retriever_score_weight=retriever_score_weight,
        return_all_preds=return_all_preds,
    )
    return prediction
def to(self, device):
    """Move the reader model to *device* ('cpu' or 'cuda'); returns self for chaining."""
    if device not in ("cpu", "cuda"):
        raise ValueError("Attribute device should be 'cpu' or 'cuda'.")
    reader = self.reader
    reader.model.to(device)
    reader.device = torch.device(device)
    return self
def cpu(self):
    """Place the reader model on the CPU and record the device; returns self."""
    reader = self.reader
    reader.model.cpu()
    reader.device = torch.device("cpu")
    return self
def cuda(self):
    """Place the reader model on the GPU and record the device; returns self."""
    reader = self.reader
    reader.model.cuda()
    reader.device = torch.device("cuda")
    return self
def dump_reader(self, filename):
    """ Dump reader model to a .joblib object
    """
    # Move to CPU first so the serialized weights are device-independent.
    self.cpu()
    joblib.dump(self.reader, filename)
    # Restore GPU placement if one is available.
    if torch.cuda.is_available():
        self.cuda()
@staticmethod
def _expand_paragraphs(df):
# Snippet taken from: https://stackoverflow.com/a/48532692/11514226
lst_col = "paragraphs"
df = pd.DataFrame(
{
col: np.repeat(df[col].values, df[lst_col].str.len())
for col in df.columns.drop(lst_col)
}
).assign(**{lst_col: np.concatenate(df[lst_col].values)})[df.columns]
df["content"] = df["paragraphs"]
return df.drop("paragraphs", axis=1)
|
983,279 | d4131c07828a73d5f6fe4e046b553157a33a7ed0 | import ssl
ssl._create_default_https_context = ssl._create_unverified_context
import urllib.request as req

# Fetch the PTT movie board index; a browser User-Agent is required or PTT blocks the request.
src = "https://www.ptt.cc/bbs/movie/index.html"
request = req.Request(src, headers={
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36"
})
with req.urlopen(request) as response:
    data = response.read().decode("utf-8")

import bs4
root = bs4.BeautifulSoup(data, "html.parser")
titles = root.find_all("div", class_="title")
for title in titles:
    # title.a is None for deleted posts; keep only titles containing "討論"
    if title.a is not None and "討論" in title.a.string:
        print(title.a.string)

import email.message
msg = email.message.EmailMessage()
msg["From"] = "kueifangp@gmail.com"
msg["To"] = "kueifangp@gmail.com"
msg["Subject"] = "您要的PTT更新來ㄌ"
# BUG FIX: set_content must be *called*, not assigned — the original
# `msg.set_content= "..."` left the message without a body and clobbered
# the method on this instance.
msg.set_content("爬蟲的成果")

import smtplib
server = smtplib.SMTP_SSL("smtp.gmail.com", 465)
# NOTE(review): credentials are hard-coded; move the password to an env var.
server.login("kueifangp@gmail.com", "password")
server.send_message(msg)
server.close()
|
983,280 | 22c25f072424248cf68e14f0bf40c98bdbd2b0a4 | class Topic:
def __init__(self, name):
self.name = name
class Educator:
    """Lightweight record describing a tutorial author/educator."""

    def __init__(self, name, infoURL, id=None, avgRating=None):
        # id and avgRating default to None for educators not yet stored/rated.
        self.id = id
        self.avgRating = avgRating
        self.name = name
        self.infoURL = infoURL
class Tutorial:
    """Record describing a single tutorial hosted on some platform."""

    # NOTE(review): the parameter is (mis)spelled 'lenght' while the attribute
    # is 'length'; the spelling is kept because keyword callers may rely on it.
    def __init__(self, title, educatorID, platform, url, skill,
                 lenght=None, info=None, ratingNum=0, tutorialRating=None):
        self.title, self.educatorID = title, educatorID
        self.platform, self.url, self.skill = platform, url, skill
        self.length, self.info = lenght, info
        self.ratingNum, self.tutorialRating = ratingNum, tutorialRating
|
983,281 | 4c866b583d1e1d02aae3df8510697e641066bdde | # Generated by Django 2.2.7 on 2021-10-14 02:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional (blank-allowed) 'email' field to the listing model."""

    dependencies = [
        ('list', '0004_listing_slug'),
    ]

    operations = [
        migrations.AddField(
            model_name='listing',
            name='email',
            # max_length=254 is Django's EmailField default (RFC-compatible).
            field=models.EmailField(blank=True, max_length=254),
        ),
    ]
|
983,282 | d7514dd696de6d0ba98adf5eb4cd39acfe113f5b | from drangler.FeatureExtractor import get_features_from_frame
from time import time
import numpy as np
from sklearn.externals import joblib
# trained_model = load("trained_model_svm.sav")
trained_model = joblib.load("trained_model_rf.sav")
def predict(data):
    """Classify one raw frame with the pre-loaded model and report timing.

    Extracts features from *data*, reshapes them to the (1, n_features) array
    the model expects, prints the predicted label and elapsed time, and now
    also *returns* the label (the original printed it but returned None).
    """
    start = time()
    features = get_features_from_frame(data)
    features = np.array(features).reshape(1, -1)
    label = trained_model.predict(features)[0]
    print(label)
    print(f"Time taken: {time() - start}s")
    return label
983,283 | ef8f47726df84e540a69a30c1e8085ae64b9e44a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains astronomical and physical constants for use in Astropy or other
places.
The package contains a `~astropy.constants.cgs` and `~astropy.constants.si`
module that define constants in CGS and SI units, respectively. A typical use
case might be::
from astropy.constants.cgs import c
... define the mass of something you want the rest energy of as m ...
E = m*c**2
"""
from . import cgs
from . import si
from .constant import Constant
# Update the docstring to include a list of units from the si
# module. The rows with lots of '=' signs are to tell Sphinx to
# display a table in the documentation.
__doc__ += """
The following constants are defined in `~astropy.constants.cgs` and
`~astropy.constants.si`. The `si` and `cgs` docstrings list the units
and values in each system.
========== ==============================
"""
for nm, val in sorted(si.__dict__.items()):
    if isinstance(val, Constant):
        __doc__ += '{0:^10} {1}\n'.format(nm, val.name)
__doc__ += """\
========== ==============================
"""

# Update the si and cgs module docstrings with a name/value/unit table.
for module in si, cgs:
    module.__doc__ += """
========== ============== ================ =========================
   Name        Value            Unit            Description
========== ============== ================ =========================
"""
    for nm, val in sorted(module.__dict__.items()):
        if isinstance(val, Constant):
            module.__doc__ += '{0:^10} {1:^14.9g} {2:^16} {3}\n'.format(
                nm, val.value, val.unit, val.name)
    module.__doc__ += """\
========== ============== ================ =========================
"""
# Clean up the loop variables so they don't leak into the module namespace.
del nm, val
|
983,284 | 6d528b9aa51ee61a83ed1c94ea1f81c6072d87e3 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
2015.05.26:
add put the output files into a new folder 'list to tar'
put the tar list into the new folder 'tar'
modified from list_to_tf.py
------
input:
a list of mir name
a mir_vs_tar.txt
output:
a list of tar names, repeats are not compressed
a csv file of mir_tar.csv:
mir, tar
a quality file:(pre = mir, tf = tar)
pre_name, pre_tf_count: a list of precursor with the number of TFs found;
pre_count: number of precursors operated;
pre_found_count: number of precurors with TF found
tf_sum: sum number of TFs found;(=sum(pre_tf_count))
tf_average: average of pre_tf_count(=tf_sum/pre_found_count)
usage:
python list_to_tf.py mir_list.txt mir_tar.csv
2015.05.24 by xnm
'''
import sys
import os

# Command-line arguments: a list of mir names and the mir-vs-target database.
input_list = sys.argv[1]
input_database = sys.argv[2]

# Output folders: csv/quality files go to list_to_tar/, target lists to tar/.
dir_list_to_tar = 'list_to_tar'
dir_tar = 'tar'
path = os.getcwd()
path_tar = os.path.join(path, dir_tar)
path_list_to_tar = os.path.join(path,dir_list_to_tar)
if not os.path.isdir(path_tar):
    os.makedirs(path_tar)
if not os.path.isdir(path_list_to_tar):
    os.makedirs(path_list_to_tar)

file_list = open(input_list,'r')
file_database = open(input_database,'r')
# Output names derive from the input name by stripping its 4-char extension.
file_output = open(path_tar+'/'+input_list[:-4]+'_tar.txt','w')
file_csv = open(path_list_to_tar+'/'+input_list[:-4]+'_mir_tar.csv','w')
file_qua = open(path_list_to_tar+'/'+input_list[:-4]+'_quality.txt','w')

# initialization of quality counts
pre_count = 0
pre_found_count = 0
tf_sum = 0

# finding the accordant TFs
# Database lines are "mir TF" pairs; every mir in the input list is matched
# against the full database (repeats in the output are intentional).
database = file_database.readlines()
for lines in file_list:
    pre_tf_count = 0
    pre_found = 0
    pre_count += 1
    pre_name = lines.rstrip()
    pre_name = pre_name.upper()
    for i in database:
        i = i.rstrip()
        data = i.split(' ')
        mir = data[0]
        TF = data[1]
        if pre_name == mir:
            pre_found = 1
            file_output.write(TF+'\n')
            file_csv.write(TF+','+pre_name+'\n')
            pre_tf_count += 1
    tf_sum += pre_tf_count
    file_qua.write(pre_name+' '+str(pre_tf_count)+'\n')
    if pre_found == 1:
        pre_found_count += 1

#write quality file
if pre_found_count == 0:
    tf_average = 0
else:
    tf_average = round(tf_sum*1.0/pre_found_count,2)
file_qua.write('------------------------------\n')
file_qua.write('number of mirs operated = '+str(pre_count)+'\n')
file_qua.write('number of mirs with tar found = '+str(pre_found_count)+'\n')
file_qua.write(' sum number of tars found = '+str(tf_sum)+'\n')
file_qua.write('average of mir_tar_count_found = '+str(tf_average)+'/mir \n')

file_list.close()
file_database.close()
file_output.close()
file_csv.close()
file_qua.close()
|
983,285 | b98a71f6ec6b3fa2e8e08ea0d33b84e5e9a09362 | from __future__ import print_function
import argparse
import numpy as np
import chainer
from PIL import Image
from net import *
def gen_dataset(data):
    """Build a TupleDataset of RGB images (label 0) and their BGR flips (label 1)."""
    count = len(data)
    rgb_images = data.copy()
    bgr_images = data[:, ::-1, :, :]  # reverse the channel axis
    images = np.concatenate((rgb_images, bgr_images), axis=0)
    labels = np.concatenate(
        (np.zeros((count,), np.int32), np.ones((count,), np.int32)), axis=0
    )
    return chainer.datasets.tuple_dataset.TupleDataset(images, labels)
def main():
    """Load a trained Net, classify one image as channel-order RGB or BGR, print the result."""
    parser = argparse.ArgumentParser(description='Chainer CIFAR example:')
    parser.add_argument('--image', '-i', help='Input image', required=True)
    parser.add_argument('--model', '-m', default='./result/net_epoch_30',
                        help='trained model')
    args = parser.parse_args()

    net = Net(2)  # two output classes
    chainer.serializers.load_npz(args.model, net)

    # input data: resize to 32x32, scale to [0, 1], HWC -> CHW, add batch dim
    img = Image.open(args.image)
    img = img.resize((32, 32))
    img = np.asarray(img)
    img = img.astype(np.float32) / 255
    img = np.transpose(img, (2, 0, 1))
    x = chainer.Variable(img[np.newaxis,:,:,:])

    # Inference mode: no training-time behavior, no gradient graph.
    with chainer.using_config('train', False):
        with chainer.using_config('enable_backprop', False):
            y = F.softmax(net(x))
    # Higher probability for class 0 is reported as RGB, otherwise BGR.
    if y.data[0,0] > y.data[0,1]:
        print('RGB')
    else:
        print('BGR')
if __name__ == '__main__':
main() |
983,286 | 9ce8a505db55adeb0cefbdf936217a517f8f23f5 | #! /usr/bin/env python
# coding=utf-8
#================================================================
# Copyright (C) 2019 * Ltd. All rights reserved.
#
# Editor : VIM
# File name : dataset.py
# Author : tsing-cv
# Created date: 2019-02-14 18:12:26
# Description :
#
#================================================================
import sys
sys.path.append("../")
from config import cfgs
from core.nets import yolov3
from core.data_preparation.dataset import dataset, Parser
from core.utils import utils
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
import numpy as np
class Train():
    """TF1 multi-GPU training driver for YOLOv3.

    Builds the tf.data input pipeline, clones the model across the GPUs
    listed in cfgs.gpus, sums per-clone gradients, and runs training inside
    a MonitoredTrainingSession with periodic evaluation.
    """

    def __init__(self):
        # Construction runs the whole job: data pipeline, graph, training loop.
        tf.logging.set_verbosity(tf.logging.DEBUG)
        self.dataset_batch()
        self.create_clones()
        self.train()

    @staticmethod
    def get_update_op():
        """
        Extremely important for BatchNorm
        """
        # Group the UPDATE_OPS collection (BatchNorm moving stats) so it can
        # be attached to the train op.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        if update_ops is not None:
            return tf.group(*update_ops)
        return None

    @staticmethod
    def sum_gradients(clone_grads):
        # Sum gradients variable-by-variable across clones. The loss was
        # already divided by the clone count, so summing equals averaging.
        averaged_grads = []
        for grad_and_vars in zip(*clone_grads):
            grads = []
            var = grad_and_vars[0][1]
            try:
                for g, v in grad_and_vars:
                    assert v == var
                    grads.append(g)
                grad = tf.add_n(grads, name = v.op.name + '_summed_gradients')
            except:
                # NOTE(review): bare except silently drops variables whose
                # gradient is None (e.g. frozen layers) — presumably intended.
                # import pdb
                # pdb.set_trace()
                continue
            averaged_grads.append((grad, v))
            # tf.summary.histogram("variables_and_gradients_" + grad.op.name, grad)
            # tf.summary.histogram("variables_and_gradients_" + v.op.name, v)
            # tf.summary.scalar("variables_and_gradients_" + grad.op.name+\
            # '_mean/var_mean', tf.reduce_mean(grad)/tf.reduce_mean(var))
            # tf.summary.scalar("variables_and_gradients_" + v.op.name+'_mean',tf.reduce_mean(var))
        return averaged_grads

    @staticmethod
    def L2_Regularizer_Loss(is_freeze_batch_norm=True):
        # Sum of L2 norms of trainable weights, excluding biases (and also
        # excluding BatchNorm beta/gamma when BN is not frozen).
        if is_freeze_batch_norm:
            trainable_variables = [v for v in tf.trainable_variables() if 'bias' not in v.name]
        else:
            trainable_variables = [v for v in tf.trainable_variables() if 'beta' not in v.name
                                   and 'gamma' not in v.name and 'bias' not in v.name]
        lossL2 = tf.add_n([tf.nn.l2_loss(v) for v in trainable_variables])
        return lossL2

    def dataset_batch(self):
        # Build train/test pipelines; the is_training placeholder selects
        # which one feeds the graph via tf.cond.
        tf.logging.info("Loading dataset >>>\n\tTrain dataset is in {}".format(cfgs.train_tfrecord))
        parser = Parser(cfgs.IMAGE_H, cfgs.IMAGE_W, cfgs.ANCHORS, cfgs.NUM_CLASSES)
        trainset = dataset(parser, cfgs.train_tfrecord, cfgs.BATCH_SIZE, shuffle=cfgs.SHUFFLE_SIZE)
        testset = dataset(parser, cfgs.test_tfrecord , cfgs.BATCH_SIZE, shuffle=None)
        self.is_training = tf.placeholder(tf.bool)
        self.example = tf.cond(self.is_training, lambda: trainset.get_next(), lambda: testset.get_next())

    def create_clones(self):
        # Optimizer state and gradient aggregation live on the CPU; one model
        # clone is placed on each configured GPU with shared variables.
        with tf.device('/cpu:0'):
            self.global_step = tf.train.create_global_step()
            self.learning_rate = tf.train.exponential_decay(cfgs.learning_rate,
                                                            self.global_step,
                                                            decay_steps=cfgs.DECAY_STEPS,
                                                            decay_rate=cfgs.DECAY_RATE,
                                                            staircase=True)
            optimizer = tf.train.MomentumOptimizer(self.learning_rate, momentum=0.9, name='Momentum')
            tf.summary.scalar('learning_rate', self.learning_rate)
            # place clones
            losses = 0 # for summary only
            gradients = []
            for clone_idx, gpu in enumerate(cfgs.gpus):
                reuse = clone_idx > 0  # share variables across GPU clones
                with tf.variable_scope(tf.get_variable_scope(), reuse = reuse):
                    with tf.name_scope('clone_{}'.format(clone_idx)) as clone_scope:
                        with tf.device(gpu) as clone_device:
                            self.images, *self.y_true = self.example
                            model = yolov3.yolov3(cfgs.NUM_CLASSES, cfgs.ANCHORS)
                            pred_feature_map = model.forward(self.images, is_training=self.is_training)
                            self.loss = model.compute_loss(pred_feature_map, self.y_true)
                            self.y_pred = model.predict(pred_feature_map)
                            self.total_loss = self.loss[0] / len(cfgs.gpus)
                            losses += self.total_loss
                            # L2 regularization is added on clone 0 only.
                            if clone_idx == 0:
                                regularization_loss = 0.0001*self.L2_Regularizer_Loss()
                                self.total_loss += regularization_loss
                            else:
                                regularization_loss = 0
                            tf.summary.scalar("Loss/Losses", losses)
                            tf.summary.scalar("Loss/Regular_loss", regularization_loss)
                            tf.summary.scalar("Loss/Total_loss", self.total_loss)
                            tf.summary.scalar("Loss/Loss_xy", self.loss[1])
                            tf.summary.scalar("Loss/Loss_wh", self.loss[2])
                            tf.summary.scalar("Loss/Loss_confs", self.loss[3])
                            tf.summary.scalar("Loss/Loss_class", self.loss[4])
                            clone_gradients = optimizer.compute_gradients(self.total_loss)
                            gradients.append(clone_gradients)
            # add all gradients together
            # note that the gradients do not need to be averaged, because the average operation has been done on loss.
            averaged_gradients = self.sum_gradients(gradients)
            apply_grad_op = optimizer.apply_gradients(averaged_gradients, global_step=self.global_step)
            train_ops = [apply_grad_op]
            bn_update_op = self.get_update_op()
            if bn_update_op is not None:
                train_ops.append(bn_update_op)
            # moving average
            if cfgs.using_moving_average:
                tf.logging.info('\n{}\n\tusing moving average in training, with decay = {}\n{}'.format(
                    '***'*20, 1-cfgs.moving_average_decay, '***'*20))
                ema = tf.train.ExponentialMovingAverage(cfgs.moving_average_decay)
                ema_op = ema.apply(tf.trainable_variables())
                with tf.control_dependencies([apply_grad_op]):
                    # ema after updating
                    train_ops.append(tf.group(ema_op))
            self.train_op = control_flow_ops.with_dependencies(train_ops, losses, name='train_op')

    def train(self):
        # Hooks: periodic summaries and console logging of the loss terms.
        summary_hook = tf.train.SummarySaverHook(save_steps=20,
                                                 output_dir=cfgs.checkpoint_path,
                                                 summary_op=tf.summary.merge_all())
        logging_hook = tf.train.LoggingTensorHook(tensors={'total_loss': self.total_loss.name,
                                                           'global_step': self.global_step.name,
                                                           'learning_rate': self.learning_rate.name,
                                                           'loss_xy': self.loss[1].name,
                                                           'loss_wh': self.loss[2].name,
                                                           'loss_confs': self.loss[3].name,
                                                           'loss_class': self.loss[4].name},
                                                  every_n_iter=2)
        sess_config = tf.ConfigProto(log_device_placement = False, allow_soft_placement = True)
        # Negative fraction means grow-as-needed; positive pins a fixed share.
        if cfgs.gpu_memory_fraction < 0:
            sess_config.gpu_options.allow_growth = True
        elif cfgs.gpu_memory_fraction > 0:
            sess_config.gpu_options.per_process_gpu_memory_fraction = cfgs.gpu_memory_fraction
        with tf.train.MonitoredTrainingSession(master='',
                                               is_chief=True,
                                               checkpoint_dir=cfgs.checkpoint_path,
                                               hooks=[tf.train.StopAtStepHook(last_step=cfgs.max_number_of_steps),
                                                      # tf.train.NanTensorHook(self.total_loss),
                                                      summary_hook,
                                                      logging_hook],
                                               save_checkpoint_steps=1000,
                                               save_summaries_steps=20,
                                               config=sess_config,
                                               stop_grace_period_secs=120,
                                               log_step_count_steps=cfgs.log_every_n_steps) as mon_sess:
            while not mon_sess.should_stop():
                _,step,y_p,y = mon_sess.run([self.train_op, self.global_step, self.y_pred, self.y_true], feed_dict={self.is_training:True})
                if step%cfgs.eval_interval == 0:
                    # Periodic recall/precision on both a train and a validation batch.
                    train_rec_value, train_prec_value = utils.evaluate(y_p,y)
                    y_pre,y_gt = mon_sess.run([self.y_pred, self.y_true], feed_dict={self.is_training:False})
                    test_rec_value, test_prec_value = utils.evaluate(y_pre,y_gt)
                    tf.logging.info("\n=======================> evaluation result <================================\n")
                    tf.logging.info("=> STEP %10d [TRAIN]:\trecall:%7.4f \tprecision:%7.4f" %(step+1, train_rec_value, train_prec_value))
                    tf.logging.info("=> STEP %10d [VALID]:\trecall:%7.4f \tprecision:%7.4f" %(step+1, test_rec_value, test_prec_value))
                    tf.logging.info("\n=======================> evaluation result <================================\n")
if __name__ == "__main__":
Train()
# sess = tf.Session()
# imgs, y = sess.run([t.images, t.y_true], feed_dict={t.is_training:True})
# print (y)
|
983,287 | 5a538cf195fc84f13e8e7626754eaf8a85c942ad | import requests
from bs4 import BeautifulSoup, Comment
import re
import pickle
import urllib.request
"""
challenge 5
"pronounce it"
http://www.pythonchallenge.com/pc/def/peak.html
answer: peak hell sounds familiar ?
pickle?
http://www.pythonchallenge.com/pc/def/pickle.html
yes! pickle!
"""
def main():
    """Solve Python Challenge 5: unpickle banner.p and render the ASCII banner."""
    url = 'http://www.pythonchallenge.com/pc/def/banner.p'
    # NOTE(review): unpickling remote data is unsafe for untrusted sources;
    # acceptable here only because the challenge host is trusted.
    raw_html = urllib.request.urlopen(url).read()
    print(pickle.loads(raw_html))
    #print(pickle.dumps(raw_html))
    # Same payload loaded a second time, streamed straight from the socket.
    data = pickle.load(urllib.request.urlopen(url))
    print(data)
    # Each line is a list of (char, run_length) pairs — run-length decode it.
    for line in data:
        print("".join([k * v for k, v in line]))
if __name__ == "__main__":
main()
|
983,288 | 173d791d08a130ab4c77dce9bb98e1d55348113f | import os
import tarfile
from multiprocessing import Pool
from PIL import Image
from tqdm import tqdm
from mtcnn.detector import detect_faces
from utils import ensure_folder
def extract(filename):
    """Extract a tar archive into ./data.

    NOTE(review): tarfile.extractall is vulnerable to path traversal when
    given untrusted archives; these are assumed to be trusted dataset tarballs.
    """
    print('Extracting {}...'.format(filename))
    with tarfile.open(filename) as tar:
        tar.extractall('data')
def check_one_image(filename):
    """Return *filename* when MTCNN detects no face in the image, else None."""
    image = Image.open(filename)
    boxes, _landmarks = detect_faces(image)
    return filename if len(boxes) == 0 else None
def check_images(usage):
    """Scan data/<usage>/*/*.jpg for images with no detectable face and
    write the offending paths to data/exclude_<usage>.txt."""
    folder = os.path.join('data', usage)
    dirs = [d for d in os.listdir(folder)]
    fileset = []
    for d in dirs:
        dir = os.path.join(folder, d)
        files = [os.path.join(dir, f) for f in os.listdir(dir) if f.lower().endswith('.jpg')]
        fileset += files
    print('usage:{}, files:{}'.format(usage, len(fileset)))
    results = []
    # Multiprocessing variant kept for reference; the sequential loop below
    # is the one in use.
    # pool = Pool(12)
    # for item in tqdm(pool.imap_unordered(check_one_image, fileset), total=len(fileset)):
    #     results.append(item)
    # pool.close()
    # pool.join()
    # results = [r for r in results if r is not None]
    for item in tqdm(fileset):
        ret = check_one_image(item)
        if ret is not None:
            results.append(ret)
    print(len(results))
    with open('data/exclude_{}.txt'.format(usage), 'w') as file:
        file.write('\n'.join(results))
if __name__ == '__main__':
ensure_folder('data')
ensure_folder('models')
extract('data/vggface2_test.tar.gz')
extract('data/vggface2_train.tar.gz')
check_images('train')
check_images('test')
|
983,289 | 3c336d3db292a95644c6340e9f5b5350c6d2031d | from django.db import models
# Create your models here.
class Goal(models.Model):
    """A user goal; timestamps are maintained automatically by Django."""
    title = models.CharField(max_length=256)
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    modified_at = models.DateTimeField(auto_now=True)     # refreshed on every save
class CheckmarkLog(models.Model):
|
983,290 | aa6398527cd4bf6f83b07b5b3b540947428dbc61 | #!/usr/bin/env python
from NFTest import *
from CryptoNICLib import *
# NetFPGA test: program the crypto key/IP registers, then check that a packet
# sent via DMA leaves the PHY encrypted.
phy2loop0 = ('../connections/conn', [])

nftest_init(sim_loop = [], hw_config = [phy2loop0])
nftest_start()

MAC = ['00:ca:fe:00:00:01', '00:ca:fe:00:00:02',
       '00:ca:fe:00:00:03', '00:ca:fe:00:00:04']
IP = ['192.168.1.1', '192.168.66.6', '192.168.3.1', '192.168.4.1']
TTL = 30

#
###############################
#
# Enable encryption
key = 0x55aaff33
ip_addr = 0xC0A84206  # 192.168.66.6 as a 32-bit value (matches IP[1])
nftest_regwrite(reg_defines.CRYPTO_KEY_REG(), key)
nftest_regwrite(reg_defines.CRYPTO_IP_ADDR_REG(), ip_addr)
#
###############################
#
# Send an IP packet in port 1
length = 64
DA = MAC[1]
SA = MAC[2]
dst_ip = IP[1]
src_ip = IP[2]
pkt = make_IP_pkt(dst_MAC=DA, src_MAC=SA, TTL=TTL, dst_IP=dst_ip,
                  src_IP=src_ip, pkt_len=length)
# The DMA-sent packet is expected on the PHY side encrypted with `key`.
encrypted_pkt = encrypt_pkt(key, pkt)
nftest_send_dma('nf2c0', pkt)
nftest_expect_phy('nf2c0', encrypted_pkt)

nftest_finish()
|
983,291 | 1e55ddb1ae75550089f3b08290b1d639fd803c38 | from django.db import models
from django.core.validators import RegexValidator
# Create your models here.
class Contact(models.Model):
    """A contact-form submission; listed newest first."""

    name = models.CharField(max_length=55, default='')
    email = models.EmailField(max_length=255)
    # Fix: raw string for the regex — '\d' in a plain string is an invalid
    # escape (DeprecationWarning, future SyntaxError); the runtime value is
    # unchanged.
    # NOTE(review): a RegexValidator on an IntegerField is ineffective — the
    # stored integer drops the leading zero the pattern allows; a CharField
    # is the usual fix, but that is a schema change needing a migration, so
    # it is only flagged here.
    phone_no = models.IntegerField(validators=[RegexValidator(
        r"^0?[5-9]{1}\d{9}$")], null=True, blank=True)
    created_date = models.DateTimeField(auto_now_add=True)
    message = models.TextField()

    class Meta:
        ordering = ['-pk']  # newest submissions first

    def __str__(self):
        return self.name
|
983,292 | 60acd2bc8f6fd68d1c9109e607b61e9bf31aacbb | from .base_page import BasePage
from .locators import LoginPageLocators
class LoginPage(BasePage):
    """Page object for the login/registration page."""

    def should_be_login_page(self):
        """Run every login-page invariant check."""
        self.should_be_login_url()
        self.should_be_login_form()
        self.should_be_register_form()

    def should_be_login_url(self):
        # Verify the browser is currently on the login URL.
        assert LoginPageLocators.LOGIN_URL in str(self.browser.current_url), "wrong url"

    def should_be_login_form(self):
        assert self.is_element_present(*LoginPageLocators.LOGIN_FORM), "no login"

    def should_be_register_form(self):
        assert self.is_element_present(*LoginPageLocators.REGISTER_FORM), "no register"
983,293 | 0e139532f40baf0262ffd8113553ece683c14ab9 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .utils import *
from utils.optimizers.lars import *
from utils.optimizers.lamb import *
|
983,294 | 12e8df48782a82cb6f3a6cdc5a42a7ed81ac8b6f | import django
from django.contrib.postgres.fields import ArrayField
from django.db import models
from auditable.models import Auditable
from enumfields import EnumField
from .vehicle_statuses import VehicleDefinitionStatuses
class Vehicle(Auditable):
    """A credit-generating vehicle definition (make/model/year/class/fuel)."""

    make = models.ForeignKey(
        'Make',
        related_name=None,
        on_delete=models.PROTECT
    )
    vehicle_class_code = models.ForeignKey(
        'VehicleClass',
        related_name=None,
        on_delete=models.PROTECT
    )
    vehicle_fuel_type = models.ForeignKey(
        'FuelType',
        related_name=None,
        on_delete=models.PROTECT
    )
    # NOTE(review): 'range' shadows the builtin as a class attribute name;
    # harmless for a Django field, but worth knowing when reading this model.
    range = models.IntegerField(
        db_comment='Vehicle Range in km'
    )
    model_name = models.CharField(
        blank=False,
        db_comment="Model and trim of vehicle",
        max_length=250,
        null=False
    )
    model_year = models.ForeignKey(
        'ModelYear',
        related_name=None,
        on_delete=models.PROTECT,
        null=False
    )
    validation_status = EnumField(
        VehicleDefinitionStatuses,
        max_length=20,
        null=False,
        default=VehicleDefinitionStatuses.DRAFT,
        db_comment="The validation status of the vehicle. Valid statuses: "
                   "{statuses}".format(
                       statuses=[c.name for c in VehicleDefinitionStatuses]
                   )
    )

    class Meta:
        db_table = 'vehicle'
        # One definition per (make, model, class, fuel, year) combination.
        unique_together = [[
            'make', 'model_name', 'vehicle_class_code', 'vehicle_fuel_type',
            'model_year'
        ]]
        db_table_comment = "List of credit-generating vehicle definitions"
|
983,295 | d572d6de66d324c95ee3135d7435cb66e48ab5a6 | from zope.component.interfaces import ObjectEvent
from zope.interface import Interface,implements
import traceback
class ICompilationErrorEvent(Interface):
    """ when user code cannot be compiled """
    pass
class CompilationErrorEvent(ObjectEvent):
    """Event fired when user code fails to compile; carries a formatted message."""
    implements(ICompilationErrorEvent)

    def __init__(self, provider, container):
        super(CompilationErrorEvent, self).__init__(provider)
        # provider is the error object, exposing .lineno and .msg
        # (e.g. a SyntaxError).
        self.error = provider
        self.container = container
        self.message = """in %s, at line %d: %s""" % (
            container.id,
            self.error.lineno,
            self.error.msg,
        )
class IExecutionErrorEvent(Interface):
    """ when user code fails """
    pass
class ExecutionErrorEvent(ObjectEvent):
    """Event fired when user code raises at runtime; builds a one-line message
    from the exception and the last traceback frame."""
    implements(IExecutionErrorEvent)

    def __init__(self, provider, container):
        super(ExecutionErrorEvent, self).__init__(provider)
        self.error = provider
        self.container = container
        # Must be constructed inside the active except block for
        # format_exc() to see the exception.
        self.traceback = traceback.format_exc().splitlines()
        if not hasattr(self.error, 'message') or not self.error.message:
            # Fall back to "ClassName str(error)" when there is no message.
            error_msg = "%s %s" % (
                self.error.__class__.__name__,
                str(self.error))
        else:
            error_msg = self.error.message
        # Second-to-last traceback line names the failing location; strip the
        # exec-string file marker.
        error_line = self.traceback[-2].replace(' File "<string>", ', '')
        self.message = """in %s: %s, %s""" % (
            container.id,
            error_msg,
            error_line,
        )
|
983,296 | 5487bb068ad6cb8c62b1aa5d0f6fa497a5fac762 | RECORD_TERMINATOR = 0x1D
MAX_RECORD_LENGTH = 99999
LEADER_LENGTH = 24
DIRECTORY_ENTRY_LENGTH = 12
FIELD_TERMINATOR = 0x1E
SUBFIELD_DELIMITER = 0x1F
TAG_LENGTH = 3
|
983,297 | 0eb93a48844519119d38164a4ba3f71ecd78c305 | #coding=utf8
import time
import timeit
import os
import sqlite3
#score=[]
# fix1 28.06.2018 XXXXXXXXX BAD_END
# NOTE: Python 2 source (print statements below).
# Counts A/T/G/C per genome for names read from a list, writing rows to
# lenatgc.csv. Only records br in (3714..3717] are processed.
pgen_path = 'MammalsGenomesWithEcology.fasta.txt'
#pgen_path = 'test.fa'
#path2=open("uni.txt",'r')
# NOTE(review): path2 is read below but its open() above is commented out,
# so this script raises NameError as written — restore the open() to run it.
path3=("lenatgc.csv")
reps = open(path3, "w")
#for n in range(10):
br=3713
while True:
    uname = path2.readline()[:-1]
    print uname
    pgen = open(pgen_path,'r')
    #uname = "Zu_cristatus"
    pr=1
    while True:
        # FASTA-style pairs: a ">name" header line, then the sequence line.
        name = pgen.readline()[1:-1]
        genome = pgen.readline()
        length=len(genome)
        #print uname+" "+name
        if (uname==name.replace(" ","_")):
            a = genome.count("A")
            t = genome.count("T")
            g = genome.count("G")
            c = genome.count("C")
            reps.write("%s %s %s %s %s %s %s %s\n" % (br,uname,pr,name,a,t,g,c))
            break
        pr=pr+1
        if pr>3954:
            break
    br=br+1
    if br>3717:
        break
983,298 | cb822e5d0c4823c652bdb975c1f531197427e5c5 | import requests
import collections
import sqlite3 as sql
import os
import time
# SQLite database holding the library catalogue.
filename = os.path.join(os.path.dirname(__file__), 'example.db')
conn = sql.connect(filename)
c = conn.cursor()

# required API key for the ISBN db website API v2
filename = os.path.join(os.path.dirname(__file__), 'api.key')
with open(filename) as tokenFile:
    api_key = tokenFile.read()

# Keys from the isbndb website API v2 that return single values
ISBN_DB_API_2_DATA_SINGLE_KEYS = [
    "awards_text",
    "marc_enc_level",
    "summary",
    "isbn13",
    "dewey_normal",
    "title_latin",
    "publisher_id",
    "dewey_decimal",
    "publisher_text",
    "language",
    "physical_description_text",
    "isbn10",
    "edition_info",
    "urls_text",
    "lcc_number",
    "publisher_name",
    "book_id",
    "notes",
    "title",
    "title_long"
]

# Keys from the isbndb website API v2 that return multiple values
# (illustrative shapes; these are not consumed as a flat key list).
ISBN_DB_API_2_DATA_LIST_KEYS = [
    {"author_data":{
        "name":"Richards, Rowland",
        "id":"richards_rowland"
    }},
    "subject_ids"
]

# Maps ISBNdb response keys to isbndb_books column names; also serves as the
# whitelist for column names interpolated into UPDATE statements.
ISBN_DB_TO_LIBRERY_DB_CONVERSION_TABLE = {
    "awards_text":"awards_text",
    "book_id":"book_id",
    "dewey_decimal":"dewey_decimal",
    "dewey_normal":"dewey_normal",
    "edition_info":"edition_info",
    "isbn10":"isbn10",
    "isbn13":"isbn13",
    "language":"language",
    "lcc_number":"lcc_number",
    "marc_enc_level":"marc_enc_level",
    "notes":"notes",
    "physical_description_text":"physical_description_text",
    "publisher_id":"publisher_id",
    "publisher_name":"publisher_name",
    "publisher_text":"publisher_text",
    "summary":"summary",
    "title":"title",
    "title_latin":"title_latin",
    "title_long":"title_long",
}
def scrape():
    """Scrape ISBNdb (API v2) metadata for all un-scraped books.

    Books with an ISBN-13 are queried by it; books with only an ISBN-10 are
    queried by that. Each response is written to isbndb_books and
    books.isbndb_scraped is set to 1 (found) or 0 (not found).
    """
    print("Begin new scrape at " + time.strftime("%a, %d %b %Y %H:%M:%S %Z", time.localtime()))
    book_url = "http://isbndb.com/api/v2/json/" + api_key + "/book/"

    # Books with an ISBN-13 that haven't been handled yet.
    c.execute("SELECT b_id, isbn13 FROM books WHERE isbndb_scraped is NULL and isbn13 != ''")
    book_13s = c.fetchall()
    # Books without an ISBN-13 but with an ISBN-10, not handled yet.
    c.execute("SELECT b_id, isbn10 FROM books WHERE isbndb_scraped is NULL and isbn13 == '' and isbn10 != ''")
    book_10s = c.fetchall()

    # The original duplicated this loop verbatim for both ISBN kinds (and the
    # ISBN-10 "not found" message wrongly said "ISBN13"); factored out below.
    _scrape_books(book_url, book_13s, "ISBN13")
    _scrape_books(book_url, book_10s, "ISBN10")


def _scrape_books(book_url, books, isbn_label):
    """Fetch and store ISBNdb records for (b_id, isbn) rows; isbn_label only affects log text."""
    for b_id, isbn in books:
        print(isbn)
        c.execute('INSERT OR IGNORE INTO isbndb_books (b_id) VALUES (?)', (b_id,))
        r = requests.get(book_url + isbn)
        json = r.json()
        if 'error' in json.keys():
            print("Book with " + isbn_label + " " + isbn + " was not found in the ISBNDB database")
            c.execute('''
                UPDATE books
                SET isbndb_scraped = 0
                WHERE b_id=? ''', (b_id,))
            continue
        for key in json['data'][0]:
            if key in ISBN_DB_API_2_DATA_SINGLE_KEYS:
                # Column name comes from a fixed whitelist dict, values are
                # bound parameters — the string-built SQL is safe here.
                c.execute('''
                    UPDATE isbndb_books
                    SET ''' + ISBN_DB_TO_LIBRERY_DB_CONVERSION_TABLE[key] + '''=?
                    WHERE b_id =? ''', (json['data'][0][key], b_id))
            else:
                print("Unhandled key for book with " + isbn_label + " " + isbn + " '" + key + "' has value '", end="")
                print(json['data'][0][key], end="")
                print("'")
        c.execute('''
            UPDATE books
            SET isbndb_scraped = 1
            WHERE b_id=? ''', (b_id,))
        conn.commit()
# Removed dead code: this span previously held an earlier Facebook Graph feed
# scraper (feed/post/comment metadata -> SQLite, plus scrape_person calls)
# that had been disabled by wrapping it in a module-level triple-quoted
# string — a no-op statement with no runtime effect. Deleted outright;
# recover it from version-control history if it is ever needed again.
def scrape_person(person):
    """Upsert one row into the people table from a Graph API person object.

    `person` is the data object returned by a 'from{fields}' graph API call;
    the keys 'id', 'name', and 'picture' (with nested data.url) are read.
    Writes through the module-level SQLite cursor `c`.
    """
    person_id = person['id']
    print('\t\tScraping person:', person_id)
    row = (person_id, person['name'], person['picture']['data']['url'])
    c.execute('INSERT OR REPLACE INTO people (id, name, picture) VALUES (?,?,?)', row)
# Script entry point: kick off the scrape. (A stray trailing "|" — an
# extraction artifact that made this line a syntax error — was removed.)
scrape()
# Generated by Django 3.2.4 on 2021-07-13 08:15
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter creditor_transaction fields: attach a non-negative validator to
    the four fuel amount/price integer columns, and redefine remark as an
    optional text field capped at 100 characters."""

    dependencies = [
        ('creditors_transaction', '0001_initial'),
    ]

    # The four fuel columns share an identical field definition, so their
    # AlterField operations are built in one comprehension; the remark field
    # is appended last, preserving the original operation order.
    operations = [
        migrations.AlterField(
            model_name='creditor_transaction',
            name=column,
            field=models.IntegerField(
                blank=True,
                null=True,
                validators=[django.core.validators.MinValueValidator(0, 'Value should not be less than 0')],
            ),
        )
        for column in ('diesel_in_lit', 'diesel_price', 'petrol_in_lit', 'petrol_price')
    ] + [
        migrations.AlterField(
            model_name='creditor_transaction',
            name='remark',
            field=models.TextField(blank=True, max_length=100, null=True),
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.