index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
989,400 | be73ba907e252676827161d3fa8fcd8418844129 | def wypisz_dane(imie, nazwisko, kurs = "Python", il_dni = 15):
print(imie, nazwisko, kurs, il_dni)
wypisz_dane("Bolek", "Kruszewski")
wypisz_dane("Bolek", "Kruszewski", "Java")
wypisz_dane("Jan", "Matejko", "malarstwo", 3567)
wypisz_dane("Paulina", "K", 30)
wypisz_dane("Marek", "0", il_dni=25)
wypisz_dane(kurs="JavaScript", imie="Olaf", il_dni=34, nazwisko="Pierdziel")
|
989,401 | 917e10920fd6e7c3e1a37c4f371993ddcb1465cf | # -*- coding: utf-8 -*-
# @Start_Time : 2018/6/25 16:28
# @End_time:
# @Author : Andy
# @Site :
# @File : 71_simplify_path_180625.py
"""
Given an absolute path for a file (Unix-style), simplify it.
For example,
path = "/home/", => "/home"
path = "/a/./b/../../c/", => "/c"
Corner Cases:
Did you consider the case where path = "/../"?
In this case, you should return "/".
Another corner case is the path might contain multiple slashes '/' together, such as "/home//foo/".
In this case, you should ignore redundant slashes and return "/home/foo".
"""
class Solution(object):
    def simplifyPath(self, path):
        """Return the canonical form of an absolute Unix-style path.

        Collapses runs of slashes, resolves '.' (current directory) and
        '..' (parent directory), and never climbs above the root.

        :type path: str
        :rtype: str

        Examples: "/home/" -> "/home"; "/a/./b/../../c/" -> "/c";
        "/../" -> "/"; "/home//foo/" -> "/home/foo".
        """
        # Stack of path components seen so far. (The original pre-allocated
        # a fixed-size list plus an index and contained a dead, always-True
        # `valid_string` helper and an `if 1:` placeholder; a plain stack
        # has the same behavior with none of the dead code.)
        stack = []
        for part in path.strip().split('/'):
            if not part or part == '.':
                # Empty parts come from '//' runs; '.' is a no-op.
                continue
            if part == '..':
                # Go up one level, but never above the root.
                if stack:
                    stack.pop()
            else:
                # Any other component (including names like '...') is kept.
                stack.append(part)
        return "/" + "/".join(stack)
# Smoke tests with expected output:
print(Solution().simplifyPath("/home/////.."))    # -> /
print(Solution().simplifyPath("/a/./b/../../c/"))  # -> /c
print(Solution().simplifyPath("/..."))             # -> /... ('...' is a valid name)
|
989,402 | 004ce3ed6a8cae98a46821c581e21c6c047a37c1 | from sqlalchemy import create_engine
import mysql.connector
import pandas as pd
import psycopg2
#Local Imports
from extract_data.get_new_nyc_borough_confirmed import get_borough_confirmed
from combine.combine_nyc import combine_scraped_and_historical
engine = create_engine('postgresql+psycopg2://postgres:postgres@postgres2.chtkfsooypac.us-east-1.rds.amazonaws.com:5432/postgres', echo=False)
def store_historical_nyc():
    """Fetch NYC borough confirmed-case counts and overwrite the
    `historical_nyc_table` table in the configured Postgres database."""
    df = get_borough_confirmed()
    df.to_sql(name='historical_nyc_table', con=engine, index=False, if_exists="replace")
    # if_exists options: {'fail', 'replace', 'append'}, default 'fail'
def execute_combine_nyc_scraped_and_historical():
    """Merge scraped and historical NYC data and write both tables back.

    `combine_scraped_and_historical` returns (combined, historical);
    the combined frame may be None when there is nothing to merge.
    NOTE(review): assumes the historical write only happens when a combined
    frame exists -- confirm intended nesting against the upstream source.
    """
    dn, df = combine_scraped_and_historical()
    if dn is not None:
        dn.to_sql(name='combined_nyc_table', con=engine, index=False, if_exists="replace")
        df.to_sql(name='historical_nyc_table', con=engine, index=False, if_exists="replace")
989,403 | 121693931cf7f72220494747443cd47063ac5dbc | """Tags for djangos templating library"""
|
989,404 | 24bd01f625fd9eda7a7d3224b8b5328df92a20bf | # Faça um programa que calcule a soma entre todos os números
# ímpares que são múltiplos de três e que se encontram no intervalo de 1 até 500.
'''
soma = 0
cont = 0
for i in range(0, 501, 3):
if i % 2 == 0:
continue
else:
soma+=i
cont+=1 # Para contar os números que serão somados
print(soma, cont)
'''
### CORREÇÃO/MÉTODO DO PROFESSOR
soma = 0
cont = 0
for i in range(1, 501, 2):
if i % 3 == 0:
#print(i, end=' ')
soma+=i
cont+=1
print(f'A soma de todos os {cont} valores solicitados é: {soma}')
|
989,405 | 0fa03899a8114fa70874d659cc413e8e39d997b3 | #-*- coding: utf-8 -*-
import ffn
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.dates import date2num
from datetime import datetime
from cycler import cycler# 用于定制线条颜色
# pip mplfinance 參考: https://pypi.org/project/mplfinance/
# 使用新 module 的說明參考 https://www.mscto.com/python/558937.html
import mplfinance as mpf
'''
利用 python 繪製 K 線圖
'''
# 出圖
matplotlib.use('TkAgg') # 解決出圖問題,參考 https://www.jianshu.com/p/3f4b89aaf057
# 設定開始日期
startDay = '2017-01-01'
# 設定股票代號
stock = 2330
# 取得股票資訊
data = ffn.get(f"{stock}.TW:Open, {stock}.TW:High, {stock}.TW:Low, {stock}.TW:Close", start=startDay)
# 取得時間(date),並加入 DataFrame 之中,而且要排在第一欄,為了之後畫 K 線圖
floteDate = data.index.to_pydatetime()
data.insert(0, "Date", floteDate)
# 變更欄位名稱
data = data.rename(columns={f"{stock}twopen": 'Open', f"{stock}twhigh": 'High', f"{stock}twlow": 'Low', f"{stock}twclose": 'Close'})
"""繪製 K 線圖"""
# 圖表樣式參考: https://github.com/matplotlib/mplfinance/blob/master/examples/styles.ipynb
# 以下圖表設定參考: https://www.pythonf.cn/read/86064
# 设置基本参数
# type:绘制图形的类型,有candle, renko, ohlc, line等
# 此处选择candle,即K线图
# mav(moving average):均线类型,此处设置7,30,60日线
# volume:布尔类型,设置是否显示成交量,默认False
# title:设置标题
# y_label_lower:设置成交量图一栏的标题
# figratio:设置图形纵横比
# figscale:设置图形尺寸(数值越大图像质量越高)
""" 記得要用 dict 包起來,才可以用 **kwargs 的方式傳參數 """
kwargs = dict(
type='candle',
mav=(7, 30, 60),
title=f"{stock} Information_{startDay}",
ylabel_lower='Shares\nTraded Volume',
figratio=(15, 10),
figscale=1)
# 设置marketcolors
# up:设置K线线柱颜色,up意为收盘价大于等于开盘价
# down:与up相反,这样设置与国内K线颜色标准相符
# edge:K线线柱边缘颜色(i代表继承自up和down的颜色),下同。详见官方文档)
# wick:灯芯(上下影线)颜色
# volume:成交量直方图的颜色
# inherit:是否继承,选填
mc = mpf.make_marketcolors(
up='red',
down='green',
edge='i',
wick='i',
inherit=True)
# 设置图形风格
# gridaxis:设置网格线位置
# gridstyle:设置网格线线型
# y_on_right:设置y轴位置是否在右
s = mpf.make_mpf_style(
gridaxis='both',
gridstyle='-.',
y_on_right=False,
marketcolors=mc)
# 设置均线颜色,配色表可见下图
# 建议设置较深的颜色且与红色、绿色形成对比
# 此处设置七条均线的颜色,也可应用默认设置
matplotlib.rcParams['axes.prop_cycle'] = cycler(
color=['dodgerblue', 'deeppink',
'navy', 'teal', 'maroon', 'darkorange',
'indigo'])
# 设置线宽
matplotlib.rcParams['lines.linewidth'] = .5
# 图形绘制
# show_nontrading:是否显示非交易日,默认False
# savefig:导出图片,填写文件名及后缀
"""
*args => 數個不指定 Key 的參數
**kwargs => 數個指定 key 的參數
"""
mpf.plot(data,
**kwargs,
style=s,
show_nontrading=False)
plt.show() |
989,406 | e6f19bb2332c3f79f7f84f6b028c354d9ff53272 | from django.shortcuts import render, get_object_or_404, redirect
from .models import Products, Category, Likes
from .forms import ProductForm
from django.db.models import Q
from django.contrib.auth.models import User
from an_interesting_site import settings
from reviews.models import Review
from reviews.calculate_reviews import CalculateRating
import datetime
# NOTE(review): this module-level form is never used -- every view below
# constructs its own ProductForm; consider removing.
form = ProductForm()
def products(request):
    ''' A view to show all products, with optional filtering and sorting.

    GET parameters: category (repeatable), q (search), sort (order_by
    field), author (repeatable). POST with 'all_products_z' re-sorts in
    reverse title order.
    '''
    sort = 'title'
    products = Products.objects.all().order_by(sort)
    category_list = Category.objects.all()
    selected = None
    search = None
    username = None
    user = request.user
    likes = Likes.objects.all()
    users_liked_products = []
    if request.POST:
        # The "Z-A" button posts back to re-sort in reverse title order
        if 'all_products_z' in request.POST:
            sort = '-title'
            products = Products.objects.all().order_by(sort)
    # Refresh each product's aggregate review rating before display
    for product in products:
        CalculateRating(product_id=product.pk)
    if user.is_authenticated:
        username = request.user.username
        # Remember which products this user has already liked
        for like in likes:
            if like.user.id == user.id:
                users_liked_products.append(like.product.pk)
    if 'author' in request.GET:
        # BUG FIX: previously `author__in=request.GET['author']` iterated
        # the raw string character by character; collect the submitted
        # values as a list instead so __in matches whole values.
        authors = request.GET.getlist('author')
        products = products.filter(author__in=authors)
    if request.GET:
        if 'category' in request.GET:
            categories = request.GET.getlist('category')
            products = products.filter(category__name__in=categories)
            selected = ', '.join(categories)
        if 'q' in request.GET:
            search = request.GET['q']
            # Match the search term against title or category name
            results = Q(title__icontains=search) | Q(
                category__name__icontains=search)
            products = products.filter(results)
            selected = str(search)
        if 'sort' in request.GET:
            # Explicit sort overrides any filtering done above
            sort = request.GET['sort']
            products = Products.objects.all().order_by(sort)
    context = {
        'products': products,
        'categories': category_list,
        'selected': selected,
        'sort': sort,
        'username': username,
        'likes': likes,
        'users_liked_products': users_liked_products,
    }
    return render(
        request,
        'products/products.html',
        context
    )
def productdetails(request, product_id):
    ''' A view to return details of the specified product '''
    product = get_object_or_404(Products, pk=product_id)
    # All reviews left for this product
    product_reviews = Review.objects.filter(product_id=product_id)
    context = {
        'product': product,
        'price': product.price,
        'reviews': product_reviews,
    }
    return render(request, 'products/product_details.html', context)
def add_product(request):
    '''
    Render the add-product form and handle its submission.

    Anonymous users are redirected to the login page. On a valid POST a
    new Products row is created (with a placeholder image URL) and the
    user is redirected back to the products list.
    '''
    categories_with_sizes = settings.categories_with_sizes
    # NOTE(review): debug print left in; consider removing.
    print(categories_with_sizes)
    user = request.user
    user_id = None
    username = None
    form = ProductForm()
    file = 'No image'
    categories = Category.objects.all()
    category_list = []
    if user.is_authenticated:
        user_id = user.id
        username = user.username
    # Loops over the friendly names in categories model
    for category in categories:
        category_list.append(category.friendly_name)
    # If user is not logged in, re-direct user to login page
    # And show message telling user to login.
    if username is None:
        return redirect('account_login')
    # Handles form response
    if request.POST:
        form = ProductForm(request.POST)
        if form.is_valid():
            # Placeholder image until uploads are supported
            image = "https://fakestoreapi.com/img/81fPKd-2AYL._AC_SL1500_.jpg"
            title = form.cleaned_data['title']
            category_input = form.cleaned_data['category']
            price = form.cleaned_data['price']
            description = form.cleaned_data['description']
            rate = form.cleaned_data['rate']
            count = form.cleaned_data['count']
            has_sizes = form.cleaned_data['has_sizes']
            # User(user_id) builds an unsaved reference by primary key
            author = User(user_id)
            new_product = Products(title=title, image=image, category=category_input, price=price, description=description, rate=rate, count=count,
                                   has_sizes=has_sizes, author=author)
            new_product.save()
            return redirect('products')
    context = {
        'form': form,
        'category_list': category_list,
        'categories': categories,
        'user_id': user_id,
        'categories_with_sizes': categories_with_sizes,
    }
    return render(request, 'products/add_product.html', context)
def like(request, product_id, user_id):
    ''' A view to like a product '''
    # Build primary-key references and persist the new like
    liked_product = Products(product_id)
    liking_user = User(user_id)
    Likes(product=liked_product, user=liking_user).save()
    return redirect('products')
def unlike(request, product_id, user_id):
    ''' A view to remove a user's like from a product '''
    existing = Likes.objects.filter(
        user=User(user_id), product=Products(product_id))
    existing.delete()
    return redirect('products')
def deals(request):
    ''' A view to show the deals page. '''
    products = Products.objects.all()
    today = datetime.date.today()
    # isocalendar() -> (year, week, weekday); weekday picks the daily deal
    day_of_week = today.isocalendar()[2]
    user = request.user
    user_id = user.id
    # FIX: these values were computed but never handed to the template;
    # pass them in the context (templates ignore keys they do not use,
    # so this is backward-compatible).
    context = {
        'products': products,
        'today': today,
        'day_of_week': day_of_week,
        'user_id': user_id,
    }
    return render(request, 'products/deals.html', context)
|
989,407 | c44870fa35f9744793a24a12aa60e583f8b2b7c6 | import Metodos
def menu():
print("1. Ingresar nuevo contacto\n2. Buscar contacto\n3. Visualizar agenda")
entrada = input("Ingrese una opción: ")
if entrada == "1":
Metodos.contacto()
menu() |
989,408 | 32c083cbb550845bd938ff48a66ff12571323239 | #!/usr/bin/python3
import subprocess
import os
import timeit
# Build script: compiles the 'format' library, links the googletest-based
# test binary, and runs it. Exits non-zero on any failure.
googletest_root = "googletest/googletest"

# Header search paths for the compiler
include_dirs = [
    '{}/include'.format(googletest_root),
    'include',
    '/usr/include/OpenEXR',
]

# Library search paths for the linker
library_dirs = [
    '{}/build'.format(googletest_root),
    'build',
]

# Common compiler flags (all warnings, debug symbols)
cpp_flags = [
    '-Wall',
    '-g',
]

# Library translation units (archived into build/libformat.a)
sources = [
    'format',
    'image',
    'array',
    'data_t',
]

# Test translation units (linked into build/test.bin)
tests = [
    'test/main.cpp',
    'test/image.test.cpp',
    'test/misc.cpp',
]

# Libraries to link against (order matters for static linking)
libs = [
    '-lformat',
    '-lgtest_main',
    '-lgtest',
    '-pthread',
    '-lpng',
    '-ljpeg',
    '-lIlmImf',
    '-lIex',
]

include_flags = ['-I{}'.format(x) for x in include_dirs]
library_flags = ['-L{}'.format(x) for x in library_dirs]

# Make sure the output directory exists before compiling into it
os.makedirs('build', 0o755, exist_ok = True)


def compile_format():
    """Compile each library source file into an object file under build/."""
    for source in sources:
        subprocess.check_call(['g++', '-std=c++11'] + cpp_flags + include_flags + ['-c', '{}.cpp'.format(source), '-o', 'build/{}.o'.format(source)])


try:
    # Time the library build, then archive it and link the test binary
    time = timeit.timeit(compile_format, number=1)
    objects = ['build/{}.o'.format(source) for source in sources]
    subprocess.check_call(['ar', 'rcs', 'build/libformat.a'] + objects)
    subprocess.check_call(['g++', '-std=c++11'] + tests + cpp_flags + include_flags + library_flags + libs + ['-o', 'build/test.bin'])
    print("Compilation time: ", time)
except subprocess.CalledProcessError as error:
    print('Compilation failed.')
    exit(1)

# Run the freshly built test binary
try:
    subprocess.check_call(['build/test.bin'])
except subprocess.CalledProcessError as error:
    print('Running test failed.')
    exit(1)
|
989,409 | cbe3ad155b0426e54bff9fe873696c55248ae6f4 | from PyQt5.QtWidgets import *
import difflib
import sys
import json
# Load the word -> definitions dictionary once at startup.
# FIX: the original passed a bare open() into json.load, leaking the file
# handle; a context manager closes it deterministically.
with open('data/data.json') as fp:
    data = json.load(fp)
class Window(QWidget):
    """Small dictionary/translator GUI backed by data/data.json."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle('Translator')
        self.setGeometry(50,50,350,350)
        self.UI()

    def UI(self):
        """Build and show all widgets."""
        # Word to look up
        self.inputBox=QLineEdit(self)
        self.inputBox.setPlaceholderText('Index word')
        self.inputBox.move(50, 50)
        translateButton = QPushButton("Translate", self)
        translateButton.move(200,50)
        translateButton.clicked.connect(self.translator)
        # "Find" looks up the suggestion currently selected in the combo box
        findButton = QPushButton('Find',self)
        findButton.move(210, 135)
        findButton.clicked.connect(self.find)
        self.combo = QComboBox(self)
        self.combo.move(110, 135)
        # Output area for the translations
        self.resultLabel = QLabel(self)
        self.resultLabel.setStyleSheet("background-color:#c0c0c0;border: 3px solid gray")
        self.resultLabel.setGeometry(20, 180, 300, 150)
        self.resultLabel.setWordWrap(True)
        self.show()

    def find(self):
        """Show the definitions of the word selected in the combo box."""
        name = self.combo.currentText()
        # NOTE(review): raises KeyError when the combo text is not a key in
        # `data` (e.g. while the combo is empty) -- confirm this is intended.
        output = data[name.lower()]
        outputstr = str()
        for n in output:
            outputstr = outputstr + n + '\n'
        self.resultLabel.setText(f'{outputstr}')

    def translator(self):
        """Translate the typed word, or suggest close matches on a miss."""
        name = self.inputBox.text().lower()

        def ifvalid(self, arg):
            # Return the dictionary entry, or None for an unknown word.
            try:
                return data[arg.lower()]
            except KeyError:
                return None

        # NOTE(review): ifvalid is called twice for the same word; the
        # second call could reuse `output`.
        output = ifvalid(self, name)
        if ifvalid(self, name) is not None:
            self.combo.clear()
            outputStr = str()
            for n in output:
                outputStr = outputStr + n + '\n'
            self.resultLabel.setText(f'{outputStr}')
        else:
            # Unknown word: offer close matches in the combo box instead
            closeMatch = difflib.get_close_matches(name, data)
            self.combo.clear()
            self.combo.addItems(closeMatch)
            self.resultLabel.setText(f'Couldnt find the word you are looking for\n'
                                     f'Did you mean any of the words in the menu above?\n')
    # output =str(output).replace("']", '').replace("['", ''.replace("','", ''))
# output =str(output).replace("']", '').replace("['", ''.replace("','", ''))
def main():
    """Application entry point: start Qt and show the main window."""
    App = QApplication(sys.argv)
    window = Window()
    # exec_() blocks until the window closes; its status becomes ours
    sys.exit(App.exec_())


if __name__ == '__main__': main()
989,410 | 5b6a1b52836a4d22b500787f5ff9c149e48f8b60 | #!/usr/bin/python3
# Filename: Bird.py
class Bird(object):
    """Minimal demo class showing class attributes and simple methods."""

    # Class attributes shared by every Bird instance
    feather = True
    reproduction = "egg"

    def chirp(self, sound):
        """Print the given chirp sound."""
        print(sound)

    def set_color(self, color):
        """Store *color* as an instance attribute on this bird."""
        self.color = color
# Quick demo of class attributes vs. instance attributes.
summer = Bird()
print(summer.reproduction)  # class attribute, shared by all Birds
summer.chirp("jijiji")
summer.set_color("yellow")  # creates the instance attribute `color`
print(summer.color)
|
989,411 | 919b63d322ae4172a10d338d480682abdcb9d6d3 | p = input("Enter plain text: ")
rows = int(input("Enter number of rows: "))
# Method 1
row_cipher_text = ""
row_mat = [[] for row in range(rows)]
for i in range(len(p)):
row_mat[i % rows].append(p[i])
for a in row_mat:
row_cipher_text += "".join(a)
row_decrypted_text = ""
i, count, col = 0, 0, 0
while count != len(row_cipher_text):
while col != rows:
if count == len(row_cipher_text):
break
row_decrypted_text += row_mat[col][i]
count += 1
col += 1
col = 0
i += 1
print("=====================")
print("Method 1: Rail-fence Cipher")
print(row_cipher_text)
print(row_decrypted_text)
# Method 2
print("=====================")
cols = int(input("Enter number of columns: "))
col_cipher_text = ""
col_mat = [[] for col in range(cols)]
for i in range(len(p)):
col_mat[i % cols].append(p[i])
for a in col_mat:
col_cipher_text += "".join(a)
col_decrypted_text = ""
i, count, row = 0, 0, 0
while count != len(col_cipher_text):
while row != cols:
if count == len(col_cipher_text):
break
col_decrypted_text += col_mat[row][i]
count += 1
row += 1
row = 0
i += 1
print("Method 2")
print(col_cipher_text)
print(col_decrypted_text) |
989,412 | 0bb0e8314a1ae2ecef86e48d0fd45cfb55c0c644 | #!/bin/env python2
# -*- coding: utf-8 -*-
"""
CBMG 688P case study: 3'UTR motif analysis
Keith Hughitt
2013/12/11
Overview
--------
This script brings together some of the concepts we have discussed in class
up to this point to complete a fairly complex and useful task: scanning a
genome to look for for interesting 3'UTR motifs
The script expects a genome sequence (as a FASTA file), gene annotations (GFF),
and a simple CSV file containing a list of short DNA or RNA motifs that should
be searched for. The lines in the GFF file corresponding to genes are extracted
and an approximate 3'UTR location is determine by grabbing the next N (where N
is something like 500) bases after the coding sequence of the gene. The 3'UTR
sequence for each gene is retrieved from the FASTA file, and then each scanned
for the presence of each motif in the input list. The matches for each gene
are then printed to the screen.
Currently, the script has been hard-coded to look for Genome and annotation
data related to an organism that we work on in my lab (http://en.wikipedia.org/wiki/Trypanosoma_cruzi).
This was done primarily for convenience since I am more familiar with this
data. The organism has been sequenced, and the ~65 Megabase haploid sequence
along with some annotation data are available at http://tritrypdb.org/tritrypdb/.
The script assumes that the 3'UTR boundaries have not been well-defined, and
attempts to make a guess by taking a region of a specified length following
the each CDS. This, of course, is not perfect and is only meant for the
demonstrative purposes in this course. A better approach would be to attempt
to use existing 3'UTR information or make a more sophisticated guess at the
3'UTR location based on other properties of the surrounding sequence, etc.
Design Strategy
---------------
Basic process to follow when designing a script such as this one:
1) Define our goals for the script -- what do we want it to do? What
should the inputs and outputs be?
2) Break up task into modular tasks: e.g. "load genome", "compute 3'UTR
coordinates", etc.
3) Decide how to store things internally: using lists? dicts? classes? etc.
4) Code each part of the process, testing along the way; use the IPython
console to inspect the values of each variable and make sure they are
as you expect.
5) Validate output.
Homework
--------
For homework, your goal will be to take this script and improve it by
implementing two or more of the following changes:
1. Dynamic input
Using the argpase module discussed in class, add one or more command-line
parameters to allow the user to dynamically control how the script is executed.
Parameters to consider including:
-g --input-genome
-a --input-annotations
-u --utr-length
-c --chromosome
OUTPUT_FILE: positional argument; see next section on CSV output
Example usage:
python utf_motif_analysis.py -g input.fasta -a input.gff -u 650 output.csv
2. CSV output
Currently, the script prints the results to the screen in the form:
gene_id: motif1, motif2, etc...
This is not a very useful format, however, for further analysis. A better
approach would be to store the output as a CSV file. Using the csv.writer class
(http://docs.python.org/2/library/csv.html), write the results of this script
as a CSV file where the first column is the gene id and each column there
after contains motif found for that gene. Note that in this format the
number of columns will vary for each gene.
3. Alternative targets
Extend the script to support scanning alternative portions of the gene for
the input motifs, for example, the 5'UTR or the CDS.
4. Input UTR location
Instead of approximating the 3'UTR, allow the user to input the exact 3'UTR
locations by providing an addition file with entries of the following format:
gene_id,start,end
Example:
Tc00.1047053398345.10, 518916, 519456
Homework submission
-------------------
When submitting your homework assignment, indicate which of the above changes
you attempted to implement. You may attempt as many as you want. Your grade
will be the combination of your best two scores.
References
----------
- Najafabadi, H. S., Lu, Z., Macpherson, C., Mehta, V., Adoue, V.,
Pastinen, T., & Salavati, R. (2013). Global identification of conserved
post-transcriptional regulatory programs in trypanosomatids.
Nucleic acids research, 1–10. doi:10.1093/nar/gkt647
"""
import os
import re
import sys
import csv
import StringIO
import urllib2
from Bio import SeqIO
from Bio import SeqUtils
from Bio.Alphabet import IUPAC
def main():
    """Main application body: download inputs, estimate 3'UTRs, and print
    the motifs found in each gene's 3'UTR."""
    # Genome sequence and annotations (downloaded on first run, cached after)
    genome = load_file('http://tritrypdb.org/common/downloads/release-6.0/TcruziCLBrenerEsmeraldo-like/fasta/data/TriTrypDB-6.0_TcruziCLBrenerEsmeraldo-like_Genome.fasta')
    annotations = load_file('http://tritrypdb.org/common/downloads/release-6.0/TcruziCLBrenerEsmeraldo-like/gff/data/TriTrypDB-6.0_TcruziCLBrenerEsmeraldo-like.gff')

    # 3'UTR motifs from supplementary table 2 in Najafabadi et al. (2013)
    motifs = load_motifs('najafabadi_table_s1_2013.csv')

    # Load genome sequence
    chromosomes = load_fasta(genome)

    # Parse annotations and return 3'UTR coordinates
    genes = get_utr_coords(annotations, utr_length=500)

    # For each gene, return a list of the motifs that are present in its 3'UTR
    for gene in genes:
        utr_seq = get_3utr_seq(chromosomes, gene)

        # check each motif to see if it is present
        utr3_motifs = []

        for motif in motifs:
            # nt_search returns [pattern, pos1, pos2, ...]; keep positions only
            matches = SeqUtils.nt_search(utr_seq, motif)[1:]

            # save matched motif
            if len(matches) > 0:
                utr3_motifs.append(motif)

        # output results
        print("%s: %s" % (gene['id'], ", ".join(utr3_motifs)))
def get_utr_coords(filepath, utr_length=500):
    """
    Parses a GFF file and returns the estimated 3'UTR coordinates for each gene
    in the file.

    Parameters
    ----------
    filepath: string
        Location of a GFF file to process.
    utr_length: int
        Length of 3'UTR sequence to assume (Default: 500)

    Returns
    -------
    out: A list containing dictionary representations of each gene in the GFF
    file.
    """
    # parse GFF file and get a list of the results
    gene_rows = load_gff(filepath)

    # For each gene from the above list, create a dictionary containing
    # the information needed to find the gene UTRs
    genes = []

    for row in gene_rows:
        # chromosome number: first run of digits in the seqid
        match = re.search('\d+', row['seqid'])
        chromosome = int(match.group())

        # gene id: the value of the ID=...; attribute
        match = re.search('ID=[^;]+;', row['attributes'])
        gene_id = match.group()[3:-1]

        # 3'UTR location
        # since the gene start and end ranges correspond to the CDS in this
        # case, we can simply find the N bases immediately following the CDS.
        if (row['strand'] == '+'):
            utr3_start = int(row['end'])
            utr3_end = int(row['end']) + utr_length
        else:
            # BUG FIX: this branch previously hard-coded 500, silently
            # ignoring utr_length for genes on the negative strand.
            utr3_start = int(row['start']) - utr_length
            utr3_end = int(row['start'])

        # create a dictionary representation of the gene
        genes.append({
            "id": gene_id,
            "chromosome": chromosome,
            "strand": row['strand'],
            "start": int(row['start']),
            "end": int(row['end']),
            "utr3_start": utr3_start,
            "utr3_end": utr3_end
        })

    return genes
def get_3utr_seq(chromosomes, gene):
    """
    Returns the sequence for the 3'UTR of a given gene.

    Parameters
    ----------
    chromosomes: dict
        A dictionary containing SeqRecord entries for each chromosome in the
        input genome as outputted from the `load_fasta` function.
    gene: dict
        A dictionary representation of a single gene including the basic
        information required to determine the 3'UTR sequence.

    Returns
    -------
    out: string
        3'UTR sequence string.

    See Also
    --------
    * gene_utr_coords
    * load_fasta
    """
    # get chromosome SeqRecord
    chromosome = chromosomes[gene['chromosome']]

    # get Seq for 3'UTR range (GFF coordinates are 1-based inclusive,
    # Python slices are 0-based, hence the -1)
    seq = chromosome[gene['utr3_start'] - 1:gene['utr3_end']].seq

    # positive strand
    if gene['strand'] == '+':
        return str(seq)
    else:
        # negative strand: the 3'UTR reads off the reverse complement
        return str(seq.reverse_complement())
def load_gff(filepath):
    """
    Loads a GFF file and returns a csv.DictReader instance corresponding
    to the gene rows in the file.

    Parameters
    ----------
    filepath: string
        Filepath to a GFF file to be processed.

    Returns
    -------
    out: csv.DictReader
        Returns a `DictReader` instance representing the gene-related fields
        of the input GFF file.

    See Also
    --------
    * http://www.sanger.ac.uk/resources/software/gff/spec.html
    * http://gmod.org/wiki/GFF
    * http://docs.python.org/2/library/stringio.html
    """
    # GFF fields
    colnames = ['seqid', 'source', 'type', 'start', 'end', 'score', 'strand',
                'phase', 'attributes']

    # get lines from file
    with open(filepath, 'r') as fp:
        lines = fp.readlines()

    # filter out non-gene entries (rows whose `type` column is "gene")
    gene_rows = [ x for x in lines if 'gene\t' in x]

    # Next, let's create a StringIO buffer -- this is similar to the file
    # and url handles we have seen so far. We can then pass this to a csv
    # reader instance in the same way we have seen for actual files.
    # (NOTE: StringIO.StringIO is Python 2; under Python 3 use io.StringIO)

    # First though, let's collapse the rows back into a single string
    csv_str = "".join(gene_rows)
    str_buffer = StringIO.StringIO(csv_str)

    return csv.DictReader(str_buffer, fieldnames=colnames, delimiter='\t')
def load_fasta(filepath):
    """
    Loads a genome FASTA file and returns dictionary of chromosome sequences
    indexed by chromosome number.

    Parameters
    ----------
    filepath: string
        Location of a FASTA genome file to be loaded. The FASTA file should
        contain one entry for each chromosome in the genome.

    Returns
    -------
    out: dict
        A dictionary of SeqRecord objects indexed by chromosome number.
    """
    chromosomes = {}

    seqs = SeqIO.parse(filepath, format='fasta',
                       alphabet=IUPAC.ambiguous_dna)

    # iterate over seqs and add to chromosome dictionary
    for seq in seqs:
        # determine chromosome number from the first run of digits in the
        # record name (e.g. "TcChr12-S" -> 12)
        match = re.search('\d+', seq.name)
        chromosome_number = int(match.group())
        chromosomes[chromosome_number] = seq

    return chromosomes
def load_motifs(motif_file):
    """
    Returns a list of 3'UTR motifs.

    Each RNA motif in the table's 'Sequence' column is converted to its
    DNA equivalent (U -> T) and collected in file order.

    Parameters
    ----------
    motif_file: string
        Filepath to the supplementary table from Najafabadi et. al.

    Returns
    -------
    out: list
        List of motifs from the table.
    """
    motifs = []
    with open(motif_file, 'r') as handle:
        for record in csv.DictReader(handle):
            motifs.append(record['Sequence'].replace('U', 'T'))
    return motifs
def load_file(uri):
    """
    Checks to see if a file exists either at the specified location or in the
    the current working directory and attempts to download otherwise. The
    filepath to the matched file is then returned.

    Parameters
    ----------
    uri : string
        A filepath or URI from which the file can be downloaded

    Returns
    -------
    out : filepath
        Location of the requested file.
    """
    # get filename
    filename = os.path.basename(uri)

    # If filepath specified and file exists, return filepath as-is
    if os.path.isfile(uri):
        return uri
    # Check to see if file exists in current working directory
    elif os.path.isfile(filename):
        return filename
    # Otherwise, check to see if URI is a URL
    elif uri.startswith(('http', 'ftp')):
        # retrieve remote file contents and cache them locally
        # (NOTE: urllib2 is Python 2; under Python 3 use urllib.request)
        print("Downloading %s" % uri)
        handle = urllib2.urlopen(uri)
        contents = handle.read()

        with open(filename, 'w') as fp:
            fp.write(contents)
        return filename
    # if it's not a URL or a valid filepath, raise an exception
    else:
        raise Exception("Invalid URI specified: %s" % uri)


if __name__ == "__main__":
    # main() returns None, so this exits with status 0 on success
    sys.exit(main())
|
989,413 | 81e93c49bebcef61175e6d64f079f1e528d3ccc8 | class Solution(object):
def containsNearbyAlmostDuplicate(self, nums, k, t):
"""
:type nums: List[int]
:type k: int
:type t: int
:rtype: bool
"""
if k < 1 or t < 0:
return False
buckets = {}
for i, num in enumerate(nums):
adjusted_num = num + 0x80000000
bucket = adjusted_num // (t + 1)
if bucket in buckets \
or (bucket - 1 in buckets and abs(adjusted_num - buckets[bucket - 1]) <= t) \
or (bucket + 1 in buckets and abs(adjusted_num - buckets[bucket + 1]) <= t):
return True
buckets[bucket] = adjusted_num
if i >= k:
del buckets[(nums[i - k] + 0x80000000) // (t + 1)]
return False
|
989,414 | 1622cdb3e36424f0a3eb393e7ef1dfff8dcf3503 | string=''
k=1
while len(string)<1000000:
string+=str(k)
k+=1
indices = [string[10 ** k-1] for k in range(0,7)]
product=1
for x in indices:
product *= int(x)
|
989,415 | 9cd632496c0bead4f94aaf3749461b686748f572 | # -*- coding: utf-8 -*-
"""
Compute the integral of the following function within the given domain. Use both
midpoint and trapezoidal methods! Compare your results to the exact solution of the
definite integral.
Evaluate integral numerically from 0 to 1
f(t) = 3*(t**2) * (e**(t**3))
integral = (e**(t**3))
"""
#==================================imports ====================================
import math

import numpy as np
import matplotlib.pyplot as plt

import integrate_utils as int_u
# Euler's number e = exp(1); math.e would work equally well.
e = np.exp(1)
#=============================Define function and integral ====================
def f_t(t):
    """Integrand f(t) = 3*t**2 * exp(t**3), the derivative of exp(t**3).

    Uses math.exp directly instead of raising the module-level constant
    `e` to a power, removing the dependency on that global.
    """
    return 3 * (t ** 2) * math.exp(t ** 3)
# Sanity check: f(1) = 3*e ~= 8.1548
a = f_t(1)
print(a)
def int_f_t(t):
    """Antiderivative F(t) = exp(t**3), so that F'(t) = f_t(t).

    Uses math.exp directly instead of the module-level constant `e`.
    """
    return math.exp(t ** 3)
# Sanity check: F(0) = exp(0) = 1
b = int_f_t(0)
print(b)
#============================exact solution====================================
ex_sol = int_f_t(1) - int_f_t(0)
#===================Apply trapezoidal and midpoint methods ====================
trap_result = int_u.trapezoidal(f_t, 0, 1, 10)
midpoint_result = int_u.midpoint(f_t, 0, 1, 10)
#==results(when using %10.2f, 10 is number of spaces, 2 is nuber of decimals)==
print("Trapezoidal rule results: %.20f" %trap_result)
print("Midpoint method results: %.20f" %midpoint_result)
"""
Trapezoidal rule results: 1.75204264178808499786
Midpoint method results: 1.70148276900918782317
"""
|
989,416 | cfd3b3ed579e4425bdc358c1e826099d796398db | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration: makes Reserva.comentario optional
    and gives Reserva.estado its pendiente/confirmada/vencida choices.
    Generated code -- avoid editing by hand."""

    dependencies = [
        ('lacalma', '0006_auto_20141121_1349'),
    ]

    operations = [
        migrations.AlterField(
            model_name='reserva',
            name='comentario',
            field=models.TextField(null=True, verbose_name='\xbfAlg\xfan comentario?', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='reserva',
            name='estado',
            field=models.CharField(default=b'pendiente', max_length=50, choices=[(b'pendiente', b'pendiente'), (b'confirmada', b'confirmada'), (b'vencida', b'vencida')]),
            preserve_default=True,
        ),
    ]
|
989,417 | cf8ecbfd434f43641ca52414add707cd1a011fea | from PIL import Image, ImageDraw, ImageFont
import requests
import matplotlib.pyplot as plt
def image_add_text(img, text, left, top, text_color=(255, 0, 0), text_size=13):
    """Draw *text* on *img* at (left, top) and return the same image object.

    Requires the MILT_RG.ttf font file to be available next to the script.
    """
    # Drawing handle bound to the target image
    painter = ImageDraw.Draw(img)
    # Font face and size used for the overlay text
    font = ImageFont.truetype("MILT_RG.ttf", text_size, encoding="utf-8")
    painter.text((left, top), text, text_color, font=font)
    return img
def image_combine(img, img2, top_offset=185):
    """Paste *img2*, resized, onto *img* below a banner of *top_offset* px.

    GENERALIZED: the banner height was previously hard-coded to 185; it is
    now a keyword parameter with the same default, so existing callers are
    unaffected. img2 is stretched to img's width and to img's height minus
    the banner, then pasted to fill everything below the banner.
    Returns img (modified in place).
    """
    width, height = img.size
    resized = img2.resize((width, height - top_offset))
    # Paste box is (left, upper, right, lower)
    img.paste(resized, (0, top_offset, width, height))
    return img
def down_file(url, dst):
    """Download *url* and store the raw response body at path *dst*."""
    with open(dst, "wb") as sink:
        sink.write(requests.get(url).content)
# Fetch the remote "end card" image and splice it onto the local base image.
down_file('https://h5.cyol.com/special/daxuexi/am4r2ione0/images/end.jpg','end.jpg')
img = image_combine(Image.open(r'base.jpg'),Image.open(r'end.jpg'))
# img = image_add_text(img,"\"青年大学习\"第十一季第四期",250,95,text_color=(0, 0, 0), text_size=45)
img.show()
img.save("combine.jpg")
|
989,418 | 65ed5e86afec0d4d2f8aa96f8fb3d1fb1b23db8b | from collections import deque
import sys
input = sys.stdin.readline
# Process n deque commands from stdin (BOJ-style): push/pop at both ends,
# peek front/back, size, and emptiness. -1 is printed for operations on an
# empty deque.
# FIX: replaced `print(x) if cond else print(y)` expressions (conditional
# expressions used purely for side effects) with value-selecting prints;
# the output is identical.
n = int(input())
q = deque()
for _ in range(n):
    # Each line: an operation name, optionally followed by one argument.
    command = input().split()
    op = command[0]
    if op == 'push_back':
        q.append(command[1])
    elif op == 'push_front':
        q.appendleft(command[1])
    elif op == 'front':
        print(q[0] if q else -1)
    elif op == 'back':
        print(q[-1] if q else -1)
    elif op == 'size':
        print(len(q))
    elif op == 'empty':
        print(1 if not q else 0)
    elif op == 'pop_front':
        print(q.popleft() if q else -1)
    elif op == 'pop_back':
        print(q.pop() if q else -1)
|
989,419 | 037322880281eb20169d0c65150624594978c65d | import subprocess
import os
class RaspiCameraConfig:
    """Validated capture settings for raspistill (metering mode, rotation)."""

    METERING_MODE_MATRIX = 'matrix'
    METERING_MODE_AVERAGE = 'average'
    METERING_MODE_BACKLIT = 'backlit'
    METERING_MODE_SPOT = 'spot'
    ALL_METERING_MODES = [
        METERING_MODE_MATRIX,
        METERING_MODE_AVERAGE,
        METERING_MODE_BACKLIT,
        METERING_MODE_SPOT,
    ]

    def __init__(self, mm = None, rotation_degrees = None):
        """Create a config; both settings are optional (None = unset)."""
        self.set_metering_mode(mm)
        self.set_rotation(rotation_degrees)

    def set_metering_mode(self, mm):
        """Set the metering mode (one of ALL_METERING_MODES) or None to clear.

        Raises Exception for an unrecognized mode.
        """
        # FIX: use identity comparison with None and `not in`
        # instead of `!= None` / `not mm in`.
        if mm is not None and mm not in self.ALL_METERING_MODES:
            raise Exception("Unknown metering mode specified: [%s]" % mm)
        self.metering_mode = mm

    def set_rotation(self, rot):
        """Set the rotation in whole degrees, 0..359, or None to clear.

        Raises Exception for an out-of-range angle.
        """
        if rot is not None and (rot < 0 or rot > 359):
            raise Exception("Invalid rotation angle specified [%d]" % rot)
        self.rotation_degrees = rot
class RaspiCamera:
    """Thin wrapper that shells out to the raspistill CLI to take pictures."""

    def __init__(self, camconfig = None):
        # FIX: the original default `camconfig = RaspiCameraConfig()` was a
        # mutable default argument -- one shared instance across every
        # RaspiCamera, so mutating one camera's config silently changed the
        # others. Build a fresh default per instance instead.
        self.camconfig = camconfig if camconfig is not None else RaspiCameraConfig()

    def capture_image_into_file(self, file_name, camconfig = None):
        """Capture a still into *file_name*, using *camconfig* (or the
        instance default). Returns *file_name*; raises if raspistill
        produced no output file."""
        if camconfig is None:
            camconfig = self.camconfig

        cmd = RaspistillCmdBuilder()
        cmd.take_picture_immediately()
        cmd.save_picture_to_file(file_name)
        if camconfig.metering_mode is not None:
            cmd.use_metering_mode(camconfig.metering_mode)
        if camconfig.rotation_degrees is not None:
            cmd.rotate_picture(camconfig.rotation_degrees)
        cmd.execute()

        if not os.path.exists(file_name):
            raise Exception("Called raspistill but can't find output file [%s]!" % file_name)
        return file_name
class RaspistillCmdBuilder:
    """Accumulates raspistill command-line arguments, then runs the command."""

    def __init__(self):
        self.params = ['/usr/bin/raspistill']

    def take_picture_immediately(self):
        # -t 0: capture with no preview delay
        self.params.extend(['-t', '0'])

    def save_picture_to_file(self, file_name):
        self.params.extend(['-o', file_name])

    def use_metering_mode(self, metering_mode):
        self.params.extend(['-mm', metering_mode])

    def rotate_picture(self, rotation):
        self.params.extend(['-rot', '%d' % rotation])

    def execute(self):
        # Blocks until raspistill exits.
        subprocess.call(self.params)
|
989,420 | 0ecb8a133260c45831e5a40d4b69254d3b148c99 | # -*- coding:utf-8 -*-
# Anaconda 4.3.0 環境
"""
更新情報
[17/08/16] : 検証用のサンプルデータセット生成関数を追加
[17/08/31] : クラス名を DataPreProcess → MLDreProcess に改名
"""
import numpy
# Data Frame & IO 関連
import pandas
from io import StringIO
# scikit-learn ライブラリ関連
from sklearn import datasets # scikit-learn ライブラリのデータセット群
from sklearn.datasets import make_moons # 半月状のデータセット生成
from sklearn.datasets import make_circles # 同心円状のデータセット生成
#from sklearn.cross_validation import train_test_split # scikit-learn の train_test_split関数の old-version
from sklearn.model_selection import train_test_split # scikit-learn の train_test_split関数の new-version
from sklearn.metrics import accuracy_score # 正解率、誤識別率の計算用に使用
from sklearn.preprocessing import Imputer # データ(欠損値)の保管用に使用
from sklearn.preprocessing import LabelEncoder #
from sklearn.preprocessing import OneHotEncoder # One-hot encoding 用に使用
from sklearn.preprocessing import MinMaxScaler # scikit-learn の preprocessing モジュールの MinMaxScaler クラス
from sklearn.preprocessing import StandardScaler # scikit-learn の preprocessing モジュールの StandardScaler クラス
from sklearn.pipeline import Pipeline # パイプライン
class MLPreProcess( object ):
    """
    Pre-processing helper for machine-learning data.

    Holds a pandas DataFrame as its data frame (composition / aggregation)
    and acts as a thin wrapper around the sklearn.preprocessing module.

    [public] Public instance variables carry a trailing underscore `_` by
        convention:
        df_ : the pandas DataFrame object (the data frame)
    [private] Private names are prefixed with a double underscore `__`
        (Python convention).
    """
    def __init__( self, dataFrame = pandas.DataFrame() ):
        """
        Constructor (strictly speaking, the initializer).
        [Input]
            dataFrame : pandas DataFrame object
        """
        # NOTE(review): the default DataFrame is created once at definition
        # time and shared by every default-constructed instance — confirm no
        # caller mutates it in place.
        self.df_ = dataFrame
        return

    def print( self, str = '' ):
        """Dump the data frame (and this object) to stdout with a header."""
        # NOTE(review): the method name and its parameter shadow the builtins
        # `print` / `str`; kept as-is for interface compatibility.
        print("\n")
        print("-------------------------------------------------------------------")
        print( str )
        print("\n")
        print("<pandas DataFrame> \n")
        #print( "rows, colums :", self.df_.shape )
        print( self.df_ )
        print( self )
        print("-------------------------------------------------------------------")
        return

    def setDataFrameFromList( self, list ):
        """
        Build the data frame from a plain Python list.
        [Input]
            list : list  (parameter shadows the builtin; kept for compatibility)
        """
        self.df_ = pandas.DataFrame( list )
        return self

    def setDataFrameFromDataFrame( self, dataFrame ):
        """
        Replace the data frame with an existing pandas DataFrame object.
        [Input]
            dataFrame : pandas DataFrame object
        """
        self.df_ = dataFrame
        return self

    def setDataFrameFromCsvData( self, csv_data ):
        """
        Load csv-formatted text into the pandas DataFrame.
        [Input]
            csv_data : data in csv format
        """
        # read_csv() converts the csv-formatted text into a DataFrame.
        self.df_ = pandas.read_csv( StringIO( csv_data ) )
        return self

    def setDataFrameFromCsvFile( self, csv_fileName ):
        """
        Build the data frame from a csv file (no header row is assumed).
        [Input]
            csv_fileName : string
                csv file path + file name
        """
        self.df_ = pandas.read_csv( csv_fileName, header = None )
        return self

    def getNumpyArray( self ):
        """
        Return the pandas DataFrame's contents as a NumPy array.
        """
        values = self.df_.values # the pandas DataFrame `values` attribute
        return values

    #---------------------------------------------------------
    # Functions that generate sample datasets for validation
    #---------------------------------------------------------
    @staticmethod
    def generateMoonsDataSet( input_n_samples = 100, input_random_state = 123 ):
        """
        Generate a half-moon-shaped dataset.
        [Input]
            input_n_samples : int
            input_random_state : int
        [Output]
            dat_X : numpy.ndarray
                2-D numpy array
            dat_y : numpy.ndarray
                class labels 0 or 1 (1-D numpy array)
        """
        dat_X, dat_y = make_moons( n_samples = input_n_samples, random_state = input_random_state )
        # type checks of the returned objects
        #print( isinstance(dat_X, list) )
        #print( isinstance(dat_y, list) )
        #print( isinstance(dat_X, pandas.DataFrame) )
        #print( isinstance(dat_X, numpy.ndarray) )
        return dat_X, dat_y

    @staticmethod
    def generateCirclesDataSet( input_n_samples = 1000, input_random_state = 123, input_noize = 0.1, input_factor = 0.2 ):
        """
        Generate a concentric-circles dataset.
        [Input]
            input_n_samples : int
            input_random_state : int
                seed used by the random number generator
            input_noize : float
                Gaussian noise
            input_factor : float
                Scale factor between inner and outer circle.
        [Output]
            dat_X : numpy.ndarray
                2-D numpy array
            dat_y : numpy.ndarray
                class labels 0 or 1 (1-D numpy array)
        """
        dat_X, dat_y = make_circles(
                           n_samples = input_n_samples, random_state = input_random_state,
                           noise = input_noize,
                           factor = input_factor
                       )
        return dat_X, dat_y

    #---------------------------------------------------------
    # Functions that handle missing values
    #---------------------------------------------------------
    def meanImputationNaN( self, axis = 0 ):
        """
        Impute missing values [NaN] with the mean.
        [Input]
            axis : int
                0 : impute NaN with the column mean
                1 : impute NaN with the row mean
        """
        # NOTE(review): sklearn.preprocessing.Imputer was removed in
        # scikit-learn 0.22 (SimpleImputer is the successor) — confirm the
        # pinned scikit-learn version.
        imputer = Imputer(
                      missing_values = 'NaN',
                      strategy = 'mean',
                      axis = axis # 0 : column mean, 1 : row mean
                  )
        imputer.fit( self.df_ )   # note: self.df_ gets converted to an array here
        self.df_ = imputer.transform( self.df_ )
        return self

    #---------------------------------------------------------
    # Functions that handle categorical data
    #---------------------------------------------------------
    def setColumns( self, columns ):
        """
        Set the column labels of the data frame.
        """
        self.df_.columns = columns
        return self

    def mappingOrdinalFeatures( self, key, input_dict ):
        """
        Map (convert to integers) an ordinal feature.
        [Input]
            key : string
                key (string) identifying the ordinal feature
            dict : dictionary { "" : 1, "" : 2, ... }
        """
        self.df_[key] = self.df_[key].map( dict(input_dict) ) # convert to integers
        return self

    def encodeClassLabel( self, key ):
        """
        Encode class-label strings as integers 0,1,2,... in order
        (dictionary-mapping approach).
        [Input]
            key : string
                class-label column to integer-encode
        """
        mapping = { label: idx for idx, label in enumerate( numpy.unique( self.df_[key]) ) }
        self.df_[key] = self.df_[key].map( mapping )
        return self

    def encodeClassLabelByLabelEncoder( self, colum, bPrint = True ):
        """
        Encode class-label strings using sklearn.preprocessing.LabelEncoder.
        [input]
            colum : int
                column index containing the class labels to encode
            bPrint : bool
                whether to print what is being encoded
        """
        encoder = LabelEncoder()
        # ? does the fit_transform() result need to be re-assigned? (original open question)
        encoder.fit_transform( self.df_.loc[:, colum].values )
        encoder.transform( encoder.classes_ )
        if ( bPrint == True):
            print( "encodeClassLabelByLabelEncoder() encoder.classes_ : ", encoder.classes_ )
            print( "encoder.transform", encoder.transform( encoder.classes_ ) )
        #print( "encodeClassLabelByLabelEncoder() encoder.classes_[0] : ", encoder.classes_[0] )
        #print( "encodeClassLabelByLabelEncoder() encoder.classes_[1] : ", encoder.classes_[1] )
        return self

    def oneHotEncode( self, categories, col ):
        """
        One-hot encode categorical data (nominal / ordinal features).
        [Input]
            categories : list
                list of the categorical columns
            col : int
                column position (0-based) of the variable to transform in the
                feature matrix
        """
        X_values = self.df_[categories].values # extract the categorical feature matrix
        #print( X_values )
        #print( self.df_[categories] )
        # build the one-hot encoder
        # NOTE(review): ohEncode is never used below — the actual encoding is
        # done with pandas.get_dummies; confirm whether the OneHotEncoder
        # path was meant to be kept.
        ohEncode = OneHotEncoder(
                       categorical_features = [col], # column position to transform: [0] = first column of X_values
                       sparse = False                # ? False : return a dense matrix
                   )
        # perform the one-hot encoding
        #self.df_ = ohEncode.fit_transform( X_values ).toarray() # ? path for sparse = True
        self.df_ = pandas.get_dummies( self.df_[categories] ) # converts only the string-valued columns to numbers
        return self

    #---------------------------------------------------------
    # Functions that split datasets
    #---------------------------------------------------------
    @staticmethod
    def dataTrainTestSplit( X_input, y_input, ratio_test = 0.3, input_random_state = 0 ):
        """
        Split the data into a training set and a test set.
        The split is done by random sampling.
        [Input]
            X_input : Matrix (2-D array of rows and columns)
                feature matrix
            y_input : array
                teacher data (labels)
            ratio_test : float
                fraction of the data used for the test set (0.0 ~ 1.0)
        [Output]
            X_train : training feature Matrix (2-D array)
            X_test : test feature Matrix (2-D array)
            y_train : training label array
            y_test : test label array
        """
        X_train, X_test, y_train, y_test \
        = train_test_split(
              X_input, y_input,
              test_size = ratio_test,
              random_state = input_random_state #
          )
        return X_train, X_test, y_train, y_test

    #---------------------------------------------------------
    # Functions that scale data
    #---------------------------------------------------------
    @staticmethod
    def normalizedTrainTest( X_train, X_test ):
        """
        Normalize the given training and test data; here "normalize" means
        min-max scaling to the [0,1] range.

        The training data may be fit on its own statistics, but the test data
        is transformed with the statistics obtained from the training fit.
        [Input]
            X_train : training feature Matrix (2-D array)
            X_test : test feature Matrix (2-D array)
        [Output]
            X_train_norm : normalized training feature Matrix (2-D array)
            X_test_norm : normalized test feature Matrix (2-D array)
        """
        mms = MinMaxScaler()
        # fit_transform(): fit() then transform() on the same data — fine for
        # the training set, which may use its own statistics.
        X_train_norm = mms.fit_transform( X_train )
        # transform(): rewrite the data using the statistics captured by fit().
        # The (typically smaller) test set must be scaled with the training
        # statistics, i.e. transform() after fitting on the training data.
        X_test_norm = mms.transform( X_test )
        return X_train_norm, X_test_norm

    @staticmethod
    def standardizeTrainTest( X_train, X_test ):
        """
        Standardize the given training and test data; here "standardize"
        means transforming to mean 0 and variance 1.

        As with normalizedTrainTest(), the test set is transformed with the
        statistics fit on the training set.
        [Input]
            X_train : training feature Matrix (2-D array)
            X_test : test feature Matrix (2-D array)
        [Output]
            X_train_std : standardized training feature Matrix (2-D array)
            X_test_std : standardized test feature Matrix (2-D array)
        """
        stdsc = StandardScaler()
        # fit_transform() on the training data (its own statistics are fine).
        X_train_std = stdsc.fit_transform( X_train )
        # transform() the test data with the training statistics.
        X_test_std = stdsc.transform( X_test )
        return X_train_std, X_test_std
|
989,421 | abe5dadfa169c486ed7ce0deade64c122cd8aec3 | #! /usr/local/bin/python3 -u
import testencoding as enc
import argparse
parser = argparse.ArgumentParser()
# https://stackoverflow.com/a/25513044/5719760
def aint(x):
    """Parse an int whose base is given by its prefix (0x../0o../0b../decimal)."""
    return int(x, base=0)
parser.add_argument('pages', type=str, nargs='+')
def parse_range(txt):
    """Parse 'A-B' into the inclusive list [A..B].

    Either endpoint may be omitted ('' defaults to 0x00 / 0xff respectively),
    and endpoints are parsed with aint(), so hex/octal/binary prefixes work.
    """
    # str.partition splits on the first '-' in one call instead of the
    # original index()+slicing dance; list(range(...)) replaces the
    # redundant [x for x in range(...)] comprehension.
    start, _, end = txt.partition('-')
    lo = 0x00 if start == '' else aint(start)
    hi = 0xff if end == '' else aint(end)
    return list(range(lo, hi + 1))
def gen_pages(args):
    """Expand the parsed 'pages' CLI arguments into a flat list of codepage numbers.

    Each argument is either a range like '0x10-0x1f' (see parse_range) or a
    single integer in any base (see aint).
    """
    pages = []
    for a in args.pages:
        if '-' in a:
            # parse as a range
            pages.extend(parse_range(a))
        else:
            # parse as an int
            pages.append(aint(a))
    return pages
# Build the test payload: for every requested codepage, print a header line,
# switch the printer to that codepage, and dump the page contents.
pages = gen_pages(parser.parse_args())
msg = b''
# first 0x00..0x7f are identical across all codepages
skips = [x for x in range(8)]
prefix = enc.enc(' codepage 0x')
rule = enc.enc('+') * 32
for i in pages:
    msg += (
        prefix
        + enc.x(i)
        + enc.enc(f' = {i}')
        + enc.nl
        + b'\x1b\x74'   # ESC t — presumably "select character code table"; confirm against the printer manual
        + enc.enc(i)
    )
enc.write(msg)
enc.cp(raw=True, skip_rows=skips)
msg = rule + enc.nl  # NOTE(review): this value is never written anywhere — dead assignment? confirm intent
enc.write(enc.nl * 3)
|
989,422 | 260000a8eb6c1da4f9a0705989085610d6564881 | import pytest
from outcome import Outcome
from wheel import Wheel
from binBuilder import BinBuilder
# Shared module-level fixtures: one Wheel/BinBuilder pair is built up
# cumulatively by the gen_* calls in the tests below.
wheel = Wheel()
bb = BinBuilder()
def test_gen_bets():
    """Exercise each BinBuilder generator in sequence against the shared
    wheel, checking individual bin sizes and the cumulative outcome count
    after each generator runs."""
    bb.gen_straight_bets(wheel)
    assert wheel.get(5) == frozenset([Outcome('5', 35)])
    assert len(wheel.all_outcomes) == 38
    #test_gen_split_bets():
    bb.gen_split_bets(wheel)
    assert len(wheel.get(8)) == 5
    assert len(wheel.get(23)) == 5
    assert len(wheel.get(2)) == 4
    assert len(wheel.get(34)) == 3
    assert len(wheel.all_outcomes) == 38 + 57
    #test_gen_street_bets():
    bb.gen_street_bets(wheel)
    assert len(wheel.get(9)) == 5
    assert len(wheel.get(23)) == 6
    assert len(wheel.get(31)) == 5
    assert len(wheel.all_outcomes) == 95 + 12
    #test_gen_corner_bets():
    bb.gen_corner_bets(wheel)
    assert len(wheel.get(3)) == 5
    assert len(wheel.get(9)) == 7
    assert len(wheel.get(23)) == 10
    assert len(wheel.get(31)) == 7
    assert len(wheel.get(35)) == 7
    assert len(wheel.all_outcomes) == 107 + 22
    #test_gen_line_bets():
    bb.gen_line_bets(wheel)
    assert len(wheel.get(3)) == 6
    assert len(wheel.get(9)) == 9
    assert len(wheel.get(23)) == 12
    assert len(wheel.get(31)) == 9
    assert len(wheel.get(35)) == 8
    assert len(wheel.all_outcomes) == 129 + 11
    #test_gen_dozen_bets():
    bb.gen_dozen_bets(wheel)
    assert len(wheel.get(3)) == 7
    assert len(wheel.get(9)) == 10
    assert len(wheel.get(23)) == 13
    assert len(wheel.get(31)) == 10
    assert len(wheel.get(35)) == 9
    assert len(wheel.all_outcomes) == 140 + 3
    #test_gen_column_bets():
    bb.gen_column_bets(wheel)
    assert len(wheel.get(3)) == 8
    assert len(wheel.get(9)) == 11
    assert len(wheel.get(23)) == 14
    assert len(wheel.get(31)) == 11
    assert len(wheel.get(35)) == 10
    assert len(wheel.all_outcomes) == 143 + 3
    #test_gen_even_money_bets():
    bb.gen_even_money_bets(wheel)
    assert len(wheel.get(3)) == 11
    assert len(wheel.get(9)) == 14
    assert len(wheel.get(23)) == 17
    assert len(wheel.get(31)) == 14
    assert len(wheel.get(35)) == 13
    assert len(wheel.all_outcomes) == 146 + 6
    #test_gen_five_bets():
    bb.gen_five_bets(wheel)
    assert len(wheel.get(0)) == 2
    assert len(wheel.get(37)) == 2
    assert len(wheel.all_outcomes) == 152 + 1
def test_buildBins():
    """Verify the fully built wheel's bin sizes and total outcome count.

    NOTE(review): this reuses the same module-level `wheel`/`bb` as
    test_gen_bets, so the result depends on test ordering — confirm whether
    buildBins is idempotent on an already-populated wheel.
    """
    bb.buildBins(wheel)
    assert len(wheel.get(3)) == 11
    assert len(wheel.get(9)) == 14
    assert len(wheel.get(23)) == 17
    assert len(wheel.get(31)) == 14
    assert len(wheel.get(35)) == 13
    assert len(wheel.get(0)) == 2
    assert len(wheel.get(37)) == 2
    assert len(wheel.all_outcomes) == 153
|
989,423 | c549ba0843eaa565f182b424b211669a8f65e967 | # Generated by Django 2.2.7 on 2019-12-14 00:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Profesor and Estudiante tables
    (Estudiante has a cascading FK to Profesor)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Profesor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=30)),
                ('apellido', models.CharField(max_length=30)),
                ('legajo', models.CharField(max_length=3)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Estudiante',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=30)),
                ('apellido', models.CharField(max_length=30)),
                ('nota_primer_parcial', models.IntegerField()),
                ('nota_segundo_parcial', models.IntegerField()),
                ('nota_final', models.IntegerField()),
                ('profesor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='utn.Profesor')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
|
989,424 | 0123ba18205e7868648b2162b2e5044bff788eea | from django.shortcuts import render, redirect,HttpResponse
from django.contrib.auth.models import User
from django.contrib import auth
from django.http import JsonResponse
from django.contrib.sites.shortcuts import get_current_site
from django.template.loader import render_to_string
from django.utils.http import urlsafe_base64_encode,urlsafe_base64_decode
from django.utils.encoding import force_bytes
from django.core.mail import EmailMessage
from django.utils.encoding import force_bytes, force_text
from .tokens import account_activation_token
# Create your views here.
def login(request):
    """Log a user in: GET renders the form, POST authenticates."""
    if request.method != "POST":
        return render(request, 'login.html')
    username = request.POST['login']
    password = request.POST['password']
    user = auth.authenticate(request, username=username, password=password)
    if user is None:
        return render(request, 'login.html', {'error': 'username or password is incorrect.'})
    auth.login(request, user)
    print(user.username + " login")
    return redirect('/', user)
def signup(request):
    """Register a new user (inactive until e-mail activation).

    On POST with matching passwords: creates the user plus profile fields,
    e-mails an activation link, and renders the assignment page.  A password
    mismatch re-renders the signup form with err=1; GET renders a blank form.
    """
    if request.method == "POST":
        if request.POST['password'] == request.POST['password_confirm']:
            err=0
            user = User.objects.create_user(
                request.POST['username'],
                password = request.POST['password'],
                email = request.POST['email'],
            )
            user.profile.job = request.POST['job']
            user.profile.location = request.POST['location']
            user.profile.choose_people = request.POST['choose_people']
            # Account stays disabled until the e-mailed activation link is used.
            user.is_active = False
            user.save()
            print( user.profile.job)
            print( user.profile.location)
            print( user.profile.choose_people)
            current_site = get_current_site(request)
            # localhost:8000
            message = render_to_string('user_activate_email.html',{
                'user': user,
                'domain': current_site.domain,
                # 'uid': urlsafe_base64_encode(force_bytes(user.pk)).decode(),
                # NOTE(review): on Django >= 2.2 urlsafe_base64_encode already
                # returns str, making .encode().decode() a no-op — confirm
                # which Django version this targets.
                'uid': urlsafe_base64_encode(force_bytes(user.pk)).encode().decode(),
                'token': account_activation_token.make_token(user),
            })
            print(message)
            # auth.login(request,user)
            mail_subject = "Gosomi 회원가입 인증 메일입니다."
            user_email = user.email
            email = EmailMessage(mail_subject, message, to=[user_email])
            email.send()
            return render(request,'assignment.html')
            # return redirect('account:home')
            # return render(request, 'account/signup.html')
        else:
            err=1
            return render(request, 'signup.html',{'err' : err})
    return render(request, 'signup.html')
def assign(request):
    """Terms-of-service gate before signup.

    Both agreement checkboxes must be present in the POST data to proceed to
    the signup view; otherwise the agreement page is rendered again.
    """
    # The original tested `not request.POST.get(x, None) == None` twice and
    # kept an unused `err` variable; this states the same logic directly.
    if request.POST.get('agree_a') is not None and request.POST.get('agree_b') is not None:
        return redirect('signup')
    return render(request, 'signup_1.html')
def activate(request, uid64, token):
    """Activate an account from the e-mailed link (base64-encoded uid + token),
    then log the user in and redirect home."""
    uid = force_text(urlsafe_base64_decode(uid64))
    user = User.objects.get(pk=uid)
    if user is not None and account_activation_token.check_token(user, token):
        user.is_active = True
        user.save()
        auth.login(request, user)
        return redirect('/')
    else:
        # Invalid or expired token ("abnormal access" in the Korean message).
        return HttpResponse('비정상적인 접근입니다.')
def signup_1(request):
    """Render the terms-agreement page."""
    return render(request, 'signup_1.html')
def update_profile(request, user_id):
    """Overwrite a user's profile.job with placeholder text.

    NOTE(review): returns nothing (no HttpResponse), so as a Django view this
    would fail — looks like scratch/demo code; confirm before wiring to a URL.
    """
    user = User.objects.get(pk=user_id)
    user.profile.job = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit...'
    user.save()
def checkid(request):
    """AJAX username-availability check.

    Returns JSON {'result': 'success', 'data': 'exist' | 'not exist'}.
    """
    username = request.GET['username']
    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        # Only "no such user" means the name is free; the original caught
        # every Exception (and bound it unused), which also swallowed real
        # errors such as a missing query parameter.
        user = None
    result = {
        'result': 'success',
        'data': 'not exist' if user is None else 'exist',
    }
    return JsonResponse(result)
def logout(request):
    """Log the current user out on GET; any other method re-renders the login page."""
    if request.method != 'GET':
        return render(request, 'login.html')
    auth.logout(request)
    print('log out success')
    return redirect('/')
989,425 | 30468a7e78fae952b25596fa39982d767c52c598 | import os
import torch
from tensorboardX import SummaryWriter
from shutil import copy, rmtree
class Logger:
    """Combined text-file and TensorBoard logger.

    Writes free-form text to <logdir>/log.txt and scalars to separate
    train/ and eval/ SummaryWriter event directories.
    """

    def __init__(self, logdir):
        self.logdir = logdir
        if not os.path.exists(self.logdir):
            os.makedirs(self.logdir)
        self.logfile = open(os.path.join(logdir, 'log.txt'), 'w')
        train_writer_dir = os.path.join(logdir, 'train')
        eval_writer_dir = os.path.join(logdir, 'eval')
        # Remove old tf events so each run starts from a clean event log
        if os.path.exists(train_writer_dir):
            rmtree(train_writer_dir)
        if os.path.exists(eval_writer_dir):
            rmtree(eval_writer_dir)
        # Reuse the paths computed above (the original re-joined them).
        self.train_writer = SummaryWriter(train_writer_dir)
        self.eval_writer = SummaryWriter(eval_writer_dir)

    def log_string(self, out_str, do_print=True):
        """Append a line to log.txt (flushed immediately); optionally echo to stdout."""
        self.logfile.write(str(out_str) + '\n')
        self.logfile.flush()
        if do_print:
            print(out_str)

    def backup_files(self, file_list):
        """Copy the given source files into logdir for reproducibility."""
        for filepath in file_list:
            copy(filepath, self.logdir)

    def close(self):
        """Close the text log and both event writers."""
        self.logfile.close()
        self.train_writer.close()
        self.eval_writer.close()

    def _log_scalar(self, writer, tag, value, global_step):
        # Shared implementation for the train/eval scalar loggers (the two
        # public methods were duplicated); unwraps 0-dim torch tensors.
        if isinstance(value, torch.Tensor):
            value = value.item()
        writer.add_scalar(tag, value, global_step)

    def log_scalar_train(self, tag, value, global_step):
        """Log a scalar to the train event writer."""
        self._log_scalar(self.train_writer, tag, value, global_step)

    def log_scalar_eval(self, tag, value, global_step):
        """Log a scalar to the eval event writer."""
        self._log_scalar(self.eval_writer, tag, value, global_step)
|
989,426 | 73968d41e399d3db549135e3cc8e8d6124186ad8 | # -*- coding: utf-8 -*-
'''嵌套函数,在函数内部定义的函数'''
def f1():
    """Demonstrate a nested function: f2 exists only inside f1's scope."""
    print('f1() running...')

    def f2():
        print('f2() running...')

    f2()
f1()
print('---------分割线------------')
'''使用嵌套函数避免重复代码'''
#定义了两个函数
def printChineseName(name, familyName):
    """Print a name Chinese-style: family name first."""
    print(f"{familyName} {name}")
def printEnglishName(name, familyName):
    """Print a name English-style: given name first."""
    print(f"{name} {familyName}")
#调用测试
printChineseName('薇', '樊')
printEnglishName('薇', '樊')
print('---------分割线------------')
#使用嵌套函数定义
def printName(isChinese, name, familyName):
    """Print a name, family-name-first when isChinese is truthy.

    Uses a nested helper to avoid duplicating the formatting, illustrating
    how an inner function removes repeated code.
    """
    def inner_print(a, b):
        print(f"{a} {b}")

    order = (familyName, name) if isChinese else (name, familyName)
    inner_print(*order)
#调用测试
printName(True, '薇', '樊')
printName(False, '薇', '樊')
print('==========')
printName(1, '薇', '樊')
printName(0, '薇', '樊')
|
989,427 | fc52b191a2caacd199f9b6b2918dad09ff3475d2 | from django.contrib import admin
from .models import Agent
# Register your models here.
@admin.register(Agent)
class AgentAdmin(admin.ModelAdmin):
    """Default admin interface for the Agent model; no customization yet."""
    pass
|
989,428 | 63e3edc8e016c9f931fe391bc47d752ba24487c9 | #!/usr/bin/env python
import roslib; roslib.load_manifest('guts')
import rospy
from guts.msg import sonar_data
import serial
ser = serial.Serial('/dev/ttyACM0', 9600, timeout=60)
def guts_sonar_ir_pub():
    """Read whitespace-separated sonar readings from the Arduino serial port
    and republish them as sonar_data messages (Python 2 rospy node).

    Lines that do not contain exactly 6 integer values are dropped.
    """
    pub = rospy.Publisher('guts_sonar_ir_data', sonar_data,queue_size=2)
    rospy.init_node('guts_sonar_ir_node')
    while not rospy.is_shutdown():
        current_time = rospy.get_rostime()
        s = ser.readline()
        arduino_serial_data = []
        arduino_serial_data.append([int(x) for x in s.split()])
        print arduino_serial_data  # debug dump of the parsed line
        sonar_data_msg = sonar_data()
        sonar_data_msg.sec = current_time.secs
        sonar_data_msg.nsec = current_time.nsecs
        # Only publish complete 6-value frames.
        if len(arduino_serial_data[0]) == 6:
            for x in range(6):
                sonar_data_msg.sonar_data[x] = arduino_serial_data[0][x]
        else:
            continue
        pub.publish(sonar_data_msg)
        #rospy.sleep(1.0)
if __name__ == '__main__':
    try:
        guts_sonar_ir_pub()
    except rospy.ROSInterruptException:
        # Normal shutdown path when ROS interrupts the node.
        pass
|
989,429 | b89f0de6ed2676cf80abc021238f8e10e3e6becc | #code
def numberofPaths(r, c):
    """Count monotone lattice paths from cell (1, 1) to cell (r, c),
    moving only down or right.

    Fixes the original's missing ':' after the base-case `if`, which made
    the file a SyntaxError.
    """
    if r == 1 or c == 1:
        return 1
    return numberofPaths(r - 1, c) + numberofPaths(r, c - 1)
# Read the number of test cases, then one "r c" pair per line.
test_case = int(input())
for i in range(test_case):
    r, c = list(map(int, input().split()))
    # Fix: the original computed the count and silently discarded it;
    # print the answer for each test case.
    print(numberofPaths(r, c))
|
989,430 | 3727d51bfc94664fddaa0ac7a0723f52094b9af0 | import json
import pika
import base64
rabbit_IP = '54.144.236.23'
rabbit_port= '5672'
username = 'parser'
password = 'parser'
queue_name = 'hello'
exchange_name = 'Eldar'
rk = 'yogev'
def sender(message):
    """Publish `message` to the configured RabbitMQ exchange/routing key.

    Fixes vs. the original: uses the module-level exchange_name/rk constants
    instead of re-hard-coding 'Eldar'/'yogev', drops the unused `counter`
    variable, and closes the connection instead of leaking it.
    """
    credentials = pika.PlainCredentials(username=username, password=password)
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=rabbit_IP, port=rabbit_port, credentials=credentials))
    try:
        channel = connection.channel()
        channel.queue_declare(queue=queue_name, durable=False)
        channel.basic_publish(exchange=exchange_name,
                              routing_key=rk,
                              body=message)
        print('send succuesfuly to yoge rk')
    finally:
        connection.close()
|
989,431 | d5fb9d4a5b7e06c574270cc8d90965689ec471b4 | #-*- coding:utf-8 -*-
import time,datetime
from django.shortcuts import get_object_or_404, render,render_to_response
from django.http import *
from django.http import HttpResponse,HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator,InvalidPage,EmptyPage,PageNotAnInteger
from django.views.generic.base import TemplateView
from django.views import generic
from django.db.models import Q
from django.db import connection
from django.template import RequestContext
from django.contrib.auth import authenticate,login,logout
from django.contrib.auth.models import User
from django.contrib import auth
from UUBlog.models import Category, Article,UserProfile
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
import common
import utility
# Avatar upload
@login_required()
def avatar(request,uid=-1):
    """Upload/replace the current user's avatar.

    The storage path is derived from the user id, zero-padded to 7 digits
    and split as 000/00/01 (3-digit dir / 2-digit dir / 2-digit file name).
    """
    userInfos=common.Users(request,-1)
    currentUserProfile=userInfos["currentuserprofile"]
    #000/00/01
    if utility.HasPostData(request,"okkk"):
        avatarPath=("%d" %currentUserProfile.user_id).rjust(7,"0")
        dir1=avatarPath[0:3]
        dir2=avatarPath[3:5]
        fileName=avatarPath[5:7]
        path="%s/%s/%s/" %("avatar",dir1,dir2)
        currentUserProfile.avatar=utility.SaveFile(request.FILES['avatar'],path,fileName)
        currentUserProfile.save()
        return HttpResponseRedirect('/')
    else:
        return utility.my_render_to_response(request,"pub/profile/avatar.html",locals())
# Basic profile information
@login_required()
def base(request,uid=-1):
    """Edit basic profile fields (nickname, real name, gender, birthday,
    birth city, residence city); GET renders the form."""
    userInfos=common.Users(request,-1)
    currentUserProfile=userInfos["currentuserprofile"]
    if utility.HasPostData(request,"ok"):
        currentUserProfile.nickname=utility.GetPostData(request,"nickname")
        currentUserProfile.realname=utility.GetPostData(request,"realname")
        currentUserProfile.gender=utility.GetPostData(request,"gender")
        currentUserProfile.birthday=utility.GetPostData(request,"birthday")
        currentUserProfile.birthcity=utility.GetPostData(request,"birthcity")
        currentUserProfile.residecity=utility.GetPostData(request,"residecity")
        currentUserProfile.save()
        return HttpResponseRedirect('/')
    else:
        return utility.my_render_to_response(request,"pub/profile/base.html",locals())
# Personal information
@login_required()
def info(request,uid=-1):
    """Edit personal profile fields (relationship status, interests, bio,
    signature, timezone offset, etc.); GET renders the form."""
    userInfos=common.Users(request,-1)
    currentUserProfile=userInfos["currentuserprofile"]
    if utility.HasPostData(request,"ok"):
        currentUserProfile.affectivestatus=utility.GetPostData(request,"affectivestatus")
        currentUserProfile.lookingfor=utility.GetPostData(request,"lookingfor")
        currentUserProfile.bloodtype=utility.GetPostData(request,"bloodtype")
        currentUserProfile.site=utility.GetPostData(request,"site")
        currentUserProfile.bio=utility.GetPostData(request,"bio")
        currentUserProfile.interest=utility.GetPostData(request,"interest")
        currentUserProfile.sightml=utility.GetPostData(request,"sightml")
        currentUserProfile.timeoffset=utility.GetPostData(request,"timeoffset")
        currentUserProfile.save()
        return HttpResponseRedirect('/')
    else:
        return utility.my_render_to_response(request,"pub/profile/info.html",locals())
# Contact details
@login_required()
def contact(request,uid=-1):
    """Edit contact fields (QQ, MSN, Taobao, e-mail, phones, address,
    zipcode); GET renders the form."""
    userInfos=common.Users(request,-1)
    currentUserProfile=userInfos["currentuserprofile"]
    if utility.HasPostData(request,"ok"):
        currentUserProfile.qq=utility.GetPostData(request,"qq")
        currentUserProfile.msn=utility.GetPostData(request,"msn")
        currentUserProfile.taobao=utility.GetPostData(request,"taobao")
        currentUserProfile.email=utility.GetPostData(request,"email")
        currentUserProfile.phone=utility.GetPostData(request,"phone")
        currentUserProfile.mobile=utility.GetPostData(request,"mobile")
        currentUserProfile.address=utility.GetPostData(request,"address")
        currentUserProfile.zipcode=utility.GetPostData(request,"zipcode")
        currentUserProfile.save()
        return HttpResponseRedirect('/')
    else:
        return utility.my_render_to_response(request,"pub/profile/contact.html",locals())
# Security settings
@login_required()
def security(request,uid=-1):
    """Security settings page.

    NOTE(review): the POST branch saves an avatar file, which looks
    copy-pasted from the avatar view — confirm the intended behavior.
    """
    userInfos=common.Users(request,-1)
    # Fix: read the profile via the "currentuserprofile" key like every
    # sibling view does; the original indexed userInfos[2].
    userProfile=userInfos["currentuserprofile"]
    if utility.HasPostData(request,"ok"):
        userProfile.avatar=utility.SaveFile(request.FILES['avatar'],'avatar/')
        userProfile.save()
        return HttpResponseRedirect('/')
    else:
        return utility.my_render_to_response(request,"pub/profile/security.html",locals())
# Follow/watch (by zhou)
@login_required()
def fork(request,uid=-1):
    """Stub 'follow' handler; the POST branch only emits a debug print."""
    if utility.HasPostData(request, "ok"):
        print 123456789
        return HttpResponseRedirect('/')
    else:
        return utility.my_render_to_response(request, "modules/profile.html", locals())
|
989,432 | e3f21a47c4836e552e7413596dd13972cfe3395a | import re
from datetime import datetime
sentences = [
    "Am 05.06.2018 findet ein cooles Event statt",
    "Please follow our invitation and visit us on 2018/14/05",
    "Im Monat 05/2018 ist oft gutes Wetter",
    "Der Lottogewinn war 10.000.000€ groß. Er wurde am 04.06.2018 ausgeschüttet",
    "Im Monat 01/2018 war in Sofia heftiger Smog",
    "Dein Flug in den Urlaub geht am 06.07.2018",
]
# Date patterns.  Fix: the original used a bare '.', which matches ANY
# character (including digits) and can therefore produce false matches;
# escape the dots and use raw strings for the regexes.
re1 = re.compile(r"[0-9]{2}\.[0-9]{2}\.[0-9]{4}")  # dd.mm.yyyy
re2 = re.compile(r"[0-9]{4}/[0-9]{2}/[0-9]{2}")    # yyyy/dd/mm
re3 = re.compile(r"[0-9]{2}/[0-9]{4}")             # mm/yyyy
for sentence in sentences:
    match1 = re1.search(sentence)
    match2 = re2.search(sentence)
    match3 = re3.search(sentence)
    if match1:
        print(match1[0])
    elif match2:
        print(match2[0])
    elif match3:
        print(match3[0])
print("---------------------andere Lösung----------------------------")
# Same patterns as above, with the dots escaped (a bare '.' matches any
# character) and raw strings; here each hit is parsed into a datetime.
re1 = re.compile(r"[0-9]{2}\.[0-9]{2}\.[0-9]{4}")
re2 = re.compile(r"[0-9]{4}/[0-9]{2}/[0-9]{2}")
re3 = re.compile(r"[0-9]{2}/[0-9]{4}")
dates = []
for sentence in sentences:
    match1 = re1.search(sentence)
    match2 = re2.search(sentence)
    match3 = re3.search(sentence)
    if match1:
        dates.append(datetime.strptime(match1[0], "%d.%m.%Y"))
    elif match2:
        dates.append(datetime.strptime(match2[0], "%Y/%d/%m"))
    elif match3:
        dates.append(datetime.strptime(match3[0], "%m/%Y"))
for d in dates:
    print(d.strftime("%d.%m.%Y"))
989,433 | b16540fe929e2f361b6db5010eb1bac0ea880cfe | import os, sys
class Student():
""" A Sample Student class """
def __init__(self, firstName, lastName):
self.firstName = firstName
self.lastName = lastName
print("student created:" + self.fullName,self.email)
@property
def fullName(self):
return f'{self.firstName} {self.lastName}'
@property
def email(self):
return f'{self.firstName}.{self.lastName}@gmail.com'
# Module-level demo instances (each prints a creation line on construction).
st1 = Student('Krishna', 'Kummari')
st2 = Student('watsan', 'kory')
|
989,434 | 31393275e32cb01ccd730be038701d70b93e3523 | import challenge10, challenge11, challenge18
key = nonce = None
counter = 0
def do():
    """Set up the challenge: decrypt the ECB-encrypted file with the known
    key, then re-encrypt the plaintext under CTR with a random key/nonce
    (stored in module globals for reuse by edit())."""
    global key, nonce
    if key is None:
        key = challenge11.getRandomBytes(16)
    if nonce is None:
        nonce = challenge11.getRandomBytes(16)
    data = open('25.txt').read().decode('base64')  # Python 2 base64 codec
    pt = challenge10.decryptECB(16, data, challenge18.key)
    return challenge18.encryptCTR(16, pt, key, nonce)
# need a copy to maintain state
def encryptCTR(blockSize, text, key, nonce):
    """CTR-mode encrypt that keeps the block counter in a module global, so
    successive calls continue the keystream where the previous one stopped."""
    global counter
    ct = ''
    for i in range(0, len(text), blockSize):
        # keystream block = ECB(key, nonce || counter)
        keystream = challenge10.encryptECB(16, nonce + challenge18.parseCounter(counter), key)
        block = text[i:i+blockSize]
        if len(block) == blockSize:
            ct += challenge10.xor(keystream, block)
        else:
            # final partial block: truncate the keystream to match
            ct += challenge10.xor(keystream[:len(block)], block)
        counter += 1
    return ct
def edit(ct, offset, pt):
    """The CTR 'seek' oracle: re-encrypt pt into the ciphertext at offset.

    Encrypting `offset` dummy bytes first advances the shared counter so the
    real encryption starts at the right keystream position.
    NOTE(review): the tail ct[offset+len(pt):] is discarded, so this is only
    correct when pt extends to the end of ct (as in the attack below) — confirm.
    """
    encryptCTR(16, '0'* offset, key, nonce)
    return ct[:offset] + encryptCTR(16, pt, key, nonce)
# The attack: edit() reuses the same keystream positions, so feeding the
# ciphertext back through edit() at offset 0 XORs it with its own keystream,
# recovering the plaintext.
ct = do()
pt = edit(ct, 0, ct)
print pt
|
989,435 | 06f3eaf9e41673e2fe5e3c626394c4610d66136d | '''
Created on Aug 30, 2017
@author: arnon
'''
import subprocess as sp
import os
def cmdargs(namespace=None):
    """Parse this tool's command line (--file: path to the args data file)."""
    import argparse
    program = os.path.basename(__file__).rpartition('.')[0]
    parser = argparse.ArgumentParser(prog=program, description="runs EventorAgent object.")
    parser.add_argument('--file', type=str, help="args data file.")
    return parser.parse_args(namespace=namespace)
def runcmd(file):
    """Launch eventor_agent.py (from the sibling eventor/bin directory) in
    'rec' mode with the given args file, then print the CompletedProcess."""
    heredir = os.path.dirname(os.path.abspath(__file__))
    projdir = os.path.dirname(heredir)
    prog = os.path.join(projdir, 'eventor', 'bin', 'eventor_agent.py')
    cmd = [prog, 'rec', '--file', file]
    proc = sp.run(cmd)
    print(proc)
# CLI entry point: forward --file to the agent runner.
if __name__ == '__main__':
    args = cmdargs()
    runcmd(file=args.file)
|
989,436 | 51ae5a8f24b13cbd941e1cf0049edd6d9ba200c5 | # -*- coding: utf-8 -*-
def pico(lista):
    """Return 'S' if the list has a peak/rising start per the original rules,
    otherwise 'N'.

    For exactly 3 elements: 'S' iff the middle element is the largest.
    For more than 3: 'S' iff the list rises over its first three elements,
    or rises at the start and falls at the end; 'N' otherwise.
    Lists shorter than 3 fall through and return None (original behavior).

    Fix: the original read the global `n` set by the surrounding script;
    derive the length from the argument so the function is self-contained.
    """
    n = len(lista)
    if n == 3:
        if lista[0] < lista[1] and lista[2] < lista[1]:
            return 'S'
        else:
            return 'N'
    if n > 3:
        if lista[0] < lista[1] and lista[1] < lista[2]:
            return 'S'
        if lista[0] < lista[1] and lista[n-1] < lista[n-2]:
            return 'S'
        else:
            return 'N'
# Read n values from the user and report whether the list has a "peak".
n = (int(input('Digite a quantidade de elementos da lista: ')))
a=[]
for i in range (0,n,1):
    a.append(int(input('Digite o numero: ')))
print (pico(a))
|
989,437 | c1690ee9a4a28457ba5df6e56c64b3bd6592c5ff | # this example will give us an idea about
# the importance of logical operators (and,
# or, not) and how we use them
born = int(input("What year were you born? "))
if born >= 1945 and born <= 1964:
print("You are a baby boomer!")
elif born >= 1965 and born <= 1979:
print("You are from generation X")
elif born >= 1980 and born <= 1994:
print("You are a millennial")
elif born >= 1995:
print("You're from generation Z")
else:
print("I'm not sure what generation you're from")
print("But you're awesome!") |
989,438 | fe3b0f619051cdc6145d97b5d4a1a80444ef80b5 |
#pyhelper.py import file. Acts as a wrapper around common FinTech code
#Currently has 4 classes - Finhelper, SQLhelper, PVhelper, and APIhelper
#Written by toddshev, if anything is broken or you think anything can be added, let me know
#Otherwise, hopefully it's helpful
"""
'''
Fin methods:
load_and_clean, get_cov, get_corr, get_beta, get_sharpe, get_volatility,
allocate, get_cum_returns, drop (code hint), get_rolling
SQL methods to return code hints:
create, select, update, delete, insert, join, agg, sub
connect (returns engine)
'''
"""
import os
import json
from pathlib import Path
from dotenv import load_dotenv
import requests
import numpy as np
import pandas as pd
from pandas.io.json import json_normalize
#import alpaca_trade_api as tradeapi
import panel as pn
from panel.interact import interact
from panel import widgets
import plotly.express as px
pn.extension('plotly')
import hvplot.pandas
from sqlalchemy import create_engine
#to be used with pandas
#work in progress
class Finhelper: #fin helper
    """Convenience wrapper around common pandas finance computations."""
    def __init__(self):
        pass
    def load_and_clean(self, in_path):
        """
        Will read in csv from path using Path method, index first column, parse dates,
        drop NA's, drop duplicates, and sort by the index. Returns dataframe
        """
        in_path = Path(in_path)
        try:
            # NOTE(review): infer_datetime_format is deprecated in pandas 2.x;
            # harmless today, remove once the project is on pandas >= 2.0.
            df = pd.read_csv(in_path, index_col=0, parse_dates=True, infer_datetime_format=True)
        except Exception:
            # BUG FIX: the old code printed and fell through to a NameError on
            # the unbound `df`; now the original error propagates after the hint.
            print("Could not read csv file. Please check the path")
            raise
        # attempt to clean df
        df.dropna(inplace=True)
        df.drop_duplicates(inplace=True)
        # BUG FIX: sort_index() is not in-place; the result was discarded before.
        df = df.sort_index()
        return df
    def get_cov(self, df, tick, ind):
        """Return covariance between columns *tick* and *ind* of *df*.

        BUG FIX: the original definition lacked ``self``, so calling it as an
        instance method bound the dataframe to the wrong parameter.
        """
        return df[tick].cov(df[ind])
    def get_corr(self, df):
        """Return the pairwise correlation matrix of *df*."""
        return df.corr()
    def get_beta(self, df, tick, ind):
        """
        Need to supply dataframe, ticker, and index to compare it to
        Calls get_cov method
        """
        # BUG FIX: get_cov is a method; the bare call raised NameError.
        cov = self.get_cov(df, tick, ind)
        var = df[ind].var()
        return cov / var
    def get_sharpe(self, df, df_type="returns"):
        """
        Requires dataframe. If no df_type or "returns" provided, will assume DF has percent changes
        If "price" is passed, will calculate the pct change prior to returning sharpe ratios
        """
        if df_type == "price":
            df = df.pct_change()
        # Annualize with 252 trading days.
        return (df.mean() * 252) / (df.std() * np.sqrt(252))
    def get_volatility(self, df):
        """Return annualized standard deviation per column, sorted ascending."""
        df = df.std() * np.sqrt(252)
        df.sort_values(inplace=True)
        return df
    def allocate(self, weights, df):
        """
        Must pass in weights that match list of assets, then dataframe
        """
        return df.dot(weights)
    def get_cum_returns(self, df, init_inv=1):
        """
        Dataframe must be daily returns. Optional investment amount as 2nd argument
        """
        return ((1 + df).cumprod()) * init_inv
    def drop(self, df, column_list):
        """
        Requires dataframe and list of columns you wish to remove
        """
        df.drop(columns=column_list, inplace=True)
        return df
    def get_rolling(self, df, days):
        """Return the *days*-wide rolling mean of *df*."""
        return df.rolling(window=days).mean()
class SQLhelper:
    """Prints SQL statement templates as code hints; `connect` returns an engine."""
    def __init__(self):
        pass
    def create(self):
        """Print a CREATE TABLE template."""
        print("CREATE TABLE <tablename> ( \n"
              " <field1> SERIAL PRIMARY KEY,\n"
              " <field2> INT,\n"
              " <field3> DOUBLE PRECISION,\n"
              " <field4> FLOAT(10)\n"
              ");"
              )
    def select(self):
        """Print a SELECT template."""
        print("SELECT <field1>, <field2>, <field3> (or *)\n"
              "FROM <table>\n"
              "WHERE condition (i.e. <field1> > 100)\n"
              "ORDER BY <field1> ASC;"
              )
    def update(self):
        """Print an UPDATE template."""
        print("UPDATE <table>\n"
              "SET <field> = newvalue\n"
              "WHERE <field> = oldvalue;"
              )
    def delete(self):
        """Print a DELETE template."""
        print("DELETE FROM table\n"
              "WHERE <field1> = value;"
              )
    def insert(self):
        """Print an INSERT template."""
        print("INSERT INTO table\n"
              " (<field1>,<field2>,<field3>,<field4>)\n"
              "VALUES\n"
              "(<field1Val>,<field2Val>,<field3Val>,<field4Val>);"
              )
    def join(self):
        """Print a JOIN template."""
        print("SELECT tbl1.<field1>, tbl1.<field2>, tbl2.<field1>, tbl2.<field2>\n"
              "FROM tbl1 AS alias\n"
              "INNER/LEFT/RIGHT/FULL OUTER/CROSS JOIN tbl2 as alias2 ON alias.<field1> = alias2.<field2>;"
              )
    def agg(self):
        """Print aggregation templates (COUNT / GROUP BY)."""
        print("SELECT COUNT(<field>) FROM table;\n\n"
              "SELECT <field1>, COUNT(<field2>) AS \"Total Field2s\"\n"
              "GROUP BY <field1>;"
              )
    def sub(self):
        """Print a subquery template."""
        print("SELECT * \n"
              "FROM table \n"
              "WHERE <field2> IN\n"
              "(\n"
              " SELECT <field1>\n"
              " FROM table2\n"
              " WHERE <field2> = value\n"
              ");"
              )
    def connect(self, db_name):
        """Return a SQLAlchemy engine for the local postgres database *db_name*.

        BUG FIX: on failure the old code printed a hint and then raised
        NameError by returning the unbound `engine`; now the original
        exception propagates after the hint.
        """
        try:
            engine = create_engine(f"postgresql://postgres:postgres@localhost:5432/{db_name}")
        except Exception:
            print(f"Issue connecting to {db_name}")
            raise
        return engine
class PVhelper:
    """Plotting helpers wrapping hvplot and plotly express."""
    def __init__(self):
        pass
    def hvscatter(self, df, x, y, title="Scatter Plot"):
        """Return an hvplot scatter of *df* using columns *x* and *y*."""
        return df.hvplot.scatter(
            x=x,
            y=y,
            title=title
        )
    def pxscatter(self, df, x, y, title="Scatter Plot"):
        """Return a plotly express scatter of *df* using columns *x* and *y*."""
        return px.scatter(
            df,
            x=x,
            y=y,
            title=title
        )
    def mapbox(self, df, lat, lon, keyname="mapbox"):
        """Return a plotly scatter_mapbox of *df*; *keyname* is the .env token key.

        BUG FIX: the token was previously looked up under the hard-coded
        name "mapbox", silently ignoring the *keyname* argument. The
        default preserves the old behavior.
        """
        load_dotenv()
        mb_api = os.getenv(keyname)
        if not type(mb_api) is str:
            raise TypeError("Could not find mapbox key")
        else:
            try:
                px.set_mapbox_access_token(mb_api)
            except Exception:
                print("Could not set mapbox key")
            finally: #in case it will still load
                scatter_map = px.scatter_mapbox(
                    df,
                    lat=lat,
                    lon=lon
                )
                return scatter_map
class APIhelper:
    """Convenience wrapper around JSON HTTP APIs and the Alpaca trade API."""
    def __init__(self):
        pass
        #self.response_data = {}
    def get(self, url, **kwargs):
        """GET *url* (JSON format requested) and return the payload as a DataFrame.

        Keyword arguments are forwarded as query-string parameters. If JSON
        decoding fails, the raw Response is wrapped in the DataFrame instead.
        """
        try:
            url += f"?format=json"
            # BUG FIX: `type(kwargs) == None` was always False (type() never
            # returns None); test for "no kwargs" directly.
            if not kwargs:
                response_data = requests.get(url).json()
            else:
                response_data = requests.get(url, params=kwargs).json()
        except Exception:
            if not kwargs:
                response_data = requests.get(url)
            else:
                response_data = requests.get(url, params=kwargs)
        # pd.json_normalize(response_data)
        df = pd.DataFrame(response_data)
        return df
    def get_row(self, df, column, val):
        """Return the rows of *df* whose *column* equals *val*.

        BUG FIX: the filtered frame was computed and then discarded;
        it is now returned.
        """
        return df.loc[df[column] == val]
    # def get_rows(self, df, column, *args):
    #     for item in args:
    #         df = df.append(df.loc[(df[column] == item)])
    def view(self, data):
        """Pretty-print *data* as indented JSON."""
        print(json.dumps(data, indent=4))
    def alpaca_create(self, keyname="ALPACA_API_KEY", secret="ALPACA_SECRET_KEY"):
        """
        Default key names are "ALPACA_API_KEY" and "ALPACA_SECRET_KEY".
        If your .env differs, enter those key names as strings
        """
        aak = os.getenv(keyname)
        ask = os.getenv(secret)
        # BUG FIX: `type(aak) is not str | type(aak) is not str` mis-parsed
        # (`|` binds tighter than `is not`) and never checked `ask` at all.
        if not isinstance(aak, str) or not isinstance(ask, str):
            raise Exception("Could not load API or Secret Key")
        # NOTE(review): `tradeapi` relies on the commented-out
        # `import alpaca_trade_api as tradeapi` at the top of the file;
        # re-enable that import before using this method.
        alpaca = tradeapi.REST(
            aak,
            ask,
            api_version="v2"
        )
        self.alpaca_api = alpaca
        return alpaca
    def get_alpaca_data(self, ticker_list, start, end, timeframe="1D"):
        """
        Requires you to run alpaca_create first, dates should be entered as 'yy-mm-dd'
        Default timeframe is '1D', you may change this if desired
        """
        s = pd.Timestamp(start, tz="America/New_York").isoformat()
        e = pd.Timestamp(end, tz="America/New_York").isoformat()
        # BUG FIX: `api` was undefined; use the client stored by alpaca_create.
        df = self.alpaca_api.get_barset(
            ticker_list,
            timeframe,
            start=s,
            end=e
        ).df
        return df
|
989,439 | 25f066caa50e440c173f3f308e8fb3039354b989 | import socket
import sys
import os
import pandas as pd
import datetime
from glob import glob
from threading import Thread
from time import sleep
from pickle import PicklingError
import shutil
pairs = {}
box = []
Lock = False
# Multithreaded Python server : TCP Server Socket Thread Pool
class ServerThread(Thread):
def __init__(s, ip,port,num):
Thread.__init__(s)
s.ip = ip
s.port = port
s.num = num
print ("[+] New server socket thread started for " + ip + ":" + str(port))
def enc(s,data):
return str(data).encode()
def dec(s,data):
return data.decode()
def get_empty_user(s,):
return {'USERNAME':None,
'PASSWORD':None,
'TIMELINE':pd.DataFrame(columns=['time','text','Flag']),
'Friends':set(),
'Limited Friends':set(),
'Rcvd Requests':set(),
'isOnline':False,
'Chat':{}
}
def sign_up(s,client):
path = '../client_data/users/'
USERNAME = s.dec(client.recv(512))
while os.path.exists(path+USERNAME): # is valid??
client.send(s.enc('EXISTS'))
USERNAME = s.dec(client.recv(512))
client.send(s.enc('YES'))
PASSWORD = s.dec(client.recv(512))
while len(PASSWORD)<3: # is valid??
client.send(s.enc('LESS'))
PASSWORD = s.dec(client.recv(512))
client.send(s.enc('YES'))
dct = s.get_empty_user()
dct.update({'USERNAME':USERNAME,
'PASSWORD':PASSWORD})
pd.to_pickle(dct,path+USERNAME)
def log_in(s,client):
path = '../client_data/users/'
USERNAME = s.dec(client.recv(512))
while not os.path.exists(path+USERNAME): # is valid??
client.send(s.enc('NO'))
USERNAME = s.dec(client.recv(512))
client.send(s.enc('YES'))
passchk = pd.read_pickle(path+USERNAME)['PASSWORD']
PASSWORD = s.dec(client.recv(512))
while PASSWORD!=passchk: # is valid??
client.send(s.enc('NO'))
PASSWORD = s.dec(client.recv(512))
client.send(s.enc('YES'))
dct = pd.read_pickle(path+USERNAME)
dct['isOnline'] = True
pairs.update({USERNAME:s.num})
pd.to_pickle(dct,path+USERNAME)
return USERNAME
def log_out(s,USERNAME, client):
path = '../client_data/users/'
dct = pd.read_pickle(path+USERNAME)
dct['isOnline'] = False
pd.to_pickle(dct, path+USERNAME)
def post(s,USERNAME, client):
flags = {1:'isPublic', 2:'isFriends', 3:'isLimited', 4:'isPrivate'}
path = '../client_data/users/'
status = s.dec(client.recv(1024))
client.send(s.enc('OK'))
Flag = int(s.dec(client.recv(32)))
dct = pd.read_pickle(path+USERNAME)
ind = len(dct['TIMELINE'])
dct['TIMELINE'].loc[ind, 'time'] = str(datetime.datetime.now())
dct['TIMELINE'].loc[ind, 'text'] = status
dct['TIMELINE'].loc[ind, 'Flag'] = flags[Flag]
pd.to_pickle(dct, path+USERNAME)
client.send(s.enc('YES'))
def status_chk(s,USERNAME, client, flag='isPrivate'):
path = '../client_data/users/'
dct = pd.read_pickle(path+USERNAME)['TIMELINE']
ln = len(dct)
N = min(10, ln)
client.send(s.enc(N))
print("N sent for status_chk",N,'of',ln)
if N!=0:
dct = dct.iloc[ln-N:ln]
for ts, status, flag in zip(dct['time'], dct['text'], dct['Flag']):
recv = s.dec(client.recv(32))
if recv=='OK':
client.send(s.enc(ts+' --- '+status+' --- '+flag))
def view_tl(s,USERNAME, client):
path = '../client_data/users/'
usrs = glob(path+'*')
if '\\' in usrs[0]:
SPCHR = '\\'
elif '/' in usrs[0]:
SPCHR = '/'
names = [usr.split(SPCHR)[-1] for usr in usrs]
name = s.dec(client.recv(64))
if not name in names:
client.send(s.enc('NULL'))
print('NULL sent')
return
name_dct = pd.read_pickle(path+name)
tl = name_dct['TIMELINE']
flags = ['isPublic']
if USERNAME in name_dct['Friends']:
flags.append('isFriends')
if USERNAME in name_dct['Limited Friends']:
flags.append('isLimited')
if USERNAME == name_dct['USERNAME']:
flags.append('isLimited')
flags.append('isFriends')
flags.append('isPrivate')
client.send(s.enc('OKAY'))
print(USERNAME,'has',flags,'flags')
dct = tl[tl.Flag.isin(flags)]
ln = len(dct)
N = min(10, ln)
isReady = s.dec(client.recv(32))
if isReady=='Ready':
client.send(s.enc(N))
print("N sent for status_chk",N,'of',ln)
if N!=0:
dct = dct.iloc[ln-N:N]
for ts, status in zip(dct['time'], dct['text']):
recv = s.dec(client.recv(32))
if recv=='OK':
client.send(s.enc(ts+' --- '+status))
else:
print('Client not ready')
def send_feed(s, USERNAME, client):
path = '../client_data/users/'
usrs = glob(path+'*')
if '\\' in usrs[0]:
SPCHR = '\\'
elif '/' in usrs[0]:
SPCHR = '/'
names = [usr.split(SPCHR)[-1] for usr in usrs]
feed = []
for name in names:
name_dct = pd.read_pickle(path+name)
tl = name_dct['TIMELINE']
tl['usr'] = name
flags = ['isPublic']
if USERNAME in name_dct['Friends']:
flags.append('isFriends')
if USERNAME in name_dct['Limited Friends']:
flags.append('isLimited')
if USERNAME == name_dct['USERNAME']:
flags.append('isLimited')
flags.append('isFriends')
flags.append('isPrivate')
feed.append(tl[tl.Flag.isin(flags)])
feed = pd.concat(feed).sort_values('time')
ln = len(feed)
N = min(10, ln)
client.send(s.enc(N))
print("N sent for status_chk",N,'of',ln)
if N!=0:
dct = feed.iloc[ln-N:N]
for ts, usr, status in zip(dct['time'], dct['usr'], dct['text']):
recv = s.dec(client.recv(32))
if recv=='OK':
client.send(s.enc(ts+' --- : '+usr+' : '+status))
def search_usr(s,USERNAME, client):
path = '../client_data/users/'
dct = pd.read_pickle(path+USERNAME)
usrs = glob(path+'*')
if '\\' in usrs[0]:
SPCHR = '\\'
elif '/' in usrs[0]:
SPCHR = '/'
N = len(usrs)
print(usrs)
client.send(s.enc(N)) # We assume N won't be zero
for usr in usrs:
recv = s.dec(client.recv(32))
if recv=='OK':
name = usr.split(SPCHR)[-1]
isFriend = 'You' if name==USERNAME else\
'Friend' if name in dct['Friends'] else\
'Request Received' if name in dct['Rcvd Requests'] else\
'Add Friend'
client.send(s.enc(name+'---'+isFriend))
############################### Friendship ############################
def add_frnd(s,USERNAME, client):
path = '../client_data/users/'
dct = pd.read_pickle(path+USERNAME)
usrs = glob(path+'*')
if '\\' in usrs[0]:
SPCHR = '\\'
elif '/' in usrs[0]:
SPCHR = '/'
names = [usr.split(SPCHR)[-1] for usr in usrs]
name = s.dec(client.recv(64))
if not name in names:
client.send(s.enc('NULL'))
elif name in dct['Friends']:
client.send(s.enc('ISFRND'))
else:
dct = pd.read_pickle(path+name)
dct['Rcvd Requests'].add(USERNAME)
pd.to_pickle(dct, path+name)
client.send(s.enc('OKAY'))
def dlt_frnd(s,USERNAME, client):
path = '../client_data/users/'
dct = pd.read_pickle(path+USERNAME)
usrs = glob(path+'*')
if '\\' in usrs[0]:
SPCHR = '\\'
elif '/' in usrs[0]:
SPCHR = '/'
names = [usr.split(SPCHR)[-1] for usr in usrs]
name = s.dec(client.recv(64))
if not name in names:
client.send(s.enc('NULL'))
elif name not in dct['Friends']:
client.send(s.enc('ISNOTFRND'))
else:
frnd_dct = pd.read_pickle(path+name)
dct['Friends'].remove(name)
frnd_dct['Friends'].remove(USERNAME)
dct['Chat'].pop(name)
frnd_dct['Chat'].pop(USERNAME)
pd.to_pickle(dct, path+USERNAME)
pd.to_pickle(frnd_dct, path+name)
client.send(s.enc('OKAY'))
def add_to_limited(s,USERNAME, client):
path = '../client_data/users/'
dct = pd.read_pickle(path+USERNAME)
usrs = glob(path+'*')
if '\\' in usrs[0]:
SPCHR = '\\'
elif '/' in usrs[0]:
SPCHR = '/'
names = [usr.split(SPCHR)[-1] for usr in usrs]
name = s.dec(client.recv(64))
if not name in names:
client.send(s.enc('NULL'))
elif name not in dct['Friends']:
client.send(s.enc('ISNOTFRND'))
else:
dct['Limited Friends'].add(name)
pd.to_pickle(dct, path+USERNAME)
client.send(s.enc('OKAY'))
def f_of_f(s,USERNAME, client):
path = '../client_data/users/'
dct = pd.read_pickle(path+USERNAME)
usrs = glob(path+'*')
if '\\' in usrs[0]:
SPCHR = '\\'
elif '/' in usrs[0]:
SPCHR = '/'
names = [usr.split(SPCHR)[-1] for usr in usrs]
name = s.dec(client.recv(64))
if not name in names:
client.send(s.enc('NULL'))
elif name not in dct['Friends']:
client.send(s.enc('ISNOTFRND'))
else:
client.send(s.enc('OKAY'))
isOk = s.dec(client.recv(32))
frdct = pd.read_pickle(path+name)
N = len(frdct['Friends'])
if isOk == 'OKAY':
client.send(s.enc(N))
for each in frdct['Friends']:
isOk = s.dec(client.recv(32))
if isOk == 'OK':
flag = 'Friend' if each in dct['Friends'] else 'You' if each==USERNAME else 'Add Friend'
client.send(s.enc(each+' --- '+flag))
def del_from_limited(s,USERNAME, client):
path = '../client_data/users/'
dct = pd.read_pickle(path+USERNAME)
usrs = glob(path+'*')
if '\\' in usrs[0]:
SPCHR = '\\'
elif '/' in usrs[0]:
SPCHR = '/'
names = [usr.split(SPCHR)[-1] for usr in usrs]
name = s.dec(client.recv(64))
if not name in names:
client.send(s.enc('NULL'))
elif name not in dct['Friends']:
client.send(s.enc('ISNOTFRND'))
elif name not in dct['Limited Friends']:
clinet.send(s.enc('ISNOTSPL'))
else:
print('Limited are',dct['Limited Friends'])
dct['Limited Friends'].remove(name)
print('Limited are',dct['Limited Friends'])
pd.to_pickle(dct, path+USERNAME)
client.send(s.enc('OKAY'))
def acc_rqst(s,USERNAME, client):
path = '../client_data/users/'
dct = pd.read_pickle(path+USERNAME)
usrs = glob(path+'*')
if '\\' in usrs[0]:
SPCHR = '\\'
elif '/' in usrs[0]:
SPCHR = '/'
names = [usr.split(SPCHR)[-1] for usr in usrs]
name = s.dec(client.recv(64))
if not name in names:
client.send(s.enc('NULL'))
elif name in dct['Friends']:
client.send(s.enc('FRND'))
elif name not in dct['Rcvd Requests']:
client.send('NORQST')
else:
frnd_dct = pd.read_pickle(path+name)
dct['Rcvd Requests'].remove(name)
dct['Friends'].add(name)
frnd_dct['Friends'].add(USERNAME)
dct['Chat'][name] = pd.DataFrame(columns=['time', 'usr', 'msg', 'isSeen'])
frnd_dct['Chat'][USERNAME] = pd.DataFrame(columns=['time', 'usr', 'msg', 'isSeen'])
pd.to_pickle(dct, path+USERNAME)
pd.to_pickle(frnd_dct, path+name)
client.send(s.enc('OKAY'))
def get_friends(s, USERNAME, client):
path = '../client_data/users/'
dct = pd.read_pickle(path+USERNAME)
frnds = dct['Friends']
N = len(frnds)
client.send(s.enc(N))
for frnd in frnds:
recv = s.dec(client.recv(32))
if recv=='OK':
flag = 'Friend' if frnd not in dct['Limited Friends'] else 'Special Friend'
client.send(s.enc(frnd+' --- '+flag))
#######################################################################
############## Chat and stuff #########################################
def chk_act(s,USERNAME, client):
print('entered ')
path = '../client_data/users/'
dct = pd.read_pickle(path+USERNAME)
usrs = glob(path+'*')
if '\\' in usrs[0]:
SPCHR = '\\'
elif '/' in usrs[0]:
SPCHR = '/'
name_dict = {usr.split(SPCHR)[-1]:usr for usr in usrs}
frnds = dct['Friends']
online = []
for frnd in frnds:
isOnline = pd.read_pickle(name_dict[frnd])['isOnline']
if isOnline:
online.append(frnd)
print('sending', len(online))
client.send(s.enc(len(online)))
for each in online:
isOk = s.dec(client.recv(32))
if isOk == 'OK':
client.send(s.enc(each))
def send_response(s,USERNAME, name, client):
path = '../client_data/users/'
dct = pd.read_pickle(path+USERNAME)
usrdf = dct['Chat'][name]
ln = len(usrdf)
N = min(10, ln)
client.send(s.enc(N))
if N!=0:
usrdf = usrdf.iloc[ln-N:ln]
i = ln-N
for ts, usr, msg, seen in zip(usrdf['time'], usrdf['usr'], usrdf['msg'], usrdf['isSeen']):
isOk = s.dec(client.recv(32))
if isOk == 'OK':
client.send(s.enc(ts+'---:'+usr+' : '+msg+': '+seen))
frnd = pd.read_pickle(path+name)
frnd['Chat'][USERNAME]['isSeen'] = 'Seen'
pd.to_pickle(frnd, path+name)
return client
def save_it(s, myself, myFriend, ts, mymsg, frmsg):
path = '../client_data/users/'
my = pd.read_pickle(path+myself)
fr = pd.read_pickle(path+myFriend)
my_l = len(my['Chat'][myFriend])
fr_l = len(fr['Chat'][myself])
if mymsg:
my['Chat'][myFriend].loc[my_l, ['time','usr','msg','isSeen']] = [ts, 'Me', mymsg, 'Seen']
fr['Chat'][myself].loc[fr_l, ['time','usr','msg','isSeen']] = [ts, myself, mymsg, 'Seen']
elif frmsg:
fr['Chat'][myself].loc[fr_l, ['time','usr','msg','isSeen']] = [ts, 'Me', frmsg,'Seen']
my['Chat'][myFriend].loc[my_l, ['time','usr','msg','isSeen']] = [ts, myself, frmsg, 'Seen']
pd.to_pickle(my, path+myself)
pd.to_pickle(fr, path+myFriend)
def live_chat(s, USERNAME, client):
path = '../client_data/users/'
dct = pd.read_pickle(path+USERNAME)
usrs = glob(path+'*')
if '\\' in usrs[0]:
SPCHR = '\\'
elif '/' in usrs[0]:
SPCHR = '/'
names = [usr.split(SPCHR)[-1] for usr in usrs]
name = s.dec(client.recv(64))
if not name in names:
client.send(s.enc('NULL'))
elif not name in dct['Friends']:
client.send(s.enc('NOFRND'))
else:
other = clients[pairs[name]]
if not box[pairs[USERNAME]] is None:
client.send(s.enc('RECV'))
print('Othe connection came in')
isOK = s.dec(client.recv(32))
if isOK == 'OK':
client.send(s.enc(box[pairs[USERNAME]]))
ts = str(datetime.datetime.now())
s.save_it(USERNAME, name, ts, None, box[pairs[USERNAME]])
box[pairs[USERNAME]] = None
print('Started middle man')
s.middle_man(client, other, USERNAME, name)
else:
client.send(s.enc('SEND'))
resp = s.dec(client.recv(1024))
print('Init saved. should wait')
box[pairs[name]] = resp
pd.to_pickle(True, 'lock.file')
while pd.read_pickle('lock.file'):
sleep(1)
print('waiting')
def middle_man(s, s1, s2, USERNAME, name):
print('Middle man: Hi')
resp = s.dec(s1.recv(1024))
while resp != '@end@':
s2.send(s.enc(resp))
ts = str(datetime.datetime.now())
s.save_it(USERNAME, name, ts, resp, None)
resp = s.dec(s2.recv(1024))
if resp == '@end@':
break
s1.send(s.enc(resp))
ts = str(datetime.datetime.now())
s.save_it(USERNAME, name, ts, None, resp)
resp = s.dec(s1.recv(1024))
s1.send(s.enc('@end@'))
s2.send(s.enc('@end@'))
pd.to_pickle(False, 'lock.file')
def init_chat(s,USERNAME, client):
path = '../client_data/users/'
dct = pd.read_pickle(path+USERNAME)
usrs = glob(path+'*')
if '\\' in usrs[0]:
SPCHR = '\\'
elif '/' in usrs[0]:
SPCHR = '/'
names = [usr.split(SPCHR)[-1] for usr in usrs]
name = s.dec(client.recv(64))
if not name in names:
client.send(s.enc('NULL'))
elif not name in dct['Friends']:
client.send(s.enc('NOFRND'))
else:
client.send(s.enc('OKAY'))
isReady = s.dec(client.recv(32))
if isReady == 'Ready':
print('Client ready. Sending chat')
client = s.send_response(USERNAME, name, client)
resp = s.dec(client.recv(64))
client.send(s.enc('OK'))
while resp != 'FIN':
if resp == 'REFRESH':
isReady = s.dec(client.recv(32))
if isReady == 'Ready':
client = s.send_response(USERNAME, name, client)
elif resp == 'SNDMSG':
msg = s.dec(client.recv(512))
dct = pd.read_pickle(path+USERNAME)
frnd_dct = pd.read_pickle(path+name)
l_dct = len(dct['Chat'][name])
l_frnd_dct = len(frnd_dct['Chat'][USERNAME])
ts = str(datetime.datetime.now())
print('Current len of chat', l_dct, l_frnd_dct)
dct['Chat'][name].loc[l_dct, ['time','usr','msg','isSeen']] = [ts, 'Me', msg, 'NotSeen']
frnd_dct['Chat'][USERNAME].loc[l_frnd_dct, ['time','usr','msg','isSeen']] = [ts, USERNAME, msg, 'NotSeen']
pd.to_pickle(dct,path+USERNAME)
pd.to_pickle(frnd_dct,path+name)
client.send(s.enc('OK'))
resp = s.dec(client.recv(64))
client.send(s.enc('OK'))
def sync(s, USERNAME, client):
print('Starting sync')
path = '../client_data/users/'
offpath = '../client_data/users_offline/'
isOk = s.dec(client.recv(32))
print(isOk, 'received')
### send
if isOk == 'OK':
with open(path+USERNAME, 'rb') as f:
while True:
data = f.read(1024)
print(data)
if not data:
break
client.send(data)
print('sent')
isOk = s.dec(client.recv(32))
if isOk != 'OK':
break
client.send(s.enc('END'))
else:
print('Sync aborted')
### recv
resp = s.dec(client.recv(32))
if resp == 'SYNC2':
client.send(s.enc('BEGIN'))
with open(offpath+USERNAME+'_C', 'wb') as f:
while True:
print('Grtting data')
data = client.recv(1024)
print(data)
if data == b'END':
print('End received')
client.send(s.enc('END'))
break
f.write(data)
client.send(s.enc('OK'))
print('OK sent')
try:
pd.read_pickle(offpath+USERNAME+'_C')
s.fixdiff(USERNAME)
except PicklingError:
print('FIle currupted. Abort')
def fixdiff(s, USERNAME):
path = '../client_data/users/'
offpath = '../client_data/users_offline/'
main = pd.read_pickle(path+USERNAME)
cdict = pd.read_pickle(offpath+USERNAME+'_C')
for usr in main['Chat']:
frnd = pd.read_pickle(path+usr)
main['Chat'][usr] = pd.concat([main['Chat'][usr], cdict['Chat'][usr]]).sort_values('time').drop_duplicates()
df = pd.concat([main['Chat'][usr], cdict['Chat'][usr]]).sort_values('time').drop_duplicates()
df['usr'] = df['usr'].replace({'Me':USERNAME}).replace({usr:'Me'})
frnd['Chat'][USERNAME] = df
pd.to_pickle(frnd, path+usr)
pd.to_pickle(main, path+USERNAME)
print('Diff fixed')
######### Listening
def run(s):
USERNAME = None
while True:
client = clients[s.num]
client.settimeout(3000)
print('Receiving main action now from', s.ip, s.port, s.num)
action = client.recv(512).decode()
if action == 'SIGN_UP':
client.send(s.enc('OK'))
s.sign_up(client)
elif action == 'LOG_IN':
client.send(s.enc('OK'))
USERNAME = s.log_in(client)
elif action == 'LOG_OUT':
client.send(s.enc('OK'))
s.log_out(USERNAME, client)
USERNAME = None
elif action == 'POST':
client.send(s.enc('OK'))
s.post(USERNAME, client)
elif action == 'CHKTIM':
s.status_chk(USERNAME, client)
elif action == 'VIEWTL':
client.send(s.enc('OK'))
s.view_tl(USERNAME, client)
elif action == 'FEED':
s.send_feed(USERNAME, client)
elif action == 'SRCH':
s.search_usr(USERNAME, client)
elif action == 'ADDFRND':
client.send(s.enc('OK'))
s.add_frnd(USERNAME, client)
elif action == 'SYNC':
client.send(s.enc('BEGIN'))
s.sync(USERNAME, client)
elif action == 'ADDLIMITED':
client.send(s.enc('OK'))
s.add_to_limited(USERNAME, client)
elif action == 'FOFF':
client.send(s.enc('OK'))
s.f_of_f(USERNAME, client)
elif action == 'DELLIMITED':
client.send(s.enc('OK'))
s.del_from_limited(USERNAME, client)
elif action == 'ACCFRND':
client.send(s.enc('OK'))
s.acc_rqst(USERNAME, client)
elif action == 'DLTFRND':
client.send(s.enc('OK'))
s.dlt_frnd(USERNAME, client)
elif action == 'GETFRND':
s.get_friends(USERNAME, client)
elif action == 'CHKACT':
s.chk_act(USERNAME, client)
elif action == 'CHAT':
client.send(s.enc('OK'))
s.init_chat(USERNAME, client)
elif action == 'LIVECHAT':
client.send(s.enc('OK'))
s.live_chat(USERNAME, client)
print('Initial server came back')
else:
print(action, 'Not matched with anything')
break
if USERNAME:
path = '../client_data/users/'
dct = pd.read_pickle(path+USERNAME)
dct['isOnline'] = False
pd.to_pickle(dct, path+USERNAME)
print("EXIT :)")
hostname = socket.gethostname()
addr = socket.gethostbyname(hostname)
port = 12345 #int(sys.argv[1])
#caddr = sys.argv[2]
print(addr)
tcpServer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#tcpServer.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
tcpServer.bind((addr, port))
threads = []
client_pairs = {}
clients = []
while True:
tcpServer.listen(4)
print("Multithreaded Python server : Waiting for connections from TCP clients...")
(conn, (ip,port)) = tcpServer.accept()
#print('MainServer Connected with', ip, port)
clients.append(conn)
box.append(None)
newthread = ServerThread(ip, port, len(clients)-1)
newthread.start()
threads.append(newthread)
for t in threads:
t.join()
######### Methods
|
989,440 | 3551af38b67345edd6178ed4bcb698b96cf1b8a3 | from django.urls import path
from .views import dash, PatientCreate, PatientUpdate, PatientListView,\
CreateBill, BillListView, RequestTest, TestListView,\
CreateUser, UpdateUser, UsersListView, EditBill, UpdateTest,\
PatientDetail, BillDetail, TestDetail, UserDetail
# URL routes for the clinic app: dashboards, CRUD for patients, bills,
# tests and users. Detail routes use <slug:pk> / <int:pk> captures.
urlpatterns = [
    path('', dash.as_view(), name='home'),
    path('add_patient', PatientCreate.as_view(), name="addpatient"),
    path('edit/<slug:pk>/patient', PatientUpdate.as_view(), name='update_patient'),
    path('patient/<slug:pk>/', PatientDetail.as_view(), name='patient_detail'),
    path('all_patients', PatientListView.as_view(), name='all_patients'),
    path('create_bill', CreateBill.as_view(), name='create_bill'),
    path('all_bills', BillListView.as_view(), name='all_bills'),
    path('bill/<slug:pk>/', BillDetail.as_view(), name='bill_detail'),
    path('request_test', RequestTest.as_view(), name='request_test'),
    path('all_tests', TestListView.as_view(), name='all_tests'),
    path('test/<slug:pk>/', TestDetail.as_view(), name='test_detail'),
    path('create_user', CreateUser.as_view(), name='create_user'),
    path('edit/<int:pk>/user', UpdateUser.as_view(), name='update_user'),
    path('user/<int:pk>/', UserDetail.as_view(), name='user_detail'),
    path('all_users', UsersListView.as_view(), name='all_users'),
    path('edit/<int:pk>/bill', EditBill.as_view(), name='edit_bill'),
    # BUG FIX: route was 'edit<int:pk>/test' (missing slash), inconsistent
    # with the other edit/<pk>/... routes above.
    path('edit/<int:pk>/test', UpdateTest.as_view(), name='edit_test'),
]
989,441 | 05632cd6515132a4f8cc7f8be733da900c76fe35 | from django.conf import settings
from django.contrib import messages
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.views import (
PasswordResetView,
PasswordResetConfirmView,
PasswordResetCompleteView,
PasswordResetDoneView,
LoginView,
LogoutView
)
from django.contrib.auth.tokens import default_token_generator
from django.shortcuts import redirect, render
from django.urls import reverse_lazy
from django.utils.translation import gettext_lazy as _
from .forms import (
AuthenticationForm,
PasswordChangeForm,
PasswordResetForm,
SetPasswordForm
)
from django.views import generic
# Class-based password reset views
# - PasswordResetView sends the mail
# - PasswordResetDoneView shows a success message for the above
# - PasswordResetConfirmView checks the link the user clicked and
# prompts for a new password
# - PasswordResetCompleteView shows a success message for the above
class PasswordResetView(PasswordResetView):
    # Shadows Django's PasswordResetView to plug in the project's reset form,
    # configured sender address and a localized (Spanish) title.
    extra_email_context = None
    form_class = PasswordResetForm
    from_email = settings.EMAIL_FROM
    html_email_template_name = None
    success_url = reverse_lazy('password_reset_done')
    title = _('Reseteo de contraseña')
    token_generator = default_token_generator
class PasswordResetConfirmView(PasswordResetConfirmView):
    # Shadows Django's PasswordResetConfirmView to use the project's
    # SetPasswordForm and a localized title; no auto-login after reset.
    form_class = SetPasswordForm
    post_reset_login = False
    post_reset_login_backend = None
    success_url = reverse_lazy('password_reset_complete')
    title = _('Ingrese nueva contraseña')
    token_generator = default_token_generator
class LoginView(LoginView):
    # Shadows Django's LoginView only to swap in the project's AuthenticationForm.
    form_class = AuthenticationForm
def change_password(request):
    """Render and process the password-change form for the logged-in user.

    On a valid POST the password is saved, the session auth hash is
    refreshed so the user stays logged in, and we redirect back here.
    """
    if request.method == 'POST':
        form = PasswordChangeForm(request.user, request.POST)
        if form.is_valid():
            user = form.save()
            update_session_auth_hash(request, user)  # Important! keeps the session valid after the hash changes
            messages.success(request, _('Su password fue exitosamente actualizado'))
            return redirect('change_password')
        else:
            messages.error(request, _('Por favor corriga los errores marcados'))
    else:
        # GET: show an unbound form for the current user.
        form = PasswordChangeForm(request.user)
    return render(request, 'registration/change_password.html', {
        'form': form
    })
class ProfileView(LoginRequiredMixin, generic.TemplateView):
    # Static, login-required profile page.
    template_name = 'profile.html'
|
989,442 | dbf34eed813d67ae16b7acdfa89f1142e1c1828b | import os
import websocket
import json
import time
from kafka import KafkaProducer
import kafka.errors
try:
import thread
except ImportError:
import _thread as thread
import time
KAFKA_BROKER = os.environ["KAFKA_BROKER"]
TOPIC = "exchange-rates"
START_DELAY = 30
WS_SERVER = 'wss://streamer.cryptocompare.com/v2'
SUB_MSG = '{ \
"action": "SubAdd", \
"subs": ["5~CCCAGG~BTC~USD", "5~CCCAGG~ETH~USD"] \
}'
AUTH_HEADER = "Authorization: Apikey " + os.environ["API_KEY"]
producer = None
def connect_to_kafka():
    """Block until a KafkaProducer can be created, retrying every 3 seconds."""
    while True:
        try:
            producer = KafkaProducer(bootstrap_servers=KAFKA_BROKER.split(","))
            print("Connected to Kafka!")
            return producer
        except kafka.errors.NoBrokersAvailable as e:
            # Broker may still be starting; log and retry.
            print(e)
            time.sleep(3)
def on_message(ws, message):
    # Forward aggregate-index ticks (TYPE "5") to Kafka, keyed by the base
    # symbol. Uses the module-level `producer` set up in main().
    msg = json.loads(message)
    if msg["TYPE"] == "5":
        producer.send(TOPIC, key=bytes(
            msg["FROMSYMBOL"], "utf-8"), value=bytes(message, "utf-8"))
def on_error(ws, error):
    # WebSocket error callback: just log it.
    print(error)
def on_close(ws):
    # WebSocket close callback.
    # NOTE(review): newer websocket-client versions call this with
    # (ws, close_status_code, close_msg) -- confirm the pinned version.
    print("### closed ###")
def on_open(ws):
    # Once the socket opens, send the subscription message from a fresh thread.
    def run(*args):
        ws.send(SUB_MSG)
    thread.start_new_thread(run, ())
def main():
    """Connect to Kafka, then stream exchange-rate ticks from the WebSocket forever."""
    global producer
    print("Starting Exchange Rate listener ...")
    producer = connect_to_kafka()
    # Give the rest of the stack time to come up before streaming.
    time.sleep(START_DELAY)
    websocket.enableTrace(True)
    ws = websocket.WebSocketApp(WS_SERVER,
                                on_message=on_message,
                                on_error=on_error,
                                on_close=on_close,
                                header=[AUTH_HEADER])
    ws.on_open = on_open
    ws.run_forever()
if __name__ == "__main__":
main()
|
989,443 | 6446448cb3bba94bdd9507b77afe7c9b9da070be | # -*- coding: utf-8 -*-
from datetime import date
import uuid
import re
def addSimple(desc, amount):
    """Build a simple expense record stamped with today's date and a unique id."""
    record = {"desc": desc, "amount": amount}
    record["date"] = date.today()
    record["id"] = str(uuid.uuid1())
    return record
def reportCategory(json):
    """Lazily annotate each record with a default category inferred from its description.

    Returns a map iterator; each record dict is mutated in place (adding a
    "category" key) as the iterator is consumed.
    """
    def tag(record):
        record["category"] = getDefaultCategory(record["desc"])
        return record
    return map(tag, json)
def getDefaultCategory(desc):
    """Return the first spending category whose regex patterns match *desc*, else "其它"."""
    rules = {
        "吃": [
            r".*吃.*"
        ],
        "学习": [
            r".*书.*",
            r".*学.*",
            r".*笔.*"
        ],
        "住": [
            r".*房.*"
        ],
        "生活": [
        ]
    }
    # Categories are checked in insertion order; first match wins.
    for category, patterns in rules.items():
        if any(re.search(pattern, desc) is not None for pattern in patterns):
            return category
    return "其它"
|
989,444 | 37926df3967f485bca94e0e19ba99bc0b2171c5c | from django.urls import path
from . import views
# Demo endpoints exercising the app's insert/select views.
urlpatterns = [
    path('insert1', views.insert1, name='insert1'),
    path('select1', views.select1, name='select1'),
    path('select_m', views.select_m, name='select_m'),
    path('select3', views.select3, name='select3'),
]
|
989,445 | b61431b5792f3f1b496964f09e03198eb264e8dc | # SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Unlicense OR CC0-1.0
import pytest
from pytest_embedded import Dut
@pytest.mark.esp32
@pytest.mark.esp32c2
@pytest.mark.ethernet
def test_mqtt5_client(dut: Dut) -> None:
    # Relay the device-under-test's Unity test output; pytest-embedded fails
    # the test if the DUT reports Unity failures.
    dut.expect_unity_test_output()
|
989,446 | 71586444df287c4281a43a0ce8d10cc3a90838d8 | import codecs
unicode_decoder = codecs.getdecoder("unicode_escape")
def parse_model_file_line(line):
    """Split one tab-separated model-file line into a tuple of fields.

    The first field is unicode-unescaped and exploded into a list of
    every other character of the decoded string.
    """
    model_file_element = line.rstrip().split("\t")
    # NOTE(review): [::2] keeps only alternating characters of the decoded
    # first field -- presumably the on-disk format interleaves characters;
    # confirm against the writer of these model files.
    model_file_element[0] = [char for char in unicode_decoder(model_file_element[0])[0][::2]]
    return tuple(model_file_element)
|
989,447 | 5bb820a50919e15f59a8449d31b08b8d08b40bb9 | from django.db import models
from usuario.models import User
# Create your models here.
class Maceta(models.Model):
    """A flower pot: its plant type, key dates, and the owning user."""
    tipoPlanta = models.ForeignKey('Plantas', on_delete=models.CASCADE)
    fechaDePlantacion = models.DateTimeField()
    primeraCosecha = models.DateTimeField()
    # NOTE(review): capitalised attribute name shadows the User model imported
    # above; renaming it would change the DB column, so it is left as-is.
    User = models.ForeignKey('usuario.User', on_delete=models.CASCADE)


class Plantas(models.Model):
    """A plant species that a pot can grow."""
    nombre = models.CharField(max_length=32)


class LogsTemperatura(models.Model):
    """Timestamped temperature reading for a pot."""
    maceta = models.ForeignKey('maceta', on_delete=models.CASCADE)
    fecha = models.DateTimeField()
    valor = models.FloatField()


class LogsValvula(models.Model):
    """Timestamped valve reading for a pot."""
    maceta = models.ForeignKey('maceta', on_delete=models.CASCADE)
    fecha = models.DateTimeField()
    valor = models.FloatField()


class LogsLuminosidad(models.Model):
    """Timestamped luminosity reading for a pot."""
    maceta = models.ForeignKey('maceta', on_delete=models.CASCADE)
    fecha = models.DateTimeField()
    valor = models.FloatField()


class LogsHumedad(models.Model):
    """Timestamped humidity reading for a pot."""
    maceta = models.ForeignKey('maceta', on_delete=models.CASCADE)
    fecha = models.DateTimeField()
    valor = models.FloatField()
|
989,448 | cd539d07cc13254a84727cd25696b8a9645199d3 | # -*- coding: utf-8 -*
import unittest
from unittest.mock import MagicMock
import richman.event as event
class TestEventManaer(unittest.TestCase):
    """Unit tests for richman.event.EventManager.

    (Class/method name typos are kept so external test-selection strings keep
    working.)
    """

    def setUp(self):
        # Fresh manager per test so handler registrations don't leak between tests.
        self.event_manager = event.EventManager()

    def tearDown(self):
        pass

    def test_event_manager_should_add_and_remove_listeners_correctly(self):
        """Listeners appear in handlers_dict on add and vanish on remove."""
        handlers = [MagicMock(), MagicMock(), MagicMock()]
        event_name = 'Event Test'
        self.event_manager.add_listeners(event_name, handlers)
        self.assertDictEqual({event_name: handlers}, self.event_manager.handlers_dict)
        self.event_manager.remove_listeners(event_name, handlers)
        self.assertFalse(self.event_manager.handlers_dict)

    def test_event_manager_sould_send_event_correctly(self):
        """send() invokes only the handlers registered for the event's name."""
        handlers1 = [MagicMock(), MagicMock(), MagicMock()]
        event_name1 = 'Event Test 1'
        self.event_manager.add_listeners(event_name1, handlers1)
        handlers2 = [MagicMock(), MagicMock(), MagicMock()]
        event_name2 = 'Event Test 2'
        self.event_manager.add_listeners(event_name2, handlers2)
        event1 = MagicMock()
        event1.event_name = event_name1
        self.event_manager.send(event1)
        for handler1 in handlers1:
            handler1.assert_called_once()
        for handler2 in handlers2:
            handler2.assert_not_called()
        # Fix: the original did not reset the mocks here, so the
        # handler1.assert_not_called() checks below could never pass once
        # handlers1 had been invoked above — the test always failed.
        for handler in handlers1 + handlers2:
            handler.reset_mock()
        event2 = MagicMock()
        event2.event_name = event_name2
        self.event_manager.send(event2)
        for handler1 in handlers1:
            handler1.assert_not_called()
        for handler2 in handlers2:
            handler2.assert_called_once()
989,449 | 189704bbb214bb2f875e1e28cd1871966e1aa2b4 | import sys
import cProfile, pstats, io
def run():
    """Read substrates and reactions from stdin; print the reachable closure.

    Input format: first line is a space-separated set of starting substrates;
    each following line is "a+b->c+d". A reaction fires when all of its
    left-hand substrates are available, adding its right-hand products.
    Prints the final sorted substrate set, space-separated.

    Fix: the original deleted elements of the reaction list *while iterating
    it* (`del r[idx]` inside `for idx, reaction in enumerate(r)`), which skips
    the element following each deletion; the outer retry loop masked the bug
    but forced extra full passes.
    """
    substrates = set(sys.stdin.readline().rstrip().split(" "))
    reactions = [
        [set(lhs.split('+')), set(rhs.split('+'))]
        for lhs, rhs in (line.strip().split('->') for line in sys.stdin.readlines())
    ]
    # Fixed-point iteration: keep sweeping until a pass fires no reaction.
    progress = True
    while progress and reactions:
        progress = False
        remaining = []
        for lhs, rhs in reactions:
            if substrates.issuperset(lhs):
                substrates |= rhs
                progress = True
            else:
                remaining.append([lhs, rhs])
        reactions = remaining
    print(' '.join(sorted(substrates)))
# Profile a single invocation of run() and dump cumulative-time statistics
# to stdout after the program's own output.
pr = cProfile.Profile()
pr.enable()
run()
pr.disable()
s = io.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
|
989,450 | a11508644ae5cb0266c1201f447b8717b3234a01 | import multiprocessing, time
def fendian1():
    """Branch shop #1: print a heartbeat message once per second, forever."""
    while True:
        print("正在经营分店111111111111111")
        time.sleep(1)


def fendian2():
    """Branch shop #2: print a heartbeat message once per second, forever."""
    while True:
        print("正在经营分店222222222222222")
        time.sleep(1)
def main():
    """Start two branch-shop worker processes, then loop as the main shop."""
    # Create two processes; multiprocessing.Process() returns a handle and
    # takes the same kind of arguments as threading.Thread.
    p1 = multiprocessing.Process(target=fendian1)
    p2 = multiprocessing.Process(target=fendian2)
    # Do not call the target functions directly; start() launches each one
    # in its own child process.
    p1.start()
    p2.start()
    while True:
        print("正在经营总店3333333333333")
        time.sleep(1)


if __name__ == '__main__':
    main()
|
989,451 | d1ce2fa292c60e7656f97d8ed451319583591946 | ## Dictionaries
# Tutorial walkthrough of dict basics: access, add/modify/delete keys,
# get() with defaults, and iterating keys vs. items().
alien_0 = {'color': 'green', 'points': 5}
print(alien_0['color'])
print(alien_0['points'])

# add new key/value pairs
alien_0['x_position'] = 0
alien_0['y_position'] = 25
print(alien_0)

## program: move an alien right at a speed-dependent rate
alien_0 = {'x_position': 0, 'y_position': 25, 'speed': 'medium'}
print(f"Original position: {alien_0['x_position']}")
# Move the alien to the right.
# Determine how far to move the alien based on its current speed.
if alien_0['speed'] == 'slow':
    x_increment = 1
elif alien_0['speed'] == 'medium':
    x_increment = 2
else:
    # This must be a fast alien.
    x_increment = 3
# The new position is the old position plus the increment.
alien_0['x_position'] = alien_0['x_position'] + x_increment
print(f"New position: {alien_0['x_position']}")

# delete a key, then re-add/modify values
del alien_0['speed']
print(alien_0)
alien_0['color'] = 'red'
alien_0['speed'] = 'fast'
print(alien_0)
point_value = alien_0.get('points', 'No point value assigned.')  # get() returns the default when the key is absent
print(point_value)

## EX1: iterating over keys
person = {'name': 'Jill', 'age': 23, 'city': 'Reacon City'}
for i in person:
    print(i.title(), ':', person[i])
numbers = {'Jill': 23, 'John': 43, 'Will': 12}
for i in numbers:
    print('Favorite numbers\n', i, ':', numbers[i])
pr = {'Data structures': 'main functions: add, rm, append, etc', 'Pandas': 'Review', 'Numpy': 'almost from scratch', 'algotihms': 'more notions'}
print('Python keep learning')
for i in pr:
    print(i, ':\n\t', pr[i])
# Key and Value: the correct way — unpack with items()
user_0 = {
    'username': 'efermi',
    'first': 'enrico',
    'last': 'fermi',
}
for key, value in user_0.items():
    print(f"\nKey: {key}")
    print(f"Value: {value}")
favorite_languages = {
    'jen': 'python',
    'sarah': 'c',
    'edward': 'ruby',
    'phil': 'python',}
for name, language in favorite_languages.items():
    print(f"{name.title()}'s favorite language is {language.upper()}.")
-- 139 |
989,452 | 878e8cd91a225466316e4b1833662d5b8eddddd4 | import socket
from helpers import parse_helper
from helpers import config
from helpers.load_balancer import Algorithm
# get the IPAddr of the Server
def getServerInfo():
    """Return the local host's IP address, resolved via its fully-qualified name."""
    hostName = socket.getfqdn(socket.gethostname())
    IPAddr = socket.gethostbyname(hostName)
    return IPAddr
# establish socket connection
def connectSock(IPAddr, name):
    """Bind a reusable listening TCP socket on 127.0.0.1:5000 and return it.

    Returns None if binding fails (the error is printed).
    NOTE(review): IPAddr is only used in the log message — the socket is
    always bound to localhost. The bare print statements below are Python 2.
    """
    s = socket.socket()
    # Allow quick rebinding after restarts.
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Bind the Socket to localhost at a given port
    try:
        s.bind(("127.0.0.1", 5000))
        s.listen(10)
        print("Server for " + name + " is running on " + IPAddr)
        print "Server Listening on Port: " + str(s.getsockname()[1])
        return s
    except socket.error as err:
        print "Socket Connection Error %s" % err
def startServer(s):
    """Accept connections forever, handing each payload to the parser.

    Each accepted connection is read once (up to 1024 bytes), stored as the
    current client in the shared config, parsed, then closed. Any exception
    (other than a normally-handled one) stops the loop and closes the server.
    """
    algo = Algorithm()
    while True:
        try:
            # conn is a new socket object for this single client
            conn, address = s.accept()
            payload = conn.recv(1024)
            if not payload:
                print('dosen\'t have any payload here')
            else:
                print('payload in loadbalancer :', payload)
                config.client = conn
                parse_helper.parsePayload(payload, algo)
                print('cmds done closing')
                conn.close()
        except Exception as e:
            # NOTE(review): if accept() itself raised, 'conn' is unbound here
            # and conn.close() would raise NameError — verify intended flow.
            print('loadbalancer socket error', e)
            conn.close()
            break
        except KeyboardInterrupt:
            conn.close()
            break
    conn.close()
    s.close()
if __name__ == '__main__':
    IPAddr = getServerInfo()
    # Fix: the original assigned the listening socket to the name "socket",
    # shadowing the imported socket module for the rest of the script.
    server_sock = connectSock(IPAddr, 'Server')
    startServer(server_sock)
|
# Credit-card balance exercise (Python 2): apply monthly interest and a fixed
# minimum payment for a year, then report the remaining balance.
prev_balance = float(raw_input("Enter outstanding balance: "))
annual_interest = float(raw_input("Enter annual interest: "))
# mmp = monthly minimum payment (fixed)
mmp = 120
months = 1
# NOTE(review): 'balance' is assigned but never used afterwards.
balance = mmp
# NOTE(review): starting at months = 1 with 'months < 12' performs only 11
# payment iterations — verify against the assignment's intent.
while months < 12:
    # mmp = monthly min. payment
    monthly_interest_rate = annual_interest / 100 / 12.0
    prev_balance = prev_balance * (1 + monthly_interest_rate) - mmp
    months += 1
print "RESULT"
print "Number of months needed:", months
print "mmp:", mmp
print "balance: ", round(prev_balance, 1)
989,454 | 707bd541a5c2d1e9d3692efe6663b52387450871 | import logging
from django.core.cache import cache
from django.contrib.admin.views.decorators import staff_member_required
from nms.views.response import *
logger = logging.getLogger(__name__)
def home(request):
    """Bootstrap endpoint: log the hit and return an empty success JSON response."""
    logger.info('bootstrap')
    response_dict = success_dict()
    return render_json(request, response_dict)
|
class Rocket(object):
    """Player-controlled rocket for a Processing.py sketch.

    Relies on Processing built-ins (PVector, width/height, fill, vertex, ...)
    that the sketch runtime injects into the global namespace.
    """
    def __init__(self):
        self.pos = PVector(width/2, height/2)  # start at screen centre
        self.vec = PVector(0, 0)               # current velocity
        self.accel = PVector(0, -1)            # thrust direction
        self.heading = 0  # unit in degrees
        self.isPropulsion = False              # True while thrusting this frame

    def show(self):
        """Draw the rocket as a rotated quad; red while thrusting, else white."""
        if self.isPropulsion == True:
            fill('#FF0000')
        else:
            fill('#FFFFFF')
        scale = 10
        pushMatrix()
        translate(self.pos.x, self.pos.y)
        rotate(radians(self.heading))
        beginShape()
        vertex(0 * scale, 0 * scale)
        vertex(1 * scale, 0.6 * scale)
        vertex(0 * scale, -3 * scale)
        vertex(-1 * scale, 0.6 * scale)
        endShape()
        popMatrix()

    def rotateRocket(self, angle):
        """Turn the rocket — and its thrust vector — by *angle* degrees."""
        self.heading += angle
        self.accel.rotate(radians(angle))

    def propulsion(self):
        """Apply one frame of thrust along the current heading (speed capped at 6)."""
        self.isPropulsion = True
        self.accel.setMag(0.1)
        self.vec = PVector.add(self.vec, self.accel)
        self.vec.limit(6)

    def update(self):
        """Integrate position; apply drag while coasting and wrap at screen edges."""
        if self.isPropulsion == False:
            self.vec = PVector.mult(self.vec, 0.99)  # mild friction while idle
        self.pos = PVector.add(self.pos, self.vec)
        # Toroidal wrap-around on all four edges.
        if (self.pos.x > width):
            self.pos.x = 0
        if (self.pos.x < 0):
            self.pos.x = width
        if (self.pos.y > height):
            self.pos.y = 0
        if (self.pos.y < 0):
            self.pos.y = height
|
# Write a short Python function that takes a positive integer n and returns
# the sum of the squares of all the odd positive integers smaller than n.
def sum_of_odd_squares(number):
    """Return the sum of k**2 for every odd positive integer k < number.

    Non-positive inputs have no odd positive integers below them, so the
    result is 0 (the original implicitly returned None for number <= 0).
    """
    # range(1, n, 2) enumerates exactly the odd positive integers below n;
    # max(..., 0) keeps negative inputs from producing an invalid stop.
    return sum(k * k for k in range(1, max(number, 0), 2))
# Demo output for a few sample inputs.
print("The sum of odd positive integers smaller than 1: ", sum_of_odd_squares(1))
print("The sum of odd positive integers smaller than 10: ", sum_of_odd_squares(10))
print("The sum of odd positive integers smaller than 100: ", sum_of_odd_squares(100))
|
989,457 | a08242390e865be22cde9f180b1053304fae0863 | ################################################################################
#
# This file demonstrates an incompatibility between the MediaWiki API at
# http://en.wikipedia.org and the 'requests' library since version 1.0.0
#
# To run, simply execute "python test.auth.append.py" and fill in your username
# and password at the prompts.
import requests, getpass, sys, json
def get_json(response):
    """
    Gets the json content out of a requests.response object. This functionality
    is highly dependant on the version of the requests library used:
    >= 1.0.0 exposes a .json() method, 0.14.2 a .json property, and older
    releases require parsing response.content manually.
    """
    # Fix: the original compared version *strings* lexicographically
    # ("10.0.0" >= "9.0.0" is False). Compare numeric component tuples.
    version = tuple(
        int(part) for part in requests.__version__.split(".")[:3] if part.isdigit()
    )
    if version >= (1, 0, 0):
        return response.json()
    elif requests.__version__ == "0.14.2":
        return response.json
    else:
        return json.loads(response.content)
print("Using requests library version %s" % requests.__version__)

# Setting up variables.
uri = "http://en.wikipedia.org/w/api.php"  # Location Wikipedia's API
page_name = "Wikipedia:Sandbox"  # The page we'll be editing
username = raw_input("Wikipedia username [anon]: ")  # Prompts for username (Python 2)
if len(username) > 0:
    # Login or get a token #########################################################
    password = getpass.getpass("Wikipedia password: ")  # Prompts for password
    response = requests.post(
        uri,
        data={
            'action': "login",
            'lgname': username,
            'lgpassword': password,
            'format': "json"
        }
    )
    doc = get_json(response)
    if 'token' in doc['login']:
        # MediaWiki replied with a login token: confirm the login with it,
        # carrying over the cookies from the first response.
        login_token = doc['login']['token']
        print("Passing login token to API: %s" % login_token)
        response = requests.post(
            uri,
            data={
                'action': "login",
                'lgname': username,
                'lgpassword': password,
                'lgtoken': login_token,
                'format': "json"
            },
            cookies=response.cookies
        )
        doc = get_json(response)
    if doc['login']['result'] == "Success":
        print("Successfully logged in as %s" % doc['login']['lgusername'])
    else:
        print("Login unsuccessful")
        sys.exit(1)
    user_cookies = response.cookies
else:
    print("Staying anonymous.")
    user_cookies = None

# Get user info (make sure we're being logged in correctly) ####################
response = requests.post(
    uri,
    data={
        'action': "query",
        'meta': "userinfo",
        'format': "json"
    },
    cookies=user_cookies
)
doc = get_json(response)
print("Currently logged in as %(name)s(%(id)s)" % doc['query']['userinfo'])

# Get edit token ###############################################################
response = requests.post(
    uri,
    data={
        'action': "query",
        'prop': "revisions|info",
        'titles': page_name,
        'intoken': "edit",
        'format': "json"
    },
    cookies=user_cookies
)
doc = get_json(response)
# NOTE(review): dict.values()[0] is Python-2-only (dict views are not
# indexable in Python 3).
edit_token = doc['query']['pages'].values()[0]['edittoken']
print("Got edit token for %r: %s" % (page_name, edit_token))

# Make edit ####################################################################
response = requests.post(
    uri,
    data={
        'action': "edit",
        'title': page_name,
        'appendtext': "\n\nSome markup to append",
        'summary': "This is a test",
        'token': edit_token,
        'format': "json"
    },
    cookies=user_cookies
)
print(response.content)
989,458 | 234e19f7f0a6d00679523d351e96d164cfd42538 | '''
Find inversion count in an unsorted array:
An inversion happen when there is a number at index i greater than another number at index j where i < j
input will be a list of unsorted int array
Algorithm runs in nlog(n)
We can break the problem into three parts
An inversion can happen on the left side, right side, or one element in left side and another in right side
So we can recursively solve left, right, and split and return their sum
'''
# implement merge sort
def SplitInversionCount(leftArr, rightArr):
    """Merge two sorted lists and count the split inversions between them.

    A split inversion is a pair (l, r) with l in leftArr, r in rightArr and
    l > r. Whenever an element is taken from the right side, every element
    still pending on the left side forms one such inversion with it.

    Protocol: the inversion count is appended as the FINAL element of the
    returned merged list (the caller pops it back off).
    """
    merged = []
    li = 0
    ri = 0
    inversions = 0
    while True:
        if leftArr[li] <= rightArr[ri]:
            merged.append(leftArr[li])
            li += 1
            if li == len(leftArr):
                # Left side exhausted: the rest of the right side is in order.
                merged.extend(rightArr[ri:])
                merged.append(inversions)
                return merged
        else:
            merged.append(rightArr[ri])
            inversions += len(leftArr) - li
            ri += 1
            if ri == len(rightArr):
                # Right side exhausted: flush the remaining left elements.
                merged.extend(leftArr[li:])
                merged.append(inversions)
                return merged
def InversionCount(arr):
    """Recursively count inversions in *arr* (divide and conquer, O(n log n)).

    Protocol quirk: the inversion count is carried as an extra element
    appended to the sorted list, i.e. the return value is sorted(arr) + [count].
    """
    length = len(arr)
    if(length == 1):
        # Base case: a single element has no inversions.
        arr.append(0)
        return arr
    leftArr = InversionCount(arr[0:length//2])
    rightArr = InversionCount(arr[length//2:length])
    # Pop each half's count off the end before merging.
    leftInvCount = leftArr[len(leftArr)-1]
    del leftArr[len(leftArr)-1]
    rightInvCount = rightArr[len(rightArr)-1]
    del rightArr[len(rightArr)-1]
    resultArr = SplitInversionCount(leftArr, rightArr)
    # Total = split inversions + left-half inversions + right-half inversions.
    resultArr[len(resultArr) - 1] += (leftInvCount + rightInvCount)
    return resultArr
# Read one integer per line from IntegerArray.txt and print the total number
# of inversions (the count element appended at index len(intArr)).
inputF = open("IntegerArray.txt", "r")
sArr = inputF.read().splitlines()
intArr = [int(x) for x in sArr]
print(InversionCount(intArr)[len(intArr)])
989,459 | 2db91ac3b661ae8131b35bb71d45d60517b4d210 | from django import forms
from onboarding.apps.employee.models.documents import ModelDocumentGathering
# -------------------------------------------------------------------------------
# FormDocumentGathering
# -------------------------------------------------------------------------------
class FormDocumentGathering(forms.ModelForm):
    """Upload form for gathering one or more documents from an employee."""

    # NOTE(review): passing 'multiple' to a plain FileInput is rejected by
    # newer Django releases — verify against the project's Django version.
    files = forms.FileField(
        required=False,
        label='Files',
        widget=forms.FileInput(
            attrs={
                'multiple': True,
                'type': 'file',
                'id': 'fileupload',
                'class': 'form-control'
            })
    )

    # ---------------------------------------------------------------------------
    # Meta
    # ---------------------------------------------------------------------------
    class Meta:
        model = ModelDocumentGathering
        # NOTE(review): a set literal works here, but field order is undefined;
        # a list/tuple is the conventional form.
        fields = {'files'}
|
989,460 | 76228839f5f2946ca8e2afda4fd4f6850b4ec492 | import re
# re.search() scans the whole string and returns a match object (with start
# and end indices); it returns None when nothing matches.
# The r prefix means a raw string literal (backslashes are kept verbatim).
# s = 'GeeksforGeeks: A computer science 123 portal for geeks 321'
# regex = '\d+.*?'
# match = re.findall(regex, s) # return a LIST
# print(match)
# p = re.compile('[a-e]')
# m = p.findall('Aye, Said Mr. Gilbon Start') # return a list
# print(m)
# # re.split(regex, string, maxsplit=0, flags=0)
# s = 'ae92 up up from b1 to c1 2'
# match = re.split('\s+', s, maxsplit=3, flags=re.IGNORECASE) # return a list
# print(match)
# print(re.sub('\sAND\s', ' & ', 'Baked AND beans AND Spams',count=3)) # return a string
# t = re.subn(r'\sAND\s', ' && ', 'Beans AND bakers AND spames') # return a tuple
# print(t)
regex = r'[a-zA-Z]+ \d+'
s = 'My Birthday is Sep 13'
Match = re.search(regex, s)  # if there is a match, match.group() returns the matched string
print(Match.group())
# NOTE(review): the pattern has no capturing groups, so groups() is an empty
# tuple and the loop below prints nothing.
print(Match.groups())
for i in Match.groups():  # match.groups() returns the tuple of captured groups
    print(i)
# re.match anchors at the start of the string; re.search matches anywhere.
s = 'I love dogs'
print(re.match(r'. dogs', s))   # None: '. dogs' does not match at index 0
print(re.search(r'. dogs', s))  # matches 'e dogs'
|
989,461 | 155da28f78b499857c72e5aac1d5b92ee2d4d2b6 | import json
import yaml
from pathlib import Path
from functools import partial
# Newline-join shorthand: NLJ(lines) == '\n'.join(lines).
NLJ = partial('\n'.join)
def load_resume(format='yaml', name='resume'):
    """Load and parse ./<name>.<format> from the current working directory.

    Supported formats: 'yaml' (yaml.safe_load) and 'json'. Raises KeyError
    for any other format.
    """
    text = (Path.cwd() / f'{name}.{format}').read_text()
    # Fix: the original dispatched to json.load, which expects a file object;
    # the argument here is already a string, so json.loads is the correct
    # parser (json.load raised AttributeError on the str).
    return {
        'yaml': yaml.safe_load,
        'json': json.loads,
    }[format](text)
def experience_skills(job_id=None):
    """Skills per past job, encoded as '+tag' strings.

    With a recognised job_id, return that job's tags as a list in original
    order; otherwise return the de-duplicated union of every job's tags as
    a set (this list-vs-set asymmetry is part of the existing contract).
    """
    skills = {
        'quidco': "+php +laravel +symfony +node.js +python +flask +mkdocs +mysql +kubernetes +helm +docker +tilt +serverless +amazon-aws +cdk +sqs +lambda +alpine-linux",
        'intellection': "+ruby +ruby-on-rails +mysql +amazon-aws +chef +opsworks +docker +ansible",
        'yola': "+python +django +piston +php +java +amazon-aws +ubuntu",
        'sadalbari': "+java +documentation +consulting +finco +insureco +telco +ubuntu",
        'adapt-it': "+python +plone +zope +zodb +rhel +ubuntu",
        'jam-warehouse': "+php +mysql +freebsd +debian +c# +.net +asp.net +iis +python +plone +cms +zope +zodb +rhel",
        'itouch-labs': "+java +ftp +deployment +documentation +testing",
        'itouch-ie': "+php +java +jsp +ivr +4voice +sms +smpp +sybase +er +modelling +dbmodelling +redhat",
        'vodacom': "+holos +oracle +windows",
        'telkom': "+vb6 +msaccess +windows",
    }

    def tags(spec):
        # '+a +b' -> ['a', 'b'] (drop the empty leading split, strip spaces).
        return [part.strip() for part in spec.split('+') if part]

    if job_id and job_id in skills:
        return tags(skills[job_id])
    merged = set()
    for spec in skills.values():
        merged.update(tags(spec))
    return merged
# shields.io badge URL skeleton; filled in by generate_badge_url().
BADGE_URL_TEMPLATE = "https://img.shields.io/badge/{badge_name}?style={style}&logo={logo}&logoColor={colour}"

# Skill tag -> shields.io "label-color" badge id.
SKILL_BADGE_MAP = {
    # browsers
    # version-control
    'git': 'git-%23F05033',
    'bitbucket': 'bitbucket-%230047B3',
    'github': 'github-%23121011',
    'gitlab': 'gitlab-%23181717',
    # +cvs, subversion, vss
    # languages
    'php': 'PHP-777BB4',
    'python': 'Python-3776AB',
    'node.js': 'Node.js-43853D',
    'c-sharp': 'C%23-239120',
    '.net': '.NET-5C2D91',
    'java': 'Java-ED8B00',
    'ruby': 'Ruby-CC342D',
    'markdown': 'Markdown-000000',
    'gnu-bash': 'Shell_Script-121011',
    # frameworks
    'django': 'Django-092E20',
    'ruby-on-rails': 'Ruby_on_Rails-CC0000',
    'laravel': 'Laravel-FF2D20',
    'flask': 'Flask-000000',
    # databases
    'mysql': 'MySQL-00000F',
    # sqlite, sybase, mssql, oracle
    # hosting
    'amazon-aws': 'Amazon_AWS-232F3E',
    # operating-system
    'windows': 'Windows-0078D6',
    'ubuntu': 'Ubuntu-E95420',
    'alpine-linux': 'Alpine_Linux-0D597F',
    # macos, fedora,
}
# TODO: automate https://github.com/Ileriayo/markdown-badges
# with categories
# language, framework, database


def generate_badge_url(skill):
    """Build a shields.io badge URL for *skill*, or None if it has no badge id."""
    try:
        badge_name = SKILL_BADGE_MAP[skill]
    except KeyError:
        return None
    params = {
        'badge_name': badge_name,
        'style': 'for-the-badge',
        'logo': skill,
        'colour': 'white',
    }
    return BADGE_URL_TEMPLATE.format(**params)
def shields_badges(company=None):
    """Return <img> tags for every badge-mapped skill of *company* (or of all jobs)."""
    urls = (generate_badge_url(skill) for skill in experience_skills(company))
    # Skills without a badge mapping yield None — drop them before formatting.
    return [f'<img src="{url}"/>' for url in urls if url]
|
989,462 | 150b28d352ee5e494dd97a2662c002f685364a28 | from board import bd
from entity import Entity
from config import EXPLODE_TIME
import time
"""
This class represents a Bomb.
It inherits from the Entity class.
"""
class Bomb(Entity):
    """
    Constructor of Bomb class
    created_at => Stores the time when it was created
    explode_time => The time it takes for the bomb to explode
    """
    def __init__(self, r, c):
        Entity.__init__(self, r, c)
        self.created_at = time.time()
        self.explode_time = EXPLODE_TIME

    """
    This method checks if a given moves is valid or not.
    Parent method has been overridden in this class
    """
    def check_move(self, dr, dc):
        # Canvas values 0/2/3/4 are the cell types an explosion may enter
        # (exact legend lives in the board module — TODO confirm).
        if 0 <= self.r + dr < bd.rows and 0 <= self.c + dc < bd.columns:
            if bd.canvas[self.r + dr, self.c + dc] in [0, 2, 3, 4]:
                return True
            return False
        return False

    """
    This method marks all the positions that are going to explode on the board
    """
    def explode(self):
        bd.canvas[self.r, self.c] = 6
        bd.explosions.append((self.r, self.c))
        # Spread one cell in each of the four cardinal directions.
        dr = [0, 0, 1, -1]
        dc = [1, -1, 0, 0]
        for pos in range(4):
            if self.check_move(dr[pos], dc[pos]):
                if bd.canvas[self.r + dr[pos], self.c + dc[pos]] == 2:
                    bd.score += 20  # cell value 2 awards points when destroyed
                bd.canvas[self.r + dr[pos], self.c + dc[pos]] = 6
                bd.explosions.append((self.r + dr[pos], self.c + dc[pos]))

    """
    It calls the explode method when the time after bomb drop > explode_time
    """
    def update(self):
        if time.time() - self.created_at > self.explode_time:
            self.explode()

    """
    It updates and removes the explosions from the board after 3 frames
    """
    @staticmethod
    def update_explosion():
        for explosion in bd.explosions:
            bd.canvas[explosion[0], explosion[1]] += 1
            if bd.canvas[explosion[0], explosion[1]] >= 8:
                # Explosion cells count 6 -> 7 -> 8, then the board is cleared.
                bd.canvas[explosion[0], explosion[1]] = 0
                bd.explosions = []
                bd.is_bomb = 0

    def cur_val(self):
        # Four-character countdown glyphs shown for the first two seconds.
        cur_time = int(time.time() - self.created_at)
        if cur_time == 1:
            return "0000"
        elif cur_time == 0:
            return "1111"
        else:
            return "%%%%"


# Sentinel bomb parked off-board (presumably until the player drops a real
# one — verify against the game loop).
bomb = Bomb(-2, -2)
|
989,463 | 0072854e33f7120a417ec9eb42aa8037709f67a7 | from django.apps import AppConfig
class PaginaInicialConfig(AppConfig):
    """Django application configuration for the 'pagina_inicial' app."""
    name = 'pagina_inicial'
|
989,464 | 6fc11d54f6f6961e27d634a4d40ae8d6b6a3be49 | import chapter3 as ch3
import chapter4 as ch4
import chapter5 as ch5
import chapter6 as ch6
import modern_robotics_functions as mr
import math
import numpy as np
def cubic_time_scaling(Tf, t):
    """Cubic (third-order polynomial) time scaling s(t) for t in [0, Tf].

    s(0) = 0, s(Tf) = 1, with zero velocity at both ends:
    s(t) = 3(t/Tf)^2 - 2(t/Tf)^3.
    """
    # Fix: the original referenced an undefined name 'T' (NameError at call
    # time), and its a3 coefficient used Tf**2 — it must be -2/Tf**3 for
    # s(Tf) to equal 1.
    a2 = 3.0 / Tf ** 2
    a3 = -2.0 / Tf ** 3
    return a2 * t ** 2 + a3 * t ** 3
def quintic_time_scaling(Tf, t):
    """Quintic time scaling s(t) = 10u^3 - 15u^4 + 6u^5 with u = t/Tf.

    Gives s(0)=0, s(Tf)=1 with zero velocity and acceleration at both ends.
    """
    u = 1.0 * t / Tf
    # Horner form of 10u^3 - 15u^4 + 6u^5.
    return ((6 * u - 15) * u + 10) * u ** 3
def joint_trajectory(theta_start, theta_end, Tf, N, method):
    """Computes a straight-line trajectory in joint space
    :param theta_start: The initial joint variables
    :param theta_end: The final joint variables
    :param Tf: Total time of the motion in seconds from rest to rest
    :param N: The number of points N > 1 (Start and stop) in the discrete
              representation of the trajectory
    :param method: The time-scaling method, where 3 indicates cubic (third-
                   order polynomial) time scaling and 5 indicates quintic
                   (fifth-order polynomial) time scaling
    :return: A trajectory as an N x n matrix, where each row is an n-vector
             of joint variables at an instant in time. The first row is
             theta_start and the Nth row is theta_end. The elapsed time
             between each row is Tf / (N - 1)"""
    N = int(N)
    timegap = Tf / (N - 1.0)  # N points, N-1 line segments
    traj = np.zeros((len(theta_start), N))  # initialize the trajectory matrix: dim 1 joint vars, dim 2 time instants
    # for each sample from 0 to Tf, compute the corresponding s value (0 to 1)
    for i in range(N):
        if method == 3:
            s = cubic_time_scaling(Tf, timegap * i)
        else:
            s = quintic_time_scaling(Tf, timegap * i)
        traj[:, i] = s * np.array(theta_end) + (1 - s) * np.array(theta_start)  # x_i = x_start + s * (x_end - x_start)
    traj = np.array(traj).T
    return traj
def screw_trajectory(X_start, X_end, Tf, N, method):
    """Computes a trajectory as a list of N SE(3) matrices corresponding to
    the screw motion about a space screw axis (each matrix represents the
    configuration of the end effector at an instant in time)
    :param X_start: The initial end-effector configuration
    :param X_end: The final end-effector configuration
    :param Tf: Total time of the motion in seconds from rest to rest
    :param N: The number of points N > 1 (Start and stop) in the discrete
              representation of the trajectory
    :param method: The time-scaling method, where 3 indicates cubic (third-
                   order polynomial) time scaling and 5 indicates quintic
                   (fifth-order polynomial) time scaling
    :return: The discretized trajectory as a list of N matrices in SE(3)
             separated in time by Tf/(N-1). The first in the list is X_start
             and the Nth is X_end"""
    N = int(N)
    timegap = Tf / (N - 1.0)
    traj = [[None]] * N
    # Relative transform taking the start frame to the end frame.
    X_start_end = np.dot(ch3.transf_matrix_inverse(X_start), X_end)
    for i in range(N):
        if method == 3:
            s = cubic_time_scaling(Tf, timegap * i)
        else:
            s = quintic_time_scaling(Tf, timegap * i)
        fractioned_X_start_end = ch3.se3_to_transf_matrix(ch3.transf_matrix_to_se3(X_start_end) * s)  # scale the se3 (matrix log) by s, then re-exponentiate
        traj[i] = np.dot(X_start, fractioned_X_start_end)
    return traj
def cartesian_trajectory(X_start, X_end, Tf, N, method):
    """Computes a trajectory as a list of N SE(3) matrices corresponding to
    the origin of the end-effector frame following a straight line
    :param X_start: The initial end-effector configuration
    :param X_end: The final end-effector configuration
    :param Tf: Total time of the motion in seconds from rest to rest
    :param N: The number of points N > 1 (Start and stop) in the discrete
              representation of the trajectory
    :param method: The time-scaling method, where 3 indicates cubic (third-
                   order polynomial) time scaling and 5 indicates quintic
                   (fifth-order polynomial) time scaling
    :return: The discretized trajectory as a list of N matrices in SE(3)
             separated in time by Tf/(N-1). The first in the list is X_start
             and the Nth is X_end

    This function is similar to screw_trajectory, except the origin of the
    end-effector frame follows a straight line, decoupled from the rotational
    motion."""
    N = int(N)
    timegap = Tf / (N - 1.0)
    traj = [[None]] * N
    # Decouple rotation and translation: interpolate R on SO(3), p linearly.
    R_start, p_start = ch3.transf_matrix_to_Rp(X_start)
    R_end, p_end = ch3.transf_matrix_to_Rp(X_end)
    R_start_end = np.dot(np.array(R_start).T, R_end)
    for i in range(N):
        if method == 3:
            s = cubic_time_scaling(Tf, timegap * i)
        else:
            s = quintic_time_scaling(Tf, timegap * i)
        R_start_end_fractioned = ch3.so3_to_rotation_matrix(ch3.rotation_matrix_to_so3(R_start_end) * s)
        R_start_s = np.dot(R_start, R_start_end_fractioned)
        p_start_s = s * np.array(p_end) + (1 - s) * np.array(p_start)  # p_start + s * (p_end - p_start)
        traj[i] = np.r_[np.c_[R_start_s, p_start_s], \
                        [[0, 0, 0, 1]]]
    return traj
989,465 | 66b02841d03c631700f72f07a0776954f943ff77 | # encoding: utf-8
import unittest
from spotify._mockspotify import mock_track, mock_album, mock_artist, mock_session
class TestTrack(unittest.TestCase):
    """Tests for the mocked spotify Track wrapper; expected values mirror the
    mock_track(...) constructor arguments below (argument meanings are
    inferred from the assertions — verify against spotify._mockspotify)."""
    track = mock_track(u'æâ€êþÿ', 3, mock_album("bar", mock_artist("baz", 1), 0, "",
                       0, 1, 1), 10, 20, 30, 40, 0, 1)

    def test_artists(self):
        # NOTE(review): names "a1".."a3" presumably come from the mock's
        # artist-count argument (3), not from mock_artist("baz") — confirm.
        self.assertEqual([x.name() for x in self.track.artists()], ["a1", "a2", "a3"])

    def test_album(self):
        self.assertEqual(self.track.album().name(), "bar")

    def test_name(self):
        self.assertEqual(self.track.name(), u'æâ€êþÿ')

    def test_duration(self):
        self.assertEqual(self.track.duration(), 10)

    def test_popularity(self):
        self.assertEqual(self.track.popularity(), 20)

    def test_disc(self):
        self.assertEqual(self.track.disc(), 30)

    def test_index(self):
        self.assertEqual(self.track.index(), 40)

    def test_error(self):
        self.assertEqual(self.track.error(), 0)

    def test_is_loaded(self):
        self.assertEqual(self.track.is_loaded(), 1)

    def test_starred(self):
        # Round-trip the starred flag: off -> on -> off.
        session = mock_session()
        self.assertEqual(self.track.starred(session), False)
        self.track.starred(session, set=True)
        self.assertEqual(self.track.starred(session), True)
        self.track.starred(session, set=False)
        self.assertEqual(self.track.starred(session), False)
|
989,466 | b72f234ad3d57920759b82ada13372aa1fa4d529 | #!/usr/bin/python
import os, sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import challenges as c
import pyqrcode
# c.u1c => challenge no. 1 for urban race
s = c.u1c
# Columnar-transposition key; its length fixes the grid width.
colstr = "DUTCH"
msg = colstr + s.replace(' ', '')
cols = len(colstr)
# Pad the message so its length is a multiple of the key length.
pad = len(colstr) - (len(msg) % cols)
msg = msg + pad * " "
# Break the padded text into rows of len(colstr) characters.
s = [msg[n:n + cols] for n in range(0, len(msg), cols)]
ps = ""
# Read each row's columns in the puzzle's fixed permutation (4, 2, 0, 1, 3).
for line in s:
    ps += line[4] + " " + line[2] + " " + line[0] + " " + line[1] + " " + line[3] + "\n"
# Encode the transposed text as a QR code and write it out as an SVG.
estring = pyqrcode.create(ps)
estring.svg('u1_final.svg', scale=8)
|
989,467 | 75995df79af9004b1e9d21317b752a6a8901ac5c | #!/usr/bin/python3
import scrapy.crawler
import test3_jb.spider
import test3_jb.sqlite
import scrapy
# In scrapy terminology,
#  a "project" is a directory containing scrapy.cfg and a "crawler".
#  a "crawler" is a python module that contains multiple "spiders" (& optionally more)
#  a "spider" is a python class that does the ACTUAL SCRAPING.
#  an "item" is a python object representing one scraped "thing" - e.g. on asos.com, a dress.
class JBHifiSpider(scrapy.Spider):
    name = 'JB Hifi'
    start_urls = ['https://www.jbhifi.com.au/collections/cameras']

    def parse(self, response):
        # NOTE(review): abandoned work-in-progress — the Product(...) call
        # below is never closed, so this file does not even parse; per the
        # comment, the product grid turned out to be rendered via AJAX.
        products_etree, = response.xpath('//*[@id="collection-search-result"]').getall()
        for product_etree in products_etree.xpath('.//@data-product-sku/..'):
            Product(
                sku=product_etree.xpath('./@data-product-sku').get()
                # FUCK, THIS IS ALL AJAX


# NOTE(review): 'dataclasses' is never imported in this file, and combining
# @dataclass with scrapy.Item is untested here.
@dataclasses.dataclass
class Product(scrapy.Item):
    sku: int
    make: str
    model: str
    price: int
# One "CrawlerProcess" runs one-or-more "spiders" concurrently.
# e.g. scraping both Asos.com and Topshop.com at the same time.
proc = scrapy.crawler.CrawlerProcess(
    settings={
        'FEEDS': {
            # output_path: FEED_EXPORTERS key
            'tmp.db': 'sqlite',
        },
        'FEED_EXPORTERS': {
            'sqlite': 'test3_jb.sqlite.SqliteExporter',
        },
        # # FIXME: scrapy wants dotted-path strings here and imports them
        # # itself; passing the classes directly would be preferable.
        # 'ITEM_PIPELINES': {'test3.sqlite.MySqliteWriter': 1},
        # Actually check the TLS cert. Like, at all.
        'DOWNLOADER_CLIENTCONTEXTFACTORY':
            'scrapy.core.downloader.contextfactory.BrowserLikeContextFactory',
    }
)
proc.crawl(crawler_or_spidercls=JBHifiSpider)
proc.start()
# Bare minimum conversion of the sqlite3 feed output to an xlsx workbook.
import xlsxwriter
import sqlite3

with xlsxwriter.Workbook('tmp.xlsx') as workbook:
    worksheet = workbook.add_worksheet()
    worksheet.write_row('A1', ['foo', 'bar', 'baz'])  # FIXME: pull headings from the db schema
    with sqlite3.connect('tmp.db') as conn:
        # Copy every row of the feed table, one spreadsheet row per db row.
        for i, row in enumerate(conn.execute('SELECT * FROM quotes').fetchall()):
            worksheet.write_row(f'A{2+i}', row)
|
989,468 | 84ad78b755d2a6834bd4283d5f13bb2b983566ed | import numpy as np
from thesis_galljamov18.python import tools, settings
# enables additional plots
THESIS = False
# -------------------------
# helper functions
# -------------------------
def rad(degrees):
return np.multiply(degrees,np.pi)/180
def deg(angles_in_rad):
return np.divide(angles_in_rad,np.pi)*180
def loadData():
    """Load the human hopping reference dataset (comcopfrc.mat) via scipy.io.

    Returns the raw dict produced by scipy.io.loadmat.
    """
    # load the data
    # subject info: weight array([[643.3]]), array([[1.79]]), array([[33]]
    path = settings.PATH_THESIS_FOLDER + 'python/training/human_hopping_data/'
    filename = 'comcopfrc.mat'
    import scipy.io as sio
    data = sio.loadmat(path + filename)
    return data
# -------------------------
# MAIN function
# -------------------------
def getReferenceTrajecsForRobot(plotData = False, groundDropTest=False, perturbationHeight=0,
onlyPerturbedHops = False):
""" :returns reference data for training including vertical sledge and joint kinematics
data order is: sledge position, sledge velocity, hip angle, hip ang vel, knee angle, knee ang vel"""
global THESIS
data = loadData()
# get legLengths of all unperturbed hops
# ---------------------------------------------------
legLen = data['lenLegAllAll']
legLenAllPerturbations = legLen[0,:] if not onlyPerturbedHops else legLen[0,5]
# get all trials in one list
legLenAllTrialsList = []
# get all unperturbed hops in one list
legLenAllHops240HzList = []
unperturbedHopsIndices = np.arange(0, 3) if not onlyPerturbedHops else [3]
if not onlyPerturbedHops:
for perturbation in legLenAllPerturbations:
for trial in range(8):
legLenAllTrialsList.append(perturbation[trial,:])
for trial in legLenAllTrialsList:
for index in unperturbedHopsIndices:
legLenAllHops240HzList.append(trial[index][0, :])
else:
# get only the perturbed hop in each of the trials
for trial in legLenAllPerturbations:
legLenAllHops240HzList.append(trial[3][0])
# get GRFs of all unperturbed hops
# ---------------------------------------------------
grfs = data['grfTotAllAll']
grfsAllPerturbations = grfs[0,:] if not onlyPerturbedHops else grfs[0,5]
# get all trials in one list
grfsAllTrialsList = []
# get all unperturbed hops in one list
grfsAllHops240HzList = []
if not onlyPerturbedHops:
for perturbation in grfsAllPerturbations:
for trial in range(8):
grfsAllTrialsList.append(perturbation[trial,:])
for trial in grfsAllTrialsList:
for index in unperturbedHopsIndices:
grfsAllHops240HzList.append(trial[index][2,:])
else:
# get only the perturbed hop in each of the trials
for trial in grfsAllPerturbations:
grfsAllHops240HzList.append(trial[3][2])
if THESIS:
tools.overlayingPlots([legLenAllHops240HzList], "Leg Lengths [m]",
title="Leg lengths of all {} hops".format(len(legLenAllHops240HzList)),
xLabels=["Time [1/240s]"])
# tools.log("Mean Takeoff velocity is: {}".format(np.mean([hop[-1] for hop in legLenAllHops240HzList])))
# set to True to plot the human LengthForce Relationship
PLOT_HUMAN_LEN_FORCE_CURVE = False
if PLOT_HUMAN_LEN_FORCE_CURVE:
# tools.overlayingPlots([grfsAllHops240HzList], "leglens")
tools.plotForceLengthCurve(grfsAllHops240HzList, legLenAllHops240HzList, 240, False)
exit(33)
if THESIS:
tools.plotMeanStdOverSeveralHops(grfsAllHops240HzList)
tools.overlayingPlots([grfsAllHops240HzList], "Leg Lengths [m]",
title="Vertical GRFs of all {} hops".format(len(legLenAllHops240HzList)),
sampleRate=240)
# --------------------------------------------------------
# SCALE DOWN human leg length to the robot dimensions
# CLEAN THE DATA before that
# --------------------------------------------------------
# get human segment length from halving the leg length, which is estimated by the mean of TD leg lens of all hops
# but first clean the data (not required for groundDropExperiment)
if not onlyPerturbedHops:
allTouchdownLegLengths = [hop[0] for hop in legLenAllHops240HzList]
meanTDLegLength = np.mean(allTouchdownLegLengths)
stdTDLegLength = np.std(allTouchdownLegLengths)
allLiftOffLegLengths = [hop[-1] for hop in legLenAllHops240HzList]
meanTOLegLength = np.mean(allLiftOffLegLengths)
stdTOLegLength = np.std(allLiftOffLegLengths)
# delete leg length trajectories where TD and LO leg length is deviating too much from the mean
# ---------------------------------------------------------------------------------------------
legLenAllHops240HzList = [hop for hop in legLenAllHops240HzList
if (abs(hop[0]-meanTDLegLength) < stdTDLegLength)
and (abs(hop[-1]-meanTOLegLength) < stdTOLegLength)]
# remove ref data with stance phase durations deviating too much from the mean duration
stancePhaseDurationsList240Hz = np.array([np.size(hop) for hop in legLenAllHops240HzList])
meanStancePhaseDuration = np.mean(stancePhaseDurationsList240Hz)
# tools.log("Mean stance phase duration is: "+str(meanStancePhaseDuration))
stdStancePhaseDuration = np.std(stancePhaseDurationsList240Hz)
legLenAllHops240HzList = [hop for hop in legLenAllHops240HzList
if abs(np.size(hop) - meanStancePhaseDuration) < stdStancePhaseDuration]
if THESIS:
tools.overlayingPlots([legLenAllHops240HzList], "Leg Lengths [m]",
title="Leg lengths of all {} hops after cleanup\n(Removed hops with TD and LO leg lengths differing from the mean more then std)".format(len(legLenAllHops240HzList)),
sampleRate=240)
# SCALE LENGHTS TO ROBOT
allCleanedTouchdownLegLengths = [hop[0] for hop in legLenAllHops240HzList]
allCleanedLiftoffLegLengths = [hop[-1] for hop in legLenAllHops240HzList]
legLengthHuman = np.max(allCleanedTouchdownLegLengths)#+allCleanedLiftoffLegLengths)
# print("Human Leg Length - estimated from max cleaned TD leg lens: {}".format(legLengthHuman))
refLegLengthRobot = 0.5356 # rest leg length / sledge position at touchdown posture (14°, 148°)
refLegLengthRobot += 0.001 # to avoid ground penetration due to simulation angles deviating from desired ones
# as the shank position always changes
# the segment length of 0.27m was manually adjusted to prevent ground penetration on the initialization step
segmentLengthRobot = 0.279
scalingFactor = refLegLengthRobot / legLengthHuman
# scale the human leg length to the robot leg length
robotsLegLengthsList240Hz = [hop*scalingFactor for hop in legLenAllHops240HzList]
# maxRobotoLegLen = np.max([hop[0] for hop in robotsLegLengthsList240Hz]+[hop[-1] for hop in robotsLegLengthsList240Hz])
# ---------------------------------------------------------------------------------
# extract the HIP and KNEE ANGLES from the robots leg length for each individual hop
# ---------------------------------------------------------------------------------
hipAnglesAllHopsList240Hz = []
kneeAnglesAllHopsList240Hz = []
for robotHopLegLen in robotsLegLengthsList240Hz:
arcusArguments = robotHopLegLen / (2*segmentLengthRobot)
maxArg = np.max(arcusArguments)
hipAngles = np.arccos(arcusArguments)
kneeAngles = 2 * np.arcsin(arcusArguments)
hipAnglesAllHopsList240Hz.append(hipAngles)
kneeAnglesAllHopsList240Hz.append(kneeAngles)
if THESIS:
# get mean TD hip and knee angles to use it as desired values for the flight phase posture
allHipTDAngles = deg([hop[0] for hop in hipAnglesAllHopsList240Hz])
allKneeTDAngles = deg([hop[0] for hop in kneeAnglesAllHopsList240Hz])
meanHipTDAngle = np.mean(allHipTDAngles)
meanKneeTDAngle = np.mean(allKneeTDAngles)
print("Mean TD conditions are {}° hip angle and {}° knee angle!".format(meanHipTDAngle, meanKneeTDAngle))
# tools.severalPlots([allHipTDAngles, allKneeTDAngles], ["Hip TD Angles", "Knee TD Angles"])
# -----------------------------------------------------------------------------------
# get leg len VELOCITIES as well as joint angular velocities by taking the derivative
# -----------------------------------------------------------------------------------
robotLegSpeedsAllHops240HzList = [np.gradient(hop, 1 / 240) for hop in robotsLegLengthsList240Hz]
hipAngVelsAllHops240HzList = [np.gradient(hop, 1 / 240) for hop in hipAnglesAllHopsList240Hz]
kneeAngVelsAllHops240HzList = [np.gradient(hop, 1 / 240) for hop in kneeAnglesAllHopsList240Hz]
# ---------------------------------
# further CLEANING of ref data
# ---------------------------------
# as we have now several list (one for each trajec), it is better to first collect all the nr. of bad hops
# and then delete them from all lists at once
badHopsNrs = set()
# remove hops with deviating LO hip angles as there were some problems observed during training with that
liftoffHipAnglesAllHopsList = [hop[-1] for hop in hipAnglesAllHopsList240Hz]
meanLOHipAngle = np.mean(liftoffHipAnglesAllHopsList)
stdLOHipAngle = np.std(liftoffHipAnglesAllHopsList)
for hopNr in range(len(liftoffHipAnglesAllHopsList)):
if (liftoffHipAnglesAllHopsList[hopNr] - meanLOHipAngle) > stdLOHipAngle*1.25:
badHopsNrs.add(hopNr)
# print("{} bad hops detected from comparing LO hip angles!".format(len(badHopsNrs)))
hopNr = 1
if THESIS:
tools.severalPlots([robotsLegLengthsList240Hz[hopNr], robotLegSpeedsAllHops240HzList[hopNr],
deg(hipAnglesAllHopsList240Hz[hopNr]), hipAngVelsAllHops240HzList[hopNr],
deg(kneeAnglesAllHopsList240Hz[hopNr]), kneeAngVelsAllHops240HzList[hopNr]],
yLabels=["Leg Length [m]", "Leg Length\nDerivative [m/s]", "Hip Angles [°]", "Hip Angle\nVelocities [rad/s]",
"Knee Angles [°]", "Knee Angle\nVelocities [rad/s]"],
title="Test Reference Data Before Interpolation")
# ----------------------------------------------------------------------
# INTERPOLATE ALL REF TRAJECS for each individual hop at 240Hz to get 2kHz data out of it
# ----------------------------------------------------------------------
desiredFrequency = 2e3
# sledge pos, sledge vel, hip angles, hip angVels, knee angles, knee angVels
iSledgePos, iSledgeVel, iHipAngle, iHipAngVel, iKneeAngle, iKneeAngVel = range(6)
refTrajecsAllHops240Hz = [robotsLegLengthsList240Hz, robotLegSpeedsAllHops240HzList,
hipAnglesAllHopsList240Hz, hipAngVelsAllHops240HzList,
kneeAnglesAllHopsList240Hz, kneeAngVelsAllHops240HzList]
refTrajecsAllHops2kHz = [[],[],[],[],[],[]]
for trajecIndex in range(len(refTrajecsAllHops240Hz)):
for hopIndex in range(len(refTrajecsAllHops240Hz[trajecIndex])):
hop = refTrajecsAllHops240Hz[trajecIndex][hopIndex]
currentXAxis = np.arange(0, np.size(hop), 1)
newXAxis = np.arange(0, np.size(hop), 240 / desiredFrequency)
# cut the last 8 points as they will be all equal to the last point from the origin data
interpolatedHop = np.interp(newXAxis, currentXAxis, hop)[:-8]
refTrajecsAllHops2kHz[trajecIndex].append(interpolatedHop)
# tools.plotMeanStdOverSeveralHops(refTrajecsAllHops2kHz[iSledgePos])
# exit(33)
# get each hops STANCE PHASE DURATION
stancePhaseDurationsList = np.array([np.size(hop) for hop in refTrajecsAllHops2kHz[iSledgePos]])
# get all the TOUCHDOWN CONDITIONS
totalNrOfHops = len(stancePhaseDurationsList)
touchdownConditions = np.zeros([6, totalNrOfHops])
for trajecIndex in range(len(refTrajecsAllHops2kHz)):
for hopIndex in range(len(refTrajecsAllHops2kHz[trajecIndex])):
oneTrajecForOneHop = refTrajecsAllHops2kHz[trajecIndex][hopIndex]
touchdownConditions[trajecIndex, hopIndex] = oneTrajecForOneHop[0]
# set to True to get modified ref trajectories for training
RESCALE_DATA_IN_TIME = False
if RESCALE_DATA_IN_TIME:
# scale the data horizontally by 20% to get a new reference trajectory
for trajecIndex in range(len(refTrajecsAllHops2kHz)):
for hopIndex in range(len(refTrajecsAllHops2kHz[trajecIndex])):
refTrajecsAllHops2kHz[trajecIndex][hopIndex] = tools.rescaleInTime(
refTrajecsAllHops2kHz[trajecIndex][hopIndex], 1.2)
# correct the data a bit (as we do not have the correct segment lengths due to the shank length changing with time)
# the foot position should be about 1cm behind: reduce hip angle and lift sledge a bit up
# the data is also only used for initialization
hipAngleDecrease = rad(2)
for hopIndex in range(len(refTrajecsAllHops2kHz[iHipAngle])):
refTrajecsAllHops2kHz[iHipAngle][hopIndex] -= hipAngleDecrease
if THESIS:
tools.plot(deg(touchdownConditions[iKneeAngle, :]),
ylabel="Touchdown Knee Angles for all Hops [°]",
xlabel="Nr. of Hop []")
tools.plot(stancePhaseDurationsList*1/desiredFrequency,
ylabel="Stance Phase Duration [s]", xlabel="Nr. of Hop []")
if groundDropTest and not onlyPerturbedHops:
# lift all sledge Pos by groundDropHeight
for hopIndex in range(len(refTrajecsAllHops2kHz[iSledgePos])):
for i in range(len(refTrajecsAllHops2kHz[iSledgePos][hopIndex])):
refTrajecsAllHops2kHz[iSledgePos][hopIndex][i] += perturbationHeight
if THESIS or __name__ == "__main__":
tools.severalPlots([refTrajecsAllHops2kHz[iSledgePos][hopNr], refTrajecsAllHops2kHz[iSledgeVel][hopNr],
deg(refTrajecsAllHops2kHz[iHipAngle][hopNr]), refTrajecsAllHops2kHz[iHipAngVel][hopNr],
deg(refTrajecsAllHops2kHz[iKneeAngle][hopNr]), refTrajecsAllHops2kHz[iKneeAngVel][hopNr]],
yLabels=["Leg Length [m]", "Leg Length\nDerivative [m/s]",
"Hip Angles [°]", "Hip Angle\nVelocities [rad/s]",
"Knee Angles [°]", "Knee Angle\nVelocities [rad/s]"],
xLabels=["Time [1/2000s]"], thesis=True,
title="Interpolated Reference Data for one single hop's stance phase")
# to return only ref trajecs for one single hop
RETURN_ONLY_ONE_HOP = False
if RETURN_ONLY_ONE_HOP:
refTrajecOneHop = [trajec[0] for trajec in refTrajecsAllHops2kHz]
# tools.severalPlots(refTrajecOneHop, yLabels=["sth"] * 6)
return refTrajecOneHop
else:
return refTrajecsAllHops2kHz, touchdownConditions
if __name__ == "__main__":
getReferenceTrajecsForRobot(groundDropTest=False, onlyPerturbedHops=False)
# uncomment to get only perturbed hops to make the ground drop experiment
# getReferenceTrajecsForRobot(groundDropTest=True, onlyPerturbedHops=True)
|
989,469 | b20a1be39bf1ed61f9a9635b5443cc69e7aed999 | """
Created on Sat Oct 14 10:27:59 2017
@author: jmamath
"""
############ 1 - Packages ############
import numpy as np
import matplotlib.pyplot as plt
import math
import tensorflow as tf
from tensorflow.python.framework import ops
import time
from model_utils import *
from Load_data_v import load_preprocessed_dataset
############ 2 - Dataset ############
train_x,train_y,test_x,test_y = load_preprocessed_dataset()
#Display one digit
index = 56
plt.imshow(train_x.T[index].reshape((28, 28)))
# Explore your dataset
m_train = train_x.shape[1]
m_test = test_x.shape[1]
n_y = train_y.shape[0]
n_x = train_x.shape[0]
print ("Number of training examples: m_train = " + str(m_train))
print ("Number of testing examples: m_test = " + str(m_test))
print ("Height/Width of each image: num_px = " + str(28))
print ("number of classes: n_y = " + str(n_y))
print ("train_x shape: " + str(train_x.shape))
print ("train_y shape: " + str(train_y.shape))
print ("test_x shape: " + str(test_x.shape))
print ("test_y shape: " + str(test_y.shape))
############ 3 - L-layer neural network ############
def L_layer_model(X_train, Y_train, X_test, Y_test, layers_dims, num_epochs = 1500, minibatch_size = 32, learning_rate = 0.001, beta = 0.01):
    """
    Trains an L-layer neural network (LINEAR->RELU ... LINEAR->SOFTMAX) with
    TensorFlow 1.x, plots the cost curve and prints train/test accuracy.
    Arguments:
    X_train -- training data, of shape (n_x, number of examples)
    Y_train -- training labels, one-hot, of shape (n_y, number of examples)
    X_test -- test data, of shape (n_x, number of test examples)
    Y_test -- test labels, one-hot, of shape (n_y, number of test examples)
    layers_dims -- dimensions of the layers, e.g. [n_x, h_1, ..., n_y]
    num_epochs -- number of epochs of the optimization loop
    minibatch_size -- size of a minibatch
    learning_rate -- learning rate of the Adam optimizer
    beta -- regularization hyperparameter (passed to cost_withRegularization)
    """
    tic = time.time()
    ops.reset_default_graph()
    tf.set_random_seed(1)
    seed = 3
    costs = [] # to keep track of the cost
    m = X_train.shape[1] # number of examples
    n_x,n_y = layers_dims[0],layers_dims[len(layers_dims)-1]
    X,Y = create_placeholders(n_x, n_y)
    ## Initializing parameters
    parameters = initialize_parameters_deep(layers_dims)
    # Forward propagation
    AL = L_model_forward(X, parameters)
    ## Computing cost
    cost = cost_withRegularization(AL, Y, parameters, beta)
    # Backpropagation: Define the tensorflow optimizer.
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
    # Initialize all the variables
    init = tf.global_variables_initializer()
    with tf.Session() as sess :
        sess.run(init)
        for epoch in range(num_epochs):
            epoch_cost = 0. # Defines a cost related to an epoch
            num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
            seed = seed + 1
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)
            for minibatch in minibatches:
                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch
                _ , minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
                epoch_cost += minibatch_cost / num_minibatches
            # Print the cost every epoch
            # NOTE(review): the cost is actually printed/recorded only every
            # 10th epoch, so the plot's x-axis is in units of 10 epochs and
            # the 'iterations' label below is misleading
            if epoch % 10 == 0:
                print ("Cost after epoch %i: %f" % (epoch, epoch_cost))
                costs.append(epoch_cost)
        # Ploting cost
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations')
        plt.title("Learning rate =" + str(learning_rate))
        # lets save the parameters in a variable
        parameters = sess.run(parameters)
        print ("Parameters have been trained!")
        # Calculate the correct predictions
        # tf.argmax with no axis reduces over axis 0 -- the class dimension here
        correct_prediction = tf.equal(tf.argmax(AL), tf.argmax(Y))
        # Calculate accuracy on the training and test set
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print ("Train Accuracy: %", 100*accuracy.eval({X: X_train, Y: Y_train}))
        print ("Test Accuracy: %", 100*accuracy.eval({X: X_test, Y: Y_test}))
        # NOTE(review): redundant -- the enclosing with-block closes the session
        sess.close()
    toc = time.time()
    print("Time :" + str(math.floor(toc-tic)) + "s")
h_1 = 200 # No neurons in the 1st hidden layer
h_2 = 100 # No neurons in the 2nd hidden layer
h_3 = 50 # No neurons in the 3rd hidden layer
layers_dims = [n_x,h_1,h_2,h_3, n_y]
L_layer_model(train_x, train_y, test_x, test_y,layers_dims,num_epochs = 12, minibatch_size = 1024, learning_rate = 0.001, beta = 0.001)
# 64 epoch is the gold
# I get 99.84% on the training set
# and 98.05% on the test set for 390s
|
989,470 | 004f015140d7e7d5a3a5dcb79f1c7a528dc1bd8f | '''
Sentiment analyses for large dataset is slightly different.
The approach to pre-process the data is very similar to what
we did in small data project i.e. in the file Sentiment_Analyses.py.
Here is the brief algorithm
for pre-processing:
1. Get the statements sorted as positive and negative
2. Store list of positive and negative statements in separate files
and call them pos.txt and neg.txt
3. Next create a lexicon of the words in pos and neg statements.
To arrive at the lexicon, first tokenize the words in each file.
Later convert the words into lower case. Next eliminate those
words that occur too frequently and too rarely. The logic is
that in either case the impact of these words on the sentiment is less
4. Now convert each sentence of pos file into a vector of 1s and 0s
using the following logic: First create an empty sentence array of 0s, of
length same as the lexicon. Now read each sentence of the positive
file. For each word in the sentence, find if there is a
matching word in the lexicon. If yes, replace the 0 in the empty sentence array
with a 1 at the same index value as the matching word in lexicon.
At the end of processing of each sentence, the sentence array
will be a collection of 0s and 1s. Each sentence array is of the same
length, same as the lexicon and will be filled with 1s and 0s. This is
called sentence vector.
5. Append the classification identifier to
each sentence vector. The classification identifier is an array
[1,0] for a positive statement and [0,1] for a negative
statement. For example, a sentence vector for a positive sentence
will look like [....1,0,0,0,0,0,1....][1,0].
6. Repeat the same process for the neg file. The example of
a negative sentence vector will look like [....0,0,0,0,1,1,1....][0,1]
7. Merge these two list of sentence vectors into a single file and shuffle them.
8. Later divide the data set into training set and test set (10%).
'''
import nltk
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
import pickle
import numpy as np
import pandas as pd
from collections import Counter
n_lines = 1600000
lemm = WordNetLemmatizer()
def init_process(fin, fout):
    """Convert a raw Sentiment140-style CSV into 'label:::tweet' lines.

    fin  -- input CSV; the first field is the polarity ('0' = negative,
            '4' = positive, per the Sentiment140 convention), the last
            field is the tweet text.
    fout -- output file (appended to); one line per tweet, in the form
            '[1, 0]:::tweet' for '0' rows or '[0, 1]:::tweet' for '4' rows.
    """
    # initialize the counters before the try so the summary prints below
    # can never hit a NameError if reading fails early
    count_pos = 0
    count_neg = 0
    # context manager guarantees the output handle is closed on any error
    # (the original leaked it when an exception escaped)
    with open(fout, 'a') as outfile:
        with open(fin, buffering=200000, encoding='latin-1') as f:
            try:
                for line in f:
                    line = line.replace('"', '')
                    initial_polarity = line.split(',')[0]
                    if initial_polarity == '0':
                        # keep the original one-hot encoding for '0' rows so
                        # previously generated files stay compatible
                        initial_polarity = [1, 0]
                        count_neg += 1  # '0' is the *negative* class
                    elif initial_polarity == '4':
                        initial_polarity = [0, 1]
                        count_pos += 1  # '4' is the *positive* class
                    tweet = line.split(',')[-1]
                    outline = str(initial_polarity) + ':::' + tweet
                    outfile.write(outline)
            except Exception as e:
                print(str(e))
    print('Init process done')
    print('Training data: Pos numbers is %.0f ' % count_pos)
    print('Training data: Neg number is %.0f ' % count_neg)
init_process('training.1600000.processed.noemoticon.csv', 'training_large_dataset.csv')
init_process('testdata.manual.2009.06.14.csv','test_large_dataset.csv')
def create_lexicon(fin):
    """Build a frequency-filtered lexicon from 'label:::tweet' lines.

    Tokenizes and lemmatizes each tweet, appending per-line word sets to
    lexicon_draft.pickle, then keeps only words whose total count is
    strictly between 50 and 1000 (too-rare and too-frequent words carry
    little sentiment signal) and writes them to lexicon_big_dataset.pickle.
    """
    lexicon = []
    pickle_dumps = 0
    with open(fin, 'r', buffering=200000, encoding='latin-1') as f:
        try:
            counter = 1
            content = ''
            for line in f:
                tweet = line.split(':::')[1]
                content += '' + tweet
                words = word_tokenize(content)
                words = [lemm.lemmatize(i) for i in words]
                lexicon = list(set(lexicon + words))
                print(counter, len(lexicon))
                # distinct name here: the original reused 'f', shadowing the
                # input file handle that the enclosing loop is iterating
                with open('lexicon_draft.pickle', 'a+b') as draft_file:
                    pickle.dump(lexicon, draft_file)
                    pickle_dumps += 1
                # NOTE(review): a dump happens for *every* input line; if the
                # intent was to batch every N lines, a counter check is missing
                content = ''
                lexicon = []
                counter += 1
                if counter > n_lines:
                    break
        except Exception as e:
            print(str(e))
    # re-read every dumped partial lexicon and total the word counts
    with open('lexicon_draft.pickle', 'rb') as p:
        lexi = []
        for n in range(pickle_dumps):
            lexi += pickle.load(p)
        w_counts = Counter(lexi)
    l2 = []
    for w in w_counts:
        if 1000 > w_counts[w] > 50:
            l2.append(w)
    print('\n\n\n' + 'The length of lexicon is ', len(l2))
    with open('lexicon_big_dataset.pickle', 'wb') as f:
        pickle.dump(l2, f)
    print("Lexicon created")
create_lexicon('training_large_dataset.csv')
def convert_to_vec(fin, fout, lexicon_pickle):
    """Convert 'label:::tweet' lines into bag-of-words feature vectors.

    Each output line is 'str(features)::label', where features[i] counts
    how often lexicon word i occurs in the tweet. Note the TWO-colon output
    separator versus the three-colon input separator.
    """
    with open(lexicon_pickle, 'rb') as f:
        lexicon = pickle.load(f)
    # word -> column index, built once; the original scanned the lexicon
    # list twice per word ('in' + .index), making each tweet quadratic
    index_of = {word: i for i, word in enumerate(lexicon)}
    # context manager so the output is flushed/closed even if tokenization
    # raises (the original never closed this handle)
    with open(fout, 'a') as outfile:
        with open(fin, buffering=200000, encoding='latin-1') as f:
            counter = 0
            for line in f:
                counter += 1
                label = line.split(':::')[0]
                tweet = line.split(':::')[1]
                current_words = word_tokenize(tweet.lower())
                current_words = [lemm.lemmatize(i) for i in current_words]
                features = np.zeros(len(lexicon))
                for word in current_words:
                    index_value = index_of.get(word.lower())
                    if index_value is not None:
                        features[index_value] += 1
                features = list(features)
                outline = str(features) + '::' + str(label) + '\n'
                outfile.write(outline)
    print("Convert to vec done")
convert_to_vec('test_large_dataset.csv', 'processed-test-set.csv', 'lexicon_big_dataset.pickle')
def shuffle_data(fin, fout='train_set_shuffled.csv'):
    """Randomly shuffle the rows of CSV *fin* and write them to *fout*.

    fout defaults to the original hard-coded 'train_set_shuffled.csv' so
    existing callers are unaffected. The header is kept; no index column
    is written.
    """
    # on_bad_lines='skip' replaces error_bad_lines=False, which was removed
    # in pandas 2.0: malformed rows are dropped instead of raising
    df = pd.read_csv(fin, on_bad_lines='skip')
    # permute the row positions (same np.random-based shuffle as before)
    df = df.iloc[np.random.permutation(len(df))]
    df.to_csv(fout, index=False)
    print("Shuffle done")
shuffle_data('training_large_dataset.csv')
|
989,471 | cb929e617856107e661850def352b699c6bc9bb7 | # Import required packages
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import tweepy
import codecs
# Put your access token, access secret, consumer key, and consumer secret here (with quote)
access_token = 'YOUR-ACCESS-TOKEN'
access_secret = 'YOUR-ACCESS-SECRET'
consumer_key = 'YOUR-CONSUMER-KEY'
consumer_secret = 'YOUR-CONSUMER-SECRET'
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
# The twitter_data.json below is using dataframe_builder.py script as reference
class StdOutListener(StreamListener):
    """Tweepy StreamListener that echoes each raw tweet JSON payload to
    stdout and appends it to twitter_data.json (UTF-8)."""
    def on_data(self, data):
        # 'data' is one tweet as a raw JSON string from the streaming API
        print data
        # re-opened per tweet in append mode; simple, but costs an
        # open/close per message -- NOTE(review): consider keeping it open
        file = codecs.open('twitter_data.json', 'a', 'utf-8')
        file.write(data)
        file.close()
        # returning True tells tweepy to keep the stream running
        return True
    def on_error(self, status):
        # log the HTTP error status; reconnection is left to tweepy defaults
        print status
# BE CAREFUL!! This script will start by wiping out the content of existing twitter_data.json
if __name__ == '__main__':
    # truncate the log so every run starts from an empty file
    file = codecs.open('twitter_data.json', 'w', 'utf-8')
    file.write('')
    file.close()
    l = StdOutListener()
    stream = Stream(auth, l)
    # Just edit the track list to stream any keywords you want
    # (blocks here until interrupted)
    stream.filter(track=['indonesia', 'jakarta'])
989,472 | faead84b37f7861891d0471c348968658af117de | # Path to the traktor installation directory where collection.nml is.
TRAKTOR_PATH = '/tmp/fake-traktor'
# Enable pretty stack traces and router logging
DEBUG = False
# Allow users to perform a live run
LIVE_RUN_IS_AVAILABLE = False
|
989,473 | d876384b223f702c127d1bbcbffd9d3e4a23cbcc | import numpy as np
# A small sample array of descending integers.
testA = np.array([10, 9, 8, 7, 6, 5])
# Echo every element on its own line.
for value in testA:
    print(value)
989,474 | 91ea13e89937769830f6f8111dbb355e8925a7ac | """waze_poi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from waze_poi import settings
from django.conf.urls.static import static
from venues.views import Home, CreateVenue, UpdateVenue, UpdateVenue2, MyVenues, Resume
from django.contrib.auth import views as auth_views
urlpatterns = [
    url(r'^$', Home.as_view()),
    url(r'^my_venues$', MyVenues.as_view(), name='my_venues'),
    # NOTE(review): no '$' anchor, so this also matches e.g. /resume2 -- confirm intended
    url(r'^resume', Resume.as_view(), name='resume'),
    url(r'^venue/new$', CreateVenue.as_view(), name='create_venue'),
    url(r'^venue/(?P<pk>[0-9]+)/$', UpdateVenue.as_view(), name='update_venue'),
    url(r'^my_venue/(?P<pk>[0-9]+)/$', UpdateVenue2.as_view(), name='update_venue2'),
    url(r'^admin/', admin.site.urls),
    # django-select2 AJAX endpoints
    url(r'^select2/', include('django_select2.urls')),
    # python-social-auth login/complete/disconnect routes
    url('', include('social_django.urls', namespace='social')),
    url('^logout/$', auth_views.logout_then_login, name='logout'),
]
|
989,475 | 0ec94719fa1741f7c5a456b289f3032444f3b736 | # __author__ = 'Irina.Chegodaeva'
import os
from model.contact import Contact
import random
import pytest
def test_edit_some_contact_from_edit_form(app, db, check_ui):
if len(db.get_contact_list()) == 0:
with pytest.allure.step('If there is no contact I will add a new one'):
app.contact.add_contact(Contact(last_name="test" + app.libs.substring),
delete_photo=False,
dataset=(("1", "3"), ("2", "2"), ("3", "12"), ("4", "3"), ("5", "1")))
contact = Contact(first_name="First_Name",
middle_name="Middle_Name",
last_name="Last_Name" + app.libs.substring,
nickname="Nickname",
pic=str(os.path.dirname(__file__)) + "\\photo.gif",
title="Title",
company_name="Company Name",
company_address="Company Address",
home_phone="(999)111-11-11",
mobile_phone="(999)111-11-22",
work_phone="(999)111-11-33",
fax="(999)111-11-44",
email_1="email_1@company.com",
email_2="email_2@company.com",
email_3="email_3@company.com",
homepage="http://www.homepage.net",
birth_year="1970",
anniv_year="1990",
home_addr="Home Address",
notes="Some Notes",
extra_phone="(999)111-11-55")
with pytest.allure.step('Given a contact list before edit'):
old_contacts = db.get_contact_list()
with pytest.allure.step('Given a random contact from the list for edit'):
modified_contact = random.choice(old_contacts)
with pytest.allure.step('I modified the contact'):
app.contact.modify_some_contact_by_id(modified_contact.id, contact, delete_photo=True,
dataset=(("1", "3"), ("2", "3"), ("3", "14"), ("4", "3")),
edit_detail="edit_form")
with pytest.allure.step('Given a contact list after edit'):
new_contacts = db.get_contact_list()
with pytest.allure.step('Then the length of the new contact list is equal to the length of the old list'):
assert len(old_contacts) == len(new_contacts)
with pytest.allure.step('The modified contact was changed in the old list'):
contact.id = modified_contact.id
old_contacts.remove(modified_contact)
old_contacts.insert(int(modified_contact.id), contact)
with pytest.allure.step('Then the new contact list is equal to the old list with the edit contact'):
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
if check_ui:
assert sorted(map(app.libs.clean_contact, new_contacts), key=Contact.id_or_max) ==\
sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
def test_edit_some_contact_from_details(app, db):
if app.contact.count() == 0:
with pytest.allure.step('If there is no contact I will add a new one'):
app.contact.add_contact(Contact(last_name="test" + app.libs.substring),
delete_photo=False,
dataset=(("1", "3"), ("2", "2"), ("3", "12"), ("4", "3"), ("5", "1")))
contact = Contact(first_name="First_Name" + app.libs.substring,
middle_name="Middle_Name",
last_name="Last_Name" + app.libs.substring,
nickname="Nickname",
pic=str(os.path.dirname(__file__)) + "\\photo.gif",
title="Title",
company_name="Company Name",
company_address="Company Address",
home_phone="(999)111-11-11",
mobile_phone="(999)111-11-22",
work_phone="(999)111-11-33",
fax="(999)111-11-44",
email_1="email_1@company.com",
email_2="email_2@company.com",
email_3="email_3@company.com",
homepage="http://www.homepage.net",
birth_year="1970",
anniv_year="1990",
home_addr="Home Address",
notes="Some Notes",
extra_phone="(999)111-11-55")
with pytest.allure.step('Given a contact list before edit'):
old_contacts = db.get_contact_list()
with pytest.allure.step('Given a random contact from the list for edit'):
modified_contact = random.choice(old_contacts)
with pytest.allure.step('I modified the contact'):
app.contact.modify_some_contact_by_id(modified_contact.id, contact, delete_photo=True,
dataset=(("1", "3"), ("2", "3"), ("3", "14"), ("4", "4")),
edit_detail="detail_form")
with pytest.allure.step('Given a contact list after edit'):
new_contacts = db.get_contact_list()
with pytest.allure.step('Then the length of the new contact list is equal to the length of the old list'):
assert len(old_contacts) == len(new_contacts)
with pytest.allure.step('When I changed the contact %s in the old list' % contact):
contact.id = modified_contact.id
old_contacts.remove(modified_contact)
old_contacts.insert(int(modified_contact.id), contact)
with pytest.allure.step('Then the new contact list is equal to the old list with the edited contact'):
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
|
989,476 | 897b070314b10581f1675ceba67760e4da4e538b | #!/usr/bin/python
#-*-coding:utf-8-*-
# system database engine.
#
# Package: SQLAlchemy
#
# GNU Free Documentation License 1.3
import os
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, Integer, String, MetaData
from sqlalchemy.orm import mapper, scoped_session, sessionmaker
def init_record(spider_name, dbname):
    """Create (or open) a per-spider SQLite DB with a 'records' table.

    spider_name -- subdirectory under 'records/' holding this spider's DBs
    dbname      -- SQLite file name inside that subdirectory

    Returns:
        (Records, session): the ORM class classically mapped to the table,
        and a scoped_session factory bound to the engine.
    """
    db_path = os.path.join('records', str(spider_name))
    # fixed typo: was 'tabal_name'
    table_name = 'records'
    if not os.path.exists(db_path):
        os.makedirs(db_path)
    db_file = os.path.join(db_path, str(dbname))
    engine = create_engine('sqlite:///%s' % db_file)
    metadata = MetaData()
    # 'hash' is indexed because dedup lookups query by it
    record_table = Table(table_name, metadata,
        Column('id', Integer, primary_key=True),
        Column('hash', String, index=True)
    )
    class Records(object):
        def __init__(self, hash):
            self.hash = hash
        def __repr__(self):
            return "<Records('%s')>" % (self.hash)
    # classical mapping: ties the plain Records class to record_table
    mapper(Records, record_table)
    metadata.create_all(engine)
    session = scoped_session(sessionmaker(bind=engine))
    return Records, session
989,477 | cd5f8d2dbd88696b5901777daf70fb0e5c6d2e78 |
from PySide2.QtSql import QSqlDatabase, QSqlQuery
from suwar import SUWAR_LIST
AYAT_DB = "resources/ayat.ayt"
TAFASIR_DB = "resources/tafasir.ayt"
AYAT_DB_CON = QSqlDatabase.addDatabase('QSQLITE', "AYAT_DB_CON")
AYAT_DB_CON.setDatabaseName(AYAT_DB)
AYAT_DB_CON.open()
DBS_CON = QSqlDatabase.addDatabase('QSQLITE')
# @return: arry of Tuple's (aya_number, aya_text)
def get_sura(sura_number:int) -> list:
    """Return [(aya_number, aya_text), ...] for one sura, ordered by aya."""
    query = QSqlQuery(AYAT_DB_CON)
    # sura_number is annotated int; if a caller ever passes an untrusted
    # string, this interpolation becomes injectable -- verify callers
    query.exec_(f"SELECT * FROM ayat WHERE sura={sura_number} ORDER BY aya ASC")
    # column indices of the 'ayat' table
    id, sura_n, aya_n, text, nass_safy, safha = range(6)
    data = []
    while query.next():
        item = (query.value(aya_n), query.value(text))
        data.append(item)
    query.finish()
    return data
# @return: tafsir text as string
def get_aya_tafsir(sura_number:int, aya_number:int, trans_key:str, trans_dir:str) -> str:
    """Return the tafsir/translation text for one aya from the given source.

    trans_key doubles as both the .ayt file stem and the SQL table name, so
    it must come from get_tafasir_list() -- interpolating an arbitrary
    string here would allow SQL/path injection.
    """
    db_path = f"resources/{trans_dir}/{trans_key}.ayt"
    DBS_CON.setDatabaseName(db_path)
    DBS_CON.open()
    query = QSqlQuery(f"SELECT * FROM {trans_key} WHERE sura={sura_number} AND aya={aya_number} LIMIT 1", DBS_CON)
    query.first()
    # index 3 is assumed to be the text column (mirrors the ayat schema:
    # id, sura, aya, text) -- TODO confirm against the trans tables
    text = query.value(3)
    query.finish()
    DBS_CON.close()
    return text
def get_tafasir_list() -> list:
    """Return the available tafasir as (trans_key, trans_name, trans_dir) tuples."""
    DBS_CON.setDatabaseName(TAFASIR_DB)
    DBS_CON.open()
    query = QSqlQuery(f'SELECT * FROM trans ORDER BY trans_order ASC', DBS_CON)
    # Column layout of the `trans` table.
    _id, key_col, name_col, _desc, _tbl, _order, dir_col = range(7)
    result = []
    while query.next():
        result.append((query.value(key_col), query.value(name_col), query.value(dir_col)))
    query.finish()
    DBS_CON.close()
    return result
# Full-text search over the plain (nass_safy) aya text.
# Returns tuples of (sura_number:int, sura_name:str, aya_number:int, aya_text:str).
def get_aya_search_result(text:str) -> list:
    data = []
    if len(text) <= 0: return data
    query = QSqlQuery(AYAT_DB_CON)
    # BUG FIX: the search text was previously interpolated directly into the
    # SQL string (LIKE "%{text}%"), so any quote character in the user's
    # query broke the statement (SQL injection). Bind it as a parameter.
    query.prepare('SELECT * FROM ayat WHERE nass_safy LIKE ?')
    query.addBindValue(f"%{text}%")
    query.exec_()
    # Column layout of the `ayat` table. Renamed the text column index so it
    # no longer shadows the `text` parameter.
    _id, sura, aya, text_col, _nass_safy, _safha = range(6)
    while query.next():
        name = SUWAR_LIST[query.value(sura) - 1]["name"]
        item = (query.value(sura), name, query.value(aya), query.value(text_col))
        data.append(item)
    query.finish()
    return data
989,478 | de1744166c6dd4507bbc48b0f0273bbbeab883e5 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RHttpuv(RPackage):
    """HTTP and WebSocket Server Library.

    Provides low-level socket and protocol support for handling HTTP and
    WebSocket requests directly from within R. It is primarily intended as a
    building block for other packages, rather than making it particularly easy
    to create complete web applications using httpuv alone. httpuv is built on
    top of the libuv and http-parser C libraries, both of which were developed
    by Joyent, Inc. (See LICENSE file for libuv and http-parser license
    information.)"""

    cran = "httpuv"

    # Known-good CRAN releases (sha256 of the source tarball).
    version("1.6.5", sha256="f5f63629ca5e9d0e396a89982d95b5286726c0cb425166f35a3ad32a60a79156")
    version("1.5.5", sha256="0be6c98927c7859d4bbfbbec8822c9f5e95352077d87640a76bc2ade07c83117")
    version("1.5.1", sha256="b5bb6b3b2f1a6d792568a70f3f357d6b3a35a5e26dd0c668c61a31f2ae8f5710")
    version("1.3.5", sha256="4336b993afccca2a194aca577b1975b89a35ac863423b18a11cdbb3f8470e4e9")
    version("1.3.3", sha256="bb37452ddc4d9381bee84cdf524582859af6a988e291debb71c8a2e120d02b2a")

    # R-side dependencies; `when=` pins a dependency to the httpuv versions
    # whose DESCRIPTION actually requires it.
    depends_on("r@2.15.1:", type=("build", "run"))
    depends_on("r-rcpp@0.11.0:", type=("build", "run"))
    depends_on("r-rcpp@1.0.7:", type=("build", "run"), when="@1.6.5:")
    depends_on("r-r6", type=("build", "run"), when="@1.5.0:")
    depends_on("r-promises", type=("build", "run"), when="@1.5.0:")
    depends_on("r-later@0.8.0:", type=("build", "run"), when="@1.5.0:")
    # Native build tooling for the vendored libuv/http-parser sources.
    depends_on("gmake", type="build")
    depends_on("zip")
    depends_on("zlib", when="@1.6.4:")
    # r-bh was only required by the 1.5.5 release.
    depends_on("r-bh", type=("build", "run"), when="@1.5.5")
|
989,479 | dea287666a7edd17a6dec3380b5b2c4c3dc47dc9 | import cv2
import numpy as np
def nothing(x):
    """No-op callback (OpenCV trackbar handlers require a callable)."""
    return None
# Live webcam demo: show the SUMMER colormap of the current frame, both on the
# raw BGR image (blurred + dilated) and on an RGB-swapped copy.
cap = cv2.VideoCapture(0)
# Hoisted out of the loop: the morphology kernel never changes.
kernel = np.ones((5, 5), np.uint8)
while True:
    ret, frame = cap.read()
    if not ret:
        # Camera read failed / stream ended; previously this crashed in cvtColor.
        break
    # BUG FIX (cleanup): the original computed COLORMAP_JET and two identical
    # COLORMAP_PINK images plus an unused erosion, none of which were ever
    # displayed; only the two windows below were. Drop the dead work.
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    summer_rgb = cv2.applyColorMap(frame_rgb, cv2.COLORMAP_SUMMER)
    summer = cv2.applyColorMap(frame, cv2.COLORMAP_SUMMER)
    blur = cv2.GaussianBlur(summer, (15, 15), 0)
    dilated = cv2.dilate(blur, kernel, iterations=1)
    cv2.imshow('Red color', dilated)
    cv2.imshow('autumn', summer_rgb)
    if cv2.waitKey(1) == 27:  # ESC quits
        break
cap.release()  # was missing: release the camera handle on exit
cv2.destroyAllWindows()
989,480 | d988f68dfb46c2edbf55fca07e54e236ec48c0a3 | #coding=utf-8
import os
from flask import abort, render_template,redirect, request,url_for,flash,current_app, make_response
from . import main
from ..models import User, Role, Permission, Post, Comment
from .. import db
from .forms import FileForm, EditProfileForm, EditProfileAdiminForm, PostForm, CommentForm
from flask_login import login_required, current_user
from ..decorators import admin_required
from ..decorators import permission_required
from flask_sqlalchemy import get_debug_queries
import random
@main.after_app_request
def after_request(response):
    """Log any SQL query that took >= 0.5 s before returning the response."""
    slow_queries = [q for q in get_debug_queries() if q.duration >= 0.5]
    for q in slow_queries:
        current_app.logger.warning(
            'Slow query:%s\nParameters:%s\nDuration:%fs\nContext:%s\n'
            % (q.statement, q.parameters, q.duration, q.context))
    return response
@main.route('/', methods=['GET', 'POST'])
def index():
    """Home page: post submission form plus a paginated post list.

    The 'show_followed' cookie (set by show_all/show_followed below) decides
    whether the list covers all posts or only those of followed users.
    """
    form = PostForm()
    # Only users with write permission may submit; others just see the list.
    if current_user.can(Permission.WRITE_ARTICLES) and form.validate_on_submit():
        post = Post(body=form.body.data, author=current_user._get_current_object())
        db.session.add(post)
        # Redirect-after-POST so a refresh does not resubmit the form.
        return redirect(url_for('.index'))
    page = request.args.get('page', 1, type=int)
    show_followed = False
    if current_user.is_authenticated:
        # Cookie value '1' -> True; empty/missing -> False.
        show_followed = bool(request.cookies.get('show_followed',''))
    if show_followed:
        query = current_user.followed_posts
    else:
        query = Post.query
    pagination = query.order_by(Post.timestamp.desc()).paginate(page, per_page=int(current_app.config['FLASKY_POSTS_PER_PAGE']),error_out=False)
    posts = pagination.items
    #comments = Comment.query.filter_by(post_id=post.id).all()
    # NOTE(review): `a` is passed straight into the template context;
    # presumably index.html reads it — confirm before removing.
    a = 0
    return render_template('index.html', form=form, posts=posts,show_followed=show_followed, pagination=pagination,a=a)
@main.route('/all')
@login_required
def show_all():
    """Clear the show_followed cookie so the index lists every post."""
    response = make_response(redirect(url_for('.index')))
    # Cookie kept for 30 days.
    response.set_cookie('show_followed', '', max_age=30 * 24 * 60 * 60)
    return response
@main.route('/followed')
@login_required
def show_followed():
    """Set the show_followed cookie so the index lists only followed users' posts."""
    response = make_response(redirect(url_for('.index')))
    # Cookie kept for 30 days.
    response.set_cookie('show_followed', '1', max_age=30 * 24 * 60 * 60)
    return response
@main.route('/user/<username>')
def user(username):
    """Profile page: the user's details and their posts, newest first."""
    profile_user = User.query.filter_by(username=username).first()
    if profile_user is None:
        abort(404)
    recent_posts = profile_user.posts.order_by(Post.timestamp.desc()).all()
    return render_template('user.html', user=profile_user, posts=recent_posts,
                           img=profile_user.img)
@main.route('/edit-profile', methods=['GET','POST'])
@login_required
def edit_profile():
    """Let the logged-in user edit their name / location / about-me.

    On successful POST the changes are saved and the user is redirected to
    their profile page; on GET (or failed validation) the form is pre-filled
    with the currently stored values.
    """
    form = EditProfileForm()
    if form.validate_on_submit():
        current_user.name = form.name.data
        current_user.location = form.location.data
        current_user.about_me = form.about_me.data
        db.session.add(current_user)
        flash(u'你的资料已更新。')
        return redirect(url_for('.user',username=current_user.username))
    # Pre-populate the form with the stored profile values.
    form.name.data = current_user.name
    form.location.data = current_user.location
    form.about_me.data = current_user.about_me
    return render_template('edit_profile.html', form=form, user=current_user)
@main.route('/edit-profile/<int:id>', methods=['GET','POST'])
@login_required
@admin_required
def edit_profile_admin(id):
    """Administrator form: edit any user's account and profile fields.

    Unlike edit_profile, this also allows changing email, username,
    confirmation status, and role.
    """
    user = User.query.get_or_404(id)
    form = EditProfileAdiminForm(user=user)
    if form.validate_on_submit():
        user.email = form.email.data
        user.username = form.username.data
        user.confirmed = form.confirmed.data
        # The form stores the role's primary key; resolve it to a Role row.
        user.role = Role.query.get(form.role.data)
        user.name = form.name.data
        user.location = form.location.data
        user.about_me = form.about_me.data
        db.session.add(user)
        flash(u'资料已更新')
        return redirect(url_for('.user', username=user.username))
    # Pre-populate the form with the stored account/profile values.
    form.email.data = user.email
    form.username.data = user.username
    form.confirmed.data = user.confirmed
    form.role.data = user.role_id
    form.name.data = user.name
    form.location.data = user.location
    form.about_me.data = user.about_me
    return render_template('edit_profile.html', form=form, user=user)
@main.route('/post/<int:id>',methods=['GET','POST'])
def post(id):
    """Show a single post with its paginated comment thread; accept new comments.

    ``?page=-1`` is a sentinel meaning "last page", so a freshly submitted
    comment is visible right after the redirect.
    """
    post = Post.query.get_or_404(id)
    form = CommentForm()
    if form.validate_on_submit():
        comment = Comment(body=form.body.data,post=post,author=current_user._get_current_object())
        db.session.add(comment)
        flash(u'你的评论已发布')
        return redirect(url_for('.post', id=post.id, page=-1))
    page = request.args.get('page',1,type=int)
    if page == -1:
        # BUG FIX: `/` produces a float under Python 3 and paginate() then
        # fails on a non-integer page; floor division keeps it an int (and is
        # equivalent for the Python 2 semantics the original relied on).
        page = (post.comments.count() - 1) // current_app.config['FLASKY_COMMENTS_PER_PAGE'] + 1
    pagination = post.comments.order_by(Comment.timestamp.asc()).paginate(page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],error_out=False)
    comments = pagination.items
    return render_template('post.html', posts=[post],form=form,comments=comments,pagination=pagination)
@main.route('/edit/<int:id>', methods=['GET','POST'])
@login_required
def edit(id):
    """Edit a post's body. Only the author or an administrator may edit."""
    post = Post.query.get_or_404(id)
    if current_user != post.author and not current_user.can(Permission.ADMINISTER):
        abort(403)
    form = PostForm()
    if form.validate_on_submit():
        post.body = form.body.data
        db.session.add(post)
        flash(u'帖子已更新')
        return redirect(url_for('.post',id=post.id))
    # Pre-fill the editor with the current body on GET / failed validation.
    form.body.data = post.body
    return render_template('edit_post.html',form=form, post=post)
@main.route('/follow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def follow(username):
    """Make the current user follow *username* (idempotent: re-follow is a no-op)."""
    target = User.query.filter_by(username=username).first()
    if target is None:
        flash(u'用户不存在')
        return redirect(url_for('.index'))
    if current_user.is_following(target):
        flash(u'你已经关注了此用户')
        return redirect(url_for('.user',username=username))
    current_user.follow(target)
    flash(u'你现在关注了 %s.'% username)
    return redirect(url_for('.user', username=username))
@main.route('/unfollow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def unfollow(username):
    """Make the current user stop following *username*."""
    target = User.query.filter_by(username=username).first()
    if target is None:
        flash(u'用户不存在')
        return redirect(url_for('.index'))
    current_user.unfollow(target)
    flash(u'你已经取消了对此用户的关注')
    return redirect(url_for('.user',username=username))
@main.route('/followers/<username>')
def followers(username):
    """Paginated list of the users that follow *username*."""
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash(u'用户不存在')
        return redirect(url_for('.index'))
    page = request.args.get('page', 1, type=int)
    pagination = user.followers.paginate(page, per_page=int(current_app.config['FLASKY_FOLLOWERS_PER_PAGE']),error_out=False)
    # The template expects dicts with the follower User and the follow timestamp.
    follows = [{'user':item.follower, 'timestamp':item.timestamp} for item in pagination.items]
    return render_template('followers.html',user=user,title=u'的关注者',endpoint='.followers',pagination=pagination,follows=follows)
@main.route('/followed_by/<username>')
def followed_by(username):
    """Paginated list of the users that *username* follows.

    BUG FIX: the pagination endpoint previously pointed at '.followers'
    (copy-paste from the followers view), so the page links on this view
    jumped to the followers list; it now points back to this view.
    """
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash(u'用户不存在')
        return redirect(url_for('.index'))
    page = request.args.get('page', 1, type=int)
    pagination = user.followed.paginate(page, per_page=int(current_app.config['FLASKY_FOLLOWERS_PER_PAGE']),error_out=False)
    # The template expects dicts with the followed User and the follow timestamp.
    follows = [{'user':item.followed, 'timestamp':item.timestamp} for item in pagination.items]
    return render_template('followers.html',user=user,title=u'关注的用户',endpoint='.followed_by',pagination=pagination,follows=follows)
@main.route('/delete_posts/<int:id>')
@login_required
def delete_posts(id):
    """Delete a post.

    SECURITY FIX: previously any logged-in user could delete any post; now
    only the post's author or an administrator may, matching the check used
    by the edit view.
    """
    posts = Post.query.filter_by(id=id).first()
    if posts is None:
        flash(u'帖子不存在')
        return redirect(url_for('.index'))
    if current_user != posts.author and not current_user.can(Permission.ADMINISTER):
        abort(403)
    db.session.delete(posts)
    flash(u'帖子已删除')
    return redirect(url_for('.index'))
@main.route('/moderate')
@login_required
@permission_required(Permission.MODERATE_COMMENTS)
def moderate():
    """Moderation dashboard: all comments, newest first, paginated."""
    page_number = request.args.get('page', 1, type=int)
    per_page = int(current_app.config['FLASKY_POSTS_PER_PAGE'])
    pagination = Comment.query.order_by(Comment.timestamp.desc()).paginate(
        page_number, per_page=per_page, error_out=False)
    return render_template('moderate.html', comments=pagination.items,
                           pagination=pagination)
@main.route('/moderate/disable/<int:id>')
@login_required
@permission_required(Permission.MODERATE_COMMENTS)
def disable(id):
    """Hide a comment from public view."""
    comment = Comment.query.filter_by(id=id).first()
    if comment is None:
        flash(u'帖子不存在')
        return redirect(url_for('.moderate'))
    comment.disabled = True
    db.session.add(comment)
    flash(u'帖子已隐藏')
    return redirect(url_for('.moderate'))
@main.route('/moderate/enable/<int:id>')
@login_required
@permission_required(Permission.MODERATE_COMMENTS)
def enable(id):
    """Restore a hidden comment to public view."""
    comment = Comment.query.filter_by(id=id).first()
    if comment is None:
        flash(u'帖子不存在')
        return redirect(url_for('.moderate'))
    comment.disabled = False
    db.session.add(comment)
    flash(u'帖子已显示')
    return redirect(url_for('.moderate'))
@main.route('/user/file',methods=['GET','POST'])
@login_required
def file():
    """Upload a new avatar image for the current user.

    NOTE(review): the target filename is a random integer in [0, 999999], so
    collisions between users are possible, and the upload is saved as .jpg
    regardless of its real format — confirm FileForm validates these.
    """
    form = FileForm()
    if form.validate_on_submit():
        file = form.file.data
        filename = random.randint(0,999999)
        file.save('app/static/images/%s.jpg'%filename)
        # Remove the previous avatar file before recording the new one.
        if current_user.img:
            os.remove('app/static/%s'%current_user.img)
        current_user.img = 'images/%s.jpg' %filename
        db.session.add(current_user)
        flash(u'头像修改成功')
        return redirect(url_for('.user',username=current_user.username))
    return render_template('file_form.html',form=form)
|
989,481 | c06c097289f014afa38d502534665f036a3a3510 | import math
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
import pickle
import sys
class Utilities:
    """Stateless helpers shared by the GSOM components."""

    def get_distance(self, vector_1, vector_2):
        """Euclidean distance between two weight vectors."""
        return np.linalg.norm(vector_1 - vector_2)

    def generate_index(self, x, y):
        """Canonical 'x:y' dictionary key for a grid position."""
        return '%s:%s' % (x, y)

    def select_winner(self, nodemap, input_vector):
        """Return the node whose weights are nearest to *input_vector*."""
        winner, best_dist = None, float("inf")
        for key in nodemap:
            dist = self.get_distance(nodemap[key].weights, input_vector)
            if dist < best_dist:
                best_dist, winner = dist, nodemap[key]
        return winner
class GSOMParameters:
    """Hyper-parameter bag for GSOM training (growing + smoothing phases)."""

    def __init__(self, spread_factor, learning_itr, smooth_itr, max_neighbourhood_radius=4, start_learning_rate=0.3,
                 smooth_neighbourhood_radius_factor=0.5, smooth_learning_factor=0.5, distance='euclidean', fd=0.1,
                 alpha=0.9, r=3.8):
        # Growth control.
        self.SPREAD_FACTOR = spread_factor
        self.FD = fd
        # Iteration counts for the two phases.
        self.LEARNING_ITERATIONS = learning_itr
        self.SMOOTHING_ITERATIONS = smooth_itr
        # Learning-rate / neighbourhood schedule.
        self.START_LEARNING_RATE = start_learning_rate
        self.MAX_NEIGHBOURHOOD_RADIUS = max_neighbourhood_radius
        self.SMOOTHING_LEARNING_RATE_FACTOR = smooth_learning_factor
        self.SMOOTHING_NEIGHBOURHOOD_RADIUS_FACTOR = smooth_neighbourhood_radius_factor
        self.ALPHA = alpha
        self.R = r
        self.DISTANCE = distance

    def get_gt(self, dimensions):
        """Growth threshold: GT = -D * ln(spread_factor)."""
        return dimensions * -math.log(self.SPREAD_FACTOR)
class GSOMNode:
    """One GSOM lattice node: grid position, weight vector, accumulated error."""

    R = random.Random()

    def __init__(self, x, y, weights):
        self.weights = weights
        self.x, self.y = x, y
        # Quantization error accumulated at this node during growth.
        self.error = 0.0
        # Labels / classes / raw inputs mapped here after training.
        self.mappedLabels = []
        self.mappedClasses = []
        self.data = []

    def adjust_weights(self, target, influence, learn_rate):
        """Move the weights towards *target*, scaled by influence and rate."""
        delta = target - self.weights
        self.weights += influence * learn_rate * delta

    def cal_and_update_error(self, input_vector):
        """Accumulate the distance from this node's weights to the input."""
        self.error += Utilities().get_distance(self.weights, input_vector)

    def map_label(self, input_label):
        """Record a label hit on this node."""
        self.mappedLabels.append(input_label)

    def map_class(self, input_class):
        """Record a class hit on this node."""
        self.mappedClasses.append(input_class)

    def map_data(self, input_data):
        """Record the raw input vector mapped to this node."""
        self.data.append(input_data)
class GrowthHandler:
    """Grows new GSOM nodes around a winner and derives their initial weights.

    Weight initialisation follows the standard GSOM cases: average of two
    neighbours when the new node sits between them, extrapolation when the
    winner has a neighbour on the opposite side, and a mid-range fill when
    the winner is the only older neighbour.
    """

    def grow_nodes(self, node_map, winner):
        """Spawn the four orthogonal neighbours of *winner* that don't exist yet."""
        x = winner.x
        y = winner.y
        self._grow_individual_node(x - 1, y, winner, node_map)
        self._grow_individual_node(x + 1, y, winner, node_map)
        self._grow_individual_node(x, y - 1, winner, node_map)
        self._grow_individual_node(x, y + 1, winner, node_map)

    def _grow_individual_node(self, x, y, winner, node_map):
        """Insert a node at (x, y) unless that grid position is already taken."""
        newNodeIndex = Utilities().generate_index(x, y)
        if newNodeIndex not in node_map:
            node_map[Utilities().generate_index(x, y)] = GSOMNode(x, y,
                                                                  self._generate_new_node_weights(node_map, winner, x,
                                                                                                  y))

    def _generate_new_node_weights(self, node_map, winner, x, y):
        """Derive initial weights for the new node at (x, y) next to *winner*.

        Falls back to random weights if (x, y) is not orthogonally adjacent
        to the winner; the result is clamped to [0, 1].
        """
        new_weights = np.random.rand(len(winner.weights))
        if winner.y == y:
            # New node is on the same row as the winner.
            # W1 is the winner in following cases
            # W1 - W(new)
            if x == winner.x + 1:
                next_node_str = Utilities().generate_index(x + 1, y)
                other_side_node_str = Utilities().generate_index(x - 2, y)
                top_node_srt = Utilities().generate_index(winner.x, y + 1)
                bottom_node_str = Utilities().generate_index(winner.x, y - 1)
                """
                 * 1. W1 - W(new) - W2
                 * 2. W2 - W1 - W(new)
                 * 3.      W2
                 *         |
                 *    W1 - W(new)
                 * 4. W1 - W(new)
                 *         |
                 *         W2
                 * 5. W1 - W(new)
                """
                new_weights = self._get_new_node_weights_in_xy_axis(node_map, winner, next_node_str,
                                                                    other_side_node_str, top_node_srt, bottom_node_str)
            # W(new) - W1
            elif x == winner.x - 1:
                next_node_str = Utilities().generate_index(x - 1, y)
                other_side_node_str = Utilities().generate_index(x + 2, y)
                top_node_srt = Utilities().generate_index(winner.x, y + 1)
                bottom_node_str = Utilities().generate_index(winner.x, y - 1)
                """
                 * 1. W2 - W(new) - W1
                 * 2. W(new) - W1 - W2
                 * 3. W2
                 *    |
                 *    W(new) - W1
                 * 4. W(new) - W1
                 *    |
                 *    W2
                 * 5. W(new) - W1
                """
                new_weights = self._get_new_node_weights_in_xy_axis(node_map, winner, next_node_str,
                                                                    other_side_node_str, top_node_srt, bottom_node_str)
        elif winner.x == x:
            # New node is on the same column as the winner.
            """
             * W(new)
             * |
             * W1
            """
            if y == winner.y + 1:
                next_node_str = Utilities().generate_index(x, y + 1)
                other_side_node_str = Utilities().generate_index(x, y - 2)
                left_node_srt = Utilities().generate_index(x - 1, winner.y)
                right_node_str = Utilities().generate_index(x + 1, winner.y)
                new_weights = self._get_new_node_weights_in_xy_axis(node_map, winner, next_node_str,
                                                                    other_side_node_str, left_node_srt, right_node_str)
            elif y == winner.y - 1:
                next_node_str = Utilities().generate_index(x, y - 1)
                other_side_node_str = Utilities().generate_index(x, y + 2)
                left_node_srt = Utilities().generate_index(x - 1, winner.y)
                right_node_str = Utilities().generate_index(x + 1, winner.y)
                new_weights = self._get_new_node_weights_in_xy_axis(node_map, winner, next_node_str,
                                                                    other_side_node_str, left_node_srt, right_node_str)
        # Clamp to the unit hyper-cube the inputs are assumed to live in.
        new_weights[new_weights < 0] = 0.0
        new_weights[new_weights > 1] = 1.0
        return new_weights

    def _get_new_node_weights_in_xy_axis(self, node_map, winner, next_node_str, other_side_node_str,
                                         top_or_left_node_srt, bottom_or_right_node_str):
        """Pick the weight-initialisation case based on which neighbours exist."""
        if next_node_str in node_map:
            new_weights = self._new_weights_for_new_node_in_middle(node_map, winner, next_node_str)
        elif other_side_node_str in node_map:
            new_weights = self._new_weights_for_new_node_on_one_side(node_map, winner, other_side_node_str)
        elif top_or_left_node_srt in node_map:
            new_weights = self._new_weights_for_new_node_on_one_side(node_map, winner, top_or_left_node_srt)
        elif bottom_or_right_node_str in node_map:
            new_weights = self._new_weights_for_new_node_on_one_side(node_map, winner, bottom_or_right_node_str)
        else:
            new_weights = self._new_weights_for_new_node_one_older_neighbour(winner)
        return new_weights

    def _new_weights_for_new_node_in_middle(self, node_map, winner, next_node_str):
        """New node between two existing nodes: average their weights."""
        return (winner.weights + node_map[next_node_str].weights) * 0.5

    def _new_weights_for_new_node_on_one_side(self, node_map, winner, next_node_str):
        """New node beyond the winner: extrapolate past the winner's weights."""
        return (winner.weights * 2) - node_map[next_node_str].weights

    def _new_weights_for_new_node_one_older_neighbour(self, winner):
        """Winner is the only neighbour: fill with the winner's mid-range value."""
        return np.full(len(winner.weights), (max(winner.weights) + min(winner.weights)) / 2)
class GSOM:
    """Growing Self-Organising Map: growth phase plus smoothing phase.

    NOTE(review): ``map`` starts as a class attribute but
    ``_initialize_network`` rebinds ``self.map`` per instance, so instances
    do not actually share state once ``grow()`` has been called.
    """
    map = {}

    def __init__(self, params, input_vectors):
        # params: a GSOMParameters instance; input_vectors: 2-D array
        # (n_samples, n_features) — dimensionality taken from shape[1].
        self.parameters = params
        self.inputs = input_vectors
        self.dimensions = input_vectors.shape[1]
        self.growth_handler = GrowthHandler()

    def grow(self):
        """Run the growth phase and return the node map."""
        self._initialize_network(self.dimensions)
        learning_rate = self.parameters.START_LEARNING_RATE
        print('Growing GSOM for ', self.parameters.LEARNING_ITERATIONS, 'iterations...')
        start = time.time()
        for i in range(0, self.parameters.LEARNING_ITERATIONS):
            # Learning rate decays with map size from the second iteration on.
            if i != 0:
                learning_rate = self._get_learning_rate(self.parameters, learning_rate, len(self.map))
            neighbourhood_radius = self._get_neighbourhood_radius(self.parameters.LEARNING_ITERATIONS, i,
                                                                  self.parameters.MAX_NEIGHBOURHOOD_RADIUS)
            for k in range(0, len(self.inputs)):
                self._grow_for_single_iteration_and_single_input(self.inputs[k], learning_rate, neighbourhood_radius)
        print('Growing cost -', (time.time() - start), 's')
        # print('Iteration', i, '/', self.parameters.LEARNING_ITERATIONS)
        # print('Growing completed.')
        return self.map

    def smooth(self):
        """Run the smoothing phase (reduced rate/radius, no growth) and return the map."""
        learning_rate = self.parameters.START_LEARNING_RATE * self.parameters.SMOOTHING_LEARNING_RATE_FACTOR
        reduced_neighbourhood_radius = self.parameters.MAX_NEIGHBOURHOOD_RADIUS * self.parameters.SMOOTHING_NEIGHBOURHOOD_RADIUS_FACTOR
        print('Smoothing GSOM for ', self.parameters.SMOOTHING_ITERATIONS, 'iterations...')
        start = time.time()
        for i in range(0, self.parameters.SMOOTHING_ITERATIONS):
            if i != 0:
                learning_rate = self._get_learning_rate(self.parameters, learning_rate, len(self.map))
            neighbourhood_radius = self._get_neighbourhood_radius(self.parameters.SMOOTHING_ITERATIONS, i,
                                                                  reduced_neighbourhood_radius)
            for k in range(0, len(self.inputs)):
                self._smooth_for_single_iteration_and_single_input(self.inputs[k], learning_rate, neighbourhood_radius)
        # print('Iteration', i, '/', self.parameters.SMOOTHING_ITERATIONS)
        print('Smoothing cost -', (time.time() - start), 's')
        # print('Smoothing completed.')
        return self.map

    def _smooth_for_single_iteration_and_single_input(self, input_vector, learning_rate, neigh_radius):
        """Adjust one existing orthogonal neighbour of the winner (elif chain:
        at most one of left/right/top/bottom is updated per input)."""
        winner = Utilities().select_winner(self.map, input_vector)
        left = Utilities().generate_index(winner.x - 1, winner.y)
        right = Utilities().generate_index(winner.x + 1, winner.y)
        top = Utilities().generate_index(winner.x, winner.y + 1)
        bottom = Utilities().generate_index(winner.x, winner.y - 1)
        if left in self.map:
            self._adjust_weights_for_neighbours(self.map[left], winner, input_vector, neigh_radius, learning_rate)
        elif right in self.map:
            self._adjust_weights_for_neighbours(self.map[right], winner, input_vector, neigh_radius, learning_rate)
        elif top in self.map:
            self._adjust_weights_for_neighbours(self.map[top], winner, input_vector, neigh_radius, learning_rate)
        elif bottom in self.map:
            self._adjust_weights_for_neighbours(self.map[bottom], winner, input_vector, neigh_radius, learning_rate)

    def _grow_for_single_iteration_and_single_input(self, input_vector, learning_rate, neigh_radius):
        """One growth step: find winner, adapt neighbourhood, grow if over GT."""
        winner = Utilities().select_winner(self.map, input_vector)
        # Update the error value of the winner node
        winner.cal_and_update_error(input_vector)
        # Weight adaptation for winner's neighborhood
        for node_id in list(self.map):
            self._adjust_weights_for_neighbours(self.map[node_id], winner, input_vector, neigh_radius, learning_rate)
        # Evaluate winner's weights and grow network it it's above Growth Threshold (GT)
        if winner.error > self.parameters.get_gt(len(input_vector)):
            self._adjust_winner_error(winner, len(input_vector))

    def _adjust_winner_error(self, winner, dimensions):
        """If the winner is interior (all 4 neighbours exist), spread its error;
        otherwise grow new boundary nodes around it."""
        left = Utilities().generate_index(winner.x - 1, winner.y)
        right = Utilities().generate_index(winner.x + 1, winner.y)
        top = Utilities().generate_index(winner.x, winner.y + 1)
        bottom = Utilities().generate_index(winner.x, winner.y - 1)
        if left in self.map and right in self.map and top in self.map and bottom in self.map:
            self._distribute_error_to_neighbours(winner, left, right, top, bottom, dimensions)
        else:
            self.growth_handler.grow_nodes(self.map, winner)

    def _distribute_error_to_neighbours(self, winner, left, right, top, bottom, dimensions):
        """Reset the winner's error to GT and inflate each neighbour's error."""
        winner.error = self.parameters.get_gt(dimensions)
        self.map[left].error = self._calc_error_for_neighbours(self.map[left])
        self.map[right].error = self._calc_error_for_neighbours(self.map[right])
        self.map[top].error = self._calc_error_for_neighbours(self.map[top])
        self.map[bottom].error = self._calc_error_for_neighbours(self.map[bottom])

    def _calc_error_for_neighbours(self, node):
        """Error inflation factor (1 + FD) applied when spreading error."""
        return node.error * (1 + self.parameters.FD)

    def _adjust_weights_for_neighbours(self, node, winner, input_vector, neigh_radius, learning_rate):
        """Gaussian-weighted weight update for nodes inside the neighbourhood radius."""
        node_dist_sqr = math.pow(winner.x - node.x, 2) + math.pow(winner.y - node.y, 2)
        neigh_radius_sqr = neigh_radius * neigh_radius
        if node_dist_sqr < neigh_radius_sqr:
            influence = math.exp(- node_dist_sqr / (2 * neigh_radius_sqr))
            node.adjust_weights(input_vector, influence, learning_rate)

    def _initialize_network(self, dimensions):
        """Start from the standard 2x2 seed grid with random weights."""
        self.map = {
            '0:0': GSOMNode(0, 0, np.random.rand(dimensions)),
            '0:1': GSOMNode(0, 1, np.random.rand(dimensions)),
            '1:0': GSOMNode(1, 0, np.random.rand(dimensions)),
            '1:1': GSOMNode(1, 1, np.random.rand(dimensions)),
        }

    def _get_learning_rate(self, parameters, prev_learning_rate, nodemap_size):
        """Decay schedule: alpha * (1 - R/|map|) * previous rate."""
        return parameters.ALPHA * (1 - parameters.R / nodemap_size) * prev_learning_rate

    def _get_neighbourhood_radius(self, total_iteration, iteration, max_neighbourhood_radius):
        """Exponentially shrinking neighbourhood radius over the iterations."""
        time_constant = total_iteration / math.log(max_neighbourhood_radius)
        return max_neighbourhood_radius * math.exp(- iteration / time_constant)
class AnnotateGSOM:
    """Maps labelled input vectors onto a trained GSOM node map."""

    def label_map(self, nodemap, input_vectors, input_labels):
        """Assign each input to its winning node and collect labels per node.

        Returns a dict of 'x:y' node key -> comma-joined label string, or
        None if the vector and label sequences differ in length.
        """
        if len(input_vectors) != len(input_labels):
            print('Error: Input vector length and label length differs.')
            return
        result = {}
        for index in range(0, len(input_vectors)):
            self._map_single_input_label(nodemap, input_vectors[index],
                                         result, input_labels[index])
        return result

    def _map_single_input_label(self, nodemap, input_vector, test_result_map_labels, input_vector_label):
        """Record one labelled input on its winner node and in the result dict."""
        winner = Utilities().select_winner(nodemap, input_vector)
        winner.map_label(input_vector_label)
        winner.map_data(input_vector)
        key = Utilities().generate_index(winner.x, winner.y)
        if key in test_result_map_labels:
            test_result_map_labels[key] += ',' + str(input_vector_label)
        else:
            test_result_map_labels[key] = str(input_vector_label)
def run_gsom_for_zoo_data(sf):
    """Train a GSOM on accelerometer data and plot the labelled node map.

    :param sf: spread factor controlling how aggressively the map grows.
    Reads a fixed local CSV, trains (grow + smooth), pickles the node map,
    then scatter-plots each hit node annotated with its intensity labels.
    """
    data = pd.read_csv(
        'D:\Accelerometer Data\Processed\LSM2\Week 1\Wednesday/filtered/LSM203_(2016-11-02)_row_4442_to_8233.csv',
        nrows=1000, skiprows=100, usecols=[16, 18, 19, 20])
    data.columns = ['actilife_waist_intensity', 'Y', 'X', 'Z']
    classes = data['actilife_waist_intensity'].tolist()
    # BUG FIX: DataFrame.as_matrix() was deprecated in pandas 0.23 and removed
    # in 1.0; select the columns explicitly and take .values instead.
    inputs = data[['Y', 'X', 'Z']].values
    print('Dataset length -', len(inputs))
    gsom_params = GSOMParameters(sf, 70, 70)
    gsom = GSOM(gsom_params, inputs)
    gsom_map = gsom.grow()
    print('GSOM growing phase completed generating', len(gsom_map), 'nodes')
    gsom_map = gsom.smooth()
    print('GSOM smoothing phase completed for', len(gsom_map), 'nodes')
    # Persist the trained map so plotting can be re-run without retraining.
    with open('nodemap.pickle', 'wb') as handle:
        pickle.dump(gsom_map, handle, protocol=pickle.HIGHEST_PROTOCOL)
    # gsom_map = pickle.load(open("nodemap.pickle", "rb"))
    mapped_classes = AnnotateGSOM().label_map(gsom_map, inputs, classes)
    print('Labeled the gsom map hit nodes')
    plt.figure(1)
    plt.title('GSOM - Accelerometer with Intensity with SF ' + str(sf))
    for key, value in mapped_classes.items():
        # Node keys are 'x:y' strings (see Utilities.generate_index).
        x, y = (int(part) for part in key.split(':'))
        plt.plot(x, y, 'bo')
        plt.text(x, y + 0.1, value, fontsize=12)
# Entry point: train/plot with a spread factor of 0.25.
if __name__ == '__main__':
    """
    Test for,
    1. Input - raw x, y, z | class - intensity
    2. Input - vm, sdvm | class - intensity
    3. Input - high-correlated features | class - intensity
    """
    run_gsom_for_zoo_data(0.25)
    plt.show()
|
989,482 | e6525d0b8e1ef948ba234e54781c60495558a58d | import sys
sys.path.insert(0, 'python')
import cv2
import model
from hand import Hand
from body import Body
import matplotlib.pyplot as plt
import copy
import numpy as np
import scipy.misc
from Hand_Detection import detect_hand
from PIL import Image
from load_image import read_image
import random
def parsearg():
    """Return True iff the first CLI argument is the literal 'reload'."""
    args = sys.argv
    return len(args) > 1 and args[1] == 'reload'
def FindDistance(A, B):
    """Euclidean distance between 2-D points A and B (indexable as [0], [1])."""
    dx = A[0] - B[0]
    dy = A[1] - B[1]
    return np.sqrt(dx ** 2 + dy ** 2)
# --- Configuration and model setup ---------------------------------------
flag = parsearg()
flag= True  # NOTE(review): unconditionally overrides the CLI flag above
imglist = read_image(['earring_modified', 'necklace_modified'])
body_estimation = Body('model/body_pose_model.pth')
hand_estimation = Hand('model/hand_pose_model.pth')
# Vertical pixel offset below the ear keypoint, and scale factors for the
# earring/necklace relative to detected body distances.
bias = 20
c_ear = 0.1
c_neck = 0.5
#bias2 = 5
cap = cv2.VideoCapture('video/1559875194113165.mp4')
fps = cap.get(5)  # property 5 = CAP_PROP_FPS
print(fps)
ret, oriImg = cap.read()
vid_writer = cv2.VideoWriter('new7.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (oriImg.shape[1],oriImg.shape[0]))
#print(oriImg.shape[1],oriImg.shape[0])
#img2 = Image.open('cutouts/earring_modified/earring0.jpg')
#img3 = Image.open('images/necklace2.jpg')
#img2 = scipy.misc.imresize(img2, (70, 30))
#img3 = scipy.misc.imresize(img3, (250, 300))
#print(img2.shape)
#rows,cols,channels = img2.shape
#rows3,cols3,channels3 = img3.shape
cap.set(3, 640)
cap.set(4, 480)
count = 0
# Templates: imglist[0] = earrings, imglist[1] = necklaces (index 2 chosen).
img_2 = imglist[0][2]
img_3 = imglist[1][2]
# Fallback sizes used until keypoint-based sizing is available.
rows, cols, channels = [70, 30, 3]
rows3, cols3, channels3 = [250, 300, 3]
# --- Per-frame loop -------------------------------------------------------
while True:
    ret, oriImg = cap.read()
    if not ret:
        break
    img2 = copy.deepcopy(img_2)
    img3 = copy.deepcopy(img_3)
    # Run the (expensive) pose estimator only ~3 times per second; other
    # frames reuse the previous candidate/subset.
    if count%int(fps/3) == 0:
        candidate, subset = body_estimation(oriImg)
        print(candidate, subset)
    '''
    if len(candidate) != len(subset[0]):
        for i in range(len(subset[0])-2):
            if subset[0][i] != -1 and (candidate[int(subset[0][i])][0] == oriImg.shape[1]-1 or candidate[int(subset[0][i])][1] == oriImg.shape[0]-1):
                subset[0][i] = -1
    '''
    canvas = copy.deepcopy(oriImg)
    #canvas = Image.fromarray(cv2.cvtColor(canvas,cv2.COLOR_BGR2RGB))
    #canvas = canvas.transpose(Image.FLIP_LEFT_RIGHT)
    #canvas = util.draw_bodypose(canvas, candidate, subset)
    if len(subset) > 0:
        # Keypoints: 16/17 = ears, 1 = neck, 2/5 = shoulders.
        # NOTE(review): a missing keypoint is encoded as -1, which indexes
        # candidate[-1] (last row) here before the != -1 checks below — the
        # overlay positions are then bogus for that frame; confirm intended.
        point = candidate[int(subset[0][16])]
        point2 = candidate[int(subset[0][17])]
        point3 = candidate[int(subset[0][1])]
        neck1 = candidate[int(subset[0][2])]
        neck2 = candidate[int(subset[0][5])]
        dis1 = FindDistance(point, point2)   # ear-to-ear distance
        dis2 = FindDistance(neck1, neck2)    # shoulder-to-shoulder distance
        # Scale the jewellery images from body proportions when both
        # keypoints are present; otherwise fall back to fixed sizes.
        if subset[0][16] != -1 and subset[0][17] != -1:
            img2 = scipy.misc.imresize(img2, (int(2.5*c_ear*dis1), int(c_ear*dis1)))
        else:
            img2 = scipy.misc.imresize(img2, (rows, cols))
        if subset[0][2] != -1 and subset[0][5] != -1:
            img3 = scipy.misc.imresize(img3, (int(c_neck*dis2), int(1.2*c_neck*dis2)))
        else:
            img3 = scipy.misc.imresize(img3, (rows3, cols3))
        rows, cols, channels = img2.shape
        rows3, cols3, channels3 = img3.shape
        #img2 = np.array(img2)
        #print(img2.shape)
        #print(img2[0].shape)
        #if flag and count%45 == 0:
            #earring = random.randint(0, len(imglist[0]) - 1)
            #neck = random.randint(0, len(imglist[1]) - 1)
            #img2 = imglist[0][2]
            #img3 = imglist[1][2]
            #img2 = scipy.misc.imresize(img2, (int(2.5*c_ear*dis1), int(c_ear*dis1)))
            #img3 = scipy.misc.imresize(img3, (int(c_neck*dis2), int(1.2*c_neck*dis2)))
            #rows,cols,channels = img2.shape
            #rows3,cols3,channels3 = img3.shape
        # Regions of interest: below each ear (earrings) and around the neck.
        roi = canvas[int(point[1] + bias) : int(point[1] + rows + bias), int(point[0] - cols/2) : int(point[0] + cols/2)]
        roi2 = canvas[int(point2[1] + bias) : int(point2[1] + rows + bias), int(point2[0] - cols/2) : int(point2[0] + cols/2)]
        roi3 = canvas[int(point3[1] - 2*rows3/3) : int(point3[1] + rows3/3), int(point3[0] - cols3/2) : int(point3[0] + cols3/2)]
        # Now create a mask of logo and create its inverse mask also
        img2gray = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
        ret, mask = cv2.threshold(img2gray, 200, 255, cv2.THRESH_BINARY)
        mask_inv = cv2.bitwise_not(mask)
        img3gray = cv2.cvtColor(img3,cv2.COLOR_BGR2GRAY)
        ret3, mask3 = cv2.threshold(img3gray, 200, 255, cv2.THRESH_BINARY)
        mask_inv3 = cv2.bitwise_not(mask3)
        # Now black-out the area of logo in ROI
        print(roi2.shape, mask.shape)
        # Shape guards skip compositing when an ROI is clipped by the frame edge.
        if roi.shape[0] == mask.shape[0] and roi.shape[1] == mask.shape[1]:
            img1_bg = cv2.bitwise_and(roi,roi,mask = np.uint8(mask))
        if roi2.shape[0] == mask.shape[0] and roi2.shape[1] == mask.shape[1]:
            img1_bg2 = cv2.bitwise_and(roi2,roi2,mask = np.uint8(mask))
        if roi3.shape[0] == mask3.shape[0] and roi3.shape[1] == mask3.shape[1]:
            img1_bg3 = cv2.bitwise_and(roi3,roi3,mask = np.uint8(mask3))
        # Take only region of logo from logo image.
        img2_fg = cv2.bitwise_and(img2,img2,mask = np.uint8(mask_inv))
        img3_fg = cv2.bitwise_and(img3,img3,mask = np.uint8(mask_inv3))
        #print(img1_bg.shape)
        #print(img2_fg[:][:][:3].shape)
        # Put logo in ROI and modify the main image
        #print(type(img2_fg))
        # Channel reorder (2, 1, 0) converts the RGB template to BGR for OpenCV.
        if roi.shape[0] == mask.shape[0] and roi.shape[1] == mask.shape[1]:
            dst = cv2.add(img1_bg, img2_fg[:, :, (2, 1, 0)])
        if roi2.shape[0] == mask.shape[0] and roi2.shape[1] == mask.shape[1]:
            dst2 = cv2.add(img1_bg2, img2_fg[:, :, (2, 1, 0)])
        if roi3.shape[0] == mask3.shape[0] and roi3.shape[1] == mask3.shape[1]:
            dst3 = cv2.add(img1_bg3, img3_fg[:, :, (2, 1, 0)])
        if roi.shape[0] == mask.shape[0] and roi.shape[1] == mask.shape[1]:
            canvas[int(point[1] + bias) : int(point[1] + rows + bias), int(point[0] - cols/2) : int(point[0] + cols/2)] = dst
        if roi2.shape[0] == mask.shape[0] and roi2.shape[1] == mask.shape[1]:
            canvas[int(point2[1] + bias) : int(point2[1] + rows + bias), int(point2[0] - cols/2) : int(point2[0] + cols/2)] = dst2
        if roi3.shape[0] == mask3.shape[0] and roi3.shape[1] == mask3.shape[1]:
            canvas[int(point3[1] - 2*rows3/3) : int(point3[1] + rows3/3), int(point3[0] - cols3/2) : int(point3[0] + cols3/2)] = dst3
    '''
    hands_list = util.handDetect(candidate, subset, oriImg)
    all_hand_peaks = []
    for x, y, w, is_left in hands_list:
        peaks = hand_estimation(oriImg[y:y+w, x:x+w, :])
        peaks[:, 0] = np.where(peaks[:, 0]==0, peaks[:, 0], peaks[:, 0]+x)
        peaks[:, 1] = np.where(peaks[:, 1]==0, peaks[:, 1], peaks[:, 1]+y)
        all_hand_peaks.append(peaks)
    canvas = util.draw_handpose(canvas, all_hand_peaks)
    '''
    #cv2.imshow('demo', canvas)  # preview window showing the composited frame
    key = cv2.waitKey(1)
    if key == 27:
        break
    vid_writer.write(canvas)
    count += 1
vid_writer.release()
cv2.destroyAllWindows()
|
989,483 | a1eab9748290d0ed11e0f7125d0a97e6db6c25fe | #!/usr/bin/env python
# coding:utf-8
import argparse
import glob
import logging
import os
import sys
import time
import json
import torch
from torch.utils.data import DataLoader
import torch.nn.functional as F
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import random
import numpy as np
import datetime
import penman
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from transformers import MBartTokenizer, AutoConfig
from dataset_amrparsing import AMRParsingDataModule
from spring_amr.tokenization_bart import AMRBartTokenizer, PENMANBartTokenizer
from utils import (
ROUGE_KEYS,
LegacySeq2SeqDataset,
Data2SeqDataset,
Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
calculate_smatch,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
build_vocab,
)
from utils_graph2text import (
convert_text,
eval_meteor,
eval_bleu_sents,
eval_bleu_sents_tok,
eval_chrf,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
def setup_seed(seed):
    """Seed every random-number generator in use (Python, NumPy, torch CPU
    and CUDA) and pin cuDNN to deterministic kernels so runs are reproducible.
    """
    for seed_fn in (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed,
        torch.cuda.manual_seed_all,
    ):
        seed_fn(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    # Trade cuDNN autotuning speed for bit-for-bit reproducibility.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
class SummarizationModule(BaseTransformer):
    """PyTorch-Lightning module that fine-tunes a seq2seq transformer for
    summarization-style generation tasks.

    Subclasses override the class attributes below (and, where needed,
    ``calc_generative_metrics``) to repurpose the training loop for
    translation, graph-to-text and AMR parsing.
    """

    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        """Validate sampler/batching flags, build the base transformer and
        cache frequently used hyper-parameters on the instance."""
        # Sortish sampling and dynamic token batching interact badly with DDP.
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError(
                    "--sortish_sampler and --max_tokens_per_batch may not be used simultaneously"
                )
        super().__init__(hparams, num_labels=None, num_rels=0, mode=self.mode, **kwargs)
        # use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        # Starts at -2 so warm-up validation passes don't shift checkpoint numbering.
        self.step_count = -2
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        # FSMT keeps separate source/target vocabularies; everything else has one.
        self.vocab_size = (
            self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
        )
        self.dataset_kwargs: dict = dict(
            data_dir=self.hparams.data_dir,
            max_source_length=self.hparams.max_source_length,
            prefix=self.model.config.prefix or "",
        )
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        # Negative counts mean "use every example" (None downstream).
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert (
            self.target_lens["train"] <= self.target_lens["val"]
        ), f"target_lens: {self.target_lens}"
        assert (
            self.target_lens["train"] <= self.target_lens["test"]
        ), f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())
        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        self.already_saved_batch = False
        self.eval_beams = (
            self.model.config.num_beams
            if self.hparams.eval_beams is None
            else self.hparams.eval_beams
        )
        self.eval_lenpen = self.hparams.eval_lenpen
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = (
            self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
        )

    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility.

        Dumps one batch to ``text_batch.json`` (detokenized, where the key
        does not look like a mask/relation/distance tensor) and
        ``tok_batch.json`` (raw token ids), then disables further dumps.
        """
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist())
            if v is not None and "mask" not in k and "rel" not in k and "dis" not in k
            else v.shape
            if v is not None
            else None
            for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        tb = {}
        for k, v in batch.items():
            if v is not None:
                tb[k] = v.tolist()
            else:
                tb[k] = v
        save_json(tb, Path(self.output_dir) / "tok_batch.json")
        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        """Delegate straight to the wrapped HuggingFace model."""
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        """Decode generated token ids to whitespace-stripped strings."""
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        """Run one forward pass and return a 1-tuple with the LM loss.

        Uses plain cross-entropy when ``label_smoothing == 0``, otherwise the
        label-smoothed NLL from ``utils.label_smoothed_nll_loss``.
        """
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        decoder_input_ids, tgt_ids = batch["decoder_input_ids"], batch["labels"]
        # y = batch["labels"]
        # decoder_input_ids = y[:, :-1].contiguous()
        # tgt_ids = y[:, 1:].clone()
        if (
            not self.already_saved_batch
        ):  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)
        outputs = self(
            src_ids,
            attention_mask=src_mask,
            decoder_input_ids=decoder_input_ids,
            use_cache=False,
            return_dict=False,
        )
        # print('Outputs:', outputs)
        lm_logits = outputs[0]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = torch.nn.CrossEntropyLoss(ignore_index=pad_token_id)
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = torch.nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        """The tokenizer's pad token id."""
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        """One optimization step; logs loss, lr and batch statistics."""
        loss_tensors = self._step(batch)
        logs = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # print("loss_tensors:", loss_tensors)
        # print("lr_scheduler:", self.trainer.lr_schedulers[0]["scheduler"].get_lr()[0])
        self.log("train_loss", loss_tensors[0].item(), prog_bar=True)
        self.log("lr", self.trainer.lr_schedulers[0]["scheduler"].get_lr()[0], prog_bar=True)
        return {"loss": loss_tensors[0], "log": logs}

    def training_epoch_end(self, outputs, prefix="train") -> Dict:
        """Average the epoch's losses and record them under ``metrics['training']``."""
        # print('train Ouputs:', outputs)
        # losses = {"loss": torch.stack([x["loss"] for x in outputs]).mean()}
        losses = {k: torch.stack([x[k] for x in outputs]).mean().item() for k in self.loss_names}
        # NOTE(review): ``loss`` is computed but never used below.
        loss = losses["loss"]
        self.metrics["training"].append(losses)

    def validation_step(self, batch, batch_idx) -> Dict:
        """Generate for one validation batch (see ``_generative_step``)."""
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        """Gather per-batch generations, decode them to AMR graphs, write
        PENMAN-encoded predictions to disk and score them with Smatch.

        Returns the aggregated metrics dict expected by Lightning.
        """
        print(f"Generating Kwargs: Num_beam: {self.eval_beams}, Max_len: {self.eval_max_length}")
        self.step_count += 1
        # Collect results from every DDP rank before aggregating.
        outputs = self.all_gather(outputs)
        # print('Gathered outputs', outputs)
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: torch.stack([x[k] for x in outputs]).mean().item() for k in ["gen_time", "gen_len"]
        }
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        # print('all_metrics:', all_metrics)
        # self.metrics[prefix].append(all_metrics) # callback writes this to self.metrics_save_path
        # print("Len(outputs)", len(outputs))
        # Flatten gathered predictions into one list of token-id sequences.
        preds = [[[itm.item() for itm in y] for y in x["preds"]] for x in outputs]
        preds_new = []
        for iter in preds:
            for ith_batch in iter:
                preds_new.append(ith_batch)
        preds = preds_new
        lin_sentences = []
        for idx, tokens_same_source in enumerate(preds):
            # print("token_same_source", tokens_same_source)
            # Replace the first token with BOS before decoding the linearization.
            tokens_same_source_ = [self.tokenizer.bos_token_id] + tokens_same_source[1:]
            lin_sentences.append(str(idx) + " " + self.tokenizer.decode(tokens_same_source_).strip())
        # NOTE(review): the file handle passed to json.dump is never closed.
        json.dump(
            lin_sentences,
            open(f"{self.hparams.output_dir}/dev-nodes.json", "w", encoding="utf-8"),
            indent=4,
        )
        graphs = []
        for idx in range(len(preds)):
            graphs_same_source = []
            graphs.append(graphs_same_source)
            ith_pred = preds[idx]
            graph, status, (lin, backr) = self.tokenizer.decode_amr(
                ith_pred, restore_name_ops=False
            )
            graph.status = status
            graph.nodes = lin
            graph.backreferences = backr
            graph.tokens = ith_pred
            graphs_same_source.append(graph)
            # Keep the best-status graph first (stable on original order).
            graphs_same_source[:] = tuple(
                zip(*sorted(enumerate(graphs_same_source), key=lambda x: (x[1].status.value, x[0])))
            )[1]
        idx = 0
        for gps in graphs:
            for gp in gps:
                # metadata = gg.metadata.copy()
                metadata = {}
                metadata["id"] = str(idx)
                metadata["annotator"] = "bart-amr"
                metadata["date"] = str(datetime.datetime.now())
                if "save-date" in metadata:
                    del metadata["save-date"]
                gp.metadata = metadata
                idx += 1
        # print("Before Penman Encoding")
        pieces = [penman.encode(g[0]) for g in graphs]
        # print("After Penman Encoding")
        val_outputs_folder = "val_outputs"
        # NOTE(review): shells out for mkdir; os.makedirs(..., exist_ok=True)
        # would be portable and avoid the subprocess.
        os.system("mkdir -p " + os.path.join(self.hparams.output_dir, val_outputs_folder))
        if "preds" in outputs[0]:
            output_test_predictions_file = os.path.join(
                self.hparams.output_dir,
                val_outputs_folder,
                "validation_predictions_" + str(self.step_count) + ".txt",
            )
            # write predictions and targets for later rouge evaluation.
            with open(output_test_predictions_file, "w") as p_writer:
                p_writer.write("\n\n".join(pieces))
            try:
                smatch_score = calculate_smatch(
                    self.hparams.data_dir + f"/{prefix}-gold.amr", output_test_predictions_file
                )
            except AttributeError:
                smatch_score = {"smatch": 0.0}
            rank_zero_info("number epoch: %s", self.step_count)
            rank_zero_info("%s smatch_info_bpe: %s", self.step_count, smatch_score)
            all_metrics[f"{prefix}_avg_smatch"] = smatch_score["smatch"]
            metric_tensor: torch.FloatTensor = torch.tensor(smatch_score["smatch"]).type_as(loss)
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        self.log_dict(all_metrics, sync_dist=True)
        # NOTE(review): ``metric_tensor`` is unbound if "preds" was missing
        # from outputs[0]; the return below would raise in that case.
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }

    def _generative_step(self, batch: dict) -> dict:
        """Beam-search generate for one batch and compute loss + timing stats."""
        t0 = time.time()
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        generated_ids = self.model.generate(
            src_ids,
            attention_mask=src_mask,
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            no_repeat_ngram_size=0,
            min_length=0,
            max_length=self.eval_max_length,
            length_penalty=self.eval_lenpen,
        )
        # Per-example generation time.
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds = [itm.tolist() for itm in generated_ids]
        loss_tensors = self._step(batch)
        base_metrics = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(
            gen_time=gen_time, gen_len=summ_len, preds=preds,
        )
        return base_metrics

    def test_step(self, batch, batch_idx):
        """Test uses the same generation path as validation."""
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        """Reuse validation aggregation, labelling metrics with 'test'."""
        return self.validation_epoch_end(outputs, prefix="test")

    def get_datamodule(self):
        """Build the AMR-parsing LightningDataModule from current hparams."""
        return AMRParsingDataModule(
            data_dir=self.hparams.data_dir,
            tokenizer=self.tokenizer,
            n_obs=self.n_obs,
            target_lens=self.target_lens,
            max_source_length=self.hparams.max_source_length,
            max_target_length=self.hparams.max_target_length,
            train_batch_size=self.hparams.train_batch_size,
            val_batch_size=self.hparams.eval_batch_size,
            dataloader_num_workers=self.hparams.num_workers,
            prefix=" ",
        )

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """Register all CLI flags this module understands on ``parser``."""
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help="The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded.",
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help="The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded.",
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,  # these defaults are optimized for CNNDM. For xsum, see README.md.
            type=int,
            help="The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded.",
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help="The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded.",
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument(
            "--logger_name",
            type=str,
            choices=["default", "wandb", "wandb_shared"],
            default="default",
        )
        parser.add_argument(
            "--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all."
        )
        parser.add_argument(
            "--n_val", type=int, default=-1, required=False, help="# examples. -1 means use all."
        )
        parser.add_argument(
            "--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all."
        )
        parser.add_argument(
            "--task",
            type=str,
            default="summarization",
            required=False,
            help="# examples. -1 means use all.",
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=5, required=False)
        parser.add_argument("--eval_lenpen", type=float, default=1.0, required=False)
        parser.add_argument("--checkpoint", type=str, default=None, required=False)
        parser.add_argument(
            "--val_metric",
            type=str,
            default=None,
            required=False,
            choices=["bleu", "rouge2", "loss", None],
        )
        parser.add_argument(
            "--eval_max_gen_length",
            type=int,
            default=None,
            help="never generate more than n tokens",
        )
        parser.add_argument(
            "--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save"
        )
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help="-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So val_check_interval will effect it.",
        )
        parser.add_argument(
            "--smart_init", action="store_true", default=False, help="smart init new embeddings."
        )
        return parser
class TranslationModule(SummarizationModule):
    """Seq2seq fine-tuning specialised for machine translation.

    Identical training loop to SummarizationModule; evaluation uses BLEU
    and the dataset is told which language pair to load.
    """

    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        # Translation datasets additionally need the source/target languages.
        self.dataset_kwargs.update(
            src_lang=hparams.src_lang,
            tgt_lang=hparams.tgt_lang,
        )

    def calc_generative_metrics(self, preds, target) -> dict:
        """Score generated translations against references with BLEU."""
        return calculate_bleu(preds, target)
class Graph2TextModule(SummarizationModule):
    """Graph-to-text generation variant of SummarizationModule.

    Only the metric (BLEU) and a startup log of the hyper-parameters differ
    from the base class.
    """

    mode = "Bart"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        # Log the full hyper-parameter set once, on rank zero only.
        rank_zero_info("parameters %s", hparams)

    def calc_generative_metrics(self, preds, target) -> dict:
        """Score generated text against references with BLEU."""
        return calculate_bleu(preds, target)
class AMRparsingModule(SummarizationModule):
    """Text-to-AMR parsing variant of SummarizationModule.

    Swaps in the PENMAN-aware BART tokenizer and evaluates with Smatch.
    """

    mode = "Bart"
    loss_names = ["loss"]
    metric_names = ["smatch"]
    default_val_metric = "smatch"

    def __init__(self, hparams, **kwargs):
        config = AutoConfig.from_pretrained(hparams.model_name_or_path)
        # Fall back to the model path when no separate tokenizer path is given.
        hparams.tokenizer_name_or_path = hparams.tokenizer_name_or_path if hparams.tokenizer_name_or_path is not None else hparams.model_name_or_path
        amr_tokenizer = PENMANBartTokenizer.from_pretrained(
            hparams.tokenizer_name_or_path,
            collapse_name_ops=False,
            use_pointer_tokens=True,
            raw_graph=False,
        )
        super().__init__(hparams, config=config, tokenizer=amr_tokenizer, **kwargs)
        rank_zero_info("parameters %s", hparams)
        # self.decoder_start_token_id = config.bos_token_id
        # Generation must start from the AMR-specific BOS token, not the
        # model config's default.
        self.decoder_start_token_id = amr_tokenizer.amr_bos_token_id

    def calc_generative_metrics(self, preds, target) -> dict:
        """Score predicted AMR graphs against gold graphs with Smatch."""
        return calculate_smatch(preds, target)
def main(args, model=None) -> SummarizationModule:
    """Build (or reuse) the module for ``args.task``, then train and/or
    evaluate it according to the ``do_train`` / ``do_predict`` / ``do_eval``
    flags.

    Args:
        args: parsed CLI namespace (Trainer flags + model-specific flags).
        model: optional pre-built module; when None one is constructed from
            ``args.task``.

    Returns:
        The (possibly trained) SummarizationModule.

    Raises:
        ValueError: if the output directory is non-empty before training, or
            if ``args.task`` matches no known task.
    """
    Path(args.output_dir).mkdir(exist_ok=True)
    if len(os.listdir(args.output_dir)) > 3 and args.do_train:
        raise ValueError(
            "Output directory ({}) already exists and is not empty.".format(args.output_dir)
        )
    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        elif "translation" in args.task:
            model: SummarizationModule = TranslationModule(args)
        elif "graph2text" in args.task:
            model: SummarizationModule = Graph2TextModule(args)
        elif "amrparsing" in args.task:
            model: SummarizationModule = AMRparsingModule(args)
        else:
            # Previously an unrecognised task fell through with model=None and
            # crashed below with an opaque AttributeError; fail fast instead.
            raise ValueError(
                "Unknown task {!r}; expected one of summarization, translation, "
                "graph2text, amrparsing".format(args.task)
            )
    print(model.model)
    print(
        "num. model params: {:,} (num. trained: {:,})".format(
            sum(p.numel() for p in model.model.parameters()),
            sum(p.numel() for p in model.model.parameters() if p.requires_grad),
        )
    )
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False
    # Only the loss metric improves downward; everything else is maximised.
    lower_is_better = args.val_metric == "loss"
    datamodule = model.get_datamodule()
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    if args.do_train:
        trainer.fit(model, datamodule=datamodule)
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not (args.do_predict or args.do_eval):
        return model
    model.hparams.test_checkpoint = ""
    if not args.checkpoint:
        checkpoints = list(
            sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
        )
    else:
        checkpoints = [args.checkpoint]
    print("checkpoints:", checkpoints)
    # if checkpoints:
    #     model.hparams.test_checkpoint = checkpoints[-1]
    #     trainer.resume_from_checkpoint = checkpoints[-1]
    if not args.do_train:
        if args.do_predict:
            print("Valid Set ...")
            trainer.validate(model, datamodule=datamodule)
            print("Test Set ...")
            trainer.test(model, datamodule=datamodule)
        if args.do_eval:
            print("Test Set ...")
            trainer.test(model, datamodule=datamodule)
        return model
    trainer.logger.log_hyperparams(model.hparams)
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    # Compose the CLI from PyTorch Lightning's Trainer flags plus the
    # model-specific flags declared on SummarizationModule, seed all RNGs,
    # then hand off to main().
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    setup_seed(args.seed)
    main(args)
989,484 | 27e3cb26c918b6b77edc45ed191d232b2400eb5a | from datetime import datetime, timedelta
from typing import Union
from asyncpg.exceptions import UniqueViolationError
from fastapi import HTTPException, status, Security
from fastapi.security import OAuth2PasswordBearer
from passlib.hash import bcrypt
from jose import jwt, JWTError
from google.oauth2 import id_token
from google.auth.transport import requests
from settings import settings
from .models import User
from . import schemas
oauth2_scheme = OAuth2PasswordBearer(tokenUrl='/auth/sign-in')
async def get_current_user(token: str = Security(oauth2_scheme)) -> schemas.UserOut:
    """Resolve the bearer token to its User record.

    Raises HTTP 401 via validate_token on a bad token, HTTP 404 when the
    token's subject no longer exists.
    """
    user_id = validate_token(token)
    account = await User.objects.get_or_none(id=user_id)
    if not account:
        raise HTTPException(status_code=404, detail="User not found")
    return account
def verify_password(plain_password: str, hashed_password: str) -> bool:
    """Return True iff *plain_password* matches the stored bcrypt hash."""
    matches = bcrypt.verify(plain_password, hashed_password)
    return matches
def hash_password(password):
    """Hash a plaintext password with bcrypt (salt generated internally)."""
    digest = bcrypt.hash(password)
    return digest
async def create_user(user_data: Union[schemas.UserCreate, schemas.GoogleUserCreate]) -> schemas.Token:
    """Persist a new user and return a signed JWT for them.

    Google sign-ups arrive without a password, so the hash is only computed
    when a password field is present.

    Raises:
        HTTPException(422): when the email or username is already taken.
    """
    user_data = user_data.dict()
    password_hash = None
    if user_data.get('password'):
        password_hash = hash_password(user_data.get('password'))
    try:
        # NOTE(review): get_or_create is used rather than create — presumably
        # so repeated Google sign-ins reuse the existing row; confirm the ORM
        # returns the model instance (not a (model, created) tuple) here.
        user = await User.objects.get_or_create(
            username=user_data.get('username'),
            phone=user_data.get('phone'),
            email=user_data.get('email'),
            password_hash=password_hash,
            avatar=user_data.get('avatar')
        )
        return create_token(user)
    except UniqueViolationError:
        raise HTTPException(422, "Email or Username already exists")
def create_token(user: User) -> schemas.Token:
    """Issue a signed JWT for *user*, valid for settings.jwt_expiration seconds.

    The token embeds the user id as ``sub`` and a full serialized copy of the
    user record under ``user``.
    """
    issued_at = datetime.utcnow()
    claims = {
        'iat': issued_at,
        'nbf': issued_at,
        'exp': issued_at + timedelta(seconds=settings.jwt_expiration),
        'sub': str(user.id),
        'user': user.dict(),
    }
    encoded = jwt.encode(claims, settings.jwt_secret, settings.jwt_algorithm)
    return schemas.Token(access_token=encoded)
def validate_token(token: str) -> str:
    """Decode *token* and return its ``sub`` claim (the user id).

    Raises:
        HTTPException(401): on any signature/expiry/format failure.
    """
    try:
        claims = jwt.decode(token, settings.jwt_secret, settings.jwt_algorithm)
    except JWTError:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail='Could not validate credentials',
            headers={
                'WWW-Authenticate': 'Bearer'
            }
        ) from None
    return claims.get('sub')
async def authenticate_user(username: str, password: str) -> schemas.Token:
    """Check a username/password pair and return a fresh JWT.

    Raises:
        HTTPException(401): when the user is unknown or the password is wrong
            (same error either way, so usernames cannot be enumerated).
    """
    credential_error = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail='Incorrect username or password',
        headers={
            'WWW-Authenticate': 'Bearer'
        }
    )
    account = await User.objects.get_or_none(username=username)
    if not account:
        raise credential_error
    if not verify_password(password, account.password_hash):
        raise credential_error
    return create_token(account)
async def google_auth(user: schemas.GoogleUserCreate) -> schemas.Token:
    """Verify a Google OAuth2 id token, then create (or reuse) the account.

    Raises:
        HTTPException(403): when Google rejects the token.
    """
    try:
        id_token.verify_oauth2_token(
            user.token, requests.Request(), settings.google_client_id
        )
    except ValueError:
        raise HTTPException(403, "Bad code")
    return await create_user(user)
|
989,485 | a87c4fe31d351c1674c1d317fbbf6ac82786a4f7 | '''
Runtime: 52 ms, faster than 40.30% of Python3 online submissions for Search Insert Position.
Memory Usage: 13.5 MB, less than 100.00% of Python3 online submissions for Search Insert Position.
'''
class Solution:
    def searchInsert(self, nums: list[int], target: int) -> int:
        """Return the index of *target* in sorted *nums*, or the index at
        which it would be inserted to keep *nums* sorted.

        The hand-rolled recursive binary search is exactly what the standard
        library's bisect_left implements (leftmost insertion point, O(log n)),
        so delegate to it. Also replaces the unimported ``List`` annotation
        (a NameError outside LeetCode's harness) with the builtin generic.
        """
        return bisect_left(nums, target)
|
989,486 | e06c2a98647297be711fc4c537a2f6849bbdc325 | from django import template
register = template.Library()
from django.core.exceptions import NON_FIELD_ERRORS
def error_list(value):
    """Render form errors as a single string.

    Lists become a comma-separated string, strings pass through unchanged,
    anything else yields the empty string.
    """
    if isinstance(value, str):
        return value
    if isinstance(value, list):
        return ", ".join(value)
    return ""
@register.filter(name='error_object')
def error_object(value):
    """Template filter that flattens a form-error structure to a string.

    Strings pass through; dicts contribute only their non-field errors
    (empty string when there are none); other values defer to error_list.
    """
    if isinstance(value, str):
        return value
    if not isinstance(value, dict):
        return error_list(value)
    if NON_FIELD_ERRORS not in value:
        return ""
    return error_list(value[NON_FIELD_ERRORS])
# Register error_list as a template filter under the same name (imperative
# form, equivalent to the @register.filter decorator used above).
register.filter('error_list', error_list)
|
989,487 | ba75d80a542335301691a51ee699bb5ddd365879 | """Tests for pib."""
|
989,488 | 533bd7d3b16685155a1c5155f1f9d7f036015daa | from __future__ import unicode_literals
from django.db import models
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from wagtail.wagtailadmin.edit_handlers import (
FieldPanel, FieldRowPanel, InlinePanel, MultiFieldPanel, PageChooserPanel, StreamFieldPanel,
)
from wagtail.wagtailcore.fields import RichTextField, StreamField
from wagtail.wagtailcore.models import Collection, Page
from wagtail.wagtailforms.models import AbstractEmailForm, AbstractFormField
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailsearch import index
from wagtail.wagtailsnippets.models import register_snippet
from .blocks import BaseStreamBlock
@register_snippet
class People(ClusterableModel):
    """
    `People` snippets are secondary content objects that do not require their
    own full webpage to render.
    """
    first_name = models.CharField("First name", max_length=254)
    last_name = models.CharField("Last name", max_length=254)
    job_title = models.CharField("Job title", max_length=254)
    image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )

    panels = [
        FieldPanel('first_name', classname="col6"),
        FieldPanel('last_name', classname="col6"),
        FieldPanel('job_title'),
        ImageChooserPanel('image')
    ]

    # NOTE(review): reuses Page.search_fields on a snippet — confirm that is
    # intended rather than index.SearchField entries alone.
    search_fields = Page.search_fields + [
        index.SearchField('first_name'),
        index.SearchField('last_name'),
    ]

    @property
    def thumb_image(self):
        # fail silently if there is no profile pic or the rendition file can't
        # be found. Note @richbrennan worked out how to do this...
        # Fixed: a bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; catch Exception instead.
        try:
            return self.image.get_rendition('fill-50x50').img_tag()
        except Exception:
            return ''

    def __str__(self):
        return '{} {}'.format(self.first_name, self.last_name)

    class Meta:
        verbose_name = 'Person'
        verbose_name_plural = 'People'
@register_snippet
class FooterText(models.Model):
    """
    This provides editable text for the site footer
    """
    # Single rich-text body; there is intentionally only one of these.
    body = RichTextField()

    panels = [
        FieldPanel('body'),
    ]

    def __str__(self):
        # Fixed label: instances are interchangeable footer blobs.
        return "Footer text"

    class Meta:
        verbose_name_plural = 'Footer Text'
class StandardPage(Page):
    """
    A fairly generic site page, to be used for About, etc.
    """
    # Short intro shown above the body.
    introduction = models.TextField(
        help_text='Text to describe the page',
        blank=True)
    image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+',
        help_text='Landscape mode only; horizontal width between 1000px and 3000px.'
    )
    # Free-form StreamField body built from the shared block set.
    body = StreamField(
        BaseStreamBlock(), verbose_name="Page body", blank=True
    )

    content_panels = Page.content_panels + [
        FieldPanel('introduction', classname="full"),
        StreamFieldPanel('body'),
        ImageChooserPanel('image'),
    ]
class HomePage(Page):
    """
    The Home Page

    Composed of a hero section, a promo section, a StreamField body and three
    configurable "featured" page sections.
    """
    # --- Hero section ---
    image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+',
        help_text='Homepage image'
    )
    hero_text = models.CharField(
        max_length=255,
        help_text='Write an introduction for the bakery'
    )
    hero_cta = models.CharField(
        verbose_name='Hero CTA',
        max_length=255,
        help_text='Text to display on Call to Action'
    )
    hero_cta_link = models.ForeignKey(
        'wagtailcore.Page',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+',
        verbose_name='Hero CTA link',
        help_text='Choose a page to link to for the Call to Action'
    )

    # --- Main body ---
    body = StreamField(
        BaseStreamBlock(), verbose_name="Home content block", blank=True
    )

    # --- Promo section ---
    promo_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+',
        help_text='Promo image'
    )
    promo_title = models.CharField(
        null=True,
        blank=True,
        max_length=255,
        help_text='Title to display above the promo copy'
    )
    promo_text = RichTextField(
        null=True,
        blank=True,
        help_text='Write some promotional copy'
    )

    # --- Featured sections: each is a title plus a parent page whose
    # children get displayed. ---
    featured_section_1_title = models.CharField(
        null=True,
        blank=True,
        max_length=255,
        help_text='Title to display above the promo copy'
    )
    featured_section_1 = models.ForeignKey(
        'wagtailcore.Page',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+',
        help_text='First featured section for the homepage. Will display up to three child items.',
        verbose_name='Featured section 1'
    )
    featured_section_2_title = models.CharField(
        null=True,
        blank=True,
        max_length=255,
        help_text='Title to display above the promo copy'
    )
    featured_section_2 = models.ForeignKey(
        'wagtailcore.Page',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+',
        help_text='Second featured section for the homepage. Will display up to three child items.',
        verbose_name='Featured section 2'
    )
    featured_section_3_title = models.CharField(
        null=True,
        blank=True,
        max_length=255,
        help_text='Title to display above the promo copy'
    )
    featured_section_3 = models.ForeignKey(
        'wagtailcore.Page',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+',
        help_text='Third featured section for the homepage. Will display up to six child items.',
        verbose_name='Featured section 3'
    )

    content_panels = Page.content_panels + [
        MultiFieldPanel([
            ImageChooserPanel('image'),
            FieldPanel('hero_text', classname="full"),
            MultiFieldPanel([
                FieldPanel('hero_cta'),
                PageChooserPanel('hero_cta_link'),
            ])
        ], heading="Hero section"),
        MultiFieldPanel([
            ImageChooserPanel('promo_image'),
            FieldPanel('promo_title'),
            FieldPanel('promo_text'),
        ], heading="Promo section"),
        StreamFieldPanel('body'),
        MultiFieldPanel([
            MultiFieldPanel([
                FieldPanel('featured_section_1_title'),
                PageChooserPanel('featured_section_1'),
            ]),
            MultiFieldPanel([
                FieldPanel('featured_section_2_title'),
                PageChooserPanel('featured_section_2'),
            ]),
            MultiFieldPanel([
                FieldPanel('featured_section_3_title'),
                PageChooserPanel('featured_section_3'),
            ])
        ], heading="Featured homepage sections", classname="collapsible")
    ]

    def __str__(self):
        return self.title
class GalleryPage(Page):
    """
    This is a page to list locations from the selected Collection
    """
    introduction = models.TextField(
        help_text='Text to describe the page',
        blank=True)
    image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+',
        help_text='Landscape mode only; horizontal width between 1000px and 3000px.'
    )
    body = StreamField(
        BaseStreamBlock(), verbose_name="Page body", blank=True
    )
    # Image collection whose contents this gallery displays; the Root
    # collection is excluded from the choices.
    collection = models.ForeignKey(
        Collection,
        limit_choices_to=~models.Q(name__in=['Root']),
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        help_text='Select the image collection for this gallery.'
    )

    content_panels = Page.content_panels + [
        FieldPanel('introduction', classname="full"),
        StreamFieldPanel('body'),
        ImageChooserPanel('image'),
        FieldPanel('collection'),
    ]

    # Defining what content type can sit under the parent. Since it's a blank
    # array no subpage can be added
    subpage_types = []
class FormField(AbstractFormField):
    """A single configurable field belonging to a FormPage."""
    page = ParentalKey('FormPage', related_name='form_fields')
class FormPage(AbstractEmailForm):
    """Contact-style page whose submissions are emailed to the site owner.

    Fields are editor-defined via the inline FormField rows; the thank-you
    text is shown after a successful submission.
    """
    image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    body = StreamField(BaseStreamBlock())
    thank_you_text = RichTextField(blank=True)

    content_panels = AbstractEmailForm.content_panels + [
        ImageChooserPanel('image'),
        StreamFieldPanel('body'),
        InlinePanel('form_fields', label="Form fields"),
        FieldPanel('thank_you_text', classname="full"),
        MultiFieldPanel([
            FieldRowPanel([
                FieldPanel('from_address', classname="col6"),
                FieldPanel('to_address', classname="col6"),
            ]),
            FieldPanel('subject'),
        ], "Email"),
    ]
|
989,489 | e66074208ab530ab182f4b3b2afaf4538370969d | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# GradientDescent 는 vanishing 문제가 있는데(vanishing gradient), 굉장히 큰 값을, 작은 그릇에 우겨넣다 보니까 문제가 생김. 기울기가 점점 작아지면서
# (기울기가 0에 수렴한다고 생각해봐) 기울기 찾기가 어려워짐. 그래서 나온 게 Relu - Rectified Linear Unit
# hidden layer를 거치면서 미분을 거듭하고, 점점 기울기가 작아지고 수가 0에 수렴할 텐데, 그럼 backpropagation 하기가 어려워짐.
# 결과값이 너무 희미해서 그 전의 계층으로 역추적 하는 과정이 불분명해짐. 미분을 거듭할 수록, 반대로 적분해서 원래 함수 찾기가 어려워지는 것처럼
# 얘도 단점은 있는데, 음수는 그냥 다 0으로 처리해버림.
# 무조건 Relu가 좋다, sigmoid 가 좋다, tanh가 좋다, 그런 건 없고, 그냥 우리가 다룰 데이터에 따라서 그때그때 다를 뿐
# In[ ]:
import tensorflow as tf
import numpy as np
import random
import matplotlib.pyplot as plt
# In[ ]:
from tensorflow.examples.tutorials.mnist import input_data #기본 제공 되는 예시 자료 갖다 쓰겠다고
# In[ ]:
mnist = input_data.read_data_sets('mnist_data/clothes', one_hot = True, source_url = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/')
# 다운로드 받은 거 어디에 저장할지, one hot encoding 된 상태로 받을 건지, 다운로드 받을 url은 어디인지
# In[ ]:
# 학습할 데이터, 아까 다운 받은 데이터 확인해 보기
print('Train image shape : ', mnist.train.images.shape)
print('Train label shape : ', mnist.train.labels.shape)
# In[ ]:
# 테스트 정보 출력
print('Test image shape : ', mnist.test.images.shape)
print('Test labels shape : ', mnist.test.labels.shape)
# In[ ]:
#학습할 데이터도 잘 있고, 테스트할 데이터도 잘 있는 듯. 이제는 하나 뽑아서 화면에 띄워 보기.
r = random.randint(0, mnist.test.num_examples-1) #랜덤으로 뽑을 번호의 범위를 정해주는 것 뿐. 그 값을 변수가지고 정해주는 것뿐
fig = plt.figure(r)
subplot = fig.add_subplot(3, 5, 1)
subplot.imshow(mnist.test.images[r].reshape(28,28), cmap = plt.cm.gray_r)
# 이미지 나오는 거에서 좌표 보면 시작점이 어딘지 잘 봐. 0행은 맨 위야. 아래로 내려갈수록 y 좌표는 늘어나. 행 번호는 늘어나
subplot.set_xticks([]) # x축 업애기
subplot.set_yticks([]) # y축 없애기
plt.show() # 사진 뽑는 거라고. 알려줘서 다른 불필요한 애들 삭제하기
# In[ ]:
# 신경망 구성하기
# In[ ]:
x = tf.placeholder(tf.float32, shape = [None, 784])
y = tf.placeholder(tf.float32, shape= [None, 10])
# In[ ]:
w1 = tf.Variable(tf.truncated_normal([784, 512]))
b1 = tf.Variable(tf.truncated_normal([512]))
y1 = tf.nn.relu(tf.add( tf.matmul(x,w1),b1 ))
# In[ ]:
init = tf.global_variables_initializer()
# In[ ]:
#신경망 두 번째 층 hidden layer
w2 = tf.Variable(tf.truncated_normal([512, 128]))
b2 = tf.Variable(tf.truncated_normal([128]))
y2 = tf.nn.relu(tf.add( tf.matmul(y1,w2),b2 ))
# In[ ]:
#신경만 세 번째 층 hidden layer - 마지막 층.
w3 = tf.Variable(tf.truncated_normal([128, 10]))
b3 = tf.Variable(tf.truncated_normal([10]))
logits = tf.matmul(y2, w3) + b3 #다른 층에서 말하자면 이게 결국 출력값 y3 이었을 것
# In[ ]:
hypothesis = tf.nn.relu(logits)
# In[ ]:
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits = logits, labels = y))
# In[ ]:
train = tf.train.AdamOptimizer(0.01).minimize(cost)
# In[ ]:
batch_size = 100
total_batch = int(mnist.train.num_examples / batch_size)
# In[ ]:
sess = tf.Session()
# In[ ]:
sess.run(init)
# In[ ]:
# Train for 15 epochs over mini-batches of the training set.
for epoch in range(15) :
    total_cost = 0
    for i in range(total_batch) :
        x_batch, y_batch = mnist.train.next_batch(batch_size)
        _, cost_val = sess.run([train, cost], feed_dict = {x: x_batch, y: y_batch})
        # NOTE(review): cost_val is never accumulated into total_cost, so no
        # per-epoch loss is ever reported -- confirm whether that was intended.
# In[ ]:
# In[ ]:
is_correct = tf.equal( tf.argmax(logits,1),tf.argmax(y,1) )
accuracy = tf.reduce_mean( tf.cast(is_correct,tf.float32) )
print('Accuracy: ', sess.run(accuracy, feed_dict = {x: mnist.test.images, y: mnist.test.labels} ))
# In[ ]:
label_dict = {
0: 'T-shirt/top',
1: 'Trouser',
2: 'Pullover',
3: 'Dress',
4: 'Coat',
5: 'Sandal',
6: 'Shirt',
7: 'Sneaker',
8: 'Bag',
9: 'Ankle boot'
}
# In[ ]:
# 학습 잘 됐는지 확인해보자. 아까 위에서 쓴 거랑 거의 비슷. 복붙
#여기서 불러오는 부분만 학습한 방적식을 run에 넣어서 뽑아오면 됨.
# Sanity-check the trained model: show 10 random test images, titled with
# their ground-truth class name from label_dict.
for i in range(10) :
    r = random.randint(0, mnist.test.num_examples-1)
    fig = plt.figure(r)
    subplot = fig.add_subplot(2, 5, i+1)
    subplot.imshow(mnist.test.images[r].reshape(28,28), cmap = plt.cm.gray_r)
    subplot.set_xticks([])  # hide x-axis ticks
    subplot.set_yticks([])  # hide y-axis ticks
    # Bug fix: the original referenced an undefined name `labels` and called
    # set_title("%s", ...), which passes the label into set_title's fontdict
    # parameter. Look up the sampled image's one-hot label instead.
    subplot.set_title(label_dict[np.argmax(mnist.test.labels[r])])
plt.show()
# In[ ]:
|
989,490 | d37d521858eda3398bb2f6ff0adfb20ed29e3226 | # -*- coding: utf-8 -*-
# Copyright (c) 2020 GBC Networks Oy. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import re
import sys
from ft_dbconnect import MysqlDB
from ft_spell_checker import SpellChecker
# check python version >= 3.6
assert sys.version_info >= (3, 6)
def remove_special_cases(config, sc, word):
    """Return True when *word* should be dropped from the vec-file.

    A word is dropped when it contains a banned character, or when it is
    short / looks foreign and also fails the spell checker *sc*.
    """
    # Apostrophes, backslashes and the Latin oe ligature are always discarded.
    for banned in ("'", '\\', '\u0153'):
        if banned in word:
            return True
    # Words shorter than RANDOM_MIN_LENGTH must pass the spell checker.
    if len(word) < int(config['RANDOM_MIN_LENGTH']) and not sc.spelling(word):
        return True
    # Words matching the "foreign characters" pattern must also pass spelling.
    foreign = config['REGEX_FOREIGN']
    if foreign and re.search(foreign, word) and not sc.spelling(word):
        return True
    # None of the removal rules matched: keep the word.
    return False
def remove_garbage_from_vecfile(config, vecfile, new_vecfile):
    """Copy *vecfile* to *new_vecfile*, dropping unwanted word vectors.

    Each input line starts with the word followed by its vector values.
    A line is dropped when the word matches one of the special cases
    (see remove_special_cases) or is found in the garbage-words database
    table; every other line is written through unchanged. Progress is
    printed: removed special-case words verbatim, other removals as dots.
    """
    db = MysqlDB(config)
    sc = SpellChecker(config)
    outfile = open(new_vecfile, 'w')
    with open(vecfile) as infile:
        count = 0
        print('Removing words from vec-file')
        regex = r'^([^ ]+) '
        for line in infile:
            count += 1
            m = re.match(regex, line) # get the first word
            if m is not None:
                word = m.group(1)
                if remove_special_cases(config, sc, word):
                    print(f' {word} ', end='')  # show the removed word itself
                    continue
                # remove if found in garbwords table
                if db.find_word(word):
                    print('.', end='') # show normal removed word as dot
                    continue
                # everything is ok - write line to output file
                outfile.write(line)
            if not count % 100:
                print('') # print carriage return every 100 iterations
    print('\n')
    outfile.close()
def prepend_line_to_file(text, filepath):
    """Prepend *text* (followed by a newline) to the file at *filepath*.

    Writes the new first line plus the original content to a temporary file,
    then renames it over the original. Returns 0 on success (kept for
    caller compatibility with add_header_to_vecfile).

    NOTE: os.rename fails across filesystems; the temp file is created in
    the current directory, so filepath should live on the same filesystem.
    """
    tmpfile = './__ptemp'
    with open(tmpfile, 'w') as tmp:
        tmp.write(text)
        tmp.write('\n')
        # Stream the original file line by line after the new header;
        # 'src' avoids shadowing the builtin input().
        with open(filepath, 'r') as src:
            for line in src:
                tmp.write(line)
    os.rename(tmpfile, filepath)
    return 0
def add_header_to_vecfile(vecfile):
    """Prepend a '<rows> <columns>' header line to *vecfile*.

    Rows are counted as newline characters; columns as the number of spaces
    on the first line (presumably word + N values separated/terminated by
    spaces -- verify against the vec-file writer). Returns True on success.
    """
    with open(vecfile) as fh:
        # Column count from the first data line.
        columns = fh.readline().count(' ')
        # Rewind and count newlines in 1 MiB chunks to get the row count.
        fh.seek(0)
        rows = 0
        while True:
            chunk = fh.read(1024 * 1024)
            if not chunk:
                break
            rows += chunk.count('\n')
    header = f'{rows} {columns}'
    print(f'Adding header "{header}"')
    if prepend_line_to_file(header, vecfile) == 0:
        print('Header added successfully')
        return True
    print('\nError: Failed to add header\n')
    return False
if __name__ == "__main__":
    # execute only if run as a script
    from ft_config import load_config
    config = load_config()
    # Filter the raw vec-file, then prepend the "<rows> <columns>" header.
    remove_garbage_from_vecfile(config, config['VEC_FILE'], config['OUT_FILE'])
    add_header_to_vecfile(config['OUT_FILE'])
|
989,491 | 4d55e165c86487081402f5239e4363c05808a86b | import os
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.callbacks import ModelCheckpoint
from denseNetGenerator import DataGenerator
from denseNet import build_denseNet
def _split_image_names(split_dir):
    """Collect 'videoDir/imageName' relative paths for every video directory
    directly under *split_dir*."""
    names = []
    for video_dir in os.listdir(split_dir):
        for img_name in os.listdir(split_dir + '/' + video_dir):
            names.append(video_dir + '/' + img_name)
    return names


def fill_infos(dataset_dir):
    """Build the generator bookkeeping for a dataset directory.

    Returns (partition, labels):
      partition -- {'train': [...], 'validation': [...]} of relative image paths
      labels    -- {image path: one-hot label list} via getOneHotLabel

    The train and validation walks were duplicated in the original; both now
    share _split_image_names.
    """
    partition = {
        'train': _split_image_names(dataset_dir + '/train'),
        'validation': _split_image_names(dataset_dir + '/validation'),
    }
    labels = {}
    for img_name in partition['train'] + partition['validation']:
        labels[img_name] = getOneHotLabel(img_name)
    return partition, labels
#: Canonical class -> one-hot index table; spelling variants map to the same
#: index. Replaces the original 12-branch elif chain.
_WASHING_LABELS = {
    'NoWashing': 0,
    'WetAndApplySoap': 1,
    'RubPalmToPalm': 2,
    'RubBackOfLeftHand': 3,
    'RubBackOfRightHand': 4,
    'RubWithInterlacedFingers': 5,
    'RubWithInterlockedFingers': 6,
    'RubLeftThumb': 7,
    'RubRightThumb': 8,
    'RubRightFingerTips': 9,
    'RubRightFingertips': 9,   # spelling variant seen in some file names
    'RubLeftFingerTips': 10,
    'RubLeftFingertips': 10,   # spelling variant
    'RinseHands': 11,
}


def getOneHotLabel(img_name):
    """Return the 12-way one-hot label list encoded in *img_name*.

    The class name is the third '-'-separated token of the file name with
    its 4-character extension stripped (e.g. 'vid-01-Dress.png' -> 'Dress').
    Unknown labels keep the original behaviour: print a diagnostic and
    return None.
    """
    label = img_name.split('-')[2][:-4]
    index = _WASHING_LABELS.get(label)
    if index is None:
        print(label, 'wrong')
        return None
    return [1 if i == index else 0 for i in range(12)]
# Data-generator settings shared by the train and validation generators.
params = {'dim': (256,256,3),
          'batch_size':64 ,
          'shuffle': True}

# Checkpoint path pattern: epoch number and validation accuracy in the name.
fpath = "check_points/DenseNetWithoutUnet/" + "DenseNet_{epoch:02d}-{val_accuracy:.2f}.hdf5"
dataset_dir = 'DenseNetDataset'
nth_frame = 0
optical_flow_dir = None  # no optical-flow input for this run

partition, labels = fill_infos(dataset_dir)
print(len(partition['validation']))

training_generator = DataGenerator(dataset_dir, 'train', partition['train'], labels, nth_frame, optical_flow_dir, **params)
validation_generator = DataGenerator(dataset_dir, 'validation', partition['validation'], labels, nth_frame, optical_flow_dir, **params)

# Save a checkpoint after every epoch (save_best_only=False keeps all of them).
check_point = ModelCheckpoint(fpath, monitor='val_accuracy',
                              verbose=2, save_best_only=False, mode='max')

model = build_denseNet(params['dim'], 12)  # 12 output classes
# Resume from previously trained weights -- TODO confirm this file exists.
model.load_weights('4_temmuz_DenseNet_02-0.79.hdf5')

history = model.fit_generator(generator=training_generator,
                              validation_data=validation_generator,
                              callbacks=[check_point],
                              epochs=50, verbose=1,
                              use_multiprocessing=True, workers=4)

# Plot train vs. validation accuracy over epochs.
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
|
989,492 | 32de8e2d50d7ee6e1e614ffb1e0f2add791029f8 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-04-01 22:16
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.manager
class Migration(migrations.Migration):
    """Register a custom manager named 'new_objects' on the Question model.

    AlterModelManagers only updates migration state; it performs no schema
    change in the database.
    """

    dependencies = [
        ('service', '0004_auto_20180401_2203'),
    ]

    operations = [
        migrations.AlterModelManagers(
            name='question',
            managers=[
                ('new_objects', django.db.models.manager.Manager()),
            ],
        ),
    ]
|
989,493 | b7b2060adae2005b82eef8c8bcbaa57acb3875ee | #! /usr/bin/env python
#! -*- coding: utf-8 -*-
import re
# Pattern found at the start of the string: span() reports (0, 3).
print(re.search("www", "www.alibaba.com").span())
# re.search scans the whole string, so a match away from the start is found too.
print(re.search("com", "www.alibaba.com").span())
|
989,494 | e7326f752b6503fe8b55538cf7ab96c9c7f09a0c | """
입력으로 1개의 정수 N 이 주어진다.
정수 N 의 약수를 오름차순으로 출력하는 프로그램을 작성하라.
[제약사항]
N은 1이상 1,000이하의 정수이다. (1 ≤ N ≤ 1,000)
[입력]
입력으로 정수 N 이 주어진다.
[출력]
정수 N 의 모든 약수를 오름차순으로 출력한다.
"""
import sys
# Redirect stdin so input() reads from the bundled test-data file.
sys.stdin = open('1933.txt', 'r', encoding = 'UTF-8')

N = int(input())
# Trial division: print every divisor of N in ascending order on one line.
for i in range(1, N+1):
    if N % i == 0:
        print(i, end=' ')
|
989,495 | e7321690434f3d1d6fcfb4f1213c6fb9830b35d9 | d={"id":112,"name":"gokul","role":"senior employee", "age":27}
print(d)
d.copy()
print(d)
d.items()
print(d)
d1=d
print(d1)
print(d1["id"])
d.values()
print(d) |
989,496 | 7e09ee0388d6b931efe0d66c9327eaec396bcfa8 | from lexicon import WORDS
from cardbox import cardbox_quiz
from matcher import *
from quiz import anagram_question, anagram_quiz
default_cardbox = 'evan'
def judge(*words):
    """Return True only when every supplied word is present in the lexicon."""
    return all(word in WORDS for word in words)
def quiz_from_cardbox(*matchers, cb=default_cardbox):
    """Run a cardbox quiz for cardbox *cb*, filtered by *matchers*.

    Bug fix: the keyword default was the undefined name `dcb`, which made
    importing this module raise NameError at definition time; the intended
    default is the module-level `default_cardbox`.
    """
    cardbox_quiz(cb, 0.01, 3, 2, 1, *matchers)
def quiz(*matchers):
    """Thin wrapper: run an anagram quiz built from the given matchers."""
    anagram_quiz(*matchers)
if __name__ == "__main__":
quiz_from_cardbox()
|
989,497 | bf69ffb2a9d80718afcdb1aef84cd8cc2c292108 | #!/bin/python3
import sys

# Usage: script <input_image> [<target_dir>]; input path is required.
if len(sys.argv) < 2:
    print("missing argument")
    sys.exit()

target_path = '.'
if len(sys.argv) == 3:
    target_path=sys.argv[2]

# Hamming(7,4) generator-matrix rows (G) and parity-check rows (H).
# NOTE(review): H is never used in this script -- presumably kept for a
# matching decoder; confirm before removing.
G=[0b0111000, 0b1010100,0b1100010, 0b1110001]
H=[0b1000111, 0b101011, 0b0011101]
def codage(nbr, generator=None):
    """Compute the Hamming(7,4) code word of a 4-bit number.

    Each set bit of *nbr* (bit i selects row len-1-i) XORs the matching row
    of the generator matrix into the result.

    generator: optional list of generator-matrix rows; defaults to the
    module-level G, so existing callers are unaffected. The parameter makes
    the function usable (and testable) without the module globals.
    """
    if generator is None:
        generator = G
    result = 0
    # reversed() pairs bit 0 of nbr with the last matrix row, as before.
    for index, row in enumerate(reversed(generator)):
        if nbr & (1 << index):
            result ^= row
    return result
with open(sys.argv[1]) as img:
    splitedPath = sys.argv[1].split('/')
    # Output keeps the source file name, prefixed with "enc_", in target_path.
    with open(target_path+'/'+"enc_"+splitedPath[-1],"w+") as enc_img:
        # extract and write the 3 first lines (image header, presumably a
        # plain-text PGM/PPM header -- verify against the input format)
        for i in range(1,4):
            enc_img.write(next(img))
        # calculate Hamming code for every byte cut in two equal parts:
        # high nibble first, then low nibble, each encoded to a 7-bit word.
        for line in img:
            for number in map(int,line.rstrip().split()):
                enc_img.write(str(codage((number&0b11110000)>>4))+' ')
                enc_img.write(str(codage(number&0b00001111))+' ')
            enc_img.write('\n')
|
989,498 | c403cd36e5768edfc4e460bfff60dcae7768629d |
# Mejorar código para que sólo use un diccionario
def longest(input_string):
    """Return {char, length} for the longest run of consecutive equal chars.

    Uses a single per-character dictionary of best streaks, resolving the
    TODO at the top of the file. Also fixes a bug in the original: a
    character reappearing after a different character resumed its old
    streak count (e.g. 'aba' reported a streak of 2 for 'a').

    Raises ValueError on an empty string (as the original did, via max()
    over an empty dict).
    """
    streaks = {}
    prev = None
    run = 0
    for ch in input_string:
        # Extend the current run, or start a new one on a character change.
        run = run + 1 if ch == prev else 1
        prev = ch
        if run > streaks.get(ch, 0):
            streaks[ch] = run
    # Same return shape as before: a set of the best char and its count.
    return {max(streaks, key=streaks.get), max(streaks.values())}
def longest1(input_string):
    """Return {char: length} for the longest run of consecutive equal chars.

    On ties the earliest run wins; an empty string yields {'': 0}.
    """
    best_char, best_count = '', 0
    run_char, run_count = '', 0
    for ch in input_string:
        # Extend the current run, or restart the counter on a new character.
        run_count = run_count + 1 if ch == run_char else 1
        run_char = ch
        if run_count > best_count:
            best_char, best_count = ch, run_count
    return {best_char: best_count}
# Quick side-by-side check of the two implementations on the same sample.
s = 'aaaabfddddduuuya'
print(longest(s))
print(longest1(s))
989,499 | b93701890000613c50c058f0c727c6373a248c41 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.3.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Technology First Data Analytics SIG
#
# ## Jupyter Notebooks Talk - February 28, 2020
#
# ### Robert Ward - Manager, Advanced Analytics (Speedway)
# + [markdown] slideshow={"slide_type": "slide"}
# # Background
#
# - Former Accountant (still recovering)
# - Started learning Python ~7 years ago
# - LexisNexis
# - SGS
# - AIS (Applied Information Sciences)
# - Speedway
#
# **Undergrad in Accounting - Cedarville** <br>
# **Masters in Business Analytics - American University (Washington DC)**
# + [markdown] slideshow={"slide_type": "slide"}
# # What is a Jupyter Notebook?
# + [markdown] slideshow={"slide_type": "fragment"}
# - An interactive development environment
# - Combines code and output into one page
# - Development is done through [Project Jupyter](https://jupyter.org/)
# - Supports over 40 languages (Python, R, Julia, C, etc.)
# + [markdown] slideshow={"slide_type": "fragment"}
# ## Also...
# + [markdown] slideshow={"slide_type": "fragment"}
# ## ...this [slideshow](http://localhost:8888/notebooks/JupyterTalk.ipynb)!
# + [markdown] slideshow={"slide_type": "fragment"}
# ### Thanks to Markdown and Reveal.js
# + [markdown] slideshow={"slide_type": "slide"}
# # History
# + [markdown] slideshow={"slide_type": "fragment"}
# - Originated from the IPython project in 2014
# - IPython was created as an interactive computing environment for Python
# - Jupyter was created to create a language agnostic interactive environment for data science and scientific computing
# + [markdown] slideshow={"slide_type": "slide"}
# # Some caveats...
# + [markdown] slideshow={"slide_type": "fragment"}
# ### There are some strong opinions about notebooks
#
# ### Nature [article](https://www.nature.com/articles/d41586-018-07196-1) on Jupyter:
# 
#
# ### Joel Grus' [I hate notebooks](https://docs.google.com/presentation/d/1n2RlMdmv1p25Xy5thJUhkKGvjtV-dkAIsUXP-AL4ffI/edit#slide=id.g362da58057_0_1) talk at JupyterCon 2018
# + [markdown] slideshow={"slide_type": "slide"}
# # Why Jupyter?
# + [markdown] slideshow={"slide_type": "fragment"}
# ## Traditional software development looks like...
#
# 
# + [markdown] slideshow={"slide_type": "fragment"}
# # This isn't the way most data analysts or data scientists want to work
# -
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.