index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
9,400 | 451c353a949458f5f71783c4aba1888c40018bfa | from rest_framework import serializers
from . import models
class RaumSerializer(serializers.ModelSerializer):
    """Serializer for Raum (room) records: number, seats, beamer/whiteboard flags."""

    class Meta:
        # Model and the exact field set exposed through the API.
        model = models.Raum
        fields = [
            "Raumnummer",
            "Anzahl_Sitzplaetze",
            "Beamer",
            "Whiteboard",
        ]
class ZeitraumSerializer(serializers.ModelSerializer):
    """Serializer for Zeitraum (time slot) records: date plus lecture/start/end times."""

    class Meta:
        # Model and the exact field set exposed through the API.
        model = models.Zeitraum
        fields = [
            "Vorlesungszeit",
            "EndTime",
            "Datum",
            "StartTime",
        ]
class RaumbelegungSerializer(serializers.ModelSerializer):
    """Serializer for Raumbelegung (room booking) records: occupied flag and reason."""

    class Meta:
        # Model and the exact field set exposed through the API.
        model = models.Raumbelegung
        fields = [
            "Belegt",
            "Belegungsgrund",
        ]
|
9,401 | 971187dc0e0f02282c8945940d07c011e247667a | """
Kontrollülesanne 7.4c - Elutee number (tähtaeg 28.okt. (incl))
Maksimaalne failide arv: 1
Töö liik: Individuaaltöö
Numeroloogias peetakse tähtsaks elutee numbrit, mille arvutamiseks tuleb liita kokku sünnikuupäeva ja -aasta numbrid
nii, et jõutakse lõpuks ühe numbrini.
Näiteks, oletame, et sünnikuupäev on 15.05.1975. Teha tuleb niisiis järgnev tehe: 1+5+5+1+9+7+5 = 33, 3+3 = 6, seega on
elutee number 6.
Aga kui sünnikuupäevaks on nt. 17.11.1981, siis arvutada tuleb järgmiselt: 1+7+1+1+1+9+8+1 = 29, 2+9 = 11, 1+1=2.
Elutee numbrit arvutab järgmine (rekursiivne) funktsioon, mis võtab argumendiks sünnikuupäeva:
#argument s on sõne, esialgu see on kuupäev, edasi juba arvutatud arv
def elutee(s):
#abimuutaja numbri arvutamiseks
n = 0
# tsükkel, mis vaatab iga sümboli sõnes
for i in s:
if i != ".":
n += int(i) # arvutame summat
# kui saadud arv on väiksem kui 10, siis ongi elutee number käes
if n < 10:
return n
# kui saadud arv on 10 või suurem, siis on vaja uuesti arvutada,
#selleks kasutame jälle sama funktsiooni
else:
return elutee(str(n))
Failis sunnikuupaevad.txt on mingi hulk sünnikuupäevi, iga sünnikuupäev eraldi real. Kirjutada programm, mis tekitab
selle faili põhjal 9 tekstifaili nimedega eluteenumber1.txt, eluteenumber2.txt, ..., eluteenumber9.txt ning jagab
sünnikuupäevad nendesse failidesse vastavalt elutee numbrile (elutee numbri arvutamiseks kasutada funktsiooni elutee).
Näiteks sünnikuupäev 15.05.1975 tuleb kirjutada faili eluteenumber6.txt.
Näide programmi tööst:
Kui faili sunnikuupaevad.txt sisu on
07.02.1969
17.11.1981
29.03.1955
siis faili eluteenumber7.txt sisu peab olema
07.02.1969
29.03.1955
ja faili eluteenumber2.txt sisu peab olema
17.11.1981
Kõik ülejäänud 7 faili peavad selle näite korral küll tekkima, aga jääma tühjaks.
"""
def elutee(s):
    """Return the numerological "life path" number for date string *s*.

    All digit characters of *s* are summed (the '.' separators are
    skipped); if the sum still has more than one digit, the same
    reduction is applied recursively until a single digit remains.
    """
    # Sum every character except the date separators.
    total = sum(int(ch) for ch in s if ch != ".")
    # A single-digit sum is the final life-path number.
    if total < 10:
        return total
    # Otherwise reduce the intermediate sum the same way.
    return elutee(str(total))
# Create all nine output files up front (so they exist even when empty) and
# distribute each birth date from sunnikuupaevad.txt into the file matching
# its life-path number. The old version leaked every handle it opened (the
# nine creation handles were never closed, a fresh output handle was opened
# per row, and the input file was never closed) and used append mode, which
# duplicated lines on a rerun; "w" mode plus one handle per target fixes both.
writers = {}
for number in range(1, 10):
    writers[number] = open("eluteenumber" + str(number) + ".txt", "w", encoding="UTF-8")
try:
    with open("sunnikuupaevad.txt", encoding="UTF-8") as source:
        for row in source:
            date = row.strip()
            # Skip blank lines; elutee("") would yield the nonsense number 0.
            if date:
                writers[elutee(date)].write(row)
finally:
    for handle in writers.values():
        handle.close()
|
9,402 | 6d042a2035eab579193452e4dc44c425125d9515 | from flask_restful import Resource, reqparse
import nltk
from nltk.tokenize import sent_tokenize
tokenizer = nltk.RegexpTokenizer(r"\w+")
# CLASS DESCRIPTION:
# Splits the text into sentences, strips each sentence of punctuation marks,
# and builds a chunk tree for every sentence, extracting verb groups and
# proper nouns.
# added: Temuri Kitoshvili
class Chunk_CleanSentences(Resource):
    """REST resource: tokenizes request text into punctuation-free sentences.

    Each cleaned sentence is POS-tagged and parsed with a chunk grammar that
    groups verb tags (VB*) followed by an optional proper noun (NNP).
    Returns the cleaned sentences as a JSON payload.
    """

    # Request parser: one required string argument named 'text'.
    parser = reqparse.RequestParser()
    parser.add_argument('text',
                        type=str,
                        required=True,
                        help="გთხოვთ შეიყვანოთ სწორი წინადადება")

    def get(self):
        data = Chunk_CleanSentences.parser.parse_args()
        text = data['text']
        sentences = sent_tokenize(text)
        clean_sentences = []
        for sent in sentences:
            # Keep only \w+ tokens, i.e. drop punctuation (module-level tokenizer).
            clear_sentence = tokenizer.tokenize(sent)
            clean_sentences.append(clear_sentence)
        for word in clean_sentences:
            tagged_sent = nltk.pos_tag(word)
            # Chunk grammar: zero or more verb tags, then an optional proper noun.
            chunkGram = r"""Chunk: {<VB.?>*<NNP>?} """
            chuckParser = nltk.RegexpParser(chunkGram)
            chunked = chuckParser.parse(tagged_sent)
            # NOTE(review): draw() opens a blocking Tk window per sentence —
            # this will hang a web worker; confirm whether it is debug-only.
            chunked.draw()
        return {"clean_sentences": clean_sentences}
|
9,403 | 1930aa258ac4fbcdb2972e19bdb2625d2dae4114 | from console import Display
import time
# Graphic files cycled through by show(), in display order.
images = ["/img/erni_l.txt", "/img/erni_s.txt", "/img/erni_logo.txt", "/img/github_logo.txt",
          "/img/upython_logo.txt", "/img/python_logo.txt", "/img/upython_logo_s.txt",
          "/img/MSC_logo.txt"]


def show():
    """Cycle through every bundled graphic on the OLED, five seconds each."""
    screen = Display()
    for picture in images:
        screen.clear(0, 1)
        screen.draw_graphic(picture, 35, 2)
        time.sleep(5)


show()
|
9,404 | fe73a80b15cad025a33930ddd9abb31524cd0244 | # coding: utf-8
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import time
from urllib import urlencode
from urlparse import parse_qs, urlparse, urlunparse
from flask import current_app as app
from flask import url_for
from jose import jwt
from oauth2client.client import flow_from_clientsecrets
from pathlib2 import Path
from .models import Customer
def create_oauth_flow():
    """Prepare Google OAuth workflow from config file.

    Builds the flow from configs/client_secrets.json under the configured
    ROOT_DIR and stores it on the Flask app object as ``app.flow``.
    """
    app.flow = flow_from_clientsecrets(
        str(Path(app.config['ROOT_DIR'], 'configs/client_secrets.json')),
        scope=['email', 'profile'],
        redirect_uri=url_for('auth.oauth2callback', _external=True),
    )
def create_jwt(user, name=None, renewable=False):
    """Create a JWT for *user*, scoped to the user's first customer."""
    # The active customer is always the first one listed on the user.
    first_customer = Customer.get_by_name(user.customers[0])
    return format_jwt(
        sessionize_user(user, name),
        sessionize_customer(first_customer),
        renewable,
    )
def sessionize_user(user, name):
    """Flatten a user document into a session dict, adding _id and google_name."""
    document = user.to_dict(include_meta=True)
    session = dict(document['_source'])
    session['_id'] = document['_id']
    session['google_name'] = name
    return session
def sessionize_customer(customer):
    """Flatten a customer document into a session dict, adding its _id."""
    document = customer.to_dict(include_meta=True)
    session = dict(document['_source'])
    session['_id'] = document['_id']
    return session
def format_jwt(user, active_customer, renewable):
    """Format a JWT and MAC it.

    Embeds the sessionized *user* and *active_customer* as private claims
    and signs the token with the app-configured secret and algorithm.
    """
    now = int(time.time())
    claims = {
        # reserved: https://tools.ietf.org/html/rfc7519#section-4.1
        'exp': now + app.config['AUTH_TOKEN_LIFETIME'],
        'nbf': now,  # not before
        'iss': app.config['AUTH_TOKEN_ISSUER'],
        'iat': now,  # issue date
        # private: https://tools.ietf.org/html/rfc7519#section-4.3
        'user': user,
        'active_customer': active_customer,
        'renewable': renewable,
    }
    return jwt.encode(
        claims,
        key=app.config['AUTH_JWT_SECRET'],
        algorithm=app.config['AUTH_JWT_ALGORITHM'],
    )
def set_params(url, params):
    """Set GET parameters on a URL, merging with any existing query string."""
    parts = urlparse(url)
    # Existing parameters are kept; *params* overrides duplicates.
    query = parse_qs(parts.query)
    query.update(params)
    new_query = urlencode(query, doseq=True)
    return urlunparse(parts._replace(query=new_query))
|
9,405 | d1a179acfda9e76a11f362671fafb50773e2b9d3 | # -- !/python3.10
# Mikhail (myke) Kolodin, 2021
# 2021-10-21 2021-10-21 1.2
# retext.py
# Заменить во входном тексте указанное слово на случайный вариант
# из предложенного набора заменителей.
# Параметры - в командной строке.
import re, random, sys
# Input and output file names used by main().
fin = 'retext-in.txt'
fot = 'retext-out.txt'

# Sample text used by the self-test test1() below.
t1 = """
here we go again and we know:
here we do the same
"""
def redo(text: str, aword: str, subs: list) -> str:
    """Replace every standalone occurrence of *aword* in *text* with one
    randomly chosen substitute from *subs*.

    One substitute is drawn per call (matching the original semantics of a
    single random.choice). Word boundaries are enforced with look-arounds,
    fixing the old ``(\\W)word(\\W)`` pattern which consumed the separating
    character and therefore missed adjacent occurrences ("we we").
    *aword* is escaped, and the replacement goes through a callable so
    backslashes in the substitutes are taken literally.
    """
    replacement = random.choice(subs)
    pattern = r"(?<!\w)" + re.escape(aword) + r"(?!\w)"
    # .strip() preserves the original behavior of trimming surrounding
    # whitespace from the result.
    return re.sub(pattern, lambda _match: replacement, text).strip()
def test1():
    """Manual smoke test: replace "we" in the sample text t1."""
    word = "we"
    substitutes = ["they", "he", "she"]
    print(word, "->", substitutes, "\n", t1, "\n", redo(t1, word, substitutes))
#test1()
def main():
    """Entry point: replace the command-line word in retext-in.txt.

    argv[1] is the word to replace; the remaining arguments are the pool
    of substitutes. The result is written to retext-out.txt.
    """
    print("got params:", sys.argv)
    # Guard clause: need at least the word and one substitute.
    if len(sys.argv) < 3:
        print("Not enough parameters")
        return
    word, *substitutes = sys.argv[1:]
    print(word, substitutes)
    with open(fin) as source:
        text = source.read()
    result = redo(text, word, substitutes)
    print("text:", text)
    print("out:", result)
    with open(fot, 'w') as sink:
        sink.write(result)


main()
|
9,406 | 5c5922fd3a7a5eec121d94e69bc972089e435175 |
#################################################
### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###
#################################################
# file to edit: dev_nb/10_DogcatcherFlatten.ipynb
import pandas as pd
import argparse
import csv
import os
import numpy as np
import string
def FivePrimeArea(df):
    """Attach the 5' area (gene start to end of the first exon) per gene.

    Adds FA_start / FA_end / FA_length columns and collapses the frame to
    one row per gene name.
    """
    ordered = df.sort_values(by=["chr", "end"], ascending=True)
    ordered["FA_start"] = ordered["gene_start"]
    # First exon per gene = smallest end after the ascending sort.
    exons = ordered[ordered["type"] == "exon"].copy()
    exons = exons.drop_duplicates(subset=["name"], keep="first")
    exons["FA_end"] = exons["end"]
    merged = pd.merge(ordered, exons[["name", "FA_end"]], how="left", on="name")
    merged["FA_length"] = merged["FA_end"] - merged["FA_start"]
    return merged.drop_duplicates(subset=["name"], keep="first")
def ThreePrimeArea(df):
    """Attach the 3' area (start of the last exon to gene end) per gene.

    Adds LA_start / LA_end / LA_length columns and collapses the frame to
    one row per gene name.
    """
    ordered = df.sort_values(by=["chr", "end"], ascending=False)
    ordered["LA_end"] = ordered["gene_end"]
    # Last exon per gene = largest end, first row after the descending sort.
    exons = ordered[ordered["type"] == "exon"].copy()
    exons = exons.drop_duplicates(subset=["name"], keep="first")
    exons["LA_start"] = exons["start"]
    merged = pd.merge(ordered, exons[["name", "LA_start"]], how="left", on="name")
    merged["LA_length"] = merged["LA_end"] - merged["LA_start"]
    return merged.drop_duplicates(subset=["name"], keep="first")
def getAreas(df):
    """
    Compute the first and last exon "areas" (so called because they need
    not be exons) separately for the + and - strands, then recombine.
    """
    la_cols = ["name", "LA_start", "LA_end", "LA_length"]
    merged = []
    # Process plus strand first, then minus, to keep the original row order.
    for strand in ("+", "-"):
        strand_df = df[df["strand"] == strand]
        fa = FivePrimeArea(strand_df)
        la = ThreePrimeArea(strand_df)[la_cols]
        merged.append(pd.merge(fa, la, on="name"))
    return pd.concat(merged)
def chrDIC(df):
    """Split a gtf frame into a dict of per-chromosome sub-frames.

    Keys are the unique values of the 'chr' column. (The old code also
    bound the dict to an unused ``d_gtf_chr`` alias, removed here.)
    """
    chr_names = df['chr'].unique().tolist()
    return {chrom: df[df["chr"] == chrom] for chrom in chr_names}
def countInside(df, start, end):
    """Return the comma-joined names of rows strictly inside (start, end).

    A row qualifies when start < row.start and row.end < end (both bounds
    exclusive). Returns np.nan when nothing matches, so callers can use
    dropna() to discard empty results.
    """
    inside = df[(start < df["start"]) & (df["end"] < end)]
    # Truthiness replaces the old len(names) > 0 check.
    names = ",".join(inside['name'].unique().tolist())
    return names if names else np.nan
def removeInside(df):
    """Drop genes that lie strictly inside another gene of the same frame.

    Returns (filtered frame, one-column frame of the removed gene names).
    """
    d_chr = chrDIC(df)
    # For every row: comma-joined names of genes fully contained in it (or NaN).
    df['genes_inside'] = df.apply(lambda row: countInside(d_chr[row['chr']], row["start"], row["end"]), axis=1)
    df2 = df.dropna(subset=['genes_inside'])
    # Flatten the comma-joined name lists and de-duplicate.
    all_names = []
    for i in range(len(df2)):
        names = df2["genes_inside"].iloc[i]
        names = names.split(",")
        all_names = all_names + names
    inside_genes = list(set(all_names))
    l = len(inside_genes)
    print(f"Removing {l} genes that are inside other genes")
    df_inside = pd.DataFrame(inside_genes, columns=['name'])
    # Keep only genes that are not contained in any other gene.
    df = df[~df["name"].isin(df_inside["name"])].copy()
    del df["genes_inside"]
    return df, df_inside
def flattenGTF(file_in, file_type, NEXTFLOW=True):
    """Flatten an annotation file to one record per gene.

    Supported file_type values: ENSEMBL, BED, REFSEQGFF, REFSEQBED.
    Computes first/last "areas" per gene, removes genes nested inside
    others, and writes *_flat.txt, *_flat.gtf and *_flat_CHROMNAMES.txt
    (basenames only when NEXTFLOW is True). Returns the flattened frame.
    """
    if file_type == "ENSEMBL":
        print(f"Flattening ENSEMBL like genome {file_in}")
        my_col = ["chr","source","type","start","end","dot","strand","dot2","gene_id"]
        df = pd.read_csv(file_in, sep="\t", header=None, names=my_col, comment="#", low_memory=False)
        df["chr"] = df["chr"].astype(str)
        df = df[~df["chr"].str.contains("\.")]  # Take out patches
        df.sort_values(by=["chr","start"], inplace=True, ascending=True)
        fout = f"{file_in[:-4]}_sort.gtf"
        df.to_csv(fout, sep="\t", index=None, quoting=csv.QUOTE_NONE, header=None)
        # Gene name = first ';'-separated token of the attribute column.
        df["name"] = df["gene_id"].str.split(';', expand=True)[0]
        df["name"] = df["name"].str.replace("gene_id ","")
        df["name"] = df["name"].str.replace("\"","")
        df["type"] = df["type"].astype(str)
        # Copy the gene-level start/end onto every record of the same gene.
        df_gene = df[df["type"]=="gene"].copy()
        df_gene["gene_start"] = df_gene["start"]
        df_gene["gene_end"] = df_gene["end"]
        df_gene = df_gene[["name","gene_start","gene_end"]].copy()
        df = pd.merge(df, df_gene, how="left", on="name")
        df = getAreas(df)
        df["start"] = df["gene_start"]
        df["end"] = df["gene_end"]
        # df = df[["chr","start","end","strand","name","type"]].copy()
    if file_type == "BED":
        # BED input is already one record per gene; the 5'/3' areas
        # degenerate to the full interval.
        my_col = ["chr","start","end","name","strand"]
        df = pd.read_csv(file_in, sep="\t", header=None, names=my_col, comment="#", low_memory=False)
        df["FA_start"] = df["start"]
        df["FA_end"] = df["end"]
        df["LA_start"] = df["start"]
        df["LA_end"] = df["end"]
        df["dot"] = "."
        df["dot2"] = "."
        df["source"] = "NA"
        df["type"] = "NA"
        df["gene_id"] = df["name"]
    if file_type == "REFSEQGFF":
        # Chrome numbers are changed. Need to change back to chr1 etc.
        # https://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.39#/def_asm_Primary_Assembly
        print(f"Flattening REFSEQGFF like genome")
        # https://ftp.ncbi.nlm.nih.gov/genomes/refseq/vertebrate_mammalian/Homo_sapiens/reference/
        # download this GCF_000001405.39_GRCh38.p13_genomic.gtf.gz
        # sort and index in IGV
        # Example line:
        # NC_000001.11 BestRefSeq gene 11874 14409 . + . gene_id "DDX11L1"; ...
        my_col = ["chr","source","type","start","end","dot","strand","dot2","gene_id"]
        # RefSeq accession -> UCSC-style chromosome name (GRCh38 primary assembly).
        replace_list = [("chr1","NC_000001.11"),
                        ("chr2","NC_000002.12"),
                        ("chr3","NC_000003.12"),
                        ("chr4","NC_000004.12"),
                        ("chr5","NC_000005.10"),
                        ("chr6","NC_000006.12"),
                        ("chr7","NC_000007.14"),
                        ("chr8","NC_000008.11"),
                        ("chr9","NC_000009.12"),
                        ("chr10","NC_000010.11"),
                        ("chr11","NC_000011.10"),
                        ("chr12","NC_000012.12"),
                        ("chr13","NC_000013.11"),
                        ("chr14","NC_000014.9"),
                        ("chr15","NC_000015.10"),
                        ("chr16","NC_000016.10"),
                        ("chr17","NC_000017.11"),
                        ("chr18","NC_000018.10"),
                        ("chr19","NC_000019.10"),
                        ("chr20","NC_000020.11"),
                        ("chr21","NC_000021.9"),
                        ("chr22","NC_000022.11"),
                        ("chrX","NC_000023.11"),
                        ("chrY","NC_000024.10")]
        df = pd.read_csv(file_in, sep="\t", header=None, names=my_col, comment="#", low_memory=False)
        df = df[df["type"]=="gene"].copy()
        # Change NC names to chr
        for l in replace_list:
            df["chr"] = np.where(df["chr"]==l[1], l[0], df["chr"])
        df = df[~df["chr"].str.contains("\.")]  # Take out patches
        df["name"] = df["gene_id"].str.split(';', expand=True)[0]
        df["name"] = df["name"].str.replace("ID=gene-","")
        df["type"] = df["type"].astype(str)
        df_gene = df[df["type"]=="gene"].copy()
        df_gene["gene_start"] = df_gene["start"]
        df_gene["gene_end"] = df_gene["end"]
        df_gene = df_gene[["name","gene_start","gene_end"]].copy()
        df = pd.merge(df, df_gene, how="left", on="name")
        df = getAreas(df)
        df["start"] = df["gene_start"]
        df["end"] = df["gene_end"]
        # df = df[["chr","start","end","strand","name","type"]].copy()
    if file_type == "REFSEQBED":
        # Example line:
        # chr1 11873 14409 NR_046018 0 +
        # 14409 14409 0 3 354,109,1189, 0,739,1347,
        my_col = ["chr","start","end","name","dot","strand","start1","start2","dot2","dot3","gene_id","gene_id2"]
        df = pd.read_csv(file_in, sep="\t", header=None, names=my_col, comment="#", low_memory=False)
        df = df[["chr","start","end","name","strand"]]
        df["FA_start"] = df["start"]
        df["FA_end"] = df["end"]
        df["LA_start"] = df["start"]
        df["LA_end"] = df["end"]
        df["dot"] = "."
        df["dot2"] = "."
        df["source"] = "NA"
        df["type"] = "NA"
        df["gene_id"] = df["name"]
    # Drop genes nested inside other genes, strand by strand.
    df_plu = df[df["strand"]=="+"].copy()
    df_min = df[df["strand"]=="-"].copy()
    df_plu, df_plu_inside = removeInside(df_plu)
    df_min, df_min_inside = removeInside(df_min)
    # Collapse records sharing a start (+) / end (-), keeping the first
    # after sorting so the retained record is the widest.
    df_plu.sort_values(by=["chr","end"], inplace=True, ascending=False)
    df_plu.drop_duplicates(subset=["start","chr"], keep='first', inplace=True)
    df_min.sort_values(by=["chr","start"], inplace=True, ascending=True)
    df_min.drop_duplicates(subset=["end","chr"], keep='first', inplace=True)
    df = pd.concat([df_plu, df_min])
    df = df.sort_values(by=["chr","end"], ascending=False)
    gtf = df[["chr","source","type","start","end","dot","strand","dot2","gene_id"] ]
    df = df[["chr","start","end","name","strand","FA_start","FA_end","LA_start","LA_end"]]
    if NEXTFLOW:
        # Under Nextflow, outputs go to the task working directory.
        file_in = os.path.basename(file_in)
    fout = f"{file_in[:-4]}_flat.txt"
    fout2 = f"{file_in[:-4]}_flat.gtf"
    fout3 = f"{file_in[:-4]}_flat_CHROMNAMES.txt"
    print(f"Outputting flat file {fout}")
    df.to_csv(fout, sep="\t", index=None)
    gtf.to_csv(fout2, sep="\t", index=None, quoting=csv.QUOTE_NONE, header=None)
    gtf_names = gtf[["chr"]].copy()
    gtf_names.drop_duplicates(subset=["chr"], keep='first', inplace=True)
    gtf_names.to_csv(fout3, sep="\t", index=None)
    return df
import argparse
def parse_arguments():
    """Parse the CLI: --annotation_in path and --file_type kind (default ENSEMBL)."""
    parser = argparse.ArgumentParser(
        description='Flatten gtf or bed to first and last exon file. Options in currently are ENSEMBL, BED')
    parser.add_argument('--annotation_in', action='store', metavar='annotation_in')
    parser.add_argument('--file_type', action='store', metavar='file_type', default="ENSEMBL")
    return parser.parse_args()
if __name__=="__main__":
    # CLI entry point: flatten the annotation file given on the command line.
    args = parse_arguments()
    file_in = args.annotation_in
    file_type = args.file_type
    flattenGTF(file_in, file_type)
|
9,407 | 1c2967c26c845281ceb46cc1d8c06768298ef6b6 | import numpy as np
import pandas as pd
from unrar import rarfile
import numpy as np
import pandas as pd
import tushare as ts
import os
# ---- Run configuration (edit these for each monthly update) ----
year_month='201911'  # month of the raw archive, e.g. FutAC_Min1_Std_201911
contract_kind='NI'   # futures product code
rar_data_file_path='C:/Users/lenovo/Documents/WeChat Files/yiranli13/FileStorage/File/2020-01/'  # directory with the .rar archives
main_code_path='C:/Users/lenovo/Documents/WeChat Files/yiranli13/FileStorage/File/2020-01/main/main_/'  # main-contract .npy arrays
clean_data_path='D:/1_min补充统一/'  # cleaned minute-data directory (appended to)
end_date='20200107'  # today; upper bound for the tushare trade calendar
time_range_path='D:/统一所有品种时间范围.csv'  # CSV with the unified session timestamps
# save_month_fill_data_path='D:/1_min补充统一/'+contract_kind+'主力连续'+'_'+month+'.csv'
def renew_commodity_future(year_month:str, contract_kind:str, main_code_path:str, rar_data_file_path:str, clean_data_path:str, time_range_path:str, end_date:str, commodity_bool=True):
    '''
    Monthly update of commodity-futures minute data.

    year_month: e.g. '201911' — suffix of the raw archive name, as in FutAC_Min1_Std_201911
    contract_kind: product code(s), e.g. ['A','B']-style strings
    main_code_path: directory holding the dominant-contract (.npy) arrays
    rar_data_file_path: directory containing the raw archive such as FutAC_Min1_Std_201911.rar (path only, no file name)
    clean_data_path: directory with the cleaned minute data; new rows are appended per product there
    time_range_path: CSV listing the trading-session timestamps, incl. file name, e.g. D:/统一所有品种时间范围.csv
    end_date: e.g. '20200103' — today's date, upper bound of the tushare trade calendar; all reads and merges are driven by that calendar
    commodity_bool: True for commodity futures, False for financial futures (default True)
    '''
    month=year_month
    # Commodity and financial futures archives use different name stems.
    if commodity_bool:
        file_name=rar_data_file_path+'FutAC_Min1_Std_'+month+'.rar'
    else:
        file_name=rar_data_file_path+'FutSF_Min1_Std_'+month+'.rar'
    orignial_path=main_code_path
    specifi_path=orignial_path+contract_kind+'_1day_main.npy'
    rar = rarfile.RarFile(file_name, pwd='www.jinshuyuan.net')
    # Directory of the previously cleaned data.
    orignal_clean_csv_path=clean_data_path
    pwd='www.jinshuyuan.net'
    data=np.load(specifi_path)
    time_0931_15=pd.read_csv(time_range_path)['date'].values.tolist()
    rar.extractall(path=file_name.split('.')[0])
    # end_date is required so the clipped window aligns with the
    # main-contract series; positions are located by month below.
    # NOTE(review): hard-coded tushare API token — move to config/env.
    pro = ts.pro_api('3d832df2966f27c20e6ff243ab1d53a35a4adc1c64b353cc370ac7d6')
    ts.set_token('3d832df2966f27c20e6ff243ab1d53a35a4adc1c64b353cc370ac7d6')
    date_df=pro.trade_cal(exchange='DCE', start_date='20100101', end_date=end_date)
    date_df=date_df.loc[date_df['is_open']==1]
    date_list=date_df['cal_date'].tolist()
    # ==========================================================================
    # Indices of the trading days of the target month go to target_date_index.
    date_df=pd.DataFrame({'date':date_list})
    date_df['month']=date_df['date'].str[:6]
    target_date=date_df.loc[date_df['month']==month]
    target_date_index=target_date.index.values
    target_date=target_date['date'].values
    # Dominant contract code for each target day.
    data=data.reshape(-1)
    contract_main_pool=data[target_date_index]
    # Strip the exchange suffix from the contract code.
    contract_main_pool=(pd.Series(contract_main_pool).str.split('.').str[0]+'.csv').values
    file_pools=os.listdir(file_name.split('.')[0])
    # Zhengzhou exchange file names are upper case, the others lower case,
    # so fall back to lower case when the first lookup misses.
    if contract_main_pool[0] not in file_pools:
        contract_main_pool=[contract_file.lower() for contract_file in contract_main_pool]
        if contract_main_pool[0] not in file_pools:
            print(f'找不到{contract_main_pool[0]}')
    # Full paths of the per-contract csv files.
    contract_main_pool=(file_name.split('.')[0]+'/'+pd.Series(contract_main_pool)).values
    # (len(target_date),contract_main_pool.shape[0])
    row_1=['市场代码','合约代码', '时间', '开','高', '低', '收', '成交量', '成交额', '持仓量']
    orignal_data=[]
    orignal_data.append(row_1)
    # Collect the dominant-contract rows for every target trading day.
    for index in range(len(target_date)):
        date=target_date[index]
        one_file_path=contract_main_pool[index]
        df=pd.read_csv(one_file_path, encoding='gbk')
        df['date']=df['时间'].str[:10]
        df['date2']=df['date'].str.replace('-','')
        result=df.loc[df['date2']==date]
        if result.shape[0]>0:
            for row_index in range(len(result)):
                target_row=result.iloc[row_index].tolist()
                # Drop the two helper columns added above.
                clean_row=target_row[:-2]
                orignal_data.append(clean_row)
            print(f'{contract_kind} {date} finished!')
        else:
            print(f'没找到合约品种{contract_kind}在{date}')
    print(f'{contract_kind}在{month}月的主力合约数据读取完成')
    final_df=pd.DataFrame(orignal_data[1:], columns=orignal_data[0])
    final_df['date']=final_df['时间'].str[:10]
    final_df_date=final_df['date'].unique()
    final_df['date']=final_df['时间'].str[:10]
    final_df['time']=final_df['时间'].str[10:].str.strip()
    final_df['时间']=final_df['date']+' '+final_df['time']
    final_df=final_df.sort_values('时间')
    final_df['合约代码']=final_df['合约代码'].str.upper()
    final_df=final_df.sort_values('时间')
    # ======================= clip against constant_time =======================
    final_df['transf_date']=pd.to_datetime(final_df['date'])
    final_df.set_index('transf_date', inplace=True)
    combine_all_df=pd.DataFrame()
    final_df['date2']=final_df['date'].str.replace('-','')
    # Fill missing timestamps day by day for the whole month.
    for date_index in range(len(target_date)):
        # Slice one trading day.
        target_df=final_df.loc[final_df['date2']==target_date[date_index]]
        # Actual number of rows for the day...
        target_num=len(target_df)
        # ...versus the theoretical number of session timestamps.
        theory_num=len(time_0931_15)
        # Three cases: (1) trading day with no data at all, (2) partial data,
        # (3) complete data.
        if target_num>0:
            # Distinguish cases 2 and 3.
            have_time=target_df['time'].values.tolist()
            lack_time=[x for x in time_0931_15 if x not in have_time]
            # Case 2: some timestamps are missing.
            if lack_time:
                print(f'{target_date[date_index]} 不连续')
                # 12 columns; fill everything with NaN first, then set what is known.
                insert_array=np.empty(shape=(len(lack_time),12))
                insert_array.fill(np.nan)
                insert_df=pd.DataFrame(insert_array, columns=['市场代码','合约代码','时间','开','高','低','收','成交量','成交额','持仓量','date','time'])
                insert_df['date']=target_date[date_index]
                insert_df['time']=lack_time
                # Fewer missing stamps than the full list means the day has
                # partial data, so the contract code can be filled in.
                if len(lack_time)<len(time_0931_15):
                    insert_df['合约代码']=target_df['合约代码'].unique()[-1]
                # Assemble the complete day...
                combine_insert_df=pd.concat([target_df, insert_df])
                # ...and append it to the accumulator.
                combine_all_df=pd.concat([combine_all_df, combine_insert_df])
            # NOTE(review): when a day is already complete (lack_time empty),
            # target_df is never appended to combine_all_df — that day's data
            # is silently dropped. Looks like a bug; confirm intent.
        # Case 1: no data at all for the day — fill it entirely.
        else:
            print(f'{target_date[date_index]}empty ')
            lack_time=[x for x in time_0931_15]
            # 12 columns; fill everything with NaN first, then set what is known.
            insert_array=np.empty(shape=(len(lack_time),12))
            insert_array.fill(np.nan)
            insert_df=pd.DataFrame(insert_array, columns=['市场代码','合约代码','时间','开','高','低','收','成交量','成交额','持仓量','date','time'])
            insert_df['date']=target_date[date_index]
            insert_df['time']=lack_time
            # Append the synthetic day to the accumulator.
            combine_all_df=pd.concat([combine_all_df, insert_df])
    combine_all_df['时间']=combine_all_df['date']+' '+combine_all_df['time']
    # Re-sort by timestamp.
    combine_all_df=combine_all_df.sort_values('时间')
    combine_all_df.reset_index(inplace=True)
    # Output in the fixed column order.
    combine_all_df=combine_all_df[['市场代码', '合约代码', '时间', '开', '高', '低', '收', '成交量', '成交额', '持仓量','date','time']]
    combine_all_df['时间']=combine_all_df['时间'].str.replace('-','')
    combine_all_df['date']=combine_all_df['date'].str.replace('-','')
    # combine_all_df.to_csv(save_month_fill_data_path,index=False,encoding='utf-8-sig')
    # ============================ persist the data ============================
    combine_df=combine_all_df.copy()
    contract_type=contract_kind
    combine_df=combine_df.sort_values('时间')
    # ============================ start slicing ===============================
    # end_time+1 marks the start of the next slice (with a leading 0 added);
    # days are cut uniformly at the 15:15 afternoon close.
    end_time='15:15:00'
    end_index=np.where(combine_df['time']==end_time)[0]+1
    end_index=np.hstack(([0], end_index))
    start=end_index[:-1]
    end=end_index[1:]
    # ============ night session preceding the first trading day ==============
    # With the slicing above, the first trading day lacks the previous day's
    # night session, so build a synthetic timestamp block for it.
    last_day=date_df['date'].iloc[target_date_index[0]-1]
    last_day=last_day[:4]+'-'+last_day[4:6]+'-'+last_day[6:]
    first_day_have=combine_df[start[0]:end[0]]['time'].values
    full_time=combine_df['time'].unique()
    full_time.sort()
    # NOTE(review): the magic 179 presumably counts night-session minutes — confirm.
    first_day_lack=[x for x in full_time[-179:]]
    first_day_lack.sort()
    lack_array=np.empty(shape=(len(first_day_lack),12))
    lack_array.fill(np.nan)
    # ================= frame for the missing night session ===================
    first_day_lack_df=pd.DataFrame(lack_array, columns=combine_df.columns)
    first_day_lack_df['time']=first_day_lack
    first_day_lack_df['date']=last_day
    first_day_lack_df['时间']=first_day_lack_df['date']+' '+first_day_lack_df['time']
    last_df=pd.read_csv(contract_main_pool[0], encoding='gbk')
    # Check whether the previous day actually had a night session.
    last_df['date']=last_df['时间'].str[:10]
    last_df['time']=last_df['时间'].str[11:]
    # Fill in the night-session data.
    last_time_pool=last_df.loc[last_df['date']==last_day]['time'].values
    last_day_have_date=[]
    # Non-empty means the previous trading day has data to copy over.
    if last_time_pool.shape[0]>0:
        print(f'期货品种{contract_kind}在前一个交易日{last_day}有夜盘数据,需要读取覆盖')
        last_day_have_date=[x for x in last_time_pool]
    if last_day_have_date:
        # Overwrite the NaN placeholder rows with the real night-session rows.
        for index in range(len(last_day_have_date)):
            origanl_index=last_df.loc[(last_df['date']==last_day)&(last_df['time']==last_day_have_date[index])].index[0]
            target_index=first_day_lack_df.loc[first_day_lack_df['time']==last_day_have_date[index]].index[0]
            first_day_lack_df.iloc[target_index]=last_df.iloc[origanl_index]
    else:
        print(f'期货品种{contract_kind}在前一个交易日{last_day}没有夜盘数据,不需要读取覆盖')
        print('直接使用np.nan填充上一个交易日的夜盘数据')
    for index in range(first_day_lack_df.shape[0]):
        combine_df=combine_df.append(first_day_lack_df.iloc[index])
    combine_df['时间']=combine_df['时间'].str.replace('-','')
    combine_df['date']=combine_df['date'].str.replace('-','')
    combine_df.sort_values('时间', inplace=True)
    # ======================= missing part is now filled ======================
    # combine_df=pd.concat([first_day_lack_df,combine_df])
    # # ===================== re-sort by time ================================
    # combine_df=combine_df.sort_values('时间')
    # ============================ slice again ================================
    end_index=np.where(combine_df['time']==end_time)[0]+1
    end_index=np.hstack(([0], end_index))
    start=end_index[:-1]
    end=end_index[1:]
    # ============ split per price/volume column at fixed times ===============
    col_type_list=['开','高','低','收','成交量','成交额','持仓量']
    dir_name_list=['open','high','low','close','volume','amount','position']
    # merge_df is currently unused.
    # Sessions running until 01:00:
    #merge_df=pd.DataFrame({'time':with_night_01})
    # Sessions until 02:30; this version has no call-auction stamps
    # (time_0931_15 without 9:00 and 21:00).
    merge_df=pd.DataFrame({'time':time_0931_15})
    combine_df['date']=combine_df['时间'].str[:8]
    for index in range(len(col_type_list)):
        col_type=col_type_list[index]
        # Accumulator for this column type (one row per trading day).
        csv_df=pd.DataFrame()
        for s_index, e_index in zip(start, end):
            # ===================== one trading day ===========================
            res=combine_df.iloc[s_index:e_index,:]
            one_date_df=pd.DataFrame(res[col_type].values.reshape(1,-1), columns=res['time'].values.tolist())
            one_date_df['main_contract_code']=res.iloc[-1]['合约代码']
            one_date_df['date']=res.iloc[-1]['date']
            # ===================== output layout =============================
            col_layout=['date']
            col_layout=np.hstack((col_layout, res['time'].values.tolist()))
            col_layout=np.hstack((col_layout, ['main_contract_code']))
            one_date_df=one_date_df[col_layout]
            # ===================== merge =====================================
            csv_df=pd.concat([csv_df, one_date_df])
        # ================= append to the existing clean data =================
        # Date formats differ and must be normalized, or the rows will not align.
        orignal_csv_df=pd.read_csv(orignal_clean_csv_path+contract_kind+'_1min_'+dir_name_list[index]+'.csv')
        column_ouput_form=orignal_csv_df.columns.values
        orignal_date_pool=pd.to_datetime(orignal_csv_df['date'], format='%Y-%m-%d').values
        # NOTE(review): csv_df['date'] is still YYYYMMDD here — confirm this
        # format string is intended.
        current_date_pool=pd.to_datetime(csv_df['date'], format='%Y-%m-%d').values
        orignal_csv_df['date']=pd.to_datetime(orignal_csv_df['date'], format='%Y-%m-%d').dt.strftime('%Y-%m-%d')
        csv_df['date']=pd.to_datetime(csv_df['date'], format='%Y%m%d').dt.strftime('%Y-%m-%d')
        # Sanity check: a contract code must carry four digits.
        main_code=csv_df['main_contract_code'].iloc[0]
        main_code_num=csv_df['main_contract_code'].str.findall(r'[0-9]+').iloc[0][0]
        if len(main_code_num)==3:
            print(f'合约代码{main_code}缺少一位数字,将被替换')
            # Insert the decade digit taken from the target month.
            csv_df['main_contract_code']=csv_df['main_contract_code'].str[:2]+month[0]+csv_df['main_contract_code'].str[2:]
            main_code=csv_df['main_contract_code'].iloc[0]
            print(f'合约代码{main_code}')
        # Abort the append when old and new dates intersect — that would mean
        # the update was already applied once.
        intersection_pool=[date for date in orignal_date_pool if date in current_date_pool]
        if not intersection_pool:
            print(f'新旧数据没有时间交集,{contract_kind} {dir_name_list[index]} 将被添加到先前数据中')
            orignal_csv_df=pd.concat([orignal_csv_df, csv_df])
            orignal_csv_df.sort_values('date', inplace=True)
            orignal_csv_df=orignal_csv_df[column_ouput_form]
            orignal_csv_df.to_csv(orignal_clean_csv_path+contract_kind+'_1min_'+dir_name_list[index]+'.csv', index=False)
            print(f'期货品种{contract_kind} {dir_name_list[index]} 完成')
        else:
            print(f'新旧数据的时间出现交集!!{contract_kind} {dir_name_list[index]} 将不会被添加到先前数据中')
|
9,408 | ba42c6af53329035f7ab72f3f1ac87cd90d9dc7f | # difference between size an shape of an image
import cv2
# Python 2 / OpenCV demo: ndarray size vs. shape of a loaded image.
img = cv2.imread('police.jpg')
print img.size # total element count (rows*cols*channels); for 8-bit images this equals the in-memory byte size, slightly larger than the compressed file
print img.shape # (rows, cols) — plus a channels entry for color images
cv2.imshow("My Picture", img)
cv2.waitKey(0) # block until any key is pressed
cv2.destroyAllWindows()
9,409 | 9ffe350ff9a568111620ef7dafef83d341f6f01e | # -*- coding: utf-8 -*-
# https://github.com/Raschka-research-group/coral-cnn/tree/master/model-code/resnet34
from absl import flags, app
from Rank_consistent_model_fix import *
from Rank_consistent_model import *
from random import shuffle, random
import tensorflow as tf
import numpy as np
# import cv2
import os
import sys
import datetime
# ---------------------------------------------------------------------------
# Command-line flags: dataset locations, image/batch geometry, optimization
# hyper-parameters, and checkpoint/output paths.
# ---------------------------------------------------------------------------
flags.DEFINE_string('img_path', '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/', 'Image directory')
flags.DEFINE_string('txt_path', '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/train.txt', 'Text (with label information) directory')
flags.DEFINE_string('val_img_path', '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/', 'Validate image path')
flags.DEFINE_string('val_txt_path', '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/test.txt', 'Validate text path')
flags.DEFINE_string("val_txt_path_2", "D:/[1]DB/[1]second_paper_DB/[1]First_fold/_MORPH_MegaAge_16_69_fullDB/[1]Full_DB/testB.txt", "Validataion text path")
flags.DEFINE_integer('img_size', 128, 'Image size')
flags.DEFINE_integer('ch', 3, 'Image channels')
flags.DEFINE_integer('batch_size', 256, 'Train Batch size')
flags.DEFINE_integer("val_batch_size", 128, "Validation Batch size")
flags.DEFINE_integer("val_batch_size_2", 128, "Validation2 batch size")
flags.DEFINE_integer('num_classes', 48, 'Number of classes')
flags.DEFINE_integer('epochs', 5000, 'Total epochs of training')
flags.DEFINE_float("lr", 5e-5, "Learning rate")
flags.DEFINE_string('weights', "/yuwhan/yuwhan/Projects/[1]Age_related_work_2.x_My_version/Rank-consistent Ordinal Regression for Neural/resnet34_imagenet_1000_no_top.h5", '')
flags.DEFINE_bool('train', True, 'True or False')
flags.DEFINE_bool('pre_checkpoint', False, 'True or False')
flags.DEFINE_string('pre_checkpoint_path', '', 'Saved checkpoint path')
flags.DEFINE_string('save_checkpoint', '', 'Save checkpoint path')
flags.DEFINE_string("graphs", "", "")
flags.DEFINE_integer('n_test', 10000, 'Number of test images')
flags.DEFINE_string('test_txt', '', 'Test text(label) path')
flags.DEFINE_string('test_img', '', 'Test images path')
flags.DEFINE_string("output_loss_txt", "/yuwhan/Edisk/yuwhan/Edisk/4th_paper/age_banchmark/UTK/loss_CORAL.txt", "")

# Parse flags eagerly so FLAGS is usable at module scope.
FLAGS = flags.FLAGS
FLAGS(sys.argv)

# Adam optimizer shared by the training step below.
optimizer = tf.keras.optimizers.Adam(FLAGS.lr, beta_1=0.9, beta_2=0.99)
def _func(filename, label):
    """tf.data map fn for training: decode/resize an image to
    (img_size-8, img_size-8), scale to [0,1], randomly flip, and return
    (image, one_hot(label-16), label-16).  Labels are shifted so age 16
    becomes rank 0 (num_classes ordinal bins).
    """
    image_string = tf.io.read_file(filename)
    decode_image = tf.image.decode_jpeg(image_string, channels=3)
    decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS.img_size - 8]) / 255.

    # BUG FIX: the original used Python's random() inside the mapped
    # function; Dataset.map traces this function once, so the coin flip was
    # evaluated at trace time and the SAME flip decision was baked into the
    # graph for every image.  Use a graph-side op so each example gets its
    # own 50% flip.
    decode_image = tf.image.random_flip_left_right(decode_image)

    label = label - 16
    one_hot = tf.one_hot(label, FLAGS.num_classes)

    return decode_image, one_hot, label
def val_func(name, label):
    """tf.data map fn for validation: decode/resize/scale one image and
    one-hot the age label shifted to a 0-based rank (age - 16).
    No augmentation is applied on the validation path.
    """
    raw_bytes = tf.io.read_file(name)
    img = tf.image.decode_jpeg(raw_bytes, channels=3)
    img = tf.image.resize(img, [FLAGS.img_size - 8, FLAGS.img_size - 8]) / 255.
    rank = int(label) - 16
    encoded = tf.one_hot(rank, FLAGS.num_classes)
    return img, encoded
#@tf.function
def run_model(model, images):
    """Training-mode forward pass; returns the model's (logits, probs) pair.

    Deliberately left un-compiled (see the commented decorator) so it can be
    called from inside train_step's own @tf.function.
    """
    raw_logits, sigmoid_probs = model(images, training=True)
    return raw_logits, sigmoid_probs
@tf.function
def train_step(model, images, levels, imp):
    """One optimizer step of the importance-weighted CORAL ordinal loss.

    Args:
        model:  Keras model producing (logits, probs), each (batch, K-1).
        images: input image batch.
        levels: (batch, K-1) binary targets — 1 while the true rank exceeds
                threshold k, else 0 (see make_levels).
        imp:    (K-1,) per-task importance weights (task_importance_weights).

    Returns:
        Scalar mean loss over the batch.
    """
    with tf.GradientTape() as tape:
        logits, probs = run_model(model, images)
        #total_loss = (-tf.reduce_sum((tf.nn.log_softmax(logits, axis=2)[:,:,1]*levels + tf.nn.log_softmax(logits, axis=2)[:,:,0]*(1-levels))*imp, 1))
        # total_loss = (-tf.reduce_sum( (tf.math.log_sigmoid(logits)*levels + tf.math.log(1. - tf.nn.sigmoid(logits))*(1-levels))*imp, 1))
        # Weighted binary cross-entropy summed over the K-1 tasks.
        # log_sigmoid(x) - x == log(1 - sigmoid(x)); written this way for
        # numerical stability instead of log(1 - sigmoid(x)) directly.
        total_loss = (-tf.reduce_sum( (tf.math.log_sigmoid(logits)*levels + (tf.math.log_sigmoid(logits) - logits)*(1-levels))*imp, 1))
        #total_loss = -tf.reduce_sum((tf.math.log(tf.nn.softmax(logits, 2)[:, :, 1] + 1e-7) * levels \
        #                            + tf.math.log(tf.nn.softmax(logits, 2)[:, :, 0] + 1e-7) * (1 - levels)) * imp, 1)
        total_loss = tf.reduce_mean(total_loss)
    gradients = tape.gradient(total_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return total_loss
def task_importance_weights(data):
    """Compute CORAL per-threshold task-importance weights.

    For each ordinal threshold t (one per unique label value except the
    largest), the raw weight is sqrt(max(#{y > t}, N - #{y > t})); the
    vector is normalised by its maximum.

    Args:
        data: iterable of (already shifted) integer labels.

    Returns:
        A float32 tf tensor of shape label.shape with the normalised
        weights in its leading entries (caller slices the first K-1).
    """
    label = np.array(data).astype(np.float32)
    num_examples = label.size
    y = np.unique(label)
    m = np.zeros(label.shape)
    for i, t in enumerate(np.arange(np.min(y), np.max(y))):
        num_greater = label[label > t].size
        # IMPROVED: keep the computation pure NumPy; the original wrapped
        # each scalar in a tf tensor before storing it back into a NumPy
        # array, which forced a per-element host/graph round trip for no
        # numerical benefit.
        m[i] = np.sqrt(max(num_greater, num_examples - num_greater))
    imp = tf.cast(m / np.max(m), tf.float32)
    return imp
@tf.function
def test_MAE(model, images, labels):
    """Sum of absolute age errors for one batch (caller divides by N).

    Predicted age is decoded CORAL-style: the count of sigmoid outputs
    above 0.5 (number of ordinal thresholds passed).
    """
    logits, probs = model(images, training=False)
    predict = probs > 0.5
    predict = tf.cast(predict, tf.float32)
    pre_age = tf.reduce_sum(predict, 1)
    # NOTE(review): pre_age counts passed thresholds (a 0-based rank) while
    # grd_age is argmax(labels)+1 — that looks like a one-unit offset between
    # the two conventions; confirm against the CORAL reference decoding.
    grd_age = tf.argmax(labels, 1) + 1
    grd_age = tf.cast(grd_age, tf.float32)
    AE = tf.reduce_sum(tf.math.abs(grd_age - pre_age))
    return AE
def make_levels(labels):
    """Convert a batch of 0-based age ranks into CORAL level vectors.

    For rank r the vector is r ones followed by (num_classes - 1 - r)
    zeros: entry k answers "is the true rank greater than threshold k?".

    Returns a (batch_size, num_classes - 1) float32 tensor.
    """
    rows = []
    for idx in range(FLAGS.batch_size):
        rank = labels[idx].numpy()
        row = [1] * rank + [0] * (FLAGS.num_classes - 1 - rank)
        rows.append(tf.cast(row, tf.float32))
    return tf.convert_to_tensor(rows, tf.float32)
def main(argv=None):
    """Build the CORAL ResNet-34 age estimator, then either train it
    (FLAGS.train=True, with periodic per-group validation MAE written to
    FLAGS.output_loss_txt) or evaluate mean absolute error on a test list.
    """
    train_model = ResNet34(input_shape=(FLAGS.img_size - 8, FLAGS.img_size - 8, FLAGS.ch), include_top=False,
                           batch_size=FLAGS.batch_size, weight_path=FLAGS.weights, weights='imagenet')
    regularizer = tf.keras.regularizers.l2(0.000005)
    initializer = tf.keras.initializers.glorot_normal()
    # Attach L2 weight decay to every layer that exposes a kernel regularizer.
    for layer in train_model.layers:
        for attr in ['kernel_regularizer']:
            if hasattr(layer, attr):
                setattr(layer, attr, regularizer)

    # CORAL head: K-1 shared logits from a single bias-free Dense plus the
    # per-threshold bias layer `Linear`; sigmoid gives P(age rank > k).
    x = train_model.output
    avgpool = tf.keras.layers.GlobalAveragePooling2D()(x)
    logits = tf.keras.layers.Dense(FLAGS.num_classes - 1, use_bias=False)(avgpool)
    logits = Linear(FLAGS.num_classes - 1)(logits)
    probs = tf.nn.sigmoid(logits)
    train_model = tf.keras.Model(inputs=train_model.input, outputs=[logits, probs])
    train_model.summary()

    if FLAGS.pre_checkpoint is True:
        ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer)
        ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.pre_checkpoint_path, max_to_keep=5)
        # If a checkpoint exists, restore the latest one.
        if ckpt_manager.latest_checkpoint:
            print(ckpt_manager.latest_checkpoint)
            ckpt.restore(ckpt_manager.latest_checkpoint)
            print('Latest checkpoint restored!!')

    if FLAGS.train == True:
        data_name = np.loadtxt(FLAGS.txt_path, dtype='<U100', skiprows=0, usecols=0)
        data_name = [FLAGS.img_path + data_name_ for data_name_ in data_name]
        data_label = np.loadtxt(FLAGS.txt_path, dtype=np.int32, skiprows=0, usecols=1)

        # Importance weights over the K-1 ordinal tasks (ages shifted to 0-based).
        imp = task_importance_weights(data_label - 16)
        imp = imp[0:FLAGS.num_classes - 1]

        # Split the validation list into four gender/ethnicity groups
        # (columns: filename, age, gender M/F, ethnicity W/B).
        val_data_name = np.loadtxt(FLAGS.val_txt_path, dtype='<U100', skiprows=0, usecols=[0, 1, 2, 3])
        print(len(val_data_name))

        WM_img, WM_age = [], []
        WF_img, WF_age = [], []
        BM_img, BM_age = [], []
        BF_img, BF_age = [], []
        for i in range(len(val_data_name)):
            if val_data_name[i][2] == "M" and val_data_name[i][3] == "W":
                WM_img.append(FLAGS.val_img_path + val_data_name[i][0])
                WM_age.append(val_data_name[i][1])

            if val_data_name[i][2] == "F" and val_data_name[i][3] == "W":
                WF_img.append(FLAGS.val_img_path + val_data_name[i][0])
                WF_age.append(val_data_name[i][1])

            if val_data_name[i][2] == "M" and val_data_name[i][3] == "B":
                BM_img.append(FLAGS.val_img_path + val_data_name[i][0])
                BM_age.append(val_data_name[i][1])

            if val_data_name[i][2] == "F" and val_data_name[i][3] == "B":
                BF_img.append(FLAGS.val_img_path + val_data_name[i][0])
                BF_age.append(val_data_name[i][1])

        print(len(WM_img), len(WF_img), len(BM_img), len(BF_img))
        WM_img, WM_age = np.array(WM_img), np.array(WM_age)
        WF_img, WF_age = np.array(WF_img), np.array(WF_age)
        BM_img, BM_age = np.array(BM_img), np.array(BM_age)
        BF_img, BF_age = np.array(BF_img), np.array(BF_age)
        all_val_list = [[WM_img, WM_age], [WF_img, WF_age], [BM_img, BM_age], [BF_img, BF_age]]

        batch_idx = len(data_label) // FLAGS.batch_size
        loss_f = open(FLAGS.output_loss_txt, "w")
        count = 0
        for epoch in range(FLAGS.epochs):
            # Reshuffle (name, label) pairs together each epoch.
            A = list(zip(data_name, data_label))
            shuffle(A)
            data_name, data_label = zip(*A)
            data_name = np.array(data_name)
            data_label = np.array(data_label)

            data_generator = tf.data.Dataset.from_tensor_slices((data_name, data_label))
            data_generator = data_generator.shuffle(len(data_name))
            data_generator = data_generator.map(_func)
            data_generator = data_generator.batch(FLAGS.batch_size)
            data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)

            it = iter(data_generator)
            for step in range(batch_idx):
                batch_images, batch_labels, age = next(it)
                levels = make_levels(age)
                total_loss = train_step(train_model, batch_images, levels, imp)

                if count % 10 == 0:
                    print('Epoch: {} [{}/{}] loss = {}'.format(epoch + 1, step + 1, batch_idx, total_loss))

                if count % 100 == 0:
                    # Periodic validation: per-group mean absolute error.
                    test_list = ["WM", "WF", "BM", "BF"]
                    for j in range(len(all_val_list)):
                        val_img, val_lab = all_val_list[j]

                        val_data_generator = tf.data.Dataset.from_tensor_slices((val_img, val_lab))
                        val_data_generator = val_data_generator.map(val_func)
                        val_data_generator = val_data_generator.batch(1)
                        val_data_generator = val_data_generator.prefetch(tf.data.experimental.AUTOTUNE)

                        val_idx = len(val_img) // 1
                        val_it = iter(val_data_generator)
                        AE = 0
                        for i in range(val_idx):
                            img, lab = next(val_it)
                            pre_age = test_MAE(train_model, img, lab)
                            AE += pre_age

                        print("MAE = {} ({})".format(AE / len(val_img), test_list[j]))

                        loss_f.write("Epochs: {}, step = {}".format(epoch, count))
                        loss_f.write(" --> ")
                        loss_f.write(test_list[j])
                        loss_f.write(": ")
                        loss_f.write(str(AE / len(val_img)))
                        loss_f.write(", ")
                        loss_f.write("\n")
                        loss_f.flush()

                count += 1
    else:
        data_name = np.loadtxt(FLAGS.test_txt, dtype='<U100', skiprows=0, usecols=0)
        data_name = [FLAGS.test_img + data_name_ for data_name_ in data_name]
        # BUG FIX: labels were read from the *train* list (FLAGS.txt_path)
        # and as float32; _func one-hots the label, so read the matching
        # test list as integers.
        data_label = np.loadtxt(FLAGS.test_txt, dtype=np.int32, skiprows=0, usecols=1)

        data_generator = tf.data.Dataset.from_tensor_slices((data_name, data_label))
        data_generator = data_generator.shuffle(len(data_name))
        data_generator = data_generator.map(_func)
        data_generator = data_generator.batch(1)
        data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)
        MAE = 0
        it = iter(data_generator)
        for i in range(FLAGS.n_test):
            image, labels, opp_labels = next(it)
            _, probs = train_model(image, training=False)
            predict = probs > 0.5
            predict = tf.cast(predict, tf.float32)
            pre_age = tf.reduce_sum(predict)
            # BUG FIX: `age` and `grd_age` were undefined here (NameError);
            # the ground truth is the 0-based rank _func returned.
            age = tf.cast(opp_labels, tf.float32)
            MAE += tf.reduce_sum(tf.math.abs(age - pre_age))
            if i % 1000 == 0:
                print('{} image(s) for MAE = {}'.format(i + 1, MAE / (i + 1)))

        print('Total MAE = {}'.format(MAE / FLAGS.n_test))
if __name__ == '__main__':
app.run(main)
|
9,410 | 8c7fe90972feec19e280d3bccd39391af666608a | def play():
print("playing tank games...")
print("runing tank now!!!") |
9,411 | f9dd20a3b72c0c8e72029459244486f31eaff536 | import dash_html_components as html
import dash_core_components as dcc
import dash_daq as daq
import dash_bootstrap_components as dbc
import src.common.common_layout as layout_common
def build_navbar():
    """Build the top banner of the CBPM dashboard.

    Returns an html.Div with two rows: the title bar (heading plus the
    INFORMATION button that opens the modal) and a second banner row
    holding the live-update power button, the running/paused status
    headings, a hidden div used as an offline data store, and the shared
    dropdown menu from common_layout.
    """
    return html.Div(
        id="banner",
        children=[
            html.Div(
                id="banner-text",
                className="banner",
                children=[
                    dbc.Row(
                        [
                            dbc.Col(html.Div(html.H2("CBPM real-time display")), width=11),
                            dbc.Col(
                                html.Div(
                                    id="banner-logo",
                                    children=[
                                        html.Button(
                                            id="learn-more-button", children="INFORMATION", n_clicks=0
                                        ),
                                    ],
                                ),
                            )
                        ],
                    ),
                ],
            ),
            html.Div(
                className="banner2",
                children=[
                    dbc.Row(
                        [
                            dbc.Col(
                                html.Div(
                                    daq.PowerButton(
                                        id='live_update_switch',
                                        on='True',
                                        size=50,
                                        color='#079407',
                                        # label='Label',
                                        # labelPosition='top'
                                    ),
                                    id='test_button',
                                    style={'padding': '10px 0px 0px 0px'},
                                ), width={"size": 1},
                            ),
                            dbc.Col(
                                html.Div(
                                    children=[
                                        html.H2("Live update is:"),
                                        # Exactly one of the two status headings is
                                        # expected to be filled by a callback.
                                        html.H2(
                                            id='live_update_running',
                                            style={'margin-left': '1.0%', 'color': '#079407', 'font-weight': 'bold'},
                                        ),
                                        html.H2(
                                            id='live_update_paused',
                                            style={'margin-left': '0.5%', 'color': '#e0392a', 'font-weight': 'bold'},
                                        ),
                                    ],
                                ), #style={'padding': '0px 1000px 0px 0px'},
                            ),
                            dbc.Col(
                                # Hidden div: browser-side cache for offline data.
                                html.Div(id='offline_store_df', style={'display': 'none'}),
                            ),
                            dbc.Col(
                                layout_common.dropdown_menu(), width=2,
                            )
                        ], no_gutters=True, justify='start',
                    )
                ]
            )
        ],
    )
def generate_modal():
    """Build the (initially hidden) information modal.

    Contains a Close button wired to the `markdown_close` callback and a
    Markdown body describing the dashboard; visibility is toggled by the
    banner's INFORMATION button.
    """
    return html.Div(
        id="markdown",
        className="modal",
        children=(
            html.Div(
                id="markdown-container",
                className="markdown-container",
                children=[
                    html.Div(
                        className="close-container",
                        children=html.Button(
                            "Close",
                            id="markdown_close",
                            n_clicks=0,
                            className="closeButton",
                        ),
                    ),
                    html.Div(
                        className="markdown-text",
                        children=dcc.Markdown(
                            children=(
                                """
                        ###### What is this mock app about?
                        This is a dashboard for monitoring real-time process quality along manufacture production line.
                        ###### What does this app shows
                        Click on buttons in `Parameter` column to visualize details of measurement trendlines on the bottom panel.
                        The sparkline on top panel and control chart on bottom panel show Shewhart process monitor using mock data.
                        The trend is updated every other second to simulate real-time measurements. Data falling outside of six-sigma control limit are signals indicating 'Out of Control(OOC)', and will
                        trigger alerts instantly for a detailed checkup.
                        Operators may stop measurement by clicking on `Stop` button, and edit specification parameters by clicking specification tab.
                    """
                            )
                        ),
                    ),
                ],
            )
        ),
    )
9,412 | a0d1ef11d00e2ddd65b648a87f493b7adcda5115 | class RankedHand(object):
def __init__(self, remaining_cards):
self._remaining_cards = remaining_cards
self.rank = None
def remaining_cards(self):
return self._remaining_cards
# Returns 1 if self is higher, 0 if equal, -1 if self is lower
def compare_high_cards(self, other):
s_cards = reversed(sorted(self.remaining_cards()))
o_cards = reversed(sorted(other.remaining_cards()))
for card_pair in zip(s_cards, o_cards):
print("Comparing %s and %s" % (str(card_pair[0]), str(card_pair[1])))
if(card_pair[0] > card_pair[1]):
return 1
elif(card_pair[0] < card_pair[1]):
return -1
return 0
def __eq__(self, other):
return self.rank == other.rank
def __lt__(self, other):
return self.rank < other.rank
class HighCard(RankedHand):
    """Rank 0: no made hand; ordering is decided purely by kickers."""

    def __init__(self, remaining_cards):
        super(HighCard, self).__init__(remaining_cards)
        self.rank = 0

    def __eq__(self, other):
        if self.rank == other.rank:
            return self.compare_high_cards(other) == 0
        return super(HighCard, self).__eq__(other)

    def __lt__(self, other):
        if self.rank == other.rank:
            return self.compare_high_cards(other) == -1
        return super(HighCard, self).__lt__(other)
class OnePair(RankedHand):
    """Rank 1: one pair plus kickers; compares pair rank first, then kickers."""

    def __init__(self, pair_cards, remaining_cards):
        super(OnePair, self).__init__(remaining_cards)
        self.rank = 1
        self.pair_cards = pair_cards

    def __eq__(self, other):
        if self.rank != other.rank:
            return super(OnePair, self).__eq__(other)
        return self.pair_cards == other.pair_cards and self.compare_high_cards(other) == 0

    def __lt__(self, other):
        if self.rank != other.rank:
            return super(OnePair, self).__lt__(other)
        if self.pair_cards < other.pair_cards:
            return True
        return self.pair_cards == other.pair_cards and self.compare_high_cards(other) == -1
class TwoPair(RankedHand):
    """Rank 2: two pairs plus one kicker.

    Compared lexicographically: high pair, then low pair, then kicker.
    """

    def __init__(self, two_pair_ranks, remaining_card):
        super(TwoPair, self).__init__(remaining_card)
        self.two_pair_ranks = sorted(two_pair_ranks)
        self.rank = 2

    def high_pair(self):
        return self.two_pair_ranks[1]

    def low_pair(self):
        return self.two_pair_ranks[0]

    def __eq__(self, other):
        if self.rank != other.rank:
            return super(TwoPair, self).__eq__(other)
        return (self.high_pair() == other.high_pair()
                and self.low_pair() == other.low_pair()
                and self.compare_high_cards(other) == 0)

    def __lt__(self, other):
        if self.rank != other.rank:
            return super(TwoPair, self).__lt__(other)
        ours = (self.high_pair(), self.low_pair())
        theirs = (other.high_pair(), other.low_pair())
        if ours < theirs:
            return True
        return ours == theirs and self.compare_high_cards(other) == -1
class ThreeKind(RankedHand):
    """Rank 3: three of a kind; ties break on the triple's rank, then kickers."""

    def __init__(self, three_kind_rank, remaining_cards=None):
        # BUG FIX: the base initializer was never called, so
        # self._remaining_cards did not exist and the kicker comparison in
        # __lt__ raised AttributeError when two equal triples were compared.
        # The new optional parameter keeps the old one-argument call
        # signature working.
        super(ThreeKind, self).__init__([] if remaining_cards is None else remaining_cards)
        self.rank = 3
        self.three_kind_rank = three_kind_rank

    def __eq__(self, other):
        if self.rank != other.rank:
            return super(ThreeKind, self).__eq__(other)
        else:
            return False  # Can't be equal

    def __lt__(self, other):
        if self.rank != other.rank:
            return super(ThreeKind, self).__lt__(other)
        if self.three_kind_rank < other.three_kind_rank:
            return True
        elif(self.three_kind_rank == other.three_kind_rank and self.compare_high_cards(other) == -1):
            return True
        else:
            return False
class Straight(RankedHand):
    """Rank 4: five consecutive ranks; ordered by the straight's top card."""

    def __init__(self, all_cards):
        super(Straight, self).__init__(all_cards)
        self.rank = 4
        # Account for Ace low (A-2-3-4-5): rank by the highest non-ace card.
        if 14 in all_cards and 2 in all_cards:
            # BUG FIX: copy before removing — `tmp = all_cards` only aliased
            # the caller's list (which is also self._remaining_cards), so the
            # ace was destructively removed from the hand itself.
            tmp = list(all_cards)
            tmp.remove(14)
            self.straight_rank = max(tmp)
        else:
            self.straight_rank = max(all_cards)

    def __eq__(self, other):
        if self.rank != other.rank:
            return super(Straight, self).__eq__(other)
        else:
            return self.straight_rank == other.straight_rank

    def __lt__(self, other):
        if self.rank != other.rank:
            return super(Straight, self).__lt__(other)
        else:
            return self.straight_rank < other.straight_rank
class Flush(RankedHand):
    """Rank 5: five suited cards; ties fall back to high-card comparison."""

    def __init__(self, all_cards):
        super(Flush, self).__init__(all_cards)
        self.rank = 5

    def __eq__(self, other):
        if self.rank == other.rank:
            return self.compare_high_cards(other) == 0
        return super(Flush, self).__eq__(other)

    def __lt__(self, other):
        if self.rank == other.rank:
            return self.compare_high_cards(other) == -1
        return super(Flush, self).__lt__(other)
class FullHouse(RankedHand):
    """Rank 6: trips plus a pair; compared solely by the triple's rank."""

    def __init__(self, three_kind_rank):
        super(FullHouse, self).__init__([])
        self.three_kind_rank = three_kind_rank
        self.rank = 6

    def __eq__(self, other):
        if self.rank != other.rank:
            return super(FullHouse, self).__eq__(other)
        return False  # two full houses are never considered equal here

    def __lt__(self, other):
        if self.rank != other.rank:
            return super(FullHouse, self).__lt__(other)
        return self.three_kind_rank < other.three_kind_rank
class FourKind(RankedHand):
    """Rank 7: four of a kind; compared by the quad's rank."""

    def __init__(self, four_kind_rank, remaining_cards=None):
        # BUG FIX: the base initializer was never called, leaving
        # self._remaining_cards unset for any inherited kicker comparison.
        # The optional parameter keeps the old one-argument signature.
        super(FourKind, self).__init__([] if remaining_cards is None else remaining_cards)
        self.four_kind_rank = four_kind_rank
        self.rank = 7

    def __eq__(self, other):
        if self.rank != other.rank:
            return super(FourKind, self).__eq__(other)
        return False  # Can't be equal

    def __lt__(self, other):
        if self.rank != other.rank:
            return super(FourKind, self).__lt__(other)
        elif(self.four_kind_rank < other.four_kind_rank):
            return True
        else:
            return False
class StraightFlush(Straight):
    # Rank 8: straight + flush.  All tie-breaking (including the ace-low
    # special case) is inherited from Straight; only the category rank differs.
    def __init__(self, all_cards):
        super(StraightFlush, self).__init__(all_cards)
        self.rank = 8
class RoyalFlush(RankedHand):
    """Rank 9: ace-high straight flush; no tie-breaking needed."""

    def __init__(self):
        # BUG FIX: initialise the base class so inherited helpers that read
        # self._remaining_cards (e.g. compare_high_cards) cannot raise
        # AttributeError.
        super(RoyalFlush, self).__init__([])
        self.rank = 9
|
9,413 | b4454d92ab8380e0eded2f7aed737378e1710c72 | #!/usr/bin/env python3
import sys, os
import random
import numpy as np
import matplotlib as mpl
if os.environ.get('DISPLAY','') == '':
print('no display found. Using non-interactive Agg backend')
mpl.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import shapely.geometry as geometry
from shapely.ops import cascaded_union, polygonize
import math
from matplotlib.pyplot import arrow
import dubins
this_script_path = os.path.dirname(__file__)
path_to_utils = os.path.join(this_script_path, "utils")
sys.path.append(path_to_utils)
import figure_utils
import orienteering_utils
from orienteering_utils import ProblemType
legend_font_size = 24
tick_font_size = 20
NUM_POINTS_TO_GEN = 16
SCATTER_SIZE = 80
FIG_HEIGHT = 7.5
SHOW_FIGURE = True
RESULT_FILE = "../sources/results/results.log"
RESULT_FILE = os.path.join(this_script_path, RESULT_FILE)
#use nice latex fonts if latex is installed
#figure_utils.configure_latex_fonts_latex()
data_vns_sop = orienteering_utils.parse_op_log(RESULT_FILE)
print("using the last results")
record = data_vns_sop[-1]
print("record", record)
problem_type = ProblemType.UNKNOWN
PROBLEM_FILE = record['PROBLEM_FILE']
PROBLEM_FILE = os.path.join(this_script_path, PROBLEM_FILE)
if "datasets/sop/" in PROBLEM_FILE:
print("showing SOP")
problem_type = ProblemType.SOP
SAVE_TO_FIGURE = "solution_sop.png"
elif "datasets/dop_sop_dataset/" in PROBLEM_FILE:
print("showing DOP")
problem_type = ProblemType.DOP
SAVE_TO_FIGURE = "solution_dop.png"
elif "datasets/opn_sop_dataset/" in PROBLEM_FILE:
print("showing OPN")
problem_type = ProblemType.OPN
SAVE_TO_FIGURE = "solution_opn.png"
else:
error("can not decide problem type based on problem file location")
problem_type = ProblemType.UNKNOWN
op = orienteering_utils.SetOrienteeringProblemDefinition()
op.load_problem_file(PROBLEM_FILE)
nodes = op.nodes
sets_prices = op.get_sets_prices()
sets = op.get_sets()
original_nodes = op.get_set_centers()
result_target_ids = record['RESULT_TARGET_IDS']
result_cluster_ids = record['RESULT_CLUSTER_IDS']
result_rewards = record['REWARDS']
print("problem loaded")
print("result_target_ids:", result_target_ids)
print("result_cluster_ids:", result_cluster_ids)
print("result_rewards", result_rewards)
print("sets_prices", sets_prices)
print("sets", sets)
print("nodes", nodes)
# for the DOP only
result_head_angs = []
sampling_heading = len(sets[0])
calc_reward = 0
for clust_idx in range(len(result_cluster_ids)):
clust = result_cluster_ids[clust_idx]
node = result_target_ids[clust_idx]
if problem_type == ProblemType.DOP:
node_inside_cluster = node - sets[clust][0]
# result_node_inside_cluster.append(node_inside_cluster)
head_ang = math.pi + (2 * math.pi * node_inside_cluster) / sampling_heading
result_head_angs.append(head_ang)
calc_reward += sets_prices[clust]
if node not in sets[clust]:
print("what the hell, it is not good")
print("calc_reward", calc_reward)
mycmap = plt.cm.get_cmap('RdYlBu_r')
maxx, maxy = -sys.float_info.max,-sys.float_info.max
minx, miny = sys.float_info.max,sys.float_info.max
circle_radiuses = np.ones([len(nodes), 1])
circle_radiuses1 = np.multiply(2.0, circle_radiuses)
nodes_w_rewards = np.zeros((len(nodes), 3))
if problem_type == ProblemType.DOP:
xses = [i[0] for i in original_nodes]
yses = [i[1] for i in original_nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(original_nodes), 3))
for nidx in range(len(original_nodes)):
nodes_w_rewards[nidx, 0] = original_nodes[nidx][0]
nodes_w_rewards[nidx, 1] = original_nodes[nidx][1]
nodes_w_rewards[nidx, 2] = sets_prices[nidx]
elif problem_type == ProblemType.OPN :
xses = [nodes[i][0] for i in nodes]
yses = [nodes[i][1] for i in nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(nodes), 3))
for nidx in nodes:
nodes_w_rewards[nidx, 0] = nodes[nidx][0]
nodes_w_rewards[nidx, 1] = nodes[nidx][1]
for set_idx in sets:
if nidx in sets[set_idx]:
nodes_w_rewards[nidx, 2] = sets_prices[set_idx]
break
else:
xses = [nodes[i][0] for i in nodes]
yses = [nodes[i][1] for i in nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(nodes), 3))
for nidx in nodes:
nodes_w_rewards[nidx, 0] = nodes[nidx][0]
nodes_w_rewards[nidx, 1] = nodes[nidx][1]
for set_idx in sets:
if nidx in sets[set_idx]:
nodes_w_rewards[nidx, 2] = sets_prices[set_idx]
break
minrew = min(nodes_w_rewards[:, 2])
maxrew = max(nodes_w_rewards[:, 2])
cNorm = mpl.colors.Normalize(vmin=minrew, vmax=maxrew + 0.1 * (maxrew - minrew))
mycmapScalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=mycmap)
fig_width = FIG_HEIGHT*(maxx-minx)/(maxy-miny)
figsize = (fig_width*0.9,FIG_HEIGHT)
print(figsize)
fig = plt.figure(num=None, figsize=figsize, dpi=80, facecolor='w', edgecolor='k')
circles = figure_utils.circles(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], circle_radiuses1, c=nodes_w_rewards[:, 2] , alpha=0.05, edgecolor='black', linewidth=0.9, linestyle=':')
sc = plt.scatter(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], c=nodes_w_rewards[:, 2], cmap=mycmap , alpha=1.0, s=1, facecolor='black', lw=0.5)
plt.plot(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], 'ok', ms=4.0)
# print(nodes_w_rewards[:, 2])
if problem_type == ProblemType.DOP:
for nidx1 in range(len(nodes_w_rewards)):
points = []
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for hind in range(sampling_heading):
head_ang = math.pi + (2 * math.pi * hind) / sampling_heading
arrow_len = 30
arrow(node1[0], node1[1], arrow_len * math.cos(head_ang), arrow_len * math.sin(head_ang))
set_rew = nodes_w_rewards[nidx1, 2]
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(40), fc=color)
elif problem_type == ProblemType.OPN:
for set_idx in reversed(sorted(sets.keys())):
points = []
set_rew = sets_prices[set_idx]
for nidx1 in sets[set_idx]:
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for nidx2 in sets[set_idx]:
if(nidx1 != nidx2):
node2 = nodes_w_rewards[nidx2, :]
# plt.plot([node1[0], node2[0] ], [node1[1], node2[1] ], '-k', lw=0.2)
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)
else:
for set_idx in reversed(sorted(sets.keys())):
points = []
set_rew = sets_prices[set_idx]
for nidx1 in sets[set_idx]:
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for nidx2 in sets[set_idx]:
if(nidx1 != nidx2):
node2 = nodes_w_rewards[nidx2, :]
# plt.plot([node1[0], node2[0] ], [node1[1], node2[1] ], '-k', lw=0.2)
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)
for node_idx in range(1, len(result_target_ids)):
if problem_type == ProblemType.DOP:
step_size = 20
turning_radius = op.dubins_radius
node = result_cluster_ids[node_idx]
node_prew = result_cluster_ids[node_idx - 1]
q_start = [nodes_w_rewards[node, 0], nodes_w_rewards[node, 1], result_head_angs[node_idx]]
q_end = [nodes_w_rewards[node_prew][0], nodes_w_rewards[node_prew][1], result_head_angs[node_idx - 1]]
path = dubins.shortest_path(q_start, q_end, turning_radius)
qs, _ = path.sample_many(step_size)
# length_dub += math.ceil(path.path_length())
xses = [item[0] for item in qs]
yses = [item[1] for item in qs]
print(node_prew, '->', node, ",", q_start, '->', q_end)
plt.plot(xses, yses, '-g', lw=1.6)
elif problem_type == ProblemType.OPN:
node = result_target_ids[node_idx]
node_prew = result_target_ids[node_idx - 1]
node_pos = [nodes[node][0], nodes[node][1]]
node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]
print(node_prew, '->', node, ",", node_pos_prew, '->', node_pos)
plt.plot([node_pos_prew[0], node_pos[0] ], [node_pos_prew[1], node_pos[1] ], '-g', lw=1.6)
else:
node = result_target_ids[node_idx]
node_prew = result_target_ids[node_idx - 1]
node_pos = [nodes[node][0], nodes[node][1]]
node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]
print(node_prew, '->', node, ",", node_pos_prew, '->', node_pos)
plt.plot([node_pos_prew[0], node_pos[0] ], [node_pos_prew[1], node_pos[1] ], '-g', lw=1.6)
ax = plt.gca()
ax.axis('equal')
figure_utils.no_axis(ax)
cbar_position = [0.20, 0.05, 0.6, 0.03]
cbar_ax = fig.add_axes(cbar_position)
cb = plt.colorbar(sc, cax=cbar_ax, orientation='horizontal')
cb.ax.tick_params(labelsize=tick_font_size)
cb.set_label('profit', labelpad=-65.0, y=0.8, fontsize=legend_font_size)
# offset = 0.08
fig.subplots_adjust(left=-0.035, right=1.035 , top=1.07 , bottom=0.0)
plt.savefig(SAVE_TO_FIGURE, dpi=300)
if SHOW_FIGURE:
plt.show()
|
9,414 | 39f1fc04911f8d22d07532add24cd1671a569e72 | from airflow.plugins_manager import AirflowPlugin
from flask import Blueprint, Flask
from rest_api.log.views import views
from rest_api.route.log_route import log
from rest_api.route.mylog_route import my_log_pb
from rest_api.route.native_log_route import native_log_bp
class AirflowPlugin(AirflowPlugin):
    """Minimal Airflow plugin shell: registers the plugin name and leaves
    every extension point empty (the Flask blueprints below are wired into
    a standalone Flask app instead).
    """
    # NOTE(review): this subclass shadows the imported base name
    # `airflow.plugins_manager.AirflowPlugin`.  It works (the base is
    # resolved before the new name is bound), but renaming the subclass
    # would be clearer.
    name = "airflow-plugin"
    operators = []
    # Leave in for explicitness
    hooks = []
    executors = []
    macros = []
    admin_views = []
    flask_blueprints = []
    menu_links = []
# 创建Blueprint实例
# Blueprint实例创建之后我们就可以通过@Blueprint实例名.route('/')语法为我们的模块创建路由
airflow_bp = Blueprint(
'airflow_bp',
__name__
)
app = Flask(__name__)
# 注册我们在views.py模块中创建的蓝图实例views, 并将他的URL前缀设置为`/views`
app.register_blueprint(views, url_prefix='/views')
app.register_blueprint(log, url_prefix='/')
app.register_blueprint(native_log_bp, url_prefix='/native_log')
app.register_blueprint(my_log_pb, url_prefix='/my_log')
if __name__ == '__main__':
app.run(debug=True)
|
9,415 | 0a459b4aeb2a16c06c1d89dafb656028b235a31e | import math
def calcula_distancia_do_projetil(v, O, y0):
    """Horizontal range of a projectile launched at speed `v` (m/s), angle
    `O` (radians, must have sin(O) != 0 when y0 > 0) from initial height
    `y0` (m), with g = 9.8 m/s^2:

        d = (v^2 / (2 g)) * (1 + sqrt(1 + 2 g y0 / (v^2 sin^2 O))) * sin(2 O)
    """
    g = 9.8
    # BUG FIX: operator precedence — the original computed `v**2 / 2 * g`
    # (i.e. MULTIPLIED by g) and `2*g*y0 / (v**2) * sin(O)**2` (i.e.
    # MULTIPLIED by sin^2 O); both denominators need explicit parentheses.
    sin_sq = math.sin(O) ** 2
    return ((v ** 2) / (2 * g)) * (1 + math.sqrt(1 + 2 * g * y0 / ((v ** 2) * sin_sq))) * math.sin(2 * O)
9,416 | 2a799d81d963f73d8018a99cbd963af166681b35 | def factorial(num):
assert num >= 0 and int(num) == num, 'Only positive integer accept'
if num in [0,1]:
return 1
else:
return num*factorial(num-1)
print(factorial(4.4)) |
9,417 | 657ac500c40ddbd29f5e3736a78ed43e7d105478 | num=int(input("Enter the number: "))
table=[num*i for i in range(1,11)]
print(table)
with open("table.txt","a") as f:
f.write(f"{num} table is: {str(table)}")
f.write('\n') |
9,418 | 25595b5f86a41fee1dc43f199f3bcff73f6d256b | import ray
import os
import sys
import random
path_join = os.path.join
real_path = os.path.realpath
perfd_dir = real_path(path_join(os.getcwd()))
microps_dir = path_join(perfd_dir, "thirdparty", "microps")
sys.path += [perfd_dir, microps_dir]
from thirdparty.microps.oracle.experiments.spark_sql_perf.main import SparkExperiment, SparkBenchMaker
from thirdparty.microps.build.spark.driver import add_role as add_spk_role
import thirdparty.microps.oracle.apps.spark_sql_perf.configs as spk
import thirdparty.microps.oracle.experiments.spark_sql_perf.utils as utils
@ray.remote
def run(run_config: dict, wrks: dict) -> dict:
    """Ray task: configure and launch one spark-sql-perf experiment.

    run_config keys used: appName, serverInstanceType, numExecutor,
    numServerInstance, driverAdaptiveGC, inputScale, numPartition,
    randomSeed (optional; "random" draws a fresh seed), logBucket.
    Returns an empty dict on completion.
    """
    try:
        add_spk_role()
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; role creation is best-effort (already exists).
        print("run, spark: ignore")
    os.chdir(microps_dir)

    # TODO: add virtual cluster labels to the pods
    base_spk_config = spk.apps_config_map["sparkperfml"]
    # TODO: update driver and executor memory
    base_spk_config = spk.patched_app_config(base_spk_config,
                                             {
                                                 "app_name": run_config["appName"],
                                                 "ins_type": run_config["serverInstanceType"],
                                                 # +1 instance for the driver
                                                 "ins_num": run_config["numExecutor"] + 1,
                                                 # "node_selectors": cur_node_selectors,
                                                 "driver_adaptive_gc": run_config["driverAdaptiveGC"],
                                             })

    bench = None
    for b in SparkBenchMaker.load_benchmarks():
        if b["name"] == run_config["appName"]:
            bench = b
    if bench is None:
        # BUG FIX: the original only printed and carried on, crashing later
        # with an unrelated error when the None `bench` was used; fail fast.
        raise ValueError("run, spark: unable to find bench " + run_config["appName"])

    # spark sql perf configurations
    config_base = SparkBenchMaker.load_base()
    # change the dataset scale
    utils.update_bench_params(base=config_base, bench=bench,
                              key="numExamples", value=run_config["inputScale"], is_scale=True)
    # change number of partition, each executor has at least one partition
    utils.update_bench_params(base=config_base, bench=bench,
                              key="numPartitions", value=run_config["numPartition"], is_scale=False)
    utils.update_bench_params(base=config_base, bench=bench,
                              key="randomSeed",
                              value=random.randint(0, 10000) if run_config.get("randomSeed", 1) == "random" else 1,
                              is_scale=False)

    bc = SparkBenchMaker.patched_bench_config(config_base,
                                              {
                                                  "benchmarks": [bench]
                                              })
    print(bc)

    exp = SparkExperiment(
        {
            "app_configs": base_spk_config,
            "exp_configs": {
                "s3_log_bucket": run_config["logBucket"],
                "num_executor": run_config["numExecutor"],
                "ins_type": run_config["serverInstanceType"],
                "ins_num": run_config["numServerInstance"],
                "run_interval": 0.5,
                "runs": 1,
                "bench_config": bc,
            },
            "ins_type_num": [(run_config["serverInstanceType"], run_config["numServerInstance"])],
            "variables": {},
        }
    )
    exp.run()
    return {}
9,419 | 0a5baacf17d33dbf6ea69114a8632f7fcef52c3c | import tkinter
from tkinter import messagebox
from random import randint
tplyer = 0
tcomp = 0
player = 0
comp = 0
top = tkinter.Tk()
top.resizable(width = False, height =False)
top.geometry("200x100")
def Yes():
    """Play one round: roll a die for the player and the computer, update
    the global score counters, and show the result in a message box.
    (User-facing strings are Polish: Wygrales = you won, Remis = draw,
    Przegrales = you lost, Wynik = result.)
    """
    global player
    global comp
    # NOTE(review): these are locals; the module-level counters
    # `tplyer`/`tcomp` are never updated — presumably a naming slip.
    tplayer = randint(1,6)
    tcomp = randint(1,6)
    message =""
    if tplayer>tcomp:
        message = "Wygrales!"
        player+=1
    elif tplayer==tcomp:
        message = "Remis"
    else:
        message = "Przegrales"
        comp +=1
    messagebox.showinfo( "Wynik", "Gracz: "+str(player)+" Komputer: "+str(comp)+"\nTwoj rzut "+str(tplayer)+"\n"+"Przeciwnik wyrzucil "+str(tcomp)+"\n"+message)
def No():
messagebox.showinfo("Do zobaczenia")
top.quit()
w = tkinter.Label(top,text = "Zagramy w kosci?\n")
B1 = tkinter.Button(top, text ="Tak", command = Yes,width = 10)
B2 = tkinter.Button(top, text = "Nie", command = No,width = 10)
w.grid(row = 0,column = 0)
B1.grid(row = 1, column = 0)
B2.grid(row = 1, column = 1)
top.mainloop()
|
9,420 | b569f0a0dda048d6337e1028a240caabf188a174 | ___author__ = 'acmASCIS'
'''
by ahani at {9/24/2016}
'''
import time
class Freq(object):
def __init__(self, array):
self.__array = array
self.__frequency_dict = {}
self.__array_length = len(array)
self.__running_time = round(time.time() * 1000)
def get_original_array(self):
return self.__array
def get_array_length(self):
return self.__array_length
def get_frequency_array(self):
if self.__frequency_dict is None:
raise Exception("The frequency array is empty, check your function implementation!")
return self.__frequency_dict
def get_running_time(self):
return self.__running_time
def get_frequency(self):
"""
Implement your elements frequency algorithm
:return: (dictionary) that contains key: element in array, value: frequency. Note that your dictionary should be sorted by key!
"""
#TODO
self.__running_time = round(time.time() * 1000) - self.__running_time
return self.__frequency_dict
|
9,421 | c0b6c0636d1900a31cc455795838eb958d1daf65 | # Find a list of patterns in a list of string in python
any([ p in s for p in patterns for s in strings ])
|
9,422 | 00609c4972269c36bbfcf5bec2a8648f812b6092 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 5 09:10:03 2018
@author: User
"""
from urllib.request import urlopen
from urllib.error import HTTPError
from bs4 import BeautifulSoup
import re
url = "http://www.pythonscraping.com/pages/page3.html"
html = urlopen(url)
html_data = BeautifulSoup(html.read(), "lxml")
img_list = html_data.find_all("img", {"src": re.compile("\.\./img*\.jpg")})
for img in img_list:
print(img["src"]) |
9,423 | 02a228c479a6c94858f7e8ef73a7c8528def871e | class Solution(object):
def plusOne(self, digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
plus = True # In the last digit, we should add one as the quesiton requries
indexList = range(len(digits))
indexList.reverse()
for i in indexList:
if plus:
digits[i] += 1
if digits[i] == 10:
digits[i] = 0
plus = True
else:
plus = False
if plus:
# handle the case where we need one more digit
return [1] + digits
return digits
|
9,424 | 9cea998d7d5cad3ddc00f667ca06151a938d48a1 | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : gan_loss.py
# @Time : Created at 2019-07-11
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.nn as nn
import config as cfg
class GANLoss(nn.Module):
"""Define different GAN Discriminator's objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, loss_mode, which_net, which_D, target_real_label=1.0, target_fake_label=0.0, CUDA=False):
""" Initialize the GAN's Discriminator Loss class.
Parameters:
loss_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.loss_mode = loss_mode
self.which_net = which_net
self.which_D = which_D
self.gpu = CUDA
if loss_mode == 'lsgan':
self.loss = nn.MSELoss()
elif loss_mode in ['vanilla', 'ragan', 'rsgan']:
self.loss = nn.BCEWithLogitsLoss()
elif loss_mode in ['wgan', 'hinge']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % loss_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
prediction (tensor) - - tpyically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
if self.gpu:
target_tensor = target_tensor.cuda()
return target_tensor.expand_as(prediction)
def G_loss(self, Dreal, Dfake):
if self.loss_mode != 'rsgan' and cfg.d_out_mean:
Dfake = torch.mean(Dfake.view(cfg.batch_size, -1), dim=-1)
Dreal = torch.mean(Dreal.view(cfg.batch_size, -1), dim=-1)
real_tensor = self.get_target_tensor(Dreal, True)
fake_tensor = self.get_target_tensor(Dreal, False)
if self.which_D == 'S':
prediction_fake = Dfake
prediction_real = real_tensor if self.loss_mode in ['vanilla'] else fake_tensor
elif self.which_D == 'Ra':
prediction_fake = Dfake - torch.mean(Dreal)
prediction_real = Dreal - torch.mean(Dfake)
else:
raise NotImplementedError('which_D name [%s] is not recognized' % self.which_D)
if self.loss_mode in ['lsgan', 'ragan']:
loss_fake = self.loss(prediction_fake, real_tensor)
loss_real = self.loss(prediction_real, fake_tensor)
g_loss = loss_fake + loss_real
elif self.loss_mode == 'vanilla':
loss_fake = -self.loss(prediction_fake, fake_tensor)
g_loss = loss_fake
elif self.loss_mode in ['wgan', 'hinge'] and self.which_D == 'S':
loss_fake = -prediction_fake.mean()
loss_real = prediction_real.mean()
g_loss = loss_fake + loss_real
elif self.loss_mode == 'hinge' and self.which_D == 'Ra':
loss_fake = nn.ReLU()(1.0 - prediction_fake).mean()
loss_real = nn.ReLU()(1.0 + prediction_real).mean()
g_loss = loss_fake + loss_real
elif self.loss_mode == 'rsgan':
loss_fake = self.loss(Dfake - Dreal, real_tensor)
g_loss = loss_fake
else:
raise NotImplementedError('loss_mode name [%s] is not recognized' % self.loss_mode)
return g_loss
def D_loss(self, Dreal, Dfake):
if self.loss_mode != 'rsgan' and cfg.d_out_mean:
Dfake = torch.mean(Dfake.view(cfg.batch_size, -1), dim=-1)
Dreal = torch.mean(Dreal.view(cfg.batch_size, -1), dim=-1)
real_tensor = self.get_target_tensor(Dreal, True)
fake_tensor = self.get_target_tensor(Dreal, False)
if self.which_D == 'S':
prediction_fake = Dfake
prediction_real = Dreal
elif self.which_D == 'Ra':
prediction_fake = Dfake - torch.mean(Dreal)
prediction_real = Dreal - torch.mean(Dfake)
else:
raise NotImplementedError('which_D name [%s] is not recognized' % self.which_D)
if self.loss_mode in ['lsgan', 'ragan', 'vanilla']:
loss_fake = self.loss(prediction_fake, fake_tensor)
loss_real = self.loss(prediction_real, real_tensor)
elif self.loss_mode == 'wgan':
loss_fake = prediction_fake.mean()
loss_real = -prediction_real.mean()
elif self.loss_mode == 'hinge':
loss_fake = nn.ReLU()(1.0 + prediction_fake).mean()
loss_real = nn.ReLU()(1.0 - prediction_real).mean()
elif self.loss_mode == 'rsgan':
loss_fake = 0.
loss_real = self.loss(Dreal - Dfake, real_tensor)
else:
raise NotImplementedError('loss_mode name [%s] is not recognized' % self.loss_mode)
return loss_fake + loss_real
def __call__(self, Dreal, Dfake):
"""Calculate loss given Discriminator's output and grount truth labels."""
if self.which_net == 'G':
return self.G_loss(Dreal, Dfake)
elif self.which_net == 'D':
return self.D_loss(Dreal, Dfake)
else:
raise NotImplementedError('which_net name [%s] is not recognized' % self.which_net)
|
9,425 | e99ff1c75d5108efc8d587d4533c34eeb15c6978 | from django.contrib.staticfiles.storage import CachedFilesMixin
from storages.backends.s3boto3 import S3Boto3Storage
class CachedS3Storage(CachedFilesMixin, S3Boto3Storage):
pass
StaticRootS3BotoStorage = lambda: CachedS3Storage(location='static')
MediaRootS3BotoStorage = lambda: S3Boto3Storage(location='media')
|
9,426 | 97a362fc65731bb8fc3743c49a669b4cd3f0e155 | import collections
import numpy
import pytest
import random
import conftest
from svviz2.io import readstatistics
from svviz2.remap import genotyping
from svviz2.utility.intervals import Locus
def get_read_stats(isize=400):
stats = readstatistics.ReadStatistics(None)
stats.insertSizes = numpy.random.normal(400, 20, 2000).astype(int)
stats.orientations = ["+-"]
return stats
def test_gt(genome_source, genome_source_deletion):
genome_source_deletion, deletion_length = genome_source_deletion
refseq = genome_source.names_to_contigs["chr2"]
altseq = genome_source_deletion.names_to_contigs["chr2"]
print("")
coverage = 50
read_length = 150
ref_reads = conftest.simulate_read_pairs(refseq, int(len(refseq)/(read_length*2)*coverage))
alt_reads = conftest.simulate_read_pairs(altseq, int(len(altseq)/(read_length*2)*coverage))
print(len(ref_reads), len(alt_reads))
combined_reads = []
for i, _, pair in ref_reads:
if 4000-500 < i < 4000+500+deletion_length:
pair._allele = "ref"
combined_reads.append(pair)
for i, _, pair in alt_reads:
if 4000-500 < i < 4500:
pair._allele = "alt"
combined_reads.append(pair)
for pair in combined_reads:
pair.realign([genome_source], [genome_source_deletion])
ref_breakpoints = [Locus("chr2", 4000, 4000, "+"),
Locus("chr2", 4000+deletion_length, 4000+deletion_length, "+")]
alt_breakpoints = [Locus("chr2", 4000, 4000, "+")]
ref_count, alt_count = genotyping.assign_reads_to_alleles(
combined_reads, ref_breakpoints, alt_breakpoints, get_read_stats())
print(":::::", ref_count, alt_count)
|
9,427 | 7bb49712c4ef482c64f3c2a457a766de691ba7c3 | def bfs(graph, start):
queue = [start]
queued = list()
path = list()
while queue:
print('Queue is: %s' % queue)
vertex = queue.pop(0)
print('Processing %s' % vertex)
for candidate in graph[vertex]:
if candidate not in queued:
queued.append(candidate)
queue.append(candidate)
path.append(vertex + '>' + candidate)
print('Adding %s to the queue' % candidate)
return path
|
9,428 | 267276eab470b5216a2102f3e7616f7aecadcfe9 | # -------------------------------------------
# Created by: jasper
# Date: 11/24/19
# --------------------------------------------
from os import path, mkdir
class IOHandler:
def __init__(self, directory, fName, data_instance):
"""Save the setup of a class instance or load a class instance from a saved setup
Parameters
----------
directory : str
path of the directory the files are saved to or read from
fName : str
Name of the project. File endings will be set automaticaly
data_instance : object
class instance to perform actions on
"""
self.fName = fName
self.data_instance = data_instance
self.directory = directory
def dump_data(self):
"""save the data contained in data_instance, checking whether the
directories already exist and asking whether to create them if not. """
while not path.isdir(self.directory):
print(
"# The directory {} does not exist. Do you want to create it (1, default) or specify another? (2) [1/2]".format(
self.directory))
select = input()
if select == "2":
self.directory = input("Enter new directory: \n")
else:
mkdir(self.directory)
print("# Directory " + self.directory + " created")
self.fullpath = self.directory + "/" + self.fName
self.data_instance.dump_data(self.fullpath)
def dump_data_to_txt(self):
while not path.isdir(self.directory):
print(
"# The directory {} does not exist. Do you want to create it (1, default) or specify another? (2) [1/2]".format(
self.directory))
select = input()
if select == "2":
self.directory = input("Enter new directory: \n")
else:
mkdir(self.directory)
print("# Directory " + self.directory + " created")
self.fullpath = self.directory + "/" + self.fName
self.data_instance.dump_to_txt(self.fullpath)
def read_data(self):
"""Read data into the specified data_instance. If the read process
hits a not existing file, it will be notified to you"""
try:
self.data_instance.read_data(self.directory + self.fName)
except FileNotFoundError as file_error:
print(
"# The file {} belonging to {} do not exist.".format(
file_error.filename, self.fName))
|
9,429 | 7025cc896035c59e0bbb7943493b6ca24fd9e6ca | from flask import Flask, render_template, request
app = Flask(__name__)
def convert(decimal_num):
roman = {1000:'M', 900:'CM', 500:'D', 400:'CD', 100:'C', 90:'XC', 50:'L', 40:'XL', 10:'X', 9:'IX', 5:'V', 4:'IV', 1:'I'}
num_to_roman = ''
for i in roman.keys():
num_to_roman += roman[i]*(decimal_num//i)
decimal_num %= i
return num_to_roman
# Ister ustekini kullan ister bunu
#def convert_to_roman(num):
# roman = ['M','CM','D','CD','C','XC','L','XL','X','IX','V','IV','I']
# number = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1]
# romanvalue = ''
# for i,d in enumerate(number):
# while (num >= d):
# num -= d
# romanvalue += roman[i]
# return romanvalue
@app.route('/', methods=['POST','GET'])
def main_post():
if request.method == 'POST':
alpha = request.form['number'] # degerler dictionary olarak geliyor dedi o yuzden key i aliyoz [] ile
if not alpha.isdecimal():
return render_template('index.html', not_valid=True,developer_name='Pablo')
number=int(alpha)
if not 0<number<4000:
return render_template('index.html', not_valid=True,developer_name='Pablo')
return render_template('result.html', developer_name='Pablo', number_decimal=number,number_roman=convert(number))
else:
return render_template('index.html',not_valid = False, develeoper_name='Pablo')
if __name__=='__main__':
#app.run(debug=True)
app.run(host='0.0.0.0',port=80) |
9,430 | 7e318ae7317eac90d6ce9a6b1d0dcc8ff65abef0 | from dataclasses import dataclass, field
from typing import List
@dataclass
class Root:
a: List[object] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
"min_occurs": 2,
"max_occurs": 4,
"sequence": 1,
}
)
b: List[object] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
"max_occurs": 2,
"sequence": 1,
}
)
|
9,431 | eb81f1825c4ac8e20dde1daefbdad22f588e696e | #1.문자열에 홑따옴표 포함기키기 : 쌍따옴표
print("Python's Data Type")
#2.문자열에 쌍따옴표 포함시키기 : 홑따옴표
print('"Python is very easy" he said.')
#멀티라인(여러줄)표현하기
#1. 연속된 쌍따옴표 3개 사용하기
print("""No pain
No gain""")
#2. 연속된 쌍따옴표 3개 사용하기
print('''No pain
No gain''')
#3.이스케이프 코드 \n 삽입하기
print("No pain \n No gain")
"""
이스케이프(escape) 문자
\n :new line. 문자열 안에서 줄을 바꿀 때 사용
\t :tap.문자열 사이에 탭만큼의 간격을 줄 때 사용
\\ :문자 \를 그대로 표현할 때 사용
\' :홑따옴표를 그대로 표현할 때 사용
\" :쌍따옴표를 그대로 표현할 때 사용
"""
print("Ha\tHa\tHa")
print("역슬래시 \\")
print("쌍따옴표 \"")
print("홑따옴표 \'")
|
9,432 | f5274f5d838d484ca0c1cc5a5192a2fd698cf827 | from .. import CURRENT_NAME
from ..cmd import call_cmd
from .config import Configurator
from .config import USER_INI
from icemac.install.addressbook._compat import Path
import argparse
import os
import pdb # noqa: T002
import sys
def update(stdin=None):
"""Update the current address book installation."""
curr_path = Path.cwd() / CURRENT_NAME
if not curr_path.exists():
print("ERROR: There is no symlink named {!r} in the current"
" directory.".format(CURRENT_NAME))
print("This script cannot be called here.")
sys.exit(-1)
if (curr_path / 'buildout.cfg').exists():
print("ERROR: '{}/buildout.cfg' already exists please (re-) move"
" it.".format(CURRENT_NAME))
sys.exit(-2)
cwd = os.getcwd()
os.chdir(str(curr_path)) # PY2: in PY3 `str` is no longer needed
configurator = Configurator(
curr_path / USER_INI, install_new_version=False, stdin=stdin)
try:
configurator()
call_cmd('running bin/buildout', '../bin/buildout')
if configurator.restart_server == 'yes':
call_cmd('Restarting instance', 'bin/svctl', 'restart', 'all')
finally:
os.chdir(str(cwd)) # PY2: in PY3 `str` is no longer needed
print('Done.')
def main(args=None):
"""Entry point for `bin/change-addressbook-config`."""
parser = argparse.ArgumentParser(
description='Update the current address book installation.')
parser.add_argument(
'--debug', action="store_true",
help='Enter debugger on errors.')
args = parser.parse_args(args)
try:
update()
except Exception:
if args.debug:
pdb.post_mortem()
else:
raise
|
9,433 | 1e168cf6ba785a08244f47eb490b54605a09e4b0 |
traditional_investor_stage1 = \
"SELECT investor, investor_id, invest_amount, invest_change, security_id, isin, issue_date, maturity_date "\
"FROM "\
"(SELECT "\
"report_date, "\
"investor_holdings.investor_name AS investor,"\
"investor_id,"\
"AVG(investor_holdings.amount_held) AS invest_amount,"\
"AVG(investor_holdings.latest_change) AS invest_change,"\
"investor_holdings.security_id, "\
"MAX(isin) as isin,"\
"MAX(issue_date) as issue_date, "\
"MAX(maturity_date) as maturity_date "\
"FROM investor_holdings "\
"INNER JOIN securities ON investor_holdings.security_id = securities.id "\
"INNER JOIN issuing_entities ON securities.issuing_entity_id = issuing_entities.id "\
"INNER JOIN organizations ON issuing_entities.organization_id = organizations.id "\
"INNER JOIN gics ON organizations.sector = gics.sub_industry_id "\
"INNER JOIN security_issues ON security_issues.security_id = securities.id "\
"WHERE investor_holdings.deleted_at is NULL "\
"AND investor_holdings.report_date > '{}' "\
"AND issuing_entities.name = '{}' "\
"AND securities.currency = '{}' "\
"AND gics.industry_group = '{}' GROUP BY (investor_holdings.investor_name, " \
"investor_holdings.investor_id, " \
"investor_holdings.security_id, " \
"investor_holdings.report_date)) as FOO "
non_traditional_investor_stage1 = \
"SELECT investor, investor_id, invest_amount, invest_change, security_id, isin, issue_date, maturity_date "\
"FROM "\
"(SELECT "\
"report_date, "\
"investor_holdings.investor_name AS investor,"\
"investor_id,"\
"AVG(investor_holdings.amount_held) AS invest_amount,"\
"AVG(investor_holdings.latest_change) AS invest_change,"\
"investor_holdings.security_id, "\
"MAX(isin) as isin,"\
"MAX(issue_date) as issue_date, "\
"MAX(maturity_date) as maturity_date "\
"FROM investor_holdings "\
"INNER JOIN securities ON investor_holdings.security_id = securities.id "\
"INNER JOIN issuing_entities ON securities.issuing_entity_id = issuing_entities.id "\
"INNER JOIN organizations ON issuing_entities.organization_id = organizations.id "\
"INNER JOIN gics ON organizations.sector = gics.sub_industry_id "\
"INNER JOIN security_issues ON security_issues.security_id = securities.id "\
"WHERE investor_holdings.deleted_at is NULL "\
"AND investor_holdings.report_date > '{}' "\
"AND securities.currency = '{}' "\
"AND gics.industry_group = '{}' GROUP BY "\
"(investor_holdings.investor_name, " \
"investor_holdings.investor_id, " \
"investor_holdings.security_id, " \
"investor_holdings.report_date)) as FOO " |
9,434 | 8bb67317ede277e03e8cbdefefeffa3d206ece65 | from os import listdir
import re
import numpy as np
from sklearn.metrics import f1_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import LeaveOneOut
import matplotlib.pyplot as plt
n_gram_range = (1, 1)
alpha_smoothing = 1e-10
lambdas_best = [1e190, 1]
def parse_doc_line(line):
parsed = re.search(r'\d[\d\s]+\d', line)
return "empty" if parsed is None else parsed[0]
def get_roc_point(clf, x_set, y_set, threshold):
loo = LeaveOneOut()
vectorizer = CountVectorizer(ngram_range=n_gram_range)
roc_predictions = np.empty(0)
answers = np.empty(0)
i = 1
for train_index, test_index in loo.split(x_set):
x_train = [obj for partition in x_set[train_index] for obj in partition]
x_test = [obj for partition in x_set[test_index] for obj in partition]
x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()
x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(x_test):]
y_train, y_test = y_set[train_index], y_set[test_index]
clf.fit(x_train, y_train.flatten())
answers = np.append(answers, y_test)
roc_predictions = np.append(roc_predictions,
['spmsg' if prediction[0] <= threshold else 'legit' for prediction in
clf.predict_proba(x_test)])
print(f'Finished iteration {i} / 10')
i += 1
true_negatives_, true_positives_, false_negatives_, false_positives_ = 0, 0, 0, 0
for prediction, answer in zip(roc_predictions, answers):
if prediction == 'spmsg':
if answer == 'spmsg':
true_positives_ += 1
else:
false_positives_ += 1
else:
if answer == 'legit':
true_negatives_ += 1
else:
false_negatives_ += 1
roc_point_ = (
1 - (true_negatives_ / (true_negatives_ + false_positives_)),
true_positives_ / (true_positives_ + false_negatives_))
return roc_point_
def get_cv_score(clf, x_set, y_set):
loo = LeaveOneOut()
vectorizer = CountVectorizer(ngram_range=n_gram_range)
predictions = np.empty(0)
answers = np.empty(0)
i = 1
for train_index, test_index in loo.split(x_set):
x_train = [obj for partition in x_set[train_index] for obj in partition]
x_test = [obj for partition in x_set[test_index] for obj in partition]
x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()
x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(x_test):]
y_train, y_test = y_set[train_index], y_set[test_index]
clf.fit(x_train, y_train.flatten())
predictions = np.append(predictions, clf.predict(x_test))
answers = np.append(answers, y_test)
print(f'Finished iteration {i} / 10')
i += 1
true_negatives_, true_positives_, false_negatives_, false_positives_ = 0, 0, 0, 0
for prediction, answer in zip(predictions, answers):
if prediction == 'spmsg':
if answer == 'spmsg':
true_positives_ += 1
else:
false_positives_ += 1
else:
if answer == 'legit':
true_negatives_ += 1
else:
false_negatives_ += 1
f1_result = f1_score(answers, predictions, average='macro')
return f1_result, true_negatives_, true_positives_, false_negatives_, false_positives_
parts_X = []
parts_Y = []
for part in range(1, 11):
parts_X.append([])
parts_Y.append([])
for file in listdir(f'messages/part{part}'):
f = open(f'messages/part{part}/{file}', "r")
one = parse_doc_line(f.readline())
f.readline()
two = parse_doc_line(f.readline())
curr_obj = one + " " + two
parts_Y[-1].append(re.findall(r'\D+', file)[0])
parts_X[-1].append(curr_obj)
f.close()
roc_points = []
for thresh in range(0, 11):
roc_points.append(get_roc_point(
MultinomialNB(alpha=alpha_smoothing), np.array(parts_X), np.array(parts_Y), thresh / 10))
f1_points = []
true_positives_list = []
false_positives_list = []
true_negatives_list = []
false_negatives_list = []
lambda_ratios = [1, 1e5, 1e10, 1e20, 1e40, 1e80, 1e160, 1e190]
for lambda_ratio in lambda_ratios:
f1, true_negatives, true_positives, false_negatives, false_positives = get_cv_score(
MultinomialNB(class_prior=(lambda_ratio, 1), alpha=alpha_smoothing), np.array(parts_X), np.array(parts_Y))
print(f'F1 score: {f1}\n True negatives: {true_negatives}\n True positives: {true_positives}\n False negatives: '
f'{false_negatives}\n False positives: {false_positives}')
f1_points.append(f1)
true_positives_list.append(true_positives)
false_positives_list.append(false_positives)
true_negatives_list.append(true_negatives)
false_negatives_list.append(false_negatives)
fig, plts = plt.subplots(3)
plts[0].margins(0.0)
plts[0].set_ylim(ymin=0)
plts[0].plot([point[0] for point in roc_points], [point[1] for point in roc_points])
plts[0].set_ylabel('Roc Curve')
plts[1].set_xscale('log')
plts[1].plot(lambda_ratios, f1_points, '-b')
plts[1].set_ylabel('F1 score')
plts[1].set_xlim(xmin=1)
plts[2].set_xscale('log')
plts[2].set_yscale('log')
plts[2].plot(lambda_ratios, true_positives_list, '-r', label='True positives')
plts[2].plot(lambda_ratios, false_positives_list, '-g', label='False positives')
plts[2].plot(lambda_ratios, true_negatives_list, '-b', label='True negatives')
plts[2].plot(lambda_ratios, false_negatives_list, '-y', label='False negatives')
plts[2].legend(loc="upper right")
plts[2].set_xlabel('Lambda_legit / Lambda_spam')
plts[2].set_xlim(xmin=1)
plt.show()
|
9,435 | 63068a15d750abb29398d687495d6001ba17ab8a | """""""""""""""
Write Data
"""""""""""""""
import json
from city import City
def load_json(file_name='data.json'):
with open(file_name, 'r') as json_fp:
json_data = json_fp.read()
data_arr = json.loads(json_data)
return data_arr
if __name__ == '__main__':
json_file = 'data.json'
load_json(json_file) |
9,436 | 0d022291f9ace02ef1ee5c462657ea6376a0e6a4 | import RPi.GPIO as GPIO
import time
from datetime import datetime
led1 = [('g', 40), ('f', 38), ('a', 36), ('b', 32),
('e', 26), ('d', 24), ('c', 22)]
led2 = [('g', 19), ('f', 15), ('a', 13),
('b', 11), ('e', 7), ('d', 5), ('c', 3)]
numbers = [
('a', 'b', 'c', 'd', 'e', 'f'),
('b', 'c'),
('a', 'b', 'g', 'e', 'd'),
('a', 'b', 'g', 'c', 'd'),
('f', 'g', 'b', 'c'),
('a', 'f', 'g', 'c', 'd'),
('a', 'f', 'g', 'c', 'd', 'e'),
('a', 'b', 'c'),
('a', 'b', 'c', 'd', 'e', 'f', 'g'),
('a', 'b', 'c', 'd', 'f', 'g')
]
reset = 12
minus = 16
more = 18
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
GPIO.setup(reset, GPIO.IN)
GPIO.setup(minus, GPIO.IN)
GPIO.setup(more, GPIO.IN)
def setupLed1():
for port in led1:
GPIO.setup(port[1], GPIO.OUT)
def setupLed2():
for port in led2:
GPIO.setup(port[1], GPIO.OUT)
def statusLed(port, status):
GPIO.output(port, status)
def turnOnAllLeds():
for led in led1:
statusLed(led[1], True)
for led in led2:
statusLed(led[1], True)
def turnOffAllLeds():
for led in led1:
statusLed(led[1], False)
for led in led2:
statusLed(led[1], False)
def turnOffOneLed(led):
for port in led:
statusLed(port[1], False)
def createNumber(ledNumber, number):
turnOffOneLed(ledNumber)
for i in range(10):
if number == i:
for letter in numbers[i]:
for led in ledNumber:
if led[0] == letter:
statusLed(led[1], True)
def createNumber2Leds(led1, led2, number):
if number < 10:
createNumber(led1, 0)
createNumber(led2, number)
else:
decenas = number / 10
unidades = number % 10
createNumber(led1, decenas)
createNumber(led2, unidades)
def titileoNumber2Leds(led1, led2, number):
for i in range(3):
turnOffAllLeds()
time.sleep(0.25)
createNumber2Leds(led1, led2, number)
time.sleep(0.25)
def digiTurno():
contador = 0
titileoNumber2Leds(led1, led2, contador)
while True:
if GPIO.input(reset):
contador = 0
print("-"*20+" RESET "+"-"*20)
print(datetime.now())
titileoNumber2Leds(led1, led2, contador)
print("Numero actual = "+str(contador))
time.sleep(.3)
if GPIO.input(more):
if contador < 99:
contador += 1
else:
print(datetime.now())
contador = 0
print("Numero actual = "+str(contador))
createNumber2Leds(led1, led2, contador)
time.sleep(.3)
if GPIO.input(minus):
if contador == 0:
contador = 99
else:
contador = contador-1
print("Numero actual = "+str(contador))
createNumber2Leds(led1, led2, contador)
time.sleep(.3)
def main():
setupLed1()
setupLed2()
turnOffAllLeds()
try:
print("Presione un boton para continuar")
digiTurno()
except (KeyboardInterrupt, SystemExit):
GPIO.cleanup()
if __name__ == "__main__":
main()
|
9,437 | d190eb27ea146cf99ac7f8d29fb5f769121af60e | M, N = 3, 16
prime = set(range(M, N+1))
for i in range(2, N+1):
prime -= set(range(i**2, N+1, i))
for number in prime:
print(number) |
9,438 | 56afde2a31ad9dddee35e84609dff2eb0fc6fe1a | # Mezzanine Django Framework createdb error on Max OSX 10.9.2
import django
django.version
|
9,439 | 8beafcd4f9c02657a828d8c37f2aecda325ba180 | import pickle
from numpy import *
import math
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import animation
from math import factorial
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
order_range = range(order+1)
half_window = (window_size -1) // 2
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
#pgddpg
with open("3v1_/learning_curves/model-prey-s/seed_pgddpg_0.8/pre_trained_prey_20200910204032/model-prey-s_sucess_record.pkl", 'rb') as fo:
pgddpg_dict_data0 = pickle.load(fo, encoding='bytes')
print(len(pgddpg_dict_data0))
with open("3v1_/learning_curves/model-prey-01/seed_pgddpg_0.8/pre_trained_prey_20200910200405/model-prey-01_sucess_record.pkl", 'rb') as fo:
pgddpg_dict_data1 = pickle.load(fo, encoding='bytes')
print(len(pgddpg_dict_data1))
with open("3v1_/learning_curves/model-prey-02/seed_pgddpg_0.8/pre_trained_prey_20200910200419/model-prey-02_sucess_record.pkl", 'rb') as fo:
pgddpg_dict_data2 = pickle.load(fo, encoding='bytes')
print(len(pgddpg_dict_data2))
with open("3v1_/learning_curves/model-prey-03/seed_pgddpg_0.8/pre_trained_prey_20200910200427/model-prey-03_sucess_record.pkl", 'rb') as fo:
pgddpg_dict_data3 = pickle.load(fo, encoding='bytes')
print(len(pgddpg_dict_data3 ))
with open("3v1_/learning_curves/model-prey-04/seed_pgddpg_0.8/pre_trained_prey_20200910200435/model-prey-04_sucess_record.pkl", 'rb') as fo:
pgddpg_dict_data4 = pickle.load(fo, encoding='bytes')
print(len(pgddpg_dict_data4))
with open("3v1_/learning_curves/model-prey-23/seed_pgddpg_0.8/pre_trained_prey_20200910200115/model-prey-23_sucess_record.pkl", 'rb') as fo:
pgddpg_dict_data5 = pickle.load(fo, encoding='bytes')
print(len(pgddpg_dict_data5))
with open("3v1_/learning_curves/model-prey-06/seed_pgddpg_0.8/pre_trained_prey_20200910200446/model-prey-06_sucess_record.pkl", 'rb') as fo:
pgddpg_dict_data6 = pickle.load(fo, encoding='bytes')
print(len(pgddpg_dict_data6))
with open("3v1_/learning_curves/model-prey-07/seed_pgddpg_0.8/pre_trained_prey_20200910200455/model-prey-07_sucess_record.pkl", 'rb') as fo:
pgddpg_dict_data7 = pickle.load(fo, encoding='bytes')
print(len(pgddpg_dict_data7))
with open("3v1_/learning_curves/model-prey-08/seed_pgddpg_0.8/pre_trained_prey_20200910200504/model-prey-08_sucess_record.pkl", 'rb') as fo:
pgddpg_dict_data8 = pickle.load(fo, encoding='bytes')
print(len(pgddpg_dict_data8))
with open("3v1_/learning_curves/model-prey-09/seed_pgddpg_0.8/pre_trained_prey_20200910200512/model-prey-09_sucess_record.pkl", 'rb') as fo:
pgddpg_dict_data9 = pickle.load(fo, encoding='bytes')
print(len(pgddpg_dict_data9))
#ddpg
# Load the per-seed DDPG success-rate histories: one pickled list per training
# run, identified by a model name and a run timestamp.
# NOTE(review): index 5 maps to "model-prey-23" instead of "model-prey-05" —
# presumably intentional, but worth confirming against the experiment log.
with open("3v1_/learning_curves/model-prey-s/seed_ddpg/20200912103349/model-prey-s_sucess_record.pkl", 'rb') as fo:
    ddpg_dict_data0 = pickle.load(fo, encoding='bytes')
print(len(ddpg_dict_data0))
with open("3v1_/learning_curves/model-prey-01/seed_ddpg/20200912103401/model-prey-01_sucess_record.pkl", 'rb') as fo:
    ddpg_dict_data1 = pickle.load(fo, encoding='bytes')
print(len(ddpg_dict_data1))
with open("3v1_/learning_curves/model-prey-02/seed_ddpg/20200912103408/model-prey-02_sucess_record.pkl", 'rb') as fo:
    ddpg_dict_data2 = pickle.load(fo, encoding='bytes')
print(len(ddpg_dict_data2))
with open("3v1_/learning_curves/model-prey-03/seed_ddpg/20200912103416/model-prey-03_sucess_record.pkl", 'rb') as fo:
    ddpg_dict_data3 = pickle.load(fo, encoding='bytes')
print(len(ddpg_dict_data3 ))
with open("3v1_/learning_curves/model-prey-04/seed_ddpg/20200912103421/model-prey-04_sucess_record.pkl", 'rb') as fo:
    ddpg_dict_data4 = pickle.load(fo, encoding='bytes')
print(len(ddpg_dict_data4))
with open("3v1_/learning_curves/model-prey-23/seed_ddpg/20200912103327/model-prey-23_sucess_record.pkl", 'rb') as fo:
    ddpg_dict_data5 = pickle.load(fo, encoding='bytes')
print(len(ddpg_dict_data5))
with open("3v1_/learning_curves/model-prey-06/seed_ddpg/20200912103427/model-prey-06_sucess_record.pkl", 'rb') as fo:
    ddpg_dict_data6 = pickle.load(fo, encoding='bytes')
print(len(ddpg_dict_data6))
with open("3v1_/learning_curves/model-prey-07/seed_ddpg/20200912103433/model-prey-07_sucess_record.pkl", 'rb') as fo:
    ddpg_dict_data7 = pickle.load(fo, encoding='bytes')
print(len(ddpg_dict_data7))
with open("3v1_/learning_curves/model-prey-08/seed_ddpg/20200912103440/model-prey-08_sucess_record.pkl", 'rb') as fo:
    ddpg_dict_data8 = pickle.load(fo, encoding='bytes')
print(len(ddpg_dict_data8))
with open("3v1_/learning_curves/model-prey-09/seed_ddpg/20200912103446/model-prey-09_sucess_record.pkl", 'rb') as fo:
    ddpg_dict_data9 = pickle.load(fo, encoding='bytes')
print(len(ddpg_dict_data9))
#maddpg
# Same layout for the MADDPG baselines (different run timestamps).
with open("3v1_/learning_curves/model-prey-s/seed_maddpg/20200910205027/model-prey-s_sucess_record.pkl", 'rb') as fo:
    maddpg_dict_data0 = pickle.load(fo, encoding='bytes')
print(len(maddpg_dict_data0))
with open("3v1_/learning_curves/model-prey-01/seed_maddpg/20200910205033/model-prey-01_sucess_record.pkl", 'rb') as fo:
    maddpg_dict_data1 = pickle.load(fo, encoding='bytes')
print(len(maddpg_dict_data1))
with open("3v1_/learning_curves/model-prey-02/seed_maddpg/20200910205040/model-prey-02_sucess_record.pkl", 'rb') as fo:
    maddpg_dict_data2 = pickle.load(fo, encoding='bytes')
print(len(maddpg_dict_data2))
with open("3v1_/learning_curves/model-prey-03/seed_maddpg/20200910205046/model-prey-03_sucess_record.pkl", 'rb') as fo:
    maddpg_dict_data3 = pickle.load(fo, encoding='bytes')
print(len(maddpg_dict_data3 ))
with open("3v1_/learning_curves/model-prey-04/seed_maddpg/20200910205052/model-prey-04_sucess_record.pkl", 'rb') as fo:
    maddpg_dict_data4 = pickle.load(fo, encoding='bytes')
print(len(maddpg_dict_data4))
with open("3v1_/learning_curves/model-prey-23/seed_maddpg/20200910205019/model-prey-23_sucess_record.pkl", 'rb') as fo:
    maddpg_dict_data5 = pickle.load(fo, encoding='bytes')
print(len(maddpg_dict_data5))
with open("3v1_/learning_curves/model-prey-06/seed_maddpg/20200910205104/model-prey-06_sucess_record.pkl", 'rb') as fo:
    maddpg_dict_data6 = pickle.load(fo, encoding='bytes')
print(len(maddpg_dict_data6))
with open("3v1_/learning_curves/model-prey-07/seed_maddpg/20200910205135/model-prey-07_sucess_record.pkl", 'rb') as fo:
    maddpg_dict_data7 = pickle.load(fo, encoding='bytes')
print(len(maddpg_dict_data7))
with open("3v1_/learning_curves/model-prey-08/seed_maddpg/20200910205147/model-prey-08_sucess_record.pkl", 'rb') as fo:
    maddpg_dict_data8 = pickle.load(fo, encoding='bytes')
print(len(maddpg_dict_data8))
with open("3v1_/learning_curves/model-prey-09/seed_maddpg/20200910205155/model-prey-09_sucess_record.pkl", 'rb') as fo:
    maddpg_dict_data9 = pickle.load(fo, encoding='bytes')
print(len(maddpg_dict_data9))
# Smooth every success-rate curve with a Savitzky-Golay filter
# (window = 5 samples, polynomial order 3), then plot all seeds together.
smooth_neighbor=5
start=0
# end=min(len(pgddpg_dict_data0),len(pgddpg_dict_data1),len(pgddpg_dict_data2),len(pgddpg_dict_data3),len(pgddpg_dict_data4),len(pgddpg_dict_data5),len(pgddpg_dict_data6),len(pgddpg_dict_data7),len(pgddpg_dict_data8),len(pgddpg_dict_data9),)
# Fixed truncation length so every curve covers the same range.
end=400
ddpg_vs_prey00 = savitzky_golay(np.array(ddpg_dict_data0[start:end]), smooth_neighbor, 3)
ddpg_vs_prey01 = savitzky_golay(np.array(ddpg_dict_data1[start:end]), smooth_neighbor, 3)
ddpg_vs_prey02 = savitzky_golay(np.array(ddpg_dict_data2[start:end]), smooth_neighbor, 3)
ddpg_vs_prey03 = savitzky_golay(np.array(ddpg_dict_data3[start:end]), smooth_neighbor, 3)
ddpg_vs_prey04 = savitzky_golay(np.array(ddpg_dict_data4[start:end]), smooth_neighbor, 3)
ddpg_vs_prey05 = savitzky_golay(np.array(ddpg_dict_data5[start:end]), smooth_neighbor, 3)
ddpg_vs_prey06 = savitzky_golay(np.array(ddpg_dict_data6[start:end]), smooth_neighbor, 3)
ddpg_vs_prey07 = savitzky_golay(np.array(ddpg_dict_data7[start:end]), smooth_neighbor, 3)
ddpg_vs_prey08 = savitzky_golay(np.array(ddpg_dict_data8[start:end]), smooth_neighbor, 3)
ddpg_vs_prey09 = savitzky_golay(np.array(ddpg_dict_data9[start:end]), smooth_neighbor, 3)
maddpg_vs_prey00 = savitzky_golay(np.array(maddpg_dict_data0[start:end]), smooth_neighbor, 3)
maddpg_vs_prey01 = savitzky_golay(np.array(maddpg_dict_data1[start:end]), smooth_neighbor, 3)
maddpg_vs_prey02 = savitzky_golay(np.array(maddpg_dict_data2[start:end]), smooth_neighbor, 3)
maddpg_vs_prey03 = savitzky_golay(np.array(maddpg_dict_data3[start:end]), smooth_neighbor, 3)
maddpg_vs_prey04 = savitzky_golay(np.array(maddpg_dict_data4[start:end]), smooth_neighbor, 3)
maddpg_vs_prey05 = savitzky_golay(np.array(maddpg_dict_data5[start:end]), smooth_neighbor, 3)
maddpg_vs_prey06 = savitzky_golay(np.array(maddpg_dict_data6[start:end]), smooth_neighbor, 3)
maddpg_vs_prey07 = savitzky_golay(np.array(maddpg_dict_data7[start:end]), smooth_neighbor, 3)
maddpg_vs_prey08 = savitzky_golay(np.array(maddpg_dict_data8[start:end]), smooth_neighbor, 3)
maddpg_vs_prey09 = savitzky_golay(np.array(maddpg_dict_data9[start:end]), smooth_neighbor, 3)
pgddpg_vs_prey00 = savitzky_golay(np.array(pgddpg_dict_data0[start:end]), smooth_neighbor, 3)
pgddpg_vs_prey01 = savitzky_golay(np.array(pgddpg_dict_data1[start:end]), smooth_neighbor, 3)
pgddpg_vs_prey02 = savitzky_golay(np.array(pgddpg_dict_data2[start:end]), smooth_neighbor, 3)
pgddpg_vs_prey03 = savitzky_golay(np.array(pgddpg_dict_data3[start:end]), smooth_neighbor, 3)
pgddpg_vs_prey04 = savitzky_golay(np.array(pgddpg_dict_data4[start:end]), smooth_neighbor, 3)
pgddpg_vs_prey05 = savitzky_golay(np.array(pgddpg_dict_data5[start:end]), smooth_neighbor, 3)
pgddpg_vs_prey06 = savitzky_golay(np.array(pgddpg_dict_data6[start:end]), smooth_neighbor, 3)
pgddpg_vs_prey07 = savitzky_golay(np.array(pgddpg_dict_data7[start:end]), smooth_neighbor, 3)
pgddpg_vs_prey08 = savitzky_golay(np.array(pgddpg_dict_data8[start:end]), smooth_neighbor, 3)
pgddpg_vs_prey09 = savitzky_golay(np.array(pgddpg_dict_data9[start:end]), smooth_neighbor, 3)
print(end)
# x axis: one recorded point every 100 episodes.
zz = range(0, end-start)
zz=np.multiply(100, zz)
#ax1 = plt.subplot(2,1,1)
plt.figure()
# The first three plot calls (pgddpg, ddpg, maddpg) determine the three
# legend entries added at the bottom — keep this order.
#pgmaddpg
plt.plot(zz, pgddpg_vs_prey00, label='pgddpg_vs_prey00', linewidth=1, linestyle = "dashed",#prey-s
         color='r', marker='o', markerfacecolor='red', markersize=2)
#ddpg
plt.plot(zz, ddpg_vs_prey00, label='ddpg_vs_prey00', linewidth=1, linestyle = "dashed",
         color='b', marker='v', markerfacecolor='red', markersize=2)
#maddpg
plt.plot(zz, maddpg_vs_prey00, label='maddpg_vs_prey00', linewidth=1, linestyle = "dashed",#prey-s
         color='g', marker='.', markerfacecolor='red', markersize=2)
plt.plot(zz, pgddpg_vs_prey01, label='pgddpg_vs_prey01', linewidth=1, linestyle = "dashed",
         color='r', marker='o', markerfacecolor='red', markersize=2)
plt.plot(zz, pgddpg_vs_prey02, label='pgddpg_vs_prey02', linewidth=1, linestyle = "dashed",
         color='r', marker='o', markerfacecolor='red', markersize=2)
plt.plot(zz, pgddpg_vs_prey03, label='pgddpg_vs_prey03', linewidth=1, linestyle = "dashed",
         color='r', marker='o', markerfacecolor='red', markersize=2)
plt.plot(zz, pgddpg_vs_prey04, label='pgddpg_vs_prey04', linewidth=1, linestyle = "dashed",
         color='r', marker='o', markerfacecolor='red', markersize=2)
plt.plot(zz, pgddpg_vs_prey05, label='pgddpg_vs_prey05', linewidth=1, linestyle = "dashed",#prey-23
         color='r', marker='o', markerfacecolor='red', markersize=2)
plt.plot(zz, pgddpg_vs_prey06, label='pgddpg_vs_prey06', linewidth=1, linestyle = "dashed",
         color='r', marker='o', markerfacecolor='red', markersize=2)
plt.plot(zz, pgddpg_vs_prey07, label='pgddpg_vs_prey07', linewidth=1, linestyle = "dashed",
         color='r', marker='o', markerfacecolor='red', markersize=2)
plt.plot(zz, pgddpg_vs_prey08, label='pgddpg_vs_prey08', linewidth=1, linestyle = "dashed",
         color='r', marker='o', markerfacecolor='red', markersize=2)
plt.plot(zz, pgddpg_vs_prey09, label='pgddpg_vs_prey09', linewidth=1, linestyle = "dashed",
         color='r', marker='o', markerfacecolor='red', markersize=2)
# plt.tick_params(labelsize=23)
# font2 = {'family': 'Times New Roman',
# 'weight': 'normal',
# 'size': 30,
# }
# plt.title('pgddpg',font2)
# plt.xlabel('iteration',font2)
# plt.ylabel('avg_success_rate',font2)
# plt.legend()
# plt.show()
#ddpg
# plt.plot(zz, ddpg_vs_prey00, label='ddpg_vs_prey00', linewidth=1, linestyle = "dashed",
# color='b', marker='v', markerfacecolor='red', markersize=2)
plt.plot(zz, ddpg_vs_prey01, label='ddpg_vs_prey01', linewidth=1, linestyle = "dashed",
         color='b', marker='v', markerfacecolor='red', markersize=2)
plt.plot(zz, ddpg_vs_prey02, label='ddpg_vs_prey02', linewidth=1, linestyle = "dashed",
         color='b', marker='v', markerfacecolor='red', markersize=2)
plt.plot(zz, ddpg_vs_prey03, label='ddpg_vs_prey03', linewidth=1, linestyle = "dashed",
         color='b', marker='v', markerfacecolor='red', markersize=2)
plt.plot(zz, ddpg_vs_prey04, label='ddpg_vs_prey04', linewidth=1, linestyle = "dashed",
         color='b', marker='v', markerfacecolor='red', markersize=2)
plt.plot(zz, ddpg_vs_prey05, label='ddpg_vs_prey05', linewidth=1, linestyle = "dashed",
         color='b', marker='v', markerfacecolor='red', markersize=2)
plt.plot(zz, ddpg_vs_prey06, label='ddpg_vs_prey06', linewidth=1, linestyle = "dashed",
         color='b', marker='v', markerfacecolor='red', markersize=2)
plt.plot(zz, ddpg_vs_prey07, label='ddpg_vs_prey07', linewidth=1, linestyle = "dashed",
         color='b', marker='v', markerfacecolor='red', markersize=2)
plt.plot(zz, ddpg_vs_prey08, label='ddpg_vs_prey08', linewidth=1, linestyle = "dashed",
         color='b', marker='v', markerfacecolor='red', markersize=2)
plt.plot(zz, ddpg_vs_prey09, label='ddpg_vs_prey09', linewidth=1, linestyle = "dashed",
         color='b', marker='v', markerfacecolor='red', markersize=2)
# plt.tick_params(labelsize=23)
# font2 = {'family': 'Times New Roman',
# 'weight': 'normal',
# 'size': 30,
# }
# plt.title('ddpg',font2)
# plt.xlabel('iteration',font2)
# plt.ylabel('avg_success_rate',font2)
# plt.legend()
# plt.show()
#maddpg
# plt.plot(zz, maddpg_vs_prey00, label='maddpg_vs_prey00', linewidth=1, linestyle = "dashed",#prey-s
# color='g', marker='.', markerfacecolor='red', markersize=2)
plt.plot(zz, maddpg_vs_prey01, label='maddpg_vs_prey01', linewidth=1, linestyle = "dashed",
         color='g', marker='.', markerfacecolor='red', markersize=2)
plt.plot(zz, maddpg_vs_prey02, label='maddpg_vs_prey02', linewidth=1, linestyle = "dashed",
         color='g', marker='.', markerfacecolor='red', markersize=2)
plt.plot(zz, maddpg_vs_prey03, label='maddpg_vs_prey03', linewidth=1, linestyle = "dashed",
         color='g', marker='.', markerfacecolor='red', markersize=2)
plt.plot(zz, maddpg_vs_prey04, label='maddpg_vs_prey04', linewidth=1, linestyle = "dashed",
         color='g', marker='.', markerfacecolor='red', markersize=2)
plt.plot(zz, maddpg_vs_prey05, label='maddpg_vs_prey05', linewidth=1, linestyle = "dashed",#prey-23
         color='g', marker='.', markerfacecolor='red', markersize=2)
plt.plot(zz, maddpg_vs_prey06, label='maddpg_vs_prey06', linewidth=1, linestyle = "dashed",
         color='g', marker='.', markerfacecolor='red', markersize=2)
plt.plot(zz, maddpg_vs_prey07, label='maddpg_vs_prey07', linewidth=1, linestyle = "dashed",
         color='g', marker='.', markerfacecolor='red', markersize=2)
plt.plot(zz, maddpg_vs_prey08, label='maddpg_vs_prey08', linewidth=1, linestyle = "dashed",
         color='g', marker='.', markerfacecolor='red', markersize=2)
plt.plot(zz, maddpg_vs_prey09, label='maddpg_vs_prey09', linewidth=1, linestyle = "dashed",
         color='g', marker='.', markerfacecolor='red', markersize=2)
# plt.tick_params(labelsize=23)
# font2 = {'family': 'Times New Roman',
# 'weight': 'normal',
# 'size': 30,
# }
# plt.title('maddpg',font2)
# plt.xlabel('iteration',font2)
# plt.ylabel('avg_success_rate',font2)
# plt.legend()
# plt.show()
# Final figure decoration; the legend lists only one entry per algorithm
# (the first three lines plotted above).
plt.tick_params(labelsize=23)
font2 = {'family': 'Times New Roman',
         'weight': 'normal',
         'size': 30,
         }
plt.title('Different Seeds',font2)
plt.xlabel('Episodes',font2)
plt.ylabel('avg_success_rate',font2)
plt.legend(labels =[r"pgddpg($\beta=0.8$) vs preys",r"ddpg($\alpha=1$) vs preys",r"maddpg($\alpha=5$) vs preys"])
plt.show()
|
class Line:
    """A 2-D line segment defined by two (x, y) coordinate pairs."""

    def __init__(self, coor1, coor2):
        # Endpoints as (x, y) tuples.
        self.coor1 = coor1
        self.coor2 = coor2

    def distance(self):
        """Return the Euclidean length of the segment."""
        dx = self.coor2[0] - self.coor1[0]
        dy = self.coor2[1] - self.coor1[1]
        return (dx ** 2 + dy ** 2) ** 0.5

    # BUG FIX: the original defined slope() twice — a dead `pass` stub that
    # was silently shadowed by the real implementation. The stub is removed.
    def slope(self):
        """Return dy/dx; raises ZeroDivisionError for a vertical segment."""
        return (self.coor2[1] - self.coor1[1]) / (self.coor2[0] - self.coor1[0])

    def __str__(self):
        # BUG FIX: added the missing space after "slope is" in the report.
        return f'The distance between A and B is: {self.distance()} and the slope is {self.slope()}'
# Quick manual checks of the Line class.
line1 = Line((3,4),(5,6))
li = Line((3,2),(8,10))
print(li.distance())
print(line1.coor1[0])
print(line1.distance())
print(li)
class Cylinder:
    """Right circular cylinder. Uses pi = 3.14 to match the exercise."""

    pi = 3.14

    def __init__(self, height=1, radius=1):
        self.height = height
        self.radius = radius

    def volume(self):
        """Return pi * r^2 * h."""
        return self.pi * self.radius ** 2 * self.height

    def surface_area(self):
        """Return the total surface area: two end caps plus the lateral wall.

        BUG FIX: the original returned only 2*pi*r^2 (the two caps) and
        omitted the lateral area 2*pi*r*h; the total is 2*pi*r*(r + h).
        """
        return 2 * self.pi * self.radius * (self.radius + self.height)

    def __str__(self):
        return f'The Volume is {self.volume()} and the surface_area is {self.surface_area()}'
# Quick manual check of the Cylinder class.
c = Cylinder(2,3)
print(c)
class Account:
    """A minimal bank account supporting deposits and guarded withdrawals."""

    def __init__(self, name, balance):
        # Owner name and current balance.
        self.name = name
        self.balance = balance

    def deposit(self, money):
        """Add money to the balance and confirm the deposit."""
        self.balance = self.balance + money
        return 'Deposit accepted'

    def withdraw(self, moneytaken):
        """Withdraw moneytaken if funds suffice; report the outcome."""
        if moneytaken > self.balance:
            # Guard clause: insufficient funds leave the balance untouched.
            return 'Funds Unavailable'
        self.balance = self.balance - moneytaken
        return 'Withdraw Accepted'

    def __str__(self):
        return f'Account owner: {self.name}\nAccount balance: {self.balance}$'
# Quick manual checks of the Account class.
acct1 = Account('jose',100)
print(acct1)
print(acct1.withdraw(1000))
print(acct1.balance)
print(acct1.deposit(101))
print(acct1.balance)
|
9,441 | 8f854f4f2c807f988945af4dc53dba93cfb31168 | ## Author: Aleem Juma
import os
from app import app
import pandas as pd
# read in the quotes database
q = pd.read_csv(os.path.join('app','data','quotes_all.csv'), sep=';', skiprows=1, header=0)
# there are a few quote genres that don't occur in the model vocab
# replace them with appropriate words so the similarity search works
replace = {
    'movingon':'moving',
    'fathersday': 'fathers',
    'memorialday': 'memorial',
    'mothersday': 'mothers',
    'newyears': 'year',
    'saintpatricksday': 'ireland',
    'valentinesday': 'valentine'
}
q['GENRE'].replace(to_replace=replace, inplace=True)
import spacy
# Medium English model with word vectors (required for .similarity()).
nlp = spacy.load('en_core_web_md')
# cache the computed tokens for the genres in the dataset
cache = {genre:nlp(genre) for genre in q.GENRE.unique()}
def get_similarity(word1, word2):
    '''
    Returns a similarity score between two words
    '''
    # BUG FIX: cache.get(w, nlp(w)) evaluated nlp(w) eagerly on every call,
    # even on a cache hit, so the precomputed token cache saved nothing.
    # Test membership first and only run the pipeline on a miss.
    tok1 = cache[word1] if word1 in cache else nlp(word1)
    tok2 = cache[word2] if word2 in cache else nlp(word2)
    return tok1.similarity(tok2)
def get_random_word():
    '''
    Returns a random category label from the data
    '''
    # Sample a single row of the GENRE column and unwrap the scalar value.
    return q['GENRE'].sample(1).iloc[0]
def get_closest_words(word, choices, n=1):
    '''
    Returns the n closest matches in the model vocab

    Parameters:
        word     word to search
        choices  available matches
        n        number of results to return

    Returns:
        A list of n tuples in the form (word (str), similarity (float))
    '''
    app.logger.info(f'Finding closest words to "{word}"')
    if word in choices:
        # Exact label already present: perfect self-match.
        return [(word, 1.0)]
    if word not in nlp.vocab.strings:
        app.logger.info(f'Not in model vocab: "{word}"')
        # Unknown word: fall back to a random genre, keeping the query at 0.0.
        return [(get_random_word(), 1.0), (word, 0.0)]
    # Score every candidate, then take the n highest-similarity pairs.
    similarities = [(choice, get_similarity(word, choice)) for choice in choices]
    return sorted(similarities, key=lambda pair: pair[1])[::-1][:n]
def find_matching_quote(genre, top_n=5):
    '''
    Returns a matching quote and up to 5 of the most similar genres with similarity measures

    Paramters:
        genre   genre to match

    Returns:
        (str)  Quote
        (str)  Author
        (list) List of tuples in the form (word (str), simliarity (float))
    '''
    # Rank the dataset's genres by similarity to the requested one.
    matched_genres = get_closest_words(genre, q.GENRE.unique(), top_n)
    best_genre = matched_genres[0][0]
    app.logger.info(f'Finding quote for: "{best_genre}"')
    # Draw one random quote from the best-matching genre.
    row = q[q['GENRE'] == best_genre].sample(1).iloc[0]
    return row.QUOTE, row.AUTHOR, matched_genres
|
9,442 | 82c3bde5746d04c126a93851844f775e7ce65f4b | import torch
import numpy as np
# source: https://github.com/krasserm/bayesian-machine-learning/blob/master/gaussian_processes.ipynb
def kernel(X1, X2, l=1.0, sigma_f=1.0):
    '''
    Isotropic squared exponential (RBF) kernel.

    Computes a covariance matrix from points in X1 and X2.

    Args:
        X1: Array of m points (m x d).
        X2: Array of n points (n x d).
        l: Length-scale parameter.
        sigma_f: Vertical variation (signal std-dev) parameter.

    Returns:
        Covariance matrix (m x n).
    '''
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, broadcast to an (m, n) grid.
    sq_norm1 = np.sum(X1 ** 2, 1)[:, None]
    sq_norm2 = np.sum(X2 ** 2, 1)[None, :]
    sqdist = sq_norm1 + sq_norm2 - 2.0 * (X1 @ X2.T)
    return sigma_f ** 2 * np.exp(-0.5 * sqdist / l ** 2)
# source: # https://github.com/krasserm/bayesian-machine-learning/blob/master/gaussian_processes.ipynb
def posterior_predictive(X_s, X_train, Y_train, l=1.0, sigma_f=1.0, sigma_y=1e-8):
    '''
    Computes the sufficient statistics of the GP posterior predictive
    distribution from m training data X_train and Y_train and n new inputs X_s.

    Args:
        X_s: New input locations (n x d).
        X_train: Training locations (m x d).
        Y_train: Training targets (m x 1).
        l: Kernel length parameter.
        sigma_f: Kernel vertical variation parameter.
        sigma_y: Noise parameter.

    Returns:
        Posterior mean vector (n x d) and covariance matrix (n x n).
    '''
    # Train-train covariance with observation noise on the diagonal.
    K = kernel(X_train, X_train, l, sigma_f) + sigma_y**2 * np.eye(len(X_train))
    # Test-train cross-covariance and (noisy) test-test covariance.
    K_s = kernel(X_s, X_train, l, sigma_f)
    K_ss = kernel(X_s, X_s, l, sigma_f) + sigma_y**2 * np.eye(len(X_s))
    # Standard GP predictive equations: mu = K_s K^-1 y,
    # cov = K_ss - K_s K^-1 K_s^T. solve() avoids forming the inverse.
    mu_s = np.matmul(K_s, np.linalg.solve(K, Y_train))
    cov_s = K_ss - np.matmul(K_s, np.linalg.solve(K, K_s.T))
    return mu_s, cov_s
class CNP(torch.nn.Module):
    """Conditional Neural Process: mean-aggregated context representation."""

    def __init__(self, in_dim, hidden_dim, query_dim, out_dim, en_layer, dec_layer):
        super(CNP, self).__init__()
        # Encoder: single Linear when en_layer == 1, otherwise an MLP of
        # en_layer Linear layers with ReLU between them.
        if en_layer == 1:
            self.encoder = torch.nn.Linear(in_dim, hidden_dim)
        else:
            enc = [torch.nn.Linear(in_dim, hidden_dim), torch.nn.ReLU()]
            for _ in range(en_layer - 2):
                enc.extend([torch.nn.Linear(hidden_dim, hidden_dim), torch.nn.ReLU()])
            enc.append(torch.nn.Linear(hidden_dim, hidden_dim))
            self.encoder = torch.nn.Sequential(*enc)
        # Decoder input is the aggregated representation plus the query.
        if dec_layer == 1:
            self.decoder = torch.nn.Linear(hidden_dim + query_dim, out_dim)
        else:
            dec = [torch.nn.Linear(hidden_dim + query_dim, hidden_dim), torch.nn.ReLU()]
            for _ in range(dec_layer - 2):
                dec.extend([torch.nn.Linear(hidden_dim, hidden_dim), torch.nn.ReLU()])
            dec.append(torch.nn.Linear(hidden_dim, out_dim))
            self.decoder = torch.nn.Sequential(*dec)

    def forward(self, context, query, key=None):
        """Encode the context set, average it, and decode at each query."""
        query = query.view(query.shape[0], -1)
        # Encode every context point, then aggregate by the mean.
        summary = self.encoder(context).mean(dim=0)
        # Tile the single summary vector once per query point.
        summary = torch.stack([summary] * query.shape[0], dim=0)
        joined = torch.cat([summary, query], dim=1)
        return self.decoder(joined)
class ANP(torch.nn.Module):
    """Attentive Neural Process: cross-attention over encoded context."""

    def __init__(self, in_dim, hidden_dim, query_dim, out_dim, en_layer, dec_layer, nhead):
        super(ANP, self).__init__()
        # Encoder: single Linear when en_layer == 1, otherwise an MLP.
        if en_layer == 1:
            self.encoder = torch.nn.Linear(in_dim, hidden_dim)
        else:
            enc = [torch.nn.Linear(in_dim, hidden_dim), torch.nn.ReLU()]
            for _ in range(en_layer - 2):
                enc.extend([torch.nn.Linear(hidden_dim, hidden_dim), torch.nn.ReLU()])
            enc.append(torch.nn.Linear(hidden_dim, hidden_dim))
            self.encoder = torch.nn.Sequential(*enc)
        # Decoder maps the attended representation to the output.
        if dec_layer == 1:
            self.decoder = torch.nn.Linear(hidden_dim, out_dim)
        else:
            dec = [torch.nn.Linear(hidden_dim, hidden_dim), torch.nn.ReLU()]
            for _ in range(dec_layer - 2):
                dec.extend([torch.nn.Linear(hidden_dim, hidden_dim), torch.nn.ReLU()])
            dec.append(torch.nn.Linear(hidden_dim, out_dim))
            self.decoder = torch.nn.Sequential(*dec)
        # One shared linear projection for both attention keys and queries.
        self.projector = torch.nn.Linear(query_dim, hidden_dim)
        self.attention = torch.nn.MultiheadAttention(embed_dim=hidden_dim, num_heads=nhead)

    def forward(self, context, key, query):
        query = query.view(query.shape[0], -1)
        key = key.view(key.shape[0], -1)
        # Encoded context points serve as the attention values (S, 1, E).
        values = self.encoder(context).unsqueeze(1)
        # Project key/query coordinates into the attention embedding space.
        q_proj = self.projector(query).unsqueeze(1)
        k_proj = self.projector(key).unsqueeze(1)
        attended, _ = self.attention(query=q_proj, key=k_proj, value=values)
        return self.decoder(attended.squeeze(1))
class ANPv2(torch.nn.Module):
    """ANP variant with separate 2-layer MLPs for attention keys/queries."""

    def __init__(self, in_dim, hidden_dim, query_dim, out_dim, en_layer, dec_layer, nhead):
        super(ANPv2, self).__init__()
        # Encoder: single Linear when en_layer == 1, otherwise an MLP.
        if en_layer == 1:
            self.encoder = torch.nn.Linear(in_dim, hidden_dim)
        else:
            enc = [torch.nn.Linear(in_dim, hidden_dim), torch.nn.ReLU()]
            for _ in range(en_layer - 2):
                enc.extend([torch.nn.Linear(hidden_dim, hidden_dim), torch.nn.ReLU()])
            enc.append(torch.nn.Linear(hidden_dim, hidden_dim))
            self.encoder = torch.nn.Sequential(*enc)
        # Decoder maps the attended representation to the output.
        if dec_layer == 1:
            self.decoder = torch.nn.Linear(hidden_dim, out_dim)
        else:
            dec = [torch.nn.Linear(hidden_dim, hidden_dim), torch.nn.ReLU()]
            for _ in range(dec_layer - 2):
                dec.extend([torch.nn.Linear(hidden_dim, hidden_dim), torch.nn.ReLU()])
            dec.append(torch.nn.Linear(hidden_dim, out_dim))
            self.decoder = torch.nn.Sequential(*dec)
        # Unlike ANP, keys and queries get their own (non-shared) projections.
        self.key_mlp = torch.nn.Sequential(
            torch.nn.Linear(query_dim, hidden_dim),
            torch.nn.ReLU(),
            torch.nn.Linear(hidden_dim, hidden_dim)
        )
        self.query_mlp = torch.nn.Sequential(
            torch.nn.Linear(query_dim, hidden_dim),
            torch.nn.ReLU(),
            torch.nn.Linear(hidden_dim, hidden_dim)
        )
        self.attention = torch.nn.MultiheadAttention(embed_dim=hidden_dim, num_heads=nhead)

    def forward(self, context, key, query):
        query = query.view(query.shape[0], -1)
        key = key.view(key.shape[0], -1)
        # Encoded context points serve as the attention values (S, 1, E).
        values = self.encoder(context).unsqueeze(1)
        q_proj = self.query_mlp(query).unsqueeze(1)
        k_proj = self.key_mlp(key).unsqueeze(1)
        attended, _ = self.attention(query=q_proj, key=k_proj, value=values)
        return self.decoder(attended.squeeze(1))
|
9,443 | b9a262bd6ddbca3b214825a473d870e70e8b5e57 | #!/usr/bin/env python
import fileinput
import sys
import os
from argparse import ArgumentParser
from probin.model.composition import multinomial as mn
from probin.dna import DNA
from Bio import SeqIO
from corrbin.misc import all_but_index, Uniq_id, GenomeGroup
from corrbin.multinomial import Experiment
from corrbin.contig_generation import SampleSetting
def main(open_name_file, dir_path, kmer_length, x_set):
    """Parse a genome-name listing, load reference genomes from dir_path,
    sample contigs per the SampleSetting, and write scores to stdout.

    Parameters:
        open_name_file  iterable of lines in the family/genus/entry format
        dir_path        local directory holding one sub-directory per genome
        kmer_length     k-mer size used to build the DNA signature hash
        x_set           SampleSetting controlling contig sampling
    """
    groups = []
    DNA.generate_kmer_hash(kmer_length)
    # Read the file with all names, divide them into groups
    # Lines are prefix-tagged; 'family_name:' and 'genus_name:' set the
    # current context that subsequent 'entry:' lines are attached to.
    for line in open_name_file:
        if line[0:12] == 'family_name:':
            family = line.split('\t')[1].strip()
        elif line[0:11] == 'genus_name:':
            genus = line.split('\t')[1].strip()
            new_group = GenomeGroup(genus)
            new_group.family = family
            groups.append(new_group)
        elif line[0:6] == 'entry:':
            genome_name = line.split('\t')[2].strip()
            genome_species = line.split('\t')[1].strip()
            meta_genome = {'id': genome_name,
                           'species': genome_species,
                           'genus': genus,
                           'family': family,
                           'file_name': genome_name
                           }
            groups[-1].genome_data.append(meta_genome)
    # Each genome in a group is a bin, fit parameters to all bins
    os.chdir(dir_path)
    for group in groups:
        for genome_data in group.genome_data:
            dir_name = genome_data['file_name']
            fasta_files = os.listdir(dir_name)
            for fasta_file in fasta_files:
                genome_file = open(dir_name + '/' + fasta_file)
                identifier = genome_file.readline()
                # Only use non-plasmid genomes
                # Some bacterial genomes contain more than 1 chromosonme,
                # but assumed not more than 2
                if identifier.find('plasmid') == -1 and identifier.find('chromosome 2') == -1:
                    genome_file.close() #Close and reopen the same file
                    genome_file = open(dir_name + '/' + fasta_file)
                    genome_seq = list(SeqIO.parse(genome_file, "fasta"))
                    if len(genome_seq) > 1:
                        sys.stderr.write("Warning! The file " + fasta_file + " in directory " + dir_name + " contained more than one sequence, ignoring all but the first!" + os.linesep)
                    genome = DNA(id = dir_name, seq= str(genome_seq[0].seq))
                    genome.calculate_signature()
                    genome.genus = genome_data['genus']
                    genome.species = genome_data['species']
                    genome.family = genome_data['family']
                    group.genomes.append(genome)
                genome_file.close()
    # For each bin, generate a number of contigs,
    # re-calculate parameters for that bin without contig-section.
    # Further score this contig against all bins, keep within-group
    # scores separate from outside-group scores.
    all_scores = []
    id_generator = Uniq_id(1000)
    for group_index in range(len(groups)):
        group = groups[group_index]
        rest_groups = all_but_index(groups, group_index)
        test = Experiment(x_set, group, rest_groups, id_generator)
        group_scores = test.execute()
        all_scores.append(group_scores)
    # Tab-separated output: header row, then one score line per contig.
    sys.stdout.write("p_value\tcontig_family\tcontig_genus\tcontig_species\tcontig_genome\tcompare_family\tcompare_genus\tcompare_species\tcompare_genome\tcontig_id" + os.linesep)
    for group_scores in all_scores:
        for genome_scores in group_scores:
            for score in genome_scores:
                sys.stdout.write(str(score) + '\n')
if __name__=="__main__":
parser = ArgumentParser()
parser.add_argument('files', nargs='*',
help='specify input files, default is stdin')
parser.add_argument('-o', '--output',
help='specify the output file. The default is stdout')
parser.add_argument('-v', '--verbose', action='store_true',
help='information written to stderr during execution.')
parser.add_argument('-m', '--model', default="multinomial", type=str,
help='specify the model to use for calculating the probabilities, default is multinomial')
parser.add_argument('-k', '--kmer_length', default=4, type=int,
help='specify the kmer length, default is 4')
parser.add_argument('-d', '--directory_path', default='/home/johannes/repos/DATA/reference_genomes_ncbi',
type=str, help='specify the path to where the reference genomes are located locally')
parser.add_argument('-c', '--no_contigs', default=100, type=int,
help='Specify the number of contigs to be sampled from each group. This may be only approximate due to what priority is chosen')
parser.add_argument('-p', '--priority', default="genomes",
type=str, help='specify the prioritized way of sampling contigs. Specify "groups" to make sure each group is sampled exactly the number of times specified by no_contigs, distributed randomly over the genomes present in each group, or specify "genomes" to make sure each genome within a certain group contributes with exactly the same number of contigs.')
parser.add_argument('--contig_min_length', default=1000, type=int, help='Specify the minimum length for contigs')
parser.add_argument('--contig_max_length', default=1000, type=int, help='Specify the maximum length for contigs')
parser.add_argument('--debug_mode', action='store_true', help='In debug mode, all contigs will start at the first nucleotide, making testing possible.')
args = parser.parse_args()
if args.output and args.output != '-':
sys.stdout = open(args.output, 'w')
name_file_handle = fileinput.input(args.files)
if args.verbose:
sys.stderr.write("Number of genomes read: %i %s" % (len(genomes),os.linesep))
ex_setting = SampleSetting(args.priority, args.no_contigs, args.contig_min_length, args.contig_max_length, args.debug_mode)
main(name_file_handle, args.directory_path, args.kmer_length, ex_setting)
name_file_handle.close()
|
9,444 | 814191a577db279389975e5a02e72cd817254275 | """
Version information for NetworkX, created during installation.
Do not add this file to the repository.
"""
import datetime

# Release identifiers written at build time.
version = '2.3'
date = 'Thu Apr 11 20:57:18 2019'
# Was NetworkX built from a development version? If so, remember that the major
# and minor versions reference the "target" (rather than "current") release.
dev = False
# Format: (name, major, min, revision)
version_info = ('networkx', '2', '3', None)
# Format: a 'datetime.datetime' instance
date_info = datetime.datetime(2019, 4, 11, 20, 57, 18)
# Format: (vcs, vcs_tuple)
vcs_info = (None, (None, None))
|
9,445 | 920f00632599945397364dd0f52f21234e17f9ef | from context import vicemergencyapi
from vicemergencyapi.vicemergency import VicEmergency
from geographiclib.geodesic import Geodesic
from shapely.geometry import Point
def geoDistance(p1, p2):
    """Geodesic distance in metres between two shapely Points (x=lon, y=lat)."""
    inverse = Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)
    return inverse['s12']
# Melbourne CBD as a shapely Point (x = longitude, y = latitude).
melbourne = Point(144.962272, -37.812274)
def compare(f):
    # Sort key: distance (metres) from the incident's location to Melbourne.
    return geoDistance(f.getLocation(), melbourne)
# Print every current incident, nearest to Melbourne first.
for i in sorted(VicEmergency.getItems(), key=compare):
    print(i.properties["sourceTitle"])
    print(i.properties["category1"])
    print(i.properties["location"])
    print("{:.0f}km".format(geoDistance(i.getLocation(), melbourne) / 1000))
|
9,446 | fbd8af4ab3e4ebdcb07509db776d38f9c26fd06a | #
# MIT License
#
# Copyright (c) 2018 Matteo Poggi m.poggi@unibo.it
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from layers import *
from utils import *
from collections import namedtuple
# Hyper-parameter bundle passed into trinet (encoder type, input size,
# batching, training schedule and loss weights).
trinet_parameters = namedtuple('parameters',
                        'encoder, '
                        'height, width, '
                        'batch_size, '
                        'num_threads, '
                        'num_epochs, '
                        'alpha_image_loss, '
                        'disp_gradient_loss_weight, '
                        'lr_loss_weight, '
                        'full_summary')
class trinet(object):
def __init__(self,params, mode, left, central, right, reuse_variables=None, model_index=0, net='vgg'):
self.params = params
self.mode = mode
self.model_collection = ['model_0']
self.left = left
self.right = right
self.central = central
self.reuse_variables = reuse_variables
self.model_index = model_index
self.build_model(net)
self.build_outputs()
if self.mode == 'test':
return
self.build_losses()
self.build_summaries()
def gradient_x(self, img):
gx = img[:,:,:-1,:] - img[:,:,1:,:]
return gx
def gradient_y(self, img):
gy = img[:,:-1,:,:] - img[:,1:,:,:]
return gy
def scale_pyramid(self, img, num_scales):
scaled_imgs = [img]
s = tf.shape(img)
h = s[1]
w = s[2]
for i in range(num_scales - 1):
ratio = 2 ** (i + 1)
nh = h // ratio
nw = w // ratio
scaled_imgs.append(tf.image.resize_area(img, [nh, nw]))
return scaled_imgs
    def generate_image_left(self, img, disp):
        """Warp img horizontally by the negated disparity (toward the left view)."""
        return bilinear_sampler_1d_h(img, -disp)
    def generate_image_right(self, img, disp):
        """Warp img horizontally by the disparity (toward the right view)."""
        return bilinear_sampler_1d_h(img, disp)
    def SSIM(self, x, y):
        """Per-patch structural dissimilarity: (1 - SSIM(x, y)) / 2 in [0, 1].

        Local means/variances come from 3x3 average pooling (VALID padding);
        C1 and C2 are the standard SSIM stabilisation constants.
        """
        C1 = 0.01 ** 2
        C2 = 0.03 ** 2
        # Local means.
        mu_x = slim.avg_pool2d(x, 3, 1, 'VALID')
        mu_y = slim.avg_pool2d(y, 3, 1, 'VALID')
        # Local (co)variances via E[x^2] - E[x]^2.
        sigma_x  = slim.avg_pool2d(x ** 2, 3, 1, 'VALID') - mu_x ** 2
        sigma_y  = slim.avg_pool2d(y ** 2, 3, 1, 'VALID') - mu_y ** 2
        sigma_xy = slim.avg_pool2d(x * y , 3, 1, 'VALID') - mu_x * mu_y
        SSIM_n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2)
        SSIM_d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2)
        SSIM = SSIM_n / SSIM_d
        # Map similarity in [-1, 1] to a dissimilarity loss in [0, 1].
        return tf.clip_by_value((1 - SSIM) / 2, 0, 1)
def get_disparity_smoothness(self, disp, pyramid):
disp_gradients_x = [self.gradient_x(d) for d in disp]
disp_gradients_y = [self.gradient_y(d) for d in disp]
image_gradients_x = [self.gradient_x(img) for img in pyramid]
image_gradients_y = [self.gradient_y(img) for img in pyramid]
weights_x = [tf.exp(-tf.reduce_mean(tf.abs(g), 3, keep_dims=True)) for g in image_gradients_x]
weights_y = [tf.exp(-tf.reduce_mean(tf.abs(g), 3, keep_dims=True)) for g in image_gradients_y]
smoothness_x = [disp_gradients_x[i] * weights_x[i] for i in range(4)]
smoothness_y = [disp_gradients_y[i] * weights_y[i] for i in range(4)]
return smoothness_x + smoothness_y
# Build model
    def build_model(self,net):
        """Build image pyramids, encode the central view once, and decode two
        disparity maps (central->right and central->left) from the shared
        features."""
        with tf.variable_scope('model', reuse=self.reuse_variables) as scope:
            # 4-level pyramids for all three views.
            self.left_pyramid = self.scale_pyramid(self.left, 4)
            # if self.mode == 'train':
            self.right_pyramid = self.scale_pyramid(self.right, 4)
            self.central_pyramid = self.scale_pyramid(self.central, 4)
            with tf.variable_scope('shared-encoder'):
                # One encoder pass; both decoders consume the same features.
                features_cr = self.build_encoder(self.central,model_name=net)
                features_cl = features_cr
            with tf.variable_scope('encoder-C2R'):
                self.disp_c2r = self.build_decoder(features_cr,model_name=net)
            with tf.variable_scope('encoder-C2L'):
                self.disp_c2l = self.build_decoder(features_cl,model_name=net)
# Build shared encoder
def build_encoder(self, model_input, model_name='vgg'):
with tf.variable_scope('encoder'):
if model_name == 'vgg':
conv1 = conv_block(model_input, 32, 7) # H/2
conv2 = conv_block(conv1, 64, 5) # H/4
conv3 = conv_block(conv2, 128, 3) # H/8
conv4 = conv_block(conv3, 256, 3) # H/16
conv5 = conv_block(conv4, 512, 3) # H/32
conv6 = conv_block(conv5, 512, 3) # H/64
conv7 = conv_block(conv6, 512, 3) # H/128
return conv7, conv1, conv2, conv3, conv4, conv5, conv6
elif model_name == 'resnet50':
conv1 = conv(model_input, 64, 7, 2) # H/2 - 64D
pool1 = maxpool(conv1, 3) # H/4 - 64D
conv2 = resblock(pool1, 64, 3) # H/8 - 256D
conv3 = resblock(conv2, 128, 4) # H/16 - 512D
conv4 = resblock(conv3, 256, 6) # H/32 - 1024D
conv5 = resblock(conv4, 512, 3) # H/64 - 2048D
return conv5, conv1, pool1, conv2, conv3, conv4
def build_decoder(self, skip, model_name='vgg'):
with tf.variable_scope('decoder'):
if model_name == 'vgg':
upconv7 = upconv(skip[0], 512, 3, 2) #H/64
concat7 = tf.concat([upconv7, skip[6]], 3)
iconv7 = conv(concat7, 512, 3, 1)
upconv6 = upconv(iconv7, 512, 3, 2) #H/32
concat6 = tf.concat([upconv6, skip[5]], 3)
iconv6 = conv(concat6, 512, 3, 1)
upconv5 = upconv(iconv6, 256, 3, 2) #H/16
concat5 = tf.concat([upconv5, skip[4]], 3)
iconv5 = conv(concat5, 256, 3, 1)
upconv4 = upconv(iconv5, 128, 3, 2) #H/8
concat4 = tf.concat([upconv4, skip[3]], 3)
iconv4 = conv(concat4, 128, 3, 1)
disp4 = get_disp(iconv4)
udisp4 = upsample_nn(disp4, 2)
upconv3 = upconv(iconv4, 64, 3, 2) #H/4
concat3 = tf.concat([upconv3, skip[2], udisp4], 3)
iconv3 = conv(concat3, 64, 3, 1)
disp3 = get_disp(iconv3)
udisp3 = upsample_nn(disp3, 2)
upconv2 = upconv(iconv3, 32, 3, 2) #H/2
concat2 = tf.concat([upconv2, skip[1], udisp3], 3)
iconv2 = conv(concat2, 32, 3, 1)
disp2 = get_disp(iconv2)
udisp2 = upsample_nn(disp2, 2)
upconv1 = upconv(iconv2, 16, 3, 2) #H
concat1 = tf.concat([upconv1, udisp2], 3)
iconv1 = conv(concat1, 16, 3, 1)
disp1 = get_disp(iconv1)
elif model_name == 'resnet50':
upconv6 = upconv(skip[0], 512, 3, 2) #H/32
concat6 = tf.concat([upconv6, skip[5]], 3)
iconv6 = conv(concat6, 512, 3, 1)
upconv5 = upconv(iconv6, 256, 3, 2) #H/16
concat5 = tf.concat([upconv5, skip[4]], 3)
iconv5 = conv(concat5, 256, 3, 1)
upconv4 = upconv(iconv5, 128, 3, 2) #H/8
concat4 = tf.concat([upconv4, skip[3]], 3)
iconv4 = conv(concat4, 128, 3, 1)
disp4 = get_disp(iconv4)
udisp4 = upsample_nn(disp4, 2)
upconv3 = upconv(iconv4, 64, 3, 2) #H/4
concat3 = tf.concat([upconv3, skip[2], udisp4], 3)
iconv3 = conv(concat3, 64, 3, 1)
disp3 = get_disp(iconv3)
udisp3 = upsample_nn(disp3, 2)
upconv2 = upconv(iconv3, 32, 3, 2) #H/2
concat2 = tf.concat([upconv2, skip[1], udisp3], 3)
iconv2 = conv(concat2, 32, 3, 1)
disp2 = get_disp(iconv2)
udisp2 = upsample_nn(disp2, 2)
upconv1 = upconv(iconv2, 16, 3, 2) #H
concat1 = tf.concat([upconv1, udisp2], 3)
iconv1 = conv(concat1, 16, 3, 1)
disp1 = get_disp(iconv1)
return disp1, disp2, disp3, disp4
def build_outputs(self):
#self.disparity_cr = self.disp_cr[0][0,:,:,0]
#self.disparity_cl = self.disp_cl[0][0,:,:,0]
#self.warp_left = generate_image_left(self.placeholders['im0'], self.disparity_cl)[0]
#self.warp_right = generate_image_right(self.placeholders['im0'], self.disparity_cr)[0]
# STORE DISPARITIES
with tf.variable_scope('disparities'):
self.disp_lc = [tf.expand_dims(d[:, :, :, 0], 3) for d in self.disp_c2l]
self.disp_cl = [tf.expand_dims(d[:, :, :, 1], 3) for d in self.disp_c2l]
self.disp_cr = [tf.expand_dims(d[:, :, :, 0], 3) for d in self.disp_c2r]
self.disp_rc = [tf.expand_dims(d[:, :, :, 1], 3) for d in self.disp_c2r]
# GENERATE IMAGES
with tf.variable_scope('images'):
self.left_est = [self.generate_image_left(self.central_pyramid[i], self.disp_lc[i]) for i in range(4)]
self.cl_est = [self.generate_image_right(self.left_pyramid[i], self.disp_cl[i]) for i in range(4)]
self.cr_est = [self.generate_image_left(self.right_pyramid[i], self.disp_cr[i]) for i in range(4)]
self.right_est = [self.generate_image_right(self.central_pyramid[i], self.disp_rc[i]) for i in range(4)]
# LR CONSISTENCY
with tf.variable_scope('left-right'):
self.cl_to_lc_disp = [self.generate_image_left(self.disp_cl[i], self.disp_lc[i]) for i in range(4)]
self.lc_to_cl_disp = [self.generate_image_right(self.disp_lc[i], self.disp_cl[i]) for i in range(4)]
self.rc_to_cr_disp = [self.generate_image_left(self.disp_rc[i], self.disp_cr[i]) for i in range(4)]
self.cr_to_rc_disp = [self.generate_image_right(self.disp_cr[i], self.disp_rc[i]) for i in range(4)]
# DISPARITY SMOOTHNESS
with tf.variable_scope('smoothness'):
self.disp_lc_smoothness = self.get_disparity_smoothness(self.disp_lc, self.left_pyramid)
self.disp_cl_smoothness = self.get_disparity_smoothness(self.disp_cl, self.central_pyramid)
self.disp_cr_smoothness = self.get_disparity_smoothness(self.disp_cr, self.central_pyramid)
self.disp_rc_smoothness = self.get_disparity_smoothness(self.disp_rc, self.right_pyramid)
def build_losses(self):
with tf.variable_scope('losses', reuse=self.reuse_variables):
# IMAGE RECONSTRUCTION
# L1
self.l1_left = [tf.abs(self.left_est[i] - self.left_pyramid[i]) for i in range(4)]
self.l1_reconstruction_loss_left = [tf.reduce_mean(l) for l in self.l1_left]
self.l1_right = [tf.abs(self.right_est[i] - self.right_pyramid[i]) for i in range(4)]
self.l1_reconstruction_loss_right = [tf.reduce_mean(l) for l in self.l1_right]
self.l1_cl = [tf.abs(self.cl_est[i] - self.central_pyramid[i]) for i in range(4)]
self.l1_reconstruction_loss_cl = [tf.reduce_mean(l) for l in self.l1_cl]
self.l1_cr = [tf.abs(self.cr_est[i] - self.central_pyramid[i]) for i in range(4)]
self.l1_reconstruction_loss_cr = [tf.reduce_mean(l) for l in self.l1_cr]
# SSIM
self.ssim_left = [self.SSIM(self.left_est[i], self.left_pyramid[i]) for i in range(4)]
self.ssim_loss_left = [tf.reduce_mean(s) for s in self.ssim_left]
self.ssim_right = [self.SSIM(self.right_est[i], self.right_pyramid[i]) for i in range(4)]
self.ssim_loss_right = [tf.reduce_mean(s) for s in self.ssim_right]
self.ssim_cl = [self.SSIM(self.cl_est[i], self.central_pyramid[i]) for i in range(4)]
self.ssim_loss_cl = [tf.reduce_mean(s) for s in self.ssim_cl]
self.ssim_cr = [self.SSIM(self.cr_est[i], self.central_pyramid[i]) for i in range(4)]
self.ssim_loss_cr = [tf.reduce_mean(s) for s in self.ssim_cr]
# WEIGTHED SUM
self.image_loss_right = [self.params.alpha_image_loss * self.ssim_loss_right[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_right[i] for i in range(4)]
self.image_loss_left = [self.params.alpha_image_loss * self.ssim_loss_left[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_left[i] for i in range(4)]
self.image_loss_cl = [self.params.alpha_image_loss * self.ssim_loss_cl[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_cl[i] for i in range(4)]
self.image_loss_cr = [self.params.alpha_image_loss * self.ssim_loss_cr[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_cr[i] for i in range(4)]
self.image_loss = tf.add_n(self.image_loss_left + self.image_loss_cl + self.image_loss_right + self.image_loss_cr)
self.image_loss_L = tf.add_n(self.image_loss_left + self.image_loss_cl)
self.image_loss_R = tf.add_n(self.image_loss_right + self.image_loss_cr)
# DISPARITY SMOOTHNESS
self.disp_lc_loss = [tf.reduce_mean(tf.abs(self.disp_lc_smoothness[i])) / 2 ** i for i in range(4)]
self.disp_cl_loss = [tf.reduce_mean(tf.abs(self.disp_cl_smoothness[i])) / 2 ** i for i in range(4)]
self.disp_rc_loss = [tf.reduce_mean(tf.abs(self.disp_rc_smoothness[i])) / 2 ** i for i in range(4)]
self.disp_cr_loss = [tf.reduce_mean(tf.abs(self.disp_cr_smoothness[i])) / 2 ** i for i in range(4)]
self.disp_gradient_loss = tf.add_n(self.disp_lc_loss + self.disp_cl_loss + self.disp_rc_loss + self.disp_cr_loss)
self.disp_gradient_loss_L = tf.add_n(self.disp_lc_loss + self.disp_cl_loss)
self.disp_gradient_loss_R = tf.add_n(self.disp_rc_loss + self.disp_cr_loss)
# LR CONSISTENCY
self.lr_lc_loss = [tf.reduce_mean(tf.abs(self.cl_to_lc_disp[i] - self.disp_lc[i])) for i in range(4)]
self.lr_cl_loss = [tf.reduce_mean(tf.abs(self.lc_to_cl_disp[i] - self.disp_cl[i])) for i in range(4)]
self.lr_rc_loss = [tf.reduce_mean(tf.abs(self.cr_to_rc_disp[i] - self.disp_rc[i])) for i in range(4)]
self.lr_cr_loss = [tf.reduce_mean(tf.abs(self.rc_to_cr_disp[i] - self.disp_cr[i])) for i in range(4)]
self.lr_loss = tf.add_n(self.lr_lc_loss + self.lr_cl_loss + self.lr_rc_loss + self.lr_cr_loss)
self.lr_loss_L = tf.add_n(self.lr_lc_loss + self.lr_cl_loss)
self.lr_loss_R = tf.add_n(self.lr_rc_loss + self.lr_cr_loss)
# CENTRAL DISPARITY CONSISTENCY
self.central_disparity_dif = [tf.reduce_mean(tf.abs(self.disp_cl[i] - self.disp_cr[i])) for i in range(4)]
self.central_disparity_loss = tf.add_n(self.central_disparity_dif)
# TOTAL LOSS
self.total_loss = self.image_loss + self.params.disp_gradient_loss_weight * self.disp_gradient_loss + self.params.lr_loss_weight * self.lr_loss + self.central_disparity_loss
self.total_loss_L = self.image_loss_L + self.params.disp_gradient_loss_weight * self.disp_gradient_loss_L + self.params.lr_loss_weight * self.lr_loss_L
self.total_loss_R = self.image_loss_R + self.params.disp_gradient_loss_weight * self.disp_gradient_loss_R + self.params.lr_loss_weight * self.lr_loss_R
def build_summaries(self):
# SUMMARIES
with tf.device('/cpu:0'):
for i in range(4):
tf.summary.scalar('ssim_loss_' + str(i), self.ssim_loss_left[i] + self.ssim_loss_cl[i] + self.ssim_loss_right[i] + self.ssim_loss_cr[i], collections=self.model_collection)
tf.summary.scalar('l1_loss_' + str(i), self.l1_reconstruction_loss_left[i] + self.l1_reconstruction_loss_cl[i] + self.l1_reconstruction_loss_right[i] + self.l1_reconstruction_loss_cr[i], collections=self.model_collection)
tf.summary.scalar('image_loss_' + str(i), self.image_loss_left[i] + self.image_loss_cl[i] + self.image_loss_right[i] + self.image_loss_cr[i], collections=self.model_collection)
tf.summary.scalar('disp_gradient_loss_' + str(i), self.disp_lc_loss[i] + self.disp_cl_loss[i] + self.disp_rc_loss[i] + self.disp_cr_loss[i], collections=self.model_collection)
tf.summary.scalar('lr_loss_' + str(i), self.lr_lc_loss[i] + self.lr_cl_loss[i] + self.lr_rc_loss[i] + self.lr_cr_loss[i], collections=self.model_collection)
tf.summary.scalar('total_loss_L', self.total_loss_L, collections= self.model_collection)
tf.summary.scalar('total_loss_R', self.total_loss_R, collections=self.model_collection)
tf.summary.scalar('central_disparity_loss', self.central_disparity_loss, collections=self.model_collection)
tf.summary.image('disp_left_est_' + str(i), self.disp_lc[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('disp_cl_est_' + str(i), self.disp_cl[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('disp_right_est_' + str(i), self.disp_rc[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('disp_cr_est_' + str(i), self.disp_cr[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('left_pyramid_' + str(i), self.left_pyramid[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('central_pyramid_' + str(i), self.central_pyramid[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('right_pyramid_' + str(i), self.right_pyramid[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('left_est_' + str(i), self.left_est[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('cr_est_' + str(i), self.cr_est[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('cl_est_' + str(i), self.cl_est[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('right_est_' + str(i), self.right_est[i], max_outputs=4, collections=self.model_collection)
if self.params.full_summary:
#tf.summary.image('left_est_' + str(i), self.left_est[i], max_outputs=4, collections=self.model_collection)
#tf.summary.image('right_est_' + str(i), self.right_est[i], max_outputs=4, collections=self.model_collection)
#tf.summary.image('cl_est_' + str(i), self.cl_est[i], max_outputs=4, collections=self.model_collection)
#tf.summary.image('cr_est_' + str(i), self.cr_est[i], max_outputs=4, collections=self.model_collection)
#tf.summary.image('ssim_left_' + str(i), self.ssim_left[i], max_outputs=4, collections=self.model_collection)
#tf.summary.image('ssim_right_' + str(i), self.ssim_right[i], max_outputs=4, collections=self.model_collection)
#tf.summary.image('ssim_cl_' + str(i), self.ssim_cl[i], max_outputs=4, collections=self.model_collection)
#tf.summary.image('ssim_cr_' + str(i), self.ssim_cr[i], max_outputs=4, collections=self.model_collection)
#tf.summary.image('l1_left_' + str(i), self.l1_left[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('l1_right_' + str(i), self.l1_right[i], max_outputs=4, collections=self.model_collection)
#tf.summary.image('l1_cl_' + str(i), self.l1_cl[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('l1_cr_' + str(i), self.l1_cr[i], max_outputs=4, collections=self.model_collection)
if self.params.full_summary:
tf.summary.image('left', self.left, max_outputs=4, collections=self.model_collection)
tf.summary.image('right', self.right, max_outputs=4, collections=self.model_collection)
tf.summary.image('central', self.central, max_outputs=4, collections=self.model_collection) |
9,447 | 293533d07b530be9e8f97f1720619bf6c3113cca | import os
import sys
import string
from array import *
from datetime import datetime
#f = open('input_test.txt', 'r')
f = open('input_task.txt', 'r')
width = 60
height = 5000
sleepingMinutes = [[0 for x in range(width)] for y in range(height)]
infos = []
# Change lines to tuples and store to array for sorting
for line in f:
line = line.rstrip('\n')
line = line.replace('[','')
splitted = line.split(']')
stringTime = splitted[0]
stringTask = splitted[1]
datetimeTime = datetime.strptime(stringTime, '%Y-%m-%d %H:%M')
lineTuple = (datetimeTime, stringTask)
infos.append(lineTuple)
#print(datetimeTime.minute)
# sort the info we have
infosSorted = sorted(infos, key=lambda time: time[0])
#print(infos)
#print(infosSorted)
sleeping = False
for dataPoint in infosSorted:
splitted = dataPoint[1].split(' ')
#print(splitted)
if splitted[1] == 'Guard':
#print('Vartija vaihtui, vuorossa: ' + splitted[2])
guard = splitted[2].replace('#','')
if splitted[1] == 'falls':
sleeping = True
sleepingTimeStart = dataPoint[0]
#print('vartija ' + guard + ' nukahti hetkellä ' + str(sleepingTimeStart))
if splitted[1] == 'wakes':
sleeping = False
sleepingTimeStop = dataPoint[0]
sleepingTime = sleepingTimeStop - sleepingTimeStart
#print('vartija ' + guard + ' heräsi hetkellä ' + str(sleepingTimeStop) + ' nukkuen ' + str(sleepingTime))
for x in range(sleepingTimeStart.minute, sleepingTimeStop.minute):
sleepingMinutes[int(guard)][x] += 1
maxVartija = 0
maxMinuutti = 0
maxMinuutit = 0
vartija = 0
for x in sleepingMinutes:
summa = sum(x)
minuutti = x.index(max(x))
#print(x)
#print('yhteensä ' + str(summa) + ' nukkui eniten minuutilla ' + str(maxMinuutti))
if maxVartija < summa:
maxVartija = vartija
maxMinuutti = minuutti
maxMinuutit = summa
vartija += 1
print('Eniten nukkui vartija #' + str(maxVartija) + ' nukkuen yhteensä ' + str(maxMinuutit) + ' minuuttia ja eniten minuutilla ' + str(maxMinuutti))
print('Vastaus on siis ' + str(maxVartija*maxMinuutti)) |
9,448 | 1304b6373edeca394070b8a3d144608cf07172e3 | from datetime import datetime
from unittest import mock
import pytest
from freezegun import freeze_time
from datahub.ingestion.api.common import PipelineContext
from src.datahub.ingestion.source.aws.s3_util import make_s3_urn
FROZEN_TIME = "2020-04-14 07:00:00"
@pytest.mark.integration
def test_athena_config_query_location_old_plus_new_value_not_allowed():
from datahub.ingestion.source.sql.athena import AthenaConfig
with pytest.raises(ValueError):
AthenaConfig.parse_obj(
{
"aws_region": "us-west-1",
"s3_staging_dir": "s3://sample-staging-dir/",
"query_result_location": "s3://query_result_location",
"work_group": "test-workgroup",
}
)
@pytest.mark.integration
def test_athena_config_staging_dir_is_set_as_query_result():
from datahub.ingestion.source.sql.athena import AthenaConfig
config = AthenaConfig.parse_obj(
{
"aws_region": "us-west-1",
"s3_staging_dir": "s3://sample-staging-dir/",
"work_group": "test-workgroup",
}
)
expected_config = AthenaConfig.parse_obj(
{
"aws_region": "us-west-1",
"query_result_location": "s3://sample-staging-dir/",
"work_group": "test-workgroup",
}
)
assert config.json() == expected_config.json()
@pytest.mark.integration
def test_athena_uri():
from datahub.ingestion.source.sql.athena import AthenaConfig
config = AthenaConfig.parse_obj(
{
"aws_region": "us-west-1",
"query_result_location": "s3://query-result-location/",
"work_group": "test-workgroup",
}
)
assert (
config.get_sql_alchemy_url()
== "awsathena+rest://@athena.us-west-1.amazonaws.com:443/?s3_staging_dir=s3%3A%2F%2Fquery-result-location%2F&work_group=test-workgroup&catalog_name=awsdatacatalog&duration_seconds=3600"
)
@pytest.mark.integration
@freeze_time(FROZEN_TIME)
def test_athena_get_table_properties():
from pyathena.model import AthenaTableMetadata
from datahub.ingestion.source.sql.athena import AthenaConfig, AthenaSource
config = AthenaConfig.parse_obj(
{
"aws_region": "us-west-1",
"s3_staging_dir": "s3://sample-staging-dir/",
"work_group": "test-workgroup",
}
)
schema: str = "test_schema"
table: str = "test_table"
table_metadata = {
"TableMetadata": {
"Name": "test",
"TableType": "testType",
"CreateTime": datetime.now(),
"LastAccessTime": datetime.now(),
"PartitionKeys": [
{"Name": "testKey", "Type": "string", "Comment": "testComment"}
],
"Parameters": {
"comment": "testComment",
"location": "s3://testLocation",
"inputformat": "testInputFormat",
"outputformat": "testOutputFormat",
"serde.serialization.lib": "testSerde",
},
},
}
mock_cursor = mock.MagicMock()
mock_inspector = mock.MagicMock()
mock_inspector.engine.raw_connection().cursor.return_value = mock_cursor
mock_cursor._get_table_metadata.return_value = AthenaTableMetadata(
response=table_metadata
)
ctx = PipelineContext(run_id="test")
source = AthenaSource(config=config, ctx=ctx)
description, custom_properties, location = source.get_table_properties(
inspector=mock_inspector, table=table, schema=schema
)
assert custom_properties == {
"comment": "testComment",
"create_time": "2020-04-14 07:00:00",
"inputformat": "testInputFormat",
"last_access_time": "2020-04-14 07:00:00",
"location": "s3://testLocation",
"outputformat": "testOutputFormat",
"partition_keys": '[{"name": "testKey", "type": "string", "comment": "testComment"}]',
"serde.serialization.lib": "testSerde",
"table_type": "testType",
}
assert location == make_s3_urn("s3://testLocation", "PROD")
|
9,449 | 39eecf1c7ec19f7c75721caa092c08569f53d3e5 | #Classe do controlador do servidor SEEEEEEERVIDOOOOOOOOOOR
from usuarioModel import *
class ControllerSC:
'''
O controlador define 2 ações:
- adicionar_pessoa: para adicionar novas pessoas no banco de
dados.
- listar_pessoas: retornar a lista das pessoas
Note que as 2 ações supracitadas utilizam a classe do Modelo para
consultar/atualizar o banco de dados
'''
def __init__(self):
pass
@staticmethod
def entrarSC(login, senha):
resultado = Usuario.entrar(login, senha)
return resultado
@staticmethod
def cadastrarSC(usuario):
Usuario.adicionar(usuario)
@staticmethod
def criarPlaylist(dicioPlaylist):
musicas = Playlist.criarPlaylist(dicioPlaylist)
minhasMusicas = json.dumps(musicas.encode())
return minhasMusicas
|
9,450 | b748c489b2c63546feada811aa3b66146ad8d28e | #!/usr/bin/python3
import json
def from_json_string(my_str):
"""Function returns a JSON file representation of an object (string)"""
return json.loads(my_str)
|
9,451 | 369bffa21b5b8c0ca1d93da3aa30a38e2f4c82cc | import scrapy
from kingfisher_scrapy.base_spiders import BigFileSpider
from kingfisher_scrapy.util import components, handle_http_error
class France(BigFileSpider):
"""
Domain
France
Swagger API documentation
https://doc.data.gouv.fr/api/reference/
"""
name = 'france'
# SimpleSpider
data_type = 'release_package'
def start_requests(self):
# A CKAN API JSON response.
# Ministère de l'économie, des finances et de la relance
# https://www.data.gouv.fr/fr/datasets/donnees-essentielles-de-la-commande-publique-fichiers-consolides/
url = 'https://www.data.gouv.fr/api/1/datasets/donnees-essentielles-de-la-commande-publique-fichiers' \
'-consolides/'
yield scrapy.Request(url, meta={'file_name': 'page-1.json'}, callback=self.parse_list)
@handle_http_error
def parse_list(self, response):
for resource in response.json()['resources']:
description = resource['description']
if description and 'ocds' in description.lower():
yield self.build_request(resource['url'], formatter=components(-2))
|
9,452 | 956e63bf06255df4a36b5fa97aa62c0ed805c3f3 | #!/bin/python
from flask import Flask, jsonify, request
import subprocess
import os
app = Flask(__name__)
text = ""
greetings = "'/play' and '/replay'\n"
@app.route('/')
def index():
return greetings
@app.route('/play', methods=['POST'])
def play():
global text
text = request.data.decode('utf-8')
os.system('./play.sh "' + text + '"')
return jsonify({'played': True, "text" : text}), 201
@app.route('/replay')
def replay():
global text
os.system('./replay.sh')
return jsonify({'replayed': True, "text" : text}), 200
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
|
9,453 | dd06847c3eb9af6e84f247f8f0dd03961d83688e | from battleship.board import Board
from battleship.game import Game
import string
# Board
row_num = list(string.ascii_lowercase[:10]) # A-J
col_num = 10
board = Board(row_num, col_num)
board.display_board()
# Game
guesses = 25
quit = 'q'
game = Game(guesses, quit)
game.take_shot("\nChoose a spot to fire at in enemy seas: ", board)
# Ships
# 2x submarine = 1
# 2x destroyer = 2
# 1x cruiser = 3
# 1x battleship = 4
# 1x carrier = 5
|
9,454 | 356c817e254d8885beb447aa10759fff6a45ca25 | from microbit import *
import music
while True:
if button_a.is_pressed():
music.pitch(400,500)
|
9,455 | 4569413c8ea985a010a1fea4835a5b368a23663a | #import bmm_mysql_connect # Import my connection module
import csv
import MySQLdb
import os
#bmm_mysql_connect.connect() # Connecting to mysql test database
#mycur = conn.cursor() # Creating my cursor
path = os.path.expanduser('~/Projects/bmm_private/login_test.txt')
login = csv.reader(file(path))
# Assign login details to connection variables
for i in login:
host = i[0]
user = i[1]
passwd = i[2]
db = i[3]
# Connect to test database
conn = MySQLdb.connect(host=host,
user=user,
passwd=passwd,
db=db)
mycur = conn.cursor() # Creating my cursor
# creates a 'rooms' list, with reader function of csv module
# each row of the csv is made into it's own list with elements
rooms = csv.reader(file('list.txt'))
for room in rooms: #for each list in the list rooms
room_number = room[0] #pulls first element of each list and assigns to room_number variable
region = room[1] #pulls second element of each list and assigns to region variable
# Inserts the room number and reqion into the rooms table in the test database.
mycur.execute("INSERT INTO rooms VALUES (%r, %r)", (room_number, region))
conn.commit() # Commit the changes to the table
mycur.execute("SELECT * FROM rooms")
print mycur.fetchall() |
9,456 | fdef3e94bbeb29c25bf14e17cd1d013cf848bedc | # from magicbot import AutonomousStateMachine, timed_state, state
# from components.drivetrain import Drivetrain, DrivetrainState
# from components.intake import Intake
# from fieldMeasurements import FieldMeasurements
# class PushBotAuto(AutonomousStateMachine):
# # this auto is intended to push other robots off their lines
# MODE_NAME = "PushBot Auto"
# DEFAULT = False
# drivetrain: Drivetrain
# intake: Intake
# @state(first=True)
# def drive_towards_stations(self, initial_call):
# if initial_call:
# self.drivetrain.drive_to_position(FieldMeasurements.PushBotAuto.initial_drive_distance)
# self.intake.reset()
# self.intake.intake_lift.set_match_start()
# elif self.drivetrain.pid_manager.get_on_target():
# self.drivetrain.stop()
# self.next_state('turn_towards_robot')
# @state()
# def turn_towards_robot(self, initial_call):
# if initial_call:
# self.drivetrain.turn_to_angle(-90)
# elif self.drivetrain.pid_manager.get_on_target():
# self.drivetrain.stop()
# self.next_state('drive_towards_robot')
# @state()
# def drive_towards_robot(self, initial_call):
# if initial_call:
# self.drivetrain.drive_to_position(FieldMeasurements.PushBotAuto.distance_to_bot)
# elif self.drivetrain.pid_manager.get_on_target():
# self.drivetrain.stop()
# self.next_state('turn_pre_push_bot')
# @state()
# def turn_pre_push_bot(self, initial_call):
# if initial_call:
# self.drivetrain.turn_to_angle(-90)
# elif self.drivetrain.pid_manager.get_on_target():
# self.drivetrain.stop()
# self.next_state('push_bot')
# @state()
# def push_bot(self, initial_call):
# if initial_call:
# self.drivetrain.drive_to_position(
# FieldMeasurements.PushBotAuto.distance_to_bot
# + FieldMeasurements.PushBotAuto.extra_distance
# )
# elif self.drivetrain.pid_manager.get_on_target():
# self.drivetrain.stop()
# self.done() |
9,457 | 8050b757c20da7ad8dd3c12a30b523b752d6a3ff | friends = ["Vino", "Ammu", "Appu"]
print(friends)
print(friends[0])
# returns the last element in the list
print(friends[-1])
# returns the second to last element in the list
print(friends[-2]) |
9,458 | 937fd6aa7bd21258bd6e0f592d94a966519ef885 | '''
# AWS::Chatbot Construct Library
AWS Chatbot is an AWS service that enables DevOps and software development teams to use Slack chat rooms to monitor and respond to operational events in their AWS Cloud. AWS Chatbot processes AWS service notifications from Amazon Simple Notification Service (Amazon SNS), and forwards them to Slack chat rooms so teams can analyze and act on them immediately, regardless of location.
This module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.
```python
import aws_cdk.aws_chatbot as chatbot
import aws_cdk.aws_sns as sns
import aws_cdk.aws_iam as iam
slack_channel = chatbot.SlackChannelConfiguration(self, "MySlackChannel",
slack_channel_configuration_name="YOUR_CHANNEL_NAME",
slack_workspace_id="YOUR_SLACK_WORKSPACE_ID",
slack_channel_id="YOUR_SLACK_CHANNEL_ID"
)
slack_channel.add_to_role_policy(iam.PolicyStatement(
effect=iam.Effect.ALLOW,
actions=["s3:GetObject"
],
resources=["arn:aws:s3:::abc/xyz/123.txt"]
))
slack_channel.add_notification_topic(sns.Topic(self, "MyTopic"))
```
## Log Group
Slack channel configuration automatically create a log group with the name `/aws/chatbot/<configuration-name>` in `us-east-1` upon first execution with
log data set to never expire.
The `logRetention` property can be used to set a different expiration period. A log group will be created if not already exists.
If the log group already exists, it's expiration will be configured to the value specified in this construct (never expire, by default).
By default, CDK uses the AWS SDK retry options when interacting with the log group. The `logRetentionRetryOptions` property
allows you to customize the maximum number of retries and base backoff duration.
*Note* that, if `logRetention` is set, a [CloudFormation custom
resource](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cfn-customresource.html) is added
to the stack that pre-creates the log group as part of the stack deployment, if it already doesn't exist, and sets the
correct log retention period (never expire, by default).
'''
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from typeguard import check_type
from .._jsii import *
import constructs
from .. import (
CfnResource as _CfnResource_9df397a6,
Duration as _Duration_4839e8c3,
IInspectable as _IInspectable_c2943556,
IResolvable as _IResolvable_da3f097b,
IResource as _IResource_c80c4260,
Resource as _Resource_45bc6135,
TreeInspector as _TreeInspector_488e0dd5,
)
from ..aws_cloudwatch import (
Metric as _Metric_e396a4dc,
MetricOptions as _MetricOptions_1788b62f,
Unit as _Unit_61bc6f70,
)
from ..aws_codestarnotifications import (
INotificationRuleTarget as _INotificationRuleTarget_faa3b79b,
NotificationRuleTargetConfig as _NotificationRuleTargetConfig_ea27e095,
)
from ..aws_iam import (
IGrantable as _IGrantable_71c4f5de,
IPrincipal as _IPrincipal_539bb2fd,
IRole as _IRole_235f5d8e,
PolicyStatement as _PolicyStatement_0fe33853,
)
from ..aws_logs import (
LogRetentionRetryOptions as _LogRetentionRetryOptions_62d80a14,
RetentionDays as _RetentionDays_070f99f0,
)
from ..aws_sns import ITopic as _ITopic_9eca4852
@jsii.implements(_IInspectable_c2943556)
class CfnSlackChannelConfiguration(
_CfnResource_9df397a6,
metaclass=jsii.JSIIMeta,
jsii_type="aws-cdk-lib.aws_chatbot.CfnSlackChannelConfiguration",
):
'''A CloudFormation ``AWS::Chatbot::SlackChannelConfiguration``.
The ``AWS::Chatbot::SlackChannelConfiguration`` resource configures a Slack channel to allow users to use AWS Chatbot with AWS CloudFormation templates.
This resource requires some setup to be done in the AWS Chatbot console. To provide the required Slack workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console, then copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .
:cloudformationResource: AWS::Chatbot::SlackChannelConfiguration
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html
:exampleMetadata: fixture=_generated
Example::
# The code below shows an example of how to instantiate this type.
# The values are placeholders you should change.
from aws_cdk import aws_chatbot as chatbot
cfn_slack_channel_configuration = chatbot.CfnSlackChannelConfiguration(self, "MyCfnSlackChannelConfiguration",
configuration_name="configurationName",
iam_role_arn="iamRoleArn",
slack_channel_id="slackChannelId",
slack_workspace_id="slackWorkspaceId",
# the properties below are optional
guardrail_policies=["guardrailPolicies"],
logging_level="loggingLevel",
sns_topic_arns=["snsTopicArns"],
user_role_required=False
)
'''
def __init__(
self,
scope: constructs.Construct,
id: builtins.str,
*,
configuration_name: builtins.str,
iam_role_arn: builtins.str,
slack_channel_id: builtins.str,
slack_workspace_id: builtins.str,
guardrail_policies: typing.Optional[typing.Sequence[builtins.str]] = None,
logging_level: typing.Optional[builtins.str] = None,
sns_topic_arns: typing.Optional[typing.Sequence[builtins.str]] = None,
user_role_required: typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]] = None,
) -> None:
'''Create a new ``AWS::Chatbot::SlackChannelConfiguration``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param configuration_name: The name of the configuration.
:param iam_role_arn: The ARN of the IAM role that defines the permissions for AWS Chatbot . This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .
:param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .
:param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot . To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .
:param guardrail_policies: The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.
:param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .
:param sns_topic_arns: The ARNs of the SNS topics that deliver notifications to AWS Chatbot .
:param user_role_required: Enables use of a user role requirement in your chat configuration.
'''
if __debug__:
type_hints = typing.get_type_hints(CfnSlackChannelConfiguration.__init__)
check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"])
check_type(argname="argument id", value=id, expected_type=type_hints["id"])
props = CfnSlackChannelConfigurationProps(
configuration_name=configuration_name,
iam_role_arn=iam_role_arn,
slack_channel_id=slack_channel_id,
slack_workspace_id=slack_workspace_id,
guardrail_policies=guardrail_policies,
logging_level=logging_level,
sns_topic_arns=sns_topic_arns,
user_role_required=user_role_required,
)
jsii.create(self.__class__, self, [scope, id, props])
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: _TreeInspector_488e0dd5) -> None:
'''Examines the CloudFormation resource and discloses attributes.
:param inspector: - tree inspector to collect and process attributes.
'''
if __debug__:
type_hints = typing.get_type_hints(CfnSlackChannelConfiguration.inspect)
check_type(argname="argument inspector", value=inspector, expected_type=type_hints["inspector"])
return typing.cast(None, jsii.invoke(self, "inspect", [inspector]))
@jsii.member(jsii_name="renderProperties")
def _render_properties(
self,
props: typing.Mapping[builtins.str, typing.Any],
) -> typing.Mapping[builtins.str, typing.Any]:
'''
:param props: -
'''
if __debug__:
type_hints = typing.get_type_hints(CfnSlackChannelConfiguration._render_properties)
check_type(argname="argument props", value=props, expected_type=type_hints["props"])
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props]))
@jsii.python.classproperty # type: ignore[misc]
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> builtins.str:
'''The CloudFormation resource type name for this resource class.'''
return typing.cast(builtins.str, jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrArn")
def attr_arn(self) -> builtins.str:
'''
:cloudformationAttribute: Arn
'''
return typing.cast(builtins.str, jsii.get(self, "attrArn"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, "cfnProperties"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="configurationName")
def configuration_name(self) -> builtins.str:
'''The name of the configuration.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-configurationname
'''
return typing.cast(builtins.str, jsii.get(self, "configurationName"))
@configuration_name.setter
def configuration_name(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "configuration_name").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "configurationName", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="iamRoleArn")
def iam_role_arn(self) -> builtins.str:
'''The ARN of the IAM role that defines the permissions for AWS Chatbot .
This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-iamrolearn
'''
return typing.cast(builtins.str, jsii.get(self, "iamRoleArn"))
@iam_role_arn.setter
def iam_role_arn(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "iam_role_arn").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "iamRoleArn", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="slackChannelId")
def slack_channel_id(self) -> builtins.str:
'''The ID of the Slack channel.
To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackchannelid
'''
return typing.cast(builtins.str, jsii.get(self, "slackChannelId"))
@slack_channel_id.setter
def slack_channel_id(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "slack_channel_id").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "slackChannelId", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="slackWorkspaceId")
def slack_workspace_id(self) -> builtins.str:
'''The ID of the Slack workspace authorized with AWS Chatbot .
To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackworkspaceid
'''
return typing.cast(builtins.str, jsii.get(self, "slackWorkspaceId"))
@slack_workspace_id.setter
def slack_workspace_id(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "slack_workspace_id").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "slackWorkspaceId", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="guardrailPolicies")
def guardrail_policies(self) -> typing.Optional[typing.List[builtins.str]]:
'''The list of IAM policy ARNs that are applied as channel guardrails.
The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-guardrailpolicies
'''
return typing.cast(typing.Optional[typing.List[builtins.str]], jsii.get(self, "guardrailPolicies"))
@guardrail_policies.setter
def guardrail_policies(
self,
value: typing.Optional[typing.List[builtins.str]],
) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "guardrail_policies").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "guardrailPolicies", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="loggingLevel")
def logging_level(self) -> typing.Optional[builtins.str]:
'''Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs.
Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-logginglevel
'''
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "loggingLevel"))
@logging_level.setter
def logging_level(self, value: typing.Optional[builtins.str]) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "logging_level").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "loggingLevel", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="snsTopicArns")
def sns_topic_arns(self) -> typing.Optional[typing.List[builtins.str]]:
'''The ARNs of the SNS topics that deliver notifications to AWS Chatbot .
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-snstopicarns
'''
return typing.cast(typing.Optional[typing.List[builtins.str]], jsii.get(self, "snsTopicArns"))
@sns_topic_arns.setter
def sns_topic_arns(self, value: typing.Optional[typing.List[builtins.str]]) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "sns_topic_arns").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "snsTopicArns", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="userRoleRequired")
def user_role_required(
self,
) -> typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]]:
'''Enables use of a user role requirement in your chat configuration.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-userrolerequired
'''
return typing.cast(typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]], jsii.get(self, "userRoleRequired"))
@user_role_required.setter
def user_role_required(
self,
value: typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]],
) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "user_role_required").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "userRoleRequired", value)
@jsii.data_type(
jsii_type="aws-cdk-lib.aws_chatbot.CfnSlackChannelConfigurationProps",
jsii_struct_bases=[],
name_mapping={
"configuration_name": "configurationName",
"iam_role_arn": "iamRoleArn",
"slack_channel_id": "slackChannelId",
"slack_workspace_id": "slackWorkspaceId",
"guardrail_policies": "guardrailPolicies",
"logging_level": "loggingLevel",
"sns_topic_arns": "snsTopicArns",
"user_role_required": "userRoleRequired",
},
)
class CfnSlackChannelConfigurationProps:
def __init__(
self,
*,
configuration_name: builtins.str,
iam_role_arn: builtins.str,
slack_channel_id: builtins.str,
slack_workspace_id: builtins.str,
guardrail_policies: typing.Optional[typing.Sequence[builtins.str]] = None,
logging_level: typing.Optional[builtins.str] = None,
sns_topic_arns: typing.Optional[typing.Sequence[builtins.str]] = None,
user_role_required: typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]] = None,
) -> None:
'''Properties for defining a ``CfnSlackChannelConfiguration``.
:param configuration_name: The name of the configuration.
:param iam_role_arn: The ARN of the IAM role that defines the permissions for AWS Chatbot . This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .
:param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .
:param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot . To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .
:param guardrail_policies: The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.
:param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .
:param sns_topic_arns: The ARNs of the SNS topics that deliver notifications to AWS Chatbot .
:param user_role_required: Enables use of a user role requirement in your chat configuration.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html
:exampleMetadata: fixture=_generated
Example::
# The code below shows an example of how to instantiate this type.
# The values are placeholders you should change.
from aws_cdk import aws_chatbot as chatbot
cfn_slack_channel_configuration_props = chatbot.CfnSlackChannelConfigurationProps(
configuration_name="configurationName",
iam_role_arn="iamRoleArn",
slack_channel_id="slackChannelId",
slack_workspace_id="slackWorkspaceId",
# the properties below are optional
guardrail_policies=["guardrailPolicies"],
logging_level="loggingLevel",
sns_topic_arns=["snsTopicArns"],
user_role_required=False
)
'''
if __debug__:
type_hints = typing.get_type_hints(CfnSlackChannelConfigurationProps.__init__)
check_type(argname="argument configuration_name", value=configuration_name, expected_type=type_hints["configuration_name"])
check_type(argname="argument iam_role_arn", value=iam_role_arn, expected_type=type_hints["iam_role_arn"])
check_type(argname="argument slack_channel_id", value=slack_channel_id, expected_type=type_hints["slack_channel_id"])
check_type(argname="argument slack_workspace_id", value=slack_workspace_id, expected_type=type_hints["slack_workspace_id"])
check_type(argname="argument guardrail_policies", value=guardrail_policies, expected_type=type_hints["guardrail_policies"])
check_type(argname="argument logging_level", value=logging_level, expected_type=type_hints["logging_level"])
check_type(argname="argument sns_topic_arns", value=sns_topic_arns, expected_type=type_hints["sns_topic_arns"])
check_type(argname="argument user_role_required", value=user_role_required, expected_type=type_hints["user_role_required"])
self._values: typing.Dict[str, typing.Any] = {
"configuration_name": configuration_name,
"iam_role_arn": iam_role_arn,
"slack_channel_id": slack_channel_id,
"slack_workspace_id": slack_workspace_id,
}
if guardrail_policies is not None:
self._values["guardrail_policies"] = guardrail_policies
if logging_level is not None:
self._values["logging_level"] = logging_level
if sns_topic_arns is not None:
self._values["sns_topic_arns"] = sns_topic_arns
if user_role_required is not None:
self._values["user_role_required"] = user_role_required
@builtins.property
def configuration_name(self) -> builtins.str:
'''The name of the configuration.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-configurationname
'''
result = self._values.get("configuration_name")
assert result is not None, "Required property 'configuration_name' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def iam_role_arn(self) -> builtins.str:
'''The ARN of the IAM role that defines the permissions for AWS Chatbot .
This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-iamrolearn
'''
result = self._values.get("iam_role_arn")
assert result is not None, "Required property 'iam_role_arn' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def slack_channel_id(self) -> builtins.str:
'''The ID of the Slack channel.
To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackchannelid
'''
result = self._values.get("slack_channel_id")
assert result is not None, "Required property 'slack_channel_id' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def slack_workspace_id(self) -> builtins.str:
'''The ID of the Slack workspace authorized with AWS Chatbot .
To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackworkspaceid
'''
result = self._values.get("slack_workspace_id")
assert result is not None, "Required property 'slack_workspace_id' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def guardrail_policies(self) -> typing.Optional[typing.List[builtins.str]]:
'''The list of IAM policy ARNs that are applied as channel guardrails.
The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-guardrailpolicies
'''
result = self._values.get("guardrail_policies")
return typing.cast(typing.Optional[typing.List[builtins.str]], result)
@builtins.property
def logging_level(self) -> typing.Optional[builtins.str]:
'''Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs.
Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-logginglevel
'''
result = self._values.get("logging_level")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def sns_topic_arns(self) -> typing.Optional[typing.List[builtins.str]]:
'''The ARNs of the SNS topics that deliver notifications to AWS Chatbot .
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-snstopicarns
'''
result = self._values.get("sns_topic_arns")
return typing.cast(typing.Optional[typing.List[builtins.str]], result)
@builtins.property
def user_role_required(
self,
) -> typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]]:
'''Enables use of a user role requirement in your chat configuration.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-userrolerequired
'''
result = self._values.get("user_role_required")
return typing.cast(typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "CfnSlackChannelConfigurationProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.interface(jsii_type="aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration")
class ISlackChannelConfiguration(
_IResource_c80c4260,
_IGrantable_71c4f5de,
_INotificationRuleTarget_faa3b79b,
typing_extensions.Protocol,
):
'''Represents a Slack channel configuration.'''
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="slackChannelConfigurationArn")
def slack_channel_configuration_arn(self) -> builtins.str:
'''The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.
:attribute: true
'''
...
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="slackChannelConfigurationName")
def slack_channel_configuration_name(self) -> builtins.str:
'''The name of Slack channel configuration.
:attribute: true
'''
...
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="role")
def role(self) -> typing.Optional[_IRole_235f5d8e]:
'''The permission role of Slack channel configuration.
:default: - A role will be created.
:attribute: true
'''
...
@jsii.member(jsii_name="addToRolePolicy")
def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) -> None:
'''Adds a statement to the IAM role.
:param statement: -
'''
...
@jsii.member(jsii_name="metric")
def metric(
self,
metric_name: builtins.str,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[_Duration_4839e8c3] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[_Unit_61bc6f70] = None,
) -> _Metric_e396a4dc:
'''Return the given named metric for this SlackChannelConfiguration.
:param metric_name: -
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
class _ISlackChannelConfigurationProxy(
jsii.proxy_for(_IResource_c80c4260), # type: ignore[misc]
jsii.proxy_for(_IGrantable_71c4f5de), # type: ignore[misc]
jsii.proxy_for(_INotificationRuleTarget_faa3b79b), # type: ignore[misc]
):
'''Represents a Slack channel configuration.'''
__jsii_type__: typing.ClassVar[str] = "aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration"
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="slackChannelConfigurationArn")
def slack_channel_configuration_arn(self) -> builtins.str:
'''The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.
:attribute: true
'''
return typing.cast(builtins.str, jsii.get(self, "slackChannelConfigurationArn"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="slackChannelConfigurationName")
def slack_channel_configuration_name(self) -> builtins.str:
'''The name of Slack channel configuration.
:attribute: true
'''
return typing.cast(builtins.str, jsii.get(self, "slackChannelConfigurationName"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="role")
def role(self) -> typing.Optional[_IRole_235f5d8e]:
'''The permission role of Slack channel configuration.
:default: - A role will be created.
:attribute: true
'''
return typing.cast(typing.Optional[_IRole_235f5d8e], jsii.get(self, "role"))
@jsii.member(jsii_name="addToRolePolicy")
def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) -> None:
'''Adds a statement to the IAM role.
:param statement: -
'''
if __debug__:
type_hints = typing.get_type_hints(ISlackChannelConfiguration.add_to_role_policy)
check_type(argname="argument statement", value=statement, expected_type=type_hints["statement"])
return typing.cast(None, jsii.invoke(self, "addToRolePolicy", [statement]))
@jsii.member(jsii_name="metric")
def metric(
self,
metric_name: builtins.str,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[_Duration_4839e8c3] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[_Unit_61bc6f70] = None,
) -> _Metric_e396a4dc:
'''Return the given named metric for this SlackChannelConfiguration.
:param metric_name: -
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
if __debug__:
type_hints = typing.get_type_hints(ISlackChannelConfiguration.metric)
check_type(argname="argument metric_name", value=metric_name, expected_type=type_hints["metric_name"])
props = _MetricOptions_1788b62f(
account=account,
color=color,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(_Metric_e396a4dc, jsii.invoke(self, "metric", [metric_name, props]))
# Adding a "__jsii_proxy_class__(): typing.Type" function to the interface
typing.cast(typing.Any, ISlackChannelConfiguration).__jsii_proxy_class__ = lambda : _ISlackChannelConfigurationProxy
@jsii.enum(jsii_type="aws-cdk-lib.aws_chatbot.LoggingLevel")
class LoggingLevel(enum.Enum):
'''Logging levels include ERROR, INFO, or NONE.'''
ERROR = "ERROR"
'''ERROR.'''
INFO = "INFO"
'''INFO.'''
NONE = "NONE"
'''NONE.'''
@jsii.implements(ISlackChannelConfiguration)
class SlackChannelConfiguration(
_Resource_45bc6135,
metaclass=jsii.JSIIMeta,
jsii_type="aws-cdk-lib.aws_chatbot.SlackChannelConfiguration",
):
'''A new Slack channel configuration.
:exampleMetadata: infused
Example::
import aws_cdk.aws_chatbot as chatbot
# project: codebuild.Project
target = chatbot.SlackChannelConfiguration(self, "MySlackChannel",
slack_channel_configuration_name="YOUR_CHANNEL_NAME",
slack_workspace_id="YOUR_SLACK_WORKSPACE_ID",
slack_channel_id="YOUR_SLACK_CHANNEL_ID"
)
rule = project.notify_on_build_succeeded("NotifyOnBuildSucceeded", target)
'''
def __init__(
self,
scope: constructs.Construct,
id: builtins.str,
*,
slack_channel_configuration_name: builtins.str,
slack_channel_id: builtins.str,
slack_workspace_id: builtins.str,
logging_level: typing.Optional[LoggingLevel] = None,
log_retention: typing.Optional[_RetentionDays_070f99f0] = None,
log_retention_retry_options: typing.Optional[_LogRetentionRetryOptions_62d80a14] = None,
log_retention_role: typing.Optional[_IRole_235f5d8e] = None,
notification_topics: typing.Optional[typing.Sequence[_ITopic_9eca4852]] = None,
role: typing.Optional[_IRole_235f5d8e] = None,
) -> None:
'''
:param scope: -
:param id: -
:param slack_channel_configuration_name: The name of Slack channel configuration.
:param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ.
:param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.
:param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE
:param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE
:param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options.
:param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created.
:param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None
:param role: The permission role of Slack channel configuration. Default: - A role will be created.
'''
if __debug__:
type_hints = typing.get_type_hints(SlackChannelConfiguration.__init__)
check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"])
check_type(argname="argument id", value=id, expected_type=type_hints["id"])
props = SlackChannelConfigurationProps(
slack_channel_configuration_name=slack_channel_configuration_name,
slack_channel_id=slack_channel_id,
slack_workspace_id=slack_workspace_id,
logging_level=logging_level,
log_retention=log_retention,
log_retention_retry_options=log_retention_retry_options,
log_retention_role=log_retention_role,
notification_topics=notification_topics,
role=role,
)
jsii.create(self.__class__, self, [scope, id, props])
@jsii.member(jsii_name="fromSlackChannelConfigurationArn") # type: ignore[misc]
@builtins.classmethod
def from_slack_channel_configuration_arn(
cls,
scope: constructs.Construct,
id: builtins.str,
slack_channel_configuration_arn: builtins.str,
) -> ISlackChannelConfiguration:
'''Import an existing Slack channel configuration provided an ARN.
:param scope: The parent creating construct.
:param id: The construct's name.
:param slack_channel_configuration_arn: configuration ARN (i.e. arn:aws:chatbot::1234567890:chat-configuration/slack-channel/my-slack).
:return: a reference to the existing Slack channel configuration
'''
if __debug__:
type_hints = typing.get_type_hints(SlackChannelConfiguration.from_slack_channel_configuration_arn)
check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"])
check_type(argname="argument id", value=id, expected_type=type_hints["id"])
check_type(argname="argument slack_channel_configuration_arn", value=slack_channel_configuration_arn, expected_type=type_hints["slack_channel_configuration_arn"])
return typing.cast(ISlackChannelConfiguration, jsii.sinvoke(cls, "fromSlackChannelConfigurationArn", [scope, id, slack_channel_configuration_arn]))
@jsii.member(jsii_name="metricAll") # type: ignore[misc]
@builtins.classmethod
def metric_all(
    cls,
    metric_name: builtins.str,
    *,
    account: typing.Optional[builtins.str] = None,
    color: typing.Optional[builtins.str] = None,
    dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
    label: typing.Optional[builtins.str] = None,
    period: typing.Optional[_Duration_4839e8c3] = None,
    region: typing.Optional[builtins.str] = None,
    statistic: typing.Optional[builtins.str] = None,
    unit: typing.Optional[_Unit_61bc6f70] = None,
) -> _Metric_e396a4dc:
    '''Return the given named metric for All SlackChannelConfigurations.

    :param metric_name: name of the CloudWatch metric.
    :param account: Account which this metric comes from. Default: - Deployment account.
    :param color: Hex color code (e.g. '#00ff00') used when the metric is rendered on a graph. Default: - Automatic color
    :param dimensions_map: Dimensions of the metric. Default: - No dimensions.
    :param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
    :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
    :param region: Region which this metric comes from. Default: - Deployment region.
    :param statistic: Aggregating function: "Minimum"/"min", "Maximum"/"max", "Average"/"avg", "Sum"/"sum", "SampleCount"/"n", or "pNN.NN". Default: Average
    :param unit: Only consider datums emitted to the metric stream with this unit. Default: - All metric datums in the given metric stream
    '''
    if __debug__:
        hints = typing.get_type_hints(SlackChannelConfiguration.metric_all)
        check_type(argname="argument metric_name", value=metric_name, expected_type=hints["metric_name"])
    options = _MetricOptions_1788b62f(
        account=account,
        color=color,
        dimensions_map=dimensions_map,
        label=label,
        period=period,
        region=region,
        statistic=statistic,
        unit=unit,
    )
    return typing.cast(_Metric_e396a4dc, jsii.sinvoke(cls, "metricAll", [metric_name, options]))
@jsii.member(jsii_name="addNotificationTopic")
def add_notification_topic(self, notification_topic: _ITopic_9eca4852) -> None:
    '''Adds a SNS topic that deliver notifications to AWS Chatbot.

    :param notification_topic: the SNS topic to attach to this configuration.
    '''
    if __debug__:
        hints = typing.get_type_hints(SlackChannelConfiguration.add_notification_topic)
        check_type(argname="argument notification_topic", value=notification_topic, expected_type=hints["notification_topic"])
    return typing.cast(None, jsii.invoke(self, "addNotificationTopic", [notification_topic]))
@jsii.member(jsii_name="addToRolePolicy")
def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) -> None:
    '''Adds extra permission to iam-role of Slack channel configuration.

    :param statement: the IAM policy statement to append.
    '''
    if __debug__:
        hints = typing.get_type_hints(SlackChannelConfiguration.add_to_role_policy)
        check_type(argname="argument statement", value=statement, expected_type=hints["statement"])
    return typing.cast(None, jsii.invoke(self, "addToRolePolicy", [statement]))
@jsii.member(jsii_name="bindAsNotificationRuleTarget")
def bind_as_notification_rule_target(
    self,
    _scope: constructs.Construct,
) -> _NotificationRuleTargetConfig_ea27e095:
    '''Returns a target configuration for notification rule.

    :param _scope: unused; present to satisfy the notification-target contract.
    '''
    if __debug__:
        hints = typing.get_type_hints(SlackChannelConfiguration.bind_as_notification_rule_target)
        check_type(argname="argument _scope", value=_scope, expected_type=hints["_scope"])
    bound = jsii.invoke(self, "bindAsNotificationRuleTarget", [_scope])
    return typing.cast(_NotificationRuleTargetConfig_ea27e095, bound)
@jsii.member(jsii_name="metric")
def metric(
    self,
    metric_name: builtins.str,
    *,
    account: typing.Optional[builtins.str] = None,
    color: typing.Optional[builtins.str] = None,
    dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
    label: typing.Optional[builtins.str] = None,
    period: typing.Optional[_Duration_4839e8c3] = None,
    region: typing.Optional[builtins.str] = None,
    statistic: typing.Optional[builtins.str] = None,
    unit: typing.Optional[_Unit_61bc6f70] = None,
) -> _Metric_e396a4dc:
    '''Return the given named metric for this SlackChannelConfiguration.

    :param metric_name: name of the CloudWatch metric.
    :param account: Account which this metric comes from. Default: - Deployment account.
    :param color: Hex color code (e.g. '#00ff00') used when the metric is rendered on a graph. Default: - Automatic color
    :param dimensions_map: Dimensions of the metric. Default: - No dimensions.
    :param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
    :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
    :param region: Region which this metric comes from. Default: - Deployment region.
    :param statistic: Aggregating function: "Minimum"/"min", "Maximum"/"max", "Average"/"avg", "Sum"/"sum", "SampleCount"/"n", or "pNN.NN". Default: Average
    :param unit: Only consider datums emitted to the metric stream with this unit. Default: - All metric datums in the given metric stream
    '''
    if __debug__:
        hints = typing.get_type_hints(SlackChannelConfiguration.metric)
        check_type(argname="argument metric_name", value=metric_name, expected_type=hints["metric_name"])
    options = _MetricOptions_1788b62f(
        account=account,
        color=color,
        dimensions_map=dimensions_map,
        label=label,
        period=period,
        region=region,
        statistic=statistic,
        unit=unit,
    )
    return typing.cast(_Metric_e396a4dc, jsii.invoke(self, "metric", [metric_name, options]))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="grantPrincipal")
def grant_principal(self) -> _IPrincipal_539bb2fd:
    '''The principal to grant permissions to.'''
    principal = jsii.get(self, "grantPrincipal")
    return typing.cast(_IPrincipal_539bb2fd, principal)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="slackChannelConfigurationArn")
def slack_channel_configuration_arn(self) -> builtins.str:
    '''The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.'''
    arn = jsii.get(self, "slackChannelConfigurationArn")
    return typing.cast(builtins.str, arn)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="slackChannelConfigurationName")
def slack_channel_configuration_name(self) -> builtins.str:
    '''The name of Slack channel configuration.'''
    name = jsii.get(self, "slackChannelConfigurationName")
    return typing.cast(builtins.str, name)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="role")
def role(self) -> typing.Optional[_IRole_235f5d8e]:
    '''The permission role of Slack channel configuration.'''
    current_role = jsii.get(self, "role")
    return typing.cast(typing.Optional[_IRole_235f5d8e], current_role)
@jsii.data_type(
    jsii_type="aws-cdk-lib.aws_chatbot.SlackChannelConfigurationProps",
    jsii_struct_bases=[],
    name_mapping={
        "slack_channel_configuration_name": "slackChannelConfigurationName",
        "slack_channel_id": "slackChannelId",
        "slack_workspace_id": "slackWorkspaceId",
        "logging_level": "loggingLevel",
        "log_retention": "logRetention",
        "log_retention_retry_options": "logRetentionRetryOptions",
        "log_retention_role": "logRetentionRole",
        "notification_topics": "notificationTopics",
        "role": "role",
    },
)
class SlackChannelConfigurationProps:
    def __init__(
        self,
        *,
        slack_channel_configuration_name: builtins.str,
        slack_channel_id: builtins.str,
        slack_workspace_id: builtins.str,
        logging_level: typing.Optional[LoggingLevel] = None,
        log_retention: typing.Optional[_RetentionDays_070f99f0] = None,
        log_retention_retry_options: typing.Optional[_LogRetentionRetryOptions_62d80a14] = None,
        log_retention_role: typing.Optional[_IRole_235f5d8e] = None,
        notification_topics: typing.Optional[typing.Sequence[_ITopic_9eca4852]] = None,
        role: typing.Optional[_IRole_235f5d8e] = None,
    ) -> None:
        '''Properties for a new Slack channel configuration.

        :param slack_channel_configuration_name: The name of Slack channel configuration.
        :param slack_channel_id: The ID of the Slack channel (the 9-character string at the end of the channel's Copy Link URL, e.g. ABCBBLZZZ).
        :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot (copied from the AWS Chatbot console after the initial authorization flow).
        :param logging_level: Logging level for entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE
        :param log_retention: The number of days log events are kept in CloudWatch Logs; set to ``INFINITE`` to remove a retention policy on update. Default: logs.RetentionDays.INFINITE
        :param log_retention_retry_options: Retry policy used by the log-retention custom resource when calling CloudWatch APIs. Default: - Default AWS SDK retry options.
        :param log_retention_role: IAM role for the Lambda that applies the retention policy. Default: - A new role is created.
        :param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None
        :param role: The permission role of Slack channel configuration. Default: - A role will be created.

        :exampleMetadata: infused

        Example::

            import aws_cdk.aws_chatbot as chatbot

            # project: codebuild.Project

            target = chatbot.SlackChannelConfiguration(self, "MySlackChannel",
                slack_channel_configuration_name="YOUR_CHANNEL_NAME",
                slack_workspace_id="YOUR_SLACK_WORKSPACE_ID",
                slack_channel_id="YOUR_SLACK_CHANNEL_ID"
            )

            rule = project.notify_on_build_succeeded("NotifyOnBuildSucceeded", target)
        '''
        # The jsii runtime may hand the retry options over as a plain dict;
        # normalize into the struct type first.
        if isinstance(log_retention_retry_options, dict):
            log_retention_retry_options = _LogRetentionRetryOptions_62d80a14(**log_retention_retry_options)
        if __debug__:
            hints = typing.get_type_hints(SlackChannelConfigurationProps.__init__)
            check_type(argname="argument slack_channel_configuration_name", value=slack_channel_configuration_name, expected_type=hints["slack_channel_configuration_name"])
            check_type(argname="argument slack_channel_id", value=slack_channel_id, expected_type=hints["slack_channel_id"])
            check_type(argname="argument slack_workspace_id", value=slack_workspace_id, expected_type=hints["slack_workspace_id"])
            check_type(argname="argument logging_level", value=logging_level, expected_type=hints["logging_level"])
            check_type(argname="argument log_retention", value=log_retention, expected_type=hints["log_retention"])
            check_type(argname="argument log_retention_retry_options", value=log_retention_retry_options, expected_type=hints["log_retention_retry_options"])
            check_type(argname="argument log_retention_role", value=log_retention_role, expected_type=hints["log_retention_role"])
            check_type(argname="argument notification_topics", value=notification_topics, expected_type=hints["notification_topics"])
            check_type(argname="argument role", value=role, expected_type=hints["role"])
        # Required keys first; optionals only when actually provided.
        self._values: typing.Dict[str, typing.Any] = {
            "slack_channel_configuration_name": slack_channel_configuration_name,
            "slack_channel_id": slack_channel_id,
            "slack_workspace_id": slack_workspace_id,
        }
        optional_values = (
            ("logging_level", logging_level),
            ("log_retention", log_retention),
            ("log_retention_retry_options", log_retention_retry_options),
            ("log_retention_role", log_retention_role),
            ("notification_topics", notification_topics),
            ("role", role),
        )
        for key, value in optional_values:
            if value is not None:
                self._values[key] = value

    @builtins.property
    def slack_channel_configuration_name(self) -> builtins.str:
        '''The name of Slack channel configuration.'''
        result = self._values.get("slack_channel_configuration_name")
        assert result is not None, "Required property 'slack_channel_configuration_name' is missing"
        return typing.cast(builtins.str, result)

    @builtins.property
    def slack_channel_id(self) -> builtins.str:
        '''The ID of the Slack channel.

        To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link.
        The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ.
        '''
        result = self._values.get("slack_channel_id")
        assert result is not None, "Required property 'slack_channel_id' is missing"
        return typing.cast(builtins.str, result)

    @builtins.property
    def slack_workspace_id(self) -> builtins.str:
        '''The ID of the Slack workspace authorized with AWS Chatbot.

        See steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.

        :see: https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro
        '''
        result = self._values.get("slack_workspace_id")
        assert result is not None, "Required property 'slack_workspace_id' is missing"
        return typing.cast(builtins.str, result)

    @builtins.property
    def logging_level(self) -> typing.Optional[LoggingLevel]:
        '''Logging level for the entries this configuration pushes to Amazon CloudWatch Logs.

        :default: LoggingLevel.NONE
        '''
        return typing.cast(typing.Optional[LoggingLevel], self._values.get("logging_level"))

    @builtins.property
    def log_retention(self) -> typing.Optional[_RetentionDays_070f99f0]:
        '''The number of days log events are kept in CloudWatch Logs.

        When updating this property, unsetting it doesn't remove the log
        retention policy; set the value to ``INFINITE`` to remove it.

        :default: logs.RetentionDays.INFINITE
        '''
        return typing.cast(typing.Optional[_RetentionDays_070f99f0], self._values.get("log_retention"))

    @builtins.property
    def log_retention_retry_options(
        self,
    ) -> typing.Optional[_LogRetentionRetryOptions_62d80a14]:
        '''Retry policy used by the log-retention custom resource when calling CloudWatch APIs.

        :default: - Default AWS SDK retry options.
        '''
        return typing.cast(typing.Optional[_LogRetentionRetryOptions_62d80a14], self._values.get("log_retention_retry_options"))

    @builtins.property
    def log_retention_role(self) -> typing.Optional[_IRole_235f5d8e]:
        '''The IAM role for the Lambda function associated with the custom resource that sets the retention policy.

        :default: - A new role is created.
        '''
        return typing.cast(typing.Optional[_IRole_235f5d8e], self._values.get("log_retention_role"))

    @builtins.property
    def notification_topics(self) -> typing.Optional[typing.List[_ITopic_9eca4852]]:
        '''The SNS topics that deliver notifications to AWS Chatbot.

        :default: None
        '''
        return typing.cast(typing.Optional[typing.List[_ITopic_9eca4852]], self._values.get("notification_topics"))

    @builtins.property
    def role(self) -> typing.Optional[_IRole_235f5d8e]:
        '''The permission role of Slack channel configuration.

        :default: - A role will be created.
        '''
        return typing.cast(typing.Optional[_IRole_235f5d8e], self._values.get("role"))

    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        return "SlackChannelConfigurationProps(%s)" % ", ".join(
            k + "=" + repr(v) for k, v in self._values.items()
        )
# Names this generated module exposes as its public API.
__all__ = [
    "CfnSlackChannelConfiguration",
    "CfnSlackChannelConfigurationProps",
    "ISlackChannelConfiguration",
    "LoggingLevel",
    "SlackChannelConfiguration",
    "SlackChannelConfigurationProps",
]

# NOTE(review): part of the jsii code-generation scaffolding -- presumably
# registers/finalizes the types declared above; confirm against the
# ``publication`` package docs.
publication.publish()
|
9,459 | 262d6722f4c158d0a41b22433792cdc35651d156 | # coding=utf-8
"""
Given a binary tree, find its maximum depth.
The maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.
Example
Given a binary tree as follow:
1
/ \
2 3
/ \
4 5
The maximum depth is 3.
"""
"""
Definition of TreeNode:
"""
class TreeNode:
    """A binary-tree node: a value plus left/right child links."""

    def __init__(self, val):
        # Children start out absent; callers attach subtrees afterwards.
        self.val = val
        self.left = None
        self.right = None
class Solution:
    """
    @param root: The root of binary tree.
    @return: An integer
    """
    def maxDepth(self, root):
        """Return the number of nodes on the longest root-to-leaf path.

        An empty tree has depth 0; a single node has depth 1.
        """
        if not root:
            return 0
        # Bug fix: the original recursed via a non-existent ``self.maximum``
        # helper, raising AttributeError on any non-empty tree; the
        # recursion must call ``maxDepth`` itself.
        return max(self.maxDepth(root.left), self.maxDepth(root.right)) + 1
9,460 | 26289d88ac51ee359faa81ca70b01879d2b1f840 | pairs = ['usdt', 'btc']
warn_msg = '** WARN ** '
info_msg = '** INFO **'
|
9,461 | 2cd7d4fe87de66e85bc0d060e2eaa68be39eed02 | from tasks import video_compress, video_upload
if __name__ == '__main__':
video_compress.apply_async(["a"],queue='high')
video_compress.apply_async(["b"],queue='low')
video_upload.apply_async(["c"], queue='low')
video_upload.apply_async(["d"], queue='high')
|
9,462 | bec3d8546cd7d27f7da48f5658480cf17c36a255 | import os
import re
import sys
import traceback
import readline
from typing import NamedTuple, List
from PyInquirer import prompt
from pygments import highlight
from pygments.formatters.terminal import TerminalFormatter
from pygments.lexers.python import PythonLexer
import argparse
# Parse the target project endpoint once; the result is shared below.
parser = argparse.ArgumentParser(description='Enter project endpoint')
parser.add_argument("proj", help="Run 'python3 cli.py <proj>', where <proj> is one of the following: hog cats ants scheme")
args = parser.parse_args()
proj = args.proj

from analyzer import get_problems, Comment
from finalizing import grade
from ok_interface import get_backup_ids, get_backup_code, submit_comment, submit_grade
from colorama import Fore, Style
from templates import template_completer, templates
import argparse
import config

# Fix: the original constructed and ran a second, identical ArgumentParser
# here, parsing sys.argv twice; reuse the single parse above.
config.proj = args.proj
class Grade(NamedTuple):
    """Final grade for one backup.

    score: numeric grade submitted via submit_grade
    message: human-readable grade summary printed for the grader
    comments: per-line Comment objects attached to the submission
    """
    score: int
    message: str
    comments: List[Comment]
def clear():
    """Wipe the terminal using the platform-appropriate shell command."""
    command = "cls" if os.name == "nt" else "clear"
    os.system(command)
def display_code_with_accepted_and_potential_comments(
    name, problem, accepted_comments, curr_comment=None
):
    """Render one problem's highlighted code, interleaving the comments
    already accepted for a line (magenta) and, when given, the comment
    currently being proposed (bright red)."""
    clear()
    print(f"Problem: {name}")
    highlighted_code = highlight(problem.code, PythonLexer(), TerminalFormatter())
    for offset, line in enumerate(highlighted_code.split("\n")):
        line_num = problem.initial_line_number + offset
        # The same test gates both the spacer before the line and the
        # comment rendering after it; compute it once.
        annotated = line_num in accepted_comments or (
            curr_comment and line_num == curr_comment.line_num
        )
        if annotated:
            print()
        print(f"{Fore.GREEN}{line_num} {Style.RESET_ALL}{line}")
        if annotated:
            indent_level = len(line) - len(line.strip()) + 3
            if line_num in accepted_comments:
                for accepted_comment in accepted_comments[line_num]:
                    print(
                        Fore.MAGENTA
                        + " " * indent_level
                        + "# "
                        + accepted_comment.comment
                    )
            if curr_comment and line_num == curr_comment.line_num:
                print(
                    Fore.RED
                    + Style.BRIGHT
                    + " " * indent_level
                    + "# "
                    + curr_comment.comment
                )
    print()
    print()
def complete(comment):
    """Interactively fill a template comment's placeholder fields, let the
    grader edit the final text, and return the finished Comment.

    Fix: when ``comment.fields`` was empty the original left
    ``field_vals``/``complete_text`` unbound, raising NameError before the
    final prompt; an empty template now passes straight through.
    """
    field_vals = {}
    if comment.fields:
        print("Please provide supplementary information:")
        for field in comment.fields:
            q = {"type": "input", "name": "field", "message": field + ":"}
            response = wrapped_prompt(q)
            field_vals[field] = response["field"]
    # With no fields, format() is a no-op and returns the text unchanged.
    complete_text = comment.comment.format(**field_vals)
    q = {
        "type": "input",
        "name": "final",
        "message": "Final message",
        "default": complete_text,
    }
    response = wrapped_prompt(q)
    return Comment(comment.line_num, response["final"])
def add_comment(accepted_comments, new_comment):
    """Record *new_comment* under its line number; a falsy comment is a no-op."""
    if not new_comment:
        return
    accepted_comments.setdefault(new_comment.line_num, []).append(new_comment)
class Interrupt(Exception):
    """Raised to escape a prompt when the grader types a command
    (``cancel``, ``clear``, ``reset``); carries the command text."""

    def __init__(self, cmd):
        # Bug fix: the original called bare ``super()``, which only builds
        # and discards a proxy object; initialize the base Exception so
        # ``str(exc)`` and ``exc.args`` carry the command.
        super().__init__(cmd)
        self.cmd = cmd
def wrapped_prompt(q):
    """Ask one PyInquirer question; an empty answer dict (prompt aborted)
    drops into the command handler."""
    answer = prompt([q])
    if not answer:
        receive_command()
    return answer
def wrapped_input(q):
    """``input()`` that treats Ctrl-C as a request to enter command mode."""
    try:
        return input(q)
    except KeyboardInterrupt:
        return receive_command()
def receive_command():
    """Read a grader command from stdin and raise it as an Interrupt for the
    nearest handler to act on."""
    menu = (
        f"\n\n"
        f"cancel = cancel this comment\n"
        f"clear = clear all question comments\n"
        f"reset = reset all student comments\n"
        f"? {Style.BRIGHT}{Fore.RED}command: {Style.RESET_ALL}"
    )
    raise Interrupt(input(menu))
def main():
    """Grade every backup returned by OK, submitting comments and grades."""
    readline.parse_and_bind("tab: complete")
    readline.set_completer_delims("")
    print("cli.py main")
    # Renamed loop variable: the original shadowed the ``id`` builtin.
    for backup_id in get_backup_ids():
        try:
            problems = get_problems(get_backup_code(backup_id))
        except Exception:
            print(
                f"{Fore.RED}An exception occurred while processing backup id #{backup_id}",
                file=sys.stderr,
            )
            traceback.print_exc(file=sys.stderr)
            print(f"{Style.RESET_ALL}")
            continue
        result = grade_backup(problems)
        for comment in result.comments:
            print(comment)
            assert not comment.fields, "fields not substituted!"
            submit_comment(backup_id, comment.line_num, comment.comment)
        submit_grade(backup_id, result.score, result.message)
def grade_backup(problems):
    """Grade every problem in one backup and return the overall Grade.

    Typing ``reset`` at any prompt restarts this backup from scratch; any
    other Interrupt command propagates to the caller.
    """
    comments = []
    try:
        for name, problem in problems.items():
            comments.extend(grade_problem(name, problem))
        score, message = grade(comments)
        print(message)
        q = {
            "type": "confirm",
            "name": "ok",
            "message": "Does this grade look reasonable?",
        }
        # NOTE(review): the answer is not inspected -- the prompt appears to
        # exist only to pause for acknowledgement (confirm intent). Fix: the
        # original bound the result to an unused local; binding dropped.
        wrapped_prompt(q)
        return Grade(score, message, comments)
    except Interrupt as e:
        if e.cmd == "reset":
            return grade_backup(problems)
        raise
def grade_problem(name, problem):
    """Interactively grade one problem and return its accepted comments.

    Phase 1 offers each analyzer-suggested comment for accept/reject;
    phase 2 loops letting the grader add template-based or free-form
    comments until they confirm moving on. Interrupt commands: ``cancel``
    abandons the current comment, ``clear`` restarts this whole problem.
    """
    # Tab-completion over the templates available for this problem.
    readline.set_completer(template_completer(name))
    try:
        accepted_comments = {}
        # Phase 1: review the automatically suggested comments one by one.
        for comment in problem.comments:
            try:
                display_code_with_accepted_and_potential_comments(
                    name, problem, accepted_comments, comment
                )
                print(f"{Fore.CYAN}Potential comment: {Style.RESET_ALL}")
                print(
                    f"{Fore.GREEN}{comment.line_num}{Style.RESET_ALL} {comment.comment}"
                )
                q = {
                    "type": "confirm",
                    "name": "ok",
                    "message": "Add comment",
                    "default": True,
                }
                response = wrapped_prompt(q)
                if response["ok"]:
                    add_comment(accepted_comments, complete(comment))
            except Interrupt as e:
                # "cancel" skips just this suggestion; anything else
                # (e.g. "clear"/"reset") is handled further out.
                if e.cmd == "cancel":
                    continue
                raise
        # Phase 2: manual comments until the grader confirms moving on.
        while True:
            try:
                display_code_with_accepted_and_potential_comments(
                    name, problem, accepted_comments
                )
                response = wrapped_input(
                    f"? {Style.BRIGHT} Custom comment type: {Style.RESET_ALL}"
                )
                # Empty input: offer to finish this problem.
                if not response:
                    q = {
                        "type": "confirm",
                        "name": "ok",
                        "message": "Go to next question?",
                        "default": True,
                    }
                    response = wrapped_prompt(q)
                    if response["ok"]:
                        break
                    continue
                if response not in templates:
                    print(
                        f"{Fore.RED} Template {response} not found! {Style.RESET_ALL}"
                    )
                    continue
                text = templates[response]
                q = {"type": "input", "name": "line_num", "message": "Line number:"}
                response = wrapped_prompt(q)
                try:
                    line_num = int(response["line_num"])
                except ValueError:
                    print(
                        f"{Fore.RED} Expected a number, received {response['line_num']} not found! {Style.RESET_ALL}"
                    )
                    continue
                if text:
                    # Template with body: extract its {placeholder} fields
                    # and run the interactive fill-in.
                    fields = list(set(re.findall(r"{(.*?)}", text)))
                    comment = Comment(line_num, text, fields)
                    add_comment(accepted_comments, complete(comment))
                else:
                    # Empty template: prompt for free-form comment text.
                    q = {"type": "input", "name": "text", "message": "Comment:"}
                    response = wrapped_prompt(q)
                    comment = Comment(line_num, response["text"], [])
                    add_comment(accepted_comments, comment)
            except Interrupt as e:
                if e.cmd == "cancel":
                    continue
                raise
        print()
        # Flatten {line_num: [comments]} into a single list.
        return list(sum(accepted_comments.values(), []))
    except Interrupt as e:
        # "clear" throws away everything and regrades this problem.
        if e.cmd == "clear":
            return grade_problem(name, problem)
        raise
if __name__ == "__main__":
    try:
        main()
    # NOTE(review): this bare ``except:`` swallows every failure (including
    # KeyboardInterrupt/SystemExit) after resetting terminal colors, so the
    # process always exits 0 -- consider re-raising after the reset.
    except:
        print(f"{Style.RESET_ALL}")
|
9,463 | 4c483636316dfa660f10b1aba900813bc3e95ebe | from django.http import HttpResponseRedirect
from django.shortcuts import render
__author__ = 'jhonjairoroa87'
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework_jsonp.renderers import JSONPRenderer
from django.db import models
from .form import NameForm
def multiply(a, b):
    """Return the product of *a* and *b*."""
    return a * b
class Multiply(APIView):
    """Form-backed view: GET renders an empty form, POST multiplies the two
    submitted values and renders the product (or "error" on bad input)."""

    renderer_classes = (JSONPRenderer,)

    @staticmethod
    def get(request):
        # Present a blank form for the user to fill in.
        return render(request, 'name.html', {'form': NameForm()})

    @staticmethod
    def post(request):
        form = NameForm(request.POST)
        if not form.is_valid():
            return render(request, 'name.html', {'data': "error"})
        product = multiply(form.cleaned_data['one'], form.cleaned_data['second'])
        return render(request, 'name.html', {'data': product})
class Divide(APIView):
    """Query-string division endpoint: ``GET /?a=10&b=2`` -> ``{'result': 5.0}``."""

    renderer_classes = (JSONPRenderer,)

    @staticmethod
    def get(request):
        """Divide query parameter ``a`` by ``b``.

        Returns an error payload (HTTP 200) instead of a server error when a
        parameter is missing or non-numeric, or when ``b`` is zero.
        """
        try:
            first_number = int(request.GET.get('a'))
            second_number = int(request.GET.get('b'))
            return Response({'result': first_number / second_number})
        # Narrowed from a blanket ``except Exception``: only the failure
        # modes this arithmetic can produce are handled (missing param ->
        # TypeError, non-numeric -> ValueError, b == 0 ->
        # ZeroDivisionError), so unrelated bugs surface instead of being
        # masked as a 200 response.
        except (TypeError, ValueError, ZeroDivisionError) as e:
            return Response({'result': 'there was an error ' + str(e)})
9,464 | e5abab3f718bbbd25dcfc49290383203d53248c3 |
import logging
from exceptions.invalid_api_usage import InvalidAPIUsage
from wgadget.endpoints.ep import EP
class EPInfoLight(EP):
    """GET endpoint reporting the current state of the light actuator."""

    NAME = 'info_light'
    URL = '/info'

    URL_ROUTE_PAR_PAYLOAD = '/'
    URL_ROUTE_PAR_URL = '/actuatorId/<actuatorId>'

    METHOD = 'GET'

    ATTR_ACTUATOR_ID = 'actuatorId'

    def __init__(self, web_gadget):
        self.web_gadget = web_gadget

    def getRequestDescriptionWithPayloadParameters(self):
        """Describe this endpoint's request shape (name, url, method and
        expected payload attributes) for API discovery."""
        ret = {}
        ret['name'] = EPInfoLight.NAME
        ret['url'] = EPInfoLight.URL_ROUTE_PAR_PAYLOAD
        ret['method'] = EPInfoLight.METHOD

        # Bug fix: the original pre-allocated two descriptor dicts but only
        # ever filled the first, leaking a stray empty ``{}`` into the
        # published description.
        ret['payload-desc'] = [{}]

        ret['payload-desc'][0]['attribute'] = EPInfoLight.ATTR_ACTUATOR_ID
        ret['payload-desc'][0]['type'] = 'integer'
        ret['payload-desc'][0]['value'] = 1

        return ret

    def executeByParameters(self, actuatorId) -> dict:
        """URL-parameter entry point; normalizes into a payload dict and
        delegates to :meth:`executeByPayload`."""
        payload = {}
        payload[EPInfoLight.ATTR_ACTUATOR_ID] = int(actuatorId)
        return self.executeByPayload(payload)

    def executeByPayload(self, payload) -> dict:
        """Return the saved light value and worker-thread status for the
        light actuator; raise a 404 InvalidAPIUsage for any other id."""
        actuatorId = int(payload[EPInfoLight.ATTR_ACTUATOR_ID])

        if actuatorId == self.web_gadget.getLightId():

            actualValue = self.web_gadget.fetchSavedLightValue()

            logging.debug( "WEB request: {0} {1} ('{2}': {3})".format(
                EPInfoLight.METHOD, EPInfoLight.URL,
                EPInfoLight.ATTR_ACTUATOR_ID, actuatorId)
            )

            return {"value": actualValue, "thread": self.web_gadget.getThreadControllerStatus()}

        else:
            # Bug fix: the original message also interpolated an undefined
            # ``value`` variable, so this path raised NameError instead of
            # the intended 404 InvalidAPIUsage.
            raise InvalidAPIUsage("No such actuator: {0}".format(actuatorId), error_code=404)
|
9,465 | c2b3594d25e2d1670d9b99e0d3484c680f59421f |
import random
import tqdm
from keras.models import load_model
from ModelUtil import precision, recall, f1
from tqdm import tqdm
import cv2 as cv
import numpy as np
import os
import pandas as pd
from PIL import Image
# Expose only CUDA device 1 to this process.
os.environ['CUDA_VISIBLE_DEVICES']='1'

# Trained model weights and the train/test image folders.
model_path = '/home/bo/Project/densenet.hdf5'
train_img_path = '/home/bo/Project/Eyes_data/first_train/'
test_img_path = '/home/bo/Project/Eyes_data/first_test/'
# Label sheet: indexed by image stem, read for its 'level' column.
label_df = pd.read_csv('/home/bo/Project/Eyes_data/first_label.csv', error_bad_lines=False, index_col=0)

# Edge length every image is resized to before entering the network.
SIZE = 224
def preprocess_image(image_path, desired_size=SIZE):
    """Load an image file and resize it to a square.

    :param image_path: path of the image file
    :param desired_size: edge length of the output square (default 224)
    :return: the resized PIL image
    """
    image = Image.open(image_path)
    # (desired_size, desired_size) square, high-quality Lanczos resampling.
    return image.resize((desired_size, desired_size), resample=Image.LANCZOS)
def set_data(img_path, dataframe):
    """Pair every image in *img_path* with its label from *dataframe*.

    :param img_path: folder containing the image files
    :param dataframe: frame indexed by image stem, with a ``level`` column
    :return: tuple ``(images, labels)`` -- a uint8 (N, SIZE, SIZE, 3) array
        and a float label vector

    Fixes: the original docstring promised the image names as a third
    return value and an ``image_names`` array was filled but never
    returned -- the dead computation is removed and the doc now matches
    the contract. The directory is also listed once instead of twice
    (sizing and iteration previously used separate ``os.listdir`` calls).
    """
    file_names = os.listdir(img_path)
    count = len(file_names)
    x_ = np.empty((count, SIZE, SIZE, 3), dtype=np.uint8)
    y_ = np.empty(count)
    for i, img_name in enumerate(tqdm(file_names)):
        x_[i, :, :, :] = preprocess_image(img_path + img_name)
        y_[i] = dataframe.loc[img_name.split('.')[0], 'level']
    return x_, y_
def predict(X):
    """Load the saved model and return its predictions for *X*."""
    model = load_model(
        model_path,
        custom_objects={'precision': precision, 'recall': recall, 'f1': f1},
    )
    return model.predict(X)
def sobel(img_set):
    """Apply a combined x/y Sobel edge filter to every image in *img_set*."""
    filtered = np.empty(img_set.shape)
    for idx, img in enumerate(tqdm(img_set)):
        src = np.float32(img)
        # Absolute gradients per axis, then their equal-weight blend.
        gradient_x = cv.convertScaleAbs(cv.Sobel(src, cv.CV_32F, 1, 0))
        gradient_y = cv.convertScaleAbs(cv.Sobel(src, cv.CV_32F, 0, 1))
        filtered[idx, :] = cv.addWeighted(gradient_x, 0.5, gradient_y, 0.5, 0)
    return filtered
def canny(img_set):
    """Mask every image in *img_set* by its Canny edge map.

    Each image is Gaussian-blurred, converted to grayscale, edge-detected
    with thresholds (50, 150), and the edge map is used to mask the
    original pixels.
    """
    filtered = np.empty(img_set.shape)
    for i, image in enumerate(tqdm(img_set)):
        blurred = cv.GaussianBlur(np.float32(image), (3, 3), 0)
        gray = cv.cvtColor(blurred, cv.COLOR_RGB2GRAY)
        edge_output = cv.Canny(gray, 50, 150)
        # Keep only the pixels lying on detected edges.
        # Fix: removed a leftover debug ``print(dst)`` that dumped every
        # masked image array to stdout inside the loop.
        dst = cv.bitwise_and(image, image, mask=edge_output)
        filtered[i, :] = dst
    return filtered
def scharr(img_set):
    """Apply a combined x/y Scharr edge filter to every image in *img_set*."""
    filtered = np.empty(img_set.shape)
    for idx, img in enumerate(tqdm(img_set)):
        src = np.float32(img)
        gradient_x = cv.convertScaleAbs(cv.Scharr(src, cv.CV_32F, 1, 0))
        gradient_y = cv.convertScaleAbs(cv.Scharr(src, cv.CV_32F, 0, 1))
        filtered[idx, :] = cv.addWeighted(gradient_x, 0.5, gradient_y, 0.5, 0)
    return filtered
def laplace(img_set):
    """Apply a 3x3 Laplacian edge filter to every image in *img_set*."""
    filtered = np.empty(img_set.shape)
    for idx, img in enumerate(tqdm(img_set)):
        response = cv.Laplacian(np.float32(img), cv.CV_32F, ksize=3)
        filtered[idx, :] = cv.convertScaleAbs(response)
    return filtered
def sp_noise(img_set, prob=0.1):
    """Add salt-and-pepper noise to every image in *img_set*.

    Each pixel independently becomes black with probability *prob*, white
    with probability *prob*, and is otherwise copied unchanged.
    """
    noisy = np.empty(img_set.shape)
    for idx, image in enumerate(tqdm(img_set)):
        out = np.zeros(image.shape, np.uint8)
        salt_threshold = 1 - prob
        for row in range(image.shape[0]):
            for col in range(image.shape[1]):
                draw = random.random()
                if draw < prob:
                    out[row][col] = 0            # pepper
                elif draw > salt_threshold:
                    out[row][col] = 255          # salt
                else:
                    out[row][col] = image[row][col]
        noisy[idx, :] = out
    return noisy
def gasuss_noise(img_set, mean=0, var=0.01):
    """Add Gaussian noise with the given *mean* and *var* to every image.

    Pixels are scaled to [0, 1], perturbed, clipped back into range and
    rescaled to uint8 values (stored in a float output array).
    """
    noisy = np.empty(img_set.shape)
    for idx, image in enumerate(tqdm(img_set)):
        scaled = np.array(image / 255, dtype=float)
        noise = np.random.normal(mean, var ** 0.5, image.shape)
        out = scaled + noise
        # Clip floor mirrors the original: -1 only if noise went negative.
        low_clip = -1. if out.min() < 0 else 0.
        out = np.clip(out, low_clip, 1.0)
        noisy[idx, :] = np.uint8(out * 255)
    return noisy
def ouput_csv(X_, Y_, csv_path):
    """Run the saved model on ``X_`` and dump predictions with labels to CSV.

    The output frame holds one column per prediction dimension plus a
    'level' column (Y_[:, 0]) and a 'label' column (Y_[:, 1]).

    :param X_: model input batch
    :param Y_: label array whose first two columns are level and label
    :param csv_path: destination CSV file path
    """
    net = load_model(model_path,
                     custom_objects={'precision': precision, 'recall': recall, 'f1': f1})
    frame = pd.DataFrame(net.predict(X_))
    frame['level'] = Y_[:, 0]
    frame['label'] = Y_[:, 1]
    print(frame)
    frame.to_csv(csv_path, index=False)
## if you would like to use sobel
# Build the train split (flagged 1) and test split (flagged 0), run the
# Sobel filter over both, and write model predictions plus labels to CSV.
x_train, y_train = set_data(train_img_path,label_df)
y_in = np.c_[y_train, np.ones(y_train.shape[0])]
x_test, y_test = set_data(test_img_path,label_df)
y_out = np.c_[y_test, np.zeros(y_test.shape[0])]
X_ = np.r_[sobel(x_train), sobel(x_test)]
Y_ = np.r_[y_in, y_out]
ouput_csv(X_, Y_, 'sobel_eye.csv')
## original output without operator
# x_train, y_train = set_data(train_img_path,label_df)
# y_in = np.c_[y_train, np.ones(y_train.shape[0])]
# x_test, y_test = set_data(test_img_path,label_df)
# y_out = np.c_[y_test, np.zeros(y_test.shape[0])]
#
# X_ = np.r_[x_train, x_test]
# Y_ = np.r_[y_in, y_out]
#
# ouput_csv(X_, Y_, 'sobel_eye.csv')
|
9,466 | fedec397ac0346bad1790315b4f85fbb1a662a4e | import subprocess
from dissamblerAbstract import disassemblerAbstract
#lib/ZydisDisasm -64 /home/nislab2/Desktop/DissamblerEffect/metamorphic/00fe0c08024f7db771d6711787d890a3.exe
class ZydisDisassembler(disassemblerAbstract):
    """Disassembler backend that shells out to the bundled ZydisDisasm binary."""

    def diassemble(self, filename, bits='32bit'):
        """Disassemble an executable and return the full listing as text.

        :param filename: path of the executable to disassemble
        :type filename: str
        :param bits: target platform width: '16bit', '32bit' or '64bit'
        :type bits: str
        :return: newline-separated assembly listing
        :rtype: str
        """
        width = bits.replace("bit", "")
        listing = subprocess.check_output(['lib/ZydisDisasm', "-" + width, filename])
        return listing.decode("utf-8")

    def getDisassembledCode(self, filename, delimeter='\n', bits='32bit'):
        """Return the disassembly with lines joined by *delimeter*.

        :param filename: binary file path
        :param delimeter: separator placed between instructions
        :param bits: target platform width ('16bit', '32bit', '64bit')
        :rtype: str
        """
        return self.diassemble(filename, bits).replace("\n", delimeter)

    def getAssemblyCode(self, filename, delimeter='\n', bits='32bit'):
        """Alias of :meth:`getDisassembledCode`: delimiter-joined listing."""
        return self.diassemble(filename, bits).replace("\n", delimeter)

    def getAssemblyCodeList(self, filename, bits='32bit'):
        """Return the disassembly as a list of instruction lines."""
        return self.diassemble(filename, bits).split("\n")
'''
zydisDissambler = ZydisDisasembler()
x = zydisDissambler.getDisassembledCode("/home/nislab2/Desktop/DissamblerEffect/metamorphic/00fe0c08024f7db771d6711787d890a3.exe")
print(x)
''' |
9,467 | caf83d35ce6e0bd4e92f3de3a32221705a529ec1 | #!/usr/bin/env python3
# ---------------------------------------------------
# SSHSploit Framework
# ---------------------------------------------------
# Copyright (C) <2020> <Entynetproject>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
os.system("printf '\033]2;SSHSploit Framework\a'")
import sys
import subprocess
import readline
import time
# ANSI-coloured status prefixes: question, general, success, warning, error.
Q = '\033[1;77m[?] \033[0m'
G = '\033[1;34m[*] \033[0m'
S = '\033[1;32m[+] \033[0m'
W = '\033[1;33m[!] \033[0m'
E = '\033[1;31m[-] \033[0m'
# Default module state: target host/port, RCE command, selected module name.
# NOTE(review): main() assigns names like these and therefore uses its own
# locals — these module-level values are never read inside it; confirm.
rhost = ""
rport = ""
cmd = ""
attack = ""
pwd = 0        # depth in the module breadcrumb
location = []  # stack of entered module contexts
# Enable tab completion at the interactive prompts.
readline.parse_and_bind("tab: complete")
def banner():
    """Clear the terminal and print the framework banner with version header."""
    for command in ("clear", "cat banner/banner.txt"):
        os.system(command)
    for line in ("", "SSHSploit Framework v1.0", "------------------------", ""):
        print(line)
def main():
    """Top-level interactive shell for the SSHSploit console.

    Reads commands from stdin and dispatches them. Selecting a module with
    ``use`` drops into a nested module prompt where options can be set and
    the attack launched; ``back`` returns to the top prompt.
    """
    # Session state. Every one of these names is assigned somewhere in this
    # function, so Python treats them all as locals; the original relied on
    # module-level globals and crashed with UnboundLocalError on the first
    # `use` (and `location[pwd] = c[1]` referenced an undefined name `c`).
    rhost = ""
    rport = ""
    cmd = ""
    attack = ""
    pwd = 0        # current depth in the module breadcrumb
    location = []  # stack of entered module names
    ui = input('\033[4msshsploit\033[0m> ').strip(" ")
    ui = ui.split()
    while True:
        if ui == []:
            pass
        elif ui[0] == "exit":
            sys.exit()
        elif ui[0] == "clear":
            os.system("clear")
        elif ui[0] == "update":
            os.system("chmod +x etc/update.sh && etc/update.sh")
        elif ui[0] == "help":
            print("")
            print("Core Commands")
            print("=============")
            os.system("cat data/cmds/core_cmds.txt")
            print("")
        elif ui[0] == "modules":
            print("")
            print("Modules")
            print("=======")
            print("")
            os.system("cat data/modules/modules.txt")
            print("")
        elif ui[0] == "use":
            if len(ui) < 2:
                print("Usage: use <module>")
            else:
                attack = ui[1]
                if attack == "libssh_rce_noauth" or attack == "libssh_shell_noauth":
                    # Push the selected module onto the breadcrumb (fixes the
                    # original `location[pwd] = c[1]`: NameError + IndexError).
                    location.append(attack)
                    mod = input('\033[4msshsploit\033[0m(\033[1;31m'+attack+'\033[0m)> ').strip(" ")
                    mod = mod.split()
                    while True:
                        if mod == []:
                            pass
                        elif mod[0] == "back":
                            # Pop one breadcrumb level and fall back to the
                            # top-level prompt (original behavior preserved).
                            pwd -= 1
                            location = location[0:-1]
                            if location == []:
                                pwd = 0
                            break
                        elif mod[0] == "set":
                            if len(mod) < 3:
                                print("Usage: set <option> <value>")
                            else:
                                if attack == "libssh_rce_noauth":
                                    if mod[1].lower() == "rhost":
                                        rhost = mod[2]
                                    elif mod[1].lower() == "rport":
                                        rport = mod[2]
                                    elif mod[1].lower() == "cmd":
                                        cmd = mod[2]
                                    else:
                                        print(E+"Options is not found!")
                                else:
                                    if mod[1].lower() == "rhost":
                                        rhost = mod[2]
                                    elif mod[1].lower() == "rport":
                                        rport = mod[2]
                                    else:
                                        print(E+"Options is not found!")
                        elif mod[0] == "options":
                            if attack == "libssh_rce_noauth":
                                os.system("ruby data/options/options.rb libssh_rce_noauth "+rhost+" "+rport+" "+cmd)
                            else:
                                os.system("ruby data/options/options.rb libssh_shell_noauth "+rhost+" "+rport)
                        elif mod[0] == "use":
                            if len(mod) < 2:
                                print("Usage: use <module>")
                            else:
                                attack = mod[1]
                                if attack == "libssh_rce_noauth" or attack == "libssh_shell_noauth":
                                    pwd += 1
                                    # Append instead of writing one slot past
                                    # the end of the list (original IndexError).
                                    location.append(mod[1])
                                else:
                                    print(E+"Module is not found!")
                        elif mod[0] == "run":
                            if rhost == "" or rport == "":
                                print(E+"Target is not specified!")
                            else:
                                if attack == "libssh_rce_noauth":
                                    if cmd == "":
                                        print(E+"Command for RCE is not specified!")
                                    else:
                                        print(G+"Starting libssh_rce_noauth attack...")
                                        os.system("python3 modules/libssh_rce_noauth.py "+rhost+" -p "+rport+" -v '"+cmd+"'")
                                elif attack == "libssh_shell_noauth":
                                    print(G+"Starting libssh_shell_noauth attack...")
                                    os.system("python3 modules/libssh_shell_noauth.py "+rhost+" -p "+rport+" -v --shell")
                        elif mod[0] == "clear":
                            os.system("clear")
                        elif mod[0] == "exit":
                            sys.exit()
                        elif mod[0] == "update":
                            os.system("chmod +x etc/update.sh && etc/update.sh")
                        elif mod[0] == "help":
                            print("")
                            print("Core Commands")
                            print("=============")
                            os.system("cat data/cmds/core_cmds.txt")
                            print("")
                            print("Module Commands")
                            print("===============")
                            os.system("cat data/cmds/module_cmds.txt")
                            print("")
                        else:
                            print(E+"Unrecognized command!")
                        mod = input('\033[4msshsploit\033[0m(\033[1;31m'+attack+'\033[0m)> ').strip(" ")
                        mod = mod.split()
                else:
                    print(E+"Module is not found!")
        else:
            print(E+"Unrecognized command!")
        ui = input('\033[4msshsploit\033[0m> ').strip(" ")
        ui = ui.split()
# Entry point: draw the banner, then hand control to the interactive shell.
banner()
main()
|
9,468 | fb53ea6a7184c0b06fb8a4cbfaf2145cc5c2e8e2 | import hlp
import pdb
class Nnt(list):
    """
    Generic neural-network layer: a list of sub-components together with a
    symbolic output expression.
    """

    def __init__(self):
        """Create an empty, untagged network object."""
        self.tag = None

    def y(self, x):
        """Build the symbolic output expression for input {x}.

        The base layer is the identity; subclasses override this. It is also
        the default expression produced when the object is called directly.
        """
        return x

    def __call__(self, x):
        """Calling the object builds the same expression as ``y(x)``."""
        return self.y(x)

    def p(self):
        """Return the independent parameters (shared tensor variables)
        appearing in the output expression ``y``."""
        return hlp.parms(self.y(0))

    def __repr__(self):
        prefix = "" if self.tag is None else self.tag
        return '{}{}'.format(prefix, super(Nnt, self).__repr__())
|
9,469 | 54276074d84e63e6418f8738bb7f910424f1c94d | import sys
import os
# Directory two levels above this file; appended to sys.path so sibling
# packages resolve when this module is executed directly.
PROJ_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.append(PROJ_DIR)
9,470 | 8173afbd82b8da04db4625ac686c0d052e65a21c | from PyQt4 import QtGui
from PyQt4.QtCore import pyqtSignal, pyqtSlot, QObject, Qt
from twisted.internet.defer import inlineCallbacks
import numpy as np
from connection import connection
import pyqtgraph as pg
from pyqtgraph.SignalProxy import SignalProxy
import sys
import time
global harwareConfiguration
class graphingwidget(QtGui.QWidget):
    """pyqtgraph widget that renders DDS channel pulse sequences as
    square-wave traces, one horizontal band per channel."""
    SIGNALID = 104692
    update_signal = pyqtSignal(list)
    def __init__(self,reactor, configpath):
        """Build the widget; configpath points at the hardwareConfiguration module."""
        super(graphingwidget,self).__init__()
        self.reactor = reactor
        self.configpath = configpath
        self.initialize()
        # pulses are shifted right by this many units so pre-trigger steady
        # state (starting at -50) remains visible on the plot
        self.timeoffset = 200
    def mouseMoved(self,evt):
        """Follow the cursor and display its x (time) coordinate in a label."""
        pos = evt
        if self.figure.sceneBoundingRect().contains(pos):
            mousePoint = self.figure.plotItem.vb.mapSceneToView(pos)
            index = int(mousePoint.x())
            self.label.setPos(mousePoint)
            self.label.setText("{:d}".format(int(mousePoint.x())))
    def initialize(self):
        """Load the hardware configuration and wire up the plot."""
        sys.path.append(self.configpath)
        global hardwareConfiguration
        from hardwareConfiguration import hardwareConfiguration
        self.ddslist = hardwareConfiguration.ddsDict
        self.do_layout()
        self.figure.scene().sigMouseMoved.connect(self.mouseMoved)
    def do_layout(self):
        """Create the plot with one labelled y-axis row per DDS channel."""
        yaxis = pg.AxisItem(orientation='left')
        ticks = []
        # label rows in channel-number order; blank labels past the last channel
        sorteddict = sorted(self.ddslist.items(),key =lambda x: x[1].channelnumber)
        for i in range(0,17):
            if i < len(sorteddict):
                string = sorteddict[i][0]
            else:
                string = ""
            ticks.append((i+0.5,string))
        yaxis.setTicks([ticks])
        self.figure = pg.PlotWidget(axisItems ={'left':yaxis})
        self.layoutVertical = QtGui.QVBoxLayout(self)
        self.layoutVertical.addWidget(self.figure)
        for adds,config in self.ddslist.iteritems():
            self.figure.addItem(pg.PlotCurveItem(range(10),[1]*10,pen='w'))
        self.figure.setYRange(0,17)
        self.figure.setMouseEnabled(y=False)
        self.figure.showGrid(x=True,y=True,alpha=0.4)
        self.label = pg.TextItem(anchor=(0,1))
        self.figure.plotItem.addItem(self.label)
    @pyqtSlot(list,int,list)
    def do_sequence(self,sequence,timelength,steadystatenames):
        """Convert a pulse sequence into square-wave x/y arrays and plot them.

        sequence items look like (channelname, start, duration) where the time
        entries are indexable by 'ms'; steadystatenames lists channels that
        are already on before t=0.
        """
        xdatalist = []
        ydatalist = []
        for achannelname, adds in self.ddslist.iteritems():
            channelpulses = [i for i in sequence if i[0] == achannelname]
            channelpulses.sort(key= lambda name: name[1]['ms'])
            starttimes = []
            endtimes = []
            frequencies = []
            amplitudes = []
            if achannelname in steadystatenames:
                # channel is on before the sequence begins
                starttimes.append(-50)
                endtimes.append(0)
            for apulse in channelpulses:
                starttimes.append(apulse[1]['ms'])
                endtimes.append((apulse[1]+ apulse[2])['ms'])
            # high/low y levels inside this channel's band
            yhigh = 0.75+adds.channelnumber
            ylow = 0.25+adds.channelnumber
            # NOTE(review): len() is never negative, so this branch is dead and
            # the else always runs — possibly intended "> 0"; confirm intent.
            if len(starttimes) < 0:
                xdata = [starttimes[0]+self.timeoffset]
                ydata = [yhigh]
            else:
                xdata = [self.timeoffset]
                ydata = [ylow]
            # emit two points at each edge so the trace is a square wave
            for i in range(len(starttimes)):
                xdata += [starttimes[i]+self.timeoffset]*2 + [endtimes[i]+self.timeoffset]*2
                if ydata[-1] == ylow:
                    ydata += [ylow,yhigh,yhigh,ylow]
                else:
                    ydata += [yhigh,ylow,ylow,yhigh]
            xdata.append(timelength)
            ydata.append(ylow)
            xdatalist.append(xdata)
            ydatalist.append(ydata)
        self.plot(xdatalist,ydatalist)
    def plot(self,xlist,ylist):
        """Redraw all channel traces plus the dashed t=0 marker line."""
        self.figure.clear()
        self.figure.addItem(self.label)
        for i in range(len(xlist)):
            xdata = xlist[i]
            ydata = ylist[i]
            if len(xdata)>1:
                self.figure.addItem(pg.PlotCurveItem(xdata,ydata,pen='w'))
        self.figure.addItem(pg.InfiniteLine(self.timeoffset,pen=pg.mkPen('r',style=Qt.DashLine)))
|
9,471 | 6b3f634f3f0108e678d44ef9c89150f9fd116f76 | file_id = '0BwwA4oUTeiV1UVNwOHItT0xfa2M'
# Download a Google Drive file into an in-memory buffer (Python 2 snippet).
request = drive_service.files().get_media(fileId=file_id)
# BytesIO buffer that receives the downloaded bytes.
fh = io.BytesIO()
downloader = MediaIoBaseDownload(fh, request)
done = False
# Pull the file down chunk by chunk, printing percentage progress.
while done is False:
    status, done = downloader.next_chunk()
    print "Download %d%%." % int(status.progress() * 100)
|
9,472 | 210199ed217db0d7a05e280f20e33496c0795f06 | from base64 import b64decode
import time
from lampost.context.resource import m_requires
from lampost.datastore.dbo import KeyDBO
from lampost.datastore.dbofield import DBOField
from lampost.datastore.exceptions import DataError
from lampost.model.player import Player
from lampost.util.encrypt import make_hash, check_password
from lampost.util.lputil import ClientError
m_requires(__name__, 'log', 'perm', 'datastore', 'dispatcher')
class User(KeyDBO):
    """Persistent account record owning zero or more player characters."""
    dbo_key_type = "user"
    dbo_set_key = "users"
    dbo_indexes = "user_name", "email"

    # Persisted account fields (DBOField descriptors handled by the datastore).
    user_name = DBOField('')
    password = DBOField()  # password hash; never plaintext
    password_reset = DBOField(False)  # set when a legacy-format hash was accepted
    email = DBOField('')
    notes = DBOField('')
    player_ids = DBOField([])  # dbo_ids of players attached to this account
    displays = DBOField({})
    notifies = DBOField([])  # notification preferences, e.g. 'friendSound'

    @property
    def edit_dto(self):
        """Editor DTO with the stored password hash blanked out."""
        dto = super().edit_dto
        dto['password'] = ''
        return dto

    @property
    def imm_level(self):
        """Highest immortal level among this account's players (0 if none)."""
        if self.player_ids:
            return max([perm.immortals.get(player_id, 0) for player_id in self.player_ids])
        return 0
class UserManager():
    """Account lifecycle service: authentication, player attach/detach,
    login/logout bookkeeping and cleanup of deleted players."""

    def _post_init(self):
        # Subscribe to session events once resources are injected.
        register("user_connect", self._user_connect)
        register("player_connect", self._player_connect)

    def validate_user(self, user_name, password):
        """Return the user for user_name/password; raise ClientError on failure."""
        user = self.find_user(user_name)
        if not user:
            raise ClientError()
        self.validate_password(user, password)
        return user

    def validate_password(self, user, password):
        """Check a password, transparently accepting legacy 'salt$b64' hashes.

        A legacy match logs a warning and flags the account for password reset.
        Raises ClientError("invalid_password") if neither format matches.
        """
        if check_password(user.password, password):
            return
        # Fall back to the pre-migration "salt$base64" hash layout.
        salt, old_password = user.password.split('$')
        if check_password(b64decode(bytes(old_password, 'utf-8')), password, bytes(salt, 'utf-8')):
            warn("Using old password for account {}", user.user_name)
            user.password_reset = True
            save_object(user)
        else:
            raise ClientError("invalid_password")

    def find_user(self, user_name):
        """Look up a user by account name, or by one of its player names."""
        user_name = user_name.lower()
        user_id = get_index("ix:user:user_name", user_name)
        if user_id:
            return load_object(user_id, User)
        # Not an account name: try it as a player name and follow user_id.
        player = load_object(user_name, Player)
        if player:
            return load_object(player.user_id, User)
        return None

    def delete_user(self, user):
        """Delete an account and every player attached to it."""
        for player_id in user.player_ids:
            self._player_delete(player_id)
        delete_object(user)
        dispatch('publish_edit', 'delete', user)

    def delete_player(self, user, player_id):
        """Delete one player and detach it from its owning account."""
        if user:
            self._player_delete(player_id)
            user.player_ids.remove(player_id)
            save_object(user)

    def attach_player(self, user, player):
        """Attach a newly created player to an account and persist both."""
        user.player_ids.append(player.dbo_id)
        set_index('ix:player:user', player.dbo_id, user.dbo_id)
        dispatch('player_create', player, user)
        player.user_id = user.dbo_id
        save_object(player)
        save_object(user)
        return player

    def find_player(self, player_id):
        """Load a player by id (None if it does not exist)."""
        return load_object(player_id, Player)

    def create_user(self, user_name, password, email=""):
        """Create and persist a new account with default notification settings."""
        user_raw = {'dbo_id': db_counter('user_id'), 'user_name': user_name,
                    'email': email, 'password': make_hash(password),
                    'notifies': ['friendSound', 'friendDesktop']}
        user = create_object(User, user_raw)
        dispatch('publish_edit', 'create', user)
        return user

    def check_name(self, account_name, user):
        """Raise DataError if account_name collides with an existing account
        or player name (names already owned by *user* are allowed)."""
        account_name = account_name.lower()
        if user:
            if account_name == user.user_name.lower():
                return
            for player_id in user.player_ids:
                if account_name == player_id.lower():
                    return
        if self.player_exists(account_name) or get_index("ix:user:user_name", account_name):
            raise DataError("InUse: {}".format(account_name))

    def player_exists(self, player_id):
        """Return truthy if a player object with this id is stored."""
        return object_exists(Player.dbo_key_type, player_id)

    def _user_connect(self, user, client_data):
        # Populate the client session payload on account login.
        client_data.update({'user_id': user.dbo_id, 'player_ids': user.player_ids, 'displays': user.displays,
                            'password_reset': user.password_reset, 'notifies': user.notifies})

    def _player_connect(self, player, client_data):
        # Populate the client session payload on character selection.
        client_data['name'] = player.name
        if player.imm_level:
            client_data['imm_level'] = player.imm_level

    def login_player(self, player):
        """Baptise the player into the world and stamp the login time."""
        dispatch('player_baptise', player)
        player.last_login = int(time.time())
        if not player.created:
            player.created = player.last_login
        player.start()

    def logout_player(self, player):
        """Accumulate played time, detach, persist and evict the player.

        NOTE(review): relies on player.last_logout being set elsewhere before
        this runs — confirm against the caller.
        """
        player.age += player.last_logout - player.last_login
        player.detach()
        save_object(player)
        evict_object(player)

    def id_to_name(self, player_id):
        """Display name for a player id (None if the id is not a string)."""
        try:
            return player_id.capitalize()
        except AttributeError:
            pass

    def name_to_id(self, player_name):
        """Canonical player id for a display name."""
        return player_name.lower()

    def player_cleanup(self, player_id):
        """Remove indexes for a deleted player and reassign its owned objects."""
        delete_index('ix:player:user', player_id)
        for dbo_id in fetch_set_keys('owned:{}'.format(player_id)):
            dbo = load_object(dbo_id)
            if dbo and dbo.owner_id == player_id:
                dbo.change_owner()
                save_object(dbo)
                dispatch('publish_update', 'update', dbo)
        dispatch('player_deleted', player_id)

    def _player_delete(self, player_id):
        # Delete the stored player (warning if missing), then clean indexes.
        player = load_object(player_id, Player)
        if player:
            dispatch('publish_edit', 'delete', player)
            delete_object(player)
        else:
            warn("Attempting to delete player {} who does not exist.".format(player_id))
        self.player_cleanup(player_id)
|
9,473 | 4fba13d051a3aceb393a4473cdbf6d4fc684c7ac | fname = input('Enter the file name to open')
fh = open(fname)
senders = list()
counts = dict()
# Pull the sender address (second token) out of every "From " envelope line,
# skipping the "From:" header lines.
for line in fh:
    if not line.startswith("From") or line.startswith('From:'):
        continue
    senders.append(line.split()[1])
# Tally how many messages each sender contributed.
for sender in senders:
    counts[sender] = counts.get(sender, 0) + 1
# Pick the sender with the highest count (first one wins ties).
bigword = None
bigcount = None
for sender, total in counts.items():
    if bigcount is None or total > bigcount:
        bigword, bigcount = sender, total
print(bigword, bigcount)
|
9,474 | 988e1f0631c434cbbb6d6e973792a65ebbd9405e | print(4 / 2, 4 / 3, 4 / 4)
# True division (/) always yields floats in Python 3.
print(5 / 2, 5 / 3, 5 / 4)
# Floor division (//) truncates toward negative infinity; ints stay ints.
print(4 // 2, 4 // 3, 4 // 4)
print(5 // 2, 5 // 3, 5 // 4)
# Mixing in a float operand makes the result a float, even with //.
print(4.0 / 2, 4 / 3.0, 4.0 / float(4))
print(5.0 / 2, 5 / 3.0, 5.0 / float(4))
print(4.0 // 2, 4 // 3.0, 4.0 // float(4))
print(5.0 // 2, 5 // 3.0, 5.0 // float(4))
9,475 | 8ae10aada79b0a687732e341d275eb3823ec0e4a | # Copyright 2021-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Data operations, will be used in run_pretrain.py
"""
import os
import math
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.dataset as de
import mindspore.dataset.transforms as C
from mindspore import log as logger
class BucketDatasetGenerator:
    """
    Provide data distribution of different gears for the bert network.

    Wraps a dataset and regroups samples into batches whose sentence length
    fits one of the configured buckets; the bucket emitted at each step is
    chosen by a pre-drawn random schedule.

    Args:
        data_set (Dataset): The training dataset.
        batch_size (Int): The training batchsize.
        bucket_list (List): List of different sentence lengths,such as [128, 256, 512]. Default: None.
    """
    def __init__(self, data_set, batch_size, bucket_list=None):
        self.dataset = data_set
        self.batch_size = batch_size
        self.bucket_list = bucket_list
        # pending samples per bucket length
        self.data_bucket = {bucket: [] for bucket in bucket_list}
        bucket_size = len(bucket_list)
        # pre-drawn schedule of bucket lengths, one entry per emitted batch
        self.random_list = np.random.binomial(n=(bucket_size - 1), p=0.5, size=self.__len__())
        self.random_list = (self.random_list + 2) % bucket_size
        self.random_list = [bucket_list[i] for i in self.random_list]
        self.iter = 0
    def __next__(self):
        # draw samples until the scheduled bucket has a full batch
        for item in self.iterator:
            # route each sample to the smallest bucket that fits its length
            # (item[1] is the input mask, so its sum is the sequence length)
            for seq_length in self.bucket_list:
                if np.sum(item[1]) <= seq_length:
                    self.data_bucket[seq_length].append(item)
                    break
            for key in self.data_bucket.keys():
                data = self.data_bucket[key]
                if len(data) >= self.batch_size and self.random_list[self.iter] == key:
                    self.data_bucket[key] = self.data_bucket[key][self.batch_size:]
                    # NOTE(review): arr aliases data[0]; its entries are
                    # overwritten in place while batching — confirm intended.
                    arr = data[0]
                    for i in range(1, self.batch_size):
                        current_data = data[i]
                        for j in range(len(current_data)):
                            arr[j] = np.concatenate((arr[j], current_data[j]))
                    # reshape each concatenated column back to (batch, -1)
                    res = ()
                    for label in arr:
                        newlabel = np.reshape(label, (self.batch_size, -1))
                        res += (newlabel,)
                    # append the chosen bucket length as the sentence flag
                    res += (np.array(key, np.int32),)
                    self.iter += 1
                    return res
        raise StopIteration
    def __iter__(self):
        self.iterator = self.dataset.create_tuple_iterator(output_numpy=True)
        return self
    def __len__(self):
        # one fewer than the number of full batches, so the random schedule
        # is not exhausted before the source iterator is
        return (self.dataset.get_dataset_size() // self.batch_size) - 1
def create_albert_dataset(device_num=1, rank=0, do_shuffle="true", data_dir=None, schema_dir=None, batch_size=32,
                          bucket_list=None):
    """Create the ALBERT pre-training dataset from TFRecord files.

    Args:
        device_num: number of shards (devices) for distributed training.
        rank: shard id of this device.
        do_shuffle: "true" to shuffle at file level.
        data_dir: directory containing the *tfrecord* files.
        schema_dir: optional dataset schema file ("" or None to skip).
        batch_size: per-device batch size.
        bucket_list: optional sentence-length buckets; when given, batching is
            delegated to BucketDatasetGenerator and a "sentence_flag" column
            is appended.

    Returns:
        A batched MindSpore dataset with all columns cast to int32.
    """
    # apply repeat operations
    files = os.listdir(data_dir)
    data_files = []
    for file_name in files:
        if "tfrecord" in file_name:
            data_files.append(os.path.join(data_dir, file_name))
    data_set = de.TFRecordDataset(data_files, schema_dir if schema_dir != "" else None,
                                  columns_list=["input_ids", "input_mask", "segment_ids", "next_sentence_labels",
                                                "masked_lm_positions", "masked_lm_ids", "masked_lm_weights"],
                                  shuffle=de.Shuffle.FILES if do_shuffle == "true" else False,
                                  num_shards=device_num, shard_id=rank, shard_equal_rows=True)
    if bucket_list:
        bucket_dataset = BucketDatasetGenerator(data_set, batch_size, bucket_list=bucket_list)
        data_set = de.GeneratorDataset(bucket_dataset,
                                       column_names=["input_ids", "input_mask", "segment_ids",
                                                     "next_sentence_labels",
                                                     "masked_lm_positions", "masked_lm_ids", "masked_lm_weights",
                                                     "sentence_flag"],
                                       shuffle=False)
    else:
        data_set = data_set.batch(batch_size, drop_remainder=True)
    ori_dataset_size = data_set.get_dataset_size()
    print('origin dataset size: ', ori_dataset_size)
    # cast every integer column to int32 for the network
    type_cast_op = C.TypeCast(mstype.int32)
    data_set = data_set.map(operations=type_cast_op, input_columns="masked_lm_ids")
    data_set = data_set.map(operations=type_cast_op, input_columns="masked_lm_positions")
    data_set = data_set.map(operations=type_cast_op, input_columns="next_sentence_labels")
    data_set = data_set.map(operations=type_cast_op, input_columns="segment_ids")
    data_set = data_set.map(operations=type_cast_op, input_columns="input_mask")
    data_set = data_set.map(operations=type_cast_op, input_columns="input_ids")
    # apply batch operations
    logger.info("data size: {}".format(data_set.get_dataset_size()))
    logger.info("repeat count: {}".format(data_set.get_repeat_count()))
    return data_set
def create_classification_dataset(batch_size=1, repeat_count=1, assessment_method="accuracy",
                                  data_file_path=None, schema_file_path=None, do_shuffle=True,
                                  rank_size=1, rank_id=0):
    """Create the finetune or evaluation dataset for classification tasks.

    Labels are cast to float32 for "Spearman_correlation" (a regression-style
    metric), otherwise to int32.
    # NOTE(review): schema_file_path is accepted but unused — confirm.
    """
    type_cast_op = C.TypeCast(mstype.int32)
    ds = de.MindDataset([data_file_path],
                        columns_list=["input_ids", "input_mask", "segment_ids", "label_ids"], shuffle=do_shuffle,
                        num_shards=rank_size, shard_id=rank_id)
    if assessment_method == "Spearman_correlation":
        type_cast_op_float = C.TypeCast(mstype.float32)
        ds = ds.map(operations=type_cast_op_float, input_columns="label_ids")
    else:
        ds = ds.map(operations=type_cast_op, input_columns="label_ids")
    ds = ds.map(operations=type_cast_op, input_columns="segment_ids")
    ds = ds.map(operations=type_cast_op, input_columns="input_mask")
    ds = ds.map(operations=type_cast_op, input_columns="input_ids")
    ds = ds.repeat(repeat_count)
    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)
    return ds
def generator_squad(data_features):
    """Yield (input_ids, input_mask, segment_ids, unique_id) per feature
    for SQuAD evaluation."""
    for item in data_features:
        yield item.input_ids, item.input_mask, item.segment_ids, item.unique_id
def generator_squad_train(data_features):
    """Yield (input_ids, input_mask, segment_ids, start_position, end_position,
    unique_id, is_impossible) per feature for SQuAD training."""
    for item in data_features:
        yield (item.input_ids, item.input_mask, item.segment_ids,
               item.start_position, item.end_position, item.unique_id,
               item.is_impossible)
def create_squad_dataset(batch_size=1, repeat_count=1, data_file_path=None, schema_file_path=None,
                         is_training=True, do_shuffle=True, rank_size=1,
                         rank_id=0):
    """Create the SQuAD finetune or evaluation dataset.

    For training, data_file_path is a MindRecord file; for evaluation, it is
    a list of feature objects consumed through generator_squad.
    # NOTE(review): schema_file_path is accepted but unused — confirm.
    """
    type_cast_op = C.TypeCast(mstype.int32)
    if is_training:
        print("data_file_path: ", data_file_path)
        print("rank_id: ", rank_id)
        ds = de.MindDataset([data_file_path],
                            columns_list=["input_ids", "input_mask", "segment_ids", "start_positions",
                                          "end_positions", "unique_ids", "is_impossible"],
                            shuffle=do_shuffle, num_shards=rank_size, shard_id=rank_id)
        ds = ds.map(operations=type_cast_op, input_columns="start_positions")
        ds = ds.map(operations=type_cast_op, input_columns="end_positions")
    else:
        ds = de.GeneratorDataset(generator_squad(data_file_path), shuffle=do_shuffle,
                                 column_names=["input_ids", "input_mask", "segment_ids", "unique_ids"])
    # common int32 casts for both modes
    ds = ds.map(operations=type_cast_op, input_columns="input_ids")
    ds = ds.map(operations=type_cast_op, input_columns="input_mask")
    ds = ds.map(operations=type_cast_op, input_columns="segment_ids")
    ds = ds.map(operations=type_cast_op, input_columns="unique_ids")
    ds = ds.repeat(repeat_count)
    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)
    return ds
def create_eval_dataset(batchsize=32, device_num=1, rank=0, data_dir=None, schema_dir=None):
    """Create the pre-training evaluation dataset.

    If the sample count does not divide evenly across devices and batches,
    the dataset is padded with zero samples so every shard sees the same
    number of full batches; otherwise a plain sharded TFRecordDataset is used.

    Args:
        batchsize: per-device batch size.
        device_num: number of shards (devices).
        rank: shard id of this device.
        data_dir: a directory of *tfrecord* files, or a single file path.
        schema_dir: optional dataset schema file ("" or None to skip).
    """
    data_files = []
    if os.path.isdir(data_dir):
        files = os.listdir(data_dir)
        for file_name in files:
            if "tfrecord" in file_name:
                data_files.append(os.path.join(data_dir, file_name))
    else:
        data_files.append(data_dir)
    data_set = de.TFRecordDataset(data_files, schema_dir if schema_dir != "" else None,
                                  columns_list=["input_ids", "input_mask", "segment_ids", "next_sentence_labels",
                                                "masked_lm_positions", "masked_lm_ids", "masked_lm_weights"],
                                  shard_equal_rows=True)
    ori_dataset_size = data_set.get_dataset_size()
    print("origin eval size: ", ori_dataset_size)
    dtypes = data_set.output_types()
    shapes = data_set.output_shapes()
    # how many zero samples are needed to fill the last batch on each device
    output_batches = math.ceil(ori_dataset_size / device_num / batchsize)
    padded_num = output_batches * device_num * batchsize - ori_dataset_size
    print("padded num: ", padded_num)
    if padded_num > 0:
        # pad with all-zero samples matching the original column shapes/types
        item = {"input_ids": np.zeros(shapes[0], dtypes[0]),
                "input_mask": np.zeros(shapes[1], dtypes[1]),
                "segment_ids": np.zeros(shapes[2], dtypes[2]),
                "next_sentence_labels": np.zeros(shapes[3], dtypes[3]),
                "masked_lm_positions": np.zeros(shapes[4], dtypes[4]),
                "masked_lm_ids": np.zeros(shapes[5], dtypes[5]),
                "masked_lm_weights": np.zeros(shapes[6], dtypes[6])}
        padded_samples = [item for x in range(padded_num)]
        padded_ds = de.PaddedDataset(padded_samples)
        eval_ds = data_set + padded_ds
        sampler = de.DistributedSampler(num_shards=device_num, shard_id=rank, shuffle=False)
        eval_ds.use_sampler(sampler)
    else:
        # evenly divisible: re-read with native sharding instead of padding
        eval_ds = de.TFRecordDataset(data_files, schema_dir if schema_dir != "" else None,
                                     columns_list=["input_ids", "input_mask", "segment_ids",
                                                   "next_sentence_labels",
                                                   "masked_lm_positions", "masked_lm_ids", "masked_lm_weights"],
                                     num_shards=device_num, shard_id=rank, shard_equal_rows=True)
    type_cast_op = C.TypeCast(mstype.int32)
    eval_ds = eval_ds.map(input_columns="masked_lm_ids", operations=type_cast_op)
    eval_ds = eval_ds.map(input_columns="masked_lm_positions", operations=type_cast_op)
    eval_ds = eval_ds.map(input_columns="next_sentence_labels", operations=type_cast_op)
    eval_ds = eval_ds.map(input_columns="segment_ids", operations=type_cast_op)
    eval_ds = eval_ds.map(input_columns="input_mask", operations=type_cast_op)
    eval_ds = eval_ds.map(input_columns="input_ids", operations=type_cast_op)
    eval_ds = eval_ds.batch(batchsize, drop_remainder=True)
    print("eval data size: {}".format(eval_ds.get_dataset_size()))
    print("eval repeat count: {}".format(eval_ds.get_repeat_count()))
    return eval_ds
|
9,476 | 8a2fe83ab1adae7de94eb168290ce4843ab39fe1 | import numpy
import multiprocessing
from functools import partial
from textutil.text import read_file
from textutil.util import B
import mmap
import tqdm
class Growable(object):
    """Amortized-growth 1-D numpy buffer, similar to a C++ vector.

    Capacity is multiplied by *grow* whenever an update would overflow the
    backing array; `finalize` returns the filled prefix.
    """

    def __init__(self, capacity=1024, dtype=numpy.uint32, grow=2):
        self.grow = grow
        self.capacity = capacity
        self.dtype = dtype
        self.arr = numpy.empty((self.capacity,), dtype=self.dtype)
        self.size = 0

    def __grow_to__(self, total):
        """Ensure backing storage for at least *total* elements."""
        if self.capacity >= total:
            return
        while self.capacity < total:
            self.capacity *= self.grow
        bigger = numpy.empty((self.capacity,), dtype=self.dtype)
        bigger[:self.size] = self.arr[:self.size]
        self.arr = bigger

    def __len__(self):
        return self.size

    def update(self, other):
        """Append all elements of *other* to the buffer."""
        count = len(other)
        self.__grow_to__(self.size + count)
        self.arr[self.size:self.size + count] = other
        self.size += count

    def finalize(self):
        """Return the filled portion of the backing array (a view)."""
        return self.arr[:self.size]
def ixifyfile(file, vocab=None):
    """Convert one text file into a flat uint32 word-index array.

    Words are mapped through *vocab*; every second sentence has the bit mask
    B OR-ed into its indices so sentence boundaries survive flattening.

    Returns:
        (array, even): the index array and the parity flag after the last
        sentence (True when an even number of sentences was consumed).
    """
    even = True
    arr = Growable()
    for sentence in read_file(file):
        # NOTE(review): vocab.get(word) yields None for unknown words, which
        # would fail inside np.array(..., uint32) — confirm vocab is total.
        six = numpy.array([vocab.get(word) for word in sentence], dtype=numpy.uint32)
        if not even:
            six |= B
        even = not even
        arr.update(six)
    return arr.finalize(), even
def ixifyfiles(ixfile, files, vocab):
    """Index many text files in parallel and stream the arrays into *ixfile*.

    Results arrive in completion order from the worker pool; the running
    *even* parity is corrected per file so sentence tagging stays globally
    alternating across file boundaries.
    """
    ixf = partial(ixifyfile, vocab=vocab)
    even = True
    files = list(files)
    with open(ixfile, 'wb') as ixhandle:
        with multiprocessing.Pool(8) as pool:
            for arr, i_even in tqdm.tqdm(pool.imap_unordered(ixf, files), total=len(files)):
                if even:
                    ixhandle.write(arr.tobytes())
                else:
                    # flip the tag bit on every index to restore parity
                    ixhandle.write((arr ^ B).tobytes())
                even = not (i_even ^ even)
|
9,477 | 414cb9a173ac70ad9ad1fc540aec569321fd3f8b | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
# True when running under Python 2; selects compatible string types below.
PY2 = sys.version_info[0] == 2
if PY2:
    text_type = unicode          # Py2: unicode is the text type
    string_types = basestring,   # one-element tuple for isinstance() checks
else:
    text_type = str
    string_types = str,
def with_metaclass(meta, *bases):
    """Create a dummy base class so *meta* applies on both Py2 and Py3.

    The returned temporary class is built by an intermediate metaclass whose
    __new__ discards the dummy level and constructs the real class directly
    with *meta* and *bases* — one level of class instantiation replaces
    itself with the actual metaclass.
    """
    class _shim(meta):
        def __new__(cls, name, this_bases, namespace):
            return meta(name, bases, namespace)
    return type.__new__(_shim, 'temporary_class', (), {})
|
9,478 | ff6dc347637a81c9f6a541775646b4901d719790 | import math
def sieve(limit):
    """Return all primes below *limit* using the Sieve of Eratosthenes.

    Args:
        limit: exclusive upper bound; values below 2 yield an empty list.

    Returns:
        List of primes p with 2 <= p < limit, in increasing order.
    """
    if limit < 2:
        # guard: the original indexed a[1] unconditionally and raised
        # IndexError for limit < 2
        return []
    ans = []
    a = [1] * limit
    a[0] = a[1] = 0  # 0 and 1 are not prime
    for i in range(2, limit):
        if a[i] == 0:
            continue
        ans.append(i)
        # mark multiples starting at i*i; smaller ones are already marked
        for j in range(i * i, limit, i):
            a[j] = 0
    return ans
# True iff x is a perfect square (epsilon guards against float rounding).
is_square = lambda x: int(math.sqrt(x) + 1e-9) ** 2 == x
N = 10 ** 6
p = sieve(N)
ps = set(p)
# Project Euler 46: find the smallest odd composite that cannot be written
# as a prime plus twice a square ("Goldbach's other conjecture").
for i in range(9, N, 2):
    if i in ps:
        continue
    found = False
    # p[1:] skips the prime 2, keeping i - j even so (i - j) // 2 is exact
    for j in p[1:]:
        if j > i:
            break
        q = (i - j) // 2
        if is_square(q):
            found = True
            break
    if not found:
        print(i)
        break
|
9,479 | c076aed1bfff51f8edf5ab4ef029b7fa7ca2422c | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'meet.ui'
#
# Created by: PyQt5 UI code generator 5.8.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """Generated UI class for the competition-creation dialog (meet.ui).

    Auto-generated by pyuic5 -- per the header warning, edit meet.ui and
    regenerate rather than modifying this class by hand.
    """
    def setupUi(self, Dialog):
        """Create and position every widget, then wire tab order and slots."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(607, 723)
        # Date range of the competition.
        self.start = QtWidgets.QLabel(Dialog)
        self.start.setGeometry(QtCore.QRect(10, 70, 59, 24))
        self.start.setObjectName("start")
        self.startDate = QtWidgets.QDateEdit(Dialog)
        self.startDate.setGeometry(QtCore.QRect(70, 70, 110, 24))
        self.startDate.setDate(QtCore.QDate(2017, 1, 1))
        self.startDate.setObjectName("startDate")
        self.end = QtWidgets.QLabel(Dialog)
        self.end.setGeometry(QtCore.QRect(190, 70, 81, 24))
        self.end.setObjectName("end")
        self.endDate = QtWidgets.QDateEdit(Dialog)
        self.endDate.setGeometry(QtCore.QRect(270, 70, 110, 24))
        self.endDate.setDate(QtCore.QDate(2017, 1, 1))
        self.endDate.setObjectName("endDate")
        self.name = QtWidgets.QLabel(Dialog)
        self.name.setGeometry(QtCore.QRect(10, 10, 59, 24))
        self.name.setObjectName("name")
        self.nameEdit = QtWidgets.QLineEdit(Dialog)
        self.nameEdit.setGeometry(QtCore.QRect(80, 10, 511, 24))
        self.nameEdit.setObjectName("nameEdit")
        # Athlete selection: two lists with add/remove buttons between them.
        self.athletes = QtWidgets.QLabel(Dialog)
        self.athletes.setGeometry(QtCore.QRect(10, 130, 141, 16))
        self.athletes.setObjectName("athletes")
        self.addButton = QtWidgets.QPushButton(Dialog)
        self.addButton.setGeometry(QtCore.QRect(285, 220, 31, 24))
        self.addButton.setObjectName("addButton")
        self.removeButton = QtWidgets.QPushButton(Dialog)
        self.removeButton.setGeometry(QtCore.QRect(285, 260, 31, 24))
        self.removeButton.setObjectName("removeButton")
        self.members = QtWidgets.QLabel(Dialog)
        self.members.setGeometry(QtCore.QRect(325, 130, 131, 16))
        self.members.setObjectName("members")
        self.meetCount = QtWidgets.QLabel(Dialog)
        self.meetCount.setGeometry(QtCore.QRect(390, 70, 121, 24))
        self.meetCount.setObjectName("meetCount")
        self.meetCountEdit = QtWidgets.QLineEdit(Dialog)
        self.meetCountEdit.setGeometry(QtCore.QRect(510, 70, 81, 24))
        self.meetCountEdit.setObjectName("meetCountEdit")
        self.sortitionButton = QtWidgets.QPushButton(Dialog)
        self.sortitionButton.setGeometry(QtCore.QRect(490, 360, 100, 24))
        self.sortitionButton.setObjectName("sortitionButton")
        self.cancel = QtWidgets.QPushButton(Dialog)
        self.cancel.setGeometry(QtCore.QRect(492, 690, 100, 24))
        self.cancel.setObjectName("cancel")
        self.athletesList = QtWidgets.QListWidget(Dialog)
        self.athletesList.setGeometry(QtCore.QRect(10, 150, 266, 201))
        self.athletesList.setObjectName("athletesList")
        self.membersList = QtWidgets.QListWidget(Dialog)
        self.membersList.setGeometry(QtCore.QRect(325, 150, 266, 201))
        self.membersList.setObjectName("membersList")
        self.city = QtWidgets.QLabel(Dialog)
        self.city.setGeometry(QtCore.QRect(10, 40, 131, 24))
        self.city.setObjectName("city")
        self.cityEdit = QtWidgets.QLineEdit(Dialog)
        self.cityEdit.setGeometry(QtCore.QRect(140, 40, 451, 24))
        self.cityEdit.setObjectName("cityEdit")
        # Referee section: chief referee/clerk combos plus two referee lists.
        self.main_referee = QtWidgets.QLabel(Dialog)
        self.main_referee.setGeometry(QtCore.QRect(10, 400, 101, 24))
        self.main_referee.setObjectName("main_referee")
        self.main_clerk = QtWidgets.QLabel(Dialog)
        self.main_clerk.setGeometry(QtCore.QRect(10, 430, 131, 24))
        self.main_clerk.setObjectName("main_clerk")
        self.mainrefCBox = QtWidgets.QComboBox(Dialog)
        self.mainrefCBox.setGeometry(QtCore.QRect(120, 400, 471, 24))
        self.mainrefCBox.setObjectName("mainrefCBox")
        self.mainclerkCBox = QtWidgets.QComboBox(Dialog)
        self.mainclerkCBox.setGeometry(QtCore.QRect(140, 430, 451, 24))
        self.mainclerkCBox.setObjectName("mainclerkCBox")
        self.refList = QtWidgets.QListWidget(Dialog)
        self.refList.setGeometry(QtCore.QRect(10, 480, 266, 201))
        self.refList.setObjectName("refList")
        self.refereeList = QtWidgets.QLabel(Dialog)
        self.refereeList.setGeometry(QtCore.QRect(10, 460, 91, 16))
        self.refereeList.setObjectName("refereeList")
        self.refColList = QtWidgets.QListWidget(Dialog)
        self.refColList.setGeometry(QtCore.QRect(325, 480, 266, 201))
        self.refColList.setObjectName("refColList")
        self.refereeCol = QtWidgets.QLabel(Dialog)
        self.refereeCol.setGeometry(QtCore.QRect(325, 460, 141, 16))
        self.refereeCol.setObjectName("refereeCol")
        self.raddButton = QtWidgets.QPushButton(Dialog)
        self.raddButton.setGeometry(QtCore.QRect(285, 560, 31, 24))
        self.raddButton.setObjectName("raddButton")
        self.rremoveButton = QtWidgets.QPushButton(Dialog)
        self.rremoveButton.setGeometry(QtCore.QRect(285, 600, 31, 24))
        self.rremoveButton.setObjectName("rremoveButton")
        self.wsortitionButton = QtWidgets.QPushButton(Dialog)
        self.wsortitionButton.setEnabled(True)
        self.wsortitionButton.setGeometry(QtCore.QRect(360, 690, 121, 24))
        self.wsortitionButton.setAutoDefault(True)
        self.wsortitionButton.setDefault(False)
        self.wsortitionButton.setFlat(False)
        self.wsortitionButton.setObjectName("wsortitionButton")
        self.divrings = QtWidgets.QCheckBox(Dialog)
        self.divrings.setGeometry(QtCore.QRect(390, 100, 201, 24))
        self.divrings.setObjectName("divrings")
        self.weightcatCBox = QtWidgets.QComboBox(Dialog)
        self.weightcatCBox.setGeometry(QtCore.QRect(150, 100, 231, 24))
        self.weightcatCBox.setObjectName("weightcatCBox")
        self.weigthcat = QtWidgets.QLabel(Dialog)
        self.weigthcat.setGeometry(QtCore.QRect(10, 100, 131, 24))
        self.weigthcat.setObjectName("weigthcat")
        self.round = QtWidgets.QLabel(Dialog)
        self.round.setGeometry(QtCore.QRect(220, 130, 61, 16))
        self.round.setObjectName("round")
        self.stage = QtWidgets.QLabel(Dialog)
        self.stage.setGeometry(QtCore.QRect(490, 130, 101, 16))
        self.stage.setObjectName("stage")
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
        # Keyboard tab-order chain across all interactive widgets.
        Dialog.setTabOrder(self.nameEdit, self.cityEdit)
        Dialog.setTabOrder(self.cityEdit, self.startDate)
        Dialog.setTabOrder(self.startDate, self.endDate)
        Dialog.setTabOrder(self.endDate, self.meetCountEdit)
        Dialog.setTabOrder(self.meetCountEdit, self.weightcatCBox)
        Dialog.setTabOrder(self.weightcatCBox, self.divrings)
        Dialog.setTabOrder(self.divrings, self.athletesList)
        Dialog.setTabOrder(self.athletesList, self.addButton)
        Dialog.setTabOrder(self.addButton, self.removeButton)
        Dialog.setTabOrder(self.removeButton, self.membersList)
        Dialog.setTabOrder(self.membersList, self.sortitionButton)
        Dialog.setTabOrder(self.sortitionButton, self.mainrefCBox)
        Dialog.setTabOrder(self.mainrefCBox, self.mainclerkCBox)
        Dialog.setTabOrder(self.mainclerkCBox, self.refList)
        Dialog.setTabOrder(self.refList, self.raddButton)
        Dialog.setTabOrder(self.raddButton, self.rremoveButton)
        Dialog.setTabOrder(self.rremoveButton, self.refColList)
        Dialog.setTabOrder(self.refColList, self.wsortitionButton)
        Dialog.setTabOrder(self.wsortitionButton, self.cancel)
    def retranslateUi(self, Dialog):
        """Install the (Russian) user-visible strings on every widget."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Создание соревнования"))
        self.start.setText(_translate("Dialog", "Начало"))
        self.startDate.setDisplayFormat(_translate("Dialog", "dd.MM.yyyy"))
        self.end.setText(_translate("Dialog", "Окончание"))
        self.endDate.setDisplayFormat(_translate("Dialog", "dd.MM.yyyy"))
        self.name.setText(_translate("Dialog", "Название"))
        self.athletes.setText(_translate("Dialog", "Список спортсменов"))
        self.addButton.setText(_translate("Dialog", ">>"))
        self.removeButton.setText(_translate("Dialog", "<<"))
        self.members.setText(_translate("Dialog", "Список участников"))
        self.meetCount.setText(_translate("Dialog", "Число боев в день"))
        self.sortitionButton.setText(_translate("Dialog", "Жеребьевка"))
        self.cancel.setText(_translate("Dialog", "Отмена"))
        self.city.setText(_translate("Dialog", "Место проведения"))
        self.main_referee.setText(_translate("Dialog", "Главный судья"))
        self.main_clerk.setText(_translate("Dialog", "Главный секретарь"))
        self.refereeList.setText(_translate("Dialog", "Список судей"))
        self.refereeCol.setText(_translate("Dialog", "Судейская коллегия"))
        self.raddButton.setText(_translate("Dialog", ">>"))
        self.rremoveButton.setText(_translate("Dialog", "<<"))
        self.wsortitionButton.setText(_translate("Dialog", "Без жеребьевки"))
        self.divrings.setText(_translate("Dialog", "Разбивать по рингам"))
        self.weigthcat.setText(_translate("Dialog", "Весовая категория"))
        self.round.setText(_translate("Dialog", "раунд"))
        self.stage.setText(_translate("Dialog", "стадия"))
if __name__ == "__main__":
    # Manual smoke test: show the generated dialog standalone.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Dialog = QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    sys.exit(app.exec_())
|
9,480 | 49ae9e90402d784fc3af3b47e96842fbfe842104 | from utilities.MatplotlibUtility import *
from utilities.PlotDefinitions.DrainSweep.OutputCurve import plot as importedOutputCurvePlot
# Plot registration metadata; 'plotDefaults' is read by plot() below, the
# remaining keys are presumably consumed by the surrounding plot framework.
plotDescription = {
	'name':'Chip Output Curves',
	'plotCategory': 'chip',
	'priority': 40,
	'dataFileDependencies': ['DrainSweep.json'],
	'plotDefaults': {
		'figsize':(2,2.5),
		'colorMap':'magma',
	},
}
def plot(identifiers, chipIndexes, firstRunChipHistory, recentRunChipHistory, specificRunChipHistory, groupedChipHistory, mode_parameters=None):
	"""Plot output curves for a chip's run history.

	Fills in the default color map and figure size when the caller did not
	provide them, then delegates to the shared OutputCurve plotter.
	"""
	if(mode_parameters is None):
		mode_parameters = {}
	#mode_parameters['enableColorBar'] = False
	# Use .get(): with the {} default above, indexing 'colorsOverride' /
	# 'figureSizeOverride' directly raised KeyError whenever the caller
	# omitted mode_parameters entirely.
	if(mode_parameters.get('colorsOverride', []) == []):
		mode_parameters['colorsOverride'] = (plotDescription['plotDefaults']['colorMap'], 0.85, 0)
	if(mode_parameters.get('figureSizeOverride') is None):
		mode_parameters['figureSizeOverride'] = plotDescription['plotDefaults']['figsize']
	return importedOutputCurvePlot(specificRunChipHistory, identifiers=identifiers, mode_parameters=mode_parameters)
9,481 | 51b3beee8659bccee0fbb64b80fdce18b693674b | class Solution(object):
def twoSum(self, numbers, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
idx1 = 0
idx2 = len(numbers)-1
while(idx1<idx2): # can also use a for-loop: for num in numbers:
left = numbers[idx1]
right = numbers[idx2]
if (left + right) < target:
idx1 += 1
elif (left + right) > target:
idx2 -= 1
else:
return [idx1+1,idx2+1]
|
9,482 | f0b5ad49fc47adc54fb16a151b4a0ed563f53a42 | from bottle import response,request,route,run
from json import dumps
import ConfigParser
import pickle
import pandas as pd
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.cross_validation import cross_val_score
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
import pickle
from nltk.stem import WordNetLemmatizer
wordnet_lemmatizer = WordNetLemmatizer()
def fun(dat):
    """Return dat['Summary'] with every comma-separated token lemmatized.

    Each entry is split on commas, each piece is run through the module's
    WordNet lemmatizer, and the pieces are re-joined with single spaces.
    """
    lemmatized_rows = []
    for summary in dat['Summary']:
        tokens = [wordnet_lemmatizer.lemmatize(piece) for piece in summary.split(',')]
        lemmatized_rows.append(' '.join(tokens))
    return lemmatized_rows
#Initialization starts
#configParser=ConfigParser.RawConfigParser()
#configFilePath="Config.cfg"
#configParser.read(configFilePath)
#Host=configParser.get('file','host')
#Port=configParser.get('file','port')
#Config read ends
#This method trains and creates a classifier from training data in csv file
@route('/trainBot',method='POST')
def trainBot():
    # POST /trainBot: retrain the intent classifier from trainData.csv.
    # Pipeline: number removal -> word-frequency filter -> lemmatization
    # -> CountVectorizer/TF-IDF -> RandomForest. The fitted pipeline and
    # label encoder are pickled to disk for the classify endpoints.
    # Returns a JSON status message.
    response.content_type='application/json'
    data2=[]
    print "training...."
    data=pd.read_csv('trainData.csv',header=None)
    import preprocess
    from preprocess import number_removal,generate_word_frequency
    import re
    #print data
    data.columns=['Intent','Summary']
    data['Summary']=data.apply(number_removal,axis=1)
    data['Summary'] = data.apply(generate_word_frequency,axis=1)
    data['Summary']=fun(data)
    from nltk.corpus import stopwords
    stop = stopwords.words('english')
    # Punctuation and assistant-trigger words are treated as stopwords too.
    stop.extend(('.', ',', '"', "'", '?', '!', ':', ';', '(', ')', '[', ']', '{', '}','/','-'))
    for i in ['ask','alexa','allexa','tel','tell']:
        stop.append(i)
    le=LabelEncoder()
    X=data['Summary'].fillna('')
    y=data['Intent'].fillna('')
    y=le.fit_transform(y)
    classifier = Pipeline([
        ('vec',CountVectorizer(strip_accents='unicode',stop_words=stop)),
        ('tfidf', TfidfTransformer()),
        ('clf', RandomForestClassifier(n_estimators=10,random_state=0))])
    classifier=classifier.fit(X, y)
    # Persist the fitted model and label encoder.
    f = open('random_forest_model.pickle', 'wb')
    pickle.dump(classifier, f)
    f.close()
    f = open('label.pickle', 'wb')
    pickle.dump(le, f)
    f.close()
    print "training completed"
    item={"result":"training completed"}
    data2.append(item)
    return dumps(data2)
#This method classifies the input text based on the trained classifier
@route('/classify2',method='POST')
def classify2():
    # POST /classify2: classify request.json["input"] with the pickled model.
    # Returns JSON [{"result": "<confidence> <predicted_class>"}].
    # read python dict back from the file
    f = open('random_forest_model.pickle', 'rb')
    classifier=pickle.load(f)
    f.close()
    f = open('label.pickle', 'rb')
    label=pickle.load(f)
    f.close()
    response.content_type='application/json'
    data=[]
    inputText=request.json["input"]
    print "input text : ",inputText
    confidence=classifier.predict_proba([inputText])
    index=np.argmax(confidence)
    predicted_class=label.inverse_transform(classifier.predict([inputText]))
    print str(round(confidence[0][index],2))+" "+ predicted_class[0]
    item={"result":str(round(confidence[0][index],2))+" "+ predicted_class[0]}
    data.append(item)
    return dumps(data)
#This method classifies and returns others based on confidence score
def classifyTextWithScore(inputText):
    # Classify *inputText* with the pickled model; return "others" when the
    # confidence is below 0.7 or the input is a single word, else the
    # predicted class name.
    f = open('random_forest_model.pickle', 'rb')
    classifier=pickle.load(f)
    f.close()
    f = open('label.pickle', 'rb')
    label=pickle.load(f)
    f.close()
    confidence=classifier.predict_proba([inputText])
    index=np.argmax(confidence)
    predicted_class=label.inverse_transform(classifier.predict([inputText]))
    print round(confidence[0][index],2),predicted_class
    if (round(confidence[0][index],2)<0.7):
        return "others"
    elif(len(inputText.split(" "))<2):
        return "others"
    else:
        return predicted_class[0]
#run(host='172.31.45.19', port=7500)
#print "hai"
# Module-level smoke test of the classifier (runs on import).
print classifyTextWithScore("payments made last week where remitter bank wants to stop the payment")
#run(host='192.168.1.7',port=8000)
|
9,483 | 87291d066b94aca1d94cbe5d9281fc72da1b0c35 | import numpy as np
from StudyCaseUdemy.Graph import Graph
class OrderVector:
    """Fixed-capacity array of vertices kept sorted by ascending .distance."""

    def __init__(self, size):
        self.size = size
        self.last_pos = -1  # index of the last occupied slot (-1 = empty)
        self.values = np.empty(self.size, dtype=object)

    def insert(self, vertex):
        """Insert *vertex* at its sorted position (ordered by .distance).

        Refuses (with a message) when the vector is already full. The
        unused `temp` variable of the original scan loop was removed.
        """
        if self.last_pos == self.size - 1:
            print('Capacidad max do Vector atingida')
            return
        # Locate the first slot holding a vertex with a larger distance.
        pos = 0
        for i in range(self.last_pos + 1):
            pos = i
            if self.values[i].distance > vertex.distance:
                break
            if i == self.last_pos:
                pos = i + 1  # larger than everything seen: append at the end
        # Shift the tail one slot right to open the target position.
        x = self.last_pos
        while x >= pos:
            self.values[x + 1] = self.values[x]
            x -= 1
        self.values[pos] = vertex
        self.last_pos += 1

    def printer(self):
        """Print index, label and distance of every stored vertex."""
        if self.last_pos == -1:
            print('Empty Array')
        else:
            for i in range(self.last_pos + 1):
                print(i, ' - ', self.values[i].label, ' - ', self.values[i].distance)
class Greedy:
    """Greedy best-first search over a vertex graph.

    Always recurses into the unvisited neighbour with the smallest
    .distance value (presumably a heuristic estimate defined by the
    Graph's vertices) until the objective vertex is reached.
    """
    def __init__(self, objective):
        self.objective = objective
        self.found = False
    def search(self, current):
        """Recursively walk from *current* toward self.objective."""
        print('------')
        print('Current Vertex: {}'.format(current.label))
        current.visited = True
        if current == self.objective:
            self.found = True
        else:
            # Rank unvisited neighbours by distance, then recurse into the best.
            orderVector = OrderVector(len(current.adjacents))
            for adj in current.adjacents:
                if not adj.vertex.visited:
                    adj.vertex.visited = True  # mark now so it is not queued twice
                    orderVector.insert(adj.vertex)
            orderVector.printer()
            if orderVector.values[0] is not None:
                self.search(orderVector.values[0])
# Demo run: greedy search from arad toward bucharest (the vertex names
# suggest the classic Romania road-map example -- defined in Graph).
grafo = Graph()
# vector = OrderVector(5)
# vector.insert(grafo.arad)
# vector.insert(grafo.craiova)
# vector.insert(grafo.bucharest)
# vector.insert(grafo.dobreta)
# vector.insert(grafo.lugoj)
# vector.printer()
greedy = Greedy(grafo.bucharest)
greedy.search(grafo.arad)
|
9,484 | 59170e6b0b0705b9908ed1c32bbea87373126594 | #coding:utf-8
#base string opeate
#rstrip()删除字符串末尾被指定的字符,默认是空格,如末尾有多个相同的字符,则一并删除
str1="djcc"
str2="adcd"
print("this's rstrip() function---------")
print(str1.rstrip("c"))
print(str1.rstrip("d"))
#replace()用新字符替换字符串中被指定的字符,str.replace(old, new[, max]),max表示替换多少个,如不指定,全部替换
str3="this is history,it is not fake"
print("this's replace function----------")
print(str3.replace("is","was"))
print(str3.replace("is","was",3))#索引从1开始,0不算
#
|
9,485 | 896d836ede533bad24f4077e5ba964105d96bf7a | list1=[('北京大洋路', '红蛋', '散框批发', '120-125', '44', '落', '8车'), ('北京回龙观', '红蛋', '散框批发', '124', '44', '落', ''), ('北京石门', '红蛋', '散框批发', '124', '44', '落', '') ]
mysql_data=[]
import numpy as np
for l in list1:
array = np.array(l)
tolist = array.tolist()
tolist.insert(0,'ppp')
tolist.append('lll')
mysql_data.append(tolist)
print(mysql_data)
# Quick connectivity check: fetch baidu.com with a desktop Chrome UA and
# dump the response body.
import requests
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'
}
get = requests.get('http://www.baidu.com',headers=headers)
print(get.text)
|
9,486 | 5dcb20f52b5041d5f9ea028b383e0f2f10104af9 | from collections import deque
# Print the most frequent letter of the input (case-insensitive),
# or '?' when the maximum count is shared by several letters.
s = list(input().upper())
new = list(set(s)) # count only the deduplicated characters to avoid a time-limit exceeded
n = {}
for i in new:
    n[i] = s.count(i)
cnt = deque()
for k, v in n.items():
    cnt.append(v)
if cnt.count(max(cnt)) >1:
    print('?')
else:
    print(max(n, key=n.get))
|
9,487 | 7618d7fde3774a04ac2005dad104e54b9988d3e8 | def execute(n,dico):
"""
Prend en argument n, la position de la requête dans le dictionaire et dico le nom du dictionnaire.
Renvoie une liste dont chaque élément est une réponse de la requête.
"""
l = []
import sqlite3
conn = sqlite3.connect('imdb.db')
c = conn.cursor()
c.execute(dico[n][1])
for row in c:
l.append(row)
conn.close()
return l
def taille_plus_grande_reponse(reponses):
    """
    Return the length of the longest element of *reponses* once converted
    to str (0 for an empty list).

    Replaces the manual max-tracking loop with max(..., default=0).
    """
    return max((len(str(element)) for element in reponses), default=0)
"""affichage question"""""""""""""""""""""""""""
from tkinter import *
def question(dico):
    """
    Collect the question text of every request in *dico* and open the
    question-picker window listing them.
    """
    l = []
    for i in range(len(dico)):
        l.append(dico[i][0])
    affichage_question(dico,l)
def affichage_question(dico, texte, titre = "Question"):
    """
    Open a Tk window titled *titre* with one button per entry of *texte*;
    clicking button i runs request i from *dico* via requete().
    """
    fenetre = tkinter.Tk()
    fenetre.title(titre)
    for i in range(len(texte)):
        # NOTE(review): bouton is re-created every iteration; a plain
        # variable would do.
        bouton={}
        # Default arguments bind the current i/dico (late-binding pitfall).
        bouton[i]=Button(fenetre, text=texte[i], command=lambda n=i, dico=dico:requete(n,dico))
        bouton[i].pack()
    fenetre.mainloop()
""""""""""""""""""""""""""""""""""""""""""""""""
def requete(n,dico):
    """
    Run request number *n* from *dico* and display its results in a window.

    The original computed the result into an unused variable and then ran
    the same SQL query a second time for display; it now runs only once.
    """
    reponses = execute(n,dico)
    afficher_table(reponses,dico[n][0])
import tkinter
import os
def afficher_table(table, titre ="", debut = 0, fin = None):
    """
    Display *table* (a list of result rows) in a scrollable window.

    *titre* is shown above the table; *debut*/*fin* are forwarded to
    texte_table.
    """
    if titre != "":
        titre += "\n\n"
    #print(titre + texte_table(table, debut, fin))
    affichage(titre + texte_table(table, debut, fin), titre)
def texte_table(table, debut = 0, fin = None):
    """
    Render table[debut:fin] as a one-column ASCII table string.

    The original accepted *debut*/*fin* but never applied them; they are
    now honoured, which is identical for the default 0/None arguments.
    The string is assembled with join instead of quadratic +=.
    """
    rows = table[debut:fin]
    largeur = taille_plus_grande_reponse(rows)
    separateur = '+' + largeur * '-' + '+\n'
    morceaux = [separateur]
    for ligne in rows:
        cellule = str(ligne)
        morceaux.append('|' + cellule + (largeur - len(cellule)) * ' ' + '|\n')
        morceaux.append(separateur)
    return ''.join(morceaux)
def affichage(texte, titre = "Requêtes tables"):
    """
    Open a near-full-screen Tk window showing *texte* in a Text widget
    with horizontal and vertical scrollbars.
    """
    root = tkinter.Tk()
    root.title(str(titre))
    RWidth=root.winfo_screenwidth() - 100
    RHeight=root.winfo_screenheight() - 100
    root.geometry("%dx%d+50+0"%(RWidth, RHeight))
    text=tkinter.Text(root, wrap = 'none')
    # Horizontal scrollbar tied to the text view.
    scroll_x=tkinter.Scrollbar(text.master, orient='horizontal', command = text.xview)
    scroll_x.config(command = text.xview)
    text.configure(xscrollcommand = scroll_x.set)
    scroll_x.pack(side = 'bottom', fill = 'x', anchor = 'w')
    # Vertical scrollbar tied to the text view.
    scroll_y = tkinter.Scrollbar(text.master)
    scroll_y.config(command = text.yview)
    text.configure(yscrollcommand = scroll_y.set)
    scroll_y.pack(side = tkinter.RIGHT, fill = 'y')
    text.insert("1.0", texte)
    text.pack(side = tkinter.LEFT, expand = True, fill = tkinter.BOTH)
    root.mainloop()
def fichier_txt_en_texte(fichier):
    """
    Return the full contents of text file *fichier* as one string.
    """
    with open(fichier, "r") as handle:
        contenu = handle.read()
    return contenu
def chemin(nom, repertoire):
    """
    Build the relative path of a request file: '<repertoire>/<nom>'.
    """
    return "{}/{}".format(repertoire, nom)
def texte_en_liste(nom_requete, repertoire):
    """Read file *nom_requete* in *repertoire* and return its whitespace-split tokens."""
    requete = fichier_txt_en_texte(chemin(nom_requete, repertoire))
    return requete.split()
def liste_en_texte(liste):
    """
    Join the elements of *liste* into one string, each element followed
    by a single space (so the result carries a trailing space, and an
    empty list yields '').

    The previous docstring described a different function, and the string
    was built with quadratic += concatenation.
    """
    return ''.join(str(element) + ' ' for element in liste)
def separer_requete_et_question(nom, repertoire):
    """
    Split a request file into its question text and its SQL text.

    The file is tokenized; everything up to and including the first "?"
    token is the question, the rest is the SQL request. Returns
    [question_text, request_text].
    """
    requete = texte_en_liste(nom, repertoire) #tokenize the request file
    question = ""
    for i in range(len(requete)): #find where the question ends
        if requete[i] == "?":
            question = requete[0:i+1] #keep the question tokens
            requete = requete[i+1:len(requete)] #keep the SQL tokens
            break #stop at the first "?"
    return [liste_en_texte(question),liste_en_texte(requete)]
def creer_dictionnaire_vide():
    """Return a brand-new empty dictionary."""
    return {}
def nom_element_du_repertoire(repertoire):
    """
    Return the names of the files contained in *repertoire*.

    NOTE(review): the parent path is hard-coded to one machine's Desktop,
    so this breaks anywhere else -- consider making it configurable.
    """
    path = "C:\\Users\\Elève\\Desktop\\projet NSI\\projetsqlKilian\\projetsqlKilian\\" + repertoire
    nom_requete = os.listdir(path)
    return nom_requete
def stocker_requete(dico, repertoire):
    """
    Fill *dico* in place: dico[i] = [numbered question text, SQL text]
    for each request file found in *repertoire*.
    """
    liste = nom_element_du_repertoire(repertoire)
    for i in range(len(liste)):
        requete = separer_requete_et_question(liste[i], repertoire)
        dico[i] = ['#' + str(i+1) + ') ' + requete[0], requete[1]]
def afficher(dico):
    """Return *dico* unchanged (identity helper kept for readability)."""
    return dico
# Script entry: load every request from the 'requête' folder and open
# the question-picker window (runs on import).
a = creer_dictionnaire_vide()
stocker_requete(a,'requête')
#print(afficher(a))
question(a)
#print(nom_element_du_repertoire('requête'))
#requete(a)
#print(execute(1,a))
#print(taille_plus_grande_reponse(execute(1,a)))
9,488 | 570e0d46aa1ea88d1784447e8f693199e3c3b6ad | from __future__ import print_function
from __future__ import absolute_import
#
# LinkedIn Sales Module
#
import requests
from bs4 import BeautifulSoup
import logging
from plugins.base import PageGrabber
from plugins.colors import BodyColors as bc
import json
try:
import __builtin__ as bi
except:
import builtins as bi
class LinkedInGrabber(PageGrabber): # LinkedIN.com sales scraper for email lookups
    def get_info(self,email): # Requires AUTH, login and request AUTHENTICATED pages from linkedin
        # Look up *email* via LinkedIn's sales "viewByEmail" page.
        # Logs in with stored (or interactively entered) credentials,
        # scrapes profile/name/company/title/location/email, prints each
        # field, stores everything in self.info_dict and mirrors it into
        # the shared builtins outdata['linkedin'].
        client = requests.Session() # Establish the session()
        print("["+bc.CPRP+"?"+bc.CEND+"] "+bc.CCYN + "LinkedIn" + bc.CEND)
        HOMEPAGE_URL = 'https://www.linkedin.com' # Set homepage for linkedin
        LOGIN_URL = 'https://www.linkedin.com/uas/login-submit' # Set login page for linkedin
        LOGOUT_URL = 'https://www.linkedin.com/m/logout'
        source = client.get(HOMEPAGE_URL).content # Request source
        soup = self.get_dom(source) # BS DOM
        csrf = soup.find(id="loginCsrfParam-login")['value']
        #
        # ATTENTION:: YOU MUST POPULATE THE FOLLOWING WITH YOUR REAL CREDENTIALS
        #
        # ATTENTION:: THIS WILL NOT WORK PROPERLY OTHERWISE
        #
        # session_key = email   session_password = your password
        #
        try:
            # Reuse previously saved credentials (plain-text JSON on disk).
            with open('./storage/fb_login', 'r') as fbinfo:
                login_information = json.loads(fbinfo.read())
                #print(json.loads(login_information))
                login_information['loginCsrfParam'] = csrf
        except:
            # No saved credentials -- fall back to empty placeholders.
            login_information = {
                'session_key':'',
                'session_password':'',
                'loginCsrfParam': '',
            }
            pass
        if not login_information['session_key']:
            if login_information['session_password'] == '': # If no modifications of default u/p, print error, return
                print (" ["+bc.CRED+"ATTENTION"+bc.CEND+"] " + \
                    bc.CYLW+"\tThis module requires authentication to use it properly.\n\tIt will store Credential pairs in plain-text."+bc.CEND)
                print (" ["+bc.CRED+"ATTENTION"+bc.CEND+"] " + \
                    bc.CYLW + "This could produce a trail and identify the used account."+bc.CEND)
                print()
                savecreds = raw_input("[{}?{}] {}Would you like to save credentials now? {}(Y/n){}]: ".format(bc.CRED,bc.CEND,bc.CRED,bc.CYLW,bc.CEND))
                print()
                luser = raw_input(" ["+bc.CRED+"?"+bc.CEND+"] " + \
                    bc.CYLW+"What is your throw-away linkedin username: "+bc.CEND)
                lpass = raw_input(" ["+bc.CRED+"?"+bc.CEND+"] " + \
                    bc.CYLW+"What is your throw-away linkedin password: "+bc.CEND)
                login_information = {
                    'session_key':luser,
                    'session_password':lpass,
                    'loginCsrfParam': csrf,
                }
                if str(savecreds).lower() in ['y','yes']:
                    try:
                        with open('./storage/fb_login','w') as fbinfo:
                            fbinfo.write(json.dumps(login_information))
                    except Exception as failedtowrite:
                        print(("Failed to write fbinfo to file: %s") % failedtowrite)
        try:
            client.post(LOGIN_URL, data=login_information)
            results = client.get('https://linkedin.com/sales/gmail/profile/viewByEmail/'+str(email)).text
        except Exception as failedlinkedinauth:
            # NOTE(review): `results` stays unbound when this branch runs,
            # so the get_dom call below raises NameError -- confirm intent.
            print((" ["+bc.CRED+"X"+bc.CEND+"] " + \
                bc.CYLW+"This module did not properly authenticate: %s" + \
                bc.CEND) % failedlinkedinauth)
        soup = self.get_dom(results)
        self.get_source(LOGOUT_URL) # Log out of LinkedIn, kills sessionID
        try: # Search and set from results
            profile = soup.find('a',attrs={'class': 'li-hover-under li-txt-black-85'})['href']
            print(" ["+bc.CGRN+"+"+bc.CEND+"] "+ \
                bc.CRED+"Profile: "+bc.CEND + \
                str(profile)
            )
        except:
            # No profile link means no account for this email -- bail out.
            print(" ["+bc.CRED+"X"+bc.CEND+"] " + \
                bc.CYLW+"No LinkedIn account found.\n" + \
                bc.CEND
            )
            return
        try:
            fname = soup.find('span',attrs={'id': 'li-profile-name'})['data-fname']
            lname = soup.find('span',attrs={'id': 'li-profile-name'})['data-lname']
            name = str(fname) + " " + str(lname)
            print(" ["+bc.CGRN+"+"+bc.CEND+"] " + \
                bc.CRED+"Name: " + \
                bc.CEND+ str(fname) + \
                " " + \
                str(lname)
            )
        except:
            name = ""
            pass # print (" ["+bc.CRED+"X"+bc.CEND+"] "+bc.CYLW+"No username can be found.\n"+bc.CEND)
        try:
            company = soup.find('span',{'class': 'li-user-title-company'}).get_text()
            print(" ["+bc.CGRN+"+"+bc.CEND+"] " + \
                bc.CRED+"Company: " + \
                bc.CEND + str(company)
            )
        except:
            company = ""
            pass # print (" ["+bc.CRED+"X"+bc.CEND+"] "+bc.CYLW+"No Company can be found.\n"+bc.CEND)
        try:
            title = soup.find('div',{'class':'li-user-title'}).get_text()
            print(" ["+bc.CGRN+"+"+bc.CEND+"] " + \
                bc.CRED+"Title: " + \
                bc.CEND+\
                str(title)
            )
        except:
            title = ""
            pass #print (" ["+bc.CRED+"X"+bc.CEND+"] "+bc.CYLW+"No Job Title can be found.\n"+bc.CEND)
        try:
            location = soup.find('div', {'class':'li-user-location'}).get_text()
            print(" ["+bc.CGRN+"+"+bc.CEND+"] "+bc.CRED+"Location: "+bc.CEND+ str(location))
        except:
            location = ""
            pass #print (" ["+bc.CRED+"X"+bc.CEND+"] "+bc.CYLW+"No Location can be found.\n"+bc.CEND)
        try:
            email = soup.find('span', {'id':'email'}).get_text()
            print(" ["+bc.CGRN+"+"+bc.CEND+"] "+bc.CRED+"Email: "+bc.CEND+ str(email))
        except:
            email =""
            pass #print (" ["+bc.CRED+"X"+bc.CEND+"] "+bc.CYLW+"No Email account found.\n"+bc.CEND)
        # Collect everything scraped above into the grabber's result dict.
        self.info_dict.update({
            "profile": profile,
            "name": name,
            "location": location,
            "company": company,
            "title":title,
            "email":email
        })
        bi.outdata['linkedin'] = self.info_dict
        print()
        return
|
9,489 | 67f09cd8b41c7a4fe457766dfed916aaf71cc20d | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 27 18:34:40 2017
@author: Peiyong Jiang :jiangpeiyong@impcas.ac.cn
Wangsheng Wang : wwshunan@impcas.ac.cn
Chi Feng : fengchi@impcas.ac.cn
supervised by
Zhijun Wang & Yuan He
"""
import os
from win32com.client import Dispatch
# Ask for the root folder to scan; backslashes are doubled for the
# os.walk calls below.
# NOTE(review): doubling backslashes from interactive input is usually
# unnecessary for os.walk -- verify this on a real path.
folderDealTmp=input('Please input the absolute path of the father-folder:\n')
folderDeal=folderDealTmp.replace('\\','\\\\')
def GetPage5Docx(fileNameWithPath):
    """Return the page count of a .docx file using Word via COM.

    The original rebound the Word Application object to the opened
    document, leaking both; the document is now closed (without saving)
    when done, matching GetPage5PPT's cleanup of its Presentation.
    """
    word = Dispatch('Word.Application')
    word.Visible = False
    doc = word.Documents.Open(fileNameWithPath)
    try:
        # Force repagination so the statistics are up to date;
        # 2 == wdStatisticPages.
        doc.Repaginate()
        num_of_sheets = doc.ComputeStatistics(2)
    finally:
        doc.Close(False)
    return num_of_sheets
def GetPage5PPT(fileNameWithPath):
    """Return the number of slides of a .ppt/.pptx file via PowerPoint COM."""
    Application = Dispatch("PowerPoint.Application")
    Presentation = Application.Presentations.Open(fileNameWithPath, WithWindow=False)
    slide_count = len(Presentation.Slides)
    Presentation.Close()
    return slide_count
# Pass 1: create/clear a Counter.txt in every folder of the tree.
for root, dirs, files in os.walk(folderDeal, topdown=False):
    StatisticFile=root+'\\Counter.txt'
    with open(StatisticFile,'w') as fid:
        pass
# Pass 2: append per-file and total page counts for Word documents.
for root, dirs, files in os.walk(folderDeal, topdown=False):
    StatisticFile=root+'\\Counter.txt'
    with open(StatisticFile,'a+') as fid:
        pagesTotal=0
        for name in files:
            nameFile=os.path.join(root, name)
            mainFile,appdFile=os.path.splitext(nameFile)
            mainFolder,fullFile=os.path.split(nameFile)
            # '~$' prefix marks Office lock/temp files -- skip them.
            if (appdFile=='.docx') and (fullFile[0:2]!='~$'):
                pagesThis=GetPage5Docx(nameFile)
                fid.writelines(fullFile+' '+str(pagesThis)+'\n')
                pagesTotal+=pagesThis
        fid.writelines('All Docx files in this folder have the pages: '+str(pagesTotal)+'\n\n\n\n\n\n')
# Pass 3: same accounting for PowerPoint files.
for root, dirs, files in os.walk(folderDeal, topdown=False):
    StatisticFile=root+'\\Counter.txt'
    with open(StatisticFile,'a+') as fid:
        pagesTotal=0
        for name in files:
            nameFile=os.path.join(root, name)
            mainFile,appdFile=os.path.splitext(nameFile)
            mainFolder,fullFile=os.path.split(nameFile)
            if ((appdFile=='.pptx') or (appdFile=='.ppt')) and (fullFile[0:2]!='~$'):
                pagesThis=GetPage5PPT(nameFile)
                fid.writelines(fullFile+' '+str(pagesThis)+'\n')
                pagesTotal+=pagesThis
        fid.writelines('All PPT/PPTX files in this folder have the pages: '+str(pagesTotal)+'\n\n\n\n\n\n')
print('Done. Please check it!')
|
9,490 | 146487738006ce3efb5bd35c425835a1fd8e0145 | # -*- coding: utf-8 -*-
#some xml helpers
from xml.dom.minidom import Document
class XMLReport:
    """Small helper for building a pretty-printed XML report document."""

    def __init__(self, name):
        self.doc = Document()
        # The root element doubles as the default parent for add().
        self.main_node = self.add(name, node=self.doc)

    def add(self, name, node=None):
        """Create element *name* under *node* (default: the root) and return it."""
        parent = self.main_node if node is None else node
        element = self.doc.createElement(name)
        parent.appendChild(element)
        return element

    def text(self, text, node):
        """Append a text child containing *text* to *node*."""
        node.appendChild(self.doc.createTextNode(text))

    def set_node_info(self, node, typ):
        """Record a type's hex id and name as attributes on *node*."""
        node.setAttribute("type-id", hex(typ.id))
        node.setAttribute("name", typ.get_name())

    def __str__(self):
        return self.doc.toprettyxml(indent=" ")
9,491 | 3bec28561c306a46c43dafc8bdc2e01f2ea06180 | from mlagents_envs.registry import default_registry
from mlagents_envs.envs.pettingzoo_env_factory import logger, PettingZooEnvFactory
# Register each environment in default_registry as a PettingZooEnv
for key in default_registry:
    env_name = key
    # Identifiers cannot start with a digit; patch ids by spelling out
    # the leading "3" as "Three".
    if key[0].isdigit():
        env_name = key.replace("3", "Three")
    if not env_name.isidentifier():
        logger.warning(
            f"Environment id {env_name} can not be registered since it is"
            f"not a valid identifier name."
        )
        continue
    # Expose the factory as a module-level attribute named after the env.
    locals()[env_name] = PettingZooEnvFactory(key)
|
9,492 | 92dc0bd3cfcddd98f99d8152d0221f047beb4fb0 | #! /usr/bin/python
# -*- coding: utf8 -*-
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import unittest
from pyama.filereader import FileReader,Segment
class TestFileReader(unittest.TestCase):
    """Exercises pyama.filereader.FileReader segment splitting and
    SNIPPET parameter parsing against a sample file."""
    def test_reads_file(self):
        """The sample file must split into 7 segments with the expected
        names and line counts."""
        reader = FileReader(
            "sample_reader_test.txt",
            regexes=[(r'name="(\w+)"', 'END SEGMENT'),
                     (r'\s*\*\s*START\s*(\w+)', 'END SEGMENT'),
                     (r"PYTHON\s+SEGMENT\s+(\w[\w\d_]*)", None)]
        )
        file = reader.read()
        self.assertEqual(7, len(file.segments))
        self.assertEqual('0', file.segments[0].name)
        self.assertEqual(2, len(file.segments[0].text))
        self.assertEqual('segmentOne', file.segments[1].name)
        self.assertEqual(3, len(file.segments[1].text))
        self.assertEqual('1', file.segments[2].name)
        self.assertEqual(1, len(file.segments[2].text))
        self.assertEqual('anotherSegment', file.segments[3].name)
        self.assertEqual(6, len(file.segments[3].text))
        self.assertEqual('2', file.segments[4].name)
        self.assertEqual(2, len(file.segments[4].text))
        self.assertEqual('python_segment', file.segments[5].name)
        self.assertEqual(4, len(file.segments[5].text))
        self.assertEqual('python_segment', file.segments[6].name)
        self.assertEqual(3, len(file.segments[6].text))
    def test_analyses_parameters(self):
        """Quoted and bare KEY=VALUE pairs on a SNIPPET START line are parsed
        into segment.parameters."""
        segment = Segment("name","file name")
        line = """ SNIPPET START A=B B=13 K='ha mi' ZIG="ZA G" WITH hami -> "mami" """
        FileReader("whatnot",["onces"]).analyze_parameters(line,segment)
        self.assertEqual(segment.parameters["A"],"B")
        self.assertEqual(segment.parameters["B"],"13")
        self.assertEqual(segment.parameters["K"],"ha mi")
        self.assertEqual(segment.parameters["ZIG"],"ZA G")
if __name__ == '__main__':
unittest.main()
|
9,493 | f8a31cdf5f55b5aed33a407d2c008ba9b969d655 | import cv2
import glob
import numpy as np
import csv
import matplotlib.pyplot as plt
from pydarknet import Detector,Image
"""
Calculates the average precision based on the precision and recall values,
which are essentially the output of getPrecisionRecall
Returns the 101pt interpolation curve and a single average precision value
"""
def getAP(prec, rec):
    """Return the 101-point interpolated precision curve and its mean (AP).

    prec/rec are parallel lists, as produced by getPrecisionRecall.
    """
    # Append a trailing 0 so max() over a tail slice is always defined.
    padded = prec.copy()
    padded.append(0.0)
    interpolated = np.zeros(101)  # precision sampled at recall 0.00 .. 1.00
    for step in range(101):
        threshold = step / 100.0
        best = 0
        # Smoothing: at the first index whose recall reaches the threshold,
        # take the maximum precision over that index and everything after it.
        for j in range(len(rec)):
            if rec[j] >= threshold:
                best = max(padded[j:])
                break
        interpolated[step] = best
    ap = np.mean(interpolated)
    return (interpolated, ap)
"""
Calculates the intersection of two boxes a and b,
both arrays are in the format x1,y1,x2,y2, where x1,y1 and x2,y2 are
the upmost left and downmost right corner
Returns a single value for the Intersection amount in pixels
"""
def getIntersection(a, b):
    """Return the intersection area (in pixels) of boxes a and b.

    Both boxes are (x1, y1, x2, y2), with (x1, y1) the upper-left and
    (x2, y2) the lower-right corner.  Returns 0 when the boxes are disjoint.

    Bug fix: the original's "up" branch compared the y-coordinate a[3]/b[3]
    against the x-range (indices 0 and 2), so any pair of boxes with
    different x- and y-extents produced a wrong area.
    """
    # Standard clipping: the overlap is bounded by the inner edges.
    left = max(a[0], b[0])
    top = max(a[1], b[1])
    right = min(a[2], b[2])
    bottom = min(a[3], b[3])
    if right < left or bottom < top:
        return 0
    return (right - left) * (bottom - top)
"""
Calculates the IoU Intersection over Union for the two boxes a and b,
both arrays are in the format x1,y1,x2,y2, where x1,y1 and x2,y2 are
the upmost left and downmost right corner
Returns a single IoU value
"""
def getIoU(a, b):
    """Return the Intersection-over-Union of two (x1, y1, x2, y2) boxes."""
    box_a = np.array(a, np.float32)
    box_b = np.array(b, np.float32)
    overlap = getIntersection(box_a, box_b)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    # With zero overlap the subtraction is a no-op; branch kept as original.
    if overlap > 0:
        union = area_a + area_b - overlap
    else:
        union = area_a + area_b
    return overlap / union
"""
Calculates the precision and recall values/curve given plist that contains only "TP" and "FP" items
this list was created by predictions that are ordered based on score
and positives, the number of all positives based on the ground truth
Returns tuple of lists for precisions and recalls
"""
def getPrecisionRecall(plist, positives):
    """Build precision/recall curves from a score-ordered "TP"/"FP" list.

    positives is the total number of ground-truth positives.
    Returns (precisions, recalls), one entry per prediction.
    """
    true_pos = 0
    false_pos = 0
    precisions = []
    recalls = []
    for label in plist:
        if label == "TP":
            true_pos += 1
        elif label == "FP":
            false_pos += 1
        precisions.append(true_pos / (true_pos + false_pos))
        recalls.append(true_pos / positives)
    return (precisions, recalls)
def readResults(filename):
    """Read a comma-separated file and return its rows as a list of lists."""
    rows = []
    with open(filename) as csvfile:
        for record in csv.reader(csvfile, delimiter=','):
            rows.append(record)
    return rows
"""
converts relative to absolute coordinates,
x = point of box (relative), y = point of box (relative)
w = width of box (relative), h = height of box (relative)
o_x = original width of image, o_y = original height of image
"""
def relativeToAbsolute(x, y, w, h, o_x, o_y):
    """Scale relative box coordinates by the image size.

    x, y, w, h are relative (0..1) position and size; o_x, o_y are the
    image width and height.  Returns the absolute (x, y, w, h) as floats.
    """
    scale_x = float(o_x)
    scale_y = float(o_y)
    return (float(x) * scale_x,
            float(y) * scale_y,
            float(w) * scale_x,
            float(h) * scale_y)
|
9,494 | 9bb1fc4df80d183c70d70653faa3428964b93a94 | from django.db import models
class FoodCategory(models.Model):
    """A food category row stored in the kitchenrock_category table."""
    # Explicit auto-increment primary key.
    id = models.AutoField(primary_key=True)
    # Display name of the category.
    name = models.CharField(max_length=200, default='')

    class Meta:
        db_table = 'kitchenrock_category'

    def __str__(self):
        # Fix: removed a stray trailing "|" artifact that broke this line.
        return self.name
9,495 | f6d7ce2d020d11086640a34aac656098ab0b0f33 | from datetime import date
# Ask for seven birth years and report how many people are 21 or older.
atual = date.today().year
totmaior = 0  # people aged 21 or more
totmenor = 0  # people under 21
for pessoas in range(1, 8):
    # Reads only the birth YEAR despite the prompt wording.
    nasc = int(input(f'Qual sua data de nascimento? {pessoas}º: '))
    idade = atual - nasc
    if idade >= 21:
        totmaior += 1
    else:
        totmenor += 1
print(f'Ao todo tivemos {totmaior} pessoas maiores de idade!')
# Fix: removed a stray trailing "|" artifact that broke the line below.
print(f'E tambem tivemos {totmenor} pessoas menores de idade!')
9,496 | 8a54a71b08d10c5da9ca440e8e4f61f908e00d54 | A = input("입력해주세요.\n") #입력값을 in_AAA로 칭한다
# "\n" in the prompt above is the newline escape sequence.
print(A.upper()+" World!") # print the upper-cased input followed by " World!"
# str.upper() converts the preceding string to upper case.
|
def squirrel_play(temp, is_summer):
    """Return True when squirrels play: temp within 60..90, or 60..100 in summer.

    Behavior is identical to the original for boolean is_summer; the
    `== True` / `== False` comparisons and duplicated branches are replaced
    by a single chained comparison.  (Also strips the dataset-row metadata
    that was fused onto the def line, and a stray trailing "|".)
    """
    upper = 100 if is_summer else 90
    return 60 <= temp <= upper
9,498 | 752679d2484b6b91a734c7cbe4a99bd5676661eb | import numpy as np
def output(i, out):
    """Append one "Case #i: out" result line to B-large.out."""
    line = "Case #{0}: {1}\n".format(i, out)
    with open('B-large.out', 'a') as outfile:
        outfile.write(line)
def solve(i, stack):
    """Emit the flip count for pancake string `stack` (Code Jam 2016 B).

    Each boundary between runs of equal symbols costs one flip; a stack
    ending in "-" needs one extra flip to turn the final run happy-side up.
    """
    flips = 1 if stack[-1] == "-" else 0
    # Count adjacent unequal pairs, i.e. run boundaries.
    for prev, cur in zip(stack, stack[1:]):
        if prev != cur:
            flips += 1
    output(i, flips)
# Read the Code Jam input file; line 0 holds the test-case count, every
# later line is one case.  Fix: removed a stray trailing "|" artifact.
lines = np.loadtxt('B-large.in', dtype=str)
for i, line in enumerate(lines):
    if i > 0:  # skip the case-count header line
        solve(i, line)
9,499 | f2d7f0b0d27bd43223d0eb6a6279b67968461dad | # binary search
# iterative
def Iter_BinarySearch(array, b, e, value):
    """Iteratively binary-search sorted `array` for `value` within [b, e].

    Returns the index of `value`, or -1 when it is absent.

    Ported from Python 2: floor division for the midpoint (true division
    would produce a float index on Python 3) and print() calls; also fixed
    the "fint" typo in the not-found message.
    """
    while b <= e:  # closed interval: continue while it is non-empty
        mid = (b + e) // 2  # floor midpoint
        if array[mid] < value:
            b = mid + 1   # value can only be in (mid, e]
        elif array[mid] > value:
            e = mid - 1   # value can only be in [b, mid)
        else:
            print("find it! the index is: ", mid)
            return mid
    print("cannot find it!")
    return -1
# test code for iterative BinarySearch(array,b,e,value)
# Fixture and smoke call for the iterative search (the recursive version
# below also reads this module-level `array`).
array = list(range(16))
Iter_BinarySearch(array, 0, 15, 15)
# recursive
def Recur_BinarySearch(array, b, e, value):
    """Recursively binary-search sorted `array` for `value` within [b, e].

    Returns the index of `value`, or None when it is absent.

    Bug fixes: the original never recursed after narrowing the range (it
    adjusted b/e and fell through, returning None for any value not at the
    first midpoint), and it read the module-level global `array` instead of
    its misspelled, unused parameter `arrray` — the parameter is now named
    `array` and actually used.  Also ported to Python 3 (floor division,
    print() calls).
    """
    if b > e:
        print("cannot find it")
        return None
    mid = (b + e) // 2  # floor midpoint
    if array[mid] < value:
        return Recur_BinarySearch(array, mid + 1, e, value)
    elif array[mid] > value:
        return Recur_BinarySearch(array, b, mid - 1, value)
    print("find it! the index is: ", mid)
    return mid
# test code for recursive BinarySearch.  Fixes: the original first called
# Recur_BinarySearch with undefined names b/e/value (NameError), and then —
# despite the comment — exercised the iterative version; also removed a
# stray trailing "|" artifact.
array = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
Recur_BinarySearch(array, 0, 15, 15)
Recur_BinarySearch(array, 0, 15, 16)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.