index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
17,600 | 2cc90a643e3959dc72819256b7d3d436f4a20bfa | import pywhatkit
pywhatkit.sendwhatmsg('+919650760939','pagal didi',23,38)
|
17,601 | 195ea12ac0f1ef9635d405f5804899443f6ae42a | """
Purpose: make a comparison plot: NEI v.s. CIE.
In this file, create the ionic fraction plot
Written by sw, Jun 02, 2019
"""
# Import used modules
import pyatomdb, pylab
import pickle, os
import numpy as np
import astropy.io.fits as pyfits
from astropy.io import ascii
import matplotlib as mpl
#system parameters
rootpath = os.getcwd()+'/'
#Read the condition file (one row per model condition; provides column 'R', radius in cm)
confile = rootpath+'adia.exp_phy.info'
conditions = ascii.read(confile)
ncondi = len(conditions)
# Atomic numbers summed into the total NEI spectrum (H, He, C, N, O, Ne, Mg, Si, S, Ar, Ca, Fe, Ni)
Zlist = [1,2,6,7,8,10,12,14,16,18,20,26,28]
# Precomputed spectra: cie_spec is indexed [condition, energy bin];
# nei_spec is keyed by element Z plus the shared 'ebins' grid
# (assumed from the indexing below -- TODO confirm against the script that wrote the pickles).
cie_spec = pickle.load(open(rootpath+'cie_case/tcspec_cie.pkl','rb'))
nei_spec = pickle.load(open(rootpath+'nei_case/tcspec_nei.pkl','rb'))
ebins = nei_spec['ebins']
nbins = len(cie_spec[0,:])
# Total NEI spectrum: element-wise sum over the selected elements
nei_tspec = np.zeros([ncondi,nbins], dtype=float)
for Z in Zlist:
    nei_tspec += nei_spec[Z]
# Conditions (rows of the condition file) selected for the comparison plots
condi_index = [26, 100, 125, 200, 250, 283, 342]
for i in condi_index:
    fig, ax = pylab.subplots(1, 1)
    fig.show()
    # NEI vs CIE spectrum for the same condition, scaled by 100
    ax.loglog(ebins, nei_tspec[i,:]*100, drawstyle='steps', label='NEI')
    ax.loglog(ebins, cie_spec[i,:]*100, drawstyle='steps', label='CIE', \
        linestyle='dashed')
    ax.set_xlabel('Energy (keV)')
    ax.set_ylabel('Cts s$^{-1}$ cm$^3$ bin$^{-1}$')
    ax.legend(loc=0)
    ax.set_xlim([0.1,2.0])
    # ax.set_ylim([1e-27,1e-18])
    pylab.draw()
    # Convert radius from cm to parsec (1 pc = 3.0856780e18 cm) for the filename
    radius=conditions[i]['R']/3.0856780e+18
    fig.savefig(rootpath+'figures/comp_spec/testmodel_comp_r%4.2fpc.png' % radius)
|
17,602 | d5f92cc8f6298ea2f9783e546c960ad5cb873f7a | # coding: utf-8
"""
Trip Planner
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.trip_request_response_journey_leg_properties import TripRequestResponseJourneyLegProperties # noqa: E501
from swagger_client.rest import ApiException
class TestTripRequestResponseJourneyLegProperties(unittest.TestCase):
    """Unit-test stubs for the TripRequestResponseJourneyLegProperties model."""

    def setUp(self):
        """No fixtures are required for these stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testTripRequestResponseJourneyLegProperties(self):
        """Test TripRequestResponseJourneyLegProperties"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.trip_request_response_journey_leg_properties.TripRequestResponseJourneyLegProperties()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
|
17,603 | b36b5cca0827bbd5db631f71221255ed1be0c358 | from django.shortcuts import render, redirect
from .models import *
# Create your views here.
def index(request):
    """Render the landing page listing every book in the catalogue."""
    context = {"books": Book.objects.all()}
    return render(request, 'index.html', context)
def add_book(request):
    """On POST, create a Book from the submitted title/desc; always redirect home."""
    if request.method == "POST":
        Book.objects.create(
            title=request.POST['title'],
            desc=request.POST['desc'],
        )
    return redirect("/")
def assign_book(request, book_id):
    """Attach the POSTed author to the given book, then reload its page."""
    book = Book.objects.get(id=book_id)
    author = Author.objects.get(id=request.POST['author_id'])
    book.authors.add(author)
    return redirect(f'/assign-author/{book_id}')
def display_book(request, book_id):
    """Show one book plus the authors not yet linked to it."""
    book = Book.objects.get(id=book_id)
    unlinked_authors = Author.objects.exclude(books__id=book_id)
    context = {
        "books": book,
        "authors": unlinked_authors,
    }
    return render(request, 'book_list.html', context)
def display_author(request, author_id):
    """Show one author plus the books not yet linked to them.

    Fixes two runtime bugs in the original:
    - ``Author.object.get`` -> ``Author.objects.get`` (the default manager is
      named ``objects``; ``object`` raises AttributeError).
    - the template argument was ``author.html`` (an attribute lookup on the
      Author instance) instead of the template-name string ``'author.html'``.
    """
    author = Author.objects.get(id=author_id)
    context = {
        "author": author,
        # Books this author is not yet associated with.
        "book": Book.objects.exclude(authors__id=author_id),
    }
    return render(request, 'author.html', context)
|
17,604 | c9bd3a0459681e802a743dbe3ae185b48be686db | import os
import datetime as dt
import time
#from snownlp import SnowNLP
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
pd.set_option('display.width', 1000)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
matplotlib.rcParams['font.sans-serif']='Microsoft Yahei'
def base_graph(ax):
    """Strip the top/right spines of *ax* and pin ticks to the left/bottom; return *ax*."""
    for side in ('top', 'right'):
        ax.spines[side].set_visible(False)
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_ticks_position('bottom')
    return ax
def RAP_SCORE_scatter():
    """Scatter plot of song score vs publish date for CN and EN rap charts.

    Reads ./res/Chinese_Rap.csv and ./res/English_Rap.csv (columns
    'song_publishTime' in epoch milliseconds and 'song_score') and saves the
    figure to ./res/Scatter_chart_of_CN-EN_RAP_SCORE.png.
    """
    CH_path = os.path.join('./res/', 'Chinese_Rap.csv')
    EN_path = os.path.join('./res/', 'English_Rap.csv')
    df_CN = pd.read_csv(CH_path, low_memory=False)
    df_EN = pd.read_csv(EN_path, low_memory=False)
    X_CN=[]            # publish dates (YYYY-MM-DD strings), CN
    song_score_CN=[]   # scores, CN (parallel to X_CN)
    X_EN = []
    song_score_EN = []
    for i, r in df_CN.iterrows():
        #print(int(r['song_publishTime']))
        # publish time is epoch milliseconds -> local date string
        timeDate = time.strftime('%Y-%m-%d', time.localtime(int(r['song_publishTime'])/ 1000) )
        #print(timeDate)
        song_score_CN.append(int(r['song_score']))
        #if timeDate not in X_CN:
        X_CN.append(timeDate)
    for i, r in df_EN.iterrows():
        timeDate = time.strftime('%Y-%m-%d', time.localtime(int(r['song_publishTime']) / 1000))
        song_score_EN.append(int(r['song_score']))
        # if timeDate not in X_CN:
        X_EN.append(timeDate)
    # distinct years across both datasets, used as x-axis tick labels
    yearlist=[]
    for m in X_CN+X_EN:
        year = m.replace('-','')[0:4]
        if year not in yearlist:
            yearlist.append(year)
    yearlist.sort()
    up_colors = ['SkyBlue', 'IndianRed', 'LimeGreen']
    fig, axs = plt.subplots(2, 1, figsize=(16, 9))
    axs = [base_graph(ax) for ax in axs]
    area = np.pi * 4 ** 2  # marker area
    axs[0].scatter(X_CN, song_score_CN, s=area, c=up_colors[0], alpha=0.4, label='RAP Score CN')
    axs[1].scatter(X_EN, song_score_EN, s=area, c=up_colors[1], alpha=0.4, label='RAP Score EN')
    axs[1].xaxis.set_ticklabels([], minor=True)
    axs[0].xaxis.set_ticklabels([], minor=True)
    axs[0].set_xlabel('')
    axs[1].set_xlabel('')
    handles_top, labels_top = axs[0].get_legend_handles_labels()
    #handles_mid, labels_mid = axs[1].get_legend_handles_labels()
    handles_bot, labels_bot = axs[1].get_legend_handles_labels()
    # NOTE(review): '' is passed where a sequence of labels is expected; the
    # legend text comes from the title only -- looks intentional, verify.
    legend_top = fig.legend(handles_top, '', loc=(0.80, 0.75), title='RAP Score CN')
    #legend_mid = fig.legend(handles_mid, states_up, loc=(0.80, 0.45), title='Rate of percent_submit,Eng')
    legend_bot = fig.legend(handles_bot, '', loc=(0.80, 0.15), title='RAP Score EN')
    fig.add_artist(legend_top)
    #fig.add_artist(legend_mid)
    fig.add_artist(legend_bot)
    fig.subplots_adjust(right=0.82)
    fig.text(0.04, 0.5, 'Scatter chart of CN/EN RAP SCORE', va='center', rotation='vertical')
    png_path = os.path.join('./res/', 'Scatter_chart_of_CN-EN_RAP_SCORE.png')
    # spread the year ticks along the date axis (16.4 is an empirical spacing)
    ind = np.arange(len(yearlist))*16.4
    #plt.setp(axs[0].get_xticklabels(), visible=False)
    plt.xticks(ind, yearlist, rotation='vertical')
    axs[0].set_xticks([])
    #plt.xticks([])
    plt.savefig(png_path)
    plt.show()
    print('Scatter Image saved: ', png_path)
    plt.close()
def RAP_year_bar():
    """Grouped bar chart: charted rap songs per publish year, CN vs EN.

    Reads ./res/Chinese_Rap.csv and ./res/English_Rap.csv, buckets the
    'song_publishTime' column (epoch milliseconds) by year, and saves the
    figure to ./res/The_most_popular_Chinese-English_rap_publish_time_distribution.png.

    Cleanup vs the original: the per-language counting is one shared loop
    (the tuple-rebuilding if/else is gone), and the pointless
    ``for i in range(2): if i == 0 ... elif i == 1`` wrapper around the two
    bar calls is removed. Output is unchanged.
    """
    CH_path = os.path.join('./res/', 'Chinese_Rap.csv')
    EN_path = os.path.join('./res/', 'English_Rap.csv')
    df_CN = pd.read_csv(CH_path, low_memory=False)
    df_EN = pd.read_csv(EN_path, low_memory=False)
    # year (str) -> [cn_count, en_count]
    song_publish_dict = {}
    for df, slot in ((df_CN, 0), (df_EN, 1)):
        for _, r in df.iterrows():
            # publish time is epoch milliseconds -> local date -> year
            timeDate = time.strftime('%Y-%m-%d', time.localtime(int(r['song_publishTime']) / 1000))
            year = timeDate[0:4]
            counts = song_publish_dict.setdefault(year, [0, 0])
            counts[slot] += 1
    # sort by year ascending and split into parallel lists for plotting
    yearlist = []
    X_CN = []
    X_EN = []
    for year, v in sorted(song_publish_dict.items(), key=lambda d: d[0]):
        if year not in yearlist:
            X_CN.append(v[0])
            X_EN.append(v[1])
            yearlist.append(year)
    fig, ax = plt.subplots(1, 1, figsize=(12, 6))
    axs = base_graph(ax)
    ind = np.arange(len(yearlist))
    width = 0.3  # the width of the bars
    up_colors = ['SkyBlue', 'IndianRed', 'LimeGreen']
    # one bar per language, side by side around each year tick
    axs.bar(ind - 1 * width / 2, X_CN, width, color=up_colors[2], label='CN')
    axs.bar(ind + width / 2, X_EN, width, color=up_colors[1], label='EN')
    handles_top, labels_top = axs.get_legend_handles_labels()
    legend_top = fig.legend(handles_top, labels_top, loc=(0.3, 0.334), title='publish num distribute ')
    fig.add_artist(legend_top)
    fig.subplots_adjust(right=0.82)
    fig.text(0.04, 0.5, 'Most popular rap publish time', va='center', rotation='vertical')
    plt.xticks(ind, yearlist, rotation=45)
    png_path = os.path.join('./res', 'The_most_popular_Chinese-English_rap_publish_time_distribution.png')
    plt.savefig(png_path)
    print('Image saved: ', png_path)
    plt.show()
    plt.close()
def RAP_song_company_pie():
    """Two pie charts: market share of the top-10 record companies, CN vs EN rap.

    Reads ./res/Chinese_Rap.csv and ./res/English_Rap.csv ('song_company'
    column) and saves ./res/Top_10_Chinese-English_rap_companies.png.
    """
    #exp =[0.1,0,0,0,0,0,0,0]
    CH_path = os.path.join('./res/', 'Chinese_Rap.csv')
    EN_path = os.path.join('./res/', 'English_Rap.csv')
    df_CN = pd.read_csv(CH_path, low_memory=False)
    df_EN = pd.read_csv(EN_path, low_memory=False)
    company_CN = {}  # company name -> number of charted songs (CN)
    company_EN = {}  # company name -> number of charted songs (EN)
    allCN=0          # total CN songs (denominator for shares)
    allEN=0
    for i, r in df_CN.iterrows():
        # print(int(r['song_publishTime']))
        song_company = r['song_company']
        if song_company not in company_CN:
            company_CN[song_company]=1
        else:
            company_CN[song_company] =company_CN[song_company]+1
        allCN=allCN+1
    for i, r in df_EN.iterrows():
        # print(int(r['song_publishTime']))
        song_company = r['song_company']
        if song_company not in company_EN:
            company_EN[song_company] = 1
        else:
            company_EN[song_company] = company_EN[song_company] + 1
        allEN=allEN+1
    # sort companies by song count, descending
    company_CN = sorted(company_CN.items(), key=lambda kv: kv[1], reverse=True)
    company_EN = sorted(company_EN.items(), key=lambda kv: kv[1], reverse=True)
    label_CN=[]
    piev_CN = []
    for ii in company_CN:
        # 'UnKown' placeholder in the data -> display as self/online release
        if ii[0]=='UnKown':
            label_CN.append('个人网络发行')
            piev_CN.append(round(int(ii[1])/allCN,6))
        else:
            label_CN.append(ii[0])
            piev_CN.append(round(int(ii[1])/allCN,6))
    label_EN = []
    piev_EN = []
    for ii in company_EN:
        #print(ii)
        if ii[0]=='UnKown':
            label_EN.append('个人网络发行')
            piev_EN.append(round(int(ii[1])/allEN,6))
        else:
            label_EN.append(ii[0])
            piev_EN.append(round(int(ii[1])/allEN,6))
    up_colors = ['SkyBlue', 'IndianRed', 'LimeGreen']
    fig, axs = plt.subplots(2, 1, figsize=(8, 6))
    axs = [base_graph(ax) for ax in axs]
    #print(np.sum(piev_CN))
    patches, l_text, p_text =axs[0].pie(piev_CN[0:10],pctdistance=0.7,autopct='%.1f%%',startangle=90, labels=label_CN[0:10])
    axs[0].axis("equal")  # equal x/y scaling so the pie stays a circle
    patches, l_text1, p_text1 =axs[1].pie(piev_EN[0:10],autopct='%.1f%%',startangle=90, labels=label_EN[0:10])
    axs[1].axis("equal")  # equal x/y scaling so the pie stays a circle
    axs[0].legend(label_CN[0:10])
    axs[1].legend(label_EN[0:10])
    axs[0].legend(loc='center left')
    axs[1].legend(loc='center right')
    # shrink percentage and label fonts so the slices stay readable
    for t in p_text:
        t.set_size(5)
    for t in l_text:
        t.set_size(6)
    for t in p_text1:
        t.set_size(5)
    for t in l_text1:
        t.set_size(6)
    handles_top, labels_top = axs[0].get_legend_handles_labels()
    # handles_mid, labels_mid = axs[1].get_legend_handles_labels()
    handles_bot, labels_bot = axs[1].get_legend_handles_labels()
    legend_top = fig.legend(handles_top, '', loc=(0.06, 0.94), title='RAP Company CN')
    # legend_mid = fig.legend(handles_mid, states_up, loc=(0.80, 0.45), title='Rate of percent_submit,Eng')
    legend_bot = fig.legend(handles_bot, '', loc=(0.80, 0.45), title='RAP Company EN')
    fig.add_artist(legend_top)
    # fig.add_artist(legend_mid)
    fig.add_artist(legend_bot)
    plt.title('Proportion of top 10 Chinese and English rap companies')
    plt.savefig('./res/Top_10_Chinese-English_rap_companies.png')
    plt.show()
    print('Scatter Image saved: ', './res/Top_10_Chinese-English_rap_companies.png')
    plt.close()
def RAP_singner_barH():
    """Horizontal bar charts: top-10 singers by number of charted songs, CN vs EN.

    Note: despite the reused 'company_*' variable names, this counts the
    'song_songer' (singer) column.
    """
    CH_path = os.path.join('./res/', 'Chinese_Rap.csv')
    EN_path = os.path.join('./res/', 'English_Rap.csv')
    df_CN = pd.read_csv(CH_path, low_memory=False)
    df_EN = pd.read_csv(EN_path, low_memory=False)
    company_CN = {}  # singer -> song count (CN)
    company_EN = {}  # singer -> song count (EN)
    allCN=0
    allEN=0
    for i, r in df_CN.iterrows():
        # print(int(r['song_publishTime']))
        song_company = r['song_songer']
        if song_company not in company_CN:
            company_CN[song_company]=1
        else:
            company_CN[song_company] =company_CN[song_company]+1
        allCN=allCN+1
    for i, r in df_EN.iterrows():
        # print(int(r['song_publishTime']))
        song_company = r['song_songer']
        if song_company not in company_EN:
            company_EN[song_company] = 1
        else:
            company_EN[song_company] = company_EN[song_company] + 1
        allEN=allEN+1
    # sort singers by song count, descending
    company_CN = sorted(company_CN.items(), key=lambda kv: kv[1], reverse=True)
    company_EN = sorted(company_EN.items(), key=lambda kv: kv[1], reverse=True)
    X_CN=[]  # singer names
    Y_CN=[]  # matching song counts
    for ii in company_CN:
        X_CN.append(ii[0])
        Y_CN.append(ii[1])
    X_EN = []
    Y_EN = []
    for ii in company_EN:
        X_EN.append(ii[0])
        Y_EN.append(ii[1])
    up_colors = ['SkyBlue', 'IndianRed', 'LimeGreen']
    fig, axs = plt.subplots(2, 1, figsize=(11, 6))
    axs = [base_graph(ax) for ax in axs]
    # top-10 only, CN on the upper axis, EN on the lower
    axs[0].barh( X_CN[0:10],Y_CN[0:10], align='center',color='SkyBlue', ecolor='black')
    axs[1].barh( X_EN[0:10],Y_EN[0:10], align='center',color='LimeGreen', ecolor='black')
    axs[0].set_xlim(0, 22)
    axs[1].set_xlim(0, 22)
    plt.xlabel("Number of songs")
    plt.ylabel("Singer")
    handles_top, labels_top = axs[0].get_legend_handles_labels()
    # handles_mid, labels_mid = axs[1].get_legend_handles_labels()
    handles_bot, labels_bot = axs[1].get_legend_handles_labels()
    legend_top = fig.legend(handles_top, '', loc=(0.80, 0.70), title='TOP 10 RAP SINGER CN')
    # legend_mid = fig.legend(handles_mid, states_up, loc=(0.80, 0.45), title='Rate of percent_submit,Eng')
    legend_bot = fig.legend(handles_bot, '', loc=(0.80, 0.30), title='TOP 10 RAP SINGER EN')
    fig.add_artist(legend_top)
    # fig.add_artist(legend_mid)
    fig.add_artist(legend_bot)
    #plt.title('中英文说唱前十歌手及上榜歌曲数目')
    plt.savefig('./res/Comparison_of_the_number_of_top10_Chinese_and_English_rap_singers_and_songs.png')
    plt.show()
    plt.close()
    print('Scatter Image saved: ', './res/Comparison_of_the_number_of_top10_Chinese_and_English_rap_singers_and_songs.png')
def _sentiment_ratios(df):
    """Return (positive%, negative%, neutral%) for a 'sentiment' column of -1/0/1."""
    up = down = normal = 0
    for _, r in df.iterrows():
        if r['sentiment'] == -1:
            down += 1
        elif r['sentiment'] == 1:
            up += 1
        else:
            normal += 1
    total = down + up + normal
    return (round(up / total * 100, 6),
            round(down / total * 100, 6),
            round(normal / total * 100, 6))

def RAP_sentiment_BAR():
    """Stacked bar chart comparing sentiment-class ratios of CN vs EN rap lyrics.

    Reads ./res/*-Emotion_classification.csv and saves the figure to
    ./res/Comparison-of-emotion-classification-between-Chinese-and-English-rap.png.

    Fixes vs the original: the bare ``except:`` around the Chinese csv read
    now only catches the encoding failure it was meant to handle, and the
    builtin ``sum`` is no longer shadowed by a local variable.
    """
    CH_path = os.path.join('./res/', 'Chinese_Rap-Emotion_classification.csv')
    EN_path = os.path.join('./res/', 'English_Rap-Emotion_classification.csv')
    df_EN = pd.read_csv(EN_path, low_memory=False, encoding="utf_8_sig")
    try:
        df_CN = pd.read_csv(CH_path, low_memory=False, encoding="utf_8_sig")
    except UnicodeDecodeError:
        # some exports of the Chinese csv are GB-encoded
        df_CN = pd.read_csv(CH_path, low_memory=False, encoding="gb18030")
    cn = _sentiment_ratios(df_CN)
    en = _sentiment_ratios(df_EN)
    S1 = [cn[0], en[0]]  # positive %, [CN, EN]
    S2 = [cn[1], en[1]]  # negative %
    S3 = [cn[2], en[2]]  # neutral %
    # cumulative bottoms for the third (neutral) segment of the stack
    d = [S1[i] + S2[i] for i in range(len(S1))]
    print('中、英文说唱情感积极所占比例为:'+str(S1))
    print('中、英文说唱情感消极所占比例为:'+str(S2))
    print('中、英文说唱情感中性所占比例为:' + str(S3))
    ind = np.arange(2)  # the x locations for the groups
    width = 0.35  # the width of the bars: can also be len(x) sequence
    p1 = plt.bar(ind, S1, width, color='#d62728')
    p2 = plt.bar(ind, S2, width, bottom=S1)
    p3 = plt.bar(ind, S3, width, bottom=d)
    plt.ylabel('Percentage')
    plt.title('Comparison of emotion classification between Chinese and English rap')
    plt.xticks(ind, ('CN', 'EN'))
    plt.yticks(np.arange(0, 110, 10))
    plt.legend((p1[0], p2[0], p3[0]), ('positive', 'negative', 'neutral'))
    plt.savefig('./res/Comparison-of-emotion-classification-between-Chinese-and-English-rap.png')
    plt.show()
    plt.close()
    print('Scatter Image saved: ', './res/Comparison-of-emotion-classification-between-Chinese-and-English-rap.png')
#RAP_SCORE_scatter()
#RAP_year_bar()
#RAP_song_company_pie()
#RAP_singner_barH()
#RAP_sentiment()
#RAP_sentiment_BAR() |
17,605 | 3f5c32584d1e5d102745dceae36814f11a8219bc | # -*- coding: utf-8 -*-
from math import sqrt
from random import uniform
from agent import *
#____________________FONCTIONS ____________________#
#==========================
# fonction de dessin
def draw():
    """Drawing callback: clear the window, then redraw the dog and every sheep.

    Reads module globals set in the __main__ block: win, dog, sheep, nbsheep.
    """
    win.clear()  # wipe the previous frame
    dog.drawC()
    for i in range(0,nbsheep) : sheep[i].drawL()
#==========================
# fonction animatrice
def anim():
    """Animation callback: advance the dog one step, then every sheep.

    Reads module globals set in the __main__ block: win, dog, sheep,
    nbsheep, alfscale.
    """
    # the dog
    #dog.head.obstacle(C,nbC)
    dog.head.bounce(win)
    dog.move()
    # steer the flock; the 'alf' coefficient is read live from the UI slider
    dog.leadgroup(sheep,nbsheep,alfscale.get())
    # the sheep
    for i in range(0,nbsheep) :
        #sheep[i].head.obstacle(C,nbC)
        sheep[i].head.bounce(win)
        sheep[i].move()  # one movement step
#____________________PRINCIPAL ____________________#
if __name__ == '__main__':
    #==========================
    # Create the window and wire up the event loop:
    win=MainWindow("snakes",900,450,"white")
    win.SetDrawZone(-10,-5,+10,+5)
    colP = ['red','orange','green','cyan','blue','magenta']
    colC = ['sienna','maroon','brown','pink', 'tan','wheat','gold',
            'plum','khaki','indian red','thistle','firebrick','salmon','coral']
    ray = 0.08   # agent radius (passed to Agent/Leader) -- presumably drawing size
    alf = 0.5    # default steering coefficient
    vmod = 0.05  # speed modulus for initial velocities
    # the dog (flock leader): random start position and heading inside the draw zone
    pos=Point(uniform(.9*win.xmin(),.9*win.xmax()),uniform(.9*win.ymin(),.9*win.ymax()))
    vit=vmod*Vecteur(uniform(-1.,+1.),uniform(-1.,+1.)).normalized()
    dog=Leader(5,pos,vit,ray,"black",alf)
    nbsheep=15 # number of sheep
    # the first snake
    sheep=[]
    alfs =[]
    # the following snakes, each with a random position/velocity and a cycling colour
    for i in range(0,nbsheep) :
        pos=Point(uniform(.9*win.xmin(),.9*win.xmax()),uniform(.9*win.ymin(),.9*win.ymax()))
        vit=vmod*Vecteur(uniform(-1.,+1.),uniform(-1.,+1.)).normalized()
        sheep.append(Agent(5,pos,vit,ray,colP[i%6],alf))
        alfs.append(uniform(.85,.9))
    # UI slider controlling the 'alf' steering coefficient read by anim()
    alfscale=win.CreateScalev(label='alf',inf=0.01,sup=0.99,step=0.01)
    win.anim=anim
    win.draw=draw
    win.startmainloop()
|
17,606 | be2e767c40b8adcff5e5f578c6c0fefcd738ba4e | # coding=utf-8
# Copyright 2023 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""i_naturalist2021 dataset."""
from tensorflow_datasets.image_classification.i_naturalist2021 import i_naturalist2021
import tensorflow_datasets.public_api as tfds
class INaturalist2021Test(tfds.testing.DatasetBuilderTestCase):
  """Tests for i_naturalist2021 dataset."""

  DATASET_CLASS = i_naturalist2021.INaturalist2021
  # Expected number of fake examples per split.
  SPLITS = {
      "mini": 2,
      "test": 3,
      "train": 3,
      "val": 2,
  }
  # "mini" is a subset of "train", so shared examples are expected.
  OVERLAPPING_SPLITS = ["mini", "train"]
  # Per split: one fake image archive and one fake metadata json.
  DL_EXTRACT_RESULT = {}
  for split, split_file in i_naturalist2021._SPLIT_FILENAMES.items():
    DL_EXTRACT_RESULT[f"{split}_img"] = f"{split_file}.tar.gz"
    DL_EXTRACT_RESULT[f"{split}_json"] = split_file
  SKIP_CHECKSUMS = True
SKIP_CHECKSUMS = True
if __name__ == "__main__":
tfds.testing.test_main()
|
17,607 | d382fddd66688d8ac9c6e9d302ac375fabf702dd | # -*- coding: utf-8 -*-
from sound_lab_core.Clasification.Adapters.ClassifierAdapter import ClassifierAdapter
from sound_lab_core.Clasification.Classifiers.ManualClassifier import ManualClassifier
class ManualClassifierAdapter(ClassifierAdapter):
"""
Adapter class for the manual classifier
"""
    def __init__(self):
        """Initialise the base adapter and set the classifier's display name."""
        ClassifierAdapter.__init__(self)
        # Name shown in the UI for this classification method.
        self.name = u'Manual'
def get_instance(self):
return ManualClassifier() |
17,608 | 5a2fd2f487087b02a8d8595bedbd0e79eab45913 | # Clase para conectar el backend de la clase USUARIO y el user de Django.
from django.contrib.auth.models import User, check_password
from usuarios.models import Usuario
class BackendUsuarios(object):
def authenticate(self, username=None, password=None):
try:
#Obtenga el usuario
usuario = Usuario.objects.get(cedula_usuario=username)
if usuario is not None:
if check_password(password, usuario.password):
return usuario
# Si el password es incorrecto
else:
return None
# Si el usuario esta vacio
else:
return None
# Error en el acceso a la BD
except User.DoesNotExist:
return None
def get_user(self, user_id):
try:
return Usuario.objects.get(cedula_usuario=user_id)
except User.DoesNotExist:
return None |
17,609 | f41d41a58a1226416c4759f2a3bf42884936bf06 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
python-cfconfigurator is a simple and small library to manage Cloud Foundry
(c) 2016 Jose Riguera Lopez, jose.riguera@springer.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Python 2 and 3 compatibility
from __future__ import unicode_literals, print_function
from cfconfigurator.exceptions import CFException, UAAException
from cfconfigurator.cf import UAA
from cfconfigurator.cf import CF
def main():
    """Smoke-test drive of the UAA client: user, group and client CRUD.

    Talks to a live UAA server; every call's response is printed. Entities
    named 'jose*' are created, mutated and finally deleted.
    """
    u = UAA("https://uaa.test.example.com", "admin", "admin-secret")
    a = u.login()
    print(a)
    # --- users: delete leftover, create, fetch, change password ---
    user = u.user_find({'userName': 'jose'})
    if user['totalResults'] == 1:
        deleted = u.user_delete(user['resources'][0]['id'])
        print(deleted)
    new_user = u.user_save("jose", ["Jose", "Riguera"], "hola", ["jriguera@hola.com"])
    print(new_user)
    user = u.user_get(new_user['id'])
    pas = u.user_set_password(user['id'], 'adios')
    print(pas)
    print("=====================")
    # --- groups: delete leftover, create, add/remove the user ---
    group = u.group_find({'displayName': 'josegroup'})
    if group['totalResults'] == 1:
        deleted = u.group_delete(group['resources'][0]['id'])
        print(deleted)
    new_group = u.group_save("josegroup", "Jose Riguera Group")
    print(new_group)
    # add user
    group_member = u.group_manage_member(new_group['id'], user['id'])
    print(group_member)
    group = u.group_get(group_member['id'])
    print(group)
    # remove user
    group_member = u.group_manage_member(group['id'], user['id'], add=False)
    print(group_member)
    print("=====================")
    # --- clients: delete leftover, create, rotate secret, update ---
    clients = u.client_find({'client_id': 'joseclient'})
    print(clients)
    if clients['totalResults'] == 1:
        deleted = u.client_delete(clients['resources'][0]['client_id'])
        print(deleted)
    new_client = u.client_save("joseclient", "Jose Riguera client", "hola", "adios", scope=['uaa.user'])
    print(new_client)
    client_secret = u.client_set_secret(new_client['client_id'], "pedro")
    print(client_secret)
    client = u.client_save("joseclient", "JoseRRRRRRRRRRRR", "hola", "token", scope=['uaa.user'], id=new_client['client_id'])
    print(client)
    clients = u.client_find({'client_id': 'joseclient'})
    print(clients)
    # delete everything created above
    u.group_delete(group['id'])
    u.user_delete(user['id'])
    u.client_delete(client['client_id'])
if __name__ == '__main__':
main()
|
17,610 | 99846d4a4a4a50c5ee34efe90715f1269f6789c3 | # -*-coding:latin-1 -*
import os
from math import sqrt
# Interactive solver for a*x**2 + b*x + c = 0.
# Fixes vs the original:
# - sqrt was used but never imported (NameError on every D > 0 input);
#   'from math import sqrt' is added to the imports above.
# - a == 0 with b != 0, c == 0 (and a == 0, b == 0, c != 0) fell through to
#   the quadratic branch and divided by 2*a == 0; the degenerate conditions
#   are fixed.
# - redundant 'X1 = X1 = ...' double assignments removed; the final print
#   reuses X1/X2 instead of recomputing the same expressions.
a = 0
b = 0
c = 0
D = 0
# Read the three coefficients
a = float(input("saisissez votre nombre: "))
b = float(input("saisissez votre nombre:"))
c = float(input("saisissez votre nombre:"))
# Discriminant
D = (b**2) - (4*a*c)
if a == 0 and b == 0:
    # No x term at all: nothing to solve whatever c is (the original crashed
    # with ZeroDivisionError when a == b == 0 and c != 0).
    print("pas d'équation")
elif a == 0:
    # Linear case bx + c = 0 (the original also required c != 0 here, so
    # a == 0, b != 0, c == 0 crashed in the quadratic branch below).
    print("il s'agit d'une équation du 1er degrée et nn d'une éqution du 2nd , on va donc calculer la seule solution de : bx + c = 0 ")
    print((-c)/b)
else:
    if D < 0:
        print("pas de solution réelle")
    elif D == 0:
        print("on a une solution double x0")
        print((-b)/(2*a))
    else:
        print("vous avez bien deux soulutions réelles et distinctes")
        # Numerically stable pairing: pick the sign that avoids cancellation
        # for X1, then derive X2 from the product of roots c/a.
        if b < 0:
            X1 = ((-b) + sqrt(D))/(2*a)
            X2 = c/a/X1
            print(" les solutions sont : \n X1 =", X1, "\n X2 = ", X2)
        else:
            X1 = ((-b) - sqrt(D))/(2*a)
            X2 = c/a/X1
            print("les solutions sont : X1 =", X1, "\n X2=", X2)
os.system("pause") |
17,611 | 262bfb32cefe87f8beaf71ac91741fe0bcc3717b | """
:code:`data_preprocessor.py`
Preprocess appropriately formatted csv data into PreprocessedData object
"""
import copy
import numpy as np
import pandas as pd
import scipy.signal as ss
import theano
from .preprocessed_data import PreprocessedData
def preprocess_data(data_path, last_day=None, schools_unis='two_separate', drop_features=None, min_confirmed=100,
                    min_deaths=10, smoothing=1, mask_zero_deaths=False, mask_zero_cases=False):
    """
    Preprocess data .csv file, in our post-merge format, with different options.
    :param data_path: Path of .csv file to process.
    :param last_day: Last day of window to analysis to use e.g. str '2020-05-30'. If None (default), go to the last day
    in the .csv file.
    :param schools_unis: | how to process schools and unis. Options are:
    | - two_xor. One xor feature, one and feature.
    | - two_separate. One schools feature, one university feature.
    | - one_tiered. One tiered feature. 0 if none active, 0.5 if either active, 1 if both active.
    | - one_and. One feature, 1 if both active.
    :param drop_features: list of strs, names of NPI features to drop. Defaults to all NPIs not collected by the
    EpidemicForecasting.org team.
    :param min_confirmed: confirmed cases threshold, below which new (daily) cases are ignored.
    :param min_deaths: deaths threshold, below which new (daily) deaths are ignored.
    :param smoothing: number of days over which to smooth. This should be an odd number. If 1, no smoothing occurs.
    :param mask_zero_deaths: bool, whether to ignore (i.e., mask) days with zero deaths.
    :param mask_zero_cases: bool, whether to ignore (i.e., mask) days with zero cases.
    :return: PreprocessedData object.
    """
    # load data from our csv; (Country Code, Date) becomes a 2-level index
    df = pd.read_csv(data_path, parse_dates=["Date"], infer_datetime_format=True).set_index(
        ["Country Code", "Date"])
    # handle custom last day of analysis
    if last_day is None:
        Ds = list(df.index.levels[1])
    else:
        Ds = list(df.index.levels[1])
        last_ts = pd.to_datetime(last_day, utc=True)
        Ds = Ds[:(1 + Ds.index(last_ts))]
    nDs = len(Ds)
    # order regions by their first appearance in the csv
    all_rs = list([r for r, _ in df.index])
    regions = list(df.index.levels[0])
    locations = [all_rs.index(r) for r in regions]
    sorted_regions = [r for l, r in sorted(zip(locations, regions))]
    nRs = len(sorted_regions)
    region_names = copy.deepcopy(sorted_regions)
    region_full_names = df.loc[region_names]["Region Name"]
    if drop_features is None:
        # note: these features are taken from the OxCGRT Dataset other than the mask wearing option
        drop_features = ['Mask Wearing', 'Travel Screen/Quarantine', 'Travel Bans', 'Public Transport Limited',
                         'Internal Movement Limited', 'Public Information Campaigns', 'Symptomatic Testing']
    for f in drop_features:
        print(f'Dropping NPI {f}')
        df = df.drop(f, axis=1)
    # pull data; NPI (countermeasure) columns start at column 4
    CMs = list(df.columns[4:])
    nCMs = len(CMs)
    # arrays indexed [region, day] (ActiveCMs: [region, NPI, day])
    ActiveCMs = np.zeros((nRs, nCMs, nDs))
    Confirmed = np.zeros((nRs, nDs))
    Deaths = np.zeros((nRs, nDs))
    Active = np.zeros((nRs, nDs))
    NewDeaths = np.zeros((nRs, nDs))
    NewCases = np.zeros((nRs, nDs))
    for r_i, r in enumerate(sorted_regions):
        region_names[r_i] = df.loc[(r, Ds[0])]['Region Name']
        for d_i, d in enumerate(Ds):
            Confirmed[r_i, d_i] = df.loc[(r, d)]['Confirmed']
            Deaths[r_i, d_i] = df.loc[(r, d)]['Deaths']
            Active[r_i, d_i] = df.loc[(r, d)]['Active']
        ActiveCMs[r_i, :, :] = df.loc[r].loc[Ds][CMs].values.T
    # compute new (daily) cases, after using thresholds
    Confirmed[Confirmed < min_confirmed] = np.nan
    Deaths[Deaths < min_deaths] = np.nan
    # first differences; day 0 stays 0 by construction
    NewCases[:, 1:] = (Confirmed[:, 1:] - Confirmed[:, :-1])
    NewDeaths[:, 1:] = (Deaths[:, 1:] - Deaths[:, :-1])
    # negative daily counts (data corrections) and NaNs are zeroed before smoothing
    NewDeaths[NewDeaths < 0] = 0
    NewCases[NewCases < 0] = 0
    NewCases[np.isnan(NewCases)] = 0
    NewDeaths[np.isnan(NewDeaths)] = 0
    if smoothing != 1:
        print('Smoothing')
        # bulk smooth: moving average of width `smoothing` along the day axis
        SmoothedNewCases = np.around(
            ss.convolve2d(NewCases, 1 / smoothing * np.ones(shape=(1, smoothing)), boundary='symm',
                          mode='same'))
        SmoothedNewDeaths = np.around(
            ss.convolve2d(NewDeaths, 1 / smoothing * np.ones(shape=(1, smoothing)), boundary="symm",
                          mode='same'))
        # correct for specific regions
        for r in range(nRs):
            # if the country has too few deaths, ignore
            if Deaths[r, -1] < 50:
                print(f'Note: did not smooth deaths in {region_names[r]}')
                SmoothedNewDeaths[r, :] = NewDeaths[r, :]
        NewCases = SmoothedNewCases
        NewDeaths = SmoothedNewDeaths
    print('Masking invalid values')
    # thresholds of 1 vs 0 decide whether zero-count days are masked out
    if mask_zero_deaths:
        NewDeaths[NewDeaths < 1] = np.nan
    else:
        NewDeaths[NewDeaths < 0] = np.nan
    if mask_zero_cases:
        NewCases[NewCases < 1] = np.nan
    else:
        NewCases[NewCases < 0] = np.nan
    # convert to theano's float dtype with NaNs masked
    Confirmed = np.ma.masked_invalid(Confirmed.astype(theano.config.floatX))
    Active = np.ma.masked_invalid(Active.astype(theano.config.floatX))
    Deaths = np.ma.masked_invalid(Deaths.astype(theano.config.floatX))
    NewDeaths = np.ma.masked_invalid(NewDeaths.astype(theano.config.floatX))
    NewCases = np.ma.masked_invalid(NewCases.astype(theano.config.floatX))
    # handle schools and universities
    if schools_unis == 'two_xor':
        school_index = CMs.index('School Closure')
        university_index = CMs.index('University Closure')
        ActiveCMs_final = copy.deepcopy(ActiveCMs)
        # feature 1: both closed (and); feature 2: exactly one closed (xor)
        ActiveCMs_final[:, school_index, :] = np.logical_and(ActiveCMs[:, university_index, :],
                                                             ActiveCMs[:, school_index, :])
        ActiveCMs_final[:, university_index, :] = np.logical_xor(ActiveCMs[:, university_index, :],
                                                                 ActiveCMs[:, school_index, :])
        ActiveCMs = ActiveCMs_final
        CMs[school_index] = 'School and University Closure'
        CMs[university_index] = 'Schools xor University Closure'
    elif schools_unis == 'one_tiered':
        school_index = CMs.index('School Closure')
        university_index = CMs.index('University Closure')
        ActiveCMs_final = copy.deepcopy(ActiveCMs)
        # 1 if both closed, 0.5 if exactly one closed, 0 otherwise
        ActiveCMs_final[:, school_index, :] = np.logical_and(ActiveCMs[:, university_index, :],
                                                             ActiveCMs[:, school_index, :]) + 0.5 * np.logical_xor(
            ActiveCMs[:, university_index, :],
            ActiveCMs[:, school_index, :])
        ActiveCMs = np.delete(ActiveCMs_final, university_index, axis=1)
        CMs.remove('University Closure')
    elif schools_unis == 'two_separate':
        # don't need to do anything for this!
        pass
    elif schools_unis == 'one_and':
        school_index = CMs.index('School Closure')
        university_index = CMs.index('University Closure')
        ActiveCMs_final = copy.deepcopy(ActiveCMs)
        ActiveCMs_final[:, school_index, :] = np.logical_and(ActiveCMs[:, university_index, :],
                                                             ActiveCMs[:, school_index, :])
        ActiveCMs = np.delete(ActiveCMs_final, university_index, axis=1)
        CMs[school_index] = 'School and University Closure'
        CMs.remove('University Closure')
    return PreprocessedData(Active,
                            Confirmed,
                            ActiveCMs,
                            CMs,
                            sorted_regions,
                            Ds,
                            Deaths,
                            NewDeaths,
                            NewCases,
                            region_full_names)
|
17,612 | 19c9565c349da7c3799ad9098a54a4bacca3a623 | # -*- coding: utf-8 -*-
import sys
import os
import pandas as pd
import time
import XVF3510_CTRL as xvf_ctrl
import codama_ctrl
import UMC404_PLAY as SoundOut
import CODAMA_REC as SoundInCodama
import XVF3510_REC as SoundInXVF3510
import wave_att as att
DeviceOut = SoundOut.UMC404_PLAY()
DeviceCodama = SoundInCodama.CODAMA_REC()
DeviceXVF3510 = SoundInXVF3510.XVF3510_REC()
att = att.wave_att()
xvf_ctrl = xvf_ctrl.XVF3510_CTRL()
def record(outfile1, level1, outfile2, level2, recfile, mic):
print("recording ........")
att.att(outfile1, 'temp0.wav', level1, 0)
att.att(outfile2, 'temp1.wav', level2, 0)
DeviceOut.SetFileName('temp0.wav', 'temp1.wav')
if mic == 'codama':
DeviceCodama.SetFileName(recfile)
DeviceCodama.START()
else:
DeviceXVF3510.SetFileName(recfile)
DeviceXVF3510.START()
DeviceOut.START()
while DeviceOut.chek_active():
time.sleep(0.1)
DeviceOut.STOP()
if mic == 'codama':
DeviceCodama.STOP()
else:
DeviceXVF3510.STOP()
return
def main():
xvf3510_flag = 1
codama_flag = 1
cdm_ctrl = codama_ctrl
if (len(sys.argv) >= 3):
logfName = sys.argv[3]
else:
logfName = './record/rec_log.txt'
logfile = open(logfName,'w')
if (len(sys.argv) >= 2):
rinfName = sys.argv[2]
else:
rinfName = './record/rec_setting.csv'
rec_info = pd.read_csv(rinfName) #録音シーケンス
row = rec_info.shape[0]
rec_cnt = 0
for i in range(row):
if (rec_info.key[i]).lower() == 'start':
rec_cnt += 1
if (rec_info.key[i]).lower() == 'end':
break
mic_flag = 'codama'
envfile = ''
envlevel = 0
reffile = ''
reflevel = 0
rec_path = './record'
rec_file = 'auto'
rec_cnt = 0
for i in range(row):
if (rec_info.key[i]).lower() == 'end':
print("END keyword detect in record setting file")
break
elif (rec_info.key[i]).lower() == 'mic':
if (rec_info.augment0[i]).lower() == 'xvf3510':
mic_flag = 'xvf3510'
readvalue = xvf_ctrl.read('GET_VERSION')
if not readvalue:
print("can't control XVF3510")
xvf3510_flag = 0
else:
xvf3510_flag = 1
print("XVF3510",readvalue)
else:
mic_flag = 'codama'
dev_codama_ctrl = cdm_ctrl.find()
if not dev_codama_ctrl:
print("can't control codama")
codama_flag = 0
else:
codama_flag = 1
elif (rec_info.key[i]).lower() == 'envfile':
envfile = rec_info.augment0[i]
print("env sound file is ", envfile)
elif (rec_info.key[i]).lower() == 'envlevel':
envlevel = rec_info.augment0[i]
print("env sound level is ", envlevel)
elif (rec_info.key[i]).lower() == 'reffile':
reffile = rec_info.augment0[i]
print("reference sound file is ", reffile)
elif (rec_info.key[i]).lower() == 'reflevel':
reflevel = rec_info.augment0[i]
print("reference sound level is ", reflevel)
elif (rec_info.key[i]).lower() == 'rec_path':
rec_path = rec_info.augment0[i]
print("recording path is ", rec_path)
elif (rec_info.key[i]).lower() == 'rec_file':
rec_file = rec_info.augment0[i]
print("recording file is ", rec_file)
elif (rec_info.key[i]).lower() == 'prm':
prm_reg = rec_info.augment0[i]
prm_val = rec_info.augment1[i]
if mic_flag == 'codama':
if codama_flag == 0:
print ("can't control codama")
else:
dev_codama_ctrl.write(prm_reg, prm_val)
ParamValue = dev_codama_ctrl.read(prm_reg)
print("codama's parameter", prm_reg, "is set to ", ParamValue)
elif mic_flag == 'xvf3510':
if xvf3510_flag == 0:
print ("can't control XVF3510")
else:
retval = xvf_ctrl.write(prm_reg,prm_val)
print(retval)
elif(rec_info.key[i]).lower() == 'start':
if rec_file.lower() == 'auto':
rec_filename = rec_path + 'rec' + str(rec_cnt) + '.wav'
else:
rec_filename = rec_path + rec_file
logfile.write("##################### recording start #####################" + '\n')
logfile.write("MIC is:" + mic_flag + '\n')
logfile.write("envelop sound:" + envfile + " level:" + str(envlevel)+'\n')
logfile.write("reference sound:" + reffile + " level:" + str(reflevel)+'\n')
logfile.write("recording to : " + rec_filename + '\n')
if mic_flag == 'xvf3510':
xvf_ctrl.dumpReg(logfile)
if mic_flag == 'codama':
dev_codama_ctrl.dump_reg(logfile)
record(reffile, float(reflevel), envfile, float(envlevel), rec_filename, mic_flag)
rec_cnt = rec_cnt + 1
if __name__ == '__main__':
main()
|
17,613 | 7ecf7068ca12993f251ad23cb7e20b1dbde3fdb3 | our_tuple=1,2,3,"A","B","C" #tuple yek variable ba bishtar az yek moteghayere
our_tuple=(1,2,3,"A","B","C") #be khater shenasayi behtar intori minevisan
print(type(our_tuple))
print(our_tuple[0:3])
#fargh asli tuple o list ine k list bade sakht ghabele taghyire ama tuple na
#string ha ham taghyir na pazire
#####our_tuple[1]=100
tuple()#chizaye dg ro tabdil be tuple mikone
A,B,C=(1,2,3)#chandta ro assign konim ham zaman
print("A class:",type(A))
print(A)
G,H,I="789"#chandta ro assign konim ham zaman
print("G class:",type(A))
print(G)
my_dictionary={"Alireza":25,"Bob":27,"Elham":17,"Elnaz":22 }
# tarife dictionary, age "" nabashe, donbale variable migarde k tarif shode bashe
print(my_dictionary["Alireza"])#baraye entekhabe key az [] estefade karde
print(my_dictionary["Alireza"])
my_dictionary["Alireza"]=26#taghyire data
print(my_dictionary["Alireza"])
del my_dictionary["Bob"]#pak kardan yek key
print(my_dictionary)
print(my_dictionary.keys()) #.keys baraye didan key ha hastesh
print(my_dictionary.items())
#####my_dictionary.keys()[0] # dictionary ro nmishe tike tike did
#bayad list kard ta beshe did
a=list(my_dictionary)
print(a[1])
print(list(my_dictionary.values())[1:])
print(my_dictionary.items())
######print((my_dictionary.values())[1:]) nmishe values ro mostaghim tike tike kard
#dictionary order nadare
#tanha rah dastresi be dade estefade az key hastesh va na value
print("#"*27)
students = {"Alice":26,
"Bob":27,
"Claire":17,
"Dan":21,
"Emma":22}
print(students)
students = {"Alice":["ID001",26,"A"],
"Bob":["ID002",27,"B"],
"Claire":["ID003",17,"C"],
"Dan":["ID004",21,"D"],
"Emma":["ID002",22,"E"]}
print(students)
print(students["Claire"][0])#faghat print kardane id yek nafar
print(students["Dan"][1:])
students = {"Alice":{"id":"ID001","age":26,"grade":"A"},
"Bob":{"id":"ID002","age":27,"grade":"B"},
"Claire":{"id":"ID003","age":17,"grade":"C"},
"Dan":{"id":"ID004","age":21,"grade":"D"},
"Emma":{"id":"ID005","age":22,"grade":"E"}}
print(students)
print(students["Dan"]["age"])
print(students["Emma"]["id"])
|
17,614 | 098af702af0462b78391d151a7361012b758c2fc | #!/usr/bin/python
# -*- coding:utf-8 -*-
# 2018/6/25
import collections
class Solution(object):
def isIsomorphic(self, S, T):
"""
:type s: str
:type t: str
:rtype: bool
"""
temp = dict()
lenght = len(S)
SS = [i for i in range(lenght)]
TT = [i for i in range(lenght)]
for i, j in enumerate(S):
if temp.get(j) is None:
temp[j] = SS[i]
SS[i] = temp.get(j)
temp.clear()
for i, j in enumerate(T):
if temp.get(j) is None:
temp[j] = TT[i]
TT[i] = temp.get(j)
if SS == TT:
return True
else:
return False
print(Solution().isIsomorphic('ab', 'aa'))
|
17,615 | df480a004efbf5eaba829f1e96292cf2f99b5189 | print('master branch')
print('line 2 dev')
|
17,616 | 356cf94af5e5feb38769d51c83f6efa48e882f3d | from text_selection_core.filtering.vocabulary_filtering import matches_any_oov
def test_empty__returns_false():
assert not matches_any_oov([], ["a"])
def test_ab_b__returns_true():
assert matches_any_oov(["a", "b"], ["b"])
def test_ab_ab__returns_false():
assert not matches_any_oov(["a", "b"], ["a", "b"])
|
17,617 | 9cce33fce211430f53733d83bc17f94c5c80fb21 | from django.apps import AppConfig
class DhirenWebConfig(AppConfig):
name = 'dhiren_web'
|
17,618 | 914e66769c8fbfa9ccd0c2a91f5b13d0625694b1 | import re
import sys
import metricbeat
import unittest
@unittest.skipUnless(re.match("(?i)win|linux|darwin|freebsd", sys.platform), "os")
class Test(metricbeat.BaseTest):
def test_drop_fields(self):
self.render_config_template(
modules=[{
"name": "system",
"metricsets": ["cpu"],
"period": "1s"
}],
processors=[{
"drop_fields": {
"when": "range.system.cpu.system.pct.lt: 0.1",
"fields": ["system.cpu.load"],
},
}]
)
proc = self.start_beat()
self.wait_until(lambda: self.output_lines() > 0)
proc.check_kill_and_wait()
output = self.read_output_json()
self.assertEqual(len(output), 1)
evt = output[0]
self.assert_fields_are_documented(evt)
print(evt)
print(evt.keys())
self.assertItemsEqual(self.de_dot([
'agent', '@timestamp', 'system', 'metricset.module',
'metricset.rtt', 'metricset.name', 'host', 'service', 'ecs', 'event'
]), evt.keys())
cpu = evt["system"]["cpu"]
print(cpu.keys())
self.assertItemsEqual(self.de_dot([
"system", "cores", "user", "softirq", "iowait",
"idle", "irq", "steal", "nice", "total"
]), cpu.keys())
def test_dropfields_with_condition(self):
"""
Check drop_fields action works when a condition is associated.
"""
self.render_config_template(
modules=[{
"name": "system",
"metricsets": ["process"],
"period": "1s"
}],
processors=[{
"drop_fields": {
"fields": ["system.process.memory"],
"when": "range.system.process.cpu.total.pct.lt: 0.5",
},
}]
)
metricbeat = self.start_beat()
self.wait_until(
lambda: self.output_count(lambda x: x >= 1),
max_timeout=15)
metricbeat.kill_and_wait()
output = self.read_output(
required_fields=["@timestamp"],
)
for event in output:
if float(event["system.process.cpu.total.pct"]) < 0.5:
assert "system.process.memory.size" not in event
else:
assert "system.process.memory.size" in event
def test_dropevent_with_condition(self):
"""
Check drop_event action works when a condition is associated.
"""
self.render_config_template(
modules=[{
"name": "system",
"metricsets": ["process"],
"period": "1s"
}],
processors=[{
"drop_event": {
"when": "range.system.process.cpu.total.pct.lt: 0.001",
},
}]
)
metricbeat = self.start_beat()
self.wait_until(
lambda: self.output_count(lambda x: x >= 1),
max_timeout=15)
metricbeat.kill_and_wait()
output = self.read_output(
required_fields=["@timestamp"],
)
for event in output:
assert float(event["system.process.cpu.total.pct"]) >= 0.001
def test_dropevent_with_complex_condition(self):
"""
Check drop_event action works when a complex condition is associated.
"""
self.render_config_template(
modules=[{
"name": "system",
"metricsets": ["process"],
"period": "1s"
}],
processors=[{
"drop_event": {
"when.not": "contains.system.process.cmdline: metricbeat.test",
},
}]
)
metricbeat = self.start_beat()
self.wait_until(
lambda: self.output_count(lambda x: x >= 1),
max_timeout=15)
metricbeat.kill_and_wait()
output = self.read_output(
required_fields=["@timestamp"],
)
assert len(output) >= 1
def test_include_fields(self):
"""
Check include_fields filtering action
"""
self.render_config_template(
modules=[{
"name": "system",
"metricsets": ["process"],
"period": "1s"
}],
processors=[{
"include_fields": {"fields": ["system.process.cpu", "system.process.memory"]},
}]
)
metricbeat = self.start_beat()
self.wait_until(
lambda: self.output_count(lambda x: x >= 1),
max_timeout=15)
metricbeat.kill_and_wait()
output = self.read_output(
required_fields=["@timestamp"],
)[0]
print(output)
for key in [
"system.process.cpu.start_time",
"system.process.cpu.total.pct",
"system.process.memory.size",
"system.process.memory.rss.bytes",
"system.process.memory.rss.pct"
]:
assert key in output
for key in [
"system.process.name",
"system.process.pid",
]:
assert key not in output
def test_multiple_actions(self):
"""
Check the result when configuring two actions: include_fields
and drop_fields.
"""
self.render_config_template(
modules=[{
"name": "system",
"metricsets": ["process"],
"period": "1s"
}],
processors=[{
"include_fields": {"fields": ["system.process", "process"]},
}, {
"drop_fields": {"fields": ["system.process.memory"]},
}]
)
metricbeat = self.start_beat()
self.wait_until(
lambda: self.output_count(lambda x: x >= 1),
max_timeout=15)
metricbeat.kill_and_wait()
output = self.read_output(
required_fields=["@timestamp"],
)[0]
for key in [
"system.process.cpu.start_time",
"system.process.cpu.total.pct",
"process.name",
"process.pid",
]:
assert key in output, "'%s' not found" % key
for key in [
"system.process.memory.size",
"system.process.memory.rss.bytes",
"system.process.memory.rss.pct"
]:
assert key not in output, "'%s' not expected but found" % key
def test_contradictory_multiple_actions(self):
"""
Check the behaviour of a contradictory multiple actions
"""
self.render_config_template(
modules=[{
"name": "system",
"metricsets": ["process"],
"period": "1s"
}],
processors=[{
"include_fields": {
"fields": ["system.process.memory.size", "proc.memory.rss.pct"],
},
}, {
"drop_fields": {
"fields": ["system.process.memory.size", "proc.memory.rss.pct"],
},
}]
)
metricbeat = self.start_beat()
self.wait_until(
lambda: self.output_count(lambda x: x >= 1),
max_timeout=15)
metricbeat.kill_and_wait()
output = self.read_output(
required_fields=["@timestamp"],
)[0]
for key in [
"system.process.memory.size",
"system.process.memory.rss",
"system.process.cpu.start_time",
"system.process.cpu.total.pct",
"system.process.name",
"system.process.pid",
"system.process.memory.rss.pct"
]:
assert key not in output
def test_rename_field(self):
self.render_config_template(
modules=[{
"name": "system",
"metricsets": ["cpu"],
"period": "1s"
}],
processors=[{
"rename": {
"fields": [{"from": "event.dataset", "to": "hello.world"}],
},
}]
)
proc = self.start_beat()
self.wait_until(lambda: self.output_lines() > 0)
proc.check_kill_and_wait()
output = self.read_output_json()
self.assertEqual(len(output), 1)
evt = output[0]
print(evt)
print(evt.keys())
assert "dataset" not in output[0]["event"]
assert "cpu" in output[0]["hello"]["world"]
|
17,619 | cc17a973b1fd7dffd86855d66ff63aab801e4fa6 | from typing import Union
from math import factorial
def prime_factors(number: int) -> dict:
"""
Takes an integer and returns every prime factor and their quantity
:param number: Integer
:return: dict of prime numbers as keys and their ammount as values
"""
f = {}
i = 2
while number > 1 and number >= i:
if number % i == 0:
if i not in f:
f[i] = 1
else:
f[i] += 1
number //= i
else:
i += 1
return f
def divisors(n):
'''return divisors of n'''
d = {1}
for i in range(2, int(pow(n,.5))+1):
if (n % i) == 0:
other = n // i
if other == i:
pair = {i}
else:
pair = {i,other}
d.update(pair)
return d
def prod(num_list: Union[list, tuple, set]) -> int:
product = 1
for i in num_list:
product *= i
return product
def binomial_coeficient(n, k):
"""
n over k
:return:
"""
return int(factorial(n)/(factorial(k)*factorial(n-k))) |
17,620 | da474df17dfb26eaa243df929c06f6b8e1d7397e | from os.path import join,basename
import pandas as pd
from snakemake.utils import validate
from glob import glob
from sklearn.model_selection import train_test_split
configfile: 'config/config.yml'
nifti_files=glob(os.path.join(config['input_dir'])+'/*.nii.gz')
x_train ,x_test = train_test_split(nifti_files,test_size=0.2)
print(x_train)
print(x_test)
model_fn=f"ntrees-{config['c3d']['model_params']['ntrees']}_tdepth-" \
f"{config['c3d']['model_params']['treedepth']}_patch-{config['c3d']['model_params']['patch']}_model.rf"
rule all:
input:
outfile_train=expand(join(config['output_dir'], 'train_data', basename('{train_fn}')), train_fn=x_train),
outfile_test=expand(join(config['output_dir'], 'test_data', basename('{test_fn}')), test_fn=x_test),
#landmark=expand(join(config['out_dir'], 'train_data_labels', os.path.basename('{ifile}').split('.nii')[0] + '_landmarks.nii.gz'), ifile=x_train)
#model=join(config['out_dir'], model_fn),
rule import_subj_test:
input:
test=expand('{test_fn}', test_fn=x_test),
output:
test_out=join(config['output_dir'], 'test_data', basename('{test_fn}')),
group: 'preproc'
shell: 'cp {input.test} {output.test_out}'
rule import_subj_train:
input:
train=expand('{train_fn}', train_fn=x_train),
output:
train_out=join(config['output_dir'], 'train_data', basename('{train_fn}')),
group: 'preproc'
shell: 'cp {input.train} {output.train_out}'
#rule modelTrain:
# input:
# touch=join(config['out_dir'], "train.done"),
# params:
# data = join(config['train_data'], '*.nii.gz'),
# labels = join(config['label_data'], '*.nii.gz'),
# output:
# model = join(config['out_dir'], 'myforest.rf'),
# resources:
# mem_mb = 20000
# shell:
# 'c4d -verbose {params.data} -tile w -popas ALLMRI {params.labels} -tile w -popas ALLSEG -rf-param-patch 2x2x2x0\
# -push ALLMRI -push ALLSEG -rf-param-treedepth 30 -rf-param-ntrees 50 -rf-train {output.model}'
# |
17,621 | d84921e5f9d1196917ef49d9216a07b53babfd6a |
class Truth_Table() :
def __init__(self,equation,operate,file_name) :
self.equation = equation
self.operate = operate
self.test_operate = {}
self.head_equation = {}
self.files = open(f"C:\\Users\\User\\Documents\\{file_name}.txt", "w")
#เป็นฟังก์ชั่นสำหรับการคำนวณค่าโดยดูว่าจะใช้ตัวoperationอะไร
def operation(self,op,data1,data2) :
if op == "+" :
if data1 == "0" and data2 == "0" :
ans = "0"
return ans
elif data1 == "0" and data2 == "1" :
ans = "1"
return ans
elif data1 == "1" and data2 == "0" :
ans = "1"
return ans
elif data1 == "1" and data2 == "1" :
ans = "1"
return ans
elif op == "&" :
if data1 == "0" and data2 == "0" :
ans = "0"
return ans
elif data1 == "0" and data2 == "1" :
ans = "0"
return ans
elif data1 == "1" and data2 == "0" :
ans = "0"
return ans
elif data1 == "1" and data2 == "1" :
ans = "1"
return ans
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def calculatetree(self,data) : #เป็นฟังก์ชั่นคำนวณ expression tree list
n = len(data)
NewList = []
equation_form = []
for i in range(n-1,-1,-1) :
arg = data[i]
if arg == '0' or arg == '1' : #เป็นเงื่อนไขเพื่อบอกว่าหากสมาชิกตัวสุดท้ายของข้อมูลเป็นตัวเลขให้นำค่านั้นใส่ในlistชั่วคราวที่ใช้ในการเก็บตัวเลขสำหรับคำนวณ
NewList.append(arg)
#if self.equation[i] not in '0,1' :
equation_form.append(self.equation[i])
if arg == '&' or arg == '+' :
ope = arg
x = NewList.pop(0)
y = NewList.pop(0)
str1 = equation_form.pop(0)
str2 = equation_form.pop(0)
complete_equation = '(' + str1 + ope + str2 + ')'
cal = self.operation(ope,x, y)
NewList.append(cal)
self.head_equation[complete_equation] = cal
equation_form.append(complete_equation)
elif arg == '!' :
z = NewList.pop(0)
Str = '(' + '!' + equation_form.pop(0) + ')'
if z == "0" :
z = "1"
else :
z = "0"
NewList.append(z)
equation_form.append(Str)
self.head_equation[Str] = z
elif arg == '' :
pass
#print(equation_form)
return NewList[0]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def Make_table(self,Equation) :
#self.Equation = Equation
side = len(self.operate)
show_head = self.set_head(self.equation)
self.side_operate = []
self.side_head = []
count = 0
print('#'*175)
print("_"*150)
self.files.write('#'*175)
self.files.write('\n')
self.files.write("_"*150)
self.files.write('\n')
for i in self.operate :
self.side_operate.append(len(i)+2)
print(' | ',end="")
print(i,end="")
self.files.write(' | ')
self.files.write(i)
count += 1
for i in show_head:
self.side_head.append(len(i)+2)
print(' | ',end="")
print(i,end="")
self.files.write(' | ')
self.files.write(i)
count += 1
self.line = sum(self.side_operate) + sum(self.side_head) + count
print(' |')
print(" "+"_"*self.line + "|")
self.files.write(' |')
self.files.write('\n')
self.files.write(" "+"_"*self.line + "|")
self.files.write('\n')
for number in range(2**side) :
test_digit = bin(number)[2:]
if len(test_digit) < side :
more_digit = side - len(test_digit)
test_digit = ('0'*more_digit) + test_digit
self.get_digit(test_digit)
val_equation = self.take_values(Equation)
output = self.calculatetree(val_equation)
self.show_table()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def get_digit(self,digit) :
for ope,Value in zip(self.operate,digit) :
self.test_operate[ope] = Value
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def take_values(self,equ) :
value_equation = []
for i in equ :
if i in self.operate :
value_equation.append(self.test_operate[i])
else :
value_equation.append(i)
return value_equation
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def show_table(self) :
index = 0
for i in self.test_operate :
Length = self.side_operate[index] - 2
find_mid = int((Length - 1) / 2)
if (Length - 1) % 2 != 0 :
left = int(((Length - 1) // 2) + 1)
right = int((Length - 1) // 2)
print(' | ',end="")
print((" "*left) + self.test_operate[i] + (" "*right),end="")
self.files.write(' | ')
self.files.write((" "*left) + self.test_operate[i] + (" "*right))
else :
left = find_mid
right = find_mid
print(' | ',end="")
print((" "*left) + self.test_operate[i] + (" "*right),end="")
self.files.write(' | ')
self.files.write((" "*left) + self.test_operate[i] + (" "*right))
index += 1
index = 0
for i in self.head_equation:
Length = self.side_head[index] - 2
find_mid = int((Length - 1) / 2)
if (Length - 1) % 2 != 0 :
left = int(((Length - 1) // 2) + 1)
right = int((Length - 1) // 2)
print(' | ',end="")
print((" "*left) + self.head_equation[i] + (" "*right),end="")
self.files.write(' | ')
self.files.write((" "*left) + self.head_equation[i] + (" "*right))
else :
left = find_mid
right = find_mid
print(' | ',end="")
print((" "*left) + self.head_equation[i] + (" "*right),end="")
self.files.write(' | ')
self.files.write((" "*left) + self.head_equation[i] + (" "*right))
index += 1
print(' |')
print(" "+"_"*self.line + "|")
self.files.write(' |')
self.files.write('\n')
self.files.write(" "+"_"*self.line + "|")
self.files.write('\n')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def set_head(self,equations) :
n = len(equations)
head = []
equation_form = []
for i in range(n-1,-1,-1) :
arg = equations[i]
if arg not in '&+!' and arg != '' : #เป็นเงื่อนไขเพื่อบอกว่าหากสมาชิกตัวสุดท้ายของข้อมูลเป็นตัวเลขให้นำค่านั้นใส่ในlistชั่วคราวที่ใช้ในการเก็บตัวเลขสำหรับคำนวณ
equation_form.append(arg)
if arg == '&' or arg == '+':
ope = arg
str1 = equation_form.pop(0)
str2 = equation_form.pop(0)
complete_equation = '(' + str1 + ope + str2 + ')'
equation_form.append(complete_equation)
head.append(complete_equation)
#หลังคำนวณเสร็จให้เก็บค่าที่คำนวณเเล้วในlistชั่วคราวอีกตัว
elif arg == '!' :
Str = '(' + '!' + equation_form.pop(0) + ')'
equation_form.append(Str)
head.append(Str)
elif arg == '' :
pass
return head
"""test = ['+', '!', '!', '&', '', '+', '', 'I0', 'I1', '', '', 'I1', 'I2', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '']
oper = ['I0','I1','I2']
tabel = Truth_Table(test,oper)
tabel.Make_table(test)""" |
17,622 | c8cbede89b7b56640968294c6b1929f68386b390 | #! /usr/bin/env python3
# -*- coding: UTF-8 -*-
#from pexpect import spawn, TIMEOUT
import time
import re as sre
import os
if os.name!='nt':
from Session import Session as CSession
else:
from WinSession import WinSession as CSession
reSessionClosed =sre.compile ('Connection closed by foreign host',sre.M)
class Cisco(CSession):
attrs={}
sutname='SUT'
logger= None
loginstep=[]
argvs=[]
kwargvs={}
seslog=None
output=''
Connect2SUTDone=False
lastActionTime=None
fInteractionMode=False
InteractionBuffer=''
InteractionMatch=''
fExpecting=False
fSending = False
def __init__(self,name,attrs={},logger=None,logpath='./'):
self.sutname=name
self.attrs=attrs
command = attrs.get('CMD')
if not attrs.get('TIMEOUT'):
self.attrs.update({'TIMEOUT':int(30)})
else:
self.attrs.update({'TIMEOUT':int(attrs.get('TIMEOUT'))})
#spawn.__init__(self, command, args, timeout, maxread, searchwindowsize, logfile, cwd, env, ignore_sighup)
import os
log = os.path.abspath(logpath)
log= '%s%s%s'%(log,os.path.sep,'%s.log'%name)
if self.attrs.get('LOGIN'):
from common import csvstring2array
self.loginstep= csvstring2array(self.attrs['LOGIN'])
self.seslog = open(log, "wb")
CSession.__init__(self, name, attrs,logger, logpath)
def Login2SUT(self):
exp = self.attrs['EXP']
if os.name=='nt':
exp = [self.attrs['EXP']]
if self.attrs.get('EXP'):
self.expect(exp, float(self.attrs['TIMEOUT']))
for step in self.loginstep:
if os.name !='nt':
self.send(step[0])
self.send('\r\n')
else:
self.write(step[0])
self.write('\r\n')
if os.name=='nt':
exp = [step[1]]
if len(step)>2:
self.expect(exp,int(step[2]))
else:
self.expect(exp,float(self.attrs['TIMEOUT']))
if __name__=='__main__':
cmd ='telnet 10.245.3.17'
attr={'TIMEOUT': 10,'LOGIN': 'Calix,E7-SIT-S3-2960-2>,40\nenable,word:,40\nadmin,E7-SIT-S3-2960-2#,40','CMD':cmd, 'EXP':'.*'}
s = Cisco('sut_local',attr)
command='SendLine("abcddddddddddddd",AntiIdle=True)'
s.SendLine('command', True, False)
(ActionIsFunction,action,arg,kwarg) = s.ParseCmdInAction(command)
s.CallFun(action, arg, kwarg)
s.EndSession()
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!CASE End!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!') |
17,623 | 32923b6a6f20467f56366f1d345bb5ce5a312963 | #############################
#README
#
#there are two functions:
#1. scrapping channels (and their videos from the last 3 months from current date) based on search phrases you specify
#2. exporting a list of the gathered videos based on more criteria you define (e.g. median views, video post freq)
#
# to connect to the program
# 1. if you're on windows to connect download and install putty: https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html
# if you have another os, let me know, I'll help you connect
# 2. run putty and enter: ec2-18-218-46-91.us-east-2.compute.amazonaws.com into the host name box
# 3. click on the + sign next to SSH on the bottom left
# 4. click on Auth
# 5. click Browse and load in dw_key.ppk
# 6. click open
# 7. type ubuntu and the press enter
#to run the srapper:
#1. load search_phrases.txt with keyphrases you want the program to search against to find new channels:
#2. to delete the current list type: rm search_phrases.txt
#3. to edit the blank list type: vim search_phrases.txt
#4. copy your list (deliminated by new lines) and right click anywhere to paste
#5. to save the list type the following then press enter: :wq
#6. type python scrape.py and press enter to run the scrapper
#7. as it finds new channels it'll tell you for channels it finds then channel name, country, median views,
# number of videos in the last 3 months and number of videos in the last 3 months with one of your search phrases in the title or description
#if you want you can include a list of channels you don't want the program to waste time scrapping, add it to
# skip_channel_list.txt
#
# - if you run out of keys with gather quota, I can load in more, just ask me
# - if google bans you for abuse, just ask me, we'll just need to delete all the keys and add in a new list created by unlinked google accounts
#to run the exporter:
#1. load into check_phrases.txt key phrases separated by new lines for it to check how many videos have one of those key phrases
#2. load into skip_channel_list.txt channels you don't want it to export
#3. to edit the remaining criteria type: vim export.py
#4. at the top are the search queries e.g. (Channel.country == 'US') | (Channel.country == 'Not Found') & (Channel.videos_last_3_months > 10)
# & (Channel.median_views > 1000) & (Channel.general_makeup_phrase_videos > 5)
#5. to run the program type and press enter: python export.py
#6. the results will be saved in extract.txt
#
# to download extract.txt to your computer:
# 1. download pscp.exe and place in your putty installation folder e.g. (C:\Program Files\PuTTY)
# https://the.earth.li/~sgtatham/putty/latest/w64/pscp.exe
# 2. run cmd (press windows, type cmd, press enter)
# 3. type and press enter: cd C:\Program Files\PuTTY (or your installation folder)
# 4. type and press enter: pscp.exe -i dw_key.ppk ubuntu@ec2-18-218-46-91.us-east-2.compute.amazonaws.com:/home/ubuntu/extract.txt C:\users\bence\documents\output.txt
# (change the 2nd folder to a folder on your computer)
#
################################
US_only = True
###########################
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.tools import argparser
#####
######
import datetime
from tinydb import TinyDB, Query
DEVELOPER_KEY = open('current_key.txt').read().split()[0] #open('key2.txt').read().split()[0] #'AIzaSyCNwpK3VXG5aubU52di5RznPP1EJBiF0FI'
print DEVELOPER_KEY
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
skip_channel_list = open('skip_channel_list.txt').read().split('\n')
'''
import sys
def my_except_hook(exctype, value, traceback):
print exctype
print value
print traceback
current_key = open("current_key.txt").read().split()[0]
print current_key
keylist = open('api_keylist.txt').read().split('\n')
next = False
found = False
for key in keylist:
if len(key)<3:
continue
if next:
open('current_key.txt','w').write(key)
print 'found'
global DEVELOPER_KEY
DEVELOPER_KEY = key
print DEVELOPER_KEY
found = True
break
if key == current_key:
next = True
if not found:
open('current_key.txt','w').write(keylist[0])
sys.excepthook = my_except_hook
start()
'''
def get_channel_videos_before(channel_id,beforeDate):
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
developerKey=DEVELOPER_KEY)
videoId_list={}
videos = []
if len(channel_id) < 20:
response = youtube.channels().list(
part = 'contentDetails',
forUsername = channel_id,
).execute()
else:
response = youtube.channels().list(
part = 'contentDetails',
id = channel_id,
).execute()
playlist_id = response['items'][0]['contentDetails']['relatedPlaylists']['uploads']
response = youtube.playlistItems().list(
part = 'snippet',
playlistId = playlist_id,
).execute()
#print response
no_makeup_in_a_row=0
low_views_in_a_row=0
general_makeup_phrases = open('general_makeup_phrases.txt').read().split('\n')
for video in response['items']:
title = video['snippet']['title']
videoId = video['snippet']['resourceId']['videoId']
publishDate = video['snippet']['publishedAt']
description = video['snippet']['description']
phrase_found=False
for keyphrase in general_makeup_phrases:
if keyphrase == '':
continue
if keyphrase.lower() in title.lower() \
or keyphrase.lower() in description.lower():
print '"'+keyphrase+'" found!'
phrase_found=True
no_makeup_in_a_row=0
break
if not phrase_found:
no_makeup_in_a_row+=1
#print no_makeup_in_a_row
if no_makeup_in_a_row > 10:
print "This isn't a makeup channel, stop gathering..."
no_makeup_in_a_row=0
return videos
break
print 'Gathering stats from '+title[:30]+'..'+\
' published on '+publishDate.split('T')[0]+'...'
video_stats = get_video_stats(videoId)
try:
views = int(video_stats[0])
except:
views = 0
print 'views: '+str(views)
if views<400:
low_views_in_a_row+=1
else:
low_views_in_a_row=0
if low_views_in_a_row>5:
print "This channel has low view counts, stop gathering..."
return videos
break
comments = video_stats[1]
likes = video_stats[2]
dislikes = video_stats[3]
channel_id = video_stats[4]
channel_name = video_stats[5]
country = get_country(channel_id)
if not isBefore(beforeDate,publishDate):
return videos
videoEntry = {'video_id':videoId,'publish_date':publishDate,'title':title\
,'description':description,'views':views,'comments':comments\
,'likes':likes,'dislikes':dislikes,'channel_id':channel_id\
,'channel_name':channel_name, 'country':country}
videos.append(videoEntry)
while response:
if 'nextPageToken' in response:
response = youtube.playlistItems().list(
part = 'snippet',
playlistId = playlist_id,
pageToken = response['nextPageToken']
).execute()
for video in response['items']:
title = video['snippet']['title']
videoId = video['snippet']['resourceId']['videoId']
publishDate = video['snippet']['publishedAt']
description = video['snippet']['description']
phrase_found=False
for keyphrase in general_makeup_phrases:
if keyphrase == '':
continue
if keyphrase.lower() in title.lower() \
or keyphrase.lower() in description.lower():
print '"'+keyphrase+'" found!'
phrase_found=True
no_makeup_in_a_row=0
break
if not phrase_found:
no_makeup_in_a_row+=1
#print no_makeup_in_a_row
if no_makeup_in_a_row > 12:
print "This isn't a makeup channel, stop gathering channel"
no_makeup_in_a_row=0
return videos
break
print 'Gathering video stats from '+title[:30]+\
' published on '+publishDate.split('T')[0]+'...'
video_stats = get_video_stats(videoId)
try:
views = int(video_stats[0])
except:
views = 0
print 'views: '+str(views)
if views<400:
low_views_in_a_row+=1
print 'only '+str(views) + ' views '+str(low_views_in_a_row)
else:
low_views_in_a_row=0
if low_views_in_a_row>5:
print "This channel has low view counts, stop gathering..."
no_makeup_in_a_row=0
return videos
break
comments = video_stats[1]
likes = video_stats[2]
dislikes = video_stats[3]
channel_id = video_stats[4]
channel_name = video_stats[5]
country = get_country(channel_id)
if not isBefore(beforeDate,publishDate):
return videos
videoEntry = {'video_id':videoId,'publish_date':publishDate,'title':title\
,'description':description,'views':views,'comments':comments\
,'likes':likes,'dislikes':dislikes,'channel_id':channel_id\
,'channel_name':channel_name, 'country':country}
videos.append(videoEntry)
else:
break
return videos
def get_country(channel_id):
    """Look up the country code of a YouTube channel.

    Returns the channel snippet's country, or the sentinel string
    'Not Found' when the response carries no country field (callers
    elsewhere compare against that exact string).
    """
    api = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
                developerKey=DEVELOPER_KEY)
    reply = api.channels().list(part='snippet', id=channel_id).execute()
    try:
        return reply['items'][0]['snippet']['country']
    except:
        # Missing key / empty item list both mean "unknown country".
        return 'Not Found'
def get_video_stats(video_id):
    """Fetch per-video statistics plus owning-channel identity.

    Returns a 6-element list:
    [views, comments_count, likes, dislikes, channel_id, channel_name].
    NOTE(review): on success the counts are the raw API values (strings);
    on failure they fall back to int 0 / empty string -- callers should
    normalise before comparing. TODO confirm.
    """
    youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
                        developerKey=DEVELOPER_KEY)
    # First request: numeric statistics.
    try:
        response = youtube.videos().list(
                part = 'statistics',
                id = video_id,
                ).execute()
        #print response
        stats = response['items'][0]['statistics']
        views = stats['viewCount']
        comments_count = stats['commentCount']
        # Like/dislike counts can be hidden by the uploader.
        try:
            likes = stats['likeCount']
            dislikes = stats['dislikeCount']
        except:
            likes = 0
            dislikes = 0
    except Exception as e:
        # Any API/parse failure: report zeros rather than aborting the crawl.
        #raw_input(e)
        print e
        views = 0
        comments_count=0
        likes = 0
        dislikes = 0
    # Second request: snippet, for the owning channel's id and title.
    try:
        response = youtube.videos().list(
                part = 'snippet',
                id = video_id,
                ).execute()
        #print response
        snippet = response['items'][0]['snippet']
        channel_id = snippet['channelId']
        channel_name = snippet['channelTitle']
    except Exception as e:
        channel_id = ''
        channel_name = ''
        print e
        #raw_input(e)
    return [views,comments_count,likes,dislikes,channel_id,channel_name]
def isBefore(date1, date2):
    """Return True if date1 is strictly earlier than date2.

    Both arguments are 'YYYY-MM-DD' strings, optionally followed by
    'T...' (ISO timestamps); only the date part is compared.

    Bug fix: the previous elif chain compared day-of-month whenever the
    months matched without also requiring equal years, so for example
    isBefore('2020-05-01', '2019-05-02') wrongly returned True.
    Comparing (year, month, day) tuples handles every case correctly.
    """
    ymd1 = tuple(int(part) for part in date1.split('T')[0].split('-'))
    ymd2 = tuple(int(part) for part in date2.split('T')[0].split('-'))
    return ymd1 < ymd2
def youtube_search(q, max_results=10, order="relevance", token=None, location=None, location_radius=None):
    """Run one page of a YouTube keyword search.

    Returns a (next_page_token, videos) tuple, where next_page_token is
    the API's nextPageToken or the sentinel string "last_page" when no
    further page exists, and videos contains only youtube#video items.
    """
    api = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
                developerKey=DEVELOPER_KEY)
    search_response = api.search().list(
        q=q,
        type="video",
        pageToken=token,
        order=order,
        part="id,snippet",
        maxResults=max_results,
        location=location,
        locationRadius=location_radius,
    ).execute()
    videos = [item for item in search_response.get("items", [])
              if item["id"]["kind"] == "youtube#video"]
    try:
        return (search_response["nextPageToken"], videos)
    except Exception:
        return ("last_page", videos)
def geo_query(video_id):
    """Fetch snippet, recordingDetails and statistics for one video id."""
    api = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
                developerKey=DEVELOPER_KEY)
    return api.videos().list(
        id=video_id,
        part='snippet, recordingDetails, statistics'
    ).execute()
def get_channel_id(username):
    """Resolve a legacy YouTube username to its channel id.

    Strings of 20+ characters are assumed to already be channel ids and
    are returned unchanged; shorter ones are looked up via the API.
    """
    api = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
                developerKey=DEVELOPER_KEY)
    if len(username) >= 20:
        return username
    lookup = api.channels().list(
        part='contentDetails',
        forUsername=username,
    ).execute()
    return lookup['items'][0]['id']
def get_channel_videos_before_old(channel_id,beforeDate):
    """Old variant: collect a channel's uploads newer than *beforeDate*.

    Walks the channel's "uploads" playlist newest-first and returns a
    dict keyed by video id with small metadata entries. Iteration stops
    at the first video published on or before *beforeDate*.
    NOTE(review): superseded by get_channel_videos_before (which returns
    a list and gathers stats); kept here with its debug file dumps.
    """
    youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
                        developerKey=DEVELOPER_KEY)
    videoId_list={}
    # Short strings are legacy usernames; long ones are channel ids.
    if len(channel_id) < 20:
        response = youtube.channels().list(
                part = 'contentDetails',
                forUsername = channel_id,
                ).execute()
    else:
        response = youtube.channels().list(
                part = 'contentDetails',
                id = channel_id,
                ).execute()
    # Every channel has an auto-generated "uploads" playlist.
    playlist_id = response['items'][0]['contentDetails']['relatedPlaylists']['uploads']
    print playlist_id
    response = youtube.playlistItems().list(
            part = 'snippet',
            playlistId = playlist_id,
            ).execute()
    #print response
    # First page of results (debug dumps go to responsevideo.txt/response.txt).
    for video in response['items']:
        print video
        open('responsevideo.txt','w').write(str(video))
        title = video['snippet']['title']
        videoId = video['snippet']['resourceId']['videoId']
        publishDate = video['snippet']['publishedAt']
        views = ''
        print title, videoId, publishDate, views
        # Stop as soon as a video is older than the cutoff.
        if not isBefore(beforeDate,publishDate):
            return videoId_list
        videoEntry = {'video_id':videoId,'publish_date':publishDate,'title':title}
        videoId_list[videoId] = videoEntry
        open('response.txt','w').write(str(video))
    # Follow nextPageToken until the playlist is exhausted.
    while response:
        if 'nextPageToken' in response:
            response = youtube.playlistItems().list(
                part = 'snippet',
                playlistId = playlist_id,
                pageToken = response['nextPageToken']
                ).execute()
            for video in response['items']:
                title = video['snippet']['title']
                videoId = video['snippet']['resourceId']['videoId']
                publishDate = video['snippet']['publishedAt']
                description = video['snippet']['description']
                views = ''
                if not isBefore(beforeDate,publishDate):
                    return videoId_list
                videoEntry = {'video_id':videoId,'publish_date':publishDate,'title':title\
                              ,'description':description}
                videoId_list[videoId] = videoEntry
                open('response.txt','w').write(str(video))
        else:
            break
    return videoId_list
def store_channel_name(channel_id):
    """Fetch a channel's title and country and insert them into video_db.

    NOTE(review): always inserts a new record; the commented-out update
    below suggests dedup against existing rows was intended but never
    enabled -- confirm before relying on uniqueness.
    """
    ###get channel name and country
    youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
                        developerKey=DEVELOPER_KEY)
    response = youtube.channels().list(
            part = 'snippet',
            id = channel_id,
            ).execute()
    channel_name=response['items'][0]['snippet']['title']
    print channel_name
    #print response['items'][0]['snippet']
    # Country is optional in the snippet; fall back to empty string.
    try:
        country = response['items'][0]['snippet']['country']
    except:
        country=''
    print country
    print response
    # Debug dump of the raw API response.
    open('channelnametest.txt','w').write(str(response))
    #raw_input()
    ####store in db
    db = TinyDB('video_db.json')
    Channel = Query()
    db.insert({'channel_id': channel_id,'country': country,'channel_name': channel_name})
    #db.update({'channel_name':channel_name,'country':country},Channel.channel_id == channel_id)
    # Read-back of the inserted record (result unused; was for debugging).
    channelDict = db.search(Channel.channel_id == channel_id)[0]
    #raw_input(str(channelDict))
def store_video_stats(video_id, channel_id):
    """Refresh one video's stats inside its channel record in video_db.

    Looks up the channel's 'videos' mapping, overwrites the entry for
    *video_id* with fresh view/comment/like/dislike counts, and writes
    the mapping back. Raises IndexError if the channel (or KeyError if
    the video) is not already present in the DB.
    """
    youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
                        developerKey=DEVELOPER_KEY)
    try:
        response = youtube.videos().list(
                part = 'statistics',
                id = video_id,
                ).execute()
        stats = response['items'][0]['statistics']
        # Debug dump of the raw item.
        open('stats.txt','w').write(str(response['items'][0]))
        views = stats['viewCount']
        comments_count = stats['commentCount']
        # Like/dislike counts can be hidden by the uploader.
        try:
            likes = stats['likeCount']
            dislikes = stats['dislikeCount']
        except:
            likes = 0
            dislikes = 0
    except:
        # Any API failure: store zeros rather than aborting.
        views = 0
        comments_count=0
        likes = 0
        dislikes = 0
    db = TinyDB('video_db.json')
    Channel = Query()
    videoDict = db.search(Channel.channel_id == channel_id)[0]['videos']
    print videoDict[video_id]
    videoDict[video_id]['views']=views
    videoDict[video_id]['comments_count']=comments_count
    videoDict[video_id]['likes']=likes
    videoDict[video_id]['dislikes']=dislikes
    db.update({'videos':videoDict},Channel.channel_id == channel_id)
def get_comments(video_id):
    """Collect top-level comment texts for a video, paging by relevance.

    On quota errors, rotates DEVELOPER_KEY to the next entry of
    DEVELOPER_KEYS (module globals) and retries recursively.
    NOTE(review): if the first request fails with an unrecognised error,
    'response' is never bound and the while loop below raises NameError;
    also the inner 'except: continue' retries the same page forever if
    the error persists -- both worth confirming/fixing.
    """
    youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
                        developerKey=DEVELOPER_KEY)
    #get views
    comment_list=[]
    try:
        response = youtube.commentThreads().list(
                part = 'snippet',
                videoId = video_id,
                maxResults = 100, # Only take top 100 comments...
                order = 'relevance', #... ranked on relevance
                textFormat = 'plainText',
                ).execute()
        for item in response['items']:
            comment_list.append(item['snippet']\
                                    ['topLevelComment']['snippet']\
                                    ['textDisplay'])
    except Exception as e:
        # Comments disabled: return whatever we have (empty list).
        if 'has disabled' in str(e):
            return comment_list
        elif 'you have exceeded' in str(e) or 'Could not automatically' in str(e) \
                or 'The quota will' in str(e):
            # Quota exhausted: advance to the key after the current one
            # in DEVELOPER_KEYS and retry the whole fetch.
            isNext=False
            global DEVELOPER_KEY
            for key in DEVELOPER_KEYS.split('\n'):
                if isNext:
                    DEVELOPER_KEY=key
                    break
                if DEVELOPER_KEY == key:
                    isNext=True
            print DEVELOPER_KEY
            return get_comments(video_id)
        else:
            print str(e)
            #raw_input(str(e))
    # Follow nextPageToken until the thread list is exhausted.
    while response:
        if 'nextPageToken' in response:
            try:
                response = youtube.commentThreads().list(
                        part = 'snippet',
                        videoId = video_id,
                        maxResults = 100, # Only take top 100 comments...
                        order = 'relevance', #... ranked on relevance
                        textFormat = 'plainText',
                        pageToken = response['nextPageToken']
                        ).execute()
                for item in response['items']:
                    comment_list.append(item['snippet']\
                                            ['topLevelComment']['snippet']\
                                            ['textDisplay'])
            except:
                continue
        else:
            break
    return comment_list
######
######
#####
######
import sys
#sys.path.append('/home/spnichol/Dropbox/youtube_tutorial/')
#from youtube_videos import youtube_search
#from youtube_videos import get_video_stats
##from youtube_videos import get_country
#from youtube_videos import get_channel_videos_before
#import pandas as pd
import json
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.tools import argparser
import datetime
from tinydb import TinyDB, Query
def median(lst):
    """Return the median of *lst*, or None for an empty list.

    An odd-length list yields its middle element unchanged; an
    even-length list yields the mean of the two middle values as a
    float (division by 2.0).
    """
    if not lst:
        return None
    ordered = sorted(lst)
    mid = len(ordered) // 2
    if len(ordered) % 2:
        return ordered[mid]
    return (ordered[mid - 1] + ordered[mid]) / 2.0
def get_channel_id(username):
    """Resolve a legacy username to a channel id.

    Strings of 20+ characters are assumed to already be channel ids and
    are returned unchanged.
    NOTE(review): this redefines the identical get_channel_id declared
    earlier in this file; the later definition wins at import time.
    """
    youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
                        developerKey=DEVELOPER_KEY)
    if len(username) < 20:
        response = youtube.channels().list(
                part = 'contentDetails',
                forUsername = username,
                ).execute()
        channel_id = response['items'][0]['id']
        return channel_id
    else:
        return username
def isBefore(date1, date2):
    """Return True if date1 is strictly earlier than date2.

    Both arguments are 'YYYY-MM-DD' strings, optionally followed by
    'T...' (ISO timestamps); only the date part is compared. This is a
    redefinition of the isBefore declared earlier in the file.

    Bug fix: the previous elif chain compared day-of-month whenever the
    months matched without also requiring equal years, so for example
    isBefore('2020-05-01', '2019-05-02') wrongly returned True.
    Comparing (year, month, day) tuples handles every case correctly.
    """
    ymd1 = tuple(int(part) for part in date1.split('T')[0].split('-'))
    ymd2 = tuple(int(part) for part in date2.split('T')[0].split('-'))
    return ymd1 < ymd2
def grab_videos(keyword, token=None):
    """Fetch one page of search results for *keyword*.

    Returns [next_page_token, videos] on success, or the string 'error'
    when the search result tuple could not be unpacked.
    """
    res = youtube_search(keyword, token=token)
    try:
        token = res[0]
    except Exception as e:
        print e
        #raw_input()
        return 'error'
    videos = res[1]
    print "found " + str(len(videos)) + ' videos'
    return [token,videos]
#test = youtube_search("spinners")
def start():
    """Run one full scraping pass over every phrase in search_phrases.txt.

    For each phrase, pages through YouTube search results looking for
    small, recently-active channels: recent uploads, >500 views, not in
    the skip list, and (when US_only is set) US-based. Promising videos
    and channel summaries are stored in TinyDB files; everything
    inspected is recorded in checked_video_db so reruns can skip it.

    Relies on module globals defined elsewhere in this file:
    skip_to_phrase (resume point across restarts), US_only and
    skip_channel_list -- TODO confirm both are set before calling.
    """
    global skip_to_phrase
    first_token=True
    # One search phrase per line drives the whole crawl.
    key_phrases = open('search_phrases.txt').read().split('\n')
    print_text = ''
    video_db = TinyDB('scrapped_videos.json')
    channel_db = TinyDB('scrapped_channels.json')
    low_view_video_db = TinyDB('low_view_video_db.json')
    # Roughly 3 months ago (channel-activity window) and roughly 18
    # months ago (oldest video considered at all).
    before_date = (datetime.date.today() - datetime.timedelta(3*365/12)).isoformat()
    search_before_date = (datetime.date.today() - datetime.timedelta(18*365/12)).isoformat()
    foreigner_video_db = TinyDB('foreigner_video_db.json')
    checked_video_db = TinyDB('checked_video_db.json')
    #db.update({'channel_name':channel_name,'country':country},Channel.channel_id == channel_id)
    for key_phrase in key_phrases:
        if key_phrase == '':
            continue
        # Resume support: fast-forward past phrases until we reach the
        # one recorded in skip_to_phrase, then resume AFTER it.
        if not skip_to_phrase == '':
            if key_phrase != skip_to_phrase:
                continue
            else:
                skip_to_phrase=''
                continue
        skip_to_phrase = key_phrase
        print '_______________________________'
        print '_______________________________'
        print '_______________________________'
        print '_______________________________'
        print 'Current search phrase is: '+key_phrase
        print '_______________________________'
        print '_______________________________'
        print '_______________________________'
        print '_______________________________'
        first_token = False
        ####loop through all results
        page_count=0
        count=0
        token = None
        not_found_in_a_row=0
        found_channel = False
        while token != "last_page":
            # Give up on this phrase after too many pages with no new channel.
            if not found_channel:
                not_found_in_a_row+=1
                if not_found_in_a_row == 5:
                    print '_______________________________'
                    print 'Current search phrase is: '+key_phrase
                    print '_______________________________'
                if not_found_in_a_row>10:
                    break
            else:
                not_found_in_a_row=0
            page_count+=1
            if page_count > 50:
                break
            #grab 10 videos
            results = grab_videos(key_phrase, token=token)
            if results == 'error':
                continue
            ten_videos = results[1]
            token = results[0]
            ##grab video stats of each video
            for video in ten_videos:
                video_id = video['id']['videoId']
                ##skip if video_id already gathered or checked
                Video = Query()
                #if checked:
                try:
                    checked_video_db.search(Video.video_id == video_id)[0]
                    print 'This video was already checked, skip it...'
                    continue
                except:
                    pass
                #if gathered:
                try:
                    video_db.search(Video.video_id == video_id)[0]
                    print 'This video was already gathered, skip it...'
                    continue
                except:
                    pass
                try:
                    low_view_video_db.search(Video.video_id == video_id)[0]
                    checked_video_db.insert({'video_id': video_id})
                    print 'This video was checked, skip it...'
                    continue
                except:
                    pass
                # Manual blacklist file (substring match on the raw file).
                if video_id in open('video_skiplist.txt').read():
                    continue
                publish_date = video['snippet']['publishedAt']
                title = video['snippet']['title']
                description = video['snippet']['description']
                ##grab the remaining stats only if the
                ##publish date is <3 months ago
                if isBefore(search_before_date,publish_date):
                    video_stats = get_video_stats(video_id)
                    views = video_stats[0]
                    comments = video_stats[1]
                    likes = video_stats[2]
                    dislikes = video_stats[3]
                    channel_id = video_stats[4]
                    channel_name = video_stats[5]
                    if channel_id in skip_channel_list:
                        checked_video_db.insert({'video_id': video_id})
                        print channel_name + ' is in the skip channel list, skip it...'
                        continue
                    ##skip if the channel name already gathered
                    Channel=Query()
                    try:
                        video_db.search(Video.channel_id == channel_id)[0]
                        checked_video_db.insert({'video_id': video_id})
                        print channel_name + ' was already gathered, skip it...'
                        continue
                    except:
                        pass
                    #print "This channel was already gathered, skip this channel..."
                    ##skip if view count > 500
                    # NOTE(review): get_video_stats returns viewCount as a
                    # string on success; on Python 2 a str compares greater
                    # than any int, so this threshold may never reject a
                    # successfully-fetched video -- confirm.
                    if views>500:
                        country = get_country(channel_id)
                    else:
                        checked_video_db.insert({'video_id': video_id})
                        print "Video doesn't have enough views, skip it..."
                        continue
                else:
                    checked_video_db.insert({'video_id': video_id})
                    print "Video is more then a year old, skip it..."
                    continue
                #skip non US or not not found if us only is specified
                if US_only:
                    if country <> 'US':
                        if country <> 'Not Found':
                            print country+' Foreigner!!!!!!! You shall not Pass!!!!!!!!!!!!!!!!'
                            print ' saved to foreigner video db...'
                            checked_video_db.insert({'video_id': video_id})
                            foreigner_video_db.insert({'video_id': video_id,'publish_date': publish_date,\
                                            'title': title, 'views': views,\
                                            'comments': comments,'likes': likes, \
                                            'dislikes': dislikes, 'channel_id': channel_id,\
                                            'channel_name': channel_name, 'country': country})
                            continue
                ###append the video to the list of all videos
                print '_______________________________'
                print channel_name,views,publish_date.split('T')[0],country,title
                print_text+= video_id+'\t'+\
                             publish_date+'\t'+\
                             title+'\n'
                video_db.insert({'video_id': video_id,'publish_date': publish_date,\
                                'title': title, 'views': views,\
                                'comments': comments,'likes': likes, \
                                'dislikes': dislikes, 'channel_id': channel_id,\
                                'channel_name': channel_name, 'country': country})
                print 'Video added to video db...'
                print 'New Channel Found!!!!!!!!!!\n'
                print channel_name+'\n'
                found_channel=True
                ###get channel stats and last 90 day video stats
                print 'Gathering videos from '+channel_name+' from the last 3 months...'
                videos = get_channel_videos_before(channel_id,before_date)
                #get median and keyphrase data
                views_list = []
                comments_list = []
                likes_list = []
                search_phrases = open('search_phrases.txt').read().split('\n')
                general_makeup_phrases = open('general_makeup_phrases.txt').read().split('\n')
                cheap_phrases = open('cheap_phrases.txt').read().split('\n')
                high_end_phrases = open('high_end_phrases.txt').read().split('\n')
                search_phrase_videos = 0
                general_makeup_phrase_videos = 0
                cheap_phrase_videos = 0
                high_end_phrase_videos = 0
                total_search_phrases = 0
                total_general_makeup_phrases = 0
                total_cheap_phrases = 0
                total_high_end_phrases = 0
                # Per-video keyword tally: X_phrase_videos counts videos
                # containing at least one phrase from list X.
                for video in videos:
                    #print str(video)
                    views_list.append(int(video['views']))
                    comments_list.append(int(video['comments']))
                    likes_list.append(int(video['likes']))
                    phrase_found=False
                    for keyphrase in search_phrases:
                        if keyphrase == '':
                            continue
                        if keyphrase.lower() in video['title'].lower() \
                           or keyphrase.lower() in video['description'].lower():
                            print '"'+keyphrase+'" found!'
                            if not phrase_found:
                                search_phrase_videos+=1
                                # NOTE(review): '+1' has no effect; sibling
                                # loops use '+=1' -- likely a typo, so
                                # total_search_phrases stays 0.
                                total_search_phrases+1
                            phrase_found=True
                    phrase_found=False
                    for keyphrase in general_makeup_phrases:
                        if keyphrase == '':
                            continue
                        if keyphrase.lower() in video['title'].lower() \
                           or keyphrase.lower() in video['description'].lower():
                            print '"'+keyphrase+'" found!'
                            if not phrase_found:
                                general_makeup_phrase_videos+=1
                                total_general_makeup_phrases+=1
                            phrase_found=True
                    phrase_found=False
                    for keyphrase in cheap_phrases:
                        if keyphrase == '':
                            continue
                        if keyphrase.lower() in video['title'].lower() \
                           or keyphrase.lower() in video['description'].lower():
                            print '"'+keyphrase+'" found!'
                            if not phrase_found:
                                cheap_phrase_videos+=1
                                total_cheap_phrases+=1
                            phrase_found=True
                    phrase_found=False
                    for keyphrase in high_end_phrases:
                        if keyphrase == '':
                            continue
                        # NOTE(review): description is checked without
                        # .lower() here, unlike the three loops above --
                        # probably an oversight.
                        if keyphrase.lower() in video['title'].lower() \
                           or keyphrase.lower() in video['description']:
                            print '"'+keyphrase+'" found!'
                            if not phrase_found:
                                high_end_phrase_videos+=1
                                total_high_end_phrases+=1
                            phrase_found=True
                median_views = median(views_list)
                median_likes = median(likes_list)
                median_comments = median(comments_list)
                channel_db.insert({'channel_id':channel_id, 'channel_name':channel_name,\
                                   'median_views':median_views, 'median_likes':median_likes,\
                                   'median_comments':median_comments, 'general_makeup_phrase_videos':general_makeup_phrase_videos,\
                                   'cheap_phrase_videos':cheap_phrase_videos, 'high_end_phrase_videos':high_end_phrase_videos,\
                                   'country':country, 'videos':videos,'videos_last_3_months':len(videos),\
                                   'search_phrase_videos':search_phrase_videos,\
                                   'total_search_phrases':total_search_phrases,\
                                   'total_general_makeup_phrases':total_general_makeup_phrases,\
                                   'total_cheap_phrases':total_cheap_phrases,\
                                   'total_high_end_phrases':total_high_end_phrases})
                checked_video_db.insert({'video_id': video_id})
                print 'Channel added to channel DB...\n'
                print '_______________________________'
                print 'Current search phrase is: '+key_phrase
                print '_______________________________'
                #raw_input()
                print '______________________________'
                print channel_name+', '+country+', median views:'+str(median_views)+', videos last 3 months: '+str(len(videos))+\
                      ' , makeup videos: '+str(general_makeup_phrase_videos)
                print '______________________________'
    # Persist the tab-separated summary of everything gathered this pass.
    open('print_text.txt','w').write(print_text.encode('utf8'))
# Top-level driver: keep restarting start() and rotate API keys when the
# current one runs out of quota. Gives up after 25 failed attempts.
skip_to_phrase = ''
error_count= 0
not_found_in_a_row=0
while 1:
    error_count+=1
    if error_count>25:
        print 'OUT OF SCRAPE QUOTA'
        break
    try:
        start()
    except Exception as e:
        print str(e)
        #raw_input()
        # NOTE(review): KeyboardInterrupt derives from BaseException, not
        # Exception, so this branch likely never sees Ctrl-C -- confirm.
        if 'Keyboard' in str(e):
            break
        # On any other failure, advance to the key AFTER current_key in
        # api_keylist.txt and record it in current_key.txt.
        current_key = open("current_key.txt").read().split()[0]
        print current_key
        keylist = open('api_keylist.txt').read().split('\n')
        # NOTE(review): 'next' shadows the builtin of the same name.
        next = False
        found = False
        for key in keylist:
            if len(key)<3:
                continue
            if next:
                open('current_key.txt','w').write(key)
                #print 'found'
                # NOTE(review): 'global' at module level is a no-op.
                global DEVELOPER_KEY
                DEVELOPER_KEY = key
                print DEVELOPER_KEY
                found = True
                break
            if key == current_key:
                next = True
        # Wrapped around the list: start again from the first key.
        if not found:
            open('current_key.txt','w').write(keylist[0])
|
17,624 | 83993b94098f5fa11f443f30fd18b5023ed6671f | import sys
import pygame
class Rockets():
    """Player-controlled rocket sprite kept inside the screen bounds."""

    def __init__(self, speed_factor, screen):
        """Load the sprite image and centre it on *screen*."""
        self.screen = screen
        self.speed_factor = speed_factor
        self.image = pygame.image.load('practice/images/rockets_1.jpg')
        self.rect = self.image.get_rect()
        self.screen_rect = screen.get_rect()
        # Start centred on the screen.
        self.rect.centerx = self.screen_rect.centerx
        self.rect.centery = self.screen_rect.centery
        # Float shadow coordinates so fractional speeds can accumulate.
        self.position_x = float(self.rect.centerx)
        self.position_y = float(self.rect.centery)
        # Direction flags, toggled by the keyboard event handler.
        self.moving_right = False
        self.moving_left = False
        self.moving_up = False
        self.moving_down = False

    def update(self):
        """Advance one tick in every active direction, clamped at the edges."""
        step = self.speed_factor
        edge = self.screen_rect
        dx = 0.0
        dy = 0.0
        if self.moving_right and self.rect.right < edge.right:
            dx += step
        if self.moving_left and self.rect.left > 0:
            dx -= step
        if self.moving_down and self.rect.bottom < edge.bottom:
            dy += step
        if self.moving_up and self.rect.top > 0:
            dy -= step
        self.position_x += dx
        self.position_y += dy
        self.rect.centerx = self.position_x
        self.rect.centery = self.position_y

    def blitme(self):
        """Draw the rocket at its current position."""
        self.screen.blit(self.image, self.rect)
def check_events(rockets):
    """Poll pygame events and update the rocket's movement flags.

    Arrow-key presses switch the matching direction flag on, releases
    switch it off, and a window-close event exits the program.
    """
    key_to_flag = {
        pygame.K_RIGHT: 'moving_right',
        pygame.K_LEFT: 'moving_left',
        pygame.K_UP: 'moving_up',
        pygame.K_DOWN: 'moving_down',
    }
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        elif event.type in (pygame.KEYDOWN, pygame.KEYUP):
            flag = key_to_flag.get(event.key)
            if flag is not None:
                # Flag is True exactly while the key is held down.
                setattr(rockets, flag, event.type == pygame.KEYDOWN)
def run_game():
    """Initialise pygame and run the main event/draw loop forever."""
    pygame.init()
    screen = pygame.display.set_mode((1200,800))
    bg_color = (230,230,230)
    speed_factor = 1.5
    rockets = Rockets(speed_factor,screen)
    while True:
        check_events(rockets)
        screen.fill(bg_color)
        # NOTE(review): blitme() runs before update(), so each flipped
        # frame shows the previous tick's position -- one frame of lag.
        rockets.blitme()
        rockets.update()
        pygame.display.flip()
run_game() |
17,625 | b632dfbf889e5b71301efab680dd40199ee315d4 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 20 17:26:49 2018
@author: hjiang
"""
"""
We have a grid of 1s and 0s; the 1s in a cell represent bricks.
A brick will not drop if and only if it is directly connected to the top of the grid,
or at least one of its (4-way) adjacent bricks will not drop.
We will do some erasures sequentially. Each time we want to do the erasure at the location (i, j),
the brick (if it exists) on that location will disappear, and then some other bricks may drop because of that erasure.
Return an array representing the number of bricks that will drop after each erasure in sequence.
Example 1:
Input:
grid = [[1,0,0,0],[1,1,1,0]]
hits = [[1,0]]
Output: [2]
Explanation:
If we erase the brick at (1, 0), the brick at (1, 1) and (1, 2) will drop. So we should return 2.
Example 2:
Input:
grid = [[1,0,0,0],[1,1,0,0]]
hits = [[1,1],[1,0]]
Output: [0,0]
Explanation:
When we erase the brick at (1, 0), the brick at (1, 1) has already disappeared due to the last move.
So each erasure will cause no bricks dropping. Note that the erased brick (1, 0) will not be counted as a dropped brick.
Note:
The number of rows and columns in the grid will be in the range [1, 200].
The number of erasures will not exceed the area of the grid.
It is guaranteed that each erasure will be different from any other erasure, and located inside the grid.
An erasure may refer to a location with no brick - if it does, no bricks drop.
https://leetcode.com/problems/bricks-falling-when-hit/discuss/119829/Python-Solution-by-reversely-adding-hits-bricks-back
https://xingxingpark.com/Leetcode-803-Bricks-Falling-When-Hit/
将所有击落的砖块,先去除(在Grid矩阵中-1),接着用DFS找出所有与顶部砖块连通的砖块,
并用一个矩阵connected记录(既表示已经访问过,又表示与顶部连通)。然后,从最后一块被击落的砖块向前逐一恢复。
每次恢复被击落砖块时,在Grid中+1,并且判断该位置是否原来有砖块存在,是否处于顶部或者四周有没有与顶部连通的砖块存在。
若满足这些条件,说明该被击落的砖块可以恢复,并且以它为起点做DFS,所有与他连通的砖块都可以被恢复,
恢复的数量即为该次击落后,落下砖块的数量。
"""
#class Solution0:
class Solution0(object):
    """LeetCode 803: count bricks that fall after each hit.

    Strategy: remove all hit bricks up front, mark everything still
    connected to the top row, then re-add the hits in reverse order;
    each re-added brick that (re)connects to the top contributes its
    newly-connected component size minus itself.
    """
    def check_valid(self, r, c, grid):
        # True iff (r, c) is inside the grid and currently holds a brick.
        if r < 0 or r >= len(grid) or c < 0 or c >= len(grid[0]) or grid[r][c] < 1: # grid[r][c] == 1 means a brick is present
            return False
        else:
            return True
    def dfs_connect(self, grid, connected, r, c):
        # Mark every brick reachable from (r, c) as connected; returns the
        # component size including (r, c) itself.
        num_connected = 1
        for rr, cc in [(r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)]:
            if self.check_valid(rr, cc, grid) and not connected[rr][cc]: # only recurse into not-yet-marked cells
                connected[rr][cc] = 1
                num_connected += self.dfs_connect(grid, connected, rr, cc)
        return num_connected # number of bricks in this connected component
    def build_connection(self, grid):
        # Build the 0/1 matrix of bricks connected to the top row.
        connected = [[0 for c in range(len(grid[0]))] for r in range(len(grid))] # same dimensions as grid
        for c in range(len(grid[0])):
            if self.check_valid(0, c, grid): # brick on the top row
                connected[0][c] = 1 # top-row bricks are connected by definition
                self.dfs_connect(grid, connected, 0, c) # mark everything reachable from it
        return connected
    def check_new_block_connection(self, r, c, grid, connected):
        # After restoring a hit, does the brick at (r, c) touch the top or
        # any already-connected neighbour?
        if grid[r][c] < 1:
            return False
        if r == 0:
            return True
        for rr, cc in [(r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)]: # neighbours of the restored brick
            if self.check_valid(rr, cc, grid) and connected[rr][cc] == 1:
                return True
        return False
    def hitBricks(self, grid, hits): # entry point
        """
        :type grid: List[List[int]]
        :type hits: List[List[int]]
        :rtype: List[int]
        """
        ret = [0 for i in range(len(hits))]
        for hit in hits:
            grid[hit[0]][hit[1]] -= 1 # remove all hit bricks first
        connected = self.build_connection(grid) # connectivity after every hit applied
        for idx in range(len(hits)):
            r, c = hits[-1 - idx] # restore hits from last to first
            grid[r][c] += 1
            if self.check_new_block_connection(r, c, grid, connected):
                connected[r][c] = 1
                add_num = self.dfs_connect(grid, connected, r, c) - 1 # newly connected bricks, excluding the restored one
                ret[-1 - idx] = add_num
        return ret
class Solution:
    def hitBricks(self, grid, hits):
        """LeetCode 803: number of bricks that drop after each erasure.

        Removes all hit bricks up front, flood-fills the bricks still
        attached to the top row (marking them 2), then replays the hits
        in reverse: each restored brick that touches the stable set
        contributes its newly-stabilised component size minus itself.
        Mutates *grid* in place.
        """
        rows, cols = len(grid), len(grid[0])

        def mark_stable(r, c):
            # Flood-fill from (r, c): turn reachable 1s into 2s and
            # return how many cells were marked.
            if not (0 <= r < rows and 0 <= c < cols) or grid[r][c] != 1:
                return 0
            grid[r][c] = 2
            count = 1
            for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
                count += mark_stable(nr, nc)
            return count

        def touches_stable(r, c):
            # A brick is stable if it sits on the top row or borders a
            # cell already marked 2.
            if r == 0:
                return True
            for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
                if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] == 2:
                    return True
            return False

        # Pre-remove every hit (a cell may go to 0 or -1 if it was empty).
        for r, c in hits:
            grid[r][c] -= 1
        # Mark everything still hanging from the top row.
        for c in range(cols):
            mark_stable(0, c)
        # Replay hits backwards, counting newly stabilised bricks.
        result = [0] * len(hits)
        for k in range(len(hits) - 1, -1, -1):
            r, c = hits[k]
            grid[r][c] += 1
            if grid[r][c] == 1 and touches_stable(r, c):
                result[k] = mark_stable(r, c) - 1
        return result
if __name__ == "__main__":
    # Smoke test: expected output is [2] (Example 1 in the header).
    print(Solution0().hitBricks([[1,0,0,0],[1,1,1,0]], [[1,0]]))
#    print(Solution().hitBricks([[1,0,0,0],[1,1,0,0]], [[1,0]]))
|
17,626 | 74f405a43f149d4486e56ed3b0f0863baf9b33cd | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2019-08-23 15:47
# @File : misc.py
import redis
from app.core.log import logger
from app.core.config import settings
class RedisHelper(object):
    """Thin static wrapper around a shared redis-py connection pool.

    All operations are exposed as staticmethods; the first construction
    lazily creates one class-level pooled client that every call reuses.
    NOTE(review): the *db* argument is ignored -- the pool always uses
    settings.REDIS_DB. Confirm that is intended.
    """
    # Class-level pooled redis.Redis client, created on first use.
    __pool = None
    def __init__(self, db=1):
        RedisHelper.__get_connect(db)
    @staticmethod
    def __get_connect(db):
        # Create a shared connection pool for performance; pooled redis
        # connections do not need to be closed explicitly.
        if RedisHelper.__pool is None:
            pool = redis.ConnectionPool(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB,
                                        password=settings.REDIS_AUTH)
            RedisHelper.__pool = redis.Redis(connection_pool=pool)
        return RedisHelper.__pool
    @staticmethod
    def sAddMember(setName, value):
        # SADD; returns True on success, None after a logged failure.
        if setName is None or setName == '':
            errMsg = 'error, set key can\'t be empty.'
            logger.error(errMsg)
            return False
        try:
            RedisHelper().__pool.sadd(setName, value)
            return True
        except Exception as e:
            errMsg = 'a error was occurred in sAddMember when insert key:{} value:{}.'.format(setName, value)
            logger.exception(errMsg)
    @staticmethod
    def sPop(setName):
        # SPOP a random member; returns None after a logged failure.
        try:
            return RedisHelper().__pool.spop(setName)
        except Exception as e:
            errMsg = 'a error was occurred in sAddMember when insert key:{}.'.format(setName)
            logger.exception(errMsg)
    @staticmethod
    def sIsMember(setName, value):
        # SISMEMBER; False for empty key or on failure.
        if setName is None or setName == '':
            errMsg = 'error, set key can\'t be empty.'
            logger.error(errMsg)
            return False
        try:
            return RedisHelper().__pool.sismember(setName, value)
        except Exception as e:
            errMsg = 'a error was occurred in sIsMember when check key:{} value:{}.'.format(setName, value)
            logger.exception(errMsg)
            return False
    @staticmethod
    def sRemMember(setName, value):
        # SREM; NOTE(review): returns None on success, False on failure.
        if setName is None or setName == '':
            errMsg = 'error, set key can\'t be empty.'
            logger.error(errMsg)
            return False
        try:
            RedisHelper().__pool.srem(setName, value)
        except Exception as e:
            errMsg = 'a error was occurred in sRemMember when remove key:{} value:{}.'.format(setName, value)
            logger.exception(errMsg)
            return False
    @staticmethod
    def set_expire(key, time):
        # EXPIRE; returns None on success, False on a logged failure.
        try:
            RedisHelper().__pool.expire(key, time)
        except Exception as e:
            errMsg = 'a error was occurred when set expire time on key:{}, time {}'.format(key, time)
            logger.exception(errMsg)
            return False
    @staticmethod
    def get_ttl(key, ):
        # TTL of the key in seconds.
        return RedisHelper().__pool.ttl(key)
    @staticmethod
    def sMembers(setName):
        # SMEMBERS; None after a logged failure, False for empty key.
        if setName is None or setName == '':
            errMsg = 'error, set key can\'t be empty.'
            logger.error(errMsg)
            return False
        try:
            return RedisHelper().__pool.smembers(setName)
        except Exception as e:
            errMsg = 'a error was occurred in sMembers when load ({}) members.'.format(setName, )
            logger.exception(errMsg)
            return None
    @staticmethod
    def multi_push(setName, vals):
        # Pipeline many SADDs into a single round trip.
        pipe = RedisHelper().__pool.pipeline()
        for var in vals:
            pipe.sadd(setName, var)
        pipe.execute()
    @staticmethod
    def delete_key(setName):
        # DEL the key.
        return RedisHelper().__pool.delete(setName)
    @staticmethod
    def zAdd(key, score, value):
        # ZADD using the redis-py >= 3 mapping signature.
        return RedisHelper().__pool.zadd(key, {value: score})
    @staticmethod
    def zPopMin(key):
        '''
        Pop and return the member with the lowest score.
        Result structure: [(member, score)].
        :param key:
        :return:
        '''
        return RedisHelper().__pool.zpopmin(key)
    @staticmethod
    def lPush(key, value):
        # LPUSH onto the head of the list.
        return RedisHelper().__pool.lpush(key, value)
    @staticmethod
    def get_key_like(pattern="*"):
        # KEYS glob match. NOTE(review): KEYS blocks the server; prefer
        # SCAN on large datasets.
        return RedisHelper().__pool.keys(pattern)
    @staticmethod
    def lPop(key):
        # LPOP from the head of the list.
        return RedisHelper().__pool.lpop(key)
    @staticmethod
    def hGet(key, field):
        # HGET one hash field.
        return RedisHelper().__pool.hget(key, field)
    @staticmethod
    def hSet(key, field, valud):
        # HSET one hash field.
        # NOTE(review): parameter name 'valud' is a typo for 'value';
        # renaming would break keyword callers, so it is kept.
        return RedisHelper().__pool.hset(key, field, valud)
    @staticmethod
    def hScan(key, cursor, match=None, count=10):
        # HSCAN one page; returns (next_cursor, {field: value}).
        return RedisHelper().__pool.hscan(key, cursor, match, count)
if __name__ == '__main__':
    # Demo: iterate every field of the 'host' hash with HSCAN pages and
    # count/print the values. Cursor 0 from the server means "done".
    # print(conf)
    # for i in range(1000):
    #     k = 'key'+str(i)
    #     v = 'value'+ str(i)
    #     a = RedisHelper().hSet('host', k, v)
    i = 0
    cursor = 0
    while True:
        rst = RedisHelper().hScan('host', cursor,)
        cursor = rst[0]
        data = rst[1]
        for req in data.values():
            i +=1
            print(req)
        if cursor ==0:
            break
    print(i)
|
17,627 | 1988c8fce178ff99c83cc941055a6b41e46c4471 | # https://www.codewars.com/kata/54a0689443ab7271a90000c6
#
# Harshad numbers (also called Niven numbers) are positive numbers that can
# be divided (without remainder) by the sum of their digits.
#
# For example, the following numbers are Harshad numbers:
#
# 10, because 1 + 0 = 1 and 10 is divisible by 1
# 27, because 2 + 7 = 9 and 27 is divisible by 9
# 588, because 5 + 8 + 8 = 21 and 588 is divisible by 21
#
# While these numbers are not:
#
# 19, because 1 + 9 = 10 and 19 is not divisible by 10
# 589, because 5 + 8 + 9 = 22 and 589 is not divisible by 22
# 1001, because 1 + 1 = 2 and 1001 is not divisible by 2
#
# Harshad numbers can be found in any number base, but we are going to focus
# on base 10 exclusively.
# Your task
#
# Your task is to complete the skeleton Harshad object ("static class") which
# has 3 functions:
#
# isValid() that checks if n is a Harshad number or not
# getNext() that returns the next Harshad number > n
# getSerie() that returns a series of n Harshad numbers, optional start
# value not included
#
# You do not need to care about the passed parameters in the test cases, they
# will always be valid integers (except for the start argument in getSerie()
# which is optional and should default to 0).
#
# Note: only the first 2000 Harshad numbers will be checked in the tests.
# Examples
#
# Harshad.is_valid(1) ==> True
# Harshad.get_next(0) ==> 1
# Harshad.get_series(3) ==> [ 1, 2, 3 ]
# Harshad.get_series(3, 1000) ==> [ 1002, 1008, 1010 ]
class Harshad:
    """Helpers for base-10 Harshad (Niven) numbers.

    A Harshad number is divisible by the sum of its own digits.
    """

    @staticmethod
    def is_valid(number):
        """Return True when *number* is divisible by its digit sum."""
        digit_sum = sum(int(ch) for ch in str(number))
        return number % digit_sum == 0

    @staticmethod
    def get_next(number):
        """Return the smallest Harshad number strictly greater than *number*."""
        candidate = number + 1
        while not Harshad.is_valid(candidate):
            candidate += 1
        return candidate

    @staticmethod
    def get_series(count, start=0):
        """Return *count* Harshad numbers greater than *start* (exclusive)."""
        series = []
        candidate = start
        while len(series) < count:
            candidate = Harshad.get_next(candidate)
            series.append(candidate)
        return series
|
17,628 | 508e073473e37efea6c23f0408b38574491c100a | import json
import time
import pytz
import datetime
from wetland import config
import paho.mqtt.client as mqtt
# sensor name
name = config.cfg.get("wetland", "name")
# urls to report
host = config.cfg.get("mqtt", "host")
keys_path = config.cfg.get("mqtt", "keys_path")
ca_certs = keys_path + 'ca.crt'
cert_file = keys_path + 'client.crt'
key_file = keys_path + 'client.key'
class plugin(object):
    """wetland honeypot output plugin that publishes events over MQTT."""
    def __init__(self, server):
        # server exposes the attacker/host connection details used below
        # (hacker_ip, hacker_port, myip) -- TODO confirm its interface.
        self.server = server
        self.name = config.cfg.get("wetland", "name")
    def send(self, subject, action, content):
        """Publish one honeypot event to the 'ck/log' MQTT topic.

        Only a whitelisted set of subject/action pairs is forwarded;
        anything else is silently dropped. Always returns True.
        """
        t = datetime.datetime.fromtimestamp(time.time(),
                                            tz=pytz.timezone('UTC')).isoformat()
        # Whitelist filter: fall through to publish only for these events.
        if subject == 'wetland' and \
                action in ('login_successful', 'shell command', 'exec command',
                           'direct_request', 'reverse_request'):
            pass
        elif subject in ('sftpfile', 'sftpserver'):
            pass
        elif subject == 'content' and action in ('pwd',):
            pass
        elif subject == 'upfile':
            pass
        else:
            return True
        # dst_port is hard-coded to the SSH port the honeypot emulates.
        data = {'timestamp': t, 'src_ip': self.server.hacker_ip,
                'dst_ip': self.server.myip, 'action': action,
                'content': content, 'sensor': self.name,
                'src_port': self.server.hacker_port,
                'dst_port': 22}
        data = json.dumps(data)
        # Fresh TLS client per event; QoS 1 = at-least-once delivery.
        client = mqtt.Client()
        client.tls_set(ca_certs=ca_certs,
                       certfile=cert_file,
                       keyfile=key_file)
        client.connect(host)
        client.publish('ck/log', data, qos=1)
        return True
|
17,629 | 2a606c4e70680e9f6241f839060da65960c70a0a | # DESCRIPTION
# You are given a sorted array consisting of only integers where every element appears exactly twice,
# except for one element which appears exactly once. Find this single element that appears only once.
# Follow up: Your solution should run in O(log n) time and O(1) space.
# EXAMPLE 1:
# Input: nums = [1,1,2,3,3,4,4,8,8]
# Output: 2
# EXAMPLE 2:
# Input: nums = [3,3,7,7,10,11,11]
# Output: 10
# Constraints:
# 1 <= nums.length <= 10^5
# 0 <= nums[i] <= 10^5
from typing import List  # bug fix: List was used in annotations but never imported


class Solution:
    '''
    Time: O(LOG N), modified binary search
    Space: O(1)
    '''
    def singleNonDuplicate(self, nums: List[int]) -> int:
        """Return the element appearing exactly once in a sorted array
        where every other element appears exactly twice.

        Invariant: before the single element, pairs start at even
        indices; after it, pairs start at odd indices.  Binary searching
        on that parity gives O(log n) time.
        """
        if len(nums) == 1:
            return nums[0]
        l = 0
        r = len(nums) - 1
        while l < r:
            mid = l + (r - l) // 2
            if mid % 2 == 0:
                # Even mid: a match with the right neighbour means the
                # single element is further right, otherwise at/left of mid.
                if nums[mid] == nums[mid + 1]:
                    l = mid + 2
                else:
                    r = mid
            else:
                # Odd mid: a match with the right neighbour means the
                # pairing is already broken, so search left.
                if nums[mid] == nums[mid + 1]:
                    r = mid
                else:
                    l = mid + 1
        # l always lands at the start of the broken pair.
        return nums[l]
|
17,630 | 82462bf7092de3875c5296035220391211cf7515 | from pepnet import Predictor, SequenceInput, Output
from nose.tools import eq_
def test_basic_rnn():
    """A bidirectional-LSTM predictor should yield one output per input
    sequence and its Keras model should contain a bidirectional layer."""
    seq_input = SequenceInput(
        name="x",
        length=4,
        variable_length=True,
        encoding="embedding",
        rnn_layer_sizes=[20],
        rnn_type="lstm",
        rnn_bidirectional=True)
    pred = Predictor(
        inputs=seq_input,
        outputs=Output(dim=1, activation="sigmoid", name="y"))
    peptides = ["SF", "Y", "AALL"]
    predictions = pred.predict({"x": peptides})["y"]
    eq_(len(peptides), len(predictions))
    layer_names = [layer.name for layer in pred.model.layers]
    assert any("bidirectional" in name for name in layer_names)
|
17,631 | ef75b2e052668d90629b0f8b0119a8a7e5858286 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# The above lines tell the shell to use python as interpreter when the
# script is called directly, and that this file uses utf-8 encoding,
# because of the country specific letter in my surname.
"""
Name: Program 3 "Personal Information Class"
Author: Martin Bo Kristensen Grønholdt.
Version: 1.0 (2017-02-12)
Class that encapsulates the properties of a customer.
"""
from ass14.person import Person
class Customer(Person):
    """
    Customer class: a Person with a customer number and a mailing-list flag.
    """
    def __init__(self, name='John Doe', address='', phone='', number=0,
                 mail=False):
        """
        Constructor.
        :param name: The name of the customer. Default is 'John Doe'.
        :param address: The address of the customer.
        :param phone: The phone number of the customer.
        :param number: The customer number.
        :param mail: True if the customer is on the mailing list.
        """
        # Call the parent constructor.
        Person.__init__(self, name, address, phone)
        # Set to the values passed to the constructor.
        self.__number = number
        self.__mail = mail
    def get_number(self):
        """
        Get the customer number.
        :return: The customer number.
        """
        return self.__number
    def get_receive_mail(self):
        """
        True if the customer is on the mailing list.
        :return: The mailing list status (bool).
        """
        return self.__mail
    def get_receive_mail_str(self):
        """
        Return the mailing list status as text: 'yes' if the customer
        receives mail, 'no' otherwise.
        :return: The mailing list status as a string.
        """
        # Bug fix: this returned the raw bool (True/False), contradicting
        # its documented 'yes'/'no' contract and duplicating
        # get_receive_mail(); __str__ below consequently printed True/False.
        return 'yes' if self.__mail else 'no'
    def set_number(self, number):
        """
        Set the customer number.
        """
        self.__number = number
    def set_receive_mail(self):
        """
        Set the customer to receive mail.
        """
        self.__mail = True
    def set_receive_no_mail(self):
        """
        Set the customer to not receive mail.
        """
        self.__mail = False
    def __str__(self):
        """
        Return a string that is directly usable when printing the entry.
        :return: string
        """
        return('Name:\t\t\t{}\n'.format(self.get_name()) +
               'Address:\t\t{}\n'.format(self.get_address()) +
               'Phone:\t\t\t{}\n'.format(self.get_phone()) +
               'Number:\t\t\t{}\n'.format(self.get_number()) +
               'Mailing list:\t{}'.format(self.get_receive_mail_str())
               )
|
17,632 | 4bb57aeb8a915c4c484c01f7c786b33c3ebe4fb2 | #!/usr/bin/env python3
import numpy as np
from tqdm import trange
from run_utils import set_seed, build_all_matrices, clusterize, train_test_split, SplitType, export, evaluate
from basic_recommenders import TopPopRecommender
from cbf import UserCBFKNNRecommender
from clusterization import get_clusters
class ClusterizedTopPop:
    """Top-popular recommender keeping one TopPop model per user cluster,
    with a global TopPop fallback for users outside every cluster."""
    def __init__(self):
        self.urm_train = None           # URM used for fitting
        self.ucm = None                 # unused here; kept for interface parity
        self.recommenders = dict()      # cluster id -> fitted TopPopRecommender
        self.cluster_for_user = dict()  # user id -> cluster id
        self.clusters = None            # cluster id -> list of user ids
        # Global fallback model (the original assigned this twice; the
        # duplicate dead assignment has been removed).
        self.std_top_pop = None
    def fit(self, urm_train, clusters):
        """Fit one TopPop per cluster plus the global fallback TopPop."""
        self.urm_train = urm_train
        self.clusters = clusters
        self.std_top_pop = TopPopRecommender()
        self.std_top_pop.fit(urm_train)
        for cluster in self.clusters:
            users = clusters[cluster]
            for user_id in users:
                self.cluster_for_user[user_id] = cluster
            # Restrict the URM to this cluster's rows and fit on it.
            filtered_urm = self.urm_train[users, :]
            top_pop = TopPopRecommender()
            top_pop.fit(filtered_urm)
            self.recommenders[cluster] = top_pop
    def recommend(self, user_id, at=None, exclude_seen=True):
        """Recommend with the user's cluster model; users without a
        cluster fall back to the global model.
        NOTE(review): exclude_seen is accepted but the underlying calls
        always pass False — confirm this is intended."""
        try:
            cluster = self.cluster_for_user[user_id]
            return self.recommenders[cluster].recommend(user_id, at, False)
        except KeyError:
            return self.std_top_pop.recommend(user_id, at, False)
if __name__ == '__main__':
    set_seed(42)
    # EXPORT toggles between producing a submission and local evaluation.
    EXPORT = False
    urm, icm, ucm, target_users = build_all_matrices()
    if EXPORT:
        urm_train = urm.tocsr()
        urm_test = None
    else:
        urm_train, urm_test = train_test_split(urm, SplitType.PROBABILISTIC)
    # TOP-POP (per-cluster)
    clusters = get_clusters()
    top_pop = ClusterizedTopPop()
    top_pop.fit(urm_train, clusters)
    # USER CBF
    user_cbf = UserCBFKNNRecommender()
    user_cbf.fit(urm_train, ucm, top_k=496, shrink=0, normalize=False)
    # HYBRID FALLBACK
    from hybrid import HybridRecommender, MergingTechniques
    # NOTE(review): this rebinds top_pop, discarding the fitted
    # ClusterizedTopPop above — confirm which model the hybrid should use.
    top_pop = TopPopRecommender()
    top_pop.fit(urm_train)
    hybrid_fb = HybridRecommender([top_pop, user_cbf], urm_train, merging_type=MergingTechniques.MEDRANK)
    if EXPORT:
        export(target_users, hybrid_fb)
    else:
        # Evaluate on cold users only: exclude everyone with interactions.
        profile_lengths = np.ediff1d(urm_train.indptr)
        warm_users = np.where(profile_lengths != 0)[0]
        evaluate(hybrid_fb, urm_test, excluded_users=warm_users)
|
17,633 | b647ee70d79ae59fb5acf38201883edbf3eb4722 | ## test of vectorization of collisionless transport
import numpy as np
import time
def loops(maxi, grid_R2, maxk1, maxk2, R1, dr, dfi, Z, LAMBDA ):
    """Reference (pure Python loop) evaluation of the screened deposition
    integral at maxi+1 radial grid points.

    Returns a list of length maxi+1, one value per grid radius.
    """
    deposition = []
    for i in range(maxi + 1):
        R = grid_R2 * i
        total = 0
        for k1 in range(maxk1 + 1):
            RC = R1 + (k1 + 0.5) * dr
            for k2 in range(maxk2 + 1):
                fi = (k2 + 0.5) * dfi
                # Squared distance between source ring point and grid point.
                R0 = Z * Z + R * R + RC * RC - 2 * RC * R * np.cos(fi)
                total += RC * np.exp(-np.sqrt(R0) / LAMBDA) / (R0 * R0)
        deposition.append(Z * Z * dr * dfi * total * 2 / np.pi)
    return deposition
def vect(maxi, grid_R2, maxk1, maxk2, R1, dr, dfi, Z, LAMBDA ):
    """Vectorized (numpy broadcasting) equivalent of loops(); returns a
    1-D array of length maxi+1 with the same deposition values.

    The print() calls are debug output deliberately left in for the
    timing experiment in __main__.
    """
    #DEP1 = []
    # Grid radii as a column vector (lR x 1).
    R = grid_R2 *np.arange(maxi+1).reshape(maxi+1,1)
    lR = len(R)
    R=R.reshape(lR,1)
    # Ring radii RC (lRC x 1) and azimuth samples fi (lfi x 1).
    RC = R1 + dr * (0.5 + np.arange(maxk1+1))
    lRC=len(RC)
    RC = RC.reshape(lRC,1)
    fi = (np.arange(maxk2+1) +0.5) * dfi
    lfi=len(fi)
    fi=fi.reshape(lfi,1)
    # Build the -2*RC*R*cos(fi) cross term as a (lfi, lRC, lR) tensor.
    R0 = np.dot(np.cos(fi), RC.T)
    print(R0.shape)
    R0=R0.reshape(lfi, lRC, 1)
    print(R0.shape)
    R0 = -2 *np.dot(R0,R.T)
    print('R0 shape:', R0.shape)
    # Broadcast R^2 and RC^2 to the same (lfi, lRC, lR) shape.
    R2 = np.dot(np.ones((lfi, lRC, 1)), (R**2).T)
    RC2 =np.dot(np.ones((lfi, 1)), (RC**2).T)
    RC2 =np.dot(RC2.reshape(lfi, lRC, 1), np.ones((1,lR)))
    print('R0', R0.shape)
    print('R2', R2.shape)
    print('RC2', RC2.shape)
    # R0 now holds the full squared distance Z^2 + R^2 + RC^2 - 2*RC*R*cos(fi).
    R0 += RC2 + R2 + Z**2
    eR0 = np.exp(-np.sqrt(R0)/LAMBDA)
    eR0 /= R0**2
    print('e0 shape:', eR0.shape)
    # Weight by RC and sum over the ring (k1) and azimuth (k2) axes.
    S=np.dot(np.swapaxes(eR0,1,2), RC)
    print('S: ', S.shape)
    S=np.sum(S,0).flatten()
    DEP1 = (Z*Z*dr*dfi*2/np.pi) * S
    return(DEP1)
if __name__ == "__main__":
    # Timing comparison: loop implementation vs vectorized implementation
    # on the same parameter set.
    t0 = time.time()
    l=loops(100,2,50, 30, 20, 2, 3, 100, 30)
    t1=time.time()
    #print(l)
    m=vect(100,2,50, 30, 20, 2, 3, 100, 30)
    t2=time.time()
    print(m.shape)
    #print(np.array(l)/m)
    # Elapsed seconds for the loop version (the vect elapsed time is
    # printed on the following line of the script).
    print(t1-t0)
print(t2-t1) |
17,634 | 0fe3d6bde74cc10e583a6936f35ffdbe714ce701 | """
@summary: Maya cooperative shelf library
@run: import coop.shelves as cshelf (suggested)
@license: MIT
@repository: https://github.com/artineering-io/maya-coop
"""
from __future__ import print_function
from __future__ import unicode_literals
import os
import maya.cmds as cmds
import maya.mel as mel
from . import logger as clog
from . import list as clist
from . import lib as clib
LOG = clog.logger("coop.shelf")
def delete_shelves(shelves=None, restart=True):
    """
    Delete the given Maya shelves and mark their pref files as '.deleted'
    so they are not recreated on startup.
    Args:
        shelves (unicode, list): Shelves to delete e.g. "Animation", "Animation.mel"
        restart (bool): If a restart dialog should appear in the end
    """
    if not shelves:
        LOG.warning('No shelves specified to delete')
        return
    # standardize shelf names
    shelves_filenames, shelves_names = format_shelves(shelves)
    # Maya creates default shelves in prefs only after each has been
    # opened (initialized), so visit every shelf first.
    for shelf in shelves_names:
        try:
            mel.eval('jumpToNamedShelf("{0}");'.format(shelf))
        except RuntimeError:
            continue
    mel.eval('saveAllShelves $gShelfTopLevel;')  # all shelves loaded (save them)
    # time to delete them from the UI
    shelf_top_level = mel.eval('$tempMelStringVar=$gShelfTopLevel') + '|'
    for shelf in shelves_names:
        if cmds.shelfLayout(shelf_top_level + shelf, q=True, ex=True):
            cmds.deleteUI(shelf_top_level + shelf, layout=True)
    # mark them as deleted to avoid startup loading
    env_dir = clib.get_env_dir()
    shelf_dir = os.path.join(env_dir, 'prefs', 'shelves')
    for shelf in shelves_filenames:
        shelf_path = os.path.join(shelf_dir, shelf)
        deleted_shelf_path = shelf_path + '.deleted'
        if os.path.isfile(shelf_path):
            # Bug fix: a stale '.deleted' marker must be removed before the
            # rename (os.rename raises on Windows if the target exists).
            # The original removed shelf_path here and then tried to rename
            # the just-deleted file, raising FileNotFoundError.
            if os.path.isfile(deleted_shelf_path):
                os.remove(deleted_shelf_path)
            os.rename(shelf_path, deleted_shelf_path)
    if restart:
        clib.dialog_restart()
def restore_shelves():
    """Re-enable every shelf previously marked '.deleted' by delete_shelves().

    Each '<name>.deleted' file in prefs/shelves is renamed back to
    '<name>'; if '<name>' already exists again, the stale marker is simply
    discarded.  Ends by asking the user to restart Maya.
    """
    shelves_dir = os.path.join(clib.get_env_dir(), 'prefs', 'shelves')
    for entry in os.listdir(shelves_dir):
        if not entry.endswith('.deleted'):
            continue
        marker_path = os.path.join(shelves_dir, entry)
        original_path = os.path.join(shelves_dir, entry.split('.deleted')[0])
        if os.path.isfile(original_path):
            # The shelf somehow exists again; just drop the marker.
            os.remove(marker_path)
        else:
            os.rename(marker_path, original_path)
    clib.dialog_restart()
def format_shelves(shelves):
    """
    Format shelves into their filenames and names i.e., ['shelf_Animation.mel'], ['Animation']
    Args:
        shelves (unicode, list): Shelves to format into filename, name
    Returns:
        (list, list): Shelf filenames and names
    """
    prefix = 'shelf_'
    suffix = '.mel'
    shelves_filenames = []
    shelves_names = []
    shelves = clist.enlist(shelves)
    for shelf in shelves:
        # Normalize to the canonical file name first...
        filename = shelf
        if not filename.startswith(prefix):
            filename = prefix + filename
        if not filename.endswith(suffix):
            filename = filename + suffix
        shelves_filenames.append(filename)
        # ...then derive the bare shelf name from it.
        # Bug fix: the original used str.index('.mel'), which matches the
        # FIRST occurrence and mangled names containing '.mel' mid-string
        # (e.g. 'my.melody.mel'); strip prefix/suffix by length instead.
        shelves_names.append(filename[len(prefix):-len(suffix)])
    return shelves_filenames, shelves_names
|
17,635 | 1627398f8f0aaff5bc9c7cfeaf467737e0024eb5 | #!/usr/bin/env python
'''
Qt文档链接修复工具(Qtdoc Markdown Link Fixer),用于将 Qt 官网的文档经typora转化后的 Markdown 中原链接链接修复为正确的链接
Usage:
qmlf <website> <filename>
Arguments:
website 待翻译的 Qt5 官方文档页面网址,需以https://doc.qt.io/qt-5开头
filename Typora 转化后的 markdown 文档名,转换后该文件将会被覆盖写入
Options:
-h --help
'''
from docopt import docopt
import os.path
import qt_link_modifier
def _fix(website, filename):
    """Read *filename* (a Typora-exported markdown file) and return its
    text with Qt documentation links repaired.

    Returns None when the website is not a Qt 5 doc page (usage is
    printed) or when the file does not exist.
    """
    if not website.startswith("https://doc.qt.io/qt-5/"):
        print(__doc__)
        return None
    md_filename = filename if filename.endswith(".md") else filename + ".md"
    if not os.path.exists(md_filename):
        return None
    print("Read from %s" % md_filename)
    with open(md_filename) as handle:
        source_text = handle.read()
    print("Fix links in markdown")
    return qt_link_modifier.modify_qt_link(website, md_filename, source_text)
def main():
    """CLI entry point: parse docopt arguments, repair the links and
    overwrite the input markdown file with the result."""
    args = docopt(__doc__)
    website = args["<website>"]
    filename = args["<filename>"]
    markdown = _fix(website, filename)
    if markdown is None:
        # _fix already printed the usage text when the URL was rejected.
        print("error!")
        return
    print("Write to %s" % filename)
    # NOTE(review): the handle is named 'fin' but it is the output file.
    with open(filename, "w") as fin:
        fin.write(markdown)
if __name__ == "__main__":
    main()
|
17,636 | a1b4aa83f621cfe6f222e8b7da4960aaddfd2ef2 | def convert(n):
print(f"passed no. is {n}")
print(type(n))
s = str(n)
print(type(s))
return s
print(convert(10)) |
17,637 | 974a58f9a4b1c5df9fe5c0c15e50a440e2c2c9d6 | #!/usr/bin/env python
"""
CREATED AT: 2021/8/10
Des:
https://leetcode.com/problems/flip-string-to-monotone-increasing/
https://leetcode.com/explore/challenge/card/august-leetcoding-challenge-2021/614/week-2-august-8th-august-14th/3876/
GITHUB: https://github.com/Jiezhi/myleetcode
Reference: https://leetcode.com/problems/flip-string-to-monotone-increasing/solution/
"""
import itertools
from tool import print_results
class Solution:
    """Three implementations of LeetCode 926 'Flip String to Monotone
    Increasing': minimum flips so the string becomes 0...01...1."""
    @print_results
    def minFlipsMonoIncr(self, s: str) -> int:
        """
        Exponential recursive branch-and-bound solution.
        76 / 93 test cases passed.
        Time Limit Exceeded at len(s) = 20000
        :param s: binary string
        :return: minimum number of flips
        """
        def __minFMI(s, head: str) -> int:
            if len(s) == 1:
                # last one, no need to flip if head is 0 or s[0] same as head
                if s[0] == head or head == '0':
                    return 0
                else:
                    return 1
            if s[0] == head:
                # head and s[0] are same, no need to flip as we want minimum flips
                return __minFMI(s[1:], head)
            elif head == '0': # head = 0 and s[0] = 1, has two choices
                # first arg is flip s[0] from 1 to 0, last arg is keep s[0] as 1
                # and head changed to 1
                return min(__minFMI(s[1:], '0') + 1, __minFMI(s[1:], '1'))
            else:
                # head = 1 and s[0] = 0, and must flip s[0] to 1
                return __minFMI(s[1:], '1') + 1
        # assume the virtual head is 0
        return __minFMI(s, '0')
    @print_results
    def minFlipsMonoIncr2(self, s: str) -> int:
        """
        O(n) prefix-sum solution via itertools.accumulate: for every split
        point i, cost = ones before i (flip to 0) + zeros after i (flip to 1).
        93 / 93 test cases passed.
        Status: Accepted
        Runtime: 372 ms
        Memory Usage: 22.1 MB
        :param s: binary string
        :return: minimum number of flips
        """
        l = len(s)
        def add_str(a, b) -> int:
            return int(a) + int(b)
        # bpl[i] = number of '1's in s[:i] (prefix counts, starting at 0)
        # like | 0 | 1 | 0 | 0 |
        # the first entry is the empty prefix
        bpl = list(itertools.accumulate(s, func=add_str, initial=0))
        return min([bpl[i] + (l - i - (bpl[-1] - bpl[i])) for i in range(l + 1)])
    @print_results
    def minFlipsMonoIncr3(self, s: str):
        # Same prefix-sum idea, hand-rolled (official solution style):
        # P[j] = ones in s[:j]; cost(j) = P[j] + zeros after j.
        P = [0]
        for x in s:
            P.append(P[-1] + int(x))
        return min(P[j] + len(s) - j - (P[-1] - P[j])
                   for j in range(len(P)))
def test():
    """Exercise all three implementations on the same fixture set."""
    checks = [("0", 0), ("1", 0), ("00110", 1), ("010110", 2),
              ("00011000", 2)]
    for text, expected in checks:
        assert Solution().minFlipsMonoIncr(s=text) == expected
    for text, expected in checks:
        assert Solution().minFlipsMonoIncr2(s=text) == expected
    # The original exercised impl 3 with "1" before "0"; keep that order
    # so the decorator's printed output is unchanged.
    for text, expected in [("1", 0), ("0", 0), ("00110", 1),
                           ("010110", 2), ("00011000", 2)]:
        assert Solution().minFlipsMonoIncr3(s=text) == expected
# Run the self-checks when executed as a script.
if __name__ == '__main__':
    test()
|
17,638 | 92baa7c8daa47a30cc5a0a9054e97d48441ef961 | #!/usr/bin/env python3
import os
from pathlib import Path
import sys
# Absolute Path of the directory containing this script (trailing '/' kept).
__projectdir__ = Path(os.path.dirname(os.path.realpath(__file__)) + '/')
def issubpath(filename, superpath, trueifsame = True):
    """
    Checks whether filename is a subpath of superpath.

    Both arguments are normalized with os.path.abspath before comparing.
    Returns True for issubpath('/home/user/1.txt', '/home/user/'),
    False for issubpath('/home/user2/1.txt', '/home/user/').
    When trueifsame is True, a path also counts as a subpath of itself.
    """
    abs_file = os.path.abspath(filename)
    abs_super = os.path.abspath(superpath)
    if abs_file == abs_super:
        # Identical paths only qualify when the caller opted in.
        return trueifsame is True
    return abs_file.startswith(abs_super + os.sep)
def md5Checksum(filePath):
    """Return the hex MD5 digest of the file at *filePath*.

    Reads in 8 KiB chunks so arbitrarily large files never need to fit
    in memory.  (Chunked-read pattern adapted from joelverhagen.com,
    2011.)
    """
    import hashlib
    digest = hashlib.md5()
    with open(filePath, 'rb') as handle:
        for chunk in iter(lambda: handle.read(8192), b''):
            digest.update(chunk)
    return digest.hexdigest()
def twofilesaresame(filename1, filename2):
    """Return True when the two files have identical MD5 checksums
    (i.e. identical contents, up to hash collisions)."""
    return md5Checksum(filename1) == md5Checksum(filename2)
def deletelinksdirectory(folder):
    """Walk *folder* recursively and remove every symbolic link found,
    whether it points at a file or a directory.  Regular files and real
    directories are left untouched."""
    for root, dirs, files in os.walk(folder):
        for entry in files + dirs:
            path = os.path.join(root, entry)
            if os.path.islink(path):
                os.unlink(path)
|
17,639 | 98bca4291d562f9959bbc7e4390cbc63682c5fc4 | #coding=utf-8
#from django.shortcuts import render,render_to_response
#from django.http import HttpResponse,HttpResponseRedirect
#from django.template import RequestContext
#from django import forms
from course.models import Course
from rest_framework import viewsets,generics,status,permissions
from course.serializers import CourseSerializer
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
#Unlike plain Django class-based views, these handlers are viewsets.ModelViewSet methods rather than self-only view methods.
class CourseViewSet(viewsets.ModelViewSet):
    """Full CRUD viewset for Course objects (DRF ModelViewSet)."""
    queryset = Course.objects.all()
    serializer_class = CourseSerializer
    # Query the course records; the serializer (presentation layer)
    # formats the data before it is returned to the front end.
    def list(self, request):
        # The attribute names queryset/serializer_class are fixed by DRF
        # conventions and must not be renamed.
        queryset = Course.objects.all()
        serializer = CourseSerializer(queryset, many=True)
        return Response(serializer.data)
    def retrieve(self, request, pk=None):
        # Look up a single course by primary key; 404 when absent.
        queryset = Course.objects.all()
        user = get_object_or_404(queryset, pk=pk)
        serializer = CourseSerializer(user)
        return Response(serializer.data)
class CourseList(generics.ListAPIView):
    """Read-only endpoint listing all courses."""
    queryset = Course.objects.all()
    serializer_class = CourseSerializer
class CourseDetail(generics.RetrieveAPIView):
    """Read-only endpoint retrieving a single course by primary key."""
    queryset = Course.objects.all()
    serializer_class = CourseSerializer
|
17,640 | 3a43e02b7a3d2c171413a9367dc2d371e3340a8e | import pandas as pd
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
# name = '모델링 마트 테이블1.csv'
# table = pd.read_csv(name, encoding = 'utf-8')
# table = table[['date','Subway_count','Interest_rate','Cost']]
# # table = table[['date','Mart_count','Subway_count','Interest_rate','Industry_count','Resident_count','Cost']]
#
# for i in range(len(table['date'])):
# table['date'][i] = ''.join(table['date'][i].split('-'))
# table['date'][i] = datetime.strptime(str(table['date'][i]), "%Y%m").date()
#
# all_data = table.interpolate(method = 'polynomial', order = 3)
# all_data.set_index('date', inplace = True)
# all_data = all_data.dropna()
# index = pd.DatetimeIndex(all_data.index.values)
# all_data = pd.DataFrame(all_data.values,index,all_data.columns)
# Load the pre-built mart table; the 'Date' column becomes a DatetimeIndex
# so the year-based slicing below ('2019', '2020') works.
all_data = pd.read_csv('모델링 마트 테이블_분당구.csv', parse_dates=['Date'], index_col=['Date'], infer_datetime_format=True, encoding='utf-8')
def ts_train_test_normalize(all_data, time_steps, for_periods):
    """Build sliding-window train/test tensors for the LSTM.

    input:
        all_data: DataFrame with a DatetimeIndex; rows up to 2019 train,
            rows from 2020 on test.
        time_steps: window length (past rows per sample).
        for_periods: future rows predicted per sample.
    output:
        X_train, y_train: scaled training windows and targets.
        X_test: scaled test windows (built from the tail of the full data
            so the first window has enough history).
        sc: the MinMaxScaler fitted on the training rows only; needed
            later to invert predictions.
    """
    # time_steps = 6
    # for_periods = 1
    # create training and test set
    ts_train = all_data[:'2019'].values
    ts_test = all_data['2020':].values
    ts_train_len = len(ts_train)
    ts_test_len = len(ts_test)
    feature_counts = len(all_data.columns)
    # scale the data (fit on training rows only to avoid leakage)
    from sklearn.preprocessing import MinMaxScaler
    sc = MinMaxScaler(feature_range=(0, 1))
    ts_train_scaled = sc.fit_transform(ts_train)
    # create training data of s samples and t time steps
    X_train = []
    y_train = []
    for i in range(time_steps, ts_train_len - 1):
        X_train.append(ts_train_scaled[i - time_steps:i, :])
        y_train.append(ts_train_scaled[i:i + for_periods, :])
    X_train, y_train = np.array(X_train), np.array(y_train)
    # Reshaping X_train for efficient modelling
    X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], feature_counts))
    # Test inputs: take enough history before the test range, scale with
    # transform() (not fit_transform) using the training statistics.
    inputs = all_data.values
    inputs = inputs[len(inputs) - len(ts_test) - time_steps:]
    inputs = inputs.reshape(-1, feature_counts)
    inputs = sc.transform(inputs)
    # Preparing X_test
    X_test = []
    for i in range(time_steps, ts_test_len + time_steps - for_periods):
        X_test.append(inputs[i - time_steps:i, :])
    X_test = np.array(X_test)
    X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], feature_counts))
    return X_train, y_train, X_test, sc
def LSTM_model(X_train, y_train, X_test, sc):
    """Build, train and run a two-layer LSTM; returns (model, predictions)
    with predictions already inverse-scaled via *sc*.

    NOTE(review): feature_counts is taken from the module-level all_data
    DataFrame, not from the arguments — this function only works with the
    globally loaded table.
    """
    # create a model
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, SimpleRNN, GRU, LSTM, Dropout
    from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
    from tensorflow.keras.optimizers import SGD
    import os
    feature_counts = len(all_data.columns)
    # LSTM architecture
    my_LSTM_model = Sequential()
    my_LSTM_model.add(LSTM(16,
                           return_sequences = True,
                           input_shape = (X_train.shape[1],feature_counts),
                           activation = 'tanh'))
    my_LSTM_model.add(LSTM(units = 16, activation = 'tanh'))
    # my_LSTM_model.add(Dropout(0.2))
    my_LSTM_model.add(Dense(feature_counts * 2, activation = 'relu'))
    my_LSTM_model.add(Dropout(0.2))
    my_LSTM_model.add(Dense(feature_counts))
    # Compiling
    # sgd = optimizers.SGD(lr=0.01, decay=1e-7, momentum=0.9, nesterov=True)
    my_LSTM_model.compile(optimizer = SGD(learning_rate = 0.01, decay = 1e-7,
                                          momentum = 0.9, nesterov = False),
                          loss = 'mean_squared_error')
    # Checkpoint directory for saving the best models per epoch.
    MODEL_DIR = './model/'
    if not os.path.exists(MODEL_DIR):
        os.mkdir(MODEL_DIR)
    modelpath = MODEL_DIR + "{epoch:02d}-{val_loss:.4f}.hdf5"
    # NOTE(review): checkpointer is created but not passed to fit() below
    # (only early stopping is active) — confirm whether that is intended.
    checkpointer = ModelCheckpoint(filepath=modelpath, monitor='val_loss',
                                   verbose=1, save_best_only=True)
    esc = EarlyStopping(monitor='val_loss', patience=10)
    # Fitting to the training set
    my_LSTM_model.fit(X_train, y_train, epochs = 500, batch_size = 32,validation_split = 0.2, callbacks = [esc], verbose = 1)
    # callbacks = [esc, checkpointer],
    LSTM_prediction = my_LSTM_model.predict(X_test)
    # Map predictions back to the original value range.
    LSTM_prediction = sc.inverse_transform(LSTM_prediction)
    # print(LSTM_prediction)
    return my_LSTM_model, LSTM_prediction
def actual_pred_plot(preds):
    """
    Plot the actual vs prediction for the 'Cost' column and return the
    MSE between them.

    NOTE(review): reads the module-level all_data for the actual values
    and assumes 'Cost' is the last column — confirm for new tables.
    """
    actual_pred = pd.DataFrame(columns=['Cost', 'prediction'])
    actual_pred['Cost'] = all_data['2020':].iloc[:, -1][1:len(preds) + 1]
    actual_pred['prediction'] = preds[:, -1]
    from keras.metrics import MeanSquaredError
    m = MeanSquaredError()
    m.update_state(np.array(actual_pred['Cost']), np.array(actual_pred['prediction']))
    # Returns (mse_value, matplotlib Axes from DataFrame.plot()).
    return m.result().numpy(), actual_pred.plot()
# End-to-end run: window the data (6 past steps, 1-step horizon), train,
# predict, then plot predicted vs actual cost for the test period.
X_train, y_train, X_test, sc = ts_train_test_normalize(all_data, 6, 1)
my_lstm_model, lstm_predictions_2 = LSTM_model(X_train, y_train, X_test, sc)
actual_pred_plot(lstm_predictions_2)
plt.title('2nd_LSTM')
plt.xlabel('2021')
plt.ylabel('cost')
plt.show()
|
17,641 | 674144a7b227775fed94d80affffe58107c17726 | from http_parser.parser import HttpParser
class RTBResponse(object):
    """Incremental HTTP response accumulator.

    Buffers raw data as it arrives and re-parses the whole buffer on
    every call, reporting whether a complete HTTP message is available.
    """
    def __init__(self):
        self.buffer = ''
    def receive_buffer(self, buf):
        """Append *buf* to the buffer, re-parse everything received so
        far and return a (is_complete, parser) tuple."""
        self.buffer += buf
        parser = HttpParser()
        total = len(self.buffer)
        consumed = parser.execute(self.buffer, total)
        # The parser must consume the entire buffer in one pass.
        assert consumed == total
        return (bool(parser.is_message_complete()), parser)
# Manual smoke tests (Python 2): parse a full response, then two
# responses delivered in two parts, then the same via RTBResponse.
if __name__ == '__main__' :
    print 'http res'
    rsp = ''
    with open('../testing/response.http', 'r') as f:
        rsp = f.readlines()
    rsp = ''.join(rsp)
    # Drop the trailing two characters added by the fixture file.
    rsp = rsp[:-2]
    print 'buffer :'
    print rsp
    print 'parsing ...'
    p = HttpParser()
    recved = len(rsp)
    nparsed = p.execute(rsp, recved)
    assert nparsed == recved
    if p.is_message_complete():
        print 'message complete'
    print '--------------------'
    # Test 1: feed the response in two chunks to a single HttpParser.
    rsp_1 = ''
    with open('../testing/test1_response_part1.http', 'r') as f:
        rsp_1 = f.readlines()
    rsp_1 = ''.join(rsp_1)
    rsp_1 = rsp_1[:-2]
    rsp_2 = ''
    with open('../testing/test1_response_part2.http', 'r') as f:
        rsp_2 = f.readlines()
    rsp_2 = ''.join(rsp_2)
    rsp_2 = rsp_2[:-2]
    p = HttpParser()
    recved = len(rsp_1)
    nparsed = p.execute(rsp_1, recved)
    assert nparsed == recved
    if p.is_message_complete():
        print 'message complete'
    else :
        print 'message incomplete'
        print p.recv_body()
    recved = len(rsp_2)
    nparsed = p.execute(rsp_2, recved)
    assert nparsed == recved
    if p.is_message_complete():
        print 'message complete'
        print p.recv_body()
        print p.get_headers()
    else :
        print 'message incomplete'
        print p.recv_body()
    print '--------------------'
    # Test 2: same two-chunk exercise with the second fixture pair.
    rsp_1 = ''
    with open('../testing/test2_response_part1.http', 'r') as f:
        rsp_1 = f.readlines()
    rsp_1 = ''.join(rsp_1)
    rsp_1 = rsp_1[:-2]
    rsp_2 = ''
    with open('../testing/test2_response_part2.http', 'r') as f:
        rsp_2 = f.readlines()
    rsp_2 = ''.join(rsp_2)
    rsp_2 = rsp_2[:-2]
    p = HttpParser()
    recved = len(rsp_1)
    nparsed = p.execute(rsp_1, recved)
    assert nparsed == recved
    if p.is_message_complete():
        print 'message complete'
    else :
        print 'message incomplete'
        print p.recv_body()
    recved = len(rsp_2)
    nparsed = p.execute(rsp_2, recved)
    assert nparsed == recved
    if p.is_message_complete():
        print 'message complete'
        print p.recv_body()
        print p.get_headers()
    else :
        print 'message incomplete'
        print p.recv_body()
    print '--------------------'
    # Test 3: same fixtures through the RTBResponse accumulator.
    rsp_1 = ''
    with open('../testing/test2_response_part1.http', 'r') as f:
        rsp_1 = f.readlines()
    rsp_1 = ''.join(rsp_1)
    rsp_1 = rsp_1[:-2]
    rsp_2 = ''
    with open('../testing/test2_response_part2.http', 'r') as f:
        rsp_2 = f.readlines()
    rsp_2 = ''.join(rsp_2)
    rsp_2 = rsp_2[:-2]
    p = RTBResponse()
    ok, parser = p.receive_buffer(rsp_1)
    if parser.is_message_complete():
        print 'message complete'
        print parser.recv_body()
    else :
        print 'message incomplete'
    # NOTE(review): receive_buffer already stores rsp_1; passing
    # rsp_1 + rsp_2 here duplicates part 1 inside the accumulated buffer.
    # Probably only rsp_2 should be passed — confirm intent.
    rsp_1 += rsp_2
    ok, parser = p.receive_buffer(rsp_1)
    if parser.is_message_complete():
        print 'message complete'
        print parser.recv_body()
    else :
        print 'message incomplete'
|
17,642 | 60279086c588b6c2728ad3198680825cc137d114 | #!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
# test_articles_create_sol.py
# сохраним персональную статью.
import logging
logging.basicConfig(filename='test.log', level=logging.INFO)
FORMAT = '%(asctime)-15s %(clientip)s %(user)-8s %(message)s'
# NOTE(review): basicConfig is a no-op once logging is configured, so
# this second call (and FORMAT) has no effect — confirm intent.
logging.basicConfig(format=FORMAT)
from datetime import date
import time
# import tornado.httpserver
import tornado.httpclient
# import tornado.ioloop
import tornado.web
import sys, os
# Make the project root importable for the core.* modules below.
sys.path.append(os.path.dirname('./../'))
# from app import *
import config
import core.models
from core.models.article import Article
from core.models.author import Author
sys.path.pop()
import unittest
# Long Russian fixture text used as the article body in the tests below.
longArticleText = """
123456 Защитная реакция, «бей или беги», срабатывает, когда при обработке информации мы осознаём наличие проблем. Вот некоторые физиологические симптомы, которые могут сопровождать стресс:
мышечное напряжение;
учащение пульса и повышение артериального давления;
учащенное дыхание (преимущественно грудью, а не животом);
тошнота;
ощущение, что голова тяжёлая;
пониженная концентрация внимания;
безразличие ко всему.
Защитная реакция − средство поддержания нашей жизнеспособности, позволяющая на раннем этапе затормозить развитие проблем. По сути, все физические реакции нашего организма связаны со стрессом. Например, убегая от медведя в лесу, мы активизируем ресурсы перед быстрым выплеском адреналина.
Или, например, перекусы во время работы могут вызывать тошноту, потому что мозговая активность на пике и требует больше ресурсов, чем обычно, а переваривание пищи затормаживается.
"""
# Author id used by the fixtures (currently unused below — kept for
# parity with related test files).
authorId = 1
class TestAutors(unittest.TestCase):
    """Integration test: log an author in and save a personal article."""
    article= None
    def setUp(self):
        # Log in with the fixture credentials and prepare an Article
        # instance with timestamped title/annotation so reruns are unique.
        self.autor = Author()
        # self.author_login = 'log_1540895938.3078651' #'login_1'
        # self.pwdStr = '123123' #'login'
        self.author_login = 'login_1'
        self.pwdStr = 'login'
        self.autor.login(self.author_login, self.pwdStr)
        print( 'setUp autor = ' + str(self.autor) )
        self.article = Article()
        self.article.article_title = 'СуперНоваяСтатья Персонаьно ' + str(time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()))
        self.article.article_annotation = 'СуперНоваяСтатья Персонаьно СуперНоваяСтатья Персонаьно СуперНоваяСтатья Персонаьно ' + str(time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()))
        self.article.article_category_id = 3
        self.article.article_template_id = 5
        self.article.article_permissions = 'sol'
        self.article.article_source = longArticleText
        # artModel.article_category_id = self.get_argument("category_id", 0)
        # article_pgroipId = self.get_argument("group_id", 0)
        # print( 'setUp 11 article = ' + str(self.article) )
    # def test_article_001(self):
    #     # article = core.models.Article()
    #     self.assertEqual(self.article.author_name, 'Name0012' )
    def test_article_save(self):
        # Persist the article prepared in setUp using the './tpl' templates.
        # article = core.models.Article()
        print( 'setUp 12 article = ' + str(self.article) )
        self.article.save(self.autor, './tpl')
        print( 'setUp article = ' + str(self.article) )
        # self.assertEqual(self.article.author_name, 'Name0012' )
if __name__ == '__main__':
unittest.main() |
17,643 | a32aabc17ee04278921e512d69cd1270876725bc | #小明有10000块钱,小白有1000,然后小白问小明借5000,问小白小明各多钱
# class Money:
# xiaoming_total_money = 10000
# @classmethod
# def xiaoming_money(cls):
# print(cls.xiaoming_total_money)
# xiaoming = 10000
# def __init__(self,name,money):
# self.name = name
# self.money = money
# Money.xiaoming_total_money -= money
#
# xiaobai = Money('小白',5000)
#
# Money.xiaoming_money()
class Person:
    """A person with money who may borrow a sum from another person
    (the lender) at construction time."""
    def __init__(self, name, money, obj=None, amount=5000):
        """
        :param name: the person's name
        :param money: starting balance
        :param obj: optional lender Person to borrow from immediately
        :param amount: sum transferred from the lender; defaults to 5000,
                       matching the previously hard-coded value
        """
        self.name = name
        self.money = money
        self.obj = obj
        self._amount = amount
        if self.obj:
            self.borrow()
    def borrow(self, amount=None):
        """Transfer *amount* (default: the amount given at construction)
        from the lender (self.obj) to this person."""
        if amount is None:
            amount = self._amount
        # Generalized: the transfer used to be hard-coded to 5000 here.
        self.obj.money -= amount
        self.money += amount
# Demo: Xiaoming starts with 10000, Xiaobai with 1000 and borrows 5000
# from Xiaoming during construction.
xiaoming = Person('小明',10000)
xiaobai = Person('小白',1000,xiaoming)
# xiaobai.borrow(xiaoming)
print('小明有%s,小白有%s.' % (xiaoming.money,xiaobai.money))
|
17,644 | a1a54c9b1e0383ce001f97ea67bfa4a603db20ab | # https://leetcode.com/problems/plus-one/discuss/1034092/Simple-Python-Solution
from typing import List
class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
i = len(digits) - 1
while i >= 0:
if digits[i] < 9:
digits[i] += 1
return digits
digits[i] = 0
i -= 1
digits.insert(0, 1)
return digits
s = Solution()
print(s.plusOne(digits = [4,3,2,1])) |
17,645 | 3375e1c6d2cfc3c8dfa31cb7abf3a72606dde4ec | #!/usr/bin/python
# -*- coding: utf-8 -*-
import spark
import tweepy
import os
import time
# Twitter API credentials come from the environment; a KeyError here
# means the deployment is missing one of the four variables.
consumer_token = os.environ['TWITTER_C_TOKEN']
consumer_secret = os.environ['TWITTER_C_SECRET']
access_token = os.environ['TWITTER_A_TOKEN']
access_secret = os.environ['TWITTER_A_SECRET']
# OAuth 1.0a handshake with the stored tokens.
auth = tweepy.OAuthHandler(consumer_token, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
def tweet_random():
    """Post a single randomly generated forecast to the account."""
    api.update_status(spark.forecast())
# Debug preview loop, currently disabled (range(0) never iterates);
# raise the bound to print sample forecasts without tweeting.
for i in range(0):
    print spark.forecast().encode('utf-8')
# Tweet forever, one forecast every five minutes.
while True:
    tweet_random()
    time.sleep(60*5)
|
17,646 | e712978cc5889799839553055063cf3b9b3515c6 | from crypto.transactions.serializer import Serializer
def test_serializer(transaction_type_5):
result = Serializer(transaction_type_5).serialize(False, True)
assert result == transaction_type_5['serialized']
|
17,647 | f83bfb9d50715a785aeafb96abb874b73b85db9d | # -×- coding:utf-8 -*-
from wx.lib.embeddedimage import PyEmbeddedImage
Apply = PyEmbeddedImage(
"Qk02DAAAAAAAADYAAAAoAAAAIAAAACAAAAABABgAAAAAAAAMAADEDgAAxA4AAAAAAAAAAAAA////"
"////////////////////////////////+/v77+/v0tPStLe0srKywsLC39/f9vb2/v7+////////"
"////////////////////////////////////////////////////////////////////////////"
"/////f396+zrsrmybHxsSl9LT1tQbXFtoqKi19fX9/f3////////////////////////////////"
"/////////////////////////////////////////////////////f397+/vnKqdRWpGCEsLBEsI"
"BUQIJ0IoYGJgoqKi4+Pj/v7+////////////////////////////////////////////////////"
"/////////////////////////v7+8fHxs7y0THNOBFoJBX8LBH8LBH0LBFYIHjwfdHR0w8PD9fX1"
"/////////////////////////////////////////////////////////////////////////f39"
"8fHxrrqvR3VJBmcNBpEPBo8NBYkMBYcMBIELBDkGUFRQl5eX3t7e/Pz8////////////////////"
"/////////////////////////////////////////////v7+8fHxtsC3SnpNB3IOCJ0SCJ8TB5oR"
"BpIOBYsNBYcMA2EHGzwcamtqtra27+/v////////////////////////////////////////////"
"/////////////////f398fHxrryvTX9QB3cPCaIUCaYVCaYVCaQUB50SB5UQBY4NBIILBEMHPkk/"
"h4eH0tLS+vr6/////////////////////////////////////////////////////v7+8fHxt8G4"
"ToBRB3cOCaMUCacVCacVCacVCacVCaUUCJ4SBpYPBY0NBHYKDDkPXV5dp6en6Ojo/v7+////////"
"/////////////////////////////////////v7+9PT0sL6xVoZZB3YOCaMUCacVCacVCacVCacV"
"CacVCacVCaQUCJ8TBpUPBYoNBFUILEIteHh4xcXF9fX1////////////////////////////////"
"/////////v7+9fX1wMjAYIpjLYkzCqMVCacVCacVCacVCacVCacVCacVCacVCaYVCKMUCJ0RBpMP"
"BIQMBj0IT1NPl5eX39/f/f39////////////////////////////////////+/v7wszCZ45pOo9A"
"j9aUOLhCCacVCacVCacVCacVCacVCaYVCacVCacVCaYVCKMUB5oRBpAOBGsJGTsabG1suLi48PDw"
"/v7+////////////////////////////////3+XfbpZwPpFDmtqem9ugbct0F6wjCacVCacVCacV"
"CKEUA2oKCaMUCacVCacVCaYVCKETB5oRBYwNBEYHQktCh4eH0tLS+fn5////////////////////"
"////////////rcGuMoI3ntqjpd+qoN2lnNugVcJdCacVCacVCJkSCVgNi6GMCmwQCaMUCacVCacV"
"CaYVCKETB5kQBXwMDTwPW15bp6en6Ojo/v7+////////////////////////////Y5RlabRureGx"
"q+Gvpt+rod6mmdqeS75TCJkSE1sXrbKt7OzsUIdTBosPCacVCacVCacVCaYVCKATB5MPBFkJLEIt"
"eXl5xcXF9fX1////////////////////////////aZpsarRvteS4sOO0q+Gvp+Crot6nidKPGHAc"
"fpZ/6Ojo+/v7/v7+DloSCKIUCacVCacVCacVCaYVCKATBo8PBkEIUFRQl5eX3t7e/Pz8////////"
"////////////////fKV+WqZfuea8tuW5seO1rOGwot2nKHothpiH4+Pj/f39////////ZZVnCIUR"
"CaQUCacVCacVCacVCaUVB54TBXMMGkIcaWpptLS07u7u/v7+////////////////////8fXxKWws"
"otmms+K2r+Gzo9ynEmMWcJBy4+Pj+Pj4/////////////f79HWUhCJ0TCacVCacVCacVCacVCaUV"
"B5sSBFIHPks/fX19xcXF8/Pz/v7+////////////////////+/z7IWgkIXAlNIM5HWkhwcjB1N3V"
"+/v7/v7+////////////////rcauD3MWCKAUCacVCacVCacVCacVCaUVCJkRB04LTlROj4+P1dXV"
"+fn5////////////////////////////3+ngvNC95Ork+/v7/v7+////////////////////////"
"////TIRPG5clCacVCacVCacVCacVCacVCaYVBosQFFAXXWFdpaWl5ubm/v7+////////////////"
"/////////////////////////////////////////////////////////f79DVsRasdxVcJdP7pI"
"LbQ3Hq8pFKsfDaQYBnQNKkwscnNyvLy88PDw/v7+////////////////////////////////////"
"////////////////////////////////////z97QHXEhiNSOhdSLgdKHfNCCd85+cs15acdwEGIU"
"RU9Gg4ODy8vL9fX1////////////////////////////////////////////////////////////"
"////////////////KWwseMd+i9aRhtSMgtKIfdCDeM9/c816Xr1lEVkVU1lTmJiY3d3d+/v7////"
"/////////////////////////////////////////////////////////////////////f79El4W"
"h9GNjNaSh9SNg9KJftGEec9/csx5RKFLHU8fZ2lnsLCw6+vr/v7+////////////////////////"
"////////////////////////////////////////////////mbmbRJdIitOPjdaTiNWOhNOKf9GF"
"es+Acct4H3YjOEw5eXp5wcHB8fHx////////////////////////////////////////////////"
"////////////////////////////RX9Ib750kNaVjteUidWPhNOKgNGGe9CBbsh1EWIWSFFJiIiI"
"zs7O////////////////////////////////////////////////////////////////////////"
"////9fj1IWkmhs6LlNmZj9eVitWQhdSLgdKHfNCCWbVfGl0eVFxVoaGh////////////////////"
"////////////////////////////////////////////////////////////6/HrHGogj9SUldma"
"kNeVi9aRhtSMgtKIes6AP5pFJE0mgICA////////////////////////////////////////////"
"////////////////////////////////////////y9vMJXgpltibltmbkdiWjNaSh9SNg9KJc8h5"
"CFIMdHZ0////////////////////////////////////////////////////////////////////"
"////////////////////eqN8VKZamtqfl9qcktiXjdaTiNWOfc6DCFcNg4aD////////////////"
"/////////////////////////////////////////////////////////////////////////v7+"
"JWoois+PndyhmNqdk9iYjteUe8uBDVURtbW1////////////////////////////////////////"
"////////////////////////////////////////////////////7/TvFWAZfcaCl9ibldiajNSR"
"HnQiip2L4+Pj////////////////////////////////////////////////////////////////"
"////////////////////////////////+Pr4H2YjPpBEN4o9DFkQprqn19/X+vr6")
Help = PyEmbeddedImage(
"Qk02DAAAAAAAADYAAAAoAAAAIAAAACAAAAABABgAAAAAAAAMAADEDgAAxA4AAAAAAAAAAAAA////"
"////////////////////////////////////9/f339/fwcHBoqKih4eHdXV1dHR0goKCmJiYtbW1"
"1tbW8vLy////////////////////////////////////////////////////////////////////"
"////8O/uyL+7mHRjlVY4qFUtulcow1onwVonvFYlsFIkmkkkhEUqbkc2eWpjsKyr5OTk////////"
"////////////////////////////////////////////////+vr60sjEo29Xtlsw3nE18YdJ95dW"
"+6Nl/a9w/bF2/atu+p1e9o5M6ns90mQuoUwieEIrcl5WtrS08vLy////////////////////////"
"////////////////////8vHwuZuOt1826Xw++Jpd/a1x/Kps+p9f+JdV9o5N84tJ84lJ9IxK9pJQ"
"+ZlX+pxc9IxL3W0zpE0kbEQylI2K4uLi////////////////////////////////////8e7stYNs"
"zGcz95lY/Kxw+6Zm+JdW9pFQ9pJQ9pJQ9o9P9o5M9ItK84hJ8odF8IJB8YJB9o5L+ZdW84tKxlwo"
"ekEog3Zw29vb////////////////////////////9fLxuYRr3nE4+6Vl/a9x+JhX9pJQ95NT95NS"
"95NS9pNR9pFP9o9N9I1L84tK84hH8oZF8INC734+74BA+JNR+JRS12cwhUQnhHhx4uLi////////"
"/////////////Pv7xJiE3nI3+6Zo+6do95NS95NS95RT95NT95RS9pNR+Kp2/Na+/vj1+9W89qRy"
"84lK8oZF8IND74FC7n4/7Hs99IxK95NS22gwfUMplY2K8/Pz////////////////3Ma7zWk3+6Vl"
"+6Zo9pJR95NS95RR95RT95RR9pNR43xB56OB////////////+9S884lK8odG8IND74FC738/7Hw+"
"63o79ItJ+JRSxl4rckY0trW0////////////+PXzxnRM95ZW/Kxw95NQ9pNS95NR95NR9pNS95NR"
"9pNS2G03/vn1/////////////vj084tJ8odF8IVE8IJA738+7Hw97Hs963k69pFP9ItLrlInd2FX"
"5OTk////////2rio53k7/K1w95RV9pFQ9pNQ9pJQ9pJQ9pJQ9pJQ9pJR2G0256OA////////////"
"+9S884tJ8ohG8YND8IFA738+7Hw97Hs86nc663k8+JZT43M3hEkusK2s/////fz7ynhR+Jxd+6Ji"
"9I5M9pFQ9pFN9pFP9pFP9o9P9pFP9pFP9pFP2XVE56J//vj0+9W99qVz84lH8odG8YVE8IFA738/"
"7Hw97Ho863g76HQ48oZF9o5NtlYpfmxk8vLy7t7X33E3/K9z9pJQ9I9N9I5N9I5N9I5M9I5N9I5N"
"9I5M9I1L9I5M9I5M4nk+4ng94nc984tK8ohH8oZF8IND74FB7H4/7Ho863g66Xg56Xc56Xc4+ZhW"
"3m0zfE851dXV37Sf8YdH+6hp9IxK9I1M9I1L9IxL84xK9I1M9pFP9pJR9pNR9pJQ9o9N9o5M9I5L"
"9IxK84xK84tK8ohJ8YVF74BB7Ho96nk76nc66HU46XU45XI19o1L7oBBl1AwtbW11Y5s+JpZ+ZpZ"
"9IxK9ItK84tJ9pFP95dW+Jxc+Z9f+J1e+JxZ95hX2XhH////////////96x79I5N9JFQ9pJR9I5P"
"8odG7H4/6HU45XM253Q25XE174BB9IxMsFYsl5eX1HdL+6tu9pJQ84hH9pFP+Jpc+qhs+6tu+6tu"
"+qZp+aJj+Z5e95lY2XZD////////////+cOh9pJQ9pRT95hY95pc95lZ9pZW8otL6nw95XI15XA0"
"6nc6+ZdWx14sgoKC1Wk0/bV984lK9pdX+6xx/LV9/Ld+/LB3+6tw+qZo+aJj+J1e95pZ2G85/vj0"
"/////////vDo96Jq9pZV9pdY95pZ+J1f+aJl+KFl9ppc8IZH5XM1428z+6Ff1WUudHR01mcv/bqC"
"+J5f/LmD/b2M/bmD/LR9+692+6xw+qhr+aJl+Z5f951d5H9F5p54/////////////vj0+LOE95lY"
"95xd+J1f+J9i+aVo+qhu+aZs849R5XQ2+6Ni12kvdXV12nA8/bV7/byH/cST/b6J/biG/LR++7F3"
"+61x+6ps+qVo+aFj+J5e+Jxd5IBF56uM////////////////+8qp+J5f+KFh+KJl+aZp+ahs+q11"
"+q1185RV+6Vl12cvh4eH3oRZ/Kts/cCN/cGP/b6M/bqH/LeB/LN7+691+6tw+qdp+aVm+aFi+Z9f"
"+J5d5IFI+8qp/////////////////M2s+aNm+aVo+adr+apu+qxx+7N7+q11+qJjz2MuoqKi5aSF"
"+Z1d/byG/cKQ/cCN/bqH/LiB/LV9+7F3+61z+6pu+qdp+aVm+aNl+aJj+aFi5YNL5pJj//n0////"
"////////+8ml+adr+apu+qxx+611+7N6+7F6+JdXvGE0wcHB8c6884xL/bqD/cKQ/cCP/byJ/bmC"
"/LV9+7N7+7B2+6xx+6pu+qhr+qdo+aZl+aVl+aVl5YVM+7yQ/////////////u7i+qpw+6xz+691"
"+7B3+7WB/LR78olKqWE/39/f+/Lu6X5E/bR6/b6M/cGP/b6M/bqG/LiC2oVb/////////////LyN"
"+6pu+6pr+qhs+qhs+qhs5YlQ/////////////vTs+611+612+7F6+7N9+7N9/K1x6Xo9pn1p9/f3"
"////6aSA+Z1e/bqG/cKQ/cCN/b6M/byG2oFT/////////////d/I+69z+61z+6xz+qxx+6tw+8OY"
"/////////////u/k+7F6+7N7+7R9/LeC/LV9+JpZyGo5zMG8////////+OLX8IZG/bN3/byH/cGP"
"/b6M/b2J55Rg//Xu/////////////uXS/LuJ+7B2+692/c+t//r2/////////////dKv+7V7+7d+"
"/LqD+7V+/K9z8IVFtXpe8O/u//////////7+7a2L+JlY/byD/L2M/cCN/b6M/b2J55tu////////"
"/////////////////////////////////////uvc/Lh+/LiC/LmG/LmC/Lh++JxczHBA1szG////"
"/////////////PLt6ZBh+qVl/bqD/b6M/cCN/b6N/byM6J1z//r3////////////////////////"
"/////////ujV/LqD/LqD/LqG/LmG/LiB+6Zo3ndAxaST+vr6////////////////////99/S74tS"
"+6do/bqC/LyH/b6N/b6M/b2M55Rh6KaC//Ln/////////////////dq+/c6m/LqH/LyJ/L2M/LqG"
"/Lh++6ps7YVFx5F18/Hw////////////////////////////9tfG7oxU+6Vm/bqD/LmC/cCN/b6M"
"/b6M/b6M55Rj55Rh55Rh55Rh55Rh/b2J/b2J/b6M/b6M/LqD/bmB+6ps7oZIzpR18u/t////////"
"////////////////////////////+N/S6pRh+Zxc/bR3/beB/LyH/b6N/b2M/b2M/b6M/b2M/b6M"
"/b6M/b6M/b2J/LqH/Ld+/bV7+qJi5IFG1aeO9vPx////////////////////////////////////"
"/////////PPu8LKQ84pL+qFi/bR6/bd+/LV+/LiD/LyG/byM/LqH/LmD/Ld+/Ld+/bV7+6Zm9I1N"
"3opc5M3A/Pz7//////////////////////////////////////////////////////7++eTY7qqF"
"74hJ95ZV+6Nj/K9x/bd+/byD/bmB/LB1+6Zm+JhX8YlL4Y5f5cGu+fX0////////////////////"
"/////////////////////////////////////////////////////PPv9NK/7q6M6pNj6oNK6nw+"
"6X9C6ItY5p936r+m8+LZ/fz8////////////////////////////////////////")
|
17,648 | 8c6636fb3cb17f78e633960a1da2539e51f499fd | # -*- coding:utf-8 -*-
import tensorflow as tf
# TensorFlow 1.x graph-mode demo (Python 2 print statements below).
X = tf.constant([[1,-2]])
A = tf.constant([1,-1])
B = tf.constant([3,6])
b = tf.constant(-6)
# Element-wise ReLU of A: negatives become 0.
y = tf.nn.relu(A)
# Clipping function: limit every element of X to the range [-1, 1].
c = tf.clip_by_value(X,-1,1)
# Mean of the element-wise (broadcast) product B * X.
ab = tf.reduce_mean(B*X)
sess = tf.Session()
init_op = tf.global_variables_initializer()
sess.run(init_op)
print sess.run(y)
print sess.run(c)
print sess.run(ab)
|
17,649 | d63a1e9cc02945033c882550eec3dd9e1ea48678 | # coding=utf-8
from selenium import webdriver
import os
import time
driver = webdriver.Firefox()
# Open the local test page via a file:// URL.
file_path='File:///' + os.path.abspath("E://javatest//200808//selenium_html//drop_down.html")
driver.get(file_path)
driver.maximize_window()
# Handling the drop-down box
# Method 1: locate the wanted <option> directly via XPath
# driver.find_element_by_xpath("//*[@id='ShippingMethod']/option[4]").click()
# Method 2: locate by option tag and loop
# NOTE(review): find_element_by_tag_name returns a single element, but the name
# `lists` and the commented loop suggest find_elements_by_tag_name was intended — confirm.
lists = driver.find_element_by_tag_name("option")
# for list in lists:
#     if list.get_attribute("value") == '9.03':
#         list.click()
# or
# lists[3].click()
time.sleep(5)
driver.quit() |
17,650 | 2e429535da6186f063d8cd957b04566a27a2fa0c | #!/usr/bin/python
import sys
sys.path.append('/home/ramnatthan/code/adsl-work/ALC/alc-strace/error_reporter')
import error_reporter
from error_reporter import FailureCategory
def failure_category(msg):
    """Map a replay-checker error message to ALC FailureCategory labels.

    NOTE: this file uses Python 2 `print` statements; run it under Python 2.
    """
    msg = msg.strip()
    if 'Possible corruption' in msg:
        return [FailureCategory.CORRUPTED_READ_VALUES]
    elif 'Durability signal found but retrieved 0 rows..which is not proper' in msg:
        return [FailureCategory.DURABILITY_VIOLATION]
    elif 'invalid authorization specification - not found: SA' in msg or 'error in script file line: /home/ramnatthan/workload_snapshots/hsqldb/replayedsnapshot/mydatabase 36' in msg or 'java.io.FileNotFoundException' in msg or 'user lacks privilege or object not found: CONTACTS' in msg:
        return[FailureCategory.FULL_WRITE_FAILURE, FailureCategory.FULL_READ_FAILURE]
    # Unrecognized message: report and abort loudly (debug guard).
    print 'Should not come here!!'
    assert False
def is_correct(msg):
    """Return True when the checker output indicates no durability problem."""
    ok_messages = (
        'Durability signal absent. Ignoring durability checks',
        'Durability signal found. No problem',
    )
    return msg.strip() in ok_messages
# Drive the report over the strace replay output, using the two classifiers above.
error_reporter.report_errors('\n', './micro_cache_file', './replay_output', is_correct, failure_category)
|
17,651 | f9cbf382a83f648de581682372a02ab138bda41d | import pandas as pd
import numpy as np
from operator import itemgetter
def saveArray(np_array, year):
    """Dump `np_array` to '<year>_top_list.csv' as comma-separated strings."""
    out_name = '{}_top_list.csv'.format(year)
    np.savetxt(out_name, np_array, delimiter=',', fmt='%s')
def giveValues(data_top, year):
    """Map each product in `data_top` to its 1-based rank, tagged with the year."""
    ranking = {'year': year}
    for rank, product in enumerate(data_top, start=1):
        ranking[product] = rank
    return ranking
def getTop(year, data):
    """Return the ranking dict for `year`, taken from column (year - 2008) of `data`."""
    col = year - 2008
    season = data[::, col]
    # Column copy kept in row-vector shape; the save step below is disabled.
    array2save = season.reshape((season.size, 1))
    #saveArray(array2save, year)
    return giveValues(season, year)
def getProm(data, TOP_LIST):
    """Average each product's rank across every yearly ranking in TOP_LIST.

    `data` supplies the product keys (its 'year' entry is skipped); each dict in
    TOP_LIST maps product -> rank plus a 'year' tag.
    NOTE(review): if a product appears in no yearly dict, the final division
    raises (KeyError/ZeroDivisionError) — confirm inputs always overlap.
    """
    diccionary_prom = {}
    for key in data:
        number_of_years = 0
        if not key=='year':
            for top_list in TOP_LIST:
                if top_list.get(key):
                    number_of_years += 1
                    # Accumulate the rank; first hit initializes the entry.
                    if diccionary_prom.get(key):
                        diccionary_prom[key] = diccionary_prom[key] + top_list[key]
                    else:
                        diccionary_prom[key] = top_list[key]
                else:
                    """
                    number_of_years += 1
                    if diccionary_prom.get(key):
                        diccionary_prom[key] = diccionary_prom[key] + 20
                    else:
                        diccionary_prom[key] = top_list[key]
                    """
                    print('Llave %s no encontrada en año %s'%(key,top_list['year']))
            # Average over the years the product actually appeared in.
            diccionary_prom[key] = float(diccionary_prom[key]/number_of_years)
    return diccionary_prom
def extract_data_money():
    """Load the spreadsheet and return the first 10 rows, name columns only."""
    sheet = pd.read_excel('Productos Agrícolas.xlsx')
    products = sheet.head(10).values
    print(f"{products.shape} \n\n {products}")
    # Every other column holds the product name; drop the value columns.
    products = products[::, ::2]
    print(f"\n\n{products}")
    return products
def extract_data():
    """Load the spreadsheet and return the first 20 rows, name columns only."""
    sheet = pd.read_excel('Productos Agrícolas.xlsx')
    top_rows = sheet.head(20).values
    # Every other column holds the product name; drop the value columns.
    return top_rows[::, ::2]
def get_all_Top_lists():
    """Build the 2008-2018 yearly rankings and return the average positions."""
    table = extract_data()
    # One ranking dict per year in the 2008..2018 range.
    yearly_tops = [getTop(yr, table) for yr in range(2008, 2019)]
    for ranking in yearly_tops:
        print(ranking)
        print()
    # Average every product's position across all the collected years.
    return getProm(yearly_tops[0], yearly_tops)
def sort_elements(result):
    """Return a copy of `result` whose keys are ordered by ascending value."""
    print(result)
    # dict preserves insertion order, so inserting by sorted value sorts the copy.
    return {product: result[product] for product in sorted(result, key=result.get)}
def save_top_list(sortedResult):
    """Print the top-20 table (header row + one row per product)."""
    table = np.array(['Producto', 'Puntaje'])
    for product, score in sortedResult.items():
        table = np.vstack([table, np.array([product, score])])
    # Header plus the first 20 products.
    print(table[:21, :])
# Pipeline: collect yearly rankings -> average -> sort -> print the top table.
TOP_LIST = get_all_Top_lists()
sortedResult = sort_elements(TOP_LIST)
save_top_list(sortedResult)
#extract_data_money() |
17,652 | 10910402d9e1bf180242957cd821fec453be75d4 | from Interfaces.ITaker import ITaker
from Interfaces.IObservable import IObservable
from Exceptions.NotFoundHtmlPageException import NotFoundHtmlPageException
import requests
class CategoryHtmlTaker(ITaker, IObservable):
    """Fetch a category page over HTTP, parse it, and push the result to observers.

    Raises NotFoundHtmlPageException when the server answers 404; other status
    codes are silently ignored (as in the original design).
    """

    def __init__(self, parser):
        self.__parser = parser
        # Per-instance observer list. The original kept a single class-level
        # list, so observers registered on one taker leaked into every other
        # instance of the class.
        self.__observers = []

    def Take(self, url):
        """Download `url`; notify observers on 200, raise on 404."""
        response = requests.get(url)
        if(response.status_code == 200):
            self.NotifyObservers(response.content)
        elif(response.status_code == 404):
            raise NotFoundHtmlPageException(self.__notFoundExceptionMessage, str(type(self).__name__))

    def RegisterObserver(self, observer):
        """Subscribe `observer` to parsed-page updates."""
        self.__observers.append(observer)

    def RemoveObserver(self, observer):
        """Unsubscribe `observer`; raises ValueError if it was never registered."""
        self.__observers.remove(observer)

    def NotifyObservers(self, data):
        """Parse `data` once per observer and deliver it via Update()."""
        for observer in self.__observers:
            observer.Update(self.__parser.Pars(data))

    # Russian: "the requested URL was not found" — runtime message, kept verbatim.
    __notFoundExceptionMessage = "Запрашиваемый URL не найден"
|
17,653 | 52d9030f3fe7ddb4e3882c7eaccfa78e3787f81f | # 创建一个空的bytes值
# Two ways to create an empty bytes value.
b1 = bytes()
b2 = b''
# The b prefix marks 'hello' as a bytes literal.
b3 = b'hello'
print(b3)
print(b3[0])
print(b3[2:4])
# Use bytes() to encode a str into a bytes object.
b4 = bytes('我爱Python编程', encoding='utf-8')
print(b4)
# Or use str.encode(); UTF-8 is the default codec.
b5 = "学习Python很有趣".encode('utf-8')
print(b5)
|
17,654 | 604300eb202b5cfe68503ffa397fcd0dd5a2a48f | """
In this exercise, you'll be playing around with the sys module,
which allows you to access many system specific variables and
methods, and the os module, which gives you access to lower-
level operating system functionality.
"""
import sys
# See docs for the sys module: https://docs.python.org/3.7/library/sys.html
# Print out the command line arguments in sys.argv, one per line:
# YOUR CODE HERE
# NOTE(review): the task asks for one argument per line, but this prints
# the whole list at once.
print(f'Length of Command Line Arguments: {len(sys.argv)}')
print(f'Command Line Argument: {sys.argv}')
'''
confused a bit on what this means.. ^^^
'''
# Print out the OS platform you're using:
# YOUR CODE HERE
import platform
print(f'Operating System: {platform.system()}')
# Print out the version of Python you're using:
# YOUR CODE HERE
print(f'Current Version of Python: {sys.version}')
# See the docs for the OS module: https://docs.python.org/3.7/library/os.html
# Print the current process ID
# YOUR CODE HERE
import os
print(f'Current Process ID: {os.getpid()}')
# Print the current working directory (cwd):
# YOUR CODE HERE
print(f'Current Working Directory: {os.getcwd()}')
# Print out your machine's login name
# YOUR CODE HERE
# NOTE(review): os.getlogin() can raise OSError without a controlling terminal.
print(f'Machine Login Name: {os.getlogin()}')
'''
run python src/03_modules.py in terminal
'''
|
17,655 | 3425df97f5dac6f1cbe2a4e87a966ab37368aa38 | import numpy as np
import pygame
import random
from pygame.locals import *
from quickunionfind import QuickUnionFind
from percolation import Percolation
# Colors
BLACK = (102, 51, 0)
WHITE = (255, 255, 255)
BLUE = (110, 193, 248)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
# Site dimensions
WIDTH = 6
HEIGHT = 6
MARGIN = 1
# Grid initialization: rows x columns, all sites closed (0).
grid = []
rows = 80
columns = 80
for row in range(rows):
    grid.append([])
    for column in range(columns):
        grid[row].append(0)
# NOTE(review): these two shuffled index lists are never used below.
random_rows = random.sample(range(0, rows), rows)
random_columns = random.sample(range(0, columns), columns)
# Pygame initialization (+20 px at the bottom for the status labels).
WINDOW_SIZE = [(columns * WIDTH + (MARGIN * columns) + MARGIN), (rows * HEIGHT + (MARGIN * rows) + MARGIN) + 20]
screen = pygame.display.set_mode(WINDOW_SIZE)
pygame.display.set_caption("Percolation Finder")
clock = pygame.time.Clock()
pygame.init()
percolation = Percolation(rows)
# Does it percolate label initialization
does_it_percolate_text = pygame.font.SysFont("monospace", 12)
does_it_percolate_label = does_it_percolate_text.render("Percolates: False", 1, (255,255,255))
screen.blit(does_it_percolate_label, (50, 565))
# Number of open sites label initialization
number_of_open_sites_text = pygame.font.SysFont("monospace", 12)
number_of_open_sites_label = number_of_open_sites_text.render("Number of open sites: 0", 1, (255,255,255))
screen.blit(number_of_open_sites_label, (200, 565))
done = False
while not done:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
    # Fills new square
    # NOTE(review): randint(0, rows) is inclusive, so the -1 yields -1..rows-1;
    # index -1 wraps to the last row/column — confirm this is intended.
    while True:
        random_row = random.randint(0, rows) - 1
        random_column = random.randint(0, columns) - 1
        if grid[random_row][random_column] == 0:
            grid[random_row][random_column] = 1
            break
    # Drawing: blue for full (connected-to-top) open sites, white otherwise.
    screen.fill(BLACK)
    for row in range(rows):
        for column in range(columns):
            color = BLACK
            if grid[row][column] == 1:
                if percolation.is_full(row, column):
                    percolation.open_site(row, column)
                    color = BLUE
                else:
                    percolation.open_site(row, column)
                    color = WHITE
            pygame.draw.rect(screen, color, [(MARGIN + WIDTH) * column + MARGIN, (MARGIN + HEIGHT) * row + MARGIN, WIDTH, HEIGHT])
    # NOTE(review): if `percolates` is a method (not a property) this branch
    # is always truthy — confirm against the Percolation class.
    if percolation.percolates:
        does_it_percolate_label = does_it_percolate_text.render("Percolates: True", 1, (255,255,255))
        screen.blit(does_it_percolate_label, (50, 565))
    number_of_open_sites_label = number_of_open_sites_text.render("Number of open sites: " + str(percolation.get_number_of_open_site()), 1, (255,255,255))
    screen.blit(number_of_open_sites_label, (200, 565))
    # FPS
    clock.tick(60)
    pygame.display.flip()
pygame.quit()
|
17,656 | daf8e81551888c583991f83d1bd97402b4327efd | from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from users.models import Utilisateur
from users.tests.auth_utils import compute_auth_header
# Fixture data for the single test user (French field names mirror the model).
ID_UTILISATEUR_1 = 1
PRENOM_UTILISATEUR_1 = "Pierre"
NOM_UTILISATEUR_1 = "Martinet"
MAIL_UTILISATEUR_1 = "p.martinet@live.fr"
PASS_UTILISATEUR_1 = "intraitable"
class WhoamiTest(APITestCase):
    """The /whoami endpoint must echo the authenticated user's identity."""

    @classmethod
    def setUpTestData(cls):
        # One user created once for the whole test class.
        cls.utilisateur1 = Utilisateur.objects.create_user(id=ID_UTILISATEUR_1,
                                                           last_name=NOM_UTILISATEUR_1,
                                                           first_name=PRENOM_UTILISATEUR_1,
                                                           email=MAIL_UTILISATEUR_1,
                                                           password=PASS_UTILISATEUR_1)

    def test_should_return_correct_info_for_whoami(self):
        """Authenticated GET /whoami returns 200 with the user's id and names."""
        auth_headers = compute_auth_header(self.client, MAIL_UTILISATEUR_1, PASS_UTILISATEUR_1)
        url_whoami = reverse("whoami")
        response_whoami = self.client.get(url_whoami, **auth_headers)
        print(response_whoami)
        self.assertEqual(response_whoami.status_code, status.HTTP_200_OK)
        self.assertEqual(response_whoami.data["id_utilisateur"], ID_UTILISATEUR_1)
        self.assertEqual(response_whoami.data["prenom"], PRENOM_UTILISATEUR_1)
        self.assertEqual(response_whoami.data["nom"], NOM_UTILISATEUR_1)
|
17,657 | a9d0c6243d32e586cb3ba28ab626ffd8af19812f | import sys
# DFS below recurses once per cell; allow the full 500x500 grid depth.
sys.setrecursionlimit(500*500)
H,W = map(int, input().split())
# One string per row; cells are addressed maze[y][x].
maze = [input() for _ in range(H)]
reached = [[False]*W for _ in range(H)]
# Locate the start 's' and goal 'g' cells.
S = None
G = None
for x in range(W):
    for y in range(H):
        if maze[y][x] == 's':
            S = (x,y)
        if maze[y][x] == 'g':
            G = (x,y)
        if S and G:
            break
    if S and G:
        break
def dfs(x, y):
    """Depth-first flood fill from (x, y); True iff the goal G is reachable."""
    outside = x < 0 or x >= W or y < 0 or y >= H
    if outside or maze[y][x] == '#' or reached[y][x]:
        return False
    if (x, y) == G:
        return True
    reached[y][x] = True
    neighbours = ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1))
    return any(dfs(nx, ny) for nx, ny in neighbours)
# Report whether the goal is reachable from the start.
if dfs(*S):
    print("Yes")
else:
    print("No")
|
17,658 | 57c308bf0937d00e50ccea2228c48b577773d82c | from flask import Flask, request
from flask_restful import Resource, Api
from flask_jwt import JWT, jwt_required # JWT --- JSON web token
from secure import authenticate, identity
from user import User, RegisterdUser
# create app instance
app = Flask(__name__)
# create api instance of app
api = Api(app)
# NOTE(review): hard-coded secret key; load it from the environment in production.
app.secret_key = 'rahul@123'
# JWT() registers a POST /auth endpoint that exchanges credentials for a token.
jwt = JWT(app, authenticate, identity)
# create in-memory database of books
books = [
    {
        'ISBN': '978-354-216',
        'author': 'Jon Mac',
        'name': 'The man',
        'quantity': 20
    },
    {
        'ISBN': '128-689-341',
        'author': 'Michael Jack',
        'name': 'Although what',
        'quantity': 10
    }
]
# create a Resource Book
class Book(Resource):
    """REST resource for a single book, addressed by ISBN in the URL."""

    # get book by ISBN
    @jwt_required()
    def get(self, ISBN):
        """Return the book dict for ISBN, or a not-found message (still HTTP 200)."""
        # search for the book with the ISBN , return it otherwise return a message
        for book in books:
            if book["ISBN"] == ISBN:
                return book
        return " Book Not Found"

    # create book
    def post(self, ISBN):
        """Increment quantity when the ISBN exists, else add the JSON payload.

        NOTE(review): the existing-book branch returns 201 but the create branch
        returns the default 200 — the opposite of usual REST semantics; confirm.
        """
        # check whether ISBN exists or not
        pos = 0
        for book in books:
            if book["ISBN"] == ISBN:
                # add only quantity
                book["quantity"] = book["quantity"] + 1
                # this line will not work the way we expect it to work.
                # books.extend(book)
                books[pos] = book
                return book, 201
            pos = pos+1
        # otherwise add the book to the books db
        # collect data
        payload = request.get_json()
        isbn = payload["ISBN"]
        author = payload["author"]
        name = payload["name"]
        quantity = payload["quantity"]
        newbook = {
            "ISBN": isbn,
            "author": author,
            "name": name,
            "quantity": quantity
        }
        # add this payload to books
        books.append(newbook)
        return newbook

    def delete(self, ISBN):
        """Remove the book with this ISBN; return a status message either way."""
        # if the ISBN exists, delete that book
        pos = 0
        for book in books:
            if book["ISBN"] == ISBN:
                # we found the book
                books.pop(pos)
                return "Book Deleted"
            pos = pos+1
        # else send a message that the book is not in the store
        return "No book with the ISBN number found"

    def put(self, ISBN):
        """Increment quantity when the ISBN exists, else create it (201)."""
        pos = 0
        for book in books:
            if book["ISBN"] == ISBN:
                # The book is already there, so update the quantity
                book["quantity"] = book["quantity"] + 1
                books[pos] = book
                return "Book quantity updated by 1"
            pos = pos+1
        # add the book if it is not in the books list
        payload = request.get_json()
        isbn = payload["ISBN"]
        author = payload["author"]
        name = payload["name"]
        quantity = payload["quantity"]
        newbook = {
            "ISBN": isbn,
            "author": author,
            "name": name,
            "quantity": quantity
        }
        # add this payload to books
        books.append(newbook)
        return newbook, 201
# Resource for getting all books from the db
class BookList(Resource):
    """Read-only collection resource for the in-memory book store."""

    # returns all the books in the store
    def get(self):
        """Return every book currently in the store."""
        return books
# create end points to the resource
api.add_resource(Book, '/book/<ISBN>')
api.add_resource(BookList, '/books') # returns all the books
api.add_resource(RegisterdUser, '/register')
# run the app
# NOTE(review): runs at import time; consider an `if __name__ == '__main__':` guard.
app.run(debug=True)
|
17,659 | 2ca50975499371936a903e3b645819e82a29782d | import numpy as np
import lib_planetcal as lib_p
# Observation setup: target planet, 40 GHz observing frequency, and the beam
# solid angle (presumably in steradians — TODO confirm units against lib_planetcal).
src_planet = 'Jupiter'
nu_obs = 40.e9
beam_solid_str = 2.481e-4
print lib_p.planet_info(src_planet,nu_obs,beam_solid_str) |
17,660 | 619e8c027a10bda88ca51c7bfd66e7b49b7a67ea | #Write a script to compute how many unique prime factors an integer has.
#For example, 12 = 2 x 2 x 3, so has two unique prime factors, 2 and 3.
#Use your script to compute the number of unique prime factors of 1234567890
def uniq_factors(n):
    """Return the number of distinct prime factors of n.

    Fixes the original, which used `n = n/i` (true division under Python 3,
    degrading to floats) and reset the trial divisor to 1 after every hit,
    making the loop needlessly quadratic in the divisor.
    """
    answer = []
    i = 2
    # Trial division up to sqrt(n); divide out each prime completely.
    while i * i <= n:
        if n % i == 0:
            answer.append(i)
            while n % i == 0:
                n //= i
        i += 1
    # Whatever is left (> 1) is a prime factor larger than sqrt(original n).
    if n > 1:
        answer.append(n)
    return len(answer)
# Read the target integer from stdin (the print below uses Python 2 syntax).
n = int(input())
print uniq_factors(n) |
17,661 | ca4475bf20cf02ffff64696c447de154882a10df | # packages
import numpy as np
# generate moving result
def TransMat(num_row, num_col, now_loc, action):
    '''
    Generate the occupancy grid that results from moving out of cell `now_loc`.

    Input:
        -num_row: number of grid rows
        -num_col: number of grid columns
        -now_loc: current cell index, row-major
        -action: one of 'up', 'down', 'left', 'right'

    Returns a (num_row, num_col) array holding the moved mass, or None when
    now_loc lies outside the grid.  A move into a wall bounces back into the
    interior cell next to that wall.
    '''
    # size check
    if now_loc >= num_row * num_col:
        print('Error! Out of index!')
        return None

    # Pad the grid with a one-cell wall on every side.
    padded = np.zeros((num_row + 2, num_col + 2))

    # Row/column of the current cell, shifted into padded coordinates.
    cur_row = int(now_loc / num_col)
    cur_col = now_loc - cur_row * num_col
    pad_row = cur_row + 1
    pad_col = cur_col + 1

    # Place the unit mass one step in the chosen direction.
    offsets = {'up': (-1, 0), 'left': (0, -1), 'down': (1, 0), 'right': (0, 1)}
    if action in offsets:
        d_row, d_col = offsets[action]
        padded[pad_row + d_row, pad_col + d_col] = 1
    else:
        print(str(action) + 'is not a valid action !')

    # Bounce any mass that landed on a wall back into the adjacent interior cell.
    if np.max(padded[:, 0]) > 0:
        hit = np.argmax(padded[:, 0])
        padded[hit, 1] += padded[hit, 0]
    if np.max(padded[:, num_col + 1]) > 0:
        hit = np.argmax(padded[:, num_col + 1])
        padded[hit, num_col] += padded[hit, num_col + 1]
    if np.max(padded[0, :]) > 0:
        hit = np.argmax(padded[0, :])
        padded[1, hit] += padded[0, hit]
    if np.max(padded[num_row + 1, :]) > 0:
        hit = np.argmax(padded[num_row + 1, :])
        padded[num_row, hit] += padded[num_row + 1, hit]

    # Strip the wall padding before returning.
    return padded[1:(num_row + 1), 1:(num_col + 1)]
|
17,662 | 63a67b12e855acca7ae98bd7b1b997ee96fcbbee | i=1
# Classic 9x9 multiplication table, printed one product per line.
for left in range(1, 10):
    for right in range(1, 10):
        print("{}*{}={}".format(left, right, left * right))
17,663 | 5c81f17264113a2bfe9b7f8858556adb74c58fef | #!/usr/bin/env python
import sys
import os
import sqlite3
import argparse
import subprocess
import shutil
import platform
from zipfile import ZipFile
# Paths are resolved relative to this script's location inside the web app.
rundir = os.path.dirname(os.path.realpath(__file__))
basedir = os.path.realpath("%s/../../"%(rundir)) # path of the application, i.e. pred/
path_result = "%s/static/result"%(basedir)
path_cache = "%s/static/result/cache"%(basedir)
# Pick the web-server user/group for the chown in ZipResultFolder.
# NOTE(review): platform.dist() was removed in Python 3.8 — confirm the runtime.
linux_dist = platform.dist()[0].lower()
user = "www-data"
group = "www-data"
if linux_dist in ["centos", "redhat"]:
    user = "apache"
    group = "apache"
elif linux_dist in ["debian", "ubuntu"]:
    user = "www-data"
    group = "www-data"
else:
    print("Unrecognized platform %s"%(linux_dist), file=sys.stderr)
    sys.exit(1)
def ZipResultFolder(md5_key, cnt):
    """Zip the result folder

    Replace the cache folder for `md5_key` with a sibling .zip archive (only
    folders containing query.result.txt are archived; bad folders are removed),
    or validate an already-zipped archive and delete it when it is corrupt or
    incomplete.  `cnt` is only used as a prefix in progress messages.
    """
    subfoldername = md5_key[:2]
    md5_subfolder = "%s/%s"%(path_cache, subfoldername)
    cachedir = "%s/%s/%s"%(path_cache, subfoldername, md5_key)
    zipfile_cache = cachedir + ".zip"
    if os.path.exists(cachedir) and not os.path.exists(zipfile_cache):
        origpath = os.getcwd()
        # zip is run from the parent so the archive stores relative paths.
        os.chdir(md5_subfolder)
        targetfile = os.path.join(cachedir, "query.result.txt")
        if os.path.exists(targetfile):
            cmd = ["zip", "-rq", "%s.zip"%(md5_key), md5_key]
            cmdline = " ".join(cmd)
            try:
                print(("%d: %s"%(cnt, cmdline)))
                subprocess.check_call(cmd)
                print(("%d: %s"%(cnt, "rmtree(%s)"%(md5_key) )))
                os.system("chown %s:%s %s"%(user, group, "%s.zip"%(md5_key)))
                shutil.rmtree(md5_key)
            except Exception:
                print("Failed to zip folder %s"%(cachedir), file=sys.stderr)
                raise
        else:
            # No result file -> the folder is useless; discard it.
            print(("%d: %s"%(cnt, "bad result! just rmtree(%s)"%(md5_key) )))
            shutil.rmtree(md5_key)
        os.chdir(origpath)
    elif os.path.exists(zipfile_cache):
        # Check whether the zipped file is a valid prediction result.
        # BUGFIX: ZipFile mode must be "r" — the original passed "rb", which
        # raises ValueError, landed in the except branch, and deleted every
        # perfectly valid archive it inspected.
        try:
            with ZipFile(zipfile_cache, "r") as myzip:
                li = myzip.namelist()
                target = "%s/query.result.txt"%(md5_key)
                if target in li:
                    print(("%d: %s"%(cnt, "Valid zipped result for %s"%(md5_key) )))
                else:
                    print(("%d: %s"%(cnt, "bad zipped result! just delete zipfile(%s)"%(md5_key) )))
                    os.remove(zipfile_cache)
        except Exception:
            print(("%d: %s"%(cnt, "BadZipFile! just delete zipfile(%s)"%(md5_key) )))
            os.remove(zipfile_cache)
if __name__ == '__main__':
    #metavar='' is the text shown after then option argument
    parser = argparse.ArgumentParser(
            description='zip cached job folder',
            formatter_class=argparse.RawDescriptionHelpFormatter,
            epilog='''\
Created 2018-09-10, updated 2018-09-10, Nanjiang Shu
Examples:
    %s -db dbname -exclude-md5 file
'''%(sys.argv[0]))
    parser.add_argument('-db' , metavar='DB', dest='db', required=False,
            help='the name of the finished date db')
    parser.add_argument('-exclude-md5' , metavar='FILE', dest='exclude_md5_file', required=False,
            help='name of the exclude md5 list file')
    parser.add_argument('-md5' , metavar='FILE', dest='md5_file', required=False,
            help='name of the include md5 list file')
    args = parser.parse_args()
    db = args.db
    exclude_md5_file = args.exclude_md5_file
    md5_file = args.md5_file
    # Mode 1: walk the sqlite db, zipping every md5 not on the exclude list.
    # NOTE(review): -db without -exclude-md5 crashes on open(None) — confirm
    # both flags are always supplied together.
    if not db == None:
        exclude_md5_list = open(exclude_md5_file).read().split("\n")
        exclude_md5_set = set(exclude_md5_list)
        #print exclude_md5_set
        tbname_content = "data"
        con = sqlite3.connect(db)
        with con:
            cur = con.cursor()
            cmd = "SELECT md5, seq, date_finish FROM %s"%(tbname_content)
            cnt = 0
            for row in cur.execute(cmd):
                # print row
                cnt += 1
                md5_key = row[0]
                seq = row[1]
                date_finish = row[2]
                if not md5_key in exclude_md5_set:
                    ZipResultFolder(md5_key, cnt)
    # Mode 2: zip exactly the md5 keys listed in the given file.
    elif not md5_file == None:
        md5_list = open(md5_file).read().split("\n")
        md5_list = [_f for _f in md5_list if _f]
        cnt = 0
        for md5_key in md5_list:
            cnt += 1
            ZipResultFolder(md5_key, cnt)
|
17,664 | 8a9799056a6a983aced5997d0417979c2553a3e4 | from deepdab import *
def print_info(board_size, policy, num_episodes=None, num_iterations=None, learning_rate=None, min_learning_rate=None, epsilon=None,
               min_epsilon=None, temperature=None, min_temperature=None, mode='self-play', reward='delayed',
               updates='online', architecture=None, decay_speed=None, rand_prob=None, batch_size=None,
               learning_rate_schedule=None, epsilon_schedule=None, temperature_schedule=None,
               dropout_keep_prob=None, use_symmetries=None, num_episodes_per_policy_update=None,
               num_episodes_per_opponent_cache=None, opponent_pool_max_size=None, episodes_per_thread=None,
               mcts=None, mcts_simulations=None, mcts_c=None, normalize_action_probs_with_softmax=None,
               activation=None, episodes_per_worker=None, episodes_per_iteration=None, num_workers=None,
               activation_note=None) if False else None  # (placeholder never executes)
def to_one_hot_action(board_state, edge_index):
    """Return a one-hot action vector, the length of board_state, marking edge_index."""
    return [1 if position == edge_index else 0 for position in range(len(board_state))]
def to_state_action_pair_symmetries(board_size, state, action, edge_length=1, include_dots=True):
    """
    Return the distinct board symmetries of a (state, action) pair.

    Both state and action are converted to edge-matrix form, every flip/rotation
    is generated (8 for square boards, 4 for rectangular ones), duplicates are
    dropped (keyed on the state matrix only), and the survivors are converted
    back to board-state form as [state, action] pairs.

    NOTE: the result order follows the fixed ordering of all_possible below;
    downstream code may depend on that ordering.
    """
    state_edge_matrix = convert_board_state_to_edge_matrix(board_size, state, edge_length, include_dots)
    action_edge_matrix = convert_board_state_to_edge_matrix(board_size, action, edge_length, include_dots)
    # I symmetry (identity)
    state_i = np.array(state_edge_matrix)
    action_i = np.array(action_edge_matrix)
    rows = state_i.shape[0]
    cols = state_i.shape[1]
    if rows == cols:
        # Square board: full dihedral group (flip S, rotations R..R^3, and combinations).
        # S symmetry
        state_s = np.flip(state_i, 1)
        action_s = np.flip(action_i, 1)
        # R symmetry
        state_r = np.rot90(state_i, 1)
        action_r = np.rot90(action_i, 1)
        # R^2 symmetry
        state_r2 = np.rot90(state_i, 2)
        action_r2 = np.rot90(action_i, 2)
        # R^3 symmetry
        state_r3 = np.rot90(state_i, 3)
        action_r3 = np.rot90(action_i, 3)
        # SR symmetry
        state_sr = np.rot90(state_s, 1)
        action_sr = np.rot90(action_s, 1)
        # SR^2 symmetry
        state_sr2 = np.rot90(state_s, 2)
        action_sr2 = np.rot90(action_s, 2)
        # SR^3 symmetry
        state_sr3 = np.rot90(state_s, 3)
        action_sr3 = np.rot90(action_s, 3)
        all_possible = [[state_i, action_i], [state_s, action_s], [state_r, action_r],
                        [state_r2, action_r2], [state_r3, action_r3], [state_sr, action_sr],
                        [state_sr2, action_sr2], [state_sr3, action_sr3]]
    else:
        # Rectangular board: only shape-preserving symmetries (flip and 180-degree rotations).
        # S symmetry
        state_s = np.flip(state_i, 1)
        action_s = np.flip(action_i, 1)
        # R^2 symmetry
        state_r2 = np.rot90(state_i, 2)
        action_r2 = np.rot90(action_i, 2)
        # SR^2 symmetry
        state_sr2 = np.rot90(state_s, 2)
        action_sr2 = np.rot90(action_s, 2)
        all_possible = [[state_i, action_i], [state_s, action_s], [state_r2, action_r2], [state_sr2, action_sr2]]
    def contains(list, arr):
        # True if arr's state matrix already appears in list (actions are ignored).
        for item in list:
            if np.array_equal(item[0], arr[0]):
                return True
        return False
    symmetries = []
    for sym in all_possible:
        if not contains(symmetries, sym):
            symmetries.append([convert_edge_matrix_to_board_state(sym[0], edge_length),
                               convert_edge_matrix_to_board_state(sym[1], edge_length)])
    return symmetries
def append_transitions(states, actions, outcomes, all_transitions, use_symmetries, board_size,
                       edge_length=1, include_dots=True):
    """
    Append [state, action, reward] triples to all_transitions (mutated in place),
    optionally expanding each triple into all of its board symmetries.
    """
    for idx in range(len(actions)):
        state, action, reward = states[idx], actions[idx], outcomes[idx]
        if not use_symmetries:
            all_transitions.append([state, action, reward])
            continue
        for sym_state, sym_action in to_state_action_pair_symmetries(
                board_size, state, action, edge_length, include_dots):
            all_transitions.append([sym_state, sym_action, reward])
def append_value_transitions(states, outcomes, all_transitions, use_symmetries, board_size, edge_length=1, include_dots=True):
    """
    Append [state, reward] pairs to all_transitions (mutated in place),
    optionally expanding each state into all of its board symmetries.
    """
    for i in range(len(outcomes)):
        state = states[i]
        reward = outcomes[i]
        if use_symmetries:
            # Only convert to the edge-matrix representation when symmetries are
            # actually requested; previously this conversion ran (wastefully) on
            # every iteration even when its result was discarded.
            state_edge_matrix = convert_board_state_to_edge_matrix(board_size, state, edge_length, include_dots)
            symmetries = convert_edge_matrix_to_symmetries(state_edge_matrix)
            for symmetry in symmetries:
                all_transitions.append([convert_edge_matrix_to_board_state(symmetry, edge_length=edge_length), reward])
        else:
            all_transitions.append([state, reward])
|
17,665 | 37bec1478e0fc71b16a5bc37da97056576b8e522 | import tensorflow as tf
import problem_unittests as tests
import helper
import pickle
# Graph handles populated later by build_cnn() (placeholders, loss, train op,
# accuracy metric); dummy-initialized so the module-level names exist up front.
x, y, keep_prob, cost, optimizer, accuracy = 0, 0, 0, 0, 0, 0
def neural_net_image_input(image_shape):
    """
    Build the placeholder that feeds image batches into the graph.
    : image_shape: per-image shape, e.g. (32, 32, 3)
    : return: float32 placeholder named "x" with a free batch dimension.
    """
    batch_shape = [None, *image_shape]
    return tf.placeholder(tf.float32, shape=batch_shape, name="x")
def neural_net_label_input(n_classes):
    """
    Build the placeholder that feeds one-hot label batches into the graph.
    : n_classes: number of classes (width of the one-hot vectors)
    : return: float32 placeholder named "labels" with a free batch dimension.
    """
    batch_shape = [None, n_classes]
    return tf.placeholder(tf.float32, shape=batch_shape, name="labels")
def neural_net_keep_prob_input():
    """
    Build the scalar placeholder holding the dropout keep probability.
    : return: float32 scalar placeholder named "keep_prob".
    """
    return tf.placeholder(tf.float32, name="keep_prob")
def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides, name="conv"):
    """
    Convolution (bias + ReLU) followed by max pooling, with TensorBoard summaries.
    :param x_tensor: input tensor (batch, height, width, depth)
    :param conv_num_outputs: number of convolution output channels
    :param conv_ksize: 2-D kernel size for the convolution
    :param conv_strides: 2-D stride for the convolution
    :param pool_ksize: 2-D kernel size for the pooling
    :param pool_strides: 2-D stride for the pooling
    :return: tensor after convolution, ReLU and max pooling
    """
    in_depth = x_tensor.get_shape().as_list()[3]
    with tf.name_scope(name):
        with tf.name_scope("weights"):
            kernel = tf.Variable(
                tf.truncated_normal([*conv_ksize, in_depth, conv_num_outputs], stddev=0.1))
            variable_summaries(kernel)
        with tf.name_scope("biases"):
            bias = tf.Variable(tf.constant(0.1, shape=[conv_num_outputs]))
            variable_summaries(bias)
        with tf.name_scope('Wx_plus_b'):
            pre_act = tf.nn.conv2d(x_tensor, kernel,
                                   strides=[1, *conv_strides, 1],
                                   padding='SAME') + bias
            tf.summary.histogram('pre_activations', pre_act)
        activated = tf.nn.relu(pre_act, name='activation')
        tf.summary.histogram('activations', activated)
        return tf.nn.max_pool(activated,
                              ksize=[1, *pool_ksize, 1],
                              strides=[1, *pool_strides, 1],
                              padding='SAME')
def conv2d(x_tensor, conv_num_outputs, conv_ksize, conv_strides, name = "conv"):
    """
    Convolution plus bias (no activation, no pooling), with weight/bias summaries.
    :param x_tensor: input tensor (batch, height, width, depth)
    :param conv_num_outputs: number of convolution output channels
    :param conv_ksize: 2-D kernel size for the convolution
    :param conv_strides: 2-D stride for the convolution
    :return: pre-activation tensor after convolution and bias
    """
    in_depth = x_tensor.get_shape().as_list()[3]
    with tf.name_scope(name):
        with tf.name_scope("weights"):
            kernel = tf.Variable(
                tf.truncated_normal([*conv_ksize, in_depth, conv_num_outputs], stddev=0.1))
            variable_summaries(kernel)
        with tf.name_scope("biases"):
            bias = tf.Variable(tf.constant(0.1, shape=[conv_num_outputs]))
            variable_summaries(bias)
        with tf.name_scope('Wx_plus_b'):
            pre_act = tf.nn.conv2d(x_tensor, kernel,
                                   strides=[1, *conv_strides, 1],
                                   padding='SAME') + bias
            tf.summary.histogram('pre_activations', pre_act)
        return pre_act
def pool2d(x_tensor, pool_ksize, pool_strides, name = "pool"):
    """
    Max-pool x_tensor with SAME padding.
    :param pool_ksize: 2-D kernel size for the pooling
    :param pool_strides: 2-D stride for the pooling
    :return: tensor after max pooling
    """
    with tf.name_scope(name):
        return tf.nn.max_pool(x_tensor,
                              ksize=[1, *pool_ksize, 1],
                              strides=[1, *pool_strides, 1],
                              padding='SAME')
def avg_pool2d(x_tensor, pool_ksize, pool_strides, name = "pool"):
    """
    Average-pool x_tensor with SAME padding.
    :param pool_ksize: 2-D kernel size for the pooling
    :param pool_strides: 2-D stride for the pooling
    :return: tensor after average pooling
    """
    with tf.name_scope(name):
        return tf.nn.avg_pool(x_tensor,
                              ksize=[1, *pool_ksize, 1],
                              strides=[1, *pool_strides, 1],
                              padding='SAME')
def flatten(x_tensor):
    """
    Flatten (batch, height, width, depth) into (batch, height * width * depth).
    : x_tensor: tensor whose trailing image dimensions are statically known.
    : return: 2-D tensor of shape (batch, flattened image size).
    """
    with tf.name_scope('input_reshape'):
        dims = x_tensor.get_shape().as_list()
        flat_size = dims[1] * dims[2] * dims[3]
        return tf.reshape(x_tensor, [-1, flat_size])
def fully_conn(x_tensor, num_outputs, name="fc"):
    """
    Dense layer mapping x_tensor (batch, features) to (batch, num_outputs).
    : x_tensor: 2-D input tensor, batch-first.
    : num_outputs: width of the output.
    : return: 2-D tensor of shape (batch, num_outputs).
    """
    with tf.name_scope(name):
        return tf.layers.dense(x_tensor, num_outputs)
def output(x_tensor, num_outputs, name="output"):
    """
    Final (logit) dense layer mapping x_tensor to (batch, num_outputs).
    : x_tensor: 2-D input tensor, batch-first.
    : num_outputs: number of classes.
    : return: 2-D logits tensor of shape (batch, num_outputs).
    """
    with tf.name_scope(name):
        return tf.layers.dense(x_tensor, num_outputs)
def resNet_block(x_tensor, bottleneck_d, num_outputs, _strides = (1, 1), short_cut = False, name = "resNet_block"):
    """
    Bottleneck residual block (He et al., https://arxiv.org/pdf/1512.03385.pdf).

    Main path: 1x1 -> 3x3 -> 1x1 convolutions, each followed by batch norm
    (the first two also by ReLU). The result is added to a shortcut connection
    and passed through a final ReLU. When short_cut is True or the block
    downsamples (_strides != (1, 1)), the shortcut is a 1x1 projection
    convolution so shapes match.
    """
    with tf.name_scope(name):
        shortcut = x_tensor
        """bottleneck desgin: 1x1 3x3 1x1 conv"""
        x_tensor = conv2d(x_tensor, bottleneck_d, (1, 1), (1, 1))
        x_tensor = tf.layers.batch_normalization(x_tensor)
        x_tensor = tf.nn.relu(x_tensor)
        x_tensor = conv2d(x_tensor, bottleneck_d, (3, 3), _strides)
        x_tensor = tf.layers.batch_normalization(x_tensor)
        x_tensor = tf.nn.relu(x_tensor)
        x_tensor = conv2d(x_tensor, num_outputs, (1, 1), (1, 1))
        x_tensor = tf.layers.batch_normalization(x_tensor)
        if short_cut or _strides != (1, 1):
            shortcut = conv2d(shortcut, num_outputs, (1, 1), _strides)
            # Bug fix: batch-normalize the projected shortcut. Previously this
            # line re-normalized the main path a second time and left the
            # projection un-normalized, contrary to the ResNet design.
            shortcut = tf.layers.batch_normalization(shortcut)
        # Identity / residual addition, then final activation.
        x_tensor = tf.add(x_tensor, shortcut)
        x_tensor = tf.nn.relu(x_tensor)
        return x_tensor
# https://arxiv.org/pdf/1512.03385.pdf
def resNet(image, resNet_block):
    """
    Assemble a CIFAR-sized ResNet from the supplied bottleneck block builder.

    : image: input image batch tensor (final 8x8 avg-pool assumes 32x32 input)
    : resNet_block: callable that builds one residual block
    : return: logits tensor with 10 outputs
    """
    tf.summary.image('input', image)
    # Conv1: stem convolution, 16 channels.
    with tf.variable_scope("conv1"):
        image = conv2d(image, 16, (3, 3), (1, 1))
        image = tf.layers.batch_normalization(image)
        image = tf.nn.relu(image)
    # Conv2: 18 blocks at full resolution; first block projects to 64 channels.
    for i in range (18):
        with tf.variable_scope("conv2_%d" % (i + 1)):
            if i == 0:
                # image = tf.nn.max_pool(image, ksize=[1, 3, 3, 1], strides= [1, 2, 2, 1], padding='SAME')
                image = resNet_block(image, 16, 64, short_cut = True)
            else:
                image = resNet_block(image, 16, 64)
    # Conv3: 18 blocks; first block downsamples by 2 and widens to 128.
    for i in range(18):
        with tf.variable_scope("conv3_%d" % (i + 1)):
            if i == 0:
                image = resNet_block(image, 32, 128, _strides = (2, 2))
            else:
                image = resNet_block(image, 32, 128)
    # Conv4: 18 blocks; first block downsamples by 2 and widens to 256.
    for i in range(18):
        with tf.variable_scope("conv4_%d" % (i + 1)):
            if i == 0:
                image = resNet_block(image, 64, 256, _strides = (2, 2))
            else:
                image = resNet_block(image, 64, 256)
    """
    # Conv5
    for i in range(3):
        with tf.variable_scope("conv5_%d" % (i + 1)):
            if i == 0:
                image = resNet_block(image, 128, 256, _strides = (2, 2))
            else:
                image = resNet_block(image, 128, 256)
    """
    # Avg Pool
    image = tf.layers.batch_normalization(image)
    image = tf.nn.relu(image)
    image = avg_pool2d(image, (8, 8), (1, 1))
    # Reshape
    image = flatten(image)
    # FC: final classifier over 10 classes.
    image = fully_conn(image, 10)
    return image
def vgg_net(x, keep_prob):
    """
    VGG-style stack of 3x3 conv + batch norm + ReLU layers for CIFAR-10.

    : x: image batch placeholder
    : keep_prob: dropout keep-probability placeholder
    : return: logits tensor with 10 outputs

    NOTE(review): every dropout op here reuses the name "dropout1" — presumably
    a copy-paste leftover; harmless at runtime but confusing in TensorBoard.
    """
    tf.summary.image('input', x)
    with tf.variable_scope("conv1"):
        x = conv2d(x, 16, (3, 3), (1, 1))
        x = tf.layers.batch_normalization(x)
        x = tf.nn.relu(x)
        x = pool2d(x, (2, 2), (2, 2))
        x = tf.nn.dropout(x, keep_prob, name="dropout1")
    with tf.variable_scope("conv2"):
        x = conv2d(x, 32, (3, 3), (1, 1))
        x = tf.layers.batch_normalization(x)
        x = tf.nn.relu(x)
        x = pool2d(x, (2, 2), (2, 2))
        x = tf.nn.dropout(x, keep_prob, name="dropout1")
    with tf.variable_scope("conv3"):
        x = conv2d(x, 64, (3, 3), (1, 1))
        x = tf.layers.batch_normalization(x)
        x = tf.nn.relu(x)
        x = tf.nn.dropout(x, keep_prob, name="dropout1")
    with tf.variable_scope("conv4"):
        x = conv2d(x, 64, (3, 3), (1, 1))
        x = tf.layers.batch_normalization(x)
        x = tf.nn.relu(x)
        x = tf.nn.dropout(x, keep_prob, name="dropout1")
    with tf.variable_scope("conv5"):
        x = conv2d(x, 128, (3, 3), (1, 1))
        x = tf.layers.batch_normalization(x)
        x = tf.nn.relu(x)
        x = tf.nn.dropout(x, keep_prob, name="dropout1")
    with tf.variable_scope("conv6"):
        x = conv2d(x, 128, (3, 3), (1, 1))
        x = tf.layers.batch_normalization(x)
        x = tf.nn.relu(x)
        x = tf.nn.dropout(x, keep_prob, name="dropout1")
    # Apply a Flatten Layer
    # Function Definition from Above:
    #   flatten(x_tensor)
    flatten1 = flatten(x)
    # Apply 1, 2, or 3 Fully Connected Layers
    # Play around with different number of outputs
    # Function Definition from Above:
    #   fully_conn(x_tensor, num_outputs)
    fc1 = fully_conn(flatten1, 256, "fc1")
    dropout2 = tf.nn.dropout(fc1, keep_prob, name="dropout2")
    fc2 = fully_conn(dropout2, 256, "fc2")
    dropout3 = tf.nn.dropout(fc2, keep_prob, name="dropout3")
    # Apply an Output Layer
    # Set this to the number of classes
    # Function Definition from Above:
    #   output(x_tensor, num_outputs)
    return output(dropout3, 10)
def conv_net(x, keep_prob):
    """
    Create a convolutional neural network model
    : x: Placeholder tensor that holds image data.
    : keep_prob: Placeholder tensor that hold dropout keep probability.
    : return: Tensor that represents logits
    """
    # Apply 1, 2, or 3 Convolution and Max Pool layers
    # Play around with different number of outputs, kernel size and stride
    # Function Definition from Above:
    #    conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)
    tf.summary.image('input', x)
    # Two conv/pool stages with local response normalization after each.
    x = conv2d_maxpool(x, 64, (5, 5), (1, 1), (3, 3), (2, 2), "conv1")
    x = tf.nn.lrn(x)
    x = conv2d_maxpool(x, 64, (5, 5), (1, 1), (3, 3), (2, 2), "conv2")
    x = tf.nn.lrn(x)
    x = tf.nn.dropout(x, keep_prob, name="dropout1")
    # Apply a Flatten Layer
    # Function Definition from Above:
    #   flatten(x_tensor)
    x = flatten(x)
    # Apply 1, 2, or 3 Fully Connected Layers
    # Play around with different number of outputs
    # Function Definition from Above:
    #   fully_conn(x_tensor, num_outputs)
    x = fully_conn(x, 384, "fc1")
    # x = tf.nn.dropout(x, keep_prob, name="dropout2")
    x = fully_conn(x, 192, "fc2")
    # x = tf.nn.dropout(x, keep_prob, name="dropout3")
    # Apply an Output Layer
    # Set this to the number of classes
    # Function Definition from Above:
    #   output(x_tensor, num_outputs)
    return output(x, 10)
def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch, writer, merged_summary, i):
    """
    Optimize the session on a batch of images and labels
    : session: Current TensorFlow session
    : optimizer: TensorFlow optimizer function
    : keep_probability: keep probability
    : feature_batch: Batch of Numpy image data
    : label_batch: Batch of Numpy label data
    : writer: tf.summary.FileWriter receiving the merged summaries
    : merged_summary: merged summary op (from tf.summary.merge_all())
    : i: global step counter used as the summary timestamp
    """
    # Record summaries only every 5th step to keep the event file small.
    if i % 5 == 0:
        s = session.run(merged_summary, feed_dict={x:feature_batch, y:label_batch, keep_prob:keep_probability})
        writer.add_summary(s, i)
    session.run(optimizer, feed_dict={x:feature_batch, y:label_batch, keep_prob:keep_probability})
def print_stats(session, feature_batch, label_batch, cost, accuracy):
    """
    Print information about loss and validation accuracy
    : session: Current TensorFlow session
    : feature_batch: Batch of Numpy image data
    : label_batch: Batch of Numpy label data
    : cost: TensorFlow cost function
    : accuracy: TensorFlow accuracy function
    """
    # Load the Preprocessed Validation data
    valid_features, valid_labels = pickle.load(open(helper.pickle_file_path('preprocess_validation.p'), mode='rb'))
    # Evaluate with dropout disabled (keep_prob = 1.0).
    loss = session.run(cost, feed_dict= {x:feature_batch, y:label_batch, keep_prob: 1.0})
    valid_acc = session.run(accuracy, feed_dict= {x:valid_features, y:valid_labels, keep_prob: 1.0})
    print(loss)
    print(valid_acc)
def variable_summaries(var):
    """Attach mean/stddev/min/max scalars plus a histogram of var for TensorBoard."""
    with tf.name_scope('summaries'):
        avg = tf.reduce_mean(var)
        tf.summary.scalar('mean', avg)
        with tf.name_scope('stddev'):
            spread = tf.sqrt(tf.reduce_mean(tf.square(var - avg)))
        tf.summary.scalar('stddev', spread)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)
def test_implementation():
    """Run the course's unit tests over each building block, then over the assembled graph."""
    tf.reset_default_graph()
    tests.test_nn_image_inputs(neural_net_image_input)
    tests.test_nn_label_inputs(neural_net_label_input)
    tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
    tests.test_con_pool(conv2d_maxpool)
    tests.test_flatten(flatten)
    tests.test_fully_conn(fully_conn)
    tests.test_output(output)
    # Build the full graph before exercising the network/training tests.
    build_cnn()
    tests.test_conv_net(conv_net)
    tests.test_train_nn(train_neural_network)
def build_cnn():
    """
    Build the full training graph and publish its pieces through module globals
    (x, y, keep_prob, cost, optimizer, accuracy) for the training loops.
    """
    # Remove previous weights, bias, inputs, etc..
    tf.reset_default_graph()
    # Inputs
    global x, y, keep_prob
    x = neural_net_image_input((32, 32, 3))
    y = neural_net_label_input(10)
    keep_prob = neural_net_keep_prob_input()
    # Model (ResNet variant; vgg_net/conv_net are alternative architectures)
    logits = resNet(x, resNet_block)
    # Name logits Tensor, so that is can be loaded from disk after training
    logits = tf.identity(logits, name='logits')
    # Loss
    with tf.name_scope("xent"):
        global cost
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
        tf.summary.scalar("cross_entropy", cost)
    # Optimizer
    with tf.name_scope("train"):
        global optimizer
        optimizer = tf.train.AdamOptimizer().minimize(cost)
    # Accuracy
    with tf.name_scope("accuracy"):
        with tf.name_scope("correct_prediction"):
            correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
        with tf.name_scope("accuracy"):
            global accuracy
            accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
            tf.summary.scalar("accuracy", accuracy)
def train_cnn_single_batch(epochs, batch_size, keep_probability):
    """
    Sanity-check training on CIFAR-10 batch 1 only.
    : epochs: number of passes over the single batch
    : batch_size: minibatch size fed to the graph
    : keep_probability: dropout keep probability during training
    """
    print('Checking the Training on a Single Batch...')
    with tf.Session() as sess:
        # train_neural_network requires a summary writer, the merged summary op
        # and a step counter; the original call omitted all three and raised a
        # TypeError. Build them here against a scratch log directory.
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter('../tmp/cifar/single_batch', sess.graph)
        # Initializing the variables
        sess.run(tf.global_variables_initializer())
        # Training cycle
        step = 0
        for epoch in range(epochs):
            batch_i = 1
            for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
                train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels,
                                     train_writer, merged, step)
                step += 1
            print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
            print_stats(sess, batch_features, batch_labels, cost, accuracy)
        train_writer.close()
def train_cnn_all_batches(epochs, batch_size, keep_probability):
    """
    Train on all five CIFAR-10 batches, logging TensorBoard summaries and
    saving the trained model to disk when finished.
    """
    save_model_path = '../model/image_classification'
    print('Training...')
    sess = tf.InteractiveSession()
    # Visualize graph and merge all the summaries
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter('../tmp/cifar/24' + '/train', sess.graph)
    test_writer = tf.summary.FileWriter('../tmp/cifar/24' + '/test')
    # Initializing the variables
    sess.run(tf.global_variables_initializer())
    # Training cycle; i is the global step fed to the summary writer.
    i = 0
    for epoch in range(epochs):
        # Loop over all batches
        n_batches = 5
        for batch_i in range(1, n_batches + 1):
            for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
                train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels, train_writer, merged, i)
                i += 1
            print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
            print_stats(sess, batch_features, batch_labels, cost, accuracy)
    # Save Model
    saver = tf.train.Saver()
    save_path = saver.save(sess, save_model_path)
    train_writer.close()
    test_writer.close()
# Entry point: build the graph, then train on all five CIFAR-10 batches.
# test_implementation()
build_cnn()
#train_cnn_single_batch(10, 256, 0.5)
train_cnn_all_batches(150, 256, 0.5)
|
17,666 | a16f7a3b69e3c52fcedc1ff0ac41752dd47e46b2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: Kun Jia
# date: 2/23/17
# email: me@jarrekk.com
from flask import g, jsonify, request
from flask_httpauth import HTTPBasicAuth
from app.models import User, AnonymousUser
from . import api_1_0
from .errors import unauthorized
from .constant import login_required_list
auth = HTTPBasicAuth()
@auth.verify_password
def verify_password(email_or_token, password):
    """
    HTTPBasicAuth credential callback supporting three modes:
    empty credentials -> anonymous user; token + empty password -> token auth;
    otherwise -> email/password lookup. Sets g.current_user / g.token_used.
    """
    # Anonymous access: no credential supplied at all.
    if email_or_token == '':
        g.current_user = AnonymousUser()
        return True
    # Token authentication: password field left empty.
    if password == '':
        g.current_user = User.verify_auth_token(email_or_token)
        g.token_used = True
        return g.current_user is not None
    # Email + password authentication.
    matches = User.objects.filter(email=email_or_token)
    if not matches:
        return False
    account = matches.first_or_404()
    g.current_user = account
    g.token_used = False
    return account.verify_password(password)
@auth.error_handler
def auth_error():
    """Translate HTTP auth failures into the API's 401 JSON error response."""
    return unauthorized('Invalid credentials')
@api_1_0.before_app_request
@auth.login_required
def before_request():
    """
    Reject anonymous access to API endpoints listed in login_required_list.
    CORS preflight (OPTIONS) requests always pass through.
    """
    if request.method == 'OPTIONS':
        return
    if not (g.current_user.is_anonymous and request.endpoint):
        return
    endpoint = request.endpoint
    if ('.' in endpoint
            and endpoint.startswith('api_1_0')
            and endpoint.split('.')[1] in login_required_list):
        return unauthorized('Unauthorized account')
@api_1_0.route('/token')
def get_token():
    """Issue a 24-hour auth token; refuse anonymous users and token-based logins."""
    if g.current_user.is_anonymous or g.token_used:
        return unauthorized('Invalid credentials')
    token = g.current_user.generate_auth_token(expiration=86400).decode()
    return jsonify(token=token, expiration=86400, email=g.current_user.email)
@api_1_0.route('/test')
@auth.login_required
def login_test():
    """Diagnostic endpoint: report whether the current credentials resolve to a real user."""
    if g.current_user == AnonymousUser():
        return jsonify(status='error', data='Anonymous user!'), 401
    return jsonify(email=g.current_user.email, status='success')
|
17,667 | 3ec077bf01a2a44ca3122e4a76ecdd77da5fe979 | def ans(n,t):
    # Sum of all multiples of n up to and including t: n * (1 + 2 + ... + t//n).
    # NOTE(review): counts multiples <= t; confirm whether "below t" was intended.
    no_t=t//n
    return(((n)*(no_t)*(no_t+1))//2)
# Multiples of 3 or 5 up to t, via inclusion-exclusion (subtract double-counted 15s).
t=int(input())
print(ans(3,t)+ans(5,t)-ans(15,t))
|
17,668 | 79f11094aa7c7c59fbdf81aa5aa6dec4948b97cd | import json
from tqdm import tqdm
import pickle
# loading review ....... collect user/business ids referenced by the first 1000 reviews
with open('../../EECS595/EECS595/yelp_dataset/yelp_academic_dataset_review.json', encoding='utf-8') as f:
    user_id = []
    business_id = []
    cnt = 0
    for line in f:
        if cnt == 1000:
            break
        cnt += 1
        review_text = json.loads(line)
        user_id.append(review_text['user_id'])
        business_id.append(review_text['business_id'])
print(len(user_id))
# Extend the id lists with the first 1000 tips.
with open('../../EECS595/EECS595/yelp_dataset/yelp_academic_dataset_tip.json', encoding='utf-8') as f:
    cnt = 0
    for line in f:
        if cnt == 1000:
            break
        cnt += 1
        review_text = json.loads(line)
        user_id.append(review_text['user_id'])
        business_id.append(review_text['business_id'])
print(len(user_id))
# Photos only contribute business ids (they carry no user_id field here).
with open('../../EECS595/EECS595/yelp_dataset/yelp_academic_dataset_photo.json', encoding='utf-8') as f:
    cnt = 0
    for line in f:
        if cnt == 1000:
            break
        cnt += 1
        review_text = json.loads(line)
        #user_id.append(review_text['user_id'])
        business_id.append(review_text['business_id'])
print(len(user_id))
print(len(business_id))
# loading user ...... keep only users referenced above
# NOTE(review): `in user_id` is an O(n) list scan per record — a set would be
# much faster here; left unchanged, flagged only.
with open('../../EECS595/EECS595/yelp_dataset/yelp_academic_dataset_user.json', encoding='utf-8') as f:
    new_user_info = []
    for line in tqdm(f):
        user_info = json.loads(line)
        if user_info['user_id'] in user_id:
            new_user_info.append(user_info)
print(len(new_user_info))
# Map each kept user id to a 1-based integer id.
user_to_id = {}
cnt = 1
for item in new_user_info:
    user_to_id[item['user_id']] = cnt
    cnt += 1
print(len(user_to_id))
pickle.dump(new_user_info, open('./user.pkl', 'wb'))
pickle.dump(user_to_id, open('./user2id.pkl', 'wb'))
# loading business ...... keep only businesses referenced above
with open('../../EECS595/EECS595/yelp_dataset/yelp_academic_dataset_business.json', encoding='utf-8') as f:
    new_business_info = []
    for line in tqdm(f):
        business_info = json.loads(line)
        if business_info['business_id'] in business_id:
            new_business_info.append(business_info)
pickle.dump(new_business_info, open('./business.pkl', 'wb'))
print(len(new_business_info))
# Map each kept business id to a 1-based integer id.
business_to_id = {}
cnt = 1
for item in new_business_info:
    business_to_id[item['business_id']] = cnt
    cnt += 1
print(len(business_to_id))
pickle.dump(business_to_id, open('./business2id.pkl', 'wb'))
# Map each distinct business address to a 1-based integer id.
location_to_id = {}
cnt = 1
for item in new_business_info:
    if item['address'] not in location_to_id:
        location_to_id[item['address']] = cnt
        cnt += 1
print(len(location_to_id))
pickle.dump(location_to_id, open('./location2id.pkl', 'wb'))
|
17,669 | ff2e8fea6c82d5faa5caf7992da1a3a83f0f78b6 | import spatial
import art
import player
import query
def sweptaway(player_obj):
    """Handle the player being swept away by the river: show the art, then kill them."""
    print('')
    art.display("sweptaway")
    print('#' * 100)
    print("You've been swept away!")
    player_obj.kill()
def enter(player_obj, space_obj):
    """
    Run the river-bank encounter: loop on input until the player issues a valid
    movement. Crossing attempts (forward/right/left) sweep the player away;
    any other direction moves them out of the space.
    """
    art.display("river")
    print('#' * 100)
    print('You are on the edge of a river, it is unsafe to cross.')
    while True:
        command = input('What would you like to do? ')
        err, direction = query.parse_movement(command)
        if err != False:
            # Unparseable input: ask again.
            continue
        if direction in ('forward', 'right', 'left'):
            sweptaway(player_obj)
        else:
            player_obj.move(direction)
        break
def init():
    """Construct and return the river space, loading its art assets."""
    river_space = spatial.space()
    river_space.name = "river"
    river_space.enter = enter
    art.load("river", "content/river.ansi")
    art.load("sweptaway", "content/sweptaway.ansi")
    return river_space
17,670 | 63be0d5175e988d00705b400bbd9a3aceb737e8f | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 28 21:16:37 2016
@author: zc
"""
import tensorflow as tf
# 1x2 and 2x1 constant matrices; their product is the 1x1 matrix [[12.]].
matrix1 = tf.constant([[3.,3.]])
matrix2 = tf.constant([[2.],[2.]])
product = tf.matmul(matrix1,matrix2)
with tf.Session() as sess:
    result = sess.run([product])
    # NOTE: Python 2 print statement — this file is a syntax error under Python 3.
    print result
17,671 | 44d904a857f4ed7cd816bc563d5d278e9f08e267 | import numpy as np
class Optimizer():
    """Base class for optimizers; concrete subclasses override step()."""

    def __init__(self, lr=0.01):
        # Learning rate shared by all optimizers.
        self.lr = lr

    def step(self):
        """Apply one parameter update (a no-op in the base class)."""
        pass
class SGD(Optimizer):
    """Plain stochastic gradient descent."""

    def __init__(self, lr=0.01):
        super().__init__(lr)

    def step(self):
        """
        Nudge every parameter against its gradient, scaled by the learning rate.
        Updates are made in place so the network sees the change.
        """
        params = self.net.params()
        grads = self.net.param_grads()
        for param, param_grad in zip(params, grads):
            param -= self.lr * param_grad
17,672 | 237a962dd13b8c4bff0fd800252c27869b3a3df8 | # @author Jake Runyan
# @desc A hypothetical trader that unit tests a variety of VectorSpaces' new APIs to see if using those as margins to trade is worth it.
# ASSUMED FILE TYPES ARE ".PF"
import ccxt
import datetime
from JScraper import JScraper # https://github.com/runyanjake/JScraper
import json
import platform
import matplotlib as mpl
if platform.system() == "Darwin":
mpl.use('TkAgg')
elif platform.system() == "Windows":
mpl.use('Agg')
else: #linux
mpl.use('Agg')
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
from matplotlib import ticker
import sqlite3
import sys
#exception type for creating a special exception with a string
#THESE EXCEPTIONS ARE FATAL AND SHOULD BE CAUGHT & TRIGGER Portfolio.__del__
class PortfolioException(Exception):
    """Fatal portfolio error carrying a human-readable message."""
    pass
class Portfolio:
    """
    JSON-file-backed portfolio of coin holdings, a trade history, and an idle
    cash pool. Prices are fetched live through JScraper; save() also records
    total worth into a sqlite database and renders a worth-over-time plot.

    NOTE(review): the class-level {} defaults below are shared across all
    instances of Portfolio created in one process — TODO confirm intended.
    """
    #private
    __filepath = None
    __cashpool_amt = None
    __portfolio_id = None
    __portfolio_owner = None
    __portfolio_trade_history = {}
    __portfolio = {}
    #public
    #default constructor FOR EXISTING PORTFOLIO
    #if wanting a blank new portfolio, use the static method Portfolio.generatePortfolio(...)
    def __init__(self, path_to_file):
        # Load and validate every required field from the JSON portfolio file;
        # any missing piece raises a fatal PortfolioException.
        file = None
        try:
            file = open(path_to_file)
        except(Exception):
            raise PortfolioException("Portfolio file not found: " + str(path_to_file))
        data = json.load(file)
        tmp = data["cashpool_amt"]
        if tmp == None:
            raise PortfolioException("Corrupted portfolio file: No cashpool amount for " + str(path_to_file))
        self.__cashpool_amt = tmp
        tmp = data["portfolio_id"]
        if tmp == None:
            raise PortfolioException("Corrupted portfolio file: No ID for " + str(path_to_file))
        self.__portfolio_id = tmp
        tmp = data["portfolio_owner"]
        if tmp == None:
            raise PortfolioException("Corrupted portfolio file: No Owner for " + str(path_to_file))
        self.__portfolio_owner = tmp
        tmp = data["portfolio"]
        if tmp == None:
            raise PortfolioException("Corrupted portfolio file: No Portfolio for " + str(path_to_file))
        if tmp is not None and len(tmp) > 0:
            for entry in tmp:
                self.__portfolio[entry] = tmp[entry]
        tmp = data["portfolio_trade_history"]
        if tmp == None:
            raise PortfolioException("Corrupted portfolio file: No Portfolio Trade History for " + str(path_to_file))
        if tmp is not None and len(tmp) > 0:
            for entry in tmp:
                self.__portfolio_trade_history[entry] = tmp[entry]
        self.__filepath = path_to_file
    @staticmethod
    #Creates new instance of portfolio, as a JSON file. this DOES NOT create a PF instance
    #ID must be number, owner must be string, portfolio must be dictionary of string : number pairs denoting ticker : amount relation
    #portfolio id is mainly a semantic identifier, the filename of the portfolio is the important thing.
    def generatePortfolio(path, portfolioID, portfolioOwner, portfolio, portfolioTradeHistory, cashpool_amount):
        data = {}
        data["cashpool_amt"] = cashpool_amount
        data["portfolio"] = portfolio
        data["portfolio_id"] = portfolioID
        data['portfolio_owner'] = portfolioOwner
        data['portfolio_trade_history'] = portfolioTradeHistory
        # Refuse to overwrite an existing file; only write when open() fails.
        try:
            open(path)
            raise PortfolioException("A portfolio with this name exists. Portfolios will not overwrite other portfolios.")
        except(IOError):
            file = open(path, "w+")
            json.dump(data, file, indent=4)
            pass
    #tostring
    def __str__(self):
        mystr = ""
        mystr = mystr + "Portfolio #" + str(self.__portfolio_id) + " at " + str(self.__filepath) + ", owned by " + str(self.__portfolio_owner) + " containing {"
        first = True
        for entry in self.__portfolio:
            if first:
                mystr = mystr + "(" + str(entry) + ", " + str(self.__portfolio[entry]) + ")"
                first = False
            else:
                mystr = mystr + ", (" + str(entry) + ", " + str(self.__portfolio[entry]) + ")"
        mystr = mystr + "} with an idle cashpool amount of " + str(self.__cashpool_amt) + "."
        return mystr
    #a getter for portfolio contents
    def getPortfolio(self):
        return self.__portfolio
    #a getter for portfolio trade history contents
    def getTradeHistory(self):
        return self.__portfolio_trade_history
    #a getter for portfolio cashpool
    def getCashpool(self):
        return self.__cashpool_amt
    #uses JScraper to retrive current values and compute worth of portfolio
    #CALCULATED BY MEDIAN AMOUNT
    def getWorth(self):
        if self.__portfolio is None:
            raise PortfolioException("Corrupted portfolio: No Portfolio")
        else:
            #TODO: make JScraper able to report latest price and use it to calculate data
            scpr = JScraper()
            total = self.__cashpool_amt
            for ticker in self.__portfolio:
                price_per_share = scpr.retrieveMedians(curr=ticker)[0]
                total = total + price_per_share * self.__portfolio[ticker]
            del scpr
            return total
    #returns the amount of each coin held by the portfolio, or -1 when absent.
    def amount(self, ticker):
        if ticker in self.__portfolio:
            return self.__portfolio[ticker]
        else:
            return -1
    #Saves this portfolio to disk, logs total worth to sqlite, and re-renders the worth plot.
    def save(self):
        data = {}
        data['cashpool_amt'] = self.__cashpool_amt
        data["portfolio"] = self.__portfolio
        data["portfolio_id"] = self.__portfolio_id
        data['portfolio_owner'] = self.__portfolio_owner
        data['portfolio_trade_history'] = self.__portfolio_trade_history
        file = open(self.__filepath, "w+")
        json.dump(data, file, indent=4)
        #tmp files
        connection = sqlite3.connect("./databases/" + str(self.__filepath) + ".db")
        try:
            connection.execute('''CREATE TABLE totalWorth (worth real, date timestamp)''')
        except sqlite3.OperationalError:
            pass #already exists
        now = datetime.datetime.utcnow()
        prepstmt = "INSERT INTO totalWorth VALUES (" + str(self.getWorth()) + ",'" + str(now) + "')"
        connection.execute(prepstmt)
        connection.commit()
        dates = []
        totals = []
        for row in connection.execute("SELECT * FROM totalWorth ORDER BY date DESC"):
            totals.insert(0,row[0])
            dates.insert(0,row[1])
        xticks = ticker.MaxNLocator(20)
        fig1, ax = plt.subplots( nrows=1, ncols=1) # create figure & 1 axis
        ax.plot(dates, totals, label="Totals")
        ax.xaxis.set_major_locator(xticks) #set number of ticks on plot
        plt.setp(ax.xaxis.get_majorticklabels(), rotation=270) #rotate labels
        plt.tight_layout() #make room for lablels
        fig1.savefig('./output/' + str(self.__filepath) + '_worth.png', dpi=1000) # save the figure to file
        plt.close(fig1)
        connection.close()
    #purchase some amount of a specified currency
    def purchase(self, ticker, amt):
        #TODO: checkout ccxt https://github.com/ccxt/ccxt for selling to a bunch of markets
        #determine best market, connect to it
        #CALCULATED BY MEDIAN AMOUNT
        scpr = JScraper()
        price_per_share = scpr.retrieveMedians(curr=ticker)[0]
        #facilitate trade
        if(price_per_share * amt > self.__cashpool_amt):
            raise PortfolioException("Not enough in cashpool to purchase " + str(amt) + " of " + str(ticker) + ".")
        if ticker in self.__portfolio:
            self.__portfolio[ticker] = self.__portfolio[ticker] + amt
        else:
            self.__portfolio[ticker] = amt
        # NOTE(review): this raises KeyError for a ticker with no existing
        # trade-history list (e.g. a first-ever purchase) — TODO confirm.
        self.__portfolio_trade_history[ticker].append(("BUY", price_per_share, amt))
        self.__cashpool_amt = self.__cashpool_amt - (amt * price_per_share)
        del scpr
        #update hardcopy portfolio
        self.save()
    #sell some amount of a specified currency
    def sell(self, ticker, amt):
        #TODO: checkout ccxt https://github.com/ccxt/ccxt for selling to a bunch of markets
        #determine best market, connect to it
        #CALCULATED BY MEDIAN AMOUNT
        scpr = JScraper()
        price_per_share = scpr.retrieveMedians(curr=ticker)[0]
        #facilitate trade
        if ticker in self.__portfolio:
            if amt > self.__portfolio[ticker]:
                raise PortfolioException("Portfolio does not contain that amount of " + str(ticker) + ".")
            self.__portfolio[ticker] = self.__portfolio[ticker] - amt
            self.__cashpool_amt = self.__cashpool_amt + (amt * price_per_share)
            self.__portfolio_trade_history[ticker].append(("SELL", price_per_share, amt))
        else:
            raise PortfolioException("Portfolio does not contain any coins of the type " + str(ticker) + ".")
        del scpr
        #update hardcopy portfolio
        self.save()
# Script entry point: manual smoke test of the Portfolio class.
if __name__ == "__main__":
    p = Portfolio("portfolio.pf")
    print(p)
    print("Total Worth: " + str(p.getWorth()))
    # Exercise a small buy and show the updated holdings.
    p.purchase("BTC", 0.01)
    print(p)
    print("Total Worth: " + str(p.getWorth()))
    print("BTC " + str(p.amount("BTC")))
    p.save()
17,673 | 06b66d87787bd80e7a8fdc3bd7078b8a14063519 | #Multiple inheritance occurs when the child class
# inherits from more than one parent class.
class employee:
    """Parent class #1: an employee with a company and an employee code."""

    # Class-level defaults shared by every instance until shadowed.
    company = " Amazon"
    ecode = 120

    def upgradeecode(self):
        """Bump the employee code by two (stores it on the instance)."""
        self.ecode += 2
class freelancer:
    """Parent class #2: a freelancer with a platform and a seniority level."""

    # Class-level defaults shared by every instance until shadowed.
    company = "Fiverr"
    level = 10

    def upgradelevel(self):
        """Raise the freelancer level by two (stores it on the instance)."""
        self.level += 2
class programmer(employee,freelancer):
    """Child class inheriting from both parents (MRO: programmer, employee,
    freelancer), so attribute lookups such as ``company`` resolve to
    ``employee`` first."""
    name = "ahadraza"
# Demonstrate that a programmer can use methods from either parent,
# and that each parent class still works on its own.
p = programmer()
p.upgradelevel()
print(p.level)
p1 = freelancer()
p1.upgradelevel()
# Fix: the original printed the bound method object (p1.upgradelevel)
# instead of the upgraded value.
print(p1.level)
p2 = employee()
p2.upgradeecode()
print(p2.ecode)
17,674 | 17df0dd7e6feeea327322f0bdce8f35fda71f9c8 | import logging
import plotly.graph_objects as go
import numpy as np
import pandas as pd
from pandemic.data.Analytics import fit, func
logger = logging.getLogger("lplot")
def plot_country_state(country, date, cases, province=""):
    """Line chart (log y) of confirmed cases for one "province/country" key."""
    key = province + "/" + country
    series = cases[key]
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=date, y=series, marker=dict(symbol="circle"), name=key))
    fig.update_traces(mode='lines+markers', showlegend=True, line=dict(shape="spline", smoothing=0.2))
    fig.update_xaxes(title=dict(text="Date"), type="date", autorange=False, range=[date[0], date[-1]])
    fig.update_yaxes(title=dict(text="Number of Confirmed Cases"), type="log", range=[0, 5])
    fig.update_layout(width=1000,
                      height=600,
                      margin=dict(l=20, r=20, t=50, b=20),
                      plot_bgcolor="WhiteSmoke",
                      paper_bgcolor="LightSteelBlue", )
    return fig
def plot_countries(country_list, date, cases):
    """Plot confirmed cases for each listed country, skipping missing keys."""
    fig = go.Figure()
    for country in country_list:
        key = "/" + country
        try:
            series = cases[key]
        except KeyError:
            # Country absent from the data set: warn and move on.
            logger.warning(key + " is not found in the data.")
            continue
        fig.add_trace(go.Scatter(x=date, y=series, marker=dict(symbol="circle"), name=country))
    fig.update_traces(mode='lines+markers', showlegend=True, line=dict(shape="spline", smoothing=0.2))
    fig.update_xaxes(title=dict(text="Date"), type="date", autorange=False, range=[date[0], date[-1]])
    fig.update_yaxes(title=dict(text="Number of Confirmed Cases"), type="log", range=[0, 5])
    fig.update_layout(width=1000,
                      height=600,
                      margin=dict(l=20, r=20, t=50, b=20),
                      plot_bgcolor="WhiteSmoke",
                      paper_bgcolor="LightSteelBlue", )
    return fig
def plots(date, cases):
    """Plot every series in *cases* (mapping of name -> values) on one figure."""
    fig = go.Figure()
    for label, series in cases.items():
        fig.add_trace(go.Scatter(x=date, y=series, marker=dict(symbol="circle-open"), name=label))
    fig.update_traces(mode='lines+markers', showlegend=True, line=dict(shape="spline", smoothing=0.2))
    fig.update_xaxes(title=dict(text="Date"), type="date", autorange=False, range=[date[0], date[-1]])
    fig.update_yaxes(title=dict(text="Number of Confirmed Cases"), type="log", range=[0, 5])
    fig.update_layout(width=1000,
                      height=600,
                      margin=dict(l=20, r=20, t=50, b=20),
                      plot_bgcolor="WhiteSmoke",
                      paper_bgcolor="LightSteelBlue", )
    return fig
def plot2_countries(country_data_list):
    """Plot confirmed cases for several countries from per-country data objects.

    :param country_data_list: mapping of country name -> object exposing a
        ``df`` DataFrame indexed by date, with a column named after the country.
    :return: the assembled plotly Figure.
    """
    fig = go.Figure()
    for c, d in country_data_list.items():
        fig.add_trace(go.Scatter(x=d.df.index, y=d.df[c], marker=dict(symbol="circle"), name=c))
    fig.update_traces(mode='lines+markers', showlegend=True, line=dict(shape="spline", smoothing=0.2))
    fig.update_xaxes(title=dict(text="Date"), type="date")
    fig.update_yaxes(title=dict(text="Number of Confirmed Cases"), type="log", range=[0, 5])
    fig.update_layout(width=1000,
                      height=600,
                      margin=dict(l=20, r=20, t=50, b=20),
                      plot_bgcolor="WhiteSmoke",
                      paper_bgcolor="LightSteelBlue", )
    # Fix: every sibling plot helper returns its figure; this one fell
    # through and returned None, so callers could not show or save it.
    return fig
def plot_figure_country_region(df):
    """Line chart (log y) of confirmed cases, one trace per column of *df*.

    Used by Tools.
    :param df: DataFrame indexed by date, one column per country/region.
    :return: the plotly Figure.
    """
    # Widen the log-scale ceiling when the latest totals exceed 10**5.
    y_upper = max(5.0, np.log10(df.iloc[-1,].max()))
    fig = go.Figure()
    for col in df.columns:
        fig.add_trace(go.Scatter(x=df.index, y=df[col], name=col,
                                 line=dict(width=4), hovertemplate="%{x}<br>Cases: %{y:d}", hoverinfo="x+text"))
    fig.update_traces(mode='lines', line=dict(shape="spline", smoothing=0.5))
    fig.update_xaxes(title=dict(text="Date", font=dict(size=16)),
                     type="date", autorange=False, range=[df.index[0], df.index[-1]],
                     ticks="inside")
    fig.update_yaxes(visible=True, title=dict(text="Number of Confirmed Cases", font=dict(size=16)),
                     type="log", autorange=True, range=[0.0, y_upper], showgrid=True, gridcolor="#eee",
                     ticks="inside", tickson="labels")
    fig.update_layout(
        showlegend=True, legend=dict(font=dict(size=14)),
        margin=dict(l=20, r=20, t=50, b=50),
        plot_bgcolor="white",
        paper_bgcolor="white", )
    return fig
def plot_figure_confirmed_100_plus(df):
    """Plot each country's confirmed cases against days since passing 100 cases.

    Used by Tools.
    :param df: DataFrame with countries as columns, indexed by date.
    :return: the plotly Figure.
    """
    # Widen the log-scale ceiling when the latest totals exceed 10**5.
    y_upper = 5.0
    latest_max = np.log10(df.iloc[-1,].max())
    if latest_max > y_upper:
        y_upper = latest_max
    fig = go.Figure()
    x_limit = 0
    for country in df.columns:
        series = df[country]
        above_100 = series[series > 100]
        day_axis = np.arange(len(above_100))
        # Track the longest post-100 stretch to bound the x-axis.
        x_limit = max(x_limit, len(day_axis))
        fig.add_trace(go.Scatter(x=day_axis, y=above_100, name=country,
                                 line=dict(width=4), hovertemplate="Days: %{x}<br>Cases: %{y:d}", hoverinfo="x+text"))
    fig.update_traces(mode='lines', line=dict(shape="spline", smoothing=0.5))
    fig.update_xaxes(title=dict(text="Days After 100+ Cases", font=dict(size=16)),
                     autorange=False, range=[0, x_limit],
                     ticks="inside")
    fig.update_yaxes(visible=True, title=dict(text="Number of Confirmed Cases", font=dict(size=16)),
                     type="log", autorange=True, range=[0.0, y_upper], showgrid=True, gridcolor="#eee",
                     ticks="inside", tickson="labels")
    fig.update_layout(
        showlegend=True, legend=dict(font=dict(size=14)),
        margin=dict(l=20, r=20, t=50, b=50),
        plot_bgcolor="white",
        paper_bgcolor="white", )
    return fig
def plot_confirmed_death_recovered_by(df):
    """Filled-area figure of Confirmed, Deaths and Recovered over time.

    :param df: DataFrame indexed by date with "Confirmed", "Death" and
        "Recovered" columns.
    :return: the plotly Figure (log-scale y axis).
    """
    # Widen the log-scale ceiling when the final confirmed count exceeds 10**5.
    rangeUpper = 5.0
    max_scale = np.log10(df["Confirmed"][-1])
    if max_scale > rangeUpper:
        rangeUpper = max_scale
    fig = go.Figure()
    # NOTE(review): the first trace uses fill="tonexty" although no trace
    # precedes it, while the other two use fill="tozeroy" -- confirm the
    # resulting fill stacking is the intended look.
    fig.add_trace(
        go.Scatter(x=df.index, y=df["Confirmed"], name="Confirmed", line=dict(width=4), fill="tonexty", mode="none"))
    fig.add_trace(go.Scatter(x=df.index, y=df["Death"], name="Deaths", line=dict(width=4), fill="tozeroy", mode="none"))
    fig.add_trace(
        go.Scatter(x=df.index, y=df["Recovered"], name="Recovered", line=dict(width=4), fill="tozeroy", mode="none"))
    fig.update_xaxes(title=dict(text="Date", font=dict(size=16)),
                     type="date", autorange=False, range=[df.index[0], df.index[-1]],
                     ticks="inside")
    fig.update_yaxes(visible=True, title=dict(text="Number of Cases", font=dict(size=16)),
                     type="log", range=[0.0, rangeUpper], showgrid=True, gridcolor="#eee",
                     ticks="inside", tickson="labels")
    fig.update_layout(
        showlegend=True, legend=dict(font=dict(size=14)),
        margin=dict(l=20, r=20, t=50, b=50),
        plot_bgcolor="white",
        paper_bgcolor="white", )
    return fig
def plot_confirmed_infection_rate(df, date=None, period=7):
    """
    Used by Tools.

    Filled-area figure of Confirmed/Recovered/Deaths; when *date* is given,
    overlays an exponential fit (via the project's ``fit``/``func`` helpers)
    over *period* days plus a dotted projection beyond it.

    :param df: DataFrame indexed by date with "Confirmed", "Recovered" and
        "Death" columns.
    :param date: None, or the date at which the fit window starts
        (passed through to ``fit``).
    :param period: number of days used for the fitted segment.
    :return: the plotly Figure.
    """
    # Adjust maximum value at y-axis (widen when the final count exceeds 10**5)
    rangeUpper = 5.0
    max_scale = np.log10(df["Confirmed"][-1])
    if max_scale > rangeUpper:
        rangeUpper = max_scale
    fig = go.Figure()
    fig.add_trace(
        go.Scatter(x=df.index, y=df["Confirmed"], name="Confirmed",
                   line=dict(width=4), fill="tonexty", fillcolor="#DCDCDC", mode="none",
                   hovertemplate="%{x}<br>Cases: %{y:d}", hoverinfo="x+text"))
    fig.add_trace(
        go.Scatter(x=df.index, y=df["Recovered"], name="Recovered",
                   line=dict(width=4), fill="tonexty", fillcolor="#C0C0C0", mode="none",
                   hovertemplate="%{x}<br>Cases: %{y:d}", hoverinfo="x+text"))
    fig.add_trace(
        go.Scatter(x=df.index, y=df["Death"], name="Deaths",
                   line=dict(width=4), fill="tozeroy", fillcolor="#808080", mode="none",
                   hovertemplate="%{x}<br>Cases: %{y:d}", hoverinfo="x+text"))
    if date is not None:
        confirmed = df["Confirmed"]
        # Fit the confirmed series; popt are the model parameters
        # (popt[1] presumably is the growth rate -- see use below).
        x_date, popt, pcov = fit(confirmed, date, period)
        # Create predicted data: model evaluated from the fit start to the
        # last observed date. ``func`` appears to model log10(cases), hence
        # the 10 ** back-transform -- TODO confirm against Analytics.func.
        cf_pred = confirmed[x_date[0]:confirmed.index[-1]]
        y_pred_model = 10 ** func(range(len(cf_pred)), *popt)
        cf_model = pd.Series(y_pred_model, index=cf_pred.index)
        # On_Date: the fitted segment (first `period` days, solid line)
        on_date = cf_model.iloc[0:period + 1]
        # Off_Date: the extrapolated remainder (dotted "Projected" line)
        off_date = cf_model.iloc[period:]
        if popt[1] > 0.0:
            # Growing: report how many days until cases grow by 20%.
            infect_rate = "20% new cases in <i>" + str(round(0.2 / popt[1])) + "</i> days*"
            fig.add_trace(
                go.Scatter(x=off_date.index, y=off_date, mode='lines', name="Projected",
                           line=dict(color='Black', width=4, dash='dot'),
                           showlegend=False, hovertemplate="%{x}<br>Projected Cases: %{y:.0f}", hoverinfo="x+text"))
        else:
            infect_rate = "No new cases"
        fig.add_trace(
            go.Scatter(x=on_date.index, y=on_date, mode='lines', name="Fitted",
                       line=dict(color='Black', width=4),
                       showlegend=False, hovertemplate="%{x}<br>" + infect_rate, hoverinfo="x+text"))
    fig.update_xaxes(title=dict(text="Date", font=dict(size=16)),
                     type="date", autorange=False, range=[df.index[0], df.index[-1]],
                     ticks="inside")
    fig.update_yaxes(visible=True, title=dict(text="Number of Cases", font=dict(size=16)),
                     type="log", range=[0.0, rangeUpper], showgrid=True, gridcolor="#eee",
                     ticks="inside", tickson="labels")
    fig.update_layout(
        showlegend=True, legend=dict(font=dict(size=14)),
        margin=dict(l=20, r=20, t=50, b=50),
        plot_bgcolor="white",
        paper_bgcolor="white", )
    return fig
def plot_figure_country_case_daily(df):
    """Bar chart of daily NEW cases for a single-country DataFrame.

    :param df: DataFrame with exactly one column (the country) of cumulative
        counts, indexed by date. With any other column count, an empty (but
        configured) figure is returned.
    :return: the plotly Figure.
    """
    fig = go.Figure()
    if df.columns.size == 1:
        country = df.columns[0]
        # Daily increments of the cumulative series (len(df) - 1 values).
        new_cases = df[country].iloc[1:].values - df[country].iloc[:-1].values
        # Fix: pairing the n-1 increments with the full n-element index shifted
        # every bar one day early; the increment between day i-1 and day i
        # belongs on day i, i.e. index[1:].
        fig.add_trace(go.Bar(x=df.index[1:], y=new_cases, name=country))
    fig.update_xaxes(title=dict(text="Date"), type="date", autorange=False, range=[df.index[0], df.index[-1]])
    fig.update_yaxes(title=dict(text="Number of New Cases"))
    fig.update_layout(width=1000, height=600,
                      margin=dict(l=20, r=20, t=50, b=20),
                      plot_bgcolor="white",
                      paper_bgcolor="white", )
    return fig
def plot_figure_of(df, province):
    """Line chart (log y) of the confirmed-case series for one *province* column."""
    fig = go.Figure()
    trace = go.Scatter(x=df.index, y=df[province], marker=dict(symbol="circle"), name=province)
    fig.add_trace(trace)
    fig.update_traces(mode='lines+markers', showlegend=True, line=dict(shape="spline", smoothing=0.2))
    fig.update_xaxes(title=dict(text="Date"), type="date", autorange=False, range=[df.index[0], df.index[-1]])
    fig.update_yaxes(title=dict(text="Number of Confirmed Cases"), type="log", range=[0, 5])
    fig.update_layout(width=1000, height=600,
                      margin=dict(l=20, r=20, t=50, b=20),
                      plot_bgcolor="WhiteSmoke",
                      paper_bgcolor="LightSteelBlue", )
    return fig
def plot_all_figures(df):
    """Plot every column of *df* as its own confirmed-cases series."""
    fig = go.Figure()
    for column in df.columns:
        fig.add_trace(go.Scatter(x=df.index, y=df[column], marker=dict(symbol="circle"), name=column))
    fig.update_traces(mode='lines+markers', showlegend=True, line=dict(shape="spline", smoothing=0.2))
    fig.update_xaxes(title=dict(text="Date"), type="date")
    fig.update_yaxes(title=dict(text="Number of Confirmed Cases"), type="log", range=[0, 5])
    fig.update_layout(width=1000,
                      height=600,
                      margin=dict(l=20, r=20, t=50, b=20),
                      plot_bgcolor="WhiteSmoke",
                      paper_bgcolor="LightSteelBlue", )
    return fig
|
# Read two integers from one line of stdin; print True iff either is non-zero.
first, second = input().split()
# `or` short-circuits, so the second token is only converted when needed.
print(bool(int(first)) or bool(int(second)))
17,676 | 9f022d74102dd9de477447cad980ba62f510b752 | __author__ = 'Ciddhi'
from DBUtils import *
import GlobalVariables as gv
class Reallocation:
    """Re-balances asset allocation for individuals active in a time window."""

    def reallocate(self, startDate, startTime, endDate, endTime, dbObject):
        """Classify each active individual by its suggested next state and
        apply the matching asset adjustment plus a new state record.

        State codes: 0 -> shrink holdings, 1 -> hold, anything else -> grow.
        """
        # Individuals active during the window under consideration.
        active = dbObject.getIndividuals(startDate, startTime, endDate, endTime)
        decrease_ids = []
        keep_ids = []
        increase_ids = []
        for individual_id, _ in active:
            # Latest recorded state drives the suggested next state.
            for last_state, _individual in dbObject.getLastState(individual_id):
                for next_state, _next in dbObject.getNextState(individual_id, last_state):
                    if next_state == 0:
                        decrease_ids.append(individual_id)
                    elif next_state == 1:
                        keep_ids.append(individual_id)
                    else:
                        increase_ids.append(individual_id)
        # Apply asset changes and record the state at the window's end.
        for ind_id in decrease_ids:
            dbObject.reduceFreeAsset(ind_id, gv.unitQty)
            dbObject.addNewState(ind_id, endDate, endTime, 0)
        for ind_id in increase_ids:
            dbObject.increaseFreeAsset(ind_id, gv.unitQty)
            dbObject.addNewState(ind_id, endDate, endTime, 2)
        for ind_id in keep_ids:
            dbObject.addNewState(ind_id, endDate, endTime, 1)
# To test
if __name__ == "__main__":
    dbObject = DBUtils()
    dbObject.dbConnect()
    reallocationObject = Reallocation()
    # Fix: reallocate() requires the window bounds; the original call passed
    # only dbObject and raised TypeError. TODO: replace the sample window
    # below with real values for your data set.
    reallocationObject.reallocate("2000-01-01", "00:00:00", "2000-01-02", "00:00:00", dbObject)
    dbObject.dbClose()
|
17,677 | df0d0bb0b9ae7ab43ad4b25198253226d808b5f2 | import sys
# Make modules under ./MyDetector importable from this entry point.
sys.path.append("MyDetector")
17,678 | 33490a0d8d566f9720b4b3c8a26cc44165cbc657 | import os
import pygame
from pygame.locals import *
import random
from numpy.random import choice
import numpy as np
pygame.init()
SIZE = WIDTH, HEIGHT = 640, 480
BACKGROUND_COLOR = pygame.Color('black')
FPS = 80
screen = pygame.display.set_mode(SIZE)
clock = pygame.time.Clock()
time = pygame.time
#load font, prepare values
font = pygame.font.Font(None, 80)
text = 'Fonty'
size = font.size(text)
fg = 250, 250, 250 #font color
bg = 5, 5, 5
a_sys_font = pygame.font.SysFont("Arial", 30)
b_sys_font = pygame.font.SysFont("Arial", 40)
c_sys_font = pygame.font.SysFont("Arial", 60)
d_sys_font = pygame.font.SysFont("Arial", 25)
e_sys_font = pygame.font.SysFont("Arial", 10)
#table for probablities for selecting final reel image
#weighted towards hex and demons
#each reel gets a different probability
PROBABILTY1=[0.05,0.31,0.07,0.14,0.17,0.16,0.01,0.01,0.01,0.06,0.01]
PROBABILTY2=[0.01,0.2,0.01,0.21,0.2,0.21,0.01,0.01,0.01,0.06,0.07]
PROBABILTY3=[0.21,0.05,0.21,0.09,0.05,0.02,0.11,0.02,0.06,0.06,0.12]
#set normal probability to start
PROBABILTY = PROBABILTY1
#number of images loaded from image folder
IMAGES_N = 11
if IMAGES_N <> len(PROBABILTY):
print"number of images and probability array length do not match!"
background_image = pygame.image.load("slot_background_images/devils_delight_wallpaper_640_480.png").convert()
background_image_blank = pygame.image.load("slot_background_images/devils_delight_wallpaper_640_480_blank.png").convert()
#icon for app window
icon = pygame.image.load('slot_misc_images/icon.png')
pygame.display.set_icon(icon)
pygame.display.set_caption("Devil's Bargain")
pygame.mixer.init()
#create background music for the game
pygame.mixer.music.load("slot_sounds/gravewalk.ogg")
#loop the sound file continuously starting at 0.0
pygame.mixer.music.play(-1,0.0)
pygame.mixer.music.set_volume(0.2) #between 0-1
#create array to hold soundsif you want to go this way
# sounds = []
#create the variables and assign them the sounds
# spinSound = pygame.mixer.Sound("Sounds/slotSpinSound.ogg")
# winSound = pygame.mixer.Sound("Sounds/slotWinSound.ogg")
#add them to the sounds array
# sounds.append(spinSound)
# sounds.append(winSound)
#unique sound for each reel
reelSpin1 = pygame.mixer.Sound('slot_sounds/reel_fast.ogg')
reelSpin2 = pygame.mixer.Sound('slot_sounds/reel_fast2.ogg')
reelSpin3 = pygame.mixer.Sound('slot_sounds/reel_fast.ogg')
#unique sound for each image
reelImageSounds = []
reelImageSounds.append(pygame.mixer.Sound('slot_sounds/dice.ogg'))
reelImageSounds.append(pygame.mixer.Sound('slot_sounds/hex.ogg'))
reelImageSounds.append(pygame.mixer.Sound('slot_sounds/growl.ogg'))
reelImageSounds.append(pygame.mixer.Sound('slot_sounds/devil_laugh.ogg'))
reelImageSounds.append(pygame.mixer.Sound('slot_sounds/witch_laugh.ogg'))
reelImageSounds.append(pygame.mixer.Sound('slot_sounds/reaper_laugh.ogg'))
reelImageSounds.append(pygame.mixer.Sound('slot_sounds/crow1.ogg'))
reelImageSounds.append(pygame.mixer.Sound('slot_sounds/crow2.ogg'))
reelImageSounds.append(pygame.mixer.Sound('slot_sounds/crow3.ogg'))
reelImageSounds.append(pygame.mixer.Sound('slot_sounds/moo.ogg'))
reelImageSounds.append(pygame.mixer.Sound('slot_sounds/thunder.ogg'))
#sounds for various outcomes (win, lose)
win1_sound = pygame.mixer.Sound('slot_sounds/win1.ogg')
win2_sound = pygame.mixer.Sound('slot_sounds/win2.ogg')
win3_sound = pygame.mixer.Sound('slot_sounds/win3.ogg')
win4_sound = pygame.mixer.Sound('slot_sounds/win_moo.ogg')
lose_sound = pygame.mixer.Sound('slot_sounds/error.ogg')
noPoints_sound = pygame.mixer.Sound('slot_sounds/sigh.ogg')
#set up event clock(s)
pygame.time.set_timer(USEREVENT+1, 50)
def timerFunc(index):
index_old = index
index = index+1
#print"index incremented to "+repr(index)+" in timerFunc"
return index
def load_images(path):
"""
Loads all images in directory. The directory must only contain images.
Args: path: The relative or absolute path to the directory to load images from.
Returns: List of images.
"""
images = []
images_names = []
for file_name in os.listdir(path):
image_name = file_name
images_names.append(image_name)
images_names = sorted(images_names) #use sort to insure linux file sys behaves
print(images_names) #check for proper order
for file_name in images_names:
image = pygame.image.load(path + os.sep + file_name).convert()
images.append(image)
return images
def create_reels(imageArr):
#create each reel from shuffled image array to look random
#random.shuffle(imageArr)
reel1 = AnimatedSprite(position=(15, 10), images=imageArr)
#random.shuffle(imageArr)
reel2 = AnimatedSprite(position=(225, 10), images=imageArr)
#random.shuffle(imageArr)
reel3 = AnimatedSprite(position=(435, 10), images=imageArr)
# Create sprite groups and add reels to them.
all_spinning_sprites = pygame.sprite.Group(reel1, reel2, reel3)
only_2_3_spinning_sprites = pygame.sprite.Group(reel2, reel3)
only_3_spinning_sprites = pygame.sprite.Group(reel3)
return all_spinning_sprites,only_2_3_spinning_sprites,only_3_spinning_sprites
def choose_final_images(n, probability):
#choose final reel image
#use probabilities to choose 3 random images
imageIndexs = np.random.choice(n, 3, p=probability)
return imageIndexs
def populate_names_cats():
# name_decoder- array of image names based on index position in 'images' array
name = ["Dice", "Hex", "666", "Demon", "Devil Girl",
"Satan", "Tomestone Q", "Tomestone K", "Tomestone A", "Evil Cow", "God Cow"]
catagory = ["Satanic", "Satanic", "Satanic", "Demon", "Demon", "Demon", "Grave", "Grave", "Grave",
"Special", "xSpecial"]
#print"len(name_decoder): "+repr(len(name_decoder))
return (name, catagory)
def check_input():
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_DOWN:
print"In check_input fxn, Key hit: down detected, QUIT"
pygame.quit()
sys.exit()
return index
def draw_player_data(bank):
textSurf = a_sys_font.render("Player Soul Bank: $"+("{:.2f}".format(bank)), 1, fg)
textRect = textSurf.get_rect()
textRect.center = (10, HEIGHT-40)
screen.blit(textSurf, textRect)
def score_update(current, total, bonusNum, bonusName, bonusCat, specialCat, xspecialCat):
print"length of bonusNum: "+repr(len(bonusNum))
print"bonusName: "+bonusName
print"bonusCat: "+bonusCat
nameScore = bonusNum[0]
catScore = bonusNum[1]
specialScore = bonusNum[2]
xspecialScore = bonusNum[3]
text_y = 180 #start text here
text_height = 40 #increment y by this
subscoreName = a_sys_font.render("Bonus: $"+repr(nameScore)+" "+bonusName+" ", 1, fg)
screen.blit(subscoreName, (5, text_y))
pygame.display.update()
if bonusName > 0:
win1_sound.play()
pygame.time.wait(500)
text_y = text_y + text_height
subscore_cat = a_sys_font.render("Multiplier: X"+repr(catScore), 1, fg)
screen.blit(subscore_cat, (5, text_y))
pygame.display.update()
if catScore > 0:
win1_sound.play()
pygame.time.wait(500)
text_y = text_y + text_height
subscore_total = a_sys_font.render("**Sub Total: $"+repr(catScore*nameScore)+"**", 1, fg)
screen.blit(subscore_total, (5, text_y))
pygame.display.update()
if catScore*nameScore > 0:
win1_sound.play()
else:
lose_sound.play()
pygame.time.wait(800)
text_y = text_y + text_height + 5
if specialScore > 0:
subscore_special = a_sys_font.render("Add Special: $"+repr(specialScore)+" "+specialCat+" ", 1, fg)
screen.blit(subscore_special, (5, text_y))
pygame.display.update()
text_y = text_y + text_height
win2_sound.play()
pygame.time.wait(500)
else:
text_y = text_y
if xspecialScore <> 0:
subscore_xspecial = a_sys_font.render("Wrath of God: $"+repr(xspecialScore)+" "+xspecialCat+" ", 1, fg)
screen.blit(subscore_xspecial, (5, text_y))
pygame.display.update()
win4_sound.play()
text_y = text_y + text_height + 4
pygame.time.wait(500)
else:
text_y = text_y + 4
pygame.draw.lines(screen, fg, False, [(5, text_y), (300,text_y)], 3)
pygame.time.wait(500)
#text_y = text_y + text_height
ren1 = a_sys_font.render("Current Score: $"+repr(current), 1, fg)
screen.blit(ren1, (5, text_y))
pygame.display.update()
if current > 0:
win3_sound.play()
else:
noPoints_sound.play()
pygame.time.wait(1200)
#redraw backround to clear previous text
screen.blit(background_image, [0, 0])
draw_player_data(total)
pygame.display.update()
def pay_update(cost, total):
total = total - cost
pygame.display.update()
pygame.time.wait(1500)
#redraw backround to clear previous text
screen.blit(background_image, [0, 0])
draw_player_data(total)
pygame.display.update()
return total
#process keyboard input to set bet
# bet - the current bet
# key - the keyboard key pressed
#returns - the new bet
def procBet(bet,key):
betStr = str(bet) #get the bet string
#try because chr will fail if character is > 255, but we dont care about them anyways
try:
#if the key is a backspace
if key == K_BACKSPACE:
betStr = betStr[0:-1] #remove the last digit
#if key is a digit
elif (chr(key).isdigit()):
betStr += chr(key) #add it to the bet string
#if user entered an invalid bet (nothing)
if(not betStr):
return 0 #new bet of 0
#if there was any problem, return the original bet
except Exception:
return bet
#convert and return the new bet
return int(betStr)
def pull_handle(images, spinning_3_sprites,spinning_2_sprites,spinning_1_sprites, final_reel1, final_reel2, final_reel3):
reel1_static = False
reel2_static = False
reel3_static = False
spins = 0
running = True
while running:
# Amount of seconds between each loop.
#dt = clock.tick(FPS) / 1000
dt = clock.get_rawtime()
for event in pygame.event.get():
if event.type == USEREVENT+1:
spins = timerFunc(spins) #calling the function whenever we get timer event.
#print"Event index(spins): " + repr(spins)
if spins >= 40:
spin1 = False
else:
spin1 = True
if spins >= 75:
spin2 = False
else:
spin2 = True
if spins >= 120:
spin3 = False
else:
spin3 = True
if spin1 & spin2 & spin3:
#print"all true!"
spinning_3_sprites.update(spins,USEREVENT+1)
spinning_3_sprites.draw(screen)
reelSpin1.play()
reelSpin2.play()
reelSpin3.play()
if not reel1_static:
#print"play effect: "+repr(final_reel1)
reel1_static = True
elif spin2 & spin3:
#print"2 and 3 true!"
if not reel2_static:
#print"play effect: "+repr(final_reel2)
reelSpin1.stop()
reelImageSounds[final_reel1].play()
reel2_static = True
spinning_2_sprites.update(spins,USEREVENT+1)
spinning_2_sprites.draw(screen)
screen.blit(images[final_reel1], (15,10))
elif spin3:
#print"only 3 true!"
if not reel3_static:
#print"play effect: "+repr(final_reel3)
reelSpin2.stop()
reelImageSounds[final_reel1].stop()
reelImageSounds[final_reel2].play()
reel3_static = True
spinning_1_sprites.update(spins,USEREVENT+1)
spinning_1_sprites.draw(screen)
screen.blit(images[final_reel1], (15,10))
screen.blit(images[final_reel2], (225,10))
else:
print"Game over!"
reelSpin3.stop()
reelImageSounds[final_reel2].stop()
reelImageSounds[final_reel3].play()
screen.blit(images[final_reel1], (15,10))
screen.blit(images[final_reel2], (225,10))
screen.blit(images[final_reel3], (435,10))
running = False
#all_sprites.update(dt,USEREVENT+1) # Calls the 'update' method on all sprites in the list.
#print"pygame.time.get_ticks: " + repr(time.get_ticks())
#all_sprites.draw(screen)
#screen.fill(BACKGROUND_COLOR)
pygame.display.update()
pygame.event.pump()
def redraw_static_reels(images, final_reel1, final_reel2, final_reel3):
screen.blit(images[final_reel1], (15,10))
screen.blit(images[final_reel2], (225,10))
screen.blit(images[final_reel3], (435,10))
pygame.display.update()
def run_game(bank, cost):
#load the reel images
#Make sure to provide the relative or full path to the images directory.
reelImages = load_images(path='slot_images')
#polulate the names of each image and the catagory into name_decoder, catagory_decoder arrays
name_decoder, catagory_decoder = populate_names_cats()
#create the 3 reels and animate to spin 1, 2, or 3
spinning_3_sprites,spinning_2_sprites,spinning_1_sprites = create_reels(reelImages)
#choose final reel image
imageIndexs = choose_final_images(IMAGES_N, PROBABILTY1)
final_reel1 = imageIndexs[0]
print"final_reel1: "+repr(final_reel1)
imageIndexs = choose_final_images(IMAGES_N, PROBABILTY2)
final_reel2 = imageIndexs[0]
print"final_reel2: "+repr(final_reel2)
imageIndexs = choose_final_images(IMAGES_N, PROBABILTY3)
final_reel3 = imageIndexs[0]
print"final_reel3: "+repr(final_reel3)
imageIndexs = [final_reel1,final_reel2,final_reel3]
print"imageIndexs: "+repr(imageIndexs)
print"********************"
print"choose these reelImages["+repr(imageIndexs[0])+"], ["+repr(imageIndexs[1])+"], ["+repr(imageIndexs[2])+"]"
final_name1 = name_decoder[imageIndexs[0]]
final_name2 = name_decoder[imageIndexs[1]]
final_name3 = name_decoder[imageIndexs[2]]
final_cat1 = catagory_decoder[imageIndexs[0]]
final_cat2 = catagory_decoder[imageIndexs[1]]
final_cat3 = catagory_decoder[imageIndexs[2]]
#Make numpy arrays so we can do advanced searching/matching
final_reels = np.array([final_reel1,final_reel2,final_reel3])
final_names = np.array([final_name1,final_name2,final_name3])
final_cats = np.array([final_cat1,final_cat2,final_cat3])
#bank = bank - cost #charge cost_pull for a spin!
bank = pay_update(cost, bank)
draw_player_data(bank)
pull_handle(reelImages, spinning_3_sprites,spinning_2_sprites,spinning_1_sprites, final_reel1, final_reel2, final_reel3)
# puase after reels have all stopped
pygame.time.wait(1000)
#display and calculate score
scoreName = 0 #reset score for individual images
bonusName = ""
bonusCat = ""
specialCat = ""
xspecialCat = ""
score_special = 0
score_xspecial = 0
#set to 'True' if matches 3 or 2
setGodMatch = False
setEvilMatch = False
setHexMatch = False
for name in name_decoder:
#print"name: "+repr(name)
#print"np.count_nonzero: "+repr(np.count_nonzero(final_names == name))
if np.count_nonzero(final_names == name) == 3:
if name == "God Cow":
score_xspecial = -30
xspecialCat = "Triple Cow God!"
lose_sound.play()
setGodMatch = True
elif name == "Evil Cow":
score_special = 6
specialCat = "Triple Evil Cow!"
win3_sound.play()
setEvilMatch = True
elif name == "Satan":
scoreName = 50
bonusName = "Triple Satan!"
win3_sound.play()
elif name == "Devil Girl":
scoreName = 20
bonusName = "Triple Devil Girl!"
win3_sound.play()
elif name == "Demon":
scoreName = 10
bonusName = "Triple Demon!"
win3_sound.play()
elif name == "Hex":
scoreName = 4
bonusName = "Triple Hex!"
win1_sound.play()
win2_sound.play()
setHexMatch = True
else:
print"no match for 3 scoreName "+name
elif np.count_nonzero(final_names == name) == 2:
#print"np.count_2(final_names == "+name+" == "+repr(np.count_nonzero(final_names == name))
if name == "God Cow":
score_xspecial = -20
xspecialCat = "Double Cow God!"
lose_sound.play()
setGodMatch = True
elif name == "Evil Cow":
score_special = 4
specialCat = "Double Evil Cow!"
win2_sound.play()
setEvilMatch = True
elif name == "Hex":
scoreName = 2
bonusName = 'Double Hex!'
win2_sound.play()
setHexMatch = True
else:
print"no match for 2 scoreName "+name
#print"first loop looking for triplets done, scoreName: "+repr(scoreName)+", bonusName: "+bonusName
for name in name_decoder:
#print"np.count_1(final_names == "+name+" == "+repr(np.count_nonzero(final_names == name))
if np.count_nonzero(final_names == name) == 1:
print"looking for match for 1 scoreName == "+name
if name == "God Cow":
print"Match for scoreName "+name
if setGodMatch == False:
score_xspecial = -10
xspecialCat = "Single Cow God!"
lose_sound.play()
elif name == "Evil Cow":
if setEvilMatch == False:
score_special = 2
specialCat = "Single Evil Cow!"
win1_sound.play()
print"Match for scoreName "+name
elif name == "Hex":
if setHexMatch == False:
scoreName = 1
bonusName = "Single Hex!"
win1_sound.play()
print"Match for scoreName "+name
elif np.count_nonzero(final_names == "Demon") == 1:
if np.count_nonzero(final_names == "Devil Girl") == 1:
if np.count_nonzero(final_names == "Satan") == 1:
scoreName = 4
bonusName = "All Demons!"
win2_sound.play()
print"Match for scoreName "+name
scoreCat = 1
finalScore = (scoreName * scoreCat) + score_special + score_xspecial
if finalScore == 0:
lose_sound.play()
print"reelImages["+repr(final_reel1)+"], ["+repr(final_reel2)+"], ["+repr(final_reel3)+"]"
print"["+final_name1+"], ["+final_name2+"], ["+final_name3+"]"
print"["+final_cat1+"], ["+final_cat2+"], ["+final_cat3+"]"
print"bonusName: "+repr(bonusName)
print"scoreName: "+repr(scoreName)
print"scoreCat: "+repr(scoreCat)
print"score_special: "+repr(score_special)
print"score_xspecial: "+repr(score_xspecial)
bank = bank+finalScore
print"Current Total Score: "+repr(finalScore)
print"Cummalative Total: "+repr(bank)
print"**************************************"
bonusNums = []
bonusNums = [scoreName, scoreCat, score_special, score_xspecial]
score_update(finalScore, bank, bonusNums, bonusName, bonusCat, specialCat, xspecialCat)
redraw_static_reels(reelImages, final_reel1, final_reel2, final_reel3)
#redraw backround to clear previous text
screen.blit(background_image, [0, 0])
pygame.display.update()
redraw_static_reels(reelImages, final_reel1, final_reel2, final_reel3)
draw_player_data(bank) #update the running data like total bank
return bank
class AnimatedSprite(pygame.sprite.Sprite):
def __init__(self, position, images):
"""
Animated sprite object.
Args:
position: x, y coordinate on the screen to place the AnimatedSprite.
images: Images to use in the animation.
"""
super(AnimatedSprite, self).__init__()
size = (185, 150) # This should match the size of the images.
self.rect = pygame.Rect(position, size)
self.images = images
#pick a random starting image
self_index_start = random.randint(0, IMAGES_N-1)
#Sprint"self.index: " + repr(self_index_start)
self.index = self_index_start
self.image = images[self.index] # 'image' is the current image of the animation.
self.animation_time = 0.1
self.current_time = 0
self.animation_frames = IMAGES_N
self.current_frame = 0
print "Sprit initiated!"
def update_event_dependent(self, counter):
"""
Updates the image of Sprite by event timer.
Args:
events occur periodically by timer.
"""
#print"event count: " + repr(counter)
self.index = (self.index + 1) % len(self.images)
self.image = self.images[self.index]
def update(self, counter, USEREVENT):
self.update_event_dependent(counter)
def main():
    """Top-level game loop: title screen, spin handling, game-over screen.

    NOTE(review): this file's indentation was flattened; the nesting below
    is reconstructed from control-flow keywords — verify against the
    original before relying on it.  Uses many module-level globals
    (``screen``, fonts, background images, ``run_game`` ...).
    """
    # reset scores
    print"reset scores...."
    finalScore = 0.00  #current reels
    bank = 1.00  #total bank, start with 0
    cost_pull = 1.00  #cost per pull
    repullMultiplier = 0.25  #mult bank if repull before cashout
    running = True
    # (a commented-out intro-screen drawing block was removed here)
    pygame.display.update()
    firstSpin = True  #change to false after first spin
    while (running):
        # Redraw the title/prompt text every frame.
        screen.blit(background_image_blank, [0, 0])
        textSurf = b_sys_font.render("Soul Cost $1", 1, fg)
        textRect = textSurf.get_rect()
        textRect.center = ((WIDTH/2),(HEIGHT/6))
        screen.blit(textSurf, textRect)
        textSurf = b_sys_font.render("Press Down Arrow To Sell Soul", 1, fg)
        textRect = textSurf.get_rect()
        textRect.center = ((WIDTH/2),(HEIGHT/2.5))
        screen.blit(textSurf, textRect)
        draw_player_data(bank)  #update the running data like total bank
        #pygame.display.update()
        bribe = 0
        if firstSpin == False:
            # After the first spin, advertise the "Devil's Bargain" re-spin bonus.
            if bank > 2:
                bribe = bank*repullMultiplier
                if bribe > 2:
                    adjBribe = 2  # bribe is capped at $2
                    textSurf = e_sys_font.render("* Up to $2 Maximum", 1, fg, bg)
                    textRect = textSurf.get_rect()
                    textRect.center = ((WIDTH/2),(HEIGHT/(3)))
                    screen.blit(textSurf, textRect)
                else:
                    adjBribe = bribe
                textSurf = d_sys_font.render("Devil's Bargain*: Soul Bank"
                                             +" X "
                                             +"{:.2f}".format(repullMultiplier)
                                             +" = $"
                                             +"{:.2f}".format(adjBribe)
                                             +" bribe to spin again", 1, fg, bg)
                textRect = textSurf.get_rect()
                textRect.center = ((WIDTH/2),(HEIGHT/(4)))
                screen.blit(textSurf, textRect)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                print"You QUIT"
                pygame.quit()
                sys.exit()
            elif event.type == pygame.KEYUP:
                if event.key == pygame.K_UP:
                    print"Key hit: 'up key', so program quit"
                    running = False
                    pygame.quit()
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_DOWN:
                    print"Key hit: 'down key', so spin again!"
                    if bank <= 0:
                        bank = 1  # floor the bank so a spin is always affordable
                    # double bank for repeat pull!
                    if bank > 1:
                        if bribe > 2:
                            adjBribe = 2
                            textSurf = d_sys_font.render("* Up to $2 Maximum", 1, fg, bg)
                            textRect = textSurf.get_rect()
                            textRect.center = ((WIDTH/2),(HEIGHT/(3)))
                            screen.blit(textSurf, textRect)
                        else:
                            adjBribe = bribe
                        bank = bank + adjBribe
                    firstSpin = False
                    screen.blit(background_image, [0, 0])
                    pygame.display.update()
                    bank = run_game(bank, cost_pull)  # one full spin; returns updated bank
                    screen.blit(background_image, [0, 0])
                    draw_player_data(bank)
                    if bank < cost_pull:
                        print"GAME OVER!"
                        #fill background
                        screen.fill(BACKGROUND_COLOR)
                        textSurf = c_sys_font.render("GAME OVER!", 1, fg)
                        textRect = textSurf.get_rect()
                        textRect.center = ((WIDTH/2),(HEIGHT/3))
                        screen.blit(textSurf, textRect)
                        pygame.display.update()
                        bank = 0  #reset score in case it is negative
                        pygame.time.wait(5000)
                        #pygame.quit()
        pygame.display.update()
# Script entry point.
if __name__ == '__main__':
    main()
|
17,679 | 4e58c444c8ac53dd955af8ba040b212fe2a2bc0a | # Each new term in the Fibonacci sequence is generated by adding the previous two terms.
# By starting with 1 and 2, the first 10 terms will be: 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
# By considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the
# even-valued terms.
# Defines Fibonacci sequence up to last term <= n
def fibonacci(n):
    """Return the Fibonacci terms (starting 1, 2) whose values do not exceed n."""
    seq = [1, 2]
    # Grow until one term past the limit, then drop that overshoot term.
    while seq[-1] <= n:
        seq.append(seq[-1] + seq[-2])
    seq.pop()
    return seq
# Removes the odd elements from a list
def get_evens(elements):
    """Remove the odd values from *elements* in place (returns None).

    The previous implementation popped items while walking indices, which
    is O(n^2); slice assignment keeps the in-place mutation contract in
    O(n) and avoids the fragile index bookkeeping.
    """
    elements[:] = [value for value in elements if value % 2 == 0]
if __name__ == '__main__':
    # Project Euler #2: sum the even Fibonacci terms not exceeding 4,000,000.
    terms = fibonacci(4000000)
    get_evens(terms)
    print(sum(terms))
|
17,680 | 4cac5fddcff29ea51e9e76d6c60cd6f60c01e201 | from django.shortcuts import render
from .models import Student
from .forms import StudentForm
import csv,io
from django.contrib import messages
from django.http import HttpResponse
def file_upload(request):
    """Render the upload form (GET) or import Students from a posted CSV.

    CSV columns, in order: username, college_name, std_cell_no, emai_id.
    The first row is treated as a header and skipped.
    """
    template = 'csvupload.html'
    context = {
        'msg': 'Please upload valid file only'
    }
    if request.method == 'GET':
        return render(request, template, context)
    csv_file = request.FILES['file']
    if not csv_file.name.endswith('.csv'):
        messages.error(request, "Please upload Valid CSV FILE ONLY")
        # Bug fix: previously execution fell through and processed the
        # non-CSV upload anyway; bail out after flagging the error.
        return render(request, template, context)
    data_set = csv_file.read().decode('UTF-8')
    io_string = io.StringIO(data_set)
    next(io_string)  # skip the header row
    for column in csv.reader(io_string, delimiter=',', quotechar='|'):
        # Matching on all four fields: creates the row if it does not exist.
        Student.objects.update_or_create(
            username=column[0],
            college_name=column[1],
            std_cell_no=column[2],
            emai_id=column[3],
        )
    return render(request, template, {})
def downloadcsv(request):
    """Stream every Student row as a CSV attachment named demo.csv."""
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="demo.csv"'
    writer = csv.writer(response, delimiter=',')
    writer.writerow(['Username', 'college_name', 'std_cell_no', 'emai_id'])
    rows = (
        [student.username, student.college_name, student.std_cell_no, student.emai_id]
        for student in Student.objects.all()
    )
    writer.writerows(rows)
    return response
|
17,681 | 8850d451a36abc8d8d31ab9b214a0ce99e2c38c3 | from cloudcms.deployment.deployment_target import DeploymentTarget
from . import CloudCMSObject
from ..repository import Repository
from ..project import Project
from ..job import Job
from ..error import JobError, RequestError
import time
class Platform(CloudCMSObject):
    """Entry point for platform-level Cloud CMS API calls.

    Thin wrappers around ``self.client`` HTTP helpers.  The exact
    signatures of ``client.get``/``client.post`` come from the project
    client (not visible here), so argument order is preserved verbatim.
    NOTE(review): indentation reconstructed from flattened source.
    """

    def __init__(self, client, data):
        super(Platform, self).__init__(client, data)

    def uri(self):
        # The platform lives at the API root, so its URI prefix is empty.
        return ''

    def list_repositories(self):
        """Return all repositories, mapped via Repository.repository_map."""
        uri = self.uri() + '/repositories'
        res = self.client.get(uri)
        return Repository.repository_map(self.client, res['rows'])

    def read_repository(self, repository_id):
        """Fetch a single repository by id."""
        res = self.client.get('/repositories/' + repository_id)
        repository = Repository(self.client, res)
        return repository

    def create_repository(self, obj={}):
        """Create a repository from *obj*, then fetch and return it.

        NOTE(review): mutable default argument — safe only while callees
        never mutate ``obj``.
        """
        uri = self.uri() + '/repositories'
        res = self.client.post(uri, obj)
        repository_id = res['_doc']
        return self.read_repository(repository_id)

    # Projects
    def read_project(self, projectId):
        """Fetch a project by id."""
        uri = self.uri() + '/projects/' + projectId
        res = self.client.get(uri, {})
        return Project(self.client, res)

    def start_create_project(self, obj):
        """Start asynchronous project creation; returns the '_doc' id."""
        uri = self.uri() + '/projects/start'
        response = self.client.post(uri, {}, obj)
        return response['_doc']

    # Jobs
    def read_job(self, jobId):
        """Fetch a job by id."""
        uri = self.uri() + '/jobs/' + jobId
        res = self.client.get(uri, {})
        return Job(self.client, res)

    def query_jobs(self, query, pagination):
        """Run a job query with pagination options."""
        uri = self.uri() + '/jobs/query'
        res = self.client.post(uri, pagination, query)
        return Job.job_map(self.client, res['rows'])

    def kill_job(self, jobId):
        """Request termination of a running job; returns its updated state."""
        uri = self.uri() + '/jobs/' + jobId + '/kill'
        res = self.client.post(uri, {}, {})
        return Job(self.client, res)

    # Deployment
    # Targets
    def list_deployment_targets(self, pagination={}):
        """List deployment targets (optionally paginated)."""
        uri = self.uri() + '/deployment/targets'
        res = self.client.get(uri, pagination)
        return DeploymentTarget.deployment_target_map(self.client, res['rows'])

    def query_deployment_targets(self, query, pagination={}):
        """Run a deployment-target query with pagination options."""
        uri = self.uri() + '/deployment/targets/query'
        res = self.client.post(uri, pagination, query)
        return DeploymentTarget.deployment_target_map(self.client, res['rows'])

    def read_deployment_target(self, id):
        """Fetch a deployment target by id, or None if the request fails."""
        uri = self.uri() + '/deployment/targets/' + id
        try:
            res = self.client.get(uri)
            target = DeploymentTarget(self.client, res)
        except RequestError:
            target = None
        return target

    def create_deployment_target(self, obj):
        """Create a deployment target; returns the new target's id."""
        uri = self.uri() + '/deployment/targets/'
        res = self.client.post(uri, data=obj)
        targetId = res['_doc']
        return targetId

    def wait_for_job_completion(self, jobId):
        """Poll a job once per second until it finishes.

        Raises JobError if the job ends in the ERROR state.
        Use with caution: blocks the calling thread indefinitely.
        """
        while True:
            job = self.read_job(jobId)
            if job.data['state'] == 'FINISHED':
                return job
            elif job.data['state'] == 'ERROR':
                raise JobError(jobId)
            else:
                time.sleep(1)
|
17,682 | e5bb4fa3203f911d5236635ee65ffdfefc4bd960 | '''
Написать генератор цифр последовательности Конвея «Look and Say».
https://oeis.org/A005150
(Сама последовательность Конвея https://oeis.org/A034002).
Ввести N⩾0 и вывести N-ю цифру последовательности.
Input:
100500
Output:
2
'''
def generat_conoway1(seed):
    """Yield the seed, then the digits of the look-and-say sequence (A005150/A034002).

    Digits are produced pairwise: first the run length (as an int), then
    the repeated digit (as a one-character string) — note the mixed yield
    types, which the driver below prints interchangeably.
    """
    yield seed
    l = [int(seed)]  # all fully-built terms so far; l[-1] seeds the next term
    s = 1
    y=''
    while True:
        # 'Q' is a sentinel appended so the final digit run is also flushed.
        x = str(l[len(l) - 1]) + 'Q'
        for i in range(len(x) - 1):
            if x[i] == x[i + 1]:
                s += 1  # counter of identical consecutive digits
            else:  # run ended: record "<count><digit>", emit both, reset counter
                y += str(s)+str(x[i])
                yield s
                yield x[i]
                s = 1  # for the trailing 'Q' the counter returns to 1
        l.append(int(y))
        y = ''
        # print(l) # A005150
def generat_conoway2(seed):
    """Yield the seed, then the digits of each successive look-and-say term.

    Each term is built by run-length encoding the previous term; the digits
    of every new term are yielded one character at a time.
    """
    yield seed
    current = str(seed)
    while True:
        pieces = []
        pos = 0
        total = len(current)
        while pos < total:
            digit = current[pos]
            run = 0
            # Count how many times `digit` repeats from this position.
            while pos < total and current[pos] == digit:
                run += 1
                pos += 1
            pieces.append(str(run))
            pieces.append(digit)
        current = ''.join(pieces)
        for ch in current:
            yield ch
#N = int(input("Enter N: "))
# Demo: print the digit at positions 100490..100509 of the sequence.
for N in range(100490, 100510):
    # Walk a range of positions; not optimal at all, since each iteration
    # re-runs the whole generator from scratch, but it is easy to follow.
    step = 0
    for i in generat_conoway1(9):
        N -= 1
        if N < 0:
            # Runtime string kept as-is (Russian: "step {} digit: {}").
            print("шаг {} цифра: {}".format(step, i))
            break
        step += 1
|
17,683 | 8a6c52514870bd5d48ab17666815e36ed5028c9c | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The build_cache functionality is split into
this file and build_cache_base.py
This file is part of the AITemplate OSS distribution.
For Meta-internal use, there can be an alternative
to this file which allows to instantiate build caches
with Meta-internal backing infrastructure.
"""
from aitemplate.backend.build_cache_base import (
BuildCache,
FileBasedBuildCache,
NoBuildCache,
)
from aitemplate.utils import environ as aitemplate_env
__all__ = ["BUILD_CACHE", "BuildCache"]
def create_build_cache() -> BuildCache:
    """Instantiate the build cache selected via the environment.

    An unset or empty AIT build-cache directory disables caching; any
    other value backs the cache with that directory on disk.
    """
    cache_dir = aitemplate_env.ait_build_cache_dir()
    if not cache_dir:
        return NoBuildCache()
    return FileBasedBuildCache(cache_dir)


BUILD_CACHE: BuildCache = create_build_cache()
|
class libro:
    """A book with a reading-progress bookmark (Spanish attribute/method names kept)."""

    def __init__(self, ibs, titulo, autor, cantidad_de_pagina, pagina_actual):
        # ``pagina_actual`` is accepted for backward compatibility, but
        # reading always starts from page 0 (as in the original code).
        self.ibs = ibs
        self.titulo = titulo
        self.autor = autor
        self.cantidad_de_pagina = cantidad_de_pagina
        self.pagina_actual = 0

    def de_quien_es(self):
        # Print the author's name.
        print (self.autor)

    def nombre_del_titulo(self):
        # Print the book's title.
        print (self.titulo)

    def caracteristicas(self):
        # Print all of the book's data on one line.
        print (self.ibs, self.titulo, self.autor, self.cantidad_de_pagina)

    def leer(self, pag_leido):
        """Advance the bookmark by *pag_leido* pages; reset it when the book is finished."""
        self.pagina_actual += pag_leido
        if self.pagina_actual >= self.cantidad_de_pagina:
            print ('felicidades,terminaste')
            # Bug fix: the original assigned ``self.paginas_actual`` (typo),
            # so the bookmark was never actually reset.
            self.pagina_actual = 0

    def en_que_pagina_me_quede(self):
        """Return the page the reader is currently on."""
        return self.pagina_actual
# How it was run from the interactive terminal.  The interpreter's output
# lines are kept below as comments — previously they were bare text
# (e.g. ``11221432 rambo jonatan 100``) that made this module a SyntaxError.
joni = libro(11221432, 'rambo', 'jonatan', 100, 0)
print('titulo:', joni.nombre_del_titulo())
# rambo
print(joni.de_quien_es())
# jonatan
print(joni.caracteristicas())
# 11221432 rambo jonatan 100
joni.leer(50)
print('pagina_actual:', joni.en_que_pagina_me_quede())
# pagina_actual: 50
print('pagina_actual:', joni.en_que_pagina_me_quede())
# pagina_actual: 100
joni.leer(101)
# felicidades,terminaste
|
17,685 | 6bbb407fe709cb878aa87e1c2f11f6d40b279810 | /home/action/.parts/packages/googleappengine/1.9.4/lib/pyasn1_modules/pyasn1_modules/rfc2459.py |
17,686 | 79571636e3f274bf7cf78a82d935fefb289e255f | """Create and print a boolean array, a mask, from a NumPy number array
with values from 0-5 and the condition: values ≥ 3.
"""
import numpy as np
# Build the 0-5 integer array, derive the boolean mask (values >= 3), print it.
array = np.arange(6)
mask = array >= 3
print(mask)
|
17,687 | 5e9b42beb7d45cb69a40e4a85494ade71c02243a | from setuptools import setup
# Package metadata for the twitchchat distribution.
setup(
    name="twitchchat",
    version="0.1",
    packages=["twitchchat"],
)
|
def parts(words, word):
    """Return True if *word* can be assembled from the strings in *words*.

    Each candidate piece may be consumed at most once.  Note that
    ``str.replace`` removes every occurrence of the piece at once,
    mirroring the original algorithm.
    """
    if word in words:
        return True
    for idx, piece in enumerate(words):
        if piece not in word:
            continue
        remainder = word.replace(piece, '')
        if parts(words[:idx] + words[idx + 1:], remainder):
            return True
    return False
import re

# Read one input line, collect every double-quoted word with its position.
indexed = list(enumerate(re.findall(r'"(.*?)"', input())))
# Process shortest words first so every candidate's parts precede it.
indexed.sort(key=lambda pair: len(pair[1]))

composable = []
for pos in range(1, len(indexed)):
    earlier = [indexed[j][1] for j in range(pos)]
    if parts(earlier, indexed[pos][1]):
        composable.append(indexed[pos])

# Restore the original input order before printing the surviving words.
composable.sort(key=lambda pair: pair[0])
print([pair[1] for pair in composable])
17,689 | a6cd48247bed8d88b36bca2ea6c392c4e0cc6268 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import UserInfo, Profile, Notification
# Expose the account models through the Django admin site.
for model in (UserInfo, Profile, Notification):
    admin.site.register(model)
|
17,690 | 899e2f2fb325f8f5beafc540e7decc7166902000 | import pytest
from pygears import gear, Intf, find
from pygears.typing import Unit
from pygears.hls import parse_gear_body
# from pygears.hls.utils import VisitError
# @pytest.mark.xfail(raises=VisitError)
# def test_generator_used_as_function():
# def func(x):
# yield True
# @gear
# def module(x):
# a = func(x)
# yield a
# module(Intf(Unit))
# parse_gear_body(find('/module'))
|
# Ask for a name and a favourite colour, then echo them back.
name = input('Enter your name? ')
color = input('Enter your color? ')
print('{} likes {}'.format(name, color))
17,692 | 92262205b604d8eae049537b90d58381aa19b657 | import json
from flask import render_template, redirect, request, current_app, session, flash, url_for, Blueprint, jsonify
from flask.ext.login import current_user
from flask.ext.security import LoginForm, RegisterForm, current_user, login_required, login_user, auth_token_required
from flask.ext.security.views import logout
from flask.ext.social.utils import get_provider_or_404
from flask.ext.social.views import connect_handler
from application.core import app
usersBlueprint = Blueprint('users', __name__, template_folder='templates/users')
@usersBlueprint.route('/user_details', methods=['GET'])
@login_required
def user_details():
    """Return the logged-in user's details; serialisation is delegated to the user model's toJson()."""
    return current_user.toJson()
@usersBlueprint.route('/profile')
@login_required
def profile():
    """Render the profile page, passing the user's current Twitter connection (if any)."""
    return render_template('security/profile.html', content='Profile Page', twitter_conn=app.social.twitter.get_connection())
@usersBlueprint.route('/profile/<provider_id>/post', methods=['POST'])
@login_required
def social_post(provider_id):
    """Post the submitted message to the given social provider, then redirect to the profile page.

    An empty/missing message silently redirects (original behaviour).
    """
    message = request.form.get('message', None)
    if message:
        provider = get_provider_or_404(provider_id)
        api = provider.get_api()
        if provider_id == 'twitter':
            display_name = 'Twitter'
            api.PostUpdate(message)
        elif provider_id == 'facebook':
            display_name = 'Facebook'
            api.put_object("me", "feed", message=message)
        else:
            # Bug fix: any other provider previously left ``display_name``
            # undefined and the flash() below raised NameError.
            display_name = provider_id
        flash('Message posted to %s: %s' % (display_name, message), 'info')
    return redirect(url_for('users.profile'))
@usersBlueprint.route('/register', methods=['GET', 'POST'])
@usersBlueprint.route('/register/<provider_id>', methods=['GET', 'POST'])
def register(provider_id=None):
    """Register a new account, optionally completing a failed social login.

    When *provider_id* is given, registration follows an attempted social
    sign-in; connection details stashed in the session under
    'failed_login_connection' are attached to the freshly created user.
    NOTE(review): indentation reconstructed from flattened source.
    """
    if current_user.is_authenticated():
        # Already signed in — nothing to register.
        return redirect(request.referrer or current_app.config['SECURITY_POST_LOGIN_VIEW'])
    form = RegisterForm()
    if provider_id:
        provider = get_provider_or_404(provider_id)
        connection_values = session.get('failed_login_connection', None)
    else:
        provider = None
        connection_values = None
    if form.validate_on_submit():
        ds = current_app.security.datastore
        user = ds.create_user(email=form.email.data, password=form.password.data)
        ds.commit()
        # See if there was an attempted social login prior to registering
        # and if so use the provider connect_handler to save a connection
        connection_values = session.pop('failed_login_connection', None)
        if connection_values:
            connection_values['user_id'] = user.id
            connect_handler(connection_values, provider)
        if login_user(user):
            ds.commit()
            flash('Account created successfully', 'info')
            return redirect(url_for('security.profile'))
        # Login failed (e.g. inactive account): show a thanks page instead.
        return render_template('security/thanks.html', user=user)
    login_failed = int(request.args.get('login_failed', 0))
    return render_template('security/register_user.html', register_user_form=form, provider=provider, login_failed=login_failed, connection_values=connection_values)
17,693 | fe9aeb2b92545288acb065822970b2a5df44851f | from model.model import Model
import tensorflow as tf
class PoseRegModel(Model):
    """TF1 regression network mapping 128x128x1 images to an n_dim pose vector.

    NOTE(review): layer helpers come from ``self.lh`` (defined in the
    project ``Model`` base class, not visible here) — presumed to be
    conv/pool/relu/fc wrappers; indentation reconstructed.
    """

    def __init__(self, n_dim=30, cacheFile=None):
        super(PoseRegModel, self).__init__(cacheFile)
        self.n_dim = n_dim
        # Placeholders for regression targets and grayscale input images.
        self.y = tf.placeholder(dtype=tf.float32, shape=[None, n_dim], name='y')
        self.X = tf.placeholder(dtype=tf.float32, shape=[None, 128, 128, 1], name='X')

    def get_y_infer(self):
        # Only available after inference() has built the graph; None before.
        if hasattr(self, 'y_infer'):
            return self.y_infer
        else:
            return None

    def get_loss(self):
        # Only available after inference() has built the graph; None before.
        if hasattr(self, 'loss'):
            return self.loss
        else:
            return None

    def inference(self, X, y):
        """Build the conv/fc graph; return (prediction, squared-error loss)."""
        with tf.variable_scope('conv1'):
            conv = self.lh.conv(X, filter_num=8, ksize=(5,5), stride=1, reg=True)
            pool = self.lh.max_pool(conv, ksize=(4,4), stride=4)
            relu = self.lh.relu(pool)
        with tf.variable_scope('conv2'):
            conv = self.lh.conv(relu, filter_num=8, ksize=(5,5),stride=1, reg=True)
            pool = self.lh.max_pool(conv, ksize=(2, 2), stride=2)
            relu = self.lh.relu(pool)
        with tf.variable_scope('conv3'):
            conv = self.lh.conv(relu, filter_num=8, ksize=(3,3),stride=1, reg=True)
            # pool = self.lh.max_pool(conv, ksize=(2, 2), stride=2)
            relu = self.lh.relu(conv)
        with tf.variable_scope('fc1'):
            fc = self.lh.fc(relu, 1024, reg=True)
            fc = self.lh.drop_out(fc, 0.7)
        with tf.variable_scope('fc2'):
            fc = self.lh.fc(fc, 1024, reg=True)
            fc = self.lh.drop_out(fc, 0.7)
        with tf.variable_scope('pca_out'):
            fc = self.lh.fc(fc, self.n_dim, reg=True)
        loss = self.lh.sqr_loss(fc, y)
        # Cache graph nodes for the get_* accessors above.
        self.loss = loss
        self.y_infer = fc
        return fc, loss
17,694 | 2fd0ae683d22b9da46b0003f569a1e96b03a71eb |
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
print "Spark version:",sc.version
print "Pandas version:",pd.__version__
from pandas import Series, DataFrame
from os import getenv
DATADIR = getenv("DATADIR")
SUBDIR = '/PUBLIC/movielens/ml-1m'
DATADIR += SUBDIR
# In[2]:
#--------------------------------------------------
import plotly as plotly
print "Plotly version", plotly.__version__ # version >1.9.4 required
import plotly.graph_objs as go
from plotly import tools
# plotly.offline.init_notebook_mode() # run at the start of every notebook
from plotly.offline import download_plotlyjs, init_notebook_mode, iplot #, plot # Difference .plot / .iplot ???
init_notebook_mode() # run at the start of every ipython notebook to use plotly.offline
# this injects the plotly.js source files into the notebook
#--------------------------------------------------
# %matplotlib inline
# import matplotlib.pyplot as plt
# import seaborn as sns
#--------------------------------------------------
# ---
# # Spark DataFrames
# In[3]:
# Load the MovieLens users file; the CSV has no header, so name the columns.
usersDF = spark.read.csv("%s/users.csv" % DATADIR, sep=',', header=False, inferSchema=True)
usersDF = usersDF.withColumnRenamed('_c0', 'UserID') .withColumnRenamed('_c1', 'Gender') .withColumnRenamed('_c2', 'Age') .withColumnRenamed('_c3', 'Occupation') .withColumnRenamed('_c4', 'ZipCode')

# In[4]:

usersDF.show(5)

# In[5]:

# Load the ratings file and name its columns likewise.
ratingsDF = spark.read.csv("%s/ratings.csv" % DATADIR, sep=',', header=False, inferSchema=True)
ratingsDF = ratingsDF.withColumnRenamed('_c0', 'UserID') .withColumnRenamed('_c1', 'MovieID') .withColumnRenamed('_c2', 'Rating') .withColumnRenamed('_c3', 'Timestamp')

# In[6]:

# Compute Ratings Histogram:
# ratingsHistogram = ratingsDF.groupBy("Rating").agg({'Rating': 'count'})
ratingsHistogram = ratingsDF.groupBy("Rating").count().withColumnRenamed('count','Cnt')
ratingsHistogram.show()
# ---
# ## Join DataFrames
# In[7]:
# Attach each rating to the demographic data of the user who made it.
ratingsWithUserDataDF = ratingsDF.join(usersDF, on='UserID', how='inner')

# In[8]:

ratingsWithUserDataDF.show(5)

# In[9]:

# Compute Ratings Histogram by Gender:
ratingsHistogram = (
    ratingsWithUserDataDF
    .groupby(['Rating','Gender']).count().withColumnRenamed('count','Cnt')
    .orderBy(["Rating", "Gender"], ascending=[1, 1])
)

# In[10]:

ratingsHistogram.show(100)

# In[11]:

# Total ratings per gender, used to normalise the histogram below.
fRatingsNr=ratingsWithUserDataDF.filter("Gender = 'F'").count()
mRatingsNr=ratingsWithUserDataDF.filter(ratingsWithUserDataDF['Gender'] == 'M').count()
print "Nr. of ratings by female users:",fRatingsNr
print "Nr. of ratings by male users: ",mRatingsNr

# In[12]:

from pyspark.sql.types import IntegerType, FloatType, DoubleType
from pyspark.sql.functions import udf

# Turn absolute counts into per-gender fractions so F and M are comparable.
normalize_udf = udf(lambda cnt, gender: 1.*cnt/fRatingsNr if gender=='F' else 1.*cnt/mRatingsNr, DoubleType())
ratingsHistogram=(
    ratingsHistogram.withColumn("CntNormalized", normalize_udf(ratingsHistogram.Cnt, ratingsHistogram.Gender))
)

# In[13]:

ratingsHistogram.show(20)

# In[14]:

# Sanity check: the normalised counts of each gender should sum to 1.
ratingsHistogram.groupby('Gender').sum('CntNormalized').show()
# ---
# ## Read movies data
# In[15]:
from pyspark.sql import types as T
from pyspark.sql import functions as F
# In[16]:
# The movies file is '+'-separated; name the columns.
moviesDF = spark.read.csv("%s/movies.csv" % DATADIR, sep='+', header=False, inferSchema=True)
moviesDF = moviesDF.withColumnRenamed('_c0', 'MovieID') .withColumnRenamed('_c1', 'Title') .withColumnRenamed('_c2', 'Genres')

# In[17]:

moviesDF.show(3, truncate=50)

# In[18]:

# Genres arrive as a single 'A|B|C' string; split it into an array column.
split_udf = udf(lambda s: s.split("|"), T.ArrayType(T.StringType()))
moviesDF=moviesDF.withColumn("Genres", split_udf(moviesDF['Genres']))

# In[19]:

moviesDF.show(3, truncate=50)

# In[20]:

ratingsWithUserAndMovieDataDF = ratingsWithUserDataDF.join(moviesDF, how='inner', on='MovieID')

# In[21]:

print "Nr. of rows:", ratingsWithUserAndMovieDataDF.count()
ratingsWithUserAndMovieDataDF.sort(['MovieID','UserID']).show(3, truncate=50)

# In[22]:

# One row per (rating, genre): explode the genre array, then count ratings
# and average user age per (gender, rating, genre).
ratingsByGenderAndGenreSF = (
    ratingsWithUserAndMovieDataDF
    .withColumn('Genre', F.explode(ratingsWithUserAndMovieDataDF.Genres))
    .drop('Genres')
    .groupBy(['Gender','Rating','Genre'])
    .agg(
        F.count('*').alias('Cnt'),
        F.mean('Age').alias('AvgAge')
    )
    # .agg({"*":'count', "Age":'mean'})
    .sort(['Genre','Gender','Rating'])
)

# In[23]:

ratingsByGenderAndGenreSF.show(20)

# In[24]:

# Average rating (and count) per (gender, genre).
avgRatingsByGenderAndGenreDF = (
    ratingsWithUserAndMovieDataDF
    .withColumn('Genre', F.explode(ratingsWithUserAndMovieDataDF.Genres))
    .drop('Genres')
    .groupBy(['Gender','Genre'])
    .agg(
        F.count('*').alias('Cnt'),
        F.mean('Rating').alias('AvgRating')
    )
    # .agg({"*":'count', "Rating":'mean'})
    .sort(['Genre','Gender'])
)

# In[25]:

avgRatingsByGenderAndGenreDF.show(20)
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
|
17,695 | e648cdc616aaa17ef2662a5dfdc393a1908c7f2a | #!/usr/bin/python
import unittest
class ProgramInputTest(unittest.TestCase):
    """Tests for program input handling; currently only a placeholder."""

    def test_make_toolbox(self):
        # TODO: implement — placeholder so the suite imports and runs green.
        pass
17,696 | c2245d7e0afd5281998e583892a2184c047dc4b4 | from models import Customer
# Quick sanity-check script: fetch and display every Customer row.
customers = Customer.query.all()
print(customers)
|
17,697 | 459f6785175689e099fd5f2af4fe761bce83c1de | from django.apps import AppConfig
from . import connections
class FlexyConfig(AppConfig):
    """Django app config that wires up Elasticsearch on startup."""

    name = 'flexy'
    verbose_name = "Flexy (Elasticsearch integration)"

    def ready(self):
        """Called once the app registry is fully populated."""
        # Configure connections
        connections.configure()
|
17,698 | 0b26084fc5b83e1254cc5ff8af40e4c0752bef82 | from ._network import Network
from ._system import System
|
17,699 | e3595ea6c8fdaa740c36e7b60881f94bdf297268 | # notrack
import hal
import threading
import logging
logger = logging.getLogger('wpilib.cs')
__all__ = ['CameraServer']
class CameraServer:
    '''
    Provides a way to launch an out of process cscore-based camera
    service instance, for streaming or for image processing.

    .. note:: This does not correspond directly to the wpilib
              CameraServer object; that can be found as
              :class:`cscore.CameraServer`. However, you should
              not use cscore directly from your robot code, see
              the documentation for details
    '''

    # True while the child CameraServer process is believed to be running
    _alive = False
    # Guards against launching the child process more than once
    _launched = False

    @classmethod
    def is_alive(cls):
        ''':returns: True if the CameraServer is still alive'''
        return cls._alive

    @classmethod
    def launch(cls, vision_py=None):
        '''
        Launches the CameraServer process in autocapture mode or
        using a user-specified python script

        :param vision_py: If specified, this is the relative path to
                          a filename with a function in it

        Example usage::

            wpilib.CameraServer.launch("vision.py:main")

        .. warning:: You must have robotpy-cscore installed, or this
                     function will fail without returning an error
                     (you will see an error in the console).
        '''
        if cls._launched:
            # Idempotent: only the first call does anything.
            return
        cls._launched = True
        if hal.isSimulation():
            logger.info("Would launch CameraServer with vision_py=%s", vision_py)
            cls._alive = True
        else:
            logger.info("Launching CameraServer process")
            # Launch the cscore launcher in a separate process
            import subprocess
            import sys
            args = [
                sys.executable,
                '-m', 'cscore'
            ]
            # TODO: Get accurate reporting data from the other cscore process. For
            # now, just differentiate between users with a custom py file and those
            # who do not. cscore handle values indicate type with bits 24-30
            if vision_py:
                if not vision_py.startswith('/'):
                    vision_py = '/home/lvuser/py/' + vision_py
                args.append(vision_py)
                hal.report(hal.UsageReporting.kResourceType_PCVideoServer, 0x51)
            else:
                hal.report(hal.UsageReporting.kResourceType_PCVideoServer, 0x52)
            # We open a pipe to it so that when this process exits, it dies
            proc = subprocess.Popen(args, close_fds=True, stdin=subprocess.PIPE, cwd='/home/lvuser/py')
            # Watch the child from a daemon thread so its exit is noticed.
            th = threading.Thread(target=cls._monitor_child, args=(proc,))
            th.daemon = True
            th.start()

    @classmethod
    def _monitor_child(cls, proc):
        # Blocks until the child exits, then records that it is gone.
        proc.wait()
        logger.warning("CameraServer process exited with exitcode %s", proc.returncode)
        cls._alive = False
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.