index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
989,200 | 718ebbb65dbfb909339adda8a9a1ff1750433113 | import datetime
from distutils.version import StrictVersion
import hashlib
import os
import random
import re
import seesaw
from seesaw.config import NumberConfigValue, realize
from seesaw.externalprocess import WgetDownload
from seesaw.item import ItemInterpolation, ItemValue
from seesaw.pipeline import Pipeline
from seesaw.project import Project
from seesaw.task import SimpleTask, LimitConcurrent
from seesaw.tracker import (GetItemFromTracker, SendDoneToTracker,
PrepareStatsForTracker, UploadWithTracker)
from seesaw.util import find_executable
import shutil
import socket
import sys
import time
# check the seesaw version
if StrictVersion(seesaw.__version__) < StrictVersion("0.1.5"):
raise Exception("This pipeline needs seesaw version 0.1.5 or higher.")
###########################################################################
# Find a useful Wget+Lua executable.
#
# WGET_LUA will be set to the first path that
# 1. does not crash with --version, and
# 2. prints the required version string
WGET_LUA = find_executable(
"Wget+Lua",
["GNU Wget 1.14.lua.20130523-9a5c"],
[
"./wget-lua",
"./wget-lua-warrior",
"./wget-lua-local",
"../wget-lua",
"../../wget-lua",
"/home/warrior/wget-lua",
"/usr/bin/wget-lua"
]
)
if not WGET_LUA:
raise Exception("No usable Wget+Lua found.")
###########################################################################
# The version number of this pipeline definition.
#
# Update this each time you make a non-cosmetic change.
# It will be added to the WARC files and reported to the tracker.
VERSION = "20140228.00"
USER_AGENTS = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9) AppleWebKit/537.71 (KHTML, like Gecko) Version/7.0 Safari/537.71',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:25.0) Gecko/20100101 Firefox/25.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.57 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.57 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:25.0) Gecko/20100101 Firefox/25.0',
'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.57 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.57 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; rv:25.0) Gecko/20100101 Firefox/25.0',
'Mozilla/5.0 (Windows NT 5.1; rv:25.0) Gecko/20100101 Firefox/25.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.57 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.57 Safari/537.36',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:25.0) Gecko/20100101 Firefox/25.0',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.57 Safari/537.36',
'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)')
TRACKER_ID = 'bebo'
TRACKER_HOST = 'tracker.archiveteam.org'
###########################################################################
# This section defines project-specific tasks.
#
# Simple tasks (tasks that do not need any concurrency) are based on the
# SimpleTask class and have a process(item) method that is called for
# each item.
class CheckIP(SimpleTask):
    """Verify that www.bebo.com resolves to the expected IP address.

    Guards against warriors running behind a transparent proxy or
    firewall, which would poison the archived data.
    """

    def __init__(self, warc_prefix):
        # NOTE(review): warc_prefix is accepted for signature compatibility
        # with the pipeline definition but is not used by this task.
        SimpleTask.__init__(self, "CheckIP")
        self._counter = 0

    def process(self, item):
        # BUG FIX: the original maintained this counter but never consulted
        # it before resolving, so the "check only occasionally" comment was
        # wrong and the DNS lookup ran for every single item. Now the check
        # actually runs only every 10th item.
        if self._counter <= 0:
            # NEW for 2014! Check if we are behind firewall/proxy
            ip_str = socket.gethostbyname('www.bebo.com')
            if ip_str != '173.239.67.222':
                item.log_output('Got IP address: %s' % ip_str)
                item.log_output(
                    'Are you behind a firewall/proxy? That is a big no-no!')
                raise Exception(
                    'Are you behind a firewall/proxy? That is a big no-no!')
            self._counter = 10
        else:
            self._counter -= 1
class SelectUserAgent(SimpleTask):
    """Pick a random browser user-agent string and attach it to the item."""

    def __init__(self):
        SimpleTask.__init__(self, 'SelectUserAgent')

    def process(self, item):
        item["user_agent"] = self.mutate_user_agent(random.choice(USER_AGENTS))

    def mutate_user_agent(self, string):
        """Return *string* with every two-digit version component jittered.

        Each two-digit number followed by a dot is replaced by a random
        value between (n - 4) and (n + 1), making the reported browser
        version slightly less uniform across warriors.
        """
        def _jitter(match):
            base = int(match.group(1))
            return '%d.' % random.randint(base - 4, base + 1)

        return re.sub(r'([1-9][0-9])\.', _jitter, string)
class PrepareDirectories(SimpleTask):
    """Create a clean per-item working directory and seed the WARC name.

    Sets item["item_dir"] and item["warc_file_base"], and pre-creates an
    empty .warc.gz so later tasks can rely on the file existing.
    """

    def __init__(self, warc_prefix):
        SimpleTask.__init__(self, "PrepareDirectories")
        self.warc_prefix = warc_prefix

    def process(self, item):
        item_name = item["item_name"]
        dirname = "/".join((item["data_dir"], item_name))

        # Start from a clean slate if a previous attempt left files behind.
        if os.path.isdir(dirname):
            shutil.rmtree(dirname)
        os.makedirs(dirname)

        item["item_dir"] = dirname
        timestamp = time.strftime("%Y%m%d-%H%M%S")
        item["warc_file_base"] = "%s-%s-%s" % (
            self.warc_prefix, item_name, timestamp)

        # Touch the target WARC so downstream tasks never see it missing.
        open("%(item_dir)s/%(warc_file_base)s.warc.gz" % item, "w").close()
class MoveFiles(SimpleTask):
    """Move the finished WARC out of the working directory and clean up."""

    def __init__(self):
        SimpleTask.__init__(self, "MoveFiles")

    def process(self, item):
        # NEW for 2014! Check if wget was compiled with zlib support.
        # BUG FIX: the original called os.path.exists on the literal string
        # "%(item_dir)s/%(warc_file_base)s.warc" -- the "% item"
        # interpolation was missing -- so the uncompressed-WARC check could
        # never trigger.
        if os.path.exists("%(item_dir)s/%(warc_file_base)s.warc" % item):
            raise Exception('Please compile wget with zlib support!')
        os.rename("%(item_dir)s/%(warc_file_base)s.warc.gz" % item,
                  "%(data_dir)s/%(warc_file_base)s.warc.gz" % item)
        shutil.rmtree("%(item_dir)s" % item)
def get_hash(filename):
    """Return the hexadecimal SHA-1 digest of the file at *filename*."""
    hasher = hashlib.sha1()
    with open(filename, 'rb') as stream:
        hasher.update(stream.read())
    return hasher.hexdigest()
CWD = os.getcwd()
PIPELINE_SHA1 = get_hash(os.path.join(CWD, 'pipeline.py'))
BEBO_SHA1 = get_hash(os.path.join(CWD, 'bebo.lua'))
def stats_id_function(item):
    """Extra per-item stats reported to the tracker.

    Includes the SHA-1 of this pipeline script and of bebo.lua (both
    computed once at module load) so the tracker can tell exactly which
    script version produced each item.
    """
    # NEW for 2014! Some accountability hashes and stats.
    return {
        'pipeline_hash': PIPELINE_SHA1,
        'bebo_hash': BEBO_SHA1,
        'python_version': sys.version,
    }
class WgetArgs(object):
    """Builds the wget-lua argument vector for a single tracker item.

    seesaw calls realize() with the live item, so the ItemInterpolation
    placeholders are expanded at download time, not at pipeline
    construction time.
    """

    def realize(self, item):
        wget_args = [
            WGET_LUA,
            "-U", ItemInterpolation("%(user_agent)s"),
            "-nv",
            "-o", ItemInterpolation("%(item_dir)s/wget.log"),
            "--lua-script", "bebo.lua",
            "--no-check-certificate",
            "--output-document", ItemInterpolation("%(item_dir)s/wget.tmp"),
            "--truncate-output",
            "-e", "robots=off",
            "--no-cookies",
            "--rotate-dns",
            "--recursive", "--level=inf",
            "--page-requisites",
            "--timeout", "60",
            "--tries", "inf",
            "--span-hosts",
            "--no-parent",
            "--waitretry", "3600",
            "--domains", "bebo.com",
            "--warc-file",
            ItemInterpolation("%(item_dir)s/%(warc_file_base)s"),
            "--warc-header", "operator: Archive Team",
            "--warc-header", "bebo-dld-script-version: " + VERSION,
            "--warc-header", ItemInterpolation("bebo-user: %(item_name)s"),
        ]
        # Item names encode an inclusive numeric range "start:end" of Bebo
        # member ids; queue one profile URL per id in that range.
        item_name = item['item_name']
        start, end = item_name.split(':', 1)
        start = int(start)
        end = int(end)
        assert start <= end
        for profile_id in range(start, end + 1):
            wget_args.append('http://archive.bebo.com/Profile.jsp?MemberId=%s' % profile_id)
        # An optional module-global "bind_address" (injected into this
        # module by the warrior environment) pins wget to one local address.
        if 'bind_address' in globals():
            wget_args.extend(['--bind-address', globals()['bind_address']])
            print('')
            print('*** Wget will bind address at {0} ***'.format(
                globals()['bind_address']))
            print('')
        return realize(wget_args, item)
downloader = globals()['downloader'] # quiet the code checker
###########################################################################
# Initialize the project.
#
# This will be shown in the warrior management panel. The logo should not
# be too big. The deadline is optional.
project = Project(
title="Bebo",
project_html="""
<img class="project-logo" alt="" src="http://archiveteam.org/images/f/f6/Bebo_Logo_new.png" height="50" />
<h2>Bebo <span class="links">
<a href="http://archive.bebo.com/">Website</a> ·
<a href="http://%s/%s/">Leaderboard</a></span></h2>
<p><!--<b>Bebo</b> grew up--></p>
""" % (TRACKER_HOST, TRACKER_ID)
,
)
pipeline = Pipeline(
GetItemFromTracker("http://%s/%s" % (TRACKER_HOST, TRACKER_ID), downloader,
VERSION),
PrepareDirectories(warc_prefix="bebo"),
SelectUserAgent(),
WgetDownload(
WgetArgs(),
max_tries=5,
accept_on_exit_code=[0, 8],
env={
'item_name': ItemValue("item_name"),
}
),
PrepareStatsForTracker(
defaults={"downloader": downloader, "version": VERSION},
file_groups={
"data": [ItemInterpolation("%(item_dir)s/%(warc_file_base)s.warc.gz")]
},
id_function=stats_id_function,
),
MoveFiles(),
LimitConcurrent(NumberConfigValue(min=1, max=4, default="1",
name="shared:rsync_threads", title="Rsync threads",
description="The maximum number of concurrent uploads."),
UploadWithTracker(
"http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
downloader=downloader,
version=VERSION,
files=[
ItemInterpolation("%(data_dir)s/%(warc_file_base)s.warc.gz"),
],
rsync_target_source_path=ItemInterpolation("%(data_dir)s/"),
rsync_extra_args=[
"--recursive",
"--partial",
"--partial-dir", ".rsync-tmp"
]
),
),
SendDoneToTracker(
tracker_url="http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
stats=ItemValue("stats")
)
)
|
989,201 | d6c0788962b84adb461ccd11ea720f7e8962ed04 | #Diseñar un algoritmo tal que dados como datos dos variables de tipo entero, obtenga el resultado de la siguiente función:
class Ejercicio9:
    """Reads two integers and evaluates a piecewise function of them.

    If the first number is 1 the result is 100 * second; if 2 it is
    100 ** second; if 3 it is 100 / second; otherwise the two numbers
    are added.
    """

    def __init__(self):
        # BUG FIX: the original defined "_init_" (single underscores),
        # which Python never calls as a constructor; it must be __init__.
        pass

    def variables(self):
        # Prompts stay in Spanish: they are user-facing runtime text.
        NUME = int(input("Primer variable:"))
        VA = int(input("Segunda variable "))
        # (The original evaluated "NUME+VA" here and discarded the result;
        # that no-op statement has been removed.)
        print(" ")
        if NUME == 1:
            Resp = 100 * VA
        elif NUME == 2:
            Resp = pow(100, VA)
        elif NUME == 3:
            Resp = 100 / VA
        else:
            Resp = NUME + VA
        print("siguiente variable es:", Resp)
        print("")
variables(" ") |
989,202 | 15bc64f3d44c2544aa94e70321600b351b2bd7d4 | from flask import render_template
from website import app
@app.errorhandler(403)
def not_authorized(path):
    """Render the custom 403 (Forbidden) page.

    NOTE(review): Flask passes the error object as this argument (usually
    named ``e``); it is unused here, so the misleading name ``path`` is
    harmless but worth renaming.
    """
    return render_template('status/403.html'), 403
@app.errorhandler(404)
def page_not_found(path):
    """Render the custom 404 (Not Found) page; the argument is unused."""
    return render_template('status/404.html'), 404
@app.errorhandler(410)
def resource_gone(path):
    """Render the custom 410 (Gone) page; the argument is unused."""
    return render_template('status/410.html'), 410
|
989,203 | 7c36bf5ac7d2aba5c4959d9c91d90fc01dcd7270 |
import unittest
from . import main
class TestLongestCommonSubstring(unittest.TestCase):
    # NOTE(review): despite the class name ("Substring"), this exercises
    # find_longest_common_subsequence -- consider renaming for clarity.

    def test_sample1(self):
        """The LCS of the two samples must be one of the known answers.

        Multiple longest common subsequences of equal length exist, so
        the test accepts either valid result.
        """
        s1 = "AACCTTGG"
        s2 = "ACACTGTGA"
        actual = main.find_longest_common_subsequence(s1, s2)
        expected = ["AACTGG", "AACTTG"]
        self.assertIn(actual, expected)
if __name__ == '__main__':
unittest.main()
|
989,204 | 32dbd28dd03fa8395577f14650123d3f279dcd17 | # -*- coding:utf-8 -*-
from const.const import Const,LogConst
from fluctuation_invest import FluctuationInvest
from naive_invest import NaiveInvest
from log.logger import Logger
class InvestManager(object):
    """Holds a pool of deposited money and fans it out to strategies."""

    def __init__(self):
        # Strategy objects, selection weights and display names all share
        # the same Const.* keys.
        self.invests = {
            Const.NAIVE_INVEST: NaiveInvest(),
            Const.FLUCTUATION_INVEST: FluctuationInvest(),
        }
        # Fraction of the pool routed to each strategy (zero disables it).
        self.percents = {
            Const.NAIVE_INVEST: 1,
            Const.FLUCTUATION_INVEST: 0,
        }
        self.names = {
            Const.NAIVE_INVEST: 'NAIVE_INVEST',
            Const.FLUCTUATION_INVEST: 'FLUCTUATION_INVEST',
        }
        # Total money currently managed ("throwed" == deposited).
        self.money_throwed = 0
        self.init()

    def init(self):
        # Extension hook; intentionally empty.
        pass

    def throw_money(self, cost):
        """Deposit *cost* into the managed pool."""
        self.money_throwed += cost

    def check_curr_throwed_money(self):
        """Return the current size of the managed pool."""
        return self.money_throwed

    def take_out_money(self, takeout_cost):
        """Withdraw *takeout_cost* when strictly more than it is available.

        Returns the withdrawn amount, or 0 when the pool is too small.
        NOTE(review): uses ">" rather than ">=", so a pool exactly equal
        to takeout_cost cannot be withdrawn -- confirm this is intended.
        """
        if self.money_throwed > takeout_cost:
            self.money_throwed -= takeout_cost
            return takeout_cost
        return 0

    def take_out_all(self):
        """Empty the pool and return everything that was in it."""
        res = self.money_throwed
        self.money_throwed = 0
        return res
def invest_process_month(self):
res = 0
for i,invest in self.invests.iteritems():
percent = self.percents[i]
if percent == 0:
continue
money = self.money_throwed * percent
invest_money = invest.get_invest_month(money)
msg = '{} invest_process_month {}'.format(self.names[i],invest_money)
Logger.log(msg, tag_name=LogConst.MONTHLY_INVEST)
res += invest_money
self.money_throwed = res |
989,205 | bebc557b8a83fbe094d9058b01016dfb5b069775 | # -*- coding: utf-8 -*-
import tornado.web
from models import MGMessage
class IndexHandler(tornado.web.RequestHandler):
    """Guestbook page: GET renders all messages, POST appends one."""

    def get(self):
        messages = MGMessage().getMessages()
        # TITLE is user-facing Chinese text ("message board"); the
        # template renders it verbatim.
        self.render('index.html', TITLE='留言板', messages=messages)

    def post(self):
        name = self.get_argument('name')
        says = self.get_argument('says')
        MGMessage().addMessage(name, says)
        # Redirect-after-post so a browser refresh does not resubmit.
        self.redirect('/')
|
989,206 | a3c37052694fc33a0bd29ed01bcd67be4d36bb1b | from django.apps import AppConfig
class ReadyConfig(AppConfig):
    """Django application configuration for the "ready" app."""
    # Dotted path of the application this config belongs to.
    name = 'ready'
|
989,207 | a2a5b9665624557c7b9d76400392df573435e385 | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 11 16:49:09 2019
@author: HP
"""
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 5 17:49:46 2019
@author: HP
"""
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 23 17:00:45 2019
@author: HP
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 14:27:11 2019
@author: HP
"""
#from googletrans import Translator
import io
import datetime
import os
from tkinter import *
from tkinter import ttk
import speech_recognition as sr
from pygame import mixer
import pyperclip
import pyttsx3
import pyaudio
import threading
import webbrowser
import nltk
import re
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize,sent_tokenize
import wikipedia
import engineio
from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
from googletrans import Translator
import xlrd
text="english"
loc = ("C:/Users/HP/Desktop/url.xlsx")
# To open Workbook
wb = xlrd.open_workbook(loc)
sheet = wb.sheet_by_index(0)
#Tkinter app init (using custom ico in the title), using custom theme
engineio = pyttsx3.init()
voices = engineio.getProperty('voices')
engineio.setProperty('rate', 130)
engineio.setProperty('voice',voices[0].id)
def speak(text):
    """Speak *text* aloud via the module-level pyttsx3 engine.

    Blocks until the utterance has finished (runAndWait).
    """
    engineio.say(text)
    engineio.runAndWait()
def talk(audio):
    """Run each line of *audio* as a shell command.

    BUG FIX: the original passed the whole *audio* string to os.system on
    every iteration, executing the entire text once per line instead of
    executing each individual line once.

    NOTE(review): passing recognized speech to os.system is a shell
    injection risk; keep this behind trusted input only.
    """
    for line in audio.splitlines():
        os.system(line)
nltk.download('punkt')
root = Tk()
root.title('Voice Input')
root.iconbitmap('mic.ico')
style = ttk.Style()
style.theme_use('winnative')
# The image that is used for the speak button
photo = PhotoImage(file='microphone.png').subsample(15,15)
# Creating a guiding 'label' widget
label1 = ttk.Label(root, text="Say something", font='Courier 11 bold')
label1.grid(row=0, column=1)
# the main part of the app. Defining what the click of the speak button does
x = datetime.datetime.now().strftime("%y-%m-%d-%H-%M")
a = x[9:]
atr = a[0]+a[1]
atr1 = int(atr)
if atr1 <= 6 and atr1 >= 0:
speak('its late night buddy..better go sleep..but how can i help you..')
elif atr1 >6 and atr1 < 12:
speak( 'HEY!!GOOD MORNING buddy... how can i help you')
elif atr1 >= 12 and atr1 <=16:
speak('HEY BUDDY,GOOD AFTERNOON TO YOU!!how can i help you')
elif atr1 >=16 and atr1<=20:
speak('GOOD EVENING BUDDY!!how can i help you')
else:
speak('its already nighT buddy....but can i help you')
#speak("what language do you prefer to speak")
def fn1():
    """Listen on the microphone, recognize speech, and speak a translation.

    The module-global ``text`` doubles as both the language selector on
    entry (e.g. "Tamil", "hindi") and the recognized/returned text on
    exit -- a pre-existing quirk that callers rely on.

    :return: the recognized text (also stored in the global ``text``)
    """
    # Recognizer tuned with thresholds that were found to work well.
    r = sr.Recognizer()
    r.pause_threshold = 0.7
    r.energy_threshold = 400
    with sr.Microphone() as source:
        try:
            audio = r.listen(source, timeout=5)
            # use your own API key. get it here https://cloud.google.com/speech/
            global text
            # Pick the recognition language from the current value of
            # ``text``; default to Indian English.
            # BUG FIX: Malayalam is "ml-IN" (not "ma-IN") and Kannada is
            # "kn-IN" (not "ka-IN") per the BCP-47 language codes accepted
            # by the Google Speech API.
            if text == "Tamil":
                text = r.recognize_google(audio, language='ta-IN')
                print(text)
            elif text == "telegu":
                text = r.recognize_google(audio, language='te-IN')
            elif text == "malayalam":
                text = r.recognize_google(audio, language='ml-IN')
            elif text == "hindi":
                text = r.recognize_google(audio, language='hi-IN')
            elif text == "kanada" or text == "kanadam":
                text = r.recognize_google(audio, language='kn-IN')
            else:
                text = r.recognize_google(audio, language='en-IN')
            # Translate whatever was recognized into English, then echo
            # and speak each translation.
            destination_languages = {
                'en': 'english'
            }
            translator = Translator()
            for key, value in destination_languages.items():
                x = translator.translate(text, dest=value).text
                print(x)
                speak(x)
            print("listened")
            # Play the "done" sound effect after recognition completed.
            mixer.music.load('chime2.mp3')
            mixer.music.play()
        except sr.UnknownValueError:
            print("THE AUDIO WAS NOT CLEAR..just repeat again ")
            # NOTE(review): unbounded recursion if the audio stays unclear.
            fn1()
        except sr.RequestError as e:
            print("Could not request results from Google Speech Recognition service; {0}".format(e))
        else:
            pass
    return text
def buttonClick():
# using the pygame mixer to play sound effects, 'prompting' the user to speak
mixer.init()
mixer.music.load('chime1.mp3')
mixer.music.play()
text=fn1()
# bool=re.search("event",text)
#bool=re.search("events",text)
#bool=re.search("college",text)
#bool=re.search("club",text)
global count
count=0
# using threading to prevent the app from freezing or becoming unresponsive
if(text=="what can you do for me"):
speak("HI.. I HAVE 3 FEATURES FOR U..I CAN HELP YOU IN TRANSLATING BASIC LANGUAGES TO ENGLISH..")
speak("HELP TO FETCH EVENT DETAILS..HELP TO FETCH CONTENTS FROM WIKIPEDIA AND DISPLAY!!")
speak("select what you want..")
text="english"
text=fn1()
count=count+1
if(text=="translate" or text=="translation"):
speak("what languauge you want to speak")
print("what languauge you want to speak")
text=fn1()
print(text)
speak("speak now")
text=fn1()
count=count+1
url=[ 'https://www.youtube.com/','https://www.netflix.com/r/','https://www.facebook.com/',
'https://www.instagram.com/r/',
'https://www.instagram.com/r/','whatsappmessenger://','https://www.swiggy.com/','telegram://']
app=['youtube','Netflix','facebook','instagram','insta','whatsapp','swiggy','telegram']
#text= "hey user.. open netflix for me"
global b
global b1
b=0
b1=0
for j in range(len(app)):
if(re.search(app[j],text)):
b=1
val=app[j]
print(app[j])
if(re.search("open",text)):
b1=1
if((b+b1)==2):
for i in range(len(url)):
if(re.search(val,url[i])):
print("Haiyaaaaaa")
print(app[i])
if(app[i]=="whatsapp" or app[i]=="telegram"):
print("just click on the first image or icon..")
webbrowser.open(url[i])
#text="what are the events happening in next month"
if(re.search("month",text)):
f=re.search("next month",text)
f1=re.search("previous month",text)
f2=re.search("current month",text)
f3=re.search("this month",text)
f4=re.search("month",text)
f5=re.search("last month",text)
f6=re.search("day",text)
f8=re.search("year",text)
x1=0
c1=0
if f:
x1 = datetime.datetime.now().strftime("%m")
x1=int(x1)
x1=x1+1
c1=c1+1
elif f1 and f5:
x1 = datetime.datetime.now().strftime("%m")
x1=int(x1)
x1=x1-1
c1=c1+1
elif f2 or f3:
x1 = datetime.datetime.now().strftime("%m")
x1=int(x1)
c1=c1+1
elif f4 and c1==0 and f6 and f8:
print("specify the month please")
#x1=int(x1)
print(x1)
#x1=x1+1
print(x1)
x1=str(x1)
print(x1)
for j in range(sheet.nrows):
v=sheet.cell_value(j,2)
print(v)
v=str(v)
y=re.search(x1,v)
if y:
print("matched")
w=sheet.cell_value(j,1)
webbrowser.open(w)
print("Done")
k=1
if(k!=1):
speak("sorry there were no events at that time")
else:
a=text.split(" ")
stop_words = set(stopwords.words('english'))
word_tokens = word_tokenize(text)
#filtered_sentence = [w for w in word_tokens if not w in stop_words]
filtered_sentence = []
for w in word_tokens:
if w not in stop_words:
filtered_sentence.append(w)
text=filtered_sentence
for i in range(len(a)):
b=a[i]
#print(b)
for j in range(sheet.nrows):
v=sheet.cell_value(j,0)
#print(v)
y=re.search(v,b)
if y:
count=count+1
print("matched")
w=sheet.cell_value(j,1)
webbrowser.open(w)
print("Done")
def thr():
    """Run buttonClick on a daemon thread so the Tk UI stays responsive."""
    t1 = threading.Thread(target=buttonClick, daemon=True)
    t1.start()
# creating the Speak button, which calls 'thr' which invokes 'buttonClick()'
MyButton1 = Button(root, image=photo, width=150, command=thr, activebackground='#c1bfbf', bd=0)
MyButton1.grid(row=0, column=2)
# making sure the app stay on top of all windows (use this optionally)
root.wm_attributes('-topmost', 1)
# running the mainloop
root.mainloop()
"""
if count==0:
a=text.split(" ");
#for i in a:
x = wikipedia.summary(text,sentences=2)
print(x)
speak(x)
""" |
989,208 | 7f096fb19c75bc5cffd27c6ba013f2033b15e9c2 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 24 17:28:22 2015
@author: A30123
"""
#import time
#from datetime import date, time, timedelta
#p=time.strptime("2015-08-24 17:26:48","%Y-%m-%d %H:%M:%S")
#d= timedelta(days=2.5)
#d2=time.strptime("2.5","%d")
#p="2015-08-24 17:26:48"
#d
#
#time_limit="03:00:00"
#time_limit+timedelta(minutes=60)
#
#timedelta()
#
#p+ time.hour(2.5)
import datetime
p2=datetime.datetime.strptime("2015-08-24 17:26:48","%Y-%m-%d %H:%M:%S")
p2-datetime.timedelta(days=2.5) |
989,209 | ba374ba15335bcdcd323dd18d254719990548532 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
@description: 根据省份分割列表/Segmentation by province
@file_name: segmentation_by_province.py
@project: my_love
@version: 1.0
@date: 2019/05/16 22:35
@author: air
"""
__author__ = 'air'
import pandas as pd
def segmentation_by_province(province_list):
    """Flatten per-province Excel workbooks into one long table (total.xlsx).

    Each workbook holds one province: the first half of the columns is
    income per city, the second half expenditure, one data row per year
    starting at 2009 (row index 1 -> year 2009).

    :param province_list: list of province Excel file names
    :return: None (writes total.xlsx)
    """
    columns = ['城市名', '收入', '支出', '年份']
    rows = []
    for province_file in province_list:
        province = province_file[:province_file.find('.')]
        df = pd.read_excel(province_file)
        n_rows = df.shape[0]
        n_cities = df.shape[1] // 2
        for j in range(1, n_cities + 1):
            # Column header row holds "...:<city>"; keep only the city part.
            city = df.iloc[0, j]
            city = province + '-' + city[city.rfind(':') + 1:]
            for i in range(1, n_rows):
                # One output row per (city, year); expenditure sits in the
                # matching column of the second half of the sheet.
                rows.append({
                    '城市名': city,
                    '收入': str(df.iloc[i, j]),
                    '支出': str(df.iloc[i, j + n_cities]),
                    '年份': str(2008 + i),
                })
    # MODERNIZATION: DataFrame.append (and to_excel's encoding kwarg) were
    # removed in pandas 2.0; collect the rows and build the frame once,
    # which is also far faster than appending row by row.
    df_total = pd.DataFrame(rows, columns=columns)
    df_total.to_excel('total.xlsx', index=False)
def segmentation_by_program(excel_list):
"""
按照项目分割Excel
:param excel_list: 传入项目Excel文件列表
:return:
"""
df_gdp = pd.read_excel(excel_list[0], index_col=0)
df_foreign = pd.read_excel(excel_list[1], index_col=0)
df_people = pd.read_excel(excel_list[2], index_col=0)
df_consume = pd.read_excel(excel_list[3], index_col=0)
df_cost = pd.read_excel(excel_list[4], index_col=0)
df_total = pd.DataFrame(columns=['城市名', '年份', 'GDP', '实际利用外资金额', '年末总人口', '社会消费品零售额', '财政支出'])
new_row = pd.DataFrame(index=['0'], columns=['城市名', '年份', 'GDP', '实际利用外资金额', '年末总人口', '社会消费品零售额', '财政支出'])
for i in range(284):
for j in range(8):
new_row.iloc[0, 0] = df_gdp.iloc[i, 0]
new_row.iloc[0, 1] = 2010 + j
new_row.iloc[0, 2] = df_gdp.iloc[i, j + 1]
new_row.iloc[0, 3] = df_foreign.iloc[i, j + 1]
new_row.iloc[0, 4] = df_people.iloc[i, j + 1]
new_row.iloc[0, 5] = df_consume.iloc[i, j + 1]
new_row.iloc[0, 6] = df_cost.iloc[i, j + 1]
df_total = df_total.append(new_row, ignore_index=True)
df_total.to_excel('city.xlsx', index=False, encoding='utf-8')
def segmentation_by_file(file_list):
"""
按照文件分割Excel
:param file_list: 传入文件列表
:return:
"""
columns = [file[:file.find('.')] for file in file_list]
columns.insert(0, '年份')
columns.insert(0, '英文城市名')
columns.insert(0, '城市名')
df = pd.DataFrame(columns=columns)
new = pd.DataFrame(index=['0'], columns=columns)
df_list = []
for file in file_list:
df_list.append(pd.read_excel(file, index_col=2))
city = int(df_list[0].shape[0])
for i in range(city):
for j in range(9):
new.iloc[0, 0] = df_list[0].iloc[i, 0]
new.iloc[0, 1] = df_list[0].iloc[i, 1]
new.iloc[0, 2] = 2009 + j
new.iloc[0, 3] = df_list[0].iloc[i, j + 2]
new.iloc[0, 4] = df_list[1].iloc[i, j + 2]
new.iloc[0, 5] = df_list[2].iloc[i, j + 2]
new.iloc[0, 6] = df_list[3].iloc[i, j + 2]
new.iloc[0, 7] = df_list[4].iloc[i, j + 2]
new.iloc[0, 8] = df_list[5].iloc[i, j + 2]
new.iloc[0, 9] = df_list[6].iloc[i, j + 2]
new.iloc[0, 10] = df_list[7].iloc[i, j + 2]
new.iloc[0, 11] = df_list[8].iloc[i, j + 2]
new.iloc[0, 12] = df_list[9].iloc[i, j + 2]
df = df.append(new, ignore_index=True)
df.to_excel('area.xlsx', index=False, encoding='utf-8')
def segmentation_by_city(file_list):
"""
按照城市分割Excel
:param file_list: 传入城市列表
:return:
"""
df_people = pd.read_excel(file_list[0], index_col=0)
df_income = pd.read_excel(file_list[1], index_col=0)
df_consume = pd.read_excel(file_list[2], index_col=0)
df_fdi = pd.read_excel(file_list[3], index_col=0)
df_retail = pd.read_excel(file_list[4], index_col=0)
df_area = pd.read_excel(file_list[5], index_col=0)
df_finance = pd.read_excel(file_list[6], index_col=0)
df_gdp = pd.read_excel(file_list[7], index_col=0)
df_total = pd.DataFrame(columns=['城市名', '英文城市名', '年份', '人口', '城镇人均可支配收入', '城镇人均消费支出', '外商直接投资(实际使用)',
'消费品零售', '行政区域土地面积', '财政支出', 'GDP'])
new_row = pd.DataFrame(index=['0'], columns=['城市名', '英文城市名', '年份', '人口', '城镇人均可支配收入', '城镇人均消费支出', '外商直接投资(实际使用)',
'消费品零售', '行政区域土地面积', '财政支出', 'GDP'])
for i in range(284):
for j in range(9):
new_row.iloc[0, 0] = df_gdp.iloc[i, 0]
new_row.iloc[0, 1] = df_gdp.iloc[i, 1]
new_row.iloc[0, 2] = 2010 + j
new_row.iloc[0, 3] = df_people.iloc[i, j + 2]
new_row.iloc[0, 4] = df_income.iloc[i, j + 2]
new_row.iloc[0, 5] = df_consume.iloc[i, j + 2]
new_row.iloc[0, 6] = df_fdi.iloc[i, j + 2]
new_row.iloc[0, 7] = df_retail.iloc[i, j + 2]
new_row.iloc[0, 8] = df_area.iloc[i, j + 2]
new_row.iloc[0, 9] = df_finance.iloc[i, j + 2]
new_row.iloc[0, 10] = df_gdp.iloc[i, j + 2]
df_total = df_total.append(new_row, ignore_index=True)
df_total.to_excel('program.xlsx', index=False, encoding='utf-8')
def segmentation_by_area(file_list):
"""
按照地区分割Excel
:param file_list: 传入地区列表
:return:
"""
df_gdp = pd.read_excel(file_list[0], index_col=1)
df_middle_student = pd.read_excel(file_list[1], index_col=1)
df_middle_school = pd.read_excel(file_list[2], index_col=1)
df_middle_teacher = pd.read_excel(file_list[3], index_col=1)
df_bus = pd.read_excel(file_list[4], index_col=1)
df_suburb_income = pd.read_excel(file_list[5], index_col=1)
df_bed = pd.read_excel(file_list[6], index_col=1)
df_hospital = pd.read_excel(file_list[7], index_col=1)
df_fai = pd.read_excel(file_list[8], index_col=1)
df_city_income = pd.read_excel(file_list[9], index_col=1)
df_fdi = pd.read_excel(file_list[10], index_col=1)
df_primary_student = pd.read_excel(file_list[11], index_col=1)
df_primary_school = pd.read_excel(file_list[12], index_col=1)
df_primary_teacher = pd.read_excel(file_list[13], index_col=1)
df_industry = pd.read_excel(file_list[14], index_col=1)
df_people = pd.read_excel(file_list[15], index_col=1)
df_resident = pd.read_excel(file_list[16], index_col=1)
df_consume = pd.read_excel(file_list[17], index_col=1)
df_life = pd.read_excel(file_list[18], index_col=1)
df_electric = pd.read_excel(file_list[19], index_col=1)
df_finance = pd.read_excel(file_list[20], index_col=1)
df = pd.DataFrame(columns=['城市名', '英文城市名', '年份', 'GDP', '中学在校生数', '中学学校数', '中学教师数',
'公共交通车辆拥有量', '农村人均可支配收入', '医院、卫生院床位数', '医院、卫生院数',
'固定资产投资', '城镇人均可支配收入', '外商直接投资', '小学在校生数', '小学学校数',
'小学教师数', '工业固体废物综合利用率', '常住人口', '户籍人口', '消费品零售',
'生活垃圾无害化处理率', '电力消费', '财政支出'])
new_row = pd.DataFrame(index=['0'], columns=['城市名', '英文城市名', '年份', 'GDP', '中学在校生数', '中学学校数',
'中学教师数', '公共交通车辆拥有量', '农村人均可支配收入', '医院、卫生院床位数',
'医院、卫生院数', '固定资产投资', '城镇人均可支配收入', '外商直接投资',
'小学在校生数', '小学学校数', '小学教师数', '工业固体废物综合利用率',
'常住人口', '户籍人口', '消费品零售', '生活垃圾无害化处理率', '电力消费',
'财政支出'])
for i in range(21):
for j in range(10):
new_row.iloc[0, 0] = str(df_gdp.iloc[i, 0])[str(df_gdp.iloc[i, 0]).rfind(':') + 1:]
new_row.iloc[0, 1] = df_gdp.iloc[i, 1]
new_row.iloc[0, 2] = 2009 + j
new_row.iloc[0, 3] = df_gdp.iloc[i, j + 4]
new_row.iloc[0, 4] = df_middle_student.iloc[i, j + 4]
new_row.iloc[0, 5] = df_middle_school.iloc[i, j + 4]
new_row.iloc[0, 6] = df_middle_teacher.iloc[i, j + 4]
new_row.iloc[0, 7] = df_bus.iloc[i, j + 4]
new_row.iloc[0, 8] = df_suburb_income.iloc[i, j + 4]
new_row.iloc[0, 9] = df_bed.iloc[i, j + 4]
new_row.iloc[0, 10] = df_hospital.iloc[i, j + 4]
new_row.iloc[0, 11] = df_fai.iloc[i, j + 4]
new_row.iloc[0, 12] = df_city_income.iloc[i, j + 4]
new_row.iloc[0, 13] = df_fdi.iloc[i, j + 4]
new_row.iloc[0, 14] = df_primary_student.iloc[i, j + 4]
new_row.iloc[0, 15] = df_primary_school.iloc[i, j + 4]
new_row.iloc[0, 16] = df_primary_teacher.iloc[i, j + 4]
new_row.iloc[0, 17] = df_industry.iloc[i, j + 4]
new_row.iloc[0, 18] = df_people.iloc[i, j + 4]
new_row.iloc[0, 19] = df_resident.iloc[i, j + 4]
new_row.iloc[0, 20] = df_consume.iloc[i, j + 4]
new_row.iloc[0, 21] = df_life.iloc[i, j + 4]
new_row.iloc[0, 22] = df_electric.iloc[i, j + 4]
new_row.iloc[0, 23] = df_finance.iloc[i, j + 4]
df = df.append(new_row, ignore_index=True)
df.to_excel('area.xlsx', index=False, encoding='utf-8')
def segmentation_by_year(input_file, start, end):
    """Split one wide Excel file into one output file per year.

    The input is assumed to contain ``years = end - start`` consecutive
    rows per city, so row ``city * years + (year - start)`` belongs to
    *year*. Writes one ``area<year>.xlsx`` per year.

    :param input_file: Excel file name to split
    :param start: first year (inclusive)
    :param end: last year (exclusive)
    :return: None
    """
    df = pd.read_excel(input_file, index_col=2)
    columns = list(df.columns)
    years = end - start
    for year in range(start, end):
        # MODERNIZATION: DataFrame.append (and to_excel's encoding kwarg)
        # were removed in pandas 2.0; collect the per-city rows and build
        # each year's frame in one shot.
        rows = []
        for city in range(int(df.shape[0]) // years):
            source_row = city * years + (year - start)
            rows.append([df.iloc[source_row, i] for i in range(len(columns))])
        df_year = pd.DataFrame(rows, columns=columns)
        df_year.to_excel('area' + str(year) + '.xlsx', index=False)
if __name__ == '__main__':
# p_list = ['云南.xls', '内蒙古.xls', '吉林.xls', '四川.xls', '宁夏.xls', '安徽.xls', '山东.xls', '黑龙江.xls',
# '广东.xls', '广西.xls', '新疆.xls', '江苏.xls', '江西.xls', '河北.xls', '河南.xls', '浙江.xls', '海南.xls',
# '湖北.xls', '湖南.xls', '甘肃.xls', '福建.xls', '贵州.xls', '辽宁.xls', '陕西.xls', '青海.xls', '山西.xls']
# e_list = ['GDP.xls', '实际利用外资金额.xls', '年末总人口.xls', '社会消费品零售额.xls', '财政支出.xls']
# f_list = ['人口.xlsx', '城镇人均可支配收入.xlsx', '城镇人均消费支出.xlsx', '外商直接投资(实际使用).xlsx',
# '消费品零售.xlsx', '行政区域土地面积.xlsx', '财政支出.xlsx', 'GDP.xlsx']
# f_list = ['GDP.xlsx', '中学在校生数.xlsx', '中学学校数.xlsx', '中学教师数.xlsx', '公共交通车辆拥有量.xlsx',
# '农村人均可支配收入.xlsx', '医院、卫生院床位数.xlsx', '医院、卫生院数.xlsx', '固定资产投资.xlsx',
# '城镇人均可支配收入.xlsx', '外商直接投资.xlsx', '小学在校生数.xlsx', '小学学校数.xlsx', '小学教师数.xlsx',
# '工业固体废物综合利用率.xlsx', '常住人口.xlsx', '户籍人口.xlsx', '消费品零售.xlsx', '生活垃圾无害化处理率.xlsx',
# '电力消费.xlsx', '财政支出.xlsx']
f_list = ['养老保险.xlsx', '医疗保险.xlsx', '卫生技术人员.xlsx', '卫生机构数.xlsx', '图书馆.xlsx', '失业保险.xlsx',
'年末登记失业人员.xlsx', '床位数.xlsx', '文化人员.xlsx', '邮电总量.xlsx']
# segmentation_by_province(p_list)
# segmentation_by_program(e_list)
# segmentation_by_file(f_list)
# segmentation_by_file(f_list)
segmentation_by_year(r'area20190611.xlsx', 2009, 2018)
|
989,210 | 04e792e4f04dc38ba834d672c8b4d62498072a95 | # email_outbound/functions.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .models import FRIEND_ACCEPTED_INVITATION_TEMPLATE, FRIEND_INVITATION_TEMPLATE, LINK_TO_SIGN_IN_TEMPLATE, \
VERIFY_EMAIL_ADDRESS_TEMPLATE
from django.template.loader import get_template
from django.template import Context
import json
def get_template_filename(kind_of_email_template, text_or_html):
    """Map an email-template kind to its template file name.

    :param kind_of_email_template: one of the *_TEMPLATE constants
    :param text_or_html: "HTML" for the HTML variant; anything else
        selects the plain-text variant
    :return: template file name, e.g. "friend_invitation.html"
    """
    basenames = {
        VERIFY_EMAIL_ADDRESS_TEMPLATE: "verify_email_address",
        FRIEND_INVITATION_TEMPLATE: "friend_invitation",
        FRIEND_ACCEPTED_INVITATION_TEMPLATE: "friend_accepted_invitation",
        LINK_TO_SIGN_IN_TEMPLATE: "link_to_sign_in",
    }
    # Unrecognized kinds fall back to the generic template.
    base = basenames.get(kind_of_email_template, "generic_email")
    extension = ".html" if text_or_html == "HTML" else ".txt"
    return base + extension
def merge_message_content_with_template(kind_of_email_template, template_variables_in_json):
    """Render the text and HTML bodies for one outbound email.

    :param kind_of_email_template: one of the *_TEMPLATE constants
    :param template_variables_in_json: JSON-encoded dict of template
        variables; an optional "subject" key overrides the default subject
    :return: dict with keys success, status, subject, message_text,
        message_html
    """
    success = True
    status = ""
    message_text = ""
    message_html = ""
    # Transfer JSON template variables back into a dict
    template_variables_dict = json.loads(template_variables_in_json)
    template_variables_object = Context(template_variables_dict)
    # Set up the templates (text and HTML variants share one kind).
    text_template_path = "email_outbound/email_templates/" + get_template_filename(kind_of_email_template, "TEXT")
    html_template_path = "email_outbound/email_templates/" + get_template_filename(kind_of_email_template, "HTML")
    # We need to combine the template_variables_in_json with the kind_of_email_template
    text_template = get_template(text_template_path)
    html_template = get_template(html_template_path)
    if "subject" in template_variables_dict:
        subject = template_variables_dict['subject']
    else:
        subject = "From We Vote"
    try:
        message_text = text_template.render(template_variables_object)
        status += "RENDERED_TEXT_TEMPLATE "
        message_html = html_template.render(template_variables_object)
        status += "RENDERED_HTML_TEMPLATE "
    except Exception as e:
        # On any rendering failure fall through with empty bodies and a
        # failure status rather than raising to the caller.
        status += "FAILED_RENDERING_TEMPLATE "
        success = False
    results = {
        'success': success,
        'status': status,
        'subject': subject,
        'message_text': message_text,
        'message_html': message_html,
    }
    return results
|
989,211 | cd9da8bc5ef59f80282a08ba15b0c6950feb52d6 | from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def get_gradient_at_b(x, y, b, m):
    """Partial derivative of the mean-squared error w.r.t. the intercept b."""
    n = len(x)
    residuals = [y[i] - (m * x[i] + b) for i in range(n)]
    return -(2 / n) * sum(residuals)
def get_gradient_at_m(x, y, b, m):
    """Partial derivative of the mean-squared error w.r.t. the slope m."""
    n = len(x)
    weighted_residuals = [x[i] * (y[i] - (m * x[i] + b)) for i in range(n)]
    return -(2 / n) * sum(weighted_residuals)
# Your step_gradient function here
def step_gradient(b_current, m_current, x, y, learning_rate):
    """Apply one gradient-descent update to the intercept b and slope m."""
    grad_b = get_gradient_at_b(x, y, b_current, m_current)
    grad_m = get_gradient_at_m(x, y, b_current, m_current)
    return [b_current - learning_rate * grad_b,
            m_current - learning_rate * grad_m]
# Your gradient_descent function here:
def gradient_descent(x, y, learning_rate, num_iterations):
    """Run num_iterations gradient-descent steps starting from b = m = 0."""
    b, m = 0, 0
    for _ in range(num_iterations):
        b, m = step_gradient(b, m, x, y, learning_rate)
    return [b, m]
# months = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
# revenue = [52, 74, 79, 95, 115, 110, 129, 126, 147, 146, 156, 184]
#
# # Uncomment the line below to run your gradient_descent function
# [b, m] = gradient_descent(months, revenue, 0.01, 1000)
#
# # Uncomment the lines below to see the line you've settled upon!
# y = [m * x + b for x in months]
#
# line_fitter = LinearRegression()
#
# months_array = np.array(months).reshape(-1,1)
# line_fitter.fit(months_array, revenue)
# revenue_predict = line_fitter.predict(months_array)
#
# plt.plot(months, revenue, "o")
# plt.plot(months, y)
# plt.plot(months, revenue_predict)
#
# plt.show()
# Mean total honey production per year; reset_index() turns the grouped
# Series back into a regular DataFrame.
df = pd.read_csv("https://content.codecademy.com/programs/data-science-path/linear_regression/honeyproduction.csv")
prod_per_year = (df.groupby('year').totalprod.mean().reset_index())
# scikit-learn expects a 2-D feature matrix: reshape(-1, 1) turns the 1-D
# year column into an (n_samples, 1) array.
X = prod_per_year.year
X = X.values.reshape(-1, 1)
y = prod_per_year.totalprod.values.reshape(-1, 1)
plt.scatter(X, y)
plt.xlabel('Year')
plt.ylabel('Production Per Year')
# BUG FIX: the file imports `from sklearn.linear_model import LinearRegression`,
# so the name `linear_model` was undefined here (NameError at runtime).
regr = LinearRegression()
regr.fit(X, y)
y_predict = regr.predict(X)
plt.plot(X, y_predict)
plt.show()
# Extrapolate the fitted trend out to 2050 and report the 2050 prediction.
X_future = np.array(range(2013, 2051))
X_future = X_future.reshape(-1, 1)
future_predict = regr.predict(X_future)
print(future_predict[X_future == 2050])
plt.figure()
plt.plot(X_future, future_predict)
plt.show()
|
989,212 | 427c7917c8379e7a968904ec68fa5e6d99c04baf | class Solution(object):
def permute(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
result = []
def isSolution(nums, n):
if len(nums) == n:
return True
else:
return False
def constructCandidates(nums, inPerms):
candidates = []
for n in nums:
if n not in inPerms:
candidates.append(n)
return candidates
def processSolution(nums):
print nums
def backtrack(nums, inPerms, j):
print inPerms
if isSolution(inPerms, len(nums)):
#processSolution(inPerms)
result.append(list(inPerms))
else:
candidates = constructCandidates(nums, inPerms)
inPerms.append(0)
for c in candidates:
inPerms[j] = c
backtrack(nums, inPerms, j+1)
inPerms.pop()
backtrack(nums, [], 0)
return result
# Time a single run of the permutation generator.
s = Solution()
import time
start_time = time.time()
# BUG FIX: Python 2 print statement converted to the parenthesized form so the
# script also parses and runs under Python 3 (unchanged output on Python 2).
print(s.permute([1, 2, 3]))
print("--- %s seconds ---" % (time.time() - start_time))
|
989,213 | 3a0ab9b6e5b1c7d19746aa0c2e476af1aa400bc7 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from urllib2 import urlopen
# POST the contents of the file named by argv[2] to the URL given in argv[1]
# and print the response body. NOTE(review): Python 2 only — urllib2 and the
# bare `print` statement do not exist in Python 3.
print urlopen(sys.argv[1],open(sys.argv[2]).read()).read()
|
989,214 | 157111e31a14bfd41be3878500430b610d1ff6a1 | from spacy.matcher import PhraseMatcher, DependencyMatcher
def dependencymatch(term, nlp):
    """Build a spaCy DependencyMatcher with *term* registered as the single
    pattern under the key "dependency"."""
    dep_matcher = DependencyMatcher(nlp.vocab)
    dep_matcher.add("dependency", [term])
    return dep_matcher
|
989,215 | e6a8a6dd69fd758728ebdccfbdbe383140dcd013 | import pdb
import calcROI
from matplotlib import pyplot
from matplotlib import cm
def detect_heart_region(images):
    """Detect the heart region in all the given images.

    Delegates to calcROI.calc_rois; returns the (rois, circles) pair.
    The original author's note suggests images is shaped
    (num_slices, num_times, width, height) — TODO confirm.
    """
    region_maps, detected_circles = calcROI.calc_rois(images)
    return region_maps, detected_circles
|
989,216 | 0cb8a3376aed5c8876b6298223a918c92b6ff054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import zenodorequest
from zenodorequest import *
from bottle import route, run, request, response
""" enable cross domain ajax requests when using json of another domain"""
def enable_cors(fn):
    """Decorator adding permissive CORS headers to a bottle route handler.

    Preflight OPTIONS requests receive only the headers; every other method
    falls through to the wrapped handler.
    """
    from functools import wraps  # local import keeps this fix self-contained

    @wraps(fn)  # preserve the handler's name/docstring for debugging & tooling
    def _enable_cors(*args, **kwargs):
        # set CORS headers
        response.headers['Access-Control-Allow-Origin'] = '*'
        response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, OPTIONS'
        response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
        if request.method != 'OPTIONS':
            # actual request; reply with the actual response
            return fn(*args, **kwargs)
    return _enable_cors
@route('/submit', method='GET')
@enable_cors
def readjson():
    """Handle /submit: persist the Zenodo record identified by ?uuid=..."""
    uuid = request.query.get('uuid','')
    # Guard clause: reject requests arriving without a uuid.
    if(uuid == ""):
        return { "code":"fail", "message":"empty uuid"}
    zenodo = ZenodoRequest(uuid)
    return {'data':zenodo.saveInDatabase()}
""" to launch local server"""
"""run(host='localhost', port=8084, debug=True)"""
|
989,217 | b43465f521b01e663f3bf425310fdd9dd4e8bbe3 | import requests
import os
import json
import unittest
import stableconfigs
import time
class APITest(unittest.TestCase):
    """End-to-end tests for the task HTTP API.

    Requires a server listening on localhost:5005. Status-code convention
    observed here: 202 = accepted/processing, 203 = still processing,
    200 = done, 401 = request rejected with a TBNException payload.
    """
    URL = 'http://localhost:5005/'
    START = URL + 'task'  # POST a new task
    GETSTATUS = URL + 'status/'  # poll a task's status / result
    TERMINATE = URL + 'terminate/'  # cancel a running task
    def test_basic_local_server(self):
        """Submit a 4-monomer, 2-generation problem and check count/entropy."""
        monomer_input = ["a b >mon1",
                         "a* b* >mon2",
                         "a >mon3",
                         "b >mon4"]
        my_mon = []
        # Tokenize each monomer line into its whitespace-separated tokens.
        for line in monomer_input:
            tokens = line.strip().split(' ')
            my_mon.append(tokens)
        dicToSend = {'monomers': my_mon, 'gen':2}
        res = requests.post(self.START, json=dicToSend)
        self.assertEqual(res.status_code, 202)
        task_id = json.loads(res.text)["task_id"]
        # NOTE(review): busy-polls with no sleep between requests; consider a
        # short time.sleep() inside the loop.
        while res.status_code == 202 or res.status_code == 203:
            res = requests.get(self.GETSTATUS + str(task_id))
        self.assertEqual(res.status_code, 200)
        data = json.loads(res.text)
        self.assertEqual(data["count"], 2)
        self.assertEqual(data["entropy"], 3)
    def test_basic_with_constraints(self):
        """Same problem with a FREE constraint; expects a reduced count."""
        monomer_input = ["a b >mon1",
                         "a* b* >mon2",
                         "a >mon3",
                         "b >mon4"]
        my_mon = []
        for line in monomer_input:
            tokens = line.strip().split(' ')
            my_mon.append(tokens)
        constr_input = ["FREE mon1"]
        my_const = []
        for line in constr_input:
            tokens = line.strip().split(' ')
            my_const.append(tokens)
        dicToSend = {'monomers': my_mon, 'constraints': my_const}
        res = requests.post(self.START, json=dicToSend)
        self.assertEqual(res.status_code, 202)
        task_id = json.loads(res.text)["task_id"]
        while res.status_code == 202 or res.status_code == 203:
            res = requests.get(self.GETSTATUS + str(task_id))
        self.assertEqual(res.status_code, 200)
        data = json.loads(res.text)
        self.assertEqual(data["count"], 1)
        self.assertEqual(data["entropy"], 3)
    def test_empty_error(self):
        """An empty monomer list is accepted but must resolve to a TBNException."""
        monomer_input = []
        my_mon = []
        my_const = []
        dicToSend = {'monomers': my_mon, 'constraints': my_const}
        res = requests.post(self.START, json=dicToSend)
        self.assertEqual(res.status_code, 202)
        task_id = json.loads(res.text)["task_id"]
        while res.status_code == 202 or res.status_code == 203:
            res = requests.get(self.GETSTATUS + str(task_id))
        self.assertEqual(res.status_code, 401)
        data = json.loads(res.text)
        self.assertEqual(data["status"], "TBNException")
        self.assertTrue("Input contains no monomers" in str(data["message"]))
    def test_bsite_anypaired_constraint_exception(self):
        """ANYPAIRED on an unpairable binding site must raise a TBNException."""
        my_mon = [["a:s1"]]
        my_const = [["ANYPAIRED", "s1"]]
        dicToSend = {'monomers': my_mon, 'constraints': my_const}
        res = requests.post(self.START, json=dicToSend)
        self.assertEqual(res.status_code, 202)
        task_id = json.loads(res.text)["task_id"]
        while res.status_code == 202 or res.status_code == 203:
            res = requests.get(self.GETSTATUS + str(task_id))
        self.assertEqual(res.status_code, 401)
        data = json.loads(res.text)
        self.assertEqual(data["status"], "TBNException")
        self.assertTrue("Binding Site [s1]" in str(data["message"]))
    # Flakey Test
    def test_terminate(self):
        """Terminating a running task should end it with an Early Termination
        error. NOTE(review): inherently racy — the task may finish before the
        DELETE lands, hence the 'Flakey' tag above."""
        monomer_input = ["a b >mon1",
                         "a* b* >mon2",
                         "a >mon3",
                         "b >mon4"]
        my_mon = []
        for line in monomer_input:
            tokens = line.strip().split(' ')
            my_mon.append(tokens)
        dicToSend = {'monomers': my_mon, 'gen': 1}
        res = requests.post(self.START, json=dicToSend)
        task_id = json.loads(res.text)["task_id"]
        res = requests.delete('http://localhost:5005/terminate/' + task_id)
        self.assertEqual(res.status_code, 200)
        res = requests.get(self.GETSTATUS + task_id)
        while res.status_code == 202 or res.status_code == 203:
            res = requests.get(self.GETSTATUS + str(task_id))
        if res.status_code != 200:
            data = json.loads(res.text)
            self.assertEqual(data["status"], "TBNException")
            self.assertTrue("Early Termination" in str(data["message"]))
if __name__ == '__main__':
    unittest.main()
|
989,218 | 0a0c6c4bf139a100f1a50389ad91c8791b526053 | '''el resultado es un documento en sql que sube UNA LINEA de info a la db
etapas:
1) nombre del pkmn en tabla pokemon
2) asociacion del pkmn con sus tipos en pkmn_tipos
2.1) agrega un tipo
2.2) pregunta si tiene segundo tipo
2.2.1) agrega segundo tipo
3) imprime el documento
la información debe agregarse a la lista datos
'''
# Menu-number (string) -> pokemon type name (Spanish). The keys correspond to
# the numbered menu printed by pkmn_tipos(); values feed the pkmn_tipos table.
tipos = {
    '1': 'agua',
    '2': 'acero',
    '3': 'bicho',
    '4': 'fuego',
    '5': 'dragon',
    '6': 'electrico',
    '7': 'fantasma',
    '8': 'hada',
    '9': 'hielo',
    '10': 'lucha',
    '11': 'planta',
    '12': 'normal',
    '13': 'roca',
    '14': 'siniestro',
    '15': 'tierra',
    '16': 'veneno',
    '17': 'volador',
    '18': 'psiquico',
}
# Collected row data, filled positionally: [number, name, type1, (optional) type2].
datos = []
def pkmn_tipos2():
    # Prompt for and record the pokemon's second type in `datos`.
    segundo_tipo = input('escribe el tipo: ')
    datos.append(segundo_tipo)
def pkmn_tipos():
    # Prompt for the pokemon's (first) type and append it to `datos`.
    counter = 1
    print('estos son los tipos disponibles:')
    # Print a numbered menu of the available types.
    for i in tipos.values():
        print(f'{counter}) {i}')
        counter += 1
    a = input('escribe el tipo: ')
    datos.append(a)
    # Ask whether the pokemon has a second type; collect it too if so.
    seg = int(input('''tiene segundo tipo?
    0) no
    1) si
    tu respuesta: '''))
    if seg == 1:
        pkmn_tipos2()
    else:
        pass
def nombrepkmn():
    # Prompt for and record the pokemon's name in `datos`.
    nombre = input('escribe el nombre: ')
    datos.append(nombre)
def numeropkmn():
    # Prompt for and record the pokemon's pokedex number in `datos`.
    numero = int(input('escribe el numero del pkmn: '))
    datos.append(numero)
def run():
    # Collect number, name and type(s) in this exact order — imprimir() reads
    # `datos` positionally — then write the SQL file.
    numeropkmn()
    nombrepkmn()
    pkmn_tipos()
    imprimir()
def imprimir(path="d:/00Eternidad/00Drive/Documentos vivos/Proyectos/Rentabilidad/Base de datos SQL/pkmn/pkmn_datos.sql"):
    """Write the collected `datos` as SQL INSERT statements.

    `datos` layout (positional): [number, name, type1, (optional) type2].
    :param path: output file path; new optional parameter whose default keeps
        the original hard-coded location, so existing callers are unaffected.

    NOTE(review): values are interpolated straight into the SQL text; fine for
    this interactive local tool, but do not reuse with untrusted input.
    """
    # `with` guarantees the file is closed even if a write fails.
    with open(path, "w") as file:
        file.write(f'''
INSERT INTO pokemon (id, nombre)
VALUES ('{datos[0]}', '{datos[1]}')
;
INSERT INTO pkmn_tipos (pkmn_id, tipos_id)
VALUES ('{datos[0]}', '{datos[2]}')
;''')
        # A second type was collected (see pkmn_tipos) — emit its row too.
        if len(datos) == 4:
            # BUG FIX: this INSERT was missing its VALUES keyword, producing
            # invalid SQL for every two-type pokemon.
            file.write(f'''
INSERT INTO pkmn_tipos (pkmn_id, tipos_id)
VALUES ('{datos[0]}', '{datos[3]}')
;''')
if __name__ == '__main__':
run() |
989,219 | 5f9ad2a0df91ad0f58a2e5c90a081b32251ab1b5 | from django.contrib import admin
from django.urls import path, include
from rest_framework.routers import SimpleRouter
from . import views
# Register the banner viewset; the router auto-generates list/detail routes
# under the 'banner' prefix with basename 'banner'.
router = SimpleRouter()
router.register('banner', views.BannerView, 'banner')
urlpatterns = [
    # path('', include(router.urls)),
]
# Append the router-generated routes to the explicit URL patterns.
urlpatterns += router.urls
|
989,220 | 8eb978b5b68e279f2ee26a727fdddab376b29328 | # you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def emailCompare(s1,s2):
    """Return True when two addresses are equivalent under gmail-style rules:
    domains must match exactly, while '+suffix' and '.' in the local part are
    ignored. A single-space string marks an address already consumed by the
    caller and never matches anything."""
    if s1 == ' ' or s2 == ' ':
        return False
    if s1 == s2:
        return True
    parts1 = s1.split('@')
    parts2 = s2.split('@')
    # Different domains can never be equivalent.
    if parts1[1] != parts2[1]:
        return False
    # Canonicalize the local parts: drop everything after '+', then all '.'.
    canon1 = parts1[0].split('+')[0].replace('.', '')
    canon2 = parts2[0].split('+')[0].replace('.', '')
    return canon1 == canon2
def solution(L):
    """Count addresses in L that have at least one later gmail-equivalent
    duplicate; each duplicate group is counted once. NOTE: mutates L in place
    (duplicates are blanked with ' ' so they are never re-matched)."""
    total = 0
    for i in range(len(L) - 1):
        counted = False
        for j in range(i + 1, len(L)):
            if emailCompare(L[i], L[j]):
                L[j] = ' '  # consume the duplicate
                if not counted:
                    total = total + 1
                    counted = True
    return total
#####2 input = [1,2,1,2,1,2,1] output = 7
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(A):
    # NOTE(review): this redefines solution() above (the last definition wins
    # at import time). It appears to compute the longest run of trees using at
    # most two tree types; the comment above says [1,2,1,2,1,2,1] -> 7.
    maxCount = 2
    count = 2
    x = 1
    treeA = A[0]
    treeB = A[x]
    while(x+1 < len(A)):
        x = x + 1
        #print(str(x) + ' ' + str(len(A)))
        # If trees are same move Amy down the line of trees
        if(treeA == treeB):
            # NOTE(review): `treeB = [x]` wraps the *index* in a list, so the
            # equality tests below can never match it — this looks like a bug
            # for `treeB = A[x]`. TODO confirm intent before fixing; the fix
            # changes results for inputs starting with a repeated value.
            treeB = [x]
            count = count + 1
            if(count > maxCount):
                maxCount = count
        if(treeA != treeB):
            # A[x] matches neither tracked type: restart the window at x-1.
            if(A[x] != treeA and A[x] != treeB):
                count = 0
                treeA = A[x-1]
                treeB = A[x]
            else:
                count = count + 1
                if(count > maxCount):
                    maxCount = count
    return maxCount
|
989,221 | 0b9e2660e2cb7054279f75452ce9174b4bee75f6 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-07-04 16:05
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: renames Article2Tag.blog -> article and
    # updates the unique_together constraint to use the new field name.
    dependencies = [
        ('responsitory', '0011_auto_20180704_0124'),
    ]
    operations = [
        migrations.RenameField(
            model_name='article2tag',
            old_name='blog',
            new_name='article',
        ),
        migrations.AlterUniqueTogether(
            name='article2tag',
            unique_together=set([('article', 'tag')]),
        ),
    ]
|
989,222 | cf4f938939a1c7dd06f1f4bd7a4841941eef0da4 | import torch
import torchsl
from torchsl._extensions import _has_ops
from ._helpers import *
__all__ = ['pclda']
# ===========================================================
# Pairwise-Covariance Linear Discriminant Analysis
# ===========================================================
def pclda(X, y, y_unique=None, beta=1, q=1):
    """Pairwise-Covariance Linear Discriminant Analysis (pcLDA) objective.

    :param X: (num_samples, num_features) data matrix (X.size(1) is used as
        the scatter dimension)
    :param y: (num_samples,) class labels
    :param y_unique: optional precomputed unique labels; computed when None
    :param beta: blend between the pairwise within-class scatter (beta) and
        the global within-class scatter (1 - beta)
    :param q: exponent applied to each pairwise separability term
    :return: scalar objective averaged over all sample pairs
    """
    if y_unique is None:
        y_unique = torch.unique(y)
    # Prefer the compiled extension when it is available.
    if _has_ops():
        return torchsl.ops.pclda(X, y, y_unique, beta, q)
    options = dict(dtype=X.dtype, device=X.device)
    num_samples = y.size(0)
    num_classes = y_unique.size(0)
    # ecs: per-class membership vectors (presumably one-hot rows from
    # ._helpers.class_vectors — TODO confirm); ucs: per-class mean vectors.
    ecs = class_vectors(y, y_unique).to(dtype=options['dtype'])
    ucs = class_means(X, ecs)
    y_unique_counts = ecs.sum(1)
    out_dimension = X.size(1)
    # All unordered class pairs (a, b).
    pairs = torch.combinations(torch.arange(num_classes, dtype=torch.long), r=2)
    class_W = torch.empty(num_classes, num_samples, num_samples, **options)
    class_I = torch.empty(num_classes, num_samples, num_samples, **options)
    for ci in range(num_classes):
        # W_c: rank-1 class-averaging matrix; I_c: diagonal class selector.
        class_W[ci] = ecs[ci].unsqueeze(0).t().mm(ecs[ci].unsqueeze(0)).div_(y_unique_counts[ci])
        class_I[ci] = torch.eye(num_samples, **options) * ecs[ci]
    W = class_W.sum(dim=0)
    I = torch.eye(num_samples, **options)
    # Within-class scatter per class and globally: X^T (I_c - W_c) X.
    class_Sw = torch.empty(num_classes, out_dimension, out_dimension, **options)
    for ci in range(num_classes):
        class_Sw[ci] = X.t().mm(class_I[ci] - class_W[ci]).mm(X)
    Sw = X.t().mm(I - W).mm(X)
    out = 0
    for ca, cb in pairs:
        # Pairwise within-class scatter, count-weighted and blended with the
        # global scatter via beta.
        Sw_ab = beta * (y_unique_counts[ca] * class_Sw[ca] + y_unique_counts[cb] * class_Sw[cb])
        Sw_ab.div_(y_unique_counts[ca] + y_unique_counts[cb]).add_((1 - beta) * Sw)
        du_ab = ucs[ca].sub(ucs[cb]).unsqueeze_(0)
        # Sb_ab = du_ab.t().mm(du_ab)
        # out += y_unique_counts[ca] * y_unique_counts[cb] * (torch.trace(Sb_ab) / torch.trace(Sw_ab)).pow_(-q)
        # Mahalanobis-style class separability of the pair, pair-size weighted.
        out += y_unique_counts[ca] * y_unique_counts[cb] * (du_ab.mm(Sw_ab.inverse()).mm(du_ab.t())).pow_(-q)
    out /= num_samples * num_samples
    return out
|
989,223 | 86e2f5c46a7de76df2bf67e2fc7ba76b8c77b31d | from enum import Enum
class PersonType(Enum):
    """Role of a person; member values are the human-readable labels."""
    S = "Student"
    T = "Teacher"
|
989,224 | 0cf4107cb5e78aad059f017b254d29a3a5a2532a | from typing import Dict
class BaseException(Exception):
    """Application error carrying an HTTP status code and an extra payload.

    NOTE(review): the name shadows the builtin ``BaseException``; renaming
    would break existing callers, so it is kept as-is.
    """
    title: str
    status_code: int

    def __init__(self, message: str = '', status_code: int = 400, payload: Dict[str, str] = None, title: str = ''):
        """
        :param message: human-readable error description
        :param status_code: HTTP status to report (passing None leaves
            ``status_code`` unset, matching the original behavior)
        :param payload: extra key/value pairs merged into to_dict()'s output
        :param title: short error title
        """
        Exception.__init__(self)
        self.message = message
        if status_code is not None:
            self.status_code = status_code
        # BUG FIX: the default was a mutable dict literal shared by every
        # instance (mutating one exception's payload leaked into all others);
        # use a None sentinel and create a fresh dict per instance.
        self.payload = payload if payload is not None else {}
        self.title = title

    def to_dict(self):
        """Serialize the error for an HTTP JSON response; payload entries are
        merged alongside the title and message."""
        response = {
            'title': self.title,
            'message': self.message,
            **self.payload
        }
        return response
|
989,225 | a88713cace5e399ea2f0fc24edf3be96f45f8d66 | from typing import List, Dict
import os
import tempfile
import logging
from functools import lru_cache
import datetime
import pytz
import numpy as np
from jinja2 import Template
import pandas as pd
from epanet import epamodule
from typing import Union
import constants
import math
import tensorflow as tf
import matplotlib.pyplot as plt
from data_utils import nash_sutcliffe, plot_results_lines, load_adcl_raw, load_adcl
# Module-wide logger. NOTE(review): getLogger() with no name returns the root
# logger; getLogger(__name__) is the usual convention.
logger = logging.getLogger()
# Timezone used for localizing simulation timestamps.
LISBON = pytz.timezone("Europe/Lisbon")
class SimulationResults:
    """
    Object used to return results from a complete simulation
    """
    tank_levels: Dict[str, List[float]]
    tank_times: Dict[str, List[float]]
    tank_min_level: Dict[str, List[float]]
    tank_max_level: Dict[str, List[float]]
    cost: float
    pumps: List
    energy: float
    def __init__(self, tanks, cost, pumps):
        """
        :param tanks: dict of tank_id -> Tank objects carrying the recorded
            simulation_levels / simulation_times
        :param cost: The total cost of this simulation
        :param pumps: Array of @Pump objects
        """
        self.tank_levels = {}
        self.tank_times = {}
        self.tank_min_level = {}
        self.tank_max_level = {}
        # Copy each tank's recorded series into flat per-id dicts.
        for tank_id, tank in tanks.items():
            self.tank_levels[tank_id] = tank.simulation_levels
            self.tank_times[tank_id] = tank.simulation_times
            # self.tank_min_level[tank_id] = tank.min_level
            # self.tank_max_level[tank_id] = tank.max_level
        self.cost = cost
        self.pumps = pumps
        self.tanks = tanks
    def levels(self, simulator) -> list:
        """
        Concatenated level series of every tank that has a pump assigned.
        NOTE(review): reads ``simulator`` rather than ``self`` — looks like a
        refactor in progress; confirm which object should be the source.
        """
        simulation_levels = []
        n_considered_tanks = 0
        # Only tanks that are the corresponding tank of some pump contribute.
        for tank_id, tank in simulator.tanks.items():
            for pump in simulator.pumps:
                corresponding_tank = pump.get_corresponding_tank()
                if tank_id == corresponding_tank:
                    # print(tank.simulation_levels)
                    simulation_levels += tank.simulation_levels
                    n_considered_tanks += 1
                    break
        # print(simulation_levels)
        return simulation_levels
        # return sum([self.tank_levels[l] for l in self.tank_levels], [])
    @property
    def min_levels(self) -> List[float]:
        """
        :return: List with the minimum *recorded* water level for each tank
        """
        return [min(self.tank_levels[l]) for l in self.tank_levels]
    @property
    def max_levels(self) -> List[float]:
        """
        :return: List with the maximum *recorded* water level for each tank
        """
        return [max(self.tank_levels[l]) for l in self.tank_levels]
    def get_pump_times(self, start):
        """
        Process a dictionary with the times where each pump is turned on and off
        :param start: A datetime that marks the beginning of the simulation
        :return: A dict that contains the datetime objects for each start and shutdown of each pump

        NOTE(review): reads ``pump.start_intervals`` — SimulationPump in this
        file defines ``controls``, not ``start_intervals``; confirm the pump
        type used here.
        """
        pumps_dict = {}
        for pump in self.pumps:
            dataframe_ = pd.DataFrame()
            time = []
            command = []
            # One (on, off) pair of rows per start interval: command 1 = on, 0 = off.
            for i in range(len(pump.start_intervals)):
                t_on = pump.start_intervals[i].epanet_on_time
                t_off = pump.start_intervals[i].epanet_off_time
                time += [start + t_on * pd.Timedelta("1S"),
                         start + t_off * pd.Timedelta("1S")]
                command += [1, 0]
            dataframe_['Time'] = time
            dataframe_[pump.link_id] = command
            pumps_dict[pump.link_id] = dataframe_
        return pumps_dict
    def get_tank_levels(self, start):
        """
        Get a dictionary with every tank where the value is a DataFrame \
        with the tank levels in the time interval of the simulation
        :param start: A datetime that marks the beginning of the simulation
        :return: A dict that contains the tank level during the simulation
        """
        tanks_dict = {}
        for tank in self.tank_levels:
            dataframe_ = pd.DataFrame()
            # Convert each recorded second offset into an absolute timestamp.
            dataframe_['Time'] = list(map(lambda x: start + x * pd.Timedelta('1S'), self.tank_times[tank]))
            # NOTE(review): tail(1) returns a copy in pandas, so this chained
            # assignment may not write back to dataframe_ — verify the intent
            # of shifting the last timestamp by one second.
            dataframe_.tail(1)['Time'] -= pd.Timedelta('1S')
            dataframe_[tank] = self.tank_levels[tank]
            tanks_dict[tank] = dataframe_
        return tanks_dict
class ControlOperation:
    """One scheduled link operation: a setting to apply at a given time."""
    def __init__(self, time: int, operation: Union[int, float]):
        # Requested time (seconds) and the setting applied at that time.
        self.time = time
        self.operation = operation
        # Filled in later with the time EPANET actually registered.
        self.epanet_time = None
    def __repr__(self):
        return f"time: {self.time} | op: {self.operation} | epanet_time: {self.epanet_time}"
class EpanetElement:
    """Base class pairing an EPANET element id with its numeric index.

    Subclassed by pumps, valves, pipes, tanks and junctions; also tracks the
    ControlOperation entries scheduled on the element.
    """
    def __init__(self, en_id: Union[str, bytes], en_index: int):
        # Fail fast on missing or mistyped identifiers from the EPANET API.
        assert en_id is not None and en_index is not None
        assert isinstance(en_id, (str, bytes)) and isinstance(en_index, int)
        self.en_id = en_id
        self.en_index = en_index
        self.controls = []  # ControlOperation entries registered on this element
    def add_control_operation(self, op_time, operation):
        # Record a scheduled operation (time in seconds, setting value).
        self.controls.append(ControlOperation(op_time, operation))
    def __repr__(self):
        return f"{self.__class__.__name__}: {self.en_id} | EN_index: {self.en_index}"
class SimulationPump(EpanetElement):
    """
    Class representing a Pump in the simulation

    :param link_id: The id of pump that is given by the epanet file
    :type link_id: bytes
    :param en_index: The index of the pump on epanet
    :type en_index: int
    :param on_off_times: optional list of (time, setting) pairs used to seed
        this pump's scheduled control operations
    :type on_off_times: Union[list, None]
    """
    link_id: bytes
    en_index: int
    cost: float = 0
    energy: float = 0

    def __init__(self, link_id: bytes, en_index: int, on_off_times: Union[list, None] = None):
        super().__init__(link_id, en_index)
        assert isinstance(on_off_times, list) or on_off_times is None
        # BUG FIX: `link_id` was only a class-level annotation, so `self.link_id`
        # (read by __str__ and by SimulationResults.get_pump_times) raised
        # AttributeError; store it explicitly.
        self.link_id = link_id
        # BUG FIX: clocktime/powers_reads/flow_reads were class-level mutable
        # attributes (and the original `flow_reads = List = []` even clobbered
        # typing.List), so every pump instance shared the same lists and
        # readings leaked between pumps. They are now per-instance state.
        self.clocktime = []
        self.powers_reads = []  # (power_kW, epanet_timestamp) tuples
        self.flow_reads = []    # (flow_m3h, epanet_timestamp) tuples
        self.controls = [ControlOperation(op[0], op[1]) for op in on_off_times] if on_off_times is not None else []

    def __str__(self):
        string_representation = f'PumpID: {self.link_id} | EN_index: {self.en_index} |' \
                                f' Corresponding Tank: {self.get_corresponding_tank()}'
        return string_representation

    def get_on_times(self):
        """
        Gets the scheduled operations that turn the pump on.
        :return: list of ControlOperation entries with operation == 1
        :rtype List[ControlOperation]
        """
        return [p_op for p_op in self.controls if p_op.operation == 1]

    def get_off_times(self):
        """
        Gets the scheduled operations that shut the pump down.
        :return: list of ControlOperation entries with operation == 0
        :rtype List[ControlOperation]
        """
        return [p_op for p_op in self.controls if p_op.operation == 0]

    def get_corresponding_tank(self):
        """
        Which pump has a tank associated with it. This method returns the epanet id of that tank.
        :return: The id of the tank in a byte string
        :rtype bytes
        """
        pump_assign_dict = constants.CORRESPONDING_TANK_DICT
        assert self.en_id in pump_assign_dict, f"Pump {self.en_id} does not have corresponding tank!"
        return pump_assign_dict[self.en_id]

    def append_start_power(self, power: float, epanet_timestamp: float):
        """
        Append a power reading and its timestamp.
        :param power: The power in kW/h
        :type power: float
        :param epanet_timestamp: The timestamp associated to the power reading in seconds
        :type epanet_timestamp: float
        """
        self.powers_reads.append((power, epanet_timestamp))

    def append_start_flow(self, flow: float, epanet_timestamp: float):
        """
        Append a flow reading and its timestamp.
        :param flow: The flow in m^3/h
        :type flow: float
        :param epanet_timestamp: The timestamp associated to the flow reading in seconds
        :type epanet_timestamp: float
        """
        self.flow_reads.append((flow, epanet_timestamp))

    def calculate_total_pump_volume(self) -> float:
        """
        Computes the total volume pumped by the pump.
        :return: The volume in m^3
        :rtype float
        """
        volume_sum = 0
        # NOTE(review): accumulation is still a TODO stub — always returns 0.
        for interval in self.flow_reads:
            # volume_sum += interval.calculate_volume() TODO: finish this
            pass
        assert volume_sum >= 0
        return volume_sum

    def calculate_energy(self) -> float:
        # BUG FIX: the return annotation said ``tuple`` but a single float
        # (self.energy) is returned. The accumulation loop remains a TODO stub.
        cost_sum = 0
        energy_sum = 0
        for t in self.powers_reads:  # TODO: finish this
            pass
            # energy_sum += interval_energy
            #
            # self.energy = energy_sum
        assert self.energy >= 0
        return self.energy
class SimulationValve(EpanetElement):
    """Class representing a Valve (FCV/TCV link) in the simulation."""
    def __init__(self, en_id: Union[str, bytes], en_index: int):
        super().__init__(en_id, en_index)
class Tank(EpanetElement):
    """
    Class representing a Tank in the simulation
    """
    # Intended attributes (en_id/en_index are set by EpanetElement.__init__).
    last_level: float
    simulation_levels: List[float]
    simulation_times: List

    def __init__(self, t_id, en_index, last_level):
        super().__init__(t_id, en_index)
        # Level carried over from the previous run / initial condition.
        self.last_level = last_level
        # Populated (reset to []) by Simulation.set_tank_initial_levels.
        self.simulation_levels = None
        self.simulation_times = None

    def __str__(self):
        # BUG FIX: the original referenced the nonexistent ``self.id``
        # (AttributeError) and the second literal was missing its f-prefix, so
        # the raw template text was emitted instead of the level.
        return f'TankID: {self.en_id} | EN_index: {self.en_index} |' \
               f'Last:{self.last_level}'
class SimulationJunction(EpanetElement):
    """
    Class representing a Junction in the simulation
    """
    # NOTE(review): ``id`` is declared but never assigned; EpanetElement stores
    # the identifier as ``en_id``.
    id: int
    en_index: int
    pattern_demand: List[float]
    pattern_index: int
    def __init__(self, j_id, en_index, pattern_demand, pattern_index):
        super().__init__(j_id, en_index)
        self.pattern_demand = pattern_demand  # demand multipliers written to the EPANET pattern
        self.pattern_index = pattern_index  # EPANET pattern index bound to this node
class SimulationPipe(EpanetElement):
    """Class representing a Pipe link in the simulation."""
    def __init__(self, en_id: Union[str, bytes], en_index: int):
        super().__init__(en_id, en_index)
class Simulation:
def __init__(self, epanet_file, tanks_info, demands, simulation_duration, n_controls):
self.tanks = {}
self.constraints = {}
self.pumps = {}
self.valves = {}
self.pipes = {}
self.junctions = {}
self.sim_window_seconds = simulation_duration
self.N_CONTROLS = n_controls
self.file = self.render_template(epanet_file, self.N_CONTROLS)
epamodule.ENopen(self.file, "/dev/null")
epamodule.ENsettimeparam(epamodule.EN_DURATION, self.sim_window_seconds)
epamodule.ENsetoption(epamodule.EN_TRIALS, constants.EPANET_MAX_TRIALS)
# self.save_inp_file()
self.set_tanks(tanks_info)
self.set_junctions(demands)
self._set_links()
self.cost = 0
self.energy = 0
def __str__(self):
str_constraints = ""
for t in self.tanks:
if t in self.constraints:
u_const = self.constraints[t]['upper_const']
l_const = self.constraints[t]['lower_const']
str_constraints += f"\t\tTank: \n\t\t\tupper constr: {u_const}\n\t\t\tlower constr: {l_const}"
return f'Simulation: \n' \
f'\tPumps: {len(self.pumps)}\n ' \
f'\tTanks: {len(self.tanks)}' \
f'\tJunctions: {len(self.junctions)}' \
f'\tConstraints:\n' + str_constraints
@staticmethod
def render_template(template_name: str, n_controls: int) -> str:
"""
Convert the {}_server.inp template file that is loaded to the server to a {}.inp file that is interpretable by EPANET
:param template_name: {}_server.inp
:param n_controls: number of controls of the type "LINK link_id value AT TIME time HOURS" in the final file
:return: path of the converted .inp file
"""
controls_var = constants.DEFAULT_CONTROL * n_controls
with open(template_name, "r") as f:
template = Template(f.read())
inp = template.render(simulation=True, controls=controls_var)
handle, path = tempfile.mkstemp()
f = os.fdopen(handle, mode='w')
f.write(inp)
f.close()
return path
@staticmethod
def save_inp_file(name: str = f'/tmp/{datetime.datetime.now()}'):
"""
Saves the INP file corresponding to the network represented by self
:param name: Path where to save the inp file (Default: '/tmp/{datetime.datetime.now()}')
:return:
"""
epamodule.ENsaveinpfile(name) # THIS IS OPTIONAL
logger.debug(name)
def _set_links(self):
self.pumps = {}
n_links = epamodule.ENgetcount(epamodule.EN_LINKCOUNT)
for link_index in range(1, n_links + 1):
# TODO: apply a creational pattern
type_ = epamodule.ENgetlinktype(link_index)
id_ = epamodule.ENgetlinkid(link_index)
if type_ == epamodule.EN_PUMP:
p = SimulationPump(id_, link_index)
self.pumps[p.en_id.decode("utf-8")] = p
elif type_ == epamodule.EN_FCV or type_ == epamodule.EN_TCV:
v = SimulationValve(id_, link_index)
self.valves[v.en_id.decode("utf-8")] = v
elif type_ == epamodule.EN_PIPE:
p = SimulationPipe(id_, link_index)
self.pipes[p.en_id.decode("utf-8")] = p
def set_junctions(self, demands):
self.junctions = {}
for junction_ in demands:
index = epamodule.ENgetnodeindex(str(junction_))
pattern_index = epamodule.ENgetpatternindex(f'PatternDemand{junction_}')
j = SimulationJunction(junction_, index, demands[junction_], pattern_index)
self.junctions[junction_] = j
epamodule.ENsetpattern(pattern_index, j.pattern_demand)
epamodule.ENsetnodevalue(index, epamodule.EN_BASEDEMAND, constants.EPANET_DEFAULT_BASEDEMAND)
epamodule.ENsetnodevalue(index, epamodule.EN_PATTERN, pattern_index)
def set_tanks(self, tank_info):
self.tanks = {}
for _tank in tank_info:
index = epamodule.ENgetnodeindex(str(_tank))
t = Tank(_tank, index, tank_info[_tank])
self.tanks[t.en_id] = t
def get_constraints(self):
upper_constraint = []
lower_constraint = []
for _id, tank in self.tanks.items():
if _id in self.constraints:
upper_constraint.extend(self.constraints[tank.id]['upper_const'])
lower_constraint.extend(self.constraints[tank.id]['lower_const'])
return np.array(upper_constraint).ravel(), np.array(lower_constraint).ravel()
def set_tank_constraints(self, tank: Tank, upper: list, lower: list):
assert tank is not None and upper and lower
self.constraints[tank.id] = {'lower_const': lower,
'upper_const': upper}
def set_tank_initial_levels(self):
for _id, tank in self.tanks.items():
tank.simulation_levels = []
tank.simulation_times = []
epamodule.ENsetnodevalue(tank.en_index, epamodule.EN_TANKLEVEL, tank.last_level)
def calc_energy_and_price(self) -> (float, float):
"""
Calculates the price and the energy of the pumping operations of a given pump_id. The cost of each optimization
period is given by :ref: StartInterval.get_cost()
:return: The pumping cost and the energy spent
:rtype
"""
cost_sum = 0
energy_sum = 0
for pump_id in self.pumps:
pump_energy, pump_cost = self.pumps[pump_id].calculate_energy_and_cost()
cost_sum += pump_cost
energy_sum += pump_energy
pump_id.append_index = 0
assert energy_sum >= 0, "The pumping energy cant be negative!"
assert cost_sum >= 0, "The pumping cost cant be negative!"
return energy_sum, cost_sum
@staticmethod
def _check_clock_time(clock, pump_times, tank_times, max_clock):
return clock in pump_times, clock in tank_times, clock == max_clock
def _get_parallel_stop_times(self, pump_id1) -> set:
parallel_stop_times = set()
for pump_id2 in self.pumps:
if pump_id1 != pump_id2 and self.pumps[pump_id2].get_corresponding_tank() == self.pumps[pump_id1].get_corresponding_tank():
parallel_stop_times.update(self.pumps[pump_id2].get_epanet_off_times())
return parallel_stop_times
def _interval_read_times(self, interval=constants.READING_FREQ_TANKS_WITH_NO_CONTROLS):
return [interval * i for i in range(int(self.sim_window_seconds / interval)+1)]
def _create_stop_criterion_data_structures(self):
_read_times = set()
_pumpless_tanks_reads = set()
pump_on_off_times = {}
for pump_id in self.pumps:
p_on_times = self.pumps[pump_id].get_on_times()
p_off_times = self.pumps[pump_id].get_off_times()
_read_times.update(p_on_times)
_read_times.update(p_off_times)
parallel_stop_times = self._get_parallel_stop_times(pump_id)
pump_on_off_times[pump_id] = {'on_times': set(p_on_times), 'off_times': set(p_off_times),
'parallel_pump_stops': parallel_stop_times}
_pumpless_tanks_reads.update(self._interval_read_times())
return _read_times, _pumpless_tanks_reads, pump_on_off_times
def _get_tanks_without_pumps(self) -> list:
    """Return the ids of tanks not fed by any pump, in tank-dict order."""
    fed_tanks = {self.pumps[pump_id].get_corresponding_tank() for pump_id in self.pumps}
    return [tank_id for tank_id in self.tanks if tank_id not in fed_tanks]
def __collect_external_data__(self, collector_func, **func_args):
    """Hook for external data harvesting: invoke *collector_func* with this simulation's tanks and pumps plus any extra kwargs."""
    collector_func(tanks=self.tanks, pumps=self.pumps, **func_args)
def _set_controls(self, control_operations: dict):
    """
    Sets the controls on the epanet simulation module. This means the times at
    which each pump/valve/pipe will be turned on and off.

    :param control_operations: {link id -> iterable of (setting, time_seconds)}
    """
    control_index = 1
    for id, operations in control_operations.items():
        # A control id may refer to a pump, a valve or a pipe; resolve in that order.
        link = self.pumps[id] if id in self.pumps else self.valves[id] if id in self.valves else self.pipes[id]
        for op in operations:
            epamodule.ENsetcontrol(control_index,
                                   epamodule.EN_TIMER,
                                   link.en_index,
                                   op[0],  # operation setting
                                   0,
                                   op[1])  # operation time
            # Read the control back; the time actually stored by EPANET
            # (control[4]) is what gets tracked — presumably because EPANET may
            # adjust the requested time. TODO confirm against the EPANET toolkit docs.
            control = epamodule.ENgetcontrol(control_index)
            epanet_control_time = int(control[4])
            link.add_control_operation(epanet_control_time, op[0])
            control_index += 1
@staticmethod
def _read_pump_power_and_flow(clock_time, sim_pump):
    """Sample the pump's current energy use and flow from EPANET and record both at *clock_time*."""
    pump_power = epamodule.ENgetlinkvalue(sim_pump.en_index, epamodule.EN_ENERGY)
    pump_flow = epamodule.ENgetlinkvalue(sim_pump.en_index, epamodule.EN_FLOW)
    sim_pump.append_start_power(pump_power, clock_time)
    sim_pump.append_start_flow(pump_flow, clock_time)
@staticmethod
def _read_tank_level_and_time(clock_time, tank):
    """Sample the tank's current level (pressure) from EPANET and record it together with *clock_time*."""
    level = epamodule.ENgetnodevalue(tank.en_index, epamodule.EN_PRESSURE)
    tank.simulation_levels.append(level)
    tank.simulation_times.append(clock_time)
def _reads_on_control_times(self, clock_time, pump_read_times, tank_read_times, pump_on_off_times, pumpless_tanks):
    """
    Record pump and tank readings at the event times built by
    _create_stop_criterion_data_structures: pump on/off instants, parallel-pump
    stops, periodic pumpless-tank reads, and the end of the simulation window.
    """
    is_pump_read_time, is_tank_read_time, is_clock_max = self._check_clock_time(clock_time, pump_read_times,
                                                                                tank_read_times,
                                                                                self.sim_window_seconds)
    if is_pump_read_time or is_clock_max:
        for pump_id in self.pumps:
            tank_id = self.pumps[pump_id].get_corresponding_tank()
            tank = self.tanks.get(tank_id)
            if tank is not None:
                if clock_time in pump_on_off_times[pump_id]['on_times']:
                    # Pump just switched on: sample its power/flow and the tank level.
                    self._read_pump_power_and_flow(clock_time, self.pumps[pump_id])
                    # self.pumps[pump_id].clocktime += [clock_time]
                    self._read_tank_level_and_time(clock_time, tank)
                if clock_time in pump_on_off_times[pump_id]['off_times'] or is_clock_max:
                    # Pump switched off (or the window ended): sample the tank level.
                    # self.pumps[pump_id].clocktime += [clock_time]
                    self._read_tank_level_and_time(clock_time, tank)
                if clock_time in pump_on_off_times[pump_id]['parallel_pump_stops']:
                    # Another pump on the same tank stopped; record this pump's
                    # power/flow only if it is actually running (power > 0).
                    pump_power = epamodule.ENgetlinkvalue(self.pumps[pump_id].en_index, epamodule.EN_ENERGY)
                    pump_flow = epamodule.ENgetlinkvalue(self.pumps[pump_id].en_index, epamodule.EN_FLOW)
                    if pump_power > 0:
                        self.pumps[pump_id].append_start_power(pump_power, clock_time)
                        self.pumps[pump_id].append_start_flow(pump_flow, clock_time)
                        # self.pumps[pump_id].clocktime += [clock_time]
    if pumpless_tanks and is_tank_read_time:
        # Tanks without pumps are sampled on their own periodic schedule.
        for tank_id in pumpless_tanks:
            self._read_tank_level_and_time(clock_time, self.tanks[tank_id])
def _read_on_intervals(self, clock_time, read_intervals):
    """
    If *clock_time* is one of the scheduled read instants, sample every pump's
    power/flow and every tank's level.

    CLEANUP: the original wrapped the membership test in a
    ``math.isclose(..., rel_tol=5)`` list filter; since ``in`` already requires
    equality, the filter was redundant and the plain membership test is equivalent.
    """
    if clock_time in read_intervals:
        for pump_id in self.pumps:
            self._read_pump_power_and_flow(clock_time, self.pumps[pump_id])
        for tank_id in self.tanks:
            self._read_tank_level_and_time(clock_time, self.tanks[tank_id])
def new_simulation(self, control_operations: dict, data_read_frequency: Union[int, None] = None) -> SimulationResults:
    """
    Run one EPANET hydraulic simulation with the given control operations.

    NOTE: the original decorated this method with ``@lru_cache(maxsize=0)``,
    which caches nothing at all (and caching a method would pin ``self`` alive);
    the decorator has been removed.

    :param control_operations: {link id -> (setting, time) operations}; must be non-empty
    :param data_read_frequency: sample tank/pump data every this many seconds,
        or None to sample only at control (on/off) events
    :return: SimulationResults built from the recorded tanks and pumps
    """
    # BUG FIX: the original assertion ``control_operations and data_read_frequency > 0 or None``
    # raised TypeError whenever data_read_frequency was None, even though None is
    # an explicitly supported value handled below.
    assert control_operations, "control_operations must not be empty"
    assert data_read_frequency is None or data_read_frequency > 0, \
        "data_read_frequency must be positive or None"
    pump_read_times, tank_read_times, pump_on_off_times, pumpless_tanks, read_intervals = None, None, None, None, None
    epamodule.ENopenH()
    self.set_tank_initial_levels()
    self._set_controls(control_operations)
    epamodule.ENinitH(10)
    if data_read_frequency is None:
        # Event-driven sampling: read only at pump switch instants.
        pumpless_tanks = self._get_tanks_without_pumps()
        pump_read_times, tank_read_times, pump_on_off_times = self._create_stop_criterion_data_structures()
    else:
        # Fixed-interval sampling.
        read_intervals = self._interval_read_times(data_read_frequency)
    cond = True
    while cond:
        clock_time = epamodule.ENrunH()
        if data_read_frequency is None:
            self._reads_on_control_times(clock_time, pump_read_times, tank_read_times, pump_on_off_times,
                                         pumpless_tanks)
        else:
            self._read_on_intervals(clock_time, read_intervals)
        _ = epamodule.ENnextH()
        cond = not (clock_time >= self.sim_window_seconds)
    # self.energy, self.cost = self.calc_energy_and_price()
    epamodule.ENcloseH()
    results = SimulationResults(self.tanks, None, self.pumps)
    return results
def process_control_operations(dataframe):
    """
    Extract, for every pump ('P_*') and pipe ('Pipe*') column, the
    (setting, start-second) pairs at which the control state changes.

    Pump columns are binarised first (values >= 1 become 1, values < 1 become 0).
    Start seconds are measured from the first timestamp of the index.

    :return: {column name -> ndarray of [setting, op_start_seconds] rows}
    """
    frame = dataframe.copy()
    t0 = frame.index[0]
    operations = {}
    for column in frame.columns:
        if not (column.startswith('P_') or column.startswith('Pipe')):
            continue
        if column.startswith('P_'):
            frame[column] = frame[column].mask(frame[column] >= 1, 1)
            frame[column] = frame[column].mask(frame[column] < 1, 0)
        # Keep only the rows where the value differs from the previous row.
        changes = frame[frame[column] != frame[column].shift(1)][column].to_frame()
        changes['op_start_seconds'] = \
            changes.index.map(lambda d: int((d - t0).total_seconds()))
        operations[column] = changes.values
    return operations
def process_tank_initial_levels(data, index):
    """Return {reservoir column -> value at *index*} for every 'Res_*' column of *data*."""
    reservoir_columns = [column for column in data.columns if column.startswith("Res_")]
    return {column: data[column][index] for column in reservoir_columns}
def process_demands(data):
    """Return {demand column -> list of its values} for every 'PE_*' column of *data*."""
    demand_columns = [column for column in data.columns if column.startswith("PE_")]
    return {column: data[column].values.tolist() for column in demand_columns}
def calculate_total_n_controls(controls: dict):
    """Return the total number of control operations across all entries of *controls*."""
    return sum(len(operations) for operations in controls.values())
def epanet_simulation(network_file, sim_duration, control_operations, demands, tank_initial_levels, data_read_step=3600):
    """
    Run an EPANET simulation of *network_file* driven by the recorded control
    operations and demands.

    :param control_operations: raw dataframe; converted by process_control_operations()
    :param demands: dataframe of 'PE_*' demand columns
    :param data_read_step: sampling interval in seconds (default hourly)
    :return: float ndarray of simulated tank levels, one column per tank,
        one row per read instant
    """
    control_operations = process_control_operations(control_operations)
    demands_dict = process_demands(demands)
    simulator = Simulation(network_file, tank_initial_levels, demands_dict, sim_duration,
                           calculate_total_n_controls(control_operations))
    res = simulator.new_simulation(control_operations, data_read_step)
    # Transpose so time runs down the rows.
    return np.asarray([_ for _ in res.tank_levels.values()], dtype=float).T
def adcl_simulation():
    """
    Validate the EPANET model on the held-out ADCL test window: replay the
    recorded pump/pipe operations and demands, then compare the simulated tank
    levels against the measured ones via the Nash-Sutcliffe coefficient
    (printed) and return the simulated levels.
    """
    adcl_processed, _, _, test_size, abs_levels = load_adcl()
    adcl_raw = load_adcl_raw()
    # Keep only the test window (the last `test_size` processed samples).
    adcl_raw = adcl_raw[adcl_raw.index >= adcl_processed.index[-test_size]]
    # Tank levels at the instant just before the test window starts.
    tank_levels = process_tank_initial_levels(abs_levels, adcl_processed.index[-test_size-1])
    adcl_processed = adcl_processed[adcl_processed.index >= adcl_processed.index[-test_size]]
    abs_levels = abs_levels[abs_levels.index >= abs_levels.index[-test_size]]
    demands = adcl_processed[["PE_Aveleira", "PE_Albarqueira", "PE_Espinheira"]]
    sim_duration = int((adcl_raw.index[-1] - adcl_raw.index[0]).total_seconds())
    # Pump controls come from the raw data; pipe controls from the processed data.
    control_operations = process_control_operations(adcl_raw)
    control_operations.update(
        {v: ctls for v, ctls in process_control_operations(adcl_processed).items() if v.startswith("Pipe")})
    demands_dict = process_demands(demands)
    simulator = Simulation("epanet/adcl_no_valve.inp", tank_levels, demands_dict, sim_duration,
                           calculate_total_n_controls(control_operations))
    # Sample every 900 s (15 minutes).
    res = simulator.new_simulation(control_operations, 900)
    true_levels = abs_levels[
        [level_col for level_col in abs_levels.columns if level_col.startswith("Res_")]].values
    epanet_levels = np.asarray([_ for _ in res.tank_levels.values()], dtype=float).T
    n = nash_sutcliffe(tf.convert_to_tensor(true_levels, np.float32), tf.convert_to_tensor(epanet_levels, np.float32))
    # plot_results_lines(true_levels, epanet_levels)
    print(n.numpy())
    return epanet_levels
if __name__ == '__main__':
adcl_simulation() |
989,226 | 65aa596ad6b7b475018db4171d784dbaca6dfe0d | from django.contrib import admin
# Admin customisations for the shop models.
from .models import Produto, Cliente
# ModelAdmin classes controlling how each model is listed in the Django admin.
class ProdutoAdmin(admin.ModelAdmin):
    # Columns shown on the product change list.
    list_display = ('nome', 'preco', 'estoque')
class ClienteAdmin(admin.ModelAdmin):
    # Columns shown on the customer change list.
    list_display = ('nome', 'sobrenome', 'email')
# Register the models together with their admin customisations.
admin.site.register(Produto, ProdutoAdmin)
admin.site.register(Cliente, ClienteAdmin) |
989,227 | d5ba846567465b6a0ef64a3f780166216c4e33aa | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from programy.config.file.yaml_file import YamlConfigurationFile
from programy.clients.restful.flask.kik.config import KikConfiguration
from programy.clients.events.console.config import ConsoleConfiguration
class KikConfigurationTests(unittest.TestCase):
    """Tests for KikConfiguration: loading from YAML, defaulting, and to_yaml output."""

    def test_init(self):
        # Full configuration: every supported key is set and read back.
        yaml = YamlConfigurationFile()
        self.assertIsNotNone(yaml)
        yaml.load_from_text("""
        kik:
            bot_name: testbot
            webhook: https://localhost:5000
            host: 127.0.0.1
            port: 5000
            debug: false
            unknown_command: Sorry, that is not a command I have been taught yet!
            unknown_command_srai: YKIK_UNKNOWN_COMMAND
        """, ConsoleConfiguration(), ".")
        kik_config = KikConfiguration()
        kik_config.load_configuration(yaml, ".")
        self.assertEqual("testbot", kik_config.bot_name)
        self.assertEqual("https://localhost:5000", kik_config.webhook)
        self.assertEqual("127.0.0.1", kik_config.host)
        self.assertEqual(5000, kik_config.port)
        self.assertEqual(False, kik_config.debug)
        self.assertEqual(kik_config.unknown_command, "Sorry, that is not a command I have been taught yet!")
        self.assertEqual(kik_config.unknown_command_srai, "YKIK_UNKNOWN_COMMAND")

    def test_init_no_values(self):
        # An empty `kik:` section falls back to the built-in defaults.
        yaml = YamlConfigurationFile()
        self.assertIsNotNone(yaml)
        yaml.load_from_text("""
        kik:
        """, ConsoleConfiguration(), ".")
        kik_config = KikConfiguration()
        kik_config.load_configuration(yaml, ".")
        self.assertEqual("program-y", kik_config.bot_name)
        self.assertEqual("https://localhost:5000", kik_config.webhook)
        self.assertEqual("0.0.0.0", kik_config.host)
        self.assertEqual(80, kik_config.port)
        self.assertEqual(False, kik_config.debug)

    def test_to_yaml_with_defaults(self):
        # to_yaml(..., True) writes the default values for every key.
        config = KikConfiguration()
        data = {}
        config.to_yaml(data, True)
        self.assertEqual(data['bot_name'], "program-y")
        self.assertEqual(data['webhook'], "https://666666666.ngrok.io")
        self.assertEqual(data['unknown_command'], "Unknown command")
        self.assertEqual(data['unknown_command_srai'], 'KIKUNKNONWCOMMAND')
        self.assertEqual(data['bot'], 'bot')
        self.assertEqual(data['bot_selector'], "programy.clients.client.DefaultBotSelector")
        self.assertEqual(data['renderer'], "programy.clients.render.text.TextRenderer")
|
989,228 | 45e04911a777459c4128832535fe4f80855211ef | from qiskit.wrapper import load_qasm_string
from qiskit.dagcircuit import DAGCircuit
from qiskit.transpiler import PassManager, transpile
from qiskit.transpiler._basepasses import TransformationPass
class BugPass(TransformationPass):
    """Transformation pass that discards its input and returns a brand-new, empty DAGCircuit."""
    def run(self, dag):
        print("Activating")
        return DAGCircuit()
# Minimal bug reproduction: transpile a 2-qubit Bell circuit through a pass
# that returns an empty DAG, then compare the result with the input.
qasm = "OPENQASM 2.0;\ninclude \"qelib1.inc\";\nqreg q[2];\nh q[0];\ncx q[0],q[1];"
dag = DAGCircuit.fromQuantumCircuit(load_qasm_string(qasm))
pm = PassManager()
pm.add_passes(BugPass())
dag2 = transpile(dag,pass_manager=pm)
dag == dag2 # returns true but should be false
|
989,229 | 9447e8e698c1dfce076fb0052217a7d0cf49d558 | Python 3.8.1 (tags/v3.8.1:1b293b6, Dec 18 2019, 22:39:24) [MSC v.1916 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>>
# Aprendizaje automático 01
# BUG FIX: this script had been machine-translated into Spanish at the source
# level ("importar ... como", "entrada()", "imprimir", "np.matriz",
# "remodelar", and broken "' \ n '" escapes), which is not valid Python.
# Restored to runnable Python; the Spanish user-facing prompts are kept.
import numpy as np

# Creando un arreglo (create a 1-D array of n elements)
print('Dígito número de elementos:')
n = int(input())
a = np.arange(n)
print('Arreglo a =', a, '\n')
print('Tipo de a =', a.dtype, '\n')
print('Dimensión de a =', a.ndim, '\n')
print('Número de elementos de a =', a.shape)

# Creando un arreglo multidimensional (stack two ranges into one array)
print('\nDigite numero de elementos para areglo 1:')
f = int(input())
print('Número de elementos para arreglo 2:')
c = int(input())
m = np.array([np.arange(f), np.arange(c)])
print(m)

# Utilización de arreglos multidimensionales (reshape into blocks x rows x cols)
print('Dígito número de elementos:')
n = int(input())
print('Número de bloques de dígitos:')
b = int(input())
print('Número de filas de dígitos:')
f = int(input())
print('Número de columnas de dígitos:')
c = int(input())
z = np.arange(n).reshape(b, f, c)
print('z =\n', z)
print('Buscar un elemento en especifico\n')
print('Bloque:')
b = int(input())
print('Fila:')
f = int(input())
print('Columna:')
c = int(input())
print('\nz[b, f, c] =', z[b, f, c])
989,230 | 73e1b578a88297f6d81fc4f2702773aa276509b0 | from setuptools import setup
# Distribution metadata for the gmusic-alarm package.
setup(
    name='gmusic-alarm',
    version='0.0.1',
    description='Alarm clock using Google Play Music radio stations',
    url='https://github.com/cmurphy/gmusic-alarm',
    author='Colleen Murphy',
    author_email='colleen@gazlene.net',
    license='Apache-2.0',
    packages=['gmusic_alarm'],
    install_requires=['gmusicapi', 'python-vlc'],
    entry_points={
        # Installs the `gmusic-alarm` console command.
        'console_scripts': ['gmusic-alarm=gmusic_alarm.cli:run'],
    }
)
|
class Solution:
    def subsets(self, nums: List[int]) -> List[List[int]]:
        """
        Return all subsets (the power set) of *nums*, using backtracking.

        CLEANUP: the original kept two alternative implementations as
        unreachable code after the first ``return``; they are preserved below
        as callable private methods instead of dead statements.
        """
        res = []

        def backtracking(prefix, start):
            # Every prefix reached is itself a subset.
            res.append(prefix)
            for i in range(start, len(nums)):
                backtracking(prefix + [nums[i]], i + 1)

        backtracking([], 0)
        return res

    def _subsets_cascading(self, nums: List[int]) -> List[List[int]]:
        """Alternative (original "Solution 2"): extend every existing subset with each new number."""
        res = [[]]
        for num in nums:
            length = len(res)
            for i in range(length):
                res.append(res[i] + [num])
        return res

    def _subsets_bitmask(self, nums: List[int]) -> List[List[int]]:
        """Alternative (original "Solution 3"): each of the 2**n bitmasks selects one subset."""
        length = len(nums)
        res = []
        for mask in range(1 << length):
            res.append([nums[j] for j in range(length) if (mask >> j) & 1])
        return res
989,232 | cf1babe542a09e7d2dde1a0141be3ffec36e49d4 | import numpy as np
from exceptions import ValueError
import random
import libfunctions
class RbfNetwork(object):
    """
    Radial-basis-function network: a layer of Gaussian kernels with common
    width ``sigma`` followed by a linear output layer (``weights`` includes a
    bias row, hence the ``size + 1`` rows).

    NOTE: this is Python 2 code (print statement, xrange, `exceptions` module).
    The heavy numeric kernels live in the ``libfunctions`` extension module.
    """
    def __init__(self, input_size, output_size, sigma):
        self.input_size = input_size
        self.output_size = output_size
        self.sigma = sigma
        # Both are filled in by select_random_kernels().
        self.kernels = None
        self.weights = None

    def test_input(self, input):
        # Normalise `input` to a 2-D array with `input_size` columns, or raise.
        newinput = np.asarray(input)
        if newinput.ndim == 1: #dealing with a vector
            newinput.resize( (1, newinput.shape[0]))
        elif newinput.ndim != 2: #matrix
            raise ValueError("input has to be either a vector or a matrix")
        if newinput.shape[1] != self.input_size:
            print newinput.shape
            raise ValueError("input dimension differs from the RBF one")
        return newinput

    def select_random_kernels(self, input, size):
        '''
        Select a number of rows from the input matrix to be the kernels for
        the RBFNetwork. The weights matrix will be reset.
        @param input: The matrix the kernels will be taken from. Its number of
        columns must be equal to the network input_size
        @param size: The number of rows to take from the input matrix. It must
        be between one and the number of rows of input
        '''
        newinput = self.test_input(input)
        if size > newinput.shape[0]:
            raise IndexError("asking for more elements that in input")
        # NOTE(review): this buffer is immediately replaced by the .copy() below.
        self.kernels = np.empty(shape = (size, self.input_size), dtype=np.double,
                                order='C')
        # +1 row for the bias term of the linear layer.
        self.weights = np.empty(shape= (size + 1, self.output_size), dtype=np.double,
                                order='C')
        draws = random.sample(xrange(newinput.shape[0]), size)
        self.kernels = input[draws, :].copy()

    def first_layer_output(self, input, check = True):
        # Kernel activations for each input row; column 0 is the bias input
        # (the output has 1 + num_kernels columns, filled by libfunctions).
        if check:
            input = self.test_input(input)
        num_inputs = input.shape[0]
        num_kernels = self.kernels.shape[0]
        res = np.ndarray(shape=(num_inputs, 1+self.kernels.shape[0]))
        libfunctions.first_layer_output(input, self.kernels, res,
                                        num_kernels, num_inputs,
                                        self.input_size, self.sigma)
        return res

    def output(self, input):
        # Full forward pass: kernel activations times the linear weights.
        newinput = self.test_input(input)
        res = self.first_layer_output(newinput, check = False)
        return np.dot(res, self.weights)

    def __call__(self, input):
        # Allow the network instance to be used as a plain function.
        return self.output(input)

    def lsqtrain(self, input, output):
        """
        Perform least sqare training over input/outputs
        input/output has to be a 2d ndvector, and every row should be a multi-dimensional variable
        Returns an ndarray of the same size of input/output
        """
        newinput = self.test_input(input)
        newoutput = np.asarray(output)
        if newoutput.ndim == 1: #dealing with a vector
            newoutput = newoutput.reshape( (1, newoutput.shape[0]))
        elif newoutput.ndim != 2: #matrix
            raise ValueError("output has to be either a vector or a matrix")
        if newoutput.shape[1] != self.output_size:
            raise ValueError("output dimension differs from the RBF one")
        if newinput.ndim != newoutput.ndim:
            raise ValueError("input and output must have the same shape")
        if newoutput.shape[0] != newinput.shape[0]:
            raise ValueError("input and output must have the same number of rows ")
        A = self.first_layer_output(newinput, check = False)
        # NOTE(review): uses the raw `output`, not the validated/reshaped
        # `newoutput` built above — confirm whether `newoutput` was intended.
        b = output
        self.weights, errs, _, _ = np.linalg.lstsq(A, b)
        return errs

    def output_conf(self, input):
        # Forward pass plus a confidence score: the strongest kernel
        # activation per row (bias column excluded).
        newinput = self.test_input(input)
        firsto = self.first_layer_output(newinput, check=False)
        out = np.dot(firsto, self.weights)
        conf = np.max(firsto[:,1:], 1)
        return out, conf

    def sample_inputs(self, n_samples):
        # Draw n_samples input vectors via libfunctions, based on the kernels
        # and sigma.
        out = np.empty((n_samples, self.input_size) )
        libfunctions.sample_inputs(n_samples, out, self.kernels,
                                   self.kernels.shape[0],
                                   self.input_size,
                                   self.sigma
                                   )
        return out
|
989,233 | 69a79456c8f552b624f84a0582b03a8eaabfb8d3 | #!/usr/bin/env python
# encoding: utf-8
#coding=utf-8
'''
http://localhost:12345/params?content=12\tabc
http://localhost:12345/params?content=2\t3
'''
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SocketServer import ThreadingMixIn
import sys
import urllib
#from sklearn.externals import joblib
import threading
import Queue
import random
import time
import os, sys
import time
import csv
import argparse
import cPickle as pickle
import numpy as np
#import pandas as pd
#import tensorflow as tf
#from utils import TextLoader
#from model import Model
csv.field_size_limit(sys.maxsize)
#queue = Queue.Queue(10)
#models = joblib.load('../model/model_lr')
#xumm change:
import re
import jieba
import json
from process import ProcessText
from pyfasttext import FastText
from content_test import ContCmp
class TestHTTPHandler(BaseHTTPRequestHandler):
    """
    GET handler: extracts the text from the ``content`` query parameter,
    classifies it with the global fastText model and writes the prediction
    back as the response body. (Python 2 code: urllib.splitquery/unquote.)

    NOTE(review): requests without a '?' query string get no response at all —
    confirm whether that is intended.
    """
    def do_GET(self):
        #print 'self.path:', self.path
        if '?' in self.path:
            mpath,margs = urllib.splitquery(self.path)
            #print('mpath:', mpath)
            #print('margs', margs)
            content = margs.split('=')
            #print 'content', content
            #mid, weibo = content[1].split('\\t')
            # The text to classify is the last '='-separated chunk, URL-decoded.
            weibo = content[-1]
            weibo = urllib.unquote(weibo)
            result = predict(fasttext_model, processtext, weibo)#.encode('utf8'))
            self.protocal_version = 'HTTP/1.1'
            self.send_response(200)
            encoding = sys.getfilesystemencoding()
            self.send_header("Content-type", "text/html; charset=%s" % encoding)
            self.end_headers()
            content = result
            #self.wfile.write('weibo_fenci:%s' % weibo)
            self.wfile.write('Predict Result:%s' % content)
            #self.wfile.write('Predict Result:%s' % result[0])
            #self.wfile.write(content)
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
    """
    HTTP server that handles each request in its own thread.

    BUG FIX: the mix-in must come FIRST in the base-class list. With the
    original order ``(HTTPServer, ThreadingMixIn)`` the MRO resolves
    process_request to HTTPServer's synchronous version and the mix-in is
    never used, so the server stays single-threaded.
    """
    pass
def start_server():
    """Bind the threading HTTP classification server and block serving forever."""
    addr = '10.77.6.241'
    port = 8188
    http_server = ThreadingHTTPServer((addr, port), TestHTTPHandler)
    print ("HTTP server is at: http://%s:%s/" % (addr, port))
    http_server.serve_forever()
#xumm change
def predict(model, fenci, weibo):
    """
    Classify a weibo post with the fastText *model* after tokenising it with
    *fenci*, then post-filter the predicted label.

    Returns a "label@probability" string; predictions that fail the whitelist,
    the 0.8 probability threshold, or the keyword checks fall back to the
    catch-all label "1042015:tagCategory_1004@0.5".
    """
    weibo_join = fenci.process(weibo)
    words = ''.join(weibo_join.split(' '))
    weibo_content = weibo_join + '\n'
    label, prob = model.predict_proba_single(weibo_content, k = 1)[0]
    label_other_form = '1042015:' + label
    #flag, _ = contcmp.check_is_exist(label_other_form, words)
    flag = False
    if label_other_form in labels_list and prob > 0.8:
        if 'tagCategory_046' in label_other_form:
            # Category 046 is accepted as-is with the model's own probability.
            return '@'.join([label_other_form, str(prob)])
        flag, kcnt = contcmp.check_is_exist(label_other_form, words)
        # Category 060 additionally requires at least two keyword hits.
        if not flag or ('tagCategory_060' in label_other_form and kcnt < 2):
            return "1042015:tagCategory_1004@0.5"
        else:
            # Accepted labels get a fixed reported confidence.
            return '@'.join([label_other_form, '0.6111'])
    else:
        return "1042015:tagCategory_1004@0.5"
    #predict_result = '@'.join([label_other_form, str(prob)])
    #return predict_result
def loadModel():
    """Load the pre-trained fastText classifier into the global ``fasttext_model``."""
    global fasttext_model
    fasttext_model = FastText()
    fasttext_model.load_model('3Ngram_3mincount_1wminlabel.bin')
def init():
    """
    Load all global resources: the tokenizer, the label whitelist, the
    keyword/content checker and the fastText model.
    """
    global processtext
    processtext = ProcessText()
    global labels_list
    with open("both_labels.pkl", "rb") as f:
        labels_list = pickle.load(f)
    global contcmp
    contcmp = ContCmp("root_feature_file.allid")
    # CLEANUP: the original inlined an exact copy of loadModel()'s body here
    # (with ``#loadModel()`` left commented out); reuse the helper instead.
    loadModel()
def main():
    """Entry point: load all models/resources, then serve HTTP forever."""
    init()
    print ('Initialization finished!')
    start_server()

if __name__ == '__main__':
    main()
|
# Report staff earning under 20000 and the average income from staff.txt
# (each line: "<name> <salary>", space-separated).
# CLEANUP: the original opened the file three separate times and read it fully
# twice just to count lines; one pass collects everything. Output is unchanged.
salaries = []
with open('staff.txt', 'r') as data:
    print('Сотрудники с окладом менее 20 тыс. руб.: ')
    for line in data:
        staff = line.split(' ')
        salary = int(staff[1])
        salaries.append(salary)
        if salary < 20000:
            print(staff[0])
count = len(salaries)
sum_salary = sum(salaries)
# NOTE(review): still raises ZeroDivisionError on an empty file, exactly like
# the original; guard here if that case must be handled.
print(f"Средняя величина дохода сотрудников: {sum_salary/count} тыс. руб. ")
|
989,235 | 730b1f92c2dcac24ab6a0c2c77303e2cbb633ada | '''
URL = https://leetcode.com/problems/sort-transformed-array/description/
360. Sort Transformed Array
45 mins to solution
Complexity
Let N := len(nums)
Time = O(N)
Space = O(N) ( EXP ) O(1) ( IMP )
Edge Cases
(A) [1,2,3,4,5] 1 2 3
(B) [-5,-4,-3,-2,1] 1 2 3
(C) [1,2,3,4,5] -1 -2 -3
(D) [-5,-4,-3,-2,-1] -1 -2 -3
(E)
(F)
'''
def solvePolynomial(num: int, a: int, b: int, c: int) -> int:
    """Evaluate the quadratic f(num) = a*num^2 + b*num + c."""
    quadratic_term = a * num * num
    linear_term = b * num
    return quadratic_term + linear_term + c
# YOu seem ... close?
def zipperMerge(posNums: List[int], negNums: List[int]) -> List[int]:
    """Merge two ascending lists into one ascending list (two-pointer merge; ties take from *negNums* first)."""
    merged = []
    i, j = 0, 0
    while i < len(posNums) and j < len(negNums):
        if posNums[i] < negNums[j]:
            merged.append(posNums[i])
            i += 1
        else:
            merged.append(negNums[j])
            j += 1
    # One of the two lists is exhausted; append whatever remains of the other.
    merged.extend(posNums[i:])
    merged.extend(negNums[j:])
    return merged
class Solution:
    def sortTransformedArray(self, nums: List[int], a: int, b: int, c: int) -> List[int]:
        """
        Apply f(x) = a*x^2 + b*x + c to the (sorted ascending) input *nums* and
        return the results in ascending order, in O(n).

        The parabola's vertex x = -b/(2a) splits the input into two halves on
        which f is monotonic, so each half maps to a monotonic value list; each
        list is flipped to ascending if needed and the two are merged.
        For a == 0 f is monotonic everywhere, so any split point (0) works.

        CLEANUP vs. the original: removed the unused locals (lPtr, rPtr, wPtr
        and the never-read negNums list) and inlined the helper functions so
        the class is self-contained.
        """
        vertex = (-b / (2 * a)) if a != 0 else 0

        def f(x):
            return (a * x * x) + (b * x) + c

        left = [f(x) for x in nums if x < vertex]
        right = [f(x) for x in nums if x >= vertex]
        # Flip any half whose values run descending (checked via endpoints,
        # which suffices because each half is monotonic).
        if left and left[0] > left[-1]:
            left.reverse()
        if right and right[0] > right[-1]:
            right.reverse()

        # Standard two-pointer merge of the two ascending halves.
        merged = []
        i = j = 0
        while i < len(left) and j < len(right):
            if left[i] < right[j]:
                merged.append(left[i])
                i += 1
            else:
                merged.append(right[j])
                j += 1
        merged.extend(left[i:])
        merged.extend(right[j:])
        return merged
|
989,236 | dd98e43d1aee0d9c5252e16a036a4180b6c27f38 | """
参考
https://deepblue-ts.co.jp/python/pypi-oss-package/
"""
import setuptools
from os import path
# Distribution metadata; bump `version` on every release.
version = '1.4'
package_name = "scraping_toolkit"
# Absolute path of the directory containing this setup.py.
root_dir = path.abspath(path.dirname(__file__))
def _requirements():
    """Read requirements.txt (next to this setup.py) and return the dependency specs with trailing whitespace stripped."""
    # BUG FIX: the original left the file handle open; use a context manager.
    with open(path.join(root_dir, 'requirements.txt')) as req_file:
        return [name.rstrip() for name in req_file.readlines()]
# Package metadata; runtime dependencies are read from requirements.txt.
setuptools.setup(
    name='scraping_toolkit',
    version=version,
    author='IzumiSatoshi',
    install_requires=_requirements(),
    packages=[package_name]
)
|
989,237 | 6523cb9ed496d56e993b486574d6cec406eda3c2 | import mock
import pytest
from api.base.settings.defaults import API_BASE
from osf_tests.factories import AuthUserFactory
@pytest.mark.django_db
class TestThrottling:
    """
    Tests for the API test-throttle endpoint: per-user and anonymous rate
    limits, and the X-THROTTLE-TOKEN bypass header.
    """

    @pytest.fixture()
    def user(self):
        return AuthUserFactory()

    @pytest.fixture()
    def url(self):
        return '/{}test/throttle/'.format(API_BASE)

    def test_user_rate_throttle(self, app, url, user):
        # Authenticated users get two requests before being throttled (429).
        res = app.get(url, auth=user.auth)
        assert res.status_code == 200
        res = app.get(url, auth=user.auth)
        assert res.status_code == 200
        res = app.get(url, auth=user.auth, expect_errors=True)
        assert res.status_code == 429

    @mock.patch('api.base.throttling.TestUserRateThrottle.allow_request')
    def test_user_rate_allow_request_called(self, mock_allow, app, url, user):
        # The user throttle class is consulted exactly once per request.
        res = app.get(url, auth=user.auth)
        assert res.status_code == 200
        assert mock_allow.call_count == 1

    @mock.patch('api.base.throttling.TestAnonRateThrottle.allow_request')
    def test_anon_rate_allow_request_called(self, mock_allow, app, url):
        # The anonymous throttle class is consulted exactly once per request.
        res = app.get(url)
        assert res.status_code == 200
        assert mock_allow.call_count == 1

    def test_anon_rate_throttle(self, app, url):
        # Anonymous users get only one request before being throttled.
        res = app.get(url)
        assert res.status_code == 200
        res = app.get(url, expect_errors=True)
        assert res.status_code == 429

    def test_user_rate_throttle_with_throttle_token(self, app, url, user):
        # A valid throttle token lifts the per-user limit.
        headers = {'X-THROTTLE-TOKEN': 'test-token'}
        res = app.get(url, auth=user.auth, headers=headers)
        assert res.status_code == 200
        res = app.get(url, auth=user.auth, headers=headers)
        assert res.status_code == 200
        res = app.get(url, auth=user.auth, headers=headers)
        assert res.status_code == 200

    def test_anon_rate_throttle_with_throttle_token(self, app, url):
        # A valid throttle token lifts the anonymous limit as well.
        headers = {'X-THROTTLE-TOKEN': 'test-token'}
        res = app.get(url, headers=headers)
        assert res.status_code == 200
        res = app.get(url, headers=headers)
        assert res.status_code == 200

    def test_user_rate_throttle_with_incorrect_throttle_token(
            self, app, url, user):
        # An invalid token does not bypass the per-user throttle.
        headers = {'X-THROTTLE-TOKEN': 'fake-token'}
        res = app.get(url, auth=user.auth, headers=headers)
        assert res.status_code == 200
        res = app.get(url, auth=user.auth, headers=headers)
        assert res.status_code == 200
        res = app.get(url, auth=user.auth, headers=headers, expect_errors=True)
        assert res.status_code == 429

    def test_anon_rate_throttle_with_incorrect_throttle_token(self, app, url):
        # An invalid token does not bypass the anonymous throttle.
        headers = {'X-THROTTLE-TOKEN': 'fake-token'}
        res = app.get(url, headers=headers)
        assert res.status_code == 200
        res = app.get(url, headers=headers, expect_errors=True)
        assert res.status_code == 429
|
989,238 | 5725a346ff6c0a1dd01293510c3d1f9d5318c826 | # coding=utf-8
from selenium import webdriver
import unittest
from time import sleep
import os
import sys
import signal
class TestLogin(unittest.TestCase):
    """
    Selenium UI tests for the admin login page.

    NOTE(review): there is no tearDown, so every test leaks a Firefox
    instance — confirm whether `self.driver.quit()` should be added.
    """

    def setUp(self):
        self.driver = webdriver.Firefox()
        self.base_url = "http://10.110.1.55:8082/admin/home.html"
        # NOTE: http://10.110.1.55:8082/login.html currently has problems,
        # so the admin home URL is used instead.

    def login_check(self, username, password):
        """Submit the login form with the given credentials; return True if we land on the admin home page."""
        self.driver.get(self.base_url)
        print("Current url is %s"%self.driver.current_url)
        print("Current title is %s"%self.driver.title)
        self.driver.find_element_by_id("username").send_keys(username)
        self.driver.find_element_by_id("password").send_keys(password)
        # The submit button is labelled "登录" ("log in").
        self.driver.find_element_by_xpath("//input[@value='登录']").click()
        sleep(3)
        print("Current url is %s"%self.driver.current_url)
        print("Current title is %s"%self.driver.title)
        sreach_window=self.driver.current_window_handle
        print("Current title is %s"%self.driver.title)
        main_page_url = 'http://10.110.1.55:8082/admin/home.html'
        if(main_page_url == self.driver.current_url):
            rst = True
        else:
            rst = False
        return rst

    def test_Login_success_with_right_u_p(self):
        # Correct credentials should land on the admin home page.
        result = self.login_check("admin", "123456")
        self.assertTrue(result)

    def test_login_fail_with_wrong_u_p(self):
        # A wrong password must not log in.
        result = self.login_check("admin", "password")
        self.assertFalse(result)
if __name__ == '__main__':
    # Run the login tests when executed directly.
    unittest.main()
|
989,239 | e84c0bb9ed7acc0cbbef95f0e3f8ec08cb523ad3 | import matplotlib.pyplot as plt
import numpy as np
'''
Minimal example: simply plot the relationship between x and y.
'''
x = np.linspace(-1,1,50)  # 50 evenly spaced samples in [-1, 1]
y = 2*x +1                # straight line y = 2x + 1
plt.plot(x,y)
plt.show()
989,240 | f33ef9434f760d7ab113b9b916a1df406e7d06c6 | from scipy.stats import norm
import random
import os
import numpy as np
import sys
sys.path.append('../timeseries')
from timeseries import TimeSeries
from arraytimeseries import ArrayTimeSeries
from sizedcontainertimeseriesinterface import SizedContainerTimeSeriesInterface
sys.path.append('../cs207rbtree')
import redblackDB
sys.path.append('../SimSearch')
from _corr import kernel_dist
import pprint
# py.test --doctest-modules --cov --cov-report term-missing Distance_from_known_ts.py
def load_ts_file(filepath):
    '''
    Takes in file and reads time series from it
    Parameters
    ----------
    filepath: path of the file
    Returns
    -------
    timeSeries object : TimeSeries class, interpolated onto the fixed grid
    0.00, 0.01, ..., 0.99 (100 points)
    >>> ts=load_ts_file('169975.dat_folded.txt')
    >>> ts._values[0]
    15.137
    '''
    #Only considers the first two columns of the text file (other columns are discarded)
    #Only evaluates time values between 0 and 1
    #First column is presumed to be times and second column is presumed to be light curve values.
    data = np.loadtxt(filepath, delimiter=' ',dtype = str)
    clean_input = []
    for i in range(len(data)):
        # Rows arrive as strings like "b'<time>\\t<value>..."; split on the
        # literal backslash-t and strip the 2-char "b'" prefix from the time.
        row = data[i].split("\\t")
        clean_input.append([float(row[0][2:]),float(row[1])])
    data = np.array(clean_input)
    # Drop duplicate time stamps, keeping the first occurrence.
    _ , indices = np.unique(data[:, 0], return_index=True)
    data = data[indices, :]
    times, values = data.T
    full_ts = TimeSeries(times=list(times),values=list(values))
    # Resample onto the uniform [0, 1) grid with 100 points.
    interpolated_ts = full_ts.interpolate(list(np.arange(0.0, 1.0, (1.0 /100))))
    full_ts_interpolated = TimeSeries(times=list(np.arange(0.0, 1.0, (1.0 /100))),values=list(interpolated_ts))
    return full_ts_interpolated
# Script arguments: argv[1] = input time-series file, argv[2] = number of top matches.
if len(sys.argv)<2:
    raise ValueError("No input file containing time series passed")
else:
    test_ts=load_ts_file(sys.argv[1])
num_vantage_points = 20
num_of_timeseries = 1000
# NOTE(review): argv[2] is read unconditionally — running with only one
# argument raises IndexError here; confirm intended.
num_top=int(sys.argv[2])
def tsmaker(m, s, j):
    '''
    Creates a random time series of 100 elements: a normal pdf with mean m and
    scale s, plus j-scaled Gaussian noise, sampled on the grid 0.00..0.99.
    Parameters
    ----------
    m,s,j: parameters of the function norm.pdf (mean, scale) and noise amplitude
    Returns
    -------
    timeSeries object : TimeSeries class
    >>> ts = tsmaker(2,3,4)
    >>> ts._times[0]
    0.0
    '''
    t = list(np.arange(0.0, 1.0, 0.01))
    v = norm.pdf(t, m, s) + j*np.random.randn(100)
    return TimeSeries(values=v,times=t)
#[{1: <dist>, 2: <dist>, }_1, {}_2, ... {}_20] to
# (1,2);(2,3)....|(3,4);.....|
def encodeVantagePoints(decoded):
    '''
    Encode a {key: distance} mapping as a "(k,v);(k,v);..." string for storage.
    Parameters
    ----------
    decoded: dict to be encoded
    Returns
    -------
    string
    >>> ts=encodeVantagePoints({1:(1,2)})
    >>> ts[1]
    '1'
    '''
    pairs = ["({},{})".format(key, value) for key, value in decoded.items()]
    return ';'.join(pairs)
# (1,2);(2,3)....|(3,4);.....| to
#[{1: <dist>, 2: <dist>, }_1, {}_2, ... {}_20]
def decodeVantagePoints(encoded):
    '''
    Decode a "(k,v);(k,v)" string back into a {int key: float distance} dict.
    >>> ts=decodeVantagePoints('(1,2);(2,3)')
    >>> ts
    {1: 2.0, 2: 3.0}
    '''
    dict_distances = {}
    for item in encoded.split(';'):
        # Strip the '(' before the key and the ')' after the value.
        parts = item.split(',')
        dict_distances[int(parts[0][1:])] = float(parts[1][:-1])
    return dict_distances
def encodeTimeSeries(timeSeries):
    """
    Serialise a time series object into a single string.

    Parameters
    ----------
    timeSeries: Concrete class of SizedContainerTimeSeriesInterface
        Any object whose items() yields (time, value) pairs.

    Returns
    -------
    str
        Each pair rendered as "(t,v)" and joined with ";".

    >>> ts = TimeSeries(values=[0, 2, -1, 0.5, 0], times=[1, 1.5, 2, 2.5, 10])
    >>> k=encodeTimeSeries(ts)
    >>> k
    '(1,0);(1.5,2);(2,-1);(2.5,0.5);(10,0)'
    """
    return ';'.join("({},{})".format(t, v) for (t, v) in timeSeries.items())
# Takes in encoded time series and transforms it into a TimeSeries object
# Raise ValueError whenever improper
def decodeTimeSeries(encodedTimeSeries):
    """
    Parse a "(t,v);(t,v)" string into a TimeSeries object.

    Raises ValueError when the input string is malformed.

    Parameters
    ----------
    encodedTimeSeries : str
        Each (time, value) pair encoded as "(t,v)" and separated with ";".

    Returns
    -------
    TimeSeries

    >>> ts = TimeSeries(values=[0, 2, -1, 0.5, 0], times=[1, 1.5, 2, 2.5, 10])
    >>> encodedString = encodeTimeSeries(ts)
    >>> k=decodeTimeSeries(encodedString)
    >>> k
    TimeSeries(Length: 5, [0.0, 2.0, -1.0, 0.5, 0.0])
    """
    times = []
    values = []
    for token in encodedTimeSeries.split(';'):
        pair = token.split(',')
        if len(pair) != 2:
            raise ValueError('Time series string is malformed')
        raw_time, raw_value = pair
        # Each side must at least hold a bracket plus one character.
        if len(raw_time) < 2 or len(raw_value) < 2:
            raise ValueError('Time series string is malformed')
        # float() itself raises ValueError on garbage between the brackets.
        times.append(float(raw_time[1:]))
        values.append(float(raw_value[:-1]))
    return TimeSeries(values=values, times=times)
def read_ts(i):
    '''
    Read a time series previously written by write_ts back from disk.

    Parameters
    ----------
    i : ID (file-name suffix) of the time series to read

    Returns
    -------
    TimeSeries
        The reconstructed time series.
    '''
    filename = 'ts-' + str(i) + '.txt'
    t = []
    v = []
    # Context manager closes the handle even on error (the original left
    # it open for the garbage collector).
    with open(filename) as fh:
        for line in fh:
            (time, val) = line.rstrip('\n').split(" ")
            # Bug fix: times were appended as strings while values were
            # floats, inconsistent with write_ts which stores both as floats.
            t.append(float(time))
            v.append(float(val))
    return TimeSeries(values=v, times=t)
def write_ts(ts, i):
    '''
    Write a time series to disk as a space-delimited text file.

    (The original had two consecutive docstrings; the second was a dead
    string statement and has been merged into this one.)

    Parameters
    ----------
    ts : time series object exposing _times and _values sequences
    i : counter/suffix appended to the "ts-<i>.txt" file name

    Returns
    -------
    None.
    '''
    path = "ts-{}.txt".format(i)
    data = np.array([ts._times, ts._values]).T
    # Context manager guarantees the handle is closed even if savetxt fails.
    with open(path, 'wb') as datafile_id:
        np.savetxt(datafile_id, data, fmt=['%.3f', '%8f'])
# Open (or create) the on-disk red-black-tree databases:
#  - distanceFromVantagePoints.dbdb : cached distances from each vantage point
#  - vantagePoints.dbdb             : the vantage point time series themselves
#  - timeseriesdata.dbdb            : the full set of generated time series
db = redblackDB.connect("distanceFromVantagePoints.dbdb")
db_vantagepoints = redblackDB.connect("vantagePoints.dbdb")
db_data = redblackDB.connect("timeseriesdata.dbdb")
distances_from_vantage_points = []  # per-vantage-point {ts_id: distance} dicts
v=[]  # vantage point time series
x=[]  # all generated time series
try:
    # If the marker file exists, the vantage-point databases were already
    # built by a previous run and can be reused as-is.
    # Bug fix: the handle returned by open() was never closed; `with` both
    # probes for the file and releases the handle immediately.
    with open('vantagepointids.txt'):
        pass
    print("Red Black trees already found!")
except FileNotFoundError:
    # First run: generate everything and cache it on disk.
    print('Not stored in disk, calculate distances')
    # Pick 20 distinct random time-series ids to act as vantage points.
    vantage_point_ids = random.sample(range(num_of_timeseries), num_vantage_points)
    # Bug fix: the original never closed (or flushed) this handle, yet the
    # file is re-opened and read later in the same run.
    with open('vantagepointids.txt', 'w') as fileh:
        fileh.write(str(vantage_point_ids))
    # Generate the 1000 random time series and persist each to its own file.
    for i in range(num_of_timeseries):
        ts = tsmaker(4, 2, 8)
        write_ts(ts, i)
        x.append(ts)
        # Keep the vantage-point series aside for distance computation.
        if i in vantage_point_ids:
            v.append(ts)
    # For every vantage point, store distance -> id pairs in its own
    # red-black tree so range queries ("chop") are cheap later.
    for i in range(num_vantage_points):
        print('Working on vantage point: ', i)
        db_file_name = 'db_vantagepoints' + str(i)
        vantagedb = redblackDB.connect(db_file_name + '.dbdb')
        dict_distances = {}
        for j in range(num_of_timeseries):
            dict_distances[j] = kernel_dist(v[i], x[j])
        for key, val in dict_distances.items():
            vantagedb.set(str(val), str(key))
        vantagedb.commit()
# Query phase: find the vantage point closest to the query series, then use
# its red-black tree to fetch every series within twice that distance
# (triangle-inequality bound) and rank the candidates.
corr = sys.maxsize
closest = 'dummy'
with open('vantagepointids.txt', 'r') as fileh:  # bug fix: handle was never closed
    vantageids = fileh.read()
# Strip the surrounding "[...]" of the serialised Python list.
vantageids = vantageids[1:len(vantageids) - 1]
# Bug fix: str.replace returns a new string; the original discarded the result.
vantageids = vantageids.replace(" ", "")
vantage_id_list = vantageids.split(',')  # renamed: `list` shadowed the builtin
v = []
for vp in vantage_id_list:
    v.append(read_ts(vp))
# Find the vantage point closest to the query time series.
for i in range(num_vantage_points):
    distance = kernel_dist(test_ts, v[i])  # hoisted: was computed twice
    if distance < corr:
        corr = distance
        closest = str(i)
# Any true nearest neighbour must lie within 2*corr of the chosen vantage point.
max_region = 2 * corr
dbfilename = 'db_vantagepoints' + closest
vantagedb = redblackDB.connect(dbfilename + '.dbdb')
dist = vantagedb.chop(str(max_region))
rboutputs = {}
for (a, b) in dist:
    rboutputs[b] = a
# Rank candidate ids by their stored distance and keep the best num_top.
sortedrbouts = sorted(rboutputs, key=rboutputs.get, reverse=False)[:num_top]
print('IDs of the top ', num_top, 'time series are', ','.join(map(str, sortedrbouts)))
|
989,241 | e1aea8b2fae7b447c5bf2c7dc8d6944355817412 | #!/usr/bin/python3
# -- imports --
import RPi.GPIO as GPIO
from time import sleep
import time,os,pickle
# -- GPIO port setup --
GPIO.setmode(GPIO.BCM)
GPIO.setup(25, GPIO.OUT)                             # output pin (indicator)
GPIO.setup(24, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)  # input pin (lever/sensor)
# -- initialise session state --
os.chdir('/home/pi/Documents/Degu/Data')
trail = 0            # number of responses recorded in this block
time0 = time.time()  # block start time
time1 = time.time()  # time of the latest loop iteration / response
time2 = time.time()  # time of the previous response
jiKoku = []          # wall-clock timestamps of every response
day = time.strftime("%Y-%m-%d")
# -- output files --
mydata = []
mydata2 = []
myfile = open(day + '.csv','a')          # per-response log
myfile2 = open(day + '_Result.csv','a')  # per-block summary
kumiTemp = open('/home/pi/Documents/Degu/kumiTemporary.txt','r+')
kumi = kumiTemp.read()  # current group (block) number, persisted on disk
kumiTemp.close()
print ("第",kumi,"組、",str((time.strftime("%H:%M:%S", time.localtime()))),"から始動!")
# -- main loop --
try:
    while True:  # time-controlled session loop
        # Run the block for 5 minutes; 299 s because one second is reserved
        # for the housekeeping at the end of the block.
        if time1-time0 < 299:
            time1 = time.time()
            if GPIO.input(24) == GPIO.LOW:
                # Lever pressed: switch the output on and record the response.
                GPIO.output(25, GPIO.HIGH)
                time1 = time.time()  # moment of the response
                print ("第",kumi,"組")
                print (" 回数 ",trail)
                print (" 時刻 ",time.strftime("%H:%M:%S", time.localtime()))
                # Inter-response interval, rounded to one decimal place.
                print ("時間間隔 ",round((time1-time2),1),"","秒" ,'\n')
                jiKoku.append(time.strftime("%H:%M:%S", time.localtime()))
                trail = trail+1
                mydata = [str(kumi),',',str(trail),',',str((time.strftime("%H:%M:%S", time.localtime()))),'\n',]
                time2 = time.time()
                for line in mydata:
                    myfile.write(line)
                # Debounce guard: wait until the lever is released so one
                # press is counted exactly once.
                while GPIO.input(24) == GPIO.LOW:
                    sleep(0.1)
            else:
                GPIO.output(25, GPIO.LOW)
                time1 = time.time()
                sleep(0.01)
        else:
            # Block finished: write the summary, advance the persisted group
            # counter, release everything and exit.
            print("第",str(kumi),"組 ",str((time.strftime("%H:%M:%S", time.localtime()))),"でブロック終了",'\n')
            mydata2 = [str(kumi),',',str(trail)]
            for line in mydata2:
                myfile2.write(line)
            kumiTemp = open('/home/pi/Documents/Degu/kumiTemporary.txt','w')
            kumi = int(kumi)+1
            kumiTemp.write(str(kumi))
            myfile.close()
            myfile2.close()
            kumiTemp.close()
            GPIO.cleanup()
            exit()
        sleep(0.01)  # loop pacing
except KeyboardInterrupt:
    pass
# Interrupted by the operator: persist the incremented group counter and
# release the files and GPIO ports.
print("プログラム中断")
# Bug fix: the original re-read kumiTemp here, but that handle was closed
# right after start-up, so the read always raised ValueError. The current
# group number is already held in `kumi`.
kumiTemp = open('/home/pi/Documents/Degu/kumiTemporary.txt','w')
kumi = int(kumi)+1
kumiTemp.write(str(kumi))
myfile.close()
myfile2.close()
kumiTemp.close()
GPIO.cleanup()
|
989,242 | 5bd35c9d71e4dd61a8221b80567cc861c2d88693 | from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.shortcuts import render, redirect
from django_redis import get_redis_connection
from shop.models import Shop_SKU, Shop_sort, Lunbo, Active, Active_zone
def shop_index(request):  # mall home page
    """Render the mall home page with the carousel, activities and zones."""
    # Carousel images, in display order.
    luns = Lunbo.objects.filter(isDelete=False).order_by("order")
    # Currently active promotions.
    active = Active.objects.filter(isDelete=False)
    # Activity zones that are on the shelf.
    act_zone = Active_zone.objects.filter(is_shelf=True, isDelete=False)
    return render(request, "shop/index.html", {
        "luns": luns,
        "active": active,
        "act_zone": act_zone,
    })
def shop_detail(request, id):  # product detail page
    """Show one on-shelf SKU; redirect home when it does not exist."""
    try:
        goods = Shop_SKU.objects.get(pk=id, is_shelf=True)
    except Shop_SKU.DoesNotExist:
        return redirect("shop:商城主页")
    return render(request, 'shop/detail.html', {"goods": goods})
def shop_category(request, cate_id, order):
    """Product category listing ("supermarket") with sorting and pagination.

    URL parameters:
        cate_id -- category primary key; 0 selects the first category
        order   -- sort mode: 0 default, 1 sales desc, 2 price asc,
                   3 price desc, 4 newest first
    """
    # cate_id/order arrive as URL strings; category pks are integers.
    try:
        cate_id = int(cate_id)
        order = int(order)
    except (TypeError, ValueError):
        # Bug fix: was a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt; int() can only raise these two.
        return redirect("shop:超市")
    # All visible categories.
    sorts = Shop_sort.objects.filter(isDelete=False)
    sort = sorts.first()
    # cate_id == 0 means "no explicit category": default to the first one.
    if cate_id == 0:
        cate_id = sort.pk
    # All on-shelf SKUs in the selected category.
    sku = Shop_SKU.objects.filter(isDelete=False, is_shelf=True, sort_id_id=cate_id)
    # Ordering expressions indexed by the `order` URL parameter.
    order_list = ["id", "-sku_sale", "sku_price", "-sku_price", "create_time"]
    try:
        order_one = order_list[order]
    except IndexError:
        # Unknown sort mode: fall back to the default ordering.
        order_one = order_list[0]
        order = 0
    sku = sku.order_by(order_one)
    # Paginate the ordered result set.
    pagesize = 1  # SKUs shown per page
    paginator = Paginator(sku, pagesize)
    now_page = request.GET.get('p', 1)
    try:
        page = paginator.page(now_page)
    except PageNotAnInteger:
        # Non-integer page parameter: show the first page.
        page = paginator.page(1)
    except EmptyPage:
        # Page number beyond the last page: show the last page.
        page = paginator.page(paginator.num_pages)
    # Cart badge: total quantity of items in the user's redis cart.
    car_count = 0
    user_id = request.session.get("id")
    if user_id:
        r = get_redis_connection("default")
        cart_id = "cart_id_{}".format(user_id)
        # hvals returns the per-SKU quantities as bytes; int() decodes them.
        for quantity in r.hvals(cart_id):
            car_count += int(quantity)
    context = {
        "sorts": sorts,
        "sku": page,
        "cate_id": cate_id,
        "order": order,
        "car_count": car_count,
    }
    return render(request, 'shop/category.html', context)
def shop_city(request):  # city selection page
    return render(request, "shop/city.html")
def shop_village(request):  # school/campus selection page
    return render(request, "shop/village.html")
def shop_tidings(request):  # message centre
    return render(request, "shop/tidings.html")
def shop_recharge(request):  # account top-up page
    return render(request, "shop/recharge.html")
def shop_yhq(request):  # my red packets / coupons
    return render(request, 'shop/yhq.html')
def shop_ygq(request):  # expired red packets
    return render(request, "shop/ygq.html")
def shop_speed(request):  # snack express page
    return render(request, 'shop/speed.html')
def shop_list(request):  # "Linlang" shop page
    return render(request, 'shop/list.html')
|
989,243 | cf7f79b99f98217a6e5158db078843471a2409d5 | """
@project PalavrasHttpEndpoint
@since 02/08/2017
@author Alencar Rodrigo Hentges <alencarhentges@gmail.com>
"""
import json
from flask import Flask
from flask import request
from utils.FraseUtil import FraseUtil
from utils import StringUtil
app = Flask(__name__)
@app.route('/palavras/analisar/', methods=['GET'])
def analisarFrase():
    """Analyse the sentence passed in the `frase` query parameter.

    Returns a JSON list with the sentence's words, or an HTTP 400 JSON
    error body when the parameter is missing or empty.
    """
    paramFrase = request.args.get('frase')
    if StringUtil.isEmpty(paramFrase):
        # Bug fix: typo in the user-facing message ("de ser" -> "deve ser").
        error = "A frase a ser analisada deve ser passada via parâmetro(?frase={sua_frase})."
        return json.dumps({"error": error}), 400
    frase = FraseUtil.getFrase(paramFrase)
    return json.dumps(frase.palavras)
989,244 | 4f919e2eba7b5123e07b53a3ab7f7cd75418ccf8 | import errno
#This class takes care of files needed in discordbot
class Files():
    """Small helper around the text files used by the discord bot."""
    def __init__(self, filename):
        self.filename = filename

    def readFile(self):
        """Read the file line by line and return the lines as a list.

        Returns an empty list (after printing a diagnostic) when the file
        cannot be opened. Bug fixes vs. the original:
        - `questions` was unbound on any open failure, so the final return
          raised UnboundLocalError;
        - `except Exception` accessed `.errno` unconditionally, which only
          OSError guarantees to carry;
        - the handle is now closed via a context manager even if
          readlines() fails.
        """
        try:
            with open(self.filename, "rt") as s:
                return s.readlines()
        except OSError as exc:
            if exc.errno == errno.ENOENT:
                print("The file doesn't exist.")
            elif exc.errno == errno.EMFILE:
                print("You've opened too many files.")
            else:
                print("The error number is:", exc.errno)
            return []
|
989,245 | e99fd102efb4cf06f571cf7ace677bb46a600cbc | # Problem Title: Zigzag Iterator
class ZigzagIterator(object):
    """Iterate two lists alternately, draining the rest of the longer one."""

    def __init__(self, v1, v2):
        """
        Initialize your data structure here.
        :type v1: List[int]
        :type v2: List[int]
        """
        self.v1, self.v2 = v1, v2
        self.p1 = self.p2 = 0
        self.n1, self.n2 = len(v1), len(v2)
        # True when the next element should come from v1 (if any remain).
        self.first = True

    def next(self):
        """
        :rtype: int
        """
        # Take from v1 when it is v1's turn and v1 has elements left, or
        # when v2 is already exhausted.
        take_v1 = (self.first and self.p1 < self.n1) or self.p2 >= self.n2
        if take_v1:
            self.first = False
            value = self.v1[self.p1]
            self.p1 += 1
        else:
            self.first = True
            value = self.v2[self.p2]
            self.p2 += 1
        return value

    def hasNext(self):
        """
        :rtype: bool
        """
        return self.p1 < self.n1 or self.p2 < self.n2
# Your ZigzagIterator object will be instantiated and called as such:
# i, v = ZigzagIterator(v1, v2), []
# while i.hasNext(): v.append(i.next())
|
989,246 | 9332c1b58d50e2e54ba020389bde2b24d0489b65 | from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse
from hx_lti_assignment.models import Assignment, AssignmentTargets
from hx_lti_initializer.models import LTICourse, LTIProfile, LTIResourceLinkConfig
from lti import ToolConsumer
from target_object_database.models import TargetObject
class TODViewsTests(TestCase):
    """Integration tests for target_object_database views via an LTI launch."""
    def setUp(self):
        """
        1. Creates a test course.
        2. Creates a test Assignment.
        3. Creates a fake Target Object record.
        4. Starts the LTI tool consumer and makes a data launch request.
        """
        user = User(username="Luis", email="dfslkjfijeflkj")
        user.save()
        # LTI profile acting as the course admin / launching user.
        lti_profile = LTIProfile.objects.create(
            user=user, name=user.username, anon_id="luis123"
        )
        lti_profile.save()
        course = LTICourse(course_name="Fake Course", course_id="BlueMonkeyFake")
        course.save()
        course.course_admins.add(lti_profile)
        self.assignment = Assignment(
            assignment_name="Test", pagination_limit=10, course=course
        )
        self.assignment.save()
        self.tod = TargetObject(
            target_title="TObj2",
            target_author="Test Author",
            target_content="Fake Content2",
            target_citation="Fake Citation2",
            target_type="tx",
        )
        self.tod.save()
        # Link the target object into the assignment.
        self.aTarget = AssignmentTargets(
            assignment=self.assignment,
            target_object=self.tod,
            order=1,
            target_external_css="",
            target_instructions="Fake Instructions",
            target_external_options="",
        )
        self.aTarget.save()
        self.target_path = reverse("hx_lti_initializer:launch_lti")
        self.launch_url = "http://testserver{}".format(self.target_path)
        self.resource_link_id = "some_string_to_be_the_fake_resource_link_id"
        # set the starting resource
        lti_resource_link_config = LTIResourceLinkConfig.objects.create(
            resource_link_id=self.resource_link_id,
            assignment_target=self.aTarget,
        )
        # Signed LTI launch request impersonating a learner in the course.
        self.consumer = ToolConsumer(
            consumer_key=settings.CONSUMER_KEY,
            consumer_secret=settings.LTI_SECRET,
            launch_url=self.launch_url,
            params={
                "lti_message_type": "basic-lti-launch-request",
                "lti_version": "LTI-1p0",
                "resource_link_id": self.resource_link_id,
                "lis_person_sourcedid": lti_profile.name,
                "lis_outcome_service_url": "fake_url",
                "user_id": lti_profile.anon_id,
                "roles": ["Learner"],
                "context_id": course.course_id,
            },
        )
        self.lti_params = self.consumer.generate_launch_data()
    def tearDown(self):
        # Drop fixture references; DB rows are rolled back by TestCase itself.
        del self.assignment
        del self.tod
    def test_call_view_loads(self):
        """LTI launch redirects (302); a valid target object renders (200);
        an unknown target object id returns 404."""
        lti_params = self.consumer.generate_launch_data()
        response0 = self.client.post(self.target_path, lti_params)
        self.assertTrue(response0.status_code == 302)
        target_url = reverse(
            "target_object_database:open_target_object",
            kwargs={"collection_id": self.assignment.id, "target_obj_id": self.tod.id,},
        )
        response = self.client.get(target_url)
        self.assertTrue(response.status_code == 200)
        target_url = reverse(
            "target_object_database:open_target_object",
            kwargs={"collection_id": self.assignment.id, "target_obj_id": "987654321",},
        )
        response = self.client.get(target_url)
        self.assertTrue(response.status_code == 404)
'''
24feb20 naomi: not sure how relevant this test is, it seems no one uses
this "get_admin_url" method...
def test_get_admin_url(self):
"""
"""
self.assertEqual(
self.tod.get_admin_url(),
'/admin/target_object_database/targetobject/%d/' % self.tod.id
)
'''
|
989,247 | 14cd4d108d0905eb2858439f34654ceb9995d0f4 | import math
import threading
import matplotlib.pyplot as plt
class plots:
    """Registry of named matplotlib figures with manual axis-limit tracking.

    Each registered plot is a dict holding the figure object, its axis
    labels, log-scale flag and the running min/max of the plotted data.
    """
    def __init__(self):
        self.__plots = []    # one dict per registered figure
        self.__plot_ind = 0  # index of the plot most recently found by __check_name
        self.__nplots = 0
        self.minExp = -16
        self.__minY = math.pow(10,self.minExp)  # stand-in for 0 on log axes
        # Either True (automatic limits) or a (min, max) pair (fixed limits).
        self.__automaticXScale = True
        self.__automaticYScale = True
    def __check_name(self,name):
        # Return True and remember the index when a plot called `name` exists.
        names = [self.__plots[i]['name'] for i in range(self.__nplots)]
        if (name in names):
            self.__plot_ind = names.index(name)
            return True
        else:
            return False
    @property
    def automaticXScale(self):
        return self.__automaticXScale
    @automaticXScale.setter
    def automaticXScale(self,value):
        self.__automaticXScale = value
    @property
    def automaticYScale(self):
        return self.__automaticYScale
    @automaticYScale.setter
    def automaticYScale(self,value):
        self.__automaticYScale = value
    @property
    def toStr(self):
        # Human-readable index of the registered plots.
        s = 'plots:\n'
        names = [self.__plots[i]['name'] for i in range(self.__nplots)]
        for i in range(len(names)):
            s += str(i) + ' : ' + names[i] + '\n'
        return s
    def add_plot(self,**params): # params: { plot_name, xlabel, ylabel, logy}
        # Register a new named figure. NOTE(review): missing mandatory
        # fields terminate the whole process via exit().
        plot_name = ''
        if ('plot_name' in params):
            plot_name = params['plot_name']
        else:
            print('Error: no field \'plot_name\' specified')
            exit()
        xlabel = ''
        ylabel = ''
        if ('xlabel' in params):
            xlabel = params['xlabel']
        else:
            print('Error: no field \'xlabel\' specified')
            exit()
        if ('ylabel' in params):
            ylabel = params['ylabel']
        else:
            print('Error: no field \'ylabel\' specified')
            exit()
        logy = False
        if ('logy' in params):
            if (params['logy']==True):
                logy = True
        self.__plots.append({ 'name':plot_name,
                              'xmin': 0,
                              'xmax': 0,
                              'ymin': 0.1*logy,
                              'ymax': self.__minY,
                              'xlabel': xlabel,
                              'ylabel': ylabel,
                              'logy': logy,
                              'obj':plt.figure(plot_name)})
        self.__nplots += 1
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        plt.grid()
        if (logy):
            plt.gca().set_yscale('log')
    def add_data(self,**params): #params = {'plot_name', 'x', 'y', 'label', 'type' in ['mib', 'lib']}
        # Add one labelled line to an existing plot and update its axis limits.
        p = self.__plots
        if ('plot_name' in params):
            plt_name = params['plot_name']
            if (self.__check_name(plt_name)):
                plt.figure(plt_name)
            else:
                print('Plot not yet created. Please use add_plot first')
                exit()
        else:
            print('Error: \'plot_name\' is necessary')
            exit()
        # NOTE(review): `*` acts as a non-short-circuiting AND, so
        # len(params['x']) is evaluated even when 'x' is absent -> KeyError
        # instead of the intended error message.
        if ((('x' in params) * ('y' in params) * (len(params['x'])==len(params['y'])))==False):
            print('Error: x, y array are necessary and they must be of the same length')
            exit()
        # NOTE(review): comparison chaining makes this parse as
        # ('label' in params) and (params == False), which is always False,
        # so the missing-label check never fires -- likely a bug.
        if ('label' in params == False):
            print('Error: label is a necessary fields')
            exit()
        this_type = ''
        if ('type' in params):
            this_type = params['type']
        p = p[self.__plot_ind]
        y = params['y']
        x = params['x']
        if (p['logy'] == True):
            if (min(y) <= 0):
                # Non-positive values cannot be shown on a log axis; clamp
                # them to __minY below.
                y_positive = y[[i for i in range(len(y)) if y[i]>0]]
                if (self.__automaticYScale == True):
                    try:
                        p['ymin'] = min([p['ymin'],min(y_positive)])
                    except ValueError:
                        # min() on an empty sequence: no positive values at all.
                        p['ymin'] = self.__minY
                else:
                    p['ymin'] = self.__automaticYScale[0]
                # NOTE(review): fancy-index assignment assumes x/y are numpy
                # arrays rather than plain lists -- TODO confirm callers.
                y[[i for i in range(len(y)) if y[i]<=0]] = self.__minY
            else:
                if (self.__automaticYScale == True):
                    p['ymin'] = min([p['ymin'],min(y)])
                else:
                    p['ymin'] = self.__automaticYScale[0]
        else:
            if (self.__automaticYScale == True):
                p['ymin'] = min([p['ymin'],min(y)])
            else:
                p['ymin'] = self.__automaticYScale[0]
        if (self.__automaticYScale == True):
            p['ymax'] = max([p['ymax'],max(y)])
        else:
            p['ymax'] = self.__automaticYScale[1]
        if (self.__automaticXScale == True):
            p['xmin'] = min([p['xmin'],min(x)])
            p['xmax'] = max([p['xmax'],max(x)])
        else:
            p['xmin'] = self.__automaticXScale[0]
            p['xmax'] = self.__automaticXScale[1]
        # 'lib' series are drawn dashed ('--'), everything else solid ('-').
        ls = '-'
        if (this_type == 'lib'): ls += '-'
        if 'color' not in params:
            params['color'] = None
        newlines = plt.plot(x, y, color=params['color'], marker='.',linewidth=0.5,linestyle=ls,label=(this_type.upper() +' '+ params['label']))
        # Pad the upper limits by 1% so the outermost points stay visible.
        plt.xlim(p['xmin'], p['xmax']+math.fabs(p['xmax'])/100)
        plt.ylim(p['ymin'], p['ymax'] + math.fabs(p['ymax'])/100)
        plt.legend()
        return newlines[0]
    def get_plot_obj(self, name):
        # Return the full plot dict (or all of them for '*' / '').
        if (name in ['*', '']):
            return self.__plots
        if (self.__check_name(name)==True):
            return self.__plots[self.__plot_ind]
        else:
            print('No plot named '+name)
    def get_plot(self, name):
        # Return only the matplotlib Figure object (or all dicts for '*' / '').
        if (name in ['*', '']):
            return self.__plots
        if (self.__check_name(name)==True):
            return self.__plots[self.__plot_ind]['obj']
        else:
            print('No plot named '+name)
    def show_plot(self, name):
        # Display the figures if `name` is a registered plot.
        if (self.__check_name(name)==True):
            self.thread_plt()
        else:
            print('No plot named '+name)
    def thread_plt (self):
        # Blocking call; shows every open figure.
        plt.show()
    def run(self):
        # Show the figures on a worker thread and wait for it to finish.
        mt = threading.main_thread()
        t = threading.Thread(target = self.thread_plt)
        t.start()
        t.join()
        print('Plot closed.')
    def describe_plot(self, name):
        # Build a "name_vs_xlabel" identifier for the plot.
        if (self.__check_name(name)==True):
            p = self.__plots[self.__plot_ind]
            return p['name'] + '_vs_' + p['xlabel']
        else:
            print('No plot named '+name)
if __name__ == '__main__':
    # Demo / smoke test for the plots class.
    import numpy as np
    p = plots()
    # Bug fix: add_plot/add_data accept keyword arguments (**params); the
    # original passed a dict positionally, which raised
    # "takes 1 positional argument but 2 were given".
    p.add_plot(plot_name='first', xlabel='Overhead',
               ylabel='Packet error rate', logy=True)
    p.add_data(plot_name='first', label='label1', type='mib',
               x=np.array([0.1, 0.2, 0.3]),
               y=np.array([10, 103, 108]))
    p.add_data(plot_name='first', label='label1', type='lib',
               x=np.array([0.1, 0.2, 0.3]),
               y=np.array([1, 100, 104]))
    print('Plotting:' + p.describe_plot('first'))
    p.show_plot('first')
989,248 | 407ce24c4ec741c4a470ebbeccd7a3358e6c8f61 |
# 问题1: 最长回文子串
def maxLen(s, i, j):
    """Expand around the centre (i, j) and return the palindrome length.

    (i, i) probes odd-length palindromes, (i, i+1) even-length ones.
    """
    while i >= 0 and j < len(s) and s[i] == s[j]:
        i -= 1
        j += 1
    return j - i - 1


def longestHuiwenSubStr1(s):
    """Length of the longest palindromic substring (centre expansion, O(n^2)).

    Bug fix: the empty string now returns 0; the original returned 1,
    inconsistent with longestHuiwenSubstr2('').
    """
    if not s:
        return 0
    maxL = 1
    for i in range(len(s)):
        maxL = max(maxL, maxLen(s, i, i), maxLen(s, i, i + 1))
    return maxL
def longestHuiwenSubstr2(s):
    """Length of the longest palindromic substring via interval DP, O(n^2).

    DP[i][j] is True iff s[i..j] is a palindrome; intervals are filled in
    order of increasing length.

    Bug fix: the length-2 base case now updates the running maximum, so
    e.g. 'aa' correctly yields 2 (the original returned 1 because maxL was
    only updated for intervals of length >= 3).
    """
    N = len(s)
    if N == 0:
        return 0
    maxL = 1
    DP = [[False for _ in range(N)] for _ in range(N)]
    for i in range(N):
        DP[i][i] = True
        if i < N - 1 and s[i] == s[i + 1]:
            DP[i][i + 1] = True
            maxL = max(maxL, 2)  # bug fix: count length-2 palindromes
    for jj in range(2, N):
        for i in range(0, N - jj):
            j = i + jj
            DP[i][j] = DP[i + 1][j - 1] and s[i] == s[j]
            if DP[i][j]:
                maxL = max(maxL, j - i + 1)
    return maxL
# Smoke test: both implementations run on the same input (expected: 7,
# for the substring 'bzxyxzb').
print(longestHuiwenSubStr1('aedsbzxyxzbaba'))
print(longestHuiwenSubstr2('aedsbzxyxzbaba'))
# 问题2:最长回文子序问题
def longestHuiwenSubSeq(s):
    """Length of the longest palindromic subsequence of s (interval DP).

    dp[i][j] holds the answer for the slice s[i..j]; intervals are filled
    in order of increasing length so every sub-interval is ready when used.
    """
    n = len(s)
    if not n:
        return 0
    dp = [[0] * n for _ in range(n)]
    # Base cases: single characters and adjacent pairs.
    for i in range(n):
        dp[i][i] = 1
        if i + 1 < n:
            dp[i][i + 1] = 2 if s[i] == s[i + 1] else 1
    for span in range(2, n):
        for i in range(n - span):
            j = i + span
            if s[i] == s[j]:
                # Matching ends extend the best interior subsequence by 2.
                dp[i][j] = 2 + dp[i + 1][j - 1]
            else:
                dp[i][j] = max(dp[i + 1][j], dp[i][j - 1])
    return dp[0][-1]
print(longestHuiwenSubSeq('xyzaxzsey')) |
989,249 | 033f854e5965879d6ad1fa138ce973e5c2c4b71d | # SimpleCOMServer.py
class PythonUtilities:
    """COM server exposing simple string utilities (Python 2 / pywin32)."""
    _public_methods_ = ['SplitString']  # methods exported over COM
    _reg_progid_ = "PythonDemos.Utilities"
    # NEVER copy the following ID
    # Use print pythoncom.CreateGuid() to create a new one
    _reg_clsid_ = "{1DCE0ACF-7F78-4280-A87C-E3182AE57BBF}"
    # implementation
    def SplitString(self, val, item=None):
        # Split val on item (or on whitespace when item is falsy), using the
        # Python 2 string-module function.
        import string
        if item:
            item = str(item)
        return string.split(str(val), item)
# Add code so that when this script is run, it self-registers
# Self-register the COM server when this script is run directly (Python 2).
if __name__ == '__main__':
    print "Registering COM server..."
    import win32com.server.register
    win32com.server.register.UseCommandLine(PythonUtilities)
989,250 | c40b0eff02ba6bab1f3790bd7775e2b3155f03cc | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import oculow
driver = webdriver.Chrome()
# Capture the Google home page
driver.get("http://www.google.com")
assert "Google" in driver.title
oculow.capture_screen(driver)
# Capture the Lince project website
driver.get("http://www.project-lince.com")
assert "Lince" in driver.title
oculow.capture_screen(driver)
# Upload a standalone screenshot from disk as well
oculow.upload_image("C:\\Users\\Potosin\\Desktop\\test1.PNG")
driver.close()
oculow.dispose()  # flush/close the oculow session
|
989,251 | 8fa1941e0029a471670e18454e2447b5ec17ad02 | import logging
from copy import deepcopy
from typing import Any, Dict, List, Optional, Union
from tqdm import tqdm
from haystack.errors import HaystackError
from haystack.schema import Document, Answer
from haystack.nodes.translator.base import BaseTranslator
from haystack.lazy_imports import LazyImport
logger = logging.getLogger(__name__)
with LazyImport(message="Run 'pip install farm-haystack[inference]'") as torch_and_transformers_import:
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from haystack.modeling.utils import initialize_device_settings # pylint: disable=ungrouped-imports
class TransformersTranslator(BaseTranslator):
    """
    Translator component based on Seq2Seq models from Huggingface's transformers library.
    Exemplary use cases:
    - Translate a query from Language A to B (e.g. if you only have good models + documents in language B)
    - Translate a document from Language A to B (e.g. if you want to return results in the native language of the user)
    We currently recommend using OPUS models (see __init__() for details)
    **Example:**
    ```python
    DOCS = [
        Document(content="Heinz von Foerster was an Austrian American scientist combining physics and philosophy,
        and widely attributed as the originator of Second-order cybernetics.")
    ]
    translator = TransformersTranslator(model_name_or_path="Helsinki-NLP/opus-mt-en-de")
    res = translator.translate(documents=DOCS, query=None)
    ```
    """
    def __init__(
        self,
        model_name_or_path: str,
        tokenizer_name: Optional[str] = None,
        max_seq_len: Optional[int] = None,
        clean_up_tokenization_spaces: Optional[bool] = True,
        use_gpu: bool = True,
        progress_bar: bool = True,
        use_auth_token: Optional[Union[str, bool]] = None,
        devices: Optional[List[Union[str, "torch.device"]]] = None,
    ):
        """Initialize the translator with a model that fits your targeted languages. While we support all seq2seq
        models from Hugging Face's model hub, we recommend using the OPUS models from Helsinki NLP. They provide plenty
        of different models, usually one model per language pair and translation direction.
        They have a pretty standardized naming that should help you find the right model:
        - "Helsinki-NLP/opus-mt-en-de" => translating from English to German
        - "Helsinki-NLP/opus-mt-de-en" => translating from German to English
        - "Helsinki-NLP/opus-mt-fr-en" => translating from French to English
        - "Helsinki-NLP/opus-mt-hi-en"=> translating from Hindi to English
        ...
        They also have a few multilingual models that support multiple languages at once.
        :param model_name_or_path: Name of the seq2seq model that shall be used for translation.
                                   Can be a remote name from Huggingface's modelhub or a local path.
        :param tokenizer_name: Optional tokenizer name. If not supplied, `model_name_or_path` will also be used for the
                               tokenizer.
        :param max_seq_len: The maximum sentence length the model accepts. (Optional)
        :param clean_up_tokenization_spaces: Whether or not to clean up the tokenization spaces. (default True)
        :param use_gpu: Whether to use GPU or the CPU. Falls back on CPU if no GPU is available.
        :param progress_bar: Whether to show a progress bar.
        :param use_auth_token: The API token used to download private models from Huggingface.
                               If this parameter is set to `True`, then the token generated when running
                               `transformers-cli login` (stored in ~/.huggingface) will be used.
                               Additional information can be found here
                               https://huggingface.co/transformers/main_classes/model.html#transformers.PreTrainedModel.from_pretrained
        :param devices: List of torch devices (e.g. cuda, cpu, mps) to limit inference to specific devices.
                        A list containing torch device objects and/or strings is supported (For example
                        [torch.device('cuda:0'), "mps", "cuda:1"]). When specifying `use_gpu=False` the devices
                        parameter is not used and a single cpu device is used for inference.
        """
        # Fail fast with an install hint if torch/transformers are missing.
        torch_and_transformers_import.check()
        super().__init__()
        self.devices, _ = initialize_device_settings(devices=devices, use_cuda=use_gpu, multi_gpu=False)
        if len(self.devices) > 1:
            logger.warning(
                "Multiple devices are not supported in %s inference, using the first device %s.",
                self.__class__.__name__,
                self.devices[0],
            )
        self.max_seq_len = max_seq_len
        self.clean_up_tokenization_spaces = clean_up_tokenization_spaces
        self.progress_bar = progress_bar
        tokenizer_name = tokenizer_name or model_name_or_path
        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, use_auth_token=use_auth_token)
        self.model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path, use_auth_token=use_auth_token)
        self.model.to(str(self.devices[0]))
    def translate(
        self,
        results: Optional[List[Dict[str, Any]]] = None,
        query: Optional[str] = None,
        documents: Optional[Union[List[Document], List[Answer], List[str], List[Dict[str, Any]]]] = None,
        dict_key: Optional[str] = None,
    ) -> Union[str, List[Document], List[Answer], List[str], List[Dict[str, Any]]]:
        """
        Run the actual translation. You can supply a query or a list of documents. Whatever is supplied will be translated.
        :param results: Generated QA pairs to translate
        :param query: The query string to translate
        :param documents: The documents to translate
        :param dict_key: If you pass a dictionary in `documents`, you can specify here the field which shall be translated.
        """
        queries_for_translator = None
        answers_for_translator = None
        if results is not None:
            # QA-pair mode: translate the queries and the top answers in one
            # batch; they are concatenated below and returned as a flat list.
            queries_for_translator = [result["query"] for result in results]
            answers_for_translator = [result["answers"][0].answer for result in results]
        if not query and not documents and results is None:
            raise AttributeError("Translator needs a query or documents to perform translation.")
        if query and documents:
            raise AttributeError("Translator needs either a query or documents but not both.")
        if documents and len(documents) == 0:
            logger.warning("Empty documents list is passed")
            return documents
        dict_key = dict_key or "content"
        # Normalise every supported input shape to a flat list of strings.
        if queries_for_translator is not None and answers_for_translator is not None:
            text_for_translator = queries_for_translator + answers_for_translator
        elif isinstance(documents, list):
            if isinstance(documents[0], Document):
                text_for_translator = [doc.content for doc in documents]  # type: ignore
            elif isinstance(documents[0], Answer):
                text_for_translator = [answer.answer for answer in documents]  # type: ignore
            elif isinstance(documents[0], str):
                text_for_translator = documents  # type: ignore
            else:
                if not isinstance(documents[0].get(dict_key, None), str):  # type: ignore
                    raise AttributeError(f"Dictionary should have {dict_key} key and it's value should be `str` type")
                text_for_translator = [doc[dict_key] for doc in documents]  # type: ignore
        else:
            text_for_translator: List[str] = [query]  # type: ignore
        batch = self.tokenizer(
            text=text_for_translator,
            return_tensors="pt",
            max_length=self.max_seq_len,
            padding="longest",
            truncation=True,
        ).to(self.devices[0])
        generated_output = self.model.generate(**batch)
        translated_texts = self.tokenizer.batch_decode(
            generated_output, skip_special_tokens=True, clean_up_tokenization_spaces=self.clean_up_tokenization_spaces
        )
        # Re-package the translations in the same shape the caller supplied.
        if queries_for_translator is not None and answers_for_translator is not None:
            return translated_texts
        elif query:
            return translated_texts[0]
        elif documents:
            if isinstance(documents, list) and isinstance(documents[0], str):
                return [translated_text for translated_text in translated_texts]
            translated_documents: Union[
                List[Document], List[Answer], List[str], List[Dict[str, Any]]
            ] = []  # type: ignore
            for translated_text, doc in zip(translated_texts, documents):
                # Deep-copy so the caller's originals keep their source text.
                translated_document = deepcopy(doc)
                if isinstance(translated_document, Document):
                    translated_document.content = translated_text
                elif isinstance(translated_document, Answer):
                    translated_document.answer = translated_text
                else:
                    translated_document[dict_key] = translated_text  # type: ignore
                translated_documents.append(translated_document)  # type: ignore
            return translated_documents
        raise AttributeError("Translator needs a query or documents to perform translation")
    def translate_batch(
        self,
        queries: Optional[List[str]] = None,
        documents: Optional[Union[List[Document], List[Answer], List[List[Document]], List[List[Answer]]]] = None,
        batch_size: Optional[int] = None,
    ) -> List[Union[str, List[Document], List[Answer], List[str], List[Dict[str, Any]]]]:
        """
        Run the actual translation. You can supply a single query, a list of queries or a list (of lists) of documents.
        :param queries: Single query or list of queries.
        :param documents: List of documents or list of lists of documents.
        :param batch_size: Not applicable.
        """
        # TODO: This method currently just calls the translate method multiple times, so there is room for improvement.
        if queries and documents:
            raise AttributeError("Translator needs either query or documents but not both.")
        if not queries and not documents:
            raise AttributeError("Translator needs a query or documents to perform translation.")
        translated = []
        # Translate queries
        if queries:
            for query in tqdm(queries, disable=not self.progress_bar, desc="Translating"):
                cur_translation = self.translate(query=query)
                translated.append(cur_translation)
        # Translate docs / answers
        elif documents:
            # Single list of documents / answers
            if not isinstance(documents[0], list):
                translated.append(self.translate(documents=documents))  # type: ignore
            # Multiple lists of document / answer lists
            else:
                for cur_list in tqdm(documents, disable=not self.progress_bar, desc="Translating"):
                    if not isinstance(cur_list, list):
                        raise HaystackError(
                            f"cur_list was of type {type(cur_list)}, but expected a list of Documents / Answers."
                        )
                    cur_translation = self.translate(documents=cur_list)
                    translated.append(cur_translation)
        return translated
|
989,252 | 7348df1499fd12f08fd8bf4a3c0c4c35e894d5f2 | #!/usr/bin/env python
# encoding: utf-8
'''
@author: xuqiang
@license: (C) Copyright 2013-2022.
@contact: xq-310@163.com
@software: wallet
@file: views.py.py
@time: 2019/7/14 下午6:23
@desc:
'''
from flask import Blueprint,request
from rpc.eth_rpc_client import EthRpcClient
from db.eth_db import EthDb
from decimal import Decimal, ROUND_DOWN
from common import component
# Blueprint grouping the ERC20-token endpoints (/token/*).
bp_token = Blueprint('bp_token', __name__)
@bp_token.route('/token/tx',methods=['GET','POST'])
def sendtoken():
    """Broadcast a signed token transaction via RPC and record it in the DB.

    Expects a JSON body with rawtx/from/to/value/contract/decimals/nonce.
    Returns {"th": "0x<txhash>"} on success; re-raises RPC failures.
    """
    client = EthRpcClient()
    database = EthDb()
    payload = request.get_json()
    print(payload)
    try:
        tx_hash = client.sendRawTansaction(payload["rawtx"])
        print("transaction txhash:", tx_hash)
    except Exception as e:
        # Surface the RPC failure to the caller after logging it.
        print("transaction error:", e)
        raise e
    print("process token!")
    # Persist a first record of the transfer for later status tracking.
    params = {
        "token_addr_from": payload['from'].lower(),
        "token_addr_to": payload['to'].lower(),
        "token_amount": payload["value"],
        "tx_hash": tx_hash,
        "contract_addr": payload["contract"].lower(),
        "token_decimals": payload["decimals"],
        "nonce": payload["nonce"],
    }
    database.insert_tokentx_first(params)
    database.commit()
    return {"th": "0x" + tx_hash}
@bp_token.route('/token/txlist',methods=['POST'])
def gettokenlist():
    """Return the recorded token transfers for an (address, contract) pair."""
    body = request.get_json()
    address = body["address"]
    contract = body["contract"]
    db = EthDb()
    rows = db.get_token_address(address, contract)
    # Reshape DB rows into the wire format expected by the client.
    infos = [
        {
            "from": row["from_addr"],
            "to": row["to_addr"],
            "contract": row["contract_addr"],
            "hash": row["tx_hash"],
            "nonce": row["nonce"],
            "value": str(row["amount"]),
            "time": row["update_time"],
            "state": row["status"],
        }
        for row in rows
    ]
    print('#######Token List::', len(infos))
    return {"result": infos}
@bp_token.route('/token/token',methods=['POST'])
def gettoken():
    """Return {"balance", "decimals"} for a (token contract, address) pair,
    serving from the DB cache and falling back to an on-chain fetch."""
    print("gettoken~~:",request.get_json())
    address = request.get_json()["address"]
    token = request.get_json()["token"]
    result={}
    try:
        db = EthDb()
        infos = db.get_token_balance(token, address)
        # If there is no cached entry yet, register the contract and fetch
        # the balance from the chain.
        if infos:
            result["balance"]= str(infos["balance"])
            result["decimals"] = infos["decimals"]
        else:
            component.update_contract_addr(token)
            balance,decimals = component.token_balance_updata(token, address)
            result["balance"] = str(balance)
            result["decimals"] = decimals
        return result
    except Exception as e:
        # NOTE(review): returning False from a Flask view is not a valid
        # response object and silently swallows `e`; consider logging the
        # exception and returning an error payload — confirm with callers.
        return False
|
989,253 | bcaec29ac60cec404e2897c8316f43d1425799cb | import sys
sys.path.insert(0, ".")  # make the local atomium checkout importable
import atomium
# NOTE(review): "{}.{}" looks like an unfilled str.format template for
# "<pdb id>.<extension>" — as written this opens a literal file named "{}.{}".
pdb = atomium.open("tests/time/{}.{}")
#pdb.save("tests/time/temp.pdb")
|
989,254 | 0bac01a70bddefd0e15f4a3275ec66b7879e9492 | # Copyright 2019 NOKIA - All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from netaddr import IPNetwork
from nuage_tempest_plugin.lib.test.nuage_test import NuageBaseTest
from nuage_tempest_plugin.lib.topology import Topology
from nuage_tempest_plugin.lib.utils import constants as n_constants
from nuage_tempest_plugin.services.nuage_client import NuageRestClient
CONF = Topology.get_conf()
SPOOFING_ENABLED = n_constants.ENABLED
# On VSD 5.x a disabled-spoofing vport is reported as INHERITED rather
# than DISABLED, so the expected value depends on the platform version.
SPOOFING_DISABLED = (n_constants.INHERITED if Topology.is_v5
                     else n_constants.DISABLED)
class PortsScaleTest(NuageBaseTest):
    """Scale test: create many ports sharing one allowed address pair (AAP)
    and verify the resulting vport state on the VSD."""

    @classmethod
    def setup_clients(cls):
        """Set up the VSD REST client and a routers client with a longer
        HTTP read timeout."""
        super(PortsScaleTest, cls).setup_clients()
        cls.nuage_client = NuageRestClient()
        # Increase api read timeout because router interface attach can
        # take a long time if there are many ports with aaps
        cls.manager.routers_client = cls.manager.network.RoutersClient(
            http_timeout=100)

    def test_nuage_port_allow_address_pair_scale(self):
        """Create 100 ports with the same AAP, attach their subnet to a
        router, then check on the VSD that each vport has spoofing disabled
        and carries a virtual IP matching the AAP's IP and MAC."""
        network = self.create_network()
        cidr = IPNetwork("10.0.0.0/16")
        subnet = self.create_subnet(network, cidr=cidr,
                                    mask_bits=cidr.prefixlen)
        num_ports_aap = 100
        # The VIP port owns the shared address; 'nuage:vip' marks it as such.
        addrpair_port = self.create_port(network, device_owner='nuage:vip')
        allowed_address_pairs = [
            {'ip_address': addrpair_port['fixed_ips'][0]['ip_address'],
             'mac_address': addrpair_port['mac_address']}]
        portids_to_aap = {}
        for i in range(num_ports_aap):
            port = self.create_port(
                network,
                allowed_address_pairs=allowed_address_pairs)
            portids_to_aap[port['id']] = allowed_address_pairs
        router = self.create_router()
        self.router_attach(router, subnet)
        # Resolve the VSD L3 domain and subnet backing the neutron objects.
        l3domain_ext_id = self.nuage_client.get_vsd_external_id(router['id'])
        nuage_domain = self.nuage_client.get_resource(
            n_constants.DOMAIN,
            filters='externalID',
            filter_values=l3domain_ext_id)
        nuage_subnet = self.nuage_client.get_domain_subnet(
            n_constants.DOMAIN, nuage_domain[0]['ID'], by_subnet=subnet)
        for port_id in portids_to_aap:
            port_ext_id = self.nuage_client.get_vsd_external_id(port_id)
            nuage_vport = self.nuage_client.get_vport(
                n_constants.SUBNETWORK,
                nuage_subnet[0]['ID'],
                filters='externalID',
                filter_values=port_ext_id)
            self.assertEqual(SPOOFING_DISABLED,
                             nuage_vport[0]['addressSpoofing'])
            # The virtual IP on the vport must mirror the AAP's IP and MAC.
            nuage_vip = self.nuage_client.get_virtual_ip(
                n_constants.VPORT,
                nuage_vport[0]['ID'],
                filters='virtualIP',
                filter_values=str(portids_to_aap[port_id][0]['ip_address']))
            self.assertEqual(portids_to_aap[port_id][0]['mac_address'],
                             nuage_vip[0]['MAC'])
            self.assertEqual(nuage_vip[0]['externalID'],
                             self.nuage_client.get_vsd_external_id(port_id))
|
989,255 | 3fafc7a1b9e7915b0eac43582b0193ff54bdee5f | from __future__ import unicode_literals
from django.db import models
class Diputado(models.Model):
    """A deputy (member of parliament) with contact and profile data."""
    nombre = models.CharField(max_length=100)
    ciudad = models.CharField(max_length=50)
    pais= models.CharField(max_length=50)
    email = models.EmailField(max_length=50)
    fechaNacimiento= models.DateField()
    # NOTE(review): upload_to with a leading slash is an absolute path;
    # Django expects a path relative to MEDIA_ROOT — confirm intent.
    foto = models.ImageField(upload_to='/assets/images', null=True)
    # Name of the substitute deputy.
    suplente= models.CharField(max_length=100)
|
989,256 | 0d2e61a19c70f7958706b7353a5d654152f7b40c | import gym
from DQN import DQN
# NOTE(review): PATH_SAVE is not referenced in this script — confirm the
# DQN module reads it, otherwise it is dead code.
PATH_SAVE = "cartpole"
# .unwrapped drops gym's TimeLimit wrapper so episodes are not truncated.
env = gym.make('CartPole-v0').unwrapped
dqn_train = DQN(env=env, env_type="Gym", nb_action = 2, skip_frame=1)
dqn_train.train_loop()
989,257 | 53514ba7dbecdf995e8134fec2ed6aaf49493f20 | import fruits
def boy(no_of_seeds):
    """Print the fruit matching the seed count (0 or 1).

    Any other count is silently ignored, as in the original behavior.
    """
    if no_of_seeds == 0:
        print(fruits.seedless_fruit())
    elif no_of_seeds == 1:
        print(fruits.one_seed_fruit())


boy(1)
989,258 | 4dc56faa55095fde0a67a2b1441a9688ec6c2782 | # CS266: Fall 2019
# HW1
#
# Pratik Prajapati
# Ashraf Saber
#
import MerkleTree as mtree
import Block as blk
# a test script to check various functions of the Block() class.
#
txList = ['alice', 'carol', 'duck', 'bob']
txList.sort()  # sort so the tree layout (and thus its root) is deterministic
m = mtree.MerkleTree(txList)
m.generateTree()
# just any random hash to test
prevHash = '9f9d51bc70ef21ca5c14f307980a29d8'
b = blk.Block(prevHash, m)
blockHash = b.mineBlock()
# NOTE(review): the value printed as "nonce" is mineBlock()'s return —
# confirm whether it is the nonce or the block hash.
print('mined nonce = %s' % (blockHash))
989,259 | 8f56191e398890b4aa71158920136557a6266d93 | # Generated by Django 3.0.8 on 2021-04-21 18:02
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the `url` field from `datosscrapy`
    (reverses 0016_datosscrapy_url)."""

    dependencies = [
        ('miweb', '0016_datosscrapy_url'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='datosscrapy',
            name='url',
        ),
    ]
|
989,260 | 56f92268cf45235ceff3271c78a2d31327ed4a4b | class FontWeightEnum(object):
Light = .4
Normal = .6
Bold = .9
class OutlineLenghtEnum(object):
    """Outline thickness presets (unit not defined here — presumably a
    fraction of some base size; confirm against the renderer).

    NOTE(review): "Lenght" is a typo for "Length", but renaming the class
    would break existing callers.
    """
    NoOutline = 0
    Little = 0.125
    Medium = 0.25
    Big = 0.5
|
989,261 | 0b000a4e2172e40d19c80788d8db88efedb78cb1 | #imiport minimize and the circuit eigenfinder
from Diag_trans import trans
from scipy.optimize import minimize
import numpy as np
import matplotlib.pyplot as plt
#this function creates a loss function and calculates its value for given circuit parameters that are used to calc energies
def loss(x):
    """Scalar loss for transmon circuit parameters x = [Ej, Ec].

    Penalises charge dispersion (variance of the E1 and E2 bands),
    anharmonicity away from the target ratio, and deviation of the first
    transition from 4 GHz.
    """
    Ej, Ec = x[0], x[1]
    # Build the circuit and diagonalise for the three lowest bands.
    circuit = trans(Ec=Ec, Ej=Ej)
    E0, E1, E2 = circuit.energies()
    # Sensitivity weights for each term (a=1, b=1, c=2.55, d=1 worked
    # beforehand).
    w_var1 = 1.0      # variance of band 1
    w_var2 = 1.0      # variance of band 2
    w_anharm = 2.55   # anharmonicity term
    w_freq = 1        # closeness of the first transition to 4 GHz
    m0, m1, m2 = np.mean(E0), np.mean(E1), np.mean(E2)
    # Anharmonicity enters as the ratio of the E1->E2 to E0->E1 transition.
    return (w_var1 * np.var(E1)
            + w_var2 * np.var(E2)
            + w_anharm * np.log((m2 - m1) / (m1 - m0) - 0.8)
            + w_freq * abs(abs(m1 - m0) - 4))
if __name__=='__main__':
    # start values for optimization
    Ec=2
    Ej=20
    x0=np.array([Ej,Ec])
    # parameter bounds, as per the Koch transmon paper
    bounds=[(0.01,140),(0.01,140)]
    bounds=bounds  # NOTE(review): no-op self-assignment
    # gradient-descent low-memory BFGS method
    res1 = minimize(loss, x0, method='L-BFGS-B',bounds=bounds)
    # create a circuit from the optimized parameters to display
    final_cir=trans(Ej=float(res1['x'][0]),Ec=float(res1['x'][1]))
    E0,E1,E2=final_cir.energies()
    # print parameters of the final circuit
    print(res1)
    print(" var 1: \t"+str(np.var(E1)))
    print(" var 2: \t"+str(np.var(E2)))
    print(" anharm: \t"+str((np.mean(E2)-np.mean(E1))/(np.mean(E1)-np.mean(E0))))
    print(" Ej/Ec: \t"+str(res1['x'][0]/res1['x'][1]))
    print(" t1: \t\t"+ str(np.mean(E1-E0)))
    # display the energy bands vs charge offset, shifted so the mean of the
    # ground band sits at zero
    t=np.arange(-10,10,1)
    fig,ax=plt.subplots()
    plt.subplots_adjust(left=0.25, bottom=0.25)
    plt.xlabel("Charge offset")
    plt.ylabel("Energy(G)")
    E1-=np.mean(E0)
    E2-=np.mean(E0)
    E0-=np.mean(E0)
    l = plt.plot(t, (E0), 'red', t, (E1), 'blue', t, (E2), 'green')
    plt.show()
|
989,262 | ef963248dd8cc13f4ddbd152aaa98a701988a1da | #!/usr/bin/env python
# This is a command line program to download a user's avatar from
# GitHub. Usage: 'python get_avatar.py <GitHub_username>
import sys
import json
import argparse
import requests
import shutil
# Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('username')
args = parser.parse_args()

# Call the GitHub api and get user info
# NOTE(review): no timeout is set on either request; a hung connection
# blocks forever — consider requests.get(..., timeout=...).
requestURL = 'https://api.github.com/users/' + args.username
result = requests.get(requestURL)
if result.ok :
    user_info = json.loads(result.content)
    avatarURL = user_info['avatar_url']
else:
    sys.stderr.write("Error fetching user information for {0};\
    exiting now, sorry...\n".format(args.username))
    sys.exit()

# Download and save image file (streamed, so large avatars aren't buffered
# whole in memory)
imageFile = requests.get(avatarURL, stream=True)
if imageFile.ok:
    with open(args.username + '.png', 'wb') as outFile:
        shutil.copyfileobj(imageFile.raw, outFile)
989,263 | bde8ac94db66afbb5907285d752fbf82108530e4 | # code_logzero.py
from logzero import logger, logfile, setup_logger
import logging
import config_logzero as log
import modulo_interno
# Log messages
log.config_log().info("This log message saved in the log file")
log.config_log().warning("This log message saved in the log file")
modulo_interno.funcion_en_modulo_interno() |
989,264 | b18ec9813adc21d2e98618cbf3ed232f72d05df9 | # 定义一个0~1000之间的随机整数n为答案,提示用户输入一个整数k参与游戏猜答案
# If k != n: report that the guess was wrong and ask again.
# If k == n: report "after x guesses, correct — the answer is n".
import random as r
n = r.randint(0, 1000)  # the correct answer
k = -1  # the current guess
x = 0   # number of guesses made so far
# Keep prompting until the guess is right.
while k != n:
    if x:  # at least one guess has already been made
        # '猜大了' = "too high", '猜小了' = "too low"
        print('猜大了' if k > n else '猜小了')
    # prompt: "please enter your answer:"
    k = int(input('请输入你的答案:'))
    x += 1
# Correct: "after {x} guesses, congratulations — the answer is {n}"
print(f'经过{x}次猜测,恭喜回答正确,答案是{n}')
|
989,265 | c67dd4dab3e7b1d19a6543da0df8309636cf0581 | from rankor import app
def uwsgi(settings):
    """uWSGI entry point: start the app in 'pyramid' mode and return the
    WSGI callable. `settings` is accepted but unused here."""
    app.start('pyramid')
    return app.make_wsgi_object()
def tests(settings):
    """Test-run entry point: start the app in 'tests' mode.
    `settings` is accepted but unused here."""
    app.start('tests')
|
989,266 | dcb86be0a53616ead7f05bf67a67b54f6969b819 | # Import the Degree Programs I saved from the ATA Outcomes file into Django
"""
In this long comment I'm going to go ahead and put down a few notes on the .csv
we're reading from.
The first thing to understand is that this CSV was exported from an excel
spreadsheet that was meant to be looked at, not fed into a computer program.
That means it's very unclean mixed data, with some rows being purely visual and
others being actual content.
Each degree program is named in a row, and then its classes follow. The classes
are split into categories which we largely don't care about except electives,
because unlike the other classes electives are not all required to be taken. In
an ideal file format whether a class is an elective would be a checkbox in a
separate column. But this is not an ideal format so instead it comes in this
sectioning. That means we either have to manually fix it (eh), or do ugly hack
stuff to make it work (also eh, but my first approach).
Every individual class has a department, label, credits, and then boolean core
learning outcomes. It's necessary to be careful with the credits because the
person who put together the spreadsheet 'chained together' classes which can be
taken in place of each other. Which by the way, some classes can be taken in
place of each other so you have to account for that in the data structure too.
[{"label":"My Degree Program Name",
"credits":90,
"elective_credits":10,
"classes":{"id":"MATH 110",
"label":"Introduction To Linear Algebra",
"lower_credit_bound":5,
"upper_credit_bound":10,
"credit_type":"QS",
"CLO":{1:False,
2:True,
3:False,
...}
"substitutes":[{"id":...}, ...]
"elective?":False}
"""
import re
import csv
from django.core.management.base import BaseCommand, CommandError
from clo_app import models
class Command(BaseCommand):
    help = "Import JD's manually cleaned .csv of the degree programs and their CLO."

    def add_arguments(self, parser):
        """Register the CLI options: positional csv path, plus mutually
        exclusive --initialize (seed fixed rows) and --delete (wipe data)."""
        parser.add_argument("filepath", nargs=1, type=str)
        parser.add_argument("--initialize", action="store_true", dest="init")
        parser.add_argument("--delete", action="store_true", dest="delete")
    def handle(self, *args, **options):
        """Entry point. Runs the one-off --initialize / --delete actions,
        or imports degree programs from the given .csv in two passes:
        objects first (programs, courses, CLOs), then relationships."""
        if options["init"]:
            self.initialize()
            print("Initial objects were created without errors!")
            return "\n"  # Django wraps I/O and tries to concat return value as string
        # Elif because init and delete are mutually exclusive
        elif options["delete"]:
            wumpus_q = input(
                "This will PERMANENTLY DELETE all data currently loaded"
                " in the application, so I just want to be sure you mean it."
                " Type 'wumpus' in to prove you really read this: ")
            if wumpus_q.lower() == "wumpus":
                self.delete_all()
                print("All gone.")
            else:
                print("Nope. Did you include the single quotes? Don't.")
            return "\n"
        with open(options["filepath"][0]) as programs_csv:
            degree_programs = csv.reader(programs_csv)
            next(degree_programs)  # skip the header row
            ATA_line = next(degree_programs)
            if not ATA_line[0].startswith("ATA"):
                raise Exception("Second line of .csv was not expected ATA line!")
            # Chunk the file into per-program row groups, each headed by
            # its "ATA..." line.
            degree_program_rows = []
            while ATA_line:
                program_rows, new_ATA_line = self.dp_rows(degree_programs, ATA_line)
                program_rows.insert(0, ATA_line)
                degree_program_rows.append(program_rows)
                ATA_line = new_ATA_line
            # On the first pass we construct Degree Programs and Courses
            # This requires an initialization pass to have already been run
            # TODO: Add code checking for the initialization pass and
            # raise error if not present.
            try:
                models.CoreLearningOutcome.objects.get(id=1)
            except models.CoreLearningOutcome.DoesNotExist:
                raise ValueError("You need to run the initialization pass first with"
                                 " --initialize")
            dp_objects = []
            course_objects = []
            clo_objects = []
            for dp_rowset in degree_program_rows:
                # Check for "N.A." and correct it to null if found
                try:
                    float(dp_rowset[0][1])
                except ValueError:
                    dp_rowset[0][1] = None
                try:
                    float(dp_rowset[0][2])
                except ValueError:
                    dp_rowset[0][2] = None
                dp = models.DegreeProgram(label=dp_rowset[0][0],
                                          credits=dp_rowset[0][1],
                                          elective_credits=dp_rowset[0][2])
                dp_objects.append(dp)
                course_object_set, clo_set = self.build_courses_from_rows(dp_rowset)
                course_objects += course_object_set
                clo_objects += clo_set
            # Since we reference objects created previously in pass two
            # we have to save the ones made in the first pass.
            [dp.save() for dp in dp_objects]
            [course.save() for course in course_objects]
            print("Degree Programs, Courses and Course Learning Outcomes saved!")
            self.pass_two(degree_program_rows)
            print("Course relationships saved!")
            print("Data imported.")
    def pass_two(self, degree_program_rows):
        """On the second pass we construct Degree Program and Course
        Relationships.

        Walks each program's rows again and records, per course row, whether
        it is a directly required course, a generic placeholder, or a
        substitute ("or"-chained) for the preceding non-substitute course.
        """
        class_id_re = re.compile("[A-Z]+&* [0-9]+")
        for dp_rowset in enumerate(degree_program_rows):
            degree_program = models.DegreeProgram.objects.get(label=dp_rowset[1][0][0])
            # (rowset index, row index) of the most recent non-substitute course.
            last_parent = (dp_rowset[0], 1)
            # Check to make sure first course in program isn't generic
            # If it is, change it
            if dp_rowset[1][1][0].startswith("Generic"):
                for course_row in enumerate(dp_rowset[1]):
                    if course_row[1][0].startswith("ATA"):
                        continue
                    elif not course_row[1][0].startswith("Generic"):
                        last_parent = (dp_rowset[0], course_row[0])
                        break
            substitute = False
            for row in enumerate(dp_rowset[1]):
                if not class_id_re.fullmatch(row[1][0].strip()):
                    continue
                course_id = row[1][0]
                course_title = row[1][1]
                course_credits = row[1][2]
                course = models.Course.objects.get(id=course_id)
                # Set flags on elective, substitute, and generic
                elective = bool(row[1][-1])
                # NOTE(review): rows that matched class_id_re can never start
                # with "Generic", so the generic branches below look
                # unreachable from this loop — confirm.
                generic = course_id.startswith("Generic")
                # Get parent course
                parent = degree_program_rows[last_parent[0]][last_parent[1]]
                parent_course = models.Course.objects.get(id=parent[0])
                if not substitute and not generic:
                    dpcs = models.DPCourseSpecific(
                        degree_program=degree_program,
                        course=course,
                        elective=elective)
                    dpcs.save()
                    last_parent = (dp_rowset[0], row[0])
                elif generic and not substitute:
                    credit_type = self.extract_generic_credit_type(row[1])
                    dpcg = models.DPCourseGeneric(
                        degree_program=degree_program,
                        credit_type=credit_type,
                        credits=course_credits,
                        elective=elective)
                    dpcg.save()
                    # Omission of last parent update purposeful
                    # as a hack because I don't think this
                    # situation ever actually occurs in the data
                    #TODO: Do this correctly.
                elif substitute and not generic:
                    dp_parent_course = models.DPCourseSpecific.objects.get(
                        degree_program=degree_program,
                        course=parent_course)
                    dpcss = models.DPCourseSubstituteSpecific(
                        parent_course=dp_parent_course,
                        course=course)
                    dpcss.save()
                elif substitute and generic:
                    dp_parent_course = models.DPCourseSpecific.objects.get(
                        degree_program=degree_program,
                        course=parent_course)
                    credit_type = self.extract_generic_credit_type(row[1])
                    dpcsg = models.DPCourseSubstituteGeneric(
                        parent_course=dp_parent_course,
                        credit_type=credit_type,
                        credits=course_credits,
                        elective=elective)
                    dpcsg.save()
                else:
                    raise ValueError("Improper combination of flags!")
                # A title ending in "or" marks the NEXT row as a substitute
                # for this course.
                substitute = course_title.strip().endswith("or")
        return True
def extract_generic_credit_type(self, course_row):
"""Given a course row, extract and return its generic credit type."""
course_id = row[0]
credit_type_res = {re.compile("Communication"):"CS",
re.compile("Natural Science"):"NS",
re.compile("Humanities"):"H",
re.compile("Performance"):"HP",
re.compile("Social"):"SS",
re.compile("Lab"):"NSL",
re.compile("Quant"):"QS",
re.compile("Elective"):"E",
re.compile("Diversity"):"DC",
re.compile("Prereq"):"PR"}
for credit_type_re in credit_type_res:
if credit_type_re.search(course_id):
type_string = credit_type_res[credit_type_re]
credit_type = models.CreditType.objects.get(label_short=type_string)
return credit_type
def initialize(self):
"""Run an initialization pass if the user requests it. This is necessary
before we can construct the other objects in the system."""
# Construct Core Learning Outcomes
clo_1 = models.CoreLearningOutcome(
label="Engage and take responsibility as active learners",
description=(
"Students will be involved in the"
" learning process as they gain deeper"
" levels of understanding of the subject"
" matter. They will design, complete and"
" analyze projects while developing group"
" interaction and leadership skills."))
clo_2 = models.CoreLearningOutcome(
label="Think critically",
description=(
"Students will develop and"
" practice analytical skills, problem-solving"
" skills and quantitative reasoning skills."
" Using creativity and self-reflection,"
" they will be able to engage in inquiry"
" that produces well-reasoned, meaningful"
" conclusions."))
clo_3 = models.CoreLearningOutcome(
label="Communicate effectively",
description=(
"Students will develop the organizational"
" and research skills necessary to write"
" and speak effectively. The students will"
" demonstrate awareness of different"
" audiences, styles, and approaches to"
" oral and written communication."))
clo_4 = models.CoreLearningOutcome(
label="Participate in diverse environments",
description=(
"Students will gain the awareness of"
" and sensitivity to diversity, including"
" one’s own place as a global citizen."
" Students attain knowledge and understanding"
" of the multiple expressions of diversity,"
" and the skills to recognize, analyze"
" and evaluate diverse issues and perspectives."))
clo_5 = models.CoreLearningOutcome(
label="Utilize information literacy skills",
description=(
"Students will develop and employ"
" skills to recognize when information"
" is needed and to locate, evaluate,"
" effectively use and communicate"
" information in its various forms."))
clo_6 = models.CoreLearningOutcome(
label="Demonstrate computer and technology proficiency",
description=("Students will use computers and"
" technology as appropriate in their course of study."))
clo_7 = models.CoreLearningOutcome(
label="Identify elements of a sustainable society",
description=("Students will integrate and"
" apply economic, ecological, and eco-justice"
" concepts into a systems-thinking framework."))
clo_1.save()
clo_2.save()
clo_3.save()
clo_4.save()
clo_5.save()
clo_6.save()
clo_7.save()
# Construct Credit Types
CS = models.CreditType(label_short="CS",
label="Communication Skills")
NS = models.CreditType(label_short="NS",
label="Natural Science")
H = models.CreditType(label_short="H",
label="Humanities")
HP = models.CreditType(label_short="HP",
label="Humanities Performance")
SS = models.CreditType(label_short="SS",
label="Social Sciences")
NSL = models.CreditType(label_short="NSL",
label="Natural Science Lab")
QS = models.CreditType(label_short="QS",
label="Quantitative Skills")
E = models.CreditType(label_short="E",
label="Elective")
DC = models.CreditType(label_short="DC",
label="Diversity Course")
PR = models.CreditType(label_short="PR",
label="Generic Prerequisite")
CS.save()
NS.save()
H.save()
HP.save()
SS.save()
NSL.save()
QS.save()
E.save()
DC.save()
PR.save()
    def delete_all(self):
        """Delete every object in the database. This is so you can reseed it.
        Mostly just for debugging.

        CoreLearningOutcome and CreditType rows (created by --initialize)
        are deliberately kept, per the commented-out lines below.
        """
        models.CourseLearningOutcome.objects.all().delete()
        #models.CoreLearningOutcome.objects.all().delete()
        #models.CreditType.objects.all().delete()
        models.Course.objects.all().delete()
        models.DegreeProgram.objects.all().delete()
        models.DPCourseSpecific.objects.all().delete()
        models.DPCourseGeneric.objects.all().delete()
        models.DPCourseSubstituteSpecific.objects.all().delete()
        models.DPCourseSubstituteGeneric.objects.all().delete()
def dp_rows(self, csv_reader, ATA_line):
"""Extract the rows corresponding to a particular degree program and return
them.
csv_reader - The CSV reader that returns rows from the data to be imported.
ATA_line - The degree program line that was previously read."""
rows = []
# Compile a regular expression pattern matching class ID's
class_id_re = re.compile("[A-Z]+&* [0-9]+")
for row in csv_reader:
# Exit when we encounter the next ATA row after first
if row[0].startswith("ATA"):
return (rows, row)
elif class_id_re.fullmatch(row[0].strip()):
rows.append(row)
elif row[0].startswith("Generic"):
rows.append(row)
# This exit point occurs when we run out of rows to read
return (rows, None)
def build_courses_from_rows(self, rowset):
"""Take a set of rows from the .csv, and construct course objects from
them. Next we construct CourseLearningOutcomes. Then return both."""
class_id_re = re.compile("[A-Z]+&* [0-9]+")
courses = []
course_learning_outcomes = []
for row in rowset:
if not class_id_re.fullmatch(row[0].strip()):
continue
# If credit is numeric assign it to lower and upper credit bound
# Otherwise, split the credit range and assign
try:
lowercb = float(row[2])
uppercb = float(row[2])
except ValueError:
if "-" in row[2]:
bounds = row[2].split("-")
lowercb = float(bounds[0])
uppercb = float(bounds[1])
else:
lowercb = None
uppercb = None
course = models.Course(id=row[0].strip(),
label=row[1].strip(" or"),
lower_credit_bound=lowercb,
upper_credit_bound=uppercb)
course.save()
outcome_string = row[3]
clo_content = re.findall("[0-9]+", outcome_string)
for outcome in clo_content:
core_learning_outcome = models.CoreLearningOutcome.objects.get(
id=int(
outcome))
try:
models.CourseLearningOutcome.objects.get(
course=course,
learning_outcome=core_learning_outcome)
break
except models.CourseLearningOutcome.DoesNotExist:
course_learning_outcome = models.CourseLearningOutcome(
course=course,
learning_outcome=core_learning_outcome)
course_learning_outcome.save()
return (courses, course_learning_outcomes)
|
989,267 | 78ea8a7ec69051d3614e758ff2a0a245bf6b085a | import sys
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib as mpl
mpl.use('Agg')  # select the non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
args = sys.argv
# Input file: first line is "<maxLevel> <N>", followed by N*(maxLevel+2)
# "<x> <y>" sample lines, one series of N samples per level.
with open(args[1], "r") as f:
    lines = f.read().splitlines()
firstLine = lines[0].split(" ")
maxLevel = int(firstLine[0])
N = int(firstLine[1])
lines = lines[1:]
i = 0
inputData = []
# +2 to account for the input signal and the low-frequency component
# (translated from the original Japanese comment).
while i < maxLevel + 2:
    inputData.append([[],[]])
    i += 1
fig = plt.figure(figsize=(6.0,10.0))
i = 0
maxVal = 0  # NOTE(review): unused
# Distribute the samples into their series: sample i belongs to series i // N.
while i < N * (maxLevel + 2) :
    lxy = lines[i].split(" ")
    l = i // N
    inputData[l][0].append(float(lxy[0]))
    inputData[l][1].append(float(lxy[1]))
    i += 1
i = 0
# One stem subplot per series, stacked vertically, y-limits symmetric
# around zero at 110% of the series maximum.
while i < maxLevel + 2:
    ax = fig.add_subplot(maxLevel + 2, 1, i + 1)
    ax.stem(inputData[i][0], inputData[i][1], use_line_collection=True, markerfmt=' ')
    ax.set_xlim(0,N)
    ax.set_ylim(-max(inputData[i][1])*1.1, max(inputData[i][1])*1.1)
    i += 1
plt.savefig(args[2])
|
989,268 | fd38433524cc67240996c93ec121fff66296ec5d | from os import walk
import subprocess
import string
import sys
source_dir = str(sys.arg1)
print source_dir
f = []
for (dirpath, dirnames, filenames) in walk("/home/kreid/music"): #TODO: Generalize this PS1
f.extend(dirnames) #get the names of the music folders
break
f = sorted(f, key=str.lower)
tarball = ()
for letter in string.ascii_uppercase:
for file in f:
if file[0][0] == letter: #Match the folders up with the corresponding tarball
tarball = tarball + ("/home/kreid/music/" + str(file),) #Append to the tuple with path written in
elif len(tarball) != 0:
clean = ' '.join(str(i) for i in (tarball)) #format it nicely
tar_process = subprocess.Popen(["bash","-c" ,"tar cvf Music_" + str(letter) +".tar " + str(clean)]) #TODO make the file names optional, preferably incoporating $(date)
tar_process.wait()
tarball = () #reset
|
989,269 | 50dda3e5e861e8fce1ed24fa0e7444bd832b260e | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 17 12:15:53 2019
@author: TEB
"""
import numpy as np
import matplotlib.pyplot as plt
from nengo.processes import Piecewise
from World import Time
class Stimulus(Time):
    """Maps a single temperature reading to a Nengo network input.

    The temperature is converted to a spike count (piecewise cubic fits —
    apparently to the measured data referenced in __main__; confirm), the
    count is normalised to [-1, 1], and the result is wrapped in a
    Piecewise process keyed by the time instance.
    """

    def __init__(self, temperature, time_instance = 0):
        self.TimeObject = Time()
        self.Timestep = self.TimeObject.Timestep
        self.Temperature = temperature
        self.TimeInstance = time_instance
        self.StimulusMagnitude = None  # Stimulus normalised between 1 and -1
        self.StimulusDic = {}  # Nengo requires a dictionary to work
        self.NetworkInput = None  # Input of the Nengo Simulator
        self.SpikeCount = None
        self.HPReward = 0  # The Hit Point loss or gain depending on temperature.
        if time_instance is None:
            # No explicit instance given: stamp with the world clock instead.
            self.MomentInTime = self.TimeObject.Clock
        else:
            self.MomentInTime = time_instance * self.Timestep

    def temperature2spikecount(self):
        """Set SpikeCount and HPReward from Temperature.

        Each temperature band uses its own cubic fit; outside [1, 30] C the
        culture is considered dead (no spikes, maximal HP loss).
        """
        if (self.Temperature < 1) or (self.Temperature > 30):
            self.SpikeCount = 0
            self.HPReward = -2  # This HP reward must correspond to the starting max HP.
            # print("The cell culture dies instantly --> Neurons no longer spike")
        if (self.Temperature >= 1) and (self.Temperature < 10):
            self. SpikeCount = round(2.762 * (10 ** -4) * ((self.Temperature - 1) ** 3) + 0.6349 * (self.Temperature - 1) + 4.999)
            self.HPReward = -1
        if (self.Temperature >= 10) and (self.Temperature < 15):
            self.SpikeCount = round(0.0243 * ((self.Temperature - 10) ** 3) + 0.0075 * ((self.Temperature - 10) ** 2) + 0.702 * (self.Temperature - 10) + 10.9143)
            self.HPReward = 0
        if (self.Temperature >= 15) and (self.Temperature < 20):
            self.SpikeCount = round(- 0.159 * ((self.Temperature - 15) ** 3) + 0.3725 * ((self.Temperature - 15) ** 2) + 2.602 * (self.Temperature - 15) + 17.653)
            self.HPReward = 0.5
        if (self.Temperature >= 20) and (self.Temperature < 22):
            self.SpikeCount = round(0.6552 * ((self.Temperature - 20) ** 3) - 2.0126 * ((self.Temperature - 20) ** 2) - 5.5985 * (self.Temperature - 20) + 20.0999)
            self.HPReward = 1
        if (self.Temperature >= 22) and (self.Temperature < 25):
            self.SpikeCount = round(- 0.2135 * ((self.Temperature - 22) ** 3) + 1.9187 * ((self.Temperature - 22) ** 2) - 5.7864 * (self.Temperature - 22) + 6.0941)
            self.HPReward = -0.5
        if (self.Temperature >= 25) and (self.Temperature <= 30):
            self.SpikeCount = round(1.8272 * (10 ** -4) * ((self.Temperature - 25) ** 3) - 0.0027 * ((self.Temperature - 25) ** 2) + 0.0385 * (self.Temperature - 25) + 0.2389)
            self.HPReward = - 1

    def spikecount2stimulus(self):
        """Convert the spike count into a normalised Piecewise network input."""
        self.temperature2spikecount()
        # Map a count in [0, 25] linearly onto [-1, 1].
        self.StimulusMagnitude = 2 * (self.SpikeCount / 25) - 1
        self.StimulusDic = {self.TimeInstance: self.StimulusMagnitude}
        self.NetworkInput = Piecewise(self.StimulusDic)
class StimulusTimeSeries(Stimulus):
    """Map a series of temperatures at given time instances to a single
    Piecewise Nengo input, plus per-step spike counts, stimulus magnitudes
    and HP rewards.

    NOTE: This class was not used in the simulation (per the original
    author's comment).
    """

    def __init__(self, temperatures, time_instances):
        # BUG FIX: the Time instance must be stored on self; the original
        # kept it in a local (`TimeObject`) but later read `self.TimeObject`
        # in the time_instances-is-None branch, raising AttributeError.
        # This also matches the sibling Stimulus class.
        self.TimeObject = Time()
        self.Timestep = self.TimeObject.Timestep
        self.Temperatures = temperatures
        self.TimeInstances = time_instances
        self.PiecewiseStimulations = None  # Dictionary containing one temperature element.
        self.StimuliDic = {}
        self.NetworkInputs = None
        self.HPRewards = []
        if time_instances is None:
            # NOTE(review): this division yields a float, which np.zeros /
            # range below cannot take — this branch looks untested; confirm.
            self.NumberOfInstances = self.TimeObject.Clock / self.Timestep
            self.MomentsInTime = np.linspace(0.0, self.TimeObject.Clock, self.NumberOfInstances)
        else:
            self.NumberOfInstances = np.size(time_instances)
            self.MomentsInTime = self.Timestep * time_instances
        self.StimuliMagnitude = np.zeros(self.NumberOfInstances)
        self.SpikeCounts = np.zeros(self.NumberOfInstances)

    def temps2stimuli(self):
        """Convert every temperature into a stimulus and merge them into one
        Piecewise input keyed by the actual moments in time."""
        for i in range(0, self.NumberOfInstances):
            stimulus = Stimulus(self.Temperatures[i], i)
            stimulus.spikecount2stimulus()
            self.SpikeCounts[i] = stimulus.SpikeCount
            self.StimuliMagnitude[i] = stimulus.StimulusMagnitude
            self.HPRewards.append(stimulus.HPReward)
            # BUG FIX: StimulusDic maps the time index to a *scalar*
            # magnitude; the original indexed the scalar (`c[0]`), raising
            # TypeError on the first iteration. Re-key the scalar by the
            # actual moment in time instead.
            magnitude = stimulus.StimulusDic[i]
            stimulus.StimulusDic = {self.MomentsInTime[i]: magnitude}
            self.StimuliDic = {**self.StimuliDic, **stimulus.StimulusDic}
        self.NetworkInputs = Piecewise(self.StimuliDic)
if __name__ == "__main__":
    # Demo/inspection script: fit data, then plot spike counts, stimulus
    # voltage and HP reward as functions of temperature.
    # Konstantin Nikolic's Data Confidential
    Temperatures = np.array([1, 10, 15, 20, 22, 25, 30]);
    NumberOfSpikes = np.array([5, 11, 17, 23, 3, 1, 0]);
    temperatureInstances = np.linspace(-10, 40, 1000) # Temperatures in degree Celsius.
    timeInstances = np.linspace(0, 1000, 1000) # Time instances.
    sts = StimulusTimeSeries(temperatureInstances, timeInstances)
    sts.temps2stimuli()
    # Piecewise stimulus dictionary keyed by moment in time (unused below).
    ts = sts.StimuliDic
    # Figure 1: measured spike counts vs. the extrapolated curve.
    plt.figure()
    plt.plot(Temperatures, NumberOfSpikes, 'X', label = 'Original Data Points')
    plt.plot(temperatureInstances, sts.SpikeCounts, label = 'Extrapolated Data Points')
    plt.title('Spike Count as a Function of Temperature', {'fontsize': 20})
    plt.xlabel('Temperature in Degrees Celcius [C]', {'fontsize': 16})
    plt.ylabel('Spike Count in Units [U]', {'fontsize': 16})
    plt.legend()
    # Figure 2: stimulus magnitude derived from the spike counts.
    plt.figure()
    plt.plot(temperatureInstances, sts.StimuliMagnitude)
    # settings = {'fontsize': rcParams['axes.titlesize'], 'fontweight' : rcParams['axes.titleweight'], 'verticalalignment': 'baseline', 'horizontalalignment': loc}
    plt.title('Voltage as a Function of Temperature', {'fontsize': 20})
    plt.xlabel('Temperature in Degrees Celcius [C]', {'fontsize': 16})
    plt.ylabel('Voltage in Millivolts [mV]', {'fontsize': 16})
    # Figure 3: hit-point reward per temperature.
    plt.figure()
    plt.plot(temperatureInstances, sts.HPRewards)
    # settings = {'fontsize': rcParams['axes.titlesize'], 'fontweight' : rcParams['axes.titleweight'], 'verticalalignment': 'baseline', 'horizontalalignment': loc}
    plt.title('Hit Point Reward as a Function of Temperature', {'fontsize': 20})
    plt.xlabel('Temperature in Degrees Celcius [C]', {'fontsize': 16})
    # NOTE(review): no plt.show() is called, so nothing is displayed when
    # run non-interactively -- confirm whether that is intentional.
    plt.ylabel('Hit Point Reward in Arbitrary Units [A.U.]', {'fontsize': 12})
|
989,270 | 0ba2f25d2e4b2b6dbf34b5e176b66173592a729a | from django.shortcuts import render
from produce.models import ProduceType,ProgrameType,ProduceTypeTwo
# Create your views here.
def get_type_navigation():
    """Return the queryset of all ProduceType rows for the navigation."""
    return ProduceType.objects.all()
def get_type_navigation_two():
    """Return the queryset of all ProduceTypeTwo rows for the navigation."""
    return ProduceTypeTwo.objects.all()
def get_programe_navigation():
    """Return the queryset of all ProgrameType rows for the navigation."""
    return ProgrameType.objects.all()
|
989,271 | 075ad320f728d30925f42884f09c630699fce5dc | import tornado.web
import tornado.escape
import pymongo
from gmail import GMailPy
from ATLiteExceptions import *
class EMailer(tornado.web.RequestHandler):
    """Handler that relays a JSON-described message through GMail.

    Expects a ``json`` request argument whose decoded object carries
    ``userGuid``, ``message``, ``subject`` and ``to_addrs`` (comma
    separated); the GUID must match a document in the ``users`` collection.
    """
    def initialize(self):
        #self.connection = pymongo.connection.Connection()
        #self.db = self.connection.atlitepy
        # Bug fix: MongoClient was referenced unqualified (NameError) and
        # the database was taken from an undefined local `client` instead
        # of the attribute assigned on the previous line.
        self.client = pymongo.MongoClient()
        self.db = self.client.atlitepy
        self.required_properties = ['userGuid', 'message', 'subject', 'to_addrs']
    def get(self):
        self.handle_request()
    def post(self):
        self.handle_request()
    def handle_request(self):
        """Validate the request, send the mail, and reply with JSON."""
        try:
            json = self.json_in_request()
            properties = tornado.escape.json_decode(json)
            self.check_required_properties(properties)
            self.check_user_permissions(properties['userGuid'])
            g = GMailPy()
            for addr in properties['to_addrs'].split(','):
                if len(addr) > 0:
                    g.add_to_addr(addr.strip())
            g.set_subject(properties['subject'])
            g.set_message(properties['message'])
            g.send_message()
            self.write(tornado.escape.json_encode({'success':'sent'}))
        except JSONMissingError:
            self.write(tornado.escape.json_encode({'error':'no JSON found'}))
        except JSONPropertyMissingError as e:
            # Bug fix: `except X, e` is Python-2-only syntax; `as` works on
            # 2.6+ and 3.  str(e) replaces the deprecated e.message.
            self.write(tornado.escape.json_encode({'error': str(e)}))
        except PermissionDeniedError:
            self.write(tornado.escape.json_encode({'error':'invalid user GUID'}))
    def json_in_request(self):
        """Return the `json` request argument or raise JSONMissingError."""
        _json = self.get_argument('json', None)
        if _json is not None:
            return _json
        raise JSONMissingError()
    def check_user_permissions(self, userGuid):
        """Raise PermissionDeniedError unless the GUID matches a user."""
        users = self.db.users
        user = users.find_one({'user_guid':userGuid})
        if user is None:
            raise PermissionDeniedError()
    def check_required_properties(self, props):
        """Raise JSONPropertyMissingError naming the first missing key."""
        for p in self.required_properties:
            if p not in props:
                raise JSONPropertyMissingError('property %s not found' % (p))
|
989,272 | 36f9f3334fe4ee937c8742b2765c864a6f4c0460 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """Maximum depth of a binary tree via collecting every leaf's depth."""
    def __init__(self):
        # Leaf depths gathered by the most recent maxDepth() call.
        self.res = []
    def maxDepth(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        if root is None:
            return 0
        # Bug fix: clear results from any previous call so a reused
        # Solution instance does not report a stale (larger) depth.
        self.res = []
        self.recursive_tree_depth(root, 1)
        return max(self.res)
    def recursive_tree_depth(self, node, count):
        """Record into self.res the depth of every leaf below *node*."""
        if node.left is None and node.right is None:
            self.res.append(count)
            return
        if node.left is not None:
            self.recursive_tree_depth(node.left, count + 1)
        if node.right is not None:
            self.recursive_tree_depth(node.right, count + 1)
|
989,273 | 3c02db350d6e934d3f444f11cb7d4f61b18a78a5 | from flask import Flask, render_template, flash, url_for, redirect, request
from sqlalchemy_searchable import make_searchable
from user import User
from extensions import db, mail, login_manager, bcrypt
def configure_extensions(app):
    """Bind the Flask extensions (db, login, mail, bcrypt) to *app*."""
    db.init_app(app)
    # login_manager.refresh_view = 'user.reauth'
    login_manager.init_app(app)
    @login_manager.user_loader
    def user_loader(user_email):
        # The session stores the user's email; resolve the account by it.
        return User.query.filter_by(email=user_email).first()
    # Anonymous visitors are redirected to the sign-in view with an
    # "info"-category flash message.
    login_manager.login_view = 'user.signin'
    login_manager.login_message_category = "info"
    mail.init_app(app)
    bcrypt.init_app(app)
def configure_blueprints(app):
    """Register every application blueprint on *app*."""
    # Imported here (not at module level) to avoid circular imports.
    from user import user_bp
    from main import main_bp
    from resource import resource_bp
    app.register_blueprint(user_bp)
    app.register_blueprint(main_bp)
    app.register_blueprint(resource_bp)
def configure_hook(app):
    """Install request hooks; the before-request hook is a placeholder."""
    @app.before_request
    def before_request():
        # Intentionally empty for now.
        pass
def configure_error_handlers(app):
    """Render the shared error template for the common HTTP error codes."""
    def _register(code, message):
        # Bind code/message as defaults so each handler keeps its own pair.
        @app.errorhandler(code)
        def handler(e, _code=code, _message=message):
            return render_template('error.html', message=_message), _code
    for code, message in ((403, '403 forbidden'),
                          (404, '404 not found'),
                          (410, '410 gone'),
                          (500, '500 internal error')):
        _register(code, message)
def configure_cli(app):
    """Register custom commands on the `flask` CLI."""
    @app.cli.command()
    def initdb():
        # Destructive: wipes the whole schema before recreating it.
        db.drop_all()
        # NOTE(review): make_searchable() runs before configure_mappers();
        # sqlalchemy-searchable expects this ordering -- confirm against
        # its documentation before reordering.
        make_searchable()
        db.configure_mappers()
        db.create_all()
# Application bootstrap: create the app, load config, then wire everything.
app = Flask(__name__)
app.config.from_object('config')
# NOTE(review): configure_hook() is defined above but never invoked here --
# confirm whether it should be called during bootstrap.
configure_blueprints(app)
configure_extensions(app)
configure_error_handlers(app)
configure_cli(app)
# Imported for its side effect of registering the admin views.
from admin import views
|
989,274 | 7d42d8dc0ac24c689fb274efe6989a3443770d57 | # Author: Alexander Bokovoy <abokovoy@redhat.com>
# Tomas Babej <tbabej@redhat.com>
#
# Copyright (C) 2014 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''
This base module contains default implementations of IPA interface for
interacting with system services.
'''
from __future__ import absolute_import

import collections
import json
import logging
import os
import time
import warnings

try:  # Python 3.3+; the alias in `collections` was removed in Python 3.10
    from collections.abc import Mapping
except ImportError:  # Python 2 fallback
    from collections import Mapping

import six

from ipapython import ipautil
from ipaplatform.paths import paths
# Canonical names of services as IPA wants to see them. As we need to have
# *some* naming, set them as in Red Hat distributions. Actual implementation
# should make them available through knownservices.<name> and take care of
# re-mapping internally, if needed
wellknownservices = ['certmonger', 'dirsrv', 'httpd', 'ipa', 'krb5kdc',
'messagebus', 'nslcd', 'nscd', 'ntpd', 'portmap',
'rpcbind', 'kadmin', 'sshd', 'autofs', 'rpcgssd',
'rpcidmapd', 'pki_tomcatd', 'chronyd', 'domainname',
'named', 'ods_enforcerd', 'ods_signerd', 'gssproxy']
# The common ports for these services. This is used to wait for the
# service to become available.
wellknownports = {
'dirsrv': [389], # only used if the incoming instance name is blank
'pki-tomcatd@pki-tomcat.service': [8080, 8443],
'pki-tomcat': [8080, 8443],
'pki-tomcatd': [8080, 8443], # used if the incoming instance name is blank
}
SERVICE_POLL_INTERVAL = 0.1 # seconds
class KnownServices(Mapping):
    """
    KnownServices is an abstract class factory that should give out instances
    of well-known platform services. Actual implementation must create these
    instances as its own attributes on first access (or instance creation)
    and cache them.

    Fix: derive from ``Mapping`` imported via ``collections.abc`` (with a
    Python 2 fallback) -- the ``collections.Mapping`` alias was removed in
    Python 3.10.
    """
    def __init__(self, d):
        # Backing dict of service name -> service object.
        self.__d = d

    def __getitem__(self, key):
        return self.__d[key]

    def __iter__(self):
        return iter(self.__d)

    def __len__(self):
        return len(self.__d)

    def __call__(self):
        return six.itervalues(self.__d)

    def __getattr__(self, name):
        # Allow attribute-style access, e.g. knownservices.httpd.
        try:
            return self.__d[name]
        except KeyError:
            raise AttributeError(name)
class PlatformService(object):
    """
    PlatformService abstracts out external process running on the system
    which is possible to administer (start, stop, check status, etc).

    The base implementation only maintains the "started by IPA" service
    list file; subclasses drive the real service manager.
    """
    def __init__(self, service_name, api=None):
        # pylint: disable=ipa-forbidden-import
        import ipalib  # FixMe: break import cycle
        # pylint: enable=ipa-forbidden-import
        self.service_name = service_name
        if api is not None:
            self.api = api
        else:
            self.api = ipalib.api
            warnings.warn(
                "{s.__class__.__name__}('{s.service_name}', api=None) "
                "is deprecated.".format(s=self),
                RuntimeWarning, stacklevel=2)

    @staticmethod
    def _load_service_list():
        """Read the IPA started-services list; [] when unreadable."""
        try:
            with open(paths.SVC_LIST_FILE, 'r') as f:
                return json.load(f)
        except Exception:
            # not fatal, may be the first service
            return []

    @staticmethod
    def _save_service_list(svc_list):
        """Persist the IPA started-services list."""
        with open(paths.SVC_LIST_FILE, 'w') as f:
            json.dump(svc_list, f)

    def start(self, instance_name="", capture_output=True, wait=True,
              update_service_list=True):
        """
        When a service is started record the fact in a special file.
        This allows ipactl stop to always stop all services that have
        been started via ipa tools
        """
        if not update_service_list:
            return
        svc_list = self._load_service_list()
        if self.service_name not in svc_list:
            svc_list.append(self.service_name)
        self._save_service_list(svc_list)

    def stop(self, instance_name="", capture_output=True,
             update_service_list=True):
        """
        When a service is stopped remove it from the service list file.
        """
        if not update_service_list:
            return
        svc_list = self._load_service_list()
        # Remove every occurrence, in case the name was recorded twice.
        while self.service_name in svc_list:
            svc_list.remove(self.service_name)
        self._save_service_list(svc_list)

    # The remaining operations are no-ops / conservative defaults that
    # concrete platform services override.
    def reload_or_restart(self, instance_name="", capture_output=True,
                          wait=True):
        return

    def restart(self, instance_name="", capture_output=True, wait=True):
        return

    def is_running(self, instance_name="", wait=True):
        return False

    def is_installed(self):
        return False

    def is_enabled(self, instance_name=""):
        return False

    def is_masked(self, instance_name=""):
        return False

    def enable(self, instance_name=""):
        return

    def disable(self, instance_name=""):
        return

    def mask(self, instance_name=""):
        return

    def unmask(self, instance_name=""):
        return

    def install(self, instance_name=""):
        return

    def remove(self, instance_name=""):
        return
class SystemdService(PlatformService):
    """PlatformService implementation that drives units via systemctl."""
    # Template for the per-service wants directory under /etc/systemd.
    SYSTEMD_SRV_TARGET = "%s.target.wants"
    def __init__(self, service_name, systemd_name, api=None):
        # systemd_name may be a plain unit ("httpd.service") or a
        # template ("pki-tomcatd@.service").
        super(SystemdService, self).__init__(service_name, api=api)
        self.systemd_name = systemd_name
        # Path of the unit file shipped under the systemd lib directory.
        self.lib_path = os.path.join(paths.LIB_SYSTEMD_SYSTEMD_DIR,
                                     self.systemd_name)
        # Lazily-computed cache of os.path.exists(self.lib_path).
        self.lib_path_exists = None
    def service_instance(self, instance_name, operation=None):
        """Map (unit template, instance_name) to a concrete unit name.

        Handles the dirsrv realm instance, explicit unit files, template
        units ("name@instance.service"), a "name.target" fallback, and
        finally the raw systemd name.  *operation* is accepted but unused
        here; subclasses may dispatch on it.
        """
        if self.lib_path_exists is None:
            self.lib_path_exists = os.path.exists(self.lib_path)
        elements = self.systemd_name.split("@")
        # Make sure the correct DS instance is returned
        if elements[0] == 'dirsrv' and not instance_name:
            return ('dirsrv@%s.service'
                    % str(self.api.env.realm.replace('.', '-')))
        # Short-cut: if there is already exact service name, return it
        if self.lib_path_exists and instance_name:
            if len(elements) == 1:
                # service name is like pki-tomcatd.target or krb5kdc.service
                return self.systemd_name
            if len(elements) > 1 and elements[1][0] != '.':
                # Service name is like pki-tomcatd@pki-tomcat.service
                # and that file exists
                return self.systemd_name
        if len(elements) > 1:
            # We have dynamic service
            if instance_name:
                # Instanciate dynamic service
                return "%s@%s.service" % (elements[0], instance_name)
            else:
                # No instance name, try with target
                tgt_name = "%s.target" % (elements[0])
                srv_lib = os.path.join(paths.LIB_SYSTEMD_SYSTEMD_DIR, tgt_name)
                if os.path.exists(srv_lib):
                    return tgt_name
        return self.systemd_name
    def parse_variables(self, text, separator=None):
        """
        Parses 'systemctl show' output and returns a dict[variable]=value
        Arguments: text -- 'systemctl show' output as string
                   separator -- optional (defaults to None), what separates
                                the key/value pairs in the text
        """
        def splitter(x, separator=None):
            # Lines of length <= 1 contribute a (None, None) entry.
            if len(x) > 1:
                y = x.split(separator)
                return (y[0], y[-1])
            return (None, None)
        return dict(splitter(x, separator=separator) for x in text.split("\n"))
    def wait_for_open_ports(self, instance_name=""):
        """
        If this is a service we need to wait for do so.

        Looks the instance (or the unit template base) up in
        wellknownports and blocks until those local ports accept
        connections, up to api.env.startup_timeout.
        """
        ports = None
        if instance_name in wellknownports:
            ports = wellknownports[instance_name]
        else:
            elements = self.systemd_name.split("@")
            if elements[0] in wellknownports:
                ports = wellknownports[elements[0]]
        if ports:
            ipautil.wait_for_open_ports('localhost', ports,
                                        self.api.env.startup_timeout)
    def stop(self, instance_name="", capture_output=True):
        """Stop the unit and drop it from the IPA service-list file."""
        instance = self.service_instance(instance_name)
        args = [paths.SYSTEMCTL, "stop", instance]
        # The --ignore-dependencies switch is used to avoid possible
        # deadlock during the shutdown transaction. For more details, see
        # https://fedorahosted.org/freeipa/ticket/3729#comment:1 and
        # https://bugzilla.redhat.com/show_bug.cgi?id=973331#c11
        if instance == "ipa-otpd.socket":
            args.append("--ignore-dependencies")
        ipautil.run(args, skip_output=not capture_output)
        # Only ipactl/installer contexts maintain the started-services file.
        update_service_list = getattr(self.api.env, 'context',
                                      None) in ['ipactl', 'installer']
        super(SystemdService, self).stop(
            instance_name,
            update_service_list=update_service_list)
        logger.debug('Stop of %s complete', instance)
    def start(self, instance_name="", capture_output=True, wait=True):
        """Start the unit; optionally wait for its well-known ports."""
        ipautil.run([paths.SYSTEMCTL, "start",
                     self.service_instance(instance_name)],
                    skip_output=not capture_output)
        update_service_list = getattr(self.api.env, 'context',
                                      None) in ['ipactl', 'installer']
        if wait and self.is_running(instance_name):
            self.wait_for_open_ports(self.service_instance(instance_name))
        super(SystemdService, self).start(
            instance_name,
            update_service_list=update_service_list)
        logger.debug('Start of %s complete',
                     self.service_instance(instance_name))
    def _restart_base(self, instance_name, operation, capture_output=True,
                      wait=False):
        # Shared driver for restart-like systemctl operations.
        ipautil.run([paths.SYSTEMCTL, operation,
                     self.service_instance(instance_name)],
                    skip_output=not capture_output)
        if wait and self.is_running(instance_name):
            self.wait_for_open_ports(self.service_instance(instance_name))
        logger.debug('Restart of %s complete',
                     self.service_instance(instance_name))
    def reload_or_restart(self, instance_name="", capture_output=True,
                          wait=True):
        """Ask systemd to reload the unit, restarting it if needed."""
        self._restart_base(instance_name, "reload-or-restart",
                           capture_output, wait)
    def restart(self, instance_name="", capture_output=True, wait=True):
        """Unconditionally restart the unit."""
        self._restart_base(instance_name, "restart",
                           capture_output, wait)
    def is_running(self, instance_name="", wait=True):
        """Return True when `systemctl is-active` reports the unit active,
        polling for as long as it reports 'activating'."""
        instance = self.service_instance(instance_name, 'is-active')
        while True:
            try:
                result = ipautil.run(
                    [paths.SYSTEMCTL, "is-active", instance],
                    capture_output=True
                )
            except ipautil.CalledProcessError as e:
                if e.returncode == 3 and 'activating' in str(e.output):
                    time.sleep(SERVICE_POLL_INTERVAL)
                    continue
                return False
            else:
                # activating
                if result.returncode == 3 and 'activating' in result.output:
                    time.sleep(SERVICE_POLL_INTERVAL)
                    continue
                # active
                if result.returncode == 0:
                    return True
                # not active
                return False
    def is_installed(self):
        """Return True when systemd lists a unit file for this service."""
        try:
            result = ipautil.run(
                [paths.SYSTEMCTL, "list-unit-files", "--full"],
                capture_output=True)
            if result.returncode != 0:
                return False
            else:
                svar = self.parse_variables(result.output)
                if not self.service_instance("") in svar:
                    # systemd doesn't show the service
                    return False
        except ipautil.CalledProcessError:
            return False
        return True
    def is_enabled(self, instance_name=""):
        """Return True when `systemctl is-enabled` exits successfully."""
        enabled = True
        try:
            result = ipautil.run(
                [paths.SYSTEMCTL, "is-enabled",
                 self.service_instance(instance_name)])
            if result.returncode != 0:
                enabled = False
        except ipautil.CalledProcessError:
            enabled = False
        return enabled
    def is_masked(self, instance_name=""):
        """Return True when the unit is masked."""
        masked = False
        try:
            result = ipautil.run(
                [paths.SYSTEMCTL, "is-enabled",
                 self.service_instance(instance_name)],
                capture_output=True)
            # NOTE(review): the equality test assumes ipautil.run strips
            # the trailing newline from captured output -- confirm.
            if result.returncode == 1 and result.output == 'masked':
                masked = True
        except ipautil.CalledProcessError:
            pass
        return masked
    def enable(self, instance_name=""):
        """Enable the unit; for template units with no instance-specific
        unit file, hand-create the <service>.target.wants symlink."""
        if self.lib_path_exists is None:
            self.lib_path_exists = os.path.exists(self.lib_path)
        elements = self.systemd_name.split("@")
        l = len(elements)
        if self.lib_path_exists and (l > 1 and elements[1][0] != '.'):
            # There is explicit service unit supporting this instance,
            # follow normal systemd enabler
            self.__enable(instance_name)
            return
        if self.lib_path_exists and (l == 1):
            # There is explicit service unit which does not support
            # the instances, ignore instance
            self.__enable()
            return
        if len(instance_name) > 0 and l > 1:
            # New instance, we need to do following:
            # 1. Make /etc/systemd/system/<service>.target.wants/
            #    if it is not there
            # 2. Link /etc/systemd/system/<service>.target.wants/
            #    <service>@<instance_name>.service to
            #    /lib/systemd/system/<service>@.service
            srv_tgt = os.path.join(paths.ETC_SYSTEMD_SYSTEM_DIR,
                                   self.SYSTEMD_SRV_TARGET % (elements[0]))
            srv_lnk = os.path.join(srv_tgt,
                                   self.service_instance(instance_name))
            try:
                if not os.path.isdir(srv_tgt):
                    os.mkdir(srv_tgt)
                    os.chmod(srv_tgt, 0o755)
                if os.path.exists(srv_lnk):
                    # Remove old link
                    os.unlink(srv_lnk)
                if not os.path.exists(srv_lnk):
                    # object does not exist _or_ is a broken link
                    if not os.path.islink(srv_lnk):
                        # if it truly does not exist, make a link
                        os.symlink(self.lib_path, srv_lnk)
                    else:
                        # Link exists and it is broken, make new one
                        os.unlink(srv_lnk)
                        os.symlink(self.lib_path, srv_lnk)
                ipautil.run([paths.SYSTEMCTL, "--system", "daemon-reload"])
            except Exception:
                # Best-effort: failures to pre-create the link are ignored.
                pass
        else:
            self.__enable(instance_name)
    def disable(self, instance_name=""):
        """Disable the unit; for hand-created instance links, remove the
        symlink instead of calling `systemctl disable`."""
        elements = self.systemd_name.split("@")
        if instance_name != "" and len(elements) > 1:
            # Remove instance, we need to do following:
            # Remove link from /etc/systemd/system/<service>.target.wants/
            # <service>@<instance_name>.service
            # to /lib/systemd/system/<service>@.service
            srv_tgt = os.path.join(paths.ETC_SYSTEMD_SYSTEM_DIR,
                                   self.SYSTEMD_SRV_TARGET % (elements[0]))
            srv_lnk = os.path.join(srv_tgt,
                                   self.service_instance(instance_name))
            try:
                if os.path.isdir(srv_tgt):
                    if os.path.islink(srv_lnk):
                        os.unlink(srv_lnk)
                    ipautil.run([paths.SYSTEMCTL, "--system", "daemon-reload"])
            except Exception:
                # Best-effort: failures to remove the link are ignored.
                pass
        else:
            try:
                ipautil.run([paths.SYSTEMCTL, "disable",
                             self.service_instance(instance_name)])
            except ipautil.CalledProcessError:
                pass
    def mask(self, instance_name=""):
        """Mask the unit, removing any overriding unit file in /etc first."""
        srv_tgt = os.path.join(paths.ETC_SYSTEMD_SYSTEM_DIR, self.service_instance(instance_name))
        if os.path.exists(srv_tgt):
            os.unlink(srv_tgt)
        try:
            ipautil.run([paths.SYSTEMCTL, "mask",
                         self.service_instance(instance_name)])
        except ipautil.CalledProcessError:
            pass
    def unmask(self, instance_name=""):
        """Unmask the unit; failures are ignored."""
        try:
            ipautil.run([paths.SYSTEMCTL, "unmask",
                         self.service_instance(instance_name)])
        except ipautil.CalledProcessError:
            pass
    def __enable(self, instance_name=""):
        # Plain `systemctl enable`; failures are ignored.
        try:
            ipautil.run([paths.SYSTEMCTL, "enable",
                         self.service_instance(instance_name)])
        except ipautil.CalledProcessError:
            pass
    # NOTE(review): install/remove drop the instance_name parameter that the
    # PlatformService base methods accept -- confirm this is intended.
    def install(self):
        self.enable()
    def remove(self):
        self.disable()
# Objects below are expected to be exported by platform module
def base_service_class_factory(name, api=None):
    """Stub service factory; concrete platform modules must override it."""
    raise NotImplementedError
# Default, not-implemented factory; platform modules replace this binding.
service = base_service_class_factory
# Empty mapping by default; platform modules provide the real services.
knownservices = KnownServices({})
# System may support more time&date services. FreeIPA supports ntpd only, other
# services will be disabled during IPA installation
timedate_services = ['ntpd', 'chronyd']
|
989,275 | 81be200ff9ecbdddd911170bf6f7809bf1b7924d | import turtle as trt
import math as mp
# Canvas setup: turtle-shaped cursor, fast animation, start pen-down at the
# left edge so digits are drawn left to right.
trt.shape(
    'turtle')
trt.speed(
    10)
trt.penup()
trt.goto(-300, 0)
trt.pendown()
# Stroke geometry: straight segment length, its diagonal, and the gap
# between adjacent digits.
SIDE = 30
DIAG = SIDE * mp.sqrt(2)
SPACE = 20
def middle_side(q):
    """Stroke: straight segment, then turn 135 degrees clockwise.

    The pen touches the paper only when q == 1; otherwise the move is a
    silent reposition.
    """
    if q == 1:
        trt.pendown()
    else:
        trt.penup()
    trt.forward(SIDE)
    trt.right(135)
def diag_side(q):
    """Stroke: diagonal segment, then turn 135 degrees counter-clockwise.

    Draws only when q == 1; otherwise moves with the pen lifted.
    """
    if q == 1:
        trt.pendown()
    else:
        trt.penup()
    trt.forward(DIAG)
    trt.left(135)
def turn_side(q):
    """Stroke: straight segment, then turn 90 degrees counter-clockwise.

    Draws only when q == 1; otherwise moves with the pen lifted.
    """
    if q == 1:
        trt.pendown()
    else:
        trt.penup()
    trt.forward(SIDE)
    trt.left(90)
def just_side(q):
    """Stroke: straight segment with no turn afterwards.

    Draws only when q == 1; otherwise moves with the pen lifted.
    """
    if q == 1:
        trt.pendown()
    else:
        trt.penup()
    trt.forward(SIDE)
def end_side(q):
    """Final stroke: draw/skip the last segment, then reposition (pen up)
    to the start of the next digit cell."""
    if q == 1:
        trt.pendown()
    else:
        trt.penup()
    trt.forward(SIDE)
    trt.penup()
    trt.right(180)
    trt.forward(SIDE * 2)
    trt.right(90)
    trt.forward(SIDE + SPACE)
# One drawing callback per stroke of a digit; the i-th character of a
# digit's rule string tells stroke i to draw ("1") or to move silently.
writer = (
    middle_side, diag_side, middle_side,
    diag_side, turn_side, just_side,
    turn_side, turn_side, just_side,
    end_side
)
# Per-digit rule file names, indexed by the digit's value.
number = [
    'zero.txt', 'one.txt', 'two.txt',
    'three.txt', 'four.txt', 'five.txt',
    'six.txt', 'seven.txt', 'eight.txt',
    'nine.txt'
]
# Bug fix: the original never closed any file it opened ('number.txt' and
# one rule file per digit); use context managers so every handle is closed.
with open('number.txt', 'r') as opn:
    example = opn.readline().rstrip()
for k in range(len(example)):
    with open(number[int(example[k])], 'r') as opnum:
        rule = opnum.readline().rstrip()
    for i in range(len(writer)):
        writer[i](int(rule[i]))
|
989,276 | 440732ac37fe3b16c5facd64bad272839ed792a8 |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
style.use('fivethirtyeight')
# NOTE(review): `filename` is unused below; the frame is loaded from the
# parent directory instead -- confirm which path is intended.
filename = 'emperors.csv'
# strfile = unicode(str(filename), errors='replace')
df = pd.read_csv('../emperors.csv', index_col=0, encoding='latin-1')
# ***** I'm thinking we need Python running the server itself to be able to respond to AJAX requests from the client... *****
def main():
    """Print the reign start date of every emperor in the frame."""
    # print(df['name'])
    # print(df.axes)
    for _, record in df.iterrows():
        print(record['reign.start'], '\n')
    # print(df.describe())
# if __name__ == "__main__":
# x=main()
# return x;
# Module-level accumulator filled by getSpan(); addToDF() copies it into
# the frame and resets it between passes.
spans = []
def getSpan(term):
    """Append each emperor's life span or reign length (in days) to the
    module-level `spans` list.

    term -- 'life' (birth..death) or 'reign' (reign.start..reign.end).
    Rows with missing dates contribute the string 'nan' instead of a
    number, keeping `spans` aligned with the frame's rows.
    """
    for row in df.iterrows():
        if term == 'life':
            start = row[1]['birth']
            end = row[1]['death']
        elif term == 'reign':
            start = row[1]['reign.start']
            end = row[1]['reign.end']
        # Missing dates show up either as the string 'nan' or as a float
        # NaN (pandas); skip the row in both cases.
        if start == 'nan' or end == 'nan' or type(start).__name__ == 'float' or type(end).__name__ == 'float':
            global spans
            spans.append('nan')
            # return
            continue # not sure why this is stopping at the first nan....Ah, need continue instead of break.
        # Dates are ISO-ish 'YYYY-MM-DD' strings; split into components.
        start = start.split('-')
        end = end.split('-')
        # print(row[1]['name'])
        span = dict()
        span['years'] = int(end[0]) - int(start[0])
        span['months'] = int(end[1]) - int(start[1])
        span['days'] = int(end[2]) - int(start[2])
        # Borrow from the next-larger unit when a component goes negative.
        if (span['months'] < 0):
            span['months'] = 12 + span['months']
            span['years'] -= 1
        if (span['days'] < 0):
            span['days'] = 30 + span['days']
            span['months'] -= 1
        # print(span)
        # Approximation: 365-day years and 30-day months.
        days_in_term = span['years'] * 365 + span['months'] * 30 + span['days']
        # print(days_in_term)
        # Not sure why this isn't working....
        # if term == 'life':
        #     row[1]['lifespan'] = days_in_term
        # elif term == 'reign':
        #     row[1]['reign.term'] = days_in_term
        spans.append(days_in_term)
    # print("spansssssss: ", spans)  # Why won't this print out right here????
    # print('hi h i hi hi')
def getReignLengths():
    """Append each emperor's reign length (in days) to `spans`."""
    getSpan('reign')
# getReignLengths()
def getLifeSpans():
    """Append each emperor's life span (in days) to `spans`."""
    getSpan('life')
# getLifeSpans()
def groupByEra():
    """Print the dynasty of every emperor, one per row."""
    for _, record in df.iterrows():
        print(record['dynasty'])
# groupByEra()
def getGroups(term):
    """Print the distinct group keys of column *term*."""
    for group_key, _ in df.groupby([term]):
        print(group_key)
# print(df.groupby(["dynasty"]))
# print(df.count())
# getGroups('killer')
# Good: This grabs only a specific group (those assassinated) grouped by dynasty:
def sumRows(term):
    """For emperors who seized power, print row counts grouped by *term*."""
    # Cleanup: an unused assassination filter was computed here; only the
    # seized-power filter feeds the print below.
    seized = df[df['rise'] == 'Seized Power']
    # print(summed)
    print(seized.groupby(term).count()['name'])
    # print(df.groupby(term).count()['name'])
# sumRows('cause') # This is asking, of those who seized power, how did they die?
# When is reign end not equal to death?
def deathIsEnd():
    """Print, per emperor, whether the reign ended on the death date."""
    for _, record in df.iterrows():
        print(record['name'], record['death'] == record['reign.end'])
# deathIsEnd()
# Note: will need to run this before any other functions that try to access lifespan/reign length columns:
# Note: will need to run this before any other functions that try to access lifespan/reign length columns:
def addToDF():
    """Add 'lifespan' and 'reign' (days) columns to df and print the
    average reign length.  Uses and resets the global `spans` buffer."""
    global spans
    getLifeSpans()
    # print(df.head())
    # print(spans)
    df['lifespan'] = pd.Series(spans, index=df.index)
    # print(df.head())
    # Reset the buffer before the second pass.
    spans = []
    getReignLengths()
    df['reign'] = pd.Series(spans, index=df.index)
    print(df.head())
    # NOTE(review): 'nan' string entries participate in this sum -- confirm
    # the average is meaningful when dates are missing.
    avg_reign_length = df['reign'].sum() / len(df['reign'])
    # avg_life_span = df[df['lifespan'] != 'nan'].sum() / len(df['lifespan'])  # won't work because of strings??
    print(avg_reign_length)
addToDF()
def reignByDyn():
    """Print the groupby object and each dynasty's sub-frame."""
    # print(df.head())
    dyns = df.groupby('dynasty')
    print(dyns)
    for d in dyns:
        # d is a (dynasty, sub-frame) pair; print only the rows.
        print(d[1])
reignByDyn()
def playWithGB():
    """Experiment: per-dynasty means computed two equivalent ways."""
    dyns = df.groupby('dynasty').mean()
    print(dyns)
    # Same result as above:
    dyns2 = df.groupby('dynasty').agg(np.mean)
    print(dyns2)
    # Hmm only getting one column....
# Thank you https://www.dataquest.io/blog/pandas-python-tutorial/:
# - df.iloc
# -reviews.loc[:5,["score", "release_year"]]
# -reviews[["score", "release_year"]]
# -Create a df by passing multiple series to the DF constructor
# -reviews["score"].mean()
# -reviews.mean()
# -pandas.DataFrame.corr — finds the correlation between columns in a DataFrame.
# -pandas.DataFrame.count — counts the number of non-null values in each DataFrame column.
# -pandas.DataFrame.max — finds the highest value in each column.
# -pandas.DataFrame.min — finds the lowest value in each column.
# -pandas.DataFrame.median — finds the median of each column.
# -pandas.DataFrame.std — finds the standard deviation of each column.
# -score_filter = reviews["score"] > 7
# -filtered_reviews = reviews[score_filter]
# -xbox_one_filter = (reviews["score"] > 7) & (reviews["platform"] == "Xbox One")
# reviews[reviews["platform"] == "Xbox One"]["score"].plot(kind="hist")
# filtered_reviews["score"].hist()
# Dictionaries can be converted easily into Series.
# Boolean indexing: cities[cities > 1000]
# Pass a dictionary of lists to the Dataframes constructor
# box/whisker -- Wow this is amazing:
# df.plot(kind='box', subplots=True, layout=(3, 3), sharex=False, sharey=False)
# plt.show()
# histograms -- Wow this is also nuts:
# df.hist()
# plt.show()
# scatter plot matrix -- WOW --:
# scatter_matrix(df)
# plt.show()
# would be good to write a function that checks how many of those (e.g.) from Italia assassinated, VS how often *everyone* was assassinated.
|
989,277 | 62b991ba0e1a44347b71fd0925fa83618a6cb51e | # -*- coding: utf-8 -*-
'''
Created on 2017-12-21 10:13
---------
@summary:
---------
@author: Boris
'''
import base.base_parser as base_parser
import utils.tools as tools
from utils.log import log
# Tracker site id and display name ("iQIYI") used when saving results.
SITE_ID = 1712200003
NAME = '爱奇艺'
# Required hook: register this site's metadata with the tracker.
@tools.run_safe_model(__name__)
def add_site_info():
    """Record the iQIYI site (id/url/name) in the site-info table."""
    log.debug('添加网站信息')
    table = 'VIDEO_NEWS_site_info'
    url = 'http://so.iqiyi.com'
    base_parser.add_website_info(table, site_id=SITE_ID, url=url, name=NAME)
# Required hook: seed the crawl from keyword search result pages.
@tools.run_safe_model(__name__)
def add_root_url(keywords):
    """Search iQIYI for each keyword over result pages 1-19 and save each
    video's thumbnail url, title, page url and release time."""
    log.debug('''
        添加根url
        parser_params : %s
        ''' % str(keywords))
    for keyword in keywords:
        print(keyword)
        # Set when save_video_info signals to stop collecting this keyword.
        next_keyword = False
        keyword = tools.quote(keyword)
        for page_index in range(1, 20):
            url = 'http://so.iqiyi.com/so/q_%s_ctg__t_0_page_%s_p_1_qc_0_rd__site__m_4_bitrate_' % (keyword, page_index)
            print(url)
            html, res = tools.get_html_by_requests(url)
            # Result anchors (thumbnail/title/link) and their info blocks.
            video_list_title = tools.get_tag(html, 'a', {'class': 'figure-180101'})
            video_list_time = tools.get_tag(html, 'div', {'class': 'result_info'})
            if not video_list_time:
                # No results on this page: stop paging for this keyword.
                print('无视频列表 跳出')
                break
            for info_index, video_info in enumerate(video_list_time):
                try:
                    image_url = tools.get_info(str(video_list_title[info_index]), 'src="(.+?)"', fetch_one=True)
                    title = tools.get_info(str(video_list_title[info_index]), 'title="(.+?)"', fetch_one=True)
                    url = tools.get_info(str(video_list_title[info_index]), 'href="(.+?)"', fetch_one=True)
                    release_time = tools.get_tag(video_info, 'em', {'class': 'result_info_desc'}, find_all=False).get_text()
                    # save_video_info returns falsy once enough is stored.
                    is_continue = base_parser.save_video_info(image_url=image_url, url=url, title=title, release_time=release_time,
                                                              site_name=NAME)
                    if not is_continue:
                        next_keyword = True
                        break
                except Exception as e:
                    log.error(e)
            if next_keyword:
                break
# Required hook: parse a fetched URL.
def parser(url_info):
    """Intentionally a no-op: all extraction happens in add_root_url."""
    pass
|
989,278 | db13b75756209d26d09ddb6f4169f846fa56e827 | #!/usr/bin/env python3
"""problem_108.py
Problem 108: Diophantine reciprocals I
In the following equation x, y, and n are positive integers.
1/x + 1/y = 1/n
For n = 4 there are exactly three distinct solutions:
1/5 + 1/20 = 1/4
1/6 + 1/12 = 1/4
1/8 + 1/8 = 1/4
What is the least value of n for which the number of distinct solutions exceeds
MIN_SOLUTIONS?
NOTE: This problem is an easier version of Problem 110; it is strongly advised
that you solve this one first.
"""
__author__ = 'Curtis Belmonte'
import math
import common.divisors as divs
import common.primes as prime
import common.sequences as seqs
# PARAMETERS ##################################################################
# Threshold the distinct-solution count must exceed.
MIN_SOLUTIONS = 1000  # default: 1000
def find_min_denom(min_solutions: int) -> int:
"""Finds the least n such that 1/x + 1/y = 1/n exceeds a solution count.
Specifically, returns the minimum natural number n for which there are more
than min_solutions integer pairs x <= y that satisfy the above equation.
"""
# count max distinct prime factors to exceed min_solutions
prime_count = int(math.ceil(math.log(2 * min_solutions - 1, 3)))
# check products of primorials up to prime count
primorial_list = prime.primorials(prime_count)
for n in seqs.generate_products(primorial_list):
# find solution count in terms of divisors of n^2
if (divs.count_power_divisors(n, 2) + 1) // 2 > min_solutions:
return n
# should never reach this statement
return 0
def solve() -> int:
    """Return the puzzle answer for the configured MIN_SOLUTIONS."""
    return find_min_denom(MIN_SOLUTIONS)
if __name__ == '__main__':
    print(solve())
|
989,279 | 549689adbf669f40e58d4fae22a2583283985def |
async def test_non_existing_container(container_requester):
    """A GET for an unknown container id returns 404."""
    async with container_requester as requester:
        response, status = await requester('GET', '/db/non')
        assert status == 404
async def test_non_existing_registry(container_requester):
    """A GET for an unknown registry entry returns 404."""
    async with container_requester as requester:
        response, status = await requester('GET', '/db/guillotina/@registry/non')
        assert status == 404
async def test_non_existing_type(container_requester):
    """A GET for an unknown content type returns 404."""
    async with container_requester as requester:
        response, status = await requester('GET', '/db/guillotina/@types/non')
        assert status == 404
|
989,280 | b2c9e387163d2b84ead032af253c9d3e5213d62e | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-08-08 21:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: drops SendHistory's celery_task_id and
    sending fields and adds sending_email / sending_text boolean flags to
    MessageTracker."""
    dependencies = [
        ('people', '0010_auto_20160808_2114'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='sendhistory',
            name='celery_task_id',
        ),
        migrations.RemoveField(
            model_name='sendhistory',
            name='sending',
        ),
        migrations.AddField(
            model_name='messagetracker',
            name='sending_email',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='messagetracker',
            name='sending_text',
            field=models.BooleanField(default=False),
        ),
    ]
|
989,281 | e1c22307a8e5a870c11388c12003ea745386bd9b | """
Overview
===============================================================================
+----------+------------------------------------------------------------------+
| Path | PyPoE/poe/file/specification/errors.py |
+----------+------------------------------------------------------------------+
| Version | 1.0.0a0 |
+----------+------------------------------------------------------------------+
| Revision | $Id$ |
+----------+------------------------------------------------------------------+
| Author | Omega_K2 |
+----------+------------------------------------------------------------------+
Description
===============================================================================
Agreement
===============================================================================
See PyPoE/LICENSE
Documentation
===============================================================================
.. autoclass:: SpecificationError
.. autoclass:: SpecificationError.ERRORS
.. autoclass:: SpecificationWarning
"""
# =============================================================================
# Imports
# =============================================================================
# Python
from enum import IntEnum
# 3rd-party
# self
# =============================================================================
# Globals
# =============================================================================
__all__ = ['SpecificationError', 'SpecificationWarning']
# =============================================================================
# Exceptions & Warnings
# =============================================================================
class SpecificationError(ValueError):
    """
    Raised when a specification does not agree with the underlying data.

    Unlike most exceptions, instances carry a numeric error code in
    addition to the message, so callers can distinguish specific failure
    modes programmatically.  The code is normalized to a
    :class:`SpecificationError.ERRORS` member on construction.
    """

    class ERRORS(IntEnum):
        """
        Error codes, grouped by thousands:

        * 1xxx - malformed regular fields
        * 2xxx - malformed virtual fields
        * 3xxx - problems detected at runtime

        Attributes
        ----------
        INVALID_FOREIGN_KEY_FILE
            Foreign key file does not exist
        INVALID_FOREIGN_KEY_ID
            Foreign key with the specified id does not exist
        INVALID_ARGUMENT_COMBINATION
            Arguments that cannot be used together were combined
        INVALID_ENUM_NAME
            Enum does not exist in :py:mod:`PyPoE.poe.constants`
        VIRTUAL_KEY_EMPTY
            Virtual key defines no fields
        VIRTUAL_KEY_DUPLICATE
            Virtual key duplicates a regular key
        VIRTUAL_KEY_INVALID_KEY
            Virtual key references invalid fields
        VIRTUAL_KEY_INVALID_DATA_TYPE
            Target fields have unsupported data types
        RUNTIME_MISSING_SPECIFICATION
            No specification found in the specification format used for
            the function call
        RUNTIME_MISSING_FOREIGN_KEY
            A single foreign key reference could not be resolved
        RUNTIME_ROWSIZE_MISMATCH
            Row size in the specification differs from the real data row
            size
        """
        INVALID_FOREIGN_KEY_FILE = 1000
        INVALID_FOREIGN_KEY_ID = 1001
        INVALID_ARGUMENT_COMBINATION = 1002
        INVALID_ENUM_NAME = 1003
        VIRTUAL_KEY_EMPTY = 2000
        VIRTUAL_KEY_DUPLICATE = 2001
        VIRTUAL_KEY_INVALID_KEY = 2002
        VIRTUAL_KEY_INVALID_DATA_TYPE = 2003
        RUNTIME_MISSING_SPECIFICATION = 3000
        RUNTIME_MISSING_FOREIGN_KEY = 3001
        RUNTIME_ROWSIZE_MISMATCH = 3002

    def __init__(self, code, msg):
        """Store *code* (coerced to an ERRORS member) and *msg*."""
        super().__init__()
        self.code = self.ERRORS(code)
        self.msg = msg

    def __str__(self):
        # Same rendering as "%s: %s" % (repr(code), msg).
        return f'{self.code!r}: {self.msg}'
class SpecificationWarning(UserWarning):
    """Warning category for non-fatal specification issues."""
989,282 | 86a2597f9fbfbfbe7d7b5ddd5bd5f6c5d72652ca | #!/bin/python
import sys
# Python 2 script: classic bubble sort over n integers read from stdin,
# counting the number of element swaps performed.
n = int(raw_input().strip())
a = map(int,raw_input().strip().split(' '))
numOfSwaps = 0
# Bubble sort: after each outer pass the largest remaining element has
# "bubbled" to index i, so the inner loop can shrink by one each time.
for i in range(len(a)-1, 0, -1):
    for j in range(i):
        if (a[j] > a[j+1]):
            temp = a[j]
            a[j] = a[j+1]
            a[j+1] = temp
            numOfSwaps += 1
# NOTE(review): printing the whole list looks like a leftover debug line;
# the three formatted lines below match the usual expected output -- confirm.
print a
print "Array is sorted in %d swaps." % numOfSwaps
print "First Element: %d" % a[0]
print "Last Element: %d" % a[n-1]
|
989,283 | 5f5524c28dd7c59876bdba3401c79cb7f0318008 | from collections import defaultdict
# Competitive-programming script: reads an N-vertex tree and K, and prints
# (after each processed vertex) a running count modulo 1e9+7.  The scoring
# below appears to count colorings in which a vertex's color differs from
# its parent, grandparent, and earlier siblings (i.e. distance-<=2
# constraint) -- TODO confirm against the original problem statement.
v = defaultdict(list)          # adjacency list of the tree
mod = 10**9+7
N, K = map(int, input().split())
for _ in range(N-1):
    a, b = map(int, input().split())
    v[a].append(b)
    v[b].append(a)
visited = [False]*(N+1)
ans = 1
# Stack entries: (vertex, parent, number of colors already excluded).
q = [(1, 0, 0)]
while q:
    pos, parent, score = q.pop()
    visited[pos] = True
    ans *= (K-score)           # K minus colors forbidden for this vertex
    ans %= mod
    # Children of the root exclude 1 color (the root's); deeper children
    # exclude 2 (parent + grandparent) plus one per earlier sibling.
    a = 1 if pos==1 else 2
    b = 0
    for i in v[pos]:
        if visited[i]:continue
        q.append((i, pos, a+b))
        b += 1
print(ans)
|
989,284 | 6f27bf84d9392070f91384b5d081dcabc8935850 | from utils import session, expose, render_template, Response, url_for
from models import Category, SubCategory, Scenario
from models import User, AppFamilyPermission, AppPermission
from authenticate_user import authenticateuser
from authenticate_user import verifyloggedin
from authenticate_user import authorizeuseroncategory
from authenticate_user import authorizeuseronsubcategory
from authenticate_user import login, logout
from authenticate_user import register
from authenticate_user import setpermissions
from authenticate_user import setpermissions2
from authenticate_user import setpermissions3
from uploader import get_all_scenarios, get_scenario_query
from uploader import upload_query_result_structure
from uploader import upload_query_result_data
from uploader import upload_query_result_count
from sqlalchemy import func,over
from sqlalchemy.sql import text
from werkzeug.utils import redirect
from werkzeug.exceptions import NotFound
from werkzeug.utils import cached_property
from werkzeug.contrib.securecookie import SecureCookie
from werkzeug.datastructures import Headers
from cgi import escape
import json
import hashlib
import uuid
from ScenarioDataTableProcessing import *
from SubCategoryDataTableProcessing import *
from ScenarioHeadersDataTableProcessing import *
_CSV_FILE_PATH = "/tmp/sample.csv" # TODO: Use a generated path based on logged in user to avoid contention
@authenticateuser
@expose('/welcome/')
def welcome(request):
    """Render the welcome page (no @verifyloggedin, unlike other views)."""
    return render_template('welcome.html')
@authenticateuser
@verifyloggedin
@expose('/')
def overview(request):
    """Render the top-level overview page.

    Only the categories and sub-categories the logged-in user has been
    granted access to (via AppFamilyPermission / AppPermission rows) are
    shown.
    """
    current_user = request.client_user_object
    uid = current_user.user_id
    serialized_args = json.dumps(request.args)
    permitted_categories = (
        session.query(Category)
        .join(AppFamilyPermission,
              Category.category_id == AppFamilyPermission.category_id)
        .filter(AppFamilyPermission.user_id == uid)
        .order_by(Category.category_display_order.asc()))
    permitted_sub_categories = (
        session.query(SubCategory)
        .join(AppPermission,
              SubCategory.subcategory_id == AppPermission.subcategory_id)
        .filter(AppPermission.user_id == uid))
    return render_template(
        'overview.html',
        categories=permitted_categories,
        sub_categories=permitted_sub_categories,
        user=current_user,
        args=serialized_args)
@authenticateuser
@verifyloggedin
@authorizeuseroncategory
@expose('/<c_name>/')
def category(request, c_name):
    """Render a single category page with its permitted sub-categories.

    ``c_name`` comes from the URL; a stray browser request for
    ``favicon.ico`` would otherwise hit this catch-all route, so it is
    short-circuited with an empty response.
    """
    if c_name == 'favicon.ico':
        return Response()
    user = request.client_user_object
    user_id = user.user_id
    args = json.dumps(request.args)
    # Categories/sub-categories restricted to what this user may see.
    categories = (session.query(Category)
                  .join(
                      AppFamilyPermission,
                      Category.category_id
                      == AppFamilyPermission.category_id)
                  .filter(AppFamilyPermission.user_id == user_id)
                  .order_by(Category.category_display_order.asc()))
    sub_categories = (session.query(SubCategory)
                      .join(
                          AppPermission,
                          SubCategory.subcategory_id
                          == AppPermission.subcategory_id)
                      .filter(AppPermission.user_id == user_id))
    # First Category whose name matches; raises IndexError if none exist.
    breadcrumbs = (session.query(Category)
                   .filter(Category.category_name==c_name).all()[0])
    return render_template(
        'category.html',
        categories=categories,
        sub_categories=sub_categories,
        breadcrumbs=breadcrumbs,
        category_name=c_name,
        user=user,
        args=args)
@authenticateuser
@verifyloggedin
@authorizeuseroncategory
@authorizeuseronsubcategory
@expose('/<c_name>/<sc_name>/')
def sub_category(request, c_name, sc_name):
    """Render a sub-category page inside category ``c_name``.

    ``breadcrumbs`` is the (Category, SubCategory) pair matching the URL,
    used by the template for navigation; raises IndexError if no match.
    """
    user = request.client_user_object
    user_id = user.user_id
    args = json.dumps(request.args)
    # Navigation lists limited to this user's permissions.
    categories = (session.query(Category)
                  .join(
                      AppFamilyPermission,
                      Category.category_id
                      == AppFamilyPermission.category_id)
                  .filter(AppFamilyPermission.user_id == user_id)
                  .order_by(Category.category_display_order.asc()))
    sub_categories = (session.query(SubCategory)
                      .join(
                          AppPermission,
                          SubCategory.subcategory_id
                          == AppPermission.subcategory_id)
                      .filter(AppPermission.user_id == user_id))
    breadcrumbs = (session.query(Category, SubCategory)
                   .join(
                       SubCategory, Category.category_id
                       == SubCategory.category_id)
                   .filter(Category.category_name==c_name)
                   .filter(SubCategory.subcategory_name==sc_name)
                   .all()[0])
    return render_template(
        'sub_category.html',
        categories=categories,
        sub_categories=sub_categories,
        breadcrumbs=breadcrumbs,
        subcategory_name=sc_name,
        user=user,
        args=args)
@authenticateuser
@verifyloggedin
@authorizeuseroncategory
@authorizeuseronsubcategory
@expose('/<c_name>/<sc_name>/<s_name>/')
def scenario(request, c_name, sc_name, s_name):
    """Render a scenario page with its grouping-column options.

    Records a click for the requested ``groupby`` column (so the UI can
    surface the five most frequently used columns first), then loads the
    ordered column-name lists for the scenario's data table.
    """
    user = request.client_user_object
    user_id = user.user_id
    org = request.client_organization_object
    org_namespace_name = org.organization_namespace_name
    args = json.dumps(request.args)
    categories = (session.query(Category)
                  .join(
                      AppFamilyPermission,
                      Category.category_id
                      == AppFamilyPermission.category_id)
                  .filter(AppFamilyPermission.user_id == user_id)
                  .order_by(Category.category_display_order.asc()))
    sub_categories = (session.query(SubCategory)
                      .join(
                          AppPermission,
                          SubCategory.subcategory_id
                          == AppPermission.subcategory_id)
                      .filter(AppPermission.user_id == user_id))
    breadcrumbs = (session.query(Category, SubCategory, Scenario)
                   .join(SubCategory, Category.category_id
                         == SubCategory.category_id)
                   .join(Scenario, SubCategory.subcategory_id
                         == Scenario.subcategory_id)
                   .filter(Category.category_name==c_name)
                   .filter(SubCategory.subcategory_name==sc_name)
                   .filter(Scenario.scn_name==s_name).all()[0])
    scenario = (session.query(Scenario)
                .filter(Scenario.scn_name==s_name)
                .all()[0])
    scn_short_des = scenario.scn_short_description
    scenario_id = scenario.scn_id
    s_name_lower = s_name.lower()
    groupby = request.args.get('groupby')
    # SECURITY FIX: `groupby` comes straight from the query string and was
    # interpolated into SQL unescaped.  Restrict it to identifier
    # characters; legitimate values are column names, so real requests
    # are unaffected while injection attempts are ignored.
    if groupby and all(ch.isalnum() or ch == '_' for ch in groupby):
        clicks_count_query = (
            """select insert_clicks({s_id},'{name}');"""
            .format(s_id = scenario_id,name = groupby))
        count_query = text(clicks_count_query)
        insert = session.execute(count_query).fetchall()
        session.commit()
    # Top five most-clicked grouping columns for this scenario ...
    query1 = (
        """select frequent_column_name from scenario_clicks_count
        where scn_id = {s_id} order by frequency_number
        desc limit 5;""".format(s_id = scenario_id))
    s1 = text(query1)
    scenario_data_column_names_ordered1 = session.execute(s1).fetchall()
    # ... and the remaining columns of the scenario table, alphabetically.
    query2 = (""" select column_name from INFORMATION_SCHEMA.COLUMNS
        where column_name not in (
        select frequent_column_name from scenario_clicks_count
        where scn_id = {s_id} order by frequency_number desc limit 5)
        and table_name = '{scen_name_lower}'
        and table_schema = '{namespace_name}' order by column_name;"""
        .format(
            namespace_name=org_namespace_name,
            scen_name_lower=s_name_lower.lower(),
            s_id=scenario_id))
    s2 = text(query2)
    scenario_data_column_names_ordered2 = session.execute(s2).fetchall()
    return render_template(
        'scenario.html',
        categories=categories,
        sub_categories=sub_categories,
        breadcrumbs=breadcrumbs,
        scn_des=scn_short_des,
        scenario_name=s_name,
        scenario_data_column_names1=scenario_data_column_names_ordered1,
        scenario_data_column_names2=scenario_data_column_names_ordered2,
        user=user,
        args=args)
@authenticateuser
@verifyloggedin
@expose('/overview_main_chart_data_source/')
def overview_main_chart_data_source(request):
    """Return per-category [name, issue_count, problem_sum] triples as JSON.

    Counts scenarios with a positive recent problem count, restricted by
    the optional ``filter`` query parameter (scenario type) and the
    user's permissions.
    """
    user = request.client_user_object
    user_id = user.user_id
    org = request.client_organization_object
    org_namespace_name = org.organization_namespace_name
    issue_filter = request.args.get('filter')
    if not issue_filter:
        issue_filter = ''
    # NOTE(review): `issue_filter` is client-supplied and interpolated into
    # the SQL string below -- SQL-injection risk; should be parameterized.
    query = """SELECT iqp_categories.category_name,
    COUNT(iqp_scenarios.scn_name) as issue_count,
    SUM(problem_count) as problemsum
    FROM iqp_categories
    JOIN iqp_subcategories ON (iqp_categories.category_id
    = iqp_subcategories.category_id)
    JOIN iqp_scenarios ON (iqp_subcategories.subcategory_id
    = iqp_scenarios.subcategory_id)
    JOIN {namespace_name}.iqp_problem_count_recent ON (iqp_scenarios.scn_id
    = iqp_problem_count_recent.scn_id)
    JOIN app_family_permissions ON (iqp_categories.category_id
    = app_family_permissions.category_id)
    JOIN app_permissions ON (iqp_subcategories.subcategory_id
    = app_permissions.subcategory_id)
    WHERE iqp_problem_count_recent.problem_count > 0
    AND (iqp_scenarios.scn_type NOT IN ('Stats', 'N','Feature')
    OR '{filter_type}' IN ('Stats', 'N','Feature'))
    AND (iqp_scenarios.scn_type = '{filter_type}'
    OR '{filter_type}' = '')
    AND app_family_permissions.user_id = '{user_id}'
    AND app_permissions.user_id = '{user_id}'
    GROUP BY iqp_categories.category_name""".format(
        namespace_name=org_namespace_name,
        filter_type=issue_filter,
        user_id=user_id)
    s = text(query)
    rs = session.execute(s).fetchall()
    data = ([[str(item['category_name']),
              int(item['issue_count']),
              int(item['problemsum'])]
             for item in rs])
    result = json.dumps(data)
    return Response(result, mimetype='application/json')
@authenticateuser
@verifyloggedin
@expose('/overview_proportion_chart_data_source/')
def overview_proportion_chart_data_source(request):
    """Return ['Issue'/'No_Issue', count] pairs for the overview pie chart.

    Classifies each permitted scenario by whether its recent problem
    count is positive; the optional ``filter`` parameter narrows by
    scenario type.
    """
    user = request.client_user_object
    user_id = user.user_id
    org = request.client_organization_object
    org_namespace_name = org.organization_namespace_name
    issue_filter = request.args.get('filter')
    if not issue_filter:
        issue_filter = ''
    # NOTE(review): `issue_filter` is client-supplied and interpolated into
    # the SQL string below -- SQL-injection risk; should be parameterized.
    query = """SELECT (CASE WHEN iqp_problem_count_recent.problem_count > 0
    THEN 'Issue' ELSE 'No_Issue' END) as issue_or_not,
    COUNT(iqp_scenarios.scn_id) as issue_or_not_count
    FROM iqp_categories
    JOIN iqp_subcategories ON (iqp_categories.category_id
    = iqp_subcategories.category_id)
    JOIN iqp_scenarios ON (iqp_subcategories.subcategory_id
    = iqp_scenarios.subcategory_id)
    JOIN {namespace_name}.iqp_problem_count_recent
    ON (iqp_scenarios.scn_id = iqp_problem_count_recent.scn_id)
    JOIN app_family_permissions
    ON (iqp_categories.category_id = app_family_permissions.category_id)
    JOIN app_permissions ON (iqp_subcategories.subcategory_id
    = app_permissions.subcategory_id)
    WHERE (iqp_scenarios.scn_type NOT IN ('Stats', 'N','Feature')
    OR '{filter_type}' IN ('Stats', 'N','Feature'))
    AND (iqp_scenarios.scn_type = '{filter_type}' OR '{filter_type}' = '')
    AND app_family_permissions.user_id = '{user_id}'
    AND app_permissions.user_id = '{user_id}'
    GROUP BY issue_or_not""".format(
        namespace_name=org_namespace_name,
        filter_type=issue_filter,
        user_id=user_id)
    s = text(query)
    rs = session.execute(s).fetchall()
    data = ([[str(item['issue_or_not']),
              int(item['issue_or_not_count'])]
             for item in rs])
    result = json.dumps(data)
    return Response(result, mimetype='application/json')
@authenticateuser
@verifyloggedin
@expose('/category_main_chart_data_source/<c_name>/')
def category_main_chart_data_source(request, c_name):
    """Return [subcategory, issue_count, problem_sum] triples for a category.

    Same shape as the overview chart source but scoped to the category
    named ``c_name`` (IndexError if it does not exist).
    """
    user = request.client_user_object
    user_id = user.user_id
    org = request.client_organization_object
    org_namespace_name = org.organization_namespace_name
    this_category = (session.query(Category)
                     .filter(Category.category_name==c_name)
                     .all()[0])
    c_id = this_category.category_id
    issue_filter = request.args.get('filter')
    if not issue_filter:
        issue_filter = ''
    # NOTE(review): `issue_filter` is client-supplied and interpolated into
    # the SQL string below -- SQL-injection risk; should be parameterized.
    query = """SELECT iqp_subcategories.subcategory_name,
    COUNT(iqp_scenarios.scn_name) issue_count,
    SUM(problem_count) as problemsum
    FROM iqp_subcategories
    JOIN iqp_scenarios ON (iqp_subcategories.subcategory_id
    = iqp_scenarios.subcategory_id)
    JOIN {namespace_name}.iqp_problem_count_recent
    ON (iqp_scenarios.scn_id = iqp_problem_count_recent.scn_id)
    JOIN app_permissions ON (iqp_subcategories.subcategory_id
    = app_permissions.subcategory_id)
    WHERE iqp_problem_count_recent.problem_count > 0
    AND (iqp_scenarios.scn_type NOT IN ('Stats', 'N','Feature')
    OR '{filter_type}' IN ('Stats', 'N','Feature'))
    AND (iqp_scenarios.scn_type = '{filter_type}'
    OR '{filter_type}' = '')
    AND iqp_subcategories.category_id = '{c_id}'
    AND app_permissions.user_id = '{user_id}'
    GROUP BY iqp_subcategories.subcategory_name
    """.format(namespace_name=org_namespace_name,
               c_id=c_id,
               filter_type=issue_filter,
               user_id=user_id)
    s = text(query)
    rs = session.execute(s).fetchall()
    data = ([[str(item['subcategory_name']),
              int(item['issue_count']),
              int(item['problemsum'])]
             for item in rs])
    result = json.dumps(data)
    return Response(result, mimetype='application/json')
@authenticateuser
@verifyloggedin
@expose('/category_proportion_chart_data_source/<c_name>/')
def category_proportion_chart_data_source(request, c_name):
    """Return ['Issue'/'No_Issue', count] pairs scoped to one category."""
    user = request.client_user_object
    user_id = user.user_id
    org = request.client_organization_object
    org_namespace_name = org.organization_namespace_name
    this_category = (session.query(Category)
                     .filter(Category.category_name==c_name)
                     .all()[0])
    c_id = this_category.category_id
    issue_filter = request.args.get('filter')
    if not issue_filter:
        issue_filter = ''
    # NOTE(review): `issue_filter` is client-supplied and interpolated into
    # the SQL string below -- SQL-injection risk; should be parameterized.
    query = """SELECT (CASE WHEN iqp_problem_count_recent.problem_count > 0
    THEN 'Issue' ELSE 'No_Issue' END) as issue_or_not,
    COUNT(iqp_scenarios.scn_id) as issue_or_not_count
    FROM iqp_subcategories
    JOIN iqp_scenarios ON (iqp_subcategories.subcategory_id
    = iqp_scenarios.subcategory_id)
    JOIN {namespace_name}.iqp_problem_count_recent
    ON (iqp_scenarios.scn_id = iqp_problem_count_recent.scn_id)
    JOIN app_permissions ON (iqp_subcategories.subcategory_id
    = app_permissions.subcategory_id)
    WHERE (iqp_scenarios.scn_type NOT IN ('Stats', 'N','Feature')
    OR '{filter_type}' IN ('Stats', 'N','Feature'))
    AND (iqp_scenarios.scn_type = '{filter_type}'
    OR '{filter_type}' = '')
    AND iqp_subcategories.category_id = '{c_id}'
    AND app_permissions.user_id = '{user_id}'
    GROUP BY issue_or_not
    """.format(
        namespace_name=org_namespace_name,
        c_id=c_id,
        filter_type=issue_filter,
        user_id=user_id)
    s = text(query)
    rs = session.execute(s).fetchall()
    data = ([[str(item['issue_or_not']),
              int(item['issue_or_not_count'])]
             for item in rs])
    result = json.dumps(data)
    return Response(result, mimetype='application/json')
@authenticateuser
@verifyloggedin
@expose('/sub_category_table_data_source/<sc_name>/')
def sub_category_table_data_source(request,sc_name):
    """Serve one page of the sub-category scenario table as flexigrid JSON.

    Query parameters: ``sortname``/``sortorder`` (sort column and
    direction), ``rp`` (rows per page), ``page``, and an optional
    ``filter`` (scenario type).  The response is
    ``{"total": ..., "page": ..., "rows": [{"cell": [...]}, ...]}``.
    """
    user = request.client_user_object
    user_id = user.user_id
    org = request.client_organization_object
    org_namespace_name = org.organization_namespace_name
    this_sub_category = (session.query(SubCategory)
                         .filter(SubCategory.subcategory_name==sc_name)
                         .all()[0])
    sc_id = this_sub_category.subcategory_id
    issue_filter = request.args.get('filter')
    # SECURITY FIX: `sortname`/`sortorder` come straight from the client
    # and are interpolated into the ORDER BY clause -- whitelist them to
    # prevent SQL injection.  Legitimate UI requests are unaffected.
    sortable_columns = ('name', 'description', 'current', 'prev', 'trend',
                        'refreshtime', 'stats_total', 'stats_percentage')
    sortkey = request.args['sortname']
    if sortkey not in sortable_columns:
        sortkey = 'name'
    sortDir = 'desc' if request.args['sortorder'].lower() == 'desc' else 'asc'
    limit = int(request.args['rp'])
    offset = int((int(request.args['page']) - 1) * limit)
    data = None
    if not issue_filter:
        issue_filter = ''
    # NOTE(review): `issue_filter` itself is still interpolated into the
    # SQL strings below and should also be parameterized.
    # BUG FIX: was `issue_filter == ('Stats' or 'Features')`, which
    # evaluates to `issue_filter == 'Stats'` -- 'Features' never matched.
    # (NOTE(review): scn_type values elsewhere use 'Feature', singular;
    # confirm which spelling the UI actually sends.)
    if issue_filter in ('Stats', 'Features'):
        # Stats/feature scenarios have no meaningful trend column.
        query = """SELECT iqp_scenarios.scn_name as name,
        iqp_scenarios.scn_short_description as description,
        iqp_problem_count_recent.problem_count as current,
        iqp_problem_count_recent.problem_time as refreshtime,
        COALESCE(problem_count_stats.problem_count,999999999)
        as stats_total,
        (100*iqp_problem_count_recent.problem_count
        / COALESCE(problem_count_stats.problem_count,999999999))
        as stats_percentage
        FROM iqp_scenarios
        LEFT JOIN {namespace_name}.iqp_problem_count_recent
        ON (iqp_scenarios.scn_id = iqp_problem_count_recent.scn_id)
        LEFT JOIN {namespace_name}.iqp_problem_count_prev
        ON (iqp_scenarios.scn_id = iqp_problem_count_prev.scn_id)
        LEFT JOIN {namespace_name}.iqp_problem_count_recent problem_count_stats
        ON (iqp_scenarios.scn_totals_scn_id = problem_count_stats.scn_id)
        WHERE iqp_scenarios.subcategory_id = '{sc_id}'
        AND iqp_problem_count_recent.problem_count > 0
        AND (iqp_scenarios.scn_type NOT IN ('Stats', 'N','Feature')
        OR '{filter_type}' IN ('Stats', 'N','Feature'))
        AND (iqp_scenarios.scn_type = '{filter_type}' OR '{filter_type}' = '')
        ORDER BY {sortby} {dir} limit {limit} offset {offset}
        """.format(
            namespace_name=org_namespace_name,
            sc_id=sc_id,
            filter_type=issue_filter,
            sortby=sortkey,
            dir=sortDir,
            limit=limit,
            offset = offset)
        s = text(query)
        rs = session.execute(s).fetchall()
        data = ([[str(item['name']),
                  str(item['description']),
                  int(item['current']),
                  int(item['refreshtime'])]
                 for item in rs])
    else:
        query = """SELECT iqp_scenarios.scn_name as name,
        iqp_scenarios.scn_short_description as description,
        iqp_problem_count_recent.problem_count as current,
        COALESCE(iqp_problem_count_prev.problem_count, 0) as prev,
        (iqp_problem_count_recent.problem_count
        - COALESCE(iqp_problem_count_prev.problem_count, 0)) as trend,
        iqp_problem_count_recent.problem_time as refreshtime,
        COALESCE(problem_count_stats.problem_count,999999999) as stats_total,
        (100*iqp_problem_count_recent.problem_count
        / COALESCE(problem_count_stats.problem_count,999999999))
        as stats_percentage
        FROM iqp_scenarios
        LEFT JOIN {namespace_name}.iqp_problem_count_recent
        ON (iqp_scenarios.scn_id = iqp_problem_count_recent.scn_id)
        LEFT JOIN {namespace_name}.iqp_problem_count_prev
        ON (iqp_scenarios.scn_id = iqp_problem_count_prev.scn_id)
        LEFT JOIN {namespace_name}.iqp_problem_count_recent problem_count_stats
        ON (iqp_scenarios.scn_totals_scn_id = problem_count_stats.scn_id)
        WHERE iqp_scenarios.subcategory_id = '{sc_id}'
        AND iqp_problem_count_recent.problem_count > 0
        AND (iqp_scenarios.scn_type NOT IN ('Stats', 'N','Feature')
        OR '{filter_type}' IN ('Stats', 'N','Feature'))
        AND (iqp_scenarios.scn_type = '{filter_type}' OR '{filter_type}' = '')
        ORDER BY {sortby} {dir} limit {limit} offset {offset}""".format(
            namespace_name=org_namespace_name,
            sc_id=sc_id,
            filter_type=issue_filter,
            sortby=sortkey,
            dir=sortDir,
            limit=limit,
            offset=offset)
        s = text(query)
        rs = session.execute(s).fetchall()
        data = ([[str(item['name']),
                  str(item['description']),
                  int(item['current']),
                  int(item['stats_total']),
                  float(item['stats_percentage']),
                  int(item['trend']),
                  int(item['refreshtime'])]
                 for item in rs])
    # Unpaged total for the grid's pager.
    countQuery = """SELECT count(*)
    FROM iqp_scenarios
    LEFT JOIN {namespace_name}.iqp_problem_count_recent
    ON (iqp_scenarios.scn_id = iqp_problem_count_recent.scn_id)
    LEFT JOIN {namespace_name}.iqp_problem_count_prev
    ON (iqp_scenarios.scn_id = iqp_problem_count_prev.scn_id)
    LEFT JOIN {namespace_name}.iqp_problem_count_recent problem_count_stats
    ON (iqp_scenarios.scn_totals_scn_id = problem_count_stats.scn_id)
    WHERE iqp_scenarios.subcategory_id = '{sc_id}'
    AND iqp_problem_count_recent.problem_count > 0
    AND (iqp_scenarios.scn_type NOT IN ('Stats', 'N','Feature')
    OR '{filter_type}' IN ('Stats', 'N','Feature'))
    AND (iqp_scenarios.scn_type = '{filter_type}'
    OR '{filter_type}' = '')
    """.format(namespace_name=org_namespace_name,
               sc_id=sc_id,
               filter_type=issue_filter)
    cs = text(countQuery)
    rs2 = session.execute(cs).fetchall()
    jsond = {"total": rs2[0][0], "page": request.args['page'], "rows": []}
    for row in data:
        jsond["rows"].append({"cell": row})
    result = json.dumps(jsond)
    return Response(result, mimetype='application/json')
@authenticateuser
@verifyloggedin
@expose('/sub_category_proportion_chart_data_source/<sc_name>/')
def sub_category_proportion_chart_data_source(request, sc_name):
    """Return ['Issue'/'No_Issue', count] pairs scoped to one sub-category."""
    user = request.client_user_object
    user_id = user.user_id
    org = request.client_organization_object
    org_namespace_name = org.organization_namespace_name
    this_sub_category = (session.query(SubCategory)
                         .filter(SubCategory.subcategory_name==sc_name)
                         .all()[0])
    sc_id = this_sub_category.subcategory_id
    issue_filter = request.args.get('filter')
    if not issue_filter:
        issue_filter = ''
    # NOTE(review): `issue_filter` is client-supplied and interpolated into
    # the SQL string below -- SQL-injection risk; should be parameterized.
    query = """SELECT (CASE WHEN iqp_problem_count_recent.problem_count > 0
    THEN 'Issue' ELSE 'No_Issue' END) as issue_or_not,
    COUNT(iqp_scenarios.scn_id) as issue_or_not_count
    FROM iqp_scenarios
    JOIN {namespace_name}.iqp_problem_count_recent
    ON (iqp_scenarios.scn_id = iqp_problem_count_recent.scn_id)
    WHERE (iqp_scenarios.scn_type NOT IN ('Stats', 'N','Feature')
    OR '{filter_type}' IN ('Stats', 'N','Feature'))
    AND (iqp_scenarios.scn_type = '{filter_type}'
    OR '{filter_type}' = '')
    AND iqp_scenarios.subcategory_id = '{sc_id}'
    GROUP BY issue_or_not
    """.format(
        namespace_name=org_namespace_name,
        sc_id=sc_id,
        filter_type=issue_filter)
    s = text(query)
    rs = session.execute(s).fetchall()
    data = ([[str(item['issue_or_not']),
              int(item['issue_or_not_count'])]
             for item in rs])
    result = json.dumps(data)
    return Response(result, mimetype='application/json')
@authenticateuser
@verifyloggedin
@expose('/scenario_main_chart_data_source/<s_name>/')
def scenario_main_chart_data_source(request, s_name):
    """Return grouped row counts for the scenario chart as JSON.

    If a ``groupby`` column is supplied, groups by it; otherwise groups
    by the scenario's most-clicked column (if any) or returns a single
    'All Rows' total.  Response shape: ``{"groupby": ..., "data": [...]}``.
    """
    user = request.client_user_object
    user_id = user.user_id
    org = request.client_organization_object
    org_namespace_name = org.organization_namespace_name
    groupby = request.args.get('groupby')
    # SECURITY FIX: `groupby` comes straight from the query string and was
    # interpolated into the GROUP BY clause unescaped.  Restrict it to
    # identifier characters; a rejected value falls through to the
    # default (most-clicked column) grouping below.
    if groupby and not all(ch.isalnum() or ch == '_' for ch in groupby):
        groupby = None
    data = {}
    if groupby:
        query = """SELECT {group_by}, COUNT(*) as groupsum
        FROM {namespace_name}.{scen_name}
        GROUP BY {group_by}""".format(
            namespace_name=org_namespace_name,
            scen_name=s_name,
            group_by=groupby)
        s = text(query)
        rs = session.execute(s).fetchall()
        data['groupby'] = groupby
        data['data'] = [[str(item[groupby]), int(item['groupsum'])] for item in rs]
    else:
        scenario = session.query(Scenario).filter(Scenario.scn_name==s_name).all()[0]
        scenario_id = scenario.scn_id
        s_name_lower = s_name.lower()
        # Fall back to the most frequently clicked grouping column.
        query1 = """select frequent_column_name
        from scenario_clicks_count where scn_id = {s_id}
        order by frequency_number desc limit 1;""".format(s_id=scenario_id)
        s1 = text(query1)
        mostly_used = session.execute(s1).fetchall()
        if mostly_used:
            query = """SELECT {mostly_used}, COUNT(*) as groupsum
            FROM {namespace_name}.{scen_name}
            GROUP BY {mostly_used}""".format(
                namespace_name=org_namespace_name,
                scen_name=s_name,
                mostly_used=mostly_used[0][0])
        else:
            # No click history: just count all rows.
            query = """SELECT COUNT(*) as groupsum
            FROM {namespace_name}.{scen_name}""".format(
                namespace_name=org_namespace_name,
                scen_name=s_name)
        s = text(query)
        rs = session.execute(s).fetchall()
        if mostly_used:
            data['groupby'] = mostly_used[0][0]
            data['data'] = ([[str(item[mostly_used[0][0]]),
                              int(item['groupsum'])]
                             for item in rs])
        else:
            data['groupby'] = 'All Rows'
            data['data'] = ([[str('All Rows'),
                              int(item['groupsum'])]
                             for item in rs])
    result = json.dumps(data)
    return Response(result, mimetype='application/json')
# NOTE: this endpoint is no longer used (the scenario view now loads the
# column lists itself); kept for reference.
@authenticateuser
@verifyloggedin
@expose('/scenario_main_chart_options_data_source/<s_name>/')
def scenario_main_chart_options_data_source(request, s_name):
    """Return the scenario's grouping-column options as JSON.

    ``most-five``: the five most-clicked columns; ``others``: the
    remaining columns of the scenario table, alphabetically.
    """
    user = request.client_user_object
    user_id = user.user_id
    org = request.client_organization_object
    org_namespace_name = org.organization_namespace_name
    s_name_lower = s_name.lower()
    # Loop leaves `this_scenario` bound to the LAST matching row;
    # NameError if no scenario matches -- presumably names are unique.
    for scenario in session.query(Scenario).filter(Scenario.scn_name==s_name).all():
        this_scenario = scenario
    s_id = this_scenario.scn_id
    data = {}
    query = """select frequent_column_name
    from scenario_clicks_count
    where scn_id = {sid} order by
    frequency_number desc limit 5;""".format(sid = s_id)
    s = text(query)
    rs = session.execute(s).fetchall()
    data["most-five"] = [[str(item['frequent_column_name'])] for item in rs]
    # NOTE(review): unlike the scenario view, this inner subquery is not
    # filtered by scn_id -- it excludes the top-5 across ALL scenarios.
    query2 = """select column_name from INFORMATION_SCHEMA.COLUMNS
    where column_name not in (select frequent_column_name
    from scenario_clicks_count
    order by frequency_number
    desc limit 5)
    and table_name = '{scen_name_lower}'
    and table_schema = '{namespace_name}'
    order by column_name;""".format(
        namespace_name=org_namespace_name,
        scen_name_lower=s_name_lower.lower())
    s2 = text(query2)
    rs2 = session.execute(s2).fetchall()
    data["others"] = [[str(item['column_name'])] for item in rs2]
    result = json.dumps(data)
    return Response(result, mimetype='application/json')
@authenticateuser
@verifyloggedin
@expose('/scenario_table_data_source/<s_name>/')
def scenario_table_data_source(request, s_name):
    """Serve the scenario's table rows as JSON, delegating to
    ScenarioDataTableProcessing."""
    user_id = request.client_user_object.user_id
    org_namespace_name = (request.client_organization_object
                          .organization_namespace_name)
    safe_name = escape(str(s_name))
    table_processor = ScenarioDataTableProcessing(safe_name, request)
    payload = table_processor.generateDataTables(org_namespace_name)
    return Response(payload, mimetype='application/json')
# NOTE: unused endpoint, kept for reference.
@authenticateuser
@verifyloggedin
@expose('/scenario_header_table_data_source/<s_name>/')
def scenario_header_table_data_source(request, s_name):
    """Serve the scenario table's column headers as JSON, delegating to
    ScenarioHeadersDataTableProcessing."""
    user_id = request.client_user_object.user_id
    org_namespace_name = (request.client_organization_object
                          .organization_namespace_name)
    safe_name = escape(str(s_name))
    header_processor = ScenarioHeadersDataTableProcessing(safe_name)
    payload = header_processor.generateHeaders(org_namespace_name)
    return Response(payload, mimetype='application/json')
@authenticateuser
@verifyloggedin
@expose('/scenario_trend_chart_data_source/<s_name>/')
def scenario_trend_chart_data_source(request, s_name):
    """Return up to 100 recent [time_ms, problem_count] points as JSON.

    Times are multiplied by 1000, i.e. stored problem_time is presumably
    a Unix timestamp in seconds and the chart wants milliseconds.
    """
    user = request.client_user_object
    user_id = user.user_id
    org = request.client_organization_object
    org_namespace_name = org.organization_namespace_name
    s_name = str(s_name)
    s_name = escape(s_name)
    # Loop leaves `this_scenario` bound to the LAST matching row;
    # NameError if no scenario matches -- presumably names are unique.
    for scenario in session.query(Scenario).filter(Scenario.scn_name==s_name).all():
        this_scenario = scenario
    s_id = this_scenario.scn_id
    query = """SELECT iqp_problem_count.problem_time, iqp_problem_count.problem_count
    FROM {namespace_name}.iqp_problem_count
    WHERE iqp_problem_count.scn_id = '{s_id}'
    ORDER BY iqp_problem_count.problem_time DESC LIMIT 100 OFFSET 0""".format(
        namespace_name=org_namespace_name,
        s_id=s_id)
    s = text(query)
    rs = session.execute(s).fetchall()
    data = []
    for item in rs:
        data.append(
            [int(item['problem_time']*1000),
             int(item['problem_count'])])
    result = json.dumps(data)
    return Response(result, mimetype='application/json')
@authenticateuser
@verifyloggedin
@expose('/export/browser_data/')
def export(request):
    """Send the browser-side table data back to the user as a CSV download.

    The rows arrive pre-rendered in the ``tableData`` query parameter
    (JSON list of rows); they are written to a temp CSV which is then
    streamed back as an attachment.
    """
    import csv
    d = Headers()
    d.add("Content-Type","application/octet-stream")
    d.add('Content-Disposition', 'attachment;filename=iqpgenerated.csv')
    headers = ["Application","No of Scenarios","No of Issues"]
    tableData = json.loads(request.args['tableData'])
    # BUG FIX: the file was never closed before being reopened for the
    # response, so buffered rows could be lost; close() flushes them.
    ofile = open(_CSV_FILE_PATH, "wb")
    try:
        writer = csv.writer(ofile, delimiter=',',quotechar='"', quoting=csv.QUOTE_ALL)
        writer.writerow(headers)
        for eachRow in tableData:
            writer.writerow(eachRow)
    finally:
        ofile.close()
    return Response(open(_CSV_FILE_PATH, 'r'),headers = d)
@authenticateuser
@verifyloggedin
@expose('/export/subcategory/<sc_name>')
def exportSubcategoryTable(request,sc_name):
    """Export the sub-category scenario table as a CSV download.

    Re-runs the sub-category query (unpaged) and streams the result back
    as an attachment.
    """
    import csv
    d = Headers()
    d.add("Content-Type","application/octet-stream")
    d.add('Content-Disposition', 'attachment;filename=iqpgenerated.csv')
    headers = ["Scenario","Current Count","Total Count","Percentage of Total","Trend","Last Refreshed"]
    user = request.client_user_object
    user_id = user.user_id
    org = request.client_organization_object
    org_namespace_name = org.organization_namespace_name
    this_sub_category = session.query(SubCategory).filter(SubCategory.subcategory_name==sc_name).all()[0]
    sc_id = this_sub_category.subcategory_id
    issue_filter = request.args.get('filter')
    if not issue_filter:
        issue_filter = ''
    # NOTE(review): `issue_filter` is client-supplied and interpolated into
    # the SQL string below -- SQL-injection risk; should be parameterized.
    query = """SELECT iqp_scenarios.scn_name as name,
    iqp_scenarios.scn_short_description as description,
    iqp_problem_count_recent.problem_count as current,
    COALESCE(iqp_problem_count_prev.problem_count, 0) as prev,
    (iqp_problem_count_recent.problem_count
    - COALESCE(iqp_problem_count_prev.problem_count, 0)) as trend,
    iqp_problem_count_recent.problem_time as refreshtime,
    COALESCE(problem_count_stats.problem_count,999999999) as stats_total,
    (iqp_problem_count_recent.problem_count
    / COALESCE(problem_count_stats.problem_count,999999999)) as stats_percentage
    FROM iqp_scenarios
    LEFT JOIN {namespace_name}.iqp_problem_count_recent
    ON (iqp_scenarios.scn_id = iqp_problem_count_recent.scn_id)
    LEFT JOIN {namespace_name}.iqp_problem_count_prev
    ON (iqp_scenarios.scn_id = iqp_problem_count_prev.scn_id)
    LEFT JOIN {namespace_name}.iqp_problem_count_recent problem_count_stats
    ON (iqp_scenarios.scn_totals_scn_id = problem_count_stats.scn_id)
    WHERE iqp_scenarios.subcategory_id = '{sc_id}'
    AND iqp_problem_count_recent.problem_count > 0
    AND (iqp_scenarios.scn_type NOT IN ('Stats', 'N','Feature')
    OR '{filter_type}' IN ('Stats', 'N','Feature'))
    AND (iqp_scenarios.scn_type = '{filter_type}' OR '{filter_type}' = '')""".format(
        namespace_name=org_namespace_name,
        sc_id=sc_id,
        filter_type=issue_filter)
    s = text(query)
    rs = session.execute(s).fetchall()
    # NOTE(review): the first CSV column is headed "Scenario" but filled
    # with the description, not item['name'] -- confirm intent.
    data = ([[str(item['description']),
              int(item['current']),
              int(item['stats_total']),
              float(item['stats_percentage']),
              int(item['trend']),
              int(item['refreshtime'])]
             for item in rs])
    # BUG FIX: the file was never closed before being reopened for the
    # response, so buffered rows could be lost; close() flushes them.
    ofile = open(_CSV_FILE_PATH, "wb")
    try:
        writer = csv.writer(ofile, delimiter=',',quotechar='"', quoting=csv.QUOTE_ALL)
        writer.writerow(headers)
        for eachRow in data:
            writer.writerow(eachRow)
    finally:
        ofile.close()
    return Response(
        open(_CSV_FILE_PATH, 'r'),
        headers = d)
@authenticateuser
@verifyloggedin
@expose('/export/scenario/<s_name>')
def exportScenarioTable(request,s_name):
    """Export the full contents of a scenario's data table as CSV.

    The ``headers`` query parameter lists the column names to emit, in
    order; the table itself is ``<namespace>.<s_name>``.
    """
    import csv
    d = Headers()
    d.add("Content-Type","application/octet-stream")
    d.add('Content-Disposition', 'attachment;filename=iqpgenerated.csv')
    user = request.client_user_object
    user_id = user.user_id
    org = request.client_organization_object
    org_namespace_name = org.organization_namespace_name
    headers = str(request.args["headers"]).split(",")
    tableName = s_name
    # SECURITY FIX: the table name comes from the URL and is interpolated
    # into SQL; restrict it to identifier characters to block injection.
    if not tableName or not all(ch.isalnum() or ch == '_' for ch in tableName):
        raise NotFound()
    query = """SELECT * FROM {table}""".format(table = org_namespace_name +'.'+tableName)
    s = text(query)
    rs = session.execute(s).fetchall()
    # BUG FIX: the file was never closed before being reopened for the
    # response, so buffered rows could be lost; close() flushes them.
    ofile = open(_CSV_FILE_PATH, "wb")
    try:
        writer = csv.writer(ofile, delimiter=',',quotechar='"', quoting=csv.QUOTE_ALL)
        writer.writerow(headers)
        for item in rs:
            lis = [str(item[eachColumn]) for eachColumn in headers]
            writer.writerow(lis)
    finally:
        ofile.close()
    return Response(open(_CSV_FILE_PATH, 'r'),headers = d)
@authenticateuser
@verifyloggedin
@expose('/export/sendemail/')
def export_sendEmail(request):
    """Stub endpoint for e-mailing an export; currently only acknowledges."""
    response_headers = Headers()
    # CORS headers so the local dev frontend may call this endpoint.
    response_headers.add('Access-Control-Allow-Origin', 'http://localhost:5000')
    response_headers.add('Access-Control','allow <*>')
    # Touch the authenticated user context (namespace kept for later use).
    user = request.client_user_object
    user_id = user.user_id
    user_namespace_name = user.user_namespace_name
    # Pull the required arguments; a missing key raises KeyError exactly
    # as before.
    svg = str(request.args["svg"])
    tableData = request.args["tableData"]
    return Response("ok",headers = response_headers)
|
989,285 | cc6b2969cd37693e110f5d715801f0708c923fcd | from Salarie import *
class directeurF(salarie):
    """Director: a salarie subclass that additionally records the year the
    employee was nominated director."""

    def __init__(self, nom, prenom, echelonSal, id, anneeNomination):
        # Initialise the common salarie fields first.
        salarie.__init__(self, nom, prenom, echelonSal, id)
        # Private (name-mangled) nomination year, shown only by afficher().
        self.__anneeNomination = anneeNomination

    def afficher(self):
        # One-line summary in the same layout as the other salarie
        # subclasses. NOTE(review): "Statue" is presumably a typo for
        # "Statut", but it is runtime output so it is left untouched.
        print("* [id = ",self._id,"] Nom et Prenom : ",self._nom, self._prenom," , Salaire : ",self._echelonSal, " , Statue : Directeur , Annee Nomination : ",self.__anneeNomination,".")
|
989,286 | 8886efaabe72b1cd97e08d06e31a5661bb43d244 | import time
start = time.time()
# Sum the decimal digits of 2**1000 (Project Euler problem 16), timing
# how long the computation takes.
digits = str(2 ** 1000)
result = sum(int(digit) for digit in digits)
elapsed = time.time() - start
print(result, elapsed)
|
989,287 | 72de4e096e78cccc161b16e7c5531fa4d6b51560 | import cv2
import numpy as np
class PointCloud:
    """Container for reconstructed 3D points, keyed by point identifier."""

    def __init__(self):
        # Mapping from point id to its 3D coordinates; starts empty.
        self.points3D = {}
|
989,288 | 5f1b99ed5a3de1862542dcd314fa061cd90764e3 | import sys, time
import numpy as np
from cassandra.query import named_tuple_factory, BatchStatement, SimpleStatement
from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT
from cassandra.policies import RetryPolicy, WhiteListRoundRobinPolicy
from cassandra import ConsistencyLevel
from numpy.core.numeric import Inf
from transactions.t1 import execute_t1
from transactions.t2 import execute_t2
from transactions.t3 import execute_t3
from transactions.t4 import execute_t4
from transactions.t5 import execute_t5
from transactions.t6 import execute_t6
from transactions.t7 import execute_t7
from transactions.t8 import execute_t8
# Maps the single-letter transaction code read from stdin to a 1-based
# index into xact_info.
xact_map = {
    "N":1,
    "P":2,
    "D":3,
    "O":4,
    "S":5,
    "I":6,
    "T":7,
    "R":8
}
# maps xact num to [total_xact_cnt, total_exec_time, failed_xact_cnt]
# (index 0 is unused so indices line up with xact_map values).
xact_info = [[0,0,0] for i in range(9)]
# Per-transaction wall-clock latencies in seconds, in arrival order.
latencies = []
if __name__ == '__main__':
    # Route every request through the local node with LOCAL_QUORUM
    # consistency; LOCAL_SERIAL backs any lightweight transactions.
    profile = ExecutionProfile(
        load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']),
        # retry_policy=RetryPolicy(), # DEFAULT
        consistency_level=ConsistencyLevel.LOCAL_QUORUM,
        serial_consistency_level=ConsistencyLevel.LOCAL_SERIAL,
        request_timeout=15,
        # row_factory=named_tuple_factory
    )
    cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: profile})
    session = cluster.connect('wholesale_supplier')
    # session.row_factory = named_tuple_factory
    num_xacts = 0        # transactions that completed without failure
    cnt = 0              # total transactions read from stdin
    total_exec_time = 0 # in seconds
    # Each stdin line is "<code>,<args...>"; dispatch on the code letter.
    for line in sys.stdin:
        input_arr = line.split(",")
        xact = input_arr[0].strip()
        cnt += 1
        print(f'{line.strip()} | Xact {cnt}')
        start_time = time.time()
        isFail = 0 # fail status
        # NOTE(review): T1-T3 receive the pre-split list while T4-T6/T8
        # get the raw line and T7 gets no payload -- presumably each
        # helper parses its own input; confirm against the transactions
        # package.
        if(xact == 'N'):
            isFail = execute_t1(session, input_arr)
        elif(xact == 'P'):
            isFail = execute_t2(session, input_arr)
        elif(xact == 'D'):
            isFail = execute_t3(session, input_arr)
        elif (xact == 'O'):
            isFail = execute_t4(session, line)
        elif (xact == 'S'):
            isFail = execute_t5(session, line)
        elif (xact == 'I'):
            isFail = execute_t6(session, line)
        elif (xact == 'T'):
            isFail = execute_t7(session)
        elif (xact == 'R'):
            isFail = execute_t8(session, line)
        else:
            print('fall thru', xact)
        latency_seconds = time.time() - start_time
        total_exec_time += latency_seconds
        num_xacts += (1 - isFail)
        latencies.append(latency_seconds)
        # Transaction-specific latencies
        xact_num = xact_map[xact]
        xact_info[xact_num][0] += 1
        xact_info[xact_num][1] += latency_seconds
        xact_info[xact_num][2] += isFail
    cluster.shutdown()
    # Aggregate metrics; guard against division by zero on empty input.
    throughput = num_xacts / total_exec_time if total_exec_time > 0 else 0
    avg_latency = total_exec_time / num_xacts * 1000 if num_xacts > 0 else Inf # in ms
    median_latency = np.percentile(latencies, 50) * 1000
    p95_latency = np.percentile(latencies, 95) * 1000
    p99_latency = np.percentile(latencies, 99) * 1000
    # CSV summary goes to stderr so it can be captured separately from
    # the per-transaction progress lines printed to stdout above.
    metrics = "{},{:.3f},{:.3f},{:.3f},{:.3f},{:.3f},{:.3f}".format(
        num_xacts,
        total_exec_time,
        throughput,
        avg_latency,
        median_latency,
        p95_latency,
        p99_latency
    )
    print(metrics, file=sys.stderr)
    print("Total failures: ")
    for i in range(1,9):
        print(f'T{i}: {xact_info[i][2]}/{xact_info[i][0]}')
    print("Average transaction latency: ")
    for xact_num in range(1,9):
        total_time = xact_info[xact_num][1]
        total_count = xact_info[xact_num][0]
        xact_avg_latency = total_time / total_count if total_count > 0 else Inf
        xact_metric = f'T{xact_num}: {xact_avg_latency}s'
        print(xact_metric)
|
989,289 | c74bc3db9a8e7c7bbcf8aa841bcdaca20777ddf3 | # -*- coding: utf-8 -*-
'''
(Compute the volume of a cylinder) Write a program that
reads in the radius and length of a cylinder and computes the
area and volume using the following formulas:
area = radius * radius * π volume = area * length
Here is a sample run:
Enter the radius and length of a cylinder: 5.5, 12
The area is 95.0331 The volume is 1140.4
'''
import math


def cylinder_area(radius):
    """Return the area of the cylinder's circular base: pi * r**2."""
    return math.pi * radius * radius


def cylinder_volume(radius, length):
    """Return the cylinder's volume: base area * length."""
    return cylinder_area(radius) * length


if __name__ == "__main__":
    print("This program prints the volume of a cylinder: ")
    radius = float(input("Enter the radius of the cylinder: "))
    length = float(input("Enter the length of the cylinder "))
    # BUG FIX: the original computed a meaningless product for the area
    # (2*r*r*length*(2*3.14*r)), never printed it, and used 3.14 instead
    # of pi. The exercise asks for area = pi*r*r and volume = area*length
    # (matching the sample output 95.0331 / 1140.4 for r=5.5, l=12).
    area = cylinder_area(radius)
    volume = cylinder_volume(radius, length)
    print("The area is", area, "The volume is", volume)
|
989,290 | bd7f92f1c5cdcb36874e051c061d2be04c10b214 | # -*- coding: utf-8 -*-
import sys
from hw1_ui import Ui_MainWindow
import cv2
from PyQt5.QtWidgets import QMainWindow, QApplication
import time
class MainWindow(QMainWindow, Ui_MainWindow):
    """Main window for the HW1 image-processing exercises; each btnX_Y is
    wired to the handler for the corresponding sub-problem."""

    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.setupUi(self)
        self.onBindingUI()

    # UI components are defined in hw1_ui.py; open hw1.ui in qt-designer
    # to inspect them.
    def onBindingUI(self):
        """Connect every button to its click handler."""
        self.btn1_1.clicked.connect(self.on_btn1_1_click)
        self.btn1_2.clicked.connect(self.on_btn1_2_click)
        self.btn1_3.clicked.connect(self.on_btn1_3_click)
        self.btn1_4.clicked.connect(self.on_btn1_4_click)
        self.btn2_1.clicked.connect(self.on_btn2_1_click)
        self.btn3_1.clicked.connect(self.on_btn3_1_click)
        self.btn4_1.clicked.connect(self.on_btn4_1_click)
        self.btn4_2.clicked.connect(self.on_btn4_2_click)
        self.btn5_1.clicked.connect(self.on_btn5_1_click)
        self.btn5_2.clicked.connect(self.on_btn5_2_click)

    # button for problem 1.1
    def on_btn1_1_click(self):
        """Load dog.bmp, print its size and display it until a key press."""
        img = cv2.imread('dog.bmp')
        print('Height = %d' % img.shape[0])
        print('Width = %d' % img.shape[1])
        cv2.imshow('dog',img)
        cv2.waitKey(0)
        # BUG FIX: cv2.destroyAllWindows() takes no arguments -- passing
        # 'dog' raised a TypeError. destroyWindow closes just this window.
        cv2.destroyWindow('dog')

    def on_btn1_2_click(self):
        """Show color.png beside a channel-rotated copy of it."""
        img = cv2.imread('color.png')
        print(type(img))
        # BUG FIX: "img2 = img" only aliased the same array, so the
        # channel swap below also mutated img and both windows displayed
        # the identical swapped image. A real copy keeps the original.
        img2 = img.copy()
        img2[:,:,[0,1,2]] = img2[:,:,[1,2,0]]
        cv2.imshow('color',img)
        cv2.imshow('color2',img2)

    def on_btn1_3_click(self):
        # Not implemented yet.
        pass

    def on_btn1_4_click(self):
        pass

    def on_btn2_1_click(self):
        pass

    def on_btn3_1_click(self):
        pass

    def on_btn4_1_click(self):
        pass

    def on_btn4_2_click(self):
        pass

    def on_btn5_1_click(self):
        # edtAngle, edtScale, edtTx, edtTy access the ui line edits.
        pass

    def on_btn5_2_click(self):
        pass
### ### ###
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, show the main
    # window and hand control to the event loop.
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())
|
989,291 | eb15d09d7f2e8aaf1e98a7073a02bc8295fa772c | #!/usr/bin/python
import datetime
def getSpeed(data, recency):
    """Return the average of the data points recorded within the last
    `recency` seconds, or the string "N/A" when there are none.

    data    -- dict mapping datetime.datetime timestamps to numeric samples
    recency -- window size in seconds, measured back from now
    """
    if not data:
        return "N/A"
    cutoff = datetime.datetime.now() - datetime.timedelta(seconds=recency)
    # Keep only the samples newer than the cutoff. (FIX: the original
    # used the Python-2-only dict.iteritems(); .items() behaves the same
    # here and also works on Python 3.)
    recent = [value for key, value in data.items() if key > cutoff]
    if not recent:
        return "N/A"
    # sum()/len() is the same mean the original computed with reduce().
    return sum(recent) / len(recent)
|
989,292 | 941419981603a1ee618374e48e763e588928c2df | # coding: utf-8
from __future__ import unicode_literals, absolute_import
from django.contrib import admin
from .models import Author, Publisher, Book
# Expose the catalogue models in the Django admin with default options.
admin.site.register([Author, Publisher, Book])
|
989,293 | 8d23dae7f56a8215cfaa6beedc06fdcb9708ed04 | # Normalising data to same size to improve accuracy of machine learning model
from PIL import Image
import os
url_path = "/Users/josh/Hackathons/ru_hacking/downloads/Not hotdog/"
# Resize every image in the download folder to 1000x800; files that
# cannot be opened or converted are deleted so that only normalised
# JPEGs remain for training.
for filename in os.listdir(url_path):
    route = url_path+filename
    if os.path.isfile(route):
        try:
            im = Image.open(route)
            f, e = os.path.splitext(route)
            # NOTE(review): Image.ANTIALIAS is deprecated/removed in
            # newer Pillow releases (use Image.LANCZOS) -- confirm the
            # installed version before changing it.
            imResize = im.resize((1000,800), Image.ANTIALIAS)
            imResize.save(f + ' resized.jpg', 'JPEG', quality=90)
            os.remove(route)
        # BUG FIX: the bare "except:" also swallowed KeyboardInterrupt
        # and SystemExit; only ordinary errors should delete the file.
        except Exception:
            os.remove(route)
    else:
        continue
|
989,294 | 98136601d7efa49734324b5648050b305b28bd2a | from tbselenium.tbdriver import TorBrowserDriver
from tbselenium.utils import start_xvfb,stop_xvfb
import subprocess,os
from tbselenium.utils import launch_tbb_tor_with_stem
import Config as cm
from utils import ReadWebList, getTime, get_tor_circuits, SetOutputPath, writeLog, RemoveTmpFile, getGuard, writeStreamInfo
from utils import TimeExceededError, timeout, cancel_timeout, TorSetupError, TBBSetupError
from utils import ReadOpenWebList, RemoveProcess
from utils import StreamProcessing, removepcapfile, tarNetworkTraffic, MoveLogFile
from stem.control import Controller
import time, sys
from os.path import join
import pathlib
from selenium import webdriver
import argparse
from selenium.webdriver import DesiredCapabilities
from pcapParser import parseAllpcap
#####################
# tor browser setup #
#####################
def TBBSetup(driverpath,controller,idx):
    """Create a TorBrowserDriver for driverpath; return 0 on failure so
    the caller's retry loop can try again."""
    try:
        return TorBrowserDriver(driverpath, tor_cfg=cm.USE_STEM)
    except Exception as e:
        # Log and report the failure, then signal it with the 0 sentinel.
        writeLog("[crawl.py error]TBBSetup error: "+str(e))
        print("[crawl.py error]TBBSetup error")
        print(str(e))
        return 0
#########################
# firefox browser setup #
#########################
def FFSetup():
    """Start Firefox configured to proxy through the local Tor SOCKS port."""
    profile = webdriver.FirefoxProfile()
    # Manual proxy configuration (type 1) pointing at the Tor SOCKS port.
    proxy_prefs = {
        "network.proxy.type": 1,
        "network.proxy.socks": "localhost",
        "network.proxy.socks_port": cm.TorSocksPort,
    }
    for pref_name, pref_value in proxy_prefs.items():
        profile.set_preference(pref_name, pref_value)
    return webdriver.Firefox(profile)
########################
# Chrome browser setup #
########################
def ChromeSetup():
    """Start Chrome configured to proxy through the local Tor SOCKS port."""
    options = webdriver.ChromeOptions()
    # Same resulting flag as the original string concatenation.
    proxy_url = "socks5://" + "localhost" + ":" + str(cm.TorSocksPort)
    options.add_argument("--proxy-server=" + proxy_url)
    options.add_argument("--disable-gpu")
    return webdriver.Chrome(chrome_options = options)
########################
# controller ,torsetup #
########################
# setup tor process and controller
def TorSetup(tor_binary):
    """Launch the TBB-bundled tor process and attach an authenticated stem
    Controller to it; return (0, 0) on any failure so callers can retry."""
    tor_process,controller = 0,0
    print("in tor setup binary = ",tor_binary)
    try:
        tor_process = launch_tbb_tor_with_stem(tbb_path=cm.driverpath, torrc=cm.TorConfig,
                tor_binary=tor_binary)
        # Control-port connection used later for circuit/stream management.
        controller = Controller.from_port(port=int(cm.TorConfig['ControlPort']))
        controller.authenticate()
        print("getting tor circuit...")
        print("write entry guard/ circuit to log...")
    except Exception as e:
        # Any failure resets both handles to the 0 sentinel.
        print("[crawl.py error]TorSetup: "+str(e)+"\n")
        writeLog("[crawl.py error]TorSetup: "+str(e)+"\n")
        tor_process,controller = 0,0
    return tor_process,controller
####################
# close all stream #
####################
# close_all_streams
# remove temp file
# xvfb
def cleanupStream(controller,crawlcnt,domain):
    """Log every open Tor stream (with its circuit's entry guard) for this
    crawl instance, then close it so the next visit starts clean."""
    print("check & remove existing streams...")
    l = []
    for stream in controller.get_streams():
        l.append(stream.circ_id)
    # Map circuit id -> entry guard for the circuits collected above.
    d = getGuard(controller,l)
    for stream in controller.get_streams():
        try:
            writeStreamInfo("%s,%s,%s,%s,%s,%s,%s,%s"%(domain,crawlcnt,stream.id,stream.circ_id,d[stream.circ_id],stream.target_address,stream.target,str(stream.target_port)))
            controller.close_stream(stream.id)
        except Exception as e:
            # Streams may vanish between listing and closing; log and move on.
            writeLog("### error in closing stream: "+str(stream.id))
            pass
#########################
# launch tor with torrc #
#########################
# start a tor process
def launch_tor_with_custom_stem(datalist,browser):
    """Crawl every entry of datalist over one shared Tor process, capturing
    each visit's network traffic with tcpdump.

    datalist -- list of entries where ele[0] is the domain and ele[2] is
                the instance index within the site
    browser  -- 'TBB' (Tor Browser), 'FF' (Firefox) or 'CR' (Chrome);
                the latter two proxy through the local Tor SOCKS port
    """
    print("length of data: ",len(datalist))
    tor_binary = join(cm.TorProxypath, cm.DEFAULT_TOR_BINARY_PATH)
    tor_process,controller = 0,0
    # BUG FIX: xvfb_display must be bound before the try block. If Tor
    # setup failed, the original raised TorSetupError before start_xvfb()
    # ever ran, and the finally clause crashed with a NameError on
    # stop_xvfb(xvfb_display).
    xvfb_display = None
    try:
        TRYTOR_CNT = cm.TRYCNT
        while TRYTOR_CNT > 0 and tor_process == 0 and controller == 0:
            print("try to setup tor:",str(TRYTOR_CNT))
            tor_process,controller = TorSetup(tor_binary)
            TRYTOR_CNT -= 1
        if tor_process == 0:
            raise TorSetupError
        print("finish tor proxy setup...")
        xvfb_display = start_xvfb() # virtual display
        for ele in datalist:
            t = getTime()
            savepath,out_img = SetOutputPath(ele,t)
            p = 0
            try:
                # Retry browser startup up to cm.TRYCNT times.
                driver,TRYCNT = 0,cm.TRYCNT
                while driver == 0 and TRYCNT != 0:
                    print("try to setup tbb:",str(TRYCNT))
                    args = (cm.driverpath,controller,ele[2]) if browser == 'TBB' else ()
                    options = {'TBB': TBBSetup, 'FF': FFSetup, 'CR': ChromeSetup}
                    driver = options[browser](*args)
                    TRYCNT -= 1
                if driver == 0:
                    raise TBBSetupError
                # Capture this visit's TCP traffic (ssh excluded) to savepath.
                cmd = "tcpdump -i %s tcp and not port ssh -w %s"%(cm.netInterface,savepath)
                print('cmd = ',cmd)
                cmd = cmd.split(' ')
                p = subprocess.Popen(cmd)
                try:
                    timeout(cm.VISITPAGE_TIMEOUT)
                    driver.get('https://'+ele[0])
                    cancel_timeout()
                    time.sleep(cm.DURATION_VISIT_PAGE)
                    p.terminate()
                    # Screenshots are only taken for instance indices 0 and 2.
                    if(ele[2] == 0 or ele[2] == 2):
                        driver.get_screenshot_as_file(out_img)
                    writeLog(str(t)+","+ele[0]+","+str(ele[2]))
                    print("Finish tcpdump sleep...")
                except TimeExceededError:
                    writeLog("Error crawling,"+ele[0]+","+str(ele[2])+"\n"+str("Page visit Timeout"))
                finally:
                    cancel_timeout()
            except TBBSetupError:
                print("[crawl.py error]: unable to setup TBB")
                writeLog("[crawl.py error]: unable to setup TBB")
            except Exception as e:
                with open(cm.ErrorFilePath,'a+') as fw:
                    fw.write(ele[0]+","+str(e)+"\n")
                writeLog("Error crawling,"+ele[0]+","+str(ele[2])+"\n"+str(e))
            finally:
                # Best-effort teardown: tcpdump, leftover Tor streams, browser.
                if p != 0 and p.returncode != 0:
                    try:
                        p.terminate()
                    except Exception as e:
                        writeLog("[crawl.py] tcpdump terminate error: "+str(e))
                if controller != 0:
                    cleanupStream(controller,str(ele[2]),ele[0])
                if driver != 0:
                    try:
                        timeout(30)
                        driver.quit()
                        cancel_timeout()
                    except Exception as e:
                        cancel_timeout()
                        writeLog("[crawl.py] driver quit error: "+str(e))
                # Pause longer between different sites than between
                # instances of the same site.
                if ele[2] != 3:
                    time.sleep(cm.PAUSE_BETWEEN_INSTANCES)
                else:
                    time.sleep(cm.PAUSE_BETWEEN_SITES)
                RemoveTmpFile()
                RemoveProcess()
    except TorSetupError:
        print("[crawl.py] unable to set up tor proxy")
        writeLog("[crawl.py] unable to set up tor proxy")
    except Exception as e:
        print("[crawl.py]launch_tor_with_custom_stem Error")
        print("Error:",str(e))
        writeLog("[crawl.py]launch_tor_with_custom_stem Error : "+str(e))
    finally:
        if tor_process != 0:
            tor_process.kill()
        # Only stop the virtual display if it was actually started.
        if xvfb_display is not None:
            stop_xvfb(xvfb_display)
def ParsePcapFile():
    """Post-process all captured pcaps: split them per stream, archive the
    resulting traces, remove the raw capture files and move the logs into
    the archive directory."""
    StreamList = StreamProcessing(cm.StreamFile)
    print("start parsing pcap file in %s to %s"%(cm.ResultDir,cm.pcapDir,))
    parseAllpcap(cm.ResultDir,StreamList,cm.pcapDir)
    print("start compress traces...")
    outputtardir = tarNetworkTraffic(cm.pcapDir,cm.rawtrafficdir) # tar the network traffic; saved in rawtrafficdir
    print("remove result_902/* , traces/* ...")
    removepcapfile([cm.ResultDir,cm.pcapDir]) # remove pcap and csv (the csv is already archived in rawTraffic)
    print("move logs to %s"%(outputtardir))
    MoveLogFile(outputtardir)
#################
# main function #
#################
def main(opts):
    """Build the crawl list (closed or open world) and crawl it in chunks,
    restarting the Tor process every cm.MAX_SITES_PER_TOR_PROCESS entries,
    then post-process the captured pcaps."""
    if opts.openworld == False:
        datalist = ReadWebList()
        datalen = len(datalist)
    else:
        datalist = ReadOpenWebList(5000,1) # 5000 sites for open world dataset, each with 1 instance
        datalen = len(datalist)
        print("len datalist for openworld = ",len(datalist))
    # Crawl in fixed-size chunks; each chunk gets a fresh Tor process.
    for i in range(0,datalen,cm.MAX_SITES_PER_TOR_PROCESS):
        if i + cm.MAX_SITES_PER_TOR_PROCESS < datalen:
            writeLog("data start from %s to %s"%(datalist[i][0],datalist[i+cm.MAX_SITES_PER_TOR_PROCESS-1][0]))
            print("data start from %s to %s\n"%(datalist[i][0],datalist[i+cm.MAX_SITES_PER_TOR_PROCESS-1][0]))
            launch_tor_with_custom_stem(datalist[i:i+cm.MAX_SITES_PER_TOR_PROCESS], opts.browser)
        else:
            # Final (possibly short) chunk.
            writeLog("data start from %s to %s"%(datalist[i][0],datalist[-1][0]))
            print("data start from %s to %s\n"%(datalist[i][0],datalist[-1][0]))
            launch_tor_with_custom_stem(datalist[i:], opts.browser)
    ParsePcapFile()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Crawler with Tor Proxy')
parser.add_argument('--browser', default='TBB',
type=str, choices=['TBB','FF','CR'], dest='browser')
parser.add_argument('--openworld',help='crawl OpenWorld Dataset',action='store_true')
parser.add_argument('--test', '-t', help='test pcap file',action='store_true')
opts = parser.parse_args()
main(opts)
|
989,295 | 301108fa03bed433f00927c9836ef61d3126b42f |
from xai.brain.wordbase.nouns._bush import _BUSH
#calss header
class _BUSHES(_BUSH, ):
def __init__(self,):
_BUSH.__init__(self)
self.name = "BUSHES"
self.specie = 'nouns'
self.basic = "bush"
self.jsondata = {}
|
989,296 | 76ad02b6b99dcd8534d1eb9ba14603907fc46fe4 | # pylint: disable=no-self-use,invalid-name
import pytest
import pathlib
from allennlp.common import Params
from allennlp.common.util import ensure_list
from csqa.data.dataset_readers import QEReader
class TestQEReader:
    """Unit tests for the QEReader dataset reader."""

    # Directory holding the test fixture files (tests/fixtures).
    FIXTURES_ROOT = (pathlib.Path(__file__).parent /
                     ".." / ".." / ".." / "tests" / "fixtures").resolve()

    @pytest.mark.parametrize("lazy", (True, False))
    def test_read(self, lazy):
        """The sample fixture must yield exactly 10 instances, each with
        'tokens' and 'label' fields, in both lazy and eager modes."""
        params = Params({'lazy': lazy})
        reader = QEReader.from_params(params)
        instances = reader.read(
            str(self.FIXTURES_ROOT / 'qe_sample.txt'))
        instances = ensure_list(instances)
        assert len(instances) == 10
        sample = instances[0]
        tokens = [t.text for t in sample.fields['tokens']]
        label = sample.fields['label']
        print(tokens)
        print(label)

    def test_can_build_from_params(self):
        """Default construction must install a SingleIdTokenIndexer."""
        reader = QEReader.from_params(Params({}))
        # pylint: disable=protected-access
        assert reader._token_indexers['tokens'].__class__.__name__ == 'SingleIdTokenIndexer'
|
989,297 | 998b222da79249c23e7ef7efeeb2f25147578a2c | #
#!/usr/bin/env python
from netmiko import Netmiko
from credentials import password1, username1
# Connection parameters for the two IOS devices under management.
cisco1 = {
    "host": "10.223.252.122",
    "username": username1,
    "password": password1,
    "device_type": "cisco_ios",
}
cisco2 = {
    "host": "10.223.148.202",
    "username": username1,
    "password": password1,
    "device_type": "cisco_ios",
}
# Open an SSH session to each device and print its CLI prompt as a
# basic connectivity check.
for device in (cisco1, cisco2):
    net_connect = Netmiko(**device)
    print(net_connect.find_prompt())
989,298 | 4e89ca060c5ef884a9abd54524b1f79fd5e2a9ae | if __name__ == "__main__":
import django
import os
import sys
import inspect
from pathlib import PurePath
root = PurePath(os.path.abspath(inspect.getfile(inspect.currentframe()))).parent.parent
sys.path.append(str(root))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'distantworlds2.settings.dev')
django.setup()
from core.models import Commander
Commander.scrape_roster()
|
989,299 | 6d3bc56131591a99a3773aede65781918fc62917 | # 帮帮Stish
# Description
#
# Satish wants to prepare for tomorrow's exam. He knows the distribution of marks for the subject along with the time needed to learn each topic. You are given the time remaining before the exam, the weightage of each topic, and the passing marks for the subject. Find the maximum marks Satish can attain by studying as many topics as possible within the available hours.
#
#
# Input
#
# The first line of input contains the number of testcases t.
# The first line of each testcase contains the no of topics(n) ,
# time remaining for exam(h) in hour and
# passing marks(p).
# Each 'n' lines contain
# u(time to learn topic) and
# v(weightage of topic in exam) .
#
#
# Output
#
# For each test case print "YES" along with time taken or "NO".
#
# Constraints:
#
# 1<=t<=100
#
# 1<=n<=300
#
# 1<=h<=150
#
# 1<=p<=35
#
#1<=u,v<=25
def distribution():
    """Read test cases from stdin and, per case, print "YES <time>" when a
    greedy topic selection beats the passing mark within the remaining
    hours, else "NO".

    Input format (see the problem statement above):
      t                 -- number of test cases
      n h p             -- topics, hours remaining, passing marks
      n lines of "u v"  -- time to learn a topic, its weightage

    NOTE(review): this is a greedy scan over topics sorted by learning
    time, not an exact 0/1 knapsack, so it may miss the true optimum --
    confirm against the expected judge output.
    """
    test_cases = int(input())
    for i in range(0, test_cases):
        content_info = list(map(int, input().strip().split(' ')))
        time = content_info[1]   # hours remaining (h)
        mark = content_info[2]   # passing marks (p)
        # NOTE(review): `dict` (and `max` below) shadow Python builtins.
        dict = {}
        course = []
        res = []   # feasible (time_used, marks_earned) candidates
        for i in range(0,content_info[0]):
            tmp = list(map(int, input().strip().split(' ')))
            dict[tmp[0]] = tmp[1]  # learning time -> weightage
            course.append(tmp[0])
        course.sort()
        # Try every starting topic; greedily add later (longer) topics
        # while they still fit in the remaining time.
        for i in range(0, content_info[0]):
            mark_count = 0
            time_count = 0
            time_count += course[i]
            mark_count += dict[course[i]]
            if time_count > time:
                time_count = 0
                continue
            for j in range(i+1,content_info[0]):
                time_count += course[j]
                if time_count < time:
                    mark_count += dict[course[j]]
                else:
                    time_count -= course[j]
                    break
            if mark_count > mark:
                res.append((time_count,mark_count))
        if res:
            # Report the candidate(s) achieving the best mark total.
            # NOTE(review): ties print multiple YES lines -- verify
            # whether the judge expects exactly one.
            max = res[0][1]
            for i in res:
                if i[1] > max:
                    max = i[1]
            for i in res:
                if i[1] == max:
                    print('YES '+ str(i[0]))
        else:
            print('NO')


if __name__ == '__main__':
    distribution()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.