index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
997,000 | 30b017f9421b3e487e5d25f4b8acd6906ddd0e78 | class Ponto:
def __init__(self, x, y):
    """Store the point's x/y coordinates in private attributes."""
    self._x = x
    self._y = y

def getX(self):
    """Return the x coordinate."""
    return self._x

def getY(self):
    """Return the y coordinate."""
    return self._y

def setX(self, x):
    """Replace the x coordinate."""
    self._x = x

def setY(self, y):
    """Replace the y coordinate."""
    self._y = y
def qualQuadrante(self):
    """Return the quadrant number (1-4) containing this point.

    Returns the string 'Origem do plano' for the origin, and None for
    points lying on an axis (unchanged from the original contract).
    """
    if self.getX() > 0 and self.getY() > 0:
        return 1
    elif self.getX() < 0 and self.getY() > 0:
        return 2
    # Bug fix: the original compared the bound method object (self.getX)
    # instead of calling it, so this branch raised/never matched.
    elif self.getY() < 0 and self.getX() > 0:
        return 4
    elif self.getX() < 0 and self.getY() < 0:
        return 3
    elif self.getX() == 0 and self.getY() == 0:
        return 'Origem do plano'
class Quadrilatero():
    """Axis-aligned region bounded by x <= P1 and y <= P2."""

    def __init__(self, P1, P2):
        self.P1 = P1  # x bound
        self.P2 = P2  # y bound

    def contidoEmQ(self, a):
        """Return True when point *a* lies within both bounds."""
        within_y = a.getY() <= self.P2
        within_x = a.getX() <= self.P1
        return within_y and within_x
|
997,001 | fdb61ad4a468b05c2a57d2340328c9293d8fdea4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copy the unique lines of test05.txt to test05_new.txt, preserving order.
li = []  # lines already written
with open("D:\coding\python\project\python-upgrade\day05\\test05.txt",'r') as f:
    with open("D:\coding\python\project\python-upgrade\day05\\test05_new.txt",'w') as g:
        for line in f:
            print(line)
            # Linear membership scan; fine for small files.
            if line not in li:
                li.append(line)
                g.write(line)
|
997,002 | 63e2605c1babc8114c89457d8154576a27fff54f | '''
leetcode 1047: remove all adjacent duplicates in a string.
Given a string of lowercase letters, a duplicate removal picks two adjacent equal characters and deletes them. Repeat removals until none are possible and return the resulting string.
Approach: use a stack. Scan the string; when the stack is non-empty and the current character equals the top of the stack, pop the top, otherwise push the current character. Joining the remaining stack contents gives the answer. Time O(N), space O(N).
'''
class Solution:
    def removeDuplicates(self, S: str) -> str:
        """Repeatedly delete adjacent equal character pairs via a stack."""
        remaining = []
        for ch in S:
            if remaining and remaining[-1] == ch:
                # Current char cancels the previous one.
                remaining.pop()
            else:
                remaining.append(ch)
        return ''.join(remaining)
def my_test():
    """Sanity-check removeDuplicates against known input/output pairs."""
    solver = Solution()
    cases = (('abbaca', 'ca'), ('abcddcba', ''))
    for given, expected in cases:
        assert solver.removeDuplicates(given) == expected
|
997,003 | bd13cf83262f6d219857ed05860e33d7255e2210 | from tkinter import *
from tkinter import filedialog
from over_temps import go_time
from under_temps import go_time2
# One independent 10-slot list per work-order variable. The original used a
# chained assignment (a = b = ... = [''] * 10), which bound every name to the
# SAME list object, so mutating one would mutate all of them.
wo_num, wo_num2, wo_num3, wo_num4, wo_num5, wo_num6, wo_num7, wo_num8, wo_num9, wo_num10 = ([''] * 10 for _ in range(10))
total_wo = [wo_num, wo_num2, wo_num3, wo_num4, wo_num5, wo_num6, wo_num7, wo_num8, wo_num9, wo_num10]
total_list = []     # work-order numbers collected from the entry widgets
csv_num = ''        # selected CSV file name
temp_check = ''     # '1' = over-500 profile, '2' = under-500 profile
def retrieve_entries():
    """Collect the non-empty work-order numbers from the ten Entry widgets.

    Rebuilds total_list from scratch and marks each filled row with a
    check-mark label.
    """
    global total_wo
    global total_list
    global total_entries
    # Start from a clean list on every click.
    if total_list != []:
        total_list = []
    for x in range(0, 10):
        container = total_entries[x].get()
        if container != '':
            total_list.append(container)
            total_wo_val[x].config(text='\u2713')
def sel():
    """Record the selected temperature profile ('1' = over, '2' = under 500)."""
    global temp_check
    temp_check = str(selection.get())
def exit_now():
    """Close the application window."""
    root.destroy()
def submit_now():
    """Generate the chart once WO numbers, a CSV file and a profile exist."""
    global total_list
    global csv_num
    global temp_check
    check_entries = len(total_list)
    if check_entries > 0 and csv_num != '' and temp_check == '1':
        # Over-500-degree profile.
        go_time(total_list, csv_num)
    elif check_entries > 0 and csv_num != '' and temp_check == '2':
        # Under-500-degree profile.
        go_time2(total_list, csv_num)
def browsefunc():
    """Prompt for a CSV file and display its name in the path label."""
    filename = filedialog.askopenfilename()
    global csv_num
    # Strip the hard-coded project folder so only the file name remains.
    csv_num = filename.replace('C:/Users/Mike/Python/Personal Projects/Oven Chart Generator/', '')
    print(csv_num)
    pathlabel.config(text=csv_num)
# --- top-level GUI construction -------------------------------------------
root = Tk()
root.wm_title("Generate Chart")
browsebutton = Button(root, text="Browse for CSV", command=browsefunc)
pathlabel = Label(root)
wo_entry = Button(root, text="Enter WO#'s", command=retrieve_entries)
# One check-mark label per work-order row.
wo_val = Label(root)
wo_val2 = Label(root)
wo_val3 = Label(root)
wo_val4 = Label(root)
wo_val5 = Label(root)
wo_val6 = Label(root)
wo_val7 = Label(root)
wo_val8 = Label(root)
wo_val9 = Label(root)
wo_val10 = Label(root)
total_wo_val = [wo_val, wo_val2, wo_val3, wo_val4, wo_val5, wo_val6, wo_val7, wo_val8, wo_val9, wo_val10]
# String variables backing the ten entry widgets.
work_order = StringVar()
workorder2 = StringVar()
workorder3 = StringVar()
workorder4 = StringVar()
workorder5 = StringVar()
workorder6 = StringVar()
workorder7 = StringVar()
workorder8 = StringVar()
workorder9 = StringVar()
workorder10 = StringVar()
profile = StringVar()
selection = IntVar()  # 1 = over 500 degrees, 2 = under 500 degrees
over = Radiobutton(root, text="OVER 500 Degrees", variable=selection, value=1, command=sel)
under = Radiobutton(root, text="LESS THAN 500 Degrees", variable=selection, value=2, command=sel)
ent = Entry(root,textvariable=work_order)
ent2 = Entry(root, textvariable=workorder2)
ent3 = Entry(root, textvariable=workorder3)
ent4 = Entry(root, textvariable=workorder4)
ent5 = Entry(root, textvariable=workorder5)
ent6 = Entry(root, textvariable=workorder6)
ent7 = Entry(root, textvariable=workorder7)
ent8 = Entry(root, textvariable=workorder8)
ent9 = Entry(root, textvariable=workorder9)
ent10 = Entry(root, textvariable=workorder10)
total_entries = [ent, ent2, ent3, ent4, ent5, ent6, ent7, ent8, ent9, ent10]
lab = Label(root, text="WO #1:")
lab_2 = Label(root, text="WO #2:")
lab_3 = Label(root, text="WO #3:")
lab_4 = Label(root, text="WO #4:")
lab_5 = Label(root, text="WO #5:")
lab_6 = Label(root, text="WO #6:")
lab_7 = Label(root, text="WO #7:")
lab_8 = Label(root, text="WO #8:")
lab_9 = Label(root, text="WO #9:")
lab_10 = Label(root, text="WO #10:")
prof = Label(root, text="Profile Type : ")
file_sel = Label(root, text="File Selected : ")
reset = Button(root, text="Exit", command=exit_now)
submit = Button(root, text="Submit", command=submit_now)
# --- grid layout: labels col 0, entries col 1, check marks col 2 ----------
lab.grid(row=0,column=0)
lab_2.grid(row=1, column=0)
lab_3.grid(row=2, column=0)
lab_4.grid(row=3, column=0)
lab_5.grid(row=4, column=0)
lab_6.grid(row=5, column=0)
lab_7.grid(row=6, column=0)
lab_8.grid(row=7, column=0)
lab_9.grid(row=8, column=0)
lab_10.grid(row=9, column=0)
ent.grid(row=0,column=1)
ent2.grid(row=1, column=1)
ent3.grid(row=2, column=1)
ent4.grid(row=3, column=1)
ent5.grid(row=4, column=1)
ent6.grid(row=5, column=1)
ent7.grid(row=6, column=1)
ent8.grid(row=7, column=1)
ent9.grid(row=8, column=1)
ent10.grid(row=9, column=1)
wo_val.grid(row=0,column=2)
wo_val2.grid(row=1, column=2)
wo_val3.grid(row=2, column=2)
wo_val4.grid(row=3, column=2)
wo_val5.grid(row=4, column=2)
wo_val6.grid(row=5, column=2)
wo_val7.grid(row=6, column=2)
wo_val8.grid(row=7, column=2)
wo_val9.grid(row=8, column=2)
wo_val10.grid(row=9, column=2)
prof.grid(row=11,column=0)
over.grid(row=11, column=2)
under.grid(row=11, column=1)
browsebutton.grid(row=12,column=0)
file_sel.grid(row=13,column=0)
pathlabel.grid(row=13,column=1)
reset.grid(row=16,column=3)
submit.grid(row=15,column=3)
wo_entry.grid(row=10, column=0)
root.mainloop()
|
997,004 | 234282af56b2d04b73c7199b4670ca1749848dea | refresh = 5
version = 20160123.01
urls = ['http://www.radioalgerie.dz/news/ar/']
regex = [r'^https?:\/\/[^\/]*radioalgerie\.dz']
videoregex = []
liveregex = [] |
997,005 | 3bab32aa503de268a7992da8c3d825898f5e0909 | #!/usr/bin/python3
import cgi
import subprocess
import convertImage
# Minimal CGI endpoint: emit the HTTP header, then run the image conversion.
print("content-type: text/html")
print()

# Read form fields: "x" is the image name, "y" presumably a region/option
# parameter -- confirm against convertImage.convertImages.
mydata = cgi.FieldStorage()
name = mydata.getvalue("x")
reg = mydata.getvalue("y")

convertImage.convertImages(name, reg)
|
997,006 | fd0683582d27f2ff1c72ef99f54540cf7d83bf83 | from lichee import plugin
from lichee.representation import representation_base
@plugin.register_plugin(plugin.PluginType.REPRESENTATION, "pass_through")
class PassThroughRepresentation(representation_base.BaseRepresentation):
    """Identity representation layer: forwards its inputs unchanged.

    The original docstring was copied from a VGG model and documented
    attributes (``features``, ``avgpool``) that this class never defines;
    it has been replaced with an accurate description.
    """

    def __init__(self, representation_cfg):
        """Initialize with the representation config; no layers are created."""
        super(PassThroughRepresentation, self).__init__(representation_cfg)

    def forward(self, *inputs):
        """Return the positional inputs as a tuple, untouched."""
        return inputs
|
997,007 | f9851e49b78cd2e194cce713b69bea0287d43dfd | """empty message
Revision ID: a634a2fa3ae8
Revises: 6481f0c7c406
Create Date: 2021-08-29 21:40:40.486700
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'a634a2fa3ae8'
down_revision = '6481f0c7c406'
branch_labels = None
depends_on = None
def upgrade():
    """Drop person.child_id and its self-referencing foreign key."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint('person_ibfk_3', 'person', type_='foreignkey')
    op.drop_column('person', 'child_id')
    # ### end Alembic commands ###
def downgrade():
    """Re-create person.child_id and the person -> person foreign key."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('person', sa.Column('child_id', mysql.INTEGER(), autoincrement=False, nullable=True))
    op.create_foreign_key('person_ibfk_3', 'person', 'person', ['child_id'], ['id'])
    # ### end Alembic commands ###
|
997,008 | 724097ddad7dee94da5548c3afa5fd9c3de95e77 | # Problem No.: 25
# Solver: Jinmin Goh
# Date: 20191211
# URL: https://leetcode.com/problems/reverse-nodes-in-k-group/submissions/
import sys
# Definition for singly-linked list.
class ListNode(object):
    """Singly-linked list node with a value and a next pointer."""
    def __init__(self, x):
        self.val = x       # node payload
        self.next = None   # following node, or None at the tail
class Solution(object):
    def reverseKGroup(self, head, k):
        """Reverse a linked list in groups of k nodes (LeetCode 25).

        :type head: ListNode
        :type k: int
        :rtype: ListNode

        Buffers k values at a time and emits them in reverse as new nodes;
        a trailing group shorter than k keeps its original order.

        Fixes: removed a leftover debug ``print`` and moved the docstring,
        which was unreachable after the final ``return``, to its proper place.
        """
        temp_list = []
        ans_head = None
        ans_walker = None
        walker = head
        temp_node = None
        while walker:
            # Buffer up to k values from the input list.
            while walker and len(temp_list) < k:
                temp_list.append(walker.val)
                walker = walker.next
            if len(temp_list) == k:
                # Full group: emit buffered values last-in-first-out.
                if not ans_head:
                    ans_head = ListNode(temp_list.pop())
                    ans_walker = ans_head
                while temp_list:
                    temp_node = ListNode(temp_list.pop())
                    ans_walker.next = temp_node
                    ans_walker = ans_walker.next
            else:
                # Short trailing group: keep original order.
                if not ans_head:
                    # The whole list is shorter than k -- return it untouched.
                    return head
                while temp_list:
                    temp_node = ListNode(temp_list.pop(0))
                    ans_walker.next = temp_node
                    ans_walker = ans_walker.next
        return ans_head
|
997,009 | f36f6c110f29908e8b85c1fbee0996db53779b1e | from main import db
from time import sleep
from data.__all_models import Job, Player
def payday():
    """Add one round of wages to every player's money and commit."""
    players = db.query(Player).all()
    for player in players:
        job_id = player.job
        job = db.query(Job).filter(Job.id == job_id).first()
        # NOTE(review): assumes every player.job matches an existing Job row;
        # a missing job would raise AttributeError here -- confirm.
        player.money += job.wage
    db.commit()
    print('payday')
def main():
    """Run payday once per hour, forever."""
    while True:
        payday()
        sleep(3600.0)

if __name__ == '__main__':
    main()
997,010 | 3feccc30ff4e0f9510c28101b07391b885dbd8ec | pi = 3.141592654
es_cierto = True
numero = 0
dividendo = 1
divisor = 3
print("pi :", round(pi, 6))
print("Progresion PI dividendo divisor")
print("------------- --------- -------")
while es_cierto:
numero += round(dividendo / divisor, 5)
print(" {:6.5f}".format(numero).ljust(18) \
,"{:2.0f}".format(dividendo).ljust(9) \
,"{:2.0f}".format(divisor))
valor_puente = dividendo
dividendo += 1
dividendo *= valor_puente
valor_puente = divisor
divisor += 2
divisor *= valor_puente
es_cierto = False if dividendo >= 2551 else True |
997,011 | 7f9143c481ca3fc701b1fb22de6baba697b5d4af | from django.shortcuts import render
from django.contrib.auth.decorators import permission_required
from django.contrib import messages
import csv, io
from zipfile import ZipFile
from django.core.files.base import File
from django.contrib.auth.decorators import login_required
from teacherportal.models import Teacher, Subject, TeacherSubject
@login_required(login_url='/accounts/login/')
def data_upload(request):
    """Bulk-import teachers from an uploaded CSV plus a zip of profile photos.

    GET renders the upload form; POST parses the CSV (skipping the header),
    creates/updates Teacher rows, attaches matching images from the zip,
    and links up to 6 subjects per teacher.
    """
    if request.method == 'GET':
        return render(request, 'bulk_upload.html',{})
    data_file = request.FILES['file']
    images_zip = request.FILES['images']
    # NOTE(review): with `and`, this only rejects when BOTH files have the
    # wrong extension -- `or` was likely intended. Also the template path
    # differs from the GET branch ('bulk_upload.html'). Confirm both.
    if not data_file.name.endswith('.csv') and not images_zip.name.endswith('.zip'):
        messages.error(request, 'This is not a csv file')
        return render(request, 'teacher/bulk_upload.html',{})
    data_set = data_file.read().decode('UTF-8')
    io_string = io.StringIO(data_set)
    # Skip the CSV header row.
    next(io_string)
    zipped_files = ZipFile(images_zip)
    image_names = zipped_files.namelist()
    for column in csv.reader(io_string, delimiter=',', quotechar='"'):
        # Require an email address (column 3); rows without one are skipped.
        if not column[3] == '':
            image_name = column[2]
            teacher, created = Teacher.objects.update_or_create(
                first_name=column[0],
                last_name=column[1],
                email_address=column[3],
                phone_number=column[4],
                room_number=column[5]
            )
            if not image_name == '':
                if image_name in image_names:
                    # Wrap the zip member bytes in a Django File object so it
                    # can be assigned to the ImageField.
                    zip_img = zipped_files.read(image_name)
                    tmp_file = io.BytesIO(zip_img)
                    dummy_file = File(tmp_file)
                    dummy_file.name = image_name
                    dummy_file.size = len(zip_img)
                    dummy_file.file = tmp_file
                    teacher.profile_picture = dummy_file
                    teacher.save()
            # Column 6 holds a comma-separated subject list; cap at 6 total.
            subjects = column[6].split(',')
            subjects_taught_count = TeacherSubject.objects.filter(teacher=teacher).count()
            for subject in subjects:
                if subjects_taught_count>5:
                    break
                subject = subject.strip().lower()
                subject_object, created = Subject.objects.update_or_create(title=subject)
                TeacherSubject.objects.update_or_create(teacher=teacher, subject=subject_object)
                subjects_taught_count +=1
    messages.success(request, 'Data has been uploaded')
    return render(request, 'teacher/bulk_upload.html',{})
997,012 | 199ca5e172d559ad11bcc65aebbf2b985aa4b58e | import tkinter as tk
from num_guess_game import *
game = guess_num_game(0, 0)
game_made = False
game_over = False
def make_game():
    """Validate the two boundary entries and start a fresh guessing game.

    Bug fix: the original never reset ``game_over``, so after winning once,
    creating a new game left it permanently "finished" and make_guess refused
    every guess despite the "Enter numbers again to play" prompt.
    """
    global game_made, game, game_over
    try:
        num1 = int(entr_first.get())
        num2 = int(entr_second.get())
    except ValueError:
        game_made = False
        lbl_display2["text"] = "Must enter two numbers"
        lbl_display1["text"] = "Game not ready"
        return
    # The range must be non-empty and properly ordered.
    if num1 >= num2:
        game_made = False
        lbl_display2["text"] = "First number must be smaller than second number"
        lbl_display1["text"] = "Game not ready"
        return
    lbl_display2["text"] = ""
    game = guess_num_game(num1, num2)
    game_made = True
    game_over = False  # allow guessing on the new game
    lbl_display1["text"] = "New game ready"
def make_guess():
    """Read the guess entry, compare it to the secret number, report the result."""
    global game_made, game_over
    # Ignore clicks until a game exists.
    if not game_made:
        return
    if game_over:
        lbl_display1["text"] = "Game finished! Enter numbers again to play"
        return
    try:
        guess = int(entr_guess.get())
    except ValueError:
        lbl_display1["text"] = "Guess must be a number between the entered numbers (inclusive)"
        return
    # Reject guesses outside the configured range.
    if not (game.get_first_num() <= guess <= game.get_second_num()):
        lbl_display1["text"] = "Guess not in range"
        return
    if guess < game.get_guess_num():
        lbl_display1["text"] = "Guess is less than the number"
        return
    if guess > game.get_guess_num():
        lbl_display1["text"] = "Guess is greater than the number"
        return
    if guess == game.get_guess_num():
        lbl_display1["text"] = "You guessed the right number!"
        game_over = True
# --- window and widget layout ---------------------------------------------
window = tk.Tk()
window.title("Number guessing game")
window.rowconfigure(0, minsize=50, weight=1)
window.columnconfigure([0, 1, 2], minsize=50, weight=1)
lbl_display = tk.Label(master=window, text="Input the two numbers to start", fg="#5E6AE7")
lbl_display.grid(row=0, column=0)
# Range inputs (rows 1-2), apply button and status labels (rows 3-4).
lbl_first = tk.Label(master=window, text="First number:", bg="#D63636")
lbl_first.grid(row=1, column=0, sticky="nsew")
entr_first = tk.Entry(master=window)
entr_first.grid(row=1, column=1, sticky="nsew")
lbl_second = tk.Label(master=window, text="Second number:", bg="#D63636")
lbl_second.grid(row=2, column=0, sticky="nsew")
entr_second = tk.Entry(master=window)
entr_second.grid(row=2, column=1, sticky="nsew")
btn_apply = tk.Button(master=window, text="Click to apply", bg="#36D692", command=make_game)
btn_apply.grid(row=3, column=2, sticky="nsew")
lbl_display2 = tk.Label(master=window, fg="#5E6AE7")
lbl_display2.grid(row=3, column=0, sticky="nsew")
lbl_display1 = tk.Label(master=window, text="Game not ready", fg="#5E6AE7")
lbl_display1.grid(row=4, column=0)
# Guess input row (row 5).
lbl_guess = tk.Label(master=window, text="Guess", bg="#D63636")
lbl_guess.grid(row=5, column=0, sticky="nsew")
entr_guess = tk.Entry(master=window)
entr_guess.grid(row=5, column=1, sticky="nsew")
btn_guess = tk.Button(master=window, text="Click to guess", bg="#36D692", command=make_guess)
btn_guess.grid(row=5, column=2, sticky="nsew")
window.mainloop()
|
997,013 | 9e890662241aadcb87edd9fd00fc0059d886278e | suite = {
"mxversion" : "5.199.0",
"name" : "graal-generator-tests",
"defaultLicense" : "GPLv2-CPE",
"versionConflictResolution": "latest",
"imports": {
"suites": [
{
"name": "compiler",
"subdir": True,
"version": "f2916dbcc8a1e0412b98239bb625de0d7ee7841e",
"urls" : [
{"url" : "https://github.com/graalvm/graal", "kind": "git"},
{"url" : "https://curio.ssw.jku.at/nexus/content/repositories/snapshots", "kind" : "binary"},
]
}
]
},
"libraries" : {
"JBGENERATOR" : {
"urls" : [
"https://github.com/jku-ssw/java-bytecode-generator/releases/download/v1.0.0/jbgenerator-1.0.0.jar"
],
"sha1" : "50f69012583984849c5e5c5cd7ec85cd3653b85a",
},
"COMMONS_CLI": {
"sha1": "c51c00206bb913cd8612b24abd9fa98ae89719b1",
"maven": {
"groupId": "commons-cli",
"artifactId": "commons-cli",
"version": "1.4",
}
}
},
"projects": {
"at.jku.ssw.java.bytecode.generator.tests" : {
"subDir" : "projects",
"sourceDirs" : ["src"],
"dependencies" : [
"JBGENERATOR",
"COMMONS_CLI",
"compiler:GRAAL",
"mx:JUNIT",
],
"javaCompliance" : "8+",
"workingSets" : "Graal, HotSpot, Test",
},
},
"distributions": {
"GRAAL_GENERATOR_TESTS": {
"mainClass" : "at.jku.ssw.java.bytecode.generator.tests.CompileGeneratedClasses",
"subDir" : "projects",
"dependencies" : [
"at.jku.ssw.java.bytecode.generator.tests"
],
"exclude" : [
"mx:JUNIT",
"JBGENERATOR"
],
}
},
}
|
997,014 | 472c9f28a197bbe459ae328d81a61c4e2609a8f9 | import pygame
from gameoflife import settings
class InfoText(pygame.sprite.DirtySprite):
    """Text sprite rendered once from a font; update() re-renders new text."""
    def __init__(
        self,
        text,
        size,
        pos=(0, 0),
        font=settings.TEXT_FONT,
        color=settings.TEXT_COLOR,
        alpha=False,
    ):
        """Render *text* at *size* in *color*; alpha=True makes it translucent."""
        super().__init__()
        self.color = color
        self.text = text
        self.fontsize = size
        self._font = pygame.font.Font(font, size)
        self.image = self._font.render(text, 1, color)
        # NOTE(review): rect is assigned a plain (x, y) tuple rather than a
        # pygame.Rect -- confirm downstream code treats it as a position.
        self.rect = pos
        if alpha:
            # Fixed semi-transparency level.
            self.image.set_alpha(150)
    def set_position(self, pos):
        """Move the sprite to *pos* (x, y)."""
        self.rect = pos
    def update(self, text):
        """Re-render the sprite image with *text* in the stored color."""
        self.image = self._font.render(text, 1, self.color)
|
997,015 | ec054a5d68c497adbc701ad15d5cdce17a721263 | from random import randint
def estimate_seven_frequency(nlaunch=1000):
    """Estimate, by simulation, the probability that two fair dice sum to 7.

    Fixes from the original script: missing colons after the ``for`` and
    ``if`` lines (SyntaxError), and the result was inverted
    (``nlaunch / sucess`` instead of successes over trials, which would also
    divide by zero on an unlucky run). The trial count is now a parameter,
    defaulting to the original 1000.

    :param nlaunch: number of simulated double-dice throws.
    :return: observed relative frequency, a float in [0, 1].
    """
    sucess = 0
    for _ in range(nlaunch):
        if randint(1, 6) + randint(1, 6) == 7:
            sucess = sucess + 1
    return sucess / nlaunch


if __name__ == '__main__':
    print(estimate_seven_frequency())
|
997,016 | 4b932e78e493f83ac424abdeea8bd3f8cf10078d | from tests.conftests import _app
import json
from datetime import datetime
EMP_API_URL = '/api/employee/'
def test_employee_get(_app):
    """GET /api/employee/<id>: 200 + JSON with 'name' for id 1, 404 for id 2."""
    url_present_object = EMP_API_URL + '1'
    url_missing_object = EMP_API_URL + '2'
    response_p = _app.get(url_present_object)
    response_m = _app.get(url_missing_object)
    assert response_p.status_code == 200
    assert response_m.status_code == 404
    assert 'name' in response_p.get_json()
def test_employee_post(_app):
    """POST /api/employee/new: empty body and duplicates -> 400; new data -> 200 with id."""
    # Matches the record already seeded in the test DB -> duplicate.
    data_present = {
        "department_id": 1,
        "dob": "2020-12-12",
        "name": "TEST0",
        "salary": 1000.0
    }
    data_missing = {
        "department_id": 2,
        "dob": "2020-01-01",
        "name": "TEST1",
        "salary": 1500.0
    }
    response_e = _app.post(EMP_API_URL + 'new')
    response_p = _app.post(EMP_API_URL + 'new', json=data_present)
    response_m = _app.post(EMP_API_URL + 'new', json=data_missing)
    json_data = response_m.get_json()
    # The created record should echo the input plus an assigned id.
    json_required = {
        "department_id": 2,
        "dob": "2020-01-01",
        "id": 2,
        "name": "TEST1",
        "salary": 1500.0
    }
    assert response_e.status_code == 400
    assert response_p.status_code == 400
    assert json_data == json_required
    assert response_m.status_code == 200
def test_employee_put(_app):
    """PUT /api/employee/<id>: 404 for a missing id, updated JSON for an existing one."""
    data_missing = {
        "department_id": 2,
        "dob": "2020-01-01",
        "name": "TEST1",
        "salary": 1500.0
    }
    data_to_apply = {
        "department_id": 1,
        "dob": "2020-01-01",
        "name": "TEST2",
        "salary": 1500.0
    }
    # check if data in test BD
    response_m = _app.put(EMP_API_URL + '2', json=data_missing)
    response_p = _app.put(EMP_API_URL + '1', json=data_to_apply)
    json_data = response_p.get_json()
    assert response_m.status_code == 404
    assert json_data == {
        "department_id": 1,
        "dob": "2020-01-01",
        "id": 1,
        "name": "TEST2",
        "salary": 1500.0
    }
    assert json_data['name'] == data_to_apply['name']
def test_employee_delete(_app):
    """DELETE /api/employee/<id>: 404 for a missing id, 200 + message otherwise."""
    # check if data in test BD
    response_m = _app.delete(EMP_API_URL + '2')
    response_p = _app.delete(EMP_API_URL + '1')
    assert response_m.status_code == 404
    assert response_p.status_code == 200
    assert response_p.get_json() == {'message': 'Employee deleted!'}
def test_employees(_app):
    """GET /api/employees/ with name / date / date-range / department filters."""
    url = '/api/employees/'
    url_filter = url + '?'
    name = 'name=Albert Mayer'
    single_date = "&date1=2020-12-23"
    double_date = "&date1=2020-12-01&date2=2020-12-30"
    formatted_date1 = datetime.strptime('2020-12-01', "%Y-%m-%d")
    formatted_date2 = datetime.strptime('2020-12-30', "%Y-%m-%d")
    department = "&department=0"
    all_one_date = name + single_date + department
    all_two_dates = name + double_date + department
    nonsense = 'XDXDXDXDXD'
    response_bare = _app.get(url)
    response_name = _app.get(url_filter + name)
    response_single_date = _app.get(url_filter + single_date)
    response_double_date = _app.get(url_filter + double_date)
    response_department = _app.get(url_filter + department)
    response_all_one_date = _app.get(url_filter + all_one_date)
    response_all_two_dates = _app.get(url_filter + all_two_dates)
    response_nonsense = _app.get(url_filter + nonsense)
    # Unfiltered listing returns exactly the seeded record.
    assert response_bare.get_json() == [
        {
            "department_id": 1,
            "dob": "2020-12-12",
            "id": 1,
            "name": "TEST0",
            "salary": 1000.0
        }
    ]
    # Each filter must hold for every returned employee.
    for employee in response_name.get_json():
        assert employee['name'] == 'Albert Mayer'
    for employee in response_single_date.get_json():
        assert employee['dob'] == '2020-12-23'
    for employee in response_double_date.get_json():
        res_date_formatted = datetime.strptime(employee['dob'], "%Y-%m-%d")
        assert formatted_date1 < res_date_formatted < formatted_date2
    for employee in response_department.get_json():
        assert employee['department_id'] == 0
    for employee in response_all_one_date.get_json():
        assert employee['name'] == 'Albert Mayer'
        assert employee['dob'] == '2020-12-23'
        assert employee['department_id'] == 0
    for employee in response_all_two_dates.get_json():
        assert employee['name'] == 'Albert Mayer'
        res_date_formatted = datetime.strptime(employee['dob'], "%Y-%m-%d")
        assert formatted_date1 < res_date_formatted < formatted_date2
        assert employee['department_id'] == 0
    # Unknown query parameters yield 404.
    assert response_nonsense.status_code == 404
|
997,017 | 790b24d3a1bca95141fee1272d8d0f13ef055061 | import os
import platform
import socket
from datetime import date
import asyncio
import psutil
import requests
from fake_useragent import UserAgent
from flask import Flask, render_template,request
from flask_socketio import SocketIO
import subprocess
import dns.resolver
from cprint import *
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
version = str(0.1) + " beta"
@app.route('/')
def hello_world():
    """Render the dashboard with current CPU/RAM usage and app version."""
    cpux = psutil.cpu_percent()
    ram = psutil.virtual_memory()
    return render_template("home.html", Cpu=cpux, Mem=ram.percent, date=date.today(), version=version)
@app.route("/start", methods=['GET', 'POST'])
def start():
    """Render the scan page; on POST, kick off a scan of the submitted URL.

    Bug fixes: the methods list contained the typo 'PORT' (not an HTTP
    method), which made the POST branch unreachable, and startscan() was
    handed a bare string although it indexes its argument with ["url"].
    """
    #ua = UserAgent()
    global version
    if request.method == "POST":
        url = request.form['URL']
        print(url)
        startscan({"url": url})
    # Best-effort lookup of our public IP; fall back to loopback offline.
    try:
        ip = requests.get("https://ident.me").content.decode()
    except Exception:
        ip = "127.0.0.1"
    osx = platform.platform()
    distrox = socket.gethostname()
    user = os.environ['USER']
    return render_template("start.html", IP=ip, Useragent="Mozilla/5.0 (Windows NT 6.2)", os=osx, version=version,
                           distro=distrox, user=user)
def getip(url):
    """Resolve *url* to an IP address string.

    Strips scheme/path/port, treats a leading digit run as a literal IP,
    otherwise resolves an A record, with socket.gethostbyname as fallback.
    Errors are reported over the socket channel; returns None on failure.

    Bug fix: the literal-IP branch previously only set a local variable and
    then fell through, so the function returned None for literal addresses.
    """
    ip = "127.0.1.7"  # sentinel meaning "not resolved yet"
    try:
        unitest = url.replace("http://", "").replace("https://", "").replace("/","").split(":")[0]
        # NOTE(review): crude literal-IP heuristic -- only inspects the first
        # three characters, so hostnames starting with digits are misclassified.
        if unitest[:3].isdigit():
            return unitest
        else:
            new = dns.resolver.query(unitest, "A")
            for A in new:
                return str(A.to_text())
    except Exception as e:
        socketio.emit('result', "~#Error " + str(e))
    if ip == "127.0.1.7":
        try:
            return socket.gethostbyname(unitest)
        except Exception as e:
            print(e)
def generatecommand(ip, url, num):
    """Build the attack.py command line, probing known install locations.

    Tries the packaged path first, then the source-tree path, and finally
    falls back to the current directory.
    """
    candidates = (
        "./resources/app/argoui/attack.py",
        "./argoui/attack.py",
    )
    script = "./attack.py"
    for path in candidates:
        if os.path.isfile(path):
            script = path
            break
    return "python3 " + script + " " + ip + " " + url + " " + str(num)
def webanalizer(ip,url):
    """Run the web-analysis mode (3) of attack.py and stream cleaned output."""
    socketio.emit('result', "~#Starting web analizer...")
    command = generatecommand(ip,url,3)
    # Split on the literal backslash-escaped separators emitted by attack.py.
    data = subprocess.check_output(command, shell=True).decode().split("\\n\\t")
    for i in data:
        print(i)
        socketio.emit('result', str(removejunk(i)).replace("\n\t","\n").replace("(","").replace("'')","") + "\n")
def fuzz(ip,url):
    """Run the fuzzing mode (4) of attack.py and stream cleaned output."""
    socketio.emit('result', "~#Start fuzzing...")
    command = generatecommand(ip, url, 4)
    data = subprocess.check_output(command, shell=True).decode().split("\\n\\t")
    for i in data:
        socketio.emit('result', str(removejunk(i).replace(">","")))
def scanport(ip,url):
    """Run the port-scan mode (2) of attack.py and report open ports."""
    socketio.emit('result', "~#Port_scanning_Starting...")
    socketio.emit('result', "~#Scanning 1 to 10000 port!...")
    command = generatecommand(ip,url,2)
    openport = subprocess.check_output(command, shell=True).decode().split("\n")
    idata = ""
    # Skip the first output line (header); emit one message per open port.
    x = 1
    for i in openport:
        if x == 2:
            idata = ""
            socketio.emit('result', "~#OPEN PORT >>> ")
            socketio.emit('resultNO', " " + str(removejunk(i)))
        x = x + 1
def scandns(ip,url):
    """Run the DNS-enumeration mode (1) of attack.py and stream results."""
    socketio.emit('result', "~#Dns Enum Starting...")
    dnsresult = ""
    try:
        command = generatecommand(ip,url,1)
        dnsresult = subprocess.check_output(command, shell=True).decode().split("\\\\n")
    except Exception as e:
        cprint.err(e)
        socketio.emit('result',str(e))
    print(dnsresult)
    for i in dnsresult:
        socketio.emit('result', str(removejunk(i)))
def removejunk(data):
    """Strip whitespace noise, ANSI color fragments and list punctuation
    from attack.py output (order of replacements is significant)."""
    replacements = (
        ("\n", ""),
        ("\n\n", ""),
        ("\t", ""),
        ("[0m", ""),
        ("[92m", ""),
        ("['", "\n"),
        ("]", ""),
        (",", " "),
        ("[", "\n"),
        ("'\"", ""),
    )
    for old, new in replacements:
        data = data.replace(old, new)
    return data
def startx(url):
    """Run the full scan pipeline (analyze, DNS, ports, fuzz) against *url*."""
    # Fresh event loop in case this runs off the main thread.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    ip = getip(url)
    webanalizer(ip,url)
    scandns(ip,url)
    scanport(ip, url)
    fuzz(ip,url)
@socketio.on('startscan')
def startscan(data):
    """Socket.IO handler: validate data['url'] and launch the scan pipeline."""
    if data["url"] == "":
        socketio.emit('result', "please enter a valid url!")
        return 0
    else:
        #threading.Thread(target=startx,args=(data["url"]))
        # Runs synchronously; the commented thread variant was abandoned.
        startx(data["url"])
@socketio.on('ready')
def handle_connected_event(data):
    """Socket.IO handler: acknowledge a client reporting it is connected."""
    if data["connected"]:
        print("connected")
        result = "~#ARGO is ready_ "
        socketio.emit('result', result)
if __name__ == '__main__':
    # Run the Flask-SocketIO development server.
    socketio.run(app, debug=True)
|
997,018 | 33a9226c1ebf2a74084b64e608c18c4efaf2beb2 | # Base interface for spells. Includes methods for accessing crystals within the
# spell grid along and for accessing the total combined effects of a spell given
# the player's current layout.
import copy
import math
import random
from attackdata import *
from crystal import *
from hexgrid import *
from window import *
from vector import *
class Spell(object):
def __init__(self, player, size):
    """Create a spell grid of radius *size* and seed the three source crystals."""
    self.type = 'Spell' # Identifier
    self.player = player # The player who is using this spell
    self.size = size # The 'radius' of our spell in number of cells
    self.grid = HexGrid(size) # Underlying grid we are working with
    # Place in initial 'source' crystals. These are crystals which players
    # build up their pipe systems from.
    start = self.get_source_locs()
    # One pure-color (R, G, B) source per location.
    colors = ((True, False, False),
              (False, True, False),
              (False, False, True))
    # For each starting crystal, create and initalize the Crystal object
    for (i, (loc, color)) in enumerate(zip(start, colors)):
        color = Color(*color)
        row, col = loc.list()
        crystal = Crystal()
        # Initialize crystal attributes
        crystal.color = color
        crystal.pipes = ['Out'] + [None] * 5
        crystal.atts['Source'] = color
        crystal.atts['Movable'] = False
        # Set up the proper orientation
        for _ in range(3 - i):
            crystal.rotate(1)
        # Insert the crystal into the grid
        self.grid.cells[row][col] = crystal
# Provide a list of the locations of all source crystals
def get_source_locs(self):
    """Return the grid locations of the three source crystals along one edge."""
    grid = self.grid
    size = grid.size
    ret = [(0, 0),
           (size - 1, 0),
           (2 * (size - 1), 0)]
    # NOTE: this codebase targets Python 2 (iteritems/long are used elsewhere),
    # so map() yields a list of vectors here.
    return map(vector, ret)
# Perform a breadth-first search on crystals to compute which ones are reachable from
# the given start crystal. This is used to determine which crystals are active, and
# can thus contribute to the overall effects of the spell.
#
# Returns a list of edges in the resulting directed graph along with whether
# a cycle was detected.
def get_bfs(self, start):
    """BFS over connected pipes from *start*; return (edges, cycle_detected)."""
    grid = self.grid
    # Make sure there is actually a crystal at start
    if grid.cells[start[0]][start[1]] is None:
        return [], False
    dirs = [HEX_NW, HEX_NE, HEX_E, HEX_SE, HEX_SW, HEX_W]
    q = [vector(start)] # Our queue of current nodes
    edges = []
    visited = []
    cycle = False
    # Standard BFS loop
    while len(q) > 0:
        # Get the next location
        cur = q.pop(0)
        row1, col1 = cur.tuple()
        # Check if we've already visited
        if cur.list() in visited:
            cycle = True
            continue
        visited.append(cur.list())
        # Obtain the actual contents of the cell
        c1 = grid.cells[row1][col1]
        # Visit each of the neighboring cells
        neighbors = []
        for dir in dirs:
            loc = grid.move_loc(dir, cur)
            # Make sure we're still in bounds
            if grid.out_of_bounds(loc):
                continue
            # Check to see if there is a crystal in the neighboring cell
            row2, col2 = loc.tuple()
            c2 = grid.cells[row2][col2]
            if c2 is None:
                continue
            # Make sure colors match up. We use <= as opposed to == since
            # some crystal can take more than one color as input. Yellow
            # can take Red or Green, but output would have to be sent to a
            # crystal which can take both Red and Green. In this case, these
            # would be Yellow and White crystals.
            if not c1.color <= c2.color:
                continue
            # Make there is an actual pipe going between the two crystals
            if c1.pipes[dir] == 'Out' and \
               c2.pipes[(dir + 3) % 6] == 'In':
                edges.append((cur, loc))
                q.append(loc)
    return edges, cycle
# Collect a dictionary of all (active) attributes in the spell
def get_modifiers(self):
    """Sum attribute contributions of all active crystals, per source crystal."""
    modifiers = ['Neutral', 'Fire', 'Ice', 'Heal', 'Lightning']
    modifiers = {x: 0 for x in modifiers}
    # Collect attributes for each source crystal separately
    start = self.get_source_locs()
    for loc in start:
        # Run BFS to find the reachable crystals
        edges, cycle = self.get_bfs(loc)
        # Cycles are not allowed for a single source crystals.
        # Merging source crystals is acceptable however.
        if cycle:
            continue
        # Don't receive attributes from source crystals or corruption
        # crystals which act as walls.
        forbidden = ['Movable', 'Source']
        cur_modifiers = {}
        # Iterate over all edges in the BFS
        for (u, v) in edges:
            # Get the crystal located in cell v
            row, col = v.list()
            crystal = self.grid.cells[row][col]
            # Iterate over all attributes the crystal provides
            for (att, val) in crystal.atts.iteritems():
                # Check if this is a forbidden attribute
                if att in forbidden:
                    continue
                # Now increment the value of the attribute
                # e.g. If att == 'Fire', then increase the Fire damage
                if att in cur_modifiers:
                    cur_modifiers[att] += val
                else:
                    cur_modifiers[att] = copy.deepcopy(val)
        # We need at least one crystal with the 'Cast' modifier, otherwise
        # no magic is performed from this source crystal.
        if 'Cast' not in cur_modifiers:
            continue
        # Add in the contributions from this source crystal to the overall
        # modifiers for the spell
        for (att, val) in cur_modifiers.iteritems():
            if att in modifiers:
                modifiers[att] += val
            else:
                modifiers[att] = copy.deepcopy(val)
    # Return sum of all modifiers across the spell
    return modifiers
# Collect a string description of all attributes provided by the
# current spell. Used for display purposes.
def get_atts(self):
    """Return a newline-separated string of non-zero modifiers for display."""
    ret = ''
    modifiers = self.get_modifiers()
    for (mod, val) in modifiers.iteritems():
        if val == 0:
            continue
        val_str = str(val)
        # Show integers with an explicit sign, e.g. '+3'.
        if isinstance(val, (int, long)):
            val_str = '{:+d}'.format(val)
        ret += str(mod) + ': ' + val_str + '\n'
    return ret
# Compute the total damage of the spell, broken down by element for
# any elemental defenses.
def get_attack(self):
    """Package the spell's modifiers into an AttackData instance."""
    data = AttackData()
    data.atts = self.get_modifiers()
    return data
# Displays a simple hexagon icon to represent the spell. Used in
# inventory displays that can contain spells.
def display(self, dst, center, radius):
    """Draw a hex-grid icon for the spell centered at *center* on surface *dst*."""
    color = (255, 255, 255)
    # Compute six different corners for the central hexagon
    vels = {
        'N': 90,
        'NE': 30,
        'SE': -30,
        'S': -90,
        'SW': -150,
        'NW': 150
    }
    d2r = math.pi / 180
    dir_vel = {k: vector(math.cos(t * d2r), math.sin(t * d2r))
               for (k, t) in vels.iteritems()}
    # Now add in extra points for each outer hexagon. Uses an
    # ordering to only compute each unique corner point once.
    dirs = ['N', 'NE', 'SE', 'S', 'SW', 'NW']
    points = [[x] for x in range(6)]
    for x in range(6):
        points += [[x, x], [x, x, (x + 1) % 6],
                   [(x + 1) % 6, (x + 1) % 6, x]]
    zero = vector(0, 0)
    # Each point is the vector sum of the listed direction unit vectors.
    points = [sum([dir_vel[dirs[dir]] for dir in p], zero) for p in points]
    # Build up the list of edges we want to actually draw between points.
    # Separated out so we generate each segment via rotational symmetry.
    segments = [[0, 1, 2, 3, 4, 5, 0]]
    for x in range(6):
        y = 3 * x
        vals = [y, y + 1, y + 2, y + 3]
        vals = [x] + [6 + v % 18 for v in vals]
        segments += [vals]
    # Now actually draw line segments between each point in our hex grid icon.
    for line in segments:
        plist = [center + 0.5 * radius * points[p] for p in line]
        plist = [p.list() for p in plist]
        pygame.draw.lines(dst, color, False, plist)
|
997,019 | 15bf78986887e579784aa8a63dc58f1ba72f36ba | from game.component import EventComponent, Tag
from game.event import Event, EventType, ActionEventType, CaveEventType, CollisionEventType
from game.script.script import Script
class Exit(Script):
    """Script attached to a level-exit entity.

    While the player collides with the exit, a "Descend" action prompt is
    advertised on the event bus; when no collision involving the player is
    seen in a frame, the prompt is removed again.
    """
    def start(self, entity, world):
        """Cache entity/world handles and this entity's event component."""
        self.entity = entity
        self.world = world
        self.event_bus = world.component_for_entity(entity, EventComponent)
    def update(self, dt):
        """Show or hide the Descend prompt based on this frame's collisions."""
        sent = False
        for event in self.event_bus.get_events():
            if event.ty == EventType.COLLISION:
                # The collision pair is unordered: pick whichever side is
                # not this exit entity.
                player = None
                if event.data['first'] == self.entity:
                    player = event.data['second']
                elif event.data['second'] == self.entity:
                    player = event.data['first']
                if player != None and self.world.has_component(player, Tag) and "player" in self.world.component_for_entity(player, Tag).tags:
                    # Offer the "Descend" action while the player overlaps us.
                    self.event_bus.send.append(Event({
                        'type': ActionEventType.SET_INFO,
                        'id': self.entity,
                        'text': "Descend",
                        'callback': self.descend,
                        'timeout': 0.2
                    }, EventType.ACTION))
                    sent = True
        if not sent:
            return
        # NOTE(review): DELETE_INFO is only emitted when a prompt was just
        # SET this frame (sent == True); presumably the timeout handles
        # removal otherwise — confirm against the action system.
        self.event_bus.send.append(Event({
            'type': ActionEventType.DELETE_INFO,
            'id': self.entity
        }, EventType.ACTION))
    def descend(self):
        """Action callback: broadcast a DESCEND cave event."""
        self.event_bus.send.append(Event({
            'type': CaveEventType.DESCEND
        }, EventType.CAVE, True))
        return True
997,020 | 8918acab02250ea507e8a4f9d3941783be304b0d | from flask_wtf import FlaskForm
from wtforms import StringField, BooleanField, SubmitField, PasswordField
from wtforms.validators import Length, DataRequired, EqualTo, Email
class RegistrationForm(FlaskForm):
    """Sign-up form: username, email, password plus confirmation.

    Fixes two validator bugs:
    - `DataRequired` on the email field was passed as a class instead of an
      instance (`DataRequired` vs `DataRequired()`), so it never validated.
    - `EqualTo` must receive the *name* of the field to compare against as a
      string; passing the field object raised at validation time.
    """
    username = StringField('Username', validators=[DataRequired(), Length(min=2, max=30)])
    email = StringField("Email", validators=[DataRequired(), Email()])
    password = PasswordField("Password", validators=[DataRequired()])
    confirm_password = PasswordField('Confirm Password', validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField("Sign up!")
class LoginForm(FlaskForm):
    """Email/password login form with a remember-me checkbox."""
    email = StringField("Email", validators=[DataRequired(), Email()])
    password = PasswordField("Password", validators=[DataRequired()])
    remember = BooleanField("Remember Me")
    submit = SubmitField("Login")
997,021 | b19eec09c2eb8b8b260f7c937c6e70db0ad387ee | from PIL import Image, ImageDraw
SIZE = 256
# Grayscale ("L") canvas where each pixel's brightness equals its x
# coordinate, producing a left-to-right horizontal gradient.
image = Image.new("L", (SIZE, SIZE))
d = ImageDraw.Draw(image)
for x in range(SIZE):
    for y in range(SIZE):
        d.point((x,y), x)
# NOTE(review): output name says "gradiation" (sic) — kept to preserve behavior.
image.save('./gradiation1.jpg')
|
997,022 | 64ab276258569d43726b34c177a7052081626d36 | #----------------------------------------------
# -*- encoding=utf-8 -*- #
# __author__:'xiaojie' #
# CreateTime: #
# 2019/4/25 10:39 #
# #
# 天下风云出我辈, #
# 一入江湖岁月催。 #
# 皇图霸业谈笑中, #
# 不胜人生一场醉。 #
#----------------------------------------------
import tensorflow as tf
import os
import numpy as np
import cv2
from .Generator import generator
from .Discriminator import discriminator
from tensorflow.examples.tutorials.mnist import input_data
# This loss setup trains only two networks: Q's loss is added to both D's and
# G's losses, so Q's parameters are updated jointly with each of them.
class InfoGan:
    """TF1 InfoGAN on MNIST: generator + discriminator with a categorical
    code, continuous codes, and random noise; Q's mutual-information loss is
    folded into both the G and D objectives."""
    def __init__(self,sess,args):
        """Build the whole graph: placeholders, G/D nets, losses, optimizers,
        summaries, and the saver. `args` carries sizes, learning rates, dirs."""
        #########################
        #                       #
        #    General Setting    #
        #                       #
        #########################
        self.sess = sess
        self.args = args
        self.model_dir = args.model_dir
        if not self.model_dir:
            raise ValueError('Need to provide model directory')
        self.summary_dir = os.path.join(self.model_dir,'log')
        self.test_dir = os.path.join(self.model_dir,'test')
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)
        if not os.path.exists(self.summary_dir):
            os.makedirs(self.summary_dir)
        if not os.path.exists(self.test_dir):
            os.makedirs(self.test_dir)
        self.global_step = tf.train.get_or_create_global_step()
        #########################
        #                       #
        #    Model Building     #
        #                       #
        #########################
        # 1. Build Generator
        # Create latent variable
        with tf.name_scope('noise_sample'):
            self.z_cat = tf.placeholder(tf.int32,[None])#10
            self.z_cont = tf.placeholder(tf.float32,[None,args.num_cont])#2
            self.z_rand = tf.placeholder(tf.float32,[None,args.num_rand])#62
            # Full latent vector: one-hot category + continuous codes + noise.
            z = tf.concat([tf.one_hot(self.z_cat,args.num_category),self.z_cont,self.z_rand],axis=1)
        self.g = generator(z,args)
        # 2. Build Discriminator
        # Real Data
        with tf.name_scope('data_and_target'):
            self.x = tf.placeholder(tf.float32,[None,28,28,1])
            y_real = tf.ones([tf.shape(self.x)[0]])
            y_fake = tf.zeros([tf.shape(self.x)[0]])
        d_real,_,_,_ = discriminator(self.x,args)
        d_fake, r_cat,r_cont_mu,r_cont_var = discriminator(self.g, args)
        # 3. Calculate loss
        # -log(D(G(x))) trick
        with tf.name_scope('loss'):
            # Debug print: sanity-check tensor shapes while building the graph.
            print('sssssssssssss',d_fake.get_shape(),
                  y_fake.get_shape(),
                  d_real.get_shape(),
                  y_real.get_shape(),
                  r_cat.get_shape(),
                  self.z_cat.get_shape())
            self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake,
                                                                                 labels=y_real))
            self.d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake,
                                                                                      labels=y_fake))
            self.d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_real,
                                                                                      labels=y_real))
            self.d_loss = (self.d_loss_fake+self.d_loss_real)
            # discrete logQ(c|x)
            self.cat_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=r_cat,
                                                                                          labels=self.z_cat))
            eplison = (r_cont_mu-self.z_cont)/r_cont_var
            # variance = 1
            # log guassian distribution (continuous logQ(c|x))
            self.cont_loss = -tf.reduce_mean(
                tf.reduce_sum(
                    -0.5*tf.log(2*np.pi*r_cont_var+1e-8)-0.5*tf.square(eplison),axis=1))
            self.train_g_loss = self.g_loss+self.cat_loss+self.cont_loss*0.1
            self.train_d_loss = self.d_loss+self.cat_loss+self.cont_loss*0.1
            # Joint training: Q's loss is folded into both objectives. G's
            # loss is computed through D, so it also has two parts (the
            # original adversarial loss plus Q's mutual-information terms).
        #4. Update weights
        g_param = tf.trainable_variables(scope='generator')
        d_param = tf.trainable_variables(scope='discriminator')
        # Debug print: list the discriminator variables being optimized.
        print('BBBBBBBBBBBBB',d_param)
        with tf.name_scope('optimizer'):
            g_optim = tf.train.AdamOptimizer(learning_rate=args.g_lr,beta1=0.5,beta2=0.99)
            self.g_train_op = g_optim.minimize(self.train_g_loss,var_list=g_param,
                                               global_step=self.global_step)
            d_optim = tf.train.AdamOptimizer(learning_rate=args.d_lr,beta1=0.5,beta2=0.99)
            self.d_train_op = d_optim.minimize(self.train_d_loss,var_list=d_param)
        # 5. visualize
        tf.summary.image('Real',self.x)
        tf.summary.image('Fake',self.g)
        with tf.name_scope('Generator'):
            tf.summary.scalar('g_total_loss',self.train_g_loss)
        with tf.name_scope('Discriminator'):
            tf.summary.scalar('d_total_loss',self.train_d_loss)
        with tf.name_scope('All_Loss'):
            tf.summary.scalar('g_loss',self.g_loss)
            tf.summary.scalar('d_loss',self.d_loss)
            tf.summary.scalar('cat_loss',self.cat_loss)
            tf.summary.scalar('cont_loss',self.cont_loss)
        self.summary_op = tf.summary.merge_all()
        self.saver = tf.train.Saver()
    def sample_z_and_c(self):
        """Draw one batch of latent codes.

        NOTE(review): this mixes numpy arrays with tf ops (tf.concat /
        tf.one_hot on numpy inputs) and so returns a graph tensor, not a
        sample; it also appears unused in this file — verify before use.
        """
        z_cont_ = np.random.uniform(-1, 1, size=[self.args.batch_size, self.args.num_cont])
        z_rand_ = np.random.uniform(-1, 1, size=[self.args.batch_size, self.args.num_rand])
        z_cat_ = np.random.randint(self.args.num_category, size=[self.args.batch_size])
        z = tf.concat([tf.one_hot(z_cat_, self.args.num_category), z_cont_, z_rand_], axis=1)
        return z
    def train(self):
        """Training loop over MNIST: alternate one D step and one G step per
        batch, log summaries, and checkpoint after every epoch."""
        summary_writer = tf.summary.FileWriter(self.summary_dir,self.sess.graph)
        mnist = input_data.read_data_sets('../../MNIST_data',one_hot=True)
        init_op = tf.global_variables_initializer()
        self.sess.run(init_op)
        checkpoint = tf.train.latest_checkpoint(self.model_dir)# simply reads ckpt.model_checkpoint_path
        if checkpoint:
            print('Load checkpoint {}...'.format(checkpoint))
            self.saver.restore(self.sess,checkpoint)
        # ckpt = tf.train.get_checkpoint_state(self.model_dir)
        # if ckpt and ckpt.model_checkpoint_path:
        #     model_file = tf.train.latest_checkpoint(self.model_dir)
        #     self.saver.restore(self.sess, model_file)
        steps_per_epoch = mnist.train.labels.shape[0]//self.args.batch_size
        for epoch in range(self.args.epoch):
            for step in range(steps_per_epoch):
                x_batch,_ = mnist.train.next_batch(self.args.batch_size)
                x_batch = np.expand_dims(np.reshape(x_batch,[-1,28,28]),axis=-1)
                z_cont = np.random.uniform(-1,1,size=[self.args.batch_size,self.args.num_cont])
                z_rand = np.random.uniform(-1,1,size=[self.args.batch_size,self.args.num_rand])
                z_cat = np.random.randint(self.args.num_category,size=[self.args.batch_size])
                d_loss,_ = self.sess.run([self.train_d_loss,self.d_train_op],
                                         feed_dict={self.x:x_batch,
                                                    self.z_cont:z_cont,
                                                    self.z_rand:z_rand,
                                                    self.z_cat:z_cat})
                g_loss,_ = self.sess.run([self.train_g_loss,self.g_train_op],
                                         feed_dict={self.x: x_batch,
                                                    self.z_cont: z_cont,
                                                    self.z_rand: z_rand,
                                                    self.z_cat: z_cat})
                summary,global_step = self.sess.run([self.summary_op,self.global_step],
                                         feed_dict={self.x: x_batch,
                                                    self.z_cont: z_cont,
                                                    self.z_rand: z_rand,
                                                    self.z_cat: z_cat})
                if step % 100 == 0 :
                    print('Epoch[{}/{}] Step[{}/{}] g_loss:{:.4f}, d_loss:{:.4f}'.format(epoch, self.args.epoch, step,
                                                                                         steps_per_epoch, g_loss,
                                                                                         d_loss))
                    summary_writer.add_summary(summary,global_step)
            self.save(global_step)
    def inference(self):
        """Restore the latest checkpoint and write image grids that sweep
        each continuous code across categories (rows = digit category,
        columns = code value)."""
        if self.model_dir is None:
            raise ValueError('Need to provide model directory')
        checkpoint = tf.train.latest_checkpoint(self.model_dir)
        if not checkpoint:
            raise FileNotFoundError('Checkpoint is not found in {}'.format(self.model_dir))
        else:
            print('Loading model checkpoint {}...'.format(self.model_dir))
            self.saver.restore(self.sess,checkpoint)
        for q in range(2):
            col = []
            for c in range(10):
                row = []
                for d in range(11):
                    z_cat = [c]
                    # Sweep the continuous codes from -2 upward in 0.4 steps,
                    # pinning code q to zero.
                    z_cont = -np.ones([1, self.args.num_cont]) * 2 + d * 0.4
                    z_cont[0, q] = 0
                    z_rand = np.random.uniform(-1, 1, size=[1, self.args.num_rand])
                    g = self.sess.run([self.g], feed_dict={self.z_cat: z_cat,
                                                           self.z_cont: z_cont,
                                                           self.z_rand: z_rand})
                    g = np.squeeze(g)
                    # Rescale the generated image to the full 0..255 range.
                    multiplier = 255.0 / g.max()
                    g = (g * multiplier).astype(np.uint8)
                    row.append(g)
                row = np.concatenate(row, axis=1)
                col.append(row)
            result = np.concatenate(col, axis=0)
            filename = 'continuous_' + str(q) + '_col_cat_row_change.png'
            cv2.imwrite(os.path.join(self.test_dir, filename), result)
    def save(self,step):
        """Write a checkpoint tagged with the given global step."""
        model_name = 'infogan.model'
        self.saver.save(self.sess,os.path.join(self.model_dir,model_name),global_step=step)
|
997,023 | 4d93eeb9cbb566c2bf24a6b5325d5981ae92e4d7 |
def perrin(n):
    """Return the n-th Perrin number.

    P(0)=3, P(1)=0, P(2)=2 and P(n)=P(n-2)+P(n-3).
    Rewritten iteratively: the original naive double recursion was
    exponential in n and recursed forever for negative n.

    Raises:
        ValueError: if n is negative.
    """
    if n < 0:
        raise ValueError("n must be non-negative")
    a, b, c = 3, 0, 2  # P(0), P(1), P(2)
    if n == 0:
        return a
    if n == 1:
        return b
    # Slide the (P(k), P(k+1), P(k+2)) window forward until c == P(n).
    for _ in range(n - 2):
        a, b, c = b, c, a + b
    return c
|
997,024 | 914270ea88bbb2fb6c795450eb7e0fe6c14bd30b | from django import forms
from models import User
class InvitationForm(forms.Form):
    """Single-field form asking for a friend's email address to invite."""
    email = forms.CharField(widget=forms.TextInput(attrs={'size': 32,
                                    'placeholder': 'Email Address of Friend to invite.',
                                    'class':'form-control search-query'}))
class RegisterForm(forms.ModelForm):
    """Model-backed sign-up form: User's username/email plus a password input."""
    password = forms.CharField(widget=forms.PasswordInput(attrs={'size': 32,
                                    'placeholder': 'Password',
                                    'class':'form-control'}))
    class Meta:
        model = User
        fields = ('username','email')
    # Commented-out alternatives kept verbatim for reference:
    #     error_messages = {
    #         NON_FIELD_ERRORS: {
    #             'unique_together': "%(User)s's %(email)s are not unique.",
    #         }
    #     }
    # username = forms.CharField(widget=forms.TextInput(attrs={'size': 32,
    #                                 'placeholder': 'Username',
    #                                 'class':'form-control'}))
    # email = forms.CharField(widget=forms.TextInput(attrs={'size': 32,
    #                                 'placeholder': 'Email',
    #                                 'class':'form-control'}))
997,025 | 4ab0b784e2f594a78375aa5e9845955da797158d | from django.urls import path
# from django.contrib.auth import views as auth_views
from . import views
# Routes for this app: session login/logout handled by the custom views.
urlpatterns = [
    path('login/', views.connection, name='login'),
    path('logout/', views.deconnection, name='logout'),
]
|
997,026 | 8d4ea849353312ff345e4c40f54e173c5f70561c | from django import forms
from django.forms import ModelForm
from network.models import Post, Profile
class PostForm(ModelForm):
    """Form for creating a Post; exposes only the body as a 3-row textarea."""
    class Meta:
        model = Post
        fields = [
            'body'
        ]
        labels = {'body': "What's on your mind?"}
        widgets = {
            'body': forms.Textarea(attrs={'class': 'form-control body', 'rows': '3', 'columns': '15'})
        }
|
997,027 | 2093667afe3db2d47609cb5106465d1089f967d4 | Python 3.5.3 (default, Jan 19 2017, 14:11:04)
[GCC 6.3.0 20170124] on linux
Type "copyright", "credits" or "license()" for more information.
>>> #Aula 2/5 - Curso SESC Consolação 04/04/2018
>>> print('Aula 2/5')
Aula 2/5
>>> #Exercício slide 46
>>> animal='gatinho'
>>> animal=[0:6]
SyntaxError: invalid syntax
>>> animal[0:6]
'gatinh'
>>> animal[0:6]+'a'
'gatinha'
>>> #Exercício slide 45
>>> serie='Stranger Things'
>>> serie.upper()
'STRANGER THINGS'
>>> serie.capitalize()
'Stranger things'
>>> serie[::-1]
'sgnihT regnartS'
>>> #Tamanho da String : len(<string>)
>>> novaserie='Star Trek Discovery'
>>> len(novaserie)
19
>>> #Comando Find : <string que contém o texto>.find('string que procuro')
>>> #<string que contém o texto>,find('string que procuro',<posição a partir da qual quero procurar>)
>>> novaserie.find('k')
8
>>> abertura='Espaço: a fronteira final... audaciosamente indo onde ninguém jamais esteve.'
>>> len(abertura)
76
>>> abertura.fint('t')
Traceback (most recent call last):
File "<pyshell#20>", line 1, in <module>
abertura.fint('t')
AttributeError: 'str' object has no attribute 'fint'
>>> abertura.find('t')
14
>>> abertura.find('a',13)
18
>>> abertura.find('!')
-1
>>> #Comando Replace : troca uma string por outra dentro de um texto, porém a troca não é definitiva
>>> #<variavel>.replace('string que quero mudar','nova string')
>>> spock='Fascinante, capitão Kirk'
>>> spock.replace('Fascinante','Incrível')
'Incrível, capitão Kirk'
>>> #Listas: permitem armazenar várias informações diferentes (números, strings, lógico) em uma mesma variável
>>> #<variável> = [info1,info2,info3]
>>> meubicho=['Gato',9,True]
>>> meubicho[0]
'Gato'
>>> meubichp[3]
Traceback (most recent call last):
File "<pyshell#32>", line 1, in <module>
meubichp[3]
NameError: name 'meubichp' is not defined
>>> meubicho[3]
Traceback (most recent call last):
File "<pyshell#33>", line 1, in <module>
meubicho[3]
IndexError: list index out of range
>>> meubichp[2]
Traceback (most recent call last):
File "<pyshell#34>", line 1, in <module>
meubichp[2]
NameError: name 'meubichp' is not defined
>>> meubicho[2]
True
>>> meubicho[1]
9
>>> #Em listas devemos nos atentar que sempre começa com '0', 1 , 2 , 3 e assim por diante
>>> #Comando Append: acrescenta dados ao final de uma lista
>>> #<variável>append(<variável2>)
>>> nomedaserie=['Gotham','A', 'Dark']
>>> nomedaserie.append('Knight')
>>> nomedaserie
['Gotham', 'A', 'Dark', 'Knight']
>>> nomes=['Ana','Lucas','Marcus','Dani']
>>> nomes.append('Michelle')
>>> nomes
['Ana', 'Lucas', 'Marcus', 'Dani', 'Michelle']
>>> #Comando Join : gruda os elementos de uma sequencia de strings, usando um parametro fornecido
>>> #'<parametro fornecido>'.join(<nome da sequencia>)
>>> herois=['Flash','Arrow','Supergirl')
SyntaxError: invalid syntax
>>> herois=['Flash','Arrow','Supergirl']
>>> herois
['Flash', 'Arrow', 'Supergirl']
>>> ' e '.join(herois)
'Flash e Arrow e Supergirl'
>>> #Comando Split : separa uma string em pontos onde existam separadores de texto (espaço, tab, enter, '/' , =)
>>> #criando uma lista de strings
>>> #',string>'.split('<separador>')
>>> '1,2,3,4'.split(' e ')
['1,2,3,4']
>>> '1,2,3,4'.split(',')
['1', '2', '3', '4']
>>> #Tuplas: são similares as listas, mas imutáveis. Não podemos adicionar ou modificar nenhum de seus elementos.
>>> #consome menos espaço da memória
>>> #<variavel> = (info1,info2,info3)
>>> #<variavel> = info1,info2,info3
>>> a=(3,5,8)
>>> a
(3, 5, 8)
>>> b=3,5,8
>>> b
(3, 5, 8)
>>> a==b
True
>>> type(b)
<class 'tuple'>
>>> a=(1,)
>>> a
(1,)
>>> type(a)
<class 'tuple'>
>>> b=(1)
>>> b
1
>>> type(b)
<class 'int'>
>>> #os exemplos acima parecem iguais mas são reconehcidos de forma diferente pelo Python
>>> #exercícios slide 61
>>> chaves='Eu prefiro morrer do que perder a vida.'
>>> #1) WUal o tamanho da string?
>>> len(chaves)
39
>>> #2) Verifique se começa com 'p'
>>> chaves.startswith('p')
False
>>> #3) Verifique se termina com '.'
>>> chaves.endswith('.')
True
>>> #4) Verifique a posição do caracter ','
>>> chaves.find(',')
-1
>>> #5) Troque o caracter '.' por '!'
>>> chaves.replace('.','!')
'Eu prefiro morrer do que perder a vida!'
>>> #6) Dada a lista mercado =['1kg de banana','12 ovos','1kg de farinha'], acrescente a string 'fermento em pó'
>>> mercado =['1kg de banana','12 ovos','1kg de farinha']
>>> mercado.append('fermento em pó')
>>> mercado
['1kg de banana', '12 ovos', '1kg de farinha', 'fermento em pó']
>>>
|
997,028 | 464a81033731520ac2ac8b7e4f8df71170bac682 | # coding:utf-8
import easyhistory
# from easyhistory.store import CSVStore
from easyhistory import store
# Use a CSV-backed daily ('D') price-history store rooted at ./history.
mystore = store.use(export='csv', path='history', dtype='D')
# result = mystore.get_factors('150153', '2015-03-25')
# Fetch adjustment factors for code 150153 on that date (Python 2 print).
result = mystore.get_factors('150153', '2015-03-25')
print result
997,029 | 2f1e95bc9121eb565641f65fbddadd81b10a617f |
__all__ = [
'Status'
]
class Status(object):
    """Numeric presence codes used by the WebQQ protocol."""
    LOGOUT = 0
    ONLINE = 10
    OFFLINE = 20
    AWAY = 30
    HIDDEN = 40
    BUSY = 50
    CALLME = 60
    SLIENT = 70  # sic: misspelling of "SILENT" kept — renaming would break callers
class ErrorCode(object):
    """Result codes: negative values are local failures, zero is success,
    positive values are server/protocol conditions."""
    DB_EXEC_FAILED = -50
    NOT_JSON_FORMAT = -30
    #upload error code
    UPLOAD_OVERSIZE = -21
    UPLOAD_OVERRETRY = -20
    #network error code
    HTTP_ERROR = -11
    NETWORK_ERROR = -10
    #system error code
    FILE_NOT_EXIST = -6
    NULL_POINTER = -5
    CANCELED = -4
    TIMEOUT_OVER = -3
    NO_RESULT = -2
    ERROR = -1
    #webqq error code
    OK = 0
    LOGIN_NEED_VC = 10
    HASH_WRONG = 50
    LOGIN_ABNORMAL = 60
    NO_MESSAGE = 102
    COOKIE_WRONG = 103
    PTWEBQQ = 116
    LOST_CONN = 121
997,030 | adc3ad189064b64bd48a885dd5a7b35528fb0364 | import neural_network.reader as reader
from neural_network.network2 import NeuralNetwork
from util.frame import progress
from sklearn.metrics import f1_score, classification_report, accuracy_score
from util.dump import dump_object, load_object
from sys import stdout
from util.timer import Timer
import numpy as np
import warnings
import pylab as pt
warnings.filterwarnings('ignore')
DUMPED = False
CONTINUE = False
def images_to_np_array(image_data):
    """Convert raw image byte strings into float arrays scaled to [0, 1).

    Args:
        image_data: iterable of byte strings, one per image.
    Returns:
        numpy array of shape (N, pixels) with values in [0, 1).

    Uses np.frombuffer: np.fromstring is deprecated for binary input and
    made a needless copy; the division already produces a fresh array.
    """
    return np.asarray([np.frombuffer(i, dtype=np.uint8) / 256 for i in image_data])
def labels_to_np_array(labels_data):
    """One-hot encode a sequence of digit labels into an (N, 10) float array."""
    one_hot = np.zeros((len(labels_data), 10))
    for row, label in enumerate(labels_data):
        one_hot[row, label] = 1
    return one_hot
def get_predicted(predict_data):
    """Return the argmax index of each row of prediction scores.

    Ties resolve to the first maximal index, and rows may be any indexable
    sequence (lists or numpy arrays).
    """
    return [max(enumerate(row), key=lambda pair: pair[1])[0] for row in predict_data]
stats_x, stats_y, stats_y2, stats_y3 = [], [], [], []
if CONTINUE or DUMPED:
stats_x, stats_y = load_object('stoch-n-images-stat.dump')
if not DUMPED or (DUMPED and CONTINUE):
train_labels = []
train_images = []
image_size = (28, 28)
timer = Timer()
stdout.write('Loading Train data...')
timer.set_new()
train_labels = reader.read_labels('mnist/train-labels-idx1-ubyte')
train_images = reader.read_images('mnist/train-images-idx3-ubyte')
print('DONE in ' + timer.get_diff_str())
image_size = train_images[1]
stdout.write('Loading Test data...')
timer.set_new()
test_labels = reader.read_labels('mnist/t10k-labels-idx1-ubyte')
test_images = reader.read_images('mnist/t10k-images-idx3-ubyte')
print('DONE in ' + timer.get_diff_str())
image_size = test_images[1]
images_test = images_to_np_array(test_images[2])
labels_test = labels_to_np_array(test_labels[1])
rang_test = len(images_test)
def classify():
    """Accuracy of the current `network` on the MNIST test set.

    Reads the module-level `network`, `images_test` and `test_labels`.
    """
    predicted = network.predict(images_test)
    predicted = get_predicted(predicted)
    return accuracy_score(test_labels[1], predicted)
network = NeuralNetwork(1, 1, 1)
images_train = images_to_np_array(train_images[2])
labels_train = labels_to_np_array(train_labels[1])
cycles = 10
print('Training...')
progress(0)
timer = Timer()
rang = list(range(150, 250, 10))
for j in range(len(rang)):
if not rang[j] in stats_x:
np.random.seed(1)
network = NeuralNetwork(image_size[0] * image_size[1], 300, 10)
for i in range(cycles):
randoms = np.random.randint(0, 60000, rang[j])
network.train(images_train[randoms], labels_train[randoms], 0.1)
if i % 1 == 0:
progress((j * cycles + i + 1) / (cycles * len(rang)))
stats_x.append(rang[j])
stats_y.append(classify())
progress(1)
dump_object((stats_x, stats_y), 'stoch-n-images-stat.dump')
print(' DONE in ', timer.get_diff_str())
pt.plot(stats_x, stats_y, color='red')
pt.show() |
997,031 | 17a13eb2424018488de35415c3f66fe07288b45b | from nltk import Tree
def buildTree(token):
    """Recursively convert a dependency-parse token into an nltk.Tree.

    A token with children becomes a Tree node over its converted children;
    a leaf token is returned as-is.

    Fix: the original leaf branch returned `buildTree(token)`, recursing on
    the same token forever (RecursionError on any leaf).
    """
    if token.n_lefts + token.n_rights > 0:
        return Tree(token, [buildTree(child) for child in token.children])
    return token
997,032 | cc4d7128763b072ab06647ac2b5169d353394505 | __version__ = '0.2.1'
__author__ = 'chenjiandongx'
|
997,033 | f69764bf45e2369bf3144d1de307601fe42ed240 | # x = 0
# while x < 10:
# print(x)
# # x += 1
# x = x + 1
# x = 0
# while True:
# print(x)
# if x == 10:
# break
# x += 1
# Print 0 through 10 using a sentinel flag that flips on the final value.
x = 0
flag = True
while flag:
    print(x)
    flag = x != 10  # clear the flag once the last value is printed...
    x += 1          # ...then advance, leaving x == 11 afterwards
|
997,034 | a88de60a1be600b11be5e0d6879260f43a2c6b6a | # -*- coding: utf-8 -*-
#
# File: plugins/etc_proposals.py
# This file is part of the Portato-Project, a graphical portage-frontend.
#
# Copyright (C) 2006-2010 René 'Necoro' Neumann
# This is free software. You may redistribute copies of it under the terms of
# the GNU General Public License version 2.
# There is NO WARRANTY, to the extent permitted by law.
#
# Written by René 'Necoro' Neumann <necoro@necoro.net>
import os
from subprocess import Popen
class EtcProposals (WidgetPlugin):
    """Portato plugin wrapping the etc-proposals config-update tool."""
    __author__ = "René 'Necoro' Neumann"
    __description__ = "Adds support for <b>etc-proposals</b>, a graphical etc-update replacement."
    __dependency__ = ["app-portage/etc-proposals"]
    def init (self):
        # Run the system binary automatically after every emerge finishes.
        self.prog = ["/usr/sbin/etc-proposals"]
        self.add_call("after_emerge", self.hook, type = "after")
    def widget_init(self):
        # Add an "Etc-Proposals" entry to the plugin menu.
        self.create_widget("Plugin Menu", "Et_c-Proposals", activate = self.menu)
    def launch (self, options = []):
        """Spawn etc-proposals (root only) with the given extra options.

        NOTE(review): mutable default argument; harmless here since the
        list is never mutated, but worth normalizing upstream.
        """
        if os.getuid() == 0:
            Popen(self.prog+options)
        else:
            helper.error("ETC_PROPOSALS :: %s",_("Cannot start etc-proposals. Not root!"))
    def hook (self, *args, **kwargs):
        """Entry point for this plugin."""
        self.launch(["--fastexit"])
    def menu (self, *args):
        # Menu activation: run interactively with no extra flags.
        self.launch()
register(EtcProposals)
|
997,035 | c555e82e99fce486be44a3090307a508933a39f9 | # 注意:在开发时,应该把模块中的所有全局变量全局变量
# 定义在所有函数的上方,就可以保证所有的函数
# 都能正常的访问到每一个全局变量
num = 10
gl_title = "黑马程序员"
name = "小明"
def demo():
    """Show that a local `num` shadows the module-level global of the same name."""
    # When a local variable shares a global's name, PyCharm underlines the
    # local with a grey dashed line.
    num = 99
    print("%d" % num)
    print("%s" % gl_title)
    # print("%s" % name)
# 在定义一个全局变量
demo()
# 在定义一个全局变量
|
997,036 | 9433188449d55e624006bb4ee413f9fb3e0b9e72 | #!/user/bin/env python
#-*- coding:utf-8 -*-
import urlparse
import urllib2
import random
import time
import socket
from datetime import datetime
DEFAULT_AGENT = 'wswp' #代理
DEFAULT_DELAY = 5 #延迟时间
DEFAULT_RETRIES = 2 #重复次数
DEFAULT_TIMEOUT = 20 #等待时间
class Downloader(object):
    '''Page downloader with per-domain throttling, retries on 5xx, optional
    proxies and an optional url -> result cache. (Python 2 / urllib2.)'''
    def __init__(self, delay=DEFAULT_DELAY, user_agent=DEFAULT_AGENT, proxies=None, num_retries=DEFAULT_RETRIES, timeout=DEFAULT_TIMEOUT, opener=None, cache=None):
        socket.setdefaulttimeout(timeout) # global socket timeout for all requests
        self.throttle = Throttle(delay) # enforces a delay between hits on the same domain
        self.user_agent = user_agent # User-Agent header value
        self.proxies = proxies # optional list of proxies to choose from
        self.num_retries = num_retries # retry budget for 5xx responses
        self.opener = opener
        self.cache = cache
    # Run the instance like a function: downloader(url) -> html.
    def __call__(self, url):
        """Return the page HTML for url, serving from the cache when possible."""
        result = None
        if self.cache:
            # With a cache, try the stored page first; a miss raises KeyError.
            try:
                result = self.cache[url]
            except KeyError:
                pass
            else:
                if (self.num_retries > 0 and 500 <= result['code'] < 600) or result['code'] == None:
                    # Retries remain and the cached response was a server
                    # error (or had no code): discard it and re-download.
                    result = None
        if not result:
            # Cache miss (or unusable cached entry): download for real.
            self.throttle.wait(url)
            proxy = random.choice(self.proxies) if self.proxies else None
            headers = {'User-agent': self.user_agent}
            result = self.download(url, headers, proxy=proxy, num_retries=self.num_retries)
            if self.cache:
                # Remember the fresh result for next time.
                self.cache[url] = result
        return result['html']
    def download(self, url, headers, proxy, num_retries, data=None):
        '''Fetch url once, retrying on 5xx; the result may still be an error
        ({'html': str, 'code': int or None}).'''
        print 'Downloading:', url
        request = urllib2.Request(url, data, headers or {})
        opener = self.opener or urllib2.build_opener()
        if proxy:
            proxy_params = {urlparse.urlparse(url).scheme: proxy}
            opener.add_handler(urllib2.ProxyHandler(proxy_params))
        try:
            response = opener.open(request)
            html = response.read()
            code = response.code
        except urllib2.HTTPError,e:
            # No good way to classify HTTP errors yet; to be improved.
            html = ''
            code = None
            print 'Download error[HTTP]:', str(e)
            print 'not make the Page!'
        except urllib2.URLError, e:
            # On URL errors, detect whether the request timed out.
            print 'Download error[URL:%s]:%s'%(url,str(e))
            code = None
            html = ''
            if hasattr(e, 'reason'):
                if str(e.reason) == 'timed out':
                    code = 504
                if num_retries > 0 and 500 <= code < 600:
                    self.throttle.wait(url)
                    return self.download(url, headers, proxy, num_retries-1, data)
            else:
                code = None
        return {'html': html, 'code': code}
class Throttle:
    '''Sleep between consecutive requests to the same domain.'''
    def __init__(self, delay):
        self.delay = delay   # minimum seconds between hits on one domain
        self.domains = {}    # domain -> datetime of last access
    def wait(self, url):
        """Block until `delay` seconds have passed since url's domain was last hit."""
        domain = urlparse.urlsplit(url).netloc
        last_accessed = self.domains.get(domain)
        if self.delay > 0 and last_accessed is not None:
            sleep_secs = self.delay - (datetime.now() - last_accessed).seconds
            if sleep_secs > 0:
                time.sleep(sleep_secs)
        self.domains[domain] = datetime.now()
997,037 | d7b2a48e404092726c2b130212fa81bc0bbe60d0 | class Solution:
def reverseStr(self, s: str, k: int) -> str:
l = list(s)
res = []
step = 2 * k
sub_l = [l[i:i + step] for i in range(0, len(l), step)]
for i in sub_l:
if len(i) < k:
res.extend(i[::-1])
else:
res.extend(i[:k][::-1] + i[k:])
return ''.join(res)
if __name__ == '__main__':
    # Smoke test: expected output is 'bacdfeg'.
    s = 'abcdefg'
    print(Solution().reverseStr(s, 2))
997,038 | 8aad13f9c378234f81d886cd21a1cbc754f8efbc | from flask import Flask
from flask_cors import CORS, cross_origin
app = Flask(__name__)
# Enable CORS app-wide; individual views still opt in via @cross_origin.
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
@app.route("/")
@cross_origin()
def helloWorld():
    """Smoke-test endpoint showing cross-origin requests are permitted."""
    return "Hello, cross-origin-world!"
997,039 | 8e31030b19f02c0ef47b2065fdcfa1fb7fc0b5a9 | V = int (input())
votes = input()
A = votes.count("A")
B = votes.count("B")
if A+B==V:
if A> B:
print("A")
else:
print("B") |
997,040 | 329376e813155819f621eb37f0d53ec2fc17ede5 | from django.shortcuts import render
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view, renderer_classes
from rest_framework.renderers import JSONRenderer
import requests
import environ
from environ import Env
from main.models import CustomUser, Thumb
from django.db.models import Count
import random
env = environ.Env()
environ.Env.read_env()
# Create your views here.
def recommendations_by_genre(user):
    """Return the user's most thumbed-up genre id if it has >= 3 votes, else ''.

    Counts thumbs-up rows per genre, most common first. Guards the empty
    queryset, which previously raised IndexError for a user with no
    thumbs-up rows.
    """
    count = user.thumbs.filter(up=True).values_list('api_genre_id').annotate(genre_count=Count('api_genre_id')).order_by('-genre_count')
    if count and count[0][1] >= 3:
        return count[0][0]
    return ''
def recommendations_by_actor(user):
    """Return the user's most thumbed-up actor id if it has >= 3 votes, else ''.

    Guards the empty queryset, which previously raised IndexError for a
    user with no thumbs-up rows.
    """
    count = user.thumbs.filter(up=True).values_list('api_actor_id').annotate(
        actor_count=Count('api_actor_id')).order_by('-actor_count')
    if count and count[0][1] >= 3:
        return count[0][0]
    return ''
def recommendations_by_director(user):
    """Return the user's most thumbed-up director id if it has >= 3 votes, else ''.

    Guards the empty queryset, which previously raised IndexError for a
    user with no thumbs-up rows.
    """
    count = user.thumbs.filter(up=True).values_list('api_director_id').annotate(
        director_count=Count('api_director_id')).order_by('-director_count')
    if count and count[0][1] >= 3:
        return count[0][0]
    return ''
def user_subscriptions(user):
    """Join the user's subscribed provider ids into a pipe-separated string
    (the format TMDB's `with_watch_providers` parameter expects)."""
    provider_ids = user.subscriptions.values_list('api_provider_id', flat=True)
    return "|".join(str(provider_id) for provider_id in provider_ids)
def recommendations_by_genre_actor(user):
    """TMDB discover params filtered by the user's top genre and top actor.

    Fix: TMDB's discover endpoint expects 'with_genres' (plural); the
    singular 'with_genre' key was silently ignored by the API.
    """
    return {"api_key": env('API_KEY'), "language": user.language, "sort_by": "popularity.desc", "include_adult": "false", "include_video": "false",
            "page": "1", "with_watch_providers": user_subscriptions(user), "watch_region": user.region, "with_genres": recommendations_by_genre(user), "with_cast": recommendations_by_actor(user)}
def recommendations_by_genre_director(user):
    """TMDB discover params filtered by the user's top genre and top director.

    Fix: TMDB's discover endpoint expects 'with_genres' (plural); the
    singular 'with_genre' key was silently ignored by the API.
    """
    return {"api_key": env('API_KEY'), "language": user.language, "sort_by": "popularity.desc", "include_adult": "false", "include_video": "false",
            "page": "1", "with_watch_providers": user_subscriptions(user), "watch_region": user.region, "with_genres": recommendations_by_genre(user), "with_crew": recommendations_by_director(user)}
def recommendations_by_actor_director(user):
    """TMDB discover params filtered by the user's top actor and top director."""
    return {"api_key": env('API_KEY'), "language": user.language, "sort_by": "popularity.desc", "include_adult": "false", "include_video": "false",
            "page": "1", "with_watch_providers": user_subscriptions(user), "watch_region": user.region, "with_cast": recommendations_by_actor(user), "with_crew": recommendations_by_director(user)}
def determine_params(user):
    """Pick TMDB discover params: a random personalized filter once the user
    has rated at least 10 movies, otherwise plain popularity within the
    user's subscriptions and region."""
    if user.thumbs.count() >= 10:
        random_recommendation = [recommendations_by_genre_actor(user), recommendations_by_genre_director(user), recommendations_by_actor_director(user)]
        return random.choice(random_recommendation)
    else:
        return {"api_key": env('API_KEY'), "language": user.language, "sort_by": "popularity.desc", "include_adult": "false", "include_video": "false", "page": "1", "with_watch_providers": user_subscriptions(user), "watch_region": user.region}
def remove_thumbs_down(user, movies):
    """Drop every movie in ``movies['results']`` that the user thumbed down.

    Order of the remaining movies is preserved.
    """
    disliked = set(user.thumbs.filter(up=False).values_list('api_movie_id', flat=True))
    return [movie for movie in movies['results'] if movie['id'] not in disliked]
@api_view(['GET'])
@renderer_classes((JSONRenderer,))
def get_movies(request):
    """Return TMDB discover results for the user, minus thumbed-down movies.

    NOTE(review): the 'user' query parameter is trusted as a primary key —
    confirm authentication/authorization is enforced upstream.
    """
    user = CustomUser.objects.get(pk=request.GET.get('user'))
    params = determine_params(user)
    response = requests.get("https://api.themoviedb.org/3/discover/movie", params=params)
    movies = response.json()
    good_movies = {'results': remove_thumbs_down(user, movies)}
    return Response(good_movies, status=status.HTTP_200_OK)
997,041 | 944f2ba1003a5f0ac4b017351c580fa7280af6f2 | # -*- coding: utf-8 -*-
"""
Unsupervised text keyphrase extraction and summarization utility.
Rasmus Heikkila, 2016
"""
from collections import Counter, defaultdict
import networkx
import spacy
import itertools as it
import math
default_sents = 3
default_kp = 5
nlp_pipeline = spacy.load('en')
def summarize_page(url, sent_count=default_sents, kp_count=default_kp):
    """
    Retrieves a web page, finds its body of content and summarizes it.
    Args:
        url: the url of the website to summarize
        sent_count: number(/ratio) of sentences in the summary
        kp_count: number(/ratio) of keyphrases in the summary
    Returns:
        A tuple (summary, keyphrases). Any exception will be returned
        as a tuple (message, []).
    """
    # Imported lazily so the module works without bs4/requests installed.
    import bs4
    import requests
    try:
        data = requests.get(url).text
        soup = bs4.BeautifulSoup(data, "html.parser")
        # Find the tag with most paragraph tags as direct children
        body = max(soup.find_all(),
                   key=lambda tag: len(tag.find_all('p', recursive=False)))
        paragraphs = map(lambda p: p.text, body('p'))
        text = '\n'.join(paragraphs)
        return summarize(text, sent_count, kp_count)
    except Exception as e:
        # Deliberately broad: any scraping failure becomes an error message.
        return "Something went wrong: {}".format(str(e)), []
def summarize(text, sent_count=default_sents, kp_count=default_kp, idf=None, sg=True):
    """
    Produces a summary of a given text and also finds the keyphrases of the text
    if desired.
    Args:
        text: the text string to summarize
        sent_count: number of sentences in the summary
        kp_count: number of keyphrases in the summary
        idf: a dictionary (string, float) of inverse document frequencies
        sg: flag for enabling SGRank algorithm. If False, the TextRank algorithm is used instead.
    Returns:
        A tuple (summary, keyphrases).
    If sent_count and kp_count are less than one, they will be considered as a
    ratio of the length of text or total number of candidate keywords. If they
    are more than one, they will be considered as a fixed count.
    """
    summary = ""
    # Run the whole spaCy pipeline once and share the Doc with both steps.
    doc = nlp_pipeline(text)
    if sent_count > 0:
        summary = text_summary(doc, sent_count)
    top_phrases = []
    if kp_count > 0:
        if sg:
            top_phrases = sgrank(doc, kp_count, idf=idf)
        else:
            top_phrases = textrank(doc, kp_count)
    return (summary, top_phrases)
def text_summary(doc, sent_count):
    """
    Summarizes given text using word vectors and graph-based ranking.
    Args:
        doc: a spacy.Doc object
        sent_count: number (/ratio) of sentences in the summary
    Returns:
        Text summary
    """
    sents = list(doc.sents)
    sent_graph = networkx.Graph()
    sent_graph.add_nodes_from(idx for idx, sent in enumerate(sents))
    for i, j in it.combinations(sent_graph.nodes_iter(), 2):
        # Calculate cosine similarity of two sentences transformed to the interval [0,1]
        similarity = (sents[i].similarity(sents[j]) + 1) / 2
        if similarity != 0:
            sent_graph.add_edge(i, j, weight=similarity)
    # PageRank over the similarity graph scores each sentence.
    sent_ranks = networkx.pagerank_scipy(sent_graph)
    if 0 < sent_count < 1:
        # Fractional count: interpret as a ratio of the sentence total.
        sent_count = round(sent_count * len(sent_ranks))
    sent_count = int(sent_count)
    top_indices = top_keys(sent_count, sent_ranks)
    # Return the key sentences in chronological order
    top_sents = map(lambda i: sents[i], sorted(top_indices))
    return format_output(doc, list(top_sents))
def format_output(doc, sents):
    """
    Breaks the summarized text into paragraphs.
    Args:
        doc: a spacy.Doc object
        sents: a list of spacy.Spans, the sentences in the summary
    Returns:
        Text summary as a string with newlines
    """
    sent_iter = iter(sents)
    output = [next(sent_iter)]
    # Token indices of every paragraph break (newline token) in the text.
    par_breaks = (idx for idx, tok in enumerate(doc) if '\n' in tok.text)
    try:
        # Find the first newline after first sentence
        idx = next(i for i in par_breaks if i >= output[0].end)
        for sent in sent_iter:
            if '\n' not in output[-1].text:
                if idx < sent.start:
                    # If there was no newline in the previous sentence
                    # and there is one in the text between the two sentences, add it
                    output.append(doc[idx])
            output.append(sent)
            # Advance to the next break after this sentence; StopIteration
            # here aborts straight to the except branch below.
            idx = next(i for i in par_breaks if i >= sent.end)
    except StopIteration:
        # Add the rest of sentences if there are no more newlines
        output.extend(sent_iter)
    # Reassemble with each token's trailing whitespace preserved.
    return ''.join(elem.text_with_ws for elem in output)
def sgrank(doc, kp_count, window=1500, idf=None):
    """
    Extracts keyphrases from a text using SGRank algorithm.
    Args:
        doc: a spacy.Doc object
        kp_count: number of keyphrases
        window: word co-occurrence window length
        idf: a dictionary (string, float) of inverse document frequencies
    Returns:
        list of keyphrases
    Raises:
        TypeError if idf is not dictionary or None
    """
    if isinstance(idf, dict):
        # Unknown terms fall back to an idf of 1 (no reweighting).
        idf = defaultdict(lambda: 1, idf)
    elif idf is not None:
        msg = "idf must be a dictionary, not {}".format(type(idf))
        raise TypeError(msg)
    # Position-decay constant used in the first-occurrence factor below.
    cutoff_factor = 3000
    token_count = len(doc)
    top_n = max(int(token_count * 0.2), 100)
    # Longer documents require a higher minimum term frequency.
    min_freq = 1
    if 1500 < token_count < 4000:
        min_freq = 2
    elif token_count >= 4000:
        min_freq = 3
    # Candidate terms: all n-grams with n = 1..6.
    terms = [tok for toks in (ngrams(doc, n) for n in range(1,7)) for tok in toks]
    # Cache each span's normalized string, keyed by object identity.
    term_strs = {id(term): normalize(term) for term in terms}
    # Count terms and filter by the minimum term frequency
    counts = Counter(term_strs[id(term)] for term in terms)
    term_freqs = {term_str: freq for term_str, freq in counts.items()
                  if freq >= min_freq}
    if idf:
        # For ngrams with n >= 2 we have idf = 1
        modified_tfidf = {term_str: freq * idf[term_str] if ' ' not in term_str else freq
                          for term_str, freq in term_freqs.items()}
    else:
        modified_tfidf = term_freqs
    # Take top_n values, but also those that have have equal tfidf with the top_n:th value
    # This guarantees that the algorithm produces similar results with every run
    ordered_tfidfs = sorted(modified_tfidf.items(), key=lambda t: t[1], reverse=True)
    top_n = min(top_n, len(ordered_tfidfs))
    top_n_value = ordered_tfidfs[top_n-1][1]
    top_terms = set(str for str, val in it.takewhile(lambda t: t[1] >= top_n_value, ordered_tfidfs))
    terms = [term for term in terms if term_strs[id(term)] in top_terms]
    term_weights = {}
    # Calculate term weights
    for term in terms:
        term_str = term_strs[id(term)]
        # Length factor: sqrt of the n-gram's token count.
        term_len = math.sqrt(len(term))
        term_freq = term_freqs[term_str]
        # Earlier first occurrences get a larger logarithmic boost.
        occ_factor = math.log(cutoff_factor / (term.start + 1))
        # Sum the frequencies of all other terms that contain this term
        subsum_count = sum(term_freqs[other] for other in top_terms
                           if other != term_str and term_str in other)
        freq_diff = term_freq - subsum_count
        # term_len == 1 holds exactly for unigrams (sqrt(1) == 1), so only
        # single words are idf-weighted here.
        if idf and term_len == 1:
            freq_diff *= idf[term_str]
        weight = freq_diff * occ_factor * term_len
        if term_str in term_weights:
            # log(1/x) is a decreasing function, so the first occurrence has largest weight
            if weight > term_weights[term_str]:
                term_weights[term_str] = weight
        else:
            term_weights[term_str] = weight
    # Use only positive-weighted terms
    terms = [term for term in terms if term_weights[term_strs[id(term)]] > 0]
    num_co_occurrences = defaultdict(lambda: defaultdict(int))
    total_log_distance = defaultdict(lambda: defaultdict(float))
    # Calculate term co-occurrences and co-occurrence distances within the co-occurrence window
    for t1, t2 in it.combinations(terms, 2):
        dist = abs(t1.start - t2.start)
        if dist <= window:
            t1_str = term_strs[id(t1)]
            t2_str = term_strs[id(t2)]
            if t1_str != t2_str:
                num_co_occurrences[t1_str][t2_str] += 1
                total_log_distance[t1_str][t2_str] += math.log(window / max(1, dist))
    # Weight the graph edges based on word co-occurrences
    edge_weights = defaultdict(lambda: defaultdict(float))
    for t1, neighbors in total_log_distance.items():
        for n in neighbors:
            edge_weights[t1][n] = (total_log_distance[t1][n] / num_co_occurrences[t1][n]) \
                * term_weights[t1] * term_weights[n]
    # Normalize edge weights by sum of outgoing edge weights
    norm_edge_weights = []
    for t1, neighbors in edge_weights.items():
        weights_sum = sum(neighbors.values())
        norm_edge_weights.extend((t1, n, weight / weights_sum)
                                 for n, weight in neighbors.items())
    term_graph = networkx.Graph()
    term_graph.add_weighted_edges_from(norm_edge_weights)
    term_ranks = networkx.pagerank_scipy(term_graph)
    # A fractional kp_count is treated as a ratio of ranked terms.
    if 0 < kp_count < 1:
        kp_count = round(kp_count * len(term_ranks))
    kp_count = int(kp_count)
    top_phrases = top_keys(kp_count, term_ranks)
    return top_phrases
def textrank(doc, kp_count):
    """
    Extracts keyphrases of a text using TextRank algorithm.
    Args:
        doc: a spacy.Doc object
        kp_count: number of keyphrases
    Returns:
        list of keyphrases
    """
    tokens = [normalize(tok) for tok in doc]
    # Candidate keywords: normalized unigrams passing the POS/stopword filters.
    candidates = [normalize(*token) for token in ngrams(doc, 1)]
    word_graph = networkx.Graph()
    word_graph.add_nodes_from(set(candidates))
    # Edges connect words adjacent in the candidate sequence.
    word_graph.add_edges_from(zip(candidates, candidates[1:]))
    kw_ranks = networkx.pagerank_scipy(word_graph)
    if 0 < kp_count < 1:
        kp_count = round(kp_count * len(kw_ranks))
    kp_count = int(kp_count)
    # NOTE(review): despite the name, top_words is a full copy of kw_ranks —
    # no pruning happens before phrase assembly; confirm this is intended.
    top_words = {word: rank for word, rank in kw_ranks.items()}
    keywords = set(top_words.keys())
    phrases = {}
    # Merge runs of consecutive keywords in the token stream into phrases,
    # scoring each phrase by the mean rank of its words.
    tok_iter = iter(tokens)
    for tok in tok_iter:
        if tok in keywords:
            kp_words = [tok]
            kp_words.extend(it.takewhile(lambda t: t in keywords, tok_iter))
            n = len(kp_words)
            avg_rank = sum(top_words[w] for w in kp_words) / n
            phrases[' '.join(kp_words)] = avg_rank
    top_phrases = top_keys(kp_count, phrases)
    return top_phrases
def ngrams(doc, n, filter_stopwords=True, good_tags={'NOUN', 'PROPN', 'ADJ'}):
    """
    Extracts a list of n-grams from a sequence of tokens. Optionally
    filters stopwords and parts-of-speech tags.
    Args:
        doc: sequence of spacy.Tokens (for example: spacy.Doc)
        n: number of tokens in an n-gram
        filter_stopwords: flag for stopword filtering
        good_tags: set of accepted part-of-speech tags; an empty/None set
            disables the tag filter
    Returns:
        a generator of spacy.Spans
    """
    # NOTE: the mutable default set is safe here because it is never mutated.
    candidates = (doc[i:i + n] for i in range(len(doc) - n + 1))
    # Drop n-grams containing whitespace or punctuation tokens.
    candidates = (ngram for ngram in candidates
                  if not any(w.is_space or w.is_punct for w in ngram))
    if filter_stopwords:
        candidates = (ngram for ngram in candidates
                      if not any(w.is_stop for w in ngram))
    if good_tags:
        candidates = (ngram for ngram in candidates
                      if all(w.pos_ in good_tags for w in ngram))
    # `yield from` replaces the manual re-yield loop (same lazy semantics).
    yield from candidates
def normalize(term):
    """
    Convert a spacy Token or Span into a lemmatized string.

    Proper nouns keep their surface form; all other words are lemmatized.

    Args:
        term: a spacy.Token or spacy.Span object

    Returns:
        The normalized string.

    Raises:
        TypeError: if term is neither a Token nor a Span.
    """
    def lemmatize(word):
        # Keep proper nouns as-is; lemmatize everything else.
        return word.text if word.pos_ == 'PROPN' else word.lemma_

    if isinstance(term, spacy.tokens.token.Token):
        return lemmatize(term)
    if isinstance(term, spacy.tokens.span.Span):
        return ' '.join(lemmatize(word) for word in term)
    raise TypeError("Normalization requires a Token or Span, not {}.".format(type(term)))
def top_keys(n, d):
    """Return the n keys of d with the largest values, highest first."""
    ranked = sorted(d.items(), key=lambda item: item[1], reverse=True)
    return [key for key, _ in ranked[:n]]
# CLI help text, printed when the script is run without a URL.
# Fixed typo: "specifiec" -> "specified".
usage = """
Usage: summarize.py [args] <URL>
Supported arguments:
-s --sentences the number of sentences in the summary
-k --keyphrases the number of keyphrases
If the arguments are specified as decimal numbers smaller than one, they are
considered as ratios with respect to the original text.
"""
if __name__ == "__main__":
    import argparse
    import sys
    # Fixed: sys.argv always contains at least the script name, so the
    # original `len(sys.argv) == 0` check could never fire. Print usage and
    # exit when no URL argument was supplied.
    if len(sys.argv) < 2:
        print(usage)
        sys.exit(1)
    parser = argparse.ArgumentParser()
    parser.add_argument("url")
    parser.add_argument("-s", "--sentences", type=float, dest="sent_count",
                        default=default_sents)
    parser.add_argument("-k", "--keyphrases", type=float, dest="kp_count",
                        default=default_kp)
    args = parser.parse_args()
    res = summarize_page(args.url, args.sent_count, args.kp_count)
    print("{} \nKeyphrases: {}".format(res[0], res[1]))
|
997,042 | 625c7a8f53c7f349dfe13ff82e29dab7271a1b24 | """ insertion_sort.py """
def insertion_sort(arr):
    """Sort arr in place with insertion sort and return it."""
    for pos in range(1, len(arr)):
        key = arr[pos]
        slot = pos
        # Shift larger elements one step right until key's slot is found.
        while slot > 0 and arr[slot - 1] > key:
            arr[slot] = arr[slot - 1]
            slot -= 1
        arr[slot] = key
    return arr
def test_program():
    """Run a table-driven regression suite for insertion_sort.

    Refactored from six copy-pasted input/expected/assert stanzas into a
    single loop over (input, expected) pairs; the assertion message is
    unchanged.
    """
    cases = [
        ([22, 11, 99, 88, 9, 7, 42], [7, 9, 11, 22, 42, 88, 99]),
        ([5, 4, 6, 3, 7, 2, 1], [1, 2, 3, 4, 5, 6, 7]),
        ([9, 6, 3, 1], [1, 3, 6, 9]),
        ([3, 1, 2, 2, 3, 1], [1, 1, 2, 2, 3, 3]),
        ([5, -1, 3, -10, 17, 47], [-10, -1, 3, 5, 17, 47]),
        ([5, 51, 21, 19, 17, 47], [5, 17, 19, 21, 47, 51]),
    ]
    for in_test, ex_res in cases:
        res = insertion_sort(in_test)
        assert res == ex_res, "Expected %r but got %r" % (ex_res, res)
    print("All tests have passed")
test_program()
|
997,043 | 82d8aa0497cd15fb3a9329b2f13d26094cf463c7 | # Имеется реализованная функция f(x), принимающая на вход целое число x, которая вычисляет некоторое целочисленое
# значение и возвращает его в качестве результата работы.
# Функция вычисляется достаточно долго, ничего не выводит на экран, не пишет в файлы и зависит только от переданного аргумента x.
#
# Напишите программу, которой на вход в первой строке подаётся число n — количество значений x, для которых требуется
# узнать значение функции f(x), после чего сами эти n значений, каждое на отдельной строке.
# Программа должна после каждого введённого значения аргумента вывести соответствующие значения функции f на отдельной строке.
#
# Для ускорения вычисления необходимо сохранять уже вычисленные значения функции при известных аргументах.
# Обратите внимание, что в этой задаче установлено достаточно сильное ограничение в две секунды по времени исполнения кода на тесте.
# First solution
d = dict()
a = int(input())
def my_function(n):
    """Answer n queries of f(x), memoizing results in the global dict d.

    Reads one integer argument per line; f is assumed expensive, so each
    value is computed at most once.
    """
    for _ in range(n):
        b = int(input())
        # `b not in d` replaces `b in d.keys()` (same semantics, idiomatic,
        # no keys-view allocation); the duplicated print branches are merged.
        if b not in d:
            d[b] = f(b)
        print(d[b])
my_function(a)
# Second
d = dict()
for i in range(int(input())):
b = int(input())
if (b not in d.keys()):
d[b] = f(b)
print(d[b])
|
997,044 | f05d4da245b93b5437fde1e194897e1e09347d63 | from flask import Flask, render_template, json
from markupsafe import escape
import urllib.request
import os
from datetime import datetime
from jinja2 import ext
app = Flask(__name__)
with urllib.request.urlopen("http://apis.is/petrol/") as url:
data = json.loads(url.read().decode())
def format_time(data):
    """Jinja filter: ISO timestamp with microseconds -> 'dd/mm-YYYY HH:MM'."""
    parsed = datetime.strptime(data, '%Y-%m-%dT%H:%M:%S.%f')
    return parsed.strftime('%d/%m-%Y %H:%M')
app.jinja_env.filters['format_time'] = format_time
app.jinja_env.add_extension(ext.do)
def minPetrol():
    """Find the cheapest 95-octane petrol station in the global `data` feed.

    Returns:
        [price, company, name]; company and name stay None when no station
        reports a bensin95 price (price is then float('inf')).
    """
    # float('inf') replaces the old magic sentinel 1000, so prices at or
    # above 1000 can no longer be silently skipped.
    minPetrolPrice = float('inf')
    company = None
    address = None
    for station in data['results']:
        price = station['bensin95']
        if price is not None and price < minPetrolPrice:
            minPetrolPrice = price
            company = station['company']
            address = station['name']
    return [minPetrolPrice, company, address]
def minDiesel():
    """Find the cheapest diesel station in the global `data` feed.

    Returns:
        [price, company, name]; company and name stay None when no station
        reports a diesel price (price is then float('inf')).
    """
    # float('inf') replaces the old magic sentinel 1000, so prices at or
    # above 1000 can no longer be silently skipped.
    minDieselPrice = float('inf')
    company = None
    address = None
    for station in data['results']:
        price = station['diesel']
        if price is not None and price < minDieselPrice:
            minDieselPrice = price
            company = station['company']
            address = station['name']
    return [minDieselPrice, company, address]
@app.route('/')
def home():
    # Front page: full price feed plus the cheapest petrol/diesel stations.
    return render_template('index.html', data=data, minP=minPetrol(), minD = minDiesel())
@app.route('/company/<company>')
def comp(company):
    # Per-company view; the full feed is passed and the filtering by `com`
    # presumably happens in company.html — confirm in the template.
    return render_template('company.html', data=data, com=company)
@app.route('/moreinfo/<key>')
def info(key):
    # Detail view; `k` presumably selects one entry of data['results'] —
    # confirm against moreinfo.html.
    return render_template('moreinfo.html',data=data,k=key)
@app.errorhandler(404)
def pagenotfound(error):
    # Custom 404 page; the status code must be returned explicitly.
    return render_template("pagenotfound.html"), 404
@app.errorhandler(500)
def servernotfound(error):
    # Custom 500 page; the status code must be returned explicitly.
    return render_template("servererror.html"), 500
if __name__ == '__main__':
app.run(debug=True,use_reloader=True) |
997,045 | 38984eb61e851535c55d91c310b43c03f6f5fcbd | import math
# Maximize gcd of the array after removing exactly one element:
# combine prefix and suffix gcd arrays so each candidate is O(1).
N=int(input())
A=list(map(int,input().split()))
# L[i] = gcd of the prefix A[0:i]; gcd(0, x) == x makes 0 the identity.
L=[0]*(N+1)
for i in range(N):
    L[i+1]=math.gcd(L[i],A[i])
# R[i] = gcd of the suffix A[i:].
R=[0]*(N+1)
for i in range(N-1,-1,-1):
    R[i]=math.gcd(R[i+1],A[i])
# M[i] = gcd of all elements except A[i] (prefix before i + suffix after i).
M=[]
for i in range(N):
    M.append(math.gcd(L[i],R[i+1]))
print(max(M))
|
997,046 | ae2b18466862f389aacf4d8a9d7eb46645f994e5 | # -- Defining tuples --
short_tuple = "Rolf", "Bob"
a_bit_clearer = ("Rolf", "Bob")
not_a_tuple = "Rolf"
# -- Adding to a tuple --
friends = ("Rolf", "Bob", "Anne")
friends.append("Jen") # ERROR!
print(friends) # ["Rolf", "Bob", "Anne", "Jen"]
# -- Removing from a tuple --
friends.remove("Bob") # ERROR!
print(friends) # ["Rolf", "Anne", "Jen"]
# Tuples are useful for when you want to keep it unchanged forever.
# Most of the time I'd recommend using tuples over lists, and only use lists when you specifically want to allow changes.
|
997,047 | a94c24c0dfac9a2c1fb729fac2209eee18ffd2d2 | import mrcfile
import numpy as np
import math
from datetime import datetime
from scipy.ndimage import gaussian_filter
# data structure holds voxel information
class Voxel(object):
    """One volume element: grid position, density and region bookkeeping."""

    def __init__(self, x, y, z, density, region_id=-1, nlist=None):
        # Grid position of the voxel.
        self.x_coordinate = x
        self.y_coordinate = y
        self.z_coordinate = z
        # Density value at this position.
        self.density = density
        # -1 marks a voxel not yet assigned to any region.
        self.regionID = region_id
        # Optional neighbor list, filled in later by callers.
        self.nlist = nlist
# class Voxel(object):
# def __init__(self, x, y, z, density, region_id= -1):
# self.x_coordinate = x
# self.y_coordinate = y
# self.z_coordinate = z
# self.density = density
# self.regionID = region_id
# def updaterId(self, x, y, z, rId):
# if self.x_coordinate == x and self.y_coordinate == y and self.z_coordinate == z:
# self.regionID = rId
def three_d_array(x, y, z):
    """Return an x-by-y-by-z nested list filled with None."""
    return [[[None] * z for _ in range(y)] for _ in range(x)]
class DNode(object):
    """A doubly linked list node: payload plus prev/next pointers."""

    def __init__(self, data=None, prev=None, next=None):
        self.data = data
        self.prev = prev
        self.next = next
class DLinkedList(object):
    """A doubly linked list with O(1) insertion/removal at both ends."""

    def __init__(self):
        self.head = None
        self.tail = None
        self.size = 0

    def AppendToHead(self, data):
        """Insert data at the front of the list."""
        node = DNode(data=data)
        if self.head is None:
            # Empty list: the new node is both head and tail.
            self.head = self.tail = node
        else:
            node.next = self.head
            self.head.prev = node
            self.head = node
        self.size += 1

    def AppendToTail(self, data):
        """Insert data at the back of the list."""
        node = DNode(data=data)
        if self.tail is None:
            self.head = self.tail = node
        else:
            node.prev = self.tail
            self.tail.next = node
            self.tail = node
        self.size += 1

    def Search(self, key):
        """Return the first node whose data equals key, or None."""
        node = self.head
        while node is not None and node.data != key:
            node = node.next
        return node

    def RemoveFromHead(self):
        """Detach and return the head node (None when the list is empty)."""
        node = self.head
        if node is not None:
            if self.head is self.tail:
                # Single element: list becomes empty.
                self.head = self.tail = None
            else:
                self.head = node.next
                self.head.prev = None
            self.size -= 1
        return node

    def RemoveFromTail(self):
        """Detach and return the tail node (None when the list is empty)."""
        node = self.tail
        if node is not None:
            if self.head is self.tail:
                self.head = self.tail = None
            else:
                self.tail = node.prev
                self.tail.next = None
            self.size -= 1
        return node
# data structure for regions
class Tree(object):
    """A flat region tree: one root plus a list of direct children."""

    def __init__(self, root):
        self.root = root
        self.children = []
        # Node count including the root itself.
        self.size = 1

    def add_child(self, node):
        """Attach one child and grow the node counter."""
        self.children.append(node)
        self.size += 1

    def add_children(self, children):
        """Attach every node of an iterable; None is silently ignored."""
        if children is not None:
            for child in children:
                self.add_child(child)

    def get_size(self):
        return self.size

    def get_root(self):
        return self.root
# class ListNeighbor(object):
# def __init__(self):
def readData(matrix): # Read the data from mrc.data to voxel object and save in vList
    """
    Convert a 3-D density array into Voxel objects.

    Traverses the full volume in z-major order; voxels below the
    module-level `threshold` go into `regionx`, which is currently
    discarded (see the commented-out return below).
    """
    vList = []
    regionx = []
    row = matrix.shape[0]
    col = matrix.shape[1]
    dep = matrix.shape[2]
    # matrix is indexed [x, y, z] to match the Voxel coordinate order.
    for z in range(dep):
        for y in range(col):
            for x in range(row):
                density = matrix[x, y, z]
                v = Voxel(x, y, z, density)
                if density < threshold:
                    regionx.append(v)
                else:
                    vList.append(v)
    # NOTE(review): getRegions indexes vList by the linear coordinate
    # x + nx*y + nx*ny*z, which assumes vList holds *every* voxel; the
    # threshold filter above makes it sparse — verify this interaction.
    # return [vList, regionx]
    return vList
# initialize program and ask user to input filename
def initialize(): # initialize program
    """
    Prompt for an MRC file and load it into module-level state:
    mrc (open handle), img_matrix (copied densities), nx/ny/nz
    (dimensions), size (total voxel count) and img_3d (empty volume).
    """
    global mrc, img_matrix, nx, ny, nz, size, img_3d #, unit
    fname = input("choose mrc file:")
    # 'r+' keeps the file writable; copying protects the mapped data.
    mrc = mrcfile.open(fname, mode='r+')
    img_matrix = np.copy(mrc.data)
    nx = mrc.header.nx
    ny = mrc.header.ny
    nz = mrc.header.nz
    size = img_matrix.size
    # Fixed: three_d_array takes exactly (x, y, z); the original call
    # three_d_array(None, nx, ny, nz) passed four arguments and raised
    # TypeError as soon as initialize() ran.
    img_3d = three_d_array(nx, ny, nz)
    # checkpoint
    print("number of total voxels: %d" % (size))
    #unit = int(math.sqrt(nx))
def smoothing(matrix, sig=1, cv=0.0, trunc=4.0): #gaussian filter
    """Apply a Gaussian low-pass filter with zero padding at the borders."""
    smoothed = gaussian_filter(matrix, sigma=sig, mode="constant",
                               cval=cv, truncate=trunc)
    return smoothed
def neighbors(matrix, voxel):
    """
    List coordinates of the up-to-26 adjacent voxels whose density is at
    least the module-level `threshold`. The center voxel is excluded.
    """
    found = []
    # Volume bounds along each axis.
    x_max = len(matrix)
    y_max = len(matrix[0])
    z_max = len(matrix[0][0])
    x, y, z = voxel.x_coordinate, voxel.y_coordinate, voxel.z_coordinate
    # Each axis range is clamped to [0, bound) so edge voxels work too.
    for zz in range(max(0, z - 1), min(z_max, z + 2)):
        for yy in range(max(0, y - 1), min(y_max, y + 2)):
            for xx in range(max(0, x - 1), min(x_max, x + 2)):
                # Keep dense voxels only, and skip the center itself.
                if matrix[xx, yy, zz] >= threshold and (xx, yy, zz) != (x, y, z):
                    found.append((xx, yy, zz))
    return found
def getRegions(matrix, vList):
    """
    Greedy watershed-style region growing: visit voxels in order of
    decreasing density, joining each voxel to the neighboring region with
    the most adjacent members, or seeding a new region when no neighbor
    is labeled yet.

    Args:
        matrix: 3-D density array indexed [x, y, z].
        vList: list of Voxel objects; region IDs are written back into it.

    Returns:
        List of Tree objects, one per region, rooted at its densest voxel.
    """
    mregion = []
    regionNum = -1
    t1 = datetime.now()
    # Densest voxels first, so every region grows "downhill".
    vSortedList = sorted(vList, key=lambda voxel: voxel.density, reverse=True)
    t2 = datetime.now()
    delta = t2 - t1
    print("time cost of sort is : %f" % delta.total_seconds())
    print("sorted done, and the number of voxels above threshold is %d" % (len(vSortedList)))
    c = 0
    for v in vSortedList:
        # regionRecord: region id -> count of labeled neighbors in it.
        regionRecord = dict()
        if v.density >= threshold:
            c += 1
            # Linear index assuming vList covers the full nx*ny*nz volume.
            # NOTE(review): readData only appends above-threshold voxels,
            # so vList may be sparse and this index can hit the wrong
            # voxel — verify against readData.
            vi = v.x_coordinate + nx * v.y_coordinate + nx * ny * v.z_coordinate
            nb = neighbors(matrix, v)
            for pos in nb:
                index = pos[0] + nx*pos[1] + nx*ny*pos[2]
                rId = vList[index].regionID
                if rId != -1:
                    if rId in regionRecord:
                        regionRecord[rId] += 1
                    else:
                        regionRecord[rId] = 1
            if len(regionRecord) == 0:
                # No labeled neighbor: this voxel seeds a new region.
                regionNum += 1
                v.regionID = regionNum
                vList[vi].regionID = regionNum
                tree = Tree(root = v)
                mregion.insert(regionNum, tree)
            else:
                # Join the region that touches this voxel most often.
                r = max(regionRecord, key = regionRecord.get)
                v.regionID = r
                vList[vi].regionID = r
                mregion[r].add_child(v)
        else:
            # vSortedList is sorted descending, so all remaining voxels
            # are below threshold too.
            break
    print("number of voxels above threshold: %d" % c)
    return mregion
def gradient(mi, mj): # calculate the gradient
    """Density gradient from voxel mi to mj: delta density over squared distance."""
    squared_dist = ((mi.x_coordinate - mj.x_coordinate) ** 2
                    + (mi.y_coordinate - mj.y_coordinate) ** 2
                    + (mi.z_coordinate - mj.z_coordinate) ** 2)
    return (mj.density - mi.density) / squared_dist
t1 = datetime.now()
initialize()
t2 = datetime.now()
delta = t2 - t1
print("time cost of initialize is : %f" % delta.total_seconds())
img_matrix = smoothing(img_matrix)
print(img_matrix.mean())
img_matrix = smoothing(img_matrix)
threshold = img_matrix.mean()
print(threshold)
vList = readData(img_matrix)
t1 = datetime.now()
mregion = getRegions(img_matrix,vList)
t2 = datetime.now()
delta = t2 - t1
print("time cost of initial M0 is : %f" % delta.total_seconds())
print("number of regions at first before merge: %d" % (len(mregion)))
# tStep = 14
# count = 0
# mregion.reverse()
#
# while count < tStep:
# t1 = datetime.now()
# img_matrix = smoothing(img_matrix)
# print("smoothed %d times" % (count+1))
# p = len(mregion)
# q = int((1+p)/2)
# for t in mregion:
# v = t.root
# xc = v.x_coordinate
# yc = v.y_coordinate
# zc = v.z_coordinate
# v.density = img_matrix[xc, yc, zc]
# print("update density %d times" % (count+1))
# for i in range(q+1):
# gra = dict()
# for j in mregion[1:-1]:
# j = mregion.index(j)
# gra[j]=gradient(mregion[0].root, mregion[j].root)
# g = max(gra, key=gra.get)
# k = mregion[g].root.regionID
# mregion[k].add_child(mregion[0].root)
# mregion[k].children.extend(mregion[0].children)
# mregion.pop(0)
# count += 1
# t2 = datetime.now()
# delta = t2 - t1
# print("time cost of merge is : %f" % delta.total_seconds())
# print("merged %d times" % (count))
#
# rs = len(mregion)
# print("number of regions: %d" % rs)
# shape = (nx, ny, nz)
#
# for i in range(0,rs):
# fname='emdr'+str(i)+'.mrc'
# mrc_new = mrcfile.new('mrcfilestest/{}'.format(fname), overwrite=True)
# mrc_new.set_data(np.zeros(shape, dtype=np.float32))
# mrc_new.voxel_size = mrc.voxel_size
# t = mregion[i]
# r = t.root
# childlist = t.children
# mrc_new.data[t.x_coordinate, t.y_coordinate, t.z_coordinate] = t.density
# for v in childlist:
# mrc_new.data[v.x_coordinate, v.y_coordinate, v.z_coordinate] = v.density
# mrc_new.close()
#
# mrc.close()
|
997,048 | cca1d5979c245d5220d0147081ef338280d86513 |
from xai.brain.wordbase.verbs._reconquer import _RECONQUER
#class header
class _RECONQUERING(_RECONQUER, ):
    """Auto-generated wordbase entry for the verb form "reconquering"."""
    def __init__(self,):
        _RECONQUER.__init__(self)
        # Surface form, word class and lemma for this entry.
        self.name = "RECONQUERING"
        self.specie = 'verbs'
        self.basic = "reconquer"
        self.jsondata = {}
|
997,049 | f6853e5e2d0806fa8f079571593f8044c5370c13 | def get_sandwiches(*toppings):
print("\nThis sandwiches include: ")
for topping in toppings:
print("-" + topping)
get_sandwiches('tomato', 'potato', 'fish')
get_sandwiches('tomato', 'cheese', 'potato', 'tuna fish')
|
997,050 | bacd2f9b75b713450e3ed769e9543b6a1c31a3c7 | #! /usr/bin/env python
import rfmath
import wireless_network
def main():
    # Simple text menu for the two toolkit features (Python 2 syntax).
    print'==============================================================='
    print'option 1 Calculate dBm or mW'
    print'option 2 Kismet.netxml file parse'
    print''
    # NOTE(review): Python 2 input() evaluates the typed expression;
    # int(raw_input(...)) would be safer against arbitrary input.
    opt=input('What would you like to do? ')
    if opt==1:
        rfmath.main_menu()
    if opt==2:
        wireless_network.kismet_main()
main()
|
997,051 | de10edccf705518b2bbc7ca2838129130d421448 | #!/usr/bin/env python
# -*- coding:Utf-8 -*-
from __future__ import division
from pylab import *
import scipy.linalg as LS
from gg_math import *
from solveU import ComputeS
from solveS import SolveS, ComputeEddington, TauGrid, ComputeVfromU
def SolveFeautrier():
    """
    Iterative Feautrier-style radiative transfer solve: alternate between
    computing u from the current source function S and recomputing S from
    the resulting Eddington factors, then recover the directed intensities
    I+ = u + v and I- = u - v.
    """
    nbang = 8  # number of angular quadrature points
    grid, tmp, deltgrid = TauGrid(decnb = 20)
    anglerad, gmu, gwt = gaussangles(nbang)
    I0 = zeros((nbang))
    S = zeros((len(grid)))
    # Boundary condition: unit incoming intensity in the last angle bin.
    I0[-1] = 1
    #S += 1 #IlovePython
    # epsilon: presumably the photon destruction probability; alpha/beta
    # weight the scattering vs. thermal parts of S — confirm in ComputeS.
    epsilon = 0.00001
    alpha = 1 - epsilon
    beta = epsilon
    # Fixed number of iteration sweeps; no convergence test is performed.
    for i in range(5):
        urez = SolveS(nbang, grid, deltgrid, I0, S, anglerad, gmu, gwt)
        edding, h0 = ComputeEddington(urez, gmu, gwt)
        S = ComputeS(grid, deltgrid, edding, alpha, beta, h0)
        print i, urez
    print "================="
    #print urez
    print "We now compute v"
    # v is the antisymmetric half of the Feautrier variables; u +/- v
    # recovers the outgoing/incoming intensities printed below.
    v = ComputeVfromU(urez, deltgrid, gmu)
    print "Now I+"
    print urez + v
    print "Now I -"
    print urez - v
if __name__=="__main__":
SolveFeautrier()
|
997,052 | 8e3707038f9ac01de6e28507c85d7981b5d7310e | from data_generator import generate_post, fetch_post
import threading
from ApiClient import ApiClient
base_url = 'https://world-bulletin-board.uc.r.appspot.com'
username = 'test1'
password = 'test1234'
def retrieve_post():
    """Log in and issue one GET-posts request with random parameters.

    The response is deliberately unused — this function only exercises the
    endpoint as part of the load test below.
    """
    api_client = ApiClient(base_url=base_url)
    api_client.login(username, password)
    latitude, longitude, rand_radius, rand_tags = fetch_post()
    # rand_radius * 2000: presumably a unit conversion for the API's
    # radius parameter — confirm against the API spec.
    posts = api_client.get_post(latitude, longitude, rand_radius * 2000, rand_tags)
def test_get_posts():
    """Fire 500 concurrent retrieve_post calls to load-test the API."""
    worker_total = 500
    workers = [threading.Thread(target=retrieve_post)
               for _ in range(worker_total)]
    for worker in workers:
        worker.start()
    # Block until every request thread has finished.
    for worker in workers:
        worker.join()
if __name__ == '__main__':
test_get_posts()
|
997,053 | 82de3649d7470fb026aac9a37c27ce91ffd429ec | # Generated by Django 3.0.8 on 2020-09-22 14:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Command',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('commandname', models.CharField(blank=True, max_length=120)),
('os', models.CharField(max_length=32)),
],
),
]
|
997,054 | fb540eaa0a2fbe991cd5556718d47312901820e6 | #Fonseca lab_01
#project 1
levels = int(input("How many levels is your pyramid? ")) #ask the user how many levels is your pyramid
for k in range(1,levels + 1): #pyramid starts at 1, and ends at whatever number you entered for levels + 1.
print("*" * k) #printing out half pyramid
#project 2
levels = int(input("How many levels is your pyramid? ")) #ask the user how many levels is your pyramid
for k in range(0, levels): #starts at 0 to make the odd function true
j = 2 * k + 1 #formula for an odd number
print("*" * j) #prints out pyramid in odd increments only
#I could not figure out how to print the full pyramid unfortunately
#project 3
levels = int(input("How many levels is your pyramid? ")) #ask the user how many levels is your pyramid
for k in range(0, levels): #starts at 0 to make the odd function true
j = 2 * k + 1 #formula for an odd number
print("atttt" * j) #prints out pyramid in odd increments only
#makes a triangle with any thing you enter for a building block
#project 4
#I was not able to figure out how to rotate, or even create a code for a parabola.
#I could not figure out what to put in as a code.
#project 5
#I was not able to figure out this problem.
#I could not figure out how to start this problem or what I should be using.
|
997,055 | 185f32746f170951d412e80c2402120911efa8e2 | #我的代码
class Solution:
    """Peak finder using a running rise/fall score (LeetCode 162 style)."""
    def findPeakElement(self, nums: 'List[int]') -> int:
        # Annotation is quoted because `List` is not imported in this file;
        # an unquoted `List[int]` raises NameError when the class body is
        # evaluated.
        if len(nums) == 1:
            return 0
        # Edge peaks are handled up front.
        if nums[1]<nums[0]:
            return 0
        if nums[-1]>nums[-2]:
            return len(nums)-1
        # score[i] accumulates +1 per rise and -1 per fall; when
        # score[i] == score[i-2] and score[i] < score[i-1], index i-1 was a
        # rise followed by a fall, i.e. a local maximum.
        score = {}
        score[0] =0
        c=0
        for i in range(1,len(nums)):
            if nums[i]>nums[i-1]:
                c+=1
                score[i]=c
            else:
                c-=1
                score[i]=c
            if i>=2 and score[i] == score[i-2] and score[i]<score[i-1]:
                return i-1
# Reference solution (someone else's code)
class Solution:
    """Linear-scan peak finder (LeetCode 162 reference solution)."""

    def findPeakElement(self, nums: 'List[int]') -> int:
        """Return an index whose element is greater than both neighbors.

        The first index where the sequence stops rising is a peak; if it
        rises all the way, the last element is the peak.
        """
        # Fixed: the annotation is quoted because `List` is never imported
        # here; the original unquoted `List[int]` raised NameError as soon
        # as the class body was evaluated.
        for i in range(1, len(nums)):
            if nums[i] < nums[i - 1]:
                return i - 1
        return len(nums) - 1
997,056 | c0e4f4b386ef9ee1537611ae3687f0d339ffc229 | import datetime
import gym
import itertools
from agents.sac_agent import SAC_agent
from utils import *
import argparse
def get_args():
    """Parse SAC command-line hyperparameters.

    Returns:
        argparse.Namespace; help strings now match the actual defaults
        (several previously documented stale values).
    """
    parser = argparse.ArgumentParser(description='PyTorch GAIL example')
    parser.add_argument('--env-name', default="Hopper-v2",
                        help='name of the environment to run')
    parser.add_argument('--policy', default="Gaussian",
                        help='algorithm to use: Gaussian | Deterministic')
    # NOTE(review): argparse's type=bool converts any non-empty string
    # (including "False") to True; action='store_true' would be safer.
    parser.add_argument('--eval', type=bool, default=True,
                        help='Evaluates a policy every 10 episodes (default: True)')
    parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
                        help='discount factor for reward (default: 0.99)')
    parser.add_argument('--tau', type=float, default=0.005, metavar='G',
                        help='target smoothing coefficient(τ) (default: 0.005)')
    parser.add_argument('--lr', type=float, default=0.0003, metavar='G',
                        help='learning rate (default: 0.0003)')
    parser.add_argument('--alpha', type=float, default=0.2, metavar='G',
                        help='Temperature parameter α determines the relative importance of the entropy term against the reward (default: 0.2)')
    parser.add_argument('--automatic_entropy_tuning', type=bool, default=False, metavar='G',
                        help='Temperature parameter α automatically adjusted.')
    parser.add_argument('--seed', type=int, default=456, metavar='N',
                        help='random seed (default: 456)')
    parser.add_argument('--batch-size', type=int, default=256, metavar='N',
                        help='batch size (default: 256)')
    parser.add_argument('--num-steps', type=int, default=1000001, metavar='N',
                        help='maximum number of steps (default: 1000001)')
    parser.add_argument('--hidden-size', type=int, default=400, metavar='N',
                        help='hidden size (default: 400)')
    parser.add_argument('--updates-per-step', type=int, default=1, metavar='N',
                        help='model updates per simulator step (default: 1)')
    parser.add_argument('--start-steps', type=int, default=300, metavar='N',
                        help='Steps sampling random actions (default: 300)')
    parser.add_argument('--target-update-interval', type=int, default=1, metavar='N',
                        help='Value target update per no. of updates per step (default: 1)')
    # Fixed: default was 1e6 (a float) while type=int, so the default and
    # command-line-supplied values had different types; an int literal
    # keeps them consistent.
    parser.add_argument('--replay-size', type=int, default=1000000, metavar='N',
                        help='size of replay buffer (default: 1000000)')
    parser.add_argument('--device', type=str, default="cuda:0",
                        help='device to run on (default: cuda:0)')
    parser.add_argument('--actor-path', type=str, default='assets/learned_models/sac_actor_Hopper-v2_1', help='actor resume path')
    parser.add_argument('--critic-path', type=str, default='assets/learned_models/sac_critic_Hopper-v2_1', help='critic resume path')
    args = parser.parse_args()
    return args
args = get_args()
# Environment
# env = NormalizedActions(gym.make(args.env_name))
env = gym.make(args.env_name)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
env.seed(args.seed)
state_dim = env.observation_space.shape[0]
agent = SAC_agent(env, env.observation_space.shape[0], env.action_space, args, running_state=None)
agent.load_model(actor_path=args.actor_path, critic_path=args.critic_path)
agent.save_expert_traj(max_step=50000) |
997,057 | 7a6b82290802265eced8842438fbdf8e9e7a42d4 | import picamera
from picamera import PiCamera
import time
import cv2
import numpy as np
import glob
from tqdm import tqdm
from matplotlib import pyplot as plt
#=====================================
# Function declarations
#=====================================
#Function that Downsamples image x number (reduce_factor) of times.
def downsample_image(image, reduce_factor):
    """Halve the image resolution reduce_factor times using cv2.pyrDown.

    Handles both color (rows, cols, channels) and grayscale (rows, cols)
    arrays; each pass blurs and subsamples to roughly half size.
    """
    for i in range(0,reduce_factor):
        #Check if image is color or grayscale
        if len(image.shape) > 2:
            row,col = image.shape[:2]
        else:
            row,col = image.shape
        # OpenCV sizes are (width, height), hence (col//2, row//2) here.
        image = cv2.pyrDown(image, dstsize= (col//2, row // 2))
    return image
#=========================================================
# Stereo 3D reconstruction
#=========================================================
#Specify image paths
img_path1 = 'data/UphotoL.png'
img_path2 = 'data/UphotoR.png'
#Load pictures
img_1 = cv2.imread(img_path1)
img_2 = cv2.imread(img_path2)
h,w,_ = img_2.shape
#Generate point cloud.
print ("\nGenerating the 3D map...")
window_size = 11
min_disp = 0
num_disp = 64 - min_disp
stereo = cv2.StereoSGBM_create(minDisparity = min_disp,
numDisparities = num_disp,
blockSize = 6,
P1 = 8*3*window_size**2,
P2 = 32*3*window_size**2,
disp12MaxDiff = 1,
uniquenessRatio = 20,
speckleWindowSize = 100,
speckleRange = 60
)
#Compute disparity map
print ("\nComputing the disparity map...")
disparity = stereo.compute(img_1,img_2)
disparityN = disparity/16
disp = cv2.normalize(disparityN, None, 0, 255, norm_type=cv2.NORM_MINMAX)
disp = np.array(disp, dtype=np.uint8)
disp = cv2.applyColorMap(disp, cv2.COLORMAP_JET)
cv2.imshow('disp', disp)
cv2.imshow('imgL', img_1)
points_3D = []
colors = []
#M = None
M = 8788.53
#camera internal reference
camera_factor = 700;
camera_cx = 256;
camera_cy = 212;
camera_fx = 363.0;
camera_fy = 363;
#https://www.programmersought.com/article/8647778259/
# Back-project every 5th pixel with a plausible disparity into 3-D.
for m in range(0,w,5):
    for n in range(0,h,5):
        if disparityN[n,m] > 10 and disparity[n,m] < 500:
            z = M/disparityN[n,m]
            if z > 1000:
                # Fixed: cv2.waitkey does not exist and raised
                # AttributeError; the correct spelling is cv2.waitKey.
                cv2.waitKey(0)
            x = (m - camera_cx) * z / camera_fx;
            #x = m
            y = h-(n - camera_cy) * z / camera_fy;
            #y = h-n
            points_3D.append([x,y,z])
            colors.append(disp[n,m])
points_3D = np.array(points_3D)
colors = np.array(colors)/255
print(points_3D.shape)
print(colors.shape)
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.scatter(w/2,0,h/2, facecolors=[0,1,1], linewidth=0.5)
ax.scatter(points_3D[:,0],points_3D[:,2],points_3D[:,1], facecolors=colors, linewidth=0.1);
plt.show()
|
997,058 | da677e138a2d6b34413536f27ec8d9cab2541611 | # This is a testing suite for a pseudo ARMv7 cpu made using logisim
import math
import random
tests = open("ARMv7.txt", 'w')
tests.write("This is a testing suite for a pseudo ARMv7 cpu made using logisim\n")
def test():
    # Closes the module-level results file. Note this function is never
    # called in this script, so the handle otherwise stays open.
    tests.close()
997,059 | a4590da8cfc6bf61e0fecdf1b0117efe5812b7ba | # -*- coding: utf-8 -*-
"""Generate a default configuration-file section for rc_data_feed"""
from __future__ import print_function
def config_section_data():
    """Return the default ``[feeds]`` section for app.config.

    Invoked by ``resilient-circuits config [-c|-u]`` to seed the user's
    configuration file with a commented template.
    """
    # Returned verbatim; resilient-circuits writes it into app.config.
    return u"""[feeds]
# comma separated section names. ex. sqlserver_feed,file_feed
feed_names=<your feeds>
reload=true
# use reload_types to limit the types of objects when reload=true.
# Ex: incident,task,note,artifact,attachment,<data_table_api_name>
reload_types=
# set to true if ElasticSearch errors occur during reload=true
reload_query_api_method=false
# feed_data is the default message destination that will be listened to
queue=feed_data
# set to true if attachment data should be part of payload send to plugins
include_attachment_data=false
# if necessary, specify the supported workspace (by label, case sensitive) and the list of feeds associated with it
# ex: 'Default Workspace': ['sqlserver_feed'], 'workspace A': ['kafka_feed', 'resilient_feed']
workspaces=
"""
|
997,060 | 3cf0e2e84f56a84a6ee7152426e19f17a598c3d7 | #-------------------------------------------------------------------------------
# Name: settings.py
# Purpose: To create a game for my cs FSE
#
# Author: Ikenna Uduh, 35300999
#
# Created: 15-12-2017
#-------------------------------------------------------------------------------
import pygame
from pygame.locals import *
# define display surface
size = w, h = 900, 880

# initialise display
pygame.init()
CLOCK = pygame.time.Clock()
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Ice Cream Magnet Jump")
FPS = 300

# define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (100, 0, 0)

# Splash screen colours
sBackground = 97, 232, 196
sButtonClr = 239, 148, 29
sButtonClrPressed = 230, 201, 163
sMainButtonClr = sButtonClr
sMainButtonClr2 = sButtonClr
sPAgainButtonClr = sButtonClr

# Pictures used in program (convert_alpha keeps per-pixel transparency)
unscaled_platformPic = pygame.image.load("imgs/Singleplatform.png").convert_alpha()
platformPic = pygame.transform.scale(unscaled_platformPic, (100, 51))
enemy = pygame.image.load("imgs/enemy.png").convert_alpha()
playerR = pygame.image.load("imgs/playerLRight.png").convert_alpha()
playerL = pygame.image.load("imgs/playerLLeft.png").convert_alpha()
playerD = pygame.image.load("imgs/playerLDead.png").convert_alpha()
playerU = pygame.image.load("imgs/playerLJump.png").convert_alpha()
floorImg = pygame.image.load("imgs/floor.png").convert_alpha()
bgimg = pygame.image.load("imgs/bg.jpg").convert_alpha()
player = playerU  # current sprite; swapped as the player moves/jumps/dies

# ### getting sizes of each picture drawn
platSize = platformPic.get_rect().size
# Scaling the image down to something we can use
scale_floorImg = pygame.transform.scale(floorImg, (w, platSize[1]))
scale_enemy = pygame.transform.flip(pygame.transform.scale(enemy, (platSize)),True,False)

# ### getting sizes pictures drawn
playerSize = player.get_rect().size
enemySize = scale_enemy.get_rect().size

# loading in fonts
font1 = pygame.font.SysFont("arial",20)
font2 = pygame.font.SysFont("arial",100)
font3 = pygame.font.SysFont("arial",80)
font4 = pygame.font.SysFont("arial",25)
font5 = pygame.font.SysFont("arial",50)

# Pre-rendered instruction text surfaces
help1 = font2.render("Instructions", True, BLACK)
help2 = font1.render("The goal of this game is simple, get to the top.", True, BLACK)
help3 = font1.render("To move use W to go left, D to go right, and the space bar to jump.", True, BLACK)
help4 = font1.render("The player can hop from platform to platform simply by coming in contact with it.", True, BLACK)
help5 = font1.render("Once you reach the highest platform jump above the top of the screen to proceed to the next section.", True, BLACK)
help6 = font1.render("After you have proceeded to the second sections your handi-cap will be disabled.", True, BLACK)
help7 = font1.render("Now if you fall off of all of the platforms and end up at the bottom of the screen, you lose.", True, BLACK)
help8 = font1.render("Enjoy!", True, BLACK)

# Sounds
# NOTE(review): pygame.mixer.music.load returns None, so bg_music is always
# None; playback goes through pygame.mixer.music directly. Also,
# pygame.mixer.Sound with .mp3 files is unsupported on some builds -- confirm.
bg_music = pygame.mixer.music.load("sounds/bg_music.mp3")
walk_sound = pygame.mixer.Sound('sounds/walk_sound.mp3')
crash_sound = pygame.mixer.Sound('sounds/Crash.mp3')

# Movement variables
maxJumpHeight = 220
xPos = int(w/2) # players x location
yPos = h - platSize[1] + 20 # players y location
gravVel = 1
yVel = 1 # movement speed along the vertical axis
xVel = 2 # movement speed along the horizontal axis
jumpCounter = 0
onGround = True # if the player is on a platform
jumpping = False # if the player is preforming a jump
startFloor = True # defining if there should be a starting platform at the start of the stage
trackY = yPos # used in determining amount of points
points = 0
stage = 1
RAD = 125 # radius of the play button
RAD2 = 50 # radius of the help button
RAD3 = 200 # radius of the play again button
enemyX = 1 # The enemy's starting location in pixels
enemyVel = 1 # The enemy's starting velocity
lossScreen = False # if the losing/play again screen is shown
gameStart = False # if the actual game is running or not
helpStart = False # if the help screen is being shown
997,061 | 14b635ce04c8494799350b170a5d4fd9fa85a607 | from itertools import count
from itertools import product
from itertools import takewhile
from eutility.eusequence import Primes
from eutility.eumath import quadratic
from eutility.eutility import Biggest
from eutility.eumath import primes
def euler027(limit):
    '''Quadratic primes
    Euler discovered the remarkable quadratic formula:
    n**2 + n + 41
    It turns out that the formula will produce 40 primes for the consecutive values n = 0 to 39.
    However, when n = 40, 402 + 40 + 41 = 40(40 + 1) + 41 is divisible by 41, and certainly when
    n = 41, 41n**2 + 41 + 41 is clearly divisible by 41.
    The incredible formula n**2 - 79n + 1601 was discovered, which produces 80 primes for the
    consecutive values n = 0 to 79. The product of the coefficients, -79 and 1601, is -126479.
    Considering quadratics of the form:
    n**2 + an + b, where |a| < 1000 and |b| < 1000
    where |n| is the modulus/absolute value of n
    e.g. |11| = 11 and |-4| = 4
    Find the product of the coefficients, a and b, for the quadratic expression that produces
    the maximum number of primes for consecutive values of n, starting with n = 0.
    '''
    P = Primes()   # prime membership test (project helper; presumably lazily extended)
    B = Biggest()  # keeps the maximum score seen and remembers its payload
    # b is drawn from primes(limit) because n = 0 gives quadratic(0, a, b) == b,
    # which must itself be prime for the run to be non-empty.
    # takewhile/count counts consecutive n >= 0 producing primes.
    for a, b in product(range(-limit+1, limit), primes(limit)):
        B.set(len(list(takewhile(lambda x: quadratic(x, a, b) in P, count()))), a * b)
    return B.data  # the product a*b of the winning coefficient pair
|
997,062 | 16ef094895a656922174e028cd945015f9701655 | from slack import *
import argparse
import sys
def send_messages(channel, message, attachments):
    """Send *message* (with optional image *attachments*) to a Slack *channel*.

    Errors are reported to stdout instead of propagating, so a failed
    delivery never aborts the calling script.
    """
    try:
        Slack(channel).send(message, attachments)
    except Exception as e:
        print(str(e))
if __name__ == '__main__':
    # CLI entry point: parse -c/-m/-a flags and forward them to send_messages.
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--channel", help="Canal SLACK a receber a mensagem!", type=str)
    parser.add_argument("-m", "--mensagem", help="Mensagem a ser enviada!", type=str)
    parser.add_argument("-a", "--attachments", help="Imagens a serem exibidas na mensagem. Obs: Informar url da imagem.", type=str)
    args = parser.parse_args()
    # Channel and message are mandatory; show usage and exit cleanly otherwise.
    if args.mensagem == None or args.channel == None:
        parser.print_help()
        sys.exit(0)
    send_messages(args.channel, args.mensagem, args.attachments)
997,063 | 36dcc3a9a86198eb61e5be289e370fe6dfd81fc1 | # MIT License
#
# Copyright (c) 2017, Stefan Webb. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Take a list of list of etc and reduces down to contain a single list with just the elements
def flatten(l):
    """Recursively flatten arbitrarily nested lists into one flat list.

    Non-list inputs are wrapped as a single-element list; an empty list
    flattens to an empty list.
    """
    if type(l) is not list:
        return [l]
    if not l:
        # BUG FIX: the original recursed into l[0] unconditionally, so an
        # empty list (at any nesting depth) raised IndexError.
        return []
    return flatten(l[0]) + (flatten(l[1:]) if len(l) > 1 else [])
# For a list/tuple with one element, return the element, otherwise return the list
def unwrap(l):
    """Collapse a one-element list or tuple to its sole element.

    Anything that is not exactly a list/tuple, or that has a length other
    than 1, is returned unchanged.
    """
    if type(l) in (list, tuple):
        return l[0] if len(l) == 1 else l
    return l
# For a single (non-list/tuple) element, wrap in a list, otherwise leave as is
def wrap(l):
    """Wrap a bare (non-list, non-tuple) value in a list; sequences pass through."""
    return l if type(l) in (list, tuple) else [l]
# For a non-list element, wrap in a list, otherwise leave as is
def wrap_list(l):
    """Wrap any non-list value (tuples included) in a list; lists pass through."""
    return l if type(l) is list else [l]
997,064 | 448d93f997f72011385abd2c3583e6cbc52f79f4 | from django.db import models
#supplier Model
class Supplier(models.Model):
    """A product supplier / brand, with GST and contact details."""
    supplier_name = models.CharField(max_length=100)
    supplier_address = models.CharField(max_length=300)
    supplier_phone = models.CharField(max_length=12)
    gst_uin = models.CharField(max_length=30)  # GSTIN tax identifier
    state_name = models.CharField(max_length=50)
    code = models.CharField(max_length=20)  # presumably the state code -- TODO confirm
    email = models.CharField(max_length=30)  # NOTE(review): consider models.EmailField
    brand_name = models.CharField(max_length=50)
#product model
class ProductModel(models.Model):
    """A sellable product variant; deleting its Supplier cascades here."""
    name = models.CharField(max_length=50, null=False)
    brand = models.ForeignKey(Supplier, on_delete=models.CASCADE)
    color = models.CharField(max_length=100, default=None, blank=True, null=True)
    ram = models.CharField(max_length=100, default=None, blank=True, null=True)
    rom = models.CharField(max_length=100, default=None, blank=True, null=True)
    # NOTE(review): "discription" looks like a typo for "description";
    # renaming requires a migration, so it is only flagged here.
    discription = models.CharField(max_length=100, default=None, blank=True, null=True)
#class warehouse
class Warehouse(models.Model):
    """A storage location with its own tax and contact details."""
    name = models.CharField(max_length=100)
    phone = models.CharField(max_length=100)
    gst = models.CharField(max_length=100)
    email = models.CharField(max_length=100)  # NOTE(review): consider models.EmailField
    state = models.CharField(max_length=100)
    code = models.CharField(max_length=100)
    address = models.CharField(max_length=400)
|
997,065 | 66837d602a0017ee05a0876c5b511bf683e24d16 | VERSION = (1, 0, 2)
__version__ = ".".join(map(str, VERSION))
|
997,066 | 731f9bfffd27f8e44f2b12f19bbf1cd39aa7b837 | def pos_arroba(x):
a = x.find('@')
return x[:a] |
997,067 | 4b6719d5611a3e021da5411250618368c5b2951b | from ncssl_api_client.console.parsers.abstract_parser import AbstractParser
from ncssl_api_client.api.commands.invoker import Invoker
from ncssl_api_client.api.enumerables.certificate_types import CertificateTypes
class ActivateParser(AbstractParser):
    """CLI sub-parser for the ``activate`` command (CSR generation + activation)."""

    def __init__(self):
        # Command name must match the invoker's dispatch table.
        self.name = Invoker.COMMAND_NAME_ACTIVATE
        self.help = "Generates CSR and activates a certificate with it"

    def add_parser(self, subparsers):
        """Register the ``activate`` sub-command and its arguments.

        ``dest`` names mirror the remote API parameter names so the parsed
        namespace can be forwarded to the API layer directly.
        """
        super(ActivateParser, self).add_parser(subparsers)
        self.parser.add_argument("-cn", "--common_name", help="Common Name to activate certificate for", type=str, required=True)
        self.parser.add_argument("-sans", "--sans", help="Additional Domains to activate certificate for", type=str, dest="DNSNames")
        self.parser.add_argument("-sans_e", "--sans_emails", help="A comma-separated list of approver emails for additional domains", type=str, dest="DNSApproverEmails")
        self.parser.add_argument("-enc", "--encrypt", help="Whether to encrypt private key", action='store_true')
        self.parser.add_argument("-id", "--cert_id", help="Certificate ID to activate", dest='CertificateID')
        self.parser.add_argument("-t", "--type", help="Certificate Type", type=CertificateTypes, default='PositiveSSL', dest='Type', choices=list(CertificateTypes))
        self.parser.add_argument("-y", "--years", help="Validity period", type=int, default=1, dest='Years')
        # The three domain-control validation methods are mutually exclusive.
        group = self.parser.add_mutually_exclusive_group()
        group.add_argument("-http", "--http_dcv", help="Use HTTP validation", action='store_true', dest='HTTPDCValidation')
        group.add_argument("-dns", "--dns_dcv", help="Use DNS validation", action='store_true', dest='DNSDCValidation')
        group.add_argument("-e", "--email", help="Approver Email", type=str, dest='ApproverEmail')
|
997,068 | 05db1e39bf7638824b2c8dba027eb767409ae346 | #!/usr/bin/env python
# coding: utf-8
# This blog is a tour through Inheritance in Python.
#
# This blog assumes no prior knowledge, and teaches the Reader from the ground up what Inheritance and how to use it in Python.
#
# For the Reader who already knows inheritance and is reading this blog in order to audit it (you know who you are!). Please comment if there's anything you question. Any feedback is welcome.
#
# Let's Go.
# What is inheritance?
#
# That's a big question, right?! If we could say in a sentence or paragraph, what it is, then, well, it obviously wouldn't be complete. Instead, let's describe Inheritance as we go.
#
# The first point: Inheritance means exactly that — you inherit. Let's look at code that does this.
#
# In this inheritance example, we'll see what this means to have one's own method and inherit some method
# In Python, the `__init__` is the constructor. This method is called when an object is created.
#
# It contains the arguments passed to the class.
# In[25]:
# __init__ constructor example
# class with no inheritance
class MyClass:
    def __init__(self, a):
        # Report which class is being constructed, then store the argument.
        print(f"we're in: {self.__class__.__name__}")
        self.a = a

my_class = MyClass('foo')
vars(my_class), my_class.a  # inspect the instance dict and the stored attribute
# Let's inherit from a parent
# In[33]:
class ParentMyClass:
    def __init__(self, a):
        cls_name = self.__class__.__name__
        # self.__class__ is the *runtime* type, so constructing through
        # Child prints "parents: Child".
        print(f"parents: {cls_name}", a)

class Child(ParentMyClass):
    def __init__(self, a):
        # Run the parent's constructor first, then add child behaviour.
        super().__init__(a)
        print(f"child: {self.__class__.__name__}", a)
        self.a = a

Child('bob')
# Call the Parent class when they have a different implementation
# In[42]:
class ParentMyClass:
    def __init__(self, a):
        cls_name = self.__class__.__name__
        self.a = a
        print("foo", a)

class Child(ParentMyClass):
    def __init__(self, b):
        # The parent sets self.a from the value we forward as its "a".
        super().__init__(b)
        print(f"bar", b)
        self.b = b

child = Child('bob')
# Why should I call a parent class?
#
# Maybe the parent class sets some functionality that I want to happen for free.
# In[ ]:
# In[40]:
child.a
# In[41]:
child.b
# In[ ]:
class A:
    def __init__(self):
        print("I'm A")

class B:
    def __init__(self):
        print("I'm B")

# In[ ]:

# In[ ]:

# Redefinitions of A/B plus a small hierarchy to show constructor chaining.
class A:
    def __init__(self):
        print("I'm A")

class B:
    def __init__(self):
        print("I'm B")

class C:
    def __init__(self):
        print("I'm C")

class D(A):
    def __init__(self):
        # Explicitly chains to A's constructor, so D() prints both lines.
        super().__init__()
        print("I'm D")

class E(A, B):
    def __init__(self):
        # Deliberately does NOT call super(): neither A nor B runs here.
        print("I'm E")

d = D()

# In[9]:

E()
# In[ ]:
|
997,069 | f0e66ed08997973fe3dc5ea0694206e277ce2402 | qnt = int(input())
if 2 <= qnt <= 99:
    # For each of the qnt input lines, answer "gzuz" whenever it is a question.
    for i in range(qnt):
        pergunta = input()
        if '?' in pergunta:
            print("gzuz")
997,070 | 9589f3c9be14c57cd3f58056ff52f1b0f348e005 | from zops.anatomy.layers.tree import merge_dict
from collections import OrderedDict
class FeatureNotFound(KeyError):
    """Raised by AnatomyFeatureRegistry.get for an unknown feature name."""
    pass
class FeatureAlreadyRegistered(KeyError):
    """Raised when registering a feature name that is already taken."""
    pass
class AnatomyFeatureRegistry(object):
    """Process-wide registry mapping feature names to AnatomyFeature instances."""

    feature_registry = OrderedDict()

    @classmethod
    def clear(cls):
        """Forget every registered feature."""
        cls.feature_registry = OrderedDict()

    @classmethod
    def get(cls, feature_name):
        """
        Returns a previously registered feature associated with the given feature_name.

        :param str feature_name:
        :return AnatomyFeature:
        :raises FeatureNotFound: when no feature uses that name.
        """
        try:
            return cls.feature_registry[feature_name]
        except KeyError:
            raise FeatureNotFound(feature_name)

    @classmethod
    def register(cls, feature_name, feature):
        """
        Registers a feature instance under a unique name.

        :param str feature_name:
        :param AnatomyFeature feature:
        :raises FeatureAlreadyRegistered: when the name is already taken.
        """
        if feature_name in cls.feature_registry:
            raise FeatureAlreadyRegistered(feature_name)
        cls.feature_registry[feature_name] = feature

    @classmethod
    def register_from_file(cls, filename):
        """Load a YAML file and register every feature it declares."""
        from zops.anatomy.yaml import yaml_from_file
        return cls.register_from_contents(yaml_from_file(filename))

    @classmethod
    def register_from_text(cls, text):
        """Register features declared in a (dedented) YAML snippet."""
        from zops.anatomy.yaml import yaml_load
        from zops.anatomy.text import dedent
        return cls.register_from_contents(yaml_load(dedent(text)))

    @classmethod
    def register_from_contents(cls, contents):
        """Register every entry found under the 'anatomy-features' key."""
        for feature_dict in contents['anatomy-features']:
            feature = AnatomyFeature.from_contents(feature_dict)
            cls.register(feature.name, feature)

    @classmethod
    def tree(cls):
        """
        Returns all files created by the registered features.

        This is part of the helper functions for the end-user: it lists every
        feature that creates a file, so users can discover the file-ids.

        :return list(3-tuple(str, str, str)):
            (feature name, file-id, filename) triples.
            NOTE(review): the filename is emitted for both the file-id and
            filename slots -- confirm whether a distinct id was intended.
        """
        entries = []
        for feature_name, feature in cls.feature_registry.items():
            if feature.filename:
                entries.append((feature_name, feature.filename, feature.filename))
        return entries
class IAnatomyFeature(object):
    """
    Abstract interface of a feature: something that contributes content to an
    AnatomyTree through its ``apply`` method.

    Usage:
        tree = AnatomyTree()
        feature = AnatomyFeatureRegistry.get('alpha')
        feature.apply(tree)
        tree.apply('directory')
    """

    def __init__(self, name):
        # Name-mangled so subclasses cannot accidentally shadow it.
        self.__name = name

    @property
    def name(self):
        """Read-only feature name."""
        return self.__name

    def apply(self, tree):
        """
        Apply this feature instance in the given anatomy-tree.

        :param AnatomyTree tree:
        """
        raise NotImplementedError()
class AnatomyFeature(IAnatomyFeature):
    """Concrete feature: optionally creates one file (or symlink) and merges variables."""

    def __init__(self, name, variables=None, use_features=None):
        super(AnatomyFeature, self).__init__(name)
        # Own variables are namespaced under the feature's name.
        self.__variables = OrderedDict()
        self.__variables[name] = variables or OrderedDict()
        # Mapping feature-name -> variable overrides for required features.
        self.__use_features = use_features or OrderedDict()
        # File-creation state; populated by create_file()/create_link().
        self.__filename = None
        self.__contents = None
        self.__symlink = None
        self.__executable = False

    @classmethod
    def from_contents(cls, contents):
        """Build a feature from a parsed YAML dict.

        Keys are pop()ed so that any leftover (unknown) key raises KeyError,
        surfacing typos in the feature declaration.
        """
        def optional_pop(dd, key, default):
            # dict.pop with a default, kept for symmetry with the plain pops.
            try:
                return dd.pop(key)
            except KeyError:
                return default

        name = contents.pop('name')
        variables = contents.pop('variables', OrderedDict())
        use_features = contents.pop('use-features', None)
        result = AnatomyFeature(name, variables, use_features)
        create_file = contents.pop('create-file', None)
        if create_file:
            filename = create_file.pop('filename')
            symlink = optional_pop(create_file, 'symlink', None)
            executable = optional_pop(create_file, 'executable', False)
            if symlink is not None:
                result.create_link(filename, symlink, executable=executable)
            else:
                file_contents = create_file.pop('contents')
                result.create_file(filename, file_contents, executable=executable)
            if create_file.keys():
                # Unknown keys under create-file are an error.
                raise KeyError(list(create_file.keys()))
        if contents.keys():
            # Unknown top-level keys are an error.
            raise KeyError(list(contents.keys()))
        return result

    @property
    def filename(self):
        # Filename this feature creates, or None when it creates no file.
        return self.__filename

    def apply(self, tree):
        """
        Implements AnatomyFeature.apply.

        Order matters: used-features' variables are left-joined first, then
        the file/link is declared, then own variables are merged last.
        """
        tree.add_variables(self.__use_features, left_join=True)
        if self.__filename:
            if self.__contents:
                tree.create_file(self.__filename, self.__contents, executable=self.__executable)
            else:
                tree.create_link(self.__filename, self.__symlink, executable=self.__executable)
        tree.add_variables(self.__variables, left_join=False)

    def using_features(self, features):
        """Recursively collect this feature and all its dependencies.

        :param dict features: name -> AnatomyFeature accumulator (mutated in place).
        """
        for i_name, i_vars in self.__use_features.items():
            feature = AnatomyFeatureRegistry.get(i_name)
            feature.using_features(features)
        # DEBUGGING: print('using anatomy-feature {} ({})'.format(self.name, id(self)))
        feature = features.get(self.name)
        if feature is None:
            features[self.name] = self
        else:
            # The same name must always resolve to the same instance.
            assert id(feature) == id(self)

    def create_file(self, filename, contents, executable=False):
        """Declare that this feature creates *filename* with *contents*."""
        self.__filename = filename
        self.__contents = contents
        self.__symlink = None
        self.__executable = executable

    def create_link(self, filename, symlink, executable=False):
        """Declare that this feature creates *filename* as a symlink to *symlink*."""
        self.__filename = filename
        self.__contents = None
        self.__symlink = symlink
        self.__executable = executable
|
997,071 | 755f0412c720a75742f62da16381bbd7649cfd25 | name = "Danny"
age = 15
student = {"name": name}
scores = [100, 99, 95]
location = ('123 Main', 'NY')

# Show how repr() and str() differ for each builtin type.
for item in (name, age, student, scores, location):
    print(f"{type(item)!s: <15}| repr: {repr(item): <20}| str: {str(item)}")
class Student:
    """Bare data holder; inherits object's default __repr__/__str__."""
    def __init__(self, name, age):
        self.name = name
        self.age = age
class Student:
    """Demonstrates fixed-text __repr__ and __str__ overrides."""

    def __init__(self, name, age):
        self.name, self.age = name, age

    def __repr__(self):
        return "Student __repr__ string"

    def __str__(self):
        return "Student __str__ string"
class Student:
    """__repr__ gives a re-creatable form for developers; __str__ is for end users."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def __repr__(self):
        return "Student({!r}, {})".format(self.name, self.age)

    def __str__(self):
        return "Student Name: {}; Age: {}".format(self.name, self.age)
997,072 | 6e21348855d3ccb8903f6081f5618f21b9254c06 | import torch
from torch.utils.data import Dataset, DataLoader, WeightedRandomSampler
from tqdm import tqdm
import torchvision
import cv2
import numpy as np
from os.path import join, basename, dirname, exists
import json
from utils import get_paths, get_files_paths_and_labels
from utils import get_validation_augmentations, get_training_augmentations
import pandas as pd
from sklearn.model_selection import train_test_split
class SETIDataset(Dataset):
    """Dataset of SETI cadence snippets stored as .npy files plus binary targets."""

    def __init__(self, data_file_paths, targets, transform=None):
        """
        Initializes SETI dataset class.

        Parameters
        ----------
        data_file_paths : list of PATH-STR
            Paths to the .npy cadence files.
        targets : sequence
            Per-file labels, aligned with data_file_paths.
        transform : FUNCTION, optional
            Function to preprocess a given cadence (receives a float32
            tensor). The default is None.

        Returns
        -------
        None.
        """
        self.transform = transform
        self.data_file_paths = data_file_paths
        self.targets = targets

    def __len__(self):
        return len(self.targets)

    def __getitem__(self, idx):
        # Read file at given index and convert once to a float32 tensor.
        data = np.load(self.data_file_paths[idx]).astype(np.float32)
        data = torch.from_numpy(data)
        if self.transform is not None:
            # The augmentation pipeline handles any reshaping itself.
            data = self.transform(data)
        else:
            # BUG FIX: the original added the channel axis and then called
            # torch.from_numpy() on what was already a tensor, which raises
            # TypeError. Add the leading channel dimension directly instead.
            data = data.unsqueeze(0)
        # Grab label, return
        label = torch.tensor(self.targets[idx]).float()
        return data, label
def get_dataloaders(data_dir, hyp):
    """
    Ingests the data folder and returns training and validation data loaders.

    Parameters
    ----------
    data_dir : path
        Path to parent data directory.
    hyp : dict
        Hyperparameters; uses 'perc_train', 'shuffle', 'batch_size' plus the
        settings consumed by the get_*_augmentations helpers.

    Returns
    -------
    Train, validation dataloaders.
    """
    # Grab data, targets
    data_file_paths, targets = get_files_paths_and_labels(data_dir)

    # Split into train/validation (stratified so class balance is preserved).
    train_data, val_data, train_labels, val_labels = train_test_split(data_file_paths,
                                                                      targets,
                                                                      train_size=hyp['perc_train'],
                                                                      shuffle=hyp['shuffle'],
                                                                      stratify=targets)

    # Create train/validation augmentation handler
    train_aug = get_training_augmentations(hyp)
    val_aug = get_validation_augmentations(hyp)

    # Create datasets
    train_dset = SETIDataset(train_data, train_labels, transform=train_aug)
    val_dset = SETIDataset(val_data, val_labels, transform=val_aug)

    # Create dataloaders (only the training loader is shuffled).
    train_loader = DataLoader(train_dset, shuffle=True, batch_size=hyp['batch_size'],
                              pin_memory=True, num_workers=8)
    val_loader = DataLoader(val_dset, batch_size=hyp['batch_size'],
                            pin_memory=True, num_workers=8)
    return train_loader, val_loader
def get_dataset_parameters(dataloader):
    """
    Computes and prints the global mean and standard deviation of the data.

    Parameters
    ----------
    dataloader : iterable of (data, target) batches
        Typically a torch.utils.data.DataLoader.

    Returns
    -------
    (total_mean, total_std) : torch scalars
        Also printed to stdout. (The original only printed; returning the
        values is a backward-compatible addition.)
    """
    mean = 0.0
    meansq = 0.0
    count = 0

    for index, (data, targets) in enumerate(dataloader):
        # BUG FIX: the original overwrote `mean` on every batch
        # (`mean = data.sum()`) while accumulating `meansq`, producing a
        # wrong mean/std for any loader with more than one batch.
        mean = mean + data.sum()
        meansq = meansq + (data ** 2).sum()
        count += np.prod(data.shape)

    total_mean = mean / count
    # Var[X] = E[X^2] - E[X]^2
    total_var = (meansq / count) - (total_mean ** 2)
    total_std = torch.sqrt(total_var)
    print("mean: " + str(total_mean))
    print("std: " + str(total_std))
    return total_mean, total_std
|
997,073 | dad50d730a2a52db569f905adb3921b6f8a8e246 | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def w(x):
    """Normalized Gaussian weight exp(-x**2) / sqrt(pi)."""
    return np.exp(-x ** 2) / np.sqrt(np.pi)
def next_chain_link(x, y):
    """Metropolis acceptance test: accept proposal y from state x with
    probability min(1, w(y)/w(x))."""
    acceptance_ratio = w(y) / w(x)
    return acceptance_ratio >= np.random.rand()
def metro_alg(N):
    """ metropolis algorithm that creates a markov chain of length N

    Returns two chains: ``chain`` keeps rejected-step duplicates (standard
    Metropolis), while ``chain_removed`` only grows on acceptance.
    """
    chain = []
    chain_removed = []
    chain.append(0)
    chain_removed.append(0)
    for i in range(N):
        # NOTE(review): j is reset to 0 on every iteration, so chain_removed
        # is always compared against its first element (0) rather than its
        # latest -- presumably a bug; confirm the intended reference element.
        j = 0
        y = (np.random.rand()-0.5)*10  # uniform proposal on [-5, 5)
        if next_chain_link(chain[i], y):
            chain.append(y)
        else:
            chain.append(chain[i])
        if next_chain_link(chain_removed[j], y):
            chain_removed.append(y)
            j += 1
    return chain, chain_removed
# N = 100000
# chain, chain_removed = metro_alg(N)
#
# x_values = np.linspace(-3, 3, N) #x values to plot w(x)
# sns.distplot(chain, label="chain")
# sns.distplot(chain_removed, label="chain removed")
# plt.plot(x_values, w(x_values), label="weight")
# plt.legend()
# plt.show()
# a) little bump at the peak probably comes from random.rand which creates random number between 0 and whithout 1?
# b) chain-removed has slightly lower peak but very little
#######################################################################################################################
#2 a)
N = 64  # lattice side length (interior sites per dimension)
kb = 1 #boltzman constant (natural units)
index = np.arange(1, N+1) #used to create random indices (interior site range)
# def H(lattice, h):
# """ calculates the energy H({s_l}) """
#
# H = 0
# for i in range(1, N+1):
# for j in range(1, N+1):
# H -= lattice[i, j]*(lattice[i, j-1] + lattice[i-1, j]) + h*lattice[i, j]
# H -= 2*lattice[i, j] * (lattice[i, j - 1] + lattice[i - 1, j] + lattice[i, j + 1] + lattice[i + 1, j]) + 2*h * lattice[i, j]
#
# return H
# def next_chain_link_ising(x, y, T, h):
# """ checks whether y is accepted as next chain link """
#
# gamma = np.random.rand()
# alpha = np.exp(-(H(y, h) - H(x, h))/(kb * T))
#
# return alpha >= gamma
def transform_lattice(lattice):
    """Quantize a lattice to +/-1/2 spins and impose periodic boundaries in place.

    Generalized: the interior size is inferred from the array shape instead
    of relying on the module-level N, so any (n+2) x (n+2) padded lattice
    works. The single vectorized pass also replaces the O(n^2) Python loops.

    :param np.ndarray lattice: square (n+2, n+2) array; mutated and returned.
    """
    n = lattice.shape[0] - 2
    # Threshold every site to spin +1/2 (value >= 0.5) or -1/2.
    lattice[:, :] = np.where(lattice >= 0.5, 0.5, -0.5)
    # Periodic boundaries: ghost rows/columns mirror the opposite interior edge.
    lattice[0, :] = lattice[n, :]
    lattice[n + 1, :] = lattice[1, :]
    lattice[:, 0] = lattice[:, n]
    lattice[:, n + 1] = lattice[:, 1]
    # Ghost corners mirror the diagonally opposite interior corners.
    lattice[0, 0] = lattice[n, n]
    lattice[0, n + 1] = lattice[n, 1]
    lattice[n + 1, 0] = lattice[1, n]
    lattice[n + 1, n + 1] = lattice[1, 1]
    return lattice
def H(lattice, i, j, h, T):
    """ checks whether the single-spin flip at (i, j) is accepted.

    NOTE(review): despite the name this does not compute the Hamiltonian; it
    returns True when the flip should be accepted. Relies on module-level kb.
    """
    gamma = np.random.rand()
    # Energy change of flipping s_ij against its 4 neighbours plus field h.
    delta_E = -2*lattice[i, j] * (lattice[i, j - 1] + lattice[i - 1, j] + lattice[i, j + 1] + lattice[i + 1, j]) - 2*h * lattice[i, j]
    # NOTE(review): standard Metropolis accepts an uphill move when
    # exp(-dE/kT) > gamma; the `not (... > gamma)` form here accepts it when
    # exp(-dE/kT) <= gamma, i.e. with the complementary probability --
    # confirm whether this inversion is intended.
    return not (delta_E > 0 and np.exp(-(delta_E)/(kb * T)) > gamma)
def metro_ising(L, T, h):
    """ creates a markov chain of length L and returns the summed magnetization

    Relies on the module-level N (lattice size) and index (interior indices).
    """
    lattice = transform_lattice(np.random.rand(N + 2, N + 2))  # +2 because of periodic bounds
    ising_chain = [lattice]
    m = 0
    for i in range(L):
        # Propose flipping one uniformly chosen interior spin.
        rand_row = np.random.choice(index)
        rand_col = np.random.choice(index)
        if H(ising_chain[i], rand_row, rand_col, h, T):
            # Accepted: append a copy with the spin flipped; transform_lattice
            # only refreshes the periodic boundaries here (spins stay +/-1/2).
            new_lattice = ising_chain[i].copy()
            new_lattice[rand_row][rand_col] *= -1
            ising_chain.append(transform_lattice(new_lattice))
        else:
            ising_chain.append(ising_chain[i])
        m += np.sum(ising_chain[i][1:N + 1, 1:N + 1])  # magnetization
    return m
chain_lenght = 100 # 10000 is too big
h_arr = [0.1, 0.5, 1, 5]  # external field strengths
T = np.linspace(0.1, 30, 10)  # temperature sweep

# a)
# chain, _ = metro_ising(chain_lenght, T[0], h[0])
# sns.heatmap(chain[chain_lenght-1][1:N, 1:N], xticklabels=False, yticklabels=False, cbar=False)
# plt.title("T = " + str(T[0]))
# plt.legend()
# plt.show()

# b) average magnetization per step versus temperature, at the weakest field.
m_val = []
for temp in T:
    m = metro_ising(chain_lenght, temp, h_arr[0])
    m_val.append(m/chain_lenght)

plt.plot(T, m_val, label="h = " + str(h_arr[0]))
plt.ylabel("magnetization m")
plt.xlabel("Temperature T")
plt.legend()
plt.show()
|
997,074 | cc57f5812a111a62a43a8e0554c3c3dad5e2177f | #!/usr/bin/env python
import base64
from Crypto.Cipher import AES
import os
import secrets
import shelve
import tempfile
import sys
key_var = 'GIT_SHELL_CREDENTIALS_KEY'    # env var holding the base64 AES key
path_var = 'GIT_SHELL_CREDENTIALS_PATH'  # env var holding the shelve db path
iv456 = 'sixteencharacter'  # static 16-byte CBC IV shared by all operations
def newKey():
    """Return a fresh random 32-byte key, base64-encoded as ASCII text."""
    raw = secrets.token_bytes()
    return base64.b64encode(raw).decode('ascii')
def crypter():
    """Build an AES-256-CBC cipher from the key in $GIT_SHELL_CREDENTIALS_KEY.

    NOTE(review): the IV is the fixed module constant iv456; with CBC a
    static IV leaks equal-plaintext information -- acceptable only because
    this is a local per-session cache.
    """
    return AES.new(base64.b64decode(os.environ[key_var])[:32], AES.MODE_CBC, iv456)
def encrypt(message):
    """Zero-pad *message* (utf-8) to the AES block size and encrypt it.

    NOTE(review): zero-padding is ambiguous for messages that themselves end
    in NUL bytes; decrypt() strips all trailing NULs.
    """
    encoded = message.encode('utf-8')
    padded = encoded + (b'\0' * (-len(encoded) % 16))
    return crypter().encrypt(padded)
def decrypt(encrypted):
    """Decrypt and strip the trailing NUL padding added by encrypt()."""
    padded = crypter().decrypt(encrypted)
    return padded.rstrip(b'\0').decode('utf-8')
if __name__ == '__main__':
    # git credential-helper protocol: invoked as `<prog> get|store|erase`,
    # plus a custom `setup` command that prints env vars to export.
    command = sys.argv[1]
    if command not in ('setup', 'get', 'store', 'erase'):
        raise ValueError("Unknown command {}".format(command))
    if command == 'setup':
        # Create a throwaway credential store and print the shell exports.
        path = os.path.join(tempfile.mkdtemp(), 'git-credentials')
        print('export {}={}'.format(key_var, newKey()))
        print('export {}={}'.format(path_var, path))
        sys.exit(0)
    if not os.environ.get(path_var):
        raise ValueError("{} not set up".format(os.path.basename(sys.argv[0])))
    with shelve.open(os.environ[path_var]) as data:
        keys = 'username', 'password'
        if command == 'get' and all(key in data for key in keys):
            # Emit key=value lines in the format git expects.
            for key in keys:
                print('{}={}'.format(key, decrypt(data[key])))
        elif command == 'store':
            # git pipes key=value lines on stdin.
            given = dict(x.rstrip('\r\n').split('=', 1) for x in sys.stdin)
            for key in keys:
                data[key] = encrypt(given[key])
        elif command == 'erase':
            for key in keys:
                data.pop(key, None)
|
997,075 | 7806a018718f6dc011f0de35e104ad1622b2d191 | import socket
import math
import sympy
import math
from Crypto import Random
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
import hashlib
########################################
class Server(object):
"""docstring for Server"""
    def __init__(self):
        """Bind a TCP listener on port 8006 and derive the RSA seed from a name.

        Prompts on stdin; the ASCII sum of the entered name is stored in
        self.sums for later RSA parameter generation.
        """
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.serv.bind(('0.0.0.0', 8006))
        self.serv.listen(5)
        self.name = input("PLease Enter the NAME for RSA: ")
        print("")
        self.l1 = [character for character in self.name]        # characters of the name
        self.l2 = [ord(character) for character in self.name]   # their ASCII codes
        print(self.l1)
        print(self.l2)
        self.sums = sum(self.l2)
        print("")
        print("Sum of ASCII value is : " + str(self.sums))
        print("")
#####################################################
#AES
# This function is intended to provide the padding for the block size if the data left out doesnt fit the 16byte so padding is added, otherwise AES won't encrypt
    def pad(self, s):
        """
        Arguments:
        --------------
        s: bytes
            Short byte string to be padded to the 16-byte AES block size

        Description:
        --------------
        Function adds NUL padding so the data fits whole AES blocks.
        NOTE(review): when len(s) is already a multiple of the block size
        this appends a full extra block of NULs (block_size - 0 bytes).

        Returned Values:
        ----------------
        Returns the zero-padded block
        """
        return s + b"\0" * (AES.block_size - len(s) % AES.block_size)
# Encrypting the Message
    def encrypt(self, message, key, key_size=256):
        """
        Arguments:
        --------------
        message: bytes
            message to be encrypted
        key: bytes
            AES key for encrypting message
        key_size: int
            Size of the AES encryption key (unused; the key length governs)

        Description:
        --------------
        Function encrypts the message using AES CBC with a fresh random IV,
        which is prepended to the ciphertext.

        Returned Values:
        ----------------
        Returns iv + ciphertext
        """
        message = self.pad(message)
        # key = self.padKey(key)
        iv = Random.new().read(AES.block_size)
        cipher = AES.new(key, AES.MODE_CBC, iv)
        return iv + cipher.encrypt(message)
def padBinKey(self,s):
return r"0"*(128-len(str(s)) % 128) + str(s)
# Decrypting the Message
    def decrypt(self, ciphertext, key):
        """
        Arguments:
        --------------
        ciphertext: bytes
            iv-prefixed ciphertext to be decrypted

        Description:
        --------------
        Function decrypts AES-CBC ciphertext; the IV is read from the first
        block and the NUL padding added by pad() is stripped.

        Returned Values:
        ----------------
        Returns plaintext msg
        """
        iv = ciphertext[:AES.block_size]
        # key = self.padKey(key)
        cipher = AES.new(key, AES.MODE_CBC, iv)
        plaintext = cipher.decrypt(ciphertext[AES.block_size:])
        return plaintext.rstrip(b"\0")
# tackles the key generation using the SHA256 to be used for the AES encrytion
    def getAESHashKey(self, keyBin):
        """
        Arguments:
        --------------
        keyBin: bytes
            AES starter key material to be hashed

        Description:
        --------------
        Function derives the 32-byte AES key as SHA256(keyBin) and caches
        it on self.key.

        Returned Values:
        ----------------
        Returns the SHA256 digest (bytes)
        """
        hasher = SHA256.new(keyBin)
        self.key = bytes(hasher.digest())
        return self.key
def rotateKey(self,key,n):
"""
Arguments:
--------------
key: int
key to be rotated for next round
n:int
number to which the AES 128-bit key is rotated
Description:
--------------
Function calculates primitive roots of prime number
Returned Values:
----------------
Returns rotated key
"""
return key[n:] + key [:n]
#####################################################
# Diffie Logic
def gcd(self,a,b):
while b != 0:
a, b = b, a % b
return a
def primRoots(self,modulo):
"""
Arguments:
--------------
modulo: int
Number whose primitive roots are to be calculated
Description:
--------------
Function calculates primitive roots of prime number
Returned Values:
----------------
Returns the list of primitive roots
"""
roots = []
required_set = set(num for num in range (1, modulo) if self.gcd(num, modulo) == 1)
for g in range(1, modulo):
actual_set = set(pow(g, powers) % modulo for powers in range (1, modulo))
if required_set == actual_set:
roots.append(g)
return roots
def encryptRSA(self,rsaClientKey,key,n):
"""
Arguments:
--------------
rsaClientKey: int
public key of client shared after RSA calcualtions
Key: int
value to be converted to hash
n: int
p * q, limit for rsa
Description:
--------------
Function encrypts the key with provided rsaClientPublicKey and n.
Returned Values:
----------------
Returns encrypted hash
"""
encrypted = pow(key, rsaClientKey, n)
print("Encrypted Value: {}".format(encrypted))
return encrypted
def decryptRSA(self,rsaServerPrivKey,hashed,n):
"""
Arguments:
--------------
rsaServerPrivKey: int
Private key of Server shared after RSA calcualtions
hasned: int
value to be converted to key
n: int
p * q, limit for rsa
Description:
--------------
Function decrypts the hash with provided rsaServerPrivKey and n.
Returned Values:
----------------
Returns decrypted key
"""
decrypted = pow(hashed,rsaServerPrivKey,n)
print("Decrypted: {}".format(decrypted))
return decrypted
####################################################
def getServerParameters(self):
"""
Description:
--------------
Function returns the parameters to initialized Server socket for further communication through this socket
Returned Values:
----------------
returns server socket obj
"""
return self.serv
def isPrime(self,num):
"""
Arguments:
--------------
num: int
Integer to be checked if prime
Description:
--------------
Function returns True or False based on the number it prime or not.
Returned Values:
----------------
Boolean values based on the Acceptance or Negation
"""
if num > 1:
for i in range(2, num,1):
if ((num % i) == 0):
return False
return True
def nextPrime(self,N):
"""
Arguments:
--------------
N: int
Number after which next prime is to be calculated
Description:
--------------
Function calculates the next prime number after the number N described above.
Returned Values:
----------------
Returns the next prime number
"""
if (N <= 1):
return 2
prime = N
found = False
while(not found):
prime = prime + 1
if(self.isPrime(prime) == True):
found = True
return prime
    def calculateEncryptKey(self):
        """
        Description:
        --------------
        Function calculates candidate RSA encryption keys E using the standard
        mechanism: two primes p, q, totient fi = (p-1)(q-1), then prime values
        of E coprime to fi.  Collects up to 20 candidates.
        Returned Values:
        ----------------
        Tuple (encKeysList, fi, n): candidate public exponents, the totient
        (needed for the decryption-key calculation), and the modulus n = p*q.
        """
        # consecutive primes seeded from self.sums -- self.sums is set
        # elsewhere on the instance; TODO confirm where it is initialised
        p = self.nextPrime(self.sums)
        q = self.nextPrime(p)
        print("The first prime number : p = "+ str(p))
        print("The second prime number : q = "+ str(q))
        print("")
        n = p * q
        fi = (p-1) * (q-1)  # Euler's totient of n for distinct primes p, q
        print("\nCalculated n = " + str(n))
        print("Calculated fi(n) = " + str(fi))
        encKeysList=list()
        # gather prime exponents coprime to fi, stopping at 20 candidates
        for i in range(2, fi,1):
            if( math.gcd(i, fi) == 1 and self.isPrime(i) ):
                encKeysList.append(i)
                if len(encKeysList) >= 20:
                    return (encKeysList,fi,n)
        # fewer than 20 candidates existed: return whatever was found
        return (encKeysList,fi,n)
def calculateDecryptKey(self,encKeysList, fi):
"""
Arguments:
--------------
encKeysList: int
Encryption keys list containing 20 keys for calculating inverse which is also a prime number
fi: int
Phi calculated by multiplication of decrementing p & q by 1
Description:
--------------
Function calculates the Decryption key which is also prime
Returned Values:
----------------
Returns the encryption key along with decryption key in the int format
"""
for encKey in encKeysList:
for decKey in range(2, fi,1):
if (((encKey*decKey) % fi) == 1):
if (self.isPrime(decKey)):
return (encKey,decKey)
def main():
    """
    Description:
    --------------
    Execution point of program.  Flow: (1) server generates an RSA keypair,
    (2) on each client connection, exchanges RSA public keys, then performs
    an RSA-encrypted Diffie-Hellman exchange to derive a shared secret,
    (3) derives a 128-bit AES key (SHA-256 hashed) from the secret and runs
    an encrypted chat loop, re-keying after every 7 messages.
    """
    ############################################
    # RSA key generation for this server
    obj = Server()
    encKeysList,fi,nServer=obj.calculateEncryptKey()
    print("\nEncryption keys top 20 List : "+ str(encKeysList));
    encryption_key, decryption_key = obj.calculateDecryptKey(encKeysList, fi);
    print("Server RSA Enc key e : "+ str(encryption_key));
    print("SERVER RSA Dec key (d) : " + str(decryption_key));
    serv = obj.getServerParameters()
    ############################################
    # Diffie
    # q: random prime modulus; alpha: largest primitive root of q
    diffie_q = sympy.randprime(500,1000)
    print("\nDIFFIE HELLMAN DANCE (SERVER)!")
    print("Value of q is :" + str(diffie_q))
    primitive_roots = obj.primRoots(diffie_q)
    diffie_a=primitive_roots[-1]
    print("Value of alpha a : " + str(diffie_a))
    private_key_Xa = int(input("\nEnter a private key whose value is less than q : "))
    public_key_Ya = pow(diffie_a, private_key_Xa , diffie_q)
    print("SERVER DIFFIE PUBLIC KEY (Ya): "+ str(public_key_Ya))
    #########################################
    while True:
        # accept a client, swap RSA public keys as "e:n" strings
        conn, addr = serv.accept()
        from_client = ''
        server_msg_rsa=str(encryption_key)+":"+str(nServer)
        conn.send(bytes(server_msg_rsa,"utf_8"))
        client_rsa=str(conn.recv(4098),"utf_8")
        client_rsa_key=int(client_rsa.split(":")[0])
        nClient=int(client_rsa.split(":")[1])
        print("\nClient RSA Enc KEY: {}".format(client_rsa_key))
        # send DH parameters (q, alpha, Ya) RSA-encrypted under client's key
        diffie_q_enc=obj.encryptRSA(client_rsa_key,diffie_q,nClient)
        diffie_a_enc=obj.encryptRSA(client_rsa_key,diffie_a,nClient)
        public_key_Ya_enc=obj.encryptRSA(client_rsa_key,public_key_Ya,nClient)
        server_msg_diffie=str(diffie_q_enc)+":"+str(diffie_a_enc)+":"+str(public_key_Ya_enc)
        conn.send(bytes(server_msg_diffie,"utf_8"))
        data = conn.recv(4096)
        from_client = str(data,"utf_8")
        #client_rsa_key=int(from_client.split(":")[0])
        client_diffie_public_key=int(from_client)
        print("Client DIFFIE PUBLIC KEY: {}".format(client_diffie_public_key))
        # shared DH secret -> 128-bit binary key -> SHA-256 AES key
        secret_key = pow(client_diffie_public_key, private_key_Xa, diffie_q)
        print("FINAL SECRET: {}".format(secret_key))
        AESkey = obj.padBinKey(bin(secret_key)[2:])
        print("\nAES KEY 128 Bit: {}\n".format(AESkey))
        AESKeyHash = obj.getAESHashKey(bytes(AESkey,"utf_8"))
        msgCount=1
        while True:
            try:
                server_chat=input("Server > ")
                if server_chat:
                    server_chat_enc=obj.encrypt(bytes(server_chat,"utf_8"),AESKeyHash)
                    conn.send(server_chat_enc)
                    msgCount=msgCount+1
                    server_chat=''
                # re-key every 7th message: either accept a new RSA-wrapped
                # key from the client, or rotate the current key and send it
                if msgCount >= 7:
                    client_data = conn.recv(4096)
                    if client_data:
                        testData=int(str(client_data,"utf_8"))
                        testKey = obj.decryptRSA(decryption_key,testData,nServer)
                        AESkey = obj.padBinKey(bin(testKey)[2:])
                        AESKeyHash=obj.getAESHashKey(bytes(AESkey,"utf_8"))
                        print("\nKey Change: AES KEY 128 Bit: {}\n".format(AESkey))
                        msgCount=1
                    else:
                        n=int(input("8th Message, enter times rotate n: "))
                        testKey = int(obj.rotateKey(AESkey,n))
                        rotatedKey = str(obj.encryptRSA(client_rsa_key,testKey,nClient))
                        conn.send(bytes(rotatedKey,"utf_8"))
                        AESkey = obj.padBinKey(testKey)
                        AESKeyHash=obj.getAESHashKey(bytes(AESkey,"utf_8"))
                        print("\nKey Change: AES KEY 128 Bit: {}\n".format(AESkey))
                        msgCount=1
                client_chat=conn.recv(4096)
                if client_chat:
                    print("Client Chat Encrypted: {}".format(client_chat))
                    print("Client Chat: {}\n".format(str(obj.decrypt(client_chat,AESKeyHash),"utf_8")))
                    # NOTE(review): trailing "." makes this 1.0 -- msgCount
                    # becomes a float here; likely a typo for +1
                    msgCount=msgCount+1.
                    client_chat=''
            except:
                # NOTE(review): bare except hides all errors (including
                # KeyboardInterrupt) behind "client disconnected"
                print('client disconnected')
                break
        conn.close()
if __name__ == '__main__':
    main()
|
997,076 | 9a8a2190579df2d6cc615547a1e9685511704e1e | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: winston
"""
from keras.models import Model
from keras.layers import Dense, Input, Dropout
from keras.optimizers import Adam
from utils import cc_coef
def dense_network_MTL(num_nodes):
    """Multi-task dense regressor with three single-unit linear heads
    (activation / dominance / valence) on a shared trunk.

    Trunk: Dense (no activation) -> Dropout(0.3) -> Dense(relu) ->
    Dropout(0.3) -> Dense(relu), over a 6373-dim feature input.
    Each head is trained with the cc_coef loss under Adam(lr=1e-4).
    """
    feats = Input((6373,))
    trunk = Dense(num_nodes)(feats)  # first layer is linear (no activation)
    trunk = Dropout(0.3)(trunk)
    trunk = Dense(num_nodes, activation='relu')(trunk)
    trunk = Dropout(0.3)(trunk)
    trunk = Dense(num_nodes, activation='relu')(trunk)
    head_act = Dense(units=1, activation='linear')(trunk)
    head_dom = Dense(units=1, activation='linear')(trunk)
    head_val = Dense(units=1, activation='linear')(trunk)
    optimizer = Adam(lr=0.0001)
    model = Model(inputs=feats, outputs=[head_act, head_dom, head_val])
    model.compile(optimizer=optimizer, loss=[cc_coef, cc_coef, cc_coef])
    return model
def dense_network_class(num_nodes, num_class):
    """Dense classifier over 6373-dim features with a softmax output of
    *num_class* units; same trunk as the MTL variant, trained with
    categorical cross-entropy under Adam(lr=1e-4).
    """
    feats = Input((6373,))
    trunk = Dense(num_nodes)(feats)  # first layer is linear (no activation)
    trunk = Dropout(0.3)(trunk)
    trunk = Dense(num_nodes, activation='relu')(trunk)
    trunk = Dropout(0.3)(trunk)
    trunk = Dense(num_nodes, activation='relu')(trunk)
    probs = Dense(units=num_class, activation='softmax')(trunk)
    optimizer = Adam(lr=0.0001)
    model = Model(inputs=feats, outputs=probs)
    model.compile(optimizer=optimizer, loss='categorical_crossentropy')
    return model
|
997,077 | 939d23afb4fabe24afafae4c555a046b79cb4b63 | file = open('zen.txt', 'r')
text_list = []
for line in file:
text_list.append(line)
file.close()
for line in text_list[::-1]:
print(line, end='')
# зачёт!
|
997,078 | 47955a078cf27e9a4256b1f33b91d010a00fcfe4 | from rest_framework import serializers
from .models import ExercisesDetails
from exercises.serializers import ExercisesSerializer
class ExercisesDetailsSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for ExercisesDetails rows, embedding the related exercise
    object (via ExercisesSerializer) instead of just its primary key."""
    #exercise_id = serializers.CharField(read_only=True) #use if only want to display exercise name
    # Nested ExercisesSerializer so exercise_id renders the full associated
    # exercise in the JSON output; read_only, so writes must set the
    # relation through another channel (e.g. the view).
    exercise_id = ExercisesSerializer(read_only=True)
    # exercise_id = serializers.HyperlinkedRelatedField(
    #   view_name='exercises-api-view',
    #   lookup_field='exercise',
    #   many=True,
    #   read_only=True
    # )
    class Meta:
        model = ExercisesDetails
        fields = ['id', 'exercise_id', 'weight', 'set_amount', 'total_reps', 'volume']
        # extra_kwargs = {
        # 'exercise_id': {'view_name': 'exercises-api-view', 'lookup_field': 'exercise'}
        # }
997,079 | 7b240dff45162ee19bd99293b0f08539da5d7d28 | import numpy as np
from scipy.integrate import quad
from scipy.special import jv
from scipy.optimize import brentq
from scipy.interpolate import interp1d
import os, subprocess,copy,copy_reg,types
from multiprocessing import Pool, Manager
import itertools
import matplotlib.pyplot as plt
# This file contains functions used to compute I(k) functions for maps
# and the angular cross correlations between those maps
#will use the classes defined in these files:
from MapParams import *
from CosmParams import Cosmology
from ClRunUtils import *
###########################################################################
# ClData - contains C_l data and info about how it was produced
# plus indices relevant for
###########################################################################
class ClData(object):
    """Container for a set of angular cross-power spectra C_l.

    Holds the Cl grid indexed as [crossind, ell] for all map pairs, the
    tag<->index bookkeeping (tagdict, crosspairs, crossinds), per-map mean
    number densities (nbar), and the shot-noise contribution kept in a
    separate noisecl array (1/nbar on auto-power diagonals, -1 nbar means
    no noise, e.g. for ISW maps).
    """
    def __init__(self,rundata,bintags,dopairs=[],clgrid=np.array([]),addauto=True,docrossind=[],nbarlist=[]):
        # NOTE(review): mutable default arguments are shared between calls;
        # safe only as long as callers never mutate them in place.
        if rundata.tag: runtag = '_'+rundata.tag
        else: runtag=''
        self.clfile= ''.join([rundata.cldir,'Cl',runtag,'.dat'])
        self.rundat = rundata #Clrundata instance
        self.bintaglist=bintags #tag, given mapind
        self.Nmap=len(bintags)
        self.tagdict={bintags[m]:m for m in xrange(self.Nmap)} #mapind, given tag
        self.Ncross=self.Nmap*(self.Nmap+1)/2
        crosspairs,crossinds=get_index_pairs(self.Nmap)
        self.crosspairs=crosspairs #[crossind,mapinds] (NCross x2)
        self.crossinds=crossinds #[mapind,mapind] (Nmap x Nmap)
        if len(docrossind): #if list of cross pair indices given, use those
            self.docross = docrossind
            self.pairs=get_pairs_fromcrossind(self.bintaglist,docrossind,self.crosspairs,self.crossinds)
        else: #otherwise uses pairs. if both empty, just does auto correlations
            self.pairs=consolidate_dotags(dopairs,bintags)
            docross=get_docross_ind(self.tagdict,self.pairs,self.crossinds,addauto=addauto)
            self.docross=docross #crossinds which have C_l computed
        self.Nell=rundata.lvals.size
        self.cl=clgrid #[crossind, ell]
        self.dupesuf=False #set to false if haven't created duplicates. this gets modified by add_dupmape function
        #when adding shot noise and/or applying calibration errors
        # need to know the average number density per steradian per map
        nbarlist=np.array(nbarlist)
        if nbarlist.size==self.Nmap:
            self.nbar =nbarlist#same size as bintags, contains nbar for galaxy maps, -1 for otherrs
        else: #minus one means no nbar given for map at that index
            self.nbar=-1*np.ones(self.Nmap)
        #keep noise contrib to C_l in separate array
        self.noisecl = np.zeros((self.Ncross,self.Nell))
        for i in xrange(self.Nmap):
            if self.nbar[i]!=-1: #assumes -1 for no noise or isw
                diagind=self.crossinds[i,i]
                self.noisecl[diagind,:]=1/self.nbar[i]
                self.noisecl[diagind,0]=0
    def hasClvals(self):
        # True once a nonempty Cl grid has been loaded or computed
        return bool(self.cl.size)
    def clcomputed_forpair(self,tag1,tag2): #returns true/false depending on if has computed cl for this pair
        mapind1=self.tagdict[tag1]
        mapind2=self.tagdict[tag2]
        xind=self.crossinds[mapind1,mapind2]
        return xind in self.docross
    def get_cl_from_pair(self,tag1,tag2,ell=False, include_nbar=False):
        """return cl for pair of maptags (autopower if same tag). If no ell given, returns full ell array.

        include_nbar=True adds the stored shot-noise contribution."""
        if not self.clcomputed_forpair(tag1,tag2):
            print "No Cl data for {0:s} with {1:s}".format(tag1, tag2)
            return float('NaN')
        mapind1=self.tagdict[tag1]
        mapind2=self.tagdict[tag2]#tagdict[tag1] #this was erroneously [tag1] instead of 2. Corrected 160621 NJW. Not called anywhere, so should be ok.
        xind=self.crossinds[mapind1,mapind2]
        if ell:
            if include_nbar: return self.cl[xind,ell]+self.noisecl[xind,ell]
            else: return self.cl[xind, ell]
        else: #return all ell as array
            if include_nbar: return self.cl[xind,:]+self.noisecl[xind,:]
            else: return self.cl[xind,:]
    #pass string, for all binmaps with that string in their tag, change nbar
    def changenbar(self,mapstr,newnbar):
        # Updates nbar and the matching diagonal noisecl rows in place;
        # newnbar==-1 clears the shot-noise contribution.
        changeinds=[]
        for i in xrange(self.Nmap):
            if mapstr in self.bintaglist[i]:
                self.nbar[i]=newnbar
                diagind=self.crossinds[i,i]
                if newnbar==-1:
                    self.noisecl[diagind,:]=0.
                else:
                    self.noisecl[diagind,:]=1./newnbar
    #given binmap tag, remove that map
    def deletemap(self,tag):
        # Drops the map and every cross-spectrum involving it, then rebuilds
        # all index bookkeeping; returns False if the tag is unknown.
        if tag not in self.bintaglist:
            return False
        newNmap=self.Nmap-1
        newNcross=newNmap*(newNmap+1)/2
        oldmapind=self.tagdict[tag]
        newcl=np.zeros((newNcross,self.Nell))
        delxinds=self.crossinds[oldmapind,:]
        #newdocross=np.setdiff1d(self.docross,delxinds)#unique elements of docross not in delxinds
        newi=0
        for j in xrange(self.Ncross):
            if not j in delxinds: #copy over values we're keeping
                newcl[newi,:]=self.cl[j,:]
                newi+=1
        #set up new values
        self.Nmap=newNmap
        self.bintaglist.remove(tag)
        self.tagdict={self.bintaglist[m]:m for m in xrange(self.Nmap)}
        self.Ncross=newNcross
        crosspairs,crossinds=get_index_pairs(self.Nmap)
        self.crosspairs=crosspairs #[crossind,mapinds] (NCross x2)
        self.crossinds=crossinds #[mapind,mapind] (Nmap x Nmap)
        #THIS IS A TEMPORARY HACK
        self.pairs=consolidate_dotags(['all'],self.bintaglist)
        #self.docross=['all']
        #self.pairs=get_pairs_fromcrossind(self.bintaglist,newdocross,self.crosspairs,self.crossinds)
        self.cl=newcl
        self.nbar=np.delete(self.nbar,oldmapind)
        #just set up noisecl again
        self.noisecl = np.zeros((self.Ncross,self.Nell))
        for i in xrange(self.Nmap):
            if self.nbar[i]!=-1: #assumes -1 for no noise or isw
                diagind=self.crossinds[i,i]
                #self.noisecl[i,i]=1/self.nbar[i] #THIS INDEXING IS WRONG - fixed 160628 NJW. Function not referenced anywhere in analysis, so no impact on results.
                self.noisecl[diagind,:]=1/self.nbar[i]
                self.noisecl[diagind,0]=0
        return True
    def add_dupemap(self, tag, dupesuf='_1',verbose=False):
        """Duplicate already existing binmap in Cldata.

        Appends a copy of map *tag* (new tag = tag + dupesuf, repeated until
        unique), copying all of its cross-spectra so the new map's Cl rows
        equal the original's; returns (self, new_tag)."""
        if (self.dupesuf != False and self.dupesuf != dupesuf): #suffix to add for duplicate rec_glm tags
            print 'Duplicate map already created with dupe_suf "{0}" - cannot change dupesuf to {1}'.format(self.dupesuf,dupesuf)
        else: self.dupesuf = dupesuf
        oldtag = tag
        if oldtag not in self.bintaglist:
            # NOTE(review): message has a {0} placeholder but .format() is
            # never called -- the tag is not interpolated into the error
            raise KeyError("Error! {0} not in taglist - don't know which map to duplicate.")
        while tag in self.bintaglist:
            tag += self.dupesuf
        if verbose:print 'Duplicating {0}: naming new bintag "{1}"'.format(oldtag,tag)
        newNmap=self.Nmap+1
        newNcross=newNmap*(newNmap+1)/2
        oldmapind=self.tagdict[oldtag]
        newmapind= newNmap-1 #put new map at end
        new_nbar = self.nbar[oldmapind]
        newcl=np.zeros((newNcross,self.Nell))
        #delxinds=self.crossinds[oldmapind,:]
        ##newdocross=np.setdiff1d(self.docross,delxinds)#unique elements of docross not in delxinds
        #this isn't really necessary for adding a map since we're keeping all the old Cl, but keeping to minimize changes from original deletemap method
        #NOPE, BELOW IS WRONG, NOT ORDERED THAT WAY -- AUTOPOWERS ARE FIRST, PER THE "NEW" ORDERING IN HEALPY.SYNALM
        #http://healpy.readthedocs.io/en/latest/generated/healpy.sphtfunc.synalm.html
        #assuming the order goes as I think it does... first entries will all agree, then just duplicate last row, and again duplicate last element
        # so cl[newNmap,newNmap] == cl[newNmap-1,newNmap] == cl[newNmap-1, newNmap-1], and cl[newNmap,:] = cl[newNmap-1,:]
        newcrosspairs,newcrossinds = get_index_pairs(newNmap)
        newdox = []
        # print "newNmap:",newNmap
        for w in xrange(newNmap):
            for v in xrange(newNmap):
                if v<= w: #symmetric matrix
                    xind_new = newcrossinds[w,v]
                    if w < self.Nmap: #not looking at any pairs involving new map
                        xind_old=self.crossinds[w,v]
                        # tag1 = bintaglist[w]
                        # tag2 = bintaglist[v]
                    elif v<self.Nmap: # know w==self.Nmap. use the old xind from the original map for the new Cl[xind] of the duplicate map
                        xind_old=self.crossinds[oldmapind,v]
                        # print "map1={0}, map2={1},xind_old={2},xind_new={3}".format(w,v,xind_old,xind_new)
                    else: #both v,w == self.Nmap
                        xind_old=self.crossinds[oldmapind,oldmapind]
                        # print "Map1={0}, map2={1},xind_old={2},xind_new={3}".format(w,v,xind_old,xind_new)
                    newcl[xind_new, :] = self.cl[xind_old,:]
                    newdox.append(xind_new) #new cross correlations we've calc'd
                    # print xind_new
        #set up new values
        self.docross.extend(newdox) #indicate we've calculated the cross correlations
        self.Nmap=newNmap
        self.bintaglist.append(tag)
        self.tagdict={self.bintaglist[m]:m for m in xrange(self.Nmap)}
        self.Ncross=newNcross
        self.crosspairs=newcrosspairs #[crossind,mapinds] (NCross x2)
        self.crossinds=newcrossinds #[mapind,mapind] (Nmap x Nmap)
        #THIS IS A TEMPORARY HACK
        self.pairs=consolidate_dotags(['all'],self.bintaglist) #IS THIS STILL LEGITIMATE GIVEN THE "HACK" COMMENT ABOVE? [NJW 160627]
        #self.docross=['all']
        #self.pairs=get_pairs_fromcrossind(self.bintaglist,newdocross,self.crosspairs,self.crossinds)
        self.cl=newcl
        self.nbar=np.append(self.nbar,new_nbar)
        #just set up noisecl again
        self.noisecl = np.zeros((self.Ncross,self.Nell))
        for i in xrange(self.Nmap):
            if self.nbar[i]!=-1: #assumes -1 for no noise or isw
                diagind=self.crossinds[i,i]
                self.noisecl[diagind,:]=1/self.nbar[i]
                self.noisecl[diagind,0]=0
        return (self,tag) #return the new (now uniqe) tag
###########################################################################
def sphericalBesselj(n, x):
    """Spherical Bessel function of the first kind j_n(x), via the identity
    j_n(x) = sqrt(pi/(2x)) * J_{n+1/2}(x) with the ordinary Bessel J."""
    return np.sqrt(np.pi / (2 * x)) * jv(n + 0.5, x)
def findxmin(n, tol=1.e-10):
    """Smallest x in (tol, n) where j_n(x) equals *tol*.

    Used to treat the spherical Bessel function as effectively zero for
    arguments below the returned value.
    """
    def offset(x):
        return sphericalBesselj(n, x) - tol
    return brentq(offset, tol, n)
###########################################################################
# functions for computing, tabulating,and using I_l(k) functions
###########################################################################
#=========================================================================
#Functions for computing Cl with Limber approx
def LimberCl_intwrapper(argtuple):
    # Single-argument wrapper computing one C_l value in the Limber
    # approximation (k = (l+1/2)/r(z)); shaped for Pool.map dispatch.
    # argtuple = ((crossind, ell), in-docross flag, (binmap1, binmap2),
    #             Cosmology, quad subdivision limit, quad tolerance)
    nl,indocross,mappair,cosm,zintlim,epsilon=argtuple
    if not indocross: #don't do anything if we don't want this pair
        return 0.
    n,lval=nl
    if lval==0:
        return 0
    binmap1,binmap2=mappair
    #get cosmological functions
    co_r = cosm.co_r #function with arg z
    z_from_cor = cosm.z_from_cor #function with arg r
    hubble = cosm.hub #function with arg z
    D = cosm.growth #function with arg z
    f = cosm.growthrate #function with arg z
    c = cosm.c
    #limber approx writes k in terms of ell, z; set P(k) up for this
    # clip off first entry to avoid dividing by zero
    kofz_tab=(lval+.5)/cosm.r_array[1:] #the k value corresponding to each z value;
    Pofz_tab=cosm.P(kofz_tab) #tabulated, the P(k) corresponding to k=ell/r(z) for each z value;
    kofz=interp1d(cosm.z_array[1:],kofz_tab,bounds_error=False,fill_value=0.)
    Pofz=interp1d(cosm.z_array[1:],Pofz_tab,bounds_error=False,fill_value=0.)
    #use info in binmaps to figure out zmin and zmax
    zmin=max(binmap1.zmin,binmap2.zmin)
    #zmin=max(0.01,zmin) #CHECK THAT REMOVING THIS IS OK
    zmax=min(binmap1.zmax,binmap2.zmax)
    if zmax<=zmin:
        return 0.
    #set up the ISW prefactor as a function of z
    # Nisw counts how many of the two maps are ISW (0, 1 or 2); the ISW
    # kernel contributes a factor 3 H0^2 (1-f)/(c^2 k^2) per ISW map
    Nisw=binmap1.isISW + binmap2.isISW
    #print binmap1.tag,binmap2.tag,Nisw
    if Nisw:
        prefactor= (100.)**2 #H0^2 in units h^2km^2/Mpc^2/s^2
        prefactor*= 3./cosm.c**2 #h^2/Mpc^2
        iswpref =lambda z: prefactor*(1.-f(z))/(kofz(z)**2) if kofz(z)!=0 else 0. #unitless function
    if Nisw==1:
        iswprefactor= iswpref
    elif Nisw==2:
        iswprefactor=lambda z: iswpref(z)*iswpref(z)
    else:
        iswprefactor=lambda z:1.
    result=quad(lambda z: LimberCl_integrand(z,hubble,D,co_r,Pofz,iswprefactor,binmap1.window,binmap2.window,c),zmin,zmax,full_output=1,limit=zintlim,epsabs=epsilon,epsrel=epsilon)[0]
    return result
def LimberCl_integrand(z,hubble,growth,cor,Pz_interpfn,iswprefactor,window1,window2,c=299792):
    """Integrand for the Limber-approximated C_l integral over redshift:
    W1(z) W2(z) P(z) H(z) D(z)^2 / (r(z)^2 c) times the ISW prefactor.
    Returns 0 for vanishing windows, z=0, or r(z)=0; NaNs map to 0."""
    weight = window1(z) * window2(z)
    if weight == 0 or z == 0 or cor(z) == 0.:
        return 0
    radius = cor(z)
    value = weight * Pz_interpfn(z) * hubble(z) * (growth(z) ** 2) / (radius ** 2) / c
    value *= iswprefactor(z)
    # NaNs (e.g. from out-of-range interpolation) are replaced with zero
    return np.nan_to_num(value)
#=============================================================
# functions handling Ilk for an individual bin map
#=========================================================================
# getIlk: reads in Ilk file if there, otherwise computes
def getIlk_for_binmap(binmap,rundata,redo=False,DoNotOverwrite=False):
    # Fetch I_l(k) for one bin map: read the tabulated file if present and
    # consistent with rundata's ell/k grids, otherwise recompute (unless
    # DoNotOverwrite blocks it).  Returns (Ilk[ell,k], k_forI).
    needIlk=True
    if not redo:
        #check if file w appropriate name exists
        if binmap.isISW:
            if rundata.iswilktag: runtag='.'+rundata.iswilktag
            else: runtag=''
        else:
            if rundata.ilktag: runtag = '.'+rundata.ilktag
            else: runtag=''
        f = ''.join([rundata.ilkdir,binmap.tag,'_Ilk',runtag,'.dat'])
        if os.path.isfile(f):
            #read it in, check that ell and k vals are good
            Ilk,k_forI=readIlk_file(binmap,rundata)
            if Ilk.size:
                needIlk=False
    if needIlk and (not DoNotOverwrite):
        Ilk=computeIlk(binmap,rundata)
        k_forI=rundata.kdata.karray
    elif DoNotOverwrite:
        # NOTE(review): if needIlk is still True here, Ilk/k_forI were never
        # assigned and the return below raises NameError
        print "***in getIlk: DoNotOverwrite=True, but need Ilk values"
    return Ilk,k_forI
#-------------------------------------------------------------------------
def computeIlk(binmap,rundata):
    # Tabulate I_l(k) for one bin map over the run's (ell, k) grids --
    # only ell below the Limber switch -- optionally parallelized over
    # (ell, k) pairs with a multiprocessing Pool; writes results to file.
    DOPARALLEL=1
    print "Computing Ilk for ",binmap.tag,'DOPARALLEL=',DOPARALLEL
    #set up arrays
    kvals = rundata.kdata.karray
    Nk = kvals.size
    # just do the ell with no limber approx
    if rundata.limberl>=0 and rundata.limberl<=rundata.lmax:
        lvals = rundata.lvals[:np.where(rundata.lvals<rundata.limberl)[0][-1]+1]#rundata.lvals
    else:
        lvals=rundata.lvals
    Nell = lvals.size
    Ivals = np.zeros((Nell,Nk))
    eps = rundata.epsilon
    zintlim = rundata.zintlim
    #set up labels to help references go faster
    cosm = rundata.cosm
    # make sure the cosmology's z-dependent tables cover this bin's range
    if not cosm.tabZ or cosm.zmax<binmap.zmax:
        cosm.tabulateZdep(max(rundata.zmax,binmap.zmax),nperz=cosm.nperz)
    co_r = cosm.co_r #function with arg z
    krcutadd=rundata.kdata.krcutadd #to make integral well behaved w fast osc
    krcutmult=rundata.kdata.krcutmult
    #bounds for integral in comoving radius
    rmin=co_r(binmap.zmin)
    rmax=co_r(binmap.zmax)
    lk= itertools.product(lvals,kvals) #items=[l,k]
    # one argtuple per (l,k) pair; shared parameters replicated via repeat()
    argiter=itertools.izip(lk,itertools.repeat(rmin),itertools.repeat(rmax),itertools.repeat(cosm),itertools.repeat(binmap),itertools.repeat(krcutadd),itertools.repeat(krcutmult),itertools.repeat(zintlim),itertools.repeat(eps),itertools.repeat(rundata.sharpkcut),itertools.repeat(rundata.besselxmincut))
    if DOPARALLEL:
        pool = Pool()
        results=pool.map_async(Iintwrapper,argiter)
        newI=np.array(results.get())
        pool.close()
        pool.join()
        #rearrange into [l,k] shape
        Ivals=newI.reshape(Nell,Nk)
    else:
        # serial fallback: evaluate each (l,k) pair in turn
        argiter=list(argiter)
        for i in xrange(len(argiter)):
            argtuple=argiter[i]
            lk,rmin,rmax,cosm,binmap,krcutadd,krcutmult,zintlim,epsilon,zeropostcut,besselxmincut= argtuple
            l,kval=lk
            lind=np.where(lvals==l)[0][0]
            kind=np.where(kvals==kval)[0][0]
            Ival=Iintwrapper(argtuple)
            Ivals[lind,kind]=Ival
    #save result to file
    writeIlk(Ivals,binmap,rundata)
    return Ivals
#--------------------------------------------------
#wrapper function for integral, so multithreading works
def Iintwrapper(argtuple):#(l,kval,rmin,rmax,cosm,binmap,zintlim=10000):
    # Compute one I_l(k) radial integral; single-argument form so it can be
    # dispatched through Pool.map_async.  The integrand oscillates as
    # j_l(kr), so beyond r_atkrcut the Bessel function is replaced by its
    # large-argument sin/cos asymptote and quad's oscillatory weighting.
    #print "in Iintwrapper"
    lk,rmin,rmax,cosm,binmap,krcutadd,krcutmult,zintlim,epsilon,zeropostcut,besselxmincut = argtuple
    l,kval=lk
    dr=rmax-rmin
    if l==0: return 0. #don't compute monopole
    #bessel function will be effectively zero below some argument; adjust rmin accordingly
    if besselxmincut:
        xmin=findxmin(l,epsilon) #ADDED 5/19; seems to speed things up without chaning Ilk results much
        rmin=max(rmin,xmin/kval) #ADDED 5/19
        if rmin>=rmax:
            return 0.
    #print '  reading binmap info'
    window =binmap.window #function with args i,z
    isISW=binmap.isISW
    #print '  readin cosm info'
    co_r = cosm.co_r #function with arg z
    z_from_cor = cosm.z_from_cor #function with arg r
    hubble = cosm.hub #function with arg z
    D = cosm.growth #function with arg z
    f = cosm.growthrate #function with arg z
    c = cosm.c
    #print '  computing prefactor'
    #get appropriate prefactors
    prefactor=1.
    if binmap.isISW:
        H02 = (100.)**2 #h^2km^2/Mpc^2/s^2
        prefactor= 3.*H02/cosm.c**2 #h^2/Mpc^2
        prefactor=prefactor/(kval**2) #unitless
    #print ' looking at pre/post cut division'
    #find r where we want to switch from full bessel to approx
    ALLPRECUT=False
    ALLPOSTCUT=False
    if krcutmult<0 or krcutadd<0: #set these to negative to turn off approx
        ALLPRECUT=True
        r_atkrcut=rmax
    elif kval*dr>2*np.pi*10.: #only use approx if many oscillations fit inside bin
        r_atkrcut=(l*krcutmult+krcutadd)/kval
        if r_atkrcut<rmin:
            r_atkrcut=rmin
            ALLPOSTCUT=True
        if r_atkrcut>rmax:
            r_atkrcut=rmax
            ALLPRECUT=True
    else:
        r_atkrcut=rmax
        ALLPRECUT=True
    #print '  doing integrals'
    #print 'krcutmult=',krcutmult,'krcutadd',krcutadd
    #print "r-atkrcut=",r_atkrcut,'ALLPRECUT=',ALLPRECUT,"ALLPOSTCUT=",ALLPOSTCUT
    #calculate!
    if ALLPOSTCUT:
        result_precut=0.
    else:
        result_precut=quad(lambda r: Iintegrand(r,l,kval,window,z_from_cor,hubble,D,f,isISW,c,prefactor),rmin,r_atkrcut,full_output=1,limit=zintlim,epsabs=epsilon,epsrel=epsilon)[0]
    if zeropostcut or ALLPRECUT:
        result_postcut= 0
    elif l%2==0: #after krcut, use quad's ability to weight with sin or cos
        #even l bessels act line sin/x
        result_postcut=quad(lambda r: Iintegrand_postcut(r,l,kval,window,z_from_cor,hubble,D,f,isISW,c,prefactor),r_atkrcut,rmax,full_output=1,limit=zintlim,epsabs=epsilon,epsrel=epsilon,weight='sin',wvar=kval)[0]
    else: #odd bessels act like cos/x
        result_postcut=quad(lambda r: Iintegrand_postcut(r,l,kval,window,z_from_cor,hubble,D,f,isISW,c,prefactor),r_atkrcut,rmax,full_output=1,limit=zintlim,epsabs=epsilon,epsrel=epsilon,weight='cos',wvar=kval)[0]
    return result_precut+result_postcut
#--------------------------------------------------
# function which is integrated over to get Ilk
def Iintegrand(r,l,k,window,z_from_cor,hubble,growth,growthrate,isISW=False,c=299792,prefactor=1.):
    """Radial integrand for I_l(k): W(z) D(z) H(z)/c * j_l(kr) * prefactor,
    with an extra (1 - f(z)) factor for ISW maps.  Short-circuits to 0 when
    the window (or the product before the Bessel factor) vanishes."""
    z = z_from_cor(r)
    weight = window(z)
    if weight == 0:
        return 0
    dI = weight * growth(z) * hubble(z) / c
    if isISW:  # ISW kernel carries the (1 - growth rate) piece
        dI *= (1. - growthrate(z))
    if dI == 0:
        return 0
    return dI * sphericalBesselj(l, k * r) * prefactor
# function which is integrated over to get Ilk after k past krcut
def Iintegrand_postcut(r,l,k,window,z_from_cor,hubble,growth,growthrate,isISW=False,c=299792,prefactor=1.):
    """Same radial integrand as Iintegrand, but with j_l(kr) replaced by its
    large-argument asymptote: sin/cos(pi(l+1)/2)/(kr).  The oscillatory
    sin(kr) or cos(kr) factor itself is supplied by quad's weight option,
    so only the constant phase amplitude appears here."""
    z = z_from_cor(r)
    weight = window(z)
    if weight == 0:
        return 0
    dI = weight * growth(z) * hubble(z) / c
    if isISW:  # ISW kernel carries the (1 - growth rate) piece
        dI *= (1. - growthrate(z))
    if dI == 0:
        return 0
    phase = np.pi * (l + 1.) / 2.
    # even l pairs with quad's sin weighting, odd l with cos
    amplitude = np.sin(phase) if l % 2 == 0 else np.cos(phase)
    return dI * amplitude / (k * r) * prefactor
#-------------------------------------------------------------------------
def writeIlk(Ilkarray,binmap,rundata):
    # Write the tabulated I_l(k) grid for one bin map to a text file:
    # 5 header lines + 1 separator, then a column-label row, then one row
    # per k value (first column k, remaining columns the non-Limber ells).
    if binmap.isISW:
        if rundata.iswilktag: runtag = '.'+rundata.iswilktag
        else: runtag=''
    else:
        if rundata.ilktag: runtag = '.'+rundata.ilktag
        else: runtag=''
    outfile = ''.join([rundata.ilkdir,binmap.tag,'_Ilk',runtag,'.dat'])
    print 'Writing Ilk data to ',outfile
    k = rundata.kdata.karray
    lvals = rundata.lvals
    Nell = sum(l<rundata.limberl for l in lvals) #number below limber switch
    # encode the kr-cut parameters in the header as "add.mult"; -1 disables
    krcutstr='{0:13g}.{1:<10g}'.format(rundata.kdata.krcutadd,rundata.kdata.krcutmult)
    if rundata.kdata.krcutadd<0 or rundata.kdata.krcutmult<0:
        krcutstr='{0:23g}'.format(-1.)
    headerstr = '\n'.join([binmap.infostr,rundata.infostr])
    collabels =''.join([' {0:23s} {1:23s}\n{2:s}'.format('k[h/Mpc] (top=krcutadd.mult)','ell=>',krcutstr),''.join([' {0:23d}'.format(lvals[n]) for n in xrange(Nell)]),'\n'])
    bodystr=''.join([\
        ''.join([' {0:+23.16e}'.format(k[row]),''.join([' {0:+23.16e}'.format(Ilkarray[lind,row]) for lind in xrange(Nell)]),'\n'])\
        for row in xrange(k.size)])
    f=open(outfile,'w')
    f.write(headerstr) #5 lines long: bin,map,run,kdata,cosm
    f.write('\n##############################\n') #6th dummy line
    f.write(collabels) #line 7 has row, col labels, line 8 has lvals
    f.write(bodystr)
    f.close()
#-------------------------------------------------------------------------
# read in file containing Ilk for given map bin, resturn Ilk array,lvals, kvals
def readIlk_file(binmap,rundata):
    # Read a tabulated I_l(k) file back in and validate it against the
    # current run parameters.  Returns (Ilk[ell,k], karray), or a pair of
    # empty arrays when the stored ell or k sampling doesn't match (caller
    # then recomputes).
    if binmap.isISW:
        if rundata.iswilktag: runtag = '.'+rundata.iswilktag
        else: runtag=''
    else:
        if rundata.ilktag: runtag = '.'+rundata.ilktag
        else: runtag=''
    infile=''.join([rundata.ilkdir,binmap.tag,'_Ilk',runtag,'.dat'])
    print "Reading Ilk from file",infile
    x = np.loadtxt(infile,skiprows=6)
    # first cell encodes the kr-cut parameters as "add.mult" (see writeIlk)
    inkrcut=x[0,0]
    inkrcutadd=int(inkrcut)
    inkrcutmult=int(str(inkrcut)[str(inkrcut).find('.')+1:])
    k=x[1:,0]
    l=x[0,1:].astype(int)
    I=np.transpose(x[1:,1:])
    #read header to get nperlogk info
    f=open(infile,'r')
    f.readline()#binmap infoline
    f.readline()#runtag and lvals
    f.readline()#cosmolog info
    kstr = f.readline() #kdata info
    f.close()
    kstr=kstr[kstr.find('kperlog=')+len('kperlog='):]#cut just before nperlogk
    innperlogk=int(kstr[:kstr.find(',')])
    inkmin=k[0]
    inkmax=k[-1]
    #return ivals if nperlogk and l values match up, otherwise return empty array
    #should have all ell in lvals where ell<limberl, assume ascending order
    limberl=rundata.limberl
    if limberl>=0 and limberl<=rundata.lmax:
        # these are the expected ell values we want out
        checkell=rundata.lvals[:np.where(rundata.lvals<limberl)[0][-1]+1]
    else:
        checkell=rundata.lvals
    if l.size>=checkell.size:
        lind_incheck=[] #index of each checkell element in l
        for lval in checkell:
            where = np.where(l==lval)[0]
            if where.size==1:
                lind_incheck.append(where[0])
            else:
                print " *** unexpected lvals, recompute."
                return np.array([]),np.array([])
        lind_incheck=np.array(lind_incheck)
        # file k-grid must be at least as dense and at least as wide as requested
        if innperlogk>= rundata.kdata.nperlogk and inkmin<=rundata.kdata.kmin and inkmax>=rundata.kdata.kmax:
            return I[lind_incheck,:],k #k_forI can be different than kdata, as long as it samples enough
        else:
            print " *** unexpected kvals, recompute."
            return np.array([]),np.array([])
    else:
        print " *** unexpected number of lvals, recompute."
        return np.array([]),np.array([])
###########################################################################
# functions for computing, tabulating,and using cross corr functions
###########################################################################
#-------------------------------------------------------------------------
# getCl - returns desired cross corr for given list of binmaps
# Checks for existing Cl file, checks that all maps wanted are in it
# Computes necessary cross corr, saves
# dopairs = list [(maptag1,maptag2)...] for pairs we want Cl for
# if empty: just get the autocorrelation Cl
# if contains the string 'all', compute all
# redoAllCl -> compute requested values, overwrite any existing Cl file
# redoTheseCl -> don't overwrite old file, but recompute all requested Cl
# vals and overwrite existing data for those pairs
# redoAutoCl -> Like redoTheseCl, but also includes autocorrelations
# redoIlk - recompute + overwrite existing tabulated Ilk data
# DoNotOverwrite - "read only" safeguard
#-------------------------------------------------------------------------
def getCl(binmaplist,rundata,dopairs=[],redoAllCl=False,redoTheseCl=False,redoAutoCl=False,redoIlk=False,DoNotOverwrite=True):
    """Return a ClData object with cross-power spectra for binmaplist.

    Reads the run's existing C_l file, computes only the requested pairs
    that are missing (unless a redo* flag forces recomputation), merges
    old and new values, and writes the result back unless DoNotOverwrite.

    binmaplist - list of BinMap objects to correlate
    rundata    - run configuration (ell/k sampling, directories, tags)
    dopairs    - [(tag1,tag2),...] pairs wanted; the string 'all' means
                 every unique pair; autocorrelations are always added.
                 NOTE(review): mutable default argument -- only rebound
                 here, never mutated, but fragile if callees mutate it.
    redoAllCl  - recompute everything and overwrite the C_l file
    redoTheseCl- recompute the requested pairs, keep other stored data
    redoAutoCl - like redoTheseCl, but autocorrelations are redone too
    redoIlk    - retabulate Ilk transfer functions (forces redoAllCl)
    DoNotOverwrite - read-only safeguard: never write files
    """
    print "in getCL, DoNotOverwrite=",DoNotOverwrite
    if redoIlk:
        #if we're recomputing Ilk, we need to recompute all the Cl
        redoAllCl=True
    # print "Getting C_l for auto-corr and requested pairs:",dopairs
    if 'all' in dopairs:
        dopairs=[p for p in itertools.combinations_with_replacement([m.tag for m in binmaplist],2)]
    #oldcl,oldtags,olddo= readCl_file(rundata)
    oldcl=readCl_file(rundata)
    #print 'oldcl.cl.shape',oldcl.cl.shape
    #print 'olcl.hasClvals',oldcl.hasClvals()
    #print " pairs computed previously:",olddo
    #if redoAllCl or not oldcl.size:
    if redoAllCl or not oldcl.hasClvals():
        if DoNotOverwrite:
            print "***In getCl: DoNotOverwrite=True but need C_l values."
        else:
            print "Computing new C_l for all requested cross corr, overwriting existing data."
        #compute and write new C_l file if one of the redo bool=True
        # or if clfile doesn't exist, or if lvals are wrong in clfile
        #Clvals=computeCl(binmaplist,rundata,dopairs=dopairs,redoIlk=redoIlk,addauto=True)
        cldat=computeCl(binmaplist,rundata,dopairs=dopairs,redoIlk=redoIlk,addauto=True)
        writeCl_file(cldat)
    else: #can potentially use previously computed values
        ANYNEW=False
        if redoAutoCl:
            autoinnew=True
            print " Will recompute auto-corr for requested maps"
        else:
            print " Using previously computed auto-corr."
            autoinnew=False
        #indices etc requested in arguments
        taglist=get_bintaglist(binmaplist)
        nbarlist=[m.nbar for m in binmaplist]
        cldat=ClData(rundata,taglist,dopairs=dopairs,addauto=True,nbarlist=nbarlist)
        Nmap=cldat.Nmap
        dopairs=cldat.pairs
        tagdict = cldat.tagdict
        crosspairs=cldat.crosspairs
        crossinds=cldat.crossinds
        Ncross = cldat.Ncross
        docross=cldat.docross #list of pair indices want to compute
        #old = follow indices for maplist in prev existing file
        oldind=-1*np.ones(Nmap) #for each binmap, its index in oldbinmaplist
        #get set up to navigate existing (old) Cl data
        oldxinds=oldcl.crossinds
        olddox=oldcl.docross #index of (in oldtag basis) cross corrs to do
        #get indices of tags existing in oldbintags
        oldind=translate_tag_inds(cldat,oldcl)
        for t in xrange(Nmap): #add autocorr for any maps not in oldtags
            if oldind[t]<0 and not redoAutoCl:
                docross.append(crossinds[t,t])
        newdocross = docross[:]
        crossfromold = []#crossinds of x corrs previously computed
        # NOTE(review): oldind is a float array (np.ones default dtype); the
        # oldxinds[oldind[t],oldind[t2]] lookups below rely on older numpy
        # accepting float fancy indices -- verify under current numpy.
        if not (redoTheseCl or redoAutoCl):
            # print " Checking for previously computed C_l values."
            #Remove from newdocross for pairs already computed
            for t in xrange(Nmap):
                if oldind[t]>=0: #tag in oldtags
                    #check which desired pairs are already computed
                    for t2 in xrange(t,Nmap): #loop through possible pairs
                        #if pair not in dopairs, don't compute
                        if crossinds[t,t2] not in newdocross:
                            continue
                        #otherwise, check if second tag in oldtags
                        #if pair in olddo, already computed; don't need it
                        elif (oldind[t2]>=0) and (oldxinds[oldind[t],oldind[t2]] in olddox):
                            newdocross.remove(crossinds[t,t2])
                            crossfromold.append(crossinds[t,t2])
        else:
            print " Will compute C_l for all requested pairs."
            ANYNEW=True
        #need new values if entries in newdocross, otherwise returns zero array
        if not DoNotOverwrite:
            newcl= computeCl(binmaplist,rundata,docrossind=newdocross,redoIlk=redoIlk)
        else:
            #if we're not saving data, don't bother computing
            # just get dummy ClData object
            if newdocross:
                print "***WARNING. Need new Cl data have set READONLY."
                print newdocross
                print crosspairs[newdocross]
            newcl= computeCl(binmaplist,rundata,docrossind=np.array([]),redoIlk=False)
        if np.any(newcl.cl!=0):
            ANYNEW=True
        #Clvals = Clgrid to return, all asked for in this call
        Clvals = np.copy(newcl.cl)
        for n in crossfromold: #get the prev computed values from oldcl
            i0 = crosspairs[n,0]
            i1 = crosspairs[n,1]
            oldn = oldxinds[oldind[i0],oldind[i1]]
            Clvals[n,:] = oldcl.cl[oldn,:]
        #put Clvals data into the relevant ClData instance
        cldat.cl=Clvals
        #combine new and old Cl to write everything to file
        if ANYNEW:
            if not DoNotOverwrite:
                print " Combining new and old C_l for output file."
                overwriteold=(redoTheseCl or redoAutoCl)
                comboCl=combine_old_and_new_Cl(cldat,oldcl)
                writeCl_file(comboCl)
            else:
                print "***In getCl: DoNotOverwrite=True, but computed some new values. Not saving new vals."
    return cldat
#------------------------------------------------------------------------
# Given list of binmaps, computes Cl for each pair, returns Ncross x Nell array
# dopairs = list [(maptag1,maptag2)...] for pairs we want Cl for
# if redoIlk, recomputes even if files exist
# if addauto and no crossinds given,
# compute autocorrelations even if not in dopairs
def computeCl(binmaps,rundata,dopairs=[],docrossind=[],redoIlk=False,addauto=False):
    """Compute C_l for the requested cross-correlations of binmaps.

    Below rundata.limberl the full k-integral over Ilk transfer functions
    is evaluated (in parallel via multiprocessing.Pool); at and above
    limberl the Limber approximation is used instead.  Returns a ClData
    object whose .cl is an (Ncross, Nell) array; rows not in the 'do'
    list are left as zeros.

    dopairs    - [(tag1,tag2),...] pairs to compute (tag or type names)
    docrossind - alternatively, explicit cross-correlation indices
    redoIlk    - retabulate Ilk data even if files exist
    addauto    - include autocorrelations when no docrossind given
    """
    bintags=[m.tag for m in binmaps]
    nbars=[m.nbar for m in binmaps] #will be -1 for e.g. ISW
    cldat=ClData(rundata,bintags,dopairs=dopairs,docrossind=docrossind,addauto=addauto,nbarlist=nbars)
    #get list of pairs of indices for all unique cross corrs
    Nmap=cldat.Nmap #len(binmaps)
    Nell = cldat.Nell #rundata.lvals.size
    crosspairs=cldat.crosspairs
    crossinds=cldat.crossinds
    Ncross=cldat.Ncross
    tagdict=cldat.tagdict
    docross=cldat.docross
    #print 'in computeCl, dopairs',dopairs
    #if we're not computing anything, just return array ofzeros
    Clvals = np.zeros((Ncross,Nell))
    if not len(docross):
        # print " No new values needed."
        cldat.cl=Clvals
        return cldat
    print " Computing new C_l values."
    # First sort out when to switch to limber approx
    limberl=rundata.limberl #where to switch to Limber
    print "limberl=",limberl
    # limberl>0 splits lvals at limberl; limberl==0 means Limber everywhere;
    # negative limberl means never use the Limber approximation
    if limberl>0 and limberl<=rundata.lmax:
        lvals_preLim=rundata.lvals[:np.where(rundata.lvals<limberl)[0][-1]+1]
        Nell_preLim=lvals_preLim.size
        lvals_postLim=rundata.lvals[np.where(rundata.lvals<limberl)[0][-1]+1:]
        Nell_postLim=Nell-Nell_preLim
    elif limberl==0:
        lvals_preLim=np.array([])
        Nell_preLim=0
        lvals_postLim=rundata.lvals
        Nell_postLim=Nell
    else:
        lvals_preLim=rundata.lvals
        lvals_postLim=np.array([])
        Nell_preLim=Nell
        Nell_postLim=0
    #print 'preLim lvals:',lvals_preLim
    #print 'Nell_preLim',Nell_preLim
    #print 'postLim lvals:',lvals_postLim
    #print 'Nell_postLim',Nell_postLim
    #get k and power spectrum info for run, need this limber or not
    kdata=rundata.kdata
    cosm = rundata.cosm
    if not cosm.havePk:
        # For Pk, just use camb's default adaptive nperlogk spacing
        print 'getting CAMB P(k), kmin,kmax=',kdata.kmin,kdata.kmax
        cosm.getPk(kdata.kmin,kdata.kmax)#kperln=kdata.nperlogk*np.log(10))
    if Nell_preLim:
        #get Ilk functions
        print " Getting Ilk transfer functions.."
        Igrid=[]#map,ell,k; ell indices only for ell<limberl
        kforIgrid=[]#map,k
        #np.zeros((Nmap,Nell_preLim,rundata.kdata.karray.size))
        for m in xrange(Nmap):
            Igridbit,k_forI=getIlk_for_binmap(binmaps[m],rundata,redoIlk)
            Igrid.append(Igridbit)
            kforIgrid.append(k_forI)
        Igrid=np.array(Igrid)
        kforIgrid = np.array(kforIgrid)
        lnkforIgrid = np.log(kforIgrid)
        #set up P(k) in terms of lnk
        Plnk = interp1d(np.log(cosm.k_forPower),cosm.P_forPower,bounds_error=False,fill_value=0.)
        lnkmin=np.log(kdata.kmin)
        lnkmax=np.log(kdata.kmax)
        #Do Cl computations, interating through crosspairs and lvals
        print " Performing non-Limber C_l integrals."
        # build one argument tuple per (crossind, ell) combination; the
        # indocross flag lets Clintwrapper skip pairs not requested
        nl= itertools.product(xrange(Ncross),xrange(Nell_preLim)) #items=[n,lind]
        Ipair_fornl=[(Igrid[crosspairs[xind,0],lind,:],Igrid[crosspairs[xind,1],lind,:]) for (xind,lind) in itertools.product(xrange(Ncross),xrange(Nell_preLim))]
        lnkforIpair=[(lnkforIgrid[crosspairs[xind,0],:],lnkforIgrid[crosspairs[xind,1],:]) for (xind,lind) in itertools.product(xrange(Ncross),xrange(Nell_preLim))]
        indocross=[xind in docross for (xind,lind) in itertools.product(xrange(Ncross),xrange(Nell_preLim))]
        #put everything into a tuple for the integral wrapper
        argiter = itertools.izip(nl,indocross,itertools.repeat(lnkmin),itertools.repeat(lnkmax),itertools.repeat(Plnk),Ipair_fornl,lnkforIpair,itertools.repeat(rundata.kintlim),itertools.repeat(rundata.epsilon)) #for quad
        pool = Pool()
        results=pool.map_async(Clintwrapper,argiter)
        newCl=np.array(results.get())
        pool.close()
        pool.join()
        #rearrange into [n,l] shape
        Clvals[:,:Nell_preLim]=newCl.reshape(Ncross,Nell_preLim)
    # Do Limber approx calculations
    if Nell_postLim:
        print " Performing Limber approx C_l integrals."
        #make sure z-dep functions have been tabulated
        #print [m.zmax for m in binmaps]
        zmax=max([m.zmax for m in binmaps])
        if not cosm.tabZ or cosm.zmax<zmax:
            cosm.tabulateZdep(zmax,nperz=cosm.nperz)
        nl= itertools.product(xrange(Ncross),lvals_postLim) #items=[n,lvals]
        mappair=[(binmaps[crosspairs[xind,0]],binmaps[crosspairs[xind,1]]) for (xind,lind) in itertools.product(xrange(Ncross),xrange(Nell_postLim))]
        indocross=[xind in docross for (xind,lind) in itertools.product(xrange(Ncross),xrange(Nell_postLim))]
        #put everything into a tuple for the integral wrapper
        argiter = itertools.izip(nl,indocross,mappair,itertools.repeat(cosm),itertools.repeat(rundata.zintlim),itertools.repeat(rundata.epsilon)) #for quad
        #run computations in parallel
        DOPARALLEL=1
        if DOPARALLEL:
            print " Running Limber approx integrals in parallel."
            pool=Pool()
            results=pool.map_async(LimberCl_intwrapper,argiter)
            limberCl=np.array(results.get())
            pool.close()
            pool.join()
            Clvals[:,Nell_preLim:]=limberCl.reshape(Ncross,Nell_postLim)
        else: #the nonparallel version is for testing that things run
            argiter=list(argiter)
            print " Running Limber approx integrals (not in parallel)."
            for i in xrange(len(argiter)):
                argtuple=argiter[i]
                nl,indocross,mappair,cosm,zintlim,epsilon=argtuple
                n,lval=nl
                lind=np.where(rundata.lvals==lval)[0][0]
                thiscl=LimberCl_intwrapper(argtuple)
                print 'n,lval',n,lval,thiscl*lval*(1+lval)/(2*np.pi)
                Clvals[n,lind]=thiscl
    cldat.cl=Clvals
    return cldat#Clvals
#------------------------------------------------------------------------
def Clintwrapper(argtuple):
    """Pool worker: evaluate one non-Limber C_l integral.

    argtuple unpacks to ((n,lind), dothiscross, lnkmin, lnkmax, Plnkfunc,
    (Ik1,Ik2), (lnk1,lnk2), kintlim, epsilon).  Returns
    (2/pi) * integral of k^3 P(k) I1(k) I2(k) dlnk over the region where
    both transfer functions are above the epsilon tolerance, or 0 when the
    pair is not requested or the supports do not overlap.
    """
    #nl,bool dothiscross,lnkmin,lnkmax,Pk_array,Igrid,kintlim =argtuple
    nl,dothiscross,lnkmin,lnkmax,Plnkfunc,Ipair_fornl,lnkforIpair,kintlim,epsilon=argtuple
    n,lind=nl
    if not dothiscross: clval= 0
    else:
        ik1=Ipair_fornl[0]
        lnkfori1=lnkforIpair[0]
        ik2=Ipair_fornl[1]
        lnkfori2=lnkforIpair[1]
        #COMMENTED OUT ON 6/1/15;
        # #find nonzero overlap of the Ilk functions #ADDED 5/19
        # if less than tolerance, should treat Ilk as zero to avoid noise contrib
        checktol=epsilon
        ISNONZERO=True
        # locate the above-tolerance support of each transfer function;
        # a support of a single sample is treated as zero
        if np.any(ik1>checktol):
            i1minind=np.where(ik1>checktol)[0][0]
            i1maxind=np.where(ik1>checktol)[0][-1]
            ISNONZERO= i1minind!=i1maxind
        else:
            ISNONZERO=False
        if np.any(ik2>checktol):
            i2minind=np.where(ik2>checktol)[0][0]
            i2maxind=np.where(ik2>checktol)[0][-1]
            ISNONZERO= (i2minind!=i2maxind) and ISNONZERO
        else:
            ISNONZERO=False
        if not ISNONZERO:
            return 0.
        # restrict the integration range to the overlap of both supports
        i1_minlnk=lnkfori1[i1minind]
        i1_maxlnk=lnkfori1[i1maxind]
        i2_minlnk=lnkfori2[i2minind]
        i2_maxlnk=lnkfori2[i2maxind]
        highermin=max(i1_minlnk,i2_minlnk)
        lowermax=min(i1_maxlnk,i2_maxlnk)
        if highermin>=lowermax: #no overlap
            return 0.
        else:
            lnkmin=max(highermin,lnkmin)
            lnkmax=min(lowermax,lnkmax)
        #P_interp = interp1d(lnk_array,Pk_array,kind='cubic')
        if i1maxind-i1minind>3: #need at least 4 pts for cubic interp
            I1_interp= interp1d(lnkfori1[i1minind:i1maxind+1],ik1[i1minind:i1maxind+1],kind='cubic',bounds_error=False,fill_value=0.)
        else: #just do linear interp (the highermin/lowermax stuff above sets things to 1 if they're equal)
            I1_interp= interp1d(lnkfori1[i1minind:i1maxind+1],ik1[i1minind:i1maxind+1],kind='linear',bounds_error=False,fill_value=0.)
        if i2maxind-i2minind>3: #need at least 4 pts for cubic interp
            I2_interp= interp1d(lnkfori2[i2minind:i2maxind+1],ik2[i2minind:i2maxind+1],kind='cubic',bounds_error=False,fill_value=0.)
        else:
            I2_interp= interp1d(lnkfori2[i2minind:i2maxind+1],ik2[i2minind:i2maxind+1],kind='linear',bounds_error=False,fill_value=0.)
        #I1_interp= interp1d(lnkfori1,ik1,kind='cubic',bounds_error=False,fill_value=0.)
        #I2_interp= interp1d(lnkfori2,ik2,kind='cubic',bounds_error=False,fill_value=0.)
        clval= quad(lambda lnk: Cl_integrand(lnk,Plnkfunc,I1_interp,I2_interp),lnkmin,lnkmax,limit=kintlim,epsabs=epsilon,epsrel=epsilon,full_output=1)[0]
    return clval*2./np.pi
def Cl_integrand(lnk,Pk_interpfn,Ik1_interpfn,Ik2_interpfn):
    """Integrand for the C_l integral over ln(k).

    Evaluates k^3 * P(k) * I1(k) * I2(k) at the given ln(k), where each
    function argument is an interpolator taking ln(k).
    """
    kcubed = np.exp(3*lnk)
    return kcubed * Pk_interpfn(lnk) * Ik1_interpfn(lnk) * Ik2_interpfn(lnk)
#-------------------------------------------------------------------------
# Given number of maps, get pairs of indices for unique pairs
# crosspairs[n] holds indices of nth pair of maps, [n,0]<=[n,1]
def get_index_pairs(Nmap):
    """Return (crosspairs, crossinds) lookup tables for Nmap maps.

    crosspairs[n] holds the (i,j) map indices of the nth unique pair with
    i<=j, ordered to match healpy's hp.synalm 'new=True' convention (all
    offset-0 pairs first, then offset-1 pairs, etc.).  crossinds[i,j] is
    the inverse lookup and is symmetric.

    Fixes Py3-incompatible constructs from the original (xrange; true
    division made Ncross a float, which np.zeros rejects); range and //
    behave identically under Py2.  The triangular sum np.sum(np.arange(d))
    is replaced by its closed form d*(d-1)//2.
    """
    #Arranged like 'new=True' ordering in hp.synalm
    Ncross=Nmap*(Nmap+1)//2
    crosspairs=np.zeros((Ncross,2),int) #at location crossind, pair of map ind
    crossinds=np.zeros([Nmap,Nmap],int)#at location [mapind,mapind], crossind
    for diff in range(Nmap): #diff = j-i offset of the pair
        for w in range(Nmap-diff):
            v=w+diff
            n=w+diff*Nmap - diff*(diff-1)//2
            crosspairs[n,:]=w,v
            crossinds[w,v] = n
            crossinds[v,w]=crossinds[w,v]
    return crosspairs,crossinds
def get_index_pairs_old(Nmap):
    """Older, row-major pair ordering: (0,0),(0,1),...,(0,N-1),(1,1),...

    Same return contract as get_index_pairs but with the legacy ordering.
    Fixes Py3-incompatible constructs (xrange; true division made Ncross a
    float, which np.zeros rejects); range and // behave identically in Py2.
    """
    Ncross=Nmap*(Nmap+1)//2
    crosspairs=np.zeros((Ncross,2),int) #at location crossind, pair of map ind
    crossinds = np.zeros((Nmap,Nmap),int)#at location [mapind,mapind], crossind
    n=0
    for u in range(Nmap):
        for v in range(u,Nmap):
            crosspairs[n,:]=u,v
            crossinds[u,v]=n
            crossinds[v,u]=n
            n+=1
    return crosspairs,crossinds
#-------------------------------------------------------------------------
# Given list of binmap tags and crossinds, return list of pairs associated with those xinds
def get_pairs_fromcrossind(taglist,docrossind,crosspairs=np.array([]),crossinds=np.array([])):
    """Translate cross-correlation indices back into (tag,tag) pairs.

    taglist    - bin tags, indexed as in crosspairs/crossinds
    docrossind - cross indices to translate
    crosspairs/crossinds - optional lookup tables (computed if empty)
    Returns the pair list consolidated by consolidate_dotags.
    """
    if not crosspairs.size or not crossinds.size:
        crosspairs,crossinds = get_index_pairs(len(taglist))
    pairlist = [(taglist[crosspairs[n,0]],taglist[crosspairs[n,1]]) for n in docrossind]
    return consolidate_dotags(pairlist,taglist)
#-------------------------------------------------------------------------
# Given list of BinMaps and dopairs [(tag1,tag2),(,)...] list
# return list of crossinds for which we want to compute C_l
# if addauto=True, autocorrelations will be included even if not in other lists
def get_docross_ind(tagdict,dopairs,crossinds=np.array([]),addauto=False):
    """Translate a list of tag pairs into cross-correlation indices.

    tagdict   - {bintag: map index}
    dopairs   - [(tag0,tag1),...]; each tag is either a specific bin tag
                (contains '_bin') or a map-type prefix matching all bins
                of that type
    crossinds - Nmap x Nmap index lookup (computed if not passed)
    addauto   - if True, every autocorrelation is included
    Returns a duplicate-free list of cross indices to compute.  Pairs
    naming a specific bin absent from tagdict are silently skipped.
    """
    #print 'in get_docross_ind: dopairs',dopairs
    if not crossinds.size:
        crosspairs,crossinds = get_index_pairs(len(tagdict))
    docross=[] #index of cross corrs to do
    if addauto: #add all autocorrelations to 'do' list
        for i in range(len(tagdict)):
            docross.append(crossinds[i,i])
    for pair in dopairs:
        p0,p1 = pair
        i0=i1=-1 #-1 means not in tagdict
        p0isbin= '_bin' in p0
        p1isbin= '_bin' in p1
        #if a tag is for a specific bin, and not in tagdict, won't be computed
        if p0isbin:
            if p0 in tagdict: i0 = tagdict[p0]
            else: continue
        if p1isbin:
            if p1 in tagdict: i1 = tagdict[p1]
            else: continue
        if p0isbin and p1isbin: #both individual bins
            docross.append(crossinds[i0,i1])
        elif p0isbin!=p1isbin: #one individual bin, one map type
            if p0isbin:
                ibin,ptype = i0,p1
            else:
                ibin,ptype = i1,p0
            for tag in tagdict: #pair the bin with every bin of that type
                if tag[:tag.find('_bin')]==ptype:
                    # BUGFIX: previously appended crossinds[i0,i1] while the
                    # type-side index was still -1, which wrapped around and
                    # paired the bin with the *last* map instead of the
                    # matching bin of ptype.
                    docross.append(crossinds[ibin,tagdict[tag]])
        else: #both map types: pair every bin of p0 with every bin of p1
            i0list=[tagdict[tag] for tag in tagdict if tag[:tag.find('_bin')]==p0]
            i1list=[tagdict[tag] for tag in tagdict if tag[:tag.find('_bin')]==p1]
            for c0,c1 in itertools.product(i0list,i1list):
                docross.append(crossinds[c0][c1])
    return list(set(docross)) #remove duplicates
#------------------------------------------------------------------------
# given two ClData instances returns oldind: array of size newcl.Nmap, where
# oldind[i] = indix where newcl.bintaglist[i] appears in oldcl.bintaglist
# that is to say newcl.bintaglist[i]=oldcl.bintaglist[oldind[i]]
# except: oldind[i]=-1 if tag doesn't appear in oldtaglist
def translate_tag_inds(newcl,oldcl):
    """Map each of newcl's map indices onto oldcl's indexing.

    Returns an array oldind of length newcl.Nmap such that
    newcl.bintaglist[i] == oldcl.bintaglist[int(oldind[i])]; entries are
    -1 where the tag is absent from oldcl.bintaglist.

    Fixes the Py3-incompatible xrange loop (range behaves identically in
    Py2).  NOTE: the dtype intentionally stays float (np.ones default)
    because existing callers index arrays with these values.
    """
    #old = follow indices for maplist in prev existing file
    oldind=-1*np.ones(newcl.Nmap) #for each tag in newcl.bintaglist, its index in oldcl.bintaglist
    #get indices of tags existing in oldcl's tag list
    for t in range(newcl.Nmap):
        tag=newcl.bintaglist[t]
        if tag in oldcl.bintaglist:
            oldind[t]=oldcl.tagdict[tag]
    return oldind
#------------------------------------------------------------------------
def combine_old_and_new_Cl(newcl,oldcl,Overwrite=False):
    """Merge two ClData objects into one covering both sets of maps.

    Old values win unless Overwrite=True, in which case new values are
    kept for any pair present in both.  Returns a ClData built on the
    combined tag list (old tags first, then new-only tags).

    NOTE(review): oldind/mapindtranslate/xindtranslate are float arrays
    (np.ones default dtype); the crossinds/cl lookups below rely on older
    numpy accepting float fancy indices -- verify under current numpy.
    NOTE(review): mapindtranslate is sized (comboNcross,2) though it is
    indexed by map index; oversized but harmless as comboNcross>=comboNmap.
    """
    #combine new and old Cl info to write everything to file
    # if OVERWRITE; new Cl values kept even if old exist for that pair
    Nmap = newcl.Nmap
    Noldmap = oldcl.Nmap
    tagdict=newcl.tagdict
    crossinds=newcl.crossinds
    oldxinds=oldcl.crossinds
    oldind=translate_tag_inds(newcl,oldcl)
    combotags=oldcl.bintaglist[:] #slicing makes deep copy
    for t in xrange(Nmap): #add any new maptags
        if oldind[t]<0: combotags.append(newcl.bintaglist[t])
    comboNmap = len(combotags)
    combopairs,comboxinds = get_index_pairs(comboNmap)
    comboNcross = combopairs.shape[0]
    #set up arrays to translate between old, new, combo cross indices
    # mapindtranslate[n,0]=old tag ind of map n, [n,1]=new tag ind
    mapindtranslate=-1*np.ones((comboNcross,2))
    mapindtranslate[:oldcl.Nmap,0] = np.arange(oldcl.Nmap)
    for m in xrange(len(combotags)):
        if combotags[m] in newcl.tagdict:
            mapindtranslate[m,1]=newcl.tagdict[combotags[m]]
    # xindtranslate[n,0]=oldxind of combo n, [n,1]=new crossind
    xindtranslate=-1*np.ones((comboNcross,2))
    for n in xrange(comboNcross):
        c0,c1=combopairs[n]
        old0 = mapindtranslate[c0,0]
        new0 = mapindtranslate[c0,1]
        old1 = mapindtranslate[c1,0]
        new1 = mapindtranslate[c1,1]
        if old0>=0 and old1>=0:
            xindtranslate[n,0] = oldcl.crossinds[old0,old1]
        if new0>=0 and new1>=0:
            xindtranslate[n,1] = newcl.crossinds[new0,new1]
    #combine "do" pairs
    # NOTE(review): this rebinding repurposes 'combopairs' from index pairs
    # to tag pairs; the index pairs are no longer needed past this point.
    combopairs = consolidate_dotags(newcl.pairs+oldcl.pairs,combotags)
    Nell = newcl.Nell
    comboCl = np.zeros((comboNcross,Nell))
    for n in xrange(comboNcross):
        oldn = xindtranslate[n,0]
        newn = xindtranslate[n,1]
        if Overwrite and oldn>=0 and newn>=0:
            comboCl[n,:] = newcl.cl[newn,:]
        elif oldn>=0: #if No overwrite, but val was in old file, copy it over
            comboCl[n,:] = oldcl.cl[oldn,:]
        elif newn>=0: #not in old file, but in new
            comboCl[n,:] = newcl.cl[newn,:]
    combocl=ClData(newcl.rundat,combotags,combopairs,clgrid=comboCl)
    return combocl
#------------------------------------------------------------------------
# given list of unique tag pairs [(tag0,tag1),...] all bin tags
# consoliate so that if tag paired w all bins of
# replace with (tag0,type) rather than (tag0,type_binX)
# ->assumes no duplicates in binmaplist
def consolidate_dotags(pairs,bintaglist):
    """Compress a pair list by collapsing all-bins matches to type names.

    If some tag is paired with every bin of a map type, the individual
    (tag, type_binX) entries are replaced by a single (tag, type) entry;
    likewise full type-type matches.  Remaining bin-bin pairs are kept
    as-is.  Assumes bintaglist has no duplicates and that get_docross_ind
    adds all autocorrelations.
    """
    #print 'CONSOLIDATING',pairs
    Nmap = len(bintaglist)
    tagdict = {bintaglist[m]:m for m in xrange(Nmap)}
    crosspairs,crossinds = get_index_pairs(Nmap)
    #get list of unique map types
    types=[]
    typedict={}
    binind_fortype=[]# [type][list of indices for bintagss]
    for n in xrange(Nmap):
        tt= bintaglist[n][:bintaglist[n].find('_bin')]
        if tt not in types:
            types.append(tt)
            typedict[tt]=len(types)-1#index of type
            binind_fortype.append([n])
        else:
            binind_fortype[typedict[tt]].append(n)
    #get crosscorr indices for all 'do' pairs. assumes all autocorrs included
    docross=get_docross_ind(tagdict,pairs,crossinds)
    pairedwith=np.zeros((Nmap,Nmap)) #1 if bins assoc w/indices are paired
    accountedfor=np.zeros((Nmap,Nmap)) #1 if this pair is in 'results'
    for n in docross:
        i0 = crosspairs[n,0]
        i1 = crosspairs[n,1]
        pairedwith[i0,i1]=pairedwith[i1,i0]=1
    results=[]
    for t0 in xrange(len(types)):
        binind0 = binind_fortype[t0] #list of bintag indices
        for t1 in xrange(t0,len(types)):
            #print 'looking at type pair:',types[t0],types[t1]
            binind1 = binind_fortype[t1]
            #each b1 index has bool, true if that b1 is paired with all t0
            pairedwithall0=[all([pairedwith[b1,b0] for b0 in binind0]) for b1 in binind1]
            if all(pairedwithall0): #type-type match
                #print ' type-type match!'
                results.append((types[t0],types[t1]))
                #mark those pairs as accounted for
                for b0 in binind0:
                    for b1 in binind1:
                        accountedfor[b0,b1]=accountedfor[b1,b0]=1
            else:
                #add type-bin pairs
                #print ' checking bin-type matches'
                for bi1 in xrange(len(binind1)):
                    if pairedwithall0[bi1]:
                        #print ' adding', (types[t0],bintaglist[binind1[bi1]])
                        results.append((types[t0],bintaglist[binind1[bi1]]))
                        for b0 in binind0:
                            accountedfor[b0,binind1[bi1]]=accountedfor[binind1[bi1],b0]=1
                #check for bin0 bins paired with all t1
                pairedwithall1=[all([pairedwith[b1,b0] for b1 in binind1]) for b0 in binind0]
                for bi0 in xrange(len(binind0)):
                    if pairedwithall1[bi0]:
                        #print ' adding', (types[t1],bintaglist[binind0[bi0]])
                        results.append((types[t1],bintaglist[binind0[bi0]]))
                        for b1 in binind1:
                            accountedfor[b1,binind0[bi0]]=accountedfor[binind0[bi0],b1]=1
    #now, check if there are any bin-bin pairs left
    #print ' checking for leftover bin-bin pairs'
    for n in docross:
        i0 = crosspairs[n,0]
        i1 = crosspairs[n,1]
        if not accountedfor[i0,i1]:
            if i0!=i1:
                #print ' adding', (bintaglist[i0],bintaglist[i1])
                results.append((bintaglist[i0],bintaglist[i1]))
                accountedfor[i0,i1]=accountedfor[i1,i0]=1
    #this is just for testing
    orphans = pairedwith*np.logical_not(accountedfor)
    if np.any(orphans):
        print "MISSING SOME PAIRS IN CONSOLIDATION"
    return results
#------------------------------------------------------------------------
def readCl_file(rundata):
    """Read the run's C_l file and return its contents as a ClData object.

    Handles both the current format (with an 'nbar:' header line) and the
    old format without it.  If the file is missing, or its ell values do
    not match rundata.lvals, the returned ClData has an empty cl array.
    """
    #return Clarray, lvals, and string ids of all maps cross corr'd
    #will return empty arrays if file doesn't exist or wrong lvals
    outcl= np.array([])
    bintags=[]
    dopairs=[]
    nbar=[]
    if rundata.tag: runtag = '_'+rundata.tag
    else: runtag=''
    infile = ''.join([rundata.cldir,'Cl',runtag,'.dat'])
    if os.path.isfile(infile):
        print "Reading C_l file:", infile
        #open infile and read the first couple lines to get maplist and dopairs
        f=open(infile,'r')
        h0=f.readline() #header line containing list of bin tags
        h0b=f.readline()#header line containting nbar for each bintag (added 6/15)
        h1=f.readline() #header line containing list of pairs of tags to do
        f.close()
        bintags = h0[h0.find(':')+2:].split()
        #Since adding the nbarline is new, check whether h0b is nbar or pairs
        if h0b[:5]=='nbar:':
            hasnbar=True
            nbarstr=h0b#[h0b.find(':')+2:].split()
            nbar=np.array([float(x) for x in nbarstr[nbarstr.find(':')+2:].split()])
        else: #in old format, just has pairs
            hasnbar=False
            #leave nbar as empty array, ClData init will fill in all nbar=-1
            h1=h0b
        dopairs = [(p[:p.find('-')],p[p.find('-')+1:]) for p in h1[h1.find(':')+2:].split()]
        dopairs=consolidate_dotags(dopairs,bintags)
        Nmaps = len(bintags)
        if hasnbar:
            data = np.loadtxt(infile,skiprows=9)
        else:
            data = np.loadtxt(infile,skiprows=8)
        if len(data.shape)>1: #if more than one ell value, more than one row in file
            l = data[:,0].astype(int)
            clgrid = np.transpose(data[:,1:]) #first index is crosspair, second is ell
        else: #just one row
            l= data[0].astype(int)
            clgrid = data[1:].reshape(data[1:].size,1)
        #return clgrid if l values match up, otherwise return empty array
        # NOTE(review): comparison below is signed, not abs() -- file lvals
        # smaller than expected would still pass; confirm this is intended.
        if l.size==rundata.lvals.size:
            if (l-rundata.lvals<rundata.epsilon).all():
                outcl=clgrid
            else:
                print " *** unexpected lvals, recompute"
        else:
            print " *** unexpected size for lvals array, recompute"
    cldat=ClData(rundata,bintags,dopairs,outcl,nbarlist=nbar)
    return cldat#outcl,bintags,dopairs
#------------------------------------------------------------------------
def writeCl_file(cldat):
    """Write the C_l data in cldat (a ClData instance) to the run's Cl file.

    File layout (parsed back by readCl_file, so formats must not change):
    three checked header lines (map tags, nbar values, computed pairs),
    the run info string, a '#' separator, two column-label rows, then one
    row per ell value with columns [ell, C_l for each cross pair].
    """
    # BUGFIX: hasClvals is a method (see its use in getCl); the old test
    # `not cldat.hasClvals` evaluated the bound-method object, which is
    # always truthy, so the warning could never fire.  Call it.
    if not cldat.hasClvals():
        print("WARNING: writing file for ClData with empty cl array.")
    rundata=cldat.rundat
    crosspairs=cldat.crosspairs
    taglist=cldat.bintaglist
    nbarlist=cldat.nbar
    dopairs=cldat.pairs
    Clgrid=cldat.cl
    if rundata.tag: runtag = '_'+rundata.tag
    else: runtag=''
    outfile = ''.join([rundata.cldir,'Cl',runtag,'.dat'])
    lvals = rundata.lvals
    print("Writing C_l data to file: "+outfile)
    #write info about cross corr in data; these lists will be checked
    header0 = 'Maps: '+' '.join(taglist)+'\n'
    header0b= 'nbar:'+''.join([' {0:5.3e}'.format(x) for x in nbarlist])+'\n'
    header1 = 'Computed for pairs: '+' '.join([pair[0]+'-'+pair[1] for pair in dopairs])+'\n'
    #column labels: each pair's two map tags stacked over its column
    Npairs = crosspairs.shape[0]
    colhead0 = ''.join([' {0:23s}'.format(''),''.join([' {0:23s}'.format(taglist[crosspairs[n,0]]) for n in range(Npairs)]),'\n'])
    colhead1 = ''.join([' {0:23s}'.format('lvals'),''.join([' {0:23s}'.format(taglist[crosspairs[n,1]]) for n in range(Npairs)]),'\n'])
    #ell and C_l values, l = rows, pairs = columns
    bodystr=''.join([''.join([' {0:+23d}'.format(lvals[l]),''.join([' {0:+23.16e}'.format(Clgrid[n,l]) for n in range(Npairs)]),'\n'])\
                         for l in range(lvals.size)])
    # context manager guarantees the file is closed even if a write fails
    with open(outfile,'w') as f:
        f.write(header0)
        f.write(header0b)
        f.write(header1)
        #run info; not checked on read but useful provenance
        f.write(rundata.infostr+'\n')
        f.write('##############################\n') #skiprows = 8
        f.write(colhead0)
        f.write(colhead1)
        f.write(bodystr)
#=========================================================================
# combineCl_twobin:
# given input cldat containting maps with tags tag1, tag1, combine the Cl from
# those bins into one larger bin. Only works if nbar are in cldat.
# newmaptag- binmap tag to be associated with new map made from combo
# note that it should have _bin# in order to be id's as a binmap tag
# ouptut: clData object with one less map bin.
def combineCl_twobin(cldat,tag1,tag2,combotag,newruntag='',keept1=False,keept2=False):
    """Merge the bins tag1 and tag2 of cldat into one bin tagged combotag.

    The merged spectra are nbar-weighted combinations of the constituent
    auto- and cross-spectra (ref: Hu's lensing tomography paper).  Both
    bins must have nbar info (nbar>=0) or None is returned.  keept1/keept2
    keep the original bins alongside the combined one.  Returns a new
    ClData with the combined bin appended as the last map.
    """
    newNmap=cldat.Nmap-1+keept1+keept2
    mapind1=cldat.tagdict[tag1]
    mapind2=cldat.tagdict[tag2]
    xind11=cldat.crossinds[mapind1,mapind1]
    xind22=cldat.crossinds[mapind2,mapind2]
    xind12=cldat.crossinds[mapind1,mapind2]
    nbar1=cldat.nbar[mapind1]
    nbar2=cldat.nbar[mapind2]
    if nbar1<0 or nbar2<0:
        print "***WARNING, no nbar info for one of these maps!"
        return
    nbartot=nbar1+nbar2
    # gather info needed to make a new clData object
    newbintaglist=[]
    newnbarlist=[]
    newdocross=[]
    for m in xrange(cldat.Nmap):
        if (keept1 or m!=mapind1) and (keept2 or m!=mapind2):
            newbintaglist.append(cldat.bintaglist[m])
            newnbarlist.append(cldat.nbar[m])
    newbintaglist.append(combotag)
    newnbarlist.append(nbartot)
    combomapind=newNmap-1 #map index of combined map (last entry)
    #set up structures for new output dat
    newNcross=newNmap*(newNmap+1)/2
    newcl=np.zeros((newNcross,cldat.Nell))
    newxpairs,newxinds=get_index_pairs(newNmap)
    #fill in values appropriately. Ref: Hu's lensing tomography paper
    for n in xrange(newNcross):
        i,j=newxpairs[n] #in new map index bases
        if i==combomapind and j==combomapind: #both are the new combined map
            # combo autopower: nbar^2-weighted mean of both autos + cross
            newcl[n,:]+=nbar1*nbar1*cldat.cl[xind11,:]
            newcl[n,:]+=nbar2*nbar2*cldat.cl[xind22,:]
            newcl[n,:]+=2.*nbar1*nbar2*cldat.cl[xind12,:]
            newcl[n,:]/=nbartot*nbartot
        elif i==combomapind or j==combomapind: #just 1 is combo
            if i==combomapind:
                k=j #map not in combo, in new basis
            else:
                k=i
            # combo-x-other: nbar-weighted mean of both constituents' cross
            oldmapind=cldat.tagdict[newbintaglist[k]] #in old map basis
            xind1k=cldat.crossinds[mapind1,oldmapind]
            xind2k=cldat.crossinds[mapind2,oldmapind]
            newcl[n,:]+=nbar1*cldat.cl[xind1k,:]
            newcl[n,:]+=nbar2*cldat.cl[xind2k,:]
            newcl[n,:]/=nbartot
        else: #nether are combined map, just translate indices
            oldi=cldat.tagdict[newbintaglist[i]]
            oldj=cldat.tagdict[newbintaglist[j]]
            oldxind=cldat.crossinds[oldi,oldj]
            newcl[n,:]=cldat.cl[oldxind,:]
        if np.any(newcl[n,:]): #not strictly accurate for combo bin; will mark
            # xind as computed even if only one of the constituent bins were
            newdocross.append(n)
    #construct clData object and return it
    outcldat=ClData(copy.deepcopy(cldat.rundat),newbintaglist,clgrid=newcl,addauto=False,docrossind=newdocross,nbarlist=newnbarlist)
    if newruntag:
        outcldat.rundat.tag=newruntag
    return outcldat
#=========================================================================
# renameCl_binmap:
# given input cldat containing map with tag intag, rename that bin to newtag
# keeporig - if False, intag just gets renamed, otherwise, it is copied
# newtag- binmap tag to be associated with new map made from combo
# note that it should have _bin# in order to be id's as a binmap tag
# ouptut: clData object with new bin label
def renameCl_binmap(cldat,intag,newtag,newruntag='',keeporig=True):
inmapind=cldat.tagdict[intag]
if not keeporig:#just change name in place
#need to change bintaglist, tagdict
newbintaglist=cldat.bintaglist[:]
newbintaglist[inmapind]=newtag
newtagdict=cldat.tagdict.copy()
newtagdict.pop(intag)
newtagdict[newtag]=cldat.tagdict[intag]
#just copy over other data
clgrid=cldat.cl[:,:]
newdocross=cldat.docross[:]
newnbarlist=cldat.nbar[:]
else:
newNmap=cldat.Nmap+keeporig
innbar=cldat.nbar[inmapind]
xind11=cldat.crossinds[inmapind,inmapind]#autopower of in map
if innbar<0:
print "***WARNING, no nbar info map to be copied!"
return
# gather info needed to make a new clData object
newbintaglist=[]
newnbarlist=[]
newdocross=[]
for m in xrange(cldat.Nmap):
newbintaglist.append(cldat.bintaglist[m])
newnbarlist.append(cldat.nbar[m])
newbintaglist.append(newtag)
newnbarlist.append(innbar)
newmapind=newNmap-1 #map index of copied map (last entry)
#set up structures for new output dat
newNcross=newNmap*(newNmap+1)/2
newcl=np.zeros((newNcross,cldat.Nell))
newxpairs,newxinds=get_index_pairs(newNmap)
#fill in values appropriately. Ref: Hu's lensing tomography paper
for n in xrange(newNcross):
i,j=newxpairs[n] #in new map index bases
if i==newmapind and j==newmapind: #both are the new copied map
newcl[n,:]=cldat.cl[xind11,:]
elif i==newmapind or j==newmapind: #just 1 is new copied map
if i==newmapind:
k=j #the map that's not the copy, in new basis
else:
k=i
oldmapind=cldat.tagdict[newbintaglist[k]] #in old map basis
xind1k=cldat.crossinds[inmapind,oldmapind]
newcl[n,:]=cldat.cl[xind1k,:]
else: #nether are combined map, just translate indices
oldi=cldat.tagdict[newbintaglist[i]]
oldj=cldat.tagdict[newbintaglist[j]]
oldxind=cldat.crossinds[oldi,oldj]
newcl[n,:]=cldat.cl[oldxind,:]
if np.any(newcl[n,:]): #not strictly accurate for combo bin; will mark
# xind as computed even if only one of the constituent bins were
newdocross.append(n)
#construct clData object and return it
outcldat=ClData(copy.deepcopy(cldat.rundat),newbintaglist,clgrid=newcl,addauto=False,docrossind=newdocross,nbarlist=newnbarlist)
if newruntag:
outcldat.rundat.tag=newruntag
return outcldat
#----------------------------------------------------------
# combineCl_binlist:
# given input cldat, merge all bins in taglist
# ->taglist bins must be in cldat, and must have nbar!=-1
# newmaptag- binmap tag to be associated with new map made from combo
# keeporig - if True, original bins kept, if false, any combined bins dropped
# renamesingle - if len(taglist)==1 and combotag is passed, rename that bin
# or, if keeporig, make a copy of that bin with a new name
# ouptut: clData object with one less map bin.
def combineCl_binlist(cldat,taglist,combotag,newruntag='',keeporig=True,renamesingle=False):
    """Merge every bin named in taglist into one bin tagged combotag.

    The first two remaining tags are folded together with combineCl_twobin
    until a single bin is left; with keeporig the caller's original bins
    are preserved alongside it.  A single-entry taglist with renamesingle
    set instead renames (or copies) the bin to combotag.  Returns the
    resulting ClData object.
    """
    outcldat = cldat
    origtaglist = taglist[:]  # remember the caller's bins for the keep flags
    outruntag = newruntag if newruntag else cldat.rundat.tag
    if len(taglist) > 1:
        pending = taglist[:]
        while len(pending) > 1:
            first, second = pending[0], pending[1]
            # only bins the caller originally named are eligible to be kept;
            # the intermediate combotag entries never are
            keepfirst = keeporig and (first in origtaglist)
            keepsecond = keeporig and (second in origtaglist)
            outcldat = combineCl_twobin(outcldat,first,second,combotag,outruntag,keepfirst,keepsecond)
            pending = pending[1:]
            pending[0] = combotag
    elif renamesingle and combotag:  # add a copied/renamed version of the bin
        outcldat = renameCl_binmap(outcldat,taglist[0],combotag,outruntag,keeporig)
    return outcldat
#------------------------------------------------------------------------
# get_reduced_cldata
# returns ClData object with some maps, etc taken out;
# map indices of output matches order given in dothesemaps
def get_reduced_cldata(incldat,dothesemaps=[]):
    """Return a ClData object restricted to the maps in dothesemaps.

    Entries of dothesemaps may be specific bin tags (contain '_bin') or
    map-type substrings matching several bins.  The output map ordering
    follows dothesemaps; the corresponding C_l rows are copied into the
    reduced index basis.
    """
    bintaglist=incldat.bintaglist
    keepinds=[]
    newtags=[]
    newnbars=[]
    for m in dothesemaps:
        if '_bin' in m: #a specific bin tag
            # BUGFIX: bintaglist is a plain list, so the old
            # np.where(bintaglist==m) compared list-to-string (a scalar
            # False) and the [0][0] lookup always raised IndexError; use
            # list.index for the tag lookup instead.
            mi = bintaglist.index(m)
            keepinds.append(mi)
            newtags.append(m)
            newnbars.append(incldat.nbar[mi])
        else: #a map type: keep every bin whose tag contains it
            for mi in range(incldat.Nmap):
                if m in bintaglist[mi]:
                    keepinds.append(mi)
                    newtags.append(bintaglist[mi])
                    newnbars.append(incldat.nbar[mi])
    # use similar alg to that constructing Dl matrices to get reduced cldata
    newNmap = len(newtags)
    newNcross = newNmap*(newNmap+1)//2
    outxpairs,outxinds = get_index_pairs(newNmap)
    outcl = np.zeros((newNcross,incldat.Nell))
    for i in range(newNmap):
        for j in range(i,newNmap):
            cxij = incldat.crossinds[keepinds[i],keepinds[j]] #crossind in input basis
            outcl[outxinds[i,j],:] = incldat.cl[cxij,:]
    outcldat = ClData(incldat.rundat,newtags,incldat.pairs,outcl,nbarlist=newnbars)
    return outcldat
|
997,080 | ce5d6e1d3b99d3b0b1ff5e70b2e4987f72e7073b | from django.shortcuts import render,redirect
from django.views.generic import View
from django.urls import reverse
from .models import *
from django.contrib.auth.models import User
from django.contrib import messages
# Create your views here.
# def home(request):
# return render(request,'index.html')
class BaseView(View):
    # Base context shared by every page view in this module.
    # NOTE(review): `views` is a single class-level dict mutated by all
    # subclasses on every request; the querysets stored here are lazy, but the
    # shared-mutable-state design should be confirmed as intentional.
    views = {}
    views['categories'] = Category.objects.all()
    views['subcategories'] = SubCategory.objects.all()
class HomeView(BaseView):
    """Landing page: all products, the 'hot' ones, categories and sliders."""

    def get(self, request):
        context = self.views
        context['products'] = Product.objects.all()
        context['hots'] = Product.objects.filter(labels='hot')
        context['categories'] = Category.objects.all()
        context['sliders'] = Slider.objects.all()
        return render(request, 'index.html', context)
class ProductView(BaseView):
    """Product detail page, selected by slug."""

    def get(self, request, slug):
        detail_qs = Product.objects.filter(slug=slug)
        self.views['product_detail'] = detail_qs
        return render(request, 'single.html', self.views)
class SubCategoryProductView(BaseView):
    """List every product of the sub-category identified by *slug*."""

    def get(self, request, slug):
        subcategory_id = SubCategory.objects.get(slug=slug).id
        product_qs = Product.objects.filter(subcategory_id=subcategory_id)
        self.views['subcategory_product'] = product_qs
        return render(request, 'kitchen.html', self.views)
def signup(request):
    """Register a new user account.

    POST: checks that both password fields match and that the username and
    email are unused, then creates the account via Django's user manager
    (which hashes the password).  Every POST outcome flashes a message and
    redirects back to /signup.  GET: renders the registration form.
    """
    if request.method == "POST":
        first_name = request.POST['first_name']
        last_name = request.POST['last_name']
        username = request.POST['username']
        email = request.POST['email']
        password = request.POST['password']
        cpassword = request.POST['cpassword']
        if password == cpassword:
            if User.objects.filter(username = username).exists():
                messages.error(request,"This username is already taken")
            elif User.objects.filter(email = email).exists():
                messages.error(request,"This email is already taken")
            else:
                data = User.objects.create_user(
                    first_name = first_name,
                    last_name = last_name,
                    username = username,
                    email = email,
                    password = password
                )
                data.save()
                messages.success(request,"You are registered")
        else:
            # BUG FIX: a failed password match must be flashed as an error;
            # the original used messages.success here.
            messages.error(request,"Password does not match")
        # every POST branch returns to the signup page
        return redirect('/signup')
    return render(request,'register.html')
|
997,081 | fb9613b7ba8a046b14c848a27eb6587bb4aaade1 | import gspread
from oauth2client.service_account import ServiceAccountCredentials
import config
import telebot
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)
client = gspread.authorize(creds)
def findYouStudents(date, id):
    # Return {row_number(str): student_name} for everyone with a non-empty
    # mark under the column headed *date* on worksheet index *id*, or '' when
    # the sheet is disabled (title starts with '_') or nobody is marked.
    # NOTE(review): `id` shadows the builtin; kept for interface compatibility.
    dictOfCurStudens = {}
    if (sheets[id].title[0] != '_'):
        sheet = sheets[id]
        row = sheet.row_values(1)
        # drop the two non-date header cells before scanning for *date*
        del row[0]
        del row[0]
        if (len(row) != 0):
            for col in range(0, len(row)): # Dates
                if (row[col] == date): # Check date == column
                    # +3: col is 0-based and two header columns were dropped
                    column = sheet.col_values(col + 3)
                    del column[0]
                    if (len(column) != 0):
                        for r in range(0, len(column)): # Columns
                            if (column[r] != ''):
                                # name lives in column 2 of the same row
                                dictOfCurStudens[f'{r + 1}'] = sheet.cell(r + 2, 2).value
    if (len(dictOfCurStudens) == 0):
        return ''
    else:
        return dictOfCurStudens
    # unreachable: both branches above return
    return ''
def createDictOfCurStudents(date, idWT):
    """Build {'group': <last title char>, 'studens': {...}} for sheet *idWT*.

    Returns 0 when the sheet has no attendance for *date* (this also covers
    disabled '_'-prefixed sheets, for which findYouStudents returns '').
    """
    dictA = findYouStudents(date, idWT)
    if (dictA == ''):
        return 0
    else:
        if (sheets[idWT].title[0] != '_'):
            # FIX: reuse dictA instead of calling findYouStudents a second
            # time — the original repeated the (slow, API-heavy) lookup for
            # the same deterministic result.
            return {
                'group': f'{sheets[idWT].title[len(sheets[idWT].title) - 1]}',
                'studens': dictA}
def findName(idName, idGroup):
    # Resolve list number *idName* within group *idGroup* to the student name
    # stored in column 2 of the matching (enabled) worksheet.
    # Returns None implicitly when no worksheet matches.
    for sh in sheets:
        if (sh.title[len(sh.title) - 1] == str(idGroup) and sh.title[0] != '_'):
            # +1 skips the header row
            return sh.cell(idName + 1, 2).value
def createStrOfStuends(grp): # grp type int
    """Return one 'N: name' line per student of group *grp* as a single string.

    Scans every worksheet of 'Bonuses' whose title ends with the group digit
    and is not disabled ('_' prefix); names come from column 2, header skipped.
    """
    sheets = client.open('Bonuses').worksheets()
    parts = []
    for sh in sheets:
        if (sh.title[len(sh.title) - 1] == str(grp) and sh.title[0] != '_'):
            column = sh.col_values(2)
            del column[0]
            # enumerate + join instead of the original quadratic `+=` build
            for i, name in enumerate(column):
                parts.append(f'{i + 1}: {name}\n')
    return ''.join(parts)
def listOfStudens(id):
    # Return the student-name column (header dropped) of the first enabled
    # worksheet whose title ends with the group digit *id*.
    # NOTE(review): `id` shadows the builtin; returns None if nothing matches.
    sheets = client.open('Bonuses').worksheets()
    for sh in sheets:
        if (sh.title[len(sh.title) - 1] == str(id) and sh.title[0] != '_'):
            column = sh.col_values(2)
            del column[0]
            return column
def createArrOfStd(time):
    """Return the names of every student marked present on date *time*,
    collected across all enabled worksheets of 'Bonuses'.

    NOTE(review): the parameter shadows the imported ``time`` module inside
    this function; name kept for interface compatibility.
    """
    # FIX: removed the unused local `string` the original declared.
    sheets = client.open('Bonuses').worksheets()
    arr = []
    for sh in sheets:
        if (sh.title[0] != '_'):
            rows = sh.row_values(1)
            # drop the two non-date header cells
            del rows[0]
            del rows[0]
            for r in range(0, len(rows)):
                if (rows[r] == time):
                    column = sh.col_values(2)
                    row = sh.col_values(r + 3)
                    del column[0]
                    del row[0]
                    for i in range(0, len(row)):
                        if(row[i] != ''):
                            arr.append(column[i])
    return arr
def createArrRightStd(name, arr):
    """Return a new list with every occurrence of *name* removed from *arr*.

    BUG FIX: the original removed elements from the copy while iterating it,
    which skips the element after each removal, so consecutive duplicates of
    *name* survived.  A filtering comprehension removes them all and never
    mutates the input.
    """
    return [a for a in arr if a != name]
def createStrRightStd(arr):
    """Format *arr* as a numbered list, one 'N: name' line per entry."""
    numbered = [f'{idx}: {entry}\n' for idx, entry in enumerate(arr, start=1)]
    return ''.join(numbered)
sheets = client.open('Bonuses').worksheets()
bot = telebot.TeleBot()
@bot.message_handler(commands=['start'])
def exchange_command(message):
    # /start: ask the user which group (161..165) they belong to, via an
    # inline keyboard whose callback_data is the group number itself.
    keyboard = telebot.types.InlineKeyboardMarkup()
    keyboard.row(
        telebot.types.InlineKeyboardButton('161', callback_data=161),
        telebot.types.InlineKeyboardButton('162', callback_data=162),
        telebot.types.InlineKeyboardButton('163', callback_data=163),
        telebot.types.InlineKeyboardButton('164', callback_data=164),
        telebot.types.InlineKeyboardButton('165', callback_data=165)
    )
    bot.send_message(message.chat.id, 'Из какой ты группы?', reply_markup=keyboard)
@bot.callback_query_handler(func=lambda call: True)
def query_handler(call):
    """Handle both inline-keyboard flows.

    callback_data 161..165: the user picked a group — persist
    {'group': '1'..'5'} for this chat id into config.py and send the roster
    so they can pick their list number.
    callback_data 1/2: the user confirmed (1) or rejected (2) the name
    guessed from that number.
    """
    bot.answer_callback_query(callback_query_id=call.id, text='Спасибо за честный ответ!')
    if (161 <= int(call.data) <= 165):
        # The original five branches were identical except for the group
        # digit; derive it once: '161' -> '1', ..., '165' -> '5'.
        group = str(int(call.data) - 160)
        if (open("config.py", encoding = "utf8").read() == ''):
            # config.py is empty: start a fresh registry for this chat id
            open("config.py", encoding = "utf8").close()
            d = {call.message.chat.id: {
                'group': group
            }
            }
        else:
            d = config.dictOfStudens
            d[call.message.chat.id] = {
                'group': group
            }
        with open("config.py", "w", encoding = "utf8") as file:
            file.write(f'dictOfStudens = {d}')
        data = createStrOfStuends(int(group))
        answer = f'Ура вы из {call.data}!\n' + 'Введите свой номер по списку\n' + data
        bot.send_message(call.message.chat.id, answer)
        bot.edit_message_reply_markup(call.message.chat.id, call.message.message_id)
    if (1 <= int(call.data) <= 2):
        if (call.data == '1' and len(config.dictOfStudens[call.message.chat.id]) == 2):
            # Confirmed: resolve the name from (number, group) and store it.
            d = config.dictOfStudens
            d[call.message.chat.id]['name'] = findName(int(d[call.message.chat.id]['number']), int(d[call.message.chat.id]['group']))
            with open("config.py", "w", encoding = "utf8") as file:
                file.write(f'dictOfStudens = {d}')
        elif (call.data == '2' or len(config.dictOfStudens[call.message.chat.id]) == 3):
            # Rejected: re-send the roster so the user can pick again.
            s = config.dictOfStudens[call.message.chat.id]['group']
            fString = createStrOfStuends(int(s))
            bot.send_message(call.message.chat.id, f'Введите свой номер по списку\n {fString}')
@bot.message_handler(content_types=["text"])
def repeat_all_messages(message):
    # Catch-all text handler: (1) registration by list number, (2) admin
    # command '123 <date>' to start a vote, (3) counting a vote, (4) admin
    # 'STOP' to finish the vote and broadcast results.
    # NOTE(review): persisting state by rewriting config.py and re-importing
    # it is fragile; the exact write order below matters — do not reorder.
    try:
        d = config.dictOfStudens
        # user has picked a group but not yet confirmed a name
        if (message.text.isdigit() == True and 1 <= len(d[message.chat.id]) <= 2 and d[message.chat.id]['group'].isdigit()):
            s = listOfStudens(int(d[message.chat.id]['group']))
            if (1 <= int(message.text) <= len(s)):
                c = 0
                g = 0
                flag = -1
                if (len(d.keys()) == 1):
                    flag = 1
                else:
                    # c: fully-registered users with a different number;
                    # g: all fully-registered users.  c == g means nobody
                    # has claimed this number yet.
                    for key in d.keys():
                        if (len(d[key]) == 3 and d[key]['number'] != message.text):
                            c += 1
                    for key in d.keys():
                        if (len(d[key]) == 3):
                            g += 1
                if (c == g or flag == 1):
                    keyboard = telebot.types.InlineKeyboardMarkup()
                    keyboard.row(
                        telebot.types.InlineKeyboardButton('Да', callback_data=1),
                        telebot.types.InlineKeyboardButton('Нет', callback_data=2),
                    )
                    d[message.chat.id]['number'] = message.text
                    with open("config.py", "w", encoding ="utf8") as file:
                        file.write(f'dictOfStudens = {d}')
                    idGrp = d[message.chat.id]['group']
                    bot.send_message(message.chat.id, f'Вы {findName(int(message.text), int(idGrp))}?', reply_markup = keyboard)
                else:
                    bot.send_message(message.chat.id, f'Этот человек уже зарегистрирован')
            else:
                bot.send_message(message.chat.id, f'Вы неправильно ввели свой номер по списку!')
    except KeyError:
        # chat id not in dictOfStudens yet
        bot.send_message(message.chat.id, 'Вы ещё не выбрали свою группу!')
    # Start voting
    if (message.text.split(' ')[0] == '123'): # Password
        # '123 <date>': snapshot who is present on <date> and open the vote
        if(len(open("config.py", encoding = "utf8").read().split('\n\n')) == 1):
            open("config.py", encoding = "utf8").close()
            arrOfCurStudents = []
            for sh in range(0, len(sheets)): # Open new list
                dictSh = createDictOfCurStudents(message.text.split(' ')[1], sh)
                if (dictSh != 0):
                    arrOfCurStudents.append(dictSh)
            dictOfStd = config.dictOfStudens
            newDict = {}
            # keep only registered users who are actually present that day
            for keyD in dictOfStd.keys():
                for element in range(0, len(arrOfCurStudents)):
                    if (dictOfStd[keyD]['group'] == arrOfCurStudents[element]['group']):
                        for key, val in arrOfCurStudents[element]['studens'].items():
                            if (dictOfStd[keyD]['name'] == val and dictOfStd[keyD]['number'] == key):
                                newDict[dictOfStd[keyD]['name']] = keyD
            with open("config.py", "w", encoding = "utf8") as file:
                file.write(f'dictOfStudens = {dictOfStd}\n\ncurrencyStudens = {newDict}')
            with open("output/date.txt", "w", encoding = "utf8") as file:
                file.write(message.text.split(' ')[1])
        else:
            open("config.py", encoding = "utf8").close()
            arr = config.currencyStudens
            arrStd = createArrOfStd(message.text.split(' ')[1])
            # send each voter the roster minus their own name
            for val in arr.values():
                data = createStrRightStd(createArrRightStd(config.dictOfStudens[val]['name'], arrStd))
                bot.send_message(val, f'Пожалуйста, выберете того человека, которому вы отдадите свой балл:\n{data}')
            dictStd = {name: 0 for name in arrStd}
            if (len(open("config.py", encoding = "utf8").read().split('\n\n')) == 2):
                open("config.py", encoding = "utf8").close()
                with open("config.py", encoding = "utf8") as file:
                    string = file.read()
                with open("config.py", "w", encoding = "utf8") as file:
                    file.write(f'{string}\n\nresultsOfInterview = {dictStd}')
            else:
                open("config.py", encoding = "utf8").close()
            # chat_id -> '' sentinel marks "has not voted yet"
            NewDict = {val: key for key, val in arr.items()}
            with open("config.py", encoding = "utf8") as file:
                string = file.read()
            with open("config.py", "w", encoding = "utf8") as file:
                file.write(f'{string}\n\ncheckYourVote = {NewDict}')
    # Voting
    if (len(message.text.split(' ')) == 1 and message.text.isdigit() and len(open('config.py', encoding = "utf8").read().split('\n\n')) == 4 and len(config.dictOfStudens[message.chat.id].keys()) == 3 and 1 <= int(message.text) <= len((config.resultsOfInterview).keys())):
        open('config.py', encoding = "utf8").close()
        d = config.checkYourVote
        if(d[message.chat.id] != ''):
            with open("output/date.txt", encoding = "utf8") as file:
                arrStd = createArrOfStd(file.read())
            if (1 <= int(message.text) <= len(arrStd)):
                data = createArrRightStd(config.dictOfStudens[message.chat.id]['name'], arrStd)
                if (1 <= int(message.text) <= len(data)):
                    arr = config.resultsOfInterview
                    arr[data[int(message.text) - 1]] += 1
                    d[message.chat.id] = ''
                    with open('config.py', encoding = "utf8") as file:
                        spl = file.read().split('\n\n')
                    spl[2] = f'resultsOfInterview = {arr}'
                    d = config.checkYourVote
                    d[message.chat.id] = ''
                    spl[3] = f'checkYourVote = {d}'
                    with open('config.py', "w", encoding = "utf8") as file:
                        file.write(f'{spl[0]}\n\n{spl[1]}\n\n{spl[2]}\n\n{spl[3]}')
                    bot.send_message(message.chat.id, "Спасибо за ваш голос!")
        else:
            bot.send_message(message.chat.id, "Вы уже проголосовали!")
    # Stop voting
    if(message.text == 'STOP' and message.chat.id == 490492546 and len(open('config.py', encoding = "utf8").read().split('\n\n')) == 4):
        open('config.py', encoding = "utf8").close()
        arr = config.resultsOfInterview
        data = ''
        for val, key in arr.items():
            data += f'{val}: {key}\n'
        arr = config.currencyStudens
        # broadcast the tally to every participant and the admin
        for val in arr.values():
            bot.send_message(val, data)
        bot.send_message(490492546, data)
        arr = config.dictOfStudens
        # reset config.py back to just the registry
        with open('config.py', "w", encoding = "utf8") as file:
            file.write(f'dictOfStudens = {arr}')
bot.polling(none_stop=True, interval=0) |
997,082 | d28def87be06685fca05c953f2f593b1a3c00fdd | import sys
sys.path.append("librerias")
from Adafruit_PWM_Servo_Driver import PWM
import time
from Tkinter import *
from PIL import Image
import threading
# Initialise the PWM device using the default address
pwm = PWM(0x40)
# Note if you'd like more debug output you can instead run:
#pwm = PWM(0x40, debug=True)
#servo90 = ((servoMax-servoMin)/2)+servoMin
servoMin = 100 # Min pulse length out of 4096
servoMax = 650 # Max pulse length out of 4096
matrixServos = list() #matriz de posiciones de los servos
c=0
### CONSTANTES ###
B1 = 0; B2 = 1; B3 = 2;
D1 = 3; D2 = 4; D3 = 5; D4 = 6; D5 = 7;
A1 = 8; A2 = 9; A3 = 10;
C1 = 11; C2 = 12; C3 = 13; C4 = 14; C5 = 15;
### FIN CONSTANTES ###
def setServoPulse(channel, pulse):
    # Convert a pulse width in milliseconds to PCA9685 ticks and set it.
    # Python 2 code: '/' below is integer division, which the tick math
    # relies on.
    # NOTE(review): the comment says 60 Hz but the code divides by 50 —
    # confirm which frequency the driver is actually configured for.
    pulseLength = 1000000 # 1,000,000 us per second
    pulseLength /= 50 # 60 Hz
    print "%d us per period" % pulseLength
    pulseLength /= 4096 # 12 bits of resolution
    print "%d us per bit" % pulseLength
    pulse *= 1000
    pulse /= pulseLength
    pwm.setPWM(channel, 0, pulse)
def servo(puerto, angulo):
    # Drive PWM channel *puerto* to *angulo* degrees (0..180), mapping the
    # angle linearly into [servoMin, servoMax] ticks.
    # NOTE(review): under Python 2, (servoMax-servoMin)/180 floors to 3, so
    # the effective range tops out at 180*3+100 = 640 ticks, not servoMax=650.
    # Do not port to Python 3 '/' without re-checking the calibration.
    frec = int((angulo * ((servoMax-servoMin)/180)) + servoMin)
    pwm.setPWM(puerto ,0 ,frec)
    #time.sleep(.3)
###
###Funcion para leer archivo txt y agregarlo a una matriz
###
def setMatrixServos(archivo):
    # Append every CSV row of *archivo* to the global matrixServos list:
    # each row is [pose_name, angle1, ..., angle16].
    # NOTE(review): lines keep their trailing '\n' in the last field; int()
    # tolerates it downstream, but repeated calls accumulate rows — confirm
    # callers intend matrixServos to grow rather than be replaced.
    with open(archivo, "r") as ins:
        for line in ins:
            matrixServos.append(line.split(","))
#------------------
###
### obtengo las posiciones segun el nombre asignado en el archivo
###
def getPositions(str):
    # Return the first row of matrixServos whose pose name matches *str*,
    # or None implicitly when no row matches.
    # NOTE(review): the parameter shadows the builtin `str`; name kept for
    # interface compatibility.
    for i in matrixServos:
        if i[0] == str:
            return i
#----------------
#----------------
def setPositionRobot2(posicion):
    """Drive all 16 servos to the pose stored in *posicion*.

    posicion[0] is the pose name; posicion[1..16] are the angles for legs
    A (a1..a3), B (b1..b3), C (c1..c5) and D (d1..d5), in that order.
    """
    # PWM channel for each pose column, in posicion[1..16] order:
    # A1,A2,A3,  B1,B2,B3,  C1..C5,  D1..D5
    channels = (2, 1, 0, 13, 14, 15, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)
    # same call order and channel mapping as the original 16 unrolled calls
    for idx, channel in enumerate(channels):
        servo(channel, int(posicion[idx + 1]))
def cambiarValores(posicion):
    """Copy the pose *posicion* into the 16 GUI sliders (s1..s16).

    posicion[1..16] map to s1..s16 in order (A1..A3, B1..B3, C1..C5, D1..D5),
    exactly as the original 16 unrolled .set() calls did.
    """
    sliders = (s1, s2, s3, s4, s5, s6, s7, s8,
               s9, s10, s11, s12, s13, s14, s15, s16)
    for slider, value in zip(sliders, posicion[1:17]):
        slider.set(int(value))
pwm.setPWMFreq(60)
def modo_real():
arch=str(nombre_entry.get())+".txt"
setMatrixServos(arch)
print "Archivo: "+arch
posicion = getPositions("extendido")
setPositionRobot2(posicion)
time.sleep(3)
posicion = getPositions("homealto")
setPositionRobot2(posicion)
time.sleep(3)
posicion = getPositions("getup1")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup2")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup3")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup4")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup5")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup6")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup7")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup8")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup9")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup10")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup11")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup12")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup13")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup14")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup15")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup16")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup17")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup18")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup19")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup20")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup21")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup22")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup23")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup24")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup25")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup26")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup27")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("getup28")
setPositionRobot2(posicion)
time.sleep(1)
"""for x in range(0,50):
posicion = getPositions("home5")
setPositionRobot2(posicion)
time.sleep(2)
posicion = getPositions("t")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("tt2")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("tt3")
setPositionRobot2(posicion)
time.sleep(1)
posicion = getPositions("tt4")
setPositionRobot2(posicion)
time.sleep(1)
"""
def cambiarValoresYEjecutar():
    # Load the selected pose into the GUI sliders, then drive the servos
    # from those slider values.
    boton_cambiarValores()
    valores_servos()
def boton_cambiarValores():
    # Read file name and pose name from the GUI, load the file into
    # matrixServos, and copy the matching pose into the sliders.
    arch2=str(nombre_entry.get())+".txt"
    setMatrixServos(arch2)
    pos=str(nombre_entry2.get())
    posicion = getPositions(pos)
    print pos
    cambiarValores(posicion)
def home():
    # Set every slider to the hard-coded 'home' pose (does not move the
    # servos itself — valores_servos() does that).
    print "home"
    #A
    s1.set(130)
    s2.set(70)
    s3.set(90)
    #B
    s4.set(54)
    s5.set(123)
    s6.set(90)
    #C
    s7.set(84)
    s8.set(30)
    s9.set(16)
    s10.set(58)
    s11.set(87)
    #D
    s12.set(106)
    s13.set(104)
    s14.set(164)
    s15.set(122)
    s16.set(90)
def valores_servos():
#A
#a1
print s1.get()
servo(2, int(s1.get()))
#a2
print s2.get()
servo(1, int(s2.get()))
#a3
print s3.get()
servo(0, int(s3.get()))
#B
#b1
print s4.get()
servo(13, int(s4.get()))
#b2
print s5.get()
servo(14, int(s5.get()))
#b3
print s6.get()
servo(15, int(s6.get()))
#C
#c1
print s7.get()
servo(3, int(s7.get()))
#c2
print s8.get()
servo(4, int(s8.get()))
#c3
print s9.get()
servo(5, int(s9.get()))
#c4
print s10.get()
servo(6, int(s10.get()))
#c5
print s11.get()
servo(7, int(s11.get()))
#D
#d1
print s12.get()
servo(8, int(s12.get()))
#d2
print s13.get()
servo(9, int(s13.get()))
#d3
print s14.get()
servo(10, int(s14.get()))
#d4
print s15.get()
servo(11, int(s15.get()))
#d5
print s16.get()
servo(12, int(s16.get()))
def agregar_datos():
archi=open(str(nombre_entry.get())+".txt",'a')
archi.write('\n')
archi.write(str(nombre_entry2.get())+",")
archi.write(str(s1.get())+",")
archi.write(str(s2.get())+",")
archi.write(str(s3.get())+",")
archi.write(str(s4.get())+",")
archi.write(str(s5.get())+",")
archi.write(str(s6.get())+",")
archi.write(str(s7.get())+",")
archi.write(str(s8.get())+",")
archi.write(str(s9.get())+",")
archi.write(str(s10.get())+",")
archi.write(str(s11.get())+",")
archi.write(str(s12.get())+",")
archi.write(str(s13.get())+",")
archi.write(str(s14.get())+",")
archi.write(str(s15.get())+",")
archi.write(str(s16.get()))
archi.close()
print "SE HA AGREGADO AL ARCHIVO"
def crear_archivo():
archi=open(str(nombre_entry.get())+".txt",'w')
archi.write(str(nombre_entry2.get())+",")
archi.write(str(s1.get())+",")
archi.write(str(s2.get())+",")
archi.write(str(s3.get())+",")
archi.write(str(s4.get())+",")
archi.write(str(s5.get())+",")
archi.write(str(s6.get())+",")
archi.write(str(s7.get())+",")
archi.write(str(s8.get())+",")
archi.write(str(s9.get())+",")
archi.write(str(s10.get())+",")
archi.write(str(s11.get())+",")
archi.write(str(s12.get())+",")
archi.write(str(s13.get())+",")
archi.write(str(s14.get())+",")
archi.write(str(s15.get())+",")
archi.write(str(s16.get()))
archi.close()
print "SE HA GUARDADO EL ARCHIVO"
# Tkinter Scale callbacks: each slider drives one fixed PWM channel.
# Mapping: A -> 2,1,0; B -> 13,14,15; C -> 3..7; D -> 8..12.
def a3_onChange(value):
    servo(0,int(value) )
def a2_onChange(value):
    servo(1,int(value) )
def a1_onChange(value):
    servo(2,int(value) )
def c1_onChange(value):
    servo(3,int(value) )
def c2_onChange(value):
    servo(4,int(value) )
def c3_onChange(value):
    servo(5,int(value) )
def c4_onChange(value):
    servo(6,int(value) )
def c5_onChange(value):
    servo(7,int(value) )
def d1_onChange(value):
    servo(8,int(value) )
def d2_onChange(value):
    servo(9,int(value) )
def d3_onChange(value):
    servo(10,int(value) )
def d4_onChange(value):
    servo(11,int(value) )
def d5_onChange(value):
    servo(12,int(value) )
def b1_onChange(value):
    servo(13,int(value) )
def b2_onChange(value):
    servo(14,int(value) )
def b3_onChange(value):
    servo(15,int(value) )
def setPositionsFromFile(file):
    # Load '<file>.txt' and play every pose in order, 0.1 s apart.
    # NOTE(review): `file` shadows the Py2 builtin; kept for compatibility.
    arch=str(file)+".txt"
    setMatrixServos(arch)
    print matrixServos
    for i in matrixServos:
        posicion = i
        setPositionRobot2(posicion)
        time.sleep(.1)
master = Tk()
master.title('SERVOS')
## SERVOS A
a1 = IntVar()
s1 = Scale(master, from_=0, to=180, label="A1", orient=HORIZONTAL, command=a1_onChange)
s1.grid(row=1,column=1)
a2 = IntVar()
s2 = Scale(master, from_=0, to=180, label="A2", orient=HORIZONTAL, command=a2_onChange)
s2.grid(row=2,column=1)
a3 = IntVar()
s3 = Scale(master, from_=0, to=180, label="A3",orient=HORIZONTAL, command=a3_onChange)
s3.grid(row=3,column=1)
## SERVOS B
b1 = IntVar()
s4 = Scale(master, from_=0, to=180, label="B1",orient=HORIZONTAL, command=b1_onChange)
s4.grid(row=1,column=4)
b2 = IntVar()
s5 = Scale(master, from_=0, to=180, label="B2",orient=HORIZONTAL, command=b2_onChange)
s5.grid(row=2,column=4)
b3 = IntVar()
s6 = Scale(master, from_=0, to=180, label="B3",orient=HORIZONTAL, command=b3_onChange)
s6.grid(row=3,column=4)
## SERVOS C
cc1 = IntVar()
s7 = Scale(master, from_=0, to=180, label="C1",orient=HORIZONTAL, command=c1_onChange)
s7.grid(row=1,column=2)
cc2 = IntVar()
s8 = Scale(master, from_=0, to=180, label="C2",orient=HORIZONTAL, command=c2_onChange)
s8.grid(row=2,column=2)
cc3 = IntVar()
s9 = Scale(master, from_=0, to=180, label="C3",orient=HORIZONTAL, command=c3_onChange)
s9.grid(row=3,column=2)
cc4 = IntVar()
s10 = Scale(master, from_=0, to=180, label="C4",orient=HORIZONTAL, command=c4_onChange)
s10.grid(row=4,column=2)
cc5 = IntVar()
s11 = Scale(master, from_=0, to=180, label="C5",orient=HORIZONTAL, command=c5_onChange)
s11.grid(row=5,column=2)
## SERVOS D
d1 = IntVar()
s12 = Scale(master, from_=0, to=180, label="D1",orient=HORIZONTAL, command=d1_onChange)
s12.grid(row=1,column=3)
d2 = IntVar()
s13 = Scale(master, from_=0, to=180, label="D2",orient=HORIZONTAL, command=d2_onChange)
s13.grid(row=2,column=3)
d3 = IntVar()
s14 = Scale(master, from_=0, to=180, label="D3",orient=HORIZONTAL, command=d3_onChange)
s14.grid(row=3,column=3)
d4 = IntVar()
s15 = Scale(master, from_=0, to=180, label="D4",orient=HORIZONTAL, command=d4_onChange)
s15.grid(row=4,column=3)
d5 = IntVar()
s16 = Scale(master, from_=0, to=180, label="D5",orient=HORIZONTAL, command=d5_onChange)
s16.grid(row=5,column=3)
# Build the text entries and buttons, run the 'caminar' routine, then hand
# control to Tk's mainloop.
# NOTE(review): mainloop() blocks, so this `while True` only loops again
# after the window is destroyed — and then recreates all widgets on a dead
# root.  Confirm whether the loop is intentional; it looks like leftover
# scaffolding.
while True:
    # CAMPO 1 : Nombre archivo
    nombre_label = Label(master,text="Nombre txt: ")
    nombre_label.grid(row=1,column=7)
    nombre_str = StringVar()
    nombre_entry = Entry(master,textvariable=nombre_str)
    nombre_str.set("main");
    nombre_entry.grid(row=1,column=8)
    nombre_label2 = Label(master,text="Nombre rutina: ")
    nombre_label2.grid(row=2,column=7)
    nombre_str2 = StringVar()
    nombre_entry2 = Entry(master,textvariable=nombre_str2)
    nombre_str2.set("");
    nombre_entry2.grid(row=2,column=8)
    # CAMPO 2 :
    real = Button(master,text="Cambiar valores GUI y ejecutar",command=cambiarValoresYEjecutar,relief=FLAT)
    real.grid(row=3,column=7)
    # CAMPO 3 :
    agregar = Button(master,text="Agregar rutina de valores al archivo",command=agregar_datos,relief=FLAT)
    agregar.grid(row=4,column=7)
    #CAMPO 4:
    crear = Button(master,text="Crear un nuevo archivo",command=crear_archivo,relief=FLAT)
    crear.grid(row=5,column=7)
    home()
    setPositionsFromFile("caminar")
    mainloop()
|
997,083 | a657179e0a29bbb71e438c345733e96855762c1e | #09_switch.py
import RPi.GPIO as GPIO
import time
# Configure the Pi to use the BCM (Broadcom) pin names, rather than the pin pos$
GPIO.setmode(GPIO.BCM)
switch_pin = 23
# Internal pull-up: the pin reads HIGH when open, LOW (False) when the
# button shorts it to ground.
GPIO.setup(switch_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Poll forever; the 0.2 s sleep is a crude debounce / repeat-rate limit.
# NOTE(review): no GPIO.cleanup() on exit — confirm that is acceptable here.
while True:
    if GPIO.input(switch_pin) == False:
        print("Button Pressed")
        time.sleep(0.2)
997,084 | b9c4ddef03f5ce4eb2a61bb3d78ef1ec6fcf0702 | ## pythonAES.py
#
# Example on two-way encryption/decryption in AES for python
# Kudos goes to: https://gist.github.com/sekondus/4322469
# Another good ref: http://eli.thegreenplace.net/2010/06/25/aes-encryption-of-files-in-python-with-pycrypto
#
# LU: 08/03/16
## NOTES:
# 1 - AES requires two different parameters for encryption: A key and an
# initialization vector (IV).
#
# 2 -
##
from Crypto.Cipher import AES
from Crypto import Random
import base64
import os
KEY_SIZE = 32 # Must be 16, 24, 32 for AES
PADDING = '{' # Padding character
## Lambda expressions:
# Padding to make sure the KEY_SIZE is always satisfied
pad = lambda s: s + (KEY_SIZE - len(s) % KEY_SIZE) * PADDING;
# Encrypt, encode; decrypt, decoders
EncodeAES = lambda c, s: base64.b64encode(c.encrypt(pad(s)))
# NOTE(review): rstrip(PADDING) corrupts plaintexts that legitimately end
# in '{' — this demo padding scheme is ambiguous; use PKCS#7 in real code.
DecodeAES = lambda c, e: c.decrypt(base64.b64decode(e)).rstrip(PADDING)
# Generate a random secret key
#secret = os.urandom(KEY_SIZE)
# NOTE(review): AES.block_size is 16, so despite KEY_SIZE=32 above this is a
# 16-byte (AES-128) key — confirm which key length was intended.
secret = Random.new().read(AES.block_size); # Better alternative than the above snippet
# Create a cipher object using the random secret key
# The 2nd param is a block chaining mode. Avoid using MODE_ECB; use
# MODE_CFB or MODE_CBC instead.
# SECURITY: the code contradicts its own comment and uses ECB, which leaks
# plaintext block patterns — switch to MODE_CBC/MODE_CFB with a random IV.
cipher = AES.new(secret, AES.MODE_ECB)
# Get string to encode from user
userStr = raw_input("Please provide a string: ")
# Encode a string
encoded = EncodeAES(cipher, userStr)
print 'Encrypted string: ', encoded
# Decode the encoded string
decoded = DecodeAES(cipher, encoded)
print 'Decrypted string: ', decoded
|
997,085 | 481c1c79f29ec8e494e867283542c1ff0c0e4f40 | from api import app
from api.model.email import Email
from api.model.mailgun import Mailgun
from api.model.mandrill import Mandrill
import unittest
import json
class TestCase(unittest.TestCase):
    # Unit tests for the email API: the Email/Mailgun/Mandrill models and the
    # /email endpoint of the Flask app.
    def setUp(self):
        # Flask test client with TESTING enabled; one well-formed payload
        # reused by several tests.
        self.app = app.test_client()
        app.config['TESTING'] = True
        self.test_email = {
            "to": "pamelastone@gmail.com",
            "to_name": "Pam Lu",
            "from": "pamela.stone@gmail.com",
            "from_name": "Pam Sender",
            "subject": "A Message",
            "body": "<h1>Your Bill</h1><p>$10</p>"
        }
    def test_model_email(self):
        # A full payload maps onto the Email attributes and validates;
        # incomplete payloads must be flagged invalid.
        email = Email(self.test_email)
        assert email.to_email == self.test_email['to']
        assert email.to_name == self.test_email['to_name']
        assert email.from_email == self.test_email['from']
        assert email.from_name == self.test_email['from_name']
        assert email.subject == self.test_email['subject']
        # NOTE(review): b'Your Bill' in email.body implies body is bytes —
        # if Email keeps it as str this raises TypeError; confirm the model.
        assert b'Your Bill' in email.body
        assert email.is_valid == True
        email = Email({"to":"test"})
        assert email.is_valid == False
        email = Email({"to": "pamelastone@gmail.com"})
        assert email.is_valid == False
    def test_model_mailgun(self):
        # Constructor simply stores url/key.
        test_url = 'test_url'
        test_key = 'test_key'
        mailgun = Mailgun(test_url, test_key)
        assert mailgun.api_url == test_url
        assert mailgun.api_key == test_key
    def test_model_mandrill(self):
        # Constructor simply stores url/key.
        test_url = 'test_url'
        test_key = 'test_key'
        mandrill = Mandrill(test_url, test_key)
        assert mandrill.api_url == test_url
        assert mandrill.api_key == test_key
    def test_api_get_email(self):
        # GET is not allowed on /email.
        data = self.app.get('/email')
        assert data._status_code == 405
    def test_api_post_email(self):
        # Valid JSON payload -> 200; missing fields or malformed address -> 400.
        r = self.app.post('/email', data = json.dumps(self.test_email), content_type='application/json')
        assert r._status_code == 200
        self.test_email_bad_1 = {
            "to": "pamelastone@gmail.com",
            "to_name": "Pam Lu",
            "from": "pamela.stone@gmail.com",
            "from_name": "Pam Sender"
        }
        self.test_email_bad_2 = {
            "to": "pamelastone",
            "to_name": "Pam Lu",
            "from": "pamela.stone@gmail.com",
            "from_name": "Pam Sender"
        }
        r = self.app.post('/email', data=json.dumps(self.test_email_bad_1), content_type='application/json')
        assert r._status_code == 400
        r = self.app.post('/email', data=json.dumps(self.test_email_bad_2), content_type='application/json')
        assert r._status_code == 400
if __name__ == '__main__':
unittest.main() |
997,086 | e8c1091a69a3a91e0fa30145888fc2d58e21d1d4 | import numpy as np
import random
import tensorflow as tf
from tensorpack import *
import math
import tflearn
import scipy
import scipy.io as sio
import time
from tensorflow.python.framework import ops
import warnings
import os
import threading
class GeneratorRunner(object):
    "Custom runner that that runs an generator in a thread and enqueues the outputs."
    # Python 2 / TF1 queue-runner shim: pulls items from a Python generator
    # on a background thread and feeds them into a TF FIFOQueue via a
    # feed_dict of placeholders.
    def __init__(self, generator, placeholders, enqueue_op, close_op):
        self._generator = generator
        self._placeholders = placeholders
        self._enqueue_op = enqueue_op
        self._close_op = close_op
    def _run(self, sess, coord):
        # Thread body: enqueue until the generator is exhausted or the
        # coordinator asks us to stop; close the queue on exhaustion so
        # pending dequeues can drain.
        try:
            while not coord.should_stop():
                try:
                    # print "======== values = self._generator.get_data()"
                    values = next(self._generator)
                    # print values.shape
                    # values = [values]
                    if len(values) != len(self._placeholders):
                        print "======== len(values), len(self._placeholders)", len(values), len(self._placeholders)
                    assert len(values) == len(self._placeholders), \
                        'generator values and placeholders must have the same length'
                    #if len(values[0]) == self._placeholders[0].get_shape().as_list()[0]:
                    feed_dict = {placeholder: value \
                        for placeholder, value in zip(self._placeholders, values)}
                    sess.run(self._enqueue_op, feed_dict=feed_dict)
                except (StopIteration, tf.errors.OutOfRangeError):
                    # generator exhausted: close the queue (best-effort) and exit
                    try:
                        sess.run(self._close_op)
                    except Exception:
                        pass
                    return
        except Exception as ex:
            # surface the error through the coordinator when available
            if coord:
                coord.request_stop(ex)
            else:
                raise
    def create_threads(self, sess, coord=None, daemon=False, start=False):
        "Called by `start_queue_runners`."
        print "===== GeneratorRunner.create_threads"
        thread = threading.Thread(
            target=self._run,
            args=(sess, coord))
        if coord:
            coord.register_thread(thread)
        if daemon:
            thread.daemon = True
        if start:
            thread.start()
        return [thread]
def read_batch_generator(
    generator, dtypes, shapes, batch_size,
    queue_capacity=1000,
    allow_smaller_final_batch=True):
    "Reads values from an generator, queues, and batches."
    # Wires a GeneratorRunner into TF1's queue-runner machinery and returns
    # a dequeue op yielding batches of batch_size (the final partial batch is
    # kept or dropped depending on allow_smaller_final_batch).
    assert len(dtypes) == len(shapes), 'dtypes and shapes must have the same length'
    queue = tf.FIFOQueue(
        capacity=queue_capacity,
        dtypes=dtypes,
        shapes=shapes)
    placeholders = [tf.placeholder(dtype, shape) for dtype, shape in zip(dtypes, shapes)]
    print placeholders
    enqueue_op = queue.enqueue(placeholders)
    # cancel_pending_enqueues=False lets in-flight enqueues finish on close
    close_op = queue.close(cancel_pending_enqueues=False)
    queue_runner = GeneratorRunner(generator, placeholders, enqueue_op, close_op)
    tf.train.add_queue_runner(queue_runner)
    if allow_smaller_final_batch:
        return queue.dequeue_up_to(batch_size)
    else:
        print "===== returning read_batch_generator->queue.dequeue_many"
        return queue.dequeue_many(batch_size)
class data_loader(object):
    """Queued input pipeline: reads pre-batched TFRecord train/test datasets via
    tensorpack dataflows and exposes batch tensors switched by `is_training`.

    NOTE(review): each record appears to hold (rgb, invZ, mask, surface-normal,
    angles, voxel) already batched to FLAGS.batch_size — TODO confirm against
    the TFRecord writer.
    """
    def __init__(self, flags):
        ## All variables ##
        global FLAGS
        FLAGS = flags
        self.out_size = (FLAGS.num_point, 3)
        self.resolution = FLAGS.resolution
        self.vox_reso = FLAGS.voxel_resolution
        # Feed True/False at run time to switch between train/test queues.
        self.is_training = tf.placeholder(dtype=bool,shape=[],name='gen-is_training')
        # data_lmdb_path = "/home/rz1/Documents/Research/3dv2017_PBA/data/lmdb"
        # data_lmdb_path = "/data_tmp/lmdb/"
        # data_lmdb_path = "/newfoundland/rz1/lmdb/"
        #data_lmdb_path = "./data/lmdb/"
        data_lmdb_path = flags.data_path
        data_lmdb_train_file = flags.data_file + '_train.tfr'
        data_lmdb_test_file = flags.data_file + '_test.tfr'
        # The .npy side files store the record count of each TFRecord file.
        data_size_train_file = flags.data_file + '_train.npy'
        data_size_test_file = flags.data_file + '_test.npy'
        # data_lmdb_path = "/home/ziyan/3dv2017_PBA_out/data/lmdb/"
        # self.data_pcd_train = data_lmdb_path + "randLampbb8Full_%s_%d_train_imageAndShape.lmdb"%(FLAGS.cat_name, FLAGS.num_point)
        # self.data_pcd_train = data_lmdb_path + "random_randomLamp0822_%s_%d_train_imageAndShape_single.lmdb"%(FLAGS.cat_name, FLAGS.num_point)
        self.data_ae_train = os.path.join(data_lmdb_path, data_lmdb_train_file)
        self.data_size_train = os.path.join(data_lmdb_path, data_size_train_file)
        self.tfrecord_train_size = int(np.load(self.data_size_train))
        #self.data_pcd_train = data_lmdb_path + "random_randLamp1005_%s_%d_train_imageAndShape_single_persp.amdb"%(FLAGS.cat_name, FLAGS.num_point)
        # self.data_pcd_train = '/data_tmp/lmdb/badRenderbb9_car_24576_train_imageAndShape.lmdb'
        # self.data_pcd_test = data_lmdb_path + "random_randomLamp0822_%s_%d_test_imageAndShape_single.lmdb"%(FLAGS.cat_name, FLAGS.num_point)
        self.data_ae_test = os.path.join(data_lmdb_path, data_lmdb_test_file)
        self.data_size_test = os.path.join(data_lmdb_path, data_size_test_file)
        self.tfrecord_test_size = int(np.load(self.data_size_test))
        #self.data_pcd_test = data_lmdb_path + "random_randLamp1005_%s_%d_test_imageAndShape_single_persp.lmdb"%(FLAGS.cat_name, FLAGS.num_point)
        # self.data_pcd_test = '/newfoundland/rz1/lmdb/badRenderbb9_car_24576_test_imageAndShape.lmdb'
        buffer_size = 32   # local shuffle/prefetch buffer (dataflow elements)
        parall_num = 16    # number of prefetch worker processes
        self.batch_size = FLAGS.batch_size # models used in a batch
        '''
        self.ds_train = LMDBData(self.data_ae_train, shuffle=True) #[pcd, axis_angle_single, tw_single, angle_single, rgb_single, style]
        self.x_size_train = self.ds_train.size()
        self.ds_train = LocallyShuffleData(self.ds_train, buffer_size)
        self.ds_train = PrefetchData(self.ds_train, buffer_size, parall_num)
        self.ds_train = LMDBDataPoint(self.ds_train)
        self.ds_train = PrefetchDataZMQ(self.ds_train, parall_num)
        self.ds_train = BatchData(self.ds_train, self.batch_size, remainder=False, use_list=True) # no smaller tail batch
        self.ds_train = RepeatedData(self.ds_train, -1) # -1 for repeat infinite times
        # TestDataSpeed(self.ds_train).start_test() # 164.15it/s
        self.ds_train.reset_state()
        '''
        #raise Exception, 'update size'
        # Training dataflow: shuffle -> prefetch -> batch -> repeat forever.
        self.ds_train = TFRecordData(self.data_ae_train, size = self.tfrecord_train_size) #[pcd, axis_angle_single, tw_single, angle_single, rgb_single, style]
        self.x_size_train = self.ds_train.size()
        self.ds_train = LocallyShuffleData(self.ds_train, buffer_size)
        self.ds_train = PrefetchData(self.ds_train, buffer_size, parall_num)
        self.ds_train = PrefetchDataZMQ(self.ds_train, parall_num)
        #self.ds_train = RepeatedData(self.ds_train, 10) #remove this later
        self.ds_train = BatchData(self.ds_train, self.batch_size, remainder=False, use_list=True) # no smaller tail batch
        self.ds_train = RepeatedData(self.ds_train, -1) # -1 for repeat infinite times
        # TestDataSpeed(self.ds_train).start_test() # 164.15it/s
        self.ds_train.reset_state()
        '''
        #self.ds_test = LMDBData(self.data_pcd_test, shuffle=True) #[pcd, axis_angle_single, tw_single, angle_single, rgb_single, style]
        self.ds_test = LMDBData(self.data_ae_test, shuffle=False) #[pcd, axis_angle_single, tw_single, angle_single, rgb_single, style]
        self.x_size_test = self.ds_test.size()
        #self.ds_test = LocallyShuffleData(self.ds_test, 200)
        self.ds_test = PrefetchData(ds=self.ds_test, nr_prefetch=buffer_size, nr_proc=parall_num)
        self.ds_test = LMDBDataPoint(self.ds_test)
        self.ds_test = PrefetchDataZMQ(ds=self.ds_test, nr_proc=parall_num)
        # all dataset will be iterated
        self.ds_test = BatchData(self.ds_test, self.batch_size, remainder=False, use_list=True)
        self.ds_test = RepeatedData(self.ds_test, -1)
        # TestDataSpeed(self.ds_test).start_test()
        self.ds_test.reset_state()
        '''
        #raise Exception, 'update size'
        # Test dataflow: no shuffling so every record is visited in order.
        self.ds_test = TFRecordData(self.data_ae_test, size=self.tfrecord_test_size) #[pcd, axis_angle_single, tw_single, angle_single, rgb_single, style]
        self.x_size_test = self.ds_test.size()
        self.ds_test = PrefetchData(ds=self.ds_test, nr_prefetch=buffer_size, nr_proc=parall_num)
        self.ds_test = PrefetchDataZMQ(ds=self.ds_test, nr_proc=parall_num)
        # all dataset will be iterated
        #self.ds_test = RepeatedData(self.ds_test, 10)
        self.ds_test = BatchData(self.ds_test, self.batch_size, remainder=False, use_list=True)
        self.ds_test = RepeatedData(self.ds_test, -1)
        self.ds_test.reset_state()
        # Bridge the python dataflows into TF queues (batch_size=1 because each
        # generator element is already a full batch of FLAGS.batch_size).
        self.rgb_batch_train, self.invZ_batch_train, self.mask_batch_train, self.sn_batch_train,\
            self.angles_batch_train, self.vox_batch_train = read_batch_generator\
            (generator=self.ds_train.get_data(), dtypes=[tf.uint8, tf.float32, tf.float32, tf.float32, tf.float32,\
            tf.uint8], \
            shapes=[[self.batch_size, self.resolution, self.resolution, 3], [self.batch_size, self.resolution, \
            self.resolution, 1], [self.batch_size, self.resolution, self.resolution, 1], \
            [self.batch_size, self.resolution, self.resolution, 3],\
            [self.batch_size, 3], [self.batch_size, self.vox_reso, self.vox_reso, self.vox_reso]], batch_size=1, queue_capacity=100)
        self.rgb_batch_test, self.invZ_batch_test, self.mask_batch_test, self.sn_batch_test,\
            self.angles_batch_test, self.vox_batch_test = read_batch_generator\
            (generator=self.ds_test.get_data(), dtypes=[tf.uint8, tf.float32, tf.float32, tf.float32, tf.float32,
            tf.uint8], \
            shapes=[[self.batch_size, self.resolution, self.resolution, 3], [self.batch_size, self.resolution, \
            self.resolution, 1], [self.batch_size, self.resolution, self.resolution, 1], \
            [self.batch_size, self.resolution, self.resolution, 3],\
            [self.batch_size, 3], [self.batch_size, self.vox_reso, self.vox_reso, self.vox_reso]], batch_size=1, queue_capacity=100)
        #self.rgb_batch_test, self.invZ_batch_test, self.mask_batch_test, self.sn_batch_test,\
        #    self.angles_batch_test, self.vox_batch_test = read_batch_generator\
        #    (generator=self.ds_test.get_data(), dtypes=[tf.uint8, tf.float32, tf.float32, tf.float32, tf.float32,
        #    tf.uint8], \
        #    shapes=[[1, self.resolution, self.resolution, 3], [1, self.resolution, \
        #    self.resolution, 1], [1, self.resolution, self.resolution, 1], \
        #    [1, self.resolution, self.resolution, 3],\
        #    [1, 3], [1, self.vox_reso, self.vox_reso, self.vox_reso]], batch_size=self.batch_size, queue_capacity=100)
        # Final tensors: pick the train or test queue via `is_training`,
        # cast to float and fix the static shape for downstream graph building.
        self.rgb_batch = tf.reshape(tf.cond(self.is_training, \
            lambda: tf.to_float(self.rgb_batch_train), \
            lambda: tf.to_float(self.rgb_batch_test)), [self.batch_size, self.resolution, self.resolution, 3])
        ## normalization happens in autoencoder
        self.invZ_batch = tf.reshape(tf.cond(self.is_training, \
            lambda: tf.to_float(self.invZ_batch_train), \
            lambda: tf.to_float(self.invZ_batch_test)), [self.batch_size, self.resolution, self.resolution, 1])
        self.mask_batch = tf.reshape(tf.cond(self.is_training, \
            lambda: tf.to_float(self.mask_batch_train), \
            lambda: tf.to_float(self.mask_batch_test)), [self.batch_size, self.resolution, self.resolution, 1])
        self.sn_batch = tf.reshape(tf.cond(self.is_training, \
            lambda: tf.to_float(self.sn_batch_train), \
            lambda: tf.to_float(self.sn_batch_test)), [self.batch_size, self.resolution, self.resolution, 3])
        self.angles_batch = tf.reshape(tf.cond(self.is_training, \
            lambda: tf.to_float(self.angles_batch_train), \
            lambda: tf.to_float(self.angles_batch_test)), [self.batch_size, 3])
        self.voxel_batch = tf.reshape(tf.cond(self.is_training, \
            lambda: tf.to_float(self.vox_batch_train), \
            lambda: tf.to_float(self.vox_batch_test)), [self.batch_size, self.vox_reso, self.vox_reso, self.vox_reso])
|
997,087 | 817de196caedd7db281bf61b4fc3440fc3244920 | import util
class Node:
    """Search-tree node: a state plus a back-pointer to its parent.

    The class attribute `id` is a global counter that gives every node a
    unique identifier; equality compares ids (node identity), not states.
    """
    id = 0

    def __init__(self, state, parent, action, pathcost):
        self.state = state        # problem state held by this node
        self.parent = parent      # parent Node, or None for the root
        self.action = action      # action that led from parent to this node
        self.pathcost = pathcost  # accumulated cost from the root
        self.id = Node.id
        Node.id = Node.id + 1

    def __eq__(self, other):
        return self.id == other.id

    def __hash__(self):
        # Defining __eq__ disables the default hash; restore one that is
        # consistent with id-based equality.
        return hash(self.id)

    def __str__(self):
        if self.parent is not None: idp = self.parent.id
        else: idp = None
        return '[%d: %s %s %s %d' % (self.id, self.state, idp, self.action, self.pathcost)

    def path(self):
        """Return the list of actions from the root down to this node."""
        n = self
        path = []
        while n.parent is not None:
            path.append(n.action)
            n = n.parent
        path.reverse()
        return path

    def pathR(self):
        """Recursive variant of path(); returns the same action list.

        Fixed: the original called the bare name `pathR(n.parent)` (NameError)
        and never returned the recursive result, so it yielded [] or None.
        """
        if self.parent is None:
            return []
        return self.parent.pathR() + [self.action]

    def contains(self, qeue):
        """True if some node in `qeue` holds the same state as this node."""
        for x in qeue:
            if self.state == x.state: return True
        return False

    def isBetter(self, list):
        """True if `list` has a node with this state but a higher path cost."""
        for x in list:
            if self.state == x.state and self.pathcost < x.pathcost: return True
        return False

    def replace(self, list):
        """Replace the first node in `list` sharing this state with `self`.

        Fixed: the original compared against the undefined name `n.state`
        (NameError).  Return value (False after a replacement, None
        otherwise) is kept as in the original.
        """
        for x in list:
            if self.state == x.state:
                list.remove(x)
                list.append(self)
                return False
if __name__ == "__main__":
    # Smoke test: build a 3-step chain of 'south' moves, then show the final
    # node and its action path.  Prints are parenthesized so the script runs
    # identically under Python 2 (single-argument print) and Python 3.
    n = Node((0, 0), None, None, 0)
    for i in range(3):
        n = Node((n.state[0] + 1, n.state[1]), n, 'south' + str(i), n.pathcost + 1)
    print(n)
    print(n.path())
997,088 | 3ee7ad159cd548a1fe8aab3808fd6c1c9ed6172c | from CsvReader import *
from Network import Network, get_rating
from random import shuffle
from NetworkCupy import NetworkCupy
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import time
def test_network_automatically(network):
    """Evaluate `network` on the held-out wine data and print its accuracy.

    Mirrors the training split: 4/5 of poor/good wines form the training
    portion, the remainder is tested here, with each good wine duplicated so
    the two classes are balanced.
    """
    wines = get_normalized_data('../data/winequality-red.csv')
    poor_wines = get_poor_wines(wines)   # quality below 6.5
    good_wines = get_good_wines(wines)   # quality above 6.5
    training_size = 4 / 5
    nr_of_poor_wines = int(training_size * len(poor_wines))
    nr_of_good_wines = int(training_size * len(good_wines))
    # How many times each good wine is cloned to balance the class sizes.
    copies_of_good_wines = int(nr_of_poor_wines / nr_of_good_wines)
    testing_set = [copy.deepcopy(sample)
                   for sample in get_testing_data(nr_of_poor_wines, len(poor_wines), poor_wines)]
    for sample in get_testing_data(nr_of_good_wines, len(good_wines), good_wines):
        testing_set.extend(copy.deepcopy(sample) for _ in range(copies_of_good_wines))
    testing_input_set = []
    testing_output_set = []
    seperate_inputs_and_outputs(testing_set, testing_input_set, testing_output_set)
    correct = 0
    wrong = 0
    for inputs, expected in zip(testing_input_set, testing_output_set):
        if get_rating(network.feed_forward(inputs)) == get_rating(expected[0]):
            correct += 1
        else:
            wrong += 1
    print("Tested " + str(len(testing_input_set)) + " datasets.")
    print("Result: " + str(correct / len(testing_input_set) * 100) + " % of good predictions")
def test_network_manually(network):
    """Prompt the user for the 11 wine features, normalize the row against the
    dataset statistics and print the network's predicted rating."""
    feature_names = ("fixed_acidity", "volatile_acidity", "citric acid",
                     "residual sugar", "chlorides", "free sulfur dioxide",
                     "total sulfur dioxide", "density", "pH", "sulphates",
                     "alcohol")
    # Same prompts as before ("<name> = "), collected in declaration order.
    row = [float(input(name + " = ")) for name in feature_names]
    data = get_whole_data('../data/winequality-red.csv')
    normalize_row(row, data)   # normalizes `row` in place
    result = network.feed_forward(row)
    print(get_rating(result))
def train_network(use_cupy, lr, n_epoch, filename):
    """Train a Network (or NetworkCupy) on the red-wine dataset.

    Trains for `n_epoch` epochs at learning rate `lr`, evaluating test-set
    accuracy every 20 epochs, saves an accuracy-vs-epochs plot to
    ../diagrams/<filename>.png and returns the trained network.
    """
    wines = get_normalized_data('../data/winequality-red.csv')
    poor_wines = get_poor_wines(wines) # only wines with quality less than 6.5
    good_wines = get_good_wines(wines) # only wines with quality greater than 6.5
    training_size = 4 / 5 # fraction of wines being a training set
    training_input_set = []
    training_output_set = []
    training_set = []
    nr_of_poor_wines = int(
        training_size * len(poor_wines)) # nr of training bad wines is a fraction of whole set of poor wines
    nr_of_good_wines = int(
        training_size * len(good_wines)) # nr of training good wines is a fraction of whole set of good wines
    copies_of_good_wines = int(
        nr_of_poor_wines / nr_of_good_wines) # amount of copies of good wines so the amount of good and poor wines is the same
    for data in get_training_data(nr_of_poor_wines, poor_wines):
        training_set.append(copy.deepcopy(data))
    for data in get_training_data(nr_of_good_wines, good_wines):
        for i in range(0, copies_of_good_wines): # here we clone good wine few times.
            training_set.append(copy.deepcopy(data))
    shuffle(training_set)
    seperate_inputs_and_outputs(training_set, training_input_set, training_output_set)
    # Remaining wines (the 1/5 tail) become the test set, good wines cloned
    # the same way so test accuracy is measured on balanced classes.
    testing_input_set = []
    testing_output_set = []
    testing_set = []
    for data in get_testing_data(nr_of_poor_wines, len(poor_wines),
                                 poor_wines): # we take remaining wines to be a testing set
        testing_set.append(copy.deepcopy(data))
    for data in get_testing_data(nr_of_good_wines, len(good_wines), good_wines):
        for i in range(0, copies_of_good_wines):
            testing_set.append(copy.deepcopy(data))
    seperate_inputs_and_outputs(testing_set, testing_input_set, testing_output_set)
    if not use_cupy:
        network = Network()
    else:
        network = NetworkCupy()
    # Pristine copy of the freshly initialised weights; training restarts from it.
    copy_network = copy.deepcopy(network)
    step = 20  # evaluate on the test set every `step` epochs
    network = copy.deepcopy(copy_network)
    network.learningRate = lr
    times = []   # wall-clock seconds per evaluation chunk (collected, unused)
    ratios = []  # test-set accuracy in % at each evaluation point
    epochs = []  # epoch index of each evaluation point
    for i in range(0, n_epoch, step):
        start = time.time()
        for j in range(0, step):
            loss_sum = 0
            for k in range(0, len(training_input_set)):
                result = network.feed_forward(training_input_set[k])
                network.backward_propagation(training_output_set[k][0], result, training_input_set[k])
                loss_sum += abs(result - training_output_set[k][0])
        wrong = 0
        correct = 0
        for l in range(0, len(testing_input_set)):
            result = network.feed_forward(testing_input_set[l])
            if get_rating(result) == get_rating(testing_output_set[l][0]):
                correct += 1
            else:
                wrong += 1
        end = time.time()
        times.append(end - start)
        epochs.append(i)
        ratios.append(correct / len(testing_input_set) * 100)
        print(i)
    plt.plot(epochs, ratios, linestyle="-", marker='o')
    plt.xlabel("epochs")
    plt.ylabel("ratio")
    plt.suptitle('train-set ' + str(training_size) + ' learning rate: ' + str(lr), fontSize=12)
    plt.savefig('../diagrams/' + str(filename) + '.png')
    return network
|
997,089 | 6b4479bbdfbf2ff249a546f645fd241b905a5a27 | from django.urls import path
from . import views
# Namespace for URL reversing, e.g. reverse("shop:index").
app_name="shop"
urlpatterns=[
    path("",views.index, name="index"),
    # <int:value> / <int:id> are captured as keyword arguments on the views.
    path("buy/<int:value>", views.buy, name="buy"),
    path("addtocart/<int:id>", views.addtocart, name="addtocart"),
    path("removefromCart/<int:id>", views.removefromCart, name="removefromCart")
]
997,090 | 2fb45c5e366b8e19ce1efe7d7e61ef8173d08efa | import sys
import shutil
import re
import string
import os
from subprocess import Popen, PIPE
####
# Fill a jsub XML template with the run number and file numbers, then submit it.
if len(sys.argv) != 5: #remember, the script name counts as an argument!
    print 'runjob.py <xml template> <runtype> <run number> <fileNumber>'
    print '<runtype> can be eviotolcio, recon, dqm, dst'
    sys.exit()
####
xmlfile=sys.argv[1]
runtype=sys.argv[2]
run=sys.argv[3]
nums=sys.argv[4]
#get the missing jobs
#cmd ='./getfilenumbers.sh ' +runtype +' '+str(run)
#print cmd
#p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
#nums=p.stdout.readline()
#nums = "0"
print nums
sys.stdout.flush()
# "foobar" is the sentinel emitted when there is nothing left to run.
if nums == "foobar" :
    print "Nothing to run"
    sys.stdout.flush()
    sys.exit()
####
#parse the xml template
tmpfile = 'temp.xml'
shutil.copy(xmlfile, tmpfile)
with open(tmpfile,"r") as tmp:
    lines = tmp.readlines()
with open(tmpfile,"w") as tmp:
    for line in lines:
        # "666" is the placeholder value in the template's filenum/run fields.
        if re.search("List .*\"filenum\"", line) != None:
            line=line.replace("666",str(nums))
            print line.rstrip()
        if re.search("Variable .*\"run\"", line) != None:
            line=line.replace("666",str(run))
            print line.rstrip()
        tmp.write(line)
os.system("jsub -xml temp.xml")
|
997,091 | 18c045b888e690532cac8b70669dc1e286713800 | """
* Copyright 2019 OpenStack Foundation
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
from django.db import models
from .member import Member
from .summit_event import SummitEvent
class EventFeedback(models.Model):
    """Attendee feedback (rating + note) for a summit event.

    Mapped onto the legacy `SummitEventFeedback` table; column names are set
    explicitly via `db_column`.
    """
    id = models.IntegerField(db_column='ID', primary_key=True)
    rate = models.FloatField(db_column='Rate')
    note = models.TextField(db_column='Note')
    approved = models.BooleanField(db_column='Approved')
    event = models.ForeignKey(
        SummitEvent, related_name='feedback', db_column='EventID', on_delete=models.CASCADE)
    owner = models.ForeignKey(
        Member, related_name='feedback', db_column='OwnerID', on_delete=models.CASCADE)

    def __str__(self):
        # __str__ must return a str; `id` is an IntegerField, so returning it
        # directly (as the original did) raises TypeError when rendered.
        return str(self.id)

    class Meta:
        app_label = 'reports'
        db_table = 'SummitEventFeedback'
997,092 | d45c46d5badc8dffa7e81ffdb6e1e5d29a950782 | """
2019-03-08 https://www.geeksforgeeks.org/exploratory-data-analysis-in-python/
2019-03-08 pip install pandas
"""
def dataAnalysis():
    """Download the Chile survey dataset and return its summary statistics.

    Returns the DataFrame produced by ``DataFrame.describe()``.  The original
    computed ``describe()`` and discarded the result, so the function did
    nothing observable; returning it keeps callers that ignore the return
    value working while making the result usable.
    """
    import pandas as pd
    df = pd.read_csv("https://vincentarelbundock.github.io/Rdatasets/csv/car/Chile.csv")
    return df.describe()
if __name__ == '__main__':
    # Run the analysis when executed as a script.
    dataAnalysis()
997,093 | 60ae685acf1ff1b61ab5913c8954c82c8c65223c | # -*- coding, utf-8 -*-
from collections import OrderedDict
# Chord quality -> semitone offsets from the root (12-TET).
# Offsets above 12 are extensions: 13 = b9, 14 = 9, 15 = #9, 17 = 11,
# 18 = #11, 20 = b13, 21 = 13.  Ordered so longer/more specific names can
# be matched deliberately.
QUALITY_DICT = OrderedDict((
    # chords consist of 2 notes
    ('5', (0, 7)),
    ('sus', (0, 7)),
    # 3 notes
    ('', (0, 4, 7)),
    ('maj', (0, 4, 7)),
    ('m', (0, 3, 7)),
    ('min', (0, 3, 7)),
    ('dim', (0, 3, 6)),
    ('aug', (0, 4, 8)),
    ('sus2', (0, 2, 7)),
    ('sus4', (0, 5, 7)),
    # 4 notes
    ('6', (0, 4, 7, 9)),
    ('7', (0, 4, 7, 10)),
    ('7-5', (0, 4, 6, 10)),
    ('7b5', (0, 4, 6, 10)),
    ('7+5', (0, 4, 8, 10)),
    ('7#5', (0, 4, 8, 10)),
    ('7sus4', (0, 5, 7, 10)),
    ('m6', (0, 3, 7, 9)),
    ('m7', (0, 3, 7, 10)),
    ('m7-5', (0, 3, 6, 10)),
    ('dim6', (0, 3, 6, 9)),
    ('M7', (0, 4, 7, 11)),
    ('maj7', (0, 4, 7, 11)),
    ('M7+5', (0, 4, 8, 11)),
    ('maj7#5', (0, 4, 8, 11)),
    ('maj7-5', (0, 4, 6, 11)),
    ('maj7b5', (0, 4, 6, 11)),
    ('mM7', (0, 3, 7, 11)),
    ('add9', (0, 4, 7, 14)),
    ('madd9', (0, 3, 7, 14)),
    ('2', (0, 4, 7, 14)),
    ('add11', (0, 4, 7, 17)),
    ('4', (0, 4, 7, 17)),
    # 5 notes (a few altered entries below carry 6 intervals)
    ('6/9', (0, 4, 7, 9, 14)),
    ('9', (0, 4, 7, 10, 14)),
    ('m9', (0, 3, 7, 10, 14)),
    ('M9', (0, 4, 7, 11, 14)),
    ('maj9', (0, 4, 7, 11, 14)),
    ('9sus4', (0, 5, 7, 10, 14)),
    ('7-9', (0, 4, 7, 10, 13)),
    ('7b9', (0, 4, 7, 10, 13)),
    ('7+9', (0, 4, 7, 10, 15)),
    ('7#9', (0, 4, 7, 10, 15)),
    ('9-5', (0, 4, 6, 10, 14)),
    ('9b5', (0, 4, 6, 10, 14)),
    ('9+5', (0, 4, 8, 10, 14)),
    ('9#5', (0, 4, 8, 10, 14)),
    ('7#9b5', (0, 4, 6, 10, 15)),
    ('7#9#5', (0, 4, 8, 10, 15)),
    ('7b9b5', (0, 4, 6, 10, 13)),
    ('7b9#5', (0, 4, 8, 10, 13)),
    ('11', (0, 7, 10, 14, 17)),
    ('7+11', (0, 4, 7, 10, 18)),
    ('7#11', (0, 4, 7, 10, 18)),
    ('7b9#9', (0, 4, 7, 10, 13, 15)),
    ('7b9#11', (0, 4, 7, 10, 13, 18)),
    ('7#9#11', (0, 4, 7, 10, 15, 18)),
    ('7-13', (0, 4, 7, 10, 20)),
    ('7b13', (0, 4, 7, 10, 20)),
    # 6 notes
    ('7b9b13', (0, 4, 7, 10, 13, 17, 20)),
    ('9+11', (0, 4, 7, 10, 14, 18)),
    ('9#11', (0, 4, 7, 10, 14, 18)),
    ('13', (0, 4, 7, 10, 14, 21)),
    ('13-9', (0, 4, 7, 10, 13, 21)),
    ('13b9', (0, 4, 7, 10, 13, 21)),
    ('13+9', (0, 4, 7, 10, 15, 21)),
    ('13#9', (0, 4, 7, 10, 15, 21)),
    ('13+11', (0, 4, 7, 10, 18, 21)),
    ('13#11', (0, 4, 7, 10, 18, 21)),
))
# Scale name -> semitone offsets from the tonic (12-TET).
SCALE_QUALITY_DICT = OrderedDict((
    ('maj', (0,2,4,5,7,9,11)),
    ('min', (0,2,3,5,7,8,10)),
    #pentatonic scales
    ('majpenta', (0, 2, 4, 7, 9)),
    ('minpenta', (0, 3, 5, 7, 10)),
    ##blues scales
    ('majblues', (0, 2, 3, 4, 7, 9)),
    ('minblues', (0, 3, 5, 6, 7, 10)),
))
# Guitar tuning name -> open-string notes, lowest (6th) string first.
TUNING_DICT = OrderedDict((
    ('standard', ('E', 'A', 'D', 'G', 'B', 'E')),
    ('dadgad', ('D', 'A', 'D', 'G', 'A', 'D')),
    ('dsus4', ('D', 'A', 'D', 'G', 'A', 'D')),
    ('dropd', ('D', 'A', 'D', 'G', 'B', 'E')),
    ('openc', ('C', 'G', 'C', 'G', 'C', 'E')),
    # fixed: the last note carried a stray trailing space ('D ') that broke
    # note lookups and equality comparisons
    ('opendm', ('D', 'A', 'D', 'F', 'A', 'D')),
    ('gsus4', ('D', 'G', 'D', 'G', 'C', 'D')),
    ('opengm', ('D', 'G', 'D', 'G', 'Bb', 'D')),
    ('openg', ('D', 'G', 'D', 'G', 'B', 'D')),
    ('opend', ('D', 'A', 'D', 'F#', 'A', 'D')),
    ('opend6', ('D', 'A', 'D', 'F#', 'B', 'D')),
    ('opena', ('E', 'A', 'C#', 'E', 'A', 'E')),
    ('eadd11', ('E', 'A', 'E', 'G#', 'A', 'E')),
))
|
997,094 | 709f5fcad66648da32db04f1ce6d87686b2c4321 | import csv
from matplotlib import pyplot as plt
import matplotlib
import random
class ResultReader():
    """Parse Results.csv (first row: dates, later rows appear column-wise as
    one price series per item) into per-item (dates, prices) pairs.

    NOTE(review): the cell formats assumed below (quoted integers like '"123"',
    dates containing '/' at fixed positions) are inferred from the slicing —
    confirm against an actual Results.csv before changing the parsing.
    """
    def __init__(self, ):
        with open('Results.csv') as csvfile:
            readCSV = csv.reader(csvfile, delimiter=',')
            rows_list = []
            count = 0
            # Transpose the CSV: the first row creates one [header, []] slot
            # per column, every later row appends each cell to its column.
            for row in readCSV:
                for index, item in enumerate(row):
                    if count < 1:
                        rows_list.append([item, []])
                    else:
                        rows_list[index][1].append(row[index])
                #print(count)
                count += 1
            print("Rows list: ", rows_list)
        self.dates= []
        self.example_prices_all = []
        self.output_dict = {}
        # Columns from index 2 onward hold price series; cells longer than two
        # characters are quote-wrapped integers, everything else counts as 0.
        for index, item in enumerate(rows_list):
            if index > 1:
                example_prices = []
                for i in rows_list[index][1]:
                    if len(i) > 2:
                        string = i[1:-1]
                        num = int(string)
                    else: num = 0
                    example_prices.append(num)
                # print(example_prices)
                self.example_prices_all.append([rows_list[index][0],example_prices])
                self.dates.append(rows_list[index][0])
        print("Actual Dates", self.dates)
        example_dates = []
        # Extract a numeric date from each cell in the first data column;
        # the '/' position decides how many digits to slice out.
        for i in rows_list[0][1]:
            if i[4] == '/':
                string = i[2:4]
            elif i[6] == '/':
                #print("1st: ",i[2:6])
                string = i[2:6]
                #print("2nd: ",string)
            num = float(string)
            example_dates.append(num)
        print("Example Prices All: ", self.example_prices_all)
        print("Example Dates: ", example_dates)
        font = {'family': 'normal',
                'weight': 'bold',
                'size': 5}
        matplotlib.rc('font', **font)
        print("Dates: {}".format(example_dates))
        # Map column position -> numeric date for the output/plot methods.
        self.dates_dict = {}
        for index,item in enumerate(example_dates):
            self.dates_dict[index] = item
        print("Dates dictionary: {}".format(self.dates_dict))
        # self.plotting()
        self.output()

    def output(self):
        """Build {series index: [dates, prices]} skipping zero cells; an all-zero
        series stops the scan (remaining series keep their empty init value)."""
        # Initialise the output
        for index, item in enumerate(self.example_prices_all):
            self.output_dict[index] = [[],[]]
        # Populate the output
        for index, item in enumerate(self.example_prices_all):
            # print("index: ", index)
            tempDates = []
            tempPrices = []
            # print(item[1])
            emptyflag = 1
            for ind, ite in enumerate(item[1]):
                if ite != 0:
                    tempDates.append(self.dates_dict[ind])
                    tempPrices.append(ite)
                    emptyflag = 0
            if emptyflag == 1:
                break
                # raise Exception("Sorry there is an empty to date to be removed: {}".format(self.dates_dict[index]))
            self.output_dict[index] = [tempDates, tempPrices]
        print("Output Dictionary", self.output_dict)
        return self.output_dict

    def plotting(self):
        """Plot every price series against its dates (zero cells skipped)."""
        for i in self.example_prices_all:
            tempDates = []
            tempPrices = []
            for ind, ite in enumerate(i[1]):
                if ite != 0:
                    tempDates.append(self.dates_dict[ind])
                    tempPrices.append(ite)
            plt.plot(tempDates, tempPrices, label = i[0])
            # for x, y in zip(example_dates, i[1]):
            #     label = i[0]
            #
            #     plt.annotate(label, # this is the text
            #                  (x, y), # this is the point to label
            #                  textcoords="offset points", # how to position the text
            #                  xytext=(random.randrange(0,20,10), 0), # distance from text to points (x,y)
            #                  ha='center') # horizontal alignment can be left, right or center
            #
            # plt.plot(example_dates, example_prices)
        # plt.legend()
        plt.show()
if __name__ == "__main__":
    # Constructing ResultReader already parses the CSV (and calls output()
    # once); plotting/output here are for interactive inspection.
    Reader = ResultReader()
    Reader.plotting()
    output = Reader.output()
|
997,095 | 039a63c0168e6db3603f82ba8893ba8295c37f19 | from django.contrib import admin
from models import User
# Register your models here.
admin.site.register(User) # Expose the User model in the Django admin site
997,096 | cefaba81e67017355b0fd9a3abf3fd64ec69532c | import rpyc
from rpyc.utils.server import ThreadedServer
from rpyc.utils.authenticators import TlsliteVdbAuthenticator
import thread, time
from nose.tools import raises
from nose import SkipTest
try:
from tlslite.api import TLSError
except ImportError:
raise SkipTest("tlslite not installed")
# Username -> password pairs accepted by the TLS authenticator under test.
users = {
    "foo" : "bar",
    "spam" : "eggs",
}
class Test_tlslite(object):
    """nose tests for TLS-authenticated rpyc connections (tlslite VDB auth)."""
    def setup(self):
        authenticator = TlsliteVdbAuthenticator.from_dict(users)
        self.server = ThreadedServer(rpyc.SlaveService, hostname = "localhost",
            authenticator = authenticator, auto_register = False)
        self.server.logger.quiet = True
        # Serve in a background thread so this test thread can connect as a client.
        thread.start_new(self.server.start, ())
        time.sleep(1) # make sure the server has initialized, etc.
    def teardown(self):
        self.server.close()
    def test_successful(self):
        # Valid credentials: the connection should come up and be usable.
        c = rpyc.classic.tlslite_connect("localhost", "spam", "eggs",
            port = self.server.port)
        print ("server credentials = %r" % (c.root.getconn()._config["credentials"],))
        print (c.modules.sys)
        c.close()
    def _expect_fail(self, username, password):
        # Helper: attempt a connection that the caller expects to raise TLSError.
        print ("expecting %s:%s to fail" % (username, password))
        c = rpyc.classic.tlslite_connect("localhost", username, password,
            port = self.server.port)
    @raises(TLSError)
    def test_wrong_tokens(self):
        # Known user, wrong password.
        self._expect_fail("spam", "bar")
    @raises(TLSError)
    def test_wrong_tokens2(self):
        # Entirely unknown user.
        self._expect_fail("bloop", "blaap")
|
class AutoEncoder(FeedForwardNN):
    """Auto-encoder specialisation of FeedForwardNN backed by tf.keras.Sequential.

    NOTE(review): the original block was syntactically incoherent
    (`class AutoEncoder: FeedForwardNN`, a bare `__encoding__` statement,
    an undefined name `l`, and `self.__model__compile()` missing the
    attribute dot); this is the minimal coherent reconstruction — confirm
    intent against the rest of the project.
    """
    def __init__(self, encoding):
        # Underlying keras model; `encoding` describes the bottleneck layer.
        self.__model__ = tf.keras.Sequential()
        self.__encoding__ = encoding

    def addLayer(self, layer_type):
        """Append `layer_type` to the model (the 'encoding' marker is skipped)
        and recompile so the model reflects the new topology."""
        if layer_type != 'encoding':
            self.__model__.add(layer_type)
            self.__model__.compile()
|
997,098 | b36f3e793a87fe09e6a4921be5d8aa233c96ba61 | from pprint import pprint
import requests
# VK API client
class VkUser:
    url = 'https://api.vk.com/method/'

    def __init__(self, token, version):
        # Common query parameters attached to every API call.
        self.params = {
            'access_token': token,
            'v': version
        }

    # Resolve a screen name to a numeric account id when no id was given
    def search_id(self, user_ids):
        search_id_url = self.url + 'users.search'
        search_id_params = {
            'q': user_ids,
            # BUG FIX: the original passed the builtin function `id` as the
            # value; the API expects the field name string 'id'.
            'fields': 'id'
        }
        req = requests.get(search_id_url, params={**self.params, **search_id_params}).json()
        if req['response']['count'] == 0:
            print('Такого аккаунта не существует')
            # BUG FIX: the original returned the `exit` builtin object itself;
            # abort explicitly as intended.
            raise SystemExit
        else:
            owner_id = req['response']['items'][0]['id']
            return owner_id

    # Fetch profile-album photos (with likes info) for the given account
    def search_photos(self, owner_id, sorting=0):
        photos_search_url = self.url + 'photos.get'
        photos_search_params = {
            'count': 50,
            'owner_id': owner_id,
            'extended': 1,          # include likes in the response
            'album_id': 'profile'
        }
        req = requests.get(photos_search_url, params={**self.params, **photos_search_params}).json()
        return req['response']['items']
# Yandex.Disk client
class YaUploader:
    API_BASE_URL = "https://cloud-api.yandex.net:443"

    def __init__(self, token: str):
        self.token = token
        self.headers = {
            'Authorization': self.token
        }

    # Create a new folder on the disk; the name is asked interactively.
    def new_folder(self):
        name_folder = input(f'Как назвать папку? ')
        req = requests.put(self.API_BASE_URL + '/v1/disk/resources?path=' + name_folder, headers=self.headers)
        # print(req)
        # 409 = folder already exists; fall back to "<name>(1)" once.
        if req.status_code == 409:
            name_folder = name_folder + '(1)'
            req = requests.put(self.API_BASE_URL + '/v1/disk/resources?path=' + name_folder, headers=self.headers)
            print(f'Такая папка уже существует, документы будут загружены в папку {name_folder}')
        return name_folder

    # Ask the disk to download a file into the folder from a remote URL.
    def upload(self, name_folder, name_file, path_to_file: str):
        name_folder_file = f'{name_folder}/{name_file}.jpeg'
        params = {
            'path': name_folder_file,
            'url' : path_to_file
        }
        requests.post(self.API_BASE_URL + '/v1/disk/resources/upload',
                      params=params, headers=self.headers)
if __name__ == "__main__":
def VK_seach_photo_Yandex_upload():
# Получение ТОКЕНА. Если нет прикрепленного файла, используем ручной ввод.
# with open('token_VK.txt', 'r') as file_object:
# token_VK = file_object.read().strip()
token_VK = input('Введите свой ТОКЕН для ВК ')
# with open('token_yandex.txt', 'r') as file_object:
# token_yandex = file_object.read().strip()
token_yandex = input('Введите свой ТОКЕН для Яндекс Диска ')
# Работа по поиску фото профиля
vk_client = VkUser(token_VK, '5.131')
user_ids = input('Введите id или имя аккаунта, чьи фото мы копируем: ')
if user_ids.isdigit() == True:
owner_id = int(user_ids)
else:
owner_id = vk_client.search_id(user_ids)
print(f'Ищем фото аккаунта с id {owner_id}')
photos_json = vk_client.search_photos(owner_id)
photos_count = len(photos_json)
# pprint(photos_json)
# Запрашиваю количество фоток для скачивания
print(f'У аккаунта {owner_id} в профиле {photos_count} фотографий')
photos_count_need = int(input('Сколько фотографий мы хотим скопировать: '))
if photos_count_need < photos_count:
photos_count = photos_count_need
else:
print('Скопируем сколько есть, больше никак')
i = 0
# Создаю новый json по образцу
new_json = []
while i < photos_count:
photos_dict = {}
likes = photos_json[i]['likes']['count']
# Если лайки совпадают, то мы добавляем дату
for x in new_json:
if likes == x['file name']:
likes = str(photos_json[i]['likes']['count']) + '.' + str(photos_json[i]['date'])
size_len = len(photos_json[i]['sizes']) - 1
size = photos_json[i]['sizes'][size_len]
photos_dict['file name'] = likes
photos_dict['size'] = size
new_json.append(photos_dict)
i += 1
# При необходимости можем посмотреть список фотографий и информацию по размеру:
# pprint(new_json)
# Работа по загрузке фото на Яндекс Диск
uploader = YaUploader(token_yandex)
name_folder = uploader.new_folder()
# Загружаю фото поочереди по ссылке из созданного json файла
x = 0
while x < photos_count:
name_file = new_json[x]['file name']
path_to_file = new_json[x]['size']['url']
uploader.upload(name_folder, name_file, path_to_file)
x += 1
print(f'Файл {name_file} загружен')
print('ГОТОВО, Спасибо за внимание!')
# Вызов функции
VK_seach_photo_Yandex_upload()
|
997,099 | 4f348eca3fe0fd0b2fc70986a8ed8527d6b7c172 | import numpy as np
import openmdao.api as om
import dymos as dm
import matplotlib.pyplot as plt
from infection import Infection
pop_total = 1.0
infected0 = 0.01
ns = 50
p = om.Problem(model=om.Group())
traj = dm.Trajectory()
p.model.add_subsystem('traj', subsys=traj)
phase = dm.Phase(ode_class=Infection,
transcription=dm.GaussLobatto(num_segments=ns,
order=3))
p.model.linear_solver = om.DirectSolver()
phase.set_time_options(fix_initial=True, duration_bounds=(200.0, 301.0), targets=['t'])
#phase.set_time_options(fix_initial=True, fix_duration=True)
ds = 1e-2
phase.add_state('S', fix_initial=True, rate_source='Sdot', targets=['S'], lower=0.0,
upper=pop_total, ref=pop_total/2, defect_scaler = ds)
phase.add_state('E', fix_initial=True, rate_source='Edot', targets=['E'], lower=0.0,
upper=pop_total, ref=pop_total/2, defect_scaler = ds)
phase.add_state('I', fix_initial=True, rate_source='Idot', targets=['I'], lower=0.0,
upper=pop_total, ref=pop_total/2, defect_scaler = ds)
phase.add_state('R', fix_initial=True, rate_source='Rdot', targets=['R'], lower=0.0,
upper=pop_total, ref=pop_total/2, defect_scaler = ds)
phase.add_state('D', fix_initial=True, rate_source='Ddot', targets=['D'], lower=0.0,
upper=pop_total, ref=pop_total/2, defect_scaler = ds)
phase.add_state('int_sigma', rate_source='sigma_sq', lower=0.0, defect_scaler = 1e-2)
# Optimizer: pyOptSparse driving IPOPT with a limited-memory quasi-Newton
# Hessian.  The commented alternatives (ScipyOptimizeDriver, SNOPT and its
# tolerances) are retained for reference when switching optimizers.
#p.driver = om.ScipyOptimizeDriver()
p.driver = om.pyOptSparseDriver()
#p.driver.options['optimizer'] = 'SNOPT'
#p.driver.opt_settings['Major feasibility tolerance'] = 1.0E-8
#p.driver.opt_settings['Major optimality tolerance'] = 1.0E-5
#p.driver.opt_settings['iSumm'] = 6
p.driver.options['optimizer'] = 'IPOPT'
p.driver.opt_settings['hessian_approximation'] = 'limited-memory'
# p.driver.opt_settings['mu_init'] = 1.0E-2
p.driver.opt_settings['nlp_scaling_method'] = 'user-scaling'
p.driver.opt_settings['print_level'] = 5
p.driver.opt_settings['linear_solver'] = 'mumps'
# Let OpenMDAO auto-detect the sparsity pattern of the total derivatives.
p.driver.declare_coloring()
# Epidemiological rate constants, fed to the ODE as static input parameters.
# Presumably the standard SEIRD rates (beta: transmission, alpha: incubation,
# gamma: recovery, epsilon: loss of immunity, mu: mortality) — TODO confirm
# against the Infection ODE in infection.py.
beta = 0.25
gamma = 0.95 / 14.0
alpha = 1.0 / 5.0
epsilon = 1.0 / 365.
mu = (1 - 14*gamma) / 14.0
lim = 0.15  # NOTE(review): appears unused anywhere in this script
phase.add_input_parameter('alpha', targets=['alpha'], dynamic=True, val=alpha)
phase.add_input_parameter('beta', targets=['beta'], dynamic=True, val=beta)
phase.add_input_parameter('gamma', targets=['gamma'], dynamic=True, val=gamma)
phase.add_input_parameter('epsilon', targets=['epsilon'], dynamic=True, val=epsilon)
phase.add_input_parameter('mu', targets=['mu'], dynamic=True, val=mu)
# just converge ODEs
# Minimizing final time with no free controls: this run is effectively a
# feasibility solve of the collocation defects (the "no mitigation" baseline).
phase.add_objective('time', loc='final')
phase.add_timeseries_output('theta')
traj.add_phase(name='phase0', phase=phase)
p.setup(check=True)
# Initial guesses: start at t=0 with a 200-day duration; each state is
# linearly interpolated between its endpoint guesses at the state-input
# nodes (almost everyone susceptible, 1% exposed, the rest zero).
p.set_val('traj.phase0.t_initial', 0)
p.set_val('traj.phase0.t_duration', 200)
p.set_val('traj.phase0.states:S',
          phase.interpolate(ys=[pop_total - infected0, 0], nodes='state_input'))
p.set_val('traj.phase0.states:E',
          phase.interpolate(ys=[infected0, 0], nodes='state_input'))
p.set_val('traj.phase0.states:I',
          phase.interpolate(ys=[0, pop_total/3], nodes='state_input'))
p.set_val('traj.phase0.states:R',
          phase.interpolate(ys=[0, pop_total/3], nodes='state_input'))
p.set_val('traj.phase0.states:D',
          phase.interpolate(ys=[0, pop_total/3], nodes='state_input'))
# Solve the collocation problem, then explicitly re-integrate the ODE
# (traj.simulate) to verify the solution, and pull out the time histories.
p.run_driver()
sim_out = traj.simulate()
t = sim_out.get_val('traj.phase0.timeseries.time')
s = sim_out.get_val('traj.phase0.timeseries.states:S')
e = sim_out.get_val('traj.phase0.timeseries.states:E')
i = sim_out.get_val('traj.phase0.timeseries.states:I')
r = sim_out.get_val('traj.phase0.timeseries.states:R')
d = sim_out.get_val('traj.phase0.timeseries.states:D')
int_sigma = sim_out.get_val('traj.phase0.timeseries.states:int_sigma')
# Final value of the integrated sigma_sq state (labeled "objective" even
# though the driver objective above is final time).
print("objective:", int_sigma[-1])
# theta is a timeseries output of the ODE; its meaning is defined in
# infection.py — plotted below alongside the constant beta.
theta = sim_out.get_val('traj.phase0.timeseries.theta')
# Figure 1: five stacked subplots, one per SEIRD compartment vs. time.
# NOTE(review): plt.title is issued before the first plt.subplot call, so it
# lands on an implicit initial axes rather than the 5x1 grid — confirm this
# renders as intended.
fig = plt.figure(figsize=(10, 8))
plt.title('baseline simulation - no mitigation')
plt.subplot(511)
plt.plot(t, s, 'orange', lw=2, label='susceptible')
plt.legend(), plt.xticks(np.arange(0, t[-1], 50), " ")
plt.subplot(512)
plt.plot(t, e, 'k', lw=2, label='exposed')
plt.legend(), plt.xticks(np.arange(0, t[-1], 50), " ")
plt.subplot(513)
plt.plot(t, i, 'teal', lw=2, label='infected')
plt.legend(), plt.xticks(np.arange(0, t[-1], 50), " ")
plt.subplot(514)
plt.plot(t, r, 'g', lw=2, label='recovd/immune')
plt.legend(), plt.xticks(np.arange(0, t[-1], 50), " ")
plt.subplot(515)
plt.plot(t, d, lw=2, label='dead')
plt.xlabel('days')
plt.legend()
# Figure 2: all compartments overlaid as population fractions (top), plus
# the constant beta and the theta(t) timeseries output (bottom).
fig = plt.figure(figsize=(10, 5))
plt.subplot(211)
print("dead:", d[-1])
plt.title('baseline simulation - no mitigation')
plt.plot(t, s/pop_total, 'orange', lw=2, label='susceptible')
plt.plot(t, e/pop_total, 'k', lw=2, label='exposed')
plt.plot(t, i/pop_total, 'teal', lw=2, label='infected')
plt.plot(t, r/pop_total, 'g', lw=2, label='recovd/immune')
plt.plot(t, d/pop_total, lw=2, label='dead')
plt.xlabel('days')
plt.legend()
plt.subplot(212)
plt.plot(t, len(t)*[beta], lw=2, label='$\\beta$')
plt.plot(t, theta, lw=2, label='$\\theta$(t)')
plt.legend()
plt.show()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.