index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
25,400 | da62bdb8f38b5db679eb1758ff129985fce7e87d | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 15 13:22:39 2017
@author: Gourav
"""
''' '''
import openpyxl

# LIWC category names, in the exact column order the per-essay counts are
# written to the output sheet (dict insertion order is preserved).
CATEGORIES = (
    "PREPOSITION", "NUMBER", "AFFECT", "POSEMO", "POSFEEL", "OPTIM", "NEGEMO",
    "ANX", "ANGER", "SAD", "PRONOUN", "COGMECH", "CAUSE", "INSIGHT", "DISCREP",
    "INHIB", "TENTAT", "CERTAIN", "SENSES", "SEE", "HEAR", "I", "FEEL",
    "SOCIAL", "COMM", "OTHREF", "FRIENDS", "FAMILY", "HUMANS", "TIME", "PAST",
    "PRESENT", "WE", "FUTURE", "SPACE", "UP", "DOWN", "INCL", "EXCL", "MOTION",
    "OCCUP", "SCHOOL", "JOB", "SELF", "ACHEIVE", "LEISURE", "HOME", "SPORTS",
    "TV", "MUSIC", "MONEY", "METAPH", "RELIG", "DEATH", "YOU", "PHYSICAL",
    "BODY", "SEXUAL", "EATING", "SLEEP", "GROOM", "SWEAR", "NONFL", "FILLERS",
    "OTHER", "NEGATE", "ASSENT", "ARTICLE",
)
CATEGORY_SET = frozenset(CATEGORIES)

# Separator characters replaced by spaces before tokenizing -- one C-level
# translate pass instead of the original seven chained .replace calls.
_SEPARATORS = str.maketrans({ch: " " for ch in "_-,.()?"})


def tokenize(text):
    """Lower-case *text*, turn separator punctuation into spaces and return
    the list of non-empty tokens."""
    return [tok for tok in str(text).lower().translate(_SEPARATORS).split(" ")
            if tok.strip()]


def load_lexicon(ws, max_row):
    """Build a word -> [categories] lookup from the LIWC sheet (word in
    column 1, category in column 2).

    A word occurring on several rows keeps one entry per row, matching the
    original row-by-row scan; categories not in CATEGORIES are ignored, as
    before. NOTE(review): rows 1..max_row-1 are scanned, skipping the
    sheet's last row exactly like the original range(1, r2) -- confirm
    whether that off-by-one is intended.
    """
    lexicon = {}
    for row in range(1, max_row):
        word = ws.cell(row=row, column=1).value
        category = ws.cell(row=row, column=2).value
        if category in CATEGORY_SET:
            lexicon.setdefault(word, []).append(category)
    return lexicon


def main():
    """Count LIWC category hits for each selected essay and write one row
    of 67 counts per essay into a.xlsx."""
    wb_out = openpyxl.load_workbook('D:\\w\\a.xlsx')
    ws_out = wb_out.active
    ws_essays = openpyxl.load_workbook('D:\\w\\ESSAY.xlsx').active
    ws_liwc = openpyxl.load_workbook('D:\\w\\LIWCEXCEL.xlsx').active
    print(ws_liwc.max_row)
    # Build the lookup once; the original rescanned the whole lexicon sheet
    # for every token of every essay.
    lexicon = load_lexicon(ws_liwc, ws_liwc.max_row)
    out_row = 1
    for row in range(2, ws_essays.max_row + 1):
        # Only essays flagged y,n,n,n,n in columns 3..7 are processed.
        flags = [ws_essays.cell(row=row, column=col).value
                 for col in range(3, 8)]
        if flags != ['y', 'n', 'n', 'n', 'n']:
            continue
        print(ws_essays.cell(row=row, column=1).value)
        # Renamed from `dict`, which shadowed the builtin.
        counts = dict.fromkeys(CATEGORIES, 0)
        tokens = tokenize(ws_essays.cell(row=row, column=2).value)
        print(tokens)
        for token in tokens:
            for category in lexicon.get(token, ()):
                print(token, category)
                counts[category] += 1
        print(counts)
        for col, value in enumerate(counts.values(), start=1):
            ws_out.cell(row=out_row, column=col, value=value)
        out_row += 1
    wb_out.save('D:\\w\\a.xlsx')


if __name__ == '__main__':
    main()
|
25,401 | e9dcc6d6ae1ba13232311b1c9fabb6a385e30cdb | import shutil
import subprocess
import unittest
from os import getcwd, makedirs, path, remove
from pathlib import Path
from mock import MagicMock, patch
from backgroundchanger import utils
from open_file_mock import MockOpen
@patch('backgroundchanger.utils.platform_system')
class TestExceptions(unittest.TestCase):
    """Tests for utils.get_background_cmd across platforms.

    The class-level patch injects a mock for platform_system into every
    test method as the last positional argument.
    """

    def test_mac_exception(self, mock_platform):
        # macOS is unsupported: get_background_cmd must raise ValueError.
        mock_platform.return_value = 'Darwin'
        self.assertRaises(
            ValueError, utils.get_background_cmd, "./tests/test.png")

    def test_win_exception(self, mock_platform):
        # Windows is unsupported as well.
        mock_platform.return_value = 'Windows'
        self.assertRaises(
            ValueError, utils.get_background_cmd, "./tests/test.png")

    @patch('backgroundchanger.utils.distro_name')
    def test_linux_cmd(self, mock_distro, mock_platform):
        # On Ubuntu/Linux the returned command is gsettings-based.
        mock_distro.return_value = 'Ubuntu'
        mock_platform.return_value = 'Linux'
        res = utils.get_background_cmd("./tests/test.png")
        assert res[0] == 'gsettings'
@patch('backgroundchanger.utils.Tk')
def test_get_screen_size(mock_tk):
    """get_screen_size reads width/height from the Tk virtual root."""
    # Module-level pytest-style test: @patch passes the mock as the only arg.
    mock_tk.return_value.winfo_vrootheight = MagicMock()
    mock_tk.return_value.winfo_vrootheight.return_value = 10
    mock_tk.return_value.winfo_vrootwidth = MagicMock()
    mock_tk.return_value.winfo_vrootwidth.return_value = 10
    screen = utils.get_screen_size()
    assert screen['height'] == 10
    assert screen['width'] == 10
def test_get_keys():
    """get_keys loads the Unsplash access/secret keys from the user config."""
    with patch('builtins.open', new_callable=MockOpen) as open_mock:
        jsonStr = '''
{
"access_key" : "ak",
"secret_key" : "sk"
}'''
        # The mocked open only answers for the expected config path, so this
        # also verifies where get_keys looks.
        config_file_path = path.join(Path.home(), '.config', 'python-backgroundchanger','unsplash_keys.json')
        open_mock.set_read_data_for(path=config_file_path, data=jsonStr)
        keys = utils.get_keys()
        assert keys['access_key'] =='ak'
        assert keys['secret_key'] =='sk'
@patch('backgroundchanger.utils.Popen')
def test_reload_gala(mock_popen):
    """reload_gala restarts the gala WM with output silenced."""
    utils.reload_gala()
    mock_popen.assert_called_once_with(['gala', '-r'],
                                       stderr=subprocess.DEVNULL,
                                       stdout=subprocess.DEVNULL)
def test_copy_file():
    """copy_file copies a file into the destination directory."""
    destDir = './tests/dummyDest/'
    srcDir = './tests/dummySrc/'
    if not path.exists(destDir):
        makedirs(destDir)
    if not path.exists(srcDir):
        makedirs(srcDir)
    # BUG FIX: the original relied on accidental adjacent-string-literal
    # concatenation ('dummySrc/' 'dummy.txt' -- a missing comma); pass the
    # path components to path.join explicitly.
    with open(path.join(getcwd(), 'tests', 'dummySrc', 'dummy.txt'), 'w+'):
        pass
    utils.copy_file('./tests/dummySrc/dummy.txt', './tests/dummyDest/')
    assert path.isfile('./tests/dummyDest/dummy.txt')
    # Clean up the fixture directories.
    shutil.rmtree(destDir)
    shutil.rmtree(srcDir)
@patch('backgroundchanger.utils.call')
def test_change_background(mock_call):
    """change_background executes the command from get_background_cmd.

    NOTE(review): this rebinds utils.get_background_cmd on the real module
    and never restores it, which leaks into later tests -- consider
    patch.object as a context manager.
    """
    utils.get_background_cmd = MagicMock()
    utils.get_background_cmd.return_value = ['dummy','cmd']
    utils.change_background("./tests/test.png")
    mock_call.assert_called_once_with(['dummy', 'cmd'])
|
25,402 | 366d0940f051a99206037d708b8d7e69acb2df9a | # coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.tasks.classpath_util import ClasspathUtil
from pants.task.console_task import ConsoleTask
class ClassmapTask(ConsoleTask):
  """Print a mapping from class name to the owning target from target's runtime classpath."""

  @classmethod
  def register_options(cls, register):
    super(ClassmapTask, cls).register_options(register)
    register('--internal-only', default=False, type=bool, fingerprint=True,
             help='Specifies that only class names of internal dependencies should be included.')
    register('--transitive', default=True, type=bool,
             help='Outputs all targets in the build graph transitively.')

  def classname_for_classfile(self, target, classpath_products):
    """Yield the class names contributed by *target*'s classpath entries."""
    contents = ClasspathUtil.classpath_contents((target,), classpath_products)
    for rel_path in contents:
      classname = ClasspathUtil.classname_for_rel_classfile(rel_path)
      # None for non `.class` files
      if classname:
        yield classname

  def console_output(self, _):
    """Emit one '<classname> <target spec>' line per class of each target."""
    def should_ignore(target):
      # --internal-only drops third-party jar dependencies.
      return self.get_options().internal_only and isinstance(target, JarLibrary)

    classpath_product = self.context.products.get_data('runtime_classpath')
    targets = self.context.targets() if self.get_options().transitive else self.context.target_roots
    for target in targets:
      if not should_ignore(target):
        # Renamed from `file`, which shadows the Python 2 builtin in this
        # py2/py3-compatible codebase.
        for classname in self.classname_for_classfile(target, classpath_product):
          yield '{} {}'.format(classname, target.address.spec)

  @classmethod
  def prepare(cls, options, round_manager):
    super(ClassmapTask, cls).prepare(options, round_manager)
    round_manager.require_data('runtime_classpath')
|
25,403 | 8557600b269726c5fadfd17feb8960dadf2f4bb8 | A_23_01_11 = {0: {'A': 0.133, 'C': 0.025, 'E': 0.074, 'D': 0.073, 'G': 0.078, 'F': -0.099, 'I': -0.042, 'H': -0.039, 'K': -0.074, 'M': -0.126, 'L': -0.117, 'N': -0.002, 'Q': 0.075, 'P': 0.151, 'S': 0.085, 'R': -0.122, 'T': 0.093, 'W': -0.054, 'V': -0.019, 'Y': -0.092}, 1: {'A': 0.071, 'C': -0.024, 'E': 0.05, 'D': 0.001, 'G': -0.004, 'F': -0.188, 'I': 0.027, 'H': -0.035, 'K': 0.025, 'M': -0.006, 'L': 0.052, 'N': -0.005, 'Q': 0.068, 'P': -0.073, 'S': 0.084, 'R': 0.002, 'T': 0.076, 'W': -0.083, 'V': 0.093, 'Y': -0.131}, 2: {'A': -0.141, 'C': -0.018, 'E': 0.036, 'D': 0.075, 'G': -0.033, 'F': -0.101, 'I': 0.097, 'H': -0.033, 'K': 0.05, 'M': 0.044, 'L': 0.054, 'N': -0.028, 'Q': 0.101, 'P': 0.162, 'S': -0.148, 'R': 0.025, 'T': -0.074, 'W': -0.005, 'V': -0.015, 'Y': -0.051}, 3: {'A': 0.114, 'C': -0.01, 'E': 0.102, 'D': 0.095, 'G': 0.047, 'F': -0.045, 'I': -0.003, 'H': -0.068, 'K': -0.132, 'M': -0.027, 'L': -0.027, 'N': 0.043, 'Q': 0.017, 'P': -0.04, 'S': 0.111, 'R': -0.183, 'T': 0.09, 'W': -0.113, 'V': 0.069, 'Y': -0.04}, 4: {'A': 0.075, 'C': 0.002, 'E': 0.032, 'D': 0.017, 'G': -0.001, 'F': -0.149, 'I': -0.024, 'H': -0.026, 'K': -0.05, 'M': -0.056, 'L': -0.135, 'N': 0.031, 'Q': 0.049, 'P': 0.157, 'S': 0.067, 'R': -0.019, 'T': 0.075, 'W': -0.022, 'V': 0.038, 'Y': -0.063}, 5: {'A': -0.241, 'C': 0.032, 'E': 0.045, 'D': 0.046, 'G': -0.026, 'F': 0.062, 'I': 0.057, 'H': 0.018, 'K': -0.056, 'M': 0.036, 'L': 0.027, 'N': 0.041, 'Q': 0.078, 'P': 0.129, 'S': -0.191, 'R': -0.004, 'T': -0.17, 'W': 0.132, 'V': -0.105, 'Y': 0.089}, 6: {'A': 0.175, 'C': 0.056, 'E': 0.109, 'D': 0.179, 'G': 0.085, 'F': -0.041, 'I': 0.062, 'H': -0.253, 'K': -0.217, 'M': -0.102, 'L': -0.082, 'N': -0.023, 'Q': 0.0, 'P': 0.243, 'S': -0.006, 'R': -0.268, 'T': 0.089, 'W': -0.014, 'V': 0.095, 'Y': -0.089}, 7: {'A': -0.032, 'C': -0.005, 'E': 0.038, 'D': 0.044, 'G': 0.035, 'F': -0.208, 'I': -0.199, 'H': 0.045, 'K': -0.081, 'M': -0.089, 'L': -0.098, 'N': 0.094, 'Q': 
0.158, 'P': 0.049, 'S': 0.104, 'R': 0.0, 'T': 0.093, 'W': 0.031, 'V': -0.058, 'Y': 0.08}, 8: {'A': -0.038, 'C': -0.007, 'E': 0.008, 'D': 0.06, 'G': 0.005, 'F': -0.164, 'I': -0.193, 'H': 0.072, 'K': 0.036, 'M': -0.062, 'L': -0.072, 'N': 0.087, 'Q': 0.09, 'P': -0.029, 'S': 0.065, 'R': 0.079, 'T': 0.09, 'W': 0.038, 'V': -0.093, 'Y': 0.029}, 9: {'A': -0.007, 'C': 0.013, 'E': -0.004, 'D': -0.047, 'G': 0.02, 'F': 0.049, 'I': 0.094, 'H': 0.005, 'K': 0.015, 'M': 0.06, 'L': 0.032, 'N': 0.017, 'Q': -0.043, 'P': -0.176, 'S': -0.018, 'R': -0.059, 'T': -0.031, 'W': 0.018, 'V': 0.034, 'Y': 0.026}, 10: {'A': 0.129, 'C': -0.077, 'E': -0.057, 'D': -0.018, 'G': 0.006, 'F': -0.44, 'I': -0.181, 'H': 0.07, 'K': 0.244, 'M': -0.098, 'L': -0.179, 'N': -0.007, 'Q': 0.117, 'P': 0.079, 'S': 0.175, 'R': 0.346, 'T': 0.112, 'W': -0.128, 'V': -0.027, 'Y': -0.066}, -1: {'con': 4.10391}} |
25,404 | 8b38e2a2d1fa50abfb6e8dcbb62726e453794039 | from __future__ import absolute_import
from ._utils import *
from .stn import *
#from .super_module import *
from .module_trainer import * |
25,405 | 9c9056d8dc28cf4a6d4f8435d0dd1d7ad19ff6d3 | from flask import Flask, render_template
#AUSTIN
from flask.ext.sqlalchemy import SQLAlchemy
from flask import request
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db/pokemon.db'
db = SQLAlchemy(app)
########################################################################
class Pokemon(db.Model):
    """ORM row for the `pokemon` table (SQLite via Flask-SQLAlchemy)."""

    __tablename__ = "pokemon"
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String)
    gender = db.Column(db.String)
    # NOTE(review): pokemonType and level are stored as strings -- presumably
    # free-form text; confirm against the seeding code.
    pokemonType = db.Column(db.String)
    level = db.Column(db.String)

    #----------------------------------------------------------------------
    def __init__(self, name, gender, pokemonType, level):
        """Store the given attribute values on the new row."""
        self.name = name
        self.gender = gender
        self.pokemonType = pokemonType
        self.level = level
#AUSTIN
@app.route('/search')
def search():
    """Render search.html for the first Pokemon whose name matches ?name=.

    BUG FIX: the original iterated Pokemon.query.all() and fell off the end
    when nothing matched, returning None (a Flask 500); it now filters in
    SQL and returns an explicit 404. The unused `identity` query parameter
    read is dropped.
    """
    name = request.args.get('name', '')
    pokemon = Pokemon.query.filter_by(name=name).first()
    if pokemon is None:
        return ('Pokemon not found', 404)
    return render_template('search.html', pokemon=pokemon)
|
25,406 | ce45a209fae74d4a35a93ab676392ff38a5f0626 | from bs4 import BeautifulSoup
import json
import os
import pandas as pd
import re
import requests
import subprocess
def text_from_pdf(pdf_path):
    """Return the text of *pdf_path*, converting with pdftotext on first use.

    The converted text is cached in a parallel tree: ``pdfs/X.pdf`` ->
    ``txts/X.txt``; an existing .txt is reused without re-running pdftotext.
    NOTE(review): str.replace rewrites every occurrence of "pdfs" in the
    path, not just the directory component -- fine for the paths this
    script builds.
    """
    txt_path = pdf_path.replace("pdfs", "txts").replace(".pdf", ".txt")
    txt_dir = os.path.dirname(txt_path)
    if not os.path.exists(txt_dir):
        os.makedirs(txt_dir)
    if not os.path.exists(txt_path):
        subprocess.call(["pdftotext", pdf_path, txt_path])
    # Context manager replaces the original open/read/close triple so the
    # handle is released even if read() raises.
    with open(txt_path, encoding="utf8") as f:
        return f.read()
base_url = "https://www.aclweb.org/anthology/"
r = requests.get(base_url + "index.html")
soup = BeautifulSoup(r.content, "lxml")
# second is the set of accessible proceeding
# Each entry: [conference id, year, anthology prefix, inclusive paper-id ranges].
selected_proceedings = (
    ['ACL', 2017, 'P17', [[1001, 1195], [2001, 2107]]],
    ['ACL', 2018, 'P18', [[1001, 1256], [2001, 2125]]],
    ['ACL', 2019, 'P19', [[1001, 1660]]],
    ['EMNLP', 2017, 'D17', [[1001, 1323]]],
    ['EMNLP', 2018, 'D18', [[1001, 1549]]],
    ['EMNLP', 2019, 'D19', [[1001, 1682]]]
)
# Titles of papers whose PDF link turned out broken (filled in below).
blacklist = [
    # "Can_I_teach_a_robot_to_replicate_a_line_art"
]
papers = list()
for cid, year, header, ranges in selected_proceedings:
    # Expand the inclusive id ranges into a flat list of paper ids.
    pids = []
    for r in ranges:
        pids += list(range(r[0], r[1] + 1))
    for pid in pids:
        if cid == 'EMNLP' and year == 2019 and pid == 1479:
            continue # wried stuff, another missing paper
        info_link = base_url + header + "-" + str(pid)
        # save paper info page (fetched only once; reruns reuse the cache)
        paper_info_html_path = os.path.join("working", "html", cid+str(year), header+"-"+str(pid)+".html")
        if not os.path.exists(paper_info_html_path):
            r = requests.get(info_link)
            if not os.path.exists(os.path.dirname(paper_info_html_path)):
                os.makedirs(os.path.dirname(paper_info_html_path))
            with open(paper_info_html_path, "wb") as f:
                f.write(r.content)
        # load saved paper html page
        with open(paper_info_html_path, "rb") as f:
            html_content = f.read()
        paper_soup = BeautifulSoup(html_content, "lxml")
        # grab title, author, abstract etc.
        # NOTE(review): .find() returns None when the page layout changes,
        # which would raise AttributeError here.
        paper_title = paper_soup.find("h2", attrs={"id":"title"}).text.replace(" ", "_")
        print("processing: " + cid, " ", str(year), " ", paper_title)
        # import pdb; pdb.set_trace()
        authors = [content.text for content in paper_soup.find('p', attrs={"class": "lead"}).contents if "</a>" in str(content)]
        abstract = paper_soup.find('div', attrs={"class": "acl-abstract"}).text
        # save pdf
        pdf_link = info_link + ".pdf"
        pdf_name = paper_title + ".pdf"
        pdf_path = os.path.join("working", "pdfs", cid+str(year), pdf_name)
        if not os.path.exists(pdf_path):
            pdf = requests.get(pdf_link)
            if pdf.status_code != 200: # always meet broken links... somewhere
                blacklist.append([cid, paper_title])
                continue
            if not os.path.exists(os.path.dirname(pdf_path)):
                os.makedirs(os.path.dirname(pdf_path))
            pdf_file = open(pdf_path, "wb")
            pdf_file.write(pdf.content)
            pdf_file.close()
        # pdf2txt for extracting content from downloaded pdf files
        paper_text = text_from_pdf(pdf_path)
        papers.append([paper_title, cid, year, "|".join(authors), info_link, abstract, paper_text])
print(blacklist)
# Two CSVs: the full dump and a lighter one without the extracted body text.
acl_papers_data = pd.DataFrame(papers, columns=["title", "venue", "year", "authors", "url", "abstract", "paper_text"])
acl_papers_data.to_csv("output/acl.csv", index=False)
acl_papers_data_without_paper_text = acl_papers_data.drop(columns=["paper_text"])
acl_papers_data_without_paper_text.to_csv("output/acl_without_paper_text.csv", index=False)
def count_pins(s):
    """Return how many of the 1000 codes '000'..'999' occur in *s* as a
    subsequence (digits in order, not necessarily adjacent)."""
    total = 0
    for code in range(1000):
        digits = str(code).zfill(3)
        matched = 0
        for ch in s:
            if ch == digits[matched]:
                matched += 1
                if matched == 3:
                    total += 1
                    break
    return total


def main():
    """Read N (unused beyond the read) and S from stdin, print the count."""
    input()
    s = input()
    print(count_pins(s))


# Guarding the I/O lets the pure counting logic be imported and tested.
if __name__ == '__main__':
    main()
25,408 | 7496b28523e998b88b9f9fdad564031ace66da18 |
import re
from django import forms
from django.conf import settings
import tweepy
class TwitterDownloadForm(forms.Form):
    """Form that downloads the follower ids of the given Twitter usernames."""

    usernames = forms.CharField(widget=forms.Textarea)

    def clean_usernames(self):
        """Split the textarea on newlines/commas into stripped usernames.

        BUG FIX: blank entries (trailing newline, doubled separators) are
        now dropped; the original passed empty strings through, which made
        the per-username Twitter API calls in generate_csv fail.
        """
        return [
            username.strip()
            for username in re.split(r'\n|,', self.cleaned_data['usernames'])
            if username.strip()
        ]

    def generate_csv(self):
        """Return one follower id per line for every cleaned username.

        Credentials come from Django settings; follower ids are fetched
        with a paging tweepy Cursor.
        """
        auth = tweepy.OAuthHandler(
            settings.TWITTER_CONSUMER_KEY,
            settings.TWITTER_CONSUMER_SECRET,
        )
        auth.set_access_token(
            settings.TWITTER_ACCESS_TOKEN_KEY,
            settings.TWITTER_ACCESS_TOKEN_SECRET,
        )
        api = tweepy.API(auth)
        followers = []
        for username in self.cleaned_data['usernames']:
            # Renamed from `id`, which shadowed the builtin.
            for follower_id in tweepy.Cursor(api.followers_ids, screen_name=username).items():
                followers.append(str(follower_id))
        return '\n'.join(followers)
|
25,409 | b77364681af924c4c3a23bc1d3304ea0d9ce74f8 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as Data
from torch.utils.data import DataLoader


def train(epochs=3, batch_size=5, num_workers=0, verbose=True):
    """Fit a tiny MLP to the line y = 11 - x on ten sample points.

    Returns the last batch loss as a float.

    BUG FIX: the original fed shape-(batch,) tensors straight into
    nn.Linear(5, 1), which treated the whole batch as ONE 5-feature sample
    and broadcast the MSE against a (batch,) target. Inputs are now shaped
    (batch, 1) and the first layer takes a single feature.
    """
    net = nn.Sequential(
        nn.Linear(1, 5),
        nn.ReLU(),
        nn.Linear(5, 1))
    optimizer = torch.optim.SGD(net.parameters(), lr=0.001)
    loss_func = nn.MSELoss()
    # Ten points on y = 11 - x, shaped (N, 1) for the Linear layer.
    x = torch.linspace(1, 10, 10).unsqueeze(1)
    y = torch.linspace(10, 1, 10).unsqueeze(1)
    loader = DataLoader(dataset=Data.TensorDataset(x, y),
                        batch_size=batch_size, num_workers=num_workers)
    loss = None
    for epoch in range(epochs):
        for step, (batch_x, batch_y) in enumerate(loader):
            prediction = net(batch_x)
            loss = loss_func(prediction, batch_y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if verbose:
                print('Epoch: ', epoch, '| Step: ', step, '| batch x: ',
                      batch_x.numpy(), '| batch y: ', batch_y.numpy())
    return float(loss)


if __name__ == '__main__':
    # num_workers=2 matches the original; worker processes require this
    # __main__ guard on spawn-based platforms (Windows/macOS).
    train(num_workers=2)
25,410 | 36db9602baefc865fbe2d685bcf4e71a335652c6 | import xml.etree.ElementTree as ET
import os, glob

path = "./xmls/"
txtpath = "./xmls/txts/"


def yolo_line(xmin, ymin, xmax, ymax, width, height, class_id=0):
    """Format one bounding box as a YOLO label line.

    Coordinates and image size may be strings (as read from the XML) or
    ints; the result is '<class> <xcen> <ycen> <w> <h>\\n' with all values
    normalised to the image size.
    """
    xcen = (int(xmax) + int(xmin)) / (2 * int(width))
    ycen = (int(ymax) + int(ymin)) / (2 * int(height))
    ratiowidth = (int(xmax) - int(xmin)) / int(width)
    ratioheight = (int(ymax) - int(ymin)) / int(height)
    return "%s %s %s %s %s\n" % (class_id, xcen, ycen, ratiowidth, ratioheight)


def convert_file(xml_file):
    """Append YOLO label lines for every <object> of one VOC XML file.

    NOTE(review): output is opened in append mode like the original, so
    rerunning the script duplicates the labels.
    """
    file_name, _ = os.path.splitext(xml_file)
    root = ET.parse(path + xml_file).getroot()
    width = root.find('size').find('width').text
    height = root.find('size').find('height').text
    with open(txtpath + file_name + '.txt', 'a') as out:
        for obj in root.findall('object'):
            box = obj.find('bndbox')
            out.write(yolo_line(box.find('xmin').text, box.find('ymin').text,
                                box.find('xmax').text, box.find('ymax').text,
                                width, height))


def main():
    """Convert every .xml file in `path` to a YOLO .txt in `txtpath`."""
    for xml_file in [f for f in os.listdir(path) if f.endswith(".xml")]:
        convert_file(xml_file)


if __name__ == '__main__':
    main()
|
25,411 | 2383c142fdeffcb2aeffc15af0a0ebe96fd1cfda | from .ruleset_060 import parse_file as parse_file_060
from .ruleset_070 import parse_file as parse_file_070
from .ruleset_100 import parse_file as parse_file_100
from .ruleset_110 import parse_file as parse_file_110
from .ruleset_200 import parse_file as parse_file_200
from .ruleset_210 import parse_file as parse_file_210
from .ruleset_300 import parse_file as parse_file_300
def parse_file(filename, ruleset='0.6.0'):
    """Parse *filename* with the parser matching *ruleset*.

    Args:
        filename: path of the file to parse.
        ruleset: ruleset version string, '0.6.0' through '3.0.0'.

    Raises:
        ValueError: if *ruleset* has no registered parser. (ValueError
        subclasses Exception, so existing ``except Exception`` callers
        keep working; the original raised a bare Exception.)
    """
    if ruleset == '0.6.0':
        return parse_file_060(filename)
    elif ruleset == '0.7.0':
        return parse_file_070(filename)
    elif ruleset == '1.0.0':
        return parse_file_100(filename)
    elif ruleset == '1.1.0':
        return parse_file_110(filename)
    elif ruleset == '2.0.0':
        return parse_file_200(filename)
    elif ruleset == '2.1.0':
        return parse_file_210(filename)
    elif ruleset == '3.0.0':
        return parse_file_300(filename)
    else:
        raise ValueError(f'Ruleset "{ruleset}" is not supported')
|
25,412 | c039956bb8a14d872ba1406dd60a5f0784111a8c | # https://docs.python.org/3/library/unittest.html
from unittest import TestCase
from tagging.classifier import feature_dict
class TestFeatureDict(TestCase):
    """Unit test for tagging.classifier.feature_dict."""

    def test_feature_dict(self):
        """Features for the first word ('El') of a Spanish sentence.

        w/wu/wt/wd describe the current word, pw the previous word ('<s>'
        marks the sentence start) and nw* the next word.
        """
        sent = 'El gato come pescado .'.split()
        fdict = {
            'w': 'el',  # lower
            'wu': False,  # isupper
            'wt': True,  # istitle
            'wd': False,  # isdigit
            'pw': '<s>',
            'nw': 'gato',
            'nwu': False,
            'nwt': False,
            'nwd': False,
        }
        self.assertEqual(feature_dict(sent, 0), fdict)
|
class Solution:
    def lastStoneWeight(self, stones: List[int]) -> int:
        """Smash the two heaviest stones until at most one remains.

        Each smash of weights a >= b leaves a stone of weight a - b (or
        nothing when equal). Returns the final stone's weight, or 0 when
        the pile is empty. O(n log n) time, O(n) space via a max-heap
        simulated with negated values.
        """
        pile = [-weight for weight in stones]
        heapq.heapify(pile)
        while len(pile) > 1:
            heaviest = -heapq.heappop(pile)
            second = -heapq.heappop(pile)
            diff = heaviest - second
            if diff:
                heapq.heappush(pile, -diff)
        return -pile[0] if pile else 0
25,414 | cc8e0ae579dd341cc9dc7e29dd6550bf5a08ca9e | import tkinter as tk
from tkinter import filedialog
import beta_tag_window
import beta_storage_window
import beta_search_images
import beta_search_content
class Beta:
    '''This class represents an object which is the main window in beta version.
    An instance of this class will open the main window with all widgets and
    functionality.
    The main window contains main menu and it is responsible for constructing
    or removing the other objects it imports.'''

    def _clear_window(self):
        """Destroy every widget in the main window except the menu bar.

        Shared by the four navigation callbacks below, which previously
        repeated this loop verbatim.
        """
        for widget in self.root.winfo_children():
            if widget.winfo_class() != 'Menu':
                widget.destroy()

    def tag(self):
        '''Clear the main window and open the tagging view.'''
        self._clear_window()
        self.tag_window = beta_tag_window.TagWindow(self.root)

    def storage(self):
        '''Clear the main window and open the storage-editing view.'''
        self._clear_window()
        self.storage_window = beta_storage_window.StorageWindow(self.root)

    def search_images(self):
        '''Clear the main window and open the images-by-content search view.'''
        self._clear_window()
        self.search_images_window = beta_search_images.SearchImagesWindow(self.root)

    def search_content(self):
        '''Clear the main window and open the content-by-image search view.'''
        self._clear_window()
        self.search_content_window = beta_search_content.SearchContentWindow(self.root)

    def __init__(self):
        '''Build the menu bar, show the tagging view and enter the Tk loop.'''
        self.root = tk.Tk()
        self.root.state('zoomed')  # Open maximized (Windows-specific state).
        self.menu_bar = tk.Menu(self.root)
        self.options_menu = tk.Menu(self.menu_bar, tearoff=0, bg='white')
        # Bound methods are passed directly; the original wrapped each one
        # in an equivalent `lambda: self.method()`.
        self.options_menu.add_command(label='Tag', command=self.tag)
        self.options_menu.add_command(label='Edit Storage', command=self.storage)
        self.menu_bar.add_cascade(label='Options', menu=self.options_menu)
        self.search_menu = tk.Menu(self.options_menu, tearoff=0, bg='white')
        self.search_menu.add_command(label='Content by image',
                                     command=self.search_content)
        self.search_menu.add_command(label='Images by content',
                                     command=self.search_images)
        self.options_menu.add_cascade(label='Search', menu=self.search_menu)
        self.search_images_window = None
        self.search_content_window = None
        self.storage_window = None
        # The tagging view is shown on startup.
        self.tag_window = beta_tag_window.TagWindow(self.root)
        self.root.config(menu=self.menu_bar)
        self.root.title('Pictures tagging')
        self.root.mainloop()  # Blocks until the window is closed.
# Construct the application on import; Beta.__init__ enters the Tk main
# loop and blocks until the window is closed.
beta = Beta()
|
25,415 | 3252f03e220ccdb8d90f8c1284ba1e19cd829cc4 | import sys
# Pixels that make an image "#Color"; everything else is black & white.
COLOR_PIXELS = {'C', 'M', 'Y'}


def classify(rows):
    """Return '#Color' if any pixel is cyan/magenta/yellow, else '#Black&White'."""
    for row in rows:
        if any(pixel in COLOR_PIXELS for pixel in row):
            return '#Color'
    return '#Black&White'


def main():
    """Read an r x c pixel grid from stdin and print its classification.

    Replaces the original's `game` flag plus mid-loop sys.exit() with a
    plain early return from classify().
    """
    r, c = [int(x) for x in input().split()]
    rows = [input().split() for _ in range(r)]
    print(classify(rows))


if __name__ == '__main__':
    main()
25,416 | b523a34a97fe132b6a88cca3d1b4a822b15b5153 | #!/usr/bin/python
"""
Starter code for the evaluation mini-project.
Start by copying your trained/tested POI identifier from
that which you built in the validation mini-project.
This is the second step toward building your POI identifier!
Start by loading/formatting the data...
"""
import pickle
import sys
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
data_dict = pickle.load(open("../final_project/final_project_dataset.pkl", "r") )
### add more features to features_list!
features_list = ["poi", "salary"]
data = featureFormat(data_dict, features_list)
labels, features = targetFeatureSplit(data)
### your code goes here
from sklearn import cross_validation
from sklearn import tree
X_train, X_test, y_train, y_test = cross_validation.train_test_split(features, labels, test_size=0.3, random_state=42)
clf = tree.DecisionTreeClassifier()
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
print "score",clf.score(X_test, y_test)
print "poi predicted in test set", sum([i for i in pred])
print "people in test set",len(X_test)
print "true positives",sum([1 for i,j in zip(pred,y_test) if i == 1 and j == 1])
from sklearn import metrics
print "precision",metrics.precision_score(y_test,pred)
print "recall",metrics.recall_score(y_test,pred)
|
25,417 | 5b280346caa053ac3c2a8c6acbcf5d9bf41196d2 | import argparse
import glob
from pyexiftool import exiftool
class ImageImporter:
def __init__(self):
print('hello')
def main():
parser = argparse.ArgumentParser()
for key, option in argument_options.items():
parser.add_argument(key, **option)
args = parser.parse_args()
print('{} -> {}, {}, {}'.format(
args.sources, args.targets, args.digit, args.pad))
iimp = ImageImporter()
iimp.sources = args.sources
files = glob.glob(args.source + '/*')
with exiftool.ExifTool() as et:
metadata = et.get_metadata_batch(files)
for d in metadata:
print("{:20.20} {:20.20}".format(
d["SourceFile"], d["EXIF:DateTimeOriginal"]))
for key, value in d.items():
if(key == 'MakerNotes:ShutterCount'):
print('{} {}'.format(key, value))
# argparse specs consumed by main(): keys are argument names (positional
# 'sources'/'targets', optional flags), values are the keyword arguments
# for parser.add_argument.
argument_options = {
    'sources': {
        'type': str,
        'help': 'import source as directory',
    },
    'targets': {
        'type': str,
        'help': 'output directory for renamed file',
    },
    '--digit': {
        'type': int,
        'default': 6,
        'help': 'shutter count format on filename',
    },
    '--pad': {
        'type': str,
        'default': '0',
        'help': 'padding character of shutter count'
    },
    '--suffix': {
        'type': str,
        'help': 'filename suffix',
    },
    '--prefix': {
        'type': str,
        'help': 'filename prefix',
    }
}
if __name__ == '__main__':
main()
|
25,418 | 54a856a90e75fdc83bbe09768390ed7c2bf0bae2 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions for sdk_update.py and sdk_update_main.py."""
import errno
import os
import shutil
import subprocess
import sys
import time
class Error(Exception):
  """Generic error/exception for sdk_update module.

  Raised by RenameDir when the destination cannot be replaced after
  repeated attempts.
  """
  pass
def RemoveDir(outdir):
  """Removes the given directory

  On Unix systems, this just runs shutil.rmtree, but on Windows, this doesn't
  work when the directory contains junctions (as does our SDK installer).
  Therefore, on Windows, it runs rmdir /S /Q as a shell command.  This always
  does the right thing on Windows. If the directory already didn't exist,
  RemoveDir will return successfully without taking any action.

  Args:
    outdir: The directory to delete

  Raises:
    CalledProcessError - if the delete operation fails on Windows
    OSError - if the delete operation fails on Linux
  """
  try:
    shutil.rmtree(outdir)
  except OSError:
    if not os.path.exists(outdir):
      return
    # On Windows this could be an issue with junctions, so try again with rmdir
    if sys.platform == 'win32':
      subprocess.check_call(['rmdir', '/S', '/Q', outdir], shell=True)
    else:
      # BUG FIX: the original silently swallowed the failure on non-Windows
      # platforms although the docstring promises OSError; re-raise.
      raise


def RenameDir(srcdir, destdir):
  """Renames srcdir to destdir. Removes destdir before doing the
     rename if it already exists."""
  max_tries = 5
  # `range` (not the Py2-only `xrange`) keeps this valid on both 2 and 3.
  for num_tries in range(max_tries):
    try:
      RemoveDir(destdir)
      shutil.move(srcdir, destdir)
      return
    except OSError as err:
      if err.errno != errno.EACCES:
        raise err
      # If we are here, we didn't exit due to raised exception, so we are
      # handling a Windows flaky access error.  Sleep one second and try
      # again.
      time.sleep(num_tries + 1)
  # end of loop -- could not RenameDir
  # BUG FIX: report the real number of attempts; the original interpolated
  # the loop variable, which is max_tries - 1 after the loop.
  raise Error('Could not RenameDir %s => %s after %d tries.\n'
              'Please check that no shells or applications '
              'are accessing files in %s.'
              % (srcdir, destdir, max_tries, destdir))
|
25,419 | b9fee7d195ead1276a00a406fd401792c07a7bd7 | #!/usr/bin/env python
# -*- coding=utf-8 -*-
import numpy as np
from coeffs import *
import sys, math
try:
dirname = sys.argv[1]; del sys.argv[1]
except:
print "Directory with results doesn't specify: "
print "Usage: computing.py dirname [nolog|logx|logy|loglog]"
print "Default values: . nolog"
print "Target is Si [Ion number = 14, Mass = 28.085 amu]"
dirname = '.'
# sys.exit(1)
import os, shutil
os.chdir(dirname)
filename = 'SRIMdata.txt'
nn = 1000
params = Parameters(filename)
Se = SeCoeffSRIM(nn,params,False)
Se.eval()
Sn = SnCoeff(Se.E, params)
Sn.eval()
q = QCoeff(Se.E, params)
q.eval()
alpha = AlphaCoeff(Se.E, params)
alpha.eval()
beta = Sn.Value + Se.Value
if(dirname=='.'):
ofile = open('result.txt','w')
else:
ofile = open(dirname+'.txt','w')
ofile.write('//\t'+dirname+'\t %g -- %g\n' % (min(alpha.E), max(alpha.E)))
ofile.write('Energy\t Alpha\t Beta\t Q\n')
np.savetxt(ofile,np.column_stack((alpha.E,alpha.Value,beta,q.Value)))
ofile.close()
|
25,420 | ed0e2f1d7cd200951eb90f6a5f5054987f9a922e | import urllib2
import json
from pprint import pprint
from threading import Thread
import os
import time#benchmark
import datetime
#contains all the noun found url
not_found_url = []
temporary_holder = []
data_list_json= []
def create_a_list_of_list(data):
    """Split *data* into consecutive chunks of five items.

    The final chunk holds whatever remains (1-5 items); an empty input
    yields an empty list. Used to cap the number of concurrent download
    threads at five per batch.
    """
    chunks = []
    current = []
    for item in data:
        if len(current) == 5:
            # Current chunk is full: bank it and start a new one with item.
            chunks.append(current)
            current = [item]
        else:
            current.append(item)
    if current:
        chunks.append(current)
    return chunks
def download_image(url):
url = url['image']['image_link']
print(url)
file_name = url.split('/')[-1]
try:
u = urllib2.urlopen(url)
f = open("images/"+file_name, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print "Downloading: %s Bytes: %s" % (file_name, file_size)
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8)*(len(status)+1)
print status,
f.close()
except urllib2.HTTPError, URLError:
print("error not a valid url")
not_found_url.append(url)
temporary_holder.append(url)
def parse_this_json_file(file):
    """Load json/<file>, chunk its records and hand them to the downloader.

    NOTE(review): data_list_json here is a *local* that shadows the
    module-level list of the same name, so the global stays empty.
    """
    json_data=open('json/'+ file)
    data = json.load(json_data)
    data_list_json = create_a_list_of_list(data)
    #print(data_list_json)
    json_data.close()
    send_this_data_to_the_thread_function(data_list_json)
def send_this_data_to_the_thread_function(data_list_json):
    """Download each chunk with one thread per record.

    Chunks come from create_a_list_of_list, so at most five downloads run
    concurrently; join() waits for a whole chunk before starting the next.
    """
    for list_of_list in data_list_json:
        threadlist = []
        #for u in list_of_list:
        #download_image(u)
        for u in list_of_list:
            t = Thread(target=download_image, args=(u,))
            t.start()
            threadlist.append(t)
        for b in threadlist:
            b.join()
def cleaning_this_directory():
    """After downloading all the XML dataset
    and after parsing the xml file to be able to create a
    json file. This FUNCTION will move all the .xml and .json
    to the directory ./json or ./xml

    NOTE(review): the docstring above (kept from the original) does not
    match the code, which moves .jpg files into ./images and .JPG files
    into ./xml -- confirm which behaviour is intended.
    """
    import os, shutil
    files = os.listdir(".")
    for f in files:
        if os.path.isfile(f):
            extension = f.split(".")[-1]
            if extension == 'jpg':
                #move the file
                os.rename(f, "images/"+f)
            elif extension == 'JPG':
                #move to xml file
                os.rename(f, 'xml/'+f)
            else:
                pass
def rewrite_this_dataset(file_name):
    """Drop records whose image failed to download, then rewrite the file.

    NOTE(review): this function looks unfinished and is left as-is:
    - data_list_json is the module-level list, which is never populated
      (parse_this_json_file assigns a local of the same name), so the loop
      iterates nothing in practice;
    - it removes items from data_list_json while iterating it;
    - it indexes records with string keys although create_a_list_of_list
      produces lists of lists.
    """
    for data in data_list_json:
        uid = data['unique_identifier']
        for a in temporary_holder:
            if a['unique_identifier'] == uid:
                data_list_json.remove(data)
    write_a_json_file_for_the_database(data_list_json, file_name)
def write_a_json_file_for_the_database(artefact, dataset_name):
    """Serialize *artefact* to *dataset_name* as pretty-printed JSON.

    BUG FIX: the original used the io module without importing it anywhere
    in this script; imported locally to keep the fix self-contained.
    (Python 2 script: unicode() is the Py2 builtin.)
    """
    import io
    with io.open(dataset_name, 'w', encoding='utf-8') as f:
        f.write(unicode(json.dumps(artefact, ensure_ascii=True, indent=4 )))
def main():
    """Process every JSON dataset file, persist failures, and tidy up.

    For each file in ./json: reset the failed-download accumulator, run
    the download pass, and rewrite the dataset if any download failed.
    Finally dump the list of unreachable URLs and move downloaded images
    out of the working directory.
    """
    # ``temporary_holder`` is the module-level list the download workers
    # append failed URLs to.  BUG FIX: the original rebound it as a
    # *local* variable here, so the len() check below always saw an empty
    # local list and rewrite_this_dataset() could never fire.
    global temporary_holder
    for f in os.listdir('json'):
        print(f)
        del temporary_holder[:]  # reset in place; workers keep a reference
        if os.path.isfile("json/" + f):
            parse_this_json_file(f)
        if len(temporary_holder) > 0:
            rewrite_this_dataset(f)
    write_a_json_file_for_the_database(not_found_url, 'not_found_url.json')
    cleaning_this_directory()
#----------------------------------------------------------------------
# Script entry point: run the full download pipeline and time it.
if __name__ == "__main__":
    start_time = time.time()
    main()
    # Wall-clock duration; timedelta's str() formats it as H:MM:SS.
    seconds = (time.time() - start_time)
    time_in_total = str(datetime.timedelta(seconds=seconds))
print("Time took to download\nall the dataset => {0}".format(time_in_total)) |
25,421 | 4ce14041b41df31e41bfefb4f14e4166f3ba1063 | print('Расчёт выручки фирмы')
# Interactive profitability calculator (console I/O, Russian prompts).
# BUG FIX: the first prompt asks for revenue and the second for costs,
# but the original stored them in swapped variable names (`cost` held the
# revenue), making every later expression read backwards.  Only the local
# names were corrected -- all prompts and printed texts are unchanged.
revenue = int(input('Введите значение выручки: '))
costs = int(input('Введите значение издержек: '))
if revenue > costs:
    # NOTE(review): prints revenue/costs rounded to an integer and calls
    # it "рентабельность"; a percentage formula may have been intended.
    print('Ваша фирма работает в прибыль!\n'
          f'Рентабельность выручки состовляет {revenue / costs:.0f}')
    number_people = int(input('Введите число сотрудников: '))
    # NOTE(review): divides revenue (not profit) by headcount.
    print(f'Прибыль на одного сотрудника составляет: {revenue / number_people:.0f}')
elif revenue < costs:
    print('Ваша фирма работает в убыток.')
else:
    print('Ваша фирма работает в 0.')
25,422 | 40a521caa86691204c25a48811dcd66b71f1ef8f | # AsynQueue:
# Asynchronous task queueing based on the Twisted framework, with task
# prioritization and a powerful worker interface.
#
# Copyright (C) 2006-2007, 2015 by Edwin A. Suominen,
# http://edsuom.com/AsynQueue
#
# See edsuom.com for API documentation as well as information about
# Ed's background and other projects, software and otherwise.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Information about callables and what happens to them.
My L{Info} object is a flexible info provider, with several methods
for offering you text info about a call. Construct it with a function
object and any args and keywords if you want the info to include that
particular function call, or you can set it (and change it) later with
L{Info.setCall}.
The L{Info.nn} method may be useful in the still non-working L{wire}
module for separating a call into a namespace and attribute name. It's
not used yet, though, there or anywhere else in C{AsynQueue}.
Other useful objects for development are my L{showResult} and
L{whichThread} decorator functions, which you can use together.
"""
import sys, traceback, inspect, threading, pickle
from contextlib import contextmanager
from twisted.internet import defer
from twisted.python import reflect
def hashIt(*args):
    """
    Returns a pretty much unique 32-bit hash for pretty much any
    python object.

    Dicts are folded key-by-key in sorted-key order (so two dicts with
    equal contents hash alike regardless of insertion order), and
    lists/tuples element-by-element with positions mixed in.  Anything
    unhashable falls back to the hash of its pickle; anything
    unpicklable too contributes 0.
    """
    total = 0
    for x in args:
        if isinstance(x, dict):
            # Sorted keys make the result order-independent.
            for k, key in enumerate(sorted(x.keys())):
                total += hashIt(k, key, x[key])
        elif isinstance(x, (list, tuple)):
            for k, value in enumerate(x):
                total += hashIt(k, value)
        else:
            try:
                thisHash = hash(x)
            except TypeError:
                # Unhashable: hash a pickled representation instead.
                # (The original bare ``except:`` also swallowed
                # KeyboardInterrupt/SystemExit.)
                try:
                    thisHash = hash(pickle.dumps(x))
                except Exception:
                    thisHash = 0
            total += thisHash
    return hash(total)
# Shared decorator state: [call counter, shared Info instance, whichThread
# flag].  Module-level so the counter persists across decorated methods.
SR_STUFF = [0, None, False]
def showResult(f):
    """
    Use as a decorator to print info about the function and its
    result. Follows deferred results.
    """
    def substitute(self, *args, **kw):
        def msg(result, callInfo):
            resultInfo = str(result)
            # Wrap onto two lines when the combined text would be long.
            if len(callInfo) + len(resultInfo) > 70:
                callInfo += "\n"
            print("\n{} -> {}".format(callInfo, resultInfo))
            return result
        SR_STUFF[0] += 1
        callInfo = "{:03d}: {}".format(
            SR_STUFF[0],
            SR_STUFF[1].setCall(
                instance=self, args=args, kw=kw).aboutCall())
        result = f(self, *args, **kw)
        if isinstance(result, defer.Deferred):
            # Print once the Deferred fires, passing the result through.
            return result.addBoth(msg, callInfo)
        return msg(result, callInfo)
    SR_STUFF[1] = Info(whichThread=SR_STUFF[2]).setCall(f)
    # BUG FIX: preserve the wrapped function's name via ``__name__``.
    # The original used the Python-2-only ``func_name`` attribute, which
    # raises AttributeError under Python 3 even though this module
    # otherwise supports both (it imports six).
    substitute.__name__ = f.__name__
    return substitute
def whichThread(f):
    """
    Use as a decorator (after showResult) to include the current
    thread in the info about the function.
    """
    # Flip the module-wide flag read by showResult when it constructs the
    # shared Info instance; the decorated function itself is unchanged.
    SR_STUFF[2] = True
    return f
class Converter(object):
    """
    I provide a bunch of methods for converting objects.

    Every conversion is round-trip tested: a result is returned only if
    converting it back succeeds; otherwise the method returns C{None}.
    """
    def strToFQN(self, x):
        """
        Returns the fully qualified name of the supplied string if it can
        be imported and then reflected back into the FQN, or
        C{None} if not.
        """
        try:
            obj = reflect.namedObject(x)
            fqn = reflect.fullyQualifiedName(obj)
        except Exception:
            # Narrowed from a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit.
            return None
        return fqn
    def objToPickle(self, x):
        """
        Returns a string of the pickled object or C{None} if it couldn't
        be pickled and unpickled back again.
        """
        try:
            xp = pickle.dumps(x)
            pickle.loads(xp)
        except Exception:
            return None
        return xp
    def objToFQN(self, x):
        """
        Returns the fully qualified name of the supplied object if it can
        be reflected into an FQN and back again, or C{None} if
        not.
        """
        try:
            fqn = reflect.fullyQualifiedName(x)
            reflect.namedObject(fqn)
        except Exception:
            return None
        return fqn
    def processObject(self, x):
        """
        Attempts to convert the supplied object to a pickle and, failing
        that, to a fully qualified name.
        """
        pickled = self.objToPickle(x)
        if pickled:
            return pickled
        return self.objToFQN(x)
class InfoHolder(object):
    """
    An instance of me is yielded by L{Info.context}, for you to call
    about info concerning a particular saved function call.

    I am a thin proxy: every method forwards to the parent L{Info}
    object with my saved call ID filled in.
    """
    def __init__(self, info, ID):
        self.info = info
        self.ID = ID
    def getInfo(self, name):
        # Named info attribute for my call.
        return self.info.getInfo(self.ID, name)
    def nn(self, raw=False):
        # Namespace/name 2-tuple for my call.
        return self.info.nn(self.ID, raw)
    def aboutCall(self):
        return self.info.aboutCall(self.ID)
    def aboutException(self, exception=None):
        # BUG FIX: this used to forward to aboutCall(), which silently
        # treated ``exception`` as aboutCall's ``nowForget`` flag and
        # never produced exception info.
        return self.info.aboutException(self.ID, exception)
    def aboutFailure(self, failureObj):
        return self.info.aboutFailure(failureObj, self.ID)
class Info(object):
    """
    Provides detailed info about function/method calls.

    I provide text (picklable) info about a call. Construct me with a
    function object and any args and keywords if you want the info to
    include that particular function call, or you can set it (and
    change it) later with L{setCall}.
    """
    def __init__(self, remember=False, whichThread=False):
        """C{Info}(remember=False, whichThread=False)"""
        self.cv = Converter()
        self.lastMetaArgs = None
        if remember:
            # Presence of this attribute is used throughout as the
            # "remember past calls" flag.
            self.pastInfo = {}
        self.whichThread = whichThread
    def setCall(self, *metaArgs, **kw):
        """
        Sets my current f-args-kw tuple, returning a reference to myself
        to allow easy method chaining.

        The function I{f} must be an actual callable object if you
        want to use L{nn}. Otherwise it can also be a string depicting
        a callable.

        You can specify I{args} with a second argument (as a list or
        tuple), and I{kw} with a third argument (as a C{dict}). If you are
        only specifying a single arg, you can just provide it as your
        second argument to this method call without wrapping it in a
        list or tuple. I try to be flexible.

        If you've set a function name and want to add a sequence of
        args or a dict of keywords, you can do it by supplying the
        I{args} or I{kw} keywords. You can also set a class instance
        at that time with the I{instance} keyword.

        To sum up, here are the numbers of arguments you can provide:

            1. A single argument with a callable object or string
               depicting a callable.

            2. Two arguments: the callable I{f} plus a single
               argument or list of arguments to I{f}.

            3. Three arguments: I{f}, I{args}, and a dict
               of keywords for the callable.

        @param metaArgs: 1-3 arguments as specified above.

        @keyword args: A sequence of arguments for the callable I{f}
            or one previously set.

        @keyword kw: A dict of keywords for the callable I{f} or one
            previously set.

        @keyword instance: An instance of a class of which the
            callable I{f} is a method.
        """
        if metaArgs:
            # Decide whether these metaArgs are equivalent to the last
            # ones seen, so repeated identical calls can short-circuit.
            equiv = True
            if self.lastMetaArgs is None:
                equiv = False
            elif len(metaArgs) != len(self.lastMetaArgs):
                equiv = False
            else:
                for k, arg in enumerate(metaArgs):
                    try:
                        thisEquiv = (arg == self.lastMetaArgs[k])
                    except Exception:
                        # Some objects raise on comparison; treat those
                        # as different.  (Narrowed from a bare except.)
                        thisEquiv = False
                    if not thisEquiv:
                        equiv = False
                        break
            if equiv and not hasattr(self, 'pastInfo'):
                # We called this already with the same metaArgs and
                # without any pastInfo to reckon with, so there's
                # nothing to do.
                return self
            # Starting over with a new f
            callDict = {'f': metaArgs[0], 'fs': self._funcText(metaArgs[0])}
            args = metaArgs[1] if len(metaArgs) > 1 else []
            if not isinstance(args, (tuple, list)):
                args = [args]
            callDict['args'] = args
            callDict['kw'] = metaArgs[2] if len(metaArgs) > 2 else {}
            callDict['instance'] = None
            if self.whichThread:
                callDict['thread'] = threading.current_thread().name
            self.callDict = callDict
        elif hasattr(self, 'callDict'):
            # Adding to an existing f
            for name in ('args', 'kw', 'instance'):
                if name in kw:
                    self.callDict[name] = kw[name]
        else:
            raise ValueError(
                "You must supply at least a new function/string "+\
                "or keywords adding args, kw to a previously set one")
        if hasattr(self, 'currentID'):
            del self.currentID
        # Runs the property getter
        self.ID
        if metaArgs:
            # Save metaArgs to ignore repeated calls with the same metaArgs
            self.lastMetaArgs = metaArgs
        return self
    @property
    def ID(self):
        """
        Returns a unique ID for my current callable.
        """
        if hasattr(self, 'currentID'):
            return self.currentID
        if hasattr(self, 'callDict'):
            thisID = hashIt(self.callDict)
            if hasattr(self, 'pastInfo'):
                self.pastInfo[thisID] = {'callDict': self.callDict}
        else:
            thisID = None
        self.currentID = thisID
        return thisID
    def forgetID(self, ID):
        """
        Use this whenever info won't be needed anymore for the specified
        call ID, to avoid memory leaks.
        """
        if ID in getattr(self, 'pastInfo', {}):
            del self.pastInfo[ID]
    @contextmanager
    def context(self, *metaArgs, **kw):
        """
        Context manager for setting and getting call info.

        Call this context manager method with info about a particular call
        (same format as L{setCall} uses) and it yields an
        L{InfoHolder} object keyed to that call. It lets you get info
        about the call inside the context, without worrying about the
        ID or calling L{forgetID}, even after I have been used for
        other calls outside the context.
        """
        if not hasattr(self, 'pastInfo'):
            raise Exception(
                "Can't use a context manager without saving call info")
        ID = self.setCall(*metaArgs, **kw).ID
        try:
            yield InfoHolder(self, ID)
        finally:
            # Forget even if the with-body raised, so pastInfo entries
            # cannot leak.  (The original skipped this on exceptions.)
            self.forgetID(ID)
    def getInfo(self, ID, name, nowForget=False):
        """
        Provides info about a call.

        If the supplied name is 'callDict', returns the f-args-kw-instance
        dict for my current callable. The value of I{ID} is ignored in
        such case. Otherwise, returns the named information attribute
        for the previous call identified with the supplied ID.

        @param ID: ID of a previous call, ignored if I{name} is 'callDict'
        @param name: The name of the particular type of info requested.
        @type name: str
        @param nowForget: Set C{True} to remove any reference to this
            ID or callDict after the info is obtained.
        """
        def getCallDict():
            if hasattr(self, 'callDict'):
                result = self.callDict
                if nowForget:
                    del self.callDict
            else:
                result = None
            return result
        if hasattr(self, 'pastInfo'):
            if ID is None and name == 'callDict':
                return getCallDict()
            if ID in self.pastInfo:
                x = self.pastInfo[ID]
                if nowForget:
                    del self.pastInfo[ID]
                return x.get(name, None)
            return None
        if name == 'callDict':
            return getCallDict()
        return None
    def saveInfo(self, name, x, ID=None):
        # Record x under (ID, name) when remember mode is on; returns x
        # unchanged so callers can save-and-return in one expression.
        if ID is None:
            ID = self.ID
        if hasattr(self, 'pastInfo'):
            self.pastInfo.setdefault(ID, {})[name] = x
        return x
    def nn(self, ID=None, raw=False):
        """
        Namespace-name parser.

        For my current callable or a previous one identified by I{ID},
        returns a 2-tuple suitable for sending to a process worker via
        C{pickle}.

        The first element: If the callable is a method, a pickled or
        fully qualified name (FQN) version of its parent object. This
        is C{None} if the callable is a standalone function.

        The second element: If the callable is a method, the
        callable's name as an attribute of the parent object. If it's
        a standalone function, the pickled or FQN version. If nothing
        works, this element will be C{None} along with the first one.

        @param ID: Previous callable
        @type ID: int
        @param raw: Set C{True} to return the raw parent (or
            function) object instead of a pickle or FQN. All the type
            checking and round-trip testing still will be done.
        """
        if ID:
            pastInfo = self.getInfo(ID, 'wireVersion')
            if pastInfo:
                return pastInfo
        result = None, None
        callDict = self.getInfo(ID, 'callDict')
        if not callDict:
            # No callable set
            return result
        func = callDict['f']
        if isinstance(func, str):
            # A callable defined as a string can only be a function
            # name, return its FQN or None if that doesn't work
            result = None, self.cv.strToFQN(func)
        elif inspect.ismethod(func):
            # It's a method, so get its parent
            parent = getattr(func, '__self__', None)
            if parent:
                processed = self.cv.processObject(parent)
                if processed:
                    # Pickle or FQN of parent, method name
                    if raw:
                        processed = parent
                    result = processed, func.__name__
        if result == (None, None):
            # Couldn't get or process a parent, try processing the
            # callable itself
            processed = self.cv.processObject(func)
            if processed:
                # None, pickle or FQN of callable
                if raw:
                    processed = func
                result = None, processed
        return self.saveInfo('wireVersion', result, ID)
    def aboutCall(self, ID=None, nowForget=False):
        """
        Returns an informative string describing my current function call
        or a previous one identified by ID.
        """
        if ID:
            pastInfo = self.getInfo(ID, 'aboutCall', nowForget)
            if pastInfo:
                return pastInfo
        callDict = self.getInfo(ID, 'callDict')
        if not callDict:
            return ""
        func, args, kw = [callDict[x] for x in ('f', 'args', 'kw')]
        instance = callDict.get('instance', None)
        text = repr(instance) + "." if instance else ""
        text += self._funcText(func) + "("
        if args:
            text += ", ".join([str(x) for x in args])
        for name, value in kw.items():
            text += ", {}={}".format(name, value)
        text += ")"
        if 'thread' in callDict:
            text += " <Thread: {}>".format(callDict['thread'])
        return self.saveInfo('aboutCall', text, ID)
    def aboutException(self, ID=None, exception=None, nowForget=False):
        """
        Returns an informative string describing an exception raised from
        my function call or a previous one identified by ID, or one
        you supply (as an instance, not a class).
        """
        if ID:
            pastInfo = self.getInfo(ID, 'aboutException', nowForget)
            if pastInfo:
                return pastInfo
        if exception:
            lineList = ["Exception '{}'".format(repr(exception))]
        else:
            stuff = sys.exc_info()
            lineList = ["Exception '{}'".format(stuff[1])]
        callInfo = self.aboutCall()
        if callInfo:
            lineList.append(
                " doing call '{}':".format(callInfo))
        self._divider(lineList)
        if not exception:
            lineList.append("".join(traceback.format_tb(stuff[2])))
            # Break the traceback reference cycle.
            del stuff
        text = self._formatList(lineList)
        return self.saveInfo('aboutException', text, ID)
    def aboutFailure(self, failureObj, ID=None, nowForget=False):
        """
        Returns an informative string describing a Twisted failure raised
        from my function call or a previous one identified by ID. You
        can use this as an errback.
        """
        if ID:
            pastInfo = self.getInfo(ID, 'aboutFailure', nowForget)
            if pastInfo:
                return pastInfo
        lineList = ["Failure '{}'".format(failureObj.getErrorMessage())]
        callInfo = self.aboutCall()
        if callInfo:
            lineList.append(
                " doing call '{}':".format(callInfo))
        self._divider(lineList)
        lineList.append(failureObj.getTraceback(detail='verbose'))
        text = self._formatList(lineList)
        return self.saveInfo('aboutFailure', text, ID)
    def _divider(self, lineList):
        # Append a dashed separator as wide as the longest line, capped
        # at 79 columns.
        N_dashes = max([len(x) for x in lineList]) + 1
        if N_dashes > 79:
            N_dashes = 79
        lineList.append("-" * N_dashes)
    def _formatList(self, lineList):
        # Flatten lines, splitting on ':' and on literal "\n" sequences.
        lines = []
        for line in lineList:
            newLines = line.split(':')
            for newLine in newLines:
                for reallyNewLine in newLine.split('\\n'):
                    lines.append(reallyNewLine)
        return "\n".join(lines)
    def _funcText(self, func):
        # Best-effort display name: the string itself, the callable's
        # __name__, or a str/repr fallback.
        if isinstance(func, str):
            return func
        if callable(func):
            text = getattr(func, '__name__', None)
            if text:
                return text
            if inspect.ismethod(func):
                # BUG FIX: Python-3-safe parent lookup; the original read
                # the Python-2-only ``im_self`` attribute.
                text = "{}.{}".format(getattr(func, '__self__', '?'), text)
                return text
            try:
                func = str(func)
            except Exception:
                func = repr(func)
            return func
        try:
            func = str(func)
        except Exception:
            func = repr(func)
        return "{}[Not Callable!]".format(func)
|
25,423 | 207dc032bf56775d42409d1b4d87cc773df60e4c | # Author = 'LiuLeo2'
import sys
import getopt
reload(sys)
sys.setdefaultencoding('utf-8')
import urllib2
def calc_angle_by_pnt(pnt_start_lat, pnt_start_lon, pnt_end_lat, pnt_end_lon):
    """Bearing-style angle in degrees from a start point to an end point.

    atan2 gives a counter-clockwise angle from the +lon axis; the mapping
    below converts it to a clockwise-from-north heading.  The original
    quirk is preserved: due east is 90 and due north comes out as 360
    (not 0).
    """
    import math
    theta = math.atan2(pnt_end_lat - pnt_start_lat,
                       pnt_end_lon - pnt_start_lon)
    if theta < 0:
        theta += 2 * math.pi
    deg = theta * 57.29577951308232
    # The original's three upper branches all reduce to 450 - deg.
    if deg < 90:
        return 90 - deg
    return 450 - deg
def calc_distance_by_pnt(lat_left, lon_left, lat_right, lon_right):
    """Great-circle distance in metres between two lat/lon points (degrees).

    Standard haversine formula on a sphere of radius 6378137 m (WGS84
    equatorial radius).

    BUG FIX: the original swapped latitude and longitude in the haversine
    term -- it computed sin^2(dlon/2) + cos(lon1)*cos(lon2)*sin^2(dlat/2),
    whereas haversine requires the cosines of the *latitudes* scaling the
    *longitude* half-angle term.  The old form gave wrong distances except
    in special cases.
    """
    import math
    r = 6378137.0
    lat_left_rad = math.radians(lat_left)
    lon_left_rad = math.radians(lon_left)
    lat_right_rad = math.radians(lat_right)
    lon_right_rad = math.radians(lon_right)
    dlon = abs(lon_left_rad - lon_right_rad)
    dlat = abs(lat_left_rad - lat_right_rad)
    p = (math.sin(dlat / 2) ** 2
         + math.cos(lat_left_rad) * math.cos(lat_right_rad)
         * math.sin(dlon / 2) ** 2)
    return r * 2 * math.asin(math.sqrt(p))
# Script entry point (Python 2 only: ``<>``, print statements, urllib2).
# Usage: -i <input file>  -s <field separator>
if __name__ == '__main__':
    opts, args = getopt.getopt(sys.argv[1:], "i:s:")
    in_file_name = ''
    split_char = ''
    if len(opts) <> 2:
        print 'Arguments Num Error!'
    else:
        for opt, value in opts:
            if opt == '-i':
                in_file_name = value
            elif opt == '-s':
                split_char = value
    record = []
    records = []
    urls = []
    # NOTE(review): hard-coded Google Maps API key checked into source --
    # should be moved to configuration and revoked.
    key = 'AIzaSyCkF6hNZLbw_FIU_02oRnBPPjJidLm5SIc'
    if len(in_file_name) != 0:
        with open(in_file_name, 'r') as fin:
            for line in fin:
                line = line.rstrip('\n')
                line = line.rstrip('\r')
                record = line.split(split_char)
                records.append(line)
                # Skip the header row.
                # NOTE(review): the header test reads columns 0 and 1 but
                # the data path reads lat from column 1 and lon from
                # column 2; also, since the header line IS appended to
                # ``records`` but produces no URL, records[i] and urls[i]
                # drift out of step below -- confirm with the author.
                if record[0] == 'lat' and record[1] == 'lon':
                    continue
                # Coordinates are stored as micro-degrees (1e6 scaling).
                lat = str(int(record[1]) / 1000000.0)
                lon = str(int(record[2]) / 1000000.0)
                text = record[5]
                # Destination is the free text after ' towards '.
                index = text.find(' towards ') + len(' towards ')
                destination = urllib2.quote(text[index:] + '+in+singapore')
                #origin = urllib2.quote(text[:index] + '+in+singapore')
                origin = urllib2.quote(lat + ' ' + lon)
                urls.append("https://maps.googleapis.com/maps/api/directions/json?origin=%s&destination=%s&key=%s&language=eng" % (origin, destination, key))
        # Rewrite the input file with a bearing value in the last field.
        with open(in_file_name, 'w') as fout:
            d_mark = 20
            import symbol
            import json
            for i in range(len(urls)):
                page = urllib2.urlopen(urls[i])
                json_parse = json.load(page)
                instructions = json_parse["routes"][0]["legs"][0]['steps'][0]["html_instructions"]
                start_lat = json_parse["routes"][0]["legs"][0]['start_location']['lat']
                start_lon = json_parse["routes"][0]["legs"][0]['start_location']['lng']
                end_lat = json_parse["routes"][0]["legs"][0]['end_location']['lat']
                end_lon = json_parse["routes"][0]["legs"][0]['end_location']['lng']
                angle = calc_angle_by_pnt(start_lat, start_lon, end_lat, end_lon)
                # Replace the last field with the rounded bearing.
                records[i] = records[i][:records[i].rfind(split_char)] + split_char + str(int(angle + 0.5))
            for line in records:
                fout.write(line + '\n')
|
25,424 | b2f3dec1573d722f2fcc2db44058fe5fa5867cec | from django import forms
from .models import Project
from froala_editor.widgets import FroalaEditor
class ProjectForm(forms.ModelForm):
    """Model form for creating/editing a Project (title + rich-text content)."""
    class Meta:
        model = Project
        # Only these two fields are user-editable.
        fields = ['title', 'content']
        widgets = {
            # WYSIWYG editor widget for the body text.
            'content': FroalaEditor(),
            'title': forms.TextInput(
                attrs={
                    'style': 'height: 30px; margin-bottom:15px; width:300px;',
                    'class': 'form-control',
                    'autocomplete': 'off'
                }
            )
        }
25,425 | 3e6e8b236d6ac4e48c0cf8747c43fe3094f21122 | #!/usr/bin/env python3
import sys
import time
donors_list = [("Rufio", [897, 200, 200]),
("Maggie", [543, 2, 3000]),
("Gus", [23, 32, 33222]),
("Kramer", [10, 87, 886]),
("Polly", [432, 32, 7896])
]
def thank_you():
    # Menu action 1: delegate to the donor lookup/creation flow.
    find_donor()
def gen_stats(donor):
    """Return (name, total, count, average) for a (name, donations) pair.

    Robustness fix: a donor with no recorded donations now yields zeros
    instead of raising ZeroDivisionError.

    :param donor: tuple of (donor name, list of donation amounts).
    """
    name, donations = donor
    total = sum(donations)
    num = len(donations)
    if num == 0:
        return (name, 0, 0, 0)
    return (name, total, num, total / num)
def report():
    """Menu action 2: print a table of donor statistics.

    The original printed only the header and left the body as a
    commented-out stub; this now emits one row per donor in the global
    ``donors_list`` using gen_stats().
    """
    print("Generating report...")
    time.sleep(1)  # dramatic effect of creating the report
    generate_report_template()
    for donor in donors_list:
        name, total, num, avg = gen_stats(donor)
        print(f"{name:<17} | {total:>11} | {num:>9} | {avg:>12.2f}")
def generate_report_template():
    """Print the report header row followed by a 68-dash separator."""
    headers = ("Donor Name", "Total Given", "Num Gifts", "Average Gift")
    print(" | ".join(headers))
    print("-" * 68)
def find_donor():
    """Look a donor up by name, listing or creating one as needed.

    Typing 'list' prints every donor.  Otherwise the name is searched in
    the global ``donors_list``; if absent, a new (name, []) entry with an
    empty donation history is appended.

    BUG FIX: the original's for/else appended the bare name string once
    per NON-matching donor, corrupting the list with duplicates that also
    lacked the (name, donations) tuple shape.
    """
    print("Type 'list' to get a list of donors")
    donor_name = input("Type in the name of a donor: ")
    if donor_name == "list":
        print("\nHere is your list of donors:\n")
        for donor in donors_list:
            print(f"Donors are: {donor}")
        return
    for donor in donors_list:
        if donor_name == donor[0]:
            print(f"Found donor: {donor_name}")
            return
    donors_list.append((donor_name, []))
    print(f"Adding {donor_name} to the list of donors.")
def quit():
    # Menu action 3: announce, pause briefly, and terminate the process.
    # NOTE(review): shadows the ``quit`` builtin; harmless here since it
    # is only called from the menu, but renaming would be cleaner.
    print("Quitting Mailroom...")
    time.sleep(.5)  # cosmetic pause before exiting
    print("Goodbye")
    sys.exit(0)
def main_menu():
    """Top-level REPL: prompt until the user picks quit (option 3)."""
    while True:
        # NOTE(review): the prompt text contains a typo ("What you would
        # you like to do?") -- it is user-facing output, left unchanged.
        answer = input(""" -> What you would you like to do?
    Pick One:
    1: Send a thank you
    2: Create a report
    3: Quit
    >>> """)
        print(f"You selected {answer}")
        answer = answer.strip()
        if answer == "1":
            thank_you()
        elif answer == "2":
            report()
        elif answer == "3":
            quit()
        else:
            print("Please answer 1, 2, or 3")
if __name__ == "__main__":
    print("Welcome to the Mailroom")
    # Builds the tuple ("fred flinstone", [100, 50, 600]); the parentheses
    # around the string are redundant.  Apparently a leftover fixture for
    # the commented-out assert below.
    donor = ("fred flinstone"), [100, 50, 600]
    # assert gen_stats(donor) == ("fred flinstone"), [750, 3, 250.0]
    main_menu()
|
25,426 | 45bc53714a19cce94812de6eb922ce468f28be57 | import sys
import math
def calc(l, p, c):
    """Count multiplications of l by c performed while l*c stays below p.

    Returns how many times the running value was scaled by c before the
    next scaling would reach or exceed p (0 if l*c >= p already).
    """
    steps = 0
    value = l
    while value * c < p:
        value *= c
        steps += 1
    return steps
def calc2(b):
    """Number of doublings of 1 that stay <= b, plus one.

    Equivalent to floor(log2(b)) + 1 for b >= 1.
    """
    power = 1
    doublings = 0
    while power * 2 <= b:
        power *= 2
        doublings += 1
    return doublings + 1
def two():
    # Contest-style driver (Python 2 only: xrange / print statement).
    # Reads T cases of "L P C" from stdin and prints "Case #k: answer",
    # where the answer combines calc() (scalings of L by C below P) and
    # calc2() (doublings needed to cover that count).
    t = int(sys.stdin.readline().strip())
    for times in xrange(0,t):
        l,p,c = map(int,sys.stdin.readline().strip().split())
        out = "Case #"+str(times+1)+":"
        one = calc(l,p,c)
        if one == 0:
            two = 0
        else:
            # NOTE(review): local variable ``two`` shadows this function's
            # own name; safe here but confusing.
            two = calc2(one)
        print out,int(two)
two()
|
25,427 | 113968d1d666ada576ed512ccf0d02513c33b338 | import numpy as np
import matplotlib.pyplot as plt
import util
from matplotlib import rcParams
# Figure script: stem-plot the discrete impulse response of a constant
# phase shifter (phi = -pi/4) alongside its cos(phi)*delta[n] component,
# then save the figure as a PDF into the paper's graphics directory.
dir_fig = '../paper/graphics/'
# House style for the paper: thin gray axes, Times serif, LaTeX rendering.
rcParams['figure.figsize'] = [5, 3]
rcParams['axes.linewidth'] = 0.5
rcParams['axes.edgecolor'] = 'gray'
rcParams['axes.facecolor'] = 'None'
rcParams['axes.labelcolor'] = 'black'
rcParams['xtick.color'] = 'gray'
rcParams['ytick.color'] = 'gray'
rcParams['font.family'] = 'serif'
rcParams['font.serif'] = 'Times New Roman'
rcParams['font.size'] = 13
rcParams['text.usetex'] = True
rcParams['text.latex.preamble'] = r'\usepackage{amsmath}'
phi = -np.pi / 4
# Symmetric sample-index range around n = 0.
nmin, nmax = -16, 16
n = np.arange(nmin, nmax + 1)
# Impulse response from the project helper (util); presumably a float
# array -- assigning None below stores NaN so matplotlib leaves a gap at
# n = 0, where the red cos(phi) stem is drawn separately.
h = util.discrete_ir_constant_phase(n, phi)
h[n == 0] = None
plt.figure()
plt.stem([0], [np.cos(phi)], markerfmt='C3o', linefmt='C3', basefmt='C3',
         label=r'$\cos\varphi\cdot\delta[n]$')
plt.stem(n, h, markerfmt='C0o', linefmt='C0', basefmt='')
plt.grid(color='lightgray')
plt.xlabel('$n$ / sample')
plt.ylabel('$h[n]$')
plt.xlim(nmin - 0.5, nmax + 0.5)
plt.ylim(-0.55, 0.8)
# File name encodes the phase in degrees, e.g. discrete-ir-phi-45.pdf.
filename = 'discrete-ir-phi{:03.0f}.pdf'.format(np.rad2deg(phi))
plt.savefig(dir_fig + filename, dpi=300, bbox_inches='tight')
|
25,428 | dbcbd457dfdf56f8090ab9606212888715e04307 | import sys
import itertools
sys.setrecursionlimit(1000000000)
from heapq import heapify,heappop,heappush,heappushpop
import math
import collections
# Contest solution: count valid configurations modulo 1e9+7.
# For even n, the multiset must consist of the odd values 1,3,5,...
# each appearing exactly twice; for odd n, a single 0 plus the even
# values 2,4,6,... each appearing exactly twice.  Every matched pair can
# be arranged two ways, contributing a factor of 2; any deviation makes
# the answer 0.
MOD = 10**9+7
n = int(input())
a = list(map(int,input().split()))
if n%2 == 0:
    c = collections.Counter(a)
    item = []
    for key,value in c.items():
        item.append([key,value])
    item.sort()
    ans = 1
    for i in range(len(item)):
        # The i-th distinct value must be exactly 2*i+1, seen twice.
        if 2*i + 1 == item[i][0]:
            if item[i][1] == 2:
                ans *= 2
                ans %=MOD
            else:
                print(0)
                sys.exit()
        else:
            print(0)
            sys.exit()
    print(ans%MOD)
else:
    c = collections.Counter(a)
    ans = 1
    item = []
    for key,value in c.items():
        item.append([key,value])
    item.sort()
    # Skip index 0, which must hold the single mandatory zero (checked
    # after the loop); remaining distinct values must be 2,4,6,...
    for i in range(1,len(item)):
        if 2*i == item[i][0]:
            if item[i][1] == 2:
                ans *= 2
                ans %=MOD
            else:
                print(0)
                sys.exit()
        else:
            print(0)
            sys.exit()
    if item[0][0] == 0 and item[0][1] == 1:
        print(ans%MOD)
    else:
        print(0)
25,429 | 6f54eb67f6ce49ef8b96bb266f779b0ae34119ba | # Generated by Django 3.2.5 on 2021-09-02 13:39
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: change the 'content' field of
    # pageobjects to a JSONField (verbose name kept in Russian, matching
    # the model).
    # NOTE(review): max_length is ignored by JSONField -- likely carried
    # over from a previous CharField definition; confirm and drop.
    dependencies = [
        ('web_calculator', '0023_auto_20210902_0931'),
    ]
    operations = [
        migrations.AlterField(
            model_name='pageobjects',
            name='content',
            field=models.JSONField(max_length=100, verbose_name='Содержание'),
        ),
    ]
|
25,430 | 2d798f89a15e10eefda8ca814689b61478d84ea9 | from django.conf.urls import url
import calc.views as cv
# URL routes for the calc app.  add2/re_add capture two integer operands
# directly in the URL path as positional groups.
urlpatterns = [
    url(r'^$', cv.index, name='home'),
    url(r'^add/', cv.add, name="add"),
    url(r'add2/(\d+)/(\d+)/', cv.add2, name="add2"),
    # NOTE(review): unnamed route -- cannot be reversed by name.
    url(r're_add/(\d+)/(\d+)/', cv.add3),
] |
25,431 | f67da10af06c1f8b23bdf937b58df3352b1e6c17 | n1 = 0
n2 = 1
# NOTE(review): the prompt says "nterm" but n is used as an upper BOUND:
# this prints every Fibonacci number strictly below n, not the first n
# terms.
n=int(input("enter th nterm"))
while n2 < n:
    print(n2)
    # Advance the (previous, current) Fibonacci pair in one assignment.
    n1,n2=n2,n1+n2
|
25,432 | 3110811d586e2d715b3c287cb7c3038cf102cc76 | from django.conf.urls.defaults import *
from weblog.models import Entry, Link
from tagging.models import Tag
# Legacy Django URLconf (pre-1.8 ``patterns()`` / string-view style):
# a tag index page plus per-tag archive pages for entries and links.
urlpatterns = patterns('',
    (r'^$', 'django.views.generic.list_detail.object_list',
        { 'queryset': Tag.objects.all(),
          'template_name':'weblog/tag_list.html'},
        'weblog_tag_list'),
    (r'^entries/(?P<tag>[-\w]+)/$',
        'tagging.views.tagged_object_list',
        { 'queryset_or_model': Entry.live.all(),
          'template_name': 'weblog/entries_by_tag.html' },
        'weblog_entry_archive_tag'),
    (r'^links/(?P<tag>[-\w]+)/$',
        'tagging.views.tagged_object_list',
        { 'queryset_or_model': Link.objects.all(),
          'template_name': 'weblog/links_by_tag.html' },
        'weblog_link_archive_tag'),
)
|
25,433 | d51ed726be0556dd476bda3ceee3a1d995bc82f8 | """empty message
Revision ID: f1053a0fc6f2
Revises: 1b8a20c84d1e
Create Date: 2020-03-30 22:55:43.464460
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f1053a0fc6f2'
down_revision = '1b8a20c84d1e'
branch_labels = None
depends_on = None
def upgrade():
    """Drop the denormalized artist/venue columns from ``shows``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('shows', 'artist_image_link')
    op.drop_column('shows', 'venue_name')
    op.drop_column('shows', 'artist_name')
    # ### end Alembic commands ###
def downgrade():
    """Re-create the columns removed by upgrade() (data is not restored)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('shows', sa.Column('artist_name', sa.VARCHAR(), autoincrement=False, nullable=True))
    op.add_column('shows', sa.Column('venue_name', sa.VARCHAR(), autoincrement=False, nullable=True))
    op.add_column('shows', sa.Column('artist_image_link', sa.VARCHAR(length=500), autoincrement=False, nullable=True))
    # ### end Alembic commands ###
|
25,434 | 49402f64206d17e0b3402047d284999d3c9528cb | #!/usr/bin/env python3
import requests
import os,time,sys
from time import sleep
import threading
import socket
def video():
    """Ask the GoPro to (re)start streaming, wait, then play the UDP feed.

    Hits the camera's gpControl restart endpoint, gives the stream a few
    seconds to come up, then blocks inside ffplay on udp://:8554.
    """
    response = requests.get(
        "http://10.5.5.9/gp/gpControl/execute?p1=gpStream&c1=restart")
    print(response.text)
    time.sleep(3.0)
    os.system("ffplay -fflags nobuffer -f:v mpegts -probesize 8192 udp://:8554")
def keepalive():
    """Send the GoPro keep-alive datagram every 250 ms, forever.

    BUG FIX: the original created a brand-new UDP socket on every loop
    iteration and never closed any of them; one socket is now created
    once and reused (UDP needs no reconnection), and closed on exit.
    """
    def get_command_msg(id):
        # NOTE(review): ``id`` is ignored; the payload always embeds
        # command 2 -- kept as-is to preserve the wire format.
        return "_GPHD_:%u:%u:%d:%1lf\n" % (0, 0, 2, 0)
    UDP_IP = "10.5.5.9"
    UDP_PORT = 8554
    KEEP_ALIVE_PERIOD = 250  # milliseconds
    KEEP_ALIVE_CMD = 2
    MESSAGE = get_command_msg(KEEP_ALIVE_CMD)
    print("UDP target IP:", UDP_IP)
    print("UDP target port:", UDP_PORT)
    print("message:", MESSAGE)
    if sys.version_info.major >= 3:
        MESSAGE = bytes(MESSAGE, "utf-8")
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        while True:
            sock.sendto(MESSAGE, (UDP_IP, UDP_PORT))
            sleep(KEEP_ALIVE_PERIOD / 1000)
    finally:
        sock.close()
exitFlag = 0  # unused flag, retained for compatibility
class kathread(threading.Thread):
    """Worker thread that runs the keep-alive sender until the process exits."""
    def __init__(self, threadID, name):
        super(kathread, self).__init__()
        self.threadID = threadID
        self.name = name
    def run(self):
        print("Starting " + self.name)
        keepalive()
        print("Exiting " + self.name)
thread1 = kathread(1, "Thread-1")
# Start new Threads
# The keep-alive sender runs in the background while video() blocks in
# ffplay in the main thread.
thread1.start()
video()
print("Exiting Main Thread")
|
25,435 | 4efa60aa408cb5702cfd8d387c4eb6b9e9f93134 | class CityNotFound(Exception):
pass
class CityInactive(Exception):
    """Error: the requested city is inactive."""
    pass
class CityUnpredicable(Exception):
    # NOTE(review): name misspells "Unpredictable"; cannot be renamed
    # without breaking callers that catch it.
    """Error: no prediction is available for the city."""
    pass
class InvalidKind(Exception):
    """Error: an unsupported kind value was supplied."""
    pass
class StationNotFound(Exception):
    """Error: the requested station does not exist."""
    pass
|
25,436 | 573fe4692550ece9a13eaf20ed60a73021a32335 | # -*- coding: utf-8 -*-
"""
The Struct is a convenient way to access data in a hash.
Makes it possible to load data from redis as an object and access the fields.
Then store changes back into redis.
"""
from six import add_metaclass
from json.encoder import JSONEncoder
from functools import wraps
from .pipelines import autoexec
from .keyspaces import Hash
from .fields import TextField
from .exceptions import InvalidOperation
from .futures import Future, IS
__all__ = ['Struct']
class StructMeta(type):
    """
    Data binding of a redpipe.Hash to the core of the Struct object.
    Creates it dynamically on class construction.
    uses the keyspace and connection fields

    Meta Classes are strange beasts.
    """
    def __new__(mcs, name, bases, d):
        # The abstract ``Struct`` base itself gets no generated core.
        if name in ['Struct']:
            return type.__new__(mcs, name, bases, d)
        # Build a Hash keyspace subclass from the Struct subclass's
        # declared attributes, falling back to the class name / defaults.
        class StructHash(Hash):
            keyspace = d.get('keyspace', name)
            connection = d.get('connection', None)
            fields = d.get('fields', {})
            keyparse = d.get('keyparse', TextField)
            valueparse = d.get('valueparse', TextField)
            memberparse = d.get('memberparse', TextField)
        # Expose the generated keyspace class as ``core`` on the new type.
        d['core'] = StructHash
        return type.__new__(mcs, name, bases, d)
@add_metaclass(StructMeta)
class Struct(object):
"""
load and store structured data in redis using OOP patterns.
If you pass in a dictionary-like object, redpipe will write all the
values you pass in to redis to the key you specify.
By default, the primary key name is `_key`.
But you should override this in your Struct with the `key_name`
property.
.. code-block:: python
class Beer(redpipe.Struct):
fields = {'name': redpipe.StringField}
key_name = 'beer_id'
beer = Beer({'beer_id': '1', 'name': 'Schlitz'})
This will store the data you pass into redis.
It will also load any additional fields to hydrate the object.
**RedPipe** does this in the same pipelined call.
If you need a stub record that neither loads or saves data, do:
.. code-block:: python
beer = Beer({'beer_id': '1'}, no_op=True)
You can later load the fields you want using, load.
If you pass in a string we assume it is the key of the record.
redpipe loads the data from redis:
.. code-block:: python
beer = Beer('1')
assert(beer['beer_id'] == '1')
assert(beer['name'] == 'Schlitz')
If you need to load a record but only specific fields, you can say so.
.. code-block:: python
beer = Beer('1', fields=['name'])
This will exclude all other fields.
**RedPipe** cares about pipelining and efficiency, so if you need to
bundle a bunch of reads or writes together, by all means do so!
.. code-block:: python
beer_ids = ['1', '2', '3']
with redpipe.pipeline() as pipe:
beers = [Beer(i, pipe=pipe) for i in beer_ids]
print(beers)
This will pipeline all 3 together and load them in a single pass
from redis.
The following methods all accept a pipe:
* __init__
* update
* incr
* decr
* pop
* remove
* clear
* delete
You can pass a pipeline into them to make sure that the network i/o is
combined with another pipeline operation.
The other methods on the object are about accessing the data
already loaded.
So you shouldn't need to pipeline them.
"""
__slots__ = ['key', '_data']
keyspace = None
connection = None
key_name = '_key'
fields = {}
default_fields = 'all' # set as 'defined', 'all', or ['a', b', 'c']
def __init__(self, _key_or_data=None, pipe=None, fields=None, no_op=False):
"""
class constructor
:param _key_or_data:
:param pipe:
:param fields:
"""
keyname = self.key_name
self._data = {}
with self._pipe(pipe=pipe) as pipe:
try:
coerced = dict(_key_or_data)
self.key = coerced[keyname]
del coerced[keyname]
if no_op:
self._data = coerced
return
self.update(coerced, pipe=pipe)
except KeyError:
raise InvalidOperation(
'must specify primary key when cloning a struct')
except (ValueError, TypeError):
self.key = _key_or_data
if not no_op:
self.load(fields=fields, pipe=pipe)
def load(self, fields=None, pipe=None):
    """
    Load data from redis.

    restrict to just the fields specified.

    :param fields: 'all', 'defined', or array of field names
    :param pipe: Pipeline(), NestedPipeline() or None
    :return: None
    """
    if fields is None:
        fields = self.default_fields
    if fields == 'all':
        return self._load_all(pipe=pipe)
    if fields == 'defined':
        fields = [k for k in self.fields.keys()]
    if not fields:
        return
    with self._pipe(pipe) as pipe:
        ref = self.core(pipe=pipe).hmget(self.key, fields)
        # Deferred: hmget results only exist after the pipe executes.
        def cb():
            for i, v in enumerate(ref.result):
                k = fields[i]
                if k != self.key_name:
                    self._data[k] = v
        pipe.on_execute(cb)
def _load_all(self, pipe=None):
    # Fetch the whole hash; populate local data after the pipe runs,
    # skipping the reserved primary-key field.
    with self._pipe(pipe) as pipe:
        ref = self.core(pipe=pipe).hgetall(self.key)
        def cb():
            if not ref.result:
                return
            for k, v in ref.result.items():
                if k != self.key_name:
                    self._data[k] = v
        pipe.on_execute(cb)
def incr(self, field, amount=1, pipe=None):
    """
    Increment a field by a given amount.

    Return the future
    Also update the field.
    :param field: hash field name
    :param amount: increment step
    :param pipe: Pipeline, NestedPipeline, or None
    :return: future resolving to the incremented value
    """
    with self._pipe(pipe) as pipe:
        core = self.core(pipe=pipe)
        new_amount = core.hincrby(self.key, field, amount)
        # Read back so the local copy matches redis' canonical value.
        ref = core.hget(self.key, field)
        def cb():
            self._data[field] = ref.result
        pipe.on_execute(cb)
        return new_amount
def decr(self, field, amount=1, pipe=None):
    """Decrement ``field`` by ``amount``; mirror of :meth:`incr`.

    :param field: hash field name
    :param amount: decrement step
    :param pipe: Pipeline, NestedPipeline, or None
    :return: future resolving to the decremented value
    """
    return self.incr(field, -amount, pipe=pipe)
def update(self, changes, pipe=None):
    """
    update the data in the Struct.

    This will update the values in the underlying redis hash.
    After the pipeline executes, the changes will be reflected here
    in the local struct.
    If any values in the changes dict are None, those fields will be
    removed from redis and the instance.

    :param changes: dict of field -> new value (None means delete)
    :param pipe: Pipeline, NestedPipeline, or None
    :return: None
    """
    if not changes:
        return
    if self.key_name in changes:
        raise InvalidOperation('cannot update the redis key')
    # BUG FIX: original used the undefined name ``IS(v, None)`` which
    # raises NameError; the identity test ``v is None`` is intended.
    deletes = {k for k, v in changes.items() if v is None}
    updates = {k: v for k, v in changes.items() if k not in deletes}
    with self._pipe(pipe) as pipe:
        core = self.core(pipe=pipe)

        def build(k, v):
            # Each field gets its own callback so the local mirror is
            # only touched once the pipeline has executed.
            core.hset(self.key, k, v)

            def cb():
                self._data[k] = v

            pipe.on_execute(cb)

        for k, v in updates.items():
            build(k, v)
        self.remove(deletes, pipe=pipe)
def remove(self, fields, pipe=None):
    """
    remove some fields from the struct.

    This will remove data from the underlying redis hash object.
    After the pipe executes successfully, it will also remove it from
    the current instance of Struct.

    :param fields: list or iterable, names of the fields to remove.
    :param pipe: Pipeline, NestedPipeline, or None
    :return: None
    """
    if not fields:
        return
    if self.key_name in fields:
        raise InvalidOperation('cannot remove the redis key')
    with self._pipe(pipe) as pipe:
        core = self.core(pipe=pipe)
        core.hdel(self.key, *fields)
        # Local mirror is pruned only after the pipe succeeds.
        def cb():
            for k in fields:
                try:
                    del self._data[k]
                except KeyError:
                    pass
        pipe.on_execute(cb)
def copy(self):
    """Return a new instance of the same class built from this struct's data."""
    cls = self.__class__
    return cls(dict(self))
@property
def persisted(self):
    """
    Whether any field data is currently held locally.

    Not certain I want to keep this around. Is it useful?
    :return: bool
    """
    return bool(self._data)
def clear(self, pipe=None):
    """
    delete the current redis key.

    Local data is dropped only after the pipe executes successfully.
    :param pipe: Pipeline, NestedPipeline, or None
    :return: None
    """
    with self._pipe(pipe) as pipe:
        self.core(pipe=pipe).delete(self.key)
        def cb():
            self._data = {}
        pipe.on_execute(cb)
def get(self, item, default=None):
    """Dictionary-style ``get`` against the locally loaded data."""
    try:
        return self._data[item]
    except KeyError:
        return default
def pop(self, name, default=None, pipe=None):
    """
    works like the dictionary pop method.

    IMPORTANT!
    This method removes the key from redis.
    If this is not the behavior you want, first convert your
    Struct data to a dict.

    :param name: field to remove
    :param default: value resolved when the field is absent in redis
    :param pipe: Pipeline, NestedPipeline, or None
    :return: Future resolving to the removed value (or ``default``)
    """
    f = Future()
    with self._pipe(pipe) as pipe:
        c = self.core(pipe)
        ref = c.hget(self.key, name)
        c.hdel(self.key, name)

        def cb():
            f.set(default if ref.result is None else ref.result)
            # BUG FIX: pop with a default so a field that was never
            # loaded into the local mirror does not raise KeyError
            # inside the pipe-execution callback.
            self._data.pop(name, None)

        pipe.on_execute(cb)
    return f
@classmethod
def delete(cls, keys, pipe=None):
    """
    Delete one or more keys from the Struct namespace.

    This is a class method and unlike the `clear` method,
    can be invoked without instantiating a Struct.

    :param keys: the names of the keys to remove from the keyspace
    :param pipe: Pipeline, NestedPipeline, or None
    :return: None
    """
    with cls._pipe(pipe) as pipe:
        core = cls.core(pipe)
        core.delete(*keys)
@classmethod
def _pipe(cls, pipe=None):
    # Wrap (or create) a pipeline bound to this struct's connection;
    # autoexec runs the pipe on context exit when it was created here.
    return autoexec(pipe, name=cls.connection)
def __getitem__(self, item):
    """Read a field; the primary key is exposed under ``key_name``."""
    return self.key if item == self.key_name else self._data[item]
def __delitem__(self, key):
    """Direct deletion is unsupported; use the delete/remove methods."""
    message = 'cannot delete %s from %s indirectly. Use the delete method.' % (key, self)
    raise InvalidOperation(message)
def __setitem__(self, key, value):
    """Direct assignment is unsupported; use the update/set methods."""
    message = 'cannot set %s key on %s indirectly. Use the set method.' % (key, self)
    raise InvalidOperation(message)
def __iter__(self):
    """Iterate over field names (key field included)."""
    return iter(self.keys())
def __len__(self):
    """Number of fields, key field included."""
    as_dict = dict(self)
    return len(as_dict)
def __contains__(self, item):
    """True for the key field or any locally loaded field."""
    return item == self.key_name or item in self._data
def iteritems(self):
    """Yield (field, value) pairs, the primary key first."""
    yield self.key_name, self.key
    for pair in self._data.items():
        yield pair
def items(self):
    """Like ``dict.items`` but materialized as a list."""
    return list(self.iteritems())
def __eq__(self, other):
    """Structs compare equal when their dict representations match."""
    if self is other:
        return True
    try:
        return dict(self) == dict(other)
    except (TypeError, ValueError):
        return False
def keys(self):
    """Field names as a list, primary key first."""
    return [field for field, _ in self.items()]
def __str__(self):
    """Short display form: ``<ClassName:key>``."""
    return "<{0}:{1}>".format(self.__class__.__name__, self.key)
def __repr__(self):
    """Debug form: the full dict representation."""
    as_dict = dict(self)
    return repr(as_dict)
def __getstate__(self):
    """Pickle support: persist only the key and the local data."""
    return (self.key, self._data)
def __setstate__(self, state):
    """Pickle support: restore the key and the local data."""
    self.key, self._data = state[0], state[1]
@property
def _redpipe_struct_as_dict(self):
    # Marker property consumed by the patched json encoder so that
    # Structs serialize as plain dicts.
    return dict(self)
def _json_default_encoder(func):
    """
    Monkey-Patch the core json encoder library.

    Wraps ``JSONEncoder.default`` so that any object exposing a
    ``_redpipe_struct_as_dict`` attribute (a Struct) is serialized as
    its dict form; everything else falls through to the wrapped
    default, preserving previous behavior. Because the patch wraps
    rather than replaces, it composes with other patches.

    :param func: the JSONEncoder.default method.
    :return: an object that can be json serialized.
    """
    @wraps(func)
    def inner(self, o):
        try:
            dict_form = o._redpipe_struct_as_dict  # noqa
        except AttributeError:
            return func(self, o)
        return dict_form
    return inner


JSONEncoder.default = _json_default_encoder(JSONEncoder.default)
|
25,437 | d36f28ead5ae6e9a88f18ad3881bca934777a6bc | # 引入了三方库
from PIL import Image
# Open the source image from disk.
im = Image.open(r"D:\python\girl.jpg")
print(im)
# Inspect basic image metadata.
print(im.format,im.size,im.mode)
# Shrink in place to fit within 500x500, preserving aspect ratio.
im.thumbnail((500,500))
# Save the resized copy (relative to the working directory).
im.save("girl.jpg","JPEG")
|
25,438 | 94a2ce4bc9945d3022d5714bd64ce02e0ad18e9d | # Create your views here.
from django.shortcuts import render_to_response
from django.template import RequestContext
#Process the default request, returns the index page
def index(request):
    """Serve the search landing page."""
    context = RequestContext(request)
    return render_to_response('search/index.html',
                              context_instance=context)
def results(request):
    """Serve the search results page."""
    context = RequestContext(request)
    return render_to_response('search/results.html',
                              context_instance=context)
|
25,439 | 1bf5a137e794864aa8746638c2611e16f3ac7481 | from tkinter import *
import tkinter.messagebox
def clickCallback():
    # Placeholder handler for the "New Game" button.
    tkinter.messagebox.askokcancel( "Hello Python", "Hello Runoob")
# Fixed-size 250x340 window styled after the 2048 game.
root = Tk()
root.geometry('250x340')
root.title('2048')
root.minsize(250,340)
root.maxsize(250,340)
root['background']='#FAF8EF'
# Board area and the two score boxes in the top-right corner.
frm_S = Frame(root,width=240,height=240,bg='#D4C8BD')
frm_NE1 = Frame(root,width=55,height=40,bg='#BBADA0')
frm_NE2 = Frame(root,width=55,height=40,bg='#BBADA0')
label_TITLE = Label(root,text='2048',width=4,font = 'Helvetica -44 bold',anchor='n',fg='#776E65',bg='#FAF8EF')
btn_RST = Button(root,text='New Game',width=10,bg='#8F7A66',fg='white',bd=0,font = 'Helvetica -12 bold',command=clickCallback)
label_SCORE = Label(frm_NE1,text='SCORE',bg='#BBADA0',fg='white',font = 'Helvetica -8')
label_BEST =Label(frm_NE2,text='BEST',bg='#BBADA0',fg='white',font = 'Helvetica -8')
label_nSCORE = Label(frm_NE1,text='0',bg='#BBADA0',fg='white',font = 'Helvetica -15 bold')
label_nBEST = Label(frm_NE2,text='0',bg='#BBADA0',fg='white',font = 'Helvetica -15 bold')
# Grid layout: title spans two rows on the left, scores on the right.
label_TITLE.grid(row=0,column=0,rowspan=2,sticky=NW,ipadx=3,ipady=15)
frm_NE1.grid(row=0,column=1,ipadx=10,pady=3)
frm_NE2.grid(row=0,column=2,ipadx=14,padx=5,pady=3)
btn_RST.grid(row=1,column=1,columnspan=2,padx=8,pady=5,sticky=SE)
frm_S.grid(row=2,column=0,columnspan=3,padx=6,pady=5,sticky=W)
label_SCORE.grid(row=0,column=0,sticky=W)
label_BEST.grid(row=0,column=0,sticky=W)
label_nSCORE.grid(row=1,column=0,sticky=W,ipadx=2)
label_nBEST.grid(row=1,column=0,sticky=W,ipadx=2)
root.mainloop()
|
25,440 | 93b6f6fb6ab6230a84a67d2b449a16aa4d12c753 | #-*- coding:utf-8 -*-
from django import forms
from apps.room.models import Room, RoomType
from libs.djex.forms import QForm, ModelForm, PageField
class RoomForm(ModelForm):
    # Extra fields layered on top of the Room model form.
    # NOTE(review): ModelChoiceField normally takes a queryset
    # (RoomType.objects.all()) rather than the model class — confirm
    # this works with the project's form library.
    room_type = forms.ModelChoiceField(RoomType)
    status = forms.IntegerField()
    photo = forms.CharField(label="photo", required=False)
    comment = forms.CharField(label="comment", required=False)
    note = forms.CharField(label="note", required=False)
    ref_id = forms.IntegerField(label="ref_id", initial=None, required=False)
    class Meta:
        model = Room
        exclude = ['status']  # managed via the explicit field above
class RoomQForm(QForm):
    """Query/filter form for room listings (datatables-style paging)."""
    id = forms.CharField(label='id', required=False)
    # BUG FIX: label previously said 'id' (copy-paste); this field filters by sn.
    sn = forms.CharField(label='sn', required=False)
    name_like = forms.CharField(label='name_like', required=False)
    status = forms.CharField(label='status', required=False)
    #using = forms.CharField(label='using', required=False)
    #status_time = forms.CharField(label='status_time', required=False)
    iDisplayStart = PageField(label='记录起始点', default=0, required=False)
    iDisplayLength = PageField(label='记录长', default=30, required=False)
    orderBy = forms.CharField(label='', widget=forms.HiddenInput, required=False)

    def get_condition(self):
        """Translate cleaned form fields into ORM filter kwargs.

        :return: dict suitable for ``QuerySet.filter(**conditions)``
        """
        data = self.cleaned_data
        conditions = {}
        # str.split(',') on a non-empty string is always non-empty, so
        # the original inner "if idIn/snIn" guards were redundant.
        if data.get('id', ''):
            conditions['id__in'] = data['id'].split(',')
        if data.get('sn', ''):
            conditions['sn__in'] = data['sn'].split(',')
        if data.get('name_like', ''):
            conditions['name__contains'] = data['name_like']
        if data.get('status', ''):
            conditions['status'] = data['status']
        return conditions
class RoomTypeForm(ModelForm):
    # Model-backed form for RoomType; 'status' is managed elsewhere.
    class Meta:
        model = RoomType
        exclude = ['status']
|
25,441 | 23fc5f5031e72e8d601d8bf8bdcedc6319f34c6f | from django.apps import AppConfig
class CitysConfig(AppConfig):
    # Django application configuration for the 'citys' app.
    name = 'citys'
    verbose_name = '区域管理'  # "Region management" — shown in the admin UI
|
25,442 | d493ee71f3c7c285c674d3cfe821ef004c760eab | """
Spectral filter module (FFT)
Sliders under the graph:
- Filters interpolation : Morph between the two filters
- Dry / Wet : Mix between the original signal and the delayed signals
Dropdown menus, toggles and sliders on the bottom left:
- Filter Range : Limits of the filter
- FFT Size : Size of the FFT
- FFT Envelope : Shape of the FFT
- FFT Overlaps : Number of FFT overlaps
- # of Voices : Number of voices played simultaneously (polyphony), only available at initialization time
- Polyphony Spread : Pitch variation between voices (chorus), only available at initialization time
Graph only parameters :
- Spectral Filter 1 : Shape of the first filter
- Spectral Filter 2 : Shape of the second filter
- Overall Amplitude : The amplitude curve applied on the total duration of the performance
"""
# User Interface
defineUI(id=1, name="env", label="Amplitude", unit="x", init=.8)
# cgraph(name="filter_table_1", label="Spectral Filter 1", table=True, size=8192, func=[(0,0),(0.05,1),(0.1,0),(0.2,0),(0.3,.7),(0.4,0),(0.5,0),(0.6,.5),(0.7,0),(1,0)], col="green"),
# cgraph(name="filter_table_2", label="Spectral Filter 2", table=True, size=8192, func=[(0,0),(0.02,1),(0.07,0),(0.25,0),(0.35,.7),(0.5,0),(0.65,0),(0.75,.5),(0.9,0),(1,0)], col="forestgreen"),
defineUI(id=2, name="interpol", label="FiltInterpolation", min=0, max=1, init=0, rel="lin", unit="x", col="olivegreen")
defineUI(id=3, name="mix", label="Dry/Wet", min=0, max=1, init=1, rel="lin", unit="x", col="blue")
defineUI(id=4, name="filter_range", func="filter_rangefunc", label="FilterRange", init="Up to Nyquist/2", value=["Up to Nyquist", "Up to Nyquist/2", "Up to Nyquist/4", "Up to Nyquist/8"], col="green")
defineUI(id=5, name="fftsize", func="fftsizefunc", label="FFTSize", init="1024", value=["16", "32", "64", "128", "256", "512", "1024", "2048", "4096", "8192"], col="red")
defineUI(id=6, name="wtype", func="wtypefunc", label="FFTEnvelope", init="Hanning", col="red", value=["Rectangular", "Hamming", "Hanning", "Bartlett", "Blackman 3", "Blackman 4", "Blackman 7", "Tuckey", "Sine"])
defineUI(id=7, name="overlaps", func="overlapsfunc", label="FFTOverlaps", rate="i", init="4", value=["1", "2", "4", "8", "16"])
# DSP
# Two source filter shapes; TableMorph blends between them under the
# control of the "interpol" slider.
filter_table_1 = DataTable(10, [(0,0),(0.05,1),(0.1,0),(0.2,0),(0.3,.7),(0.4,0),(0.5,0),(0.6,.5),(0.7,0),(1,0)])
filter_table_2 = DataTable(10, [(0,0),(0.02,1),(0.07,0),(0.25,0),(0.35,.7),(0.5,0),(0.65,0),(0.75,.5),(0.9,0),(1,0)])
snd = stereoIn
size = int(fftsize_value)
olaps = 4
oneOverSr = 1.0 / sr
# Maps the FilterRange menu index to a bin-scaling divisor.
frange_bounds = {0: 2, 1: 4, 2: 8, 3:16}
# Dry path is delayed to stay time-aligned with the FFT latency.
delsrc = Delay(snd, delay=size*oneOverSr*2)
filter = NewTable(8192./sr)
interpolation = TableMorph(interpol, filter, [filter_table_1, filter_table_2])
fin = FFT(snd, size=size, overlaps=olaps)
frange_bound = frange_bounds[filter_range_index]
index = Scale(fin["bin"], 0, size, 0, frange_bound, 1)
amp = Pointer(filter, Clip(index, 0, 1))
# Apply the morphing filter to both spectral components, resynthesize.
real = fin["real"] * amp
imag = fin["imag"] * amp
fout = IFFT(real, imag, size=size, overlaps=olaps)
ffout = fout.mix(nchnls)
# Short fade used to mask clicks when the FFT size changes.
fade = SigTo(value=1, time=.05, init=1)
out = Interp(delsrc*env, ffout*env, mix, mul=fade).out()
def fftsizefunc():
    # Resize the FFT chain, fading the output down/up around the
    # switch to avoid audible clicks.
    newsize = int(fftsize.get())
    fade.value = 0
    time.sleep(.05)
    delsrc.delay = newsize*oneOverSr*2
    fin.size = newsize
    fout.size = newsize
    index.inmax = newsize
    time.sleep(.05)
    fade.value = 1
def wtypefunc():
    # Apply the selected analysis/synthesis window to both FFT objects.
    fin.wintype = int(wtype.get())
    fout.wintype = int(wtype.get())
def overlapsfunc():
    # NOTE(review): these assignments rebind *local* names only; the
    # module-level fin/fout/ffout audio graph is untouched, so changing
    # the overlaps menu appears to have no audible effect. Confirm
    # whether `global fin, fout` (and rewiring `out`) was intended.
    olaps = int(overlaps.get())
    size = int(fftsize.get())
    wintype = int(wtype.get())
    fin = FFT(snd, size=size, overlaps=olaps, wintype=wintype)
    fout = IFFT(real, imag, size=size, overlaps=olaps, wintype=wintype)
    foutTmp = fout.mix(nchnls)
def filter_rangefunc():
    # BUG FIX: the bound was looked up with the Scale object ``index``
    # as the dict key (a type error / wrong lookup); the menu selection
    # index is the correct key, matching the initial lookup performed
    # at build time (``frange_bounds[filter_range_index]``).
    index.outmax = frange_bounds[filter_range_index]
25,443 | 4ca7263e32b41c3bf9dca96c25990ee933673497 | import numpy as np
import matplotlib.pyplot as plt
data = np.load("30kev.npz")
ray = data['ray'][:98]
comp = data['comp'][:98]
# Total signal = Rayleigh + Compton components.
im = ray+comp
# Horizontal first difference between adjacent columns (offset window).
im = im[:,6:] - im[:,5:-1]
# Split into negative-only / positive-only copies (currently unused —
# the masked plots below are commented out).
neg = im.copy()
pos = im.copy()
neg[np.where(neg>=0)] = np.nan
pos[np.where(pos<=0)] = np.nan
# Symmetric color limits so zero maps to the middle of the 'bwr' map.
# NOTE(review): assumes im.min() < 0 — confirm for all inputs.
plt.imshow(im,origin='lower',cmap='bwr',vmin=im.min(),vmax=-im.min())
# plt.imshow(neg*-1,origin='lower',cmap='Blues')
# plt.colorbar()
# plt.imshow(pos,origin='lower',cmap='Reds')
plt.colorbar()
plt.show()
25,444 | d3cbffeaf5fc4efb6577e35ec03a9c4026ebb200 | len 함수
len 함수는, 리스트 안의 원소 개수를 세주는 역할을 합니다.
alphabet = ["a", "b", "c", "d", "e", "f"]
print("리스트의 길이는: %d" % len(alphabet))
리스트의 길이는: 6
원소 추가하기
insert와 append를 사용하여 리스트에 원소를 추가할 수 있습니다.
numbers = []
# 마지막 위치에 5 추가
numbers.append(5)
print(numbers)
# 마지막 위치에 8 추가
numbers.append(8)
print(numbers)
# 마지막 위치에 10 추가
numbers.append(10)
print(numbers)
# 인덱스 0 자리에 0 추가
numbers.insert(0, 0)
print(numbers)
# 인덱스 3 자리에 12 추가
numbers.insert(3, 12)
print(numbers)
[5]
[5, 8]
[5, 8, 10]
[0, 5, 8, 10]
[0, 5, 8, 12, 10]
원소 빼기
del 함수를 사용함으로써 원하는 리스트의 원소를 삭제할 수 있습니다.
numbers = [1, 2, 3, 4, 5, 6, 7, 8]
# 인덱스 3에 있는 값 삭제
del numbers[3]
print(numbers)
# 인덱스 4부터 마지막 값까지 삭제
del numbers[4:]
print(numbers)
[1, 2, 3, 5, 6, 7, 8]
[1, 2, 3, 5]
sorted 함수
sorted 함수는 리스트의 원소들을 오름차순으로 정렬한 새로운 리스트를 리턴해줍니다.
sorted 함수를 이용하여 [8, 6, 2, 4, 5, 7, 1, 3]이라는 리스트를 정렬한 후 출력하면, [1, 2, 3, 4, 5, 6, 7, 8]이 나옵니다.
numbers = [8, 6, 2, 4, 5, 7, 1, 3]
numbers = sorted(numbers)
print(numbers)
[1, 2, 3, 4, 5, 6, 7, 8]
리스트 연결하기
리스트들을 +로 연결할 수 있습니다.
alphabet1 = ["a", "b", "c"]
alphabet2 = ["d", "e", "f"]
alphabet = alphabet1 + alphabet2
print(alphabet)
['a', 'b', 'c', 'd', 'e', 'f'] |
25,445 | 5f047bba8d74632fc2a525b33b81ed722017cd2d | #!/usr/bin/python
# FileName:function1.py
def sayHello():
    # Python 2 print statement — this file requires Python 2.
    print 'Hello World!' # block belonging to the function
sayHello()
|
25,446 | 3044f61709e155687b3d8e1fc91eea4d0e713566 | import asyncio
import json
from enum import IntEnum, IntFlag, Enum
from random import random
from typing import Final, List
import aiohttp
from volt.events import EventManager
from volt.utils.log import get_logger, DEBUG
from volt.utils.loop_task import loop, LoopTask
class WSClosedError(Exception):
    """Exception indicating websocket closure.

    ``data`` carries the close code/payload received from the socket.
    """

    def __init__(self, data: int):
        self.data = data
        message = f'Websocket is closed with data {self.data}.'
        super().__init__(message)
class GatewayOpcodes(IntEnum):
    """
    discord gateway events.

    Opcode values for payloads exchanged over the gateway websocket.
    """
    DISPATCH = 0
    HEARTBEAT = 1
    IDENTIFY = 2
    PRESENCE = 3
    VOICE_STATE = 4
    VOICE_PING = 5
    RESUME = 6
    RECONNECT = 7
    REQUEST_MEMBERS = 8
    INVALIDATE_SESSION = 9
    HELLO = 10
    HEARTBEAT_ACK = 11
    GUILD_SYNC = 12

    def __repr__(self) -> str:
        # BUG FIX: repr previously misspelled the class name as
        # 'GatewayOpCodes' (capital C), not matching the actual class.
        return f'GatewayOpcodes.{self.name} ({self.value})'

    __str__ = __repr__
class GatewayEvents(Enum):
    # Dispatch event names this client recognizes.
    READY = 'ready'
class GatewayResponse:
    """Parsed gateway payload: opcode, data, sequence number, event type."""
    __slots__ = ('op', 'data', 's', 't')

    def __init__(self, data: str):
        payload = json.loads(data)
        self.op: GatewayOpcodes = GatewayOpcodes(payload['op'])
        self.data = payload.get('d')
        self.s = payload.get('s')
        self.t = payload.get('t')

    def __str__(self) -> str:
        return f'GatewayResponse(op={self.op.name})'

    def __getitem__(self, item):
        # Attribute-style subscript access; falsy values collapse to None.
        return self.__getattribute__(item) or None
class GatewayIntents(IntFlag):
    """Bit flags selecting which gateway event groups to subscribe to."""
    GUILDS = 1 << 0
    GUILD_MEMBERS = 1 << 1
    GUILD_BANS = 1 << 2
    GUILD_EMOJIS_AND_STICKERS = 1 << 3
    GUILD_INTEGRATIONS = 1 << 4
    GUILD_WEBHOOKS = 1 << 5
    GUILD_INVITES = 1 << 6
    GUILD_VOICE_STATES = 1 << 7
    GUILD_PRESENCES = 1 << 8
    GUILD_MESSAGES = 1 << 9
    GUILD_MESSAGE_REACTIONS = 1 << 10
    GUILD_MESSAGE_TYPING = 1 << 11
    DIRECT_MESSAGES = 1 << 12
    DIRECT_MESSAGE_REACTIONS = 1 << 13
    DIRECT_MESSAGE_TYPING = 1 << 14

    @classmethod
    def all(cls) -> 'GatewayIntents':
        """Return the union of every defined intent flag."""
        combined = 0
        for member in cls.__members__.values():
            combined |= member.value
        return cls(combined)
class GatewayBot:
    """Minimal Discord gateway (websocket) client.

    Connects, identifies, keeps the heartbeat alive and dispatches
    DISPATCH payloads to the internal EventManager.
    """
    def __init__(self, token: str, version: int = 9, intents: GatewayIntents = GatewayIntents.all()):
        self.logger = get_logger('volt.gateway', stream=True, stream_level=DEBUG)
        self.loop = asyncio.get_event_loop()
        self.gateway_version: Final[int] = version
        self.intents = intents
        self.__session = None
        self.__token: Final[str] = token
        # NOTE: attribute name carries a historical typo ('hearbeat');
        # renaming would touch name-mangled references, so left as-is.
        self.__hearbeat_interval: int = 0
        self.__closed: bool = False
        self.__ws: aiohttp.ClientWebSocketResponse = None
        self.__last_seq = None
        self._ping = None
        self.heartbeat_sender = None
        self.event_manager = EventManager(gateway=self)

    async def connect(self):
        # Open the HTTP session and the gateway websocket.
        self.logger.debug('')
        self.__session = aiohttp.ClientSession()
        self.__ws = await self.__session.ws_connect(
            f'wss://gateway.discord.gg/?v={self.gateway_version}&encoding=json'
        )

    async def disconnect(self):
        # Tear down heartbeat loop, websocket and session, in order.
        if self.heartbeat_sender:
            self.heartbeat_sender.cancel()
        if self.__ws:
            await self.__ws.close()
        if self.__session:
            await self.__session.close()
        # Should we close event loop?

    async def identify(self):
        # Send the IDENTIFY payload (token + intents + client props).
        self.logger.debug('Send `Identify`.')
        from platform import system
        await self.__ws.send_json({
            'op': GatewayOpcodes.IDENTIFY.value,
            'd': {
                'token': self.__token,
                'intents': self.intents.value,
                'properties': {
                    '$os': system(),
                    '$browser': 'volt.py',
                    '$device': 'volt.py'
                }
            }
        })

    async def run(self):
        """Main receive loop: connect, then process payloads until closed."""
        await self.connect()
        while not self.__closed:
            resp = await self.receive()
            self.logger.debug(f'Gateway Response : op = {resp.op}, d = {resp.data}')
            if resp.op is GatewayOpcodes.HELLO:
                # Login please!
                await self.login(resp)
            elif resp.op is GatewayOpcodes.DISPATCH:
                # Dispatch events into internal event listeners.
                self.event_manager.dispatch(resp)
            elif resp.op is GatewayOpcodes.HEARTBEAT_ACK:
                # Gateway acknowledged heartbeat.
                # TODO : Calculate ws ping.
                self._ping = None
        await self.disconnect()

    async def login(self, resp: GatewayResponse):
        """Handle HELLO: start the heartbeat loop, then identify."""
        # First Heartbeat
        self.__hearbeat_interval = resp.data['heartbeat_interval']
        self.__last_seq = resp.s

        @loop(seconds=self.__hearbeat_interval / 1000)
        async def heartbeat_sender(self: 'GatewayBot'):
            self.logger.debug('Sending heartbeat!')
            await self.__ws.send_json({
                'op': GatewayOpcodes.HEARTBEAT.value,
                'd': self.__last_seq or None
            })

        self.heartbeat_sender = heartbeat_sender
        self.heartbeat_sender.args = (self,) # inject self.
        heartbeat_sender.start()

        @heartbeat_sender.before_invoke
        async def before_heartbeat_sender(self: 'GatewayBot'):
            # Per gateway spec, jitter the very first heartbeat.
            await asyncio.sleep(self.__hearbeat_interval * random() / 1000)
            await heartbeat_sender() # Client must send first heartbeat in heartbeat_interval * random.random() milliseconds.

        # Identify
        await self.identify()

    async def receive(self) -> GatewayResponse:
        """Await one websocket frame and parse it; raise on closure."""
        resp = await self.__ws.receive()
        self.logger.debug(f'Raw gateway response = type = {resp.type}, data = {resp.data}')
        if resp.type in (aiohttp.WSMsgType.CLOSE, aiohttp.WSMsgType.CLOSING, aiohttp.WSMsgType.CLOSED):
            await self.disconnect()
            raise WSClosedError(resp.data or None)
        resp_obj = GatewayResponse(resp.data)
        return resp_obj

    async def close(self):
        # Stop sending heartbeats and wait to gracefully close.
        self.__closed = True
|
25,447 | de01b1f3d04b5380de2285e5abb319a8b096a994 | n1 = float(input('digite n1: '))
n2 = float(input('digite n2: '))
n3 = float(input('digite n3: '))
n4 = float(input('digite n4: '))
# Weighted average: weights 1..4, divided by their sum (10).
nota = (n1+2*n2+3*n3+4*n4)/10
print(round(nota,2))
25,448 | 763a376deefd1a75378a9c75b553a054c59f135b | #!/usr/bin/env python
from git import *
import os, optparse, sys, pkg_resources
class GitColors:
    # ANSI escape sequences for terminal colouring.
    UP_TO_DATE = '\033[92m'    # green
    HAVE_CHANGES = '\033[91m'  # red
    END = '\033[0m'            # reset
def printChanges(repo):
    # List modified (tracked) files, then untracked files. Python 2.
    print "\tFiles with changes:"
    index = repo.index
    diffs = index.diff(None)
    for diff in diffs:
        print "\t\t" + diff.a_blob.path
    if len(repo.untracked_files) > 0:
        print "\tUntracked files:"
        for file in repo.untracked_files:
            print "\t\t" + file
    print ""
def checkForChanges(projects):
for project in projects:
repo = None
try:
repo = Repo(os.path.join(os.getcwd(), project))
if repo.is_dirty(True, True, True):
print project + " - " + GitColors.HAVE_CHANGES + "have changes" + GitColors.END
printChanges(repo)
elif not repo.is_dirty(True, True, True):
print project + " - " + GitColors.UP_TO_DATE + "up-to-date" + GitColors.END
except InvalidGitRepositoryError:
print project + " is not a valid git repository"
def pull(projects):
    # Pull from the default remote of each project directory.
    for project in projects:
        repo = None
        try:
            repo = Repo(os.path.join(os.getcwd(), project))
            origin = repo.remote()
            origin.pull()
        except InvalidGitRepositoryError:
            print project + " is not a valid git repository"
def fetch(projects):
for project in projects:
repo = None
try:
repo = Repo(os.path.join(os.getcwd(), project))
origin = repo.remote()
origin.pull()
except InvalidGitRepositoryError:
print project + " is not a valid git repository"
def main():
    # CLI entry point: -s status, -l pull, -f fetch, applied to every
    # non-hidden subdirectory of the current working directory.
    parser = optparse.OptionParser(version=pkg_resources.require("py-utils-dda")[0].version, epilog="Git for multiple repos", description="GPL")
    parser.add_option("-s", "--status", dest="status", action="store_true")
    parser.add_option("-l", "--pull", dest="pull", action="store_true")
    parser.add_option("-f", "--fetch", dest="fetch", action="store_true")
    (opts, args) = parser.parse_args(sys.argv)
    projects = []
    for dir in os.listdir(os.getcwd()):
        if os.path.isdir(dir) and not dir[0] == '.':
            projects.append(dir)
    if opts.status:
        checkForChanges(projects)
    if opts.pull:
        pull(projects)
    if opts.fetch:
        fetch(projects)
if __name__ == "__main__":
    main()
25,449 | e87404e05119eb84ede6397276a927b18e82ccaa | import sys
import math
import numpy as np
import matplotlib.pyplot as plt
"""
Last modified on - 11 September 2018
This script will generate a template for the nsets generator. From this template, nsets generator can read for each neuron these values = [ pulseduration, pulsestart, pulse-end, tau_rise, tau_fall, pulsemax, pulsemin ]
Here we just distribute the data in a gaussian way, depending on the input
This (has been/is being/will be) modified to include multiple gaussian distribution for each group.
Sample command:
If we want to create input for groups of 10 and 20 neurons, with the following (where g1-x is the xth gaussian added in the 1st group/type of neurons) :
g1-1 g1-2 g2
width 3 6 8
mean 4 7 11 # This will be taken as index of the neuron in that group eg here it will assign mean position to 5th neuron in group 1, 8th neuron in group 1, and 12th neuron in group 2 (which will be 20 + 12 = 32nd neuron overall)
PulseDuration 30 30 30
PulseStart 0 ms 10 ms 20 ms
PulseEnd 30 ms 40 ms 50 ms
tau_rise 0.4 0.1 0.1
tau_fall 0.3 0.1 0.2
PulseMax 30 mV 40 mV 50 mV
PulseMin 5 mV 0 mV 10 mV
python /address/script_name.py /address/outfilename.ecf 2 2,1 10,20 3,4,30,0,30,0.4,0.3,30,5*6,7,30,10,40,0.1,0.1,40,0*8,11,30,20,50,0.1,0.2,50,10
arg 1 outfile address
arg 2 number of types/groups of neurons
arg 3 number of gaussians in each groups as a comma separated list (eg 2,1) minimum value for each group = 1, see ***
arg 4 number of neurons in each group as a comma separated list (eg 10,20)
arg 5 width, mean and Pulse parameters for each gaussian (separated by commas) and for all gaussian separated by * (eg 5,3,4,10,50*5,6,7,10*50,8,11,10,50)
(g1,g2,g3 in this list will given as first 2 to group 1, next 1 to group 2, depending on arg 3)
***Right now, the script does not work if you write number of gaussians (arg 3) as 0,2 and give just two sets of gaussians property in arg 5. You will have to write 1,2 in arg 3 and specify 3 gaussians and set max current value of first gaussian to 0.
"""
"""
def createIExt(start_time, end_time, start_time_specific, end_time_specific, step, I_val):
time_list = np.ndarray.tolist(np.arange(start_time, end_time + step, step))
I_list = []
for t in time_list:
if (t >= start_time_specific) or (t <= end_time_specific):
I = round(float(I_val), 3)
else:
I = 0
I_list.append(I)
return I_list
"""
def getIval(nN, max_i_val, sigma, mu, pmin):
    """Gaussian current profile across ``nN`` neurons.

    The peak equals ``max_i_val`` at index ``mu``; any value falling
    below ``pmin`` is zeroed out.
    """
    profile = np.zeros(nN)
    norm = 1 / (sigma * (math.sqrt(2 * math.pi)))
    scale = max_i_val / norm
    for neuron in range(nN):
        value = scale * norm * np.exp(-((float(neuron) - mu) ** 2) / (2 * (sigma ** 2)))
        if value < pmin:
            value = 0
        profile[neuron] = value
    return profile
# Inputs
# NOTE(review): original indentation was lost in this copy; loop
# nesting below was reconstructed from variable usage — verify against
# the canonical source before relying on exact print placement.
outfilename = sys.argv[1]
tN = int(sys.argv[2]) # Types of neurons
nGa = [int(a) for a in sys.argv[3].split(',')] # Number of gaussian in each type/group
nNa = [int(n) for n in sys.argv[4].split(',')] # number of neurons in each type/group
Df = sys.argv[5]
#print(Df)
A = sys.argv[5].split('*')
#print(A)
B = [p.split(',') for p in A]
print(B)
# allD[t][g] = parameter list for gaussian g of group t.
allD = []
for t in range(tN):
    print(t)
    allD_t = []
    for nG in range(nGa[t]):
        print(nG)
        i = sum(nGa[:t]) + int(nG)
        print(i)
        D = B[i]
        allD_t.append(D)
    allD.append(allD_t)
print(allD)
start_time_all = max([float(p[3]) for p in B])
end_time_all = max([float(p[4]) for p in B])
#start_time_all = 10
#end_time_all = 50
#if (tNeurons != len(nNeurons)) or (tNeurons != len(heights)) or (tNeurons != len(widths)) or (tNeurons != len(h_pos)) or (tNeurons != len(start_times)) or (tNeurons != len(end_times)):
#    print("The inputs do not correspond to number of neurons. Check the length of the terms being passed.")
# main
I_ext_list = []
for t in range(tN):
    nN = nNa[t]
    iext_t = []
    I_val = np.zeros(nN)
    # Sum the contribution of every gaussian defined for this group.
    for nG in range(nGa[t]):
        sigma = float(allD[t][nG][0])
        mu = float(allD[t][nG][1])
        h = float(allD[t][nG][7])
        I_val += getIval(nNa[t], h, sigma, mu, float(allD[t][nG][8]))
        print(str(t) + ' ' + str(nG) + ' ' + str(nN))
    print(I_val)
    # NOTE(review): pulse parameters below come from the LAST gaussian
    # of the group (loop variable nG outlives its loop) — confirm this
    # is intended.
    for n in range(nN):
        t_start = float(allD[t][nG][3])
        t_end = float(allD[t][nG][4])
        #iext_t_n = createIExt(start_time_all, end_time_all, t_start, t_end, step, I_val[n])
        d = [t, n, allD[t][nG][2], allD[t][nG][3], allD[t][nG][4], allD[t][nG][5], allD[t][nG][6], round(I_val[n],3), allD[t][nG][8]] # Need to add -
        iext_t.append(d)
    I_ext_list.append(iext_t)
print(len(I_ext_list))
print(str(len(I_ext_list[0])) + ' ' + str(len(I_ext_list[1])))
print(str(len(I_ext_list[0][1])) + ' ' + str(len(I_ext_list[1][2])))
#print(I_ext_list)
# Write this to outfile
#outfile = open(outfilename, 'w')
#outfile.write('time,' + ','.join([str(g) for g in range(sum(nNa))]) + '\n')
outfile = open(outfilename, 'w')
outfile.write('nNeuron,PulseDuration,PulseStart,PulseEnd,tau_rise,tau_fall,PulseMax,PulseMin\n')
#ime_list = np.ndarray.tolist(np.arange(start_time_all, end_time_all + step, step))
# NOTE(review): ``i`` is initialized to 1 but never incremented, so
# every emitted row carries neuron id 1 — likely a latent bug; confirm.
i = 1
pn_ival = []
nzPN = 0
ln_ival = []
nzLN = 0
for nTypeNeuron in range(tN):
    for idNeuron in range(len(I_ext_list[nTypeNeuron])):
        d = I_ext_list[nTypeNeuron][idNeuron]
        d_str = [str(di) for di in d[2:]]
        outfile.write(str(i) + ',' + ','.join(d_str) + '\n')
        if nTypeNeuron == 0:
            ival = float(d[7])
            pn_ival.append(float(d[7]))
            if ival != 0:
                nzPN += 1
        if nTypeNeuron == 1:
            ival = float(d[7])
            ln_ival.append(float(d[7]))
            if ival != 0:
                nzLN += 1
outfile.close()
print('\nNOn zero Pns are = ' + str(nzPN))
print('\nNOn zero Lns are = ' + str(nzLN))
# Visual sanity check of the two current profiles (PN and LN groups).
plt.figure(1)
plt.plot(pn_ival)
plt.ylabel('Magnitude of external current (nA)')
plt.xlabel('Neuron ID')
#plt.grid('TRUE')
plt.figure(2)
plt.plot(ln_ival)
plt.ylabel('Magnitude of external current (nA)')
plt.xlabel('Neuron ID')
#plt.grid('TRUE')
plt.show()
"""
for t in time_list:
t_id = time_list.index(t)
y = [[str(I_ext_list[nTypeNeuron][idNeuron][t_id]) for idNeuron in range(len(I_ext_list[nTypeNeuron]))] for nTypeNeuron in range(tN)]
# print(y)
l = str(t)
for x in y:
substring = ','.join(x)
l = l + ',' + substring
l = l + '\n'
# print(l)
outfile.write(l)
"""
|
25,450 | aa19b61957a14ff6866944386896dac5da7329dd | from django.db import models
# Create your models here.
class SearchedLinks(models.Model):
    # A search keyword and one result link recorded for it.
    keyword= models.CharField(max_length=500,null =True, blank = True)
    link = models.CharField(max_length=500,null =True, blank = True)
|
25,451 | dff43cc6d6367c5970515a06bcf8df969d656324 | import typing
import strawberry
from dataclasses import asdict
@strawberry.type
class Options:
    # Delivery preferences for a newsletter subscription.
    time: str
    # NOTE(review): likely a typo for 'frequency'; renaming would change
    # the public GraphQL schema, so left untouched.
    frequenzy: str
@strawberry.type
class Block:
    # One subreddit section of the newsletter.
    subreddit: str
    flairs: typing.List[str]
    count: int
    upvote_ratio: float
@strawberry.type
class Newsletter:
    # A user's newsletter: schedule options plus subreddit blocks.
    user_id: str
    options: Options
    blocks: typing.List[Block]
|
25,452 | 822c2ab3d6d5ab0a8729801642465b105bc3c030 | import openpyxl
from pathlib import Path
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import t
filename = 'datap3.txt'
# Resolve the data file relative to this script's directory.
pt = Path(__file__)
parent = pt.parent
filepath = Path.joinpath(parent,filename)
data = []
fl = open(filepath,'r')
data = fl.read().split('\n')
fl.close()
#print(data)
# Each row: index, x value, y value (space separated).
n = []
x = []
y = []
for i in range(len(data)):
    k = data[i].split(' ')
    n.append(float(k[0]))
    x.append(float(k[1]))
    y.append(float(k[2]))
    #print(x[i],y[i])
num = 0
dem = 0
x_sqr = [xl**2 for xl in x]
ssx = sum(x_sqr)
x_mean = np.mean(x)
y_mean = np.mean(y)
# Calculating b0 and b1 (least-squares slope and intercept).
for i in range(len(x)):
    num+= (x[i]-x_mean)*(y[i]-y_mean)
    dem+= (x[i]-x_mean)**2
b1 = num/dem
b0 = y_mean - b1*x_mean
y_pred = []
for xl in x:
    y_pred.append(b1*xl+b0)
print('y='+str(b0)+'+('+str(b1)+')x')
# Calculating sigma^2 (residual variance) from SST/SSE/SSR.
sqresid = []
sqrmean = []
for i in range(len(y)):
    dif = y_mean - y[i]
    sqrmean.append(dif**2)
sst = sum(sqrmean)
for i in range(len(y)):
    dif = y_pred[i] - y[i]
    sqresid.append(dif**2)
sse = sum(sqresid)
ssr = sst-sse
# n-2 degrees of freedom because b0 and b1 were estimated from the sample.
s_sqr = sse/(len(x)-2)
print('sigma sqr = '+str(s_sqr))
# Calculating variances of the coefficient estimators.
s = s_sqr**0.5
sb1 = s/(dem**0.5)
varb1 = sb1**2
print('Var(b1) = '+str(varb1))
varb0 = (s_sqr*ssx)/(len(x)*dem)
print('Var(b0) = '+str(varb0))
# Coefficient of determination.
r_sqr = ssr/sst
print('r^2 = '+str(r_sqr))
# NOTE(review): pairing min/max of x with min/max of y_pred draws the
# wrong line when the slope is negative — confirm slope sign for this
# data, or plot (x, y_pred) directly.
plt.plot([min(x),max(x)],[min(y_pred),max(y_pred)])
plt.scatter(x,y)
plt.show()
25,453 | 19e91810db18c34c931b74af8594b504b784ab2f | import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
def load_mnist():
    """Load scikit-learn's 8x8 digits set; return (data, target)."""
    digits = datasets.load_digits()
    return digits.data, digits.target
def main():
    # NOTE(review): the (X, y) return value is discarded — presumably a
    # placeholder for later experiments; confirm intent.
    print('loading mnist ...')
    load_mnist()
# BUG FIX: the guard compared against ' __main__' (leading space), so
# main() never ran when this script was executed directly.
if __name__ == '__main__':
    print('running ...')
    main()
|
25,454 | 1674ca3bde8fbde6febaeb239c194c25a3ac0afd | """
给定一个包含 n 个整数的数组 nums,判断 nums 中是否存在三个元素 a,b,c ,使得 a + b + c = 0 ?找出所有满足条件且不重复的三元组。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/3sum
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
class Solution(object):
    """LeetCode 15 (3Sum): find all unique triplets in ``nums`` summing to 0.

    Sort the array, fix the smallest element of the triple, then close in on
    the remaining pair with two pointers. Duplicates are handled by skipping
    repeated values directly, which replaces the original fragile bookkeeping
    that compared each hit against only the single previous match.
    O(n^2) time, O(1) extra space beyond the output.
    """

    def threeSum(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]
        """
        nums.sort()  # sorted in place, as before
        result = []
        n = len(nums)
        for i in range(n - 2):
            # Once the anchor is positive, no zero-sum triple can follow.
            if nums[i] > 0:
                break
            # Skip duplicate anchors so the same triple is never re-emitted.
            if i > 0 and nums[i] == nums[i - 1]:
                continue
            lo, hi = i + 1, n - 1
            while lo < hi:
                total = nums[i] + nums[lo] + nums[hi]
                if total < 0:
                    lo += 1
                elif total > 0:
                    hi -= 1
                else:
                    result.append([nums[i], nums[lo], nums[hi]])
                    # Step both pointers past any duplicates of the pair.
                    while lo < hi and nums[lo + 1] == nums[lo]:
                        lo += 1
                    while lo < hi and nums[hi - 1] == nums[hi]:
                        hi -= 1
                    lo += 1
                    hi -= 1
        return result
if __name__ == '__main__':
    # Quick manual check of Solution.threeSum.
    solver = Solution()
    # nums = [-1, 0, 1, 2, -1, -4]
    sample = [0, 0, 1, 0, 0, 0, 0]
    print(solver.threeSum(sample))
# import re
#
# element = '<2.5 HC'
#
# element = re.sub('[,,%]', '', element) if re.search('\d', element) and not re.search("[\u4e00-\u9fa5]+", element) else 0
# if element != 0:
# element = re.search('\d+.?\d?', element)
# element = element.group(0)
# print(element)
|
25,455 | a80364b3c97abacdc20efbb512b3c9a6a5005c4c | from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.model_selection import GroupShuffleSplit
from sklearn.preprocessing import MinMaxScaler, RobustScaler
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.linear_model import LinearRegression
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from GeoMagTS.data_preprocessing import DataFrameSelector, timeResolutionResampler, stormsProcessor
from GeoMagTS.models import GeoMagTSRegressor, GeoMagARX
from GeoMagTS.utils import create_narx_model, trainTestStormSplit, get_min_y_storms
import matplotlib.dates as mdates
from os import path
from matplotlib.backends.backend_pdf import PdfPages
# Input data and output locations.
DATA_FILE = '../../data/omni_2010-2019.pkl'
STORMTIMES_FILE = '../../data/stormtimes_qusai.pkl'
PLOTS_DIR = 'plots/'

data = pd.read_pickle(DATA_FILE)
storm_times_df = pd.read_pickle(STORMTIMES_FILE)

#### Data processing
# Data pre-processing parameters
time_resolution = '5T'  # resample to 5-minute bins
target_column = 'sym_h'
feature_columns = ['b', 'by_gse', 'vz_gse', 'density']
columns = [target_column] + feature_columns
storms_to_delete = [15, 69, 124]  # storm indices excluded from the analysis
# np.where returns a tuple of index arrays of the storms kept.
storms_to_use = np.where(~np.isin(storm_times_df.index, storms_to_delete))

# Processing pipeline for entire dataframe
column_selector = DataFrameSelector(columns)
time_res_resampler = timeResolutionResampler(time_resolution)
storms_processor = stormsProcessor(storm_times_df=storm_times_df,
                                   storms_to_use=storms_to_use)
data_pipeline = Pipeline([
    ("column_selector", column_selector),
    ("time_res_resampler", time_res_resampler),
    ("storms_processor", storms_processor),
])

# Get pre-processed data and storm labels
X, y = data_pipeline.fit_transform(data)
storm_labels = data_pipeline['storms_processor'].get_storm_labels()
storm_times = data_pipeline['storms_processor'].get_times()
n_storms = len(set(storm_labels))

# Split data into train, test
# Keep only storms whose minimum target value drops below this threshold.
min_threshold = -100
min_y = get_min_y_storms(y, storm_labels)
storms_thres = np.where(min_y < min_threshold)[0]

# Model fitting parameters
auto_order = 24
exog_order = 18
pred_step = 24
transformer_X = RobustScaler()
transformer_y = RobustScaler()
n_hidden = 18
learning_rate = 0.005
# NOTE(review): `params` is defined but never used below.
params = {
    'auto_order': 24,
}
def plot_loo_pred_one_storm(storm, X, y, storm_labels,
                            auto_order, exog_order, pred_step, transformer):
    """Plot leave-one-out predictions for a single storm.

    NOTE(review): the original definition was missing the ':' and a body —
    a SyntaxError that prevented this module from running at all. Kept as an
    explicit stub until the intended plotting logic (equivalent to the inline
    loop below) is factored in here.
    """
    raise NotImplementedError("plot_loo_pred_one_storm is not implemented yet")
# Render one page per selected storm into a single multi-page PDF.
# NOTE(review): `pdf` is only created when the file does NOT already exist,
# yet it is used unconditionally inside the loop and closed at the end — if
# plots/loo_pred_plots.pdf already exists this raises NameError at pdf.savefig.
# Confirm whether the intent was to skip the run or to overwrite the file.
if not path.exists(PLOTS_DIR+'loo_pred_plots.pdf'):
    pdf = PdfPages(PLOTS_DIR+'loo_pred_plots.pdf')
for storm in storms_thres:
    # Split data: hold out the current storm as the test set.
    train_test_split = trainTestStormSplit(storm_labels, test_storms=[storm])
    X_train, y_train, X_test, y_test = train_test_split.split_data(X, y)
    storm_labels_train, storm_labels_test = train_test_split.split_storm_labels()
    storm_times_test = train_test_split.get_test_storm_times(storm_times)
    # Fit AR-X model
    ar_model = GeoMagARX(auto_order=auto_order,
                         exog_order=exog_order,
                         pred_step=pred_step,
                         transformer_X=transformer_X,
                         transformer_y=transformer_y
                         )
    ar_model.fit(X_train, y_train, storm_labels=storm_labels_train)
    y_pred_ar = ar_model.predict(X_test, y_test, storm_labels_test)
    rmse_ar = ar_model.score(X_test, y_test, storm_labels_test)
    # Fit NARX model
    base_estimator = KerasRegressor(build_fn=create_narx_model)
    narx_model = GeoMagTSRegressor(base_estimator=base_estimator,
                                   auto_order=auto_order,
                                   exog_order=exog_order,
                                   pred_step=pred_step,
                                   transformer_X=transformer_X,
                                   transformer_y=transformer_y,
                                   n_hidden=n_hidden,
                                   learning_rate=learning_rate
                                   )
    narx_model.fit(X_train, y_train, storm_labels=storm_labels_train,
                   epochs=4, verbose=2)
    y_pred_narx = narx_model.predict(X_test, y_test, storm_labels_test)
    rmse_narx = narx_model.score(X_test, y_test, storm_labels_test)
    # Plot truth vs. both models' predictions for this storm.
    fig, ax = plt.subplots(figsize=(15,7), sharex=True)
    ax.plot(storm_times_test, y_test, label='Truth', color='black', linewidth=0.9)
    ax.plot(storm_times_test, y_pred_ar,
            label='Linear AR (RMSE: '+str(np.round(rmse_ar, decimals=2))+')',
            color='blue', linewidth=0.6, alpha=0.7)
    ax.plot(storm_times_test, y_pred_narx,
            label='NARX (RMSE: '+str(np.round(rmse_narx, decimals=2))+')',
            color='red', linewidth=0.6, alpha=0.7)
    ax.set_title(
        str(pred_step)+'-step ahead prediction '+
        '(auto_order='+str(auto_order)+', '+
        'exog_order='+str(exog_order)+')'
    )
    ax.legend()
    # Readable, concise date ticks on the x axis.
    locator = mdates.AutoDateLocator(minticks=15)
    formatter = mdates.ConciseDateFormatter(locator)
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(formatter)
    pdf.savefig(fig)
pdf.close()
|
25,456 | 282d19525f23a9d9eeed49285447810c8b02fcd3 | from __future__ import division
from collections import defaultdict
from collections.abc import ValuesView, KeysView, ItemsView
from copy import deepcopy
from itertools import chain
from datetime import datetime, timedelta
from hashlib import sha1
from operator import add
from random import choice, sample
import re
import sys
import time
import fnmatch
from mockredis.clock import SystemClock
from mockredis.lock import MockRedisLock
from mockredis.exceptions import RedisError, ResponseError, WatchError
from mockredis.pipeline import MockRedisPipeline
from mockredis.script import Script
from mockredis.sortedset import SortedSet
from mockredis.pubsub import Pubsub
# Python 3 compatibility: alias the Python 2 names this module was written
# against, so the rest of the code can use them unconditionally.
if sys.version_info >= (3, 0):
    long = int
    xrange = range
    basestring = str
    unicode = str
    from functools import reduce
class MockRedis(object):
    """
    A Mock for a redis-py Redis object

    Expire functionality must be explicitly
    invoked using do_expire(time). Automatic
    expiry is NOT supported.
    """

    def __init__(self,
                 strict=False,
                 clock=None,
                 load_lua_dependencies=True,
                 blocking_timeout=1000,
                 blocking_sleep_interval=0.01,
                 decode_responses=False,
                 **kwargs):
        """
        Initialize as either StrictRedis or Redis.

        Defaults to non-strict.

        :param strict: emulate StrictRedis argument conventions when True.
        :param clock: time source used by expiry handling; defaults to SystemClock.
        :param blocking_timeout: default timeout (seconds) for blocking pops.
        :param blocking_sleep_interval: poll interval used by blocking pops.
        :param decode_responses: when True, responses are returned as str
            rather than bytes.

        Remaining ``kwargs`` are accepted for redis-py API compatibility and
        ignored.
        """
        self.strict = strict
        self.clock = SystemClock() if clock is None else clock
        self.load_lua_dependencies = load_lua_dependencies
        self.blocking_timeout = blocking_timeout
        self.blocking_sleep_interval = blocking_sleep_interval
        # The 'Redis' store
        self.redis = defaultdict(dict)
        self.redis_config = defaultdict(dict)
        # Expiry time per key, consulted by ttl/pttl and purged by do_expire().
        self.timeouts = defaultdict(dict)
        self._pubsub = None
        # Dictionary from script to sha ''Script''
        self.shas = dict()
        self.decode_responses = decode_responses
@classmethod
def from_url(cls, url, db=None, **kwargs):
    """Emulate from_url: ``url`` and ``db`` are accepted but ignored."""
    return cls(**kwargs)

# Connection Functions #

def echo(self, msg):
    """Emulate echo: return the message, encoded like stored values."""
    return self._encode(msg)

def ping(self):
    """Emulate ping: the mock is always reachable."""
    return b'PONG'

# Transactions Functions #

def lock(self, key, timeout=0, sleep=0):
    """Emulate lock."""
    return MockRedisLock(self, key, timeout, sleep)

def pipeline(self, transaction=True, shard_hint=None):
    """Emulate a redis-python pipeline."""
    return MockRedisPipeline(self, transaction, shard_hint)
def transaction(self, func, *watches, **kwargs):
    """
    Convenience method for executing the callable `func` as a transaction
    while watching all keys specified in `watches`. The 'func' callable
    should expect a single argument which is a Pipeline object.

    Copied directly from redis-py.
    """
    shard_hint = kwargs.pop('shard_hint', None)
    value_from_callable = kwargs.pop('value_from_callable', False)
    watch_delay = kwargs.pop('watch_delay', None)
    with self.pipeline(True, shard_hint) as pipe:
        # Retry indefinitely on WatchError, exactly like redis-py does.
        while 1:
            try:
                if watches:
                    pipe.watch(*watches)
                func_value = func(pipe)
                exec_value = pipe.execute()
                return func_value if value_from_callable else exec_value
            except WatchError:
                # Optionally back off before retrying the transaction.
                if watch_delay is not None and watch_delay > 0:
                    time.sleep(watch_delay)
                continue
def watch(self, *argv, **kwargs):
    """
    Mock does not support command buffering so watch
    is a no-op
    """
    pass

def unwatch(self):
    """
    Mock does not support command buffering so unwatch
    is a no-op
    """
    pass

def multi(self, *argv, **kwargs):
    """
    Mock does not support command buffering so multi
    is a no-op
    """
    pass

def execute(self):
    """Emulate the execute method. All piped commands are executed immediately
    in this mock, so this is a no-op."""
    pass
# Keys Functions #
def type(self, key):
    """Emulate type: return the Redis type name of ``key`` as bytes.

    Returns b'none' for a missing key, otherwise one of b'hash', b'string',
    b'set', b'list', b'zset'. Raises TypeError for an unrecognized stored
    Python type.
    """
    key = self._encode(key)
    if key not in self.redis:
        return b'none'
    type_ = type(self.redis[key])
    if type_ is dict:
        return b'hash'
    elif type_ in (str, bytes):
        # String values pass through _encode when stored, so under Python 3
        # they are bytes. The original `type_ is str` test therefore never
        # matched on Python 3 and string keys fell through to TypeError.
        return b'string'
    elif type_ is set:
        return b'set'
    elif type_ is list:
        return b'list'
    elif type_ is SortedSet:
        return b'zset'
    raise TypeError("unhandled type {}".format(type_))
def keys(self, pattern='*'):
    """Emulate keys: return decoded key names matching the glob ``pattern``."""
    # making sure the pattern is unicode/str.
    try:
        pattern = pattern.decode('utf-8')
    # This throws an AttributeError in python 3, or an
    # UnicodeEncodeError in python 2
    except (AttributeError, UnicodeEncodeError):
        pass

    # Make regex out of glob styled pattern.
    regex = fnmatch.translate(pattern)
    # fnmatch's '.' matches anything; restrict it so it behaves like a glob
    # and does not cross '/'.
    regex = re.compile(re.sub(r'(^|[^\\])\.', r'\1[^/]', regex))

    keys = []
    # Find every key that matches the pattern
    for key in self.redis.keys():
        decoded_key = key if isinstance(key, unicode) else key.decode('utf-8')
        if regex.match(decoded_key):
            keys.append(decoded_key)

    return keys
def delete(self, *keys):
    """Emulate delete: remove the given keys and any timeouts on them.

    Returns the number of keys that actually existed and were removed.
    """
    removed = 0
    for raw_key in keys:
        encoded_key = self._encode(raw_key)
        if encoded_key in self.redis:
            self.redis.pop(encoded_key)
            removed += 1
        # A timeout may exist independently; drop it either way.
        self.timeouts.pop(encoded_key, None)
    return removed
def __delitem__(self, name):
    """Support ``del r[name]``."""
    if self.delete(name) == 0:
        # redispy doesn't correctly raise KeyError here, so we don't either
        pass

def exists(self, key):
    """Emulate exists."""
    return self._encode(key) in self.redis
__contains__ = exists
def _expire(self, key, delta):
if key not in self.redis:
return False
self.timeouts[key] = self.clock.now() + delta
return True
def expire(self, key, delta):
    """Emulate expire: ``delta`` is seconds (int) or a timedelta."""
    delta = delta if isinstance(delta, timedelta) else timedelta(seconds=delta)
    return self._expire(self._encode(key), delta)

def pexpire(self, key, milliseconds):
    """Emulate pexpire"""
    return self._expire(self._encode(key), timedelta(milliseconds=milliseconds))
def expireat(self, key, when):
    """Emulate expireat: set an absolute expiry from unix timestamp ``when``.

    Returns True when the key exists and the expiry was set, else False.
    """
    encoded = self._encode(key)
    if encoded not in self.redis:
        return False
    self.timeouts[encoded] = datetime.fromtimestamp(when)
    return True
def ttl(self, key):
    """
    Emulate ttl

    Derives seconds-to-live from pttl(). Mirrors the redis-py lib rather
    than the command docs: None is passed through (missing key / no
    timeout in non-strict mode), as are the negative sentinel codes used
    in strict mode (-2 missing key, -1 persistent key).

    :param key: key for which ttl is requested.
    :returns: the number of seconds till timeout, None if the key does not
              exist or has no timeout (per redis-py behavior).
    """
    milliseconds = self.pttl(key)
    if milliseconds is None or milliseconds < 0:
        return milliseconds
    return milliseconds // 1000
def pttl(self, key):
    """
    Emulate pttl

    :param key: key for which pttl is requested.
    :returns: the number of milliseconds till timeout, None if the key does not exist or if the
              key has no timeout(as per the redis-py lib behavior).

    In strict mode the redis 2.8 sentinel codes are returned instead of
    None: -2 when the key does not exist, -1 when it has no timeout.
    """
    key = self._encode(key)
    if key not in self.redis:
        # as of redis 2.8, -2 is returned if the key does not exist
        return long(-2) if self.strict else None
    if key not in self.timeouts:
        # as of redis 2.8, -1 is returned if the key is persistent
        # redis-py returns None; command docs say -1
        return long(-1) if self.strict else None

    time_to_live = get_total_milliseconds(self.timeouts[key] - self.clock.now())
    return long(max(-1, time_to_live))
def do_expire(self):
    """
    Expire objects assuming now == time

    Must be called explicitly; the mock never expires keys on its own.
    """
    # Deep copy to avoid RuntimeError: dictionary changed size during iteration
    _timeouts = deepcopy(self.timeouts)
    for key, value in _timeouts.items():
        if value - self.clock.now() < timedelta(0):
            del self.timeouts[key]
            # removing the expired key
            if key in self.redis:
                self.redis.pop(key, None)

def flushdb(self):
    """Emulate flushdb: drop all keys, pubsub state and timeouts."""
    self.redis.clear()
    self.pubsub().clear()
    self.timeouts.clear()

def rename(self, old_key, new_key):
    """Emulate rename (unconditional move)."""
    return self._rename(old_key, new_key)

def renamenx(self, old_key, new_key):
    """Emulate renamenx: rename only if new_key does not exist; 1/0 result."""
    return 1 if self._rename(old_key, new_key, True) else 0
def _rename(self, old_key, new_key, nx=False):
    """Shared rename/renamenx implementation.

    Moves old_key's value to new_key; with nx=True the move is refused when
    new_key already exists. Returns True on success.
    """
    source = self._encode(old_key)
    target = self._encode(new_key)
    if source not in self.redis:
        return False
    if nx and target in self.redis:
        return False
    self.redis[target] = self.redis.pop(source)
    return True
def dbsize(self):
return len(self.redis.keys())
def _decode(self, value):
    """Recursively decode bytes to str when decode_responses is enabled.

    Containers (list/tuple/set/dict and the dict view types) are rebuilt
    with their elements decoded; anything else is returned unchanged.
    """
    if value is None:
        return None
    if self.decode_responses:
        if isinstance(value, (list, tuple, set)):
            value = type(value)(self._decode(v) for v in value)
        # dict.keys()
        elif isinstance(value, KeysView):
            value = set(self._decode(v) for v in value)
        # dict.values()
        elif isinstance(value, ValuesView):
            value = list(self._decode(v) for v in value)
        # dict.items()
        elif isinstance(value, ItemsView):
            value = list((self._decode(k),self._decode(v)) for k,v in value)
        elif isinstance(value, dict):
            value = type(value)((self._decode(k), self._decode(v)) for k,v in value.items())
        elif isinstance(value, bytes):
            value = value.decode('utf-8', 'strict')
    return value
def get(self, key):
    """Emulate get: return the (possibly decoded) value, or None."""
    return self._decode(self.redis.get(self._encode(key)))
def __getitem__(self, name):
    """
    Return the value at key ``name``, raises a KeyError if the key
    doesn't exist.
    """
    value = self.get(name)
    if value is None:
        raise KeyError(name)
    return value
def mget(self, keys, *args):
    """Emulate mget: return a value (or None) per requested key."""
    args = self._list_or_args(keys, args)
    return [self.get(arg) for arg in args]

def set(self, key, value, ex=None, px=None, nx=False, xx=False):
    """
    Set the ``value`` for the ``key`` in the context of the provided kwargs.

    As per the behavior of the redis-py lib:
    If nx and xx are both set, the function does nothing and None is returned.
    If px and ex are both set, the preference is given to px.
    If the key is not set for some reason, the lib function returns None.

    :param ex: expiry in seconds (int or timedelta).
    :param px: expiry in milliseconds (int or timedelta); overrides ex.
    :param nx: only set when the key does not already exist.
    :param xx: only set when the key already exists.
    """
    key = self._encode(key)
    value = self._encode(value)

    if nx and xx:
        return None
    mode = "nx" if nx else "xx" if xx else None
    if self._should_set(key, mode):
        expire = None
        if ex is not None:
            expire = ex if isinstance(ex, timedelta) else timedelta(seconds=ex)
        if px is not None:
            expire = px if isinstance(px, timedelta) else timedelta(milliseconds=px)

        if expire is not None and expire.total_seconds() <= 0:
            raise ResponseError("invalid expire time in SETEX")

        result = self._set(key, value)
        if expire:
            self._expire(key, expire)
        return result
__setitem__ = set
def getset(self, key, value):
    """Emulate getset: store ``value`` and return the previous value."""
    previous = self.get(key)
    self.set(key, value)
    return previous
def _set(self, key, value):
    """Unconditionally store an encoded value and clear any timeout."""
    self.redis[key] = self._encode(value)

    # removing the timeout
    if key in self.timeouts:
        self.timeouts.pop(key, None)

    return True
def _should_set(self, key, mode):
"""
Determine if it is okay to set a key.
If the mode is None, returns True, otherwise, returns True of false based on
the value of ``key`` and the ``mode`` (nx | xx).
"""
if mode is None or mode not in ["nx", "xx"]:
return True
if mode == "nx":
if key in self.redis:
# nx means set only if key is absent
# false if the key already exists
return False
elif key not in self.redis:
# at this point mode can only be xx
# xx means set only if the key already exists
# false if is absent
return False
# for all other cases, return true
return True
def setex(self, key, time, value):
    """
    Set the value of ``key`` to ``value`` that expires in ``time``
    seconds. ``time`` can be represented by an integer or a Python
    timedelta object.
    """
    if not self.strict:
        # when not strict mode swap value and time args order
        time, value = value, time
    return self.set(key, value, ex=time)

def psetex(self, key, time, value):
    """
    Set the value of ``key`` to ``value`` that expires in ``time``
    milliseconds. ``time`` can be represented by an integer or a Python
    timedelta object.
    """
    return self.set(key, value, px=time)

def setnx(self, key, value):
    """Set the value of ``key`` to ``value`` if key doesn't exist"""
    return self.set(key, value, nx=True)

def mset(self, *args, **kwargs):
    """
    Sets key/values based on a mapping. Mapping can be supplied as a single
    dictionary argument or as kwargs.
    """
    if args:
        if len(args) != 1 or not isinstance(args[0], dict):
            raise RedisError('MSET requires **kwargs or a single dict arg')
        mapping = args[0]
    else:
        mapping = kwargs
    for key, value in mapping.items():
        self.set(key, value)
    return True

def msetnx(self, *args, **kwargs):
    """
    Sets key/values based on a mapping if none of the keys are already set.
    Mapping can be supplied as a single dictionary argument or as kwargs.
    Returns a boolean indicating if the operation was successful.
    """
    if args:
        if len(args) != 1 or not isinstance(args[0], dict):
            raise RedisError('MSETNX requires **kwargs or a single dict arg')
        mapping = args[0]
    else:
        mapping = kwargs

    # First pass: refuse the whole operation when any key already exists.
    for key in mapping.keys():
        if self._encode(key) in self.redis:
            return False
    for key, value in mapping.items():
        self.set(key, value)
    return True
def decr(self, key, amount=1):
    """Emulate decr/decrby: decrement by ``amount``, creating the key at 0."""
    key = self._encode(key)
    previous_value = long(self.redis.get(key, '0'))
    self.redis[key] = self._encode(previous_value - amount)
    return long(self.redis[key])
decrby = decr

def incr(self, key, amount=1):
    """Emulate incr."""
    key = self._encode(key)
    previous_value = long(self.redis.get(key, '0'))
    self.redis[key] = self._encode(previous_value + amount)
    return long(self.redis[key])
incrby = incr

def setbit(self, key, offset, value):
    """
    Set the bit at ``offset`` in ``key`` to ``value``.

    The stored string is grown (zero-padded) as needed; returns the bit's
    previous value (0 or 1).
    """
    key = self._encode(key)
    index, bits, mask = self._get_bits_and_offset(key, offset)

    if index >= len(bits):
        bits.extend(b"\x00" * (index + 1 - len(bits)))

    prev_val = 1 if (bits[index] & mask) else 0

    if value:
        bits[index] |= mask
    else:
        bits[index] &= ~mask

    self.redis[key] = bytes(bits)

    return prev_val
def getbit(self, key, offset):
    """
    Returns the bit value at ``offset`` in ``key`` (0 when past the end).
    """
    encoded = self._encode(key)
    index, bits, mask = self._get_bits_and_offset(encoded, offset)
    if index < len(bits):
        return 1 if bits[index] & mask else 0
    return 0
def _get_bits_and_offset(self, key, offset):
bits = bytearray(self.redis.get(key, b""))
index, position = divmod(offset, 8)
mask = 128 >> position
return index, bits, mask
# Hash Functions #
def hexists(self, hashkey, attribute):
    """Emulate hexists."""
    redis_hash = self._get_hash(hashkey, 'HEXISTS')
    return self._encode(attribute) in redis_hash

def hget(self, hashkey, attribute):
    """Emulate hget."""
    redis_hash = self._get_hash(hashkey, 'HGET')
    return self._decode(redis_hash.get(self._encode(attribute)))

def hgetall(self, hashkey):
    """Emulate hgetall: return a (decoded) copy of the whole hash."""
    redis_hash = self._get_hash(hashkey, 'HGETALL', decode=True)
    return dict(redis_hash)

def hdel(self, hashkey, *keys):
    """Emulate hdel: returns the number of fields actually removed."""
    redis_hash = self._get_hash(hashkey, 'HDEL')
    count = 0
    for key in keys:
        attribute = self._encode(key)
        if attribute in redis_hash:
            count += 1
            del redis_hash[attribute]
            # Redis deletes the key itself once its hash becomes empty.
            if not redis_hash:
                self.delete(hashkey)
    return count

def hlen(self, hashkey):
    """Emulate hlen."""
    redis_hash = self._get_hash(hashkey, 'HLEN')
    return len(redis_hash)

def hmset(self, hashkey, value):
    """Emulate hmset: ``value`` is a mapping of field -> value."""
    redis_hash = self._get_hash(hashkey, 'HMSET', create=True)
    # NOTE(review): the loop variable shadows the `value` parameter; this
    # works only because value.items() is evaluated before the rebinding.
    for key, value in value.items():
        attribute = self._encode(key)
        redis_hash[attribute] = self._encode(value)
    return True

def hmget(self, hashkey, keys, *args):
    """Emulate hmget: return a value (or None) per requested field."""
    redis_hash = self._get_hash(hashkey, 'HMGET')
    attributes = self._list_or_args(keys, args)
    return [self._decode(redis_hash.get(self._encode(attribute))) for attribute in attributes]
def hset(self, hashkey, attribute=None, value=None, mapping=None):
    """Emulate hset.

    Accepts either a single attribute/value pair or a ``mapping`` of pairs.
    Returns the number of fields newly created (0 when every field already
    existed).

    Raises ResponseError when called with neither an attribute nor a
    mapping. The original raised ``DataError``, a name never imported in
    this module, so the failure actually surfaced as a NameError;
    ResponseError matches how sadd() reports argument errors here.
    """
    if attribute is None and not mapping:
        raise ResponseError("'hset' with no key value pairs")
    redis_hash = self._get_hash(hashkey, 'HSET', create=True)
    if attribute is not None:
        attribute = self._encode(attribute)
        attribute_present = attribute in redis_hash
        redis_hash[attribute] = self._encode(value)
        return long(0) if attribute_present else long(1)
    # mapping path: set every pair, counting newly created fields
    created = long(0)
    for attr, val in mapping.items():
        attr = self._encode(attr)
        if attr not in redis_hash:
            created += 1
        redis_hash[attr] = self._encode(val)
    return created
def hsetnx(self, hashkey, attribute, value):
    """Emulate hsetnx: set only when the field is absent; returns 1/0."""
    redis_hash = self._get_hash(hashkey, 'HSETNX', create=True)
    attribute = self._encode(attribute)
    if attribute in redis_hash:
        return long(0)
    else:
        redis_hash[attribute] = self._encode(value)
        return long(1)

def hincrby(self, hashkey, attribute, increment=1):
    """Emulate hincrby."""
    return self._hincrby(hashkey, attribute, 'HINCRBY', long, increment)

def hincrbyfloat(self, hashkey, attribute, increment=1.0):
    """Emulate hincrbyfloat."""
    return self._hincrby(hashkey, attribute, 'HINCRBYFLOAT', float, increment)

def _hincrby(self, hashkey, attribute, command, type_, increment):
    """Shared hincrby and hincrbyfloat routine; missing fields start at 0."""
    redis_hash = self._get_hash(hashkey, command, create=True)
    attribute = self._encode(attribute)
    previous_value = type_(self._decode(redis_hash.get(attribute, '0')))
    redis_hash[attribute] = self._encode(previous_value + increment)
    return type_(redis_hash[attribute])

def hkeys(self, hashkey):
    """Emulate hkeys."""
    redis_hash = self._get_hash(hashkey, 'HKEYS')
    return self._decode(redis_hash.keys())

def hvals(self, hashkey):
    """Emulate hvals."""
    redis_hash = self._get_hash(hashkey, 'HVALS')
    return self._decode(redis_hash.values())
# List Functions #
def lrange(self, key, start, stop):
    """Emulate lrange: inclusive slice with Redis-style negative indices."""
    redis_list = self._get_list(key, 'LRANGE')
    start, stop = self._translate_range(len(redis_list), start, stop)
    return self._decode(redis_list[start:stop + 1])

def lpos(self, name, value):
    """Emulate lpos. (naive implementation)"""
    values = list(self.lrange(name, 0, -1))
    if value not in values:
        return None
    return values.index(value)

def lindex(self, key, index):
    """Emulate lindex."""
    redis_list = self._get_list(key, 'LINDEX')
    if self._encode(key) not in self.redis:
        return None
    try:
        return self._decode(redis_list[index])
    except (IndexError):
        # Redis returns nil if the index doesn't exist
        return None

def llen(self, key):
    """Emulate llen."""
    redis_list = self._get_list(key, 'LLEN')
    # Redis returns 0 if list doesn't exist
    return len(redis_list)
def _blocking_pop(self, pop_func, keys, timeout):
    """Emulate blocking pop functionality.

    Polls ``pop_func`` across ``keys`` until a value appears or ``timeout``
    seconds elapse. A timeout of None or 0 falls back to the instance's
    blocking_timeout. Returns (key, value), or None on timeout.
    """
    # Normalize the timeout BEFORE type-checking it: the original ran the
    # isinstance check first, so timeout=None raised RuntimeError and the
    # `timeout is None` fallback below it was unreachable dead code.
    if timeout is None or timeout == 0:
        timeout = self.blocking_timeout
    elif not isinstance(timeout, (int, long)):
        raise RuntimeError('timeout is not an integer or out of range')

    # A single key may be given as a bare string.
    if isinstance(keys, basestring):
        keys = [keys]
    else:
        keys = list(keys)

    elapsed_time = 0
    start = time.time()
    while elapsed_time < timeout:
        key, val = self._pop_first_available(pop_func, keys)
        if val:
            return key, val
        # small delay to avoid high cpu utilization
        time.sleep(self.blocking_sleep_interval)
        elapsed_time = time.time() - start
    return None
def _pop_first_available(self, pop_func, keys):
for key in keys:
val = pop_func(key)
if val:
return self._decode(key), self._decode(val)
return None, None
def blpop(self, keys, timeout=0):
    """Emulate blpop"""
    return self._blocking_pop(self.lpop, keys, timeout)

def brpop(self, keys, timeout=0):
    """Emulate brpop"""
    return self._blocking_pop(self.rpop, keys, timeout)
def lpop(self, key):
    """Emulate lpop: pop and return the first list element, or None."""
    redis_list = self._get_list(key, 'LPOP')

    if self._encode(key) not in self.redis:
        return None

    try:
        value = redis_list.pop(0)
        if len(redis_list) == 0:
            self.delete(key)
        # Decode the popped value like rpop() does; the original returned
        # the raw stored (encoded) value only from lpop, which was
        # inconsistent with rpop and with decode_responses.
        return self._decode(value)
    except (IndexError):
        # Redis returns nil if popping from an empty list
        return None
def lpush(self, key, *args):
    """Emulate lpush: returns the number of values pushed."""
    redis_list = self._get_list(key, 'LPUSH', create=True)

    # Creates the list at this key if it doesn't exist, and appends args to its beginning
    args_reversed = [self._encode(arg) for arg in args]
    args_reversed.reverse()
    self.redis[self._encode(key)] = args_reversed + redis_list

    return len(args)

def rpop(self, key):
    """Emulate rpop: pop and return the last list element, or None."""
    redis_list = self._get_list(key, 'RPOP')

    if self._encode(key) not in self.redis:
        return None

    try:
        value = redis_list.pop()
        if len(redis_list) == 0:
            self.delete(key)
        return self._decode(value)
    except (IndexError):
        # Redis returns nil if popping from an empty list
        return None

def rpush(self, key, *args):
    """Emulate rpush."""
    redis_list = self._get_list(key, 'RPUSH', create=True)

    # Creates the list at this key if it doesn't exist, and appends args to it
    redis_list.extend(map(self._encode, args))

    return len(args)
def lrem(self, key, value, count=0):
    """Emulate lrem.

    count == 0 removes every occurrence; count > 0 removes the first
    ``count`` occurrences; count < 0 removes the last ``|count|``
    occurrences. Returns the number of elements removed.
    """
    value = self._encode(value)
    redis_list = self._get_list(key, 'LREM')
    removed_count = 0
    if self._encode(key) in self.redis:
        if count == 0:
            # Remove all ocurrences
            while redis_list.count(value):
                redis_list.remove(value)
                removed_count += 1
        elif count > 0:
            counter = 0
            # remove first 'count' ocurrences
            while redis_list.count(value):
                redis_list.remove(value)
                counter += 1
                removed_count += 1
                if counter >= count:
                    break
        elif count < 0:
            # remove last 'count' ocurrences
            counter = -count
            new_list = []
            # Walk the list backwards, dropping matches until the quota is met.
            for v in reversed(redis_list):
                if v == value and counter > 0:
                    counter -= 1
                    removed_count += 1
                else:
                    new_list.append(v)
            redis_list[:] = list(reversed(new_list))
        if removed_count > 0 and len(redis_list) == 0:
            self.delete(key)
    return removed_count

def ltrim(self, key, start, stop):
    """Emulate ltrim: keep only the inclusive [start, stop] slice."""
    redis_list = self._get_list(key, 'LTRIM')
    if redis_list:
        start, stop = self._translate_range(len(redis_list), start, stop)
        self.redis[self._encode(key)] = redis_list[start:stop + 1]
    return True

def rpoplpush(self, source, destination):
    """Emulate rpoplpush"""
    transfer_item = self.rpop(source)
    if transfer_item is not None:
        self.lpush(destination, transfer_item)
    return transfer_item

def brpoplpush(self, source, destination, timeout=0):
    """Emulate brpoplpush"""
    transfer_item = self.brpop(source, timeout)
    if transfer_item is None:
        return None

    key, val = transfer_item
    self.lpush(destination, val)
    return val

def lset(self, key, index, value):
    """Emulate lset: raises ResponseError on missing key or bad index."""
    redis_list = self._get_list(key, 'LSET')
    if redis_list is None:
        raise ResponseError("no such key")
    try:
        redis_list[index] = self._encode(value)
    except IndexError:
        raise ResponseError("index out of range")
def sort(self, name,
         start=None,
         num=None,
         by=None,
         get=None,
         desc=False,
         alpha=False,
         store=None,
         groups=False):
    """Emulate SORT: sort a stored collection, with optional BY/GET/STORE.

    ``by``/``get`` patterns use '*' as the placeholder for each item;
    ``by='nosort'`` preserves order. When ``store`` is given the result is
    stored and its length returned; otherwise the result list is returned.
    """
    # check valid parameter combos
    if [start, num] != [None, None] and None in [start, num]:
        raise ValueError('start and num must both be specified together')

    # check up-front if there's anything to actually do
    items = num != 0 and self.get(name)
    if not items:
        if store:
            return 0
        else:
            return []

    by = self._encode(by) if by is not None else by
    # always organize the items as tuples of the value from the list and the sort key
    if by and b'*' in by:
        items = [(i, self.get(by.replace(b'*', self._encode(i)))) for i in items]
    elif by in [None, b'nosort']:
        items = [(i, i) for i in items]
    else:
        raise ValueError('invalid value for "by": %s' % by)

    if by != b'nosort':
        # if sorting, do alpha sort or float (default) and take desc flag into account
        sort_type = alpha and str or float
        items.sort(key=lambda x: sort_type(x[1]), reverse=bool(desc))

    # results is a list of lists to support different styles of get and also groups
    results = []
    if get:
        if isinstance(get, basestring):
            # always deal with get specifiers as a list
            get = [get]
        for g in map(self._encode, get):
            if g == b'#':
                # '#' means the element itself
                results.append([self.get(i) for i in items])
            else:
                results.append([self.get(g.replace(b'*', self._encode(i[0]))) for i in items])
    else:
        # if not using GET then returning just the item itself
        results.append([i[0] for i in items])

    # results to either list of tuples or list of values
    if len(results) > 1:
        results = list(zip(*results))
    elif results:
        results = results[0]

    # apply the 'start' and 'num' to the results
    if not start:
        start = 0
    if not num:
        # num == 0 was already handled above, so a falsy num means "to the end"
        if start:
            results = results[start:]
    else:
        end = start + num
        results = results[start:end]

    # if more than one GET then flatten if groups not wanted
    if get and len(get) > 1:
        if not groups:
            results = list(chain(*results))

    # either store value and return length of results or just return results
    if store:
        self.redis[self._encode(store)] = results
        return len(results)
    else:
        return results
# SCAN COMMANDS #
def _common_scan(self, values_function, cursor='0', match=None, count=10, key=None):
    """
    Common scanning skeleton.

    Pages through ``values_function()`` ``count`` items at a time, applying
    the optional glob ``match``; returns [next_cursor, page] with a cursor
    of 0 when the end is reached.

    :param key: optional function used to identify what 'match' is applied to
    """
    if count is None:
        count = 10
    cursor = int(cursor)
    count = int(count)
    if not count:
        raise ValueError('if specified, count must be > 0: %s' % count)

    values = values_function()
    if cursor + count >= len(values):
        # we reached the end, back to zero
        result_cursor = 0
    else:
        result_cursor = cursor + count

    values = values[cursor:cursor+count]

    if match is not None:
        # Only '*' is honored as a wildcard; other glob chars are escaped.
        regex = re.compile(b'^' + re.escape(self._encode(match)).replace(b'\\*', b'.*') + b'$')
        if not key:
            key = lambda v: v
        values = [v for v in values if regex.match(key(v))]

    return [result_cursor, values]

def scan(self, cursor='0', match=None, count=10):
    """Emulate scan."""
    def value_function():
        return sorted(self.redis.keys())  # sorted list for consistent order
    return self._common_scan(value_function, cursor=cursor, match=match, count=count)

def scan_iter(self, match=None, count=10):
    """Emulate scan_iter."""
    cursor = '0'
    while cursor != 0:
        cursor, data = self.scan(cursor=cursor, match=match, count=count)
        for item in data:
            yield item

def sscan(self, name, cursor='0', match=None, count=10):
    """Emulate sscan."""
    def value_function():
        members = list(self.smembers(name))
        members.sort()  # sort for consistent order
        return members
    return self._common_scan(value_function, cursor=cursor, match=match, count=count)

def sscan_iter(self, name, match=None, count=10):
    """Emulate sscan_iter."""
    cursor = '0'
    while cursor != 0:
        cursor, data = self.sscan(name, cursor=cursor,
                                  match=match, count=count)
        for item in data:
            yield item

def zscan(self, name, cursor='0', match=None, count=10):
    """Emulate zscan."""
    def value_function():
        values = self.zrange(name, 0, -1, withscores=True)
        values.sort(key=lambda x: x[1])  # sort for consistent order
        return values
    return self._common_scan(value_function, cursor=cursor, match=match, count=count, key=lambda v: v[0])  # noqa

def zscan_iter(self, name, match=None, count=10):
    """Emulate zscan_iter."""
    cursor = '0'
    while cursor != 0:
        cursor, data = self.zscan(name, cursor=cursor, match=match,
                                  count=count)
        for item in data:
            yield item

def hscan(self, name, cursor='0', match=None, count=10):
    """Emulate hscan."""
    def value_function():
        values = self.hgetall(name)
        values = list(values.items())  # list of tuples for sorting and matching
        values.sort(key=lambda x: x[0])  # sort for consistent order
        return values
    scanned = self._common_scan(value_function, cursor=cursor, match=match, count=count, key=lambda v: v[0])  # noqa
    scanned[1] = dict(scanned[1])  # from list of tuples back to dict
    return scanned

def hscan_iter(self, name, match=None, count=10):
    """Emulate hscan_iter."""
    cursor = '0'
    while cursor != 0:
        cursor, data = self.hscan(name, cursor=cursor,
                                  match=match, count=count)
        for item in data.items():
            yield item
# SET COMMANDS #
    def sadd(self, key, *values):
        """Emulate sadd: add *values* to the set stored at *key*.

        Returns the number of members actually added; values already in
        the set do not count, matching real redis.
        """
        if len(values) == 0:
            raise ResponseError("wrong number of arguments for 'sadd' command")
        redis_set = self._get_set(key, 'SADD', create=True)
        before_count = len(redis_set)
        # Members are stored encoded (bytes), mirroring redis-py's wire format.
        redis_set.update(map(self._encode, values))
        after_count = len(redis_set)
        return after_count - before_count
def scard(self, key):
"""Emulate scard."""
redis_set = self._get_set(key, 'SADD')
return len(redis_set)
def sdiff(self, keys, *args):
"""Emulate sdiff."""
func = lambda left, right: left.difference(right)
return self._apply_to_sets(func, "SDIFF", keys, *args)
def sdiffstore(self, dest, keys, *args):
"""Emulate sdiffstore."""
result = self.sdiff(keys, *args)
self.redis[self._encode(dest)] = result
return len(result)
def sinter(self, keys, *args):
"""Emulate sinter."""
func = lambda left, right: left.intersection(right)
return self._apply_to_sets(func, "SINTER", keys, *args)
def sinterstore(self, dest, keys, *args):
"""Emulate sinterstore."""
result = self.sinter(keys, *args)
self.redis[self._encode(dest)] = result
return len(result)
def sismember(self, name, value):
"""Emulate sismember."""
redis_set = self._get_set(name, 'SISMEMBER')
if not redis_set:
return 0
result = self._encode(value) in redis_set
return 1 if result else 0
def smembers(self, name):
"""Emulate smembers."""
return self._get_set(name, 'SMEMBERS', decode=True).copy()
def smove(self, src, dst, value):
"""Emulate smove."""
src_set = self._get_set(src, 'SMOVE')
dst_set = self._get_set(dst, 'SMOVE')
value = self._encode(value)
if value not in src_set:
return False
src_set.discard(value)
dst_set.add(value)
self.redis[self._encode(src)], self.redis[self._encode(dst)] = src_set, dst_set
return True
def spop(self, name):
"""Emulate spop."""
redis_set = self._get_set(name, 'SPOP')
if not redis_set:
return None
member = choice(list(redis_set))
redis_set.remove(member)
if len(redis_set) == 0:
self.delete(name)
return self._decode(member)
def srandmember(self, name, number=None):
"""Emulate srandmember."""
redis_set = self._get_set(name, 'SRANDMEMBER',decode=True)
if not redis_set:
return None if number is None else []
if number is None:
return choice(list(redis_set))
elif number > 0:
return sample(list(redis_set), min(number, len(redis_set)))
else:
return [choice(list(redis_set)) for _ in xrange(abs(number))]
def srem(self, key, *values):
"""Emulate srem."""
redis_set = self._get_set(key, 'SREM')
if not redis_set:
return 0
before_count = len(redis_set)
for value in values:
redis_set.discard(self._encode(value))
after_count = len(redis_set)
if before_count > 0 and len(redis_set) == 0:
self.delete(key)
return before_count - after_count
def sunion(self, keys, *args):
"""Emulate sunion."""
func = lambda left, right: left.union(right)
return self._apply_to_sets(func, "SUNION", keys, *args)
def sunionstore(self, dest, keys, *args):
"""Emulate sunionstore."""
result = self.sunion(keys, *args)
self.redis[self._encode(dest)] = result
return len(result)
# SORTED SET COMMANDS #
    def zadd(self, name, mappings):
        """Emulate zadd: add {member: score} *mappings* to a sorted set.

        Returns the number of newly inserted members; updating the score of
        an existing member does not count toward the total.
        """
        zset = self._get_zset(name, "ZADD", create=True)
        insert_count = lambda member, score: 1 if zset.insert(self._encode(member), float(score)) else 0 # noqa
        return sum((insert_count(member, score) for member, score in list(mappings.items())))
def zcard(self, name):
zset = self._get_zset(name, "ZCARD")
return len(zset) if zset is not None else 0
def zcount(self, name, min, max):
zset = self._get_zset(name, "ZCOUNT")
if not zset:
return 0
return len(zset.scorerange(float(min), float(max)))
def zincrby(self, name, value, amount=1):
zset = self._get_zset(name, "ZINCRBY", create=True)
value = self._encode(value)
score = zset.score(value) or 0.0
score += float(amount)
zset[value] = score
return score
    def zinterstore(self, dest, keys, aggregate=None):
        """Emulate zinterstore: intersect the zsets at *keys* into *dest*.

        Scores of members present in every input set are combined with the
        aggregate function (sum/min/max, default sum).  Returns the number
        of members stored in *dest*.
        """
        aggregate_func = self._aggregate_func(aggregate)
        # member -> list of scores collected from each input zset
        members = {}
        for key in keys:
            zset = self._get_zset(key, "ZINTERSTORE")
            if not zset:
                # Any empty/missing input empties the whole intersection.
                # NOTE(review): real redis also deletes an existing *dest*
                # in this case -- confirm whether that matters to callers.
                return 0
            for score, member in zset:
                members.setdefault(member, []).append(score)
        intersection = SortedSet()
        for member, scores in members.items():
            # Keep only members that appeared in every input set.
            if len(scores) != len(keys):
                continue
            intersection[member] = reduce(aggregate_func, scores)
        # always override existing keys
        self.redis[self._encode(dest)] = intersection
        return len(intersection)
def zrange(self, name, start, end, desc=False, withscores=False,
score_cast_func=float):
zset = self._get_zset(name, "ZRANGE")
if not zset:
return []
start, end = self._translate_range(len(zset), start, end)
func = self._range_func(withscores, score_cast_func, decode_value_func=self._decode)
return [func(item) for item in zset.range(start, end, desc)]
def zrangebyscore(self, name, min, max, start=None, num=None,
withscores=False, score_cast_func=float):
if (start is None) ^ (num is None):
raise RedisError('`start` and `num` must both be specified')
zset = self._get_zset(name, "ZRANGEBYSCORE")
if not zset:
return []
func = self._range_func(withscores, score_cast_func, decode_value_func=self._decode)
include_start, min = self._score_inclusive(min)
include_end, max = self._score_inclusive(max)
scorerange = zset.scorerange(min, max, start_inclusive=include_start, end_inclusive=include_end) # noqa
if start is not None and num is not None:
start, num = self._translate_limit(len(scorerange), int(start), int(num))
scorerange = scorerange[start:start + num]
return [func(item) for item in scorerange]
def zrank(self, name, value):
zset = self._get_zset(name, "ZRANK")
return zset.rank(self._encode(value)) if zset else None
def zrem(self, name, *values):
zset = self._get_zset(name, "ZREM")
if not zset:
return 0
count_removals = lambda value: 1 if zset.remove(self._encode(value)) else 0
removal_count = sum((count_removals(value) for value in values))
if removal_count > 0 and len(zset) == 0:
self.delete(name)
return removal_count
def zremrangebyrank(self, name, start, end):
zset = self._get_zset(name, "ZREMRANGEBYRANK")
if not zset:
return 0
start, end = self._translate_range(len(zset), start, end)
count_removals = lambda score, member: 1 if zset.remove(member) else 0
removal_count = sum((count_removals(score, member) for score, member in zset.range(start, end))) # noqa
if removal_count > 0 and len(zset) == 0:
self.delete(name)
return removal_count
def zremrangebyscore(self, name, min, max):
zset = self._get_zset(name, "ZREMRANGEBYSCORE")
if not zset:
return 0
count_removals = lambda score, member: 1 if zset.remove(member) else 0
include_start, min = self._score_inclusive(min)
include_end, max = self._score_inclusive(max)
removal_count = sum((count_removals(score, member)
for score, member in zset.scorerange(min, max,
start_inclusive=include_start,
end_inclusive=include_end)))
if removal_count > 0 and len(zset) == 0:
self.delete(name)
return removal_count
def zrevrange(self, name, start, end, withscores=False,
score_cast_func=float):
return self.zrange(name, start, end,
desc=True, withscores=withscores, score_cast_func=score_cast_func)
def zrevrangebyscore(self, name, max, min, start=None, num=None,
withscores=False, score_cast_func=float):
if (start is None) ^ (num is None):
raise RedisError('`start` and `num` must both be specified')
zset = self._get_zset(name, "ZREVRANGEBYSCORE")
if not zset:
return []
func = self._range_func(withscores, score_cast_func, decode_value_func=self._decode)
include_start, min = self._score_inclusive(min)
include_end, max = self._score_inclusive(max)
scorerange = [x for x in reversed(zset.scorerange(float(min), float(max),
start_inclusive=include_start,
end_inclusive=include_end))]
if start is not None and num is not None:
start, num = self._translate_limit(len(scorerange), int(start), int(num))
scorerange = scorerange[start:start + num]
return [func(item) for item in scorerange]
def zrevrank(self, name, value):
zset = self._get_zset(name, "ZREVRANK")
if zset is None:
return None
rank = zset.rank(self._encode(value))
if rank is None:
return None
return len(zset) - rank - 1
def zscore(self, name, value):
zset = self._get_zset(name, "ZSCORE")
return zset.score(self._encode(value)) if zset is not None else None
    def zunionstore(self, dest, keys, aggregate=None):
        """Emulate zunionstore: union the zsets at *keys* into *dest*.

        When a member appears in several inputs its scores are combined
        with the aggregate function (sum/min/max, default sum).  Returns
        the size of the stored union.
        """
        union = SortedSet()
        aggregate_func = self._aggregate_func(aggregate)
        for key in keys:
            zset = self._get_zset(key, "ZUNIONSTORE")
            if not zset:
                # Missing/empty inputs simply contribute nothing.
                continue
            for score, member in zset:
                if member in union:
                    union[member] = aggregate_func(union[member], score)
                else:
                    union[member] = score
        # always override existing keys
        self.redis[self._encode(dest)] = union
        return len(union)
# Script Commands #
def eval(self, script, numkeys, *keys_and_args):
"""Emulate eval"""
sha = self.script_load(script)
return self.evalsha(sha, numkeys, *keys_and_args)
def evalsha(self, sha, numkeys, *keys_and_args):
"""Emulates evalsha"""
if not self.script_exists(sha)[0]:
raise RedisError("Sha not registered")
script_callable = Script(self, self.shas[sha], self.load_lua_dependencies)
numkeys = max(numkeys, 0)
keys = keys_and_args[:numkeys]
args = keys_and_args[numkeys:]
return script_callable(keys, args)
def script_exists(self, *args):
"""Emulates script_exists"""
return [arg in self.shas for arg in args]
def script_flush(self):
"""Emulate script_flush"""
self.shas.clear()
def script_kill(self):
"""Emulate script_kill"""
"""XXX: To be implemented, should not be called before that."""
raise NotImplementedError("Not yet implemented.")
def script_load(self, script):
"""Emulate script_load"""
encoded_script = script if isinstance(script, bytes) else script.encode("utf-8")
sha_digest = sha1(encoded_script).hexdigest()
self.shas[sha_digest] = script
return sha_digest
def register_script(self, script):
"""Emulate register_script"""
return Script(self, script, self.load_lua_dependencies)
def call(self, command, *args):
"""
Sends call to the function, whose name is specified by command.
Used by Script invocations and normalizes calls using standard
Redis arguments to use the expected redis-py arguments.
"""
command = self._normalize_command_name(command)
args = self._normalize_command_args(command, *args)
redis_function = getattr(self, command)
value = redis_function(*args)
return self._normalize_command_response(command, value)
def _normalize_command_name(self, command):
"""
Modifies the command string to match the redis client method name.
"""
command = command.lower()
if command == 'del':
return 'delete'
return command
    def _normalize_command_args(self, command, *args):
        """
        Modifies the command arguments to match the
        strictness of the redis client.

        Two normalisations are applied:
        * non-strict zadd: raw (name, score, member, ...) pairs are
          reordered into the (name, member, score, ...) order the
          non-strict client expects;
        * zrangebyscore / zrevrangebyscore: the raw LIMIT and WITHSCORES
          protocol tokens are translated into the trailing
          (start, num, withscores) arguments of the python methods.
        """
        if command == 'zadd' and not self.strict and len(args) >= 3:
            # Reorder score and name
            zadd_args = [x for tup in zip(args[2::2], args[1::2]) for x in tup]
            return [args[0]] + zadd_args
        if command in ('zrangebyscore', 'zrevrangebyscore'):
            # expected format is: <command> name min max start num with_scores score_cast_func
            if len(args) <= 3:
                # just plain min/max
                return args
            start, num = None, None
            withscores = False
            for i, arg in enumerate(args[3:], 3):
                # keywords are case-insensitive
                lower_arg = self._encode(arg).lower()
                # handle "limit"
                if lower_arg == b"limit" and i + 2 < len(args):
                    start, num = args[i + 1], args[i + 2]
                # handle "withscores"
                if lower_arg == b"withscores":
                    withscores = True
            # do not expect to set score_cast_func
            return args[:3] + (start, num, withscores)
        return args
def _normalize_command_response(self, command, response):
if command in ('zrange', 'zrevrange', 'zrangebyscore', 'zrevrangebyscore'):
if response and isinstance(response[0], tuple):
return [value for tpl in response for value in tpl]
return response
# Config Set/Get commands #
def config_set(self, name, value):
"""
Set a configuration parameter.
"""
self.redis_config[name] = value
    def config_get(self, pattern='*'):
        """
        Get one or more configuration parameters.

        Returns a dict of every stored parameter whose name matches the
        glob *pattern*.  Values that parse as integers are returned as int,
        everything else verbatim.
        """
        result = {}
        for name, value in self.redis_config.items():
            if fnmatch.fnmatch(name, pattern):
                try:
                    result[name] = int(value)
                except ValueError:
                    # Non-numeric values are returned unchanged.
                    result[name] = value
        return result
# PubSub commands #
def pubsub(self, **kwargs):
""" Return a mocked 'PubSub' object """
if not self._pubsub:
self._pubsub = Pubsub(self, **kwargs)
return self._pubsub
def publish(self, channel, message):
self.pubsub().publish(channel, message)
# Internal #
def _get_list(self, key, operation, create=False, decode=False):
"""
Get (and maybe create) a list by name.
"""
return self._get_by_type(key, operation, create, b'list', [], decode=decode)
def _get_set(self, key, operation, create=False, decode=False):
"""
Get (and maybe create) a set by name.
"""
return self._get_by_type(key, operation, create, b'set', set(), decode=decode)
def _get_hash(self, name, operation, create=False, decode=False):
"""
Get (and maybe create) a hash by name.
"""
return self._get_by_type(name, operation, create, b'hash', {}, decode=decode)
def _get_zset(self, name, operation, create=False, decode=False):
"""
Get (and maybe create) a sorted set by name.
"""
return self._get_by_type(name, operation, create, b'zset', SortedSet(), return_default=False, decode=decode) # noqa
    def _get_by_type(self, key, operation, create, type_, default, return_default=True, decode=False):
        """
        Get (and maybe create) a redis data structure by name and type.

        Raises TypeError when *key* already holds a value of a different
        redis type, mirroring the WRONGTYPE error of real redis.  With
        return_default=False a missing key yields None instead of *default*.
        """
        key = self._encode(key)
        if self.type(key) in [type_, b'none']:
            if create:
                # setdefault stores *default* on first use, so mutations of
                # the returned structure persist in self.redis.
                val = self.redis.setdefault(key, default)
                if decode:
                    val = self._decode(val)
                return val
            else:
                val = self.redis.get(key, default if return_default else None)
                if decode:
                    val = self._decode(val)
                return val
        raise TypeError("{} requires a {}".format(operation, type_))
def _translate_range(self, len_, start, end):
"""
Translate range to valid bounds.
"""
start = int(start)
end = int(end)
if start < 0:
start += len_
start = max(0, min(start, len_))
if end < 0:
end += len_
end = max(-1, min(end, len_ - 1))
return start, end
def _translate_limit(self, len_, start, num):
"""
Translate limit to valid bounds.
"""
if start > len_ or num <= 0:
return 0, 0
return min(start, len_), num
def _range_func(self, withscores, score_cast_func, decode_value_func=lambda x: x):
"""
Return a suitable function from (score, member)
"""
if withscores:
return lambda score_member: (decode_value_func(score_member[1]), score_cast_func(self._encode(score_member[0]))) # noqa
else:
return lambda score_member: decode_value_func(score_member[1])
def _aggregate_func(self, aggregate):
"""
Return a suitable aggregate score function.
"""
funcs = {"sum": add, "min": min, "max": max}
func_name = aggregate.lower() if aggregate else 'sum'
try:
return funcs[func_name]
except KeyError:
raise TypeError("Unsupported aggregate: {}".format(aggregate))
def _apply_to_sets(self, func, operation, keys, *args):
"""Helper function for sdiff, sinter, and sunion"""
keys = self._list_or_args(keys, args)
if not keys:
raise TypeError("{} takes at least two arguments".format(operation.lower()))
left = self._get_set(keys[0], operation) or set()
for key in keys[1:]:
right = self._get_set(key, operation) or set()
left = func(left, right)
return left
    def _list_or_args(self, keys, args):
        """
        Shamelessly copied from redis-py.

        Normalise the (keys, *args) calling convention into one flat list:
        a scalar or string *keys* becomes a one-element list, then any
        extra *args* are appended.
        """
        # returns a single list combining keys and args
        try:
            iter(keys)
            # a string can be iterated, but indicates
            # keys wasn't passed as a list
            if isinstance(keys, basestring):
                keys = [keys]
        except TypeError:
            keys = [keys]
        if args:
            keys.extend(args)
        return keys
def _score_inclusive(self, score):
if isinstance(score, basestring) and score[0] == '(':
return False, float(score[1:])
return True, float(score)
    def _encode(self, value):
        "Return a bytestring representation of the value. Originally taken from redis-py connection.py"
        if isinstance(value, bytes):
            value = value
        elif isinstance(value, (int, long)):
            # `long` / `basestring` are presumably aliased elsewhere in this
            # module for Python 3 compatibility -- TODO confirm.
            value = str(value).encode('utf-8')
        elif isinstance(value, float):
            # repr() preserves full float precision (str() truncates on py2).
            value = repr(value).encode('utf-8')
        elif not isinstance(value, basestring):
            value = str(value).encode('utf-8')
        else:
            value = value.encode('utf-8', 'strict')
        return value
def _log(self, level, msg):
pass
def get_total_milliseconds(td):
    """Convert a datetime.timedelta into whole milliseconds (truncated)."""
    whole_ms = (td.days * 24 * 60 * 60 + td.seconds) * 1000
    return int(whole_ms + td.microseconds / 1000.0)
def mock_redis_client(**kwargs):
    """
    Mock common.util.redis_client so we
    can return a MockRedis object
    instead of a Redis object.
    """
    return MockRedis(**kwargs)
# Allow redis.Redis.from_url-style construction to be patched with this too.
mock_redis_client.from_url = mock_redis_client
def mock_strict_redis_client(**kwargs):
    """
    Mock common.util.redis_client so we
    can return a MockRedis object
    instead of a StrictRedis object.
    """
    return MockRedis(strict=True, **kwargs)
# Allow StrictRedis.from_url-style construction to be patched with this too.
mock_strict_redis_client.from_url = mock_strict_redis_client
|
25,457 | f63cf608af94eab4c1d0e99f422f633466ab67ed | import os
import json
import tweepy
from datetime import datetime,timedelta
import pytz
# Options
PRODUCTION = os.environ.get('TWITTER_API_KEY') is not None
if not PRODUCTION :
from dotenv import load_dotenv
load_dotenv(override=True)
api_key = os.environ.get('TWITTER_API_KEY')
api_secret = os.environ.get('TWT_API_SECRET')
access_token = os.environ.get('ACCESS_TOKEN')
access_token_secret = os.environ.get('ACCESS_TOKEN_SECRET')
# Function
def oauth_login(api_key, api_secret):
    """Authenticate with Twitter via OAuth 1a and return a tweepy API handle.

    Reads the module-level access_token / access_token_secret globals.
    A failed credential check is reported but not fatal: the (possibly
    unusable) API object is still returned, matching the original contract.
    """
    auth = tweepy.OAuthHandler(api_key, api_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    try:
        api.verify_credentials()
        print("Authentication ok!")
    # Was a bare `except:` -- that also swallowed KeyboardInterrupt and
    # SystemExit and hid the actual failure reason.
    except Exception as exc:
        print("Error when Authenticating: {}".format(exc))
    return api
def oauth2_login(api_key, api_secret):
    """Return a tweepy API handle using app-only (OAuth 2) authentication."""
    auth = tweepy.AppAuthHandler(api_key,api_secret)
    return tweepy.API(auth)
def fetch_tweet_json(filename):
    """Load and return the parsed tweet archive stored at *filename*."""
    with open(filename, encoding='utf8') as archive:
        return json.load(archive)
def delete_tweet_before_date(tweetObj, date_inp, exception_twt=None):
    """Delete every tweet created before *date_inp* ("DD/MM/YY HH:MM", UTC).

    tweetObj is the parsed tweet.json archive (a list of {'tweet': {...}}
    records).  Tweet ids listed in *exception_twt* are preserved.  Asks for
    interactive confirmation before deleting anything.
    """
    # Fixed: the default used to be a shared mutable list (exception_twt=[]).
    if exception_twt is None:
        exception_twt = []
    deleted_soon = list()
    cutoff_date = datetime.strptime(date_inp, "%d/%m/%y %H:%M")
    # cutoff_date is still naive; make it timezone-aware (UTC) so it can be
    # compared with Twitter's aware created_at timestamps.
    cutoff_date = pytz.utc.localize(cutoff_date)
    # Collect the id of every tweet older than the cutoff.
    for tweet in tweetObj:
        tweetInfo = tweet['tweet']
        tweetDate = datetime.strptime(tweetInfo['created_at'], "%a %b %d %H:%M:%S %z %Y")
        if tweetDate < cutoff_date and (tweetInfo['id'] not in exception_twt):
            deleted_soon.append(tweetInfo['id'])
    # Confirmation input in the CLI, then delete each selected tweet.
    if (input("THIS PROCESS CANNOT BE UNDONE, PROCEED? (Y/N) ").lower() == "y"):
        for tweetID in deleted_soon:
            # NOTE(review): relies on the module-level `api` client being
            # initialised by the __main__ block -- confirm for library use.
            api.destroy_status(tweetID)
        print("{} tweets was deleted.".format(len(deleted_soon)))
if __name__ == "__main__":
    # Authenticate, load the local tweet archive, then prompt for a cutoff.
    api = oauth_login(api_key,api_secret)
    print("Authenticated as @{}".format(api.me().screen_name))
    tweets = fetch_tweet_json('tweet.json')
    # NOTE(review): the prompt string is missing its closing ")" -- cosmetic.
    date_of_tweet = input("Enter the cutoff date: (DD/MM/YY HH:MM")
delete_tweet_before_date(tweets, date_of_tweet) |
25,458 | 56a0b937442086d2bd97ad06efa4778cd935d1af | # encoding: utf-8
"""
@ author: wangmingrui
@ time: 2019/9/30 14:42
@ desc:
"""
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.urls import resolve
from ..models import Board
from ..views import home, BoardListView
class HomeTests(TestCase):
    """Tests for the board-list home page."""
    def setUp(self):
        # One board fixture so the home page has something to render.
        self.board = Board.objects.create(name='Django', description='Django board.')
        url = reverse('home')  # reverse-resolve the URL from the 'home' route name
        self.response = self.client.get(url)
    def test_home_view_status_code(self):
        # The home page should load successfully.
        self.assertEqual(self.response.status_code, 200)
    def test_home_url_resolves_home_view(self):
        # '/' must be routed to the class-based BoardListView.
        view = resolve('/')
        # self.assertEqual(view.func, home)
        self.assertEqual(view.func.view_class, BoardListView)
    def test_home_view_contains_link_to_topics_page(self):
        # The rendered home page should contain a link to the board's topics.
        board_topics_url = reverse('board_topics', kwargs={'pk': self.board.pk})
        self.assertContains(self.response, 'href="{0}"'.format(board_topics_url))
|
25,459 | 17fa59d6c82f28d24f266b4c2943065eb8558fd7 | from django.urls import path
from .views import (
checkout,
IndexView,
FoodDetailView,
add_to_cart,
remove_from_cart,
# product
)
app_name = "food"
# URL routes for the food app (namespaced as "food").
urlpatterns = [
    path('', IndexView.as_view(), name="index"),
    path('product/<slug>/', FoodDetailView.as_view(), name="product"),
    # NOTE(review): the two cart routes lack a trailing slash, unlike
    # 'product/<slug>/' above -- confirm the inconsistency is intentional.
    path('add-to-cart/<slug>', add_to_cart, name='add-to-cart'),
    path('remove-from-cart/<slug>', remove_from_cart, name='remove-from-cart'),
    # path('products', product, name="product"),
]
class Solution(object):
    def findDisappearedNumbers(self, nums):
        """Return all integers in [1, len(nums)] that never appear in nums.

        Fixes two bugs in the previous version: the tally list was
        initialised with range values instead of zeros, and the loop used
        each *value* of nums as an index back into nums
        (``lst[nums[i]] += 1`` with ``i`` iterating values), which produced
        wrong counts and an IndexError whenever a value equalled len(nums).
        """
        size = len(nums)
        # seen[v] counts occurrences of value v; index 0 is unused.
        seen = [0] * (size + 1)
        for value in nums:
            seen[value] += 1
        return [i for i in range(1, size + 1) if seen[i] == 0]
|
25,461 | d63782d17ffccdd453729f65ab1b95e91fd80010 | from .app import Application
from .code_mod import CodeSnippet, Module, Notebook
from .drv_inst import Driver, Instrument
from .record import Record, Report, Sample, Project
from .user import User
|
25,462 | de0ee01503250274e275963cb697bb0cc4416bbc | '''
Author: Tingjun Li
Create Time: 2017-04-10
Function: Find and show book details
'''
from django.http import JsonResponse
from django.core import serializers, exceptions
import json
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from book.models import Book, Tag, Category
from apitools.decorators import accept_methods,have_perms
# Show cate list
@accept_methods(['get'])
def cate_list(request):
    """Return every book category as JSON: {result: ok|error, data|message}."""
    response_data = {}
    response_data['result'] = 'error'
    try:
        cates = Category.objects.all()
        # Serialise only the fields the client needs.
        respdata = [{'id': item.id, 'text': item.text, 'note': item.note}
                    for item in cates]
        response_data['result'] = 'ok'
        response_data['data'] = json.dumps(respdata)
    # Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
    except Exception:
        response_data['result'] = 'error'
        response_data['message'] = 'Fail to fetch categories.'
    return JsonResponse(response_data)
# Add cate
@csrf_exempt
@accept_methods(['post'])
@have_perms(['book.book.admin_book_category_add'])
def cate_add(request):
    """Create a new category from a JSON request body {'text': ...}."""
    response_data = {}
    response_data['result'] = 'error'
    req = json.loads(request.body.decode('utf-8'))
    cate_name = req
    cate = Category(text=cate_name['text'])
    try:
        cate.save()
        response_data['result'] = 'ok'
        response_data['data'] = {'text': cate_name['text'], 'id': cate.id}
    # Was a bare `except:`; also, the old message ('Fail to fetch
    # categories.') described the wrong operation -- this branch means the
    # save failed.
    except Exception:
        response_data['result'] = 'error'
        response_data['message'] = 'Fail to save category.'
    return JsonResponse(response_data)
@accept_methods(['post'])
def cate_getname(request):
    """Look up a single category by id from a JSON request body {'id': ...}."""
    response_data = {}
    response_data['result'] = 'error'
    req = json.loads(request.body.decode('utf-8'))
    cate_id = req['id']
    try:
        cate = Category.objects.get(id=cate_id)
        response_data['result'] = 'ok'
        response_data['data'] = {'text': cate.text, 'id': cate.id}
    # Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
    except Exception:
        response_data['result'] = 'error'
        response_data['message'] = 'Not found.'
    return JsonResponse(response_data)
# Tag list
@accept_methods(['get'])
def tag_list(request):
    """Return every book tag as JSON: {result: ok|error, data|message}."""
    response_data = {}
    response_data['result'] = 'error'
    try:
        tags = Tag.objects.all()
        # Serialise only the fields the client needs.
        respdata = [{'id': item.id, 'text': item.text, 'note': item.note}
                    for item in tags]
        response_data['result'] = 'ok'
        response_data['data'] = json.dumps(respdata)
    # Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
    except Exception:
        response_data['result'] = 'error'
        response_data['message'] = 'Fail to fetch categories.'
    return JsonResponse(response_data)
# Add cate
@csrf_exempt
@accept_methods(['post'])
@have_perms(['book.book.admin_book_tag_add'])
def tag_add(request):
    """Create a new tag from a JSON request body {'text': ...}."""
    response_data = {}
    response_data['result'] = 'error'
    req = json.loads(request.body.decode('utf-8'))
    tag_name = req
    tag = Tag(text=tag_name['text'])
    try:
        tag.save()
        response_data['result'] = 'ok'
        response_data['data'] = {'text': tag_name['text'], 'id': tag.id}
    # Was a bare `except:`; the old message ('Fail to fetch taggories.') had
    # a typo and described the wrong operation -- this branch is a failed save.
    except Exception:
        response_data['result'] = 'error'
        response_data['message'] = 'Fail to save tag.'
    return JsonResponse(response_data)
|
25,463 | 767a947a13122b0ecd4f13755ccb7aff13d8dff0 | import torch
from torch import nn
from smoke.modeling.smoke_coder import SMOKECoder
from smoke.layers.utils import (
nms_hm,
select_topk,
select_point_of_interest,
)
class PostProcessor(nn.Module):
    """Decode SMOKE heatmap + regression outputs into 3D detections.

    forward() returns a tensor whose rows are
    [class, alpha, box2d(4), dims(3) as h/w/l, location(3), rot_y, score],
    filtered by the detection score threshold.
    """
    def __init__(self,
                 smoker_coder,
                 reg_head,
                 det_threshold,
                 max_detection,
                 pred_2d):
        super(PostProcessor, self).__init__()
        self.smoke_coder = smoker_coder      # SMOKECoder used for all decoding
        self.reg_head = reg_head             # number of regression channels per point
        self.det_threshold = det_threshold   # score cut-off for kept detections
        self.max_detection = max_detection   # top-K candidates taken from the heatmap
        self.pred_2d = pred_2d               # whether to also emit 2D boxes
    def prepare_targets(self, targets):
        """Stack per-image affine matrices, camera intrinsics and image sizes."""
        trans_mat = torch.stack([t.get_field("trans_mat") for t in targets])
        K = torch.stack([t.get_field("K") for t in targets])
        size = torch.stack([torch.tensor(t.size) for t in targets])
        return dict(trans_mat=trans_mat,
                    K=K,
                    size=size)
    def forward(self, predictions, targets):
        """Decode (heatmap, regression) predictions into detection rows."""
        pred_heatmap, pred_regression = predictions[0], predictions[1]
        batch = pred_heatmap.shape[0]
        # NOTE(review): target_varibales is used unconditionally below, so
        # targets=None would raise NameError -- confirm callers always pass it.
        if targets is not None:
            target_varibales = self.prepare_targets(targets)
        # Peak-keeping NMS on the heatmap, then take the K best candidates.
        heatmap = nms_hm(pred_heatmap)
        scores, indexs, clses, ys, xs = select_topk(
            heatmap,
            K=self.max_detection,
        )
        # Gather the regression vectors at the selected heatmap locations.
        pred_regression = select_point_of_interest(
            batch, indexs, pred_regression
        )
        pred_regression_pois = pred_regression.view(-1, self.reg_head)
        pred_proj_points = torch.cat([xs.view(-1, 1), ys.view(-1, 1)], dim=1)
        # FIXME: fix hard code here -- channel layout is assumed to be
        # [depth, proj-offset(2), dim-offset(3), orientation(rest)].
        pred_depths_offset = pred_regression_pois[:, 0]
        pred_proj_offsets = pred_regression_pois[:, 1:3]
        pred_dimensions_offsets = pred_regression_pois[:, 3:6]
        pred_orientation = pred_regression_pois[:, 6:]
        pred_depths = self.smoke_coder.decode_depth(pred_depths_offset)
        # import pdb; pdb.set_trace()
        pred_locations = self.smoke_coder.decode_location(
            pred_proj_points,
            pred_proj_offsets,
            pred_depths,
            target_varibales["K"],
            target_varibales["trans_mat"]
        )
        pred_dimensions = self.smoke_coder.decode_dimension(
            clses,
            pred_dimensions_offsets
        )
        # we need to change center location to bottom location
        pred_locations[:, 1] += pred_dimensions[:, 1] / 2
        pred_rotys, pred_alphas = self.smoke_coder.decode_orientation(
            pred_orientation,
            pred_locations
        )
        if self.pred_2d:
            box2d = self.smoke_coder.encode_box2d(
                target_varibales["K"],
                pred_rotys,
                pred_dimensions,
                pred_locations,
                target_varibales["size"]
            )
        else:
            # Placeholder 2D box when 2D prediction is disabled.
            box2d = torch.tensor([0, 0, 0, 0])
        # change variables to the same dimension
        clses = clses.view(-1, 1)
        pred_alphas = pred_alphas.view(-1, 1)
        pred_rotys = pred_rotys.view(-1, 1)
        scores = scores.view(-1, 1)
        # change dimension back to h,w,l
        pred_dimensions = pred_dimensions.roll(shifts=-1, dims=1)
        result = torch.cat([
            clses, pred_alphas, box2d, pred_dimensions, pred_locations, pred_rotys, scores
        ], dim=1)
        # Drop candidates whose score (last column) is below the threshold.
        keep_idx = result[:, -1] > self.det_threshold
        result = result[keep_idx]
        return result
def make_smoke_post_processor(cfg):
    """Build a PostProcessor (and its SMOKECoder) from a SMOKE config node."""
    smoke_coder = SMOKECoder(
        cfg.MODEL.SMOKE_HEAD.DEPTH_REFERENCE,
        cfg.MODEL.SMOKE_HEAD.DIMENSION_REFERENCE,
        cfg.MODEL.DEVICE,
    )
    postprocessor = PostProcessor(
        smoke_coder,
        cfg.MODEL.SMOKE_HEAD.REGRESSION_HEADS,
        cfg.TEST.DETECTIONS_THRESHOLD,
        cfg.TEST.DETECTIONS_PER_IMG,
        cfg.TEST.PRED_2D,
    )
    return postprocessor
|
25,464 | 28efbfde6d3e5a7ce4d31866c1ac36f7872f1ff2 | from GameNave4.src.util.Build import NaveJogoDirector
class Personagem(object):
    """Facade around a game ship ("nave") produced via the builder pattern.

    Wraps the object built by NaveJogoDirector and forwards position,
    shooting, movement and hit-state operations to it.
    """
    def __init__(self, nave):
        # *nave* is a builder instance; the director turns it into the ship.
        self.veiculo = Personagem.criando_nave(nave)
    @staticmethod
    def criando_nave(nave_builder):
        """Run the director over *nave_builder* and return the built ship."""
        nave_jogador = NaveJogoDirector.NaveJogoDirector(nave_builder)
        nave_jogador.contruir_nave()
        nave = nave_jogador.get_nave()
        # The director wraps the product; unwrap the concrete ship object.
        nave = nave.nave_fabrica
        return nave
    def get_area(self):
        return self.veiculo.get_area()
    def municao(self):
        # Ammunition: the ship's list of in-flight shots.
        return self.veiculo.municao
    def remove_tiro(self, tiro):
        # Remove a shot ("tiro") from the ship's ammunition list.
        assert isinstance(self.veiculo, object)
        self.veiculo.municao.remove(tiro)
    def get_posicao_y(self):
        return self.veiculo.posicao["y"]
    def get_posicao_x(self):
        return self.veiculo.posicao["x"]
    def set_posicao_y(self, valor):
        self.veiculo.posicao["y"] = valor
    def set_posicao_x(self, valor):
        self.veiculo.posicao["x"] = valor
    def start_area(self):
        return self.veiculo.cria_area()
    def atira(self):
        # Fire a shot.
        self.veiculo.atira()
    def figura(self):
        # Sprite/image of the ship.
        return self.veiculo.imagemObjeto
    def atingido(self):
        # Whether the ship has been hit.
        return self.veiculo.atingido
    def foi_atingido(self):
        # Mark the ship as hit.
        self.veiculo.atingido = True
    def move(self):
        self.veiculo.move()
|
25,465 | a067626dafa2d8144cc2df8afc8297c80655b077 | import os
import json
import subprocess
import time
#Files for introspection
#/etc/virtualimage.properties
#
#WCA_VIRTUAL_MACHINE=/resources/virtualSystems/77/virtualMachines/96
#WCA_VIRTUAL_SYSTEM=/resources/virtualSystems/77
#WCA_IPADDRESS=fd8c:215d:178e:2222:290:fa72:fa1e:9346,fd8c:215d:178e:888:290:fa72:fa1e:9346
#PURESCALE_IPADDRESS=fd8c:215d:178e:888:290:fa72:fa1e:9346
#curl -u "admin:babyrack" -H "X-IBM-Workload-Deployer-API-Version:5.0.0.1" -H "X-IBM-Workload-Deployer-API-Session:NONE" -g -kv --url https://[fd8c:215d:178e:888:290:fa72:fa1e:9346]/resources/virtualSystems/77/virtualMachines/
class Topology():
topology = None
user = None
password = None
    def __init__(self, config) :
        """Store deployer credentials/API version from *config* (dict)."""
        self.topology = {}
        self.user = config['user']
        self.password = config['password']
        self.apiversion = config['apiversion']
    def create(self) :
        """Populate self.topology from /etc/virtualimage.properties + the API."""
        self._load()
        self._search_nodes()
    def list_nodes(self):
        """Return the hostnames of all discovered nodes."""
        nodes = []
        for n,v in self.topology['nodes'].items() :
            nodes.append(n)
        return nodes
def scale_out(self):
'''
POST /resources/virtualSystems/{vs_id}/virtualMachines/
--data:
{
"desiredcount": 1,
"virtualmachine": "/resources/virtualSystems/212/virtualMachines/288",
"identifier": "hello_nick"
}
'''
print 'scale out'
#curl -u "admin:babyrack" -X POST -H "X-IBM-Workload-Deployer-API-Version:5.0.0.1" -H "X-IBM-Workload-Deployer-API-Session:NONE" -H "Content-Type:application/json" -kv --data @x.json --url "https://9.111.142.16/resources/virtualSystems/212/virtualMachines"
url = "https://[%s]%s/virtualMachines/" % ( self.topology['PURESCALE_IPADDRESS'], self.topology['WCA_VIRTUAL_SYSTEM'])
#print 'url=', url
cloned = self.topology['WCA_VIRTUAL_MACHINE']
identifier = 'scaled_'+ str(time.time()).split('.')[0]
data_string = '{"desiredcount": 1,"virtualmachine": "%s", "identifier": "%s"}'%(cloned, identifier)
#print 'data=', data_string
datafile = '/tmp/data.json'
f = None
try :
f = open(datafile, 'w')
f.write(data_string)
finally :
if f :
f.close()
cmd = "curl -u \"%s:%s\" -X POST -H \"X-IBM-Workload-Deployer-API-Version:%s\" -H \"Content-Type:application/json\" -k --data @%s -g --url %s" % (self.user, self.password, self.apiversion, datafile,url)
print 'cmd = ' , cmd
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
p.wait()
#print p.stdout.readlines()
number = len(self.list_nodes())
self.create()
while len(self.list_nodes()) <= number :
print 'wca>>>>Refresh the topology'
print str(self.list_nodes())
time.sleep(30)
self.create()
for n, v in self.topology['nodes'].items() :
if cmp (identifier, v['wca_identifier']) == 0 :
return n
    def scale_in(self, node = None):
        '''
        TODO
        DELETE /resources/virtualSystems/{vs_id}/virtualMachines/{vm_id}

        Remove *node* from the virtual system via the deployer REST API.
        NOTE(review): if *node* is None the "pick one" branch is an empty
        stub, so vm_id stays '' and the DELETE targets an invalid URL --
        confirm callers always pass an explicit node.
        '''
        # must check it is not this node
        vm_id = ''
        if node :
            if self.topology['nodes'].has_key(node):
                vm_id = self.topology['nodes'][node]['wca_id']
        else :
            # TODO pick up a node
            pass
        url = "https://[%s]%s/virtualMachines/%s" % ( self.topology['PURESCALE_IPADDRESS'], self.topology['WCA_VIRTUAL_SYSTEM'], vm_id)
        #print 'url=', url
        cmd = "curl -u \"%s:%s\" -X DELETE -H \"X-IBM-Workload-Deployer-API-Version:%s\" -H \"Content-Type:application/json\" -k -g --url %s" % (self.user, self.password, self.apiversion,url)
        #print 'cmd = ' , cmd
        p = subprocess.call(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        return node
def get_node_info(self, node):
for n, v in self.topology['nodes'].items() :
if cmp (n, node) == 0 :
return v
def get_node_ipaddress(self, node):
for n, v in self.topology['nodes'].items() :
if cmp (n, node) == 0 :
return v['ip_address']
    def _load(self) :
        """Read deployer identifiers from /etc/virtualimage.properties.

        Extracts WCA_VIRTUAL_SYSTEM, PURESCALE_IPADDRESS and
        WCA_VIRTUAL_MACHINE into self.topology.
        """
        # NOTE(review): the file handle is never closed -- harmless for a
        # short-lived script, but worth fixing.
        f = open('/etc/virtualimage.properties', 'r')
        lines = f.readlines()
        for line in lines :
            if line.find('WCA_VIRTUAL_SYSTEM') == 0 :
                self.topology['WCA_VIRTUAL_SYSTEM'] = line[len('WCA_VIRTUAL_SYSTEM='):].rstrip()
            if line.find('PURESCALE_IPADDRESS') == 0 :
                self.topology['PURESCALE_IPADDRESS'] = line[len('PURESCALE_IPADDRESS='):].rstrip()
            if line.find('WCA_VIRTUAL_MACHINE') == 0 :
                self.topology['WCA_VIRTUAL_MACHINE'] = line[len('WCA_VIRTUAL_MACHINE='):].rstrip()
    def _search_nodes(self) :
        """Query the deployer REST API for the virtual system's VMs and cache,
        in self.topology['nodes'], one entry per started VM keyed by its public
        hostname: {'ip_address', 'wca_id', 'wca_identifier'}."""
        nodes = {}
        tmpfile = '/tmp/virtualsystem.json'
        url = "https://[%s]%s/virtualMachines/" % ( self.topology['PURESCALE_IPADDRESS'], self.topology['WCA_VIRTUAL_SYSTEM'])
        print 'url = ', url
        # curl writes the JSON response to tmpfile (-o); -k/-g as in scale_in
        cmd = "curl -u \"%s:%s\" -H \"X-IBM-Workload-Deployer-API-Version:%s\" -k -g --url %s -o %s" % (self.user, self.password, self.apiversion, url, tmpfile)
        print 'cmd = ' , cmd
        p = subprocess.call(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        f = open(tmpfile)
        try:
            json_str = f.read()
            vs = json.loads(json_str)
            #print '-------------vs : ', str(vs)
            for vm in vs :
                # only VMs reported as 'Started' contribute nodes (py2 cmp)
                if vm[u'currentstatus_text'] and cmp(vm['currentstatus_text'], 'Started') == 0 :
                    for nic in vm['nics'] :
                        #print nic
                        # only the public NIC carries the externally usable address
                        if nic['ip_hostname'] and cmp(nic['type'], 'public') == 0:
                            nodes[nic['ip_hostname']]= {'ip_address':nic['ip_address'],'wca_id' : vm['id'],'wca_identifier':vm['identifier']}
                            #nodes[nic['ip_hostname']] = {'wca_id' : vm['id']}
        finally:
            # always release the handle and remove the temporary response file
            if f :
                f.close()
                os.remove(tmpfile)
        self.topology['nodes'] = nodes
    def get_topology(self):
        """Return the cached topology dict (system ids, addresses, nodes)."""
        return self.topology
|
25,466 | a90b6327d442abfb61e74dc01c85262d76fb8342 | #!/usr/bin/env python
import os
from flask_jwt import JWT
from app.api_1_0.models import InnerResult
from app.datasource.models import OriginData
COV = None
if os.environ.get("FLASK_COVERAGE"):
    import coverage
    # Coverage must start *before* the app package is imported below,
    # otherwise module-level code in 'app' would not be measured.
    COV = coverage.coverage(branch=True, include='app/*')
    COV.start()
if os.path.exists('.env'):
    '''import env config'''
    print('Importing environment from .env ...')
    # NOTE(review): split('=') silently drops lines whose value contains '=';
    # split('=', 1) would be safer. The file handle is also never closed.
    for line in open('.env'):
        var = line.strip().split('=')
        if len(var) == 2:
            os.environ[var[0]] = var[1]
from app import create_app, db
from flask_script import Manager, Shell
from app.models import User, Role, Permission
from flask_migrate import MigrateCommand, Migrate
# Build the Flask app from FLASK_CONFIG (or the 'default' config).
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
def authenticate(username, password):
    """flask_jwt auth handler: return the User for valid credentials,
    or None (implicitly) when the user is unknown or the password fails."""
    user = User.query.filter_by(username=username).first()
    if user and user.verify_password(password):
        return user
def identity(payload):
    """flask_jwt identity handler: map the token payload's 'identity' field
    (a user id) back to the User row, or None if it no longer exists."""
    user_id = payload['identity']
    user = User.query.filter_by(id=user_id).first()
    return user
# Wire JWT auth into the app using the two handlers above.
jwt = JWT(app, authenticate, identity)
def make_shell_context():
    """Objects pre-imported into the `manage.py shell` session."""
    return dict(app=app, db=db, User=User, Role=Role, Permission=Permission,
                InnerResult=InnerResult, OriginData=OriginData)
migrate = Migrate(app, db)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command("db", MigrateCommand)
@manager.command
def test(coverage=False):
    """Run the unit tests."""
    # With --coverage but no FLASK_COVERAGE env var, set the flag and
    # re-exec this script so coverage starts before 'app' is imported.
    if coverage and not os.environ.get('FLASK_COVERAGE'):
        import sys
        os.environ['FLASK_COVERAGE'] = '1'
        os.execvp(sys.executable, [sys.executable] + sys.argv)
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)
    # COV is non-None only when the module started under FLASK_COVERAGE.
    if COV:
        COV.stop()
        COV.save()
        print('Coverage Summary:')
        COV.report()
        basedir = os.path.abspath(os.path.dirname(__file__))
        covdir = os.path.join(basedir, 'tmp/coverage')
        COV.html_report(directory=covdir)
        print('HTML version: file://%s/index.html' % covdir)
        COV.erase()
if __name__ == '__main__':
    manager.run()
|
25,467 | 5912bb8484b343e30a6804c9603ba8f4fd238075 | # test code
# 代码测试 |
25,468 | fde669be0ab96ff7b7ef3ee29fe88d8ddc0234ea | # -*- coding: utf-8 -*-
"""
Auxiliary data handling function: Case study UK CPI inflation projections
-------------------------------------------------------------------------
from Bank of England SWP 674: Machine learning at central banks (September 2017)
- authors: Chiranjit Chakraborty & Andreas Joseph
- disclaimer: licence.txt and SWP 674 disclaimer apply
- documentation: see README.txt for structure and comments for details
"""
import numpy as np
import pandas as pd
def data_transformer(data, trafo, power=1.):
    """Apply a named transformation (optionally followed by a power) to a series.

    Parameters
    ----------
    data : 1-d numpy array
        series to transform
    trafo : str, format 'name' or 'name-shift'
        'NA'  : return the data unchanged (the exponent is NOT applied)
        'pow' : element-wise power
        'log' : base-10 logarithm
        'd1'  : difference over 'shift' periods
        'pch' : percentage change over 'shift' periods
        'ld'  : log-difference x100 over 'shift' periods (approx. pch)
        the '-shift' suffix only applies to 'd1', 'pch' and 'ld'
    power : float, optional (Default value = 1.)
        exponent applied after the transformation

    Returns
    -------
    1-d numpy array
        transformed series (shorter by 'shift' for d1/pch/ld)

    Raises
    ------
    ValueError
        Invalid transformation value.
    """
    name = trafo.split('-')[0]
    if name == 'NA':
        # pass-through; note the exponent is deliberately ignored here
        return data
    if name == 'pow':
        return data ** power
    if name == 'log':
        return np.log10(data) ** power
    if name in ('d1', 'pch', 'ld'):
        # these trafos require the shift encoded after the dash
        shift = int(trafo.split('-')[1])
        lead, lag = data[shift:], data[:-shift]
        if name == 'd1':
            out = lead - lag
        elif name == 'pch':
            out = 100. * (lead - lag) / lag
        else:  # 'ld'
            out = 100 * np.log(lead / lag)
        return out ** power
    raise ValueError("Invalid transformation value.")
def data_framer(data, target, features='all', index=None, start_i=None, end_i=None, shift=0, trafos=[], power=[], \
                name_trafo=True, drop_missing=True, write=False, out_name='output', CSV_input=True, \
                delimiter=',', CSV_output=True, in_sheet='Sheet1', out_sheet='Sheet1', \
                print_summary=False, corr_matrix=False, plot_data=False):
    """Select, transform and frame data.
    Parameters
    ----------
    data : pandas.DataFrame or filename
        input data form data frame or file
    target : str
        name of target variable (column name of data)
    features : list of str, optional (Default value = 'all')
        name of feature variables (if 'all', use data.columns, excl 'target')
    index : name, optional (Default value = None)
        name of index variable
    start_i : value, optional (Default value = None)
        index start of observations to be considered
    end_i : value, optional (Default value = None)
        index end of observations to be considered
    shift : int, optional (Default value = 0)
        shift between target and features in units of index
    trafos : list of str, optional (Default value = [])
        transformations for each column in target and features
    power : list, optional (Default value = [])
        exponent of power transformations
    name_trafo : bool, optional (Default value = True)
        if True, include trafos in columns names of output frame
    drop_missing : bool, optional (Default value = True)
        if True, drop missing observations
    write : bool, optional (Default value = False)
        if True, write output frame to file
    out_name : str, optional (Default value = 'output')
        name of output file
    CSV_input : bool, csv input, optional (Default value = True)
        if True, csv-format expected, else Excel
    delimiter : str, optional (Default value = ',')
        columns separator
    CSV_output : bool, csv output, optional (Default value = True)
        if True, csv-format used, else Excel
    in_sheet : str, optional (Default value = 'Sheet1')
        name of input sheet for Excel format
    out_sheet : str, optional (Default value = 'Sheet1')
        name of output sheet for Excel format
    print_summary : bool, optional (Default value = False)
        if True, print summary statistics of output frame to screen
    corr_matrix : bool, optional (Default value = False)
        if True, print correlation matrix of output frame to screen
    plot_data : bool, optional (Default value = False)
        if True, plot output frame
    Returns
    -------
    pandas.DataFrame
        output data
    """
    # load dataframe from file (if not given)
    if type(data) == str: # load data if filename is given
        if CSV_input == True:
            data = pd.read_csv(data, sep=delimiter)
        else:
            data = pd.read_excel(data, in_sheet)
    # set index
    if not (data.index.name == index or index == None): # if not yet set
        data.set_index(index, inplace=True)
    # set start and end end indices
    # NOTE(review): if start_i/end_i lookup fails, only a message is printed
    # and iS/iE stay undefined, so a NameError follows below — confirm intent.
    if start_i == None:
        iS = 0
    else:
        try:
            iS = list(data.index).index(start_i)
        except ValueError:
            print("'Value or type of given start index value not matching index.")
    if end_i == None:
        iE = len(data.index)
    else:
        try:
            iE = list(data.index).index(end_i)
        except ValueError:
            print("Value or type of given end index value not matching index.")
    # set feature variable
    if type(features) == str and not features == 'all':
        features = [features]
    elif features == 'all':
        features = list(data.columns)
        features.remove(target)
        features.remove(index)
    # set no trafos if empty lists are given
    if len(trafos) == 0: # no level transformations
        trafos = ['NA' for i in range(len(features) + 1)]
    if len(power) == 0: # no power transformations
        power = np.ones(len(features) + 1)
    # initiate new output dataframe
    data_new = pd.DataFrame(columns=[index])
    data_new[index] = np.array(data.index[iS:iE + 1])
    data_new.set_index(index, inplace=True)
    # get, slice and transform data (loop over target and features)
    # c == 0 is the target column; features follow, lagged by 'shift'.
    for c, col in enumerate([target] + features):
        tf = trafos[c].split('-')
        if len(tf) == 1:
            t = 0
        else:
            t = int(tf[1])
        col_name = col
        if name_trafo == True:
            col_name += '-' + trafos[c]
        # target
        if c == 0:
            if power[c] != 1 and name_trafo == True:
                col_name += '-E' + str(power[c])
            if ((iS - t) < 0):
                raise ValueError('Target index transformation led to negative index.')
            # extra 't' leading observations compensate the trafo's shortening
            data_slice = np.array(data[col][iS - t:iE + 1])
            data_new[col_name] = data_transformer(data_slice, trafos[c], power[c])
        # features
        else:
            if name_trafo == True:
                col_name += '-T' + str(shift)
            if power[c] != 1 and name_trafo == True:
                col_name += '-E' + str(power[c])
            if ((iS - t - shift) < 0):
                raise ValueError('Feature index shift or transformation led to negative index.')
            # features are additionally lagged by 'shift' relative to the target
            data_slice = np.array(data[col][iS - t - shift:iE - shift + 1])
            data_new[col_name] = data_transformer(data_slice, trafos[c], power[c])
    if drop_missing == True:
        data_new = data_new.dropna()
    # write new data to file
    if write == True:
        if CSV_output == True:
            data_new.to_csv(out_name, sep=delimiter)
        else:
            data_new.to_excel(out_name, out_sheet)
    # print summary stats of new data
    if print_summary == True:
        print("\nData summary:")
        print(data_new.describe())
    # output correlation structure of new data
    if corr_matrix == True:
        print("'\nData correlations matrix:")
        print(data_new.corr())
    # plot new data
    if plot_data == True:
        df_plot = data_new.plot(lw=2)
        df_plot.legend(loc=2, prop={'size': 9})
    return data_new
def get_alerts(df, features=None, cutoff_sides=None, n_min_alert=1, \
               p_cutoff=20, ID_name=None, add_alerts=True):
    """Generate outlier-based alerts for observations.

    Parameters
    ----------
    df : pandas.DataFrame
        input data
    features : list, optional (Default value = None)
        names of columns in df to use for alerts; None means all columns
        (minus ID_name, if given)
    cutoff_sides : list of str, optional (Default value = None)
        outlier side per feature: 'L' (left), 'R' (right) or 'LR' (both);
        None means 'LR' for every feature
    n_min_alert : int, optional (Default value = 1)
        minimal number of per-feature outliers for an overall alert
    p_cutoff : float, optional (Default value = 20)
        percentile cutoff defining outliers ('LR' splits it between tails)
    ID_name : str, optional (Default value = None)
        identifier column excluded from alert computation
    add_alerts : bool, optional (Default value = True)
        if True, append a 'has-<n>-alerts' column to the returned frames

    Returns
    -------
    dict
        with keys 'cutoffs', 'has_alert', 'fraction', 'all_alerts', 'data'

    Raises
    ------
    ValueError
        for unknown feature names, an invalid ID_name, or a bad cutoff side
    """
    # get features and check names
    if features is None:
        # BUG FIX: the original used '==' (a no-op comparison) instead of
        # '=', so features stayed None and df[features] crashed below.
        features = df.columns
        if ID_name is not None:
            if ID_name in features:
                features = features[features != ID_name]
    else:
        cols = df.columns
        for f in features:
            if f not in cols:
                raise ValueError('Got invalid feature name.')
    if (ID_name is not None) and (ID_name not in df.columns):
        raise ValueError('ID_name not in dataframe columns.')
    # cutoff sides
    if cutoff_sides is None:
        cutoff_sides = ['LR' for f in features]
    else:
        for f in range(len(features)):
            if cutoff_sides[f] not in ['LR', 'L', 'R']:  # both / left-only / right-only
                raise ValueError('Got invalid value for cutoff side.')
    # get single (per-feature) outliers
    data = df[features].copy()
    df_out = df[features].copy()
    cut_values = np.zeros((len(features), 2)) * np.nan
    for i, (name, side) in enumerate(zip(features, cutoff_sides)):
        vals = data[name].values
        # 'method' replaces the 'interpolation' kwarg removed from NumPy 2.0
        if side == 'LR':
            # two-sided: split the cutoff mass evenly between both tails
            cut_values[i, 0] = np.percentile(vals, p_cutoff / 2, method='nearest')  # LHS cutoff
            cut_values[i, 1] = np.percentile(vals, 100 - p_cutoff / 2, method='nearest')  # RHS
            df_out[name] = (vals <= cut_values[i, 0]) | (vals >= cut_values[i, 1])
        elif side == 'L':
            cut_values[i, 0] = np.percentile(vals, p_cutoff, method='nearest')
            df_out[name] = vals <= cut_values[i, 0]
        elif side == 'R':
            cut_values[i, 1] = np.percentile(vals, 100 - p_cutoff, method='nearest')
            df_out[name] = vals >= cut_values[i, 1]
    df_cutoffs = pd.DataFrame(cut_values, columns=['left', 'right'])
    df_cutoffs['features'] = features
    df_cutoffs.set_index('features', inplace=True)
    # get joint outliers: an observation alerts when enough features are outliers
    M = len(data)
    has_alert = np.zeros(M, dtype=bool)
    for r in range(M):  # iterate over rows
        vals = df_out.iloc[r].values
        if np.sum(vals) >= n_min_alert:
            has_alert[r] = True
    alert_fraction = np.sum(has_alert, dtype=float) / len(has_alert)
    if add_alerts == True:
        alert_name = 'has-' + str(n_min_alert) + '-alerts'
        data[alert_name] = has_alert
        df_out[alert_name] = has_alert
    if ID_name is not None:
        data[ID_name] = df[ID_name]
        df_out[ID_name] = df[ID_name]
    out_dict = {'cutoffs': df_cutoffs,
                'has_alert': has_alert,
                'fraction': alert_fraction,
                'all_alerts': df_out,
                'data': data}
    return out_dict
def is_iterable(thing):
    """Return True if *thing* supports iteration, else False.

    Parameters
    ----------
    thing : object
        candidate object

    Returns
    -------
    bool
    """
    try:
        iter(thing)
        return True
    except TypeError:
        return False
def to_zero_one(thing):
    """Snap value(s) to whichever of 0 or 1 is nearest (threshold 0.5).

    Parameters
    ----------
    thing : number or array of numbers
        input data

    Returns
    -------
    int (for a scalar) or float numpy array (for an iterable) of 0/1 values
    """
    if not is_iterable(thing):
        # scalar path: plain int 0 or 1
        return 1 if thing >= 0.5 else 0
    # iterable path: float array of 0./1. flags
    flags = np.zeros(len(thing))
    flags[np.array(thing) >= 0.5] = 1
    return flags
def compare_LR(value, val_L=0, val_R=0, side='LR'):
    """Check if value is beyond left/right boundary values.

    Parameters
    ----------
    value : float (or array-like of floats)
        value to compare to val_L, val_R
    val_L : float, optional (Default value = 0)
        left comparison value
    val_R : float, optional (Default value = 0)
        right comparison value
    side : str (L, R or LR), optional (Default value = 'LR')
        side of comparison: left (L), right (R) or both (LR)

    Returns
    -------
    bool (or boolean array for array input)

    Raises
    ------
    ValueError
        if side is not one of 'L', 'R', 'LR'
    """
    # BUG FIX: the original tested len(side) == 2, so *any* two-character
    # string (e.g. 'RL', 'XX') was silently treated as 'LR' instead of
    # raising — compare against the documented value instead.
    if side == 'LR':
        # '|' (not 'or') keeps element-wise semantics for array inputs
        is_beyond = (value < val_L) | (value > val_R)
    elif side == 'L':
        is_beyond = (value < val_L)
    elif side == 'R':
        is_beyond = (value > val_R)
    else:
        raise ValueError('Invalid side given.')
    return is_beyond
|
25,469 | 3fa2531761cce6caf07d0040ce706353c78e5984 | from pyzz import *
def bmc(N, max):
    """Bounded model checking of netlist N up to 'max' time frames (Python 2).
    Returns solver.SAT as soon as a property can be violated in some frame,
    otherwise solver.UNDEF after exhausting all frames."""
    # create an unroll object with the Flops initialized in the first frame
    U = unroll(N, init=True)
    # create a solver of the unrolled netlist
    S = solver(U.F)
    prop = conjunction( N, N.get_properties() ) # conjunction of the properties
    constr = N.get_constraints() # constraints
    for i in xrange(max):
        print "Frame:", i
        fprop = U[prop, i] # unroll prop to frame i
        S.cube( U[constr, i] ) # unroll the constraints to frame i
        rc = S.solve( ~fprop ) # satisfiable iff the property can fail at frame i
        if rc == solver.SAT:
            print "SAT"
            return solver.SAT
    print "UNDEF"
    return solver.UNDEF
import click # pip install click
@click.command()
@click.argument("aig", type=click.Path(exists=True, dir_okay=False))
@click.option("--max", type=int, default=25)
def main(aig, max):
    """CLI entry: load an AIGER file and run BMC for up to --max frames."""
    # NOTE(review): only names *from* pyzz were star-imported above; the
    # qualified 'pyzz.netlist' lookup likely raises NameError — confirm an
    # 'import pyzz' exists, or use netlist.read_aiger directly.
    N = pyzz.netlist.read_aiger(aig)
    bmc(N, max)
if __name__ == "__main__":
    main()
|
25,470 | 8f6902c7664c01b9be28d3a78eea8b7d4b3aa84d | print("Hello Python now")
# Demo: add two integers and print their sum (30).
x, y = 10, 20
print(x + y)
25,471 | 0c769d1c92999ec1c0d1758a36f63db9d64c47db |
import unittest
class test_xmloutput(unittest.TestCase):
    """Toy TestCase demonstrating pass / error / fail outcomes, e.g. for
    exercising XML test-report output. The error/fail triggers are
    currently commented out, so all three tests pass."""
    def setUp(self):
        # no fixtures required
        pass
    def tearDown(self):
        pass
    def test_pass_longtime(self):
        # deliberately slow busy-loop (~30M iterations) to produce a
        # long-running passing test in the report
        a=30000000
        while (a>0):
            a=a-1
    def test_error(self):
        msg = "Hello this test should error ... (not fail)"
        #print(msg)
        #self.assertTrue(False)
    def test_fail(self):
        msg = "Hello this test should fail ... (not error)"
        #print(msg)
        #self.fail(msg)
if __name__ == '__main__':
    unittest.main()
|
25,472 | 2b9859673353fb3707c27ee679ee3cbdef1f60d1 | # Define a class called Bike that accepts a string and a float as input, and assigns those inputs respectively to two instance variables, color and price.
# Assign to the variable testOne an instance of Bike whose color is blue and whose price is 89.99.
# Assign to the variable testTwo an instance of Bike whose color is purple and whose price is 25.0.
class Bike:
    """A bicycle with a colour (str) and a price (float).

    Per the exercise specification in the comments above, the constructor
    accepts the colour and price directly; for backward compatibility it
    still falls back to interactive prompts when called with no arguments.
    """
    def __init__(self, color=None, price=None):
        # FIX: the original ignored constructor inputs and always prompted
        # interactively, contradicting the stated requirements.
        if color is None:
            color = input("Enter a colour: ")
        if price is None:
            price = float(input("Enter the price: "))
        self.color = color
        self.price = float(price)
    def getinfo(self):
        """Print the bike's colour and price."""
        print("color =",self.color + " and price =",self.price)
testOne = Bike("blue", 89.99)
testOne.getinfo()
testTwo = Bike("purple", 25.0)
testTwo.getinfo()
|
25,473 | d117cd356049795880669d10cd04b1c2be488791 | from sklearn.externals import joblib
import pandas as pd
from hm import *
from matplotlib import pyplot as plt
# Plot Punzi significance curves per ctau and (dead code below) a 2-D
# improvement heatmap. Python 2 script (bare 'print' statement used later).
pth = '/home/hezhiyua/desktop/DeepTop/LLP/Limits/'+'MA/'+'bdt/'
#pth_out = '/beegfs/desy/user/hezhiyua/LLP/bdt_output/result/Lisa/'+'v6'+'/'+'punzi/'
pth_out = '/beegfs/desy/user/hezhiyua/2bBacked/skimmed/LLP/allInOne/nn_format/4jets/v6/punzi/'
in_name = 'store_punzi.pkl'
trn_m = '30'
trn_l = '500'
mass_list = [20,30,40,50]
ctau_list = [500,1000,2000,5000]
#n_digits = '.2f'
in_dic = joblib.load(pth+in_name)
#print in_dic
# only the last assignment wins: the comparison set is cut_nhf alone
models = ['cut_nhf','LoLa']
models = ['cut_nhf','BDT']
models = ['cut_nhf']
plt.title('Punzi.')
plt.xlabel(r'$c\tau$')
plt.ylabel('Punzi.')
# punzi_dic[mass][ctau][model] = (x, y) curve pulled from the pickle
punzi_dic = {}
for mi in mass_list:
    punzi_dic[mi] = {}
    for li in ctau_list:
        punzi_dic[mi][li] = {}
        stri = str(mi)+'_'+str(li)
        for key in models:
            #print key
            #print stri
            #print type(in_dic[stri]['plt'])
            tpli = in_dic[stri]['plt'][key]
            punzi_dic[mi][li][key] = tpli
# one stacked figure per ctau, overlaying all masses/models
for li in ctau_list:
    for mi in mass_list:
        for mdl in models:
            if mdl == 'cut_nhf':
                styl = 'dashed'
                MD = 'cut-based (nHadEFrac)'
            else :
                styl = None
                MD = mdl
            plt.plot(punzi_dic[mi][li][mdl][0], punzi_dic[mi][li][mdl][1], linestyle=styl, label=str(mi)+' GeV '+MD)
    plt.yscale('log')
    plt.title(r'Punzi. (c$\tau$ = '+str(li)+' mm)')
    plt.xlabel('classification output')
    plt.ylabel('Punzi.')
    cutL = 0.45
    #cutL = 0.9
    #utL = 0.5
    #plt.axvline(x=cutL,label=r'$cut_{LoLa}$ = '+str(cutL))
    #plt.axvline(x=cutL,label=r'$cut_{BDT}$ = '+str(cutL))
    plt.axvline(x=cutL,label=r'$cut_{nHadEFrac}$ = '+str(cutL))
    plt.legend()
    plt.savefig(pth_out+'punzi_stacked__'+str(li)+'mm.png')
    #plt.show()
    plt.close()
# NOTE(review): everything below exit() is dead code; if re-enabled, it
# would also NameError on n_digits (commented out above).
exit()
imDict = {}
for i in mass_list:
    imDict[i] = {}
    #err_dict[i] = {}
    for j in ctau_list:
        imDict[i][j] = 0.
        #err_dict[i][j] = 0.
for mmi in mass_list:
    for lli in ctau_list:
        tmp_str = str(mmi)+'_'+str(lli)
        imDict[mmi][lli] = in_dic[tmp_str]['impr']
        print in_dic[tmp_str]['impr']
df_val = pd.DataFrame(imDict)
df = df_val
val_label = r'$\frac{ punzi_{BDT} }{ punzi_{cut_nhf} }$'
m_L = df.columns.values.tolist()
c_L = [500,1000,2000,5000]
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
fig, ax = plt.subplots()
im, cbar = heatmap(df, c_L, m_L, ax=ax, cmap="YlGn", cbarlabel=val_label)
texts = annotate_heatmap(im, valfmt='{x:'+n_digits+'}', fsize=16)#6)
fig.tight_layout()
#plt.show()
outName = 'punzi_2Dmap_bdt_vs_'+'_trn_'+str(trn_m)+'_'+str(trn_l)#+'_'+input_string+'_'+val
fig.savefig(pth_out + outName + '.png', bbox_inches='tight')
|
25,474 | 92f2d1925f4ebe7146bf43225a272433823b00d5 | def solveIncreasing(currentIndex,lastValue,ans,kRemain):
global finalAns
print currentIndex,lastValue,ans,kRemain
if currentIndex == n:
finalAns = max(ans,finalAns)
else:
for value in posValues[currentIndex]:
if value<lastValue:
finalAns = max(ans,finalAns)
continue
elif currentIndex == 0 or value==lastValue:
solveIncreasing(currentIndex+1,value,ans+value,kRemain)
else:
if kRemain > 0:
solveIncreasing(currentIndex+1,value,ans+value,kRemain-1)
else:
finalAns = max(ans,finalAns)
continue
# Python 2 driver: one test case per line-pair (n k, then n values).
for _ in range(input()):
    n,k = map(int,raw_input().split())
    a = map(int,raw_input().split())
    # posValues[i]: candidate values usable at position i (0 = skip marker)
    posValues = []
    for i in range(0,n):
        temp = [0]
        # NOTE(review): this 'continue' skips appending temp, so posValues
        # can end up shorter than n while solveIncreasing indexes up to n
        # — confirm whether a[i]==0 positions should still append [0].
        if a[i] == 0:
            continue
        temp.append(a[i])
        if(i>0 and a[i]>a[i-1]):
            temp.append(a[i-1])
        if(i<n-1 and a[i]>a[i+1]):
            # NOTE(review): both branches append a[i+1]; the inner if is redundant
            if i!=0 and a[i-1] != a[i+1]:
                temp.append(a[i+1])
            else:
                temp.append(a[i+1])
        posValues.append(temp)
    print posValues
    finalAns = 0
    solveIncreasing(0,0,0,k)
    print finalAns
|
25,475 | 91eb1f7b267935e432be390fc27eb7df56cab587 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import time
import datetime
import calendar
# datetime.time() with no arguments is midnight (00:00:00), not "now".
print(datetime.time())
# struct_time for the current local time
print(time.localtime())
# the calendar module's configured first weekday (0 = Monday by default)
print(calendar.firstweekday())
25,476 | 461d2b9f059e49220e0dd43a16fa7771cc99e5cf | from pymysql import connect
# NOTE(review): credentials are hard-coded; consider loading host/user/password
# from configuration or environment instead of source code.
db = connect(host='192.168.40.131', port=3306, database='test', user='root', password='mysql', charset='utf8')
# cursor for issuing SQL statements against the 'test' database
cur = db.cursor()
|
25,477 | ae5128cd094f5b7472c4a0d8d4e68e06bdb0c82e | import sys
import os
import requests
import re
from collections import defaultdict
from bs4 import BeautifulSoup
# echo "PATH=\$PATH:~/.local/bin" >> ~/.bashrc
# easy_install --user pip
# pip install --user requests
# or install pip with
# wget https://raw.github.com/pypa/pip/master/contrib/get-pip.py && python get-pip.py --user
# comment for example
# Read a dump of process names (argv[1]) and write an analysis report to
# data/<name>, scraping per-process descriptions and security ratings from
# file.net and neuber.com (network access required).
in_file_path = "dumps/%s" % os.path.basename(sys.argv[1])
in_file = open(in_file_path, 'r')
out_file_path = "data/%s" % os.path.basename(sys.argv[1])
out_file = open(out_file_path, 'w')
procs = {}        # process name -> scraped description text
procs_count = {}  # process name -> occurrences in the dump
proc_errs = {}    # process name -> HTTP status when both lookups failed
sec_rating = {}   # process name -> "NN%" technical security rating
for line in in_file:
    line = line.rstrip().lower()
    desc = ""
    if ".exe" not in line:
        line += ".exe"
    if line == "wininit.exe":
        continue
    # if the line is already in procs, dont request again
    if line not in procs:
        url = "https://www.file.net/process/%s.html" % line
        page = requests.get(url)
        # only try and parse the page if successful request
        if page.status_code == 200:
            soup = BeautifulSoup(page.text, "html.parser")
            # get description above the picture
            for para in soup.find(id="GreyBox").find_all("p"):
                # don't get ad
                if not para.find(text=re.compile("Click to Run a Free")):
                    # don't get exe wanting
                    if not para.find(text=re.compile("exe extension on a")):
                        if desc:
                            desc += "\n\n"
                        desc += str(para.text)
            additional_desc = soup.find(itemprop="description").parent.text
            additional_desc = additional_desc.replace("\n", "\n\n")
            if desc:
                desc += "\n\n"
            # skip the extra block when it duplicates what we already have
            if additional_desc[0:25] != desc[0:25]:
                desc += additional_desc
            rating = re.findall(r'\d+% dangerous', desc)
            if rating:
                rating = re.findall(r'\d+%', rating[0])[0]
                sec_rating[line] = rating
        url2 = "https://www.neuber.com/taskmanager/process/%s.html" % line
        page2 = requests.get(url2)
        if page2.status_code == 200:
            soup = BeautifulSoup(page2.text, "html.parser")
            content = ""
            # page layout is brittle; swallow any parsing error
            try:
                content = soup.find(id="content").find_all("br")[3].next_sibling.next_sibling.text
            except:
                pass
            if content:
                if desc:
                    desc += "\n\n"
                desc += content
        if page.status_code != 200 and page2.status_code != 200:
            proc_errs[line] = page.status_code
        if desc:
            procs[line] = desc
    if line in procs_count:
        procs_count[line] += 1
    else:
        procs_count[line] = 1
# file header
out_file.write("ANALYSIS OF: %s\n--------------------------------\n" % os.path.basename(in_file_path))
# attributes section
out_file.write("ATTRIBUTES:\n\n")
out_file.write("Processes: %s\n" % len(procs))
out_file.write("Retrieval Errors: %s\n" % len(proc_errs))
# high_ratings = {k:v for k:v in sec_rating.iteritems() if v >}
# out_file.write("Technical Security Ratings above 50%: %s\n", high_ratings)
out_file.write("\n--------------------------------\n")
# error section
out_file.write("RETRIEVAL ERRORS:\n\n")
for proc, error_code in proc_errs.items():
    out_file.write("%s: %s\n" % (proc, error_code))
out_file.write("\n--------------------------------\n")
# plain service section
out_file.write("SERVICE LIST:\n\n")
for proc, description in procs.items():
    rating = ""
    if sec_rating.get(proc):
        rating = "(%s)" % sec_rating.get(proc)
    out_file.write("%s %s\n" % (proc, rating))
out_file.write("\n--------------------------------\n")
# process descriptions
out_file.write("PROCESS DESCRIPTIONS\n\n")
for proc, description in procs.items():
    out_file.write("\nProcess: %s\n" % proc)
    out_file.write("Count: %s\n" % procs_count[proc])
    if sec_rating.get(proc):
        out_file.write("Technical Security Rating: %s\n\n" % sec_rating[proc])
    else:
        out_file.write("\n")
    out_file.write(description)
    out_file.write("\n\n----------------\n")
in_file.close()
out_file.close()
|
25,478 | 877fdb4a55c659b863a467fe7550acf518a7e69e | import geocoder
from math import radians, cos, sin, asin, sqrt
from geopy.geocoders import Nominatim
from collections import defaultdict
from heapq import *
def currentlocation():
    """Return (lat, lng) of this machine, geolocated from its public IP
    via geocoder.ip('me') (requires network access)."""
    g = geocoder.ip('me')
    cur_position = (g.latlng[0], g.latlng[1])
    return cur_position
def getlatlong(lokasi):
    """Geocode a place name to (latitude, longitude) with Nominatim.
    NOTE(review): newer geopy versions require Nominatim(user_agent=...);
    confirm the installed version accepts a bare Nominatim(). Also, a
    failed lookup returns None and would raise AttributeError below."""
    geolocator = Nominatim()
    location = geolocator.geocode(lokasi)
    dest_position = (location.latitude, location.longitude)
    return dest_position
def getdistance(lon1, lat1, lon2, lat2):
    """Great-circle (haversine) distance in kilometres between two points
    given as (lon, lat) pairs in decimal degrees."""
    lon1, lat1, lon2, lat2 = [radians(v) for v in (lon1, lat1, lon2, lat2)]
    half_dlon = (lon2 - lon1) / 2
    half_dlat = (lat2 - lat1) / 2
    # haversine of the central angle between the two points
    chord = sin(half_dlat)**2 + cos(lat1) * cos(lat2) * sin(half_dlon)**2
    central_angle = 2 * asin(sqrt(chord))
    earth_radius_km = 6371
    return central_angle * earth_radius_km
def dijkstra(edges, f, t):
    """Shortest path from f to t over directed weighted edges (src, dst, cost).

    Returns (total_cost, path) where path is a nested tuple
    (node, (prev, (... (start, ())))), or float('inf') if t is unreachable.
    """
    adjacency = defaultdict(list)
    for src, dst, weight in edges:
        adjacency[src].append((weight, dst))
    # frontier entries: (cost so far, node, reversed-path tuple)
    frontier = [(0, f, ())]
    visited = set()
    best = {f: 0}
    while frontier:
        cost, node, trail = heappop(frontier)
        if node in visited:
            continue
        visited.add(node)
        trail = (node, trail)
        if node == t:
            return (cost, trail)
        for weight, neighbor in adjacency.get(node, ()):
            if neighbor in visited:
                continue
            candidate = cost + weight
            known = best.get(neighbor, None)
            if known is None or candidate < known:
                best[neighbor] = candidate
                heappush(frontier, (candidate, neighbor, trail))
    return float("inf")
if __name__ == "__main__":
    # Interactive Python 2 demo: read 7 place names, geocode them, build a
    # fixed-topology weighted graph and answer shortest-path queries.
    print "\n"
    print "Quiz 2 PAA F"
    print "======================================"
    print "Anggota : "
    print "5116100164 | Hilmi Raditya Prakoso"
    print "5116100151 | Falah Ath Thaariq Razzaq"
    print "5116100159 | Rahmad Yanuar Darmansyah"
    print "======================================"
    print "\n"
    nama_lokasi = list()
    distance = list()
    for a in range(0,7):
        temp = a+1
        input_nama_lokasi = raw_input("Masukkan nama lokasi ("+str(temp)+" dari 7) > ")
        nama_lokasi.append(input_nama_lokasi)
    print "\n"
    curlac = currentlocation()
    for a in range(0,7):
        print "lat dan long pada lokasi "+nama_lokasi[a]+" : "+str(getlatlong(nama_lokasi[a]))
    print "\n"
    print "harap menunggu, sedang membentuk graph !"
    # NOTE(review): getlatlong is re-queried for every edge endpoint below
    # (dozens of network round-trips); caching one lookup per location
    # would be far faster.
    edges = [
        (nama_lokasi[0], nama_lokasi[1], getdistance(getlatlong(nama_lokasi[0])[0],getlatlong(nama_lokasi[0])[1],getlatlong(nama_lokasi[1])[0],getlatlong(nama_lokasi[1])[1])),
        (nama_lokasi[0], nama_lokasi[3], getdistance(getlatlong(nama_lokasi[0])[0],getlatlong(nama_lokasi[0])[1],getlatlong(nama_lokasi[3])[0],getlatlong(nama_lokasi[3])[1])),
        (nama_lokasi[1], nama_lokasi[2], getdistance(getlatlong(nama_lokasi[1])[0],getlatlong(nama_lokasi[1])[1],getlatlong(nama_lokasi[2])[0],getlatlong(nama_lokasi[2])[1])),
        (nama_lokasi[1], nama_lokasi[3], getdistance(getlatlong(nama_lokasi[1])[0],getlatlong(nama_lokasi[1])[1],getlatlong(nama_lokasi[3])[0],getlatlong(nama_lokasi[3])[1])),
        (nama_lokasi[1], nama_lokasi[4], getdistance(getlatlong(nama_lokasi[1])[0],getlatlong(nama_lokasi[1])[1],getlatlong(nama_lokasi[4])[0],getlatlong(nama_lokasi[4])[1])),
        (nama_lokasi[2], nama_lokasi[4], getdistance(getlatlong(nama_lokasi[2])[0],getlatlong(nama_lokasi[2])[1],getlatlong(nama_lokasi[4])[0],getlatlong(nama_lokasi[4])[1])),
        (nama_lokasi[3], nama_lokasi[4], getdistance(getlatlong(nama_lokasi[3])[0],getlatlong(nama_lokasi[3])[1],getlatlong(nama_lokasi[4])[0],getlatlong(nama_lokasi[4])[1])),
        (nama_lokasi[3], nama_lokasi[5], getdistance(getlatlong(nama_lokasi[3])[0],getlatlong(nama_lokasi[3])[1],getlatlong(nama_lokasi[5])[0],getlatlong(nama_lokasi[5])[1])),
        (nama_lokasi[4], nama_lokasi[5], getdistance(getlatlong(nama_lokasi[4])[0],getlatlong(nama_lokasi[4])[1],getlatlong(nama_lokasi[5])[0],getlatlong(nama_lokasi[5])[1])),
        (nama_lokasi[4], nama_lokasi[6], getdistance(getlatlong(nama_lokasi[4])[0],getlatlong(nama_lokasi[4])[1],getlatlong(nama_lokasi[6])[0],getlatlong(nama_lokasi[6])[1])),
        (nama_lokasi[5], nama_lokasi[6], getdistance(getlatlong(nama_lokasi[5])[0],getlatlong(nama_lokasi[5])[1],getlatlong(nama_lokasi[6])[0],getlatlong(nama_lokasi[6])[1])),
    ]
    print "graph telah dibentuk !"
    print "\n"
    # query loop: 1-based menu indices mapped to nama_lokasi entries
    while True:
        print "List Lokasi yang telah diinput :"
        for a in range(0,7):
            nomor = a+1
            print str(nomor)+". "+nama_lokasi[a]
        pilihan_dari = input("Lokasi dari (masukkan nomor index) > ")
        pilihan_tujuan = input("Lokasi tujuan (masukkan nomor index) > ")
        print "\n"
        print "Hasil Pencarian Shortest-Path menggunakan Djikstra Algorithm :"
        print dijkstra(edges, nama_lokasi[pilihan_dari-1], nama_lokasi[pilihan_tujuan-1])
        print "\n"
|
25,479 | fcc1d6600ea144e3914b8e4bdd7567f904b0fe15 | import cv2
import numpy as np
import os
from pytesseract import image_to_string
from classifier import keyword_detection, sentence_classifier
import shutil
from block_seg import block_segment
import math
from webcolors import name_to_rgb
import random
import time
import json
class Box_Info:
    """Plain record describing one detected text box in a document image."""
    def __init__(self, xmin, ymin, xmax, ymax, flag_key, textbox_content, percent_k, textbox_key, stroke_width):
        # bounding-box corners (pixel coordinates)
        self.xmin, self.ymin = xmin, ymin
        self.xmax, self.ymax = xmax, ymax
        # whether the OCR text matched a known keyword
        self.flag_key = flag_key
        # matched keyword label, or the raw OCR text when no match
        self.textbox_content = textbox_content
        # keyword-match confidence score
        self.percent_k = percent_k
        # raw OCR text used for the keyword match
        self.textbox_key = textbox_key
        # measured stroke width of the glyphs in the box
        self.stroke_width = stroke_width
########################################
# Display colour per field label (values are webcolors colour names).
dict_label_color = {'SHIPPER': "blue", 'CONSIGNEE': "green", 'NOTIFY': "red", 'ALSO_NOTIFY': "magenta", 'POR': "yellow", 'POL': "cyan",
                    'POD': "navy", 'DEL': "pink", 'DESCRIPTION': "purple", 'VESSEL': "gray", 'Gross Weight': "lavender", 'Measurement': "orange"}
# Label groupings: parties (cluster 1) vs. route/vessel fields (cluster 2).
key_cluster_1 = ['SHIPPER', 'CONSIGNEE', 'NOTIFY', 'ALSO_NOTIFY']
key_cluster_2 = ['POL', 'POD', 'DEL', 'VESSEL', 'POR']
def calculateDistance(x1, y1, x2, y2):
    """Euclidean distance between points (x1, y1) and (x2, y2)."""
    dx = x2 - x1
    dy = y2 - y1
    return math.sqrt(dx * dx + dy * dy)
def crop_contours(img, cnt):
    """Tighten a contour's bounding rect: crop img to cnt's rect, re-find the
    external contours inside, and return the union rect of those contours as
    (x, y, w, h) in the original image's coordinates."""
    x0, y0, w0, h0 = cv2.boundingRect(cnt)
    th1 = img[y0:y0+h0, x0:x0+w0]
    # NOTE(review): 3-value findContours is the OpenCV 3.x API; OpenCV 4
    # returns (contours, hierarchy) — confirm the pinned cv2 version.
    _, contours, _ = cv2.findContours(
        th1.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    list_x = []
    list_y = []
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        list_x.append(x)
        list_x.append(x+w)
        list_y.append(y)
        list_y.append(y+h)
    # union of all inner boxes (min/max raise ValueError if no contours found)
    x1 = min(list_x)
    y1 = min(list_y)
    x2 = max(list_x)
    y2 = max(list_y)
    return x0+x1, y0+y1, x2-x1, y2-y1
def take_character_boxes(image):
    """Copy only character-sized contour regions (w < 200 and h < 70) of a
    single-channel image onto a black canvas; returns (canvas, contours)."""
    # find contours
    _, contours, _ = cv2.findContours(
        image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    height, width = image.shape
    output = np.zeros((height, width), np.uint8)
    # loop in all the contour areas
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        # size gate: keep glyph-scale regions, drop tables/lines/large blobs
        if (w < 200) and (h < 70):
            output[y:y+h, x:x+w] = image[y:y+h, x:x+w]
    return output, contours
def ocr_textbox(textbox_img):
    """OCR one text-box image and classify it against known keywords.
    Returns (flag_key, textbox_content, percent_k, textbox_key), or None if
    keyword detection raised."""
    result = None
    ocr_result = ""
    try:
        # --psm 13: treat the crop as a single raw text line
        ocr_result = image_to_string(
            textbox_img, config='-l eng --tessdata-dir "tessdata" --psm 13').lower()
    except Exception as e:
        print("ocr error: " + str(e))
    # flag key or not
    flag_key = False
    textbox_content = ocr_result
    percent_k = 0
    # detect key or not
    try:
        keyword = keyword_detection(ocr_result)
        label = keyword[0]
        percent = keyword[1]
        textbox_key = ocr_result
        # a non-None label means the OCR text matched a known field keyword
        if label != None:
            flag_key = True
            textbox_content = label
            percent_k = percent
        result = (flag_key, textbox_content, percent_k, textbox_key)
    except Exception as e:
        print("can not detect key: " + str(e))
    return result
def get_block_img_info(index_block, image):
    """Detect, OCR and classify text lines in one block image.
    Returns (annotated image, number of keyword boxes, list of Box_Info).
    NOTE(review): binary_img and get_stroke_width are not defined in this
    file's visible imports — confirm where they come from."""
    height, width, _ = image.shape
    output_img = image.copy()
    ocr_img = image.copy()
    image = binary_img(image)
    # find all text boxes
    thresh, _ = take_character_boxes(image)
    # wide 1x60 kernel merges neighbouring characters into text lines
    kernel2 = np.ones((1, 60), np.uint8)
    line_img = cv2.dilate(thresh, kernel2, iterations=1)
    _, contours, _ = cv2.findContours(
        line_img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    color_red = (0, 0, 255)
    # for each textbox
    count_key = 0
    list_boxes_info = []
    for index, cnt in enumerate(contours):
        # try crop text region
        x, y, w, h = crop_contours(thresh, cnt)
        # discard degenerate or over-tall regions
        if w < 10 or h < 10 or h > 100:
            continue
        cv2.rectangle(output_img, (x, y), (x + w, y + h),
                      color=color_red, thickness=2)
        # ocr text box: pad by 3px on each side, clamped to the image bounds
        xmin_ocr = x - 3
        if xmin_ocr < 0:
            xmin_ocr = 0
        ymin_ocr = y - 3
        if ymin_ocr < 0:
            ymin_ocr = 0
        xmax_ocr = x + w + 3
        if xmax_ocr > width:
            xmax_ocr = width - 1
        ymax_ocr = y + h + 3
        if ymax_ocr > height:
            ymax_ocr = height - 1
        textbox_img = ocr_img[ymin_ocr:ymax_ocr, xmin_ocr:xmax_ocr]
        h_ocr, w_ocr, _ = textbox_img.shape
        # skip crops too small to OCR reliably
        if h_ocr < 5 or w_ocr < 50:
            continue
        result_ocr = ocr_textbox(textbox_img)
        if result_ocr == None:
            continue
        (flag_key, textbox_content, percent_k, textbox_key) = result_ocr
        # print(str(index_block) + " : '" + textbox_content + "' : " +
        # str(flag_key) + " : " + str(percent_k))
        # calculate stroke width
        stroke_width = get_stroke_width(textbox_img)
        box_info = Box_Info(x, y, x + w, y + h, flag_key,
                            textbox_content, percent_k, textbox_key, stroke_width)
        list_boxes_info.append(box_info)
        # for visual key: annotate detected keyword boxes on the output image
        if flag_key == True:
            count_key += 1
            cv2.putText(output_img, str(textbox_content), (x, y),
                        cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, color=color_red, thickness=2)
    return output_img, count_key, list_boxes_info
def draw_key_value_image(dict_key_value, folder_name, output_folder, file_name):
    """Render detected key/value boxes onto the source image for debugging.

    Each key's value boxes are filled in the key's configured colour and the
    key name + its first tuple element are printed at the key position.
    NOTE(review): the final imwrite is commented out, so the blended image is
    currently discarded — confirm this function is intentionally a no-op.
    """
    img_result = cv2.imread(folder_name + file_name)
    img_result_copy = img_result.copy()
    for k, v in dict_key_value.items():
        for item in v:
            # item = (percent, list_values, key_pos, key_content)
            list_values = item[1]
            color_value = name_to_rgb(dict_label_color[k])
            for value in list_values:
                cv2.rectangle(
                    img_result, (value[0], value[1]), (value[2], value[3]), color=color_value, thickness=cv2.FILLED)
            cv2.putText(img_result, str(k) + " : " + str(item[0]), (item[2][0], item[2][1]), cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=1, color=color_value, thickness=2)
    # blend the filled boxes over the original at 50% opacity
    opacity = 0.5
    cv2.addWeighted(img_result, opacity, img_result_copy,
                    1 - opacity, 0, img_result_copy)
    # cv2.imwrite(output_folder + "result_" + file_name, img_result_copy)
def make_json_dict(file_name, folder_name, dict_key_value):
    """Build the JSON-serializable result dict for one image.

    Parameters:
        file_name / folder_name: identify the processed image.
        dict_key_value: maps key name -> list of
            (percent, list_values, key_pos, key_content) tuples, where each
            value in list_values is (xmin, ymin, xmax, ymax, text).

    Returns:
        Dict with 'file_name', 'folder_name' and a 'keys' mapping; every key
        in the fixed schema is present, zero-filled when not detected.
        All coordinates are serialized as strings.
    """
    def _pos(xmin, ymin, xmax, ymax):
        # positions are serialized as strings in the output schema
        return {"xmin": str(xmin), "ymin": str(ymin),
                "xmax": str(xmax), "ymax": str(ymax)}

    list_keys = ['SHIPPER', 'CONSIGNEE', 'NOTIFY',
                 'ALSO_NOTIFY', 'POL', 'POD', 'DEL', 'VESSEL', 'POR']
    json_dict = {'file_name': file_name, 'folder_name': folder_name, "keys": {}}
    # init: every schema key gets an all-zero, empty-content entry
    for k in list_keys:
        json_dict["keys"][k] = {
            "key_pos": _pos(0, 0, 0, 0),
            "key_content": "",
            "value_pos": _pos(0, 0, 0, 0),
            "value_content": "",
        }
    for k, v in dict_key_value.items():
        if k not in list_keys:
            continue
        if len(v) > 0:
            # only the first (already filtered/best) candidate is exported
            (percent_k, list_values, key_pos, key_content) = v[0]
            value_texts = [value_info[4] for value_info in list_values]
            if len(list_values) > 0:
                # bounding box of all value boxes
                xmin = min(value_info[0] for value_info in list_values)
                ymin = min(value_info[1] for value_info in list_values)
                xmax = max(value_info[2] for value_info in list_values)
                ymax = max(value_info[3] for value_info in list_values)
            else:
                xmin = ymin = xmax = ymax = 0
            if len(value_texts) == 0:
                value_content = ""
            else:
                # values were collected bottom-up; reverse into reading order
                value_content = '\n'.join(reversed(value_texts))
            json_dict["keys"][k] = {
                "key_pos": _pos(*key_pos),
                "key_content": key_content,
                "value_pos": _pos(xmin, ymin, xmax, ymax),
                "value_content": value_content,
            }
    return json_dict
def read_json_data(filename_json):
    """Load the test-set JSON file.

    The file must contain a JSON array of objects with 'file_name' and
    'folder_name' fields. Returns a list of (folder_name, file_name) tuples.
    """
    with open(filename_json, 'r') as f:
        datastore = json.load(f)
    # idiom: comprehension instead of an index-tracking loop (index was unused)
    return [(record['folder_name'], record['file_name']) for record in datastore]
def get_values_free_key(key_info, list_boxes_info, xmin_block, ymin_block):
    """Collect the value boxes belonging to one key inside a multi-key block.

    Strategy: first walk downward from the key, accepting boxes that overlap
    it horizontally until another key / same-stroke-width text / a >100px
    vertical gap is hit. If nothing is found below, take the nearest box to
    the right on (roughly) the same line and then continue downward from it.
    Returned tuples are (xmin, ymin, xmax, ymax, text) in absolute
    (block-offset) coordinates.
    """
    list_k_value = []
    # candidates strictly below the key that overlap it horizontally
    list_candidate = []
    for box_info in list_boxes_info:
        if box_info.ymax > key_info.ymax and box_info.xmax > key_info.xmin and box_info.xmin < key_info.xmax:
            list_candidate.append(box_info)
    sorted_list_candidate = sorted(
        list_candidate, key=lambda x: x.ymax)
    ymax_k_new = key_info.ymax
    # NOTE(review): if the stop condition below never fires, y_range_under
    # stays 0 and the later "under first value" search matches nothing —
    # confirm that is the intended behaviour.
    y_range_under = 0
    for box_info in sorted_list_candidate:
        # stop at another key, at text with the key's stroke width, or after
        # a vertical gap larger than 100px
        if box_info.flag_key == True or box_info.stroke_width == key_info.stroke_width or box_info.ymin - ymax_k_new > 100:
            y_range_under = box_info.ymin
            break
        k_value_box = (box_info.xmin + xmin_block, box_info.ymin + ymin_block, box_info.xmax + xmin_block,
                       box_info.ymax + ymin_block, box_info.textbox_content)
        list_k_value.append(k_value_box)
        ymax_k_new = box_info.ymax
    if len(list_k_value) > 0:
        return list_k_value
    # fall back to a value on the same line
    # NOTE(review): the original comment said "left" but the filter selects
    # boxes whose xmax is to the RIGHT of the key — presumably same-line values.
    list_candidate = []
    for box_info in list_boxes_info:
        if box_info.xmax > key_info.xmax and np.absolute(box_info.ymin - key_info.ymin) < 10:
            list_candidate.append(box_info)
    sorted_list_candidate = sorted(
        list_candidate, key=lambda x: x.xmax)
    if len(sorted_list_candidate) > 0:
        box_info = sorted_list_candidate[0]
        # accept only non-key text with a different stroke width, within 300px
        if box_info.flag_key == False and box_info.stroke_width != key_info.stroke_width and box_info.xmin - key_info.xmax <= 300:
            k_value_box = (box_info.xmin + xmin_block, box_info.ymin + ymin_block, box_info.xmax + xmin_block,
                           box_info.ymax + ymin_block, box_info.textbox_content)
            list_k_value.append(k_value_box)
            # re-tag the found value with the key's stroke width so the
            # downward search stops on matching-style text
            box_info_new = Box_Info(box_info.xmin, box_info.ymin, box_info.xmax, box_info.ymax, box_info.flag_key,
                                    box_info.textbox_content, box_info.percent_k, box_info.textbox_key, key_info.stroke_width)
            # continue collecting the lines stacked under that first value
            list_k_value_under = get_values_under_first_value(
                box_info_new, list_boxes_info, y_range_under, xmin_block, ymin_block)
            list_k_value += list_k_value_under
    return list_k_value
def get_values_under_first_value(key_info, list_boxes_info, y_range_under, xmin_block, ymin_block):
    """Collect value boxes stacked directly under *key_info*.

    Only boxes that overlap key_info horizontally, sit below it and end above
    y_range_under are considered; the scan stops at another key, at text with
    the key's stroke width, or after a vertical gap larger than 100px.

    Bug fix: the original updated the running baseline with an undefined name
    (`ymax_k_new = ymax`), raising NameError on the first accepted box; it now
    uses `box_info.ymax`.

    Returns tuples (xmin, ymin, xmax, ymax, text) in block-offset coordinates.
    """
    list_k_value = []
    candidates = [b for b in list_boxes_info
                  if b.ymax > key_info.ymax and b.xmax > key_info.xmin
                  and b.xmin < key_info.xmax and b.ymax < y_range_under]
    ymax_k_new = key_info.ymax
    for box_info in sorted(candidates, key=lambda b: b.ymax):
        if box_info.flag_key == True or box_info.stroke_width == key_info.stroke_width or box_info.ymin - ymax_k_new > 100:
            break
        list_k_value.append((box_info.xmin + xmin_block, box_info.ymin + ymin_block,
                             box_info.xmax + xmin_block, box_info.ymax + ymin_block,
                             box_info.textbox_content))
        ymax_k_new = box_info.ymax  # fixed: was the undefined name `ymax`
    return list_k_value
def get_stroke_width(img_crop):
    """Estimate the ink stroke width (in erosion steps) of a text crop.

    Binarizes the crop, then repeatedly erodes with a 3x3 kernel until fewer
    than 1% of the original white (ink) pixels remain; the iteration count is
    returned as a rough stroke-width measure, capped at 10.
    """
    img_crop = binary_img(img_crop)
    stroke_width = 0
    kernel = np.ones((3, 3), np.uint8)
    num_white_origin = np.sum(img_crop == 255)
    # NOTE(review): if the crop contains no white pixels, num_white_origin is
    # 0 and the condition is always true — the loop then only stops at the
    # hard cap of 10. Confirm that is acceptable.
    while np.sum(img_crop == 255) >= 0.01 * num_white_origin:
        img_crop = cv2.erode(img_crop, kernel, iterations=1)
        stroke_width += 1
        if stroke_width == 10:
            return stroke_width
    return stroke_width
def key_value_detection(folder_name, file_name):
    """Run the full key/value extraction pipeline on one image.

    Returns the dict produced by find_key_value_block (key name -> list of
    (percent, values, key_pos, key_content) tuples), or None when the page is
    not recognised as a table form.
    """
    img = cv2.imread(folder_name + file_name)
    bboxs = block_segment(folder_name, file_name)
    # bail out early if the page is not a table form
    if not table_from_detection(len(bboxs)):
        return None
    h, w, _ = img.shape
    list_block_info = []
    for index, bb in enumerate(bboxs):
        # ignore blocks in the bottom 2/5 of the page — the keys of interest
        # live in the upper part of the document
        if bb[1] > int(3 * h / 5):
            continue
        img_box = img[bb[1]:bb[3], bb[0]:bb[2]]
        output_img, count_key, list_boxes_info = get_block_img_info(
            index, img_box)
        block_info = ((bb[0], bb[1], bb[2], bb[3]), list_boxes_info)
        list_block_info.append(block_info)
    dict_key_value = find_key_value_block(list_block_info)
    return dict_key_value
def find_key_value_block(list_block_info):
    """Collect key/value candidates from every block, then filter them.

    Returns a dict mapping each known key name to a list of
    (percent, values, key_pos, key_content) tuples (filtered in place by
    dict_key_value_select).
    """
    key_names = ['SHIPPER', 'CONSIGNEE', 'NOTIFY',
                 'ALSO_NOTIFY', 'POR', 'POL',
                 'POD', 'DEL', 'DESCRIPTION',
                 'VESSEL', 'Gross Weight', 'Measurement']
    dict_key_value = {name: [] for name in key_names}
    for index_block, block_info in enumerate(list_block_info):
        kv_candidates = get_kv_from_block(index_block, block_info, list_block_info)
        for key_name, percent_key, list_k_value, key_pos, key_content in kv_candidates:
            dict_key_value[key_name].append(
                (percent_key, list_k_value, key_pos, key_content))
    dict_key_value_select(dict_key_value)
    return dict_key_value
def dict_key_value_select(dict_key_value):
    """Filter key/value candidates in place (and also return the dict).

    First drops every candidate whose key box sits below one of its value
    boxes, then — when several candidates survive for one key — keeps only
    the top-most (smallest y, then smallest x) one.
    """
    # keep only candidates whose key is above all of its values
    for k, v in dict_key_value.items():
        dict_key_value[k] = [item for item in v if check_key_position(item)]
    # among multiple surviving candidates, keep the highest one on the page
    for k, v in dict_key_value.items():
        if len(v) > 1:
            highest = min(v, key=lambda tup: (tup[2][1], tup[2][0]))
            dict_key_value[k] = [highest]
    return dict_key_value
def check_key_position(item):
    """Return True when every value box starts at or below the key's top edge.

    *item* is (percent, list_values, key_pos, key_content); key_pos[1] is the
    key's ymin and each value tuple's index 1 is that value's ymin.
    """
    key_top = item[2][1]
    return all(value[1] >= key_top for value in item[1])
def get_kv_from_block(index_block, block_info, list_block_info):
    """Extract (key, values) candidates from one block.

    Three cases, depending on how many key boxes the block contains:
      * exactly one key + some values: all non-key boxes are its values;
      * several keys: values are assigned per key via get_values_free_key;
      * exactly one key and no values: look for the values in the nearest
        block below or to the right.
    Returns a list of (key_name, percent_key, values, key_pos, key_content).
    """
    num_key, num_value = count_item_value(block_info)
    list_key_value = []
    (xmin_block, ymin_block, xmax_block, ymax_block) = block_info[0]
    list_boxes_info = block_info[1]
    if num_key == 1 and num_value > 0:
        key_name = ""
        key_content = ""
        list_k_value = []
        percent_key = 0
        for box_info in list_boxes_info:
            if box_info.flag_key == True:
                key_name = box_info.textbox_content
                key_content = box_info.textbox_key
                percent_key = box_info.percent_k
                # key position in absolute (block-offset) coordinates
                key_pos = (xmin_block + box_info.xmin, ymin_block + box_info.ymin,
                           xmin_block + box_info.xmax, ymin_block + box_info.ymax)
            else:
                # every non-key box in the block is a value of the single key
                k_value_box = (xmin_block + box_info.xmin, ymin_block + box_info.ymin,
                               xmin_block + box_info.xmax, ymin_block + box_info.ymax, box_info.textbox_content)
                list_k_value.append(k_value_box)
        list_key_value.append(
            (key_name, percent_key, list_k_value, key_pos, key_content))
        return list_key_value
    if num_key > 1:
        # several keys share the block: resolve each key's values spatially
        for box_info in list_boxes_info:
            if box_info.flag_key == True:
                key_name = box_info.textbox_content
                key_content = box_info.textbox_key
                percent_key = box_info.percent_k
                key_pos = (xmin_block + box_info.xmin, ymin_block + box_info.ymin,
                           xmin_block + box_info.xmax, ymin_block + box_info.ymax)
                list_k_value = get_values_free_key(
                    box_info, list_boxes_info, xmin_block, ymin_block)
                list_key_value.append(
                    (key_name, percent_key, list_k_value, key_pos, key_content))
        return list_key_value
    if num_key == 1 and num_value == 0:
        # a lone key: its values must live in a neighbouring block
        if is_key_no_value(block_info) == True:
            return list_key_value
        under_block, left_block = get_nearest_under_left_block(
            block_info, list_block_info)
        percent_key = 0
        key_name = ""
        key_content = ""
        # NOTE(review): assumes the single box in the block is the key; if
        # flag_key were False, key_pos would be unbound below — confirm
        # count_item_value guarantees this invariant.
        box_info = list_boxes_info[0]
        if box_info.flag_key == True:
            key_name = box_info.textbox_content
            key_content = box_info.textbox_key
            percent_key = box_info.percent_k
            key_pos = (xmin_block + box_info.xmin, ymin_block + box_info.ymin,
                       xmin_block + box_info.xmax, ymin_block + box_info.ymax)
        list_k_value_u = []
        list_k_value_l = []
        # values from the block directly below (only if it is key-free)
        if under_block != None:
            num_key_u, num_value_u = count_item_value(under_block)
            if num_key_u == 0 and num_value_u > 0:
                list_k_value_u = get_all_values_in_block(under_block)
        # values from the neighbouring block on the same line
        if left_block != None:
            num_key_l, num_value_l = count_item_value(left_block)
            if num_key_l == 0 and num_value_l > 0:
                list_k_value_l = get_all_values_in_block(left_block)
        list_k_value = []
        len_u = len(list_k_value_u)
        len_l = len(list_k_value_l)
        if key_name in key_cluster_1:
            # cluster-1 keys: take whichever neighbour holds more text
            if len_u >= len_l:
                list_k_value = list_k_value_u
            else:
                list_k_value = list_k_value_l
        else:
            # other keys: accept a single-line neighbour unless it looks like
            # another key (text ending in a colon)
            if len_u == 1 and check_colon(list_k_value_u) == False:
                list_k_value += list_k_value_u
            if len_l == 1 and check_colon(list_k_value_l) == False:
                list_k_value += list_k_value_l
        list_key_value.append(
            (key_name, percent_key, list_k_value, key_pos, key_content))
        return list_key_value
    return list_key_value
def is_key_no_value(block_info):
    """Return True when any text box is shorter than half the block height.

    Used to decide whether a lone key in a block really has no value
    attached inside that block.
    """
    (xmin_block, ymin_block, xmax_block, ymax_block) = block_info[0]
    half_height = 0.5 * (ymax_block - ymin_block)
    return any(box.ymax - box.ymin < half_height for box in block_info[1])
def check_colon(list_k_value):
    """Return True when any value's text ends with ':' (it looks like a key).

    Each entry is (xmin, ymin, xmax, ymax, text); only the text is inspected.
    """
    return any(entry[4].endswith(':') for entry in list_k_value)
def get_all_values_in_block(block_info):
    """Return every box of the block as (xmin, ymin, xmax, ymax, text),
    translated into absolute coordinates using the block's origin."""
    (x_off, y_off, _, _) = block_info[0]
    return [(x_off + box.xmin, y_off + box.ymin,
             x_off + box.xmax, y_off + box.ymax, box.textbox_content)
            for box in block_info[1]]
def get_nearest_under_left_block(block_info, list_block_info):
    """Find the closest block below and the closest block beside *block_info*.

    "Below" means ymax greater than ours with (nearly) the same left edge;
    "beside" means xmax greater than ours with (nearly) the same top edge.
    Returns (under_block, left_block), either of which may be None.
    """
    (xmin_block, ymin_block, xmax_block, ymax_block) = block_info[0]
    below = [blk for blk in list_block_info
             if blk[0][3] > ymax_block and abs(xmin_block - blk[0][0]) < 10]
    beside = [blk for blk in list_block_info
              if blk[0][2] > xmax_block and abs(ymin_block - blk[0][1]) < 10]
    under_block = min(below, key=lambda blk: blk[0][3]) if below else None
    left_block = min(beside, key=lambda blk: blk[0][2]) if beside else None
    return under_block, left_block
def find_value_by_key(box_info_key, list_boxes_info, xmin_block, ymin_block):
    """Pick the most likely value box for a key.

    First tries the nearest non-key box directly below the key (horizontal
    overlap required); failing that, the nearest non-key box on the same
    line to the right. Returns at most one (xmin, ymin, xmax, ymax, text)
    tuple in block-offset coordinates.
    """
    def to_abs(box):
        return (xmin_block + box.xmin, ymin_block + box.ymin,
                xmin_block + box.xmax, ymin_block + box.ymax, box.textbox_content)

    # candidates strictly below the key that overlap it horizontally
    below = [b for b in list_boxes_info
             if b.ymin > box_info_key.ymax
             and box_info_key.xmin < b.xmax and box_info_key.xmax > b.xmin]
    below.sort(key=lambda b: b.ymin)
    if below and below[0].flag_key == False:
        return [to_abs(below[0])]
    if below:
        # the nearest box below is itself a key; fall through to the
        # same-line search, exactly like the empty-result case
        pass
    # candidates on (roughly) the same line, to the right of the key
    same_line = [b for b in list_boxes_info
                 if b.xmin > box_info_key.xmax
                 and abs(box_info_key.ymin - b.ymin) < 10]
    same_line.sort(key=lambda b: b.xmin)
    if same_line and same_line[0].flag_key == False:
        return [to_abs(same_line[0])]
    return []
def count_item_value(block_info):
    """Count how many boxes of the block are keys vs values.

    Returns (num_key, num_value); every box is one or the other.
    """
    boxes = block_info[1]
    num_key = sum(1 for box in boxes if box.flag_key == True)
    return num_key, len(boxes) - num_key
def table_from_detection(num_block):
    """A page segmented into more than 5 blocks is treated as a table form."""
    return num_block > 5
def binary_img(img):
    """Return the Otsu-binarized inverse of a BGR image.

    Dark pixels (e.g. ink) become white (255) and light background becomes
    black, which is what the contour/erosion steps downstream expect.
    """
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(
        grayscale, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    return mask
def recreate_folder(folder):
    """Delete *folder* (if it exists) and create it again, empty.

    Raises OSError if removal or creation fails.
    """
    if os.path.isdir(folder):  # idiom: no `== True` comparison
        shutil.rmtree(folder)
    os.makedirs(folder)
def read_flags():
    """Returns flags parsed from the command line."""
    import argparse
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # (flag, default, help) table keeps the three options in one place
    option_spec = [
        ("--images_out", "result_images", "Image result folder"),
        ("--json_test", "test_label.json", "Json files for testing"),
        ("--json_predict", "test_predict.json", "Json files predicted"),
    ]
    for flag_name, default_value, help_text in option_spec:
        parser.add_argument(flag_name, default=default_value, help=help_text)
    return parser.parse_args()
def main(flags):
    """Drive the pipeline: run key/value detection over every test image and
    dump the accumulated results to the predictions JSON file.

    flags: argparse namespace with images_out, json_test and json_predict.
    """
    output_folder = flags.images_out + "/"
    json_test_file = flags.json_test
    json_predict_file = flags.json_predict
    recreate_folder(output_folder)
    list_file_process = []
    json_list = []
    total_time = 0
    list_test = read_json_data(json_test_file)
    print(len(list_test))
    for index, item in enumerate(list_test):
        folder_name = item[0]
        file_name = item[1]
        print(file_name + "(" + str(index) + ")")
        start_time = time.time()
        try:
            list_file_process.append(file_name)
            dict_key_value = key_value_detection(folder_name, file_name)
            if dict_key_value == None:
                print(file_name + " is not a table form")
            else:
                json_dict = make_json_dict(
                    file_name, folder_name, dict_key_value)
                json_list.append(json_dict)
                draw_key_value_image(dict_key_value, folder_name,
                                     output_folder, file_name)
        except Exception as e:
            # keep processing the remaining images even if one fails
            print("image error at " + file_name + " : " + str(e))
        # checkpoint the predictions every 5 images
        if index % 5 == 0:
            with open(json_predict_file, 'w') as outfile:
                json.dump(json_list, outfile)
            print("json saved")
        new_time = (time.time() - start_time)
        total_time += new_time
        print("--- %s seconds ---" % new_time)
    # final save of all accumulated predictions
    with open(json_predict_file, 'w') as outfile:
        json.dump(json_list, outfile)
    print("json saved")
    print("total time : --- %s hours ---" % (total_time / 3600))
if __name__ == "__main__":
    # CLI entry point: parse flags, then run the detection pipeline
    flags = read_flags()
    main(flags)
|
25,480 | d3bdf6f900c18ad33733b9b9dc5598694607613e | try:
import torch
except ImportError:
# No installation required if not using this function
pass
import numpy as np
import random
class Randomness:
    """Seed every random number generator used by the project at once."""

    @staticmethod
    def seed(seed):
        """Seed Python's `random`, NumPy and — when available — PyTorch.

        Bug fix: the module-level `import torch` is wrapped in
        try/except ImportError, but the original called `torch.manual_seed`
        unconditionally and crashed with NameError when torch was missing;
        the torch calls are now guarded the same way.
        """
        random.seed(seed)
        np.random.seed(seed)
        try:
            torch.manual_seed(seed)
            torch.cuda.manual_seed(seed)
        except NameError:
            # torch was not importable; CPU RNGs are still seeded above
            pass
|
25,481 | 63b14556a9f7a6c5849bcf6f728e7e5e7665e32f | import psycopg2
import boto3
import base64
import cfnresponse
from botocore.exceptions import ClientError
import traceback
import json
def get_secret(secret_name, region_name):
    """Retrieve a secret from AWS Secrets Manager.

    Returns the payload as a string (SecretString) or decoded bytes
    (SecretBinary). Known client errors are re-raised; the layout follows
    the standard AWS Secrets Manager sample code.
    """
    session = boto3.session.Session()
    client = session.client(
        service_name = 'secretsmanager',
        region_name = region_name
    )
    try:
        get_secret_value_response = client.get_secret_value(
            SecretId=secret_name
        )
    except ClientError as e:
        # NOTE(review): an unrecognized error code falls through every branch
        # and the function implicitly returns None — confirm that is intended.
        if e.response['Error']['Code'] == 'DecryptionFailureException':
            # Secrets Manager can't decrypt the protected secret text using the provided KMS key.
            raise e
        elif e.response['Error']['Code'] == 'InternalServiceErrorException':
            # An error occurred on the server side.
            raise e
        elif e.response['Error']['Code'] == 'InvalidParameterException':
            # You provided an invalid value for a parameter.
            raise e
        elif e.response['Error']['Code'] == 'InvalidRequestException':
            # You provided a parameter value that is not valid for the current state of the resource.
            raise e
        elif e.response['Error']['Code'] == 'ResourceNotFoundException':
            # We can't find the resource that you asked for.
            raise e
    else:
        # Success path (try/else, per the AWS sample): depending on whether
        # the secret is a string or binary, one of these fields is populated.
        if 'SecretString' in get_secret_value_response:
            secret = get_secret_value_response['SecretString']
            return secret
        else:
            decoded_binary_secret = base64.b64decode(get_secret_value_response['SecretBinary'])
            return decoded_binary_secret
def create_conn(db_name, db_user, db_host, db_pass):
    """Open a psycopg2 connection to the given Postgres database.

    Re-raises the underlying connection error after logging; note the
    default port is used because no port is passed to psycopg2.
    """
    print ("creating connection")
    conn = None
    try:
        conn = psycopg2.connect("dbname={} user={} host={} password={}".format(db_name,db_user,db_host,db_pass))
    except Exception as e:
        print("Cannot connect.")
        raise e
    return conn
def fetch(conn):
    """Execute schema.sql on *conn*, then list the public base tables.

    Returns the information_schema rows (one tuple per table name).
    Re-raises any cursor error after logging it with a traceback.
    """
    result = []
    print("Now executing sql commands")
    # Bug fix: the original called `file.close` without parentheses, so the
    # handle was never closed; a context manager closes it reliably.
    with open('schema.sql', 'r') as file:
        script_file = file.read()
    print(script_file)
    try:
        cursor = conn.cursor()
        cursor.execute(script_file)
        print(cursor.description)
        cursor.execute("SELECT table_name FROM information_schema.tables WHERE table_schema='public' AND table_type='BASE TABLE';")
        raw = cursor.fetchall()
        for line in raw:
            print(line)
            result.append(line)
    except Exception as e:
        print("Cannot fetch." + str(e) + traceback.format_exc())
        raise e
    return result
def get_cfn_response_data(message):
    """Wrap *message* in the {'Data': {'Message': ...}} payload shape that
    cfnresponse.send expects."""
    return {'Data': {'Message': message}}
def lambda_handler(event, context):
    """CloudFormation custom-resource entry point.

    On Create: connects to the Postgres instance described by the secret in
    event.ResourceProperties and runs schema.sql via fetch(). On
    Update/Delete: reports SUCCESS without touching the database.
    """
    try:
        print(event['ResourceProperties']['secret_name'])
        secret = json.loads(get_secret(event['ResourceProperties']['secret_name'], event['ResourceProperties']['region']))
        # NOTE(review): this logs the full secret, including the password —
        # confirm that is acceptable for this environment.
        print(secret)
        db_host = secret["host"]
        db_name = secret["dbname"]
        # NOTE(review): db_port is never passed to create_conn — confirm the
        # default Postgres port is always correct here.
        db_port = secret["port"]
        db_user = secret["username"]
        db_pass = secret["password"]
        if event['RequestType'] == 'Create':
            try:
                # get a connection, if a connect cannot be made an exception will be raised here
                conn = create_conn(db_name, db_user, db_host, db_pass)
                result = fetch(conn)
                conn.close()
                # NOTE(review): no cfnresponse SUCCESS is sent on this path —
                # CloudFormation may wait until timeout; confirm intended.
                return result
            except Exception as e:
                cfnresponse.send(event, context, cfnresponse.FAILED, get_cfn_response_data('failed: '+str(e)))
                raise Exception(e)
        else:
            print('Delete/Update CF initiated')
            cfnresponse.send(event, context, cfnresponse.SUCCESS, get_cfn_response_data('delete'))
    except Exception as e:
        cfnresponse.send(event, context, cfnresponse.FAILED, get_cfn_response_data('failed: '+str(e)))
        raise Exception(e)
|
25,482 | 73d76b651525fa1e9b8ae2a4fefce85d20db3d23 | # @Time : 2020/6/29 19:34
# @Author : Shang
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import pandas as pd
# import matplotlib
# # import numpy as np
# import matplotlib.pyplot as plt
# plt.rcParams['font.sans-serif'] = ['SimHei']
# plt.rcParams['axes.unicode_minus'] = False
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
from keras.callbacks import EarlyStopping
from keras.layers import Dropout
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
#第三步 LSTM建模
#3.1
# 设置最频繁使用的50000个词(在texts_to_matrix是会取前MAX_NB_WORDS,会取前MAX_NB_WORDS列)
# Step 3: build and evaluate the LSTM text classifier.
df=pd.read_excel(r'./alldata_lstm.xlsx')
# NOTE(review): range(1) runs exactly one fold; presumably this was meant to
# loop over several train/test splits — confirm.
for i in range(1):
    df_train = pd.read_excel(r'./train'+str(i)+'.xls')
    df_test = pd.read_excel(r'./test'+str(i)+'.xls')
    # keep only the 50,000 most frequent words
    MAX_NB_WORDS = 50000
    # maximum length of each cut_review token sequence
    MAX_SEQUENCE_LENGTH = 300
    # dimensionality of the embedding layer
    EMBEDDING_DIM = 100
    tokenizer = Tokenizer(num_words=MAX_NB_WORDS, filters='!"#$%&()*+,-./:;<=>?@[\]^_`{|}~', lower=True)
    # fit the vocabulary on the full dataset so train and test share indices
    tokenizer.fit_on_texts(df['cut_review'].values)
    word_index = tokenizer.word_index
    print('共有 %s 个不相同的词语.' % len(word_index))
    # 3.2: convert texts to padded integer sequences (X defines input_length)
    X = tokenizer.texts_to_sequences(df['cut_review'].values)
    X = pad_sequences(X, maxlen=MAX_SEQUENCE_LENGTH)
    X_train = tokenizer.texts_to_sequences(df_train['cut_review'].values)
    X_train = pad_sequences(X_train, maxlen=MAX_SEQUENCE_LENGTH)
    X_test = tokenizer.texts_to_sequences(df_test['cut_review'].values)
    X_test = pad_sequences(X_test, maxlen=MAX_SEQUENCE_LENGTH)
    # one-hot encode the category labels
    Y_train = pd.get_dummies(df_train['cat_id']).values
    Y_test = pd.get_dummies(df_test['cat_id']).values
    # 3.3: report the train/test split shapes
    print(X_train.shape, Y_train.shape)
    print(X_test.shape, Y_test.shape)
    # 3.4: define the model (embedding -> LSTM -> 10-way softmax)
    model = Sequential()
    model.add(Embedding(MAX_NB_WORDS, EMBEDDING_DIM, input_length=X.shape[1]))
    model.add(SpatialDropout1D(0.2))
    model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
    model.add(Dense(10, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    print(model.summary())
    # 3.5: train with early stopping on validation loss
    epochs =10
    batch_size = 32
    history = model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size, validation_split=0.1,
                        callbacks=[EarlyStopping(monitor='val_loss', patience=3, min_delta=0.0001)])
    accr = model.evaluate(X_test, Y_test)
    print('Test set\n Loss: {:0.3f}\n Accuracy: {:0.3f}'.format(accr[0], accr[1]))
    # 3.6: per-class evaluation report
    y_pred = model.predict(X_test)
    y_pred = y_pred.argmax(axis=1)
    Y_test = Y_test.argmax(axis=1)
    print('accuracy %s' % accuracy_score(y_pred, Y_test))
    # map cat_id back to the human-readable category names for the report
    cat_id_df = df[['类别', 'cat_id']].drop_duplicates().sort_values('cat_id').reset_index(drop=True)
    print(classification_report(Y_test, y_pred, digits=4, target_names=cat_id_df['类别'].values))
|
25,483 | 9731882b2fda80defcbaefb41b1233dfe629eb46 | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 17 20:26:48 2020
RL Project 1: Frozen Lake MDP
@author: Manasi Shrotri
"""
### MDP Value Iteration and Policy Iteration
### Reference: https://web.stanford.edu/class/cs234/assignment1/index.html
import numpy as np
np.set_printoptions(precision=3)
"""
For policy_evaluation, policy_improvement, policy_iteration and value_iteration,
the parameters P, nS, nA, gamma are defined as follows:
P: nested dictionary
From gym.core.Environment
For each pair of states in [1, nS] and actions in [1, nA], P[state][action] is a
tuple of the form (probability, nextstate, reward, terminal) where
- probability: float
the probability of transitioning from "state" to "nextstate" with "action"
- nextstate: int
denotes the state we transition to (in range [0, nS - 1])
- reward: int
either 0 or 1, the reward for transitioning from "state" to
"nextstate" with "action"
- terminal: bool
True when "nextstate" is a terminal state (hole or goal), False otherwise
nS: int
number of states in the environment
nA: int
number of actions in the environment
gamma: float
Discount factor. Number in range [0, 1)
"""
def policy_evaluation(P, nS, nA, policy, gamma=0.9, tol=1e-8):
    """Evaluate the value function of a given policy.

    Parameters:
        P, nS, nA, gamma: environment description (see module docstring).
        policy: np.array[nS, nA] mapping states to action probabilities.
        tol: stop once the largest per-state change in a sweep is below tol.

    Returns:
        np.ndarray[nS] with the value of each state under *policy*.
    """
    V = np.zeros(nS)

    def q_value(state, action):
        # expected one-step return of taking *action* in *state*
        return sum(prob * (reward + gamma * V[next_state])
                   for prob, next_state, reward, _ in P[state][action])

    # Gauss-Seidel sweeps: each state's update immediately uses the newest
    # values of earlier states in the same sweep (matches in-place updates).
    while True:
        max_change = 0.0
        for state in range(nS):
            updated = sum(policy[state][action] * q_value(state, action)
                          for action in range(nA))
            max_change = max(max_change, abs(updated - V[state]))
            V[state] = updated
        if max_change < tol:
            return V
def policy_improvement(P, nS, nA, value_from_policy, gamma=0.9):
    """Greedily improve a policy against *value_from_policy*.

    For each state, computes the one-step lookahead Q-value of every action
    and puts all probability mass on the argmax action.

    Returns:
        np.ndarray[nS, nA] of floats; each row is one-hot on the greedy action.
    """
    new_policy = np.ones([nS, nA]) / nA
    for state in range(nS):
        # Q(s, a) via one-step lookahead over the transition model
        q_values = np.array([
            sum(prob * (reward + gamma * value_from_policy[next_state])
                for prob, next_state, reward, _ in P[state][action])
            for action in range(nA)
        ])
        greedy_row = np.zeros(nA)
        greedy_row[q_values.argmax()] = 1
        new_policy[state] = greedy_row
    return new_policy
def policy_iteration(P, nS, nA, policy, gamma=0.9, tol=1e-8):
    """Runs policy iteration until the policy is stable.

    Alternates policy_evaluation and policy_improvement, stopping when an
    improvement step leaves the policy unchanged.

    Bug fix: the original hard-coded gamma=0.9 and tol=1e-8 in the internal
    calls, silently ignoring the caller's arguments; they are now forwarded.

    Returns:
        (new_policy, V): np.ndarray[nS, nA] and np.ndarray[nS].
    """
    new_policy_iter = policy.copy()
    while True:
        current_policy = new_policy_iter.copy()
        Val_iter = policy_evaluation(P, nS, nA, new_policy_iter, gamma=gamma, tol=tol)
        new_policy_iter = policy_improvement(P, nS, nA, Val_iter, gamma=gamma)
        if np.array_equal(current_policy, new_policy_iter):
            break
    return new_policy_iter, Val_iter
def value_iteration(P, nS, nA, V, gamma=0.9, tol=1e-8):
    """
    Learn value function and policy by value iteration for a given gamma and
    environment.

    Parameters:
        P, nS, nA, gamma: environment description (see module docstring).
        V: initial value estimate (np.ndarray[nS]); NOT modified.
        tol: stop once the largest per-state change in a sweep is below tol.

    Bug fix: the original wrote each sweep's result back into the caller's
    array (`V[state] = V_new[state]`), mutating the argument as a side
    effect. The sweep now runs entirely on a private copy; the in-place
    (Gauss-Seidel) update order within a sweep is preserved, so the
    returned values are unchanged.

    Returns:
        (policy_new, V_new): np.ndarray[nS, nA] and np.ndarray[nS].
    """
    V_new = V.copy()
    while True:
        deltaV = 0.0
        for state in range(nS):
            # one-step lookahead Q-values using the freshest estimates
            action_values = np.zeros(nA)
            for action in range(nA):
                for prob, next_state, reward, _ in P[state][action]:
                    action_values[action] += prob * (reward + gamma * V_new[next_state])
            best_value = action_values.max()
            deltaV = max(deltaV, abs(best_value - V_new[state]))
            V_new[state] = best_value
        if deltaV < tol:
            break
    policy_new = policy_improvement(P, nS, nA, V_new, gamma)
    return policy_new, V_new
def render_single(env, policy, render = False, n_episodes=100):
    """
    Given a game envrionemnt of gym package, play multiple episodes of the game.
    An episode is over when the returned value for "done" = True.
    At each step, pick an action and collect the reward and new state from the game.
    Parameters:
    ----------
    env: gym.core.Environment
        Environment to play on. Must have nS, nA, and P as attributes.
    policy: np.array of shape [env.nS, env.nA]
        The action to take at a given state
    render: whether or not to render the game(it's slower to render the game)
    n_episodes: the number of episodes to play in the game.
    Returns:
    ------
    total_rewards: the total number of rewards achieved in the game.
    """
    total_rewards = 0
    for _ in range(n_episodes):
        ob = env.reset() # initialize the episode
        done = False
        while not done:
            if render:
                env.render() # render the game
            # act greedily: take the argmax action of the (one-hot) policy
            # row for the current observation/state
            # NOTE(review): assumes the classic gym API where step() returns
            # (obs, reward, done, info); newer gymnasium returns 5 values —
            # confirm the gym version in use.
            agent_next_step=env.step(np.argmax(policy[ob,:]))
            ob=agent_next_step[0]
            reward= agent_next_step[1]
            done= agent_next_step[2]
            total_rewards+=reward
            if done:
                break
    return total_rewards
|
25,484 | 84038701b4af3677c25b592008b78c5c0fc54618 | user_input1 = int(raw_input("enter your number"))
user_input2 = int(raw_input("enter your second number"))
user_input3 = int(raw_input("enter your third number"))
if user_input1<user_input2 and user_input3<user_input2:
print user_input2
elif user_input3>user_input1 and user_input3>user_input2:
print user_input3
else:
print user_input1
|
25,485 | 18ed443602c1902876814c82c0c3d1f9e6c5be18 | import re
from pyswmm import Simulation, Nodes
import os.path
import csv
def extract_basin_wl(swmm_inputfile, basin_id, time_step, csv_file_basename):
    """
    Extracts the time sequential water level in the specified basin from swmm model and write it to
    a csv file.
    :param str swmm_inputfile: swmm model path
    :param str basin_id: basin id from swmm
    :param int time_step: time interval in seconds
    :param str csv_file_basename: csv file basename
    """
    time_series = []
    water_depth = []
    with Simulation(swmm_inputfile) as sim:
        su = Nodes(sim)[basin_id]
        sim.step_advance(time_step)
        for step in sim:
            time_series.append(sim.current_time)
            water_depth.append(su.depth)
    dirname = os.path.dirname(swmm_inputfile)
    output_csv_file = os.path.join(dirname, csv_file_basename + ".csv")
    # Bug fix: csv files must be opened with newline="" — without it the csv
    # module emits a blank line after every row on Windows.
    with open(output_csv_file, "w", newline="") as f:
        writer = csv.writer(f)
        for timestamp, depth in zip(time_series, water_depth):
            writer.writerow([timestamp, depth])
def insert_rain_data_file_path(swmm_inputfile, rain_data_file):
    """
    Insert the provided rain data file path into the swmm model, replacing the
    first existing `long_term_rainfallgauge5061 FILE "..."` entry in place.
    :param str swmm_inputfile: swmm model path
    :param str rain_data_file: rain data file path
    """
    with open(swmm_inputfile, "r+") as f:
        file_content = f.read()
        new_line = "long_term_rainfallgauge5061 FILE \"" + rain_data_file + "\""
        # BUG FIX: use a callable replacement -- a plain replacement string is
        # scanned for backslash escapes by re.sub, which corrupts Windows
        # paths such as C:\data\rain.dat.
        file_content = re.sub(r"long_term_rainfallgauge5061 FILE \"[^\"]*\"",
                              lambda _m: new_line, file_content, count=1)
        f.seek(0)
        f.write(file_content)
        f.truncate()
if __name__ == "__main__":
    # First figure out where the swmm model file is located. This is also OS dependent.
    this_file = os.path.realpath(__file__)
    # project root is assumed to be one level above this script's folder
    base_folder = os.path.dirname(os.path.dirname(this_file))
    swmm_folder = "swmm_models"
    swmm_inputfile = os.path.join(base_folder, swmm_folder, "test4_swmm_simulation_control.inp")
    # fail fast if the model file is missing
    assert(os.path.isfile(swmm_inputfile))
    # We found the model. Now we have to include the correct path to the rain data into the model.
    rain_data_file = "swmm_5061.dat"  # Assumed to be in the same folder as the swmm model input file.
    rain_data_file = os.path.join(base_folder, swmm_folder, rain_data_file)
    insert_rain_data_file_path(swmm_inputfile, rain_data_file)
    # Finally we can specify other variables and start the swmm script.
    basin_id = "SU1"            # storage-unit node id looked up in the model
    time_step = 60              # reporting interval in seconds
    csv_file_basename = "basin_wl"
    extract_basin_wl(swmm_inputfile, basin_id, time_step, csv_file_basename)
    print("procedure completed!")
|
25,486 | 1427e9f4825ba23f9dfeb12aaf9c1de8ca1d2b75 | import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LassoCV
from sklearn.linear_model import RidgeCV
from sklearn.linear_model import ElasticNetCV
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from joblib import dump, load
#from modelling_helpers import model_diagnostics
# NOTE(review): relative path -- resolves against the process CWD, not this
# file's location; confirm the Airflow task always runs from "/".
MODEL_DATA = pd.read_csv("opt/bitnami/airflow/task_data/features_added/features_added_reddit.csv")
# model name -> diagnostics, filled by model() once diagnostics are re-enabled
MODEL_PERFORMANCE = dict()
def model(name, model_function, X_train, y_train, X_test=None, y_test=None,
          performance=None, data=None, **kwargs):
    """Fit *model_function* on features derived from *data* and return it.

    BUG FIXES vs. the original:
    * the signature put required parameters after ``data=MODEL_DATA`` --
      a SyntaxError; reordered to match the positional call in ``__main__``
      (name, model_function, X_train, y_train, X_test, y_test, performance);
    * ``model_function(kwargs)`` passed the kwargs dict as one positional
      argument -- now forwarded with ``**kwargs``;
    * the local result no longer shadows this function's own name.

    :param name: display name (used when diagnostics are re-enabled)
    :param model_function: estimator class/factory exposing ``fit``
    :param X_train/y_train/X_test/y_test: accepted for call compatibility;
        the split is recomputed from *data* below, as in the original
    :param performance: optional dict for diagnostics (currently unused)
    :param data: source DataFrame; defaults to the module-level MODEL_DATA
    :returns: the fitted estimator
    """
    if data is None:
        data = MODEL_DATA
    y = data['score'].values
    x = data[['comms_num', 'gilded', 'subjectivity', 'word_count', 'senti_comp']]
    x = x.to_numpy()
    X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=10)
    estimator = model_function(**kwargs)
    estimator.fit(X_train, y_train)
    #performance[name] = model_diagnostics(estimator, X_test, y_test)
    #dump(estimator, 'C:\\Users\\588175\\Projects\\ML_Flask_App\\ml_flask\\models\\{}.joblib'.format(name))
    return estimator
if __name__ == "__main__":
    # NOTE(review): `preparation` is not defined or imported anywhere in this
    # module -- this block raises NameError as written; confirm where it was
    # meant to come from (modelling_helpers is commented out above).
    model_data = preparation(MODEL_DATA)
    X_train, X_test, y_train, y_test = model_data[0], model_data[1], model_data[2], model_data[3]
    model('Linear Regression', LinearRegression, X_train, y_train, X_test, y_test, MODEL_PERFORMANCE)
    #model('KNN Regression', KNeighborsRegressor, X_train, y_train, X_test, y_test, MODEL_PERFORMANCE)
25,487 | 08bcc78384761604c3cc1c16c6adcdc3a02194ff | # -*- coding: utf-8 -*-
# Author: kelvinBen
# Github: https://github.com/kelvinBen/HistoricalArticlesToPdf
import os
import re
import math
import time
import json
import shutil
import psutil
import logging
import requests
from PIL import Image
from queue import Queue
from http import cookiejar
import urllib.parse as urlcode
import libs.sql.user_sql as UserSql
import libs.sql.wechat_sql as WechatSql
from libs.core.html2pdf import HtmlToPdfThreads
log = logging.getLogger(__name__)
class WechatTask(object):
base_url = "https://mp.weixin.qq.com/cgi-bin/"
start_login_url = base_url+"bizlogin?action=startlogin"
getqrcode_url = base_url+"scanloginqrcode?action=getqrcode&random=%s"
ask_url = base_url+"scanloginqrcode?action=ask&token=&lang=zh_CN&f=json&ajax=1"
login_url = base_url+"bizlogin?action=login"
search_biz_url = base_url+"searchbiz"
appmsg_url = base_url+"appmsg"
referer = "https://mp.weixin.qq.com/"
thread_list =[]
img_path_dict = {}
diz_list =[]
def __init__(self,user_name, password, cookie, name, website_url,threads,out_path):
self.user_name = user_name
self.password = password
self.cookie = cookie
self.name = name.replace("\"","").replace(" ","")
self.website_url = website_url
self.task_queue = Queue()
self.threads = threads
self.out_path = out_path
def start(self):
self.__start_data__ = str(time.time).replace(".","")
self.__create_dir__()
self.__load_cookies__()
self.__start_threads__()
for thread in self.thread_list:
thread.join()
self.__print__()
self.__delete_file__()
def __create_dir__(self):
self.out_qrcode_path = os.path.join(self.out_path,"qrcode")
if not os.path.exists(self.out_qrcode_path):
os.makedirs(self.out_qrcode_path)
self.wx_cookie_path = os.path.join(self.out_path,"wx.info")
def __start_threads__(self):
for thread_id in range(1,self.threads):
thread_name = "Thread - " + str(thread_id)
thread = HtmlToPdfThreads(self.task_queue,thread_id,thread_name)
thread.start()
self.thread_list.append(thread)
def __data__(self,map=None):
data = {"userlang":"zh_CN","redirect_url":"","login_type":"3","token":"","lang":"","f":"json","ajax":"1"}
if map:
for key,value in map.items():
data[key] = value
return data
def __head__(self,heads=None):
head ={
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:78.0) Gecko/20100101 Firefox/78.0",
"Referer": self.referer
}
if self.cookie:
head["Cookie"] = self.cookie
if heads:
for key,value in heads.items():
head[key] = value
return head
def __start_login__(self):
data = {"sessionid":str(time.time()).replace(".","")}
session,result = self.__http_request__(url=self.start_login_url,data=self.__data__(data),wait=1)
if result:
self.getqrcode(session)
def getqrcode(self,session):
time_str = str(time.time()).replace(".","")
new_getqrcode_url = self.getqrcode_url.replace("%s",time_str)
qrcode_path = os.path.join(self.out_qrcode_path,time_str + ".png")
self.__http_io_request__(url=new_getqrcode_url,session=session,path=qrcode_path)
log.warn("请使用微信扫描弹出的二维码图片用于登录微信公众号!")
try:
image = Image.open(qrcode_path)
image.show()
except Exception as e:
log.error(e)
raise Exception("获取二维码失败,请重试!")
self.getqrcodeStatus(session)
def getqrcodeStatus(self,session,t=6):
while True:
session,result = self.__http_request__(method='get',url=self.ask_url,wait=t)
if not result:
return
if result.get("status") == "3":
log.warn("二维码已失效,请重新使用微信进行扫码!")
self.getqrcode(session)
return
if str(result.get("status")) == "1":
self.login(session)
return
if t == 6:
t = 7
else:
t = 6
def login(self,session):
data = {"lang":"zh_CN"}
session,result = self.__http_request__(url=self.login_url,data=self.__data__(data))
if not result:
return
redirect_url = result.get("redirect_url")
if not redirect_url:
return
token_compile = re.compile(r'.*token=(.*).*')
token = token_compile.findall(redirect_url)
if len(token) < 0:
return
token = token[0]
names = self.name.split(",")
self.__save_cookie__(session,token)
for name in names:
self.search_biz(session,token,name)
# 搜索公众号
def search_biz(self,session,token,name,no=1,begin=0,count=5,total=0):
data = {
"action":"search_biz",
"begin":begin,
"count":count,
"query":name,
"token":token,
"lang":"zh_CN",
"f":"json",
"ajax":1
}
self.referer = ("https://mp.weixin.qq.com/cgi-bin/appmsg?t=media/appmsg_edit_v2&action=edit&isNew=1&type=10&createType=0&token=%s&lang=zh_CN") % (token)
session,result = self.__http_request__(method='get',url=self.search_biz_url,data=data)
if not result:
return
biz_list = result.get("list") # 公众号列表
biz_total = result.get("total") # 公众号总数量
if len(biz_list) == 0:
return
for biz in biz_list:
fakeid = biz.get("fakeid")
nickname = biz.get("nickname")
alias = biz.get("alias")
if nickname != name:
continue
wi_id = WechatSql.insert_info(fakeid,alias,nickname)
out_dir = os.path.join(self.out_path , nickname)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
begin = WechatSql.select_list_num(wi_id)
app_msg_cnt = self.list_ex(session,fakeid,token,out_dir,wi_id)
diz_dict ={}
if app_msg_cnt != 0:
diz_dict["wi_id"] = wi_id
diz_dict["name"] = name
diz_dict["total"] = app_msg_cnt
diz_dict["current"] = str(app_msg_cnt - begin)
diz_dict["html"] = os.path.join(out_dir,"html")
diz_dict["pdf"] = os.path.join(out_dir,"pdf")
self.diz_list.append(diz_dict)
return
begin = count + begin
if no <= biz_total:
self.search_biz(session,token,name,no,begin,count,biz_total)
def list_ex(self,session,fakeid,token,out_dir,wi_id,no=0,begin=0,count=5,app_msg_cnt=0):
data ={
"action":"list_ex",
"begin":str(begin),
"count":str(count),
"fakeid":str(fakeid),
"type":"9",
"query":"",
"token":str(token),
"lang":"zh_CN",
"f":"json",
"ajax":"1"
}
if begin < 0: # 防止出现负数的情况
return app_msg_cnt
if app_msg_cnt == 0: # 获取文章总数量
session,result = self.__http_request__(method='get',url=self.appmsg_url,data=data,session=session)
if not result:
return app_msg_cnt
app_msg_cnt = result.get("app_msg_cnt")
nums = str(app_msg_cnt/10).split(".")
if int(nums[1]) >= 5:
start = app_msg_cnt - int(nums[1]) + 5
else:
start = app_msg_cnt - int(nums[1])
self.list_ex(session,fakeid,token,out_dir,wi_id,begin=start, app_msg_cnt = app_msg_cnt) # 设置文章起始编号和文章总数量
return app_msg_cnt
session,result = self.__http_request__(method='get',url=self.appmsg_url,data=data,session=session)
if not result:
return app_msg_cnt
app_msg_cnt = result.get("app_msg_cnt")
app_msg_list = result.get("app_msg_list")
if len(app_msg_list) == 0:
return app_msg_cnt
for app in list(reversed(app_msg_list)):
link = app.get("link")
title = app.get("title")
digest = app.get("digest")
title_list = WechatSql.select_list_title(wi_id,begin)
if title in title_list:
continue
i_date = str(time.time).replace(".","")
WechatSql.insert_list(wi_id,no,title,link,digest,i_date)
self.__get_article_details__(no,title,link,out_dir)
no = no + 1
begin = begin - count
self.list_ex(session,fakeid,token,out_dir,wi_id,no,begin,count,app_msg_cnt)
def __get_article_details__(self,no,title,link,out_dir):
filters = {'/','\\','?','*',':','"','<','>','|',' ','?','(',')','!',',','“',"”"}
for filter in filters:
title = title.replace(filter,"")
html_path = os.path.join(out_dir,"html")
pdf_path = os.path.join(out_dir,"pdf")
image_path = os.path.join(html_path,"image")
if not os.path.exists(image_path):
os.makedirs(image_path)
if not os.path.exists(pdf_path):
os.makedirs(pdf_path)
html_file = os.path.join(html_path,str(no)+ "-" +title+".html")
pdf_file = os.path.join(pdf_path,str(no)+ "-" +title+".pdf")
if os.path.exists(pdf_file): # PDF文件存在则不生成对应的PDF文件,否则继续
return
if not os.path.exists(html_file):
content = self.__get_content__(link,image_path)
with open(html_file,"w") as f:
f.write(content)
f.flush()
f.close()
task_info = {"html":html_file,"pdf":pdf_file}
self.task_queue.put(task_info)
def __get_content__(self,link,image_path):
self.referer = link
session,content = self.__http_request__(method="get",url=link,flag=True)
if not content:
return
src_compile = re.compile(r'data-src=\"(.*?)\"')
src_urls = src_compile.findall(content)
if len(src_urls) < 0:
return
for img_url in src_urls:
if not (img_url.startswith("http://") or img_url.startswith("https://")):
continue
img_url_compile = re.compile("wx_fmt=(.*)?")
img = img_url_compile.findall(img_url)
suffix = ".png"
if len(img)>0:
suffix = "."+ str(img[0])
img_name = str(time.time()).replace(".","") + suffix
img_file = os.path.join(image_path,img_name)
self.__http_io_request__(url=img_url,path=img_file)
self.img_path_dict[img_url] = "./image/"+img_name
content = content.replace("data-src","src")
for key,value in self.img_path_dict.items():
content = content.replace(key,value)
return content
def __http_io_request__(self,method='get',url=None,data=None,headers=None,session=requests.session(),stream=True,path=None):
if method =='get':
resp = session.get(url=url,params=data,headers=self.__head__(headers),stream=stream)
else:
resp = session.post(url=url,data=data,headers=self.__head__(headers),stream=stream)
if resp.status_code == 200:
with open(path, 'wb+') as f:
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
f.close()
return session,True
time.sleep(1)
return session,False
def __http_request__(self,method='post',url=None,data=None,headers=None,session=requests.session(),wait=5,flag=False):
time.sleep(wait)
if method == "get":
resp = session.get(url = url, params = data, headers = self.__head__(headers))
else:
resp = session.post(url = url, data = data, headers = self.__head__(headers))
if resp.status_code != 200:
log.error("网络异常或者错误:"+str(resp.status_code))
return session,None
if flag:
content = resp.text
if not content:
return session,None
return session,content
resp_json = resp.json()
if not resp_json:
return session,None
log.debug(resp_json)
base_resp = resp_json.get("base_resp")
if base_resp:
ret = base_resp.get("ret")
err_msg = base_resp.get("err_msg")
if ret == 0:
return session,resp_json
elif err_msg == "default" or err_msg == "invalid csrf token" or err_msg=="invalid session" :
UserSql.delete_user_info(0)
self.__start_login__()
return
else:
return session,None
def __print__(self):
change = 0
current = 0
for diz in self.diz_list:
titles = WechatSql.select_list_to_diz(diz['wi_id'],self.__start_data__)
for title in titles:
if os.path.exists(os.path.join(diz["pdf"],title+".pdf")):
change = change + 1
if os.path.exists(os.path.join(diz["html"],title+".html")):
current = current + 1
print(("公众号: %s ,共计 %s 篇文章" %(diz["name"],diz["total"])))
print(("==> 本次共计获取 %s 篇文章,成功将 %s 篇文章转换为PDF文件。")%(str(current),str(change)))
print(("==> PDF文件输出目录为: %s")%(diz["pdf"]))
print(("==> HTML文件输出目录为: %s")%(diz["html"]))
def __delete_file__(self):
if os.path.exists(self.out_qrcode_path):
shutil.rmtree(self.out_qrcode_path)
def __save_cookie__(self,session,token):
cookie = ""
cookies = session.cookies.items()
for key,value in cookies:
cookie = cookie + '{0}={1};'.format(key,value)
UserSql.insert_user_info(self.user_name ,self.password,token,cookie,0)
def __load_cookies__(self):
session = requests.session()
user_info = UserSql.select_user_info(0)
if user_info:
token = user_info[0]
self.cookie = user_info[1]
names = self.name.split(",")
for name in names:
self.search_biz(session,token,name)
else:
self.__start_login__()
|
25,488 | f903a8d86c3bb38675aa02601e844f150498cc78 | #!/usr/bin/env python
import os
import sys
import json
import select
PYTHON3 = sys.version_info[0] == 3  # os.write needs bytes on py3, a str on py2
SYSEX_START = 0xf0  # MIDI system-exclusive message start byte
SYSEX_STOP = 0xf7  # MIDI system-exclusive message end byte
MANUFACTURER = 0x47  # sysex manufacturer id (presumably Akai -- LPD8 tool; confirm)
MODEL1 = 0x7f  # device/model id bytes sent in every sysex header
MODEL2 = 0x75
def requestdata(fd, program):
    """Send a sysex "dump program" request for slot *program* to the device."""
    message = (
        SYSEX_START,
        MANUFACTURER,
        MODEL1, MODEL2,
        0x63, 0x00, 0x01,
        program,
        SYSEX_STOP,
    )
    if PYTHON3:
        payload = bytes(message)
    else:
        payload = ''.join(chr(b) for b in message)
    os.write(fd, payload)
def read_channel(data):
    """Pop the stored MIDI channel byte from *data* and return it 1-based."""
    raw = data.pop(0)
    return raw + 1
def read_pad(data):
    """Consume four bytes describing one pad: note, PC, CC, toggle flag."""
    fields = [data.pop(0) for _ in range(4)]
    return {
        'note': fields[0],
        'pc': fields[1],
        'cc': fields[2],
        'toggle': fields[3] == 1,
    }
def read_knob(data):
    """Consume three bytes describing one knob: CC number and value range."""
    cc, low, high = data.pop(0), data.pop(0), data.pop(0)
    return {'cc': cc, 'min': low, 'max': high}
def read_data(fd):
    """Read one sysex dump from *fd* and decode it into a program dict."""
    # Skip input until the sysex start marker appears.
    byte = 0
    while byte != SYSEX_START:
        byte = ord(os.read(fd, 1))
    # Collect everything up to (excluding) the sysex stop marker.
    payload = []
    while True:
        nxt = ord(os.read(fd, 1))
        if nxt == SYSEX_STOP:
            break
        payload.append(nxt)
    # Drop the 7-byte sysex header; the rest is channel + 8 pads + 8 knobs.
    payload = payload[7:]
    return {
        'channel': read_channel(payload),
        'pads': [read_pad(payload) for _ in range(8)],
        'knobs': [read_knob(payload) for _ in range(8)],
    }
def write_pad(data, pad):
    """Append one pad's four configuration bytes (note, pc, cc, toggle) to *data*."""
    data.extend((pad['note'], pad['pc'], pad['cc'], int(pad['toggle'])))
def write_knob(data, knob):
    """Append one knob's three configuration bytes (cc, min, max) to *data*."""
    data.extend((knob['cc'], knob['min'], knob['max']))
def write_data(fd, program):
    """Read a program description (JSON) from stdin and upload it to *fd*."""
    js = json.load(sys.stdin)
    # Header, target program slot, then the 0-based channel byte.
    message = [
        SYSEX_START,
        MANUFACTURER,
        MODEL1, MODEL2,
        0x61, 0x00, 0x3a, program,
        js['channel'] - 1,
    ]
    for pad in js['pads']:
        write_pad(message, pad)
    for knob in js['knobs']:
        write_knob(message, knob)
    message.append(SYSEX_STOP)
    if PYTHON3:
        os.write(fd, bytes(message))
    else:
        os.write(fd, ''.join(chr(b) for b in message))
def exit_usage():
    """Print usage information to stderr and terminate with status 1."""
    usage_lines = (
        'Usage: %s <program> <device file>\n\n' % sys.argv[0],
        'When stdin is a pipe, lpd8 loads a program from stdin\n',
        'Otherwise, it writes the current program to stdout.\n',
    )
    for line in usage_lines:
        sys.stderr.write(line)
    sys.exit(1)
def clearpending(fd):
    """Drain any bytes already queued on *fd*; stops after a 0.1 s quiet gap."""
    while True:
        readable, _, _ = select.select((fd,), (), (), 0.1)
        if not readable:
            return
        os.read(fd, 1)
if __name__ == '__main__':
    # Expect exactly: <program number 1-4> <device file>
    if len(sys.argv) != 3: exit_usage()
    try:
        program = int(sys.argv[1])
    except ValueError:
        exit_usage()
    if program < 1 or program > 4:
        exit_usage()
    device = sys.argv[2]
    fd = os.open(device, os.O_RDWR)
    # drop any stale bytes the device queued before we start talking to it
    clearpending(fd)
    try:
        if sys.stdin.isatty():
            # interactive stdin: request the program and dump it as JSON
            requestdata(fd, program)
            print(json.dumps(
                read_data(fd),
                indent=2,
                separators=(',', ': '),
                sort_keys=True
            ))
        else:
            # stdin is a pipe: read JSON from it and upload to the device
            write_data(fd, program)
    finally:
        os.close(fd)
|
25,489 | 3adf8d2f5b4d3562d2bb0e85da6ac22c87025d8d |
from ch1.part2 import *
from ch1.part2 import _neighbors
import unittest
class Part2Tests(unittest.TestCase):
    """Tests for the ch1.part2 helpers: GC skew, Hamming distance,
    approximate pattern matching and mismatch-tolerant frequent words."""

    def test_skew_i(self):
        # prefix G-C skew values, one per position (leading 0 included)
        self.assertListEqual(skew_i("CATGGGCATCGGCCATACGCC"),
                             [0, -1, -1, -1, 0, 1, 2, 1, 1, 1, 0, 1, 2, 1, 0, 0, 0, 0, -1, 0, -1, -2])

    def test_min_skew(self):
        # positions where the skew is minimal (candidate ori locations)
        self.assertListEqual(min_skew("TAAAGACTGCCGAGAGGCCAACACGAGTGCTAGAACGAGGGGCGTAAACGCGGGTCCGAT"), [11, 24])

    def test_hamming_count(self):
        self.assertEqual(hamming_distance("GGGCCGTTGGT", "GGACCGTTGAC"), 3)

    def test_approximate_pattern_match(self):
        # start indices where the pattern occurs with at most 3 mismatches
        self.assertListEqual(approximate_pattern_matching(
            "ATTCTGGA", "CGCCCGAATCCAGAACGCATTCCCATATTTCGGGACCACTGGCCTCCACGGTACGGACGTCAATCAAAT", 3),
            [6, 7, 26, 27])

    def test_count_2(self):
        self.assertEqual(count_2("AACAAGCTGATAAACATTTAAAGAG", "AAAAA"), 11)

    def test_approximate_pattern_count(self):
        self.assertEqual(
            approximate_pattern_count("GAGG", "TTTAGAGCCTTCAGAGG", 2),
            4)

    def test_neighbor(self):
        # _neighbors returns every string within the given Hamming distance
        self.assertSetEqual(_neighbors("ACG", 1),
                            {"CCG", "TCG", "GCG", "AAG", "ATG", "AGG", "ACA", "ACC", "ACT", "ACG"})
        self.assertSetEqual(_neighbors("AAT", 0), {"AAT"})

    def test_frequent_words_with_mismatch(self):
        self.assertSetEqual(
            frequent_words_with_mismatch("ACGTTGCATGTCGCATGATGCATGAGAGCT", 4, 1),
            {"GATG", "ATGC", "ATGT"})

    # NOTE(review): "revese" typo kept in the method name -- renaming would
    # change the reported test id.
    def test_frequent_words_with_mismatch_and_revese_complement(self):
        self.assertSetEqual(
            frequent_words_with_mismatch_and_reverse_complement("ACGTTGCATGTCGCATGATGCATGAGAGCT", 4, 1),
            {"ATGT", "ACAT"}
        )
        self.assertSetEqual(
            frequent_words_with_mismatch_and_reverse_complement("AAAAAAAAAA", 2, 1),
            {"AT", "TA"}
        )
        self.assertSetEqual(
            frequent_words_with_mismatch_and_reverse_complement("AGTCAGTC", 4, 2),
            {"AATT", "GGCC"}
        )
        self.assertSetEqual(
            frequent_words_with_mismatch_and_reverse_complement("AATTAATTGGTAGGTAGGTA", 4, 0),
            {"AATT"}
        )
        self.assertSetEqual(
            frequent_words_with_mismatch_and_reverse_complement("ATA", 3, 1),
            {"AAA", "AAT", "ACA", "AGA", "ATA", "ATC", "ATG", "ATT", "CAT", "CTA", "GAT", "GTA", "TAA", "TAC", "TAG",
             "TAT", "TCT", "TGT", "TTA", "TTT"}
        )
        self.assertSetEqual(
            frequent_words_with_mismatch_and_reverse_complement("AAT", 3, 0),
            {"AAT", "ATT"}
        )
        self.assertSetEqual(
            frequent_words_with_mismatch_and_reverse_complement("TAGCG", 2, 1),
            {"CA", "CC", "GG", "TG"}
        )
|
25,490 | 3976034e59e2cb0d19a0fac1bda36b1ed5c9a7c5 | #!/usr/bin/python3
import numpy as np
class MDPSolver:
    """Loads an MDP description from a text file and exposes it for solving.

    Subclasses implement run() to fill v_star/a_star; print() then emits the
    optimal value and action per state.
    """

    def __init__(self, mdp_file):
        self.load_mdp(mdp_file)
        self.v_star = None  # optimal state values, computed by run()
        self.a_star = None  # optimal actions, computed by run()
        # an end-state list of [-1] marks a continuing (episodic-free) MDP
        self.has_end = self.end[0] != -1

    def load_mdp(self, filepath):
        """Parse the keyword-per-line MDP file format into attributes.

        Recognized keywords: numStates, numActions, start, end, mdptype,
        discount, transition (s1 a s2 reward probability). Any other keyword
        raises.
        """
        self.T = None
        self.R = None
        with open(filepath, 'r') as fp:
            for line in fp:
                tokens = line.strip().split()
                keyword = tokens[0]
                if keyword == "numStates":
                    self.num_states = int(tokens[1])
                elif keyword == "numActions":
                    self.num_actions = int(tokens[1])
                elif keyword == "start":
                    self.start = int(tokens[1])
                elif keyword == "end":
                    self.end = [int(tok) for tok in tokens[1:]]
                elif keyword == "mdptype":
                    self.mdptype = tokens[1]
                elif keyword == "discount":
                    self.g = float(tokens[1])
                elif keyword == "transition":
                    # allocate lazily, once the state/action counts are known
                    if self.T is None or self.R is None:
                        shape = (self.num_states, self.num_states, self.num_actions)
                        self.T = np.zeros(shape)
                        self.R = np.zeros(shape)
                    s1, a, s2 = int(tokens[1]), int(tokens[2]), int(tokens[3])
                    r, p = float(tokens[4]), float(tokens[5])
                    self.T[s1][s2][a] = p
                    self.R[s1][s2][a] = r
                else:
                    raise Exception("Invalid file format")

    def print(self):
        """Emit one 'value action' line per state (requires run() first)."""
        for s in range(self.num_states):
            print("{:.6f} {}".format(self.v_star[s], self.a_star[s]))

    def run(self):
        """Abstract: solve the MDP. Subclasses must override."""
        raise NotImplementedError("method not implemented")
|
25,491 | 76c9f189418432606680cc1ac78c3b08cf84beac | from django.shortcuts import render
from . forms import SignUpForm
from .models import SignUp
def home(request):
    """Render the signup page; persist and greet on a valid POST.

    BUG FIXES vs. the original:
    * the form was bound to request.POST on every request, so a plain GET
      rendered a fully-invalid bound form (spurious validation errors on
      first load) -- it is now bound only for POST;
    * removed the debug SignUp.objects.all() query/print loop and the
      print(instance) debug statement.
    """
    title = "My Title"
    if request.method == "POST":
        form = SignUpForm(request.POST, request.FILES)
    else:
        form = SignUpForm()
    context = {
        "template_title": title,
        "form": form
    }
    if form.is_bound and form.is_valid():
        instance = form.save(commit=False)
        instance.save()
        context["template_title"] = "WELCOME" + " " + instance.full_name
    return render(request, "home.html", context)
25,492 | 8b313f6b4b840081d61ee415adca126fd5c2fa93 | # -*- coding: utf-8 -*-
class Solution(object):
    """LeetCode 1726: count ordered tuples (a, b, c, d) of distinct elements
    with a * b == c * d. Each unordered pair-of-pairs yields 8 tuples."""

    def tupleSameProduct(self, nums):
        """Hash every pairwise product; combine pairs sharing a product."""
        import math
        product_counts = {}
        n = len(nums)
        for i in range(n):
            for j in range(i + 1, n):
                prod = nums[i] * nums[j]
                product_counts[prod] = product_counts.get(prod, 0) + 1
        total = 0
        for cnt in product_counts.values():
            if cnt > 0:
                total += math.comb(cnt, 2) * 8
        return total

    # Two-pointer attempt kept for reference (TLE on large inputs); see
    # https://leetcode.com/problems/tuple-with-same-product/discuss/1020605/JavaPython-3-O(n-3)-and-O(n-2)-codes
    def tupleSameProduct2(self, nums):
        """Sorted two-pointer variant: for each (a, b) scan from both ends."""
        matches = 0
        nums.sort()
        for a in range(len(nums)):
            for b in range(a + 1, len(nums)):
                if a == b:
                    continue
                lo = 0
                hi = len(nums) - 1
                while lo < hi:
                    # abandon the scan as soon as any index collides
                    if a == b or b == lo or lo == hi or a == lo or a == hi or b == hi:
                        break
                    left = nums[a] * nums[b]
                    right = nums[lo] * nums[hi]
                    if left == right:
                        matches += 1
                        lo += 1
                        hi -= 1
                    elif left < right:
                        hi -= 1
                    else:
                        lo += 1
        return matches * 8
# Ad-hoc smoke tests.
# NOTE(review): this module mixes Python-2 print statements (below) with
# math.comb (Python 3.8+) inside the class -- it cannot run unmodified on
# either interpreter; confirm the intended target version.
test = Solution()
print test.tupleSameProduct([2,3,4,6])
print test.tupleSameProduct([1,2,4,5,10])
print test.tupleSameProduct([2,3,4,6,8,12])
print test.tupleSameProduct([2,3,5,7])
print test.tupleSameProduct([3589,2387,958,1670,3580,3600,645,1419,1821,3021,3166,2965,846,1912,2217,500,1207,758,1537,3659,3773,3215,2661,2839,2095,59,3290,1348,230,2037,163,2674,2853,2223,3458,2274,1828,2761,3528,81,3071,747,2729,1288,3212,3699,1774,1957,873,2654,53,336,207,3393,3477,3361,1268,477,2197,3360,1578,628,1579,957,735,3462,2737,1962,2067,461,2826,3184,1776,3631,2115,397,3588,1759,3160,1731,363,2304,3172,3621,1624,3729,1466,2651,1059,3114,1893,2415,3975,2485,786,3690,3234,1123,2176,3571,715,2171,1259,1296,673,225,2748,405,698,1997,2291,2838,3432,3684,97,793,139,2840,3821,3255,2677,233,3805,2712,1270,2854,2815,745,3285,3738,584,617,1195,281,3557,3419,1684,1698,3742,1166,146,711,770,2419,742,2716,267,668,3899,1360,2725,3880,3672,1875,761,1685,1474,3323,2931,912,3995,3764,1635,1513,1056,2248,505,3978,2500,3110,181,3,3391,998,12,206,2949,1277,1715,3518,3109,3248,3411,2244,3799,2245,3400,2148,3424,2573,946,3804,1063,1183,1171,1459,2985,3395,1094,3940,409,338,340,3676,2617,1165,1224,508,1253,920,3667,1464,507,821,2722,3990,3356,3784,936,3892,2718,522,2403,941,3886,489,1727,2126,3133,1692,411,1538,1674,2810,1643,105,3670,2168,1870,3055,2345,625,1879,1938,1523,1597,2515,1649,709,3177,1020,1948,1111,3705,2812,1672,3926,1215,2266,2354,780,2388,3430,2337,3746,1313,1548,2447,1499,2124,1308,771,3041,1897,3258,1441,2057,183,2328,3398,647,1999,1030,3985,1558,1098,2633,2881,1025,2073,3336,2818,3983,2502,3573,3844,112,884,2234,3751,352,1278,1385,3707,796,1669,877,537,2471,2628,1252,1930,974,74,1645,1937,1182,1053,226,1008,3585,1,3574,2130,3982,1151,2169,565,3657,1887,1623,1258,2820,3157,3611,574,2039,1634,1985,2510,3745,1591,2189,2816,3442,2833,2292,3275,1690,182,1075,3501,672,2298,568,1661,2225,2948,2731,854,3102,2946,2522,1066,3363,794,1235,1273,680,3587,3996,1505,504,3154,3992,3223,3230,484,2660,1005,309,593,608,2117,2323,755,1146,1517,3352,3998,1002,2800,332,1562,606,2715,2789,430,3293,2879]) |
25,493 | 85bbf0008ab6760304bc498902e9dcc305b2ef86 | from constants_two import color_dictionary
# Emit one <img> tag per color value.
# BUG FIX (idiom): `for key, val in enumerate(color_dictionary)` yielded
# (index, dict-key) pairs -- the index was unused and `val` was actually the
# key, which was then used to look up the value. Iterating .values() says
# what the loop really does; the printed output is unchanged.
for color in color_dictionary.values():
    print(f"<img id=\"{color}\" src=\"/assets/svgs/{color}.svg\">")
25,494 | e2ca4ae824ce37e73962319e54b85fd741eea473 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import check_password_hash
from werkzeug.security import generate_password_hash
from flask import redirect,render_template,url_for,session,request
from form import AccountForm,RegisterForm,UserForm,GroupForm,LoginForm,UserUpdateForm,AddToGroupForm
from requests import post
app=Flask(__name__)
# NOTE(review): database credentials and the secret key are hard-coded --
# move both to environment configuration before deploying.
app.config['SQLALCHEMY_DATABASE_URI']='postgresql://postgres:36network@localhost/app2'
app.config['SECRET_KEY']="I am Bapan"
db=SQLAlchemy(app)
BASE_URL="http://127.0.0.1:5000"
# association table for the User <-> UserGroup many-to-many relationship
group=db.Table('groups',db.Column('user_id',db.Integer,db.ForeignKey('users.id')),db.Column('group_id',db.Integer,db.ForeignKey('usergroups.id')))
class Account(db.Model):
    """A tenant account that owns users, groups and role rows."""
    __tablename__='accounts'
    id=db.Column(db.Integer,primary_key=True,autoincrement=True)
    name=db.Column(db.String(200),nullable=False)
    email=db.Column(db.String(200),unique=True)  # also used as the root user's email
    # reverse relations to everything created under this account
    owner=db.relationship('User',backref='owner')
    admin_owner=db.relationship('Admin',backref='admin_owner')
    alluser_owner=db.relationship('AllUser',backref='alluser_role')
    account_group=db.relationship('UserGroup',backref='account_group')
    def __init__(self,name,email):
        self.name=name
        self.email=email
    def __str__(self):
        return f"{self.name}"
class User(db.Model):
    """An end user belonging to one Account; member of many UserGroups."""
    __tablename__='users'
    id=db.Column(db.Integer,primary_key=True,autoincrement=True)
    username=db.Column(db.String(200),unique=True)
    email=db.Column(db.String(200),nullable=False)
    firstname=db.Column(db.String(200),nullable=False)
    lastname=db.Column(db.String(200),nullable=False)
    password=db.Column(db.String(200))  # werkzeug password hash, never plaintext
    is_active=db.Column(db.Boolean,nullable=False)  # False until first-login setup completes
    owner_id=db.Column(db.Integer,db.ForeignKey('accounts.id'))
    # many-to-many membership through the `groups` association table;
    # the backref 'addgroup' exposes a group's members as a dynamic query
    mygroup=db.relationship('UserGroup',secondary=group,backref=db.backref('addgroup',lazy='dynamic'))
    def __init__(self,username,email,firstname,lastname,password,accountId,is_active):
        self.firstname=firstname
        self.lastname=lastname
        self.email=email
        self.username=username
        # hash at construction time
        self.password=generate_password_hash(password)
        self.owner_id=accountId
        self.is_active=is_active
    def check_password(self,password):
        """Return True if *password* matches the stored hash."""
        return check_password_hash(self.password,password)
    def __str__(self):
        return f"{self.username}"
class UserGroup(db.Model):
    """A named group of users within one Account ('Admin' and 'All User'
    are created automatically for every account)."""
    __tablename__="usergroups"
    id=db.Column(db.Integer,primary_key=True,autoincrement=True)
    groupname=db.Column(db.String(200),nullable=False)
    description=db.Column(db.String(200))
    group_id=db.Column(db.Integer,db.ForeignKey('accounts.id'))  # owning account
    def __init__(self,groupname,description,accountId):
        self.groupname=groupname
        self.description=description
        self.group_id=accountId
    def __str__(self):
        return f"{self.description}"
class Admin(db.Model):
    """Per-user admin-role flag within an account (one row per user)."""
    __tablename__="adminrole"
    id=db.Column(db.Integer,primary_key=True,autoincrement=True)
    userid=db.Column(db.Integer,nullable=False)  # NOTE(review): plain int, no FK to users.id
    is_admin=db.Column(db.Boolean,nullable=False)
    account_id=db.Column(db.Integer,db.ForeignKey('accounts.id'))
    def __init__(self,userid,is_admin,accountId):
        self.userid=userid
        self.is_admin=is_admin
        self.account_id=accountId
    def __str__(self):
        return f"{self.userid} admin role"
class AllUser(db.Model):
    """Per-user ordinary-user-role flag within an account (one row per user)."""
    __tablename__="alluser"
    id=db.Column(db.Integer,primary_key=True,autoincrement=True)
    userid=db.Column(db.Integer,nullable=False)  # NOTE(review): plain int, no FK to users.id
    is_user=db.Column(db.Boolean,nullable=False)
    account_id=db.Column(db.Integer,db.ForeignKey('accounts.id'))
    def __init__(self,userid,is_user,accountId):
        self.userid=userid
        self.is_user=is_user
        self.account_id=accountId
    def __str__(self):
        return f"{self.userid} user role"
@app.route('/account',methods=['GET','POST'])
def create_account():
    """Create an Account and seed its default 'Admin' / 'All User' groups.

    GET renders the account form; a valid POST persists the account and
    redirects to root-user creation for the new account.

    BUG FIXES vs. the original:
    * form.validate was referenced but never called -- the truthy bound
      method made every POST pass "validation";
    * the blanket try/except that printed the exception and implicitly
      returned None (an HTTP 500 with no traceback) has been removed so
      real errors surface.
    """
    form = AccountForm(request.form)
    if request.method == 'POST' and form.validate():
        account = Account(request.form["name"], request.form["email"])
        db.session.add(account)
        db.session.commit()
        # every account gets the two built-in groups
        admin = UserGroup('Admin', 'All admin group', account.id)
        user = UserGroup('All User', 'All user group', account.id)
        db.session.add(admin)
        db.session.add(user)
        db.session.commit()
        return redirect(url_for('add_root', accountId=account.id))
    return render_template('account.html', form=form)
@app.route('/root/<int:accountId>',methods=["GET","POST"])
def add_root(accountId):
    """Create the root (admin) user for a freshly created account and place
    them in both built-in groups.

    BUG FIX: form.validate() is now actually called (the bare bound method
    was always truthy, so invalid registrations were accepted).
    """
    form = RegisterForm(request.form)
    account = Account.query.get(accountId)
    if request.method == "POST" and form.validate():
        # root user inherits the account's email and is active immediately
        user = User(request.form["username"], account.email,
                    request.form["firstname"], request.form["lastname"],
                    request.form["password"], account.id, True)
        db.session.add(user)
        db.session.commit()
        admin = UserGroup.query.filter_by(groupname='Admin').filter_by(group_id=account.id).first()
        alluser = UserGroup.query.filter_by(groupname='All User').filter_by(group_id=account.id).first()
        admin.addgroup.append(user)
        alluser.addgroup.append(user)
        # role rows: root is both an admin and an ordinary user
        db.session.add(Admin(user.id, True, account.id))
        db.session.add(AllUser(user.id, True, account.id))
        db.session.commit()
        return redirect(url_for('login'))
    return render_template("root.html", form=form, email=account.email)
@app.route('/addUser',methods=["GET","POST"])
def add_user():
    """Admin-only: provision an inactive user who completes setup on first
    login (placeholder password 'XX', activated via edit_user).

    BUG FIX: form.validate() is now actually called; also dropped a
    redundant db.session.add() of an already-persistent group.
    """
    admin = Admin.query.filter_by(userid=session["userid"]).first()
    if not admin.is_admin:
        return "Unauthorized!"
    form = UserForm(request.form)
    if request.method == "POST" and form.validate():
        email = request.form["email"]
        firstname = request.form["firstname"]
        lastname = request.form["lastname"]
        # NOTE(review): username is initialised to firstname here and later
        # replaced in edit_user -- confirm this is intended.
        user = User(firstname, email, firstname, lastname, 'XX',
                    session["accountId"], False)
        db.session.add(user)
        db.session.commit()
        alluser = UserGroup.query.filter_by(groupname='All User').filter_by(group_id=user.owner_id).first()
        alluser.addgroup.append(user)
        db.session.add(AllUser(user.id, True, user.owner_id))
        db.session.add(Admin(user.id, False, user.owner_id))
        db.session.commit()
        return redirect(url_for('admin_home'))
    return render_template('create_user.html', form=form)
@app.route('/addGroup',methods=["GET","POST"])
def add_group():
    """Admin-only: create a new user group for the current account.

    BUG FIXES vs. the original:
    * form.validate() is now actually called (bare method was always truthy);
    * non-admins previously fell off the end of the view and got an HTTP 500
      (None return) -- they now receive "Unauthorized!" like add_user.
    """
    admin = Admin.query.filter_by(userid=session["userid"]).first()
    if not admin.is_admin:
        return "Unauthorized!"
    form = GroupForm(request.form)
    if request.method == "POST" and form.validate():
        new_group = UserGroup(request.form["groupname"], request.form["description"],
                              session["accountId"])
        db.session.add(new_group)
        db.session.commit()
        return redirect(url_for('admin_home'))
    return render_template('create_group.html', form=form)
@app.route('/adminHome',methods=["GET"])
def admin_home():
    """Admin dashboard listing all users and groups of the current account.

    BUG FIX: non-admins previously fell off the end of the view, returning
    None (HTTP 500); they now get an explicit "Unauthorized!" response,
    consistent with the other admin views.
    """
    admin = Admin.query.filter_by(userid=session["userid"]).first()
    if not admin.is_admin:
        return "Unauthorized!"
    all_user = User.query.filter_by(owner_id=session["accountId"])
    all_groups = UserGroup.query.filter_by(group_id=session["accountId"])
    return render_template('admin_home.html', users=all_user, groups=all_groups)
@app.route('/login',methods=["GET","POST"])
def login():
    """Authenticate a user and establish the session.

    BUG FIXES vs. the original:
    * form.validate() is now actually called (bare method was always truthy);
    * an unknown username is handled the same way as a wrong password
      (redirect back to login) instead of silently re-rendering the form.
    """
    form = LoginForm(request.form)
    if request.method == "POST" and form.validate():
        username = request.form["username"]
        password = request.form["password"]
        user = User.query.filter_by(username=username).first()
        if user and check_password_hash(user.password, password):
            if not user.is_active:
                # provisioned user's first login: force credential setup
                return redirect(url_for('edit_user', userid=user.id))
            admin = Admin.query.filter_by(userid=user.id).first()
            alluser = AllUser.query.filter_by(userid=user.id).first()
            session["is_admin"] = bool(admin.is_admin)
            session["is_user"] = bool(alluser.is_user)
            session["accountId"] = user.owner_id
            session["userid"] = user.id
            session["logged_in"] = True
            return redirect(url_for('home'))
        # unknown user or wrong password: back to the login form
        return redirect(url_for('login'))
    return render_template('login.html', form=form)
@app.route('/home',methods=["GET"])
def home():
    """Render the landing page for the logged-in user.

    Shows the user groups this user belongs to (resolved through the
    ``group`` association table) plus the admin/user permission flags
    carried in the session.
    """
    current_user=User.query.get(session["userid"])
    member_groups=UserGroup.query.join(group).filter(group.c.group_id==UserGroup.id).filter(UserGroup.group_id==current_user.owner_id).filter(group.c.user_id==current_user.id).all()
    # Build the permission flags in one literal instead of an if/else ladder.
    permission={
        "is_admin":session["is_admin"]==True,
        "is_user":session["is_user"]==True,
    }
    return render_template("user_home.html",groups=member_groups,user=current_user,permission=permission)
@app.route('/updateUser/<int:userid>',methods=['GET','POST'])
def edit_user(userid):
    """Let a user pick a new username/password and activate the account.

    GET renders the pre-filled update form; POST validates it, hashes the
    new password, marks the account active, and redirects to login.
    """
    user=User.query.filter_by(id=userid).first()
    userform=UserUpdateForm(obj=user)
    # BUG FIX: userform.validate() must be called; the bare method
    # reference was always truthy, so validation never ran.
    # (Stray debug print(user) removed.)
    if request.method=="POST" and userform.validate():
        username=request.form["username"]
        password=request.form["password"]
        user.username=username
        # Never store the plaintext password.
        user.password=generate_password_hash(password)
        user.is_active=True
        db.session.commit()
        return redirect(url_for('login'))
    return render_template("update_user.html",form=userform)
@app.route('/usergroup/<int:group_id>/users',methods=["GET"])
def get_users_group(group_id):
    """List the members of one user group (admin only)."""
    admin=Admin.query.filter_by(userid=session["userid"]).first()
    # Guard clause instead of if/else nesting.
    if admin.is_admin!=True:
        return "Unauthorized"
    target_group=UserGroup.query.get(group_id)
    return render_template('groupusers.html',users=target_group.addgroup,group=target_group)
@app.route('/logout',methods=["GET"])
def logout():
    """Drop every session key and send the visitor back to the login page."""
    session.clear()
    login_page=url_for('login')
    return redirect(login_page)
@app.route('/usergroup/<int:usergroupid>',methods=["GET","POST"])
def usergroup_detail(usergroupid):
    """Show a group's detail page; on POST, add a user (looked up by email).

    Membership is added by POSTing to the internal add_user_usergroup
    endpoint; a non-200 response is treated as a hard failure.
    """
    form=AddToGroupForm()
    group=UserGroup.query.get(usergroupid)
    admin=Admin.query.filter_by(userid=session["userid"]).first()
    if admin is not None and admin.is_admin==True:
        # BUG FIX: form.validate() must be called; the bare method
        # reference was always truthy, so validation never ran.
        if request.method=="POST" and form.validate():
            email=request.form["email"]
            user=User.query.filter_by(email=email).first()
            # BUG FIX: an unknown email used to crash on user.id; re-render
            # the form instead. (Stray debug print(email) removed.)
            if user is None:
                return render_template("user_usergroup.html",form=form,group=group)
            response=post("http://127.0.0.1:5000"+"/userGroup/"+str(usergroupid)+"/user/"+str(user.id))
            if response.status_code!=200:
                raise Exception
            return redirect(url_for('get_users_group',group_id=usergroupid))
        return render_template("user_usergroup.html",form=form,group=group)
    # BUG FIX: the original returned None for non-admins (Flask 500).
    return "Unauthorized"
@app.route('/userGroup/<int:usergroupid>/user/<int:userid>',methods=["POST"])
def add_user_usergroup(usergroupid,userid):
    """Attach the given user to the given user group via the association table."""
    target_group=UserGroup.query.get(usergroupid)
    member=User.query.get(userid)
    target_group.addgroup.append(member)
    db.session.commit()
    return "200"
if __name__=="__main__":
    # Create the tables on first run, then start the development server.
    db.create_all()
    app.run(debug=True)
25,495 | 65f3f25985bf8df278892bef74152b13789e32cd | #
# Program:
#
# endJobStream.py
#
# Original Author:
#
# Lori Corbani
#
# Purpose:
#
# End a Job Stream record in RADAR
#
# Requirements Satisfied by This Program:
#
# Usage:
# endJobStream.py
#
# Envvars:
#
# RADAR_DBSERVER
# RADAR_DBNAME
# RADAR_DBUSER
# RADAR_DBPASSWORDFILE
# JOBSTREAMKEY
# JOBSTREAMRETURNCODE
#
# Inputs:
#
# None
#
# Outputs:
#
# None
#
# Exit Codes:
#
# 0 if Job Stream is successfully ended, else 1
#
# Assumes:
#
# Envvars are set
#
# Bugs:
#
# Implementation:
#
# Modification History:
#
# 04/20/2006 lec
# - MGI 3.5; DBSERVER => RADAR_DBSERVER, DBNAME => RADAR_DBNAME,
# DBUSER => RADAR_DBUSER, DBPASSWORDFILE => RADAR_DBPASSWORDFILE
#
import sys
import os
import db
#
# Main
#
# Read the connection parameters and job-stream identifiers from the environment.
server = os.environ['RADAR_DBSERVER']
database = os.environ['RADAR_DBNAME']
user = os.environ['RADAR_DBUSER']
passwordFileName = os.environ['RADAR_DBPASSWORDFILE']
# BUG FIX: close the password file deterministically (the original
# open(...).readline() leaked the file handle).
with open(passwordFileName, 'r') as passwordFile:
    password = passwordFile.readline().strip()
jobStreamKey = os.environ['JOBSTREAMKEY']
jobStreamRC = os.environ['JOBSTREAMRETURNCODE']
# Initialize db.py DBMS parameters
db.set_sqlLogin(user, password, server, database)
db.useOneConnection(1)
# End the Job Stream and propagate its status as the process exit code.
results = db.sql("select * from APP_endJobStream (%s, %s)" % ((jobStreamKey, jobStreamRC)), 'auto')
status = int(results[0]['app_endjobstream'])
db.commit()
db.useOneConnection(0)
sys.exit(status)
|
25,496 | 6cabe0cf0ec5c196f268fe841cf2c290a0d25e20 | # -*- coding: utf-8 -*-
import scrapy
class MovieSpider(scrapy.Spider):
    """Crawl douban's Top-250 movie chart, yielding each film's title and poster URL."""

    name = 'movie'
    allowed_domains = ['movie.douban.com']
    start_urls = ['http://movie.douban.com/top250']

    def parse(self, response):
        # Each <li> under the grid_view list is one ranked movie.
        movieList = response.xpath('//*[@id="content"]//ol[contains(@class, "grid_view")]/li/div[contains(@class, "item")]')
        for movie in movieList:
            # BUG FIX: the inner XPaths must be relative ('.//') to the
            # current node; a leading '//' searches the whole document, so
            # every item yielded the *first* movie's title and image.
            yield {
                'title': movie.xpath('.//div[contains(@class, "info")]/div[contains(@class, "hd")]/a/span[@class="title"][1]/text()').extract_first(),
                'img': movie.xpath('.//div[@class="pic"]/a/img/@src').extract_first()
            }
        # Follow the "next page" link until it disappears on the last page.
        nextPage = response.xpath('//*[@id="content"]/div/div[1]/div[2]/span[3]/a/@href').extract_first()
        if nextPage is not None:
            yield response.follow(nextPage, callback=self.parse)
|
25,497 | 6b5c7251536fb620a2d12eeaf4b92cd06500bf84 | from rest_framework import serializers
from .models import Cal
class CalSerializer(serializers.ModelSerializer):
    """Expose Cal rows (the two operands and their result) through the REST API."""

    class Meta:
        model = Cal
        fields = ('id', 'var2', 'var1', 'result')
def hangman_game():
    """Run one interactive round of hangman.

    Picks a secret word from hangman_words.word_list, gives the player six
    lives, and redraws the gallows art from hangman_art.stages after every
    guess until the word is revealed or the lives run out.
    """
    import random
    from hangman_words import word_list
    from hangman_art import logo, stages

    chosen_word = random.choice(word_list)
    word_length = len(chosen_word)
    end_of_game = False
    lives = 6

    print(logo)
    print("hint: bike and car names")

    # One blank per letter of the secret word.
    display = ["_"] * word_length

    while not end_of_game:
        guess = input("Guess a letter: ").lower()

        if guess in display:
            print(f"you have already guessed {guess}")

        # Reveal every position that matches the guess.
        # BUG FIX: the original printed the current letter of the secret
        # word at every position, spoiling the answer; that debug print
        # is removed.
        for position in range(word_length):
            if chosen_word[position] == guess:
                display[position] = chosen_word[position]

        # Wrong guess costs a life.
        if guess not in chosen_word:
            # BUG FIX: typo "enterd" corrected in the player-facing message.
            print(f"you entered {guess}, that's not in the display,so you will lose a life")
            lives -= 1
            if lives == 0:
                end_of_game = True
                print("You lose.")

        print(f"{' '.join(display)}")

        if "_" not in display:
            end_of_game = True
            print("You win.")

        # Redraw the gallows matching the remaining lives.
        print(stages[lives])
|
25,499 | 94269bdebba77b58ad4739095e33173ece110fb6 | print('Welcome to JC Evolution Tree Generation') # this is the title
import random #idk what these do but i need a random number generator
import numpy
import scipy
import math
# Interactive Jukes-Cantor (JC69) simulator: evolve a DNA sequence one
# generation at a time, record each distinct mutant, then print the
# pairwise JC distance matrix for everything collected.
randomtree = input('Would you like to generate a random tree? (yes/no) ')
# Parameters: sequence length, number of sequences to collect, and
# alpha = per-site mutation probability per generation.
lengt = int(input('How long would you like your sequence to be? '))
term = int(input('How many sequences do you want back? '))
alpha = float(input('Please input the mutations per site per generation: '))
# Generate or input the initial sequence and store it in isq.
if randomtree == "yes" or randomtree == "Yes":
    isq = ''
    for m in range(lengt):
        letterm = random.choice(['A','T','G','C'])
        isq += letterm
    # isq is now a uniformly random DNA string of length lengt.
    print('Generating random tree')
elif randomtree == "no" or randomtree == "No":
    print('Generating tree with given sequence')
    isq = input('Please input the DNA sequence ') #input sequence
else:
    print('CODE ERROR BEEP BOOP BEEP BOOP FORCE QUIT PLS')
    print('Please try again using only yes or no')
    # BUG FIX: exit cleanly instead of continuing with isq undefined,
    # which used to crash with a NameError a few lines below.
    raise SystemExit(1)
n = 1        # sequences collected so far (slist already holds the root)
time = 0     # generation counter
D = numpy.zeros((term+1,term+1))    # JC distance matrix, filled at the end
print('This is the sequence you are using:', isq)
# One-generation JC transition matrix (rows/cols ordered A, T, G, C):
# stay with probability 1-alpha, move to each other base with alpha/3.
Mi = numpy.matrix([[1 - alpha, alpha / 3, alpha / 3, alpha / 3],
                   [alpha / 3, 1 - alpha, alpha / 3, alpha / 3],
                   [alpha / 3, alpha / 3, 1 - alpha, alpha / 3],
                   [alpha / 3, alpha / 3, alpha / 3, 1 - alpha]])
M = Mi           # M tracks Mi**t, the t-generation transition probabilities
slist = [isq]    # every distinct sequence observed
timelist = [0]   # generation at which each entry of slist first appeared
while n <= term:
    # Advance one generation: resample every site from its row of M.
    time = time + 1
    nsq = ''
    for letter in isq:
        ranmut = random.uniform(0,1) #one uniform draw decides this site's fate
        if letter == 'A':
            # Compare against the cumulative probabilities of row A.
            if ranmut <= M[0,0]:
                letter = 'A'
            elif ranmut <= M[0,1] + M[0,0]:
                letter = 'T'
            elif ranmut <= M[0,2] + M[0,1] + M[0,0]:
                letter = 'G'
            elif ranmut <= 1:
                letter = 'C'
        elif letter == 'T':
            if ranmut <= M[1,0]:
                letter = 'A'
            elif ranmut <= M[1,1] + M[1,0]:
                letter = 'T'
            elif ranmut <= M[1,2] + M[1,1] + M[1,0]:
                letter = 'G'
            elif ranmut <= 1:
                letter = 'C'
        elif letter == 'G':
            if ranmut <= M[2,0]:
                letter = 'A'
            elif ranmut <= M[2,1] + M[2,0]:
                letter = 'T'
            elif ranmut <= M[2,2] + M[2,1] + M[2,0]:
                letter = 'G'
            elif ranmut <= 1:
                letter = 'C'
        elif letter == 'C':
            if ranmut <= M[3,0]:
                letter = 'A'
            elif ranmut <= M[3,1] + M[3,0]:
                letter = 'T'
            elif ranmut <= M[3,2] + M[3,1] + M[3,0]:
                letter = 'G'
            elif ranmut <= 1:
                letter = 'C'
        nsq += letter
    M = M.dot(Mi) #advance to the (t+1)-generation transition matrix
    # Record the sequence only if this generation actually changed it;
    # otherwise keep evolving from the same state.
    if isq != nsq:
        slist.append(nsq)
        isq = nsq
        timelist.append(time)
        n = n + 1
        M = Mi
# Build the matrix of pairwise proportions of differing sites.
# NOTE(review): dis deliberately(?) aliases D, so row/column 0 of the
# final printed matrix keeps raw proportions for the root sequence —
# confirm that is the intended output.
dis = D
dif = 0
for l in range(0,term+1):
    for k in range(0,term+1):
        dif = 0
        for letter, let in zip(slist[l], slist[k]):
            if letter != let:
                dif = dif + 1
        dis[l,k] = dis[k,l] = dif/lengt
print('Proportion different = ', dis)
# Apply the JC correction d = -3/4 * ln(1 - 4/3 * p).
for i in range(1,term+1):
    # BUG FIX: start j at i so each symmetric pair is transformed exactly
    # once; the original swept the full square and — because dis aliases
    # D — applied the correction twice to every off-diagonal entry.
    for j in range(i,term+1):
        D[i,j] = D[j,i] = -3/4*numpy.log(1-4/3*dis[i,j])
# Final report: the sequences, their mutation times, and the JC matrix.
print('')
num = 1
for sequence in slist:
    print(num, sequence)
    num = num + 1
print('')
print('Mutation times: ', timelist)
print('')
print('JC Distance Matrix in Order 1 -', term)
print(D)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.