index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
12,200 | 7772238f55b1dabedc6fa41ace509ff2df181a02 | """Write a code to find string after $5 from string
"i have $2 in my pockets and $5 in wallet and i need $3 more"""
# Demo: print the text that follows the first "$5" in the sentence.
str1 = "i have $2 in my pockets and $5 in wallet and i need $3 more"
# str.partition splits on the first occurrence; index 2 is the tail.
print(str1.partition("$5")[2])
|
12,201 | 4ab6db4ddd15683d5486a6ce5d4a368545640938 | config = {
'system_plugins': ['former', 'logout'],
'twanager_plugins': ['former', 'lister'],
'instance_tiddlers': [
('system', [
'http://svn.tiddlywiki.org/Trunk/association/adaptors/TiddlyWebAdaptor.js',
'http://svn.tiddlywiki.org/Trunk/association/plugins/ServerSideSavingPlugin.js',
'http://svn.tiddlywiki.org/Trunk/association/plugins/TiddlyWebConfig.js',
'http://svn.tiddlywiki.org/Trunk/contributors/SaqImtiaz/verticals/unaforms/unaformsSetDefaultBagPlugin.js'
]),
('formtools', [
'http://svn.tiddlywiki.org/Trunk/contributors/SaqImtiaz/verticals/InputExForms/inputEx-subrecipe.recipe',
])
],
'log_level': 'DEBUG',
'css_uri': 'http://peermore.com/tiddlyweb.css',
}
|
12,202 | 76ebb40c1b0cbd5f736173db8f15ce502cd30526 | # from collections import Iterable
# 用迭代器实现斐波那契数列
class Iter:
    """Iterator over the Fibonacci numbers not exceeding a bound n."""

    def __init__(self, n):
        # Sliding pair of consecutive Fibonacci terms, seeded (0, 1).
        self._prev, self._curr = 0, 1
        self._limit = n

    def __iter__(self):
        # The object is its own iterator.
        return self

    def __next__(self):
        # Advance the window, then emit the newest term; stop once the
        # term exceeds the bound.
        self._prev, self._curr = self._curr, self._prev + self._curr
        if self._prev > self._limit:
            raise StopIteration('超过了n')
        return self._prev
# 用生成器yield实现斐波那契数列
def fib(n):
    """Generate the first n Fibonacci numbers (1, 1, 2, 3, 5, ...)."""
    a, b = 1, 1
    for _ in range(n):
        yield a
        a, b = b, a + b
def main():
    # Demo: compare the iterator-based and generator-based Fibonacci helpers.
    print(list(Iter(100)))  # all Fibonacci numbers <= 100
    print(list(fib(10)))  # the first 10 Fibonacci numbers
if __name__ == '__main__':
main() |
12,203 | cdc18a5bd86e169f6faad5da00f6d88e8ba0ca08 | import AdventOfCode.util.input_parser as parser
# Which puzzle part to solve (1 or 2) and whether to emit trace output.
PART = 2
DEBUG = False
def _debug(msg):
    # Print only when the module-level DEBUG flag is enabled.
    if DEBUG:
        print(msg)
if __name__=="__main__":
    # Advent of Code 2020 day 8: run the handheld console's program.
    filename = f"./raw_inputs/day8{'_debug' if DEBUG else ''}.txt"
    instructions = parser.parse(filename, transforms=[parser.Split(' ')])
    # Each instruction becomes [opcode, int_argument].
    for instruction in instructions:
        instruction[1] = int(instruction[1])
    accumulator = 0
    instr_ptr = 0
    visited = set()
    # Part 2 strategy: whenever a loop is detected, restart from scratch
    # and flip the next nop/jmp candidate; nop_jmp_switch selects which
    # occurrence (by order of appearance) gets flipped this run.
    nop_jmp_switch = 0
    nop_jmp_counter = 0
    while True:
        if instr_ptr in visited:
            # Revisiting an instruction means an infinite loop.
            print(f"Loop detected. Accum = {accumulator}")
            if PART == 1:
                break
            elif PART == 2:
                # Reset the machine and try flipping the next candidate.
                instr_ptr = 0
                accumulator = 0
                nop_jmp_counter = 0
                nop_jmp_switch += 1
                visited = set()
        if instr_ptr >= len(instructions):
            # Fell past the last instruction: normal termination.
            print(f"Program terminated. Accum = {accumulator}")
            break
        _debug(f"{instr_ptr} {accumulator} {instructions[instr_ptr]}")
        visited.add(instr_ptr)
        op = instructions[instr_ptr][0]
        if op in ('jmp', 'nop'):
            _debug(f"{op} detected at {instr_ptr}")
            # Flip exactly the nop_jmp_switch-th nop/jmp seen this run.
            if nop_jmp_counter == nop_jmp_switch:
                op = 'jmp' if op == 'nop' else 'nop'
                _debug(f"Op changed to {op}")
            nop_jmp_counter += 1
        arg = instructions[instr_ptr][1]
        jmp = 1
        if op == 'acc':
            accumulator += arg
        elif op == 'jmp':
            jmp = arg
        elif op == 'nop':
            pass
        instr_ptr += jmp
    print("fin.")
|
12,204 | 5fefacde36d88b946d4b46ff13ef236f31c5a0c5 | import math
# Read a count n, then n integers, from stdin.
n = int(input())
a=[]
for i in range(n):
    a.append(int(input()))
# NOTE(review): this prints every index from 0 up to max(a)-1, not the
# list elements -- looks like scratch/debug output; confirm intent.
for i in range(max(a)):
    print(i)
print(a)
|
12,205 | 7e7cd5dc99e6a03e47d2e22efc07aca532176ae7 | import re
import xml.etree.ElementTree as ET
from logwatch import log
from dbwatch import trends
from config import GROUPS
#processRRU.py
# Python based script watches redis queue for Sev3 or lower IR/RRU and RRU adds to GOS FU as soon as hits queue.
# Removes from GOS FU as soon as leaves queue or is closed...
# Changes it to Info section if RRU is implemented
# Uses user goswatch in trends
record_type="RRU"
def is_record_tracked(assignee_code, record_num, redis_handler):
    # Return record_num if it is cached in the redis hash for this assignee
    # group, False if not, and (implicitly) None if the lookup itself fails.
    try:
        if redis_handler.hexists(assignee_code, record_num):
            return record_num
        else:
            return False
    # NOTE(review): bare except hides the real redis error and falls
    # through returning None, which callers treat as "not tracked".
    except:
        log.error("Error: could not query redis for %s %s." % (record_type, record_num))
def add_redis_record(assignee_code, record_num, status, redis_handler):
    # Cache record_num -> status in the redis hash for this assignee group.
    try:
        log.info("%s %s Adding key value to redis" % (record_type, record_num))
        redis_handler.hset(assignee_code, record_num, status)
    # NOTE(review): broad except -- failures are logged but swallowed.
    except:
        log.error("Error: could not add %s %s to redis." % (record_type, record_num))
def delete_redis_record(assignee_code, record_num, redis_handler):
    # Remove record_num from the redis hash for this assignee group.
    try:
        log.info("%s %s Removing from redis" % (record_type, record_num))
        redis_handler.hdel(assignee_code, record_num)
    # NOTE(review): broad except -- failures are logged but swallowed.
    except:
        log.error("Error: could not delete %s %s from redis." % (record_type, record_num))
def follow_up_record(record_num, title, app, description, comment):
    # Upsert a follow-up entry in the trends DB: UPDATE first, and if no
    # row was affected (record not yet present) fall back to an INSERT.
    kwargs={'action': 'update_to_follow','record_num': record_num, \
    'title': title, 'app': app, 'description':description, 'comment': comment, 'rectype': record_type}
    query_result = trends(**kwargs)
    # update record...(will fail if record not in DB)
    if not query_result:
        # if record does not exist insert it
        log.info("%s %s UPDATE affected %s rows. Executing INSERT" % (record_type, record_num, query_result))
        kwargs={'action': 'follow-up','record_num': record_num, \
        'title': title, 'app': app, 'description':description, 'comment': comment, 'rectype': record_type}
        trends(**kwargs)
def schedule_record(record_num, title, start_date, start_time, comment):
    """Upsert a scheduled entry for a record in the trends DB.

    Tries an UPDATE first; when no row was affected (record not yet in
    the DB) it falls back to an INSERT-style 'schedule' action.
    """
    # BUGFIX: the update kwargs referenced undefined names planned_date /
    # planned_time (NameError at call time); map them from the
    # start_date / start_time parameters instead.
    kwargs = {'action': 'update_to_schedule', 'record_num': record_num,
              'title': title, 'planned_date': start_date,
              'planned_time': start_time, 'comment': comment,
              'rectype': record_type}
    query_result = trends(**kwargs)
    # update record...(will fail if record not in DB)
    if not query_result:
        # if record does not exist insert it
        log.info("%s %s UPDATE affected %s rows. Executing INSERT" %
                 (record_type, record_num, query_result))
        kwargs = {'action': 'schedule', 'record_num': record_num,
                  'title': title, 'start_date': start_date,
                  'start_time': start_time, 'comment': comment,
                  'rectype': record_type}
        trends(**kwargs)
def delete_trends_record (record_num, status):
    # Move the record into the trends info section (section 4) rather
    # than deleting it outright.
    # All RRUs should go to info section
    kwargs={'action': 'moveto', 'record_num': record_num, 'section': 4, 'status': status, 'rectype': record_type}
    trends(**kwargs)
def extract_xml_data(xml_record):
    """Pull the fields of interest out of one Aproach XML record.

    Returns a 14-element list: [record_num, title, assignee_code, status,
    target, targetsystems, priority, planned_date, planned_time, risk,
    qatteststatus, frtteststatus, pptteststatus, application].
    """
    title, assignee_code, status, target, targetsystems, priority, planned_date, planned_time, risk, qatteststatus, frtteststatus, pptteststatus, \
    application = "", "", "", "", "", "", "", "", "", "", "", "",""
    record_num = xml_record.attrib['id']
    # Single pass over all <field> nodes, dispatching on the xmlname attr.
    for node in xml_record.iter('field'):
        if node.attrib.get('xmlname') == 'Title':
            title = node.text.lstrip()
        if node.attrib.get('xmlname') == "AssigneeGroup":
            assignee_code = node.attrib.get('code')
        if node.attrib.get('xmlname') == "Status":
            status = node.text
        if node.attrib.get('xmlname') == "Target":
            target = node.text
        if node.attrib.get('xmlname') == "TargetSystems":
            targetsystems = node.text
        if node.attrib.get('xmlname') == "Priority":
            priority = node.text
        if node.attrib.get('xmlname') == "PlannedDate":
            planned_date = node.text
        if node.attrib.get('xmlname') == "PlannedTime":
            planned_time = node.text
        if node.attrib.get('xmlname') == "Risk":
            risk = node.text
        if node.attrib.get('xmlname') == "QATTestStatus":
            qatteststatus = node.text
        if node.attrib.get('xmlname') == "FRTTestStatus":
            frtteststatus = node.text
        if node.attrib.get('xmlname') == "PPTTestStatus":
            pptteststatus = node.text
        if node.attrib.get('xmlname') == "Application":
            application = node.text
    # Normalize long display values to the short forms used downstream.
    if target == "Production Systems":
        target = "PRD"
    elif target == "Test Systems":
        target = "TST"
    if qatteststatus == "Not Applicable":
        qatteststatus = "N/A"
    elif qatteststatus == "Passed":
        qatteststatus = "OK"
    if frtteststatus == "Not Applicable":
        frtteststatus = "N/A"
    elif frtteststatus == "Passed":
        frtteststatus = "OK"
    if pptteststatus == "Not Applicable":
        pptteststatus = "N/A"
    elif pptteststatus == "Passed":
        pptteststatus = "OK"
    # Only Pri/A is surfaced; every other priority is blanked.
    if priority != "Pri/A":
        priority = ""
    return [record_num, title, assignee_code, status, target, targetsystems, \
           priority, planned_date, planned_time, risk, qatteststatus, frtteststatus, pptteststatus, application]
def process_not_in_groups(aproach_fields, redis_handler):
    """Handle a record whose assignee group is NOT one of our GROUPS.

    If we were tracking it, stop: drop it from redis and move the trends
    entry to the info section. Otherwise the record is simply ignored.
    """
    cached_status = None
    # BUGFIX: extract_xml_data returns 14 fields (the last one is
    # 'application'); the original unpacked only 13, raising ValueError
    # on every call.
    record_num, title, assignee_code, status, target, targetsystems, priority, planned_date, \
        planned_time, risk, qatteststatus, frtteststatus, pptteststatus, application = aproach_fields
    # record is being tracked, but assigned to another GROUP..
    for assignee in GROUPS:
        if is_record_tracked(assignee, record_num, redis_handler):
            # retrieve the cached Aproach status in redis
            cached_status = redis_handler.hget(assignee, record_num)
            # delete (or move to info) record from trends,
            # then stop tracking the record
            delete_redis_record(assignee, record_num, redis_handler)
            delete_trends_record(record_num, status)
            break
    # Record was not being tracked and not with GROUP, so can ignore
    if not cached_status:
        log.info("%s %s not our record, for group %s" % (record_type, record_num, assignee_code))
def process_in_groups(aproach_fields, redis_handler):
    """Track/untrack an RRU that is assigned to one of our GROUPS.

    New (non-closed) records are cached in redis and pushed to the trends
    follow-up section; records whose status changed since caching are
    moved to the info section and dropped from redis.
    """
    record_num, title, assignee_code, status, target, targetsystems, priority, planned_date, \
        planned_time, risk, qatteststatus, frtteststatus, pptteststatus, application = aproach_fields
    # record already tracked and assigned to GROUP
    if is_record_tracked(assignee_code, record_num, redis_handler):
        # retrieve the cached Aproach status in redis
        cached_status = redis_handler.hget(assignee_code, record_num)
        # RRU back to logger for closure.
        if cached_status != status:
            # delete (or move to info) record from trends,
            # then stop tracking the record
            delete_redis_record(assignee_code, record_num, redis_handler)
            # BUGFIX: the original passed four arguments, including an
            # undefined name 'phase' (NameError). delete_trends_record
            # takes (record_num, status), as used in process_not_in_groups.
            delete_trends_record(record_num, status)
        else:
            log.info("%s %s already being tracked" % (record_type, record_num))
    # record not being tracked (first time in GROUP Queue )...
    else:
        # track it...
        closed_status = re.search(r'Closed', status, re.I)
        # if record is Closed or Solved no need to track
        if closed_status:
            log.info("%s %s already is in %s status no need to track" % (record_type, record_num, status))
        else:
            add_redis_record(assignee_code, record_num, status, redis_handler)
            if target == "PRD":
                comment = "%s RRU %s [%s] \n Sign-Offs: QAT: %s FRT: %s PPT: %s" % (target, priority, status, qatteststatus, frtteststatus, pptteststatus)
            else:
                comment = "%s RRU %s [%s]" % (target, priority, status)
            appacronym = re.search(r"[A-Z]{3,}", application)
            appdescription = re.match(r"[^\(]+", application)
            # Robustness: the Application field may lack an acronym or
            # description; fall back to empty strings instead of crashing
            # on None.group().
            app = appacronym.group() if appacronym else ""
            description = appdescription.group() if appdescription else ""
            follow_up_record(record_num, title, app, description, comment)
def process_RRU(xml_record, redis_handler):
    # Entry point: parse one RRU XML record and route it to the in-group
    # or not-in-group handler. (Python 2 module: note the print statement.)
    #print ET.tostring(xml_record)
    #return
    aproach_fields = extract_xml_data(xml_record)
    # NOTE(review): leftover debug print of the whole parsed record.
    print aproach_fields
    # record assigned to GROUP...
    # aproach_fields[2] = assignee_code
    if aproach_fields[2] in GROUPS:
        process_in_groups(aproach_fields, redis_handler)
    # record is not assigned to GROUP
    else:
        process_not_in_groups(aproach_fields, redis_handler)
|
12,206 | 723026633661221304f950cb04a3731ace71f977 | from __future__ import unicode_literals
from django.db import models
from add_user.models import *
# Create your models here.
# Lifecycle states for a customer lead; used by customer_data.status.
STATUS_CHOICES = (
    (0, 'Pending'),
    (1, 'Confirm'),
    (2, 'Rejected'))
class customer_data(models.Model):
    # Core lead/customer record captured by a DSE (sales executive).
    name=models.CharField(max_length=20,blank=True,null=True)
    mobile=models.CharField(max_length=40,null=True,blank=True)
    address=models.CharField(max_length=200,null=True,blank=True)
    email=models.CharField(max_length=30,null=True,blank=True)
    modified= models.DateTimeField(auto_now=True,auto_now_add=False)
    created= models.DateTimeField(auto_now=False,auto_now_add=True)
    followup=models.DateField(null=True)
    # NOTE(review): STATUS_CHOICES pairs *int* values with labels, but this
    # is a CharField whose default is the label "Pending" -- stored values
    # and choice values disagree; confirm the intended representation.
    status=models.CharField(max_length=100,choices=STATUS_CHOICES,default="Pending")
    # NOTE(review): pre-Django-2 style ForeignKey without on_delete
    # (required from Django 2.0 onward).
    dse=models.ForeignKey(dse_data,null=True)
    application=models.CharField(max_length=20,blank=True,null=True)
    town=models.CharField(max_length=20,blank=True,null=True)
    district=models.CharField(max_length=20,blank=True,null=True)
    tehsil=models.CharField(max_length=20,blank=True,null=True)
    financier=models.CharField(max_length=20,blank=True,null=True)
    location=models.CharField(max_length=20,blank=True,null=True)
    def __unicode__(self):
        # Python 2 string representation (this app targets Django on py2).
        return self.name
class vehicle_selected_data(models.Model):
    # Vehicle(s) a customer showed interest in, linked to the lead record.
    customer=models.ForeignKey(customer_data,null=True)
    name=models.CharField(max_length=20,blank=True,null=True)
    model=models.CharField(max_length=40,null=True,blank=True)
    quantity=models.IntegerField(default=0)
    modified= models.DateTimeField(auto_now=True,auto_now_add=False)
    created= models.DateTimeField(auto_now=False,auto_now_add=True)
    def __unicode__(self):
        return self.name
class followup_data(models.Model):
    # One follow-up interaction with a customer, with who performed it.
    customer=models.ForeignKey(customer_data,null=True)
    followupby=models.CharField(max_length=20,blank=True,null=True)
    modified= models.DateTimeField(auto_now=True,auto_now_add=False)
    created= models.DateTimeField(auto_now=False,auto_now_add=True)
class district_data(models.Model):
    # Lookup table of district names.
    name=models.CharField(max_length=20,blank=True,null=True)
    modified= models.DateTimeField(auto_now=True,auto_now_add=False)
    created= models.DateTimeField(auto_now=False,auto_now_add=True)
class town_data(models.Model):
    # Lookup table of town names.
    name=models.CharField(max_length=20,blank=True,null=True)
    modified= models.DateTimeField(auto_now=True,auto_now_add=False)
    created= models.DateTimeField(auto_now=False,auto_now_add=True)
class vehicle_data(models.Model):
    # Lookup table of vehicle names.
    name=models.CharField(max_length=20,blank=True,null=True)
    modified= models.DateTimeField(auto_now=True,auto_now_add=False)
    created= models.DateTimeField(auto_now=False,auto_now_add=True)
class vehicle_model_data(models.Model):
    # Lookup table of vehicle model names.
    name=models.CharField(max_length=20,blank=True,null=True)
    modified= models.DateTimeField(auto_now=True,auto_now_add=False)
    created= models.DateTimeField(auto_now=False,auto_now_add=True)
class financier_data(models.Model):
    # Lookup table of financier names.
    name=models.CharField(max_length=20,blank=True,null=True)
    modified= models.DateTimeField(auto_now=True,auto_now_add=False)
    created= models.DateTimeField(auto_now=False,auto_now_add=True)
class application_data(models.Model):
    # Lookup table of application (vehicle use) names.
    name=models.CharField(max_length=20,blank=True,null=True)
    modified= models.DateTimeField(auto_now=True,auto_now_add=False)
    created= models.DateTimeField(auto_now=False,auto_now_add=True)
class location_data(models.Model):
    # Lookup table of location names.
    name=models.CharField(max_length=20,blank=True,null=True)
    modified= models.DateTimeField(auto_now=True,auto_now_add=False)
    created= models.DateTimeField(auto_now=False,auto_now_add=True)
|
12,207 | 54905961f5da67d188acd3d289b59b48346852ab | __author__ = 'idfl'
def urlencoding(param):
    """Toggle between space- and underscore-separated forms of *param*.

    Underscores win: a value containing '_' has them turned into spaces;
    otherwise spaces become underscores; otherwise the (stringified)
    value is returned unchanged.
    """
    text = str(param)
    if '_' in text:
        return text.replace('_', ' ')
    if ' ' in text:
        return text.replace(' ', '_')
    return text
12,208 | 893765a68d8911343d4a1fd5c8acfc72d5eea102 | '''
python 魔法函数--指的是双下划线开头、双下划线结尾的函数
'''
class Company(object):
    """Wraps an employee list and exposes it via dunder protocols.

    Demonstrates __getitem__ (indexing and legacy iteration), __len__,
    __str__ and __repr__.
    """

    def __init__(self, employ_list):
        self.employee = employ_list

    def __getitem__(self, item):
        # Indexing support; iteration also works through __getitem__
        # (Python keeps calling with 0, 1, ... until IndexError).
        return self.employee[item]

    def __len__(self):
        # Enables len(instance); without it len() would raise TypeError.
        return len(self.employee)

    def __str__(self):
        # Human-readable form: names joined with dots.
        return ".".join(self.employee)

    def __repr__(self):
        return "__repr__"
employee = Company(["Jack", "Jones", "Lily"])
# The next line actually calls __str__
print("employee---", employee)
# Evaluating `employee` (in a REPL) and employee.__repr__() are equivalent
employee
employee.__repr__()
# len() below requires Company to implement __len__
print( ("employee的类型是%s 长度是%d") % (type(employee), len(employee)))
for em in employee:
    print(em)
# __add__ 数学运算使用
class Vector(object):
    """2-D vector; supports '+' between instances via __add__."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __add__(self, other_instance):
        # Component-wise addition returning a fresh Vector.
        return Vector(self.x + other_instance.x, self.y + other_instance.y)

    def __str__(self):
        return 'x is %s y is %s' % (self.x, self.y)
# Demo: '+' between Vector instances dispatches to Vector.__add__.
vector1 = Vector(1, 2)
vector2 = Vector(11, 12)
print(vector1+vector2)
class Num(object):
    """Number wrapper that delegates the built-in abs() to its value."""

    def __init__(self, x):
        self.x = x

    def __abs__(self):
        # abs(instance) -> abs(self.x)
        return abs(self.x)
# Demo: abs() on a Num instance calls Num.__abs__.
num1 = Num(-99)
print(abs(num1))
|
12,209 | 41e87d3269a12ec19d325b4946d225e07c2d0c29 | class Solution(object):
def countDigitOne(self, n):
"""
:type n: int
:rtype: int
"""
if n <= 0:
return 0
count = 0
base = 1
num = n
while num:
remainder = num % 10
num = num // 10
count += (num * base)
if remainder == 1:
count += (n % base + 1)
elif remainder > 1:
count += base
base *= 10
return count
|
12,210 | 69f40c0408b32fb04baee4f5348a2e45e699024b | from django.apps import AppConfig
class AdministracjaConfig(AppConfig):
    # Django application configuration for the 'administracja' app.
    name = 'administracja'
|
12,211 | 5328dd2508fcf4716c6dce8d4352941c06d8e643 | import requests
import re
# Browser-like User-Agent so the request is less likely to be bot-blocked.
headers = {
    'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1'
}
r = requests.get('https://music.163.com/#/user/home?id=1436779441', headers=headers)
# BUGFIX: the capture group was unterminated -- '<img src="(.*?)' lazily
# matches the empty string at every <img tag. Close the group on the
# ending quote so the src URL is actually captured.
# NOTE(review): this page is largely JS-rendered; the raw HTML may still
# contain few <img> tags -- confirm against the fetched response.
text = re.findall(r'<img src="(.*?)"', r.text)
print(text)
12,212 | 248f96ad9588539b7944676596752fd3648a1855 | from graph import Node
class Category(Node):
    """Graph node for a category; the title doubles as the node id."""

    def __init__(self, title):
        self.title = title
        self.id = title
        # Register with the base Node using the title as identifier.
        super().__init__(title)
12,213 | 80b425ecbd70bb2aec48b2ac40636ac915ff2731 | from .index_parser import IndexParser, InfoParser
from .page_parser import PageParser, Weibo, CommentParser, HotCommentParser
from .follow_parser import FollowParser
from .fans_parser import FansParser
from .search_weibo_parser import SearchWeiboParser
from .search_users_parser import SearchUsersParser |
12,214 | 3e43025c481b47671655d9537e7e8cfd9bea3f7e | from dlflow.tasks import TaskNode
from dlflow.mgr import task, model, config
from dlflow.features import Fmap
from dlflow.utils.sparkapp import HDFS
from dlflow.utils.locale import i18n
from pathlib import Path
from absl import logging
import tensorflow as tf
import shutil
@task.reg("train", "training")
class _Train(TaskNode):
    """DLFlow task: train a TF model on TFRecord features, then publish
    the resulting checkpoint locally and to HDFS."""
    parent_tag = TaskNode.set_tag("_BUILD", "TFRECORD_FEATURE")
    output_tag = TaskNode.set_tag("TRAINED_MODEL")
    bind_tasks = "_Build"
    def __init__(self):
        super(_Train, self).__init__()
    @TaskNode.timeit
    def run(self):
        # Let GPU memory grow on demand instead of pre-allocating it all.
        gpus = tf.config.experimental.list_physical_devices(
            device_type='GPU')
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(device=gpu,
                                                     enable=True)
        hdfs = HDFS()
        # Directory layout prepared by the bound _Build task.
        dirs = config._build_dirs
        ckpt_dir = dirs["ckpt_dir"]
        tmp_fmap_dir = dirs["tmp_fmap_dir"]
        tmp_ckpt_dir = dirs["tmp_ckpt_dir"]
        hdfs_ckpt_dir = dirs["hdfs_ckpt_dir"]
        hdfs_static_dir = dirs["hdfs_static_dir"]
        local_ckpt_dir = dirs["local_ckpt_dir"]
        local_ckpt_link = dirs["local_ckpt_link"]
        local_static_dir = dirs["local_static_dir"]
        # Feature map describing the TFRecord schema.
        fmap = Fmap.load(tmp_fmap_dir)
        input_cls = model[config.MODEL.input_name]
        files_pattern = hdfs.hdfs_whole_path(
            Path(config.HDFS_TFRECORD_DIR).joinpath("part*").as_posix())
        files = tf.io.gfile.glob(files_pattern)
        dataset = input_cls(fmap).tfr_inputs(files)
        model_cls = model[config.MODEL.model_name]
        model_ins = model_cls(fmap)
        # Resume from previous weights when a checkpoint already exists.
        if tmp_ckpt_dir.joinpath("h5weights", "weights.h5").exists():
            logging.info(i18n("Loading model weight success."))
            model_ins.load_weights(tmp_ckpt_dir)
        model_ins.train_act(dataset)
        # Replace any stale local checkpoint with the freshly trained one.
        if local_ckpt_dir.exists():
            logging.warning(
                i18n("Local ckpt directory already exists, "
                     "it will be overwritten: {}")
                .format(local_ckpt_dir))
            shutil.rmtree(local_ckpt_dir)
        logging.info(i18n("New ckpt is saved to {}").format(local_ckpt_dir))
        model_ins.save(local_ckpt_dir)
        logging.info(i18n("Creating soft link to local ckpt {}")
                     .format(local_ckpt_link))
        if local_ckpt_link.is_symlink():
            local_ckpt_link.unlink()
        local_ckpt_link.symlink_to(ckpt_dir)
        # Mirror checkpoint and static artifacts to HDFS, replacing old copies.
        if hdfs.exists(hdfs_ckpt_dir):
            logging.warning(i18n("The ckpt already exists on HDFS, "
                                 "the old one will be overwritten."))
            hdfs.delete(hdfs_ckpt_dir)
        logging.info(i18n("Put ckpt to HDFS: {}").format(hdfs_ckpt_dir))
        hdfs.put(local_ckpt_dir, hdfs_ckpt_dir)
        if hdfs.exists(hdfs_static_dir):
            hdfs.delete(hdfs_static_dir)
        hdfs.put(local_static_dir, hdfs_static_dir)
        # NOTE(review): "Training don." is a typo ("done"), but the literal
        # is also an i18n lookup key, so it is left unchanged here.
        logging.info(i18n("Training don."))
|
12,215 | 256c00847e3a54a16ab80beab2565b2af93a0f4a | from textblob import TextBlob
from textblob.exceptions import NotTranslated
import tweepy
import re
class Sentiment():
    """Twitter sentiment helper built on tweepy (fetch) + TextBlob (score).

    Returns integer percentage breakdowns {'positive', 'neutral',
    'negative'} for tweets matching a query or replying to a tweet.
    """

    def __init__(self, count):
        # Maximum number of tweets fetched per search.
        self.count = count
        # SECURITY(review): hard-coded Twitter API credentials committed to
        # source control -- rotate them and load from env/secret storage.
        consumer_key = 'M79jyuM8cWJC3MBLZMT1LAwft'
        consumer_secret = 'bk6Ukwej6Dd42mU3Hq4wgeBbjlZ5qFekHfCqTQjkRaxlQgKu6Y'
        access_token = '897799694440640513-ZsA5Whc5OHKpYCu1oumrf2EQ8SViUkl'
        access_token_secret = 'YAPBvOFG2GAxigo3yG13Ca6HpOQIJczgjZqIWscH3VJBN'
        try:
            self.auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
            self.auth.set_access_token(access_token, access_token_secret)
            self.api = tweepy.API(self.auth)
        except:
            print("Error: Authentication Failed")

    def clean_tweet(self, tweet):
        """Strip @mentions, URLs and non-alphanumeric characters."""
        return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())

    def tweets_analysis(self, query):
        """Sentiment percentages over recent (deduplicated) tweets matching *query*."""
        public_tweets = self.api.search(q=query, count=self.count)
        tweets = set([tweet.text for tweet in public_tweets])
        return self.analyze(tweets)

    def replies_analysis(self, tweet_url):
        """Sentiment percentages over direct replies to the tweet at *tweet_url*."""
        tweet_id = self.get_tweet_id(tweet_url)
        tweet = self.api.get_status(str(tweet_id))
        # Replies are found by searching mentions newer than the tweet id.
        query = 'to:' + tweet.author.screen_name
        tweets = self.api.search(q=query, since_id=tweet_id, count=self.count)
        replies = []
        for tweet in tweets:
            if tweet.in_reply_to_status_id == tweet_id:
                replies.append(tweet.text)
        return self.analyze(replies)

    def analyze(self, tweets):
        """Score each tweet's polarity; return an integer percentage breakdown."""
        positive_count = negative_count = neutral_count = 0
        for tweet in tweets:
            analysis = TextBlob(tweet)
            try:
                # Translate to English so polarity scoring is meaningful.
                analysis = analysis.translate(to='en')
            except NotTranslated:
                pass
            if analysis.sentiment.polarity > 0:
                positive_count += 1
            elif analysis.sentiment.polarity == 0:
                neutral_count += 1
            else:
                negative_count += 1
        # BUGFIX: guard against an empty tweet set -- the original divided
        # by zero when no tweets were supplied.
        total = (positive_count + negative_count + neutral_count) or 1
        negative = int(negative_count / total * 100)
        positive = int(positive_count / total * 100)
        neutral = int(neutral_count / total * 100)
        return {'positive': positive, 'neutral': neutral, 'negative': negative}

    def get_tweet_id(self, url):
        """Return the numeric status id: the last run of digits in the URL."""
        r = re.findall(r'\d+', url, re.M | re.I)
        return int(r[-1])
|
12,216 | 84c11f7f2301dfb7527fb210e6753c858c1782db | '''
Descripttion:
version:
Author: nlpir team
Date: 2020-08-08 10:20:49
LastEditors: cjh
LastEditTime: 2020-09-13 15:39:31
'''
import sys, os
import time
from threading import Thread
sys.path.insert(0, os.getcwd())
from nlpir047.corrector_dict.corrector_dict import CorrectorDict
from nlpir047.utils.corrector_utils.get_corrector_text_utils import get_correct_text
# Module-level dictionary corrector shared by all worker threads below.
correctorDict = CorrectorDict()
correctorDict.check_detector_dict_initialized()
class MyThread(Thread):
    # Worker thread that corrects one short sentence through the shared
    # module-level correctorDict.
    def __init__(self, sentence, start_idx):
        Thread.__init__(self)
        self.sentence = sentence
        # Offset of this sentence within the original full text.
        self.start_idx = start_idx
    def run(self):
        self.pred_details = correctorDict.correct_dict_short(self.sentence, self.start_idx)
    def get_result(self):
        # NOTE(review): only valid after run() completed; raises
        # AttributeError if the thread has not executed yet.
        return self.pred_details
def correct_dict_multi_thread(text, thread_max_count=10):
    # Correct *text* by splitting it into short sentences and running up
    # to thread_max_count correction threads per batch. Returns the
    # corrected text and the list of correction details.
    text_new = ''
    details = []
    blocks = correctorDict.split_2_short_text(text, include_symbol=True)
    blocks_list = list()
    # Chunk the (sentence, start_idx) pairs into batches of thread_max_count.
    for i in range(0, len(blocks), thread_max_count):
        blocks_list.append(blocks[i:i + thread_max_count])
    for block in blocks_list:
        pred_details = block_correct_dict_multi_thread(block, thread_max_count)
        details.extend(pred_details)
    # Apply the accumulated corrections back onto the original text.
    text_new = get_correct_text(text, details)
    return text_new, details
def block_correct_dict_multi_thread(block, thread_max_count):
    # Run one batch of correction threads and gather their details.
    # NOTE(review): thread_max_count is unused here; batching is done by
    # the caller.
    threads_list = []
    details = []
    for blk, start_idx in block:
        threads_list.append(MyThread(blk, start_idx))
    for thread in threads_list:
        thread.start()
    # Join in start order and collect each thread's result.
    for thread in threads_list:
        thread.join()
        pred_details = thread.get_result()
        details.extend(pred_details)
    return details
if __name__ == '__main__':
text = '(香港综合讯)香港大学校务委员会昨天(7月28日)在一项会议上以大比数通过解雇“占中三子”之一、法律系副教授戴耀廷。戴耀廷事后指该决定“是由大学以外的势力透过它的代理人作出”,香港中联办则大赞有关决定是“惩恶扬善、顺应民心的正义之举”。\
据香港01报道,校委会是18比2通过即时解雇戴耀廷的决定。校委会本科生代表成员李梓成昨天受访时基于保密理由表示不便透露投票情况,但直言对会议结果感到失望及愤怒,并说成员当中有不同观点,例如要求等候戴的上诉结果再作决定,他也赞成相关建议,但校委会最终决定就戴耀廷去留的议案付诸表决。\
戴耀廷担任香港大学法律系副教授多年,曾在2000年至2008年出任法律学院副院长。因倡导2014年的占中运动争取香港真普选,他去年以“串谋犯公众妨扰罪”“煽惑他人犯公众妨扰罪”被判监16个月,其后获准保释等候上诉。\
港大去年6月成立“探讨充分解雇理由委员会”,处理戴耀廷的教席问题。校方早前启动调查程序,并于昨天的校务委员会例会上,决定戴耀廷的去留。\
戴耀廷昨晚在面簿发表声明,指“辞退我的决定,并不是由香港大学,而是由大学以外的势力透过它的代理人作出”。\
他认为,此事标志着香港学术自由的终结,“香港学术机构的教研人员,再难自由地对公众,就一些政治或社会争议事情,发表具争议的言论。香港的学术机构再不能保护其成员免受内部及外在的干预”。\
他说,若仍有疑问“一国一制”是否已降临香港,“我的个案应足以释疑”。他也说:“当目睹所爱的大学沉沦,我感到心痛。不过,我会以另外的身份继续法治的研究及教学工作,也不会停止为香港的法治而战。”\
除了是占中运动发起人之一,戴耀廷倡导的公民抗命、违法达义等理念都不见容于北京。他针对去年区议会选举提出的风云计划,针对今年立法会选举提出的雷动计划,均被中国官媒斥为港版“颜色革命”、夺权阴谋等。\
香港中联办昨晚就在官网发文称港大的决定是“惩恶扬善、顺应民心的正义之举”,净化大学正常教学秩序和教学环境,坚定捍卫大学之道,对香港社会整体利益高度负责,维护社会公义。\
香港昨天(7月27日)新增145起冠病确诊病例,再破单日新高。香港特区政府宣布四项抗疫紧缩措施,包括餐馆全天禁止堂食、“限聚令”由四人收紧至两人,以及室内室外公共场所都强制戴口罩等。\
“禁堂食令”“限聚令”“戴口罩令”及体育场所及泳池暂时关闭等措施,自明天凌晨起生效,为期7天至8月4日。港府同时呼吁雇主容许雇员在家工作,市民减少聚会和去市场买菜。\
据《明报》报道,香港卫生防护中心传染病处主任张竹君昨天在记者会公布,在昨天新增确诊病例中,142起属于本土病例,其余为输入病例。在本土确诊病例中,59起源头不明。这也是香港连续第六天确诊病例数量破百起,目前累计确诊病例达到2778起。\
政务司长张建宗坦言,第三波疫情严峻,在社区大规模暴发的风险非常高。他说,“过去14天香港新增逾千宗病例,是之前的总和。”不少源头不明,而不同群组、行业都出现病例,范围广泛,难以消除隐形传播链,所以港府一定要严阵以待。\
对于建制派要求9月的立法会选举推迟一年,张建宗表示,能否如期举行要视乎疫情,底线是安全有序、公平公正地进行。由于投票人数众多,聚集风险非常高。\
他又称,中国中央政府高度关注香港疫情,提出全力援助。他形容香港有中国这个强大的后盾,特首林郑月娥也已向中央政府请求,包括加大力度检测,及协助在位于大屿山的亚洲国际博览馆建立“方舱医院”等。\
香港餐饮联业协会会长黄家和表示,目前已有约1200家食店停业,3000多家选择不做晚市提早关门,生意减少三成以上。若港府再禁午市堂食,预料生意将录得六成以上跌幅,整个7月会有50亿港元(8.9亿新元)亏损。\
他说,部分食店难以转做外卖,例如专门举办婚宴及宴会的酒楼等。而且,业界也难以单靠外卖就可应付人力及其他成本开支,他形容饮食业如风烛残年,希望港府再提供及时援助。\
香港医学会传染病顾问委员会主席梁子超认为,港府最好是同时要求更多港人居家工作,并确保有稳定的外卖供应,让市民不用抢购物资。\
香港理工大学医疗科技及信息学系副教授萧杰恒表示,其研究团队近日为香港出现的确诊病例进行基因排序,发现当地病例与欧洲输入病例的基因突变特征相似,相信新一波疫情源头,很大机会是由外地输入。港府对船员提供免检疫安排,近日惹来舆论狠批。\
(记者是《联合早报》香港特派员)一位的维权人士,疆独,六四\
'
t1 = time.time()
pred_text, pred_detail = correct_dict_multi_thread(text)
print(pred_text, pred_detail)
print(time.time() - t1)
|
12,217 | 8e91d0a26684dec52569ac87ff5819b95c4ce8f1 | # -*- coding: utf-8 -*-
import pytest
import subprocess
import sys
sys.setrecursionlimit(65535)
@pytest.mark.light
def test_wn18_cli():
    """Regression test: run the kbp CLI on WN18 and pin per-epoch losses.

    NOTE(review): golden-value asserts against stderr text are brittle --
    any change to logging format or numeric seed breaks them.
    """
    # Checking if results are still the same
    cmd = ['./bin/kbp-cli.py',
           '--train', 'data/wn18/wordnet-mlj12-train.txt',
           '--lr', '0.1',
           '--model', 'TransE',
           '--similarity', 'l1',
           '--margin', '2',
           '--embedding-size', '50',
           '--nb-epochs', '10']
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    # Progress lines are written to stderr; parse loss (and violation
    # ratio) per epoch and compare against the recorded values.
    for line in str(err).split("\\n"):
        if "Epoch: 1/1\\tLoss:" in line:
            assert line.split()[2] == "3.3778"
            assert line.split()[4] == "0.5889"
        if "Epoch: 2/1\\tLoss:" in line:
            assert line.split()[2] == "1.3837"
            assert line.split()[4] == "0.1561"
        if "Epoch: 3/1\\tLoss:" in line:
            assert line.split()[2] == "0.5752"
            assert line.split()[4] == "0.0353"
        if "Epoch: 4/1\\tLoss:" in line:
            assert line.split()[2] == "0.2984"
            assert line.split()[4] == "0.0071"
        if "Epoch: 5/1\\tLoss:" in line:
            assert line.split()[2] == "0.1842"
        if "Epoch: 6/1\\tLoss:" in line:
            assert line.split()[2] == "0.1287"
        if "Epoch: 7/1\\tLoss:" in line:
            assert line.split()[2] == "0.0980"
        if "Epoch: 8/1\\tLoss:" in line:
            assert line.split()[2] == "0.0795"
        if "Epoch: 9/1\\tLoss:" in line:
            assert line.split()[2] == "0.0653"
        if "Epoch: 10/1\\tLoss:" in line:
            assert line.split()[2] == "0.0562"
    # Checking if results are still the same
    # Second run: adversarial training with clauses enabled.
    cmd = ['./bin/kbp-cli.py',
           '--train', 'data/wn18/wordnet-mlj12-train.txt',
           '--lr', '0.1',
           '--model', 'TransE',
           '--similarity', 'l1',
           '--margin', '2',
           '--embedding-size', '50',
           '--nb-epochs', '5',
           '--clauses', 'data/wn18/clauses/clauses_0.9.pl',
           '--adv-weight', '1000',
           '--adv-lr', '0.1']
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    for line in str(err).split("\\n"):
        if "Epoch: 1/1\\tLoss:" in line:
            assert line.split()[2] == "3.7271"
            assert line.split()[4] == "0.6187"
        if "Epoch: 2/1\\tLoss:" in line:
            assert line.split()[2] == "1.9572"
            assert line.split()[4] == "0.7526"
        if "Epoch: 3/1\\tLoss:" in line:
            assert line.split()[2] == "1.0932"
            assert line.split()[4] == "0.6586"
        if "Epoch: 4/1\\tLoss:" in line:
            assert line.split()[2] == "0.6326"
            assert line.split()[4] == "0.4441"
        if "Epoch: 5/1\\tLoss:" in line:
            assert line.split()[2] == "0.5513"
if __name__ == '__main__':
    pytest.main([__file__])
|
12,218 | f9cda23525f18e2cd7c6a73377c29201154b0b66 | #!/usr/bin/env python
# This is a stub script loading test_server module, used by PyInstaller.
import runpy, sys, os
# Make sure hidden imports are found
import sitescripts.cms.bin.test_server
import markdown.extensions.attr_list
# Serve the current directory, then hand control to the real entry module
# as if it were executed with `python -m`.
sys.argv[1:] = [os.curdir]
runpy.run_module("sitescripts.cms.bin.test_server", run_name="__main__")
|
12,219 | 90955415024c4c911ed93b6e21612fb028149f89 | #!/usr/bin/env python2
import os
import glob
import subprocess
import re
import optparse
def main():
    # Parse command-line options and kick off OCR on the given DJVU file.
    parser = optparse.OptionParser(usage='Usage: %prog -i <source directory> <options> -o <output file>')
    parser.add_option('-i', dest='djvu', action='store',\
        help='the source djvu file to perfrom OCR on')
    parser.add_option('-l', dest='lang', action='store', default='eng',\
        help="OCR language (default: 'eng')" )
    parser.add_option('-d', dest='debug', action='store_true', default=False,\
        help='enable debugging information' )
    parser.add_option('-t', dest='tess_out', action='store_true', default=False,\
        help='enable tesseract output' )
    parser.add_option('-b', dest='bitonal', action='store',\
        help='use imagemagick to convert the image to bitonal black and white, with a threshold given in %' )
    parser.add_option('-o', dest='output', action='store',\
        help='output a human readable text file to a given file path' )
    parser.add_option('-u', dest='update', action='store_true', default=False,\
        help='update the djvu file text layer' )
    (opts, args) = parser.parse_args()
    # check mandatory options
    if opts.djvu is None:
        print("The input file '-i' must be given\n")
        parser.print_help()
        exit(-1)
    # The constructor runs the whole OCR pipeline.
    DjvuTesseract(opts)
class DjvuTesseract():
def command(self, command, out=False, err=False):
"""Use subprocess.Popen" to run a command on the terminal and return the s result
Required for python 2.6 since subprocess.check_output doesn't exist
This function will trash output unless you explicitly ask it not to
with quiet=False. This is so tesseract won't spam you with rubbish"""
if out:
std_out = subprocess.PIPE
else:
std_out = None
if not err:
std_err = subprocess.PIPE
else:
std_err = None
proc = subprocess.Popen(command, stdout = std_out, stderr=std_err)#std_out)
out, err = proc.communicate()
return out, err
def calculate_djvu_length(self):
cmd = ['djvused', self.opts.djvu, '-e', 'n']
out, err = self.command(cmd, True)
self.num_pages = int(out)
if self.opts.debug:
print "\t(INF) number of pages: %d\n" % self.num_pages
def format_ocr_text(self, page):
"""Format a page's OCR'd text into a DJVU friendly form"""
#read out of the text file that tesseract made
ocr_text = open(self.ocr_text, 'r')
# write into this file
djvu_text = open( self.djvu_text, 'w' )
text = "(page 0 0 1 1\n"
self.out_text.write('\n## Page %d ###\n\n' % page )
for line in ocr_text:
#write to the human readable file
self.out_text.write(line)
# add each line of text
# escaping " to \" as we go
text += '(line 0 0 1 1 "%s")\n' % line.replace('"', r'\"').strip()
text += ")\n"
djvu_text.write( text )
ocr_text.close()
djvu_text.close()
def process_pages(self):
for page in range(1, self.num_pages+1): #djvu pages are 1-indexed
if self.opts.debug:
print "\tPerforming OCR on page %d" % page
# Extract page an image
cmd = ['ddjvu', '-format=tiff', '-page=%d' % page, self.opts.djvu, self.temp_img]
out, err = self.command(cmd)
#Convert to bitonal if required
if self.opts.bitonal:
if self.opts.debug:
print "\tApplying bitonal conversion"
cmd = ['convert', self.temp_img, '-threshold', self.opts.bitonal, self.temp_img]
out, err = self.command(cmd)
# Perform OCR on the image
cmd = ['tesseract', self.temp_img, self.temp_ocr, '-l', self.opts.lang]
out, err = self.command(cmd, err=self.opts.tess_out)
if self.opts.debug:
print "\t OCR complete"
# convert the OCR'd text to a DJVU friendly fomat and a human-friendly format
self.format_ocr_text(page)
# update the DJVU text layer
if self.opts.update:
# replace the text in the DJVU file
cmd = ['djvused', self.opts.djvu, '-e', 'select %d; remove-txt' % page, "-s"]
out, err = self.command(cmd)
cmd = ['djvused', self.opts.djvu, '-e', 'select %d; set-txt %s'% (page, self.djvu_text), "-s"]
out, err = self.command(cmd)
def process_djvu(self):
if self.opts.debug:
print "(INF) Processing %s" % self.opts.djvu
# calculate DJVU length
self.calculate_djvu_length()
self.process_pages()
    def __init__(self, opts):
        """Store options, set up temp-file paths, open the output file, and run.

        NOTE(review): processing starts from the constructor (process_djvu is
        called at the end), and self.out_text is never explicitly closed.
        """
        self.opts = opts
        # scratch image extracted per page by ddjvu
        self.temp_img = "/tmp/TESSERACT-OCR-TEMP.tif"
        self.temp_ocr = "/tmp/TESSERACT-OCR-TEMP" #tesseract adds .txt
        self.ocr_text = self.temp_ocr + '.txt'
        # file to dump page-wise formatted OCR'd text into
        self.djvu_text = "/tmp/TESSERACT-OCR-TEMP.djvu.txt"
        # file to dump human readable output into for the whole file
        if self.opts.output:
            output_filename = self.opts.output
        else: #dump in /tmp/
            output_filename = "/tmp/TESSERACT-OCR-TEMP.output.txt"
        self.out_text = open(output_filename, 'w')
        self.process_djvu()
if __name__ == "__main__":
    try:
        main()
    finally:
        # no-op placeholder; cleanup (e.g. removing /tmp scratch files) was never implemented
        None
"""
# note: structure which works
# print TXTDJVU "(page 0 0 1 1\n" ;
# print TXTDJVU " (line 0 0 1 1 \"toto\")\n" ;
# print TXTDJVU " (line 0 0 1 1 \"toto la la\")\n";
# print TXTDJVU ")\n" ;
"""
|
def greetings(get_data):
    """Decorator: turn the wrapped function's 'name [middle] surname' string
    into a capitalized greeting such as 'Hello Jan Nowak'.

    An empty string yields an empty greeting; three words capitalize all
    three parts, otherwise the first two words are used.
    """
    def get_greeting(*args):
        words = get_data(*args).split()
        if not words:
            return ""
        if len(words) == 3:
            return "Hello " + " ".join(w.capitalize() for w in words)
        # two (or more) words: greet with the first two, capitalized
        return "Hello " + words[0].capitalize() + " " + words[1].capitalize()
    return get_greeting
@greetings
def name_surname(name):
    # Identity function: @greetings converts the raw "name surname" string
    # into the capitalized greeting.
    return name
# assert name_surname("jan nowak") == "Hello Jan Nowak"
def is_palindrome(palindrome):
    """Decorator: append ' - is palindrome' or ' - is not palindrome' to the
    wrapped function's string result.

    Punctuation ('.', ',', '?', '!') and spaces are ignored and the
    comparison is case-insensitive.
    """
    def check_palindrome(*args):
        original = palindrome(*args)
        # strip punctuation/spaces, then compare case-insensitively
        cleaned = original.translate(str.maketrans("", "", ".,?! ")).lower()
        if cleaned == cleaned[::-1]:
            return original + " - is palindrome"
        return original + " - is not palindrome"
    return check_palindrome
@is_palindrome
def sentence(palindrome):
    # Identity function: @is_palindrome appends the palindrome verdict.
    return palindrome
#assert sentence("Eva, can I see bees in a cave?") == "Eva, can I see bees in a cave? - is palindrome"
#import pytest
def format_output(*args):
    """Decorator factory: reshape the wrapped function's dict result.

    Each name in *args becomes a key of the returned dict. A name of the
    form 'a__b' joins the values of source keys 'a' and 'b' with a space
    (leading/trailing whitespace stripped, matching the original behavior).

    Raises:
        ValueError: if any referenced key is missing from the source dict.
        (The original used a bare ``except`` that converted *every* error,
        including programming mistakes, into ValueError; this catches only
        KeyError and chains the cause.)
    """
    requested_keys = list(args)
    def dec_make_dict(dict_received):
        def make_dict(*call_args):
            source = dict_received(*call_args)
            result = {}
            for key in requested_keys:
                try:
                    parts = [source[name] for name in key.split("__")]
                except KeyError as exc:
                    raise ValueError(key) from exc
                # join + strip reproduces the original's concatenate-then-strip
                result[key] = " ".join(parts).strip()
            return result
        return make_dict
    return dec_make_dict
# Demo: join first/last name under one key and pass 'city' and 'lato' through.
@format_output("first_name__last_name", "city","lato")
def first_func(dict_received):
    return dict_received
# Demo of the failure path: 'first_names' / 'age' are absent from the input
# dict below, so calling second_func with it raises ValueError.
@format_output("first_names", "age")
def second_func(dict_received):
    return dict_received
first_func({
    "first_name": "Jan",
    "last_name": "Kowalski",
    "city": "Warsaw",
    "lato":"nie"
})
# with pytest.raises(ValueError):
#     second_func({
#         "first_name": "Jan",
#         "last_name": "Kowalski",
#         "city": "Warsaw",
#         "lato":"nie"
#     })
class A:
    # Empty placeholder class; methods are attached to it below via the
    # add_class_method / add_instance_method decorator factories.
    pass
from functools import wraps
def add_class_method(class_name):
    """Decorator factory: attach *method* to *class_name* as a classmethod.

    The wrapper absorbs the implicit ``cls`` argument before delegating, so
    plain functions defined without a ``cls`` parameter (like ``foo`` below)
    can be attached and called as ``Class.method(...)``.

    Fix: the original wrapper forwarded ``cls`` into the wrapped function
    (``method(cls, ...)``), which raised TypeError for zero-argument
    functions on every call (e.g. ``A().foo()``).
    """
    def add_method(method):
        @classmethod
        @wraps(method)
        def wrapper(cls, *args, **kwargs):
            # drop cls: the attached function does not expect it
            return method(*args, **kwargs)
        setattr(class_name, method.__name__, wrapper)
        return method
    return add_method
def add_instance_method(class_name):
    """Decorator factory: attach *method* to *class_name* as a staticmethod,
    so it is callable both on the class and on instances."""
    def attach(method):
        def delegate(*call_args, **call_kwargs):
            return method(*call_args, **call_kwargs)
        wrapped = wraps(method)(delegate)
        # staticmethod: no implicit self/cls is injected on lookup
        setattr(class_name, method.__name__, staticmethod(wrapped))
        return method
    return attach
# Attach foo to A as a classmethod and boo as a staticmethod.
@add_class_method(A)
def foo():
    return "Hello again!"
@add_instance_method(A)
def boo():
    return "Hello again!"
A.boo()
# NOTE(review): with the original add_class_method, the classmethod wrapper
# passes cls through to foo(), which takes no arguments, so this call raises
# TypeError -- confirm the intended wrapper contract.
A().foo()
|
12,221 | 150364697fa10fd52cd71b5a9961a187de8f3c13 | # -*- coding: utf-8 -*-
# 斐波那契的四种实现与比较
# 用fib(100)来比较
def fib1(n):
    """Naive recursive Fibonacci: exponential time. Valid for n >= 1."""
    if n in (1, 2):
        return 1
    return fib1(n - 1) + fib1(n - 2)
def fib2(n):
    """Iterative Fibonacci: O(n) time, O(1) space.

    Fix: the original used ``xrange``, a Python-2-only builtin; ``range``
    behaves identically here and works on both Python 2 and 3.
    """
    if n == 1 or n == 2:
        return 1
    first, second = 1, 1
    for _ in range(n - 2):
        # slide the (first, second) window one step forward
        first, second = second, first + second
    return second
def fib3(n):
    """Closed-form (Binet) Fibonacci: O(1), but floating-point precision
    makes it unreliable for large n."""
    import math
    root5 = math.sqrt(5)
    golden = (1 + root5) / 2.0
    conjugate = (1 - root5) / 2.0
    # identical arithmetic to the original, so float results match exactly
    return int(root5 / 5.0 * (golden ** n - conjugate ** n))
def fib4(n):
    """Matrix-power Fibonacci (exact integers, no precision issues).

    Repeatedly multiplies [[1,1],[1,0]] into itself via the helper `two`;
    after n-2 steps mat[0][0] holds fib(n).
    """
    mat = [[1, 1], [1, 0]]
    two(n, mat)
    return mat[0][0]
def two(n, t):
    """Helper: apply the 2x2 Fibonacci matrix step max(n,2)-2 times, in place."""
    steps = max(n, 2) - 2
    for _ in range(steps):
        a, b, c, d = t[0][0], t[0][1], t[1][0], t[1][1]
        t[0][0], t[0][1], t[1][0], t[1][1] = a + b, a, c + d, c
if __name__ == '__main__':
    #print fib1(100)
    # NOTE: fib1(100) stays commented out -- the naive recursion would need
    # an astronomical number of calls (exponential time).
    print fib2(10)
    print fib3(10)
    print fib4(10)
|
12,222 | 4db9df3b60c70b39d5b1ca2043304e3e8afb9106 | import numpy as np # for matrix computation and linear algebra
import matplotlib.pyplot as plt # for drawing and image I/O
import scipy.io as sio # for matlab file format output
import itertools # for generating all combinations
import scipy.linalg as scpl
def estimate_Q(u, x, ix):
    """Estimate a 3x4 camera projection matrix Q by DLT over all 6-point subsets.

    For each of the correspondences selected by `ix`, two linear (DLT) rows
    are built; every combination of 6 correspondences yields a 12x12 system,
    and each 11-row subsystem's null space gives a candidate Q. Candidates
    are ranked by their maximal reprojection error over ALL points.

    :param u: 2xN array of image points
    :param x: 3xN array of scene points
    :param ix: indices of the candidate correspondences
    :return: Q, points_sel, err_max, err_points, Q_all
    where
        Q: best projection matrix
        points_sel: indices of the 6 points
        err_max: vector of all maximal errors for all tested matrices
        err_points: vector of point errors for the best camera
        Q_all: cell matrix containing all tested camera matrices
    """
    points_sel, Q, Q_all, m_all = list(), list(), list(), list()
    max_err = []
    xs = np.array([x.T[i] for i in ix])
    us = np.array([u.T[i] for i in ix])
    for i in range(len(ix)):
        # build the two DLT rows for correspondence i:
        # m1 = [X 1 0000 -u*X -u] , m2 = [0000 X 1 -v*X -v] (homogeneous X)
        m1 = list(xs[i])
        m2 = [0] * 4
        m1.append(1)
        m2.extend(m1)
        m1.extend([0] * 4)
        m1.extend((-us[i][0]) * np.array(m1)[:4])
        m2.extend((-us[i][1]) * np.array(m2)[4:])
        m_all.append([m1, m2])
    m_all = np.array(m_all)
    for inx in itertools.combinations(range(0, len(m_all)), 6):
        points_sel = np.array([ix[i] for i in range(len(ix)) if i in inx])
        M = m_all[inx, :]
        M = M.reshape(12, 12)
        # drop one row at a time: an 11x12 system has a 1-D null space -> Q
        for i in itertools.combinations(range(0, len(M)), 11):
            M11 = M[i, :]
            Q = scpl.null_space(M11).reshape(3, 4)
            errors = list()
            for point_i in range(len(x[0])):
                # reprojection error: project homogeneous X, dehomogenize, compare to u
                point = list(x.T[point_i])
                point.append(1)
                point = np.array(point)
                projected = Q @ point
                errors.append(np.linalg.norm((projected / projected[-1])[
                    :-1] - u[:, point_i]))
            max_err.append(max(errors))
            Q_all.append((Q, max(errors), points_sel, errors))
    # best camera = smallest maximal reprojection error
    Q_all.sort(key=lambda x: x[1])
    return Q_all[0][0], Q_all[0][2], max_err, Q_all[0][3], Q_all
if __name__ == "__main__":
    # Load the test image and the measured image (u) / scene (x) points.
    img = plt.imread('daliborka_01.jpg')
    img = img.copy()
    f = sio.loadmat("daliborka_01-ux.mat")
    u = f["u"]
    x = f["x"]
    # candidate correspondence indices used for camera estimation
    ix = np.array([86, 77, 83, 7, 20, 45, 63, 74, 26, 38])
    Q, points_sel, err_max, err_points, Q_all = estimate_Q(u, x, ix)
    ####### img 1 ########
    # log10 of the maximal reprojection error for every tested Q
    fig = plt.figure() # figure handle to be used later
    fig.clf()
    plt.title('Maximal reprojection error for each tasted Q')
    plt.xlabel('selection index')
    plt.ylabel('log_10 of maximum reprojection error [px]')
    plt.plot(np.log10(err_max))
    # NOTE(review): legend is requested after show(); in non-interactive use
    # it may not appear in the window (it is still drawn into the saved PDF).
    plt.show()
    plt.legend(loc='best')
    fig.savefig("02_Q_maxerr.pdf")
    ####### img 2 ########
    # original points vs. their reprojections through the best Q
    fig = plt.figure() # figure handle to be used later
    fig.clf()
    plt.title('original and reprojected points')
    plt.imshow(img)
    plt.xlabel('x [px]')
    plt.ylabel('y [px]')
    plt.plot(u[0], u[1], 'b.', fillstyle='none', label="Orig. pts")
    plt.plot(u[0][points_sel], u[1][points_sel],
             'y.',
             fillstyle='full',
             label="Used for Q")
    pr_array = []
    for i in range(len(u[0])):
        # project homogeneous scene point through Q and dehomogenize
        point = list(x.T[i])
        point.append(1)
        point = np.array(point)
        projected = Q @ point
        projected /= projected[-1]
        projected = projected[:-1]
        pr_array.append(projected)
    pr_array = np.array(pr_array)
    pr_array = pr_array.T
    plt.plot(pr_array[0], pr_array[1],
             'ro',
             fillstyle='none',
             label="Reprojected")
    plt.legend(loc='best')
    plt.show()
    fig.savefig("02_Q_projections.pdf")
    ####### img 3 ########
    # reprojection error vectors, magnified 100x for visibility
    fig = plt.figure() # figure handle to be used later
    fig.clf()
    plt.title('Reprojected errors (100x enlarged)')
    plt.imshow(img)
    plt.xlabel('x [px]')
    plt.ylabel('y [px]')
    plt.plot(u[0], u[1], 'b.', fillstyle='none', label="Orig. pts")
    plt.plot(u[0][points_sel], u[1][points_sel],
             'y.',
             fillstyle='full',
             label="Used for Q")
    e = 100 * (pr_array - u)
    # first segment drawn separately so the legend gets exactly one entry
    plt.plot((u[0][0], u[0][0] + e[0][0]), (u[1][0], u[1][0] + e[1][0]),
             'r-',
             fillstyle='none',
             label="Errors (100x)")
    plt.plot((u[0], u[0] + e[0]), (u[1], u[1] + e[1]),
             'r-',
             fillstyle='none')
    plt.legend(loc='best')
    plt.show()
    fig.savefig("02_Q_projections_errors.pdf")
    ####### img 4 ########
    # per-point reprojection error of the winning camera
    fig = plt.figure() # figure handle to be used later
    fig.clf()
    plt.title('All point reprojection errors for the best Q')
    plt.xlabel('point index')
    plt.ylabel('reprojection error [px]')
    plt.plot(Q_all[0][3])
    plt.show()
    plt.legend(loc='best')
    fig.savefig("02_Q_pointerr.pdf")
12,223 | c87a8cb8228f0d289fe90f9a6e7799fbd21990da | import unittest
from .bubble_sort import bubble_sort
class TestBubbleSort(unittest.TestCase):
    """Unit tests comparing bubble_sort against the builtin sorted().

    Fix: the original used the local name ``input``, shadowing the builtin;
    renamed to ``data`` throughout.
    """
    def test_case1(self):
        """Reverse-ordered input."""
        data = [3, 2, 1]
        actual = bubble_sort(data)
        self.assertEqual(actual, sorted(data))
    def test_case2(self):
        """Longer reverse-ordered input."""
        data = [5, 4, 3, 2, 1]
        actual = bubble_sort(data)
        self.assertEqual(actual, sorted(data))
    def test_case3(self):
        """Unordered input with a duplicate."""
        data = [0, 4, 6, 8, 4, 7, 9, 10]
        actual = bubble_sort(data)
        self.assertEqual(actual, sorted(data))
    def test_case4(self):
        """Single-element input."""
        data = [1]
        actual = bubble_sort(data)
        self.assertEqual(actual, sorted(data))
12,224 | 1517ce7474962dcfdaee5106b6de7d8f8e886169 | """
챕터: day7
주제: file 쓰기
문제:
현재 디렉토리 아래에 fruit.txt 파일을 생성하여, 사용자가 입력하는 과일을 한 줄에 하나씩 3개를 저장하라.
작성자: 윤경환
작성일: 2018.12.06
"""
import os.path
print(os.getcwd())
# Collect three fruit-name/price pairs and write them one per line as
# "name - price". Fixes vs. the original: the file is named fruit.txt as
# documented (was the typo "friut.txt"), a with-block guarantees the file
# is closed even on bad input, and the triplicated prompt/write code is a loop.
with open("fruit.txt", "wt") as f:  # wt: overwrite as text
    for _ in range(3):
        name = input("과일: ")          # fruit name
        price = int(input("가격: "))    # price (must be an integer)
        f.write(name + " - ")
        f.write(str(price) + "\n")
12,225 | 008e99b5d06db24d69a60f0972d1a99eeb8e3ed2 | import random
# Number-guessing game: keep prompting until the secret 0..100 integer is hit.
while True:
    try:
        k = random.randint(0, 100)
        x = int(input("请输入0~100之间的整数:"))
        tem = 0  # number of wrong guesses
        while x != k:
            tem += 1
            if x > k:
                print("遗憾,太大了")
            else:
                print("遗憾,太小了")
            # int() instead of the original eval(): never eval untrusted
            # user input (arbitrary-code-execution risk)
            x = int(input("请输入0~100之间的整数:"))
    except ValueError:
        # narrowed from a bare except: only a bad integer parse restarts the game
        print("输入内容必须为整数!")
    else:
        print("预测{}次,你猜中了".format(tem))
        break
|
12,226 | 7e635eedbd50de0588ac09a0e470e54436f72dea | # import necessary libraries
from models import create_classes
import os
from flask import (
Flask,
render_template,
jsonify,
request,
redirect)
#################################################
# Flask Setup
#################################################
app = Flask(__name__)

#################################################
# Database Setup
#################################################
from flask_sqlalchemy import SQLAlchemy

# Use DATABASE_URL from the environment when present, otherwise fall back to
# a local SQLite file. (The original line was a SyntaxError:
# `os.environ.get "sqlite:///db.sportsbetting.sqlite"`; the env-var name is
# assumed to be DATABASE_URL -- TODO confirm against the deployment config.)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get(
    "DATABASE_URL", "sqlite:///db.sportsbetting.sqlite")
# Remove tracking modifications
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False

db = SQLAlchemy(app)
# sportsbetting is the model class produced by models.create_classes
sportsbetting = create_classes(db)
# create route that renders index.html template
@app.route("/")
def home():
    """Render the landing page."""
    return render_template("index.html")
# Query the database and send the jsonified results
@app.route("/send", methods=["GET", "POST"])
def send():
    """Persist a pair of UFC fight records posted from form.html.

    NOTE(review): this module registers several view functions named `send`
    on the same "/send" rule; Flask rejects duplicate endpoint names, so the
    later views need distinct function names and routes.
    """
    if request.method == "POST":
        UFC_Red_Fighter1 = request.form["Red_Corner_Fighter1"]
        UFC_Blue_Fighter1 = request.form["Blue_Corner_Fighter1"]
        UFC_Winner1 = request.form["Winner1"]
        UFC_Red_Fighter_odds1 = request.form["Red_Fighter_odds1"]
        UFC_Blue_Fighter_odds1 = request.form["Blue_Fighter_odds1"]
        UFC_Red_Fighter2 = request.form["Red_Corner_Fighter2"]
        UFC_Blue_Fighter2 = request.form["Blue_Corner_Fighter2"]
        UFC_Winner2 = request.form["Winner2"]
        UFC_Red_Fighter_odds2 = request.form["Red_Fighter_odds2"]
        UFC_Blue_Fighter_odds2 = request.form["Blue_Fighter_odds2"]
        # Use the local variables read from the form (the original passed
        # undefined names such as Red_Corner_Fighter1) and instantiate the
        # model class bound to module-level `sportsbetting` (the original
        # referenced an undefined `Sportsbetting`).
        record = sportsbetting(UFC_Red_Fighter1=UFC_Red_Fighter1,
                               UFC_Blue_Fighter1=UFC_Blue_Fighter1,
                               UFC_Winner1=UFC_Winner1,
                               UFC_Red_Fighter_odds1=UFC_Red_Fighter_odds1,
                               UFC_Blue_Fighter_odds1=UFC_Blue_Fighter_odds1,
                               UFC_Red_Fighter2=UFC_Red_Fighter2,
                               UFC_Blue_Fighter2=UFC_Blue_Fighter2,
                               UFC_Winner2=UFC_Winner2,
                               UFC_Red_Fighter_odds2=UFC_Red_Fighter_odds2,
                               UFC_Blue_Fighter_odds2=UFC_Blue_Fighter_odds2
                               )
        db.session.add(record)
        db.session.commit()
        return redirect("/", code=302)
    return render_template("form.html")
@app.route("/send", methods=["GET", "POST"])
def send():
    """Persist a soccer-match odds record posted from form.html.

    NOTE(review): duplicate view name/route -- see the first `send` view;
    Flask will reject the second registration of endpoint 'send'.
    """
    if request.method == "POST":
        Soccer_Match = request.form["Matchup_US_P"]
        Visitor_Odd = request.form["Visitor_Odd"]
        Draw_Odd = request.form["Draw_Odd"]
        Home_Odd = request.form["Home_Odd"]
        Soccer_Match_Result = request.form["True_Result"]
        # Use the local variables (the original passed undefined names such
        # as Matchup_US_P) and the module-level model class `sportsbetting`.
        record = sportsbetting(Soccer_Match=Soccer_Match,
                               Soccer_Visitor_Odd=Visitor_Odd,
                               Soccer_Draw_Odd=Draw_Odd,
                               Soccer_Home_Odd=Home_Odd,
                               Soccer_Match_Result=Soccer_Match_Result
                               )
        db.session.add(record)
        db.session.commit()
        return redirect("/", code=302)
    return render_template("form.html")
@app.route("/send", methods=["GET", "POST"])
def send():
    """Persist a horse-racing bet record posted from form.html.

    NOTE(review): duplicate view name/route -- see the first `send` view.
    """
    if request.method == "POST":
        Horse = request.form["Horse"]
        Horse_Odds = request.form["Odds"]
        Horse_Bet_Type = request.form["Bet Type"]
        Horse_Result = request.form["Result"]
        # The original keyword arguments used bare multi-word names such as
        # `Bet Type` (a SyntaxError); pass the local variables instead, and
        # instantiate the module-level model class `sportsbetting`.
        record = sportsbetting(Horse=Horse,
                               Horse_Odds=Horse_Odds,
                               Horse_Bet_Type=Horse_Bet_Type,
                               Horse_Result=Horse_Result
                               )
        db.session.add(record)
        db.session.commit()
        return redirect("/", code=302)
    return render_template("form.html")
@app.route("/send", methods=["GET", "POST"])
def send():
    """Persist a football odds record posted from form.html.

    NOTE(review): duplicate view name/route -- see the first `send` view.
    """
    if request.method == "POST":
        Football_Home_Team = request.form["Home Team"]
        Football_Away_Team = request.form["Away Team"]
        Football_Home_Odds = request.form["Home Odds Open"]
        Football_Away_Odds = request.form["Away Odds Open"]
        Football_Home_Win = request.form["Home Win"]
        # The original keyword arguments used bare multi-word names such as
        # `Home Team` (a SyntaxError); pass the local variables instead, and
        # instantiate the module-level model class `sportsbetting`.
        record = sportsbetting(Football_Home_Team=Football_Home_Team,
                               Football_Away_Team=Football_Away_Team,
                               Football_Home_Odds=Football_Home_Odds,
                               Football_Away_Odds=Football_Away_Odds,
                               Football_Home_Win=Football_Home_Win
                               )
        db.session.add(record)
        db.session.commit()
        return redirect("/", code=302)
    # Removed an unreachable trailing `return jsonify(pet_data)` that
    # referenced an undefined name.
    return render_template("form.html")
if __name__ == "__main__":
    # Run the Flask development server (debug off by default).
    app.run()
12,227 | b05620dfd2637cb21295ce8f50f781a1a418fd22 | import unittest
from python_katas.isograms.src import Isograms
class IsogramsTest(unittest.TestCase):
    """Unit tests for Isograms.is_isogram (no letter repeats, case-insensitive)."""

    def test_should_return_true_for_isograms(self):
        """Words without any repeated letter are isograms; so is the empty string."""
        for word in ("Dermatoglyphics", "isogram"):
            self.assertEqual(Isograms.is_isogram(word), True)
        self.assertEqual(Isograms.is_isogram(""), True, "an empty string is a valid isogram")

    def test_should_return_false_for_no_isograms(self):
        """Any repeat counts, whether adjacent, differently cased, or separated."""
        self.assertEqual(Isograms.is_isogram("aba"), False, "same chars may not be adjacent")
        self.assertEqual(Isograms.is_isogram("moOse"), False, "same chars may not be same case")
        self.assertEqual(Isograms.is_isogram("isIsogram"), False)
12,228 | 5fecedc6364dd905b8f6346ae95cf9b3709d42bf | from django.shortcuts import render
from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework import status
from config.messages import Messages
from utility.requestErrorFormate import requestErrorMessagesFormate
from utility.authMiddleware import isAuthenticate
from utility.rbacService import RbacService
from .requestSchema import NotificationListValidator
from .models import Notification
from .serializers import NoitifcationListSerializer
from utility.loggerService import logerror
# Create your views here.
# notification list
@api_view(['GET'])
@isAuthenticate
@RbacService('notification:read')
def notification_list(request):
    """
    @api {GET} v1/user/notification/list Notifications list
    @apiName Notifications list
    @apiGroup Notification
    @apiHeader {String} authorization Users unique access-token
    @apiHeader {integer} page_limit
    @apiHeader {integer} page_offset
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "data": [
            {
                "id": 1,
                "refrence_id": 1,
                "event_id": 1,
                "title": "hi",
                "message": "hey you",
                "is_read": 0,
                "is_deleted": 0,
                "created_on": "21 Aug 2020"
            }
        ]
    }
    """
    try:
        validator = NotificationListValidator(request.GET)
        valid = validator.validate() # Validate the request
        if valid:
            current_user_id = request.user_id
            page_limit = int(request.GET['page_limit'])
            page_offset = int(request.GET['page_offset'])
            # notification listing: newest first, sliced to the requested page
            notification_list = Notification.objects.filter(user_id=current_user_id).all().order_by('-created_on')[page_offset:page_limit+page_offset]
            serializer = NoitifcationListSerializer(notification_list, many=True)
            # mark ALL of the user's notifications read once a page was served
            # (set is_read = 1)
            Notification.objects.filter(user_id=current_user_id).update(
                is_read=1
            )
            return Response({'data':serializer.data}, status=status.HTTP_200_OK)
        else:
            # NOTE(review): validation failures are returned with HTTP 200,
            # matching the rest of this module -- confirm against API spec
            return Response({'error':requestErrorMessagesFormate(validator.get_message())}, status=status.HTTP_200_OK)
    except Exception as exception:
        logerror('notifications/views.py/notification_list', str(exception))
        return Response({'error':str(exception)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
# Clear notifications
@api_view(['POST'])
@isAuthenticate
@RbacService('notification:read')
def clear_notifiactions(request):
    """
    @api {POST} v1/user/notification/clear Clear Notifications
    @apiName Clear Notifications
    @apiGroup Notification
    @apiHeader {String} authorization Users unique access-token
    @apiHeader {integer} is_clear_all 1 for clear all notifications otherwise 0
    @apiHeader {list} notification_ids blank in case of `is_clear_all` 1
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "message": "Notifications removed successfully"
    }
    """
    try:
        current_user_id = request.user_id
        is_clear_all = request.data.get('is_clear_all')
        # Handle clear-all first, before touching notification_ids: the
        # original unconditionally did list(request.data.get(...)), which
        # raised TypeError whenever the key was absent -- even for clear-all
        # requests where the ids are documented as optional.
        if is_clear_all == 1:
            Notification.objects.filter(user_id=current_user_id).delete()
            return Response({'message':Messages.NOTIFICATION_REMOVED}, status=status.HTTP_200_OK)
        # missing/None ids fall back to an empty list (deletes nothing)
        notification_ids = list(request.data.get('notification_ids') or [])
        Notification.objects.filter(id__in=notification_ids, user_id=current_user_id).delete()
        return Response({'message':Messages.NOTIFICATION_REMOVED}, status=status.HTTP_200_OK)
    except Exception as exception:
        logerror('notifications/views.py/clear_notifiactions', str(exception))
        return Response({'error':str(exception)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
# Get notifications count
@api_view(['GET'])
@isAuthenticate
@RbacService('notification:read')
def get_notification_count(request):
    """
    @api {GET} v1/user/notification/count Get notification count
    @apiName Get notification count
    @apiGroup Notification
    @apiHeader {String} Content-Type application/json
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "notification_count": 2
    }
    """
    try:
        current_user_id = request.user_id
        # only unread notifications (is_read=0) count toward the badge
        notification_count = Notification.objects.filter(user_id=current_user_id, is_read=0).count()
        response = {
            "notification_count": notification_count
        }
        return Response(response, status=status.HTTP_200_OK)
    except Exception as exception:
        # fixed the copy-pasted log location (was 'user_profile/views.py/
        # user_delete_profile'), consistent with the other views in this module
        logerror('notifications/views.py/get_notification_count', str(exception))
        return Response({'error': str(exception)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
12,229 | eb736bcc63ccce4971bf4a35456ae36964f1b79b | count=0;
# Count from 0 to 8, printing each value (print with two args inserts a space).
while count < 9:
    print('count is ', count)
    count += 1
print("Good Byee!")
|
12,230 | 6f53191ebda210e7e966e4442c2bed8094e4895d | # -*- coding: utf-8 -*-
# ======================================
# @File : leetcode_nm3.py
# @Time : 2019/10/3 22:54
# @Author : Rivarrl
# ======================================
from typing import List
from algorithm_utils import *
def findPeakElement(nums):
    """162. Find Peak Element.

    Return the index of any element strictly greater than both neighbours,
    treating nums[-1] and nums[n] as -infinity (adjacent elements are
    guaranteed distinct). Binary search, O(log n):
    whenever nums[mid] > nums[mid+1] a peak exists at mid or to its left;
    otherwise the slope rises, so a peak lies strictly to the right.

    :param nums: List[int]
    :return: int
    """
    lo, hi = 0, len(nums) - 1
    while lo < hi:
        mid = (lo + hi) // 2
        if nums[mid] > nums[mid + 1]:
            hi = mid          # descending edge: peak at mid or left of it
        else:
            lo = mid + 1      # ascending edge: peak strictly to the right
    return lo
def largerstNumber(nums):
    """179. Largest Number.

    Arrange the non-negative integers so that their concatenation is maximal
    and return it as a string.

    Improvement: the original used an O(n^2) bubble sort that re-converted
    each pair to str/int on every comparison; this sorts once in
    O(n log n) with a concatenation comparator (a before b iff a+b > b+a).

    :param nums: List[int]
    :return: str
    """
    from functools import cmp_to_key

    def concat_cmp(a, b):
        # negative = a first; the pair order that concatenates larger wins
        if a + b > b + a:
            return -1
        if a + b < b + a:
            return 1
        return 0

    pieces = sorted((str(v) for v in nums), key=cmp_to_key(concat_cmp))
    # str(int(...)) collapses all-zero results like "00" to "0"
    return str(int("".join(pieces)))
def reverseParentheses(s):
    """1190. Reverse Substrings Between Each Pair of Parentheses.

    Reverse, innermost first, every substring enclosed in matched
    parentheses and return the result without any parentheses.

    Examples: "(abcd)" -> "dcba"; "(u(love)i)" -> "iloveu";
    "(ed(et(oc))el)" -> "leetcode".

    Improvement: the original repeatedly re-sliced the whole string and
    rewound the loop indices in place (quadratic and fragile); this keeps a
    stack of segments: '(' opens a new segment, ')' reverses the finished
    segment into its parent.

    :param s: str
    :return: str
    """
    segments = ['']
    for ch in s:
        if ch == '(':
            segments.append('')
        elif ch == ')':
            inner = segments.pop()
            segments[-1] += inner[::-1]
        else:
            segments[-1] += ch
    return segments[0]
def kConcatenationMaxSum(arr, k):
    """1191. K-Concatenation Maximum Sum.

    Maximum subarray sum of arr repeated k times (empty subarray counts
    as 0), mod 1e9+7. Kadane's scan over at most two copies finds every
    boundary-crossing window; for k > 2 each of the k-2 middle copies
    contributes max(sum(arr), 0) on top of that.

    :param arr: List[int]
    :param k: int
    :return: int
    """
    MOD = 10 ** 9 + 7
    running = best = 0
    for value in arr * min(2, k):
        # Kadane: restart the window when the running sum goes negative
        running = value if running < 0 else running + value
        if running > best:
            best = running
    if k <= 2:
        return best
    return (max(sum(arr), 0) * (k - 2) + best) % MOD
def sortArray(nums):
    """912. Sort an Array (ascending).

    Sorts in place and returns nums.

    Improvement: the original quicksort always pivoted on the first element,
    degrading to O(n^2) time and O(n) recursion depth on already-sorted or
    all-equal input (the problem allows 10^4 elements, enough to hit the
    recursion limit). Merge sort is O(n log n) worst case with O(log n)
    recursion depth.

    :param nums: List[int]
    :return: List[int]
    """
    def _merge_sort(arr):
        # recursively sort halves, then merge the two sorted runs
        if len(arr) <= 1:
            return arr
        mid = len(arr) // 2
        left = _merge_sort(arr[:mid])
        right = _merge_sort(arr[mid:])
        merged, i, j = [], 0, 0
        while i < len(left) and j < len(right):
            if left[i] <= right[j]:
                merged.append(left[i])
                i += 1
            else:
                merged.append(right[j])
                j += 1
        merged.extend(left[i:])
        merged.extend(right[j:])
        return merged

    nums[:] = _merge_sort(nums)  # write back: callers see nums sorted in place
    return nums
def sumSubarrayMins(A):
    """907. Sum of Subarray Minimums (mod 1e9+7).

    For each index i, A[i] is the minimum of exactly
    (i - prev_less[i]) * (next_leq[i] - i) subarrays, where prev_less uses a
    strict '<' and next_leq a '<=' boundary so runs of equal values are
    counted exactly once. Two monotonic-stack passes, O(n).

    :param A: List[int]
    :return: int
    """
    n = len(A)
    if n == 0:
        return 0
    if n == 1:
        return A[0]
    MOD = 1000000007
    prev_less = [0] * n   # index of nearest element strictly smaller, to the left
    next_leq = [0] * n    # index of nearest element <=, to the right
    stack = []
    for i, value in enumerate(A):
        while stack and A[stack[-1]] > value:
            stack.pop()
        prev_less[i] = stack[-1] if stack else -1
        stack.append(i)
    stack = []
    for i in range(n - 1, -1, -1):
        while stack and A[stack[-1]] >= A[i]:
            stack.pop()
        next_leq[i] = stack[-1] if stack else n
        stack.append(i)
    total = 0
    for i, value in enumerate(A):
        total += (i - prev_less[i]) * (next_leq[i] - i) * value
        total %= MOD
    return total
def shoppingOffers(price, special, needs):
    """638. Shopping Offers.

    Given unit prices, special bundle offers (each offer lists the item
    counts it contains followed by its price), and an exact shopping list
    `needs`, return the minimum cost of buying exactly `needs`. Offers may
    be used any number of times but must never overshoot the list.

    Example: price=[2,5], special=[[3,0,5],[1,2,10]], needs=[3,2] -> 14.

    :param price: List[int]
    :param special: List[List[int]]
    :param needs: List[int]
    :return: int
    """
    # Backtracking
    def shopping(special, needs):  # cheapest way to buy exactly `needs` from `special` + unit prices
        if not sum(needs):  # nothing left to buy
            return 0
        # drop offers that would overshoot any item in `needs`
        special = list(filter(lambda x: all(x[i] <= needs[i] for i in range(l)), special))
        if not special:  # no usable offer left: pay unit prices for the remainder
            return sum(needs[i] * price[i] for i in range(l))
        res = []
        for pac in special:  # try each offer; recurse on what remains after applying it
            res.append(pac[-1] + shopping(special, [needs[i] - pac[i] for i in range(l)]))
        return min(res)  # cheapest of the choices at this level
    l = len(price)
    # pre-filter: keep only offers strictly cheaper than buying their contents
    # at unit price (any such offer, when usable, is always worth taking)
    special = list(filter(lambda x: x[-1] < sum(x[i] * price[i] for i in range(l)), special))
    return shopping(special, needs)
def countNumbersWithUniqueDigits(n):
    """357. Count Numbers with Unique Digits.

    Count x with all-distinct digits in [0, 10^n).
    dp[i] = answer for up to i digits; the number of i-digit values with
    unique digits is 9 * 9 * 8 * ... (first digit 1-9, then 9 remaining
    choices, then 8, ...).

    Fix: removed a stray debug ``print(dp)`` left before the return.

    :param n: int
    :return: int
    """
    n = min(10, n)  # more than 10 digits can never all be unique
    dp = [0] * (n + 1)
    dp[0] = 1  # only the single number 0
    choices = [1, 9] + [i for i in range(9, 0, -1)]
    unique_count = 1
    for i in range(1, n + 1):
        unique_count *= choices[i]          # exactly-i-digit unique numbers
        dp[i] = unique_count + dp[i - 1]    # plus everything shorter
    return dp[n]
def findPaths(m, n, N, i, j):
    """576. Out of Boundary Paths.

    Count the paths that move a ball from (i, j) out of an m x n grid in at
    most N up/down/left/right moves, mod 1e9+7.

    dp[a][b][step] = ways to be at 1-based cell (a, b) after `step` moves
    without having left the grid. The grid is padded with a ring of
    always-zero cells so neighbour reads never index out of range. Every
    time a path sits on an edge cell, the move that would step off the grid
    is added to `res` immediately.

    Improvement: removed the 24-line commented-out memoized variant that was
    kept as a dead string inside the function body.

    :param m: int
    :param n: int
    :param N: int
    :param i: int
    :param j: int
    :return: int
    """
    if N == 0:
        return 0
    mod = 10 ** 9 + 7
    dp = [[[0] * N for _ in range(n + 2)] for _ in range(m + 2)]
    res = 0
    dp[i + 1][j + 1][0] = 1
    # moves that leave the grid immediately from the start cell
    if i == 0: res += 1
    if i == m - 1: res += 1
    if j == 0: res += 1
    if j == n - 1: res += 1
    for step in range(1, N):
        for a in range(1, m + 1):
            for b in range(1, n + 1):
                dp[a][b][step] += (dp[a - 1][b][step - 1] + dp[a][b - 1][step - 1]
                                   + dp[a + 1][b][step - 1] + dp[a][b + 1][step - 1])
                dp[a][b][step] %= mod
                # paths now on an edge can exit on their next move
                if a == 1: res += dp[a][b][step]
                if a == m: res += dp[a][b][step]
                if b == 1: res += dp[a][b][step]
                if b == n: res += dp[a][b][step]
                res %= mod
    return res
def triangleNumber(nums):
    """611. Valid Triangle Number.

    Count index triples whose values can be the sides of a triangle.
    Sort ascending, fix the largest side nums[k], and two-pointer the prefix:
    once nums[lo] + nums[hi] > nums[k], every index in (lo, hi) also pairs
    with hi (the triangle inequality against the two larger sides is
    automatic after sorting). O(n^2).

    :param nums: List[int]
    :return: int
    """
    nums.sort()
    count = 0
    for k in range(2, len(nums)):
        lo, hi = 0, k - 1
        while lo < hi:
            if nums[lo] + nums[hi] > nums[k]:
                count += hi - lo   # all of (lo, hi] work with this hi
                hi -= 1
            else:
                lo += 1
    return count
def validSquare(p1, p2, p3, p4):
    """593. Valid Square.

    Accepts four points (integer coordinates per the problem constraints)
    as a square when they are pairwise distinct and their six pairwise
    squared distances take exactly two values (side and diagonal) --
    the same criterion as the original implementation.

    :param p1: List[int]
    :param p2: List[int]
    :param p3: List[int]
    :param p4: List[int]
    :return: bool
    """
    corners = [tuple(p1), tuple(p2), tuple(p3), tuple(p4)]
    if len(set(corners)) != 4:
        return False  # coincident points can never form a square
    def sq_dist(a, b):
        return (a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2
    distances = {sq_dist(corners[i], corners[j])
                 for i in range(1, 4) for j in range(i)}
    return len(distances) == 2
def minDistance(word1, word2):
    """583. Delete Operation for Two Strings.

    Minimum number of single-character deletions (from either word) to make
    the two words equal: len1 + len2 - 2 * LCS(word1, word2).

    dp[i][j] = length of the longest common subsequence of word1[:i] and
    word2[:j]; O(len1 * len2) time and space.

    Improvement: removed two large commented-out alternative implementations
    (an edit-distance DP and a memoized LCS) that were kept as dead strings
    inside the function body.

    :param word1: str
    :param word2: str
    :return: int
    """
    n1, n2 = len(word1), len(word2)
    dp = [[0] * (n2 + 1) for _ in range(n1 + 1)]
    for i in range(1, n1 + 1):
        for j in range(1, n2 + 1):
            if word1[i - 1] == word2[j - 1]:
                # matching characters extend the LCS
                dp[i][j] = dp[i - 1][j - 1] + 1
            else:
                dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
    return n1 + n2 - 2 * dp[n1][n2]
def removeDuplicates(nums):
    """80. Remove Duplicates from Sorted Array II.

    Compact the sorted array in place so every value appears at most twice;
    return the new length (nums[:length] holds the result, O(1) extra space).

    Two-pointer overwrite: an element is kept iff fewer than two are kept so
    far, or it differs from the element two slots back in the kept prefix.

    Improvement: removed the commented-out O(n^2) pop-based variant (with its
    debug print) that was kept as a dead string inside the function body.

    :param nums: List[int]
    :return: int
    """
    write = 0
    for value in nums:
        if write < 2 or value != nums[write - 2]:
            nums[write] = value
            write += 1
    return write
def deleteDuplicates(head):
    """82. Remove Duplicates from Sorted List II.

    Given a sorted linked list, keep only the values that occur exactly
    once; every value that appears more than once is dropped entirely.
    Example: 1->2->3->3->4->4->5 becomes 1->2->5.

    :param head: ListNode
    :return: ListNode
    """
    # q trails behind, appending unique nodes to a dummy head
    # (0.0 is an arbitrary placeholder value, never returned)
    p, q = head, ListNode(0.0)
    res = q
    while p:
        if p.next == None:
            # last node: by construction of the skip below it is unique
            q.next = p
            q = q.next
            break
        if p.val == p.next.val:
            # advance to the final node of this run of equal values;
            # the shared p = p.next below then skips past the whole run
            while p.next and p.val == p.next.val:
                p = p.next
        else:
            q.next = p
            q = q.next
        p = p.next
    # terminate the result: q may still point into the original chain
    q.next = None
    return res.next
def search(nums, target):
    """81. Search in Rotated Sorted Array II (duplicates allowed).

    Binary search on the rotated array: at each step one half is sorted and
    can be tested with an ordinary range check. When
    nums[lo] == nums[mid] == nums[hi] the sorted half cannot be identified,
    so both ends shrink by one (worst case degrades to O(n)).

    :param nums: List[int]
    :param target: int
    :return: bool
    """
    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if nums[mid] == target:
            return True
        if nums[mid] == nums[lo] == nums[hi]:
            # ambiguous: duplicates hide which half is sorted
            lo += 1
            hi -= 1
        elif nums[lo] <= nums[mid]:
            # left half [lo, mid] is sorted
            if nums[lo] <= target < nums[mid]:
                hi = mid - 1
            else:
                lo = mid + 1
        else:
            # right half [mid, hi] is sorted
            if nums[mid] < target <= nums[hi]:
                lo = mid + 1
            else:
                hi = mid - 1
    return False
def minAvailableDuration(slots1: List[List[int]], slots2: List[List[int]], duration: int) -> List[int]:
    """5089. Meeting Scheduler.

    Return the earliest interval [start, start + duration] contained in one
    slot of each person, or [] if none exists. Each person's own slots never
    overlap, so sorting both lists and walking them with two pointers visits
    every candidate overlap in start order, O(n log n + m log m).

    Improvements over the original: removed a debug ``print(e1, e2)`` inside
    the loop, and replaced the heap machinery with a plain two-pointer sweep
    over sorted copies (the inputs are no longer mutated).
    """
    a = sorted(slots1)
    b = sorted(slots2)
    i = j = 0
    while i < len(a) and j < len(b):
        # overlap of the two current slots
        start = max(a[i][0], b[j][0])
        end = min(a[i][1], b[j][1])
        if end - start >= duration:
            return [start, start + duration]
        # advance whichever slot ends first; it cannot overlap anything later
        if a[i][1] < b[j][1]:
            i += 1
        else:
            j += 1
    return []
def probabilityOfHeads(prob: List[float], target: int) -> float:
    """5090. Toss Strange Coins.

    Each coin i lands heads with probability prob[i]; every coin is tossed
    once. Return the probability that exactly ``target`` coins come up heads.

    dp[i][j] = probability of exactly j heads among the first i coins; only
    j <= min(target, i) is ever needed.

    Fixes: removed a stray debug ``print(dp)`` before the return and a dead
    ``dp[0][1] = 1.0`` initialisation that was never read (j < i is required
    to read dp[i-1][j], so dp[0][j] is only consulted at j = 0).
    """
    n = len(prob)
    dp = [[0.0] * (n + 1) for _ in range(n + 1)]
    dp[0][0] = 1.0  # zero coins tossed: certainly zero heads
    for i in range(1, n + 1):
        p = prob[i - 1]
        for j in range(min(target, i) + 1):
            if j > 0:
                dp[i][j] += dp[i - 1][j - 1] * p        # coin i lands heads
            if j < i:
                dp[i][j] += dp[i - 1][j] * (1 - p)      # coin i lands tails
    return dp[n][target]
def removeSubfolders(folder: List[str]) -> List[str]:
    """1233. Remove Sub-Folders from the Filesystem.

    Return the folders that are not sub-folders of another folder in the
    list (any order is accepted by the problem).

    After lexicographic sorting, every sub-folder appears immediately
    after one of its ancestors, so each path only needs to be compared
    against the last kept folder.  O(n log n) time.

    The previous version used ``f.find(p)`` which matched the parent
    *anywhere* in the path (falsely treating "/b/a/c" as a child of
    "/a") and raised IndexError when the match ended at the string's end
    (e.g. ["/a", "/b/a"]).
    """
    kept: List[str] = []
    for path in sorted(folder):
        # The trailing '/' guard prevents "/a/b" from being treated as a
        # parent of "/a/bc".
        if not kept or not path.startswith(kept[-1] + '/'):
            kept.append(path)
    return kept
def balancedString(s: str) -> int:
    """1234. Replace the Substring for Balanced String.

    ``s`` contains only 'Q', 'W', 'E', 'R' and ``len(s)`` is a multiple
    of 4.  Return the length of the shortest substring that can be
    replaced (by anything of equal length) so every character occurs
    exactly n/4 times.

    Key fact: a window works iff every character count *outside* the
    window is <= n/4 (any deficit can be filled by the replacement).
    Standard shortest-window two-pointer sweep, O(n) time, O(1) space.
    The previous version let the right pointer overshoot to n+1 and
    recovered the answer from ``r - l + 1``, which was correct but very
    hard to verify.
    """
    n = len(s)
    quota = n // 4
    outside = {c: s.count(c) for c in 'QWER'}
    if max(outside.values()) == quota:
        return 0  # already balanced
    best = n
    left = 0
    for right, ch in enumerate(s):
        outside[ch] -= 1  # ch enters the window
        # Shrink from the left while the counts outside stay feasible.
        while left <= right and all(v <= quota for v in outside.values()):
            best = min(best, right - left + 1)
            outside[s[left]] += 1
            left += 1
    return best
def longestSubsequence(arr: List[int], difference: int) -> int:
    """1218. Longest Arithmetic Subsequence of Given Difference.

    Return the length of the longest subsequence whose consecutive
    elements differ by exactly ``difference``.

    DP keyed by last value: ``best[v]`` is the length of the longest
    valid subsequence seen so far that ends with value ``v``.  The
    uniform ``best.get(v - difference, 0) + 1`` recurrence also covers
    ``difference == 0`` (then ``v - difference == v``), removing the
    previous ``int(bool(difference))`` special case.  O(n) time/space.
    """
    if not arr:  # previous version raised ValueError on empty input
        return 0
    best = {}
    for v in arr:
        # Extending the best chain ending at v - difference is optimal;
        # keep any longer chain already recorded for v.
        best[v] = max(best.get(v, 0), best.get(v - difference, 0) + 1)
    return max(best.values())
def getMaximumGold(grid: List[List[int]]) -> int:
    """1219. Path with Maximum Gold.

    DFS with backtracking from every cell; a cell with 0 gold can never
    be entered, and each cell is used at most once per path.

    Visited state is recorded *in place* by temporarily zeroing the cell
    and restoring it on backtrack — the previous version allocated a
    fresh n*m visited matrix for every starting cell.  The grid is left
    unmodified when the function returns.
    """
    n, m = len(grid), len(grid[0])
    moves = ((1, 0), (-1, 0), (0, 1), (0, -1))

    def dfs(i: int, j: int) -> int:
        # Out of bounds, empty cell, or cell already on the current path.
        if not (0 <= i < n and 0 <= j < m) or grid[i][j] == 0:
            return 0
        gold = grid[i][j]
        grid[i][j] = 0  # mark as visited for the current path
        best = max(dfs(i + di, j + dj) for di, dj in moves)
        grid[i][j] = gold  # backtrack: restore the cell
        return gold + best

    return max(dfs(i, j) for i in range(n) for j in range(m))
def removeNearDuplicates(s: str, k: int) -> str:
    """1209. Remove All Adjacent Duplicates in String II.

    Repeatedly delete any run of ``k`` equal adjacent characters until
    no such run remains, and return the result.

    Stack of ``[character, run_length]`` pairs: merging runs on push and
    popping exactly when a run reaches ``k`` simulates the repeated
    deletion in a single O(n) pass.  (A slower commented-out recursive
    variant was removed from this function.)
    """
    stack = []  # each entry: [character, length of the current run]
    for c in s:
        if stack and stack[-1][0] == c:
            stack[-1][1] += 1
            if stack[-1][1] == k:
                stack.pop()  # a complete run of k equal chars vanishes
        else:
            stack.append([c, 1])
    return ''.join(ch * cnt for ch, cnt in stack)
def queensAttacktheKing(queens: List[List[int]], king: List[int]) -> List[List[int]]:
    """1222. Queens That Can Attack the King.

    Walk outwards from the king along each of the 8 rays (rank, file and
    both diagonals) and keep the first queen met on each ray — any queen
    behind it is blocked.  O(8 * 8) work on the fixed 8x8 board.
    """
    occupied = {(r, c) for r, c in queens}
    attackers = []
    king_r, king_c = king
    rays = ((1, 0), (0, 1), (1, 1), (1, -1), (-1, -1), (-1, 1), (-1, 0), (0, -1))
    for dr, dc in rays:
        r, c = king_r + dr, king_c + dc
        while 0 <= r < 8 and 0 <= c < 8:
            if (r, c) in occupied:
                attackers.append([r, c])
                break  # this ray is blocked from here on
            r, c = r + dr, c + dc
    return attackers
def equalSubstring(s: str, t: str, maxCost: int) -> int:
    """1208. Get Equal Substrings Within Budget.

    Changing ``s[i]`` into ``t[i]`` costs ``|ord(s[i]) - ord(t[i])|``.
    Return the length of the longest substring of ``s`` convertible to
    the corresponding substring of ``t`` with total cost <= ``maxCost``.

    Classic sliding window over the per-index costs: grow on the right,
    shrink on the left whenever the window's cost exceeds the budget.
    O(n) time, O(1) space.  (Debug ``print`` calls, an unused variable
    and the precomputed cost array were removed.)
    """
    best = 0
    window_cost = 0
    left = 0
    for right in range(len(s)):
        window_cost += abs(ord(s[right]) - ord(t[right]))
        # Shrink until the window is affordable again.
        while window_cost > maxCost:
            window_cost -= abs(ord(s[left]) - ord(t[left]))
            left += 1
        best = max(best, right - left + 1)
    return best
if __name__ == '__main__':
    # Ad-hoc smoke test for the most recently worked-on problem; earlier
    # invocations are kept below, commented out.
    equalSubstring("abcd", "cdef", 1)
    # res = removeNearDuplicates("yfttttfbbbbnnnnffbgffffgbbbbgssssgthyyyy", 4)
    # print(res)
    # getMaximumGold([[1,0,7,0,0,0],[2,0,6,0,1,0],[3,5,6,7,4,2],[4,3,1,0,2,0],[3,0,5,0,20,0]])
    # longestSubsequence([4,12,10,0,-2,7,-8,9,-9,-12,-12,8,8], 0)
    # removeSubfolders(["/a/b/c","/a/b/d","/a/b/ca","/a/b/d/c"])
    # res = probabilityOfHeads([0.5], 0)
    # print(res)
    # res = minAvailableDuration([[10,50],[60,120],[140,210]], [[0,15],[60,70]], 8)
    # print(res)
    # b = search([1,3,1,1,1], 3)
    # print(b)
    # x = construct_list_node([1,1,2,3,3,4,5,5])
    # deleteDuplicates(x)
    # removeDuplicates([0,0,1,1,1,1,2,3,3,3])
    # a = minDistance("sea", "eat")
    # print(a)
    # triangleNumber([2,2,3,4])
    # a = findPaths(1,3,3,0,1)
    # print(a)
    # countNumbersWithUniqueDigits(10)
    # shoppingOffers([2,3,4], [[1,1,0,4],[2,2,1,9]], [1,2,1])
    # arr = [10,2]
    # res = largerstNumber(arr)
    # s = "(ed(et(oc))el)"
    # reverseParentheses(s)
    pass
12,231 | 683b279a752760f0d4bd2d152fa84945d3340656 | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import os
def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
    """
    # (pattern, replacement) pairs applied in order.  The replacement
    # literals are kept byte-for-byte identical to the original code,
    # including the "\(" / "\)" / "\?" backslashes it emitted.
    substitutions = (
        (r"[^A-Za-z0-9(),!?\'\`]", " "),
        (r"\'s", " \'s"),
        (r"\'ve", " \'ve"),
        (r"n\'t", " n\'t"),
        (r"\'re", " \'re"),
        (r"\'d", " \'d"),
        (r"\'ll", " \'ll"),
        (r",", " , "),
        (r"!", " ! "),
        (r"\(", " \( "),
        (r"\)", " \) "),
        (r"\?", " \? "),
        (r"\s{2,}", " "),
    )
    for pattern, replacement in substitutions:
        string = re.sub(pattern, replacement, string)
    return string.strip().lower()
def load_text_and_label(data_file):
    """
    Loads polarity data from files, splits the data into words and generates labels.
    Returns split sentences and labels.
    """
    # Keep only the review text and its recommendation flag, dropping rows
    # where either is missing.
    reviews = pd.read_csv(data_file)[['Review Text', 'Recommended IND']].dropna()
    pos_examples = reviews.loc[reviews['Recommended IND'] == 1, 'Review Text'].tolist()
    neg_examples = reviews.loc[reviews['Recommended IND'] == 0, 'Review Text'].tolist()
    # Positives first, then negatives; labels are one-hot ([0,1]=pos, [1,0]=neg).
    x_text = np.array([clean_str(sentence) for sentence in pos_examples + neg_examples])
    y = np.array([[0, 1] for _ in pos_examples] + [[1, 0] for _ in neg_examples])
    return [x_text, y]
def batch_iter(data, batch_size, num_epochs, shuffle=True):
    '''
    Generates a batch iterator for a dataset.

    Yields numpy-array slices of at most `batch_size` items, `num_epochs`
    times over the data; when `shuffle` is true the order is re-permuted
    at the start of every epoch.
    '''
    data = np.array(data)
    n = len(data)
    batches_per_epoch = (n - 1) // batch_size + 1
    for _ in range(num_epochs):
        if shuffle:
            epoch_data = data[np.random.permutation(np.arange(n))]
        else:
            epoch_data = data
        for b in range(batches_per_epoch):
            lo = b * batch_size
            hi = min(lo + batch_size, n)
            yield epoch_data[lo:hi]
|
12,232 | a01a810fd74d2a68f1e8e4c85a31f363cf2c1359 | # most straightforward solution would be more efficient with multiple return values, and Python makes those easy to handle
def tilt_and_sum(node):
    """Post-order helper: return (subtree value sum, subtree tilt sum).

    A node's tilt is ``|sum(left subtree) - sum(right subtree)|``; the
    second tuple component accumulates the tilts of every node in the
    subtree.  ``None`` (empty subtree) contributes (0, 0).
    """
    if node is None:  # idiom fix: was `node == None`
        return (0, 0)
    left_sum, left_tilt = tilt_and_sum(node.left)
    right_sum, right_tilt = tilt_and_sum(node.right)
    return (node.val + left_sum + right_sum,
            abs(left_sum - right_sum) + left_tilt + right_tilt)
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def findTilt(self, root):
        """
        :type root: TreeNode
        :rtype: int

        LeetCode 563: total tilt of the tree.  All the work happens in
        the module-level tilt_and_sum helper; only the accumulated tilt
        (second tuple component) is needed here.
        """
        return tilt_and_sum(root)[1]
|
12,233 | 8235ac5f16d54b726258bc5687caf26ac296d7de | #!/usr/bin/env python
# coding: utf-8
# Matplotlib: sigmoidal functions
# ======================================================================
#
# matplotlib's approach to plotting functions requires you to compute the
# x and y vertices of the curves you want to plot and then pass it off to
# plot. Eg for a normal pdf, matplotlib.mlab provides such a function:
# NOTE(review): matplotlib.mlab.normpdf and the matplotlib.numerix package
# were removed from matplotlib long ago; this tutorial snippet only runs on
# very old (~0.8x era) matplotlib installations.
from matplotlib.mlab import normpdf
import matplotlib.numerix as nx
import pylab as p

# Plot the unit-normal pdf on [-4, 4).
x = nx.arange(-4, 4, 0.01)
y = normpdf(x, 0, 1) # unit normal
p.plot(x,y, color='red', lw=2)
p.show()
# Of course, some curves do not have closed form expressions and are not
# amenable for such treatment. Some of the matplotlib backends have the
# capability to draw arbitrary paths with splines (cubic and quartic) but
# this functionality hasn't been exposed to the user yet (as of 0.83). If
# you need this, please post to the [mailing
# list](http://sourceforge.net/mail/?group_id=80706) or submit a
# sourceforge [support
# request](http://sourceforge.net/tracker/?group_id=80706&atid=560721).
#
# Rich Shepard was interested in plotting "S curves" and "Z curves", and a
# little bit of googling suggests that the S curve is a sigmoid and the Z
# curve is simply 1.0-sigmoid. There are many simple forms for sigmoids:
# eg, the hill, boltzman, and arc tangent functions. Here is an example of
# the boltzman function:
# In[ ]:
import matplotlib.numerix as nx
import pylab as p
def boltzman(x, xmid, tau):
    """
    evaluate the boltzman function with midpoint xmid and time constant tau
    over x
    """
    # Standard logistic sigmoid, shifted to xmid and scaled by tau.
    z = (x - xmid) / tau
    return 1. / (1. + nx.exp(-z))
# Plot the S curve (a sigmoid) and the mirrored Z curve (1 - sigmoid).
x = nx.arange(-6, 6, .01)
S = boltzman(x, 0, 1)
Z = 1-boltzman(x, 0.5, 1)
p.plot(x, S, x, Z, color='red', lw=2)
p.show()
# See also [sigmoids at
# mathworld](http://mathworld.wolfram.com/SigmoidFunction.html).
#
# People often want to shade an area under these curves, eg [under their
# intersection](http://www.appl-ecosys.com/newstuff.html), which you can
# do with the magic of numerix and the matplotlib
# [<http://matplotlib.sourceforge.net/matplotlib.pylab.html>\#-fill fill]
# function:
# In[ ]:
import matplotlib.numerix as nx
import pylab as p
def boltzman(x, xmid, tau):
    """
    evaluate the boltzman function with midpoint xmid and time constant tau
    over x
    """
    # Logistic sigmoid centred on xmid with slope controlled by tau.
    shifted = -(x - xmid) / tau
    return 1. / (1. + nx.exp(shifted))
def fill_below_intersection(x, S, Z):
    """
    fill the region below the intersection of S and Z
    """
    # find the intersection point: index where |S - Z| is minimal.
    # NOTE(review): nx.nonzero here follows the old Numeric API (returns the
    # indices directly, not a tuple as modern numpy does) -- this only works
    # with matplotlib.numerix.
    ind = nx.nonzero( nx.absolute(S-Z)==min(nx.absolute(S-Z)))[0]
    # compute a new curve which we will fill below
    # NOTE(review): `typecode=nx.Float` is Numeric-era spelling of dtype.
    Y = nx.zeros(S.shape, typecode=nx.Float)
    Y[:ind] = S[:ind] # Y is S up to the intersection
    Y[ind:] = Z[ind:] # and Z beyond it
    p.fill(x, Y, facecolor='blue', alpha=0.5)
# Same S/Z pair as above, now with the region under their intersection shaded.
x = nx.arange(-6, 6, .01)
S = boltzman(x, 0, 1)
Z = 1-boltzman(x, 0.5, 1)
p.plot(x, S, x, Z, color='red', lw=2)
fill_below_intersection(x, S, Z)
p.show()
# As these examples illustrate, matplotlib doesn't come with helper functions for all the kinds of curves people want to plot, but along with numerix and python, provides the basic tools to enable you to build them yourself.
#
# 
#
|
12,234 | e452b7802c3af6218a006877a1f141a78462a518 | # -*- coding: utf-8 -*-
"""
Attempt to scrape scholar.google.com results for Stan usage tracking"""
#from selenium import webdriver
import requests
from bs4 import BeautifulSoup
import redis
import re
import numpy
import time
import subprocess
import os
# Module-level Redis connection used as a page cache (default localhost).
REDIS = redis.Redis()
# retrieve
# set of queries, broken out by 3 level strucuture for each category.
# for each query
# collect docs and estimated count
# shove in doc store
# filter
# negative regex
# positive regex
# classifier
HEADERS = {'User-Agent': 'Mozilla/5.0(Macintosh; Intel Mac OS X 10_11_6) AppleWebkit/537.36 (KHTML, like Gecko) Chrome/61.0.3163'}
URL_PREFIX = "https://scholar.google.com/scholar?"
# NOTE(review): QUERY_PREFIX appears unused by the loop below.
QUERY_PREFIX = "hl=en&as_sdt=0%2C33&q="
START_YEAR = "&hl=en&as_sdt=0%2C33&as_ylo="
END_YEAR = "&as_yhi="
#page = requests.get("http://alias-i.com/index.html", headers=headers)
YEAR_START = 2011
YEAR_END = 2020
OFFSET_PATTERN = re.compile("start....(\d+)") # needs to be fixed, hacky
GOOGLE_COUNT = 'google_count'
FILTERED_COUNT = 'filtered_count'
RAW_COUNT = 'raw_count'
PKG_QUERY = ['rstan']
def cache_retrieve(query, headers):
    """Return the page body for `query`, fetching over HTTP on cache miss.

    On a miss, sleeps a random human-like interval (|N(300, 150)| seconds)
    before the request to avoid hammering Google Scholar, then stores the
    body in Redis under the full query URL.
    """
    cached = REDIS.get(query)
    if cached:
        return cached
    print("miss cache: \n" + query)
    delay = abs(numpy.random.normal(loc=300, scale=150, size=1)[0])
    print("waiting ", delay, " seconds")
    time.sleep(delay)
    body = requests.get(query, headers=headers).content
    REDIS.set(query, body)
    return body
def cache_only(query,headers):
    # Cache-only lookup: returns the cached page (or None) and never issues
    # an HTTP request.  `headers` is unused but kept so the signature is
    # interchangeable with cache_retrieve / interactive_cache_retrieve.
    return REDIS.get(query)
def interactive_cache_retrieve(query, headers,
                               save_path='/Users/breck/git/rosetta-stan.github.io/usage/file.html'):
    """Cache lookup with a manual, human-in-the-loop fetch on miss.

    On a cache miss the query URL is opened in the default browser
    (macOS ``open``); the user is expected to save the rendered page to
    ``save_path``.  The loop re-prompts until the file's mtime changes,
    then reads the file, caches it in Redis and returns it.

    ``save_path`` was previously a path hard-coded in three places; it
    is now a keyword parameter with the same default, so existing
    callers are unaffected.  ``headers`` is accepted for signature
    compatibility with cache_retrieve but is not used.
    """
    page = REDIS.get(query)
    if page:
        return page
    while True:
        print("miss cache: \n" + query)
        old_mtime = os.stat(save_path).st_mtime
        subprocess.call(['open', '--wait-apps', '--new', query])  # blocking
        new_mtime = os.stat(save_path).st_mtime
        # The user must have re-saved the file; otherwise prompt again.
        if old_mtime - new_mtime >= 0:
            print(save_path + " has not changed")
            continue
        with open(save_path, "r") as f:  # `with` closes the handle reliably
            page = f.read()
        REDIS.set(query, page)
        return page
# Main scraping driver: for every package term and every year, page through
# Google Scholar results (via the Redis-backed cache), count raw hits, and
# count hits surviving the rstan-specific false-positive filters.
for i in range(0,len(PKG_QUERY)):
    package = PKG_QUERY[i]
    package_d = {}
    for year in range(YEAR_START,YEAR_END):
        package_d[year] = {GOOGLE_COUNT:0,FILTERED_COUNT:0, RAW_COUNT:0}
        done = False
        result_start_str = ''
        first_page= True
        filtered_count = 0
        while not done:
            query = URL_PREFIX + result_start_str + "q=" + package + START_YEAR + str(year) + END_YEAR + str(year)
            #page = cache_only(query,HEADERS)
            #page = interactive_cache_retrieve(query,HEADERS)
            page = cache_retrieve(query,HEADERS)
            if not page:
                done = True
                continue
            soup = BeautifulSoup(page, 'html.parser')
            count_match = soup.find(attrs={"id":"gs_ab_md"})
            if not count_match:
                raise ValueError("query got unexpected result:" + query)
            # <div id="gs_ab_md"><div class="gs_ab_mdw">Page 15 of about 137,000 results (
            # On the first page only, parse Google's estimated total hit count.
            if count_match and first_page:
                google_count = count_match.get_text()
                RESULTS_COUNT_PATTERN = re.compile("([\d,]+) result") # About 3,339 results (0.03 sec)'
                google_count_value = RESULTS_COUNT_PATTERN.search(google_count).group(1)
                package_d[year][GOOGLE_COUNT] = int(google_count_value.replace(',',''))
                print("Got ",package_d[year][GOOGLE_COUNT]," estimated hits" )
                first_page = False
                # Scholar caps pageable results; skip years that are too big.
                if package_d[year][GOOGLE_COUNT] > 3000:
                    print("too many results to page", package_d[year][GOOGLE_COUNT]," for query:", query);
                    break
            for mention in soup.find_all(attrs={"class": "gs_r gs_or gs_scl"}): #go through indivudual mentions
                package_d[year][RAW_COUNT] += 1
                abstract = mention.find(attrs={"class":"gs_rs"})
                if abstract:
                    abs_text = mention.get_text()
                    #print("TEXT:",abs_text)
                    # Filter out false positives where "rstan" is only part of
                    # a hyphenation/markup-split "understand" or a substring.
                    if package=='rstan':
                        abs_text_clean = re.sub(r'</b>|<b>|\s|-','',abs_text) # 'U n d e <b>rstan</b> d th e '
                        understand_p = re.compile('understand',re.IGNORECASE)
                        if understand_p.search(abs_text_clean):
                            # print("skipping as part of 'understand':", abs_text_clean)
                            continue
                        rstan_token_p = re.compile('\srstan\s',re.IGNORECASE)
                        rstan_substr_p = re.compile('rstan',re.IGNORECASE)
                        if rstan_substr_p.search(abs_text) and not rstan_token_p.search(abs_text):
                            #print("skipping for no token 'rstan' but substring:",abs_text)
                            continue
                    package_d[year][FILTERED_COUNT] += 1
                    #print("kept:",abs_text)
            # Follow the "Next" button's onclick offset to build the next page URL.
            next_button = soup.find(name='button', attrs={"aria-label":"Next"})
            if next_button and next_button.attrs.get('onclick'):
                link = next_button.attrs.get('onclick')
                result_start = re.compile(OFFSET_PATTERN).search(link).group(1)
                result_start_str = 'start=' + result_start + '&'
            else:
                done = True
        print("Package:", package, "year: ", year, "google count:",
              package_d[year][GOOGLE_COUNT], "filter count:", package_d[year][FILTERED_COUNT],
              "raw_count:",package_d[year][RAW_COUNT])
        #print("Package: ", package, " year: ", year, "count: ", package_d[year][FILTERED_COUNT])
# first abstract
#abstract = soup.find_all(attrs={"class": "gs_r gs_or gs_scl"})[0].find(attrs={"class":"gs_rs"})
#abstract.get_text()
#soup.find_all(attrs={"class": "gs_or"})[0].a
#entry = soup.find_all(attrs={"class": "gs_or"})[0]
#link = soup.find_all(attrs={"class": "gs_or"})[0].a['href']
12,235 | 61494260399c8bcc2c956e89fc4fee9cf3490dcc | #导入traceback模块
import traceback
# Custom exception type used to demonstrate traceback printing.
class SelfException(Exception): pass
def main():
    # Top of a three-level call chain whose deepest frame raises.
    firstMethod()
def firstMethod():
    # Second frame of the demo call chain.
    secondMethod()
def secondMethod():
    # Third frame of the demo call chain (note: "thridmethod" typo is the
    # function's actual name).
    thridmethod()
def thridmethod():
    # Deepest frame: raise the custom exception (message: "custom
    # exception message" in Chinese; the literal must stay unchanged).
    raise SelfException("自定义异常信息")
try:
    main()
except:
    # Catch the exception and print its traceback to the console.
    traceback.print_exc()
    # Also append the traceback to a log file.  A context manager closes
    # the handle deterministically (the original leaked the file object
    # returned by open('log.txt', 'a')).
    with open('log.txt', 'a') as log_file:
        traceback.print_exc(file=log_file)
12,236 | 5d53d876db69aef9c0006f33b3612dc83afbf296 | #!/usr/bin/env python3
#
# Author: Soft9000.com
# 2018/11/24: GUI Project Begun
# Mission: Create a graphical user interface to PyDAO.
# Status: Released to PyPi
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from tkinter import *
from tkinter import messagebox
from tkinter.filedialog import askopenfilename
from collections import OrderedDict
from SqltDAO.CodeGen01.OrderClass import OrderClass
from SqltDAO.CodeGen01.SqlSyntax import SqliteCrud
from SqltDAO.SchemaDef.OrderDef import OrderDef1 as OrderDef
from SqltDAO.SchemaDef.Factory import Factory1
from SqltDAO.Gui.Data2Code import Data2Code
from SqltDAO.Gui.StandardEntry import LabelEntry
from SqltDAO.Gui.TableDef import TableDef as TableDef2
from SqltDAO.SchemaDef.Table import TableDef as TableDef1
from SqltDAO.Gui.DataPreferences import Dp1 as DataPreferences
class Main(Tk):
    """Root window of the PyDAO GUI: project menus plus the schema
    (table definition) editor frame."""

    def __init__(self, *args, **kwargs):
        from SqltDAO.CodeGen01.Meta import Meta
        super().__init__(*args, **kwargs)
        self.ztitle = Meta.Title()
        self.d2c = None
        self.project = None
        # Menu layout: (cascade label, [(item label, handler), ...]) --
        # consumed by begin() when the menu bar is built.
        self.zoptions = (
            ("Projects", [("New Project...", self._on_new),
                ("Open Project...", self._on_open),
                ("Save Project...", self._on_save),
                ("Create Code", self._on_code_create)],),
            ("Tools", [("Data2Code...", self._on_d2c),
                ("Data2Project...", self._on_d2p),
                ("Preferences...", self._on_d2pref)]),
            ("About", [("About PyDao...", self._on_about),
                ("Quit", self.destroy)]),
            )
        self.table_frame = None
        self.home = "."
        self.order_def = OrderDef()
        '''
        activeBackground, foreground, selectColor,
        activeForeground, highlightBackground, selectBackground,
        background, highlightColor, selectForeground,
        disabledForeground, insertBackground, troughColor.
        '''
        self.tk_setPalette(
            background="Light Green",# e.g. Global
            foreground="dark blue", # e.g. Font color
            insertBackground="blue", # e.g. Entry cursor
            selectBackground="gold", # e.g. Editbox selections
            activeBackground="gold", # e.g. Menu selections
            )

    def _on_new(self):
        # Reset window title, model and table editor to a pristine project.
        self.title(self.ztitle)
        self.order_def = OrderDef()
        self.table_frame.empty()
        self.table_frame.got_results()
        self.table_frame.table_name.set(TableDef1.DEFAULT_NAME)
        self._show_order()

    def _on_open(self):
        # Ask for a project file and load it into the editor.
        pref = DataPreferences.Load(self.home)
        self.project = askopenfilename(
            title="Open Project File",
            initialdir=pref['Projects'],
            filetypes=[("PyDAO Project", OrderDef.ProjType)]
            )
        if not self.project:
            return
        zdef = Factory1.LoadFile(self.project)
        if not zdef:
            messagebox.showerror(
                "Schema File / Format Error",
                "Unable to import " + self.project)
        else:
            self.table_frame.got_results()
            self.title(self.project)
            self.order_def = zdef
            self._show_order()

    def do_save(self):
        ''' Project file must be created for both saving same, as well as for creating code. '''
        ztbl = self.table_frame.pull_results()
        # NOTE(review): __dict__() is *called* here -- TableDef apparently
        # defines a __dict__ method; confirm against SqltDAO.SchemaDef.Table.
        zdict = ztbl.__dict__()
        if not zdict:
            messagebox.showerror(
                "No Data",
                "Schema Definition Required.")
            return False
        self.order_def = OrderDef(name=ztbl.get_table_name())
        if not self.order_def.add_table(ztbl):
            messagebox.showerror(
                "Invalid Table",
                "Please verify SQL Table Definition.")
            return False
        if Factory1.SaveFile(DataPreferences.Load(self.home), self.order_def, overwrite=True) is False:
            messagebox.showerror(
                "Exportation Error",
                "Please verify user locations.")
            return False
        self.table_frame.got_results()
        return True

    def _on_save(self):
        # Save and report where the project file landed.
        if self.do_save() is True:
            val = os.path.split(self.order_def.project_name)
            messagebox.showinfo(
                "Project Saved",
                "Project file saved as " + val[-1] + " in preference location.")

    def _on_code_create(self):
        ''' Generate Python code '''
        if self.do_save() is True:
            pref = DataPreferences.Load(self.home)
            order_class = Factory1.Extract(self.order_def, pref)
            zfields = OrderedDict()
            ztables = self.order_def.table_names()
            table_def = self.order_def.find_table(ztables[0]) # TODO: Highlander hack.
            for row in table_def:
                zfields[row[1]] = row[2]
            sql = SqliteCrud(order_class, zfields)
            zcode = sql.code_class_template(
                self.order_def.database_name + OrderDef.TEXT_DATA_TYPE)
            with open(self.order_def.code_name, 'w') as fh:
                print(zcode, file=fh)
            val = os.path.split(self.order_def.code_name)
            messagebox.showinfo(
                "Source Code Success",
                "Class created as " + val[-1] + " in preference location.")

    def _on_d2c(self):
        # Launch the Data2Code tool dialog.
        Data2Code(self, pref=DataPreferences.Load(self.home), verbose=True)

    def _on_d2p(self):
        # Launch Data2Code in project-generation mode.
        Data2Code(self, pref=DataPreferences.Load(self.home), gendef=True, verbose=True)

    def _on_d2pref(self):
        zpref = DataPreferences(self, self.home)
        if zpref.has_changed():
            pass

    def _on_about(self):
        messagebox.showinfo(
            self.ztitle,
            "Official Release")

    def _show_order(self):
        # Push every table from the current OrderDef into the editor frame.
        if not self.order_def:
            return False
        self.table_frame.empty()
        for key in self.order_def._zdict_tables:
            td1 = self.order_def._zdict_tables[key]
            if self.table_frame.put_results(td1) is False:
                messagebox.showerror(
                    "Display Error",
                    "Critical: _show_order regression.")
                return False

    def _set_frame(self):
        # Build the table-definition editor inside a packed frame.
        zframe = Frame(self)
        self.table_frame = TableDef2(zframe)
        zframe.pack(fill=BOTH)

    def begin(self):
        # One-time window setup: title, icon (best effort), menu bar, frame.
        self.title(self.ztitle)
        try:
            image = PhotoImage(file="zicon.png")
            self.wm_iconphoto(self, image)
        except:
            pass
        zmain = Menu(self)
        for zsub in self.zoptions:
            zdrop = Menu(zmain, tearoff=False)
            zmain.add_cascade(label=zsub[0], menu=zdrop)
            for zz in zsub[1]:
                zdrop.add_command(label=zz[0], command=zz[1])
        self.config(menu=zmain)
        self._set_frame()
        return True

    def run(self):
        # Enter Tk's event loop (blocks until the window is destroyed).
        super().mainloop()
        return True

    def end(self):
        return True

    @staticmethod
    def mainloop():
        # Application entry point: construct, run and tear down the window.
        main = Main()
        try:
            if main.begin():
                main.run()
            return True
        except Exception as ex:
            print(str(ex))
            return False
        finally:
            try:
                main.end()
            except:
                pass
# Script entry point.
if __name__ == "__main__":
    Main.mainloop()
|
12,237 | 9a32952621ce6b312453c20566e27288997ba7de | from django.conf import settings
from django.urls import reverse
from ..models import Topic
from ..models.Command0Arg import Command0Arg
# Reply template sent after a topic is created; `webhook_endpoint` and
# `topic_code` are filled in by NewCmd.without_argument().  The Markdown
# backticks are rendered by the messaging client.
TEXT = """Great! Now you have a new topic!
You can send `HTTP POST` requests to:
`{webhook_endpoint}`
If you want more people receiving the notifications, send them this TopicCode:
`{topic_code}`
I've already subscribed you to this topic.
"""
class NewCmd(Command0Arg):
    """Zero-argument `/new` command: creates a fresh topic for the chat
    and replies with its webhook endpoint and shareable topic code."""
    cmd = '/new'

    def without_argument(self):
        new_topic = Topic.objects.generate_new(self.chat)
        return TEXT.format(
            webhook_endpoint=new_topic.webhook_endpoint,
            topic_code=new_topic.code,
        )
|
12,238 | 63b3dcf8298a34089ec2be3c4ee15cdd73c1c76d | from numbers import Number
import math
# All the imports from pyparsing go here
from pyparsing import (delimitedList, Forward, Literal,
stringEnd, nums, Word, CaselessLiteral, Combine,
Optional, Suppress, OneOrMore, ZeroOrMore, opAssoc,
operatorPrecedence, oneOf, ParseException,
ParserElement,
alphas, alphanums, ParseFatalException,
ParseSyntaxException, FollowedBy, NotAny, Or,
MatchFirst, Keyword, Group, White, lineno, col)
# from .pyparsing_utils import myOperatorPrecedence
# Enable memoization (much faster!)
if True:
ParserElement.enablePackrat()
else:
# Pyparsing 2.0
from pyparsing import infixNotation
myOperatorPrecendence = infixNotation
from .interface import Where
class ParsingTmp():
    """Module-global registries filled while the grammar is being built."""
    # TODO: FIXME: decide on an order, if we do the opposite it doesn't work.
    contract_types = []  # pyparsing expressions for predefined contracts
    keywords = []  # reserved words; see add_keyword() for why these exist
def add_contract(x):
    """Register a pyparsing expression as a predefined contract type."""
    ParsingTmp.contract_types.append(x)
def add_keyword(x):
    """ Declares that x is a keyword --- this is useful to have more
        clear messages. "keywords" are not parsed by Extension.
        (see extensions.py) and allows to have "deep" error indications.
        See http://pyparsing.wikispaces.com/message/view/home/620225
        and the discussion of the "-" operator in the docs.
    """
    ParsingTmp.keywords.append(x)
# Short aliases used throughout the grammar definitions.
W = Where
O = Optional
S = Suppress

# Numeric literals.  The parse actions reference SimpleRValue, which is
# imported from .library further below; that is safe because the lambdas
# resolve the name lazily, at parse time.
basenumber = Word(nums)
point = Literal('.')
e = CaselessLiteral('E')
plusorminus = Literal('+') | Literal('-')
integer = Combine(O(plusorminus) + basenumber)
integer.setParseAction(lambda tokens: SimpleRValue(int(tokens[0])))

floatnumber = Combine(
    O(plusorminus) + integer + (point + O(basenumber)) ^ (e + integer))
floatnumber.setParseAction(lambda tokens: SimpleRValue(float(tokens[0])))

pi = Keyword('pi').setParseAction(
    lambda tokens: SimpleRValue(math.pi, 'pi'))  # @UnusedVariable
# numpy is an optional dependency; isnumber() degrades gracefully when it
# is not installed.
try:
    import numpy
except ImportError:
    numpy = None
def isnumber(x):
    """Return True for scalar quantities we can compare (=, >, >=, etc.):
    anything registered with the numbers ABC, plus numpy scalar types
    when numpy is available."""
    if isinstance(x, Number):
        return True
    return numpy is not None and isinstance(x, numpy.number)
# Forward declarations so the mutually recursive grammar can be assembled
# bottom-up and resolved at the end with `<<`.
rvalue = Forward()
rvalue.setName('rvalue')
contract_expression = Forward()
contract_expression.setName('contract')
simple_contract = Forward()
simple_contract.setName('simple_contract')

# Import all expressions -- they will call add_contract()
from .library import (EqualTo, Unary, Binary, composite_contract,
                      identifier_contract, misc_variables_contract,
                      scoped_variables_ref,
                      int_variables_contract, int_variables_ref,
                      misc_variables_ref, SimpleRValue)

number = pi | floatnumber | integer
operand = number | int_variables_ref | misc_variables_ref | scoped_variables_ref
operand.setName('r-value')

op = operatorPrecedence
# op = myOperatorPrecedence
# Arithmetic over operands, from tightest (unary minus) to loosest binding.
rvalue << op(operand, [
         ('-', 1, opAssoc.RIGHT, Unary.parse_action),
         ('*', 2, opAssoc.LEFT, Binary.parse_action),
         ('-', 2, opAssoc.LEFT, Binary.parse_action),
         ('+', 2, opAssoc.LEFT, Binary.parse_action),
         ('^', 2, opAssoc.LEFT, Binary.parse_action),
    ])

# I want
# - BindVariable to have precedence to EqualTo(VariableRef)
# but I also want:
# - Arithmetic to have precedence w.r.t BindVariable

# last is variables
add_contract(misc_variables_contract)
add_contract(int_variables_contract)
add_contract(rvalue.copy().setParseAction(EqualTo.parse_action))

# MatchFirst => registration order above decides which alternative wins.
hardwired = MatchFirst(ParsingTmp.contract_types)
hardwired.setName('Predefined contract expression')

simple_contract << (hardwired | identifier_contract)
simple_contract.setName('simple contract expression')

any_contract = composite_contract | simple_contract
any_contract.setName('Any simple or composite contract')

contract_expression << (any_contract)  # Parentheses before << !!
12,239 | 7865e85501fb7cb316f1b97220505a9886643ef9 | import re
import sys
# Command words that must never be mistaken for variable assignments
# (checked by set() before it treats a line as "name = value").
keywords = ['if','btn','p','pln','instr']
def clear (filename):
    # Normalise a raw adventure script: expand "then"/"else" into '&'-joined
    # Twee macros, strip ';' comments and blank lines.
    # NOTE(review): the file handle from open() is never closed -- TODO use
    # a `with` block.
    slist = open(filename).readlines()
    i = 0
    for s in slist:
        # Each "then" opens a conditional, so append a matching <<endif>>.
        n = s.lower().count(" then ")
        s = s + " & <<endif>>" * n
        s = re.sub(" then "," & ",s,flags=re.I)
        s = re.sub(" else ","& <<else>> &",s,flags=re.I)
        s = re.sub("perkill","<<display 'perkill'>>",s,flags=re.I)
        s = re.sub("invkill","<<display 'perkill'>>",s,flags=re.I)
        s = re.sub("inv_","",s,flags=re.I)
        # Re-join the '&'-separated commands with normalised spacing.
        alist = s.split('&')
        j = 0
        for a in alist:
            alist[j] = a.strip()
            j = j+1
        s = " & ".join(alist)
        slist[i] = s.strip()
        i = i+1
    text = "\n".join(slist)
    text = re.sub(";.*$","",text,flags=re.M)  # drop ';' comments
    text = re.sub("\n+","\n",text)            # collapse blank lines
    return text
def start (text):
    """Prepend a synthetic ':start' passage that resets variables (via
    the 'perkill' passage) and jumps to the first real passage.  Any
    pre-existing 'start' targets/passages are renamed 'start_old'."""
    first_passage = re.findall("^:.*$", text, flags=re.M)[0]
    body = re.sub("(goto|btn|:)( *)start", "\\1\\2start_old", text, flags=re.I)
    header = "\n".join(["", ":start", "<<display 'perkill'>>",
                        "goto " + first_passage[1:], "end", ""])
    return header + body
def btnuse(text):
    # Normalise ':use_*' passages and wire up their action buttons.
    text = re.sub("^: ",":",text,flags=re.M)
    text = re.sub("^:Use",":use",text,flags=re.M)
    # Item passages (':use_item') vs. action passages (':use_item_action').
    uselist = re.findall("^:use_[^_\n]*$",text,flags=re.M)
    actlist = re.findall("^:use_.*_.*$",text,flags=re.M)
    for a in actlist:
        # Synthesize a stub item passage for actions whose item has none.
        # NOTE(review): newuse is never appended to uselist, so an item with
        # several actions gets several identical stubs -- verify intent.
        newuse = re.sub("(:use_.*)_.*","\\1",a)
        if newuse not in uselist:
            name = re.sub(":use_","",newuse)
            text = text + "\n" + newuse + "\n" + "pln " + name + '\n' + "end" + '\n'
    # For every item passage, append one 'btn' per action passage before 'end'.
    plist = re.findall("^:use_[^_\n]*\n[\s\S]*?\nend",text,flags=re.I|re.M)
    for p in plist:
        pname = p.split('\n')[0]
        pname = re.sub(":use_","",pname)
        alist = re.findall(":use_"+pname+"_.*$",text,flags=re.M)
        newp = p[:-3]  # strip the trailing 'end'
        for a in alist:
            bname = a[1:]
            btext =a.split('_')[2]
            newp = newp + "btn " + bname + ',' + btext + '\n'
        newp = newp + "end"
        text = text.replace(p,newp)
    return text
def inventory(text):
    # Build the StoryMenu passage: one inventory link per ':use_<item>'
    # passage, shown only while the item's $counter variable is positive.
    res = "::StoryMenu[::]1-1-1\n"
    invlist = re.findall(":use_[^_\n]*\n",text)
    for i in invlist:
        pred = re.sub(".*use_","",i.strip())       # human-readable item name
        btntext = i.strip()[1:]                    # link target (passage name)
        predname = pred.replace(' ','_').replace('.','')  # $variable-safe name
        res = res + "<<if $"+predname+" gt 0>>[[+"+pred+"|"+btntext+"]] <<endif>>"
        res = res + '\n'
    return res
def perkill (text):
    """Build the ':: perkill' passage that resets every $variable ever
    assigned via <<set ...>> in the story back to zero."""
    seen = []
    for match in re.findall("<<set \$.*?=", text):
        var = match[6:-1]  # strip '<<set ' prefix and trailing '='
        if var not in seen:
            seen.append(var)
    lines = [":: perkill[::]1-1-1"] + ["<<set " + v + "=0>>" for v in seen]
    return "\n".join(lines) + "\n"
def end (text):
    """Replace every standalone 'end' line (case-insensitive) with the
    'endendend' sentinel used by later passes."""
    return re.sub("^end$", "endendend", text, flags=re.I | re.M)
def name (text):
    """Rewrite every ':passage' header into Twee's '::passage[::]1-1-1'
    header form."""
    return re.sub(":(.*)", "::\\1[::]1-1-1", text)
def ifthen (text):
    """Translate a URQ 'if <condition>' line into a Twee '<<if ...>>' macro.

    Multi-word variable names are joined with underscores and prefixed
    with '$'; comparison operators become Twee keywords (gt/lt/eq/neq/
    gte/lte). Non-'if' lines pass through unchanged.

    FIXES: (1) the underscore-stripping substitution's result was
    discarded (`re.sub(...)` with no assignment -- a dead statement), so
    underscores glued to comparison operators survived; it is now
    assigned. (2) The Python-2-only `<>` operator is replaced by `!=`
    (same behavior, also valid on Python 3).
    """
    if text[:3].lower() != "if ": return text
    newi = re.sub(" *(<|>|=|<=|>=|<>) *","\\1",text)
    newi = re.sub(" +"," ",newi)
    newi = re.sub("\)(\S)",") \\1",newi)
    # Prefix every operand after if/and/or/not/( with '$'.
    newi = re.sub("((if |and |or |not |\()+)(\S)","\\1$\\3",newi, flags=re.I)
    newi = re.sub(" +","_",newi[3:])
    newi = re.sub("[_ ](and|or|not)([_ ])"," \\1 ", newi, flags=re.I).replace('=','~')
    newi = re.sub("_*([<>~]+)_*","\\1",newi)  # was previously unassigned
    newi = "if " + newi.replace('>~',' gte ').replace('<>'," neq ").replace('<~',' lte ').replace('~',' eq ').replace('>',' gt ').replace('<',' lt ').replace(' $and ',' and ').replace(' $or ',' or ').replace(' $not ',' !')
    newi = newi.replace("if _","if ").replace('.','').replace(',','')
    newi =re.sub("(eq |gt |lt |neq |lte | gte )([^0-9\'])","\\1$\\2", newi)
    newi =re.sub("not_([^\s\>]+)","!(\\1)",newi)
    # Variables starting with a digit are not legal Twee names.
    newi = re.sub("\$(\d)","$_\\1",newi)
    text = "<<" + newi + ">>"
    return text
def btn (text):
    """Convert 'btn <target>, <label>' into a Twee link '[[label|target]]'.

    Links pointing at a 'use...' passage get a '+' prefix on the label
    (inventory-style action links). Non-btn lines pass through.

    FIX: replaced the Python-2-only `<>` operator with `!=`; behavior is
    unchanged and the function now also parses on Python 3.
    """
    if text[:4].lower() != 'btn ': return text
    text = re.sub("btn (.*?), *(.*)","[[\\2|\\1]]",text, flags=re.I)
    text = re.sub("(\[\[)([^\]]+\|use[^\]]+\]\])","\\1+\\2",text)
    text = text + '\n'
    return text
def set (text):
    """Convert a bare 'var = expr' line into a Twee '<<set ...>>' macro.

    Multi-word names are underscored and '$'-prefixed; operands that are
    not numeric or quoted literals also get the '$' prefix. Lines whose
    first word is a known URQ keyword pass through untouched.

    FIX: replaced the Python-2-only `<>` operator with `!=` (identical
    behavior, Python 3 compatible). NOTE(review): this function shadows
    the built-in `set`; renaming it would touch the driver script, so
    the name is kept.
    """
    t1 = text.split(' ')[0]
    if t1 in keywords: return text  # `keywords` is defined earlier in this module
    tlist = text.split('=')
    if len(tlist) != 2: return text
    tlist[0] = '$' + tlist[0].strip().replace(' ','_')
    newi = '=' + tlist[1].strip()
    newi = re.sub(" *(=\*|\+|\-) *","\\1",newi)
    newi = newi.replace(' ','_')
    newi = re.sub("([=\*\-\+])([^0-9\'])","\\1$\\2",newi)
    newi = newi.replace('.','').replace(',','')
    tlist[1] = newi
    newi = tlist[0]+tlist[1]
    # Variables starting with a digit are not legal Twee names.
    newi = re.sub("\$(\d)","$_\\1",newi)
    text = "<<set " + newi + ">>"
    return text
def inv (text):
    """Translate URQ inventory commands into plain assignments for set().

    'inv+ item'    -> 'item=1'        (gain the item)
    'inv- item'    -> 'item=0'        (lose the item)
    'inv+ N, item' -> 'item=item+N'   (counted gain)
    'inv- N, item' -> 'item=item-N'   (counted loss)

    FIX: replaced the Python-2-only `<>` operator with `!=`; behavior is
    unchanged and the function now also parses on Python 3.
    """
    if text[:3].lower() != "inv": return text
    count = text.find(',')
    if count == -1:
        text = re.sub("inv\+ (.*)","\\1=1",text,flags=re.I)
        text = re.sub("inv\- (.*)","\\1=0",text,flags=re.I)
        return text
    else:
        text = re.sub("inv\+ (.*), *(.*)","\\2=\\2+\\1",text,flags=re.I)
        text = re.sub("inv\- (.*), *(.*)","\\2=\\2-\\1",text,flags=re.I)
        return text
def instr(text):
    """Convert 'instr var=value' into a Twee string assignment macro.

    BUG FIX: the guard compared the *bound method* `text[:5].lower`
    (never called) against "instr", which is always unequal, so the
    function returned every line unchanged. The method is now called,
    and the Python-2-only `<>` is replaced with `!=`.
    """
    if text[:5].lower() != "instr": return text
    text = re.sub("instr (.*)=(.*)","\\1='\\2'",text)
    text = "<<set " + text + ">>"
    return text
def goto(text):
    """Rewrite 'goto <label>' as a Twee display macro followed by a newline."""
    return re.sub("goto (.*)", "<<display '\\1'>>\n", text, flags=re.I)
def rnd (text):
    """Convert '<<set X=$rnd*N>>' (URQ random value) into '<<random X = N>>'."""
    return re.sub("<<set (.*)=\$rnd\*(.*)>>", "<<random \\1 = \\2>>", text)
def proc (text):
    """Rewrite 'proc <label>' (call-and-return) as a Twee display macro."""
    return re.sub("proc (.*)", "<<display '\\1'>>", text, flags=re.I)
def cls (text):
    """Map the URQ 'cls' (clear screen) command to the '<<clrscr>>' macro."""
    return "<<clrscr>>" if text.lower() == "cls" else text
def pause (text):
    """Drop URQ 'pause <ms>' commands; the Twee output has no pause equivalent."""
    return re.sub("pause (\d+)", "", text)
def label (text):
    """Expand an in-paragraph ':label' line into a display call plus a new passage header."""
    if not text:
        return text
    if text.startswith(':'):
        text = re.sub(":(.*)", "<<display '\\1'>>\n::\\1[::]1-1-1\n", text)
    return text
def pln (text):
    """Handle URQ text output.

    Bare 'pln' emits a blank line; 'pln <s>' prints <s> plus a newline;
    'p <s>' prints <s> with '#$' markers turned into spaces and no
    trailing newline.
    """
    if text.lower() == "pln":
        text = "\n"
    if text[:4].lower() == "pln ":
        text = text[4:] + '\n'
    if text[:2].lower() == "p ":
        text = text[2:].replace('#$', ' ')
    return text
def btnstr (text):
    """Put inline '[[...]]' links on their own line by breaking before the last '[['."""
    return re.sub("^(.*)\[\[", "\\1\n[[", text, flags=re.M)
def podst (text):
    """Substitute '#var$' placeholders with '<<print $var>>' and strip bare '#$' markers."""
    expanded = re.sub("\#(\S+)\$", "<<print $\\1>>", text)
    return expanded.replace("#$", "")
# Driver: read the URQ source named on the command line, run the
# conversion pipeline, and write the Twee output to hamster1.sm.
# This module targets Python 2 (see the str.decode/encode at the end).
urqfile = clear(sys.argv[1])
# NOTE(review): the output file is never explicitly closed.
smfile = open("hamster1.sm",'w')
urqfile = start(urqfile)
urqfile = btnuse(urqfile)
urqfile = end(urqfile)
# Paragraphs were separated by end() above; process each one.
list_par = urqfile.split("endendend")
resuilt = "\n"
resuilt = resuilt + inventory(urqfile)
for par in list_par:
    plist = par.strip().split('\n')
    plist[0] = name(plist[0])
    i = 1
    for ptext in plist[1:]:
        # '&' joins several URQ commands on one line.
        alist = ptext.split(" & ")
        j = 0
        for a in alist:
            # Order matters: inv() rewrites to assignments that set()
            # then converts; pln() must run after the markers are gone.
            a = ifthen(a)
            a = btn(a)
            a = inv(a)
            a = set(a)
            a = rnd(a)
            a = goto(a)
            a = proc(a)
            a = cls(a)
            a = pause(a)
            a = label(a)
            a = podst(a)
            a = pln(a)
            alist[j] = a
            j = j+1
        plist[i] = ' '.join(alist)
        i = i+1
    resuilt = resuilt + plist[0] + '\n' + ' '.join(plist[1:]) + '\n' + '\n'
resuilt = btnstr(resuilt)
resuilt = resuilt + perkill(resuilt)
# Python 2: transcode the byte string from cp1251 to utf-8 before writing.
smfile.write(resuilt.decode('cp1251').encode('utf8'))
12,240 | 9f15ce3d019c95272d87b4a218abc7c6600169c7 | entry = int(input("enter a number: "))
# `entry` is read from the user on the line above.
if entry % 2 == 0:
    print("this is even")
else:
    print("this is odd")
# Multiples of 4 get an extra message (in addition to "even").
if entry % 4 == 0:
    print("this is a multiple of 4")
# Second task: check whether one user-supplied number divides another.
num = int(input("enter a number to check: "))
check = int(input("enter a number to divide by: "))
# NOTE(review): check == 0 raises ZeroDivisionError here.
if num % check == 0:
    print("this divides evenly")
else:
    print("this does not divide evenly")
12,241 | 99dc07fb24195078fb5c79b833b0fc6bbbc84523 | def not_null_field(arr):
    # Return False as soon as any of the first 36 cells holds 'X'; the
    # for-else returns True only when the loop finishes without a hit.
    # NOTE(review): raises IndexError if arr has fewer than 36 elements.
    for i in range(36):
        if arr[i] == 'X':
            return False
    else:
        return True
|
12,242 | 8060200fa2ee8aeab14ef8bbc0a263990462f9ef | from __future__ import unicode_literals
from django.db import models
import re
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
class UserManager(models.Manager):
    """Custom manager exposing simple registration-form validators."""

    def email_validate(self, email):
        """Match *email* against EMAIL_REGEX; returns a match object or None."""
        result = EMAIL_REGEX.match(email)
        return result

    def pw_validate(self, password, confirm):
        """Check that the two password entries agree."""
        matches = (password == confirm)
        return matches
# Create your models here.
class User(models.Model):
    # Registered user record created by the registration form.
    email = models.CharField(max_length=45)
    first_name = models.CharField(max_length=45)
    last_name = models.CharField(max_length=45)
    # Password string (storage/hash format is not visible in this file).
    password = models.CharField(max_length=100)
    created_at = models.DateTimeField(auto_now_add=True)
    # NOTE(review): auto_now_add freezes this at creation; an "updated"
    # column normally wants auto_now=True -- confirm before changing,
    # since fixing it requires a migration.
    updated_at = models.DateTimeField(auto_now_add=True)
    objects = UserManager()
|
12,243 | 6289b407cd36c8845f48284ed20908a3a58d150b | import requests
import os
from brews.models.models import Brewery
import pdb
# Service endpoints and credentials; both API keys must be present in the
# environment (os.environ[...] raises KeyError at import time otherwise).
BASE_URL = 'http://api.brewerydb.com/v2/locations'
SEARCH_URL = 'https://maps.googleapis.com/maps/api/place/textsearch/json'
BREWERYDB_KEY = os.environ['BREWERYDB_KEY']
PLACES_KEY = os.environ['PLACES_KEY']
def nearby_search(brewery, radius):
    """Query the Google Places text search for *brewery*; return the result list.

    The query string combines the brewery's name and address fields
    (missing keys become empty strings); *radius* and the brewery's
    coordinates are forwarded to the Places API.
    """
    parts = (brewery.get('name', ''), brewery.get('streetAddress', ''),
             brewery.get('locality', ''), brewery.get('region', ''),
             brewery.get('postalCode', ''))
    keyword = '{}, {}, {}, {} {}'.format(*parts)
    params = '?key={}&location={},{}&radius={}&query={}'.format(
        PLACES_KEY, brewery['latitude'], brewery['longitude'], radius, keyword)
    return requests.get('{}{}'.format(SEARCH_URL, params)).json()['results']
def get_page(page_num):
    """Fetch one page of Massachusetts brewery locations from BreweryDB.

    BUG FIX: the query string previously contained '...®ion=Massachusetts...';
    the '&reg' of '&region' had been mangled into the (R) sign by an HTML
    entity conversion, so the region parameter was never sent correctly.
    """
    url = '{}?key={}&region=Massachusetts&p={}'.format(BASE_URL, BREWERYDB_KEY, page_num)
    return requests.get(url).json()
def _enrich_breweries(data, breweries):
    """Append a Brewery for each raw record, merged with Google Places details.

    Records with no Places match are skipped, mirroring the original code.
    """
    for brewery in data:
        search_results = nearby_search(brewery, 2)
        if not search_results:
            continue
        content = search_results[0]
        brewery['formatted_address'] = content['formatted_address']
        brewery['latitude'] = content['geometry']['location']['lat']
        brewery['longitude'] = content['geometry']['location']['lng']
        brewery['name'] = content['name']
        # '' default kept from the original page-1 path; the page>=2 path
        # inconsistently defaulted to None -- unified here.
        brewery['rating'] = content.get('rating', '')
        breweries.append(Brewery(**brewery))


def get_breweries():
    """Collect every BreweryDB location page and return a list of Brewery objects.

    FIX: the original duplicated the whole page-processing loop once for
    page 1 and again for pages 2..N (with a diverging rating default);
    both paths now share _enrich_breweries. The unused totalResults
    variable was dropped.
    """
    first_page = get_page(1)
    num_pages = first_page['numberOfPages']
    breweries = []
    _enrich_breweries(first_page['data'], breweries)
    for page_num in range(2, num_pages + 1):
        _enrich_breweries(get_page(page_num)['data'], breweries)
    return breweries
|
12,244 | 3edbd46d90302981e5931adc6c725471f85111e4 | """
Contains basic utility functions.
"""
import keras.backend as K
from keras.layers import Input, Dense
from keras.models import Model, Sequential
import numpy as np
def softmax(x):
    """Compute softmax values for each sets of scores in x (normalized over axis 0).

    FIX: shifts the scores by their maximum before exponentiating so that
    large inputs no longer overflow exp() into inf/nan; the shift cancels
    in the ratio, so results are identical for inputs the original handled.
    """
    shifted = np.exp(x - np.max(x, axis=0))
    return shifted / np.sum(shifted, axis=0)
def softmax_policy(Q):
    """Return (pi, action): the softmax distribution over Q and one sampled action."""
    pi = softmax(Q)
    choices = np.arange(len(Q))
    action = int(np.random.choice(choices, 1, p=list(pi)))
    return pi, action
def epsilon_greedy(Q, eps=0.1):
    """Epsilon-greedy distribution over Q plus one action sampled from it.

    The greedy action receives probability 1-eps; the remaining eps mass
    is split evenly across the other len(Q)-1 actions.
    """
    num_actions = len(Q)
    pi = (eps / (num_actions - 1)) * np.ones(num_actions)
    pi[np.argmax(Q)] = 1 - eps
    action = int(np.random.choice(np.arange(num_actions), 1, p=list(pi)))
    return pi, action
def policy_evaluation(actor, env, num_episodes=10, max_steps=250):
    "Evaluate quality of policy by performing rollouts | actor should predict action given state"
    # Average undiscounted episode return over num_episodes rollouts.
    evaluation = 0.0
    for _ in range(num_episodes):
        state = env.reset()
        episode_reward = 0.0
        done = 0
        time = 0
        while (done != True and time < max_steps):
            # actor.predict takes a (1, state_dim) batch; take the single action.
            action = actor.predict(state.reshape(1,-1))[0]
            sp, r, done, info = env.step(action)
            # NOTE(review): the reward of the terminal transition is
            # discarded here -- confirm that is intentional.
            if (done != True):
                episode_reward += r
            state = sp
            time += 1
        # Dividing each episode by num_episodes accumulates the mean.
        evaluation = evaluation + episode_reward/num_episodes
        env.reset()
    return evaluation
def inspect_performance(actor, env, num_episodes=1):
    # Render rollouts of the policy for visual inspection (no return value).
    max_steps = env.spec.timestep_limit
    for _ in range(num_episodes):
        s = env.reset()
        time = 0
        done = 0
        while (done != True and time < max_steps):
            env.render()
            a = np.array(actor.predict(s.reshape(1, -1)))
            sp, r, done, info = env.step(a)
            s = sp
            time += 1
    # Python 2 print statement: this module targets Python 2.
    print "Completed in: ", time, " (max allowed = ", max_steps, ")"
def get_trainable_weights(model):
    """Collect the trainable weight tensors of every layer, in layer order."""
    weights = []
    for layer in model.layers:
        weights.extend(layer.trainable_weights)
    return weights
def unpack_theta(model, trainable_weights=None):
    """Flatten all trainable parameters of *model* into one 1-D numpy array.

    If *trainable_weights* is not supplied, it is collected from the model.

    FIX: the default check used `trainable_weights == None`; identity
    comparison with `is None` is the correct idiom (PEP 8) and avoids
    triggering element-wise __eq__ on array-like arguments.
    """
    if trainable_weights is None:
        trainable_weights = get_trainable_weights(model)
    x = np.empty(0)
    for param in trainable_weights:
        val = K.eval(param)  # pull the tensor's value out of the backend
        x = np.concatenate([x, val.reshape(-1)])
    return x
def pack_theta(model, theta):
    """Reshape the flat vector *theta* back into per-parameter arrays.

    Walks the model's layers in order, carving consecutive slices of
    theta into the shape of each parameter; returns a flat list of
    float32 arrays suitable for model.set_weights().
    """
    weights = []
    offset = 0
    for layer in model.layers:
        for param in layer.get_weights():
            size = np.prod(param.shape)
            chunk = theta[offset:offset + size]
            weights.append(np.asarray(chunk.reshape(param.shape), dtype=np.float32))
            offset += size
    return weights
def set_model_params(model, theta):
    """Load the flattened parameter vector *theta* into *model* and return the model."""
    model.set_weights(pack_theta(model, theta))
    return model
def get_exploration(env, ep, num_episodes, high=0.5, low=0.1):
    """Random exploration action whose magnitude decays linearly over training.

    The scale falls from *high* toward *low* as episode *ep* approaches
    *num_episodes*; the action is drawn uniformly within the scaled span
    above the action-space lower bound.
    """
    upper = np.asarray(env.action_space.high)
    lower = np.asarray(env.action_space.low)
    n_dims = len(upper)
    decay = (high - low) / num_episodes
    span = (high - decay * ep) * (upper - lower)
    return lower + span * np.random.rand(n_dims)
12,245 | dabf47f94c409db18c89068a452d64a9aedc1849 | # Generated by Django 3.2.3 on 2021-06-24 13:49
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated reverse of 0004: drops the Cart and BooksInCart tables.
    # Django migrations are schema history -- do not edit by hand.

    dependencies = [
        ('users', '0004_booksincart'),
    ]

    operations = [
        # The FK must be removed before the model that owns it is deleted.
        migrations.RemoveField(
            model_name='cart',
            name='customer',
        ),
        migrations.DeleteModel(
            name='BooksInCart',
        ),
        migrations.DeleteModel(
            name='Cart',
        ),
    ]
|
12,246 | 1fe03a9016cae02563d587ee5a0c7555e14e65a4 | ROLES = {
    # Role-name constants; keys and values are identical so either form
    # can be used for lookups and storage.
    'commissioner': 'commissioner',
    'player': 'player',
}
|
12,247 | ac17a2c4270f48c1c22e21cdd3996511143ff7ef | from flask import Flask, request, render_template, jsonify
from flask_cors import CORS
from slackclient import SlackClient
from backend.slack.request_handler import GraphRequestHandler
import configparser
# Flask app serving the pre-built frontend bundle from ./dist.
app = Flask(__name__,
            static_folder = "./dist/static",
            template_folder = "./dist")
CORS(app)  # allow a frontend on another origin to call the API
rh = GraphRequestHandler()  # shared handler; holds registered client tokens
@app.route('/')
def index():
    # Serve the single-page-app entry point.
    return render_template("index.html")
@app.route('/api/load/<token_start>/<chan_name>/<int:nb_days>/<code>')
def get_graphs_data(token_start, chan_name, nb_days, code):
    # Build graph data for one channel over the last nb_days; `code`
    # identifies the OAuth session registered in get_user_token.
    response = rh.handle_request(code, token_start, chan_name, nb_days)
    return jsonify(response)
@app.route('/api/load/available_channels')
def get_available_channels():
    # NOTE(review): unimplemented stub -- a Flask view returning None
    # raises a 500 when hit; it needs a real payload or a 501 response.
    return None
@app.route('/api/auth/<code>')
def get_user_token(code):
    # Exchange a Slack OAuth `code` for an access token and remember it.
    sc = SlackClient("")  # unauthenticated client; oauth.access needs no token
    # Read config
    config = configparser.ConfigParser()
    config.read('config.ini')
    CLIENT_ID = config['SLACK_API']['CLIENT_ID']
    CLIENT_SECRET = config['SLACK_API']['CLIENT_SECRET']
    # Request the auth tokens from Slack
    auth_response = sc.api_call(
        "oauth.access",
        client_id=CLIENT_ID,
        client_secret=CLIENT_SECRET,
        code=code
    )
    token = auth_response['access_token']
    rh.register_new_client(code, token)
    # NOTE(review): the raw oauth.access response (including the token)
    # is returned to the browser -- confirm this exposure is intended.
    return jsonify(auth_response)
12,248 | 6d326ffa13a0e764d69ec74f1bdd71ef964084ac | #!/root/bin/python3
from twisted.internet import protocol, reactor
# Address of the chat server this client connects to.
host = 'localhost'
port = 9999
class TCP(protocol.Protocol):#TwistedClientProtocol,not tcp
    # Toy chat protocol: prompt on stdin, send the line, print the reply,
    # loop; an empty input closes the connection.
    def sendData(self):
        # NOTE(review): input() blocks the reactor thread while waiting
        # for the user -- fine for this demo, unsuitable beyond it.
        data = input(">>>")
        if data:
            # "正在发送消息" = "sending message"
            print("正在发送消息:" + data)
            self.transport.write(data.encode('utf-8'))
        else:
            self.transport.loseConnection()
    def connectionMade(self):
        # Kick off the first prompt as soon as the connection is up.
        self.sendData()
    def dataReceived(self, data):
        # Echo the server's reply, then prompt for the next message.
        print(data.decode('utf-8'))
        self.sendData()
class TCF(protocol.ClientFactory):
    # Factory producing TCP protocol instances; any disconnect (clean or
    # failed) stops the reactor so the script terminates.
    protocol = TCP
    clientConnectionLost = clientConnectionFailed = lambda self, connector, reason: reactor.stop()
# Connect to the chat server and hand control to the Twisted event loop.
reactor.connectTCP(host, port, TCF())
reactor.run()
|
12,249 | 1cf7456fb6c41b8c6d75ad62aae5d2ce7b595ab5 | #!/usr/bin/env python3.8
"""pegen -- PEG Generator.
Search the web for PEG Parsers for reference.
"""
import argparse
import sys
import time
import token
import traceback
from typing import Tuple
from pegen.build import Grammar, Parser, Tokenizer, ParserGenerator
from pegen.validator import validate_grammar
def generate_c_code(
    args: argparse.Namespace,
) -> Tuple[Grammar, Parser, Tokenizer, ParserGenerator]:
    """Build the C parser and generator for the parsed CLI arguments.

    Verbosity mapping: -vvv traces the tokenizer, -vv or -vvvv traces the
    parser. On failure prints a short error (full traceback only under
    -v) and exits with status 1.
    """
    from pegen.build import build_c_parser_and_generator
    verbose = args.verbose
    verbose_tokenizer = verbose >= 3
    verbose_parser = verbose == 2 or verbose >= 4
    try:
        grammar, parser, tokenizer, gen = build_c_parser_and_generator(
            args.grammar_filename,
            args.tokens_filename,
            args.output,
            args.compile_extension,
            verbose_tokenizer,
            verbose_parser,
            args.verbose,
            keep_asserts_in_extension=False if args.optimized else True,
            skip_actions=args.skip_actions,
        )
        return grammar, parser, tokenizer, gen
    except Exception as err:
        if args.verbose:
            raise # Show traceback
        traceback.print_exception(err.__class__, err, None)
        sys.stderr.write("For full traceback, use -v\n")
        sys.exit(1)
def generate_python_code(
    args: argparse.Namespace,
) -> Tuple[Grammar, Parser, Tokenizer, ParserGenerator]:
    """Build the Python parser and generator for the parsed CLI arguments.

    Same verbosity mapping and error handling as generate_c_code.
    """
    from pegen.build import build_python_parser_and_generator
    verbose = args.verbose
    verbose_tokenizer = verbose >= 3
    verbose_parser = verbose == 2 or verbose >= 4
    try:
        grammar, parser, tokenizer, gen = build_python_parser_and_generator(
            args.grammar_filename,
            args.output,
            verbose_tokenizer,
            verbose_parser,
            skip_actions=args.skip_actions,
        )
        return grammar, parser, tokenizer, gen
    except Exception as err:
        if args.verbose:
            raise # Show traceback
        traceback.print_exception(err.__class__, err, None)
        sys.stderr.write("For full traceback, use -v\n")
        sys.exit(1)
# Command-line interface: shared flags plus one subcommand per backend.
argparser = argparse.ArgumentParser(
    prog="pegen", description="Experimental PEG-like parser generator"
)
argparser.add_argument("-q", "--quiet", action="store_true", help="Don't print the parsed grammar")
argparser.add_argument(
    "-v",
    "--verbose",
    action="count",
    default=0,
    help="Print timing stats; repeat for more debug output",
)
subparsers = argparser.add_subparsers(help="target language for the generated code")
# 'c' backend: generates C code for inclusion into CPython.
c_parser = subparsers.add_parser("c", help="Generate C code for inclusion into CPython")
c_parser.set_defaults(func=generate_c_code)
c_parser.add_argument("grammar_filename", help="Grammar description")
c_parser.add_argument("tokens_filename", help="Tokens description")
c_parser.add_argument(
    "-o", "--output", metavar="OUT", default="parse.c", help="Where to write the generated parser"
)
c_parser.add_argument(
    "--compile-extension",
    action="store_true",
    help="Compile generated C code into an extension module",
)
c_parser.add_argument(
    "--optimized", action="store_true", help="Compile the extension in optimized mode"
)
c_parser.add_argument(
    "--skip-actions", action="store_true", help="Suppress code emission for rule actions",
)
# 'python' backend: generates a pure-Python parser module.
python_parser = subparsers.add_parser("python", help="Generate Python code")
python_parser.set_defaults(func=generate_python_code)
python_parser.add_argument("grammar_filename", help="Grammar description")
python_parser.add_argument(
    "-o",
    "--output",
    metavar="OUT",
    default="parse.py",
    help="Where to write the generated parser",
)
python_parser.add_argument(
    "--skip-actions", action="store_true", help="Suppress code emission for rule actions",
)
def main() -> None:
    """Entry point: build the requested parser, validate the grammar,
    and print grammar/timing diagnostics according to verbosity."""
    from pegen.testutil import print_memstats
    args = argparser.parse_args()
    if "func" not in args:
        argparser.error("Must specify the target language mode ('c' or 'python')")
    t0 = time.time()
    grammar, parser, tokenizer, gen = args.func(args)
    t1 = time.time()
    validate_grammar(grammar)
    if not args.quiet:
        if args.verbose:
            print("Raw Grammar:")
            for line in repr(grammar).splitlines():
                print(" ", line)
        print("Clean Grammar:")
        for line in str(grammar).splitlines():
            print(" ", line)
    if args.verbose:
        # Diagnostics about rule reachability and left recursion.
        print("First Graph:")
        for src, dsts in gen.first_graph.items():
            print(f" {src} -> {', '.join(dsts)}")
        print("First SCCS:")
        for scc in gen.first_sccs:
            print(" ", scc, end="")
            if len(scc) > 1:
                print(
                    " # Indirectly left-recursive; leaders:",
                    {name for name in scc if grammar.rules[name].leader},
                )
            else:
                name = next(iter(scc))
                if name in gen.first_graph[name]:
                    print(" # Left-recursive")
                else:
                    print()
    if args.verbose:
        dt = t1 - t0
        diag = tokenizer.diagnose()
        nlines = diag.end[0]
        # The ENDMARKER token sits on a synthetic extra line.
        if diag.type == token.ENDMARKER:
            nlines -= 1
        print(f"Total time: {dt:.3f} sec; {nlines} lines", end="")
        if dt:
            print(f"; {nlines / dt:.0f} lines/sec")
        else:
            print()
        print("Caches sizes:")
        print(f" token array : {len(tokenizer._tokens):10}")
        print(f" cache : {len(parser._cache):10}")
        if not print_memstats():
            print("(Can't find psutil; install it for memory stats.)")
if __name__ == "__main__":
if sys.version_info < (3, 8):
print("ERROR: using pegen requires at least Python 3.8!", file=sys.stderr)
sys.exit(1)
main()
|
12,250 | 3af892da0e8254cc6ff0d1ef8811aaee2d44b15b | # Exercise_1
# Import LabelEncoder
from sklearn.preprocessing import LabelEncoder
# Fill missing values with 0
df.LotFrontage = df.LotFrontage.fillna(0)
# Create a boolean mask for categorical columns
categorical_mask = (df.dtypes == object)
# Get list of categorical column names
categorical_columns = df.columns[categorical_mask].tolist()
# Print the head of the categorical columns
print(df[categorical_columns].head())
# Create LabelEncoder object: le
le = LabelEncoder()
# Apply LabelEncoder to categorical columns
df[categorical_columns] = df[categorical_columns].apply(lambda x: le.fit_transform(x))
# Print the head of the LabelEncoded categorical columns
print(df[categorical_columns].head())
--------------------------------------------------
# Exercise_2
# Import OneHotEncoder
from sklearn.preprocessing import OneHotEncoder
# Create OneHotEncoder: ohe
ohe = OneHotEncoder(categorical_features=categorical_mask, sparse=False)
# Apply OneHotEncoder to categorical columns - output is no longer a dataframe: df_encoded
df_encoded = ohe.fit_transform(df)
# Print first 5 rows of the resulting dataset - again, this will no longer be a pandas dataframe
print(df_encoded[:5, :])
# Print the shape of the original DataFrame
print(df.shape)
# Print the shape of the transformed array
print(df_encoded.shape)
--------------------------------------------------
# Exercise_3
# Import DictVectorizer
from sklearn.feature_extraction import DictVectorizer
# Convert df into a dictionary: df_dict
df_dict = df.to_dict("records")
# Create the DictVectorizer object: dv
dv = DictVectorizer(sparse=False)
# Apply dv on df: df_encoded
df_encoded = dv.fit_transform(df_dict)
# Print the resulting first five rows
print(df_encoded[:5,:])
# Print the vocabulary
print(dv.vocabulary_)
--------------------------------------------------
# Exercise_4
# Import necessary modules
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import Pipeline
# Fill LotFrontage missing values with 0
X.LotFrontage = X.LotFrontage.fillna(0)
# Setup the pipeline steps: steps
steps = [("ohe_onestep", DictVectorizer(sparse=False)),
("xgb_model", xgb.XGBRegressor())]
# Create the pipeline: xgb_pipeline
xgb_pipeline = Pipeline(steps)
# Fit the pipeline
xgb_pipeline.fit(X.to_dict("records"), y)
--------------------------------------------------
# Exercise_5
# Import necessary modules
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_score
# Fill LotFrontage missing values with 0
X.LotFrontage = X.LotFrontage.fillna(0)
# Setup the pipeline steps: steps
steps = [("ohe_onestep", DictVectorizer(sparse=False)),
("xgb_model", xgb.XGBRegressor(max_depth=2, objective="reg:linear"))]
# Create the pipeline: xgb_pipeline
xgb_pipeline = Pipeline(steps)
# Cross-validate the model
cross_val_scores = cross_val_score(xgb_pipeline,X.to_dict('records'),y,cv=10,scoring='neg_mean_squared_error')
# Print the 10-fold RMSE
print("10-fold RMSE: ", np.mean(np.sqrt(np.abs(cross_val_scores))))
--------------------------------------------------
# Exercise_6
# Import necessary modules
from sklearn_pandas import DataFrameMapper
from sklearn_pandas import CategoricalImputer
# Check number of nulls in each feature column
nulls_per_column = X.isnull().sum()
print(nulls_per_column)
# Create a boolean mask for categorical columns
categorical_feature_mask = X.dtypes == object
# Get list of categorical column names
categorical_columns = X.columns[categorical_feature_mask].tolist()
# Get list of non-categorical column names
non_categorical_columns = X.columns[~categorical_feature_mask].tolist()
# Apply numeric imputer
numeric_imputation_mapper = DataFrameMapper(
[([numeric_feature], Imputer(strategy="median")) for numeric_feature in non_categorical_columns],
input_df=True,
df_out=True
)
# Apply categorical imputer
categorical_imputation_mapper = DataFrameMapper(
[(category_feature, CategoricalImputer()) for category_feature in categorical_columns],
input_df=True,
df_out=True
)
--------------------------------------------------
# Exercise_7
# Import FeatureUnion
from sklearn.pipeline import FeatureUnion
# Combine the numeric and categorical transformations
numeric_categorical_union = FeatureUnion([
("num_mapper", numeric_imputation_mapper),
("cat_mapper", categorical_imputation_mapper)
])
--------------------------------------------------
# Exercise_8
# Create full pipeline
pipeline = Pipeline([
("featureunion", numeric_categorical_union),
("dictifier", Dictifier()),
("vectorizer", DictVectorizer(sort=False)),
("clf", xgb.XGBClassifier(max_depth=3))
])
# Perform cross-validation
cross_val_scores = cross_val_score(pipeline, kidney_data, y, scoring="roc_auc", cv=3)
# Print avg. AUC
print("3-fold AUC: ", np.mean(cross_val_scores))
--------------------------------------------------
# Exercise_9
# Create the parameter grid
gbm_param_grid = {
'clf__learning_rate': np.arange(0.05, 1, 0.05),
'clf__max_depth': np.arange(3, 10, 1),
'clf__n_estimators': np.arange(50, 200, 50)
}
# Perform RandomizedSearchCV
randomized_roc_auc = RandomizedSearchCV(estimator=pipeline,param_distributions=gbm_param_grid,scoring='roc_auc',cv=2,n_iter=2,verbose=1)
# Fit the estimator
randomized_roc_auc.fit(X, y)
# Compute metrics
print(randomized_roc_auc.best_score_)
print(randomized_roc_auc.best_estimator_)
--------------------------------------------------
|
12,251 | 1a4224b881345da1cca289542aa8681c4e7791fa | from rplidar import RPLidar
#Lidar launching
# Module-level connection to the RPLidar on a fixed serial port.
PORT_NAME ='/dev/ttyUSB0'
lidar = RPLidar(PORT_NAME)
def run():
    """Yield, per lidar rotation, a list of [angle, distance] pairs.

    Only measurements in the 90..270 degree half are kept; angles are
    re-centered on 180 (mod 360). The motor speed is set once on the
    first scan. On Ctrl-C/SystemExit the lidar is shut down cleanly.

    BUG FIX: the append line had mismatched parentheses
    (`([temp[1]-180)%360`) -- a SyntaxError; it now builds
    `[(angle - 180) % 360, distance]` as intended.
    """
    try:
        for i, scan in enumerate(lidar.iter_scans()):
            if i == 0:
                lidar.set_pwm(660)  # spin up to working speed once
            data = []
            for measurement in scan:
                angle = measurement[1]
                if 90 <= angle <= 270:
                    data.append([(angle - 180) % 360, measurement[2]])
            yield data
    except (KeyboardInterrupt, SystemExit):
        lidar.stop()
        lidar.stop_motor()
        lidar.disconnect()
12,252 | 64b11ed0a9961f63b122681bf1ddb74169bb0b49 | %tensorflow_version 1.x
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import helper as hlp
dataD = 2
# Loading data
if dataD == 100:
data = np.load('data100D.npy')
else:
data = np.load('data2D.npy')
[num_pts, dim] = np.shape(data)
is_valid = False
# For Validation set
if is_valid:
valid_batch = int(num_pts / 3.0)
np.random.seed(45689)
rnd_idx = np.arange(num_pts)
np.random.shuffle(rnd_idx)
val_data = data[rnd_idx[:valid_batch]]
data = data[rnd_idx[valid_batch:]]
[num_pts, dim] = np.shape(data)
# Distance function for GMM
def distanceFunc(X, MU):
    # Pairwise *squared* Euclidean distances between rows of X and MU
    # (despite the name, no square root is taken).
    # Inputs
    # X: is an NxD matrix (N observations and D dimensions)
    # MU: is an KxD matrix (K means and D dimensions)
    # Outputs
    # pair_dist: is the pairwise distance matrix (NxK)
    # TODO
    # Static shapes are required: N and K are read from the graph.
    N = X.get_shape().as_list()[0];
    K = MU.get_shape().as_list()[0];
    # Repeat each X row K times and tile MU N times so subtraction
    # lines up every (x, mu) pair; reshape back to (N, K).
    X_repeat = tf.repeat(X,repeats=K,axis=0);
    MU_tile = tf.tile(MU, [N,1]);
    reducedSum = tf.reduce_sum(tf.square(X_repeat-MU_tile),1);
    pair_dist = tf.reshape(reducedSum,[-1,K]) #tf.transpose(reducedSum)
    return pair_dist;
#assume sigma is already squared, or else need to fix it
def log_GaussPDF(X, mu, sigma):
    # Log-density of X under K isotropic Gaussians.
    # `sigma` holds the variance (already squared), per the module note;
    # `dim` is the module-level data dimensionality.
    # Inputs
    # X: N X D
    # mu: K X D
    # sigma: K X 1
    # Outputs:
    # log Gaussian PDF N X K
    # TODO
    N = X.get_shape().as_list()[0];
    pair_dist = distanceFunc(X,mu)
    # Broadcast each component's variance across all N rows.
    sigma_repeat = tf.repeat(tf.transpose(sigma),repeats = N, axis=0)
    result = -(dim/2)*tf.log(2*np.pi*sigma_repeat) - (pair_dist)/(2*sigma_repeat)
    return result;
def log_posterior(log_PDF, log_pi):
    # Log cluster responsibilities: Bayes' rule in log space, with the
    # normalizer computed via a numerically stable logsumexp over K.
    # Input
    # log_PDF: log Gaussian PDF N X K
    # log_pi: K X 1
    # Outputs
    # log_post: N X K
    log_pi = tf.squeeze(log_pi)
    return log_PDF + log_pi - hlp.reduce_logsumexp(log_PDF + log_pi, 1, True)
def MoG(numClusters):
    """Fit a K-component Mixture of Gaussians by direct gradient descent on
    the negative log-likelihood (TF1 graph mode), plot diagnostics, and
    return the per-update validation losses (empty when is_valid is False)."""
    print("\n\nNow Running with K =", numClusters)
    # Unconstrained parameters: phi -> variance via exp, psi -> mixture
    # weights via logsoftmax, so no explicit constraints are needed.
    MU = tf.Variable(tf.truncated_normal(shape=(numClusters, dim), stddev=0.5, dtype=tf.float32))
    X = tf.placeholder(tf.float32, shape=(num_pts, dim))
    phi = tf.Variable(tf.truncated_normal(shape=(numClusters, 1), dtype=tf.float32))
    psi = tf.Variable(tf.truncated_normal(shape=(numClusters, 1), dtype=tf.float32))
    sig_sqrt = tf.exp(phi)
    log_pi = hlp.logsoftmax(psi)
    log_gauss_val = log_GaussPDF(X, MU, sig_sqrt)
    log_post = log_posterior(log_gauss_val, log_pi)
    # Negative log-likelihood of the full batch.
    loss = - tf.reduce_sum(hlp.reduce_logsumexp(log_gauss_val + tf.transpose(log_pi), 1, keep_dims=True), axis=0)
    if (is_valid):
        V = tf.placeholder(tf.float32, shape=(np.shape(val_data)))
        val_log_gauss_val = log_GaussPDF(V, MU, sig_sqrt)
        v_loss = -tf.reduce_sum(hlp.reduce_logsumexp(val_log_gauss_val + tf.transpose(log_pi), 1), axis=0)
    optimizer = tf.train.AdamOptimizer(learning_rate=0.1, beta1=0.9, beta2=0.99, epsilon=1e-5).minimize(loss)
    init_op = tf.global_variables_initializer()
    trainingError = []
    validationError = []
    # NOTE(review): colors, cluster1..cluster5 and distanceMatrix are
    # dead variables; `if 1:` below is leftover debug scaffolding.
    colors = []
    epoch = 500
    cluster1 = 0;
    cluster2 = 0;
    cluster3 = 0;
    cluster4 = 0;
    cluster5 = 0;
    distanceMatrix = 0;
    with tf.Session() as sess:
        sess.run(init_op)
        feed_dict={X: data}
        for i in range(epoch):
            MU_val, trainingE, sig_sqrt_val, log_gauss_val_val, log_pi_val, log_post_val, _ = sess.run(
                [MU, loss, sig_sqrt, log_gauss_val, log_pi, log_post, optimizer], feed_dict=feed_dict)
            #print(tf.count_nonzero(tf.is_nan(MU_val)).eval())
            #print(MU.eval())
            if (is_valid):
                val_err = sess.run(v_loss, feed_dict={V: val_data})
                validationError.append(val_err)
            if 1:
                trainingError.append(trainingE)
        print("Mean:")
        print(MU.eval())
        print("pi:")
        print(log_pi_val)
        print("sig:")
        print(sig_sqrt_val)
        plt.figure(1, figsize=(10, 10))
        plt.plot(trainingError)
        plt.ylabel('The loss')
        plt.xlabel('The number of updates')
        plt.title('The loss vs the number of updates')
        if dataD == 2:
            # Hard assignment of each point to its most responsible cluster.
            cluster_pred = np.argmax(log_post_val, axis=1)
            plt.figure(2, figsize=(10, 10))
            plt.scatter(data[:,0], data[:,1], c=cluster_pred);
            plt.scatter(MU.eval()[:,0], MU.eval()[:,1], c='red', marker='X');
        if (is_valid):
            plt.figure(3, figsize=(10, 10))
            plt.plot(validationError)
            plt.ylabel('The validation loss')
            plt.xlabel('The number of updates')
            plt.title('The validation loss vs the number of updates')
        # NOTE(review): cluster_pred is only defined when dataD == 2, so
        # this loop raises NameError for the 100-D dataset -- confirm.
        for i in range(numClusters):
            print('cluster', i, 'contains',
                np.count_nonzero(cluster_pred == i)/num_pts*100,
                '% of the points');
        plt.show()
        print('trainDataError',trainingError[-1]);
        if (is_valid):
            print('validationDataError',validationError[-1]);
            print('trainDataError to validationDataError ratio: ',
                validationError[-1]/trainingError[-1])
    return validationError
if __name__ == '__main__':
    if dataD == 2:
        # Training with K=1,2,3,4,5 for the 2D dataset, as the instruction requires
        # NOTE: with is_valid False these return empty lists, so the
        # validation-loss figure below is blank.
        valdErr1 = MoG(1)
        valdErr2 = MoG(2)
        valdErr3 = MoG(3)
        valdErr4 = MoG(4)
        valdErr5 = MoG(5)
        plt.figure(3, figsize=(10, 10))
        plt.plot(valdErr1, label="K=1")
        plt.plot(valdErr2, label="K=2")
        plt.plot(valdErr3, label="K=3")
        plt.plot(valdErr4, label="K=4")
        plt.plot(valdErr5, label="K=5")
        plt.ylabel('The validation loss')
        plt.xlabel('The number of updates')
        plt.title('The validation loss vs the number of updates (MoG)')
        plt.legend()
12,253 | 076dcfccd42cdea2e5efbf321517bf762e59c1c2 | import boto3, os, time
print("The environment is ", os.environ['ENVIRONMENT'])
if os.environ['ENVIRONMENT'] == 'prod':
import prod as build
elif os.environ['ENVIRONMENT'] == 'nonprod':
import nonprod as build
elif os.environ['ENVIRONMENT'] == 'nonprodfailover':
import nonprodfailover as build
elif os.environ['ENVIRONMENT'] == 'prodfailover':
import prodfailover as build
ami_id = os.environ['AMI_ID']
print("The AMD generated by Packer is ", os.environ['AMI_ID'])
session = boto3.Session(region_name=build.region)
ec2Client = session.client('ec2')
autoscalingClient = session.client('autoscaling')
#Get AutoScalingGroup, LaunchTemplate from the environment variable import
launchtemplate = build.launch_template
asg = build.auto_scaling_group
#Read in the $Latest Launch Configuration for the non-Prod BG target group and update it to the new AMI that was created via Packer
launchtemplate_version = ec2Client.describe_launch_template_versions(LaunchTemplateId=launchtemplate, Versions=['$Latest'])
print('The current AMI for this launch template is ', launchtemplate_version['LaunchTemplateVersions'][0]['LaunchTemplateData']['ImageId'] )
print('Updating to the new ami,', os.environ['AMI_ID'])
updateversion = ec2Client.create_launch_template_version(
LaunchTemplateId=launchtemplate,
SourceVersion='$Latest',
LaunchTemplateData={
'ImageId': os.environ['AMI_ID']
})
updated_launchtemplate_version = ec2Client.describe_launch_template_versions(LaunchTemplateId=launchtemplate, Versions=['$Latest'])
print('The new AMI that will be used for this launch template is ', updated_launchtemplate_version['LaunchTemplateVersions'][0]['LaunchTemplateData']['ImageId'] )
print('We will now start an instance refresh operation to perform a rolling replacement of all EC2 instances registered to this AutoScalingGroup')
#Calling the Start_Instance_Refresh method of AutoScalingGroup. There is no Waiter Class to guarantee when the ASG picks up and performs the refresh :(
response = autoscalingClient.start_instance_refresh(
AutoScalingGroupName=asg,
Strategy='Rolling',
DesiredConfiguration={
'LaunchTemplate': {
'LaunchTemplateId': launchtemplate,
'Version': '$Latest'
}
}
)
#As there is no Waiter class to leverage, just tossing an arbitrary Sleep loop in the code to give the ASG time to refresh the instances
#This is not an absolute guarantee that the instances will be completed with this 5 minute loop, but tests seem to indicate it will have plenty of time
print('Sleeping for 300 seconds to give ASG time to refresh all of the instances within the ASG')
counter = 1
while counter < 301:
if counter % 10 == 0:
print(counter)
time.sleep(1)
counter += 1
###Determine IP addresses of ASG Instances
print('Pulling all IP addresses of EC2 instances created from this build:')
ec2_instances = autoscalingClient.describe_auto_scaling_instances()
#Dump a list of all ASG instances by EC2 instance ID
instancelist = []
for instance in ec2_instances['AutoScalingInstances']:
instancelist.append(instance['InstanceId'])
#Ouput the IPs of all instances matching the new AMI_ID - this only outputs the FAILOVER IPs created with the new AMI and not older AMIs that havent been terminated yet
for instance in instancelist:
ec2 = ec2Client.describe_instances(InstanceIds=[instance])
for reservation in ec2['Reservations']:
for instances in reservation['Instances']:
# #use os.environ['AMI_ID']
if instances['ImageId'] == os.environ['AMI_ID']:
print(instances['PrivateIpAddress'])
|
12,254 | 2a7d3b5994c3e917beb91857fabccffa463c509e | from django.shortcuts import render,HttpResponse,redirect
from blogger.models import blogpost,Comment
def home(request):
    """Render the landing page listing every blog post."""
    all_posts = blogpost.objects.all()
    context = {"posts": all_posts}
    return render(request, "home.html", context)
def post_page(request, post_id):
    """Render a single post together with all comments attached to it."""
    requested_post = blogpost.objects.get(pk=post_id)
    post_comments = Comment.objects.filter(post=requested_post)
    return render(request, "post.html", {"post": requested_post, "comments": post_comments})
def post_comment(request, post_id):
    """Attach a comment to the given post, then return to the post page.

    BUG FIX: the original returned None for non-POST requests, which makes
    Django raise "The view didn't return an HttpResponse"; now every request
    ends with a redirect back to the post. The leftover debug print() calls
    were also removed.
    """
    if request.method == "POST":
        text = request.POST['comment']
        post = blogpost.objects.get(pk=post_id)
        Comment(post=post, text=text, user=request.user).save()
    return redirect(f"/post/{post_id}")
|
12,255 | 6fadb5675756ee95b0c248c1cf5b9725d1a6ec84 | import math
# Read the class parameters from stdin, then scan every student's attendance
# count and keep the highest bonus seen (ties keep the first student, as before).
students = int(input())
lectures = int(input())
bonus = int(input())

best_score = 0
best_attendances = 0
for _ in range(students):
    attendances = int(input())
    score = attendances / lectures * (5 + bonus)
    if score > best_score:
        best_score, best_attendances = score, attendances

print(f"Max Bonus: {math.ceil(best_score)}.")
print(f"The student has attended {best_attendances} lectures.")
|
12,256 | 96d64b3177673b03943926a66aab047514aba4ab | from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# importing the Kratos Library
import KratosMultiphysics as KM
import KratosMultiphysics.ShallowWaterApplication as SW
## Import base class file
from KratosMultiphysics.ShallowWaterApplication.shallow_water_base_solver import ShallowWaterBaseSolver
def CreateSolver(model, custom_settings):
    """Factory entry point used by the Kratos python solver wrappers."""
    return Pfem2PrimitiveVarSolver(model, custom_settings)
class Pfem2PrimitiveVarSolver(ShallowWaterBaseSolver):
    """Shallow water solver that convects HEIGHT/VELOCITY with PFEM-2 particles."""

    def __init__(self, model, settings):
        super(Pfem2PrimitiveVarSolver, self).__init__(model, settings)
        # Set the element and condition names for the replace settings
        self.element_name = "PFEM2ReducedSWE"
        self.condition_name = "Condition"
        self.min_buffer_size = 2
        # Pfem2 settings: the particle stage convects HEIGHT along VELOCITY
        domain_size = self.main_model_part.ProcessInfo[KM.DOMAIN_SIZE]
        self.settings.AddValue("pfem2_settings", KM.Parameters("""{}"""))
        self.settings["pfem2_settings"].AddEmptyValue("convection_scalar_variable").SetString("HEIGHT")
        self.settings["pfem2_settings"].AddEmptyValue("convection_vector_variable").SetString("VELOCITY")
        self.settings["pfem2_settings"].AddEmptyValue("maximum_number_of_particles").SetInt(8*domain_size)
        self.print_particles = False  # By default we don't print the particles, it is expensive
        if self.print_particles:
            self.lagrangian_model_part = model.CreateModelPart("pfem2_particles")
            self.filter_factor = 1

    def AddVariables(self):
        """Register the nodal variables needed by the mesh and particle stages."""
        super(Pfem2PrimitiveVarSolver, self).AddVariables()
        # Variables to project unknown and update particles
        self.main_model_part.AddNodalSolutionStepVariable(SW.DELTA_SCALAR1)
        self.main_model_part.AddNodalSolutionStepVariable(SW.DELTA_VECTOR1)
        # Specific variables to convect particles
        self.main_model_part.AddNodalSolutionStepVariable(KM.YP)
        self.main_model_part.AddNodalSolutionStepVariable(SW.MEAN_SIZE)

    def AddDofs(self):
        """Add velocity and height degrees of freedom to every node."""
        KM.VariableUtils().AddDof(KM.VELOCITY_X, self.main_model_part)
        KM.VariableUtils().AddDof(KM.VELOCITY_Y, self.main_model_part)
        KM.VariableUtils().AddDof(SW.HEIGHT, self.main_model_part)
        KM.Logger.PrintInfo("::[Pfem2PrimitiveVarSolver]::", "Shallow water solver DOFs added correctly.")

    def Initialize(self):
        """Build the neighbour searches and the particle-moving utility."""
        super(Pfem2PrimitiveVarSolver, self).Initialize()
        # Initializing the neighbour search
        domain_size = self.main_model_part.ProcessInfo[KM.DOMAIN_SIZE]
        number_of_avg_elems = 10
        number_of_avg_nodes = 10
        self.neighbour_search = KM.FindNodalNeighboursProcess(self.main_model_part, number_of_avg_elems, number_of_avg_nodes)
        self.neighbour_search.Execute()
        self.neighbour_elements_search = KM.FindElementalNeighboursProcess(self.main_model_part, domain_size, number_of_avg_elems)
        self.neighbour_elements_search.Execute()
        # Creating the solution strategy for the particle stage
        self.moveparticles = SW.MoveShallowWaterParticleUtility(self.main_model_part, self.settings["pfem2_settings"])
        self.moveparticles.MountBin()
        if self.print_particles:
            self.moveparticles.ExecuteParticlesPrintingTool(self.lagrangian_model_part, self.filter_factor)
        KM.Logger.PrintInfo("::[Pfem2PrimitiveVarSolver]::", "Pfem2 stage initialization finished")

    def InitializeSolutionStep(self):
        """Particle stage before the mesh solve: move, reseed and project."""
        if self._TimeBufferIsInitialized():
            # Move particles
            self.moveparticles.CalculateVelOverElemSize()
            self.moveparticles.MoveParticles()
            # Reseed empty elements
            pre_minimum_number_of_particles = self.main_model_part.ProcessInfo[KM.DOMAIN_SIZE]
            self.moveparticles.PreReseed(pre_minimum_number_of_particles)
            # Project info to mesh
            self.moveparticles.TransferLagrangianToEulerian()
            self.moveparticles.ResetBoundaryConditions()
            # Initialize mesh solution step
            self.solver.InitializeSolutionStep()

    def FinalizeSolutionStep(self):
        """Particle stage after the mesh solve: correct particles and reseed."""
        if self._TimeBufferIsInitialized():
            # Finalize mesh solution step
            self.solver.FinalizeSolutionStep()
            # Update particles
            self.moveparticles.CalculateDeltaVariables()
            self.moveparticles.CorrectParticlesWithoutMovingUsingDeltaVariables()
            # Reseed empty elements
            post_minimum_number_of_particles = self.main_model_part.ProcessInfo[KM.DOMAIN_SIZE]*2
            self.moveparticles.PostReseed(post_minimum_number_of_particles)
            # Print particles if needed
            if self.print_particles:
                # BUG FIX: the module is imported as "KM"; the original
                # referenced the undefined name "KratosMultiphysics" here,
                # which raised NameError whenever particle printing was on.
                self.lagrangian_model_part.ProcessInfo[KM.STEP] = self.main_model_part.ProcessInfo[KM.STEP]
                self.lagrangian_model_part.ProcessInfo[KM.TIME] = self.main_model_part.ProcessInfo[KM.TIME]
                self.moveparticles.ExecuteParticlesPrintingTool(self.lagrangian_model_part, self.filter_factor)
12,257 | 55ce1e5c09cbc047007f71a84cd22cf5a9057b15 | /home/ghaff/anaconda3/lib/python3.7/tempfile.py |
12,258 | 0fabd3ff0a9f4e3c5d37593081b9f46a288ea55f | # -*- encoding: utf-8 -*-
"""The routes.
There is a REST API and a GraphQL API.
REST:
``/api/verify`` [GET]
query parameters:
- ``cf``: the Codice Fiscale string
returns:
- ``{"isCorrect": boolean, "isOmocode": boolean, "cf": str}``
``/api/interpolate`` [GET]
query parameters:
- ``name``
- ``surname``
- ``gender``
- ``date_of_birth`` in YYYYMMDD format
- ``place_of_birth``
returns:
- ``{"cf": str}``
GraphQL:
``/graphql``
accepts the following queries
.. code-block:: graphql
query verify(cf: String!) {
isCorrect
isOmocode
}
query interpolate(
name: String!
surname: String!
gender: genderType!
dateOfBirth: String!
placeOfBirth: String!
) {
codiceFiscale
}
being ``genderType`` an enum comprising ``M`` and ``F`` values
"""
from aiohttp import web
import typing as T
from kofi import rest
from kofi import graphql
def generate_app_routes(conf: T.Dict[T.Text, T.Any]) -> T.List[web.RouteDef]:
    """Generates the app routes using the configuration parameters."""
    # GraphiQL is served only when enabled (truthy) in the configuration.
    graphiql_enabled = bool(conf.get("graphiql"))
    return [
        web.get("/api/verify", rest.verify),
        web.get("/api/interpolate", rest.interpolate),
        graphql.get_view(graphiql=graphiql_enabled),
    ]
|
12,259 | 47b1cbc77add21d8756acfa2f647350a9b60319a | from .neural_network import NeuralNetwork
from .qnetwork import QNetwork
|
12,260 | 9b6297e26ff6e447fa56019d641002633de298f7 | import smtplib
# Connect to Gmail's SMTP endpoint on the STARTTLS submission port (587).
conn = smtplib.SMTP('smtp.gmail.com',587)
conn.ehlo()
# Upgrade the plaintext connection to TLS before sending credentials.
conn.starttls()
# NOTE(review): credentials are hard-coded placeholders here; prefer getpass
# or environment variables so real secrets never land in source control.
conn.login('Enter sender email here','Enter password here')
# Headers and body travel in one string; the blank line separates them.
conn.sendmail('Sender email','Receiver email','Subject: I sent you this mail via cmd yaay!\n\nHello,\n This is an automated mail.oink oink xD\n\n')
conn.quit()
12,261 | 1a7671942f54a8284928ed4b15722439a65c101a | import sys
def listcapacity(lst):
print("Capacity: ", (sys.getsizeof(lst)-36)//4) # initial capacity of the list
print("Size: ", len(lst)) # no. of the elements in the list
print("Space left in the list: ", ((sys.getsizeof(lst)-36) - len(lst*4))//4)
# some thing is totally depended upon the machines that we use
# (size-36)/4 == 32 bit machines
# (size-64)/8 == 64 bit machines
# 36, 64 == size of empty list based upon machines i.e 32 & 64 bits
# 4, 8 == size of an element in the list
# Demonstrate the helper on a brand-new empty list.
demo_list = []
print("Empty list is created!!!")
print("List details: ")
listcapacity(demo_list)
|
12,262 | 1c40a255fced0ffbc3eccca9537414a74df26937 | """
This file will contain the stuff necessary for creating the roster and uploading the submissions.
"""
from fullGSapi.api.login_tokens import LoginTokens
import getpass
import os
import csv
from tqdm import tqdm
gs_roster_loc = "files/input/gs_roster.csv"
canvas_roster_loc = "files/input/canvas_roster.csv" # Currently does not work though will be smart enough later.
calcentral_roster_loc = "files/input/calcentral_roster.csv"
grade_status_roster = "files/input/calcentral_grade_roster.csv"
dest_roster_loc = "files/roster.csv"
# FIXME Go to the files/constants.py file to enter your course ID and assignment ID.
from files.constants import COURSE_ID, ASSIGNMENT_ID
def main():
    """Build the roster CSV and (optionally) upload SID stubs to Gradescope.

    Command-line modes:
      (no args) - regenerate roster, then upload every student.
      regen     - regenerate the roster only; skip all Gradescope uploads.
      sync      - upload only students whose submission is still missing.
    """
    import sys
    argv = sys.argv
    upload_to_gs = True
    only_sync = False
    if len(argv) > 1:
        if argv[1] == "regen":
            print("Only regenerating the roster.")
            upload_to_gs = False
        elif argv[1] == "sync":
            print("Only syncing students without submissions.")
            only_sync = True
    print("Generating the roster...")
    roster = generate_roster()
    print("Generating the roster...Done!")
    if upload_to_gs:
        # Login to Gradescopes real api.
        # Reuse a cached login token when available; otherwise prompt until
        # the user authenticates successfully.
        token: LoginTokens = LoginTokens.load()
        if not token:
            token = LoginTokens()
            token = token.prompt_login(until_success=True)
        input("Press enter to start uploading students...")
        # Filter roster to only upload new students without submissions
        if only_sync:
            print("Mutating roster to only sync students without submissions...")
            roster = only_sync_new_students(token.gsFullapi, roster, COURSE_ID, ASSIGNMENT_ID)
            print("Mutating roster to only sync students without submissions...Done!")
        print("Uploading the students...")
        upload_sids_to_gs(roster, token.gsAPI, COURSE_ID, ASSIGNMENT_ID)
        print("Uploading the students...Done!")
def generate_roster(gs_roster=gs_roster_loc, canvas_roster=calcentral_roster_loc, dest=dest_roster_loc):
    """Merge the Canvas/CalCentral and Gradescope rosters into one CSV.

    Reads both rosters, cross-checks SIDs, folds in the grading-basis roster
    when present, writes the merged roster to ``dest`` and returns the merged
    Gradescope data keyed by SID.

    BUG FIX: the original ignored the ``dest`` parameter and always wrote to
    the module-level ``dest_roster_loc``; the default keeps old behaviour.
    """
    print("Loading the Canvas roster...")
    dup_c_sids = set()
    canvas_roster_data = {}
    with open(canvas_roster) as csvfile:
        croster = csv.DictReader(csvfile)
        for row in croster:
            name = row.get("Name")
            if name:
                # Canvas exports "Last, First"; flip it to "First Last".
                try:
                    name = " ".join(name.split(", ")[::-1])
                except Exception as e:
                    print(f"Failed to flip the name {name}! Ignoring...")
            sid = row.get("Student ID")
            email = row.get("Email Address")
            if not sid:
                print(f"The student {name} does not have an SID!")
                continue
            sid = str(sid)
            data = {
                "name": name,
                "email": email
            }
            grade = row.get("Grading Basis")
            if grade is not None:
                # Map CalCentral grading-basis labels onto the internal codes.
                g = None
                if grade == "P/NP":
                    g = "EPN"
                elif grade == "S/U":
                    g = "ESU"
                elif grade == "DPN":
                    g = "DPN"
                elif grade == "CPN":
                    g = "CPN"
                data["ForGrade"] = g
            if sid in canvas_roster_data:
                print(f"A student with sid {sid} already exists! (Attempted to add student {data} but failed!)")
                dup_c_sids.add(sid)
                continue
            canvas_roster_data[sid] = data
    # Duplicated SIDs are ambiguous, so drop them entirely.
    for sid in dup_c_sids:
        del canvas_roster_data[sid]
    print("Loading the Canvas roster...Done!\nLoading the Gradescope roster...")
    dup_g_sids = set()
    gs_roster_data = {}
    with open(gs_roster) as csvfile:
        gsroster = csv.DictReader(csvfile)
        for row in gsroster:
            fname = row.get("First Name")
            lname = row.get("Last Name")
            name = fname + " " + lname
            sid = row.get("SID")
            email = row.get("Email")
            if not sid:
                print(f"Could not find the sid of {name} (email: {email})! Checking the Canvas roster...")

                def partial_match(email, name):
                    # Prefer an exact email match; fall back to a name match
                    # only when it is unambiguous.
                    matched_names = []
                    for sid, data in canvas_roster_data.items():
                        if data["email"] == email:
                            return sid
                        if data["name"] == name:
                            matched_names.append(sid)
                    if len(matched_names) == 1:
                        return matched_names[0]
                    return False

                pos_sid = partial_match(email, name)
                if pos_sid:
                    print("Found a matching entry in the Canvas roster!")
                    sid = pos_sid
                else:
                    print("Could not find a matching entry in the Canvas roster!")
                    continue
            sid = str(sid)
            data = {
                "name": name,
                "email": email
            }
            if sid in gs_roster_data:
                # Deliberately keep the last entry (the skip is disabled).
                print(f"A student with sid {sid} already exists! (Attempted to add student {data} but failed!)")
            gs_roster_data[sid] = data
    for sid in dup_g_sids:
        del gs_roster_data[sid]
    print("Loading the Gradescope roster...Done!\nChecking missing SID's in the Gradescope roster...")
    for sid, data in canvas_roster_data.items():
        if sid not in gs_roster_data:
            print(f"Could not find the sid {sid} ({data['name']}, {data['email']}) in the Gradescope roster!")
        else:
            gs_roster_data[sid]["ForGrade"] = data.get("ForGrade")
            print("Added P/NP data")
    if os.path.exists(grade_status_roster):
        # The CalCentral grade roster overrides the Canvas grading basis.
        with open(grade_status_roster, "r+") as csvfile:
            croster = csv.DictReader(csvfile)
            for row in croster:
                sid = row.get("SID")
                name = row.get("Name")
                grade_status = row.get("Grading Basis")
                if sid not in gs_roster_data:
                    if sid not in canvas_roster_data:
                        print(f"The student {name} ({sid}) [{grade_status}] is not in the canvas or gradescope roster!")
                    else:
                        print(f"The student {name} ({sid}) [{grade_status}] is not in the gradescope roster!")
                else:
                    if grade_status not in ["GRD", "EPN", "ESU", "DPN", "CPN"]:
                        print(f"The student {name} ({sid}) has an unsupported grade status [{grade_status}]")
                    gs_roster_data[sid]["ForGrade"] = grade_status
    else:
        print("Could not find the calcentral grade roster!")
    print("Writing roster...")
    # BUG FIX: write to the `dest` parameter instead of the module global.
    with open(dest, "w+") as fd:
        writer = csv.writer(fd)
        writer.writerow(["Name", "SID", "Email", "InCanvas", "ForGrade", "Incomplete"])
        for sid, data in gs_roster_data.items():
            graded = data.get("ForGrade")
            if graded is None:
                graded = "GRD"
            writer.writerow([data["name"], sid, data["email"], str(sid in canvas_roster_data), graded, False])
    print("Writing roster...Done!")
    return gs_roster_data
def only_sync_new_students(gc, roster, course_id, assignment_id):
    """Return the subset of ``roster`` whose Gradescope submission is Missing."""
    import csv
    from io import StringIO

    raw_scores = gc.download_scores(course_id, assignment_id)
    missing = {}
    for entry in csv.DictReader(StringIO(raw_scores.decode())):
        if entry["Status"] != "Missing":
            continue
        sid = entry["SID"]
        if sid and sid in roster:
            missing[sid] = roster[sid]
        else:
            print(f"{entry} is missing and does not have an SID in the roster!")
    return missing
def upload_sids_to_gs(roster, client, course_id, assignment_id):
    """Create one programming submission per student containing their SID."""
    progress = tqdm(roster.items(), dynamic_ncols=True, unit="Student", desc="Uploading Students")
    for sid, data in progress:
        # A single input.json file carries the student's SID as a JSON payload.
        submission_files = {
            "input.json": f"{{\"sid\":\"{sid}\"}}"
        }
        client.upload_programming_submission(course_id, assignment_id, data["email"], files_dict=submission_files)
    print("Finished uploading students!")
if __name__ == "__main__":
main() |
12,263 | ab1d663b05a19cf16c0a9f2a97a85ffb4831ede9 | import cv2
import numpy as np
from playsound import playsound
import time
import datetime as dt
import os
import random
# the minimum distance (pixels) between face and finger
# that will trigger an event
finger_distance = 90

# load classifier
face_cascade = cv2.CascadeClassifier(
    './haarcascades/haarcascade_frontalface.xml')

# connect to webcam
cap = cv2.VideoCapture(0)

while True:
    # capture frame by frame
    ret, frame = cap.read()

    # -- FACE DETECTION --
    # convert the video frame into grayscale
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detect faces in the frame using cascade
    faces = face_cascade.detectMultiScale(gray_frame, 1.3, 5)

    # draw a rectangle around each face; the last face's centre is kept
    center_face = ()
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int() performs the same truncating conversion.
        cX = int(x + (w / 2))
        cY = int(y + (h / 2))
        cv2.circle(frame, (cX, cY), 5, (255, 255, 255), -1, 1)
        cv2.putText(frame, "face", (cX - 25, cY - 25),
                    cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)
        center_face = (cX, cY)

    # -- HAND DETECTION --
    # convert the frame into HSV (hue, saturation, value) color format
    hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # set the hsv ranges to detect blue
    min_blue = np.array([89, 98, 165])
    max_blue = np.array([117, 225, 255])
    # apply a blur for better edge detection
    hsv_frame = cv2.GaussianBlur(hsv_frame, (7, 7), 0)
    # create a mask
    mask = cv2.inRange(hsv_frame, min_blue, max_blue)
    # remove all pixels from the mask that are smaller than a 5x5 kernel
    mask = cv2.erode(mask, kernel=np.ones((5, 5), np.uint8))
    # bitwise to cut out all but mask
    result = cv2.bitwise_and(frame, frame, mask=mask)

    # Each individual contour is a Numpy array of (x,y) coordinates of boundary points of the object.
    contours, hierarchy = cv2.findContours(
        mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # collect the centre point of each detected (blue) finger
    center_fingers = []
    for contour in contours:
        # create a bounding rectangle for the contour
        (x, y, w, h) = cv2.boundingRect(contour)
        # draw a rectangle around the contours
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
        # put a dot in the middle (BUG FIX: int() instead of removed np.int)
        cX = int(x + (w/2))
        cY = int(y + (h/2))
        cv2.circle(frame, (cX, cY), 5, (255, 255, 255), -1, 1)
        # add the center point of each contour to the array
        center_fingers.append([cX, cY])
        # add some text for flavor
        cv2.putText(frame, "finger", (cX - 25, cY - 25),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)

    # find the distance (D) between center of fingers and center of face
    if len(center_face) > 0 and len(center_fingers) > 0:
        for idx, finger in enumerate(center_fingers):
            dx = center_face[0] - finger[0]
            dy = center_face[1] - finger[1]
            D = round(np.sqrt(dx*dx+dy*dy), 2)  # pythagoras
            # draw a line between the finger and the face
            cv2.line(frame, center_face,
                     (finger[0], finger[1]), (255, 255, 255), 1)
            # write the distance from the face
            cv2.putText(frame, str(D), (finger[0] + 25, finger[1] + 25),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)
            if D <= finger_distance:
                playsound(
                    f"./audio/{random.choice(os.listdir('./audio/'))}", block=False)
                # BUG FIX: "%h" is not a strftime hour directive (on many
                # platforms it is an abbreviated-month alias); "%H" makes the
                # snapshot filename really contain hour-minute-second.
                cv2.imwrite(
                    f'./face_touches/face_touch_{dt.datetime.now().strftime("%Y%m%d%H%M%S")}.jpg', frame)

    cv2.imshow("Frame", frame)
    cv2.imshow("Mask", mask)
    cv2.imshow("Result", result)

    # Press Q to quit
    if cv2.waitKey(1) & 0xff == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
12,264 | 755fc8973fdfffdaa78562d4b6b54df1f5a87da6 | import getpass
import snmp_helper
DESCR = '1.3.6.1.2.1.1.1.0'
NAME = '1.3.6.1.2.1.1.5.0'
def main():
    # Python 2 script (raw_input / print statements). Prompts for two router
    # addresses plus the SNMPv2 community, then queries sysName and sysDescr
    # from each device over SNMP port 161.
    ip_addr1 = raw_input("enter pyrtr1 address: ")
    ip_addr2 = raw_input("enter pyrtr2 address: ")
    community = raw_input("enter community string: ")
    # snmp_helper expects (host, community, port) tuples.
    py_rtr1 = (ip_addr1, community, 161)
    py_rtr2 = (ip_addr2, community, 161)
    for device in (py_rtr1, py_rtr2):
        print "\n***"
        for the_oid in (NAME, DESCR):
            snmp_data = snmp_helper.snmp_get_oid(device, oid=the_oid)
            output = snmp_helper.snmp_extract(snmp_data)
            print output
        print "***"
if __name__ == "__main__":
main()
|
12,265 | 570299f2231cf19c10637b76432594a3ff41ccde | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A very simple MNIST classifier.
See extensive documentation at
http://tensorflow.org/tutorials/mnist/beginners/index.md
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Import data
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
# NOTE(review): this uses the TensorFlow 1.x graph API (placeholders,
# InteractiveSession, initialize_all_variables); it will not run on TF 2.x.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('data_dir', '/Users/ycgui/tf_experiments/mnist_data', 'Directory for storing data')
# Download and read MNIST data
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
# Build the entire computation graph before starting a session and launching the graph.
sess = tf.InteractiveSession()
# 1. Create the model
# placeholder: a value that we'll input when we ask TensorFlow to run a computation
# We represent this as a 2-D tensor of floating-point numbers, with a shape [None, 784]. (Here None means that a dimension can be of any length.)
x = tf.placeholder(tf.float32, [None, 784])
# A Variable is a modifiable tensor that lives in TensorFlow's graph of interacting operations.
# It can be used and even modified by the computation.
# For machine learning applications, one generally has the model parameters be Variables.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
# 2. Define loss and optimizer
# Target y
y_ = tf.placeholder(tf.float32, [None, 10])
# cross entropy: h(y) = -sum(y_target * log(y_predicted))
# reduction_indices=[1]: use the second dimension of variables for computation
# NOTE(review): tf.log(softmax) is numerically unstable when y underflows to 0;
# tf.nn.softmax_cross_entropy_with_logits on the raw logits is the recommended
# form — confirm before changing, as it alters the graph.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
# Optimizer
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# 3. Train
# Initialization
tf.initialize_all_variables().run()
# 1000 steps of SGD on mini-batches of 100 images.
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    train_step.run({x: batch_xs, y_: batch_ys})
# 4. Test trained model
# argmax() is an extremely useful function which gives you the index of the highest entry in a tensor along some axis
# correct_prediction gives us a list of booleans
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
# Cast booleans to floating point numbers and then take the mean
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Report test accuracy
print(accuracy.eval({x: mnist.test.images, y_: mnist.test.labels}))
|
12,266 | 8a353cd478d3277589e87166171e31e16b062c6a | import collections as cl
import math
N = int(input())
def f(n):
    """Build the "ruler" sequence: f(n) = f(n-1) + [n] + f(n-1), f(1) = [1].

    Iterative form of the original recursion; avoids deep call stacks.
    """
    seq = [1]
    for value in range(2, n + 1):
        seq = seq + [value] + seq
    return seq
print(*f(N))
|
12,267 | f95ae83ccd0c488b1840f0f319b8b2447f8cb624 | listintup = [(7,5), (6,4), (3,8), (9,10), (5,6)] #defining the input
def val2(y): #function to retrieve the second value
    """Sort key: return the second element of a pair/sequence."""
    return y[1]
# Sort the tuples by their second element and report the result.
ordered_pairs = sorted(listintup, key=val2)
print("The sorted list of tuple in the increasing order of seCond element is \n", ordered_pairs)
|
12,268 | 705d5be38076eefabf8a0384e45d39735198cd96 | #!/usr/bin/env python
# This software is licensed under the Apache 2 license, quoted below.
#
# Copyright 2019 Astraea, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# [http://www.apache.org/licenses/LICENSE-2.0]
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
# SPDX-License-Identifier: Apache-2.0
#
# Translating Java version from version.sbt to PEP440 norms
__version__: str = "0.0.0"
|
12,269 | 103fbeee1878b7621f39bc0c2b98096c5764bc34 | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 16 11:52:55 2019
@author: user
"""
import requests
import os
from bs4 import BeautifulSoup
import re
import time
from selenium import webdriver
def get_PDFurl():
    """Collect every href on the current (module-global) soup that mentions .pdf."""
    return [anchor.attrs['href'] for anchor in soup.find_all(href=re.compile(".pdf"))]
def get_pageurl():
    """Collect the postback hrefs used as pagination links on the current soup."""
    return [anchor.attrs['href'] for anchor in soup.find_all(href=re.compile("javascript:__doPostBack"))]
def download_pdf(pdf_links, current_page):
    """Download every link in ``pdf_links`` into a ``PDF<page>`` folder.

    Console output format is kept identical to the original so logs stay
    comparable. Fixes: the redundant ``pdf.close()`` after the ``with`` block
    is gone (the context manager already closed the file), and the per-file
    exists/mkdir check is replaced by a single ``makedirs(exist_ok=True)``.
    """
    print("Downloading Page " + current_page + " : ")
    folder = 'PDF' + current_page
    # create new folder once; exist_ok avoids a race with repeated runs
    os.makedirs(folder, exist_ok=True)
    # download automatically pdf files from website
    for j, link in enumerate(pdf_links, start=1):
        file_name = link.split('&name=')[-1]
        r = requests.get(link)
        with open(folder + '/' + file_name, 'wb') as pdf:
            pdf.write(r.content)
        print(str(j) + '--' + file_name + '...Successful!')
    print('Done With Page ' + current_page + '!\n\n')
url = 'https://www.malaysiastock.biz/Annual-Report.aspx'
# Set browser environment
chrome_options = webdriver.ChromeOptions()
# Using headless mode to avoid connection error
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
browser = webdriver.Chrome(options=chrome_options)
browser.set_page_load_timeout(30)
browser.implicitly_wait(30)
# Download first page. NOTE: `soup` is a module-level global consumed by
# get_PDFurl()/get_pageurl(), so it must be rebound after every page load.
browser.get(url)
time.sleep(3)
r = browser.page_source
soup = BeautifulSoup(r, 'html.parser')
pdf_links = get_PDFurl()
page_links = get_pageurl()
# The current page number is shown inside the pager's <span>.
current_page = soup.find(class_='pgr').find('span').text
download_pdf(pdf_links, current_page)
time.sleep(2)
# Download all other pages by firing each pager's __doPostBack script.
for i in page_links:
    browser.execute_script(i) # turning page
    time.sleep(3)
    r = browser.page_source
    soup = BeautifulSoup(r, 'html.parser')
    pdf_links = get_PDFurl()
    current_page = soup.find(class_='pgr').find('span').text
    download_pdf(pdf_links, current_page)
    time.sleep(2)
browser.quit()
print("\n=====================ALL Done!!!=======================\n")
|
12,270 | 15c5492122c377187c4b3d9b7f7d4f7d4d5b4f68 | from datetime import datetime
import pandas as pd
import numpy as np
# Demo of pandas time-series indexing: label lookups, partial-string slicing
# and date_range frequency aliases. Seeded so the random data is repeatable.
np.random.seed(777)
dates = [datetime(2019,1,2),datetime(2019,1,7),datetime(2019,1,7),
         datetime(2019,1,8),datetime(2019,1,10),datetime(2019,1,12)]
ts=pd.Series(np.random.randn(6),index=dates)
print(ts)
# Several date-string spellings resolve to the same timestamp label.
print('\n',ts['1/10/2019'])
print('\n',ts['20190110'])
longData = pd.Series(np.random.randn(1000),index=pd.date_range('1/1/2015',periods=1000))
print('\n',longData)
# Partial-string indexing: a year or year-month selects the whole period.
print('\n',longData['2016'])
print('\n',longData['2016-05'])
print('\n',longData['2016-05-01':'2016-05-10'])
ldFrame = pd.DataFrame(np.random.randn(100,3),index=pd.date_range('1/1/2017',periods=100,freq='W-WED'),columns=['Ohio','Texas','Utah'])
print('\n',ldFrame)
print('\n',ldFrame.loc['2017-05']) # look the rows up by label via .loc
print('\n',pd.date_range(start='4/1/2012', periods=20))
print('\n',pd.date_range(end='4/1/2012', periods=20))
print('\n',pd.date_range(start='4/1/2012',end='12/1/2012',freq="BM")) # BM: business month END (last business day of each month)
print('\n',pd.date_range(start='5/2/2012 12:56:31', periods=5, freq='4h'))
print('\n',pd.date_range(start='5/2/2012 12:56:31', periods=5, freq='1h30min'))
print('\n',pd.date_range(start='4/1/2012',end='12/1/2012',freq="WOM-2FRI")) # WOM-2FRI: the 2nd Friday of each month
|
12,271 | d044240cb81efa30372f868e836f82d033ec000b | from os import listdir
ANALYSIS_FOLDER = 'analyses/'
FRIENDS_FOLDER = 'references/'

# Group analysis files by user; the part after the dot names the model.
# Files whose model part contains '_' are ignored, exactly as before.
models_per_user = {}
for filename in listdir(ANALYSIS_FOLDER):
    user, models = filename.split('.')
    if '_' not in models:
        models_per_user.setdefault(user, set()).add(models)

# Report every reference entry that lacks a matching analysis for the user.
for user, models in models_per_user.items():
    for friend in (line.strip() for line in open(FRIENDS_FOLDER + user + '.txt')):
        if friend not in models:
            print(user, friend)
12,272 | e5490a273b8d45d663cfec1337c09421d3723270 | from abc import abstractmethod
from typing import Any, Dict, final
from dramatiq import GenericActor
from pydantic.generics import GenericModel
from deployments.entities.deployment_info import DeploymentInfo
from models.model_info import ModelInfo
DEPLOYMENT_TIME_LIMIT_MSECS = 1000 * 60 * 60 * 1
class DeploymentDescription(GenericModel):
    """Pydantic payload describing a single deployment request."""
    # Identifier of the deployment being performed.
    deployment_id: str
    # The model to be deployed.
    model: ModelInfo
    # Free-form deployment parameters; schema depends on the concrete actor —
    # presumably runtime-specific, verify against callers.
    parameters: Dict[str, Any]
    # Runtime configuration forwarded to the deployment backend.
    runtime_config: Dict[str, Any]
    # Environment variables to inject into the deployed service.
    env_vars: Dict[str, str]
class DeploymentActor(GenericActor):
    """Abstract dramatiq actor for deployments; subclasses implement deploy()."""

    class Meta:
        abstract = True
        queue_name = "auto-nlp-deployments"
        max_retries = 0
        store_results = True
        time_limit = DEPLOYMENT_TIME_LIMIT_MSECS

    @final
    def perform(self, deployment_description_json: str,
                **kwargs):  # parameters for this method are okay since GenericActor calls perform() with kwargs
        """Parse the JSON payload, run the deployment, return the info as JSON."""
        self.logger.info(deployment_description_json)
        deployment_description = DeploymentDescription.parse_raw(deployment_description_json)
        # FIX: this was a leftover bare print(); route the parsed payload
        # through the actor's logger so it lands in the worker logs, matching
        # the logger usage on the raw payload above. The dead commented-out
        # duplicate parse line was removed.
        self.logger.debug(deployment_description)
        deployment_info = self.deploy(deployment_description, **kwargs)
        return deployment_info.json()

    @abstractmethod
    def deploy(self, deployment_description: DeploymentDescription, **kwargs) -> DeploymentInfo:
        """Perform the actual deployment; implemented by concrete subclasses."""
        raise NotImplementedError()
|
12,273 | 1c4ce8e9ed54b9ff4e8d04c45e205b48d05a79b4 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications Copyright 2017 Abigail See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This file contains code to read the train/eval/test data from file and process it, and read the vocab data from file and process it"""
import glob
import random
import struct
import csv
from tensorflow.core.example import example_pb2
from collections import defaultdict as ddict
import pickle
import scipy.sparse as sp
import tensorflow as tf
import numpy as np
import horovod.tensorflow as hvd
import collections
import six
import unicodedata
# <s> and </s> are used in the data files to segment the abstracts into sentences. They don't receive vocab ids.
SENTENCE_START = '<s>'
SENTENCE_END = '</s>'
PAD_TOKEN = '[PAD]' # This has a vocab id, which is used to pad the encoder input, decoder input and target sequence
UNKNOWN_TOKEN = '[UNK]' # This has a vocab id, which is used to represent out-of-vocabulary words
START_DECODING = '[START]' # This has a vocab id, which is used at the start of every decoder input sequence
STOP_DECODING = '[STOP]' # This has a vocab id, which is used at the end of untruncated target sequences
# Note: none of <s>, </s>, [PAD], [UNK], [START], [STOP] should appear in the vocab file.
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
    if six.PY3:
        # On Python 3, str is already Unicode; only bytes need decoding.
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        raise ValueError("Unsupported string type: %s" % (type(text)))
    if six.PY2:
        # On Python 2, str is a byte string and unicode is the text type.
        if isinstance(text, str):
            return text.decode("utf-8", "ignore")
        if isinstance(text, unicode):
            return text
        raise ValueError("Unsupported string type: %s" % (type(text)))
    raise ValueError("Not running on Python2 or Python 3?")
class Vocab(object):
    """Vocabulary class for mapping between words and ids (integers)."""

    def __init__(self, vocab_file, max_size):
        """Creates a vocab of up to max_size words, reading from the vocab_file. If max_size is 0, reads the entire vocab file.

        Args:
          vocab_file: path to the vocab file, which is assumed to contain "<word> <frequency>" on each line, sorted with most frequent word first. This code doesn't actually use the frequencies, though.
          max_size: integer. The maximum size of the resulting Vocabulary."""
        self._word_to_id = {}
        self._id_to_word = {}
        self._count = 0  # keeps track of total number of words in the Vocab
        # [UNK], [PAD], [START] and [STOP] get the ids 0,1,2,3.
        for w in [UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:
            self._word_to_id[w] = self._count
            self._id_to_word[self._count] = w
            self._count += 1
        # Read the vocab file and add words up to max_size.
        # `with` guarantees the file is closed even if a bad line raises.
        with open(vocab_file, 'r') as vocab_f:
            for line in vocab_f:
                pieces = line.split()
                if len(pieces) != 2:
                    print('Warning: incorrectly formatted line in vocabulary file: %s\n' % line)
                    continue
                w = pieces[0]
                if w in [SENTENCE_START, SENTENCE_END, UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:
                    raise Exception(
                        '<s>, </s>, [UNK], [PAD], [START] and [STOP] shouldn\'t be in the vocab file, but %s is' % w)
                if w in self._word_to_id:
                    raise Exception('Duplicated word in vocabulary file: %s' % w)
                self._word_to_id[w] = self._count
                self._id_to_word[self._count] = w
                self._count += 1
                if max_size != 0 and self._count >= max_size:
                    print("max_size of vocab was specified as %i; we now have %i words. Stopping reading." % (
                        max_size, self._count))
                    break
        print("Finished constructing vocabulary of %i total words. Last word added: %s" % (
            self._count, self._id_to_word[self._count - 1]))

    def word2id(self, word):
        """Returns the id (integer) of a word (string). Returns [UNK] id if word is OOV."""
        if word not in self._word_to_id:
            return self._word_to_id[UNKNOWN_TOKEN]
        return self._word_to_id[word]

    def id2word(self, word_id):
        """Returns the word (string) corresponding to an id (integer).

        Raises:
          ValueError: if word_id is not a known id.
        """
        if word_id not in self._id_to_word:
            raise ValueError('Id not found in vocab: %d' % word_id)
        return self._id_to_word[word_id]

    def size(self):
        """Returns the total size of the vocabulary"""
        return self._count

    def write_metadata(self, fpath):
        """Writes metadata file for Tensorboard word embedding visualizer as described here:
        https://www.tensorflow.org/get_started/embedding_viz

        Args:
          fpath: place to write the metadata file
        """
        # BUG FIX: this method used a Python 2 `print` statement and `xrange`
        # while the rest of the file uses print(); use range() so it also runs
        # on Python 3.
        print("Writing word embedding metadata file to %s..." % (fpath))
        with open(fpath, "w") as f:
            fieldnames = ['word']
            writer = csv.DictWriter(f, delimiter="\t", fieldnames=fieldnames)
            for i in range(self.size()):
                writer.writerow({"word": self._id_to_word[i]})

    def set_glove_embedding(self, fpath, embedding_dim):
        """Loads GloVe vectors from fpath into self.glove_emb.

        Rows for in-vocab words found in the file are overwritten with their
        GloVe vectors; all other rows keep their random initialisation.
        """
        emb = np.random.randn(self._count, embedding_dim)
        with open(fpath) as f:  # python 3.x support
            for k, line in enumerate(f):
                fields = line.split()
                if len(fields) - 1 != embedding_dim:
                    # Sometimes there are funny unicode parsing problems that lead
                    # to different field lengths (e.g., a word with a unicode space
                    # character that splits into more than one column). We skip
                    # those lines. BUG FIX: the original raised here (with
                    # malformed comma-separated Exception args), which made its
                    # `continue` unreachable and aborted the whole load; warn and
                    # skip instead, as the comment above intended.
                    print('Warning: found line with wrong number of dimensions (expected %d, was %d): %s' % (
                        embedding_dim, len(fields) - 1, line))
                    continue
                word = fields[0]
                if word in self._word_to_id:
                    emb[self._word_to_id[word]] = np.asarray(fields[1:], dtype='float32')
        self.glove_emb = emb
class BertVocab(object):
    """Maps ids of the regular vocabulary to BERT wordpiece ids.

    While glove_vocab has been used as default, the term glove is a misnomer:
    glove_vocab represents the normal Vocab in this file. This class converts
    individual tokens to their respective wordpiece tokens.
    """

    def __init__(self, glove_vocab, bert_vocab_file_path):
        """Builds index_map_glove_to_bert: glove id -> list of bert wordpiece ids.

        Args:
          glove_vocab: a Vocab instance.
          bert_vocab_file_path: path to the BERT vocab file (one token per line).
        """
        self.bert_vocab = collections.OrderedDict()
        self.glove_vocab = glove_vocab
        index = 0
        with tf.gfile.GFile(bert_vocab_file_path, "r") as reader:  # obtain bert vocab
            while True:
                token = convert_to_unicode(reader.readline())
                if not token:
                    break
                token = token.strip()
                self.bert_vocab[token] = index
                index += 1
        not_found = 0
        self.index_map_glove_to_bert = {}
        for i in range(glove_vocab._count):
            word = glove_vocab._id_to_word[i]
            if word in self.bert_vocab:
                self.index_map_glove_to_bert[i] = [self.bert_vocab[word]]
            else:  # fall back to WordPiece tokenization
                not_found = not_found + 1
                self.index_map_glove_to_bert[i] = self._wordpiece_ids(word)
        tf.logging.info(not_found)

    def _wordpiece_ids(self, token):
        """Greedy longest-match-first WordPiece split of `token` into bert ids.

        Returns [UNK]'s id when no valid split exists.
        """
        chars = list(token)
        sub_tokens = []
        start = 0
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if start > 0:
                    substr = "##" + substr  # continuation-piece marker
                if substr in self.bert_vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:  # no piece matched: the whole token is bad
                return [self.bert_vocab['[UNK]']]
            sub_tokens.append(cur_substr)
            start = end
        return [self.bert_vocab[s] for s in sub_tokens]

    def convert_glove_to_bert_indices(self, token_ids):
        """Converts words (glove-vocab ids) to their wordpiece-tokenized indices.

        Args:
          token_ids: list of glove-vocab ids.

        Returns:
          (new_tokens, pos_offset): bert ids wrapped in [CLS]/[SEP], and the
          cumulative position offset of each original token. The wordpiece
          tokenizer can return more than one index per token, hence the offset
          array; this is useful for the BERT + GCN experiments.
        """
        new_tokens = [self.bert_vocab['[CLS]']]  # as per the bert repo instructions
        offset = 1
        pos_offset = []
        for token_id in token_ids:
            pos_offset.append(offset)
            if token_id in self.index_map_glove_to_bert:
                bert_tokens = self.index_map_glove_to_bert[token_id]
            else:
                # Unseen id: wordpiece-split on the fly.
                # BUG FIX: the original referenced the bare name `glove_vocab`
                # (NameError), looked pieces up in the non-existent `self.vocab`
                # (AttributeError), and appended to a typo'd `new_token`
                # variable; all three paths crashed before this fix.
                bert_tokens = self._wordpiece_ids(self.glove_vocab._id_to_word[token_id])
            offset = offset + len(bert_tokens) - 1
            new_tokens = new_tokens + bert_tokens
        new_tokens.append(self.bert_vocab['[SEP]'])
        return new_tokens, pos_offset
def convert_by_vocab(vocab, items):
    """Converts a sequence of [tokens|ids] using the vocab.

    Items missing from the vocab map to vocab['[UNK]'].
    """
    return [vocab[item] if item in vocab else vocab['[UNK]'] for item in items]
def convert_tokens_to_ids(vocab, tokens):
    """Map tokens to ids via `vocab` (unknown tokens map to '[UNK]')."""
    return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
    """Map ids back to tokens via the inverse vocab (unknown ids map to '[UNK]')."""
    return convert_by_vocab(inv_vocab, ids)
def example_generator(data_path, single_pass, device_rank, data_as_tf_example=True):
    """Generates tf.Examples from data files.

    Binary data format: <length><blob>. <length> represents the byte size
    of <blob>. <blob> is a serialized tf.Example proto containing the
    tokenized article text and summary.

    Args:
      data_path:
        Path to tf.Example data files. Can include wildcards, e.g. if you have several training data chunk files train_001.bin, train_002.bin, etc, then pass data_path=train_* to access them all.
      single_pass:
        Boolean. If True, go through the dataset exactly once, generating examples in the order they appear, then return. Otherwise, generate random examples indefinitely.
      device_rank:
        Integer used to seed the shuffling differently per worker.
      data_as_tf_example:
        If False, data_path is treated as already-loaded (pickled) data.

    Yields:
      (Deserialized tf.Example, epoch) tuples in tf-example mode; raw items
      in pickle mode.
    """
    random.seed(device_rank + 1)
    if data_as_tf_example:
        # NOTE(review): `epoch` is yielded but never incremented — confirm
        # whether callers rely on it.
        epoch = 0
        while True:
            filelist = glob.glob(data_path)  # get the list of datafiles
            assert filelist, ('Error: Empty filelist at %s' % data_path)  # check filelist isn't empty
            if single_pass:
                filelist = sorted(filelist)
            else:
                random.shuffle(filelist)
            for file_no, f in enumerate(filelist):
                # BUG FIX: the reader was never closed (leaked one file handle
                # per chunk); `with` closes it even if parsing raises.
                with open(f, 'rb') as reader:
                    all_examples = []
                    while True:
                        len_bytes = reader.read(8)
                        if not len_bytes:
                            # End of file: emit everything buffered from this
                            # chunk (shuffled when training).
                            if not single_pass:
                                random.shuffle(all_examples)
                            for k in all_examples:
                                yield example_pb2.Example.FromString(k), epoch
                            break  # finished reading this file
                        str_len = struct.unpack('q', len_bytes)[0]
                        example_str = struct.unpack('%ds' % str_len, reader.read(str_len))[0]
                        all_examples.append(example_str)
            if single_pass:
                # BUG FIX: was a Python 2 `print` statement.
                print("example_generator completed reading all datafiles. No more data.")
                break
    else:
        # pickle format: data_path is a list of in-memory datasets.
        while True:
            if single_pass:
                for data_ in data_path:
                    for i in data_:
                        yield i
            else:
                random.shuffle(data_path)
                for data_ in data_path:
                    new_data = data_
                    x = np.arange(len(new_data))
                    np.random.shuffle(x)
                    # random.shuffle(new_data)
                    for i in x:
                        yield new_data[i]
            if single_pass:
                break
def article2ids(article_words, vocab):
    """Map the article words to their ids. Also return a list of OOVs in the article.

    Args:
      article_words: list of words (strings)
      vocab: Vocabulary object

    Returns:
      ids:
        A list of word ids (integers); OOVs are represented by their temporary article OOV number. If the vocabulary size is 50k and the article has 3 OOVs, then these temporary OOV numbers will be 50000, 50001, 50002.
      oovs:
        A list of the OOV words in the article (strings), in the order corresponding to their temporary article OOV numbers."""
    ids = []
    oovs = []
    unk_id = vocab.word2id(UNKNOWN_TOKEN)
    for word in article_words:
        word_id = vocab.word2id(word)
        if word_id != unk_id:
            ids.append(word_id)
            continue
        # OOV word: assign a temporary id just past the end of the vocab.
        if word not in oovs:
            oovs.append(word)
        ids.append(vocab.size() + oovs.index(word))
    return ids, oovs
def abstract2ids(abstract_words, vocab, article_oovs):
    """Map the abstract words to their ids. In-article OOVs are mapped to their temporary OOV numbers.

    Args:
      abstract_words: list of words (strings)
      vocab: Vocabulary object
      article_oovs: list of in-article OOV words (strings), in the order corresponding to their temporary article OOV numbers

    Returns:
      ids: List of ids (integers). In-article OOV words are mapped to their temporary OOV numbers. Out-of-article OOV words are mapped to the UNK token id."""
    unk_id = vocab.word2id(UNKNOWN_TOKEN)
    ids = []
    for word in abstract_words:
        word_id = vocab.word2id(word)
        if word_id == unk_id and word in article_oovs:
            # In-article OOV: map to its temporary extended-vocab id.
            word_id = vocab.size() + article_oovs.index(word)
        ids.append(word_id)
    return ids
def outputids2words(id_list, vocab, article_oovs):
    """Maps output ids to words, including mapping in-article OOVs from their temporary ids to the original OOV string (applicable in pointer-generator mode).

    Args:
      id_list: list of ids (integers)
      vocab: Vocabulary object
      article_oovs: list of OOV words (strings) in the order corresponding to their temporary article OOV ids (that have been assigned in pointer-generator mode), or None (in baseline mode)

    Returns:
      words: list of words (strings)

    Raises:
      ValueError: if an id is outside both the vocab and the article OOV range.
    """
    words = []
    for i in id_list:
        try:
            w = vocab.id2word(i)  # might be [UNK]
        except ValueError:  # w is OOV
            assert article_oovs is not None, "Error: model produced a word ID that isn't in the vocabulary. This should not happen in baseline (no pointer-generator) mode"
            article_oov_idx = i - vocab.size()
            try:
                w = article_oovs[article_oov_idx]
            except IndexError:  # i doesn't correspond to an article oov
                # BUG FIX: list indexing raises IndexError, not ValueError; the
                # original `except ValueError` here could never fire, so the
                # diagnostic message below was unreachable.
                raise ValueError('Error: model produced word ID %i which corresponds to article OOV %i but this example only has %i article OOVs' % (i, article_oov_idx, len(article_oovs)))
        words.append(w)
    return words
def abstract2sents(abstract):
    """Splits abstract text from datafile into list of sentences.

    Args:
      abstract: string containing <s> and </s> tags for starts and ends of sentences

    Returns:
      sents: List of sentence strings (no tags)"""
    sents = []
    cursor = 0
    while True:
        try:
            start = abstract.index(SENTENCE_START, cursor)
            end = abstract.index(SENTENCE_END, start + 1)
        except ValueError:  # no more sentences
            return sents
        sents.append(abstract[start + len(SENTENCE_START):end].strip())
        cursor = end + len(SENTENCE_END)
def show_art_oovs(article, vocab):
    """Returns the article string, highlighting the OOVs by placing __underscores__ around them"""
    unk_id = vocab.word2id(UNKNOWN_TOKEN)
    marked = []
    for word in article.split(' '):
        marked.append("__%s__" % word if vocab.word2id(word) == unk_id else word)
    return ' '.join(marked)
def show_abs_oovs(abstract, vocab, article_oovs):
    """Returns the abstract string, highlighting the article OOVs with __underscores__.

    If a list of article_oovs is provided, non-article OOVs are differentiated like !!__this__!!.

    Args:
      abstract: string
      vocab: Vocabulary object
      article_oovs: list of words (strings), or None (in baseline mode)
    """
    unk_id = vocab.word2id(UNKNOWN_TOKEN)
    out = []
    for word in abstract.split(' '):
        if vocab.word2id(word) != unk_id:
            out.append(word)                      # in-vocab word
        elif article_oovs is None:
            out.append("__%s__" % word)           # baseline mode
        elif word in article_oovs:
            out.append("__%s__" % word)           # copyable article OOV
        else:
            out.append("!!__%s__!!" % word)       # OOV absent from the article
    return ' '.join(out)
# Universal/spaCy-style dependency labels used as edge types by the adjacency
# builders below; 'flow' is an extra label for sequential (word -> next word)
# edges. dep_dict maps each label to its integer id.
dep_list = ['cc', 'agent', 'ccomp', 'prt', 'meta', 'nsubjpass', 'csubj', 'conj', 'amod', 'poss', 'neg', 'csubjpass',
            'mark', 'auxpass', 'advcl', 'aux', 'ROOT', 'prep', 'parataxis', 'xcomp', 'nsubj', 'nummod', 'advmod',
            'punct', 'quantmod', 'acomp', 'compound', 'pcomp', 'intj', 'relcl', 'npadvmod', 'case', 'attr', 'dep',
            'appos', 'det', 'nmod', 'dobj', 'dative', 'pobj', 'expl', 'predet', 'preconj', 'oprd', 'acl', 'flow']
dep_dict = {label: i for i, label in enumerate(dep_list)}
def get_specific_adj(batch_list, batch_size, max_nodes, label, encoder_lengths, use_both=True, keep_prob=1.0, use_bert=False, bert_mapping=None, max_length=300):
    """Build per-example sparse adjacency matrices for edges of one given label.

    Args:
        batch_list: per-example lists of (src, dst, label) edges.
        batch_size: unused here; kept for call-site compatibility.
        max_nodes: each adjacency matrix is (max_nodes x max_nodes).
        label: only edges whose label equals this are kept.
        encoder_lengths: per-example lengths (only used by the commented-out
            A + I variant below).
        use_both: if True, also build the reversed (incoming) adjacency;
            otherwise the reversed edge is added to the outgoing matrix.
        keep_prob: probability of keeping each edge (edge dropout).
        use_bert: if True, shift indices by the wordpiece offsets in bert_mapping.
        bert_mapping: per-example offset lists, or None.
        max_length: edges touching positions >= max_length are dropped.

    Returns:
        (adj_main_in, adj_main_out): two lists of scipy COO matrices, one
        entry per example.
    """
    adj_main_in = []
    adj_main_out = []
    if bert_mapping is None:
        bert_mapping = [[] for i in range(len(batch_list))] #empty array for allowing in the next loop
    for edge_list, enc_length, offset_list in zip(batch_list, encoder_lengths, bert_mapping):
        #print(edge_list)
        curr_adj_in = []
        curr_adj_out = []
        curr_data_in = []
        curr_data_out = []
        seen_nodes = []
        for s, d, lbl in edge_list:
            # Drop edges that would fall outside the matrix / truncated sequence.
            if s >=max_nodes or d >=max_nodes or s>=max_length or d>=max_length:
                continue
            if use_bert:
                # Shift by cumulative wordpiece expansion so indices match the
                # BERT token sequence.
                src = s + offset_list[s]
                dest = d + offset_list[d]
            else:
                src = s
                dest = d
            # NOTE(review): seen_nodes is recorded before the label filter and
            # only consumed by the commented-out A + I snippet below.
            seen_nodes.append(src)
            seen_nodes.append(dest)
            if lbl!=label:
                continue
            #if src >= max_nodes or dest >= max_nodes:
            #    continue
            # Edge dropout: keep this edge with probability keep_prob.
            x = np.random.uniform()
            if x<=keep_prob:
                curr_adj_out.append((src, dest))
                curr_data_out.append(1.0)
                if use_both:
                    curr_adj_in.append((dest, src))
                    curr_data_in.append(1.0)
                else:
                    curr_adj_out.append((dest, src))
                    curr_data_out.append(1.0)
        '''
        Use this snippet when you need to use the A + I condition (refer README)
        seen_nodes = list(set(seen_nodes))
        for src in range(enc_length): #A + I for entity and coref
            curr_adj_out.append((src, src))
            curr_data_out.append(1.0)
            if use_both:
                curr_adj_in.append((src, src))
                curr_data_in.append(1.0)
        '''
        # NOTE(review): sp.coo_matrix((data, zip(*pairs))) relies on Python 2's
        # zip returning a list; on Python 3 this needs list(zip(...)) — confirm
        # target interpreter.
        if len(curr_adj_in) == 0:
            adj_in = sp.coo_matrix((max_nodes, max_nodes))
        else:
            adj_in = sp.coo_matrix((curr_data_in, zip(*curr_adj_in)), shape=(max_nodes, max_nodes))
        if len(curr_adj_out) == 0:
            adj_out = sp.coo_matrix((max_nodes, max_nodes))
        else:
            adj_out = sp.coo_matrix((curr_data_out, zip(*curr_adj_out)), shape=(max_nodes, max_nodes))
        adj_main_in.append(adj_in)
        adj_main_out.append(adj_out)
    return adj_main_in, adj_main_out
def get_adj(batch_list, batch_size, max_nodes, use_label_information=True, label_dict=dep_dict,flow_alone=False, flow_combined=False, keep_prob=1.0, use_bert=False, bert_mapping=None, max_length=300):
    """Build per-example, per-label sparse adjacency matrices for GCN layers.

    Args:
        batch_list: per-example lists of (src, dst, label_string) edges.
        batch_size: unused here; kept for call-site compatibility.
        max_nodes: each adjacency matrix is (max_nodes x max_nodes).
        use_label_information: if False, all edges share a single label 0.
        label_dict: mapping label string -> label id (defaults to dep_dict).
        flow_alone: if True, ignore dependency edges and only add
            sequential (src -> src+1) edges.
        flow_combined: if True, additionally add sequential edges under the
            'flow' label.
        keep_prob: probability of keeping each edge (edge dropout).
        use_bert: if True, shift indices by wordpiece offsets in bert_mapping.
        bert_mapping: per-example offset lists, or None.
        max_length: edges touching positions >= max_length are dropped.

    Returns:
        (adj_main_in, adj_main_out): lists (one per example) of dicts mapping
        label id -> scipy COO matrix, for incoming and outgoing edges.
    """
    adj_main_in, adj_main_out = [], []
    max_labels = 45
    if bert_mapping is None:
        bert_mapping = [[] for i in range(len(batch_list))] #empty array for allowing in the next loop
    for edge_list, offset_list in zip(batch_list, bert_mapping):
        adj_in, adj_out = {}, {}
        l_e = len(edge_list)
        in_ind, in_data = ddict(list), ddict(list)
        out_ind, out_data = ddict(list), ddict(list)
        count = 0
        for s, d, lbl_ in edge_list:
            if s>=max_nodes or d >= max_nodes or s>=max_length or d>=max_length:
                continue
            if use_bert:
                # NOTE(review): bare except only logs and falls through, so a
                # failed src lookup leaves `src` stale/undefined — confirm this
                # path is unreachable in practice.
                try:
                    src = s + offset_list[s]
                except:
                    tf.logging.info(s)
                    tf.logging.info(len(offset_list))
                dest = d + offset_list[d]
            else:
                src = s
                dest = d
            #if src >= max_nodes or dest >= max_nodes:
            #    continue
            if flow_alone:
                # Only sequential word -> next-word edges, all under label 0.
                lbl = 0
                if src+1 < max_nodes:
                    x = np.random.uniform()
                    if x<= keep_prob:
                        out_ind[lbl].append((src, src+1))
                        out_data[lbl].append(1.0)
                    x = np.random.uniform()
                    if x<=keep_prob:
                        in_ind[lbl].append((src+1, src))
                        in_data[lbl].append(1.0)
            else:
                if lbl_ not in label_dict:
                    continue
                lbl = label_dict[lbl_]
                if not use_label_information: #all assigned the same label information
                    lbl = 0
                # Edge dropout is sampled independently for each direction.
                x = np.random.uniform()
                if x<=keep_prob:
                    out_ind[lbl].append((src, dest))
                    out_data[lbl].append(1.0)
                x = np.random.uniform()
                if x<=keep_prob:
                    in_ind[lbl].append((dest, src))
                    in_data[lbl].append(1.0)
                if flow_combined and dest!=src+1:
                    if not use_label_information: #all assigned the same label information
                        lbl = 0
                    else:
                        lbl = label_dict['flow']
                    out_ind[lbl].append((src, src+1))
                    out_data[lbl].append(1.0)
                    in_ind[lbl].append((src+1, src))
                    in_data[lbl].append(1.0)
            count = count + 1
        # NOTE(review): max_labels is incremented once per *example* when
        # flow_combined is set (it is not reset inside the loop) — looks like
        # it should be adjusted once before the loop; confirm intent.
        if flow_combined:
            max_labels = max_labels + 1
        if not use_label_information:
            max_labels = 1
        # Materialise one sparse matrix per label id, empty where no edges.
        # NOTE(review): zip(*pairs) passed to coo_matrix relies on Python 2
        # zip returning a list; Python 3 needs list(zip(...)).
        for lbl in range(max_labels):
            if lbl not in out_ind:
                adj_out[lbl] = sp.coo_matrix((max_nodes, max_nodes))
            else:
                adj_out[lbl] = sp.coo_matrix((out_data[lbl], zip(*out_ind[lbl])), shape=(max_nodes, max_nodes))
            if lbl not in in_ind:
                adj_in[lbl] = sp.coo_matrix((max_nodes, max_nodes))
            else:
                adj_in[lbl] = sp.coo_matrix((in_data[lbl], zip(*in_ind[lbl])), shape=(max_nodes, max_nodes))
        adj_main_in.append(adj_in)
        adj_main_out.append(adj_out)
    # print(adj_main_in)
    return adj_main_in, adj_main_out
def create_glove_embedding_matrix(vocab, vocab_size, emb_dim, glove_path):
    """Build a (vocab_size, emb_dim) embedding matrix from a GloVe text file.

    Rows are initialised randomly and overwritten with the GloVe vector for
    every word found in the file.

    Args:
      vocab: mapping from word (str) to row index (int) in the matrix.
      vocab_size: number of rows in the returned matrix.
      emb_dim: embedding dimensionality (number of floats per line).
      glove_path: path to the GloVe vectors file ("word v1 v2 ..." per line).

    Returns:
      numpy array of shape (vocab_size, emb_dim).

    BUG FIX: the original body referenced undefined names (`args`,
    `embedding_dim`, `w2id`), raised on malformed lines despite a comment
    saying they should be skipped (leaving `continue` unreachable), and never
    actually copied any vector into `emb`.
    """
    emb = np.random.rand(vocab_size, emb_dim)
    with open(glove_path, encoding='utf-8') as f:  # python 3.x support
        for line in f:
            fields = line.split()
            if len(fields) - 1 != emb_dim:
                # Skip malformed lines (e.g. words containing unicode space
                # characters that split into extra columns).
                continue
            word = fields[0]
            if word in vocab:
                emb[vocab[word]] = np.asarray(fields[1:], dtype='float32')
    return emb
|
12,274 | 50d40092b7e167ab93635617c5f300db126db68b |
import multiprocessing as mp
from itertools import permutations
from cipher_list import cipher_list_4lw as cipher_list
def get_sorted_word_frequency(cipher):
    """Return a word -> count dict for `cipher`, ordered by ascending count."""
    counts = {}
    for token in cipher.split(' '):
        counts[token] = counts.get(token, 0) + 1
    return {word: n for word, n in sorted(counts.items(), key=lambda kv: kv[1])}
def get_sorted_letter_frequency(cipher, alphabet):
    """Return a letter -> count dict (letters in `alphabet` only), ordered by ascending count."""
    counts = {}
    for ch in cipher:
        if ch in alphabet:
            counts[ch] = counts.get(ch, 0) + 1
    return {ch: n for ch, n in sorted(counts.items(), key=lambda kv: kv[1])}
def conversion_dict_sub(cipher, conversion_dict, alphabet):
    """Substitute each in-alphabet letter of `cipher` via conversion_dict; pass others through."""
    return ''.join(conversion_dict[ch] if ch in alphabet else ch for ch in cipher)
def get_letter_frequency_conversion(cipher_letter_freq, given_letter_freq):
    """Pair cipher letters (most frequent first) with the expected frequency order.

    cipher_letter_freq is assumed sorted ascending by count (as produced by
    get_sorted_letter_frequency); reversing it aligns the most frequent cipher
    letter with the first letter of given_letter_freq.
    """
    most_frequent_first = list(cipher_letter_freq)[::-1]
    return dict(zip(most_frequent_first, given_letter_freq))
def brute_force_multi(permutations_):
    """Try each candidate letter permutation against the cipher.

    For every permutation, build a substitution mapping (most frequent cipher
    letters -> permutation letters, padded with the remaining low-frequency
    letters), decrypt the unique-word string, and collect results that decrypt
    exactly two known common words.

    Relies on the module-level globals sorted_letter_freq, cipher_unique,
    cipher, common_words and alphabet.

    Returns:
      list of (word_matches, decrypted_full_text) tuples.
    """
    return_list = []
    for permutation in permutations_:
        permutation = ''.join(permutation)
        # Pad with the remaining letters in rough English frequency order.
        permutation_plus = permutation + 'hrdlcumwfgypbvkjxq'
        print(permutation_plus)
        conversion_dict = get_letter_frequency_conversion(sorted_letter_freq, permutation_plus)
        decrypted = conversion_dict_sub(cipher_unique, conversion_dict, alphabet)
        word_matches = []
        for word in decrypted.split(' '):
            if word in common_words.keys():
                word_matches.append(word)
        if len(word_matches) == 2:
            decrypted = conversion_dict_sub(cipher, conversion_dict, alphabet)
            # BUG FIX: list.append takes a single argument; the original
            # `return_list.append(word_matches, decrypted)` raised TypeError.
            return_list.append((word_matches, decrypted))
    return return_list
# Cipher alphabet and candidate high-frequency English letters.
# NOTE(review): alphabet stops at 'y' (25 letters, no 'z') — confirm intentional.
alphabet = "abcdefghijklmnopqrstuvwxy"
letter_freq = "etaoins"
common_words = ['that', 'this', 'with', 'list', 'have', 'from', 'they', 'when',
                'give', 'find', 'must', 'your', 'time', 'what', 'only', 'were',
                'more', 'about', 'other', 'first', 'would', 'price',
                'the', 'and', 'for', 'not', 'are']
vals = [0]*(len(common_words))
common_words = dict(zip(common_words, vals))
permutations_ = permutations(letter_freq)
cipher = ' '.join(cipher_list)
sorted_word_freq = get_sorted_word_frequency(cipher)
cipher_unique = ' '.join(sorted_word_freq.keys())
sorted_letter_freq = get_sorted_letter_frequency(cipher, alphabet)
# multi
# NOTE(review): p.map hands each joined permutation *string* to
# brute_force_multi, which then iterates its characters rather than whole
# permutations — likely should chunk the permutations list instead. Also, on
# spawn-based platforms this needs an `if __name__ == '__main__':` guard.
with mp.Pool(8) as p:
    result_list = p.map(brute_force_multi, [''.join(perm) for perm in permutations_])
# normal
# result_list = brute_force_multi(permutations_)
for element in result_list:
    print(element)
12,275 | 6500383c075637fc98508555891dc7635c6190fc | '''
File Decompression
Author: Syed Ahammad Newaz Saif (snewaz@unimelb.edu.au).
Student Number:684933
Summary-Sarah O'Connor,my predecessor made the compression
software but for mulititue reasons it seems that the
decompressed files are not available and the iSkynet's database
needs to check on the files.This program serves to decompress
out the contents using the previous program that was set up
by my predecessor.
The whole program works in such a way that for a given text
and its characters unused bytes,referred to as, the encoding bytes
are used to map non printable compressed text files with respect
to a dictionary file that is being made in this program. The entire
function uses each byte of the compressed text file (hexadecimal form)
to eventually end up with the original file.
The text file is broken into two parts-a.header b.content information
The header file contains information about the dictionary set up to
get the information.First byte contains the number of mappings
and the second byte contains the ngram size.The rest is then repeated
using the combination of ngrams and encoded bytes.The two bytes
work to give the header length 2 + (number_mappings *(ngram_size + 1))
in bytes.The rest is the contents that is found using the header
details and the certain functions in the program.
The file is then tested over test harness for correctness.
Example Usage:
>>> from decompress import decompress_file
>>> decompress_file("after_compress", "after_decompress")
>>> file = open("after_decompress")
>>> contents = file.read()
>>> file.close()
>>> contents
abracadabra\n
This will read the text file called after_compress, decompress its
files and write the resulting decompressed text file to
after_decompress that can be viewed by assigning it to read
and calling it using variable contents.
Testing:
run_tests()
This will run unit tests on various parts of the program
and check the output for correctness.
Revision history:
3 Sep 2014: Implemented the get_header_info
4 Sep 2014: Implemented the parse_header after analyzing
its functionality
9 Sep 2014: Implemented the get_compressed_body
10 Sep 2014: Implemented decompressed_body and
decompressed_file
11 Sep 2014: Added docstrings and comments to vital
and complicated parts
12 Sep 2014: Added test cases.
16 Sep 2014: Ran code through pylint, fixed all warnings.
Desirable features to add in the future:
- Improve for larger files.
- Decompression in less and tidy
way with cases that does not use encoding
bytes
'''
# Importing compress to different conditions
#obtain various compressed_contents set to
#variable to be used for test cases.
from compress import compress_file,get_ngrams,\
get_unused_bytes,sorted_ngrams_by_freq, \
make_ngram_encoding,make_encoded_string, freqs
# Build compressed fixtures once at import time; each *_contents variable is
# reused by the ad-hoc test functions at the bottom of this module.
compress_file(2, "before_compress.txt", "after_compress")
file = open("after_compress")
compressed_contents = file.read()
file.close()
compress_file(2, "moby_dick_chapter1.txt", "moby_compress")
file = open("moby_compress")
compressed_contents1 = file.read()
file.close()
compress_file(2, "moby_dick.txt", "moby_compress1")
file = open("moby_compress1")
compressed_contents2 = file.read()
file.close()
def get_header_info(compressed_contents):
    '''
    Read the first two header bytes of a compressed file's contents.

    Byte 0 holds the number of mappings in the header and byte 1 holds the
    n-gram size; both are returned as a two-element tuple of integers. The
    argument is assumed to be at least two bytes long (shorter input yields
    None).

    Example:
    >>> from decompress import get_header_info
    >>> get_header_info(compressed_contents)
    (8, 2)
    '''
    if len(compressed_contents) < 2:
        return None
    # ord() converts each (possibly non-printable) byte to its numeric value.
    return (ord(compressed_contents[0]), ord(compressed_contents[1]))
def parse_header(compressed_contents):
    '''
    Parse the mapping section of a compressed file's header.

    Takes the full compressed contents (a string) and returns a dictionary
    whose keys are encoding bytes and whose values are the n-grams they
    stand for. Each header entry is ngram_size characters of n-gram followed
    by one encoding byte, starting right after the two info bytes.

    Example:
    >>> from decompress import parse_header
    >>> decode_map = parse_header(compressed_contents)
    >>> decode_map
    {\x00: 'ab', \x83: 'ra', \x04: 'br', \x87: 'ac',
    \x08: 'ad', \x8b: 'ca', \x0c: 'a\n' , \x8f: 'da'}
    '''
    num_mappings, ngram_size = get_header_info(compressed_contents)
    decode_map = {}
    for idx in range(num_mappings):
        # Each entry occupies ngram_size + 1 bytes after the 2 info bytes.
        start = 2 + (ngram_size + 1) * idx
        ngram = compressed_contents[start:start + ngram_size]
        encoding_byte = compressed_contents[start + ngram_size]
        decode_map[encoding_byte] = ngram
    return decode_map
def get_compressed_body(compressed_contents):
    '''
    Return the portion of the compressed contents that follows the header.

    The header occupies 2 + (ngram_size + 1) * num_mappings bytes; everything
    after it is the compressed body.

    Example:
    >>> from decompress import get_compressed_body
    >>> compressed_body = get_compressed_body(compressed_contents)
    >>> compressed_body
    \x00\x83\x8b\x8f\x04\x0c
    '''
    num_mappings, ngram_size = get_header_info(compressed_contents)
    header_len = 2 + (ngram_size + 1) * num_mappings
    return compressed_contents[header_len:]
def decompress_body(decode_map, compressed_body):
    '''
    Decompress the body of a compressed file.

    The first argument is a dictionary mapping encoding bytes to n-grams (the
    format produced by parse_header). The second is the compressed body string
    (the format produced by get_compressed_body). Each input byte that is a
    key of decode_map is replaced by its n-gram; every other byte is copied
    through unchanged.

    Example:
    >>> from decompress import decompress_body
    >>> decompressed_contents = decompress_body(decode_map, compressed_body)
    >>> decompressed_contents
    abracadabra\n

    Feeding the outputs of parse_header and get_compressed_body to this
    function recovers the original uncompressed contents.
    '''
    output = ''
    for byte in compressed_body:
        # Direct O(1) dict lookup. BUG FIX / cleanup: the original did a
        # linear decode_map.keys().index(byte) scan per byte and indexed
        # decode_map.items(), which only works on Python 2's list-returning
        # dict views; dict.get is equivalent, portable and O(1).
        output += decode_map.get(byte, byte)
    return output
def decompress_file(in_filename, out_filename):
    '''Read in_filename, decompress its contents, and write the result to
    out_filename. Returns None.

    Both arguments are strings naming files. The function composes
    parse_header, get_compressed_body and decompress_body to recover the
    original text.

    Example:
    >>> from decompress import decompress_file
    >>> decompress_file("after_compress", "after_decompress")
    >>> file = open("after_decompress")
    >>> contents = file.read()
    >>> file.close()
    >>> contents
    abracadabra\n

    Correctness can be confirmed with test_harness.round_trip(n, original,
    compressed, decompressed), which returns True when the decompressed file
    matches the original exactly.
    '''
    # Read the whole compressed file.
    file = open(in_filename)
    compressed_materials = file.read()
    file.close()
    # Recover the decode map and body, then expand the body.
    decode_map = parse_header(compressed_materials)
    body = get_compressed_body(compressed_materials)
    restored = decompress_body(decode_map, body)
    # Write the decompressed text out.
    file = open(out_filename, 'w')
    file.write(restored)
    file.close()
'''In this part I have made all the separate functions that
individually call the functions above and print their results,
categorised in the same order as the functions themselves. They
act as test cases for every function over a series of input data
to check the validity of the output, arranged in the following order:
1.get_header_info(compressed_contents)
2.parse_header(compressed_contents)
3.get_compressed_body(compressed_contents)
4.decompress_body(decode_map, compressed_body)
5.decompress_file(in_filename,out_filename)
The arguments of the functions are in ascending order'''
def get_header_info_test():
    """Print get_header_info's output for each of the three sample inputs."""
    for contents in (compressed_contents, compressed_contents1, compressed_contents2):
        print(get_header_info(contents))
def parse_header_test():
    """Print parse_header's output for each of the three sample inputs."""
    for contents in (compressed_contents, compressed_contents1, compressed_contents2):
        print(parse_header(contents))
def get_compressed_body_test():
    """Print get_compressed_body's output for each of the three sample inputs."""
    for contents in (compressed_contents, compressed_contents1, compressed_contents2):
        print(get_compressed_body(contents))
def decompress_body_test():
    """Decode each sample input with decompress_body and print the result.

    For every sample: extract the compressed body, parse the header into a
    decode map (same call order as before), then print the decoded text.
    """
    for contents in (compressed_contents, compressed_contents1, compressed_contents2):
        body = get_compressed_body(contents)
        decode_map = parse_header(contents)
        print(decompress_body(decode_map, body))
def decompress_file_test():
    """Exercise decompress_file end-to-end on three compressed files."""
    from decompress import decompress_file
    cases = (
        (compressed_contents, "after_compress", "after_decompress"),
        (compressed_contents1, "moby_compress", "moby_decompress"),
        (compressed_contents2, "moby_compress1", "moby_decompress1"),
    )
    for contents, in_name, out_name in cases:
        # Same intermediate calls as before (results were never used, but
        # the calls are kept in case they matter for coverage).
        body = get_compressed_body(contents)
        decode_map = parse_header(contents)
        decompress_body(decode_map, body)
        print(decompress_file(in_name, out_name))
'''In this part I have tried to check the correctness of my code over a
series of test cases that verify whether the contents before compression
and after decompression are the same or not. Testing is done over various
file sizes. It relies on the functionality of compress.py, which performs
the initial compression.'''
def round_trip_test():
    """Round-trip (compress then decompress) two sample files via test_harness."""
    from test_harness import round_trip
    cases = (
        (2, "before_compress.txt", "after_compress", "moby_decompress"),
        (3, "moby_dick.txt", "moby_compress1", "moby_decompress1"),
    )
    for args in cases:
        print(round_trip(*args))
|
12,276 | 3cdef6db35560ece2dbf2cbb727c122392eb26ba | import time
from multiprocessing import Process
def func(name):
    """Child-process entry point: greet *name*, then print a marker line.

    ('我是子进程' means "I am the child process".)
    """
    print('hello', name)
    print('我是子进程')
if __name__ == '__main__':
    # Instantiate a child process that runs func, passing its argument tuple.
    p = Process(target=func, args=('tiele',))
    # Start the child process.
    p.start()
    time.sleep(1)
    # '执行主进程的内容了' means "now running the main process's content".
    print('执行主进程的内容了')
12,277 | af52f52d8a1ec3784ce3d40f936a056275642276 | #!/usr/bin/env python
#- Derive RA,dec of fibers from NGC 205 field given that they were configured
#- for a different field
import fitsio
import numpy as np
import desimodel.focalplane
import argparse
# Command-line interface: input fiberassign file, output FITS file, and the
# telescope pointing (RA/DEC) for which fiber coordinates are recomputed.
parser = argparse.ArgumentParser(usage = "{prog} [options]")
parser.add_argument("-i", "--input", type=str, help="input fiberassign file")
parser.add_argument("-o", "--output", type=str, help="output fits file")
parser.add_argument("--skyra", type=float, help="telescope RA [deg]")
parser.add_argument("--skydec", type=float, help="telescope DEC [deg]")
args = parser.parse_args()
#- Read a subset of the fiberassign columns for how the positioners were configured
columns = ['TARGETID', 'PETAL_LOC', 'DEVICE_LOC', 'FIBER',
           'TARGET_RA', 'TARGET_DEC', 'FIBERASSIGN_X', 'FIBERASSIGN_Y']
fa, hdr = fitsio.read(args.input, 'FIBERASSIGN', columns=columns, header=True)
#- Remove tile-specific keywords that don't apply to this non-tile pointing
#- (the tile id is remembered for the COMMENT record written below)
tileid = hdr['TILEID']
hdr.delete('TILEID')
hdr.delete('TILERA')
hdr.delete('TILEDEC')
#- Derive each fiber's new RA,DEC from its focal-plane X,Y and the requested
#- telescope pointing
x = fa['FIBERASSIGN_X']
y = fa['FIBERASSIGN_Y']
ra, dec = desimodel.focalplane.xy2radec(args.skyra, args.skydec, x, y)
fa['TARGET_RA'] = ra
fa['TARGET_DEC'] = dec
#- Update header: delete then re-set so REQRA/REQDEC carry the new pointing
hdr.delete('REQRA')
hdr.delete('REQDEC')
hdr['REQRA'] = args.skyra
hdr['REQDEC'] = args.skydec
# NOTE(review): confirm fitsio's FITSHDR.add_record accepts (name, value)
# positionally in the pinned fitsio version.
hdr.add_record('COMMENT', 'tile {} x,y to diff tele ra,dec'.format(tileid))
fitsio.write(args.output, fa, header=hdr, clobber=True)
|
12,278 | 67c1384a33e28b93af46b5fa6e6bb4b61dce597d | import pytz, datetime, time
def getTimeFromEpoch(timeStp, zoneCode):
    """Convert a local "YYYY-MM-DD HH:MM:SS" string in timezone *zoneCode*
    to a Unix epoch (float seconds).

    NOTE(review): despite the name, this converts a timestamp string *to*
    an epoch, not from one — consider renaming at the next API revision.
    is_dst=None makes pytz raise (AmbiguousTimeError/NonExistentTimeError)
    on DST-transition wall times instead of guessing.
    """
    local = pytz.timezone (zoneCode)
    naive = datetime.datetime.strptime (timeStp, "%Y-%m-%d %H:%M:%S")
    local_dt = local.localize(naive, is_dst=None)
    # Converting to UTC first; .timestamp() on the aware datetime yields the epoch.
    utc_dt = local_dt.astimezone (pytz.utc)
    return (utc_dt.timestamp())
12,279 | 987ff22be4a99c27ecf645c260e0451c6f7335d7 | from typing import List
import cloudpickle as pickle
import numpy as np
import pandas
import sklearn.metrics as metrics
class ModelWrapper:
    """Thin convenience wrapper around a pickled sklearn-style estimator.

    Inputs arrive as dicts of columns and are converted to DataFrames
    before being handed to the wrapped model.
    """

    def __init__(self, model_path: str):
        """Load the pickled model stored at *model_path*."""
        with open(model_path, "rb") as handle:
            self.model = pickle.load(handle)

    def predict(self, X: dict) -> List[int]:
        """Predict on a dict of columns; return predictions as a plain list."""
        frame = pandas.DataFrame(X)
        return self.model.predict(frame).tolist()

    def score(self, X: dict, y: List[int], method: str) -> float:
        """Score the model against *y* using the sklearn scorer named *method*."""
        frame = pandas.DataFrame(X)
        scorer = metrics.get_scorer(method)
        return scorer(self.model, frame, y)
12,280 | c667e07ed6773a1d84cfec08bb729d699649c7fe | import pygame
class Starship(pygame.sprite.Sprite):
    """Player-controlled ship sprite.

    Uses a 60x60 placeholder surface and starts at the fixed position
    (100, 250). Horizontal movement is done by shifting the rect.
    """

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface([60, 60])
        self.rect = self.image.get_rect()
        self.rect.x = 100
        self.rect.y = 250

    def moveToRight(self, dx=5):
        """Shift the ship *dx* pixels to the right."""
        self.rect.x += dx

    def moveToLeft(self, dx=5):
        """Shift the ship *dx* pixels to the left.

        Fix: dx now defaults to 5 for consistency with moveToRight
        (previously it was a required argument); existing callers that
        pass dx explicitly are unaffected.
        """
        self.rect.x -= dx

    def getRect(self):
        """Return the sprite's bounding rect."""
        return self.rect
12,281 | a8461b4b0e5ef0fd19a8f2d44244cc3366642f50 | # analisando triângulos 2.0
# Read the three segment lengths from the user.
a = float(input('Primeiro segmento: '))
b = float(input('Segundo segmento: '))
c = float(input('Terceiro segmento: '))
# Triangle inequality: each side must be shorter than the sum of the others.
if (a+b)>c and (a+c)>b and (b+c)>a:
    print('Podem formar um triângulo!')
    if a == b == c:
        # All three sides equal.
        print('Este triângulo será EQUILÁTERO!')
    elif a != b and a != c and b != c:
        # All three sides different.
        print('Este triângulo será ESCALENO!')
    else:
        # Exactly two sides equal. Bug fix: the old condition only matched
        # a == b, so e.g. a=3, b=4, c=3 printed no classification at all.
        print('Este triângulo será ISÓSCELES!')
else:
    print('Estes segmentos não podem formar um triângulo!')
|
12,282 | ed9de2216a9f3403e1cfd86f8882e583afe78368 | #coding=utf-8
'''
Created on 24.03.2013
@author: michi
'''
from PyQt4.QtCore import QObject, QAbstractItemModel, QRegExp, QStringList, \
pyqtSignal, QModelIndex, Qt
from PyQt4.QtGui import QCompleter, QSortFilterProxyModel
from ems.qt4.util import variant_to_pyobject
class FuzzyCompleter(QCompleter):
    """QCompleter supporting prefix, substring and escaped-regex matching.

    In StartsWith mode this behaves like a plain QCompleter. In Contains
    and Regex modes completion candidates are filtered through an internal
    QSortFilterProxyModel instead of QCompleter's own prefix matching.
    """

    # Filtering modes.
    StartsWith = 1
    Contains = 2
    Regex = 3

    # Mirrors of QCompleter.activated/highlighted that carry a model index
    # mapped back to the source model when the proxy is in use.
    activatedIndex = pyqtSignal(QModelIndex)
    highlightedIndex = pyqtSignal(QModelIndex)

    # Item-data role read by pathFromIndex() to build the completion text.
    activationRole = Qt.EditRole

    def __init__(self, model=None, parent=None):
        """Create the completer.

        Accepts either (model, parent), or — QCompleter-style overload — a
        single QObject as *model*, which is then treated as the parent.
        """
        if parent is None and isinstance(model, QObject) and not isinstance(model, QAbstractItemModel):
            QCompleter.__init__(self, model)
        else:
            QCompleter.__init__(self, parent)
        self.localCompletionPrefix = ''
        self._filtering = FuzzyCompleter.StartsWith
        # Proxy used by the Contains/Regex modes.
        self._sortFilterProxyModel = QSortFilterProxyModel(self)
        if isinstance(model, QAbstractItemModel):
            self.setModel(model)
        self.activated[QModelIndex].connect(self._onBaseCompleterActivated)
        self.highlighted[QModelIndex].connect(self._onBaseCompleterHighlighted)

    def _onBaseCompleterActivated(self, modelIndex):
        # Re-emit activation with a source-model index.
        # NOTE(review): as written this double-emits in StartsWith mode (raw
        # index, then proxy-mapped index) because the mapping below runs
        # unconditionally; the original indentation was ambiguous here —
        # confirm whether an `else` was intended.
        if self._filtering == self.StartsWith:
            self.activatedIndex.emit(modelIndex)
        realIndex = self._sortFilterProxyModel.mapToSource(self._sortFilterProxyModel.index(modelIndex.row(), modelIndex.column()))
        self.activatedIndex.emit(realIndex)

    def _onBaseCompleterHighlighted(self, modelIndex):
        # Same mapping logic as _onBaseCompleterActivated, for highlighting.
        if self._filtering == self.StartsWith:
            self.highlightedIndex.emit(modelIndex)
        realIndex = self._sortFilterProxyModel.mapToSource(self._sortFilterProxyModel.index(modelIndex.row(), modelIndex.column()))
        self.highlightedIndex.emit(realIndex)

    def filtering(self):
        """Return the active filtering mode (StartsWith/Contains/Regex)."""
        return self._filtering

    def setFiltering(self, filtering):
        """Switch filtering mode, re-plumb the model; returns self."""
        if self._filtering == filtering:
            return self
        sourceModel = self.sourceModel()
        self._filtering = filtering
        # Re-install the model so it is routed through (or around) the proxy.
        self.setSourceModel(sourceModel)
        self.setCompletionPrefix(self.completionPrefix())
        return self

    def sourceModel(self):
        """Return the unproxied completion model."""
        if self._filtering != FuzzyCompleter.StartsWith:
            return self._sortFilterProxyModel.sourceModel()
        return QCompleter.model(self)

    def setSourceModel(self, sourceModel):
        """Install *sourceModel*, wiring it through the proxy when needed.

        Returns self for chaining.
        """
        if self._filtering != FuzzyCompleter.StartsWith:
            self._sortFilterProxyModel.setSourceModel(sourceModel)
            try:
                # Re-parent the model to the proxy so its lifetime follows it.
                if sourceModel.parent() is self:
                    sourceModel.setParent(self._sortFilterProxyModel)
                self.setModel(self._sortFilterProxyModel)
            except TypeError:
                # NOTE(review): silently ignores objects without parent();
                # original indentation around this handler was ambiguous.
                pass
        elif sourceModel.parent() is self._sortFilterProxyModel:
            sourceModel.setParent(self)
            self.setModel(sourceModel)
        return self

    def splitPath(self, path):
        """Feed *path* to the active filter and return the prefix list.

        In Contains/Regex modes the filtering happens inside the proxy, so
        an empty QStringList is returned to disable QCompleter's own prefix
        matching.
        """
        if self._filtering == FuzzyCompleter.StartsWith:
            paths = QCompleter.splitPath(self, path)
        elif self._filtering == FuzzyCompleter.Contains:
            self._updateSortFilterProxyModel();
            self._sortFilterProxyModel.setFilterWildcard(path)
            paths = QStringList();
        elif self._filtering == FuzzyCompleter.Regex:
            # NOTE(review): QRegExp.escape() turns *path* into a literal
            # substring match, not a user-supplied regex — confirm intended.
            regex = QRegExp(QRegExp.escape(path))
            regex.setCaseSensitivity(self.caseSensitivity())
            self._sortFilterProxyModel.setFilterRegExp(regex)
            paths = QStringList()
        return paths

    def _updateSortFilterProxyModel(self):
        """Sync the proxy's case sensitivity and key column with ours."""
        self._sortFilterProxyModel.setFilterCaseSensitivity(self.caseSensitivity())
        self._sortFilterProxyModel.setFilterKeyColumn(self.completionColumn())

    def pathFromIndex(self, index):
        """Return the completion text for *index* using activationRole."""
        return variant_to_pyobject(index.data(self.activationRole))
12,283 | a1a5452a9bcdfb5203407306237ec5b5021bfe15 | from ThoughtWorks.MarsRoverProblem.InputParser import InputParser
from ThoughtWorks.MarsRoverProblem.Plateau import Plateau
if __name__ == "__main__":
x_max, y_max = InputParser.parse_plateau_size()
Plateau.set_plataue_size(x_max, y_max)
list_of_rovers = InputParser.parse_list_of_rovers()
for rover in list_of_rovers:
x_cor, y_cor, direc = rover.final_coordinates()
if direc != 'Z':
print(x_cor, y_cor, direc, sep=" ")
else:
print("Out of range.")
"""
5 5
1 2 N
LMLMLMLMM
3 3 E
MMRMMRMRRM
"""
|
12,284 | 1df510dff127207a296e9ac0586e257f3caf73cc | from utils import Point
MESSAGE_WAIT = 'wait'
MESSAGE_COME = 'come'
class Message(object):
    """Base class for messages exchanged between agents.

    ``type`` identifies the message kind; subclasses override it with one
    of the MESSAGE_* constants. ``source`` records the sender.
    """

    # Overridden by subclasses with a MESSAGE_* constant.
    type = None

    def __init__(self, source):
        self.source = source
class ComeMessage(Message):
    """Message asking the receiver to come to the (x, y) location."""

    type = MESSAGE_COME

    def __init__(self, source, x, y):
        super(ComeMessage, self).__init__(source)
        # Target coordinates carried by the message.
        self.x, self.y = x, y

    @property
    def point(self):
        """The target location wrapped as a Point."""
        return Point(self.x, self.y)
class WaitMessage(Message):
    """Message of kind MESSAGE_WAIT; carries no payload beyond ``source``."""

    type = MESSAGE_WAIT
|
12,285 | c223daddf56815562415be06b49ff29d084ee66d | S = [80144,
112441,
28168,
55393,
20358,
42988,
16798,
24279,
18413,
56263,
19905,
60167,
8231,
36350,
16420,
5321,
11666,
13230,
18770,
6443,
7395,
147444,
2813,
2332,
28728,
882171,
7333,
57462,
36914,
71917,
59885,
2936,
39424,
35631,
14324,
34,
12,
31964,
3428,
12175,
244,
57,
957,
75,
59,
31,
2106,
49,
37,
21,
378,
13,
44,
31,
6739,
40768,
4995,
2493,
307,
15362,
3337,
3212,
5227,
2106,
1328,
3863,
443,
1628,
4894,
158,
2784,
316,
4218,
41,
534,
681,
319,
431,
396,
179,
118,
3447,
7027,
4935,
2061,
6211,
1727,
4481,
6452,
964,
312,
420,
1911,
3254,
2654,
1748,
3878,
267,
189,
235,
537,
80,
336,
353,
145,
209,
13,
13,
16738,
8974,
7438,
679,
7101,
4463,
561,
5226,
999,
1898,
788,
109,
360,
3527,
10,
155,
153,
132,
138,
1514,
67,
564,
23,
201,
101,
31,
6541,
3423,
1975,
3788,
5851,
1398,
3487,
7013,
1805,
791,
4741,
598,
157,
349,
32,
654,
122,
965,
2149,
414,
33,
159,
2252,
62,
24,
35,
1823,
1522,
3065,
885,
2933,
1635,
821,
1456,
604,
107,
33,
196,
69,
173,
1109,
56,
110,
2193,
1009,
354,
172,
165,
115,
42,
51,
5757,
11890,
3861,
2792,
6844,
662,
244,
409,
296,
122,
1923,
11,
678,
10,
1035,
73,
10,
32,
23,
84,
119,
11,
12,
33,
12,
19724,
32063,
5233,
1303,
1409,
22,
2321,
1322,
2768,
261,
149,
26,
32,
15,
68,
26,
111,
23,
20,
10,
256,
4012,
3673,
51958,
2414,
3221,
84,
135,
272,
70,
207,
1033,
168,
28,
10,
105,
13,
17,
20,
58,
95,
49,
21,
29,
11,
5259,
2900,
2217,
2465,
458,
295,
936,
157,
360,
10,
40,
978,
696,
45,
54,
44,
160,
11,
10,
27,
21,
4087,
4280,
864,
6613,
664,
71,
48,
44,
61,
60,
71,
4254,
1317,
127,
20,
90,
119,
24,
430,
54,
15,
28,
11,
12,
11,
6836,
2799,
2772,
660,
4965,
1395,
85,
77,
29,
2104,
314,
34,
44,
22,
10,
54,
12,
10,
133,
9113,
2638,
744,
2919,
1982,
1686,
1374,
358,
661,
25,
153,
26,
1379,
230,
24,
14,
48,
41,
11,
11,
17,
730,
693,
715,
708,
227,
1252,
15,
211,
78,
334,
13,
33,
45,
11,
34,
52,
14,
12,
10,
11050,
10,
351,
2364,
1312,
246,
289,
1158,
12,
11,
10,
15,
78,
253,
11,
12,
11,
12,
8907,
1407,
2492,
1977,
417,
401,
25,
24,
35,
11,
24,
85,
38,
80,
11,
58,
58,
20,
25,
10,
11,
2002,
2608,
1287,
1079,
142,
70,
64,
15,
88,
364,
41,
271,
39,
1149,
44,
231,
11,
37,
12,
24,
27,
11,
10,
13,
18124,
4219,
7182,
2103,
4265,
3927,
35,
157,
108,
1140,
66,
15,
46,
210,
20,
11,
10,
14,
7190,
1245,
2721,
1196,
300,
147,
21,
817,
823,
104,
50,
26,
81,
13,
12,
41,
12,
18,
20,
19,
1080,
488,
3235,
234,
1700,
408,
310,
61,
83,
15,
16,
21,
324,
18,
137,
82,
199,
55,
13,
1273,
1690,
101,
357,
918,
50,
93,
15,
32,
70,
42,
329,
95,
23,
17,
39,
17,
47,
1591,
408,
4012,
4711,
1418,
828,
988,
82,
44,
100,
164,
10,
335,
12,
56,
27,
21,
60,
12,
453,
183,
19,
108334,
42699,
16779,
46076,
15725,
44,
23539,
10108,
6139,
133,
9314,
15,
11095,
11,
584,
266,
139,
1160,
1063,
17,
11,
12,
12,
11,
14,
10,
27,
12,
1012,
594,
275,
23,
37,
11,
75,
10,
25,
771,
23,
10,
34,
13,
11,
71,
13,
10,
538,
204,
74,
1347,
41,
292,
286,
61,
4524,
12,
28,
124,
60,
21,
10,
236,
202,
12038,
3364,
2514,
3979,
3858,
4321,
5318,
6257,
5263,
5579,
54,
54,
103,
47,
32233,
48252,
98777,
58682,
28805,
33407,
21638,
12391,
28399,
20405,
56,
32,
312,
895,
259,
239,
883,
40,
313,
22,
411,
170,
11,
12,
39,
1020,
476,
5071,
604,
131,
855,
1819,
112,
112,
14,
11,
340,
88,
1398,
667,
13436,
8268,
1721,
1534,
3016,
3571,
5508,
2139,
11,
87,
20,
40,
24,
264,
281,
603,
8891,
657,
1793,
563,
528,
904,
1265,
47,
13,
12,
30,
31,
54,
2468,
2352,
399,
2788,
5205,
1935,
323,
182,
173,
50,
76,
132,
561,
459,
40,
137,
15,
14,
11,
49,
32,
13,
10,
498,
431,
387,
918,
3339,
3584,
390,
1575,
267,
357,
164,
70,
1753,
197,
2747,
1320,
422,
142,
417,
33276,
532,
47,
15,
7077,
42993,
15946,
11889,
10073,
12202,
20834,
30529,
36185,
27871,
94,
12,
13,
11,
12,
14,
176,
22,
17,
59,
3377,
45,
34,
36,
11,
738,
47,
10,
11,
10,
87,
24,
32,
24,
37,
158,
12,
19,
59,
61,
12,
31,
90,
10,
214,
25,
197,
11,
38,
2868,
342,
40,
40,
401,
73,
408,
161,
1430,
45,
108,
12,
159,
72,
267,
10,
23,
352,
153,
54,
247,
75,
14,
67,
83,
10,
344,
404,
588,
414,
49,
52,
299,
46,
51,
151,
11,
14,
35,
10,
25,
28,
10,
12,
44,
23,
118,
10,
14,
38,
20,
129,
262,
141,
454,
12,
2755,
489,
10,
1298,
15,
93,
78,
91,
11,
10,
90,
23,
157,
38,
346,
305,
166,
384,
220,
250,
60,
51,
31,
16,
243,
484,
345,
126,
283,
27,
243,
2680,
18,
21,
78,
133,
17,
16,
76,
12,
512,
92,
108,
10,
629,
49,
82,
213,
228,
246,
43,
50,
42,
15,
56,
22,
39,
11,
12,
21,
29,
202,
60,
11,
31,
12,
27,
133,
13,
433,
293,
150,
11,
40,
287,
44,
196,
27,
52,
69,
70,
79,
11,
31,
62,
104,
68,
650,
292,
315,
44,
35,
683,
14,
23,
120,
20,
213,
46,
30,
13,
28,
13,
30,
24,
12,
10,
42,
10,
44,
12,
906,
771,
17,
133,
11,
278,
281,
20,
213,
10,
17,
11,
1356,
29,
377,
24,
35,
146,
1243,
56,
143,
96,
161,
3037,
48,
26,
61,
33,
313,
45,
64,
47,
21,
29,
22,
12,
645,
83,
574,
95,
63,
18,
29,
26,
262,
16,
46,
27,
51,
67,
854,
47,
740,
66,
246,
26,
416,
65,
29,
13,
22,
71,
395,
14,
22,
348,
1485,
988,
300,
653,
197,
601,
47,
10,
2152,
200,
81,
31,
153,
15,
93,
130,
14,
17,
56,
248,
144,
739,
33,
25,
25,
76,
86,
10,
67,
459,
11,
48,
2192,
142,
10,
201,
20,
1862,
208,
20,
15,
138,
92,
26,
354,
27,
104,
11,
37,
348,
113,
357,
24,
48,
178,
10,
15,
300,
338,
76,
724,
589,
11,
11,
58,
233,
181,
1458,
633,
15,
453,
241,
119,
2425,
222,
1316,
12,
113,
142,
23,
78,
1476,
16,
415,
42,
21,
17,
10,
34,
10,
92,
31,
36,
66,
28,
45,
24,
98,
14,
138,
14,
214,
13,
28,
15,
11,
202,
606,
54,
14,
63,
56,
51,
1662,
64,
440,
412,
102,
238,
49,
28,
13,
15,
24,
14,
361,
110,
14,
1888,
10,
126,
541,
27,
1479,
41,
37,
25,
25,
511,
919,
10,
128,
534,
44,
88,
31,
53,
30,
276,
10,
78,
34,
193,
99,
70,
124,
4844,
21,
141,
177,
129,
87,
204,
13,
32,
90,
188,
25,
122,
25,
15,
82,
17,
111,
30,
10,
12,
58,
49,
10,
74,
74,
27,
20,
25,
346,
23,
11,
101,
29,
25,
10,
37,
229,
536,
36,
22,
29,
11,
198,
13,
46,
42,
37,
1696,
478,
27,
228,
12,
143,
343,
35,
45,
17,
1445,
70,
58,
1617,
1448,
1589,
1575,
10,
49,
919,
37,
14,
26,
38,
24,
11,
10,
214,
181,
22,
38,
146,
845,
41,
10,
254,
20,
24,
21,
215,
63,
13,
13,
159,
147,
304,
78,
10,
97,
23,
36,
126,
15,
294,
27,
52,
111,
487,
138,
367,
218,
11,
321,
25,
1149,
273,
347,
33,
108,
100,
120,
21,
427,
14,
91,
84,
112,
14,
8371,
201,
18,
10,
11416,
134,
378,
72,
468,
21,
10768,
428,
401,
419,
687,
619,
96,
131,
1110,
118,
22,
163,
11,
195,
16,
524,
349,
13,
107,
349,
21,
49,
331,
70,
443,
72,
20,
88,
569,
21,
22,
945,
65,
63,
33,
31,
122,
399,
107,
24,
730,
30,
17,
140,
13,
29,
582,
744,
101,
26,
13,
391,
20,
88,
41,
15,
293,
85,
57,
156,
161,
21,
12,
39,
22,
315,
232,
13,
102,
16,
20,
13,
30,
47,
10,
15,
141,
223,
41,
69,
15,
19,
15,
44,
10,
43,
115,
34,
51,
10,
196,
86,
18,
147,
10,
11,
12,
65,
34,
634,
13,
38,
26,
101,
43,
26,
11,
28,
12,
122,
17,
24,
853,
26,
33,
84,
53,
104,
12,
104,
34,
10,
10,
347,
20,
21,
116,
45,
10,
224,
12,
2265,
10,
369,
33,
54,
73,
42,
203,
41,
321,
92,
18,
306,
2374,
891,
13,
1395,
15,
254,
38,
11,
862,
370,
47,
16,
49,
133,
41,
33,
334,
16,
82,
431,
717,
104,
161,
85,
172,
23,
247,
80,
35,
225,
17,
278,
356,
11,
22,
35,
48,
50,
191,
65,
29,
14,
116,
23,
11,
16,
3762,
31,
159,
26,
953,
36,
709,
11,
73,
141,
64,
36,
287,
24,
43,
11,
110,
18,
16,
28,
42,
35,
246,
14,
97,
133,
179,
336,
284,
14,
12,
142,
712,
51,
121,
152,
43,
17,
10,
91,
13,
22,
85,
15,
32,
20,
11,
40,
34,
57,
17,
10,
707,
17,
26,
1096,
181,
11,
10,
68,
27,
14,
14,
118,
17,
36,
69,
45,
27,
16,
30,
29,
14,
90,
10,
38,
175,
13,
70,
59,
238,
22,
12,
10,
35,
13,
10,
167,
160,
72,
51,
13,
11,
40,
13,
13,
221,
29,
1937,
10,
71,
11,
12,
25,
64,
257,
82,
15,
196,
10,
182,
16,
61,
11,
16,
315,
31,
81,
44,
11,
207,
34,
24,
20,
16,
350,
120,
47,
22,
20,
100,
128,
400,
13,
123,
10,
10,
123,
12,
295,
317,
10,
10,
319,
10,
71,
38,
40,
31,
43,
153,
13,
53,
10,
385,
248,
11,
144,
62,
77,
136,
20,
11,
10,
1267,
318,
954,
99,
96,
53,
483,
241,
159,
14,
25,
13,
137,
128,
13,
143,
17,
52,
79,
20,
48,
10,
269,
43,
201,
12,
11,
768,
237,
42,
1342,
114,
48,
29,
24,
18,
10,
569,
11,
51,
66,
21,
99,
26,
354,
40,
2850,
31,
33,
149,
251,
128,
23,
46,
24,
199,
219,
197,
18,
20,
15,
65,
259,
86,
10,
70,
11,
41,
188,
10,
151,
39,
26,
30,
162,
1400,
27,
51,
69,
712,
37,
11,
58,
67,
42,
10,
250,
48,
12,
39,
175,
15,
18,
20,
10,
12,
57,
12,
25,
57,
102,
38,
14,
21,
36,
106,
24,
17,
14,
63,
138,
10,
21,
32,
179,
28,
13,
587,
77,
208,
83,
60,
26,
38,
11,
36,
242,
27,
62,
809,
20,
12,
58,
648,
51,
71,
802,
64,
25,
24,
29,
62,
31,
58,
152,
66,
14,
10,
1253,
1348,
103,
14,
452,
107,
182,
11,
70,
31,
171,
16,
13,
27,
848,
76,
15,
39,
21,
14,
34,
16,
2625,
18,
37,
20,
23,
95,
398,
22,
12,
926,
15,
78,
89,
68,
12,
78,
10,
65,
13,
13,
218,
11,
55,
55,
228,
25,
10,
60,
1347,
28,
1253,
81,
10,
12,
16,
322,
289,
16,
126,
159,
12,
19,
12,
54,
13,
25,
31,
283,
13,
1218,
40,
27,
993,
20,
10,
71,
64,
10,
11,
16,
36,
29,
44,
188,
51,
112,
159,
657,
14,
27,
39,
38,
74,
11,
13,
334,
27,
61,
37,
79,
10,
60,
106,
11,
31,
21,
23,
14,
11,
10,
27,
28,
111,
254,
19,
12,
527,
79,
32,
248,
914,
191,
185,
175,
868,
458,
87,
93,
12,
13,
269,
54,
34,
257,
65,
256,
1044,
43,
35,
111,
110,
37,
39,
7941,
1015,
58,
112,
21,
158,
280,
34,
83,
193,
10,
136,
39,
29,
124,
17,
19,
3671,
114,
19,
1571,
14,
59,
51,
30,
85,
238,
38,
25,
355,
278,
189,
14,
16,
10,
11,
66,
117,
25,
64,
13,
137,
66,
33,
21,
78,
188,
26,
71,
27,
11,
277,
12,
11,
40,
20,
13,
277,
12,
150,
76,
61,
23,
14,
15,
1252,
52,
78,
76,
12,
302,
42,
57,
509,
116,
91,
119,
140,
224,
50,
116,
18,
10,
65,
252,
44,
20,
43,
14,
15,
16,
10,
11,
10,
76,
17,
44,
945,
30,
10,
944,
20,
12,
24,
145,
16,
13,
11,
14,
12,
12,
100,
13,
81,
12,
14,
12,
15,
12,
11,
306,
14,
212,
438,
2197,
10,
81,
525,
12,
246,
59,
10,
3781,
53,
433,
154,
125,
137,
258,
326,
1488,
28,
28,
18,
106,
13,
27,
205,
22,
50,
95,
3641,
842,
3005,
28,
1020,
190,
70,
30,
41,
221,
1021,
54,
20,
714,
43,
143,
7517,
59,
426,
521,
976,
52,
250,
16,
267,
22,
45,
589,
106,
178,
27,
95,
53,
42,
18,
335,
2420,
52,
14,
50,
90,
249,
58,
117,
15,
65,
31,
19,
20,
38,
10,
14,
825,
22,
10,
66,
35,
13,
19,
705,
135,
10,
14,
27,
109,
145,
360,
11,
28,
90,
10,
11,
76,
104,
28,
25,
14,
10,
169,
21,
19,
26,
15,
174,
13,
696,
17,
25,
38,
10,
45,
108,
14,
13,
70,
16,
10,
36,
13,
41,
72,
39,
19,
10,
50,
10,
12,
10,
10,
183,
33,
98,
10,
15,
21,
17,
28,
150,
156,
27,
110,
233,
19,
63,
56,
131,
281,
412,
817,
396,
48,
446,
172,
1537,
275,
17,
12,
188,
67,
11,
394,
24,
24,
39,
249,
84,
772,
75,
479,
1757,
128,
46,
131,
41,
69,
60,
33,
182,
26,
236,
1743,
173,
12,
779,
43,
12,
15,
237,
25,
46,
40,
75,
10,
48,
442,
152,
105,
108,
46,
43,
234,
998,
172,
25,
36,
25,
13,
16,
106,
57,
123,
31,
918,
61,
10,
43,
10,
17,
20,
17,
70,
14,
17,
11,
11,
11,
12,
49,
14,
17,
44,
303,
10,
15,
13,
12,
43,
11,
26,
10,
26,
139,
151,
19,
32,
26,
15,
11,
287,
550,
63,
58,
137,
64,
35,
90,
107,
13,
58,
151,
10,
21,
15,
21,
28,
23,
29,
13,
40,
92,
43,
11,
92,
19,
25,
49,
51,
16,
48,
692,
219,
14,
33,
42,
32,
1482,
20,
159,
56,
22,
446,
153,
26,
53,
16,
12,
330,
17,
15,
118,
61,
92,
13,
13,
71,
4859,
11,
131,
91,
39,
46,
22,
12,
63,
466,
2638,
27,
460,
10,
26,
138,
26,
312,
195,
17,
113,
253,
18,
21,
107,
54,
39,
11,
146,
84,
30,
151,
10,
35,
11,
76,
189,
313,
367,
12,
135,
21,
16,
70,
72,
12,
16,
22,
26,
83,
93,
256,
16,
25,
76,
56,
59,
137,
13,
13,
10,
10,
74,
139,
187,
41,
21,
10,
44,
72,
14,
13,
47,
114,
31,
15,
96,
390,
21,
92,
83,
26,
19,
100,
14,
22,
27,
36,
20,
16,
19,
95,
88,
132,
14,
32,
12,
94,
25,
123,
63,
22,
71,
12,
22,
10,
124,
180,
95,
10,
15,
56,
11,
30,
10,
21,
179,
282,
134,
44,
304,
76,
45,
30,
197,
193,
61,
132,
56,
145,
41,
97,
103,
36,
88,
33,
138,
11,
255,
640,
29,
39,
141,
333,
1050,
811,
118,
53,
41,
87,
20,
855,
23,
99,
77,
109,
195,
570,
30,
97,
38,
445,
40,
13,
18,
15,
55,
68,
10,
108,
78,
115,
49,
10,
45,
10,
10,
15,
50,
1427,
16,
26,
87,
49,
10,
2513,
12,
325,
58,
14,
76,
122,
201,
356,
364,
24,
246,
230,
775,
924,
143,
12,
23,
12,
320,
93,
219,
82,
11,
10,
10,
29,
32,
37,
36,
35,
21,
27,
10,
114,
38,
27,
340,
4034,
13,
38,
55,
47,
21,
23,
39,
121,
26,
80,
11,
84,
12,
19,
99,
10,
10,
66,
261,
11,
50,
29,
10,
217,
116,
160,
212,
26,
33,
140,
173,
133,
45,
10,
10,
237,
178,
11,
370,
10,
41,
88,
91,
59,
67,
203,
167,
163,
111,
13,
12,
336,
11,
63,
228,
28,
10,
15,
66,
12,
25,
1412,
1573,
11,
11,
16,
10,
219,
118,
131,
47,
41,
14,
20,
54,
855,
51,
11,
181,
29,
195,
50,
46,
79,
33,
213,
23,
101,
92,
40,
11,
82,
829,
116,
15,
15,
36,
38,
161,
34,
14,
599,
221,
13,
31,
10,
23,
480,
32,
47,
10,
17,
245,
28,
84,
10,
75,
316,
577,
133,
83,
52,
262,
13,
40,
103,
30,
145,
114,
10,
1284,
14,
47,
654,
20,
12,
31,
13,
52,
395,
29,
29,
37,
88,
14,
10,
55,
17,
25,
120,
11,
27,
156,
20,
10,
86,
14,
671,
10,
159,
135,
31,
280,
14,
270,
14,
17,
13,
2831,
189,
11,
14,
146,
23,
32,
8078,
12,
49,
43,
11,
1711,
275,
48,
137,
51,
4581,
231,
12,
14,
165,
123,
120,
225,
18919,
37,
197,
37,
12,
632,
112,
231,
43,
415,
22,
61,
62,
39,
350,
138,
55,
191,
275,
139,
1300,
342,
30,
61,
174,
241,
11,
52,
414,
26,
18,
10,
28,
14,
249,
38,
4681,
11,
102,
570,
427,
180,
54,
37,
66,
13,
91,
40,
245,
19,
16,
199,
74,
18,
17,
2297,
22,
83,
33,
109,
16,
73,
13,
16,
12,
12,
34,
37,
11,
15,
197,
712,
28,
81,
164,
69,
86,
16,
535,
123,
37,
11,
14,
13,
63,
12,
117,
67,
114,
19,
74,
117,
376,
161,
18,
34,
12,
43,
15,
2144,
199,
39,
186,
508,
295,
286,
128,
1099,
25,
35,
19066,
16,
13,
54,
12,
83,
19,
20,
12,
10,
20,
28,
15,
53,
14,
42,
14,
15,
96,
109,
81,
98,
10,
326,
20,
172,
14,
24,
205,
39,
177,
22,
42,
12,
564,
13,
31,
46,
14,
10,
10,
10,
122,
73,
37,
13,
531,
15,
375,
24,
12,
146,
24,
107,
24,
61,
190,
47,
11,
45,
1366,
44,
18,
14,
7667,
56,
14,
24,
18,
10,
15,
62,
11,
1839,
12,
15,
23,
252,
60,
36,
99,
60,
18,
20,
37,
10,
25,
10,
31,
455,
11,
10,
15,
14,
17,
36,
10,
13,
15,
10,
40,
14,
25,
14,
26,
52,
29,
10,
234,
12,
16,
16,
23,
121,
16,
203,
426,
12,
55,
103,
591,
70,
20,
26,
19,
409,
313,
62,
10,
23,
1261,
159,
33,
48,
18,
232,
24,
81,
26,
375,
15,
13,
175,
49,
39,
16,
1849,
40,
149,
35,
246,
11,
30,
188,
247,
592,
30,
176,
1212,
58,
301,
43,
152,
782,
35,
12,
487,
28,
15,
124,
1480,
1191,
45,
29,
202,
1334,
29,
29,
204,
36,
20,
67,
38,
379,
32,
36,
33,
78,
14,
322,
57,
78,
16,
32,
40,
19,
60,
25,
10,
32,
64,
20,
10,
33,
32,
13,
57,
46,
33,
56,
13,
10,
13,
12,
14,
17,
11,
36,
20,
52,
57,
12,
17,
24,
25,
42,
24,
12,
29,
39,
24,
151,
238,
10,
774,
176,
23,
83,
211,
163,
21,
10,
47,
16,
1075,
211,
92,
67,
1595,
13,
18,
79,
61,
18,
41,
961,
11,
39,
20,
123,
11,
2276,
46,
10,
26,
18,
15,
146,
91,
159,
228,
72,
314,
205,
227,
45,
61,
63,
28,
93,
21,
11,
30,
15,
117,
108,
18,
52,
35,
16,
14,
34,
10,
11,
12,
26,
1388,
18,
17,
270,
32,
13,
26,
10,
39,
31,
22,
17,
53,
122,
11,
219,
88,
22,
95,
31,
159,
17,
42,
20,
36,
11,
30,
19,
31,
10,
16,
60,
15,
10,
33,
10,
10,
52,
10,
10,
11,
14,
24,
14,
12,
4050,
145,
12,
14,
64,
30,
113,
13,
80,
41,
74,
27,
41,
146,
91,
51,
582,
60,
102,
45,
113,
30,
58,
39,
17,
193,
80,
10,
394,
11,
51,
52,
246,
119,
198,
112,
14,
49,
80,
2158,
10,
1555,
33,
1249,
476,
113,
112,
595,
95,
88,
114,
331,
11,
55,
29,
28,
25,
52,
124,
16,
14,
10,
23,
17,
57,
284,
43,
14,
14,
12,
13,
82,
195,
10,
10,
111,
66,
54,
36,
14,
10,
30,
37,
86,
25,
17,
24,
56,
31,
62,
10,
22,
12,
56,
17,
48,
13,
29,
33,
90,
16,
85,
14,
31,
11,
31,
10,
11,
10,
13,
28,
12,
2620,
251,
10,
37,
65,
262,
58,
60,
246,
143,
91,
42,
31,
144,
72,
44,
50,
7640,
20,
615,
55,
40,
79,
52,
220,
48,
315,
10,
194,
80,
19,
10,
33,
52,
31,
40,
27,
7379,
15,
32,
83,
220,
82,
2281,
397,
242,
16,
13,
43,
11,
66,
567,
48,
404,
10,
80,
38,
25,
29,
319,
83,
155,
17,
23,
11,
43,
88,
10,
76,
60,
59,
33,
12,
27,
13,
12,
56,
75,
71,
48,
10,
530,
34,
95,
29,
25,
12,
13,
103,
254,
12,
89,
812,
16,
10,
219,
246,
42,
68,
13,
21,
42,
18,
52,
40,
36,
11,
13,
119,
13,
13,
636,
47,
25,
16,
19,
23,
187,
75,
12,
27,
188,
31,
300,
226,
13,
186,
115,
70,
78,
129,
21,
299,
46,
1680,
10,
188,
12,
1709,
51,
277,
36,
362,
101,
772,
33,
13,
11,
1130,
171,
77,
329,
10,
17,
2121,
28,
62,
63,
38,
109,
44,
171,
97,
97,
174,
31,
110,
44,
121,
20,
20,
101,
11,
45,
21,
79,
15,
32,
13,
110,
19,
194,
13,
178,
1136,
11,
40,
114,
64,
194,
139,
25,
12,
51,
121,
13,
15,
55,
63,
50,
10,
72,
40,
12,
79,
24,
13,
39,
12,
16,
11,
28,
24,
20,
12,
11,
20,
21,
67,
40,
14,
14,
97,
53,
131,
24,
124,
600,
11,
32,
16,
349,
120,
43,
23,
148,
411,
13,
12,
510,
88,
318,
410,
86,
28,
212,
787,
997,
326,
273,
62,
104,
134,
157,
17,
15,
23,
145,
74,
42,
23,
34,
146,
88,
38,
38,
22,
79,
18,
680,
10,
58,
283,
31,
266,
102,
133,
21,
21,
58,
14,
2505,
818,
53,
125,
172,
161,
74,
149,
78,
74,
17,
275,
11,
13,
12,
11,
39,
13,
132,
31,
173,
150,
27,
16,
10,
19,
127,
30,
10,
15,
22,
50,
12,
11,
11,
66,
25,
14,
11,
11,
11,
50,
23,
10,
12,
13,
56,
83,
1818,
47,
335,
27,
15,
53,
23,
632,
23,
81,
45,
22,
13,
16,
31,
13,
14,
18,
218,
33,
26,
307,
28,
42,
123,
525,
43,
374,
14,
24,
22,
28,
36,
48,
55,
126,
588,
106,
78,
542,
253,
5346,
57,
601,
99,
16,
610,
12,
15,
795,
15,
1336,
113,
72,
28,
10,
113,
19,
54,
72,
26,
45,
327,
113,
41,
44,
127,
10,
21,
177,
19,
32,
31,
11,
19,
34,
61,
184,
19,
11,
512,
29,
31,
30,
27,
12,
10,
23,
80,
15,
23,
58,
19,
142,
69,
49,
16,
34,
44,
14,
19,
92,
10,
12,
20,
51,
41,
12,
11,
15,
4992,
84,
35,
143,
64,
31,
125,
299,
351,
114,
15,
46,
11,
131,
29,
50,
11,
11,
96,
34,
106,
28,
26,
282,
26,
29,
27,
217,
96,
10,
10,
15,
21,
23,
170,
10,
17,
48,
227,
66,
616,
13,
23,
31,
12,
10,
30,
25,
183,
42,
17,
38,
15,
55,
35,
26,
24,
36,
128,
27,
254,
38,
266,
45,
11,
17,
31,
272,
29,
21,
72,
12,
70,
10,
10,
16,
50,
319,
26,
117,
30,
199,
67,
1161,
110,
58,
81,
46,
102,
18,
125,
553,
32,
111,
23,
12,
16,
10,
57,
42,
10,
110,
94,
62,
12,
49,
21,
102,
58,
14,
10,
11,
197,
13,
11,
25,
60,
10,
570,
100,
13,
11,
4677,
31,
25,
10,
19,
15,
31,
16,
29,
182,
36,
467,
10,
13,
22,
36,
11,
204,
111,
76,
38,
21,
27,
28,
52,
43,
71,
838,
23,
93,
33,
61,
57,
74,
30,
17,
11,
28,
257,
14,
108,
25,
13,
30,
227,
98,
656,
44,
52,
10,
12,
33,
13,
69,
23,
49,
833,
12,
35,
77,
15,
643,
41,
276,
107,
58,
301,
10,
34,
28,
25,
16,
11,
11,
13,
158,
46,
119,
110,
41,
194,
12,
10,
23,
28,
14,
11,
121,
17,
59,
16,
403,
465,
584,
560,
531,
432,
299,
146,
132,
428,
10,
171,
164,
187,
161,
231,
211,
318,
473,
338,
449,
15,
22,
33,
48,
59,
35,
58,
117,
60,
77,
12,
42,
15,
26,
94,
50,
10,
16,
20,
10,
10,
10,
1127,
13,
51,
10,
75,
13,
12,
38,
35,
12,
11,
14,
25,
11,
16,
2000,
19479,
128,
20,
12,
48,
68,
23,
21,
10,
15,
1135,
138,
13,
10,
14,
271,
10,
10,
30,
236,
56,
157745,
60,
300,
25,
33,
10,
46,
11,
12,
79,
81,
66,
37,
12,
12,
16,
46,
27,
4937,
10,
16,
69,
14,
4973,
29,
164,
11,
32,
11,
17,
133,
59,
47,
185,
13,
99,
15,
344,
13,
10,
183,
21,
10,
79,
21,
14,
13,
58,
12,
12,
17,
18,
11,
14,
140,
239,
14,
43,
15,
60,
10,
902,
117,
11,
26,
16,
37,
10,
13,
68,
17,
216,
60,
56,
21,
12,
14,
60,
11,
26,
10,
328,
36,
12,
11,
100,
73,
11,
10,
17,
13,
12,
56,
41,
21,
10,
10,
11,
873,
27,
12,
20,
10,
30,
33,
241,
21,
16,
15,
238,
22,
16,
14,
10,
100,
10,
92,
20,
12,
19,
34,
52,
248,
21,
10,
180,
66,
36,
10,
22,
24,
26,
19,
31,
14,
31,
2419,
118,
10,
386,
405,
11,
10,
45,
23,
110,
10,
58,
47,
75,
793,
17,
57,
14,
97,
13,
11,
24,
20,
115,
488,
52,
14,
246,
30,
88,
39,
11,
22,
365,
732,
116,
739,
29,
10,
120,
26,
17,
28,
2932,
14,
115,
1020,
13,
30,
17,
73,
10,
15,
16,
29,
76,
16,
31,
100,
17,
37,
10,
203,
64,
33,
119,
3364,
13,
25,
12,
201,
38,
548,
23,
17,
13,
86,
39,
10,
10,
16,
29,
33,
1406,
238,
12,
95,
44,
26,
20,
35,
13,
7066,
26,
76,
11,
12,
74,
48,
23,
158,
278,
14,
1568,
10,
68,
32,
109,
14,
10,
10,
15,
1412,
11,
12,
22,
30,
11,
27,
11,
98,
58,
11,
98,
12,
433,
10,
122,
79,
4788747]
|
def isEmpty(stack):
    """Return True when *stack* holds no items."""
    return not stack
def Pop(stack):
    """Remove and return the top element of *stack*.

    On an empty stack this prints "Stack underflow" and terminates the
    whole process via exit(1) — callers never see None or an exception
    for underflow.
    """
    if isEmpty(stack):
        print("Stack underflow")
        exit(1)
    else:
        popped = stack.pop()
        return popped
def createStack():
    """Return a fresh, empty stack (a plain list)."""
    return []
def push(stack, item):
    """Place *item* on top of *stack* (in-place append)."""
    stack.append(item)
def sort_stack(stack):
    """Recursively sort *stack* in place so the largest element ends on top."""
    if not isEmpty(stack):
        temp = Pop(stack)
        # Sort the remainder, then slot the held element back in order.
        sort_stack(stack)
        sorted_insert(stack, temp)
def top(stack):
    """Return the top element without removing it, or False when empty.

    NOTE(review): False (not None) is the empty-stack sentinel; the only
    in-file caller, sorted_insert, checks isEmpty() first so the sentinel
    is never actually compared against an element.
    """
    if not isEmpty(stack):
        return stack[-1]
    return False
def sorted_insert(stack, element):
    """Recursively insert *element* into an already-sorted stack.

    Maintains the invariant that values increase from bottom to top: pop
    off items larger than *element*, drop *element* in, push them back.
    """
    if isEmpty(stack) or element > top(stack):
        push(stack, element)
    else:
        temp = Pop(stack)
        sorted_insert(stack, element)
        push(stack, temp)
def print_stack(stack):
    """Print the stack from top to bottom, one element per line."""
    for item in reversed(stack):
        print(item)
# Demo driver: build a stack, print it, sort it, print it again.
stack = createStack()
push( stack, 10 )
push( stack, 30 )
push( stack, 2 )
push(stack, 15)
print_stack(stack)
# Blank-line separator (prints "\n" plus print's own newline).
print("\n")
sort_stack(stack)
print_stack(stack)
12,287 | 1823469f105958d179c2448afa1b0fc5ab4dfd64 | import ray
import pandas as pd
import numpy as np
import itertools
""" Simulate """
@ray.remote
def msy_max_t_1fish(env, mortality, repetitions):
    """Simulate a constant-mortality (MSY-style) policy on a single-action env.

    Runs *repetitions* episodes, applying the same fishing mortality at
    every step until the episode terminates or env.Tmax is reached.

    Returns:
        (x, path): ``x`` holds one summary row per episode,
        [T, rep, mortality, act, episode_reward, *population]; ``path``
        holds one row per step (its reward column is the cumulative reward
        *before* that step).
    """
    x = []
    path = []
    for rep in range(repetitions):
        episode_reward = 0
        observation, _ = env.reset()
        T = 0
        for t in range(env.Tmax):
            population = env.population()
            act= mortality
            action = np.array([act], dtype = np.float32)
            path.append([t, rep, mortality, act, episode_reward, *population])
            observation, reward, terminated, done, info = env.step(action)
            episode_reward += reward
            if terminated:
                T = t
                break
        else:
            # Loop ran to completion without terminating.
            T = env.Tmax
        # NOTE(review): `population` here is the state sampled *before* the
        # episode's final step, not after it — confirm that is intended.
        x.append([T, rep, mortality, act, episode_reward, *population])
    return(x, path)
@ray.remote
def msy_max_t(env, mortality_x, mortality_y, repetitions):
    """Simulate constant mortalities (mortality_x, mortality_y) on a 2-action env.

    Same structure as msy_max_t_1fish but with a two-component action.

    Returns:
        (x, path): ``x`` has one summary row per episode,
        [T, rep, mortality_x, mortality_y, act_x, act_y, episode_reward,
        *population]; ``path`` has one row per step (cumulative reward
        recorded *before* the step).
    """
    x = []
    path = []
    for rep in range(repetitions):
        episode_reward = 0
        observation, _ = env.reset()
        T = 0
        for t in range(env.Tmax):
            population = env.population()
            act_x, act_y= mortality_x, mortality_y
            action = np.array([act_x, act_y], dtype = np.float32)
            path.append([t, rep, mortality_x, mortality_y, act_x, act_y, episode_reward, *population])
            observation, reward, terminated, done, info = env.step(action)
            episode_reward += reward
            if terminated:
                T = t
                break
        else:
            # Loop ran to completion without terminating.
            T = env.Tmax
        # NOTE(review): `population` is the pre-final-step state — confirm.
        x.append([T, rep, mortality_x, mortality_y, act_x, act_y, episode_reward, *population])
    return(x, path)
def generate_msy_episodes(env, grid_nr=101, repetitions=100, only_max_times=False):
    """Backward-compatible alias for generate_msy_episodes_1fish.

    The previous body was a line-for-line duplicate of
    generate_msy_episodes_1fish (same grid, same remote task, same columns),
    so it now simply delegates to avoid the two copies drifting apart.

    Returns df_max_times (one summary row per episode) and, unless
    only_max_times is True, also the full per-timestep history DataFrame.
    """
    return generate_msy_episodes_1fish(
        env, grid_nr=grid_nr, repetitions=repetitions, only_max_times=only_max_times
    )
def generate_msy_episodes_2fish(env, grid_nr=51, repetitions=100, only_max_times = False):
    """Grid-search constant mortality pairs for a two-fishery environment.

    Runs msy_max_t for every (mortality_x, mortality_y) pair on a
    grid_nr x grid_nr grid over [0, 0.5]^2, `repetitions` episodes each,
    in parallel via ray.

    Returns df_max_times (one summary row per episode) and, unless
    only_max_times is True, also df with the full per-timestep histories.
    """
    mortality_choices = itertools.product(np.linspace(0,0.5,grid_nr), repeat=2)
    # define parallel loop and execute
    parallel = [msy_max_t.remote(env, *i, repetitions) for i in mortality_choices]
    x = ray.get(parallel) # list of tuples of final point, history
    cols = ["t", "rep", "mortality_x", "mortality_y", "act_x", "act_y", "reward", "X", "Y", "Z"]
    X = list(map(list, zip(*x))) # [[final points], [histories]]
    df_max_times = pd.DataFrame(np.vstack(X[0]), columns = cols) # X[0] = [f. pts.]
    df = pd.DataFrame(np.vstack(X[1]), columns = cols) # X[1] = [histories]
    if only_max_times:
        return df_max_times
    return df_max_times, df
def generate_msy_episodes_1fish(env, grid_nr=101, repetitions=100, only_max_times = False):
    """Grid-search constant mortality rates for a single-fishery environment.

    Runs msy_max_t_1fish for grid_nr mortality values in [0, 0.5],
    `repetitions` episodes each, in parallel via ray.

    Returns df_max_times (one summary row per episode) and, unless
    only_max_times is True, also df with the full per-timestep histories.
    """
    mortality_choices = np.linspace(0,0.5,grid_nr)
    # define parallel loop and execute
    parallel = [msy_max_t_1fish.remote(env, i, repetitions) for i in mortality_choices]
    x = ray.get(parallel) # list of tuples of final point, history
    cols = ["t", "rep", "mortality", "act", "reward", "X", "Y", "Z"] # default column set assumes a 3-species environment
    if env.num_species == 1:
        cols = ["t", "rep", "mortality", "act", "reward", "X"]
    X = list(map(list, zip(*x))) # [[final points], [histories]]
    df_max_times = pd.DataFrame(np.vstack(X[0]), columns = cols) # X[0] = [f. pts.]
    df = pd.DataFrame(np.vstack(X[1]), columns = cols) # X[1] = [histories]
    if only_max_times:
        return df_max_times
    return df_max_times, df
""" optimize """
def find_msy_1fish(env, grid_nr=101, repetitions=100):
    """Estimate the MSY mortality for a single fishery by grid search.

    Returns (best, episodes):
      best     -- row(s) of the per-mortality summary whose mean episode
                  reward is maximal (multi-index columns: reward mean/std)
      episodes -- full per-timestep histories for that best mortality
    """
    df_max_times, df = generate_msy_episodes_1fish(env, grid_nr=grid_nr, repetitions=repetitions)
    # mean/std episode reward per candidate mortality rate
    tmp = (
        df_max_times
        .groupby(['mortality'], as_index=False)
        .agg({'reward': ['mean','std'] })
    )
    best = tmp[tmp[('reward','mean')] == tmp[('reward','mean')].max()]
    # if several rows tie for the max, only the first one's history is returned
    return (
        best, df.loc[
            df.mortality == best.mortality.values[0]
        ]
    )
def find_msy_2fish(env, grid_nr=51, repetitions=100):
    """Estimate the MSY mortality pair for two fisheries by grid search.

    Returns (best, episodes):
      best     -- row(s) of the per-(mortality_x, mortality_y) summary with
                  maximal mean episode reward
      episodes -- full per-timestep histories for that best pair
    """
    df_max_times, df = generate_msy_episodes_2fish(env, grid_nr=grid_nr, repetitions=repetitions)
    # mean/std episode reward per candidate mortality pair
    tmp = (
        df_max_times
        .groupby(['mortality_x', 'mortality_y'], as_index=False)
        .agg({'reward': ['mean','std'] })
    )
    best = tmp[tmp[('reward','mean')] == tmp[('reward','mean')].max()]
    # if several pairs tie for the max, only the first one's history is returned
    return (
        best, df.loc[
            (df.mortality_x == best.mortality_x.values[0]) &
            (df.mortality_y == best.mortality_y.values[0])
        ]
    )
""" fraction-of-msy const mortality simulation """
def frac_msy_1fish(env, msy, fraction=0.8, repetitions=100):
    """Simulate constant harvesting at `fraction` of the MSY mortality `msy`."""
    scaled_mortality = fraction * msy
    return ray.get(msy_max_t_1fish.remote(env, scaled_mortality, repetitions=repetitions))
def frac_msy_2fish(env, msy_x, msy_y, fraction=0.8, repetitions=100):
    """Simulate both fisheries at `fraction` of their MSY mortalities."""
    scaled_x = fraction * msy_x
    scaled_y = fraction * msy_y
    return ray.get(msy_max_t.remote(env, scaled_x, scaled_y, repetitions=repetitions))
# in practice I'll have to access the files I already made with the saved data
def csv_to_frac_msy_1fish(env, fname, fraction=0.8, repetitions=100):
    """Load a saved single-fishery MSY result from *fname* and rerun at fraction*MSY.

    Reads the mortality from the first row of the CSV written by an earlier
    MSY search.
    """
    saved = pd.read_csv(fname)
    return frac_msy_1fish(env, saved.mortality[0], fraction=fraction, repetitions=repetitions)
def csv_to_frac_msy_2fish(env, fname, fraction=0.8, repetitions=100):
    """Load a saved two-fishery MSY result from *fname* and rerun at fraction*MSY.

    Reads mortality_x / mortality_y from the first row of the CSV written by
    an earlier MSY search.
    """
    saved = pd.read_csv(fname)
    return frac_msy_2fish(
        env, saved.mortality_x[0], saved.mortality_y[0],
        fraction=fraction, repetitions=repetitions,
    )
""" misc """
def msy_performances_1fish(env, grid_nr = 101, repetitions = 100):
    """Summarise mean and std of episode reward for each constant action level.

    Returns a DataFrame with one row per action and columns
    reward (mean) / reward_std (std).
    """
    df_max_times = generate_msy_episodes_1fish(
        env, grid_nr=grid_nr, repetitions=repetitions, only_max_times=True
    )
    # duplicate the reward column so a single .agg() call can compute the
    # mean (on 'reward') and the std (on 'reward_std') at once
    df_max_times['reward_std'] = df_max_times['reward']
    return (
        df_max_times
        .groupby(['act'], as_index=False)
        .agg({'reward': 'mean', 'reward_std':'std'})
    )
|
12,288 | 377fb5387d7d0127c7d81f0c9de1c5c02bd9d267 | # -*- coding: utf-8 -*-
"""Copy of ResNet
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1SVTcgc1E5KpIs-UPv9ogh9kYscBUhkLN
"""
#originally coded on google colab
#code borrowed from https://github.com/minhthangdang/SignLanguageRecognitionResNet
from google.colab import drive
drive.mount('/content/drive')
import numpy as np
import pandas as pd
import tensorflow as tf
import math
from keras import layers
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from keras.models import Model, load_model
from keras.initializers import glorot_uniform
def load_dataset():
    """Load the Sign-Language-MNIST train/test CSVs from Google Drive.

    Returns (train_X, train_y, test_X, test_y, classes) where the X arrays
    are reshaped to (m, 28, 28, 1) grayscale images and `classes` holds the
    sorted unique labels seen in the *training* data only.
    """
    # read train dataset
    train_dataset = pd.read_csv('/content/drive/My Drive/504 Project/Data/sign_mnist_train/sign_mnist_train.csv')
    train_set_y_orig = labels = train_dataset['label'].values # train set labels
    train_dataset.drop('label', axis = 1, inplace = True) # drop the label coloumn from the training set
    train_set_x_orig = train_dataset.values # train set features
    # convert X to (m, n_H, n_W, n_C) where
    # m is number of examples, n_H is height, n_W is width and n_C is number of channels
    train_set_x_orig = train_set_x_orig.reshape((train_set_x_orig.shape[0], 28, 28, 1))
    # read test dataset
    test_dataset = pd.read_csv('/content/drive/My Drive/504 Project/Data/sign_mnist_test/sign_mnist_test.csv')
    test_set_y_orig = test_dataset['label'].values # test set labels
    test_dataset.drop('label', axis = 1, inplace = True) # drop the label coloumn from the test set
    test_set_x_orig = test_dataset.values # test set features
    # same (m, 28, 28, 1) reshape as the training set
    test_set_x_orig = test_set_x_orig.reshape((test_set_x_orig.shape[0], 28, 28, 1))
    # unique class labels, derived from the training labels only
    classes = np.array(labels)
    classes = np.unique(classes)
    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
def identity_block(X, filters, stage, block):
    """ResNet identity block: two 3x3 convolutions plus a skip connection.

    Both convolutions use stride 1 and 'same' padding, so the spatial size is
    unchanged and the input can be added back directly (no shortcut conv).

    Arguments:
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    filters -- list/tuple [F1, F2]: filter counts for the two CONV layers
    stage -- integer, used to name the layers, depending on their position in the network
    block -- string/character, used to name the layers, depending on their position in the network

    Returns:
    X -- output of the identity block, tensor of shape (n_H, n_W, n_C)
    """
    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    # Retrieve Filters
    F1, F2 = filters
    # Save the input value. You'll need this later to add back to the main path.
    X_shortcut = X
    # First component of main path
    X = Conv2D(filters = F1, kernel_size = (3, 3), strides = (1,1), padding = 'same', name = conv_name_base + '2a', kernel_initializer = glorot_uniform())(X)
    X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)
    X = Activation('relu')(X)
    # Second component of main path (no ReLU until after the addition)
    X = Conv2D(filters = F2, kernel_size = (3, 3), strides = (1,1), padding = 'same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform())(X)
    X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)
    # Final step: Add shortcut value to main path, and pass it through a RELU activation
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)
    return X
def convolutional_block(X, filters, stage, block):
    """ResNet convolutional block: downsampling residual block (stride 2).

    The first main-path conv and the shortcut conv both use stride 2, halving
    the spatial dimensions, so the shortcut needs its own conv + batch norm
    before the addition (unlike identity_block).

    Arguments:
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    filters -- list/tuple [F1, F2]: filter counts for the two CONV layers
    stage -- integer, used to name the layers, depending on their position in the network
    block -- string/character, used to name the layers, depending on their position in the network

    Returns:
    X -- output of the convolutional block, tensor of shape (n_H, n_W, n_C)
    """
    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    # Retrieve Filters
    F1, F2 = filters
    # Save the input value
    X_shortcut = X
    ##### MAIN PATH #####
    # First component of main path (stride 2 -> spatial downsampling)
    X = Conv2D(F1, (3, 3), strides = (2, 2), padding = 'same', name = conv_name_base + '2a', kernel_initializer = glorot_uniform())(X)
    X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)
    X = Activation('relu')(X)
    # Second component of main path
    X = Conv2D(F2, (3, 3), strides = (1, 1), padding = 'same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform())(X)
    X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)
    ##### SHORTCUT PATH ####
    # Shortcut must match the main path's downsampled shape and channel count
    X_shortcut = Conv2D(F2, (3, 3), strides = (2, 2), padding = 'same', name = conv_name_base + '1', kernel_initializer = glorot_uniform())(X_shortcut)
    X_shortcut = BatchNormalization(axis = 3, name = bn_name_base + '1')(X_shortcut)
    # Final step: Add shortcut value to main path, and pass it through a RELU activation
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)
    return X
import numpy as np
from keras import layers
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from keras.models import Model, load_model
from keras.initializers import glorot_uniform
#from utils_resnet import identity_block, convolutional_block
def ResNet18(input_shape = (28, 28, 1), classes = 24):
    """Build a ResNet18-style Keras model for image classification.

    Stages 2-5 each consist of one downsampling convolutional_block followed
    by one identity_block, doubling the filter count each stage (64..512).

    Arguments:
    input_shape -- shape of the images of the dataset
    classes -- integer, number of classes

    Returns:
    model -- a Model() instance in Keras
    """
    # Define the input as a tensor with shape input_shape
    # NOTE(review): the X binding here is immediately overwritten by the
    # ZeroPadding2D line below; only X_input is actually used.
    X = X_input = Input(input_shape)
    # Zero-Padding
    X = ZeroPadding2D((3, 3))(X_input)
    # Stage 1
    X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_initializer = glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)
    X = Activation('relu')(X)
    #X = MaxPooling2D((3, 3), strides=(2, 2))(X)
    # Stage 2
    X = convolutional_block(X, [64, 64], stage=2, block='a')
    X = identity_block(X, [64, 64], stage=2, block='b')
    # Stage 3
    X = convolutional_block(X, [128, 128], stage=3, block='a')
    X = identity_block(X, [128, 128], stage=3, block='b')
    # Stage 4
    X = convolutional_block(X, [256, 256], stage=4, block='a')
    X = identity_block(X, [256, 256], stage=4, block='b')
    # Stage 5
    X = convolutional_block(X, [512, 512], stage=5, block='a')
    X = identity_block(X, [512, 512], stage=5, block='b')
    # AVGPOOL
    # X = AveragePooling2D(pool_size=(2,2), name='avg_pool')(X)
    # output layer
    X = Flatten()(X)
    X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X)
    # Create model
    model = Model(inputs = X_input, outputs = X, name='ResNet18')
    return model
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
#from utils_resnet import load_dataset
#from model import ResNet18
import keras.backend as K
K.set_image_data_format('channels_last')
K.set_learning_phase(1)
# Load the Sign-MNIST data as (m, 28, 28, 1) arrays plus the label set
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Example of pictures: a ROWS x ROWS grid of random training images
ROWS = 10
fig, axes = plt.subplots(ROWS, ROWS, figsize=(10, 10))
for i in range(ROWS):
    for j in range(ROWS):
        k = np.random.choice(range(X_train_orig.shape[0]))
        axes[i][j].set_axis_off()
        axes[i][j].imshow(X_train_orig[k].reshape((28, 28)))
#plt.show()
# Normalize image vectors to [0, 1]
X_train = X_train_orig/255.
X_test = X_test_orig/255.
# Convert training and test labels to one hot matrices
# NOTE(review): fit_transform on the *test* labels refits the binarizer; if
# the test label set ever differed from train this would silently produce
# mismatched one-hot columns -- transform() would be safer. Confirm.
label_binrizer = LabelBinarizer()
Y_train = label_binrizer.fit_transform(Y_train_orig)
Y_test = label_binrizer.fit_transform(Y_test_orig)
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
"""# train the neural network
model = ResNet18(input_shape = (28, 28, 1), classes = 24)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, Y_train, epochs = 2, batch_size = 32)
# test the neural network
preds = model.evaluate(X_test, Y_test)
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))"""
#model = load_model('my_model.h5')
#np.argmax(model.predict(m))
|
12,289 | 563fb954a662fbf7a4b86a94669cdb26236ed604 | """DjangoFPGA URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# Include the "BoardInteraction" App
from BoardInteraction import views
# URL linkages of this project
# URL -> view mapping. Each `name=` is the handle used by templates via
# {% url '<name>' %} to generate the matching URL.
urlpatterns = [
    # UI
    path('', views.detail,name="main"), # Front page -> linked to the "BoardInteraction" App
    path('admin/', admin.site.urls), # /admin -> Admin interface
    # HPS LED 0
    path('LED0_ON',views.LED0_ON,name="scriptLED0N"), # /LED0_ON -> triggered by pushing the LED0 ON Button
    path('LED0_OFF',views.LED0_OFF,name="scriptLED0F"), # /LED0_OFF -> triggered by pushing the LED0 OFF Button
    # e.g. views.LED0_ON is the name of the viewer function
    # With e.g. the name="scriptLED0N" the linkage is taken to the HTML event handler: '{% url 'scriptLED0N' %}'
    # FPGA Configuration
    path('FPGA',views.change_FPGAconfiguration,name="scriptFPGAconf"),
    path('BOOTFPGA',views.change_FPGAconfigurationBack,name="BootloaderFPGAconf"),
    # ADC Sensor Trigger
    path('ADCtrigger',views.ADCtrigger,name="scriptADCtrigger")
]
|
12,290 | cced7930aebec77ce082ae575fe9cf9e83888ca4 | # For loop indexli
# Indexed for-loop via enumerate
z=range(5)
for i, data in enumerate(z):
    print(i, data)
# Reading a file line by line
for line in open("example.txt"):
    print(line)
# Swapping two variables without a temporary
a=5
b=6
a,b = b,a
print(a,b)
# Appending a single value and extending with another list's values
x=[]
y=[1,2,3,4]
x.append(5)
x.extend(y)
print(x)
# Reversing a list in place
x.reverse()
print(x)
# Sorting a list of tuples (compares first elements, then second)
z=[("aaa",5), ("ccc",4), ("bbb",3)]
z.sort()
print(z)
# Adding data to a dictionary -- two equivalent ways
d={}
d['a']=5
d['b']=6
d=dict(a=5, b=4, c=6)
print(d)
# Parsing whitespace-separated rows of a text file into a dict
# (keyed by the second column; value is columns 1 and 3 concatenated)
d={}
for line in open("example.txt"):
    val1, val2, val3 = line.split()
    d[val2]= val1 + val3
print(d)
|
12,291 | 57294203b57b641b3b2f613b6e164f6acb88a13b | #!/usr/bin/python -u
import gobject
import dbus
import dbus.service
import dbus.mainloop.glib
import os
import os.path as path
import sys
from obmc.dbuslib.bindings import DbusProperties, get_dbus
settings_file_path = os.path.join(sys.prefix, 'share/obmc-phosphor-settings')
sys.path.insert(1, settings_file_path)
import settings_file as s
DBUS_NAME = 'org.openbmc.settings.Host'
OBJ_NAME = '/org/openbmc/settings/host0'
CONTROL_INTF = 'org.openbmc.Settings'
class HostSettingsObject(DbusProperties):
    """D-Bus object exposing host settings and persisting them on the BMC.

    Each setting becomes a D-Bus property; property changes are mirrored to
    one file per setting under `path` so values survive reboots.
    (Python 2 code: uses dict.iterkeys.)
    """
    def __init__(self, bus, name, settings, path):
        DbusProperties.__init__(self)
        dbus.service.Object.__init__(self, bus, name)
        self.path = path  # directory holding one file per persisted setting
        if not os.path.exists(path):
            os.mkdir(path)
        # Listen to changes in the property values and sync them to the BMC
        bus.add_signal_receiver(
            self.settings_signal_handler,
            dbus_interface="org.freedesktop.DBus.Properties",
            signal_name="PropertiesChanged",
            path="/org/openbmc/settings/host0")
        # Create the dbus properties
        for i in settings['host'].iterkeys():
            shk = settings['host'][i]
            self.set_settings_property(shk['name'],
                                       shk['type'],
                                       shk['default'])

    def get_bmc_value(self, name):
        """Return the persisted value for *name*, or None if never saved."""
        try:
            with open(path.join(self.path, name), 'r') as f:
                return f.read()
        except (IOError):
            pass
        return None

    # Create dbus properties based on bmc value.
    # This will be either a value previously set,
    # or the default file value if the BMC value
    # does not exist.
    def set_settings_property(self, name, type, value):
        # NOTE(review): values read back from disk are strings, so for
        # type "i"/"b" a previously saved value is set as a str here --
        # confirm consumers tolerate that.
        bmcv = self.get_bmc_value(name)
        if bmcv:
            value = bmcv
        if type == "i":
            self.Set(DBUS_NAME, name, value)
        elif type == "s":
            self.Set(DBUS_NAME, name, str(value))
        elif type == "b":
            self.Set(DBUS_NAME, name, value)

    # Save the settings to the BMC. This will write the settings value in
    # individual files named by the property name to the BMC.
    def set_system_settings(self, name, value):
        bmcv = self.get_bmc_value(name)
        if bmcv != value:
            filepath = path.join(self.path, name)
            with open(filepath, 'w') as f:
                f.write(str(value))

    # Signal handler for when one or more settings properties were updated.
    # This will sync the changes to the BMC.
    def settings_signal_handler(
            self, interface_name, changed_properties, invalidated_properties):
        for name, value in changed_properties.items():
            self.set_system_settings(name, value)

    # Placeholder signal. Needed to register the settings interface.
    @dbus.service.signal(DBUS_NAME, signature='s')
    def SettingsUpdated(self, sname):
        pass
if __name__ == '__main__':
    dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
    bus = get_dbus()
    # Settings are persisted under /var/lib/obmc/ so they survive BMC reboots
    obj = HostSettingsObject(bus, OBJ_NAME, s.SETTINGS, "/var/lib/obmc/")
    mainloop = gobject.MainLoop()
    obj.unmask_signals()
    name = dbus.service.BusName(DBUS_NAME, bus)
    # Python 2 print statement -- this service targets a Python 2 runtime
    print "Running HostSettingsService"
    mainloop.run()
|
12,292 | 9e51903c34b7d47d410b24d130404302be7c6a3d | from flask import Flask, render_template
from flask import Response
from flask import request
from datetime import datetime
import MySQLdb
import sys
import time
import hashlib
import os
import json
import random
import subprocess
app = Flask(__name__)
startTime = datetime.now()
db = MySQLdb.connect("mysql","root","password")
cursor = db.cursor()
@app.route('/')
def index():
    """Front page: show the kubernetes pod/container id this app runs in.

    NOTE(review): shell=True is acceptable here only because the command is a
    fixed string with no user input interpolated; do not extend it with
    request data.
    """
    output=subprocess.check_output("cat /proc/self/cgroup | grep kubepods | sed s/\\\\//\\\\n/g | tail -1", shell=True);
    return render_template('index.html', str=output)
@app.route('/add')
def add():
    """Render the form used to create a new user."""
    return render_template('add_user.html')
@app.route('/init')
def init():
    """Drop and recreate the USERDB database and its users table.

    Destructive: every call wipes all existing data.
    """
    cursor.execute("DROP DATABASE IF EXISTS USERDB")
    cursor.execute("CREATE DATABASE USERDB")
    cursor.execute("USE USERDB")
    sql = """CREATE TABLE users (
       ID int NOT NULL AUTO_INCREMENT,
       USER char(30),
       CONTACT varchar(30),
       GAME_TYPE varchar(30),
       PRIMARY KEY ( ID )
    )"""
    cursor.execute(sql)
    db.commit()
    return "DB Init done"
@app.route("/users/add", methods=['POST'])
def add_users():
    """Insert a new user from the posted form, then show the full user list.

    Uses a parameterized INSERT, so form values cannot inject SQL.
    """
    user = request.form['user']
    contact = request.form['contact']
    game_type = request.form['game_type']
    cursor.execute("INSERT INTO USERDB.users (USER,CONTACT,GAME_TYPE) VALUES (%s,%s,%s)", (user,contact,game_type))
    db.commit()
    return get_all_users()
@app.route('/users/<uid>')
def get_users(uid):
    """Render the detail page for the user with primary key *uid*.

    Uses a parameterized query; the previous string-concatenated SQL was
    injectable via the uid path segment.
    """
    cursor.execute("select * from USERDB.users where ID=%s", (uid,))
    data = cursor.fetchone()
    if data:
        return render_template('users.html',id=data[0], name=data[1], contact=data[2], game_type=data[3])
    else:
        return "User doesn't exist for given ID!"
@app.route('/users/edit/<uid>')
def edit_users(uid):
    """Render the edit form pre-filled with the user *uid*'s current data.

    Uses a parameterized query; the previous string-concatenated SQL was
    injectable via the uid path segment.
    """
    cursor.execute("select * from USERDB.users where ID=%s", (uid,))
    data = cursor.fetchone()
    if data:
        return render_template('edit_user.html',id=data[0], name=data[1], contact=data[2], game_type=data[3])
    else:
        return "Record not found!"
@app.route("/users/update/<uid>", methods=['POST'])
def update_users(uid):
    """Update user *uid* from the posted form, then show the full user list.

    Uses a parameterized UPDATE; the previous string-concatenated SQL was
    injectable via both the form fields and the uid path segment.
    """
    user = request.form['user']
    contact = request.form['contact']
    game_type = request.form['game_type']
    cursor.execute(
        "UPDATE USERDB.users SET USER=%s,CONTACT=%s,GAME_TYPE=%s WHERE ID=%s",
        (user, contact, game_type, uid))
    db.commit()
    return get_all_users()
@app.route("/users/delete/<uid>")
def delete_users(uid):
    """Delete user *uid*, then show the remaining users.

    Uses a parameterized DELETE; the previous string-concatenated SQL was
    injectable via the uid path segment.
    """
    cursor.execute("DELETE FROM USERDB.users WHERE ID=%s", (uid,))
    db.commit()
    return get_all_users()
@app.route('/users/all')
def get_all_users():
    """Render a table of every user, or a message when the table is empty."""
    cursor.execute("select * from USERDB.users")
    data = cursor.fetchall()
    if data:
        return render_template('all_users.html', data=data)
    else:
        return "No Records in the database!"
if __name__ == "__main__":
    # Development server only: debug=True enables the interactive debugger
    # and auto-reloader and must not be exposed in production.
    app.run(host="0.0.0.0", port=5000, debug=True)
12,293 | 3490ac769d3231ba6a7703185f34a74fa34bdd27 | """
10. Regular Expression Matching (Hard)
Implement regular expression matching with support for '.' and '*'.
'.' Matches any single character.
'*' Matches zero or more of the preceding element.
The matching should cover the entire input string (not partial).
The function prototype should be:
bool isMatch(const char *s, const char *p)
Some examples:
isMatch("aa","a") → false
isMatch("aa","aa") → true
isMatch("aaa","aa") → false
isMatch("aa", "a*") → true
isMatch("aa", ".*") → true
isMatch("ab", ".*") → true
isMatch("aab", "c*a*b") → true
"""
class Solution(object):
    """Regular-expression matching supporting '.' and '*' (LeetCode 10).

    '.' matches any single character; '*' matches zero or more of the
    preceding element. The match must cover the entire input string.
    """

    def isMatch(self, s, p):
        """Return True if pattern *p* matches the whole of string *s*.

        :type s: str
        :type p: str
        :rtype: bool
        """
        return self.helper(s, p, 0, 0)

    def helper(self, s, p, i, j):
        """Return True if s[i:] matches p[j:].

        Rewritten with memoization on (i, j): the previous backtracking
        implementation re-solved identical subproblems and could take
        exponential time on patterns like 'a*a*a*...'; this version is
        O(len(s) * len(p)).
        """
        memo = {}

        def match(i, j):
            if (i, j) in memo:
                return memo[(i, j)]
            if j == len(p):
                # pattern exhausted: match iff the string is too
                ans = i == len(s)
            else:
                # does the current pattern char consume s[i]?
                first = i < len(s) and p[j] in (s[i], ".")
                if j + 1 < len(p) and p[j + 1] == "*":
                    # 'x*': either skip the pair, or consume one char and stay
                    ans = match(i, j + 2) or (first and match(i + 1, j))
                else:
                    ans = first and match(i + 1, j + 1)
            memo[(i, j)] = ans
            return ans

        return match(i, j)
|
12,294 | 847bbca245a58385764f890e740313174383eae0 | from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
import string
import random
# Create your models here.
def _random_invite_code():
    """Return a fresh 6-character invite code (uppercase letters + digits)."""
    return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))


class Club(models.Model):
    """A book club with a shareable invite code and a set of member users."""
    club_name = models.CharField(max_length=100)
    members = models.ManyToManyField(User)
    # Pass the function itself (not its result) so Django evaluates it per
    # row. The previous inline join(...) expression ran once at import time,
    # giving every club the exact same invite code.
    # NOTE(review): changing the default requires a (schema-identical)
    # migration to be generated.
    invite = models.CharField(max_length=6, default=_random_invite_code)

    def __str__(self):
        return self.club_name

    def get_absolute_url(self):
        return reverse('club', kwargs={'club_id': self.id})
class Book(models.Model):
    """A book read by a club; most bibliographic fields are optional."""
    title = models.CharField(max_length=500)
    author = models.CharField(max_length=500, blank=True, null=True)
    desc = models.TextField(max_length=10000, blank=True, null=True)
    isbn = models.CharField(max_length=15, blank=True, null=True)
    # presumably a cover-image URL -- verify against templates
    image = models.CharField(max_length=1000, blank=True, null=True)
    club = models.ForeignKey(Club, on_delete=models.CASCADE)
    # meeting = models.OneToOneField(Meeting, on_delete=models.CASCADE, primary_key=True)
    # ratings = models.ForeignKey(Rating, on_delete=models.CASCADE)
    def __str__(self):
        return self.title
class Meeting(models.Model):
    """A club meeting, optionally tied to one book and a chapter range."""
    date = models.DateField(blank=True, null=True)
    meeting_link = models.CharField(max_length=100, blank=True, null=True)
    location = models.CharField(max_length=100, blank=True, null=True)
    # free-text chapter range to discuss; 'All' means the whole book
    chapters = models.CharField(max_length=100, default='All')
    club = models.ForeignKey(Club, on_delete=models.CASCADE)
    book = models.OneToOneField(Book, on_delete=models.CASCADE, blank=True, null=True)
    # def __str__(self):
    #     return self.date
class Discussion(models.Model):
    """A user's contribution to a meeting: a comment, question, or quote."""
    COMMENT_TYPES = (
        ('comment', 'Comment'),
        # NOTE(review): 'quesiton' is a typo in the *stored* value; fixing it
        # would require a data migration for existing rows, so it is left
        # untouched here.
        ('quesiton', 'Discussion Question'),
        ('quote', 'Quote')
    )
    disc_type = models.CharField(max_length=100, choices=COMMENT_TYPES)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE)
    comment = models.TextField(max_length=10000)
# class Rec(models.Model):
# votes = models.IntegerField()
# user = models.ForeignKey(User, on_delete=models.CASCADE)
# book = models.ForeignKey(Book, on_delete=models.CASCADE)
# club = models.ForeignKey(Club, on_delete=models.CASCADE)
class Rating(models.Model):
    """A single user's numeric rating of a book."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    rating = models.IntegerField(blank=True, null=True)
    book = models.ForeignKey(Book, on_delete=models.CASCADE, blank=True, null=True)
|
12,295 | 2822275a20dd1accdab818201b60399087dd0248 | class Driver():
def method_D1(self,a,b):
c=(a+b)
print ("value of c is:-", c)
|
12,296 | e9460b41846810da68660282b0f609da6066a38a | '''
Delete contents of s3 bucket (so that delete-stack call will work)
'''
import boto3, sys
if len(sys.argv) == 1:
    print("must pass the bucketname you want to delete contents from")
    sys.exit()
else:
    bucketname = sys.argv[1]

client = boto3.client('s3')
s3 = boto3.resource('s3')
# Paginate so buckets with more than 1000 objects are fully covered.
paginator = client.get_paginator('list_objects_v2')
page_iterator = paginator.paginate(Bucket=bucketname)
for page in page_iterator:
    # list_objects_v2 omits the 'Contents' key entirely when a page has no
    # objects (e.g. an already-empty bucket); .get() avoids the KeyError the
    # previous page['Contents'] lookup raised in that case.
    for item in page.get('Contents', []):
        print('deleting: ' + item['Key'] + ' from bucket: ' + bucketname)
        s3.Object(bucketname, item['Key']).delete()
|
12,297 | a0cf1b88de650b4be49ad819e4c46155044a47f4 | from flask import Flask, render_template
from modules import convert_to_dict, make_ordinal
from flask_bootstrap import Bootstrap
app = Flask(__name__)
application = app
senator_list = convert_to_dict("florida-senators.csv")
Bootstrap(app)
@app.route('/')
def index():
    """Render the index page listing every senator with their district number."""
    districts = [row['District'] for row in senator_list]
    names = [row['Senator'] for row in senator_list]
    district_name_pairs = zip(districts, names)
    return render_template('index.html', pairs=district_name_pairs, the_title="Florida Senator Index")
@app.route('/senator/<num>')
def detail(num):
    """Render the detail page for the senator of district *num*.

    Returns a 404 response when no senator matches. Previously an unknown
    district left sen_dict unbound and raised NameError (an HTTP 500).
    """
    sen_dict = None
    for senator in senator_list:
        if senator['District'] == num:
            sen_dict = senator
            break
    if sen_dict is None:
        return "Senator not found", 404
    # local renamed from `ord` to avoid shadowing the builtin
    ordinal = make_ordinal(int(num))
    return render_template('senator.html', sen=sen_dict, ord=ordinal, the_title=sen_dict['Senator'])
if __name__ == '__main__':
    # Development server only: debug=True enables the reloader/debugger and
    # must not be used in production.
    app.run(debug=True)
|
12,298 | 74222a5f7c2b727a84e17da73a476a75aad80a44 | import sys
import json
from scrapy import signals
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from searchengine.spiders.bing import BingSpider
from searchengine.spiders.sogou_wx import SogouWxSpider
from searchengine.spiders.weibo import WeiboSpider
from searchengine.spiders.baidu import BaiduSpider
from searchengine.spiders.baidunews import BaidunewsSpider
from searchengine.spiders.ss_360 import Ss360Spider
from searchengine.spiders.ss_360_zx import Ss360ZZSpider
from searchengine.spiders.chinaso import ChinaSoSpider
from searchengine.spiders.chinaso_news import ChinaSoNewsSpider
from scrapy.signalmanager import dispatcher
# Maps the CLI spider name to its spider class; unknown names yield [].
_SPIDER_CLASSES = {
    'bing': BingSpider,
    'weixin': SogouWxSpider,
    'weibo': WeiboSpider,
    'baidu': BaiduSpider,
    'baidunews': BaidunewsSpider,
    'ss_360': Ss360Spider,
    'ss_360_zx': Ss360ZZSpider,
    'chinaso': ChinaSoSpider,
    'chinaso_news': ChinaSoNewsSpider,
}


def spider_results(spidername, keywords, pagenum, sorttype):
    """Run the named spider synchronously and return its items as JSON.

    Replaces the previous nine-branch if/elif chain with a dispatch table;
    behavior is unchanged, including the [] return for unknown spider names.

    Returns a JSON string of the scraped items, or [] when *spidername* is
    not recognized.
    """
    spider_class = _SPIDER_CLASSES.get(spidername)
    if spider_class is None:
        return []
    results = []

    def crawler_results(signal, sender, item, response, spider):
        results.append(dict(item))

    dispatcher.connect(crawler_results, signal=signals.item_passed)
    process = CrawlerProcess(get_project_settings())
    process.crawl(spider_class, keywords=keywords,
                  pagenum=pagenum, sorttype=sorttype)
    process.start()  # the script will block here until the crawling is finished
    # The gbk round-trip intentionally drops characters not representable in
    # that codec before handing the JSON back to the (gbk) console consumer.
    return json.dumps(results, ensure_ascii=False).encode('gbk', 'ignore').decode('gbk')
if __name__ == '__main__':
    # Usage: script <spidername> <keywords> <pagenum> [sorttype]
    if len(sys.argv) >= 4:
        spidername = sys.argv[1]
        keywords = sys.argv[2]
        pagenum = int(sys.argv[3])
        # NOTE(review): sorttype is the int 1 when defaulted but a *str* when
        # taken from argv -- confirm the spiders accept both types.
        sorttype = 1 if len(sys.argv) == 4 else sys.argv[4]
        if keywords:
            # clamp nonsensical page counts to a single page
            if pagenum <= 0:
                pagenum = 1
            searchresult = spider_results(
                spidername, keywords, pagenum, sorttype)
            print(searchresult)
|
12,299 | 8e0a18522553d1dc7acfeb8e0f117cdbe898d296 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import pandas as pd
from data_process import data_preprocess
from random_sample import rand_sample
from cal_auc import auc
# Load the dataset
data = pd.read_csv("testset.csv",sep=",")
"""特征处理"""
# Feature processing: drop the id column and one-hot encode categoricals
data = data_preprocess.del_id_get_hot(data)
"""特征选择"""
# Feature selection (not implemented here)
"""选择正负样本"""
# Split positive (hypertension != 0) and negative (hypertension == 0) samples
org_pos_sample = data.loc[data['hypertension']!=0]
org_neg_sample = data.loc[data['hypertension']==0]
"""选择模型,随机采样n次,返回auc的平均值"""
# Randomly sample 100 times with logistic regression; the mean AUC is returned.
# NOTE(review): this rebinding shadows the `auc` function imported from
# cal_auc above -- rename one of them if that import is still needed.
auc = rand_sample(org_pos_sample,org_neg_sample,"lr",100)
print(auc)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.