index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
17,300 | dd24c22f5ecb13eefac3be66e3a8aede4a750867 | """
@author: Ferdinand E. Silva
@email: ferdinandsilva@ferdinandsilva.com
@website: http://ferdinandsilva.com
"""
import os
class qqFileUploader(object):
    """Receives uploads from the qq/fine-uploader AJAX widget in a Django view.

    The raw file body arrives as the request stream; the size and name come
    from the CONTENT_LENGTH and X-File-Name request headers.
    """

    def __init__(self, allowedExtensions=None, sizeLimit=1024):
        # None replaces the original mutable [] default (shared-list pitfall);
        # behavior for existing callers is unchanged.
        self.allowedExtensions = [] if allowedExtensions is None else allowedExtensions
        self.sizeLimit = sizeLimit

    def handleUpload(self, djangoRequest, uploadDirectory):
        """Validate the upload and save it; return a JSON-ish status string."""
        # Read size and name straight from the request metadata. The original
        # reached META through the bound method's im_self, which is both
        # convoluted and Python-2-only.
        fileSize = int(djangoRequest.META["CONTENT_LENGTH"])
        fileName = djangoRequest.META["HTTP_X_FILE_NAME"]
        # check first for allowed file extensions (".*" acts as a wildcard)
        if self._getExtensionFromFileName(fileName) in self.allowedExtensions or ".*" in self.allowedExtensions:
            # check file size
            if fileSize <= self.sizeLimit:
                # with-statement guarantees the handle is closed even on error
                # (the original leaked it if write() raised)
                with open(os.path.join(uploadDirectory, fileName), "wb+") as destination:
                    destination.write(djangoRequest.read(fileSize))
                return "{success:true}"
            else:
                return '{"error":"File is too large."}'
        else:
            return '{"error":"File has an invalid extension."}'

    def _getExtensionFromFileName(self, fileName):
        """Return the extension of *fileName*, including the leading dot."""
        return os.path.splitext(fileName)[1]
|
17,301 | 6c7a8df13d243d691f94c81f55b4390dc965f532 | import pandas as pd
import numpy as np
import os
import sys
import time
from getArgs import getArgs
from getConfig import getConfig
from getData import getData
from getModelParms import getParms
from preProcess import preProcess
from nn import run
from calcMAPE import calcMAPE
from calcRMSE import calcRMSE
import jobNumber as job
# This file stores the results for each set of parameters so you can review a series
# of runs later
def writeResults(results, filename="/home/tbrownex/NNscores.csv"):
    """Write one CSV row per hyper-parameter run.

    Each item of *results* is (parms, mape, rmse) where parms is the 6-tuple
    (L1, Lambda, activation, batchSize, LR, StdDev).

    *filename* was promoted to a parameter (its default keeps the original
    hard-coded path) so the destination is configurable and testable.
    """
    delim = ","
    header_cols = ["L1", "Lambda", "activation", "batchSize", "LR", "StdDev", "MAPE", "RMSE"]
    with open(filename, 'w') as summary:
        summary.write(delim.join(header_cols) + "\n")
        for parms, mape, rmse in results:
            fields = [str(v) for v in parms[:6]] + [str(mape), str(rmse)]
            summary.write(delim.join(fields) + "\n")
if __name__ == "__main__":
    # Grid-search driver: run the network once per hyper-parameter
    # combination, score each run (MAPE / RMSE), write a CSV summary,
    # then bump the persistent job number.
    args = getArgs()
    config = getConfig()
    df = getData(config)
    dataDict = preProcess(df, config, args)
    jobId = job.getJob()
    parms = getParms("NN")      # The hyper-parameter combinations to be tested
    results = []
    count = 1
    start_time = time.time()
    print("\n{} parameter combinations".format(len(parms)))
    print("\n{:<10}{:<10}{}".format("Count", "MAPE","RMSE"))
    for x in parms:
        parmDict = {}  # holds the hyperparameter combination for one run
        parmDict['l1Size'] = x[0]
        parmDict['lambda'] = x[1]
        parmDict['activation'] = x[2]
        parmDict['batchSize'] = x[3]
        parmDict['lr'] = x[4]
        parmDict['std'] = x[5]
        # per-run subdirectory name (presumably for run logs -- confirm in nn.run)
        jobName = "job_" + jobId +"/"+ "run_" + str(count)
        preds = run(dataDict, parmDict, config, jobName)
        # score predictions against the held-out test targets
        mape = calcMAPE(dataDict["testY"], preds)
        rmse = calcRMSE(dataDict["testY"], preds)
        print("{:<10}{:<10.1%}{:.2f}".format(count, mape, rmse))
        tup = (x, mape, rmse)
        results.append(tup)
        count +=1
    # Write out a summary of the results
    writeResults(results)
    # persist the incremented job number for the next invocation
    jobId = int(jobId)
    job.setJob(jobId+1)
    print("\nJob {} complete after {:,.0f} minutes".format(str(jobId), (time.time() -start_time)/60))
17,302 | 1b66a5953f1dba8e310661570d43ea5695738197 | # get the list of clusters in the account
import boto3
import datetime
import json
import pprint
# AWS service clients shared by the whole script.
ecs_client = boto3.client('ecs')
# NOTE(review): only the CloudWatch client pins a region -- confirm intentional
cw_client = boto3.client('cloudwatch', region_name='us-east-1')
ec2_client = boto3.client('ec2')
asg_client = boto3.client('autoscaling')
def cluster_list():
    """Fetch the raw ListClusters response for this account."""
    response = ecs_client.list_clusters()
    return response
def desc_clusters(a_clustrs):
    """Describe every cluster ARN in *a_clustrs*."""
    response = ecs_client.describe_clusters(clusters=a_clustrs)
    return response
def service_list(clustr):
    """List the services registered in cluster *clustr*."""
    response = ecs_client.list_services(cluster=clustr)
    return response
def tasks_list(clustr, srvc):
    """List task ARNs for service *srvc* in cluster *clustr*.

    Bug fix: the original passed ``cluster=clust`` -- an undefined name left
    over from the commented-out exploration loop -- so every call raised
    NameError. It now uses the *clustr* parameter.
    """
    return ecs_client.list_tasks(cluster=clustr, serviceName=srvc)
def task_desc(a_clustr, a_task):
    """Describe a single task (by ARN or id) within cluster *a_clustr*."""
    response = ecs_client.describe_tasks(cluster=a_clustr, tasks=[a_task])
    return response
def task_def(task_def_arn):
    """Fetch the task definition identified by *task_def_arn*."""
    response = ecs_client.describe_task_definition(taskDefinition=task_def_arn)
    return response
def cont_inst_list(a_clustr):
    """List the container-instance ARNs registered in cluster *a_clustr*."""
    response = ecs_client.list_container_instances(cluster=a_clustr)
    return response
def desc_ec2_instance(a_inst_id):
    """Describe the EC2 instance with id *a_inst_id*."""
    response = ec2_client.describe_instances(InstanceIds=[a_inst_id])
    return response
def desc_cont_instances(a_clustr, a_cont_instas):
    """Describe the given container instances within cluster *a_clustr*."""
    response = ecs_client.describe_container_instances(
        cluster=a_clustr, containerInstances=a_cont_instas)
    return response
def desc_asgs():
    """Fetch all Auto Scaling groups visible to this account."""
    response = asg_client.describe_auto_scaling_groups()
    return response
def ecs_cluster_metrics(a_clustr,a_srvc,a_metric_name,a_sec):
    """Return 5-minute Average datapoints for an AWS/ECS service metric
    over the trailing *a_sec* seconds."""
    clust_metics = cw_client.get_metric_statistics(
        Namespace='AWS/ECS',
        Dimensions=[
            {
                'Name': 'ClusterName',
                # the dimension wants the bare name, not the full ARN
                'Value': a_clustr.split('/')[-1]
            },
            {
                'Name': 'ServiceName',
                'Value': a_srvc.split('/')[-1]
            }
        ],
        MetricName=a_metric_name,
        StartTime=datetime.datetime.utcnow() - datetime.timedelta(seconds=a_sec),
        EndTime=datetime.datetime.utcnow(),
        Period=300,
        Statistics=[
            'Average'
        ]
    )
    return clust_metics
# Look up ECS clusters and ASGs, then print the ASGs whose Name tag matches
# a cluster name. (A large block of commented-out exploration code was
# removed; see the helper functions above for the calls it exercised.)
cluster_ls = cluster_list()

asgs = asg_client.describe_auto_scaling_groups()
clusters = desc_clusters(cluster_ls['clusterArns'])
pprint.pprint(clusters)
for asg in asgs['AutoScalingGroups']:
    for tag in asg['Tags']:
        if tag['Key'] == 'Name':
            # print() call form replaces the Python-2-only print statements
            # the original mixed in (the script was a syntax error under
            # Python 3); the emitted output is unchanged.
            print(tag['Value'])
            print(tag['Value'].split(' '))
            print(clusters)
            print(type(clusters))
            print(clusters['clusters'][0]['clusterName'])
            if clusters['clusters'][0]['clusterName'] in tag['Value']:
                print(tag['ResourceId'])
def launch_ec2_instance():
    # NOTE(review): this is an unmodified boto3 documentation template, not
    # working code: `client` is undefined (the module's EC2 client is named
    # ec2_client), and the 'a'|'b' string "alternatives" copied verbatim from
    # the docs would raise TypeError if this function were ever called.
    # Replace the placeholders with real values before use.
    response = client.run_instances(
        BlockDeviceMappings=[
            {
                'DeviceName': 'string',
                'VirtualName': 'string',
                'Ebs': {
                    'Encrypted': True|False,
                    'DeleteOnTermination': True|False,
                    'Iops': 123,
                    'SnapshotId': 'string',
                    'VolumeSize': 123,
                    'VolumeType': 'standard'|'io1'|'gp2'|'sc1'|'st1'
                },
                'NoDevice': 'string'
            },
        ],
        ImageId='string',
        InstanceType='t1.micro'|'t2.nano'|'t2.micro'|'t2.small'|'t2.medium'|'t2.large'|'t2.xlarge'|'t2.2xlarge'|'m1.small'|'m1.medium'|'m1.large'|'m1.xlarge'|'m3.medium'|'m3.large'|'m3.xlarge'|'m3.2xlarge'|'m4.large'|'m4.xlarge'|'m4.2xlarge'|'m4.4xlarge'|'m4.10xlarge'|'m4.16xlarge'|'m2.xlarge'|'m2.2xlarge'|'m2.4xlarge'|'cr1.8xlarge'|'r3.large'|'r3.xlarge'|'r3.2xlarge'|'r3.4xlarge'|'r3.8xlarge'|'r4.large'|'r4.xlarge'|'r4.2xlarge'|'r4.4xlarge'|'r4.8xlarge'|'r4.16xlarge'|'x1.16xlarge'|'x1.32xlarge'|'x1e.32xlarge'|'i2.xlarge'|'i2.2xlarge'|'i2.4xlarge'|'i2.8xlarge'|'i3.large'|'i3.xlarge'|'i3.2xlarge'|'i3.4xlarge'|'i3.8xlarge'|'i3.16xlarge'|'hi1.4xlarge'|'hs1.8xlarge'|'c1.medium'|'c1.xlarge'|'c3.large'|'c3.xlarge'|'c3.2xlarge'|'c3.4xlarge'|'c3.8xlarge'|'c4.large'|'c4.xlarge'|'c4.2xlarge'|'c4.4xlarge'|'c4.8xlarge'|'cc1.4xlarge'|'cc2.8xlarge'|'g2.2xlarge'|'g2.8xlarge'|'g3.4xlarge'|'g3.8xlarge'|'g3.16xlarge'|'cg1.4xlarge'|'p2.xlarge'|'p2.8xlarge'|'p2.16xlarge'|'d2.xlarge'|'d2.2xlarge'|'d2.4xlarge'|'d2.8xlarge'|'f1.2xlarge'|'f1.16xlarge',
        Ipv6AddressCount=123,
        Ipv6Addresses=[
            {
                'Ipv6Address': 'string'
            },
        ],
        KernelId='string',
        KeyName='string',
        MaxCount=123,
        MinCount=123,
        Monitoring={
            'Enabled': True|False
        },
        Placement={
            'AvailabilityZone': 'string',
            'Affinity': 'string',
            'GroupName': 'string',
            'HostId': 'string',
            'Tenancy': 'default'|'dedicated'|'host',
            'SpreadDomain': 'string'
        },
        RamdiskId='string',
        SecurityGroupIds=[
            'string',
        ],
        SecurityGroups=[
            'string',
        ],
        SubnetId='string',
        UserData='string',
        AdditionalInfo='string',
        ClientToken='string',
        DisableApiTermination=True|False,
        DryRun=True|False,
        EbsOptimized=True|False,
        IamInstanceProfile={
            'Arn': 'string',
            'Name': 'string'
        },
        InstanceInitiatedShutdownBehavior='stop'|'terminate',
        NetworkInterfaces=[
            {
                'AssociatePublicIpAddress': True|False,
                'DeleteOnTermination': True|False,
                'Description': 'string',
                'DeviceIndex': 123,
                'Groups': [
                    'string',
                ],
                'Ipv6AddressCount': 123,
                'Ipv6Addresses': [
                    {
                        'Ipv6Address': 'string'
                    },
                ],
                'NetworkInterfaceId': 'string',
                'PrivateIpAddress': 'string',
                'PrivateIpAddresses': [
                    {
                        'Primary': True|False,
                        'PrivateIpAddress': 'string'
                    },
                ],
                'SecondaryPrivateIpAddressCount': 123,
                'SubnetId': 'string'
            },
        ],
        PrivateIpAddress='string',
        ElasticGpuSpecification=[
            {
                'Type': 'string'
            },
        ],
        TagSpecifications=[
            {
                'ResourceType': 'customer-gateway'|'dhcp-options'|'image'|'instance'|'internet-gateway'|'network-acl'|'network-interface'|'reserved-instances'|'route-table'|'snapshot'|'spot-instances-request'|'subnet'|'security-group'|'volume'|'vpc'|'vpn-connection'|'vpn-gateway',
                'Tags': [
                    {
                        'Key': 'string',
                        'Value': 'string'
                    },
                ]
            },
        ]
    )
|
17,303 | 1266c10d0834fb0cff11560eda342f149333632a | #Professor Fernando Amaral
#2 Integer addition calculator (comments translated from Portuguese;
#  the prompts/output below are user-facing runtime strings and stay as-is)
val1 = input("Informe o primeiro valor: ")
val2 = input("Informe o segundo valor: ")
val1 = int(val1)
val2 = int(val2)
val3 = val1 + val2
print("Total dos valores: ", val3)
# second version: convert inline instead of rebinding the variables
val1 = input("Informe o primeiro valor: ")
val2 = input("Informe o segundo valor: ")
print("Total dos valores: ", int(val1) + int(val2))
|
17,304 | da611729e7bafd2d807c7905178b4fc19557babc | # Generated by Django 1.11.6 on 2017-10-17 12:10
from django.db import migrations, models
import pretalx.submission.models.submission
class Migration(migrations.Migration):
    """Add submission.invitation_token, a 32-char generated invite code."""

    dependencies = [
        ("submission", "0010_auto_20171006_1118"),
    ]
    operations = [
        migrations.AddField(
            model_name="submission",
            name="invitation_token",
            field=models.CharField(
                # default is a callable, so each new row gets a fresh code
                default=pretalx.submission.models.submission.generate_invite_code,
                max_length=32,
            ),
        ),
    ]
|
17,305 | a3b844c3dce3b1294eaa08179eb6ad055d6c7536 | import pytest
from typing import List
def circular_rotation(array: List[int], rotations: int) -> List[int]:
    """Rotate *array* by *rotations* positions via slicing (non-mutating).

    Guard added: an empty array is returned unchanged instead of raising
    ZeroDivisionError on the ``% len(array)``.
    """
    if not array:
        return array
    rotations = rotations % len(array)
    # rotations == 0 still works: array[-0:] is the whole list, array[:-0] is []
    return array[-rotations:] + array[:-rotations]
@pytest.fixture
def get_fixtures():
    """Provide ((array, rotations), expected) cases for circular_rotation."""
    first_input = [
        [1, 2, 3], 2
    ]
    first_output = [2, 3, 1]
    return [
        (first_input, first_output),
    ]
def test_code(get_fixtures):
    # each case is ((array, rotations), expected)
    for data in get_fixtures:
        assert circular_rotation(*data[0]) == data[1]
# Tiny demo: append the letter "a" to a string.
ag = "kuty"

# write a function that gets a string as an input and appends an a character to its end
def appendA(text):
    """Return *text* with a single 'a' appended."""
    return "{}a".format(text)

ag = appendA(ag)
print(ag)
|
def maxProfit(prices):
    """Return the best single buy-then-sell profit obtainable from *prices*.

    One pass, tracking the cheapest price seen so far. Returns 0 when no
    profitable transaction exists (empty, single-element, or strictly
    falling prices); the original returned -inf in that case, which is not
    a usable profit value, and raised IndexError on empty input.
    """
    if not prices:
        return 0
    min_price = prices[0]
    max_profit = 0
    for price in prices[1:]:
        if price < min_price:
            min_price = price
        else:
            max_profit = max(max_profit, price - min_price)
    return max_profit


prices = [10, 7, 5, 8, 11, 9]
print(maxProfit(prices))
17,308 | 1e9630b51dd0cec9507cc8ef69a469715c7dea92 | """" A ride in an Amusement park starts at the ground level. it moves either up or down.
Write a program to count the number of sinks. A rise is defined as a move above ground from the start position
followed by any string of moves up or down until the ride reaches the start position again.
A sink is defined as a move below ground from start position
followed by any string of moves up or down until the ride reaches the start position again.
"""
def main():
    """Count 'sinks' in a hard-coded ride trace of H (up) / L (down) moves.

    A sink begins exactly when the ride first drops below ground level, so
    counting transitions to height -1 counts the sinks.
    """
    moves = 'HLLHHHHLLLLLHHHHHHLLLLLLLLHHHHLLHH'
    height = 0
    sink_count = 0
    for move in moves:
        if move == 'H':
            height += 1
        elif move == 'L':
            height -= 1
            if height == -1:
                sink_count += 1
        else:
            print("Invalid string")
            exit()
    print(sink_count)


main()
|
17,309 | 9fcaf27df8cfab03dff0b7fb45a900ed9f5815a7 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from timeseries import convert_data_to_timeseries
input_file='data_timeseries.txt'
# columns 2 and 3 of the input file become two aligned time series
data1 = convert_data_to_timeseries(input_file,2)
data2 = convert_data_to_timeseries(input_file,3)
dataframe=pd.DataFrame({'first':data1,'second':data2})
# plot the 1952-1955 window of both series on one axes
dataframe['1952':'1955'].plot()
plt.title('data overlapped on top of each other')
plt.figure()
# element-wise difference over the same window, on its own figure
difference=dataframe['1952':'1955']['first']-dataframe['1952':'1955']['second']
difference.plot()
plt.title('Difference(first-second)')
# rows satisfying both threshold conditions
dataframe[(dataframe['first'] > 60) & (dataframe['second'] < 20)].plot()
plt.title('first > 60 and second < 20')
plt.show()
def rotate_left(arr, count):
    """Return a new list equal to *arr* rotated left by *count* positions.

    Purely slice-based; *arr* itself is never mutated.
    """
    head, tail = arr[:count], arr[count:]
    return tail + head
def main():
    """Read 'n d' then the array from stdin and print its left rotation."""
    _, shift = map(int, input().split())
    values = list(map(int, input().split()))
    print(f'Input array: {values}')
    print(f'Rotated array: {rotate_left(values, shift)}')


if __name__ == '__main__':
    main()
|
17,311 | a5e42410c7c7ba586a3b1b27ec322900e596d65b | """
Parses data and returns download data.
"""
import logging
import re
import string
from os import PathLike
from os.path import join, realpath, splitext
from typing import Dict, List, Optional, Tuple
from ..models import (AnimeListSite, AnimeThemeAnime, AnimeThemeEntry,
AnimeThemeTheme, AnimeThemeVideo, DownloadData)
from ..options import OPTIONS
from .parser import get_animethemes
from .utils import Measure
logger = logging.getLogger('animethemes-dl')
# character sets used when sanitizing filenames
FILENAME_BAD = set('#%&{}\\<>*?/$!\'":@+`|')  # NOTE(review): defined but not referenced in this module
FILENAME_BANNED = set('<>:"/\\|?*')           # characters Windows forbids in filenames
FILENAME_ALLOWEDASCII = set(string.printable).difference(FILENAME_BANNED)
# this regex is for getting metadata from a song name, might be straight up wrong
FEATURED_RE = re.compile(r"""^
(.*?) # song name
(?:
\ \(?feat\.\ (
[\w\ ]+ # artist name
(?:\([\w\ ]+\))? # artists second name
)\)?
|
\(([\w\ ]+)\) # comment enclosed in "()"
(?:\ (.+))? # after comment details
)?
$""",re.VERBOSE)
def is_entry_wanted(entry: AnimeThemeEntry) -> bool:
    """Check whether the entry's spoiler/nsfw flags match the OPTIONS filter.

    A filter value of None means "don't care"; otherwise the entry flag must
    equal the filter value exactly.
    """
    for flag in ('spoiler', 'nsfw'):
        wanted = OPTIONS['filter'][flag]
        if wanted is not None and entry[flag] ^ wanted:
            return False
    return True
def is_video_wanted(video: AnimeThemeVideo) -> bool:
    """Check whether *video* passes every tag/resolution/source filter in OPTIONS."""
    filters = OPTIONS['filter']
    for flag in ('nc', 'subbed', 'lyrics', 'uncen'):
        wanted = filters[flag]
        if wanted is not None and video[flag] ^ wanted:
            return False
    if video['resolution'] < filters['resolution']:
        return False
    if filters['source'] is not None and video['source'] != filters['source']:
        return False
    # `overlap` is an allow-list of acceptable overlap kinds
    if filters['overlap'] is not None and video['overlap'] not in filters['overlap']:
        return False
    return True
def get_amount_episodes(episodes: str) -> int:
    """Sum the episode counts in an animethemes-style episodes string.

    The string is comma-separated; an "a-b" chunk contributes b - a and a
    bare number contributes its own value.

    NOTE(review): "a-b" adding b - a (not b - a + 1) looks like it may
    undercount inclusive ranges by one -- confirm against the animethemes
    episodes syntax before changing.
    """
    total = 0
    for chunk in episodes.split(', '):
        if '-' in chunk:
            lo, hi = chunk.split('-')
            total += int(hi) - int(lo)
        else:
            total += int(chunk)
    return total
def strip_illegal_chars(filename: str) -> str:
    """Drop characters not permitted in filenames.

    In ascii mode only characters from the printable-ASCII whitelist are
    kept; otherwise just the universally-banned set is removed.
    """
    if OPTIONS['download']['ascii']:
        return ''.join(ch for ch in filename if ch in FILENAME_ALLOWEDASCII)
    return ''.join(ch for ch in filename if ch not in FILENAME_BANNED)
def get_formatter(**kwargs) -> Dict[str,str]:
    """Build the filename-formatting dict from api return dicts.

    Flattens each keyword dict into ``<name>_<key>`` entries, dropping list,
    dict and bool values and any ``*ated_at`` timestamp keys, then derives
    ``video_filetype`` (always ``webm``) and ``anime_filename`` (the part of
    the video filename before the first ``-``).
    """
    formatter = {
        prefix + '_' + key: value
        for prefix, sub in kwargs.items()
        for key, value in sub.items()
        if not isinstance(value, (list, dict, bool)) and not key.endswith('ated_at')
    }
    formatter['video_filetype'] = 'webm'
    formatter['anime_filename'] = formatter['video_filename'].split('-')[0]
    return formatter
def generate_path(
    anime: AnimeThemeAnime, theme: AnimeThemeTheme,
    entry: AnimeThemeEntry, video: AnimeThemeVideo) -> (
    Tuple[Optional[PathLike],Optional[PathLike]]):
    """Build the save paths for one theme from animethemes api returns.

    Returns ``(video_path | None, audio_path | None)``; a path is None when
    the corresponding output folder is not configured in OPTIONS. The audio
    path reuses the video filename with an ``.mp3`` extension.
    """
    formatter = get_formatter(
        anime=anime, theme=theme, entry=entry, video=video, song=theme['song'])
    name = strip_illegal_chars(OPTIONS['download']['filename'] % formatter)
    video_dir = OPTIONS['download']['video_folder']
    audio_dir = OPTIONS['download']['audio_folder']
    video_path = realpath(join(video_dir, name)) if video_dir else None
    if audio_dir:
        audio_path = splitext(realpath(join(audio_dir, name)))[0] + '.mp3'
    else:
        audio_path = None
    return video_path, audio_path
def pick_best_entry(theme: AnimeThemeTheme) -> Optional[Tuple[AnimeThemeEntry,AnimeThemeVideo]]:
    """
    Returns the best entry and video based on OPTIONS.
    Returns None if no entry/video is wanted
    """
    # picking best entry
    entries = []
    for entry in theme['entries']:
        if not is_entry_wanted(entry):
            continue
        # picking best video
        videos = []
        for video in entry['videos']:
            # force_videos overrides the normal filters; "smart" mode still
            # drops spoiler entries whose video overlaps the episode audio
            if ((is_video_wanted(video) or video['id'] in OPTIONS['download']['force_videos']) and
                not (OPTIONS['filter']['smart'] and entry['spoiler'] and video['overlap']!='None')
            ):
                videos.append(video)
        # can't append empty videos
        if videos:
            # sort videos by giving points: no overlap beats a transition,
            # which beats full overlap
            videos.sort(key=lambda x: ('None','Transition','Over').index(x['overlap']))
            entries.append((entry,videos[0])) # pick first (best)
    # there's a chance no entries will be found
    if entries:
        # entries keep api order; the first surviving entry wins
        return entries[0]
    else:
        logger.debug(f"removed {theme['song']['title']}/{theme['slug']} ({theme['id']})")
        return None
def parse_download_data(data: List[AnimeThemeAnime]) -> List[DownloadData]:
    """
    Parses a list of animethemes api returns for anime.
    Returns download data.

    For each theme: extracts featured-artist/comment/version metadata from
    the song title, applies the OPTIONS filters (type, group, duplicates),
    picks the best entry+video, then emits a dict with the download url,
    save paths, ID3-style metadata and the MyAnimeList id.
    """
    out = []
    songs = set()
    for anime in data:
        last_group = None
        for tracknumber,theme in enumerate(anime['themes']):
            # # remove unwanted tags in song title (feat and brackets)
            match = FEATURED_RE.match(theme['song']['title']) # .* always matches
            theme['song']['title'],featured,comments,version = match.groups()
            # filtering:
            # theme type
            if OPTIONS['filter']['type'] is not None and OPTIONS['filter']['type']!=theme['type']:
                continue
            # groups (for example dubs); only the first group seen is kept
            if last_group is not None and theme['group']!=last_group:
                continue
            else:
                last_group = theme['group']
            # video tags
            best = pick_best_entry(theme)
            if best is None:
                continue
            entry,video = best
            # copies
            if OPTIONS['filter']['no_copy']:
                if theme['song']['title'] in songs:
                    continue
                else:
                    songs.add(theme['song']['title'])
            # fix some problems (staging host in links; version defaulting to 1)
            video['link'] = video['link'].replace('https://v.staging.animethemes.moe','https://animethemes.moe/video')
            entry['version'] = entry['version'] if entry['version'] else 1
            series = [series['name'] for series in anime['series']]
            # add to all the songs
            if OPTIONS['filter']['no_copy']: songs.add(theme['song']['title'])
            # get video path
            videopath,audiopath = generate_path(anime,theme,entry,video)
            out.append({
                'url': video['link'],
                'video_path': videopath,
                'audio_path': audiopath,
                'metadata': {
                    # anime
                    'series': series[0] if len(series)==1 else anime['name'], # mashups are it's own thing (ie isekai quarter)
                    'album': anime['name'], # discs should be numbered,
                    'year': anime['year'],
                    'track': f"{tracknumber+1}/{len(anime['themes'])}", # an ID3 "track/total" syntax
                    'coverarts': [i['link'] for i in anime['images']][::-1],
                    # theme
                    'title': theme['song']['title'],
                    'artists': [artist['name'] for artist in theme['song']['artists']],
                    'themetype': theme['slug'],
                    # entry
                    'version': entry['version'],
                    'notes': entry['notes'],
                    # video
                    'resolution': video['resolution'],
                    'videoid': video['id'],
                    'filesize': video['size'],
                    # const
                    'genre': [145], # anime
                    'encodedby': 'animethemes.moe',
                    'cgroup': 'anime theme', # content group
                    # data pulled from filename
                    'file_featured':featured,
                    'file_comments':comments,
                    'file_version':version
                },
                'info': {
                    # NOTE(review): IndexError if an anime has no MyAnimeList
                    # resource -- presumably guaranteed upstream; confirm.
                    'malid':[r['external_id'] for r in anime['resources'] if r['site']=='MyAnimeList'][0]
                }
            })
    return out
def get_download_data(username: str, site: AnimeListSite, animelist_args={}) -> List[DownloadData]:
    """
    Gets download data from themes.moe and myanimelist.net/anilist.co.
    Returns a list of mirrors, save_paths and id3 tags.
    Sorts using `animethemes_dl.OPTIONS['options']`
    To use anilist.co instead of myanimelist.net, use `anilist`.
    For additional args for myanimelist/anilist, use `animelist_args`.
    """
    # NOTE(review): the mutable default `animelist_args={}` is shared across
    # calls; safe only while callers never mutate it.
    measure = Measure()
    raw = get_animethemes(username, site, **animelist_args)
    data = parse_download_data(raw)
    logger.debug(f'Got {len(data)} themes from {len(raw)} anime.')
    logger.info(f'[get] Got all download data ({len(data)} entries) in {measure()}s.')
    return data
if __name__ == "__main__":
    # Developer helper: dump an example formatter dict to hints/formatter.json
    # (uses Re:Zero as the sample anime).
    import json
    import sys
    from pprint import pprint
    from .animethemes import fetch_animethemes
    with open('hints/formatter.json','w') as file:
        data = fetch_animethemes([(31240,'Re:Zero')])[0]
        json.dump(
            get_formatter(
                anime=data,
                theme=data['themes'][0],
                entry=data['themes'][0]['entries'][0],
                video=data['themes'][0]['entries'][0]['videos'][0],
                song= data['themes'][0]['song']
            ),
            file,
            indent=4
        )
|
17,312 | a59fab43ad5ff9f0f4c152fdd714b76901fc864c | from astropy.coordinates import SkyCoord
def parse_coords(s):
    """Extract (glon, glat) galactic coordinates from a FITS-like header dict.

    *s* must provide CTYPE2 (axis type) and CRVAL2/CRVAL3 (reference values,
    degrees). Supports equatorial (RA -> fk5) and galactic (GLON) axes.

    Raises ValueError for any other CTYPE2; the original left *frame*
    unbound in that case and crashed later with a NameError.
    """
    if 'RA' in s['CTYPE2']:
        frame = 'fk5'
    elif 'GLON' in s['CTYPE2']:
        frame = 'galactic'
    else:
        raise ValueError("Unsupported CTYPE2 %r: expected RA/Dec or GLON/GLAT" % s['CTYPE2'])
    skyc = SkyCoord(s['CRVAL2'], s['CRVAL3'], unit='deg', frame=frame)
    # the [0] indexing assumes CRVAL2/CRVAL3 are array-valued -- as in the
    # original; confirm against the headers this is fed.
    glon = (skyc.galactic.l.degree)[0]
    glat = (skyc.galactic.b.degree)[0]
    return (glon, glat)
|
17,313 | d19c500afdfe0611fea0a47d67e3a86aeee0c319 | from sys import *
script, weather_info = argv

# Build a city -> weather lookup from the comma-separated data file.
d = {}
with open(weather_info, 'r', encoding = 'utf-8') as f:
    for line in f.readlines():
        l = line.strip().split(',')
        d[l[0]] = l[1]
# line = f.readline()
# while line:
# l = line.strip().split(',')
# d[l[0]] = l[1]
# line = f.readline()
# Query history header and help text (user-facing Chinese strings).
his = "\n查询历史\n--------------"
hel = """
**帮助文档**
/ 输入 城市名称 查询当地天气 /
/ 输入 h / help 获取帮助文档 /
/ 输入 history 获取查询历史 /
/ 输入 q / quit 退出查询程序 /"""
print ("\n欢迎使用天气通~\n如需帮助请输入 h / help")
# REPL: a city name prints its weather; h/help, history and q/quit are
# the recognized keywords, anything else prints the unknown-keyword hint.
while True:
    instruction = input("\n请输入城市名或其他关键词:\n> ")
    if instruction in d:
        print (d[instruction])
        his += '\n' + instruction + ' ' + d[instruction]
    elif instruction == 'help' or instruction == 'h':
        print (hel)
    elif instruction == 'history':
        print (his)
    elif instruction == 'quit' or instruction == 'q':
        print ("\n感谢你的使用,再见~ ^u^")
        exit(0)
    else:
        print ("\n未知关键词 TAT?? 请重新输入\n如需帮助请输入 h / help")
|
17,314 | e2f1c2d6a369f2ca804892aa5fbff947221c1a3d | ###
# [PROG] : Brute force equation solver
# [LANG] : Python
# [AUTH] : BooeySays
# [DESC] : Reads the coefficients of two linear equations,
#          ax + by = c and dx + ey = f, then brute-forces integer
#          solutions with x and y in [-10, 10], printing every
#          solution found (or "No solution" if none exists).
# [EDTR] : PyCharm 2020.2.1
# [LAB#] : 5.20.1: LAB: Brute force equation solver
###
''' Read in first equation, ax + by = c '''
a = int(input())
b = int(input())
c = int(input())
''' Read in second equation, dx + ey = f '''
d = int(input())
e = int(input())
f = int(input())
''' Type your code here. '''
# Brute force: try every integer pair (x, y) in [-10, 10] and print each
# pair satisfying both equations; report when none do.
solution = False
for x in range(-10,11):
    for y in range(-10,11):
        if (a * x + b * y == c) and (d * x + e * y ==f):
            print(x, y)
            solution = True
if (solution == False):
    print("No solution")
def leapyears(x):
    """Classify year *x* under Gregorian leap-year rules, as a sentence.

    Precedence: multiples of 400 are "400-leap" years, other multiples of
    100 are not leap years, remaining multiples of 4 are leap years.
    """
    year = str(x)
    if x % 400 == 0:
        return "The year " + year + " is a 400-leap year"
    if x % 100 == 0:
        return "The year " + year + " is not a leap year"
    if x % 4 == 0:
        return "The year " + year + " is a leap year"
    return "The year " + year + " is not a leap year"
|
17,316 | 502a57b72a9abaac004927ee808bea5ee4c03733 | from InstrumentServiceBack.settings.base import *
CURRENT_ENV = 'dev'
|
17,317 | 6849cd20932c0ae2decb6f6b3391a4e0e1743b6d | #!/usr/bin/env python
#
# Schulze Voting Scheme
# https://en.wikipedia.org/wiki/Schulze_method
# Input is an iterable of ballots
# All ballots must have the same candidates
# Each ballot must have all candidates
#
# A ballot is a list of candidates, with candidates occuring earlier
# in the list being prefered over candidates occuring later in the list.
# Each candidate may only appear once per ballot.
# Example Ballot: ['Pete', 'Frank', 'Oliver', 'Bob']
# Output is the single winner chosen by the Schulze voting algorithm
def schulze(ballots):
    """Return the Schulze-method winner among the candidates on *ballots*.

    Every ballot must list the same candidates, most-preferred first.
    Returns None when no candidate beats-or-ties every other candidate
    (no Schulze winner exists).
    """
    candidates = ballots[0]
    # pairwise[(x, y)]: number of voters preferring x over y
    pairwise = {(x, y): 0 for x in candidates for y in candidates}
    for ballot in ballots:
        for (x, y) in pairwise:
            if ballot.index(x) < ballot.index(y):
                pairwise[(x, y)] += 1
    # strength[(x, y)]: strength of the strongest beatpath from x to y,
    # seeded with direct victories only
    strength = {}
    for x in candidates:
        for y in candidates:
            if x == y:
                continue
            won = pairwise[(x, y)] > pairwise[(y, x)]
            strength[(x, y)] = pairwise[(x, y)] if won else 0
    # Floyd-Warshall-style widening over each intermediate candidate `mid`
    for mid in candidates:
        for x in candidates:
            if x == mid:
                continue
            for y in candidates:
                if y != mid and y != x:
                    strength[(x, y)] = max(strength[(x, y)],
                                           min(strength[(x, mid)], strength[(mid, y)]))
    # the winner's path strength to everyone is at least theirs back
    for x in candidates:
        if all(strength[(x, y)] >= strength[(y, x)] for y in candidates if y != x):
            return x
def test():
    """Smoke-test schulze() on the classic 45-voter example (winner: E)."""
    ballots = [['A', 'C', 'B', 'E', 'D']] * 5 + \
              [['A', 'D', 'E', 'C', 'B']] * 5 + \
              [['B', 'E', 'D', 'A', 'C']] * 8 + \
              [['C', 'A', 'B', 'E', 'D']] * 3 + \
              [['C', 'A', 'E', 'B', 'D']] * 7 + \
              [['C', 'B', 'A', 'D', 'E']] * 2 + \
              [['D', 'C', 'E', 'B', 'A']] * 7 + \
              [['E', 'B', 'A', 'D', 'C']] * 8
    assert schulze(ballots) == 'E'
    # print() call form works under both Python 2 and 3; the original used a
    # Python-2-only print statement, making the file a py3 syntax error.
    print("PASS")
if __name__ == "__main__":
    # run the self-test when executed as a script
    test()
|
17,318 | 38f31c0c25a9071ffc79b8fe4ff40307ef431a4b | #!/usr/bin/env python
# Copyright 2015, Institute for Systems Biology.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
import httplib2
import pprint
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client import tools
import time
import os
from os.path import basename
import argparse
from oauth2client import tools
try:
    # NOTE(review): Python 2 syntax throughout (print statements,
    # "except X, e") -- this script will not parse under Python 3.
    parser = argparse.ArgumentParser(parents=[tools.argparser])
    flags = parser.parse_args()
    # OAuth2 flow: credentials cached in bigquery_credentials.dat, with the
    # interactive flow re-run when they are absent or expired.
    FLOW = flow_from_clientsecrets('client_secrets.json',scope='https://www.googleapis.com/auth/bigquery')
    storage = Storage('bigquery_credentials.dat')
    credentials = storage.get()
    if credentials is None or credentials.invalid:
        # Run oauth2 flow with default arguments.
        credentials = tools.run_flow(FLOW, storage, flags)
    http = httplib2.Http()
    http = credentials.authorize(http)
    bigquery_service = build('bigquery', 'v2', http=http)
    jobCollection = bigquery_service.jobs()
    print jobCollection
except TypeError, error:
    # Handle errors in constructing a query.
    print ('There was an error in constructing your query : %s' % error)
except HttpError, error:
    # Handle API errors.
    print ('Arg, there was an API error : %s : %s' %
           (error.resp.status, error._get_reason()))
except AccessTokenRefreshError:
    # Handle Auth errors.
    print ('The credentials have been revoked or expired, please re-run '
           'the application to re-authorize')
except client.AccessTokenRefreshError:
    # NOTE(review): `client` is never imported here, and this clause is
    # unreachable anyway (the bare AccessTokenRefreshError above catches it
    # first) -- evaluating it would raise NameError.
    print ('The credentials have been revoked or expired, please re-run'
           'the application to re-authorize.')
except Exception as error:
    print (error)
|
17,319 | 50edaa69803048edb84a838e37b6ec9fa662e3de | from typing import Tuple, Dict
import difftools.maximization as dm
import difftools.algebra as da
import numpy as np
from joblib import Parallel, delayed
from numba import njit
from numba.pycc import CC
cc = CC("trial")
@cc.export("trial_with_sample_jit", "(i8, i8, i8, i8[:,:], f8[:,:], f8[:,:])")
@njit
def trial_with_sample_jit(
    k: int,
    m: int,
    n: int,
    adj: np.ndarray,
    prob_mat: np.ndarray,
    util_dists: np.ndarray,
) -> Tuple[np.ndarray, ...]:
    # Numba JIT/AOT variant of trial_with_sample: one IM greedy pass whose
    # seed set S is shared, then per-utility-sample SWM greedy passes and
    # social-welfare evaluations. Returns (sw_ims, sw_swms, S, Ts).
    # NOTE(review): the AOT export signature omits a return type -- confirm
    # numba.pycc infers it correctly for this tuple return.
    S, _ = dm.im_greedy(None, k, m, n, adj, prob_mat)
    l = len(util_dists)
    Ts = np.zeros((l, n), np.float64)
    sw_ims = np.zeros(l, np.float64)
    sw_swms = np.zeros(l, np.float64)
    for i in range(l):
        util_dist = util_dists[i]
        T, _ = dm.swm_greedy(None, k, m, n, adj, prob_mat, util_dist)
        sw_im = dm.ic_sw_sprd_exp(None, m, n, adj, S, prob_mat, util_dist)
        sw_swm = dm.ic_sw_sprd_exp(None, m, n, adj, T, prob_mat, util_dist)
        Ts[i] = T
        sw_ims[i] = sw_im
        sw_swms[i] = sw_swm
    return sw_ims, sw_swms, S, Ts
def __f(
    util_dist, k, m, n, adj, prob_mat, S
) -> Tuple[np.ndarray, np.ndarray, np.number, np.number]:
    """One SWM-greedy run plus welfare evaluations for a single utility sample.

    Returns (T, swm_hist, sw_im, sw_swm) where T is the SWM seed set and the
    last two are the social welfare of the shared IM seed set *S* and of T.
    """
    T, swm_hist = dm.swm_greedy(None, k, m, n, adj, prob_mat, util_dist)
    welfare_im = dm.ic_sw_sprd_exp(None, m, n, adj, S, prob_mat, util_dist)
    welfare_swm = dm.ic_sw_sprd_exp(None, m, n, adj, T, prob_mat, util_dist)
    return T, swm_hist, welfare_im, welfare_swm
def trial_with_sample(
    k: int,
    m: int,
    n: int,
    adj: np.ndarray,
    prob_mat: np.ndarray,
    util_dists: np.ndarray,
) -> Dict[str, np.ndarray]:
    """
    Parameters:
        k : the maximum size of seed node sets
        m : an iteration number of the IC model
        n : the number of nodes
        adj : the adjacency matrix of a graph as 2d int64 ndarray
        prob_mat : propagation probabilities on the adjacency matrix as 2d float64 ndarray
        util_dists : utility distribusion samples on the indicator of $V$ as 1d float64 array
    Returns:
        The dictionary as:
        - `sw-ims`: a list of the social welfare by an IM opt seed set under the IC model
        - `sw-swms`: a list of the near maximums of social welfare for each utility distribution samples
        - `im-seed`: an opt-seed by influence maximization
        - `im-hist`: a history of influence maximization
        - `swm-seeds`: an opt-seed list by utility maximization
        - `swm-hists`: a list of a history of utility maximization
    """
    # IM greedy runs once; its seed set S is reused for every utility sample
    S, im_hist = dm.im_greedy(None, k, m, n, adj, prob_mat)
    # evaluate each sample in parallel across all cores
    ret = Parallel(n_jobs=-1)(
        delayed(__f)(util_dist, k, m, n, adj, prob_mat, S) for util_dist in util_dists
    )
    # transpose [(T, hist, sw_im, sw_swm), ...] into per-field tuples
    ret = list(zip(*ret))
    return {
        "sw-ims": np.stack(ret[2]),
        "sw-swms": np.stack(ret[3]),
        "im-seed": S,
        "im-hist": im_hist,
        "swm-seeds": np.stack(ret[0]),
        "swm-hists": np.stack(ret[1]),
    }
def trial(
    l: int,
    k: int,
    m: int,
    n: int,
    adj: np.ndarray,
    prob_mat: np.ndarray,
) -> Dict[str, np.ndarray]:
    """
    Parameters:
        l : the number of utility distribution samples
        k : the maximum size of seed node sets
        m : an iteration number of the IC model
        n : the number of nodes
        adj : the adjacency matrix of a graph as 2d int64 ndarray
        prob_mat : propagation probabilities on the adjacency matrix as 2d float64 ndarray
    Returns:
        The dictionary as:
        - `sw-ims`: a list of the social welfare by an IM near opt seed set under the IC model
        - `sw-swms`: a list of the opt-maximal social welfare for each utility distribution samples
        - `im-seed`: an opt-seed by influence maximization
        - `im-hist`: a history of influence maximization
        - `swm-seeds`: an indicator list of SWM near optimal seed sets
        - `swm-hists`: a list of a history of SWM
        - `utils` : $l$-size uniform samples of utility distribusions on the indicator of $V$ as 1d float64 array
    """
    # draw l utility vectors uniformly at random from the n-simplex
    util_dists = np.zeros((l, n), dtype=np.float64)
    for i in range(l):
        util_dists[i] = da.random_simplex(None, n)
    ret = trial_with_sample(k, m, n, adj, prob_mat, util_dists)
    # attach the samples so callers can correlate welfare values with them
    ret["utils"] = util_dists
    return ret
17,320 | e9cd8c60772e1a4da21dcb172e3d0aa69906a123 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-08-08 20:12:27
import time
from sqlalchemy import INTEGER, Column, Integer, String, select, text, update
from sqlalchemy.dialects.mysql import TINYINT
from .basedb import AlchemyMixin, BaseDB
class PushRequest(BaseDB,AlchemyMixin):
    '''
    Table of template push/share requests between users.

    Columns: id, from_tplid, from_userid, to_tplid, to_userid, status,
    msg, ctime, mtime, atime.  The *time columns hold unix timestamps.
    '''
    __tablename__ = 'push_request'
    id = Column(Integer, primary_key=True)
    # sender side: the template being pushed and its owner
    from_tplid = Column(INTEGER, nullable=False)
    from_userid = Column(INTEGER, nullable=False)
    # lifecycle state; one of the PENDING/CANCEL/REFUSE/ACCEPT constants below
    status = Column(TINYINT, nullable=False, server_default=text("'0'"))
    # creation / modification / access timestamps (unix time)
    ctime = Column(INTEGER, nullable=False)
    mtime = Column(INTEGER, nullable=False)
    atime = Column(INTEGER, nullable=False)
    # receiver side; nullable -- presumably filled in on acceptance, TODO confirm
    to_tplid = Column(INTEGER)
    to_userid = Column(INTEGER)
    # optional free-text message attached by the sender
    msg = Column(String(1024))
    # status values
    PENDING = 0
    CANCEL = 1
    REFUSE = 2
    ACCEPT = 3
    # sentinel class for "argument not supplied" (distinct from None)
    class NOTSET(object): pass
    def add(self, from_tplid, from_userid, to_tplid, to_userid, msg='', sql_session=None):
        """Insert a new PENDING request; all timestamps are set to now."""
        now = time.time()
        insert = dict(
            from_tplid = from_tplid,
            from_userid = from_userid,
            to_tplid = to_tplid,
            to_userid = to_userid,
            status = PushRequest.PENDING,
            msg = msg,
            ctime = now,
            mtime = now,
            atime = now,
        )
        return self._insert(PushRequest(**insert), sql_session=sql_session)
    def mod(self, id, sql_session=None, **kwargs):
        """Update mutable columns of request *id*; refreshes mtime.

        Raises AssertionError if an immutable column is passed in kwargs.
        """
        for each in ('id', 'from_tplid', 'from_userid', 'to_userid', 'ctime'):
            assert each not in kwargs, '%s not modifiable' % each
        kwargs['mtime'] = time.time()
        return self._update(update(PushRequest).where(PushRequest.id == id).values(**kwargs), sql_session=sql_session)
    async def get(self, id, fields=None, one_or_none=False, first=True, to_dict=True, sql_session=None):
        """Fetch one request by id.

        *fields* optionally restricts the selected columns; with
        *to_dict* the row is converted via AlchemyMixin.to_dict.
        """
        assert id, 'need id'
        if fields is None:
            _fields = PushRequest
        else:
            _fields = (getattr(PushRequest, field) for field in fields)
        smtm = select(_fields).where(PushRequest.id == id)
        result = await self._get(smtm, one_or_none=one_or_none, first=first, sql_session=sql_session)
        if to_dict and result is not None:
            return self.to_dict(result,fields)
        return result
    async def list(self, fields=None, limit=1000, to_dict=True, sql_session=None, **kwargs):
        """List requests matching column==value kwargs, newest (mtime) first."""
        if fields is None:
            _fields = PushRequest
        else:
            _fields = (getattr(PushRequest, field) for field in fields)
        smtm = select(_fields)
        for key, value in kwargs.items():
            smtm = smtm.where(getattr(PushRequest, key) == value)
        if limit:
            smtm = smtm.limit(limit)
        result = await self._get(smtm.order_by(PushRequest.mtime.desc()), sql_session=sql_session)
        if to_dict and result is not None:
            return [self.to_dict(row,fields) for row in result]
        return result
|
17,321 | e7d205d25a2a30eb95dfd3df8f6ece8525fbaf51 | #24 2628 하 수학 종이자르기
# BOJ 2628: cut a sheet of paper and report the area of the largest piece.
# A first attempt that materialised every sub-rectangle after each cut
# exceeded the memory limit and has been removed; this version keeps only
# the sorted cut coordinates.  Because every column width pairs with every
# row height in the resulting grid, the largest piece is simply
# (widest column) * (tallest row).

# paper width and height
width, height = map(int, input().split())
# number of cuts
n = int(input())
garo_pt = [0, width]   # x-coordinates of vertical cut lines (plus borders)
sero_pt = [0, height]  # y-coordinates of horizontal cut lines (plus borders)
for _ in range(n):
    direction, position = (int(x) for x in input().split())
    if direction == 0:
        # 0 = horizontal cut at y == position
        sero_pt.append(position)
    else:
        # 1 = vertical cut at x == position
        garo_pt.append(position)
garo_pt.sort()
sero_pt.sort()
# maximum gap in each direction == dimensions of the largest piece
max_w = max(garo_pt[i + 1] - garo_pt[i] for i in range(len(garo_pt) - 1))
max_h = max(sero_pt[i + 1] - sero_pt[i] for i in range(len(sero_pt) - 1))
print(max_w * max_h)
17,322 | e17b12b90c4e48b0dc976617633f851b35f4587b | # -*- coding: utf-8 -*-
"""
@author: Wei, Shuowen
https://leetcode.com/problems/shortest-distance-from-all-buildings/
https://blog.csdn.net/qq_37821701/article/details/108906696
"""
import collections
class Solution:
    def shortestDistance(self, grid):
        """Minimal total travel distance from one empty cell (0) to every
        building (1), moving 4-directionally and never crossing buildings
        or obstacles (2); -1 when no empty cell reaches all buildings.

        Strategy: BFS outward from each empty cell, summing distances to
        the buildings it can reach.
        """
        rows, cols = len(grid), len(grid[0])
        target = sum(row.count(1) for row in grid)

        def total_from(sr, sc):
            # Sum of BFS distances from (sr, sc) to every building,
            # or -1 if some building is unreachable.
            seen = {(sr, sc)}
            found = set()
            acc = 0
            frontier = collections.deque([(sr, sc, 0)])
            while frontier:
                r, c, d = frontier.popleft()
                if grid[r][c] == 1 and (r, c) not in found:
                    found.add((r, c))
                    acc += d
                    if len(found) == target:
                        break
                # buildings block further expansion
                if grid[r][c] != 1:
                    for nr, nc in ((r, c + 1), (r, c - 1), (r - 1, c), (r + 1, c)):
                        if 0 <= nr < rows and 0 <= nc < cols \
                                and (nr, nc) not in seen and grid[nr][nc] != 2:
                            seen.add((nr, nc))
                            frontier.append((nr, nc, d + 1))
            return acc if len(found) == target else -1

        best = float('inf')
        for r in range(rows):
            for c in range(cols):
                if grid[r][c] == 0:
                    steps = total_from(r, c)
                    if steps != -1 and steps < best:
                        best = steps
        return best if best != float('inf') else -1
class Solution:
    def shortestDistance(self, grid):
        """Same problem, inverted search direction: BFS from every
        building, accumulating per empty cell the total distance and the
        number of buildings that reach it; the answer is the smallest
        total over cells reached by *all* buildings, else -1.
        """
        rows, cols = len(grid), len(grid[0])
        total = [[0] * cols for _ in range(rows)]
        reached = [[0] * cols for _ in range(rows)]

        def spread(sr, sc):
            # BFS from building (sr, sc) through empty cells only.
            seen = [[False] * cols for _ in range(rows)]
            frontier = collections.deque([(sr, sc, 1)])
            while frontier:
                r, c, d = frontier.popleft()
                for nr, nc in ((r, c + 1), (r, c - 1), (r - 1, c), (r + 1, c)):
                    if 0 <= nr < rows and 0 <= nc < cols \
                            and not seen[nr][nc] and grid[nr][nc] == 0:
                        total[nr][nc] += d
                        reached[nr][nc] += 1
                        seen[nr][nc] = True
                        frontier.append((nr, nc, d + 1))

        buildings = 0
        for r in range(rows):
            for c in range(cols):
                if grid[r][c] == 1:
                    spread(r, c)
                    buildings += 1

        best = min(
            (total[r][c]
             for r in range(rows)
             for c in range(cols)
             if grid[r][c] == 0 and reached[r][c] == buildings),
            default=float('inf'),
        )
        return best if best != float('inf') else -1
17,323 | 3d9693593d26a18534aa1f3eca5447d2ddb7bcd2 | # 4: Maak nu één functie ‘print_dictionary’ die de verschillende elementen overloopt waarbij
# telkens key & value samen op één lijn worden afgeprint.
# De functie heeft als parameters een naam (voor de dictionary) en de dictionary zelf.
# Voorbeeld:
def print_dictionary(dict, naam):
    """Print a header line for *naam*, then one "key --> Value" line per entry.

    NOTE(review): the first parameter shadows the builtin ``dict``; the
    name is kept unchanged to preserve the public signature.
    """
    header = "voor de verzameling {0}: ".format(naam)
    print(header)
    for sleutel in dict:
        print("key: {0} --> Value: {1}".format(sleutel, dict[sleutel]))


nmct = {"1NMCT": 1, "2NMCT": 2, "3NMCT": 3}
devine = {"1Devine": 1, "2Devine": 2, "3Devine": 3}
print_dictionary(nmct, "NMCT")
17,324 | 40f0d074aebf57b1e0870c34c7a7ff4d1fe74e3d | #Arquivo destinado aos menus de perguntas. Sao dois menus
import curses
import actions
import getData
import menu
import pyrebase
import textPrint
import scoreboard
import play
import screen
import perguntasActions
# funcao destinada ao menu Perguntas onde
# as opcoes sao Adicionao Pergunta, Editar Pergunta e Retornar
def show_perguntas_menu(stdscr, current_user, current_user_id):
    """Main "Perguntas" (questions) menu loop: add a question, edit one, or go back.

    stdscr          -- curses window to render into
    current_user    -- logged-in user; only forwarded to the edit submenu
                       (presumably the user object/name -- TODO confirm)
    current_user_id -- key used to load the user's record from the backend
    """
    # menu options
    menu_perguntas = ('Adicionar Pergunta', 'Editar Pergunta', 'Voltar')
    # hide the terminal cursor while the menu is on screen
    curses.curs_set(0)
    # index of the currently highlighted option
    current_row_idx = 0
    # color pairs: 1 = normal entry, 2 = highlighted entry
    curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)
    curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_GREEN)
    # loading message while the user data is fetched
    textPrint.print_center(stdscr, "Carregando...")
    stdscr.refresh()
    # fetch the logged-in user's data (name + high score) for the header
    current_user_data = getData.get_user_data(current_user_id)
    current_user_name = current_user_data["Name"]
    current_user_high_score = current_user_data["Highscore"]
    data_list = [current_user_name, current_user_high_score]
    # initial render: menu, user header and game title
    menu.print_menu(stdscr, current_row_idx, menu_perguntas)
    textPrint.print_user_data(stdscr, data_list)
    textPrint.print_title(stdscr)
    stdscr.refresh()
    while True:
        key = stdscr.getch()
        stdscr.clear()
        if actions.keyboard(key) == 'up' and current_row_idx > 0:
            textPrint.print_center(stdscr, 'up')
            current_row_idx -= 1
        elif actions.keyboard(key) == 'down' and current_row_idx < len(menu_perguntas) - 1:
            textPrint.print_center(stdscr, 'down')
            current_row_idx += 1
        elif actions.keyboard(key) == 'enter':
            stdscr.clear()
            textPrint.print_center(stdscr, 'enter')
            # "Voltar": leave the menu loop
            if current_row_idx == len(menu_perguntas) - 1:
                break
            # "Adicionar Pergunta"
            elif current_row_idx == 0:
                perguntasActions.adiciona_pergunta(stdscr, current_user_id, current_user_data)
            # "Editar Pergunta"
            elif current_row_idx == 1:
                show_editar_perguntas_menu(stdscr, current_user, current_user_id)
            stdscr.refresh()
        # redraw menu, user header and title after handling the key
        menu.print_menu(stdscr, current_row_idx, menu_perguntas)
        textPrint.print_user_data(stdscr, data_list)
        stdscr.refresh()
        textPrint.print_title(stdscr)
# menu editar perguntas, onde as opcoes sao: Apagar Pergunta" "Alterar Pergunta" e "voltar"
# esse menu entra na opcao editar perguntas do menu definido acima
def show_editar_perguntas_menu(stdscr, current_user_data, current_user):
    """Submenu to delete ("Apagar") or modify ("Alterar") the user's questions.

    NOTE(review): the call site in show_perguntas_menu passes
    (stdscr, current_user, current_user_id); here `current_user` therefore
    receives the user *id*, and the `current_user_data` parameter is
    overwritten a few lines below -- confirm the parameter naming.
    """
    # submenu options
    menu_editar_perguntas = ('Apagar Pergunta', 'Alterar Pergunta', 'Voltar')
    # hide the terminal cursor
    curses.curs_set(0)
    # index of the currently highlighted option
    current_row_idx = 0
    # color pairs: 1 = normal entry, 2 = highlighted entry
    curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)
    curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_GREEN)
    # loading message while the user data is fetched
    textPrint.print_center(stdscr, "Carregando...")
    stdscr.refresh()
    # (re)load the logged-in user's data for the header
    current_user_data = getData.get_user_data(current_user)
    current_user_name = current_user_data["Name"]
    current_user_high_score = current_user_data["Highscore"]
    data_list = [current_user_name, current_user_high_score]
    # initial render: menu, user header and game title
    menu.print_menu(stdscr, current_row_idx, menu_editar_perguntas)
    textPrint.print_user_data(stdscr, data_list)
    textPrint.print_title(stdscr)
    stdscr.refresh()
    while True:
        key = stdscr.getch()
        stdscr.clear()
        if actions.keyboard(key) == 'up' and current_row_idx > 0:
            textPrint.print_center(stdscr, 'up')
            current_row_idx -= 1
        elif actions.keyboard(key) == 'down' and current_row_idx < len(menu_editar_perguntas) - 1:
            textPrint.print_center(stdscr, 'down')
            current_row_idx += 1
        elif actions.keyboard(key) == 'enter':
            stdscr.clear()
            textPrint.print_center(stdscr, 'enter')
            # "Voltar": leave the submenu
            if current_row_idx == len(menu_editar_perguntas) - 1:
                break
            ### "Apagar Pergunta" (delete) ##############
            elif current_row_idx == 0:
                current_row_idx = 0
                while True:
                    # rules/confirmation screen with a two-option selector
                    screen.show_erase_rules_screen(stdscr,current_row_idx)
                    # keyboard input
                    key = stdscr.getch()
                    # navigate between the two options
                    if actions.keyboard(key) == 'left' and current_row_idx > 0:
                        current_row_idx -= 1
                    elif actions.keyboard(key) == 'right' and current_row_idx < 1:
                        current_row_idx += 1
                    # option selected
                    elif actions.keyboard(key) == 'enter':
                        # "continue": pick which question to delete
                        if current_row_idx == 0:
                            stdscr.clear()
                            escolha = show_all_questions(stdscr, current_user, "Apagar")
                            # -1: the user has not submitted any questions yet
                            if escolha == -1:
                                stdscr.clear()
                                textPrint.print_center(stdscr, "Usuario ainda nao enviou perguntas")
                                stdscr.getch()
                            # -2 means "exit"; any other value is a question id
                            elif escolha != -2:
                                stdscr.clear()
                                pergunta_text = getData.get_one_question_data(escolha)
                                warning = ["Aperte 's' para confirmar que deseja deletar a seguinte pergunta:", pergunta_text, "Para cancelar, aperte qualquer outra tecla"]
                                textPrint.print_multi_lines(stdscr, warning, len(warning))
                                confirm_key = stdscr.getch()
                                # 83/115 are the codes for 'S'/'s': confirm deletion
                                if confirm_key in [83, 115]:
                                    perguntasActions.delete_question(escolha, current_user)
                                    stdscr.clear()
                                    mensagem_sucesso = ['Pergunta deletada com sucesso', 'Aperte qualquer coisa para continuar']
                                    textPrint.print_title(stdscr)
                                    textPrint.print_multi_lines(stdscr,mensagem_sucesso,len(mensagem_sucesso))
                                    stdscr.refresh()
                                    stdscr.getch()
                        # "back": leave the delete loop
                        else:
                            current_row_idx = 0
                            break
            ### "Alterar Pergunta" (edit) ##############
            elif current_row_idx == 1:
                current_row_idx = 0
                while True:
                    screen.show_questions_rules_screen(stdscr,current_row_idx)
                    # keyboard input
                    key = stdscr.getch()
                    # navigate between the two options
                    if actions.keyboard(key) == 'left' and current_row_idx > 0:
                        current_row_idx -= 1
                    elif actions.keyboard(key) == 'right' and current_row_idx < 1:
                        current_row_idx += 1
                    # option selected
                    elif actions.keyboard(key) == 'enter':
                        # "continue": pick which question to edit
                        if current_row_idx == 0:
                            stdscr.clear()
                            escolha = show_all_questions(stdscr, current_user, "Editar")
                            if escolha == -1:
                                stdscr.clear()
                                textPrint.print_center(stdscr, "Usuario ainda nao enviou perguntas")
                                stdscr.getch()
                            elif escolha != -2:
                                stdscr.clear()
                                perguntasActions.escreve_pergunta(stdscr, current_user, current_user_data, "Editar", escolha)
                        # "back": leave the edit loop
                        else:
                            break
            stdscr.refresh()
        # redraw menu, user header and title after handling the key
        menu.print_menu(stdscr, current_row_idx, menu_editar_perguntas)
        textPrint.print_user_data(stdscr, data_list)
        stdscr.refresh()
        textPrint.print_title(stdscr)
########### MOSTRA AS PERGUNTAS FEITAS PELO USER ###########
########### PAGINATED LIST OF THE USER'S OWN QUESTIONS ###########
def show_all_questions(stdscr, current_user_id, mode):
    """Show the user's submitted questions page by page and let them pick one.

    Returns the id of the chosen question, -1 when the user has no
    questions, or -2 when the user exits with 'e'.
    NOTE(review): `mode` ("Apagar"/"Editar") is never read in this body --
    confirm whether it is still needed.
    """
    curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)
    curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_GREEN)
    stdscr.attron(curses.color_pair(1))
    current_page_index = 0
    textPrint.print_center(stdscr, "Carregando...")
    stdscr.refresh()
    questions_data = getData.get_user_questions_data(current_user_id)
    # user has not submitted any questions
    if len(questions_data) == 1:
        return -1
    questions_ids = perguntasActions.get_questions_ids(questions_data)
    # user has not submitted any questions
    if len(questions_ids) == 0:
        return -1
    questions = perguntasActions.get_question_list(questions_ids)
    # parallel pagination: pages_id holds the ids, pages the question texts
    pages_id = perguntasActions.get_questions_pages(questions_ids)
    pages = perguntasActions.get_questions_pages(questions)
    quantidade_paginas = len(pages)
    text = ["Pagina " + str(current_page_index + 1) + " / " + str(quantidade_paginas), "Para passar a pagina digite 'n'", "Para voltar a pagina, digite 'b'", "Para sair, digite 'e'"]
    stdscr.clear()
    while True:
        current_page = pages[current_page_index]
        page_with_numbers = perguntasActions.add_question_number_on_page(current_page)
        textPrint.print_multi_bottom_lines(stdscr, text, len(text))
        textPrint.print_multi_lines(stdscr, page_with_numbers, len(page_with_numbers))
        textPrint.print_title(stdscr)
        stdscr.refresh()
        key = stdscr.getch()
        # 'e': exit without choosing
        if actions.verify_exit(key) == True:
            return -2
        # 'n': next page (if any)
        elif actions.verify_next(key) == True and current_page_index < quantidade_paginas - 1:
            current_page_index += 1
            text = ["Pagina " + str(current_page_index + 1) + " / " + str(quantidade_paginas), "Para passar a pagina digite 'n'", "Para voltar a pagina, digite 'b'", "Para sair, digite 'e'"]
            stdscr.clear()
        # 'b': previous page (if any)
        elif actions.verify_back(key) == True and current_page_index > 0:
            current_page_index -= 1
            text = ["Pagina " + str(current_page_index + 1) + " / " + str(quantidade_paginas), "Para passar a pagina digite 'n'", "Para voltar a pagina, digite 'b'", "Para sair, digite 'e'"]
            stdscr.clear()
        # a digit within range: return the id of the selected question
        elif actions.verify_which_question(key) != -1 and actions.verify_which_question(key) < len(current_page) + 1:
            return pages_id[current_page_index][actions.verify_which_question(key)-1]
        else:
            stdscr.clear()
            text = ["Entrada Invalida", "Pagina " + str(current_page_index + 1) + " / " + str(quantidade_paginas), "Para passar a pagina digite 'n'", "Para voltar a pagina, digite 'b'", "Para sair, digite 'e'"]
#def test(stdscr):
# while True:
# a = show_all_questions(stdscr, 1, 0)
# stdscr.clear()
# textPrint.print_center(stdscr, str(a))
# stdscr.refresh()
# stdscr.getch()
# if a == -1:
# break
#curses.wrapper(test) |
17,325 | 758c9383cbde5dfa15ee1fd766630a8ec01861b2 | #!/usr/bin/env python
from datetime import datetime
import csv
import json
# Spanish three-letter month abbreviations -> English strptime (%b) names.
month_mapping = {
    'ene': 'Jan', 'feb': 'Feb', 'mar': 'Mar', 'abr': 'Apr',
    'may': 'May', 'jun': 'Jun', 'jul': 'Jul', 'ago': 'Aug',
    'sep': 'Sep', 'oct': 'Oct', 'nov': 'Nov', 'dic': 'Dec',
}


def convert_month(month):
    """Translate a Spanish month abbreviation into its English equivalent."""
    return month_mapping[month]


def to_date_object(date_str):
    """Parse a 'DD mon YYYY' string (Spanish month name) into a datetime."""
    parts = date_str.split(' ')
    normalized = '{} {} {}'.format(parts[2], convert_month(parts[1]), parts[0])
    return datetime.strptime(normalized, '%Y %b %d')
def get_zone_prices(row, zone_prefix):
    """Extract the four fuel prices of one zone from a CSV row.

    Column names have the form '<zone_prefix>_<suffix>', e.g. 'ZCE_DO'.
    """
    suffix_by_fuel = (
        ('diesel', 'DO'),
        ('diesel_low_sulfur', 'DOLS'),
        ('special', 'GS'),
        ('regular', 'GR'),
    )
    return {
        fuel: row['{}_{}'.format(zone_prefix, suffix)]
        for fuel, suffix in suffix_by_fuel
    }
def parse_zones(row):
    """Build the ordered list of zone dicts (central, western, eastern)."""
    prefix_by_zone = (
        ('central', 'ZCE'),
        ('western', 'ZOC'),
        ('eastern', 'ZOR'),
    )
    return [
        {'name': zone, 'prices': get_zone_prices(row, prefix)}
        for zone, prefix in prefix_by_zone
    ]
def parse_row(row):
    """Convert one raw CSV row into {'from': date, 'to': date, 'zones': [...]}.

    The CSV stores day+month in FECHA_INIPREC/FECHA_FINPREC and the year
    separately in the ANHO column.
    """
    year = row['ANHO']
    from_date = to_date_object('{} {}'.format(row['FECHA_INIPREC'], year))
    to_date = to_date_object('{} {}'.format(row['FECHA_FINPREC'], year))
    return {
        'from': from_date,
        'to': to_date,
        'zones': parse_zones(row)
    }
# Read the combined CSV and dump each record as pretty-printed JSON.
filename = './all.csv'
with open(filename, 'r') as csvfile:
    for record in csv.DictReader(csvfile):
        print(json.dumps(parse_row(record), indent=2, default=str))
|
17,326 | 220d8b857825724bb7add6ef1163185d3fc0ba14 | """
Student.py
20210917
ProjectFullStack
"""
class Student:
    """A student record; the class keeps a running count of instances."""

    # class variable tracking how many Student objects have been created
    student_count = 0

    def __init__(self, name, age, student_id, courses):
        """Store the student's attributes and bump the shared counter."""
        self.name = name
        self.age = age
        self.student_id = student_id
        self.courses = courses
        # count the newly created student on the class itself so the
        # tally is shared by every instance
        Student.student_count += 1
# Demo: the class-level counter is shared by every instance.
joe = Student('Joe', 18, 8429, ["MATH401", "COMP505"])
print("Created Joe!")
# instance attribute lookup falls through to the class variable
print("joe.student_count = ", joe.student_count)
print("Total student count is now: ", Student.student_count)
jane = Student('Jane', 20, 2931, ["HIST420", "COMP505"])
print('-------')
print("Created Jane!")
print("joe.student_count = ", joe.student_count)
print("jane.student_count = ", jane.student_count)
print("Total student count is now: ", Student.student_count)
# Mutating the class variable directly is visible through every instance,
# since no instance attribute shadows it.
Student.student_count += 1
print('-------')
print("Manually changed student count!")
print("joe.student_count = ", joe.student_count)
print("jane.student_count = ", jane.student_count)
print("Total student count is now: ", Student.student_count)
|
17,327 | bd70623dc78f27f6afdad405ee67db4a35a0d70e | '''
Class Fraction
input: 2 fractions
output: addition and subtraction capabilities via the overloaded operators
'''
class fraction(object):
    """A rational number num/deno supporting +, - and float conversion.

    Results of + and - are not reduced to lowest terms, matching the
    original intent of the exercise.
    """

    def __init__(self, num, deno):
        self.num = num    # numerator
        self.deno = deno  # denominator (assumed non-zero)

    def __str__(self):
        return str(self.num) + '/' + str(self.deno)

    def getnum(self):
        """Return the numerator."""
        return self.num

    def getdeno(self):
        """Return the denominator."""
        return self.deno

    def __add__(self, other):
        """Return self + other: a/b + c/d = (a*d + b*c) / (b*d).

        Bug fix: the original used '+' where the cross product and the
        denominator require '*', so 1/2 + 2/3 came out as 7/5 instead of 7/6.
        """
        newnum = self.getnum() * other.getdeno() + self.getdeno() * other.getnum()
        newdeno = self.getdeno() * other.getdeno()
        return fraction(newnum, newdeno)

    def __sub__(self, other):
        """Return self - other: a/b - c/d = (a*d - b*c) / (b*d)."""
        newnum = self.getnum() * other.getdeno() - self.getdeno() * other.getnum()
        newdeno = self.getdeno() * other.getdeno()
        return fraction(newnum, newdeno)

    def convert(self):
        """Return the float value num/deno."""
        return self.getnum() / self.getdeno()
# quick demo of the fraction arithmetic
half = fraction(1, 2)
two_thirds = fraction(2, 3)
total = half + two_thirds
difference = half - two_thirds
print(total)
print(difference)
print(difference.convert())
17,328 | 66a019ee6a5a25ab74ea8e36408a97b399ca168d | import logging
import pandas as pd
from lib.constant import Datasets
from lib.data.accidents.scrappers import get_urls_by_dataset
from lib import utils
# dataset name -> {year -> csv url}, scraped from the open-data portal
urls_map = get_urls_by_dataset()
# yaml file describing the pandas dtypes of the raw csv columns
dtypes_file_path = 'resources/dtypes/raw.yml'
# per-year pandas.read_csv options; the 'default' entry applies unless a
# year has its own (the 2019 files switched separator to ';')
read_csv_kwargs_by_year = {
    'default': {
        'sep': ',',
        'header': 0,
        'encoding': 'latin_1',
    },
    2019: {
        'sep': ';',
        'header': 0,
    },
}
# dtypes
def dtypes(data_name: str, base_path: str = '') -> dict:
    """Return the column-name -> dtype mapping for *data_name*.

    The mapping is read from the yaml file at ``dtypes_file_path``;
    *data_name* must be one of the known datasets (AssertionError otherwise).
    """
    assert data_name in Datasets.list_all()
    return utils.get_dtypes(data_name, dtypes_file_path, base_path=base_path)
# load datasets
def get_raw_dataset(data_name: str, base_path: str = '') -> pd.DataFrame:
    """Download and concatenate every yearly CSV of *data_name*.

    Each year's file is read with its per-year ``read_csv`` options
    (falling back to the 'default' entry) and the dtypes declared in the
    raw dtypes yaml.  Returns one DataFrame with a fresh index, or None
    when the dataset has no urls.  Raises AssertionError for an unknown
    *data_name*.
    """
    assert data_name in Datasets.list_all()
    urls_by_year: dict = urls_map[data_name]
    frames = []
    for year, url in urls_by_year.items():
        # per-year read_csv options, defaulting when the year has none
        read_csv_kwargs = read_csv_kwargs_by_year.get(year, read_csv_kwargs_by_year['default'])
        df: pd.DataFrame = pd.read_csv(url, dtype=dtypes(data_name, base_path=base_path), **read_csv_kwargs)
        logging.info('{} - {} ({}): {} lines'.format(data_name, year, url, df.shape[0]))
        frames.append(df)
    # pd.concat replaces DataFrame.append, which was deprecated and then
    # removed in pandas 2.0
    return pd.concat(frames, ignore_index=True) if frames else None
|
def count_letters_digits(sentence):
    """Return [letter_count, digit_count] for the given string."""
    letters = sum(1 for ch in sentence if ch.isalpha())
    digits = sum(1 for ch in sentence if ch.isdigit())
    return [letters, digits]
# quick demo run
sample = 'hi bro how are 9999'
print(count_letters_digits(sample))
17,330 | d9f8e87afafade484f0fc6ba237f7a35e0722cb6 | #Antecessor e Sucessor de um número
# Read a number and print its predecessor and successor.
numero = float(input("Digite um número: "))
antecessor = numero - 1
sucessor = numero + 1
print("Antecessor: {} \nSucessor: {}" .format(antecessor, sucessor))
|
17,331 | e17b1111f2f7a66d18f78220f9782691f6598845 | """Program that outputs one of at least four random, good fortunes."""
__author__ = "730395347"
# The randint function is imported from the random library so that
# you are able to generate integers at random.
#
# Documentation: https://docs.python.org/3/library/random.html#random.randint
#
# For example, consider the function call expression: randint(1, 100)
# It will evaluate to an int value >= 1 and <= 100.
from random import randint
# Begin your solution here...
# Pick one of four fortunes uniformly at random and print it.
num: int = randint(1, 4)
print("Your fortune cookie says...")
# a flat elif chain replaces the original nested if/else pyramid
if num == 1:
    print("You will reach your goals!")
elif num == 2:
    print("You will get good grades!")
elif num == 3:
    print("Everything will work out in the end!")
else:
    print("You will have a great life!")
print("Now, go spread positive vibes!")
|
class Database():
    """Minimal line-oriented text-file "database".

    Each record is one line of text; records are addressed by 0-based
    line index.  Files are opened on demand and closed via ``with``
    (the original implementation leaked every file handle it opened).
    """

    def __init__(self, database_name):
        # name needs to have '.txt' at the end
        self.database_name = database_name

    # appends a new record to the database
    def write(self, data):
        with open(self.database_name, 'a') as file:
            file.write(str(data) + '\n')
        print('Successfully saved into the database! [200]')

    # print every record as "date: value"; the value is green when
    # positive, red when it starts with '-' (the date is the last
    # space-separated token, the value is everything before it)
    def read(self):
        with open(self.database_name, 'r') as file:
            lines = file.readlines()
        for line in lines:
            date = line.split(' ')[-1].strip()
            value = ' '.join(line.split(' ')[:-1])
            if value[0] == '-':
                print(f'{date}: \033[1;31;48m{value}\033[0m')
                continue
            print(f'{date}: \033[92m{value}\033[0m')

    # same as read(), but only for records containing `value`
    def read_(self, value):
        with open(self.database_name, 'r') as file:
            lines = file.readlines()
        for line in lines:
            if value in line:
                date = line.split(' ')[-1].strip()
                value_ = ' '.join(line.split(' ')[:-1])
                if value_[0] == '-':
                    print(f'{date}: \033[1;31;48m{value_}\033[0m')
                    continue
                print(f'{date}: \033[92m{value_}\033[0m')

    # removes the record at `index`
    def delete(self, index):
        # `is False`, not `== False`: 0 is a valid index, but 0 == False
        # is True in Python, so the old comparison rejected index 0
        if self.check_index(index) is False:
            return print('Invalid index! [204]')
        with open(self.database_name, 'r') as file:
            lines = file.readlines()
        # only if it's a valid index
        if len(lines) > int(index):
            del lines[int(index)]
        else:
            return print('Invalid index! [204]')
        # rewrite the remaining records
        with open(self.database_name, 'w') as file:
            for line in lines:
                file.write(line)
        print('Successfully removed from the database! [200]')

    # replaces the record at `index` with `new_value`
    def update(self, index, new_value):
        if self.check_index(index) is False:
            return print('Invalid index! [204]')
        with open(self.database_name, 'r') as file:
            lines = file.readlines()
        # only if it's a valid index
        if len(lines) > int(index):
            lines[int(index)] = str(new_value) + '\n'
        else:
            return print('Invalid index! [204]')
        # rewrite all records
        with open(self.database_name, 'w') as file:
            for line in lines:
                file.write(line)
        print('Successfully updated into the database! [200]')

    # returns the next serial value for this database and increments the
    # stored counter; '.serial.txt' line 0 is the clients counter, line 1
    # the counter used by every other database
    def get_serial(self):
        with open('.serial.txt', 'r') as file:
            lines = file.readlines()
        index = 0 if self.database_name == 'clients.txt' else 1
        result = int(lines[index])
        lines[index] = result + 1
        with open('.serial.txt', 'w') as file:
            file.write(str(lines[0]).strip('\n') +
                       '\n' + str(lines[1]).strip('\n'))
        return lines[index]

    # returns `index` unchanged when it parses as an int, False otherwise;
    # callers must compare the result with `is False` (see delete/update)
    def check_index(self, index):
        if index == '':
            return False
        try:
            int(index)
        except (TypeError, ValueError):
            return False
        return index

    def get_month_balance(self, month=1):
        """Summarise 'profit_and_debt.txt' for `month` (1-12).

        NOTE(review): always reads 'profit_and_debt.txt' regardless of
        self.database_name -- confirm that is intended.  Record layout
        inferred from this parser: sign at token [0], numeric value at
        token [2], item name at token [3], date (d/m/y) somewhere with a
        '/'-separated month -- verify against the writer side.
        Raises ValueError for a month outside 1-12 (the original raised a
        plain string, which is itself a TypeError in Python 3).
        """
        if month < 1 or month > 12:
            raise ValueError('Invalid Month')
        with open('profit_and_debt.txt', 'r') as file:
            lines = file.readlines()
        values = []
        profits = []
        debts = []
        for line in lines:
            # the month is the second '/'-separated field ([1])
            db_month = int(line.split('/')[1])
            if db_month != month:
                continue
            # sign ([0]) concatenated with the numeric value ([2])
            value = float(line.split(' ')[0] + line.split(' ')[2])
            values.append(value)
            # item name, e.g. '001' or 'Luz'
            item_name = line.split(' ')[3]
            query = {'name': item_name, 'value': value}
            if value >= 0:
                profits.append(query)
            else:
                debts.append(query)
        total = self.get_total_income(values)
        painted_total = self.get_painted_value(total)
        return {
            'total': total,
            'painted_total': painted_total,
            'profits': profits,
            'debts': debts
        }

    # sums a list of values (empty -> 0); the mutable [] default was
    # replaced with an immutable tuple
    def get_total_income(self, values=()):
        return sum(values)

    # return the value wrapped in ANSI color codes: green if >= 0, red otherwise
    def get_painted_value(self, value=0.0):
        if value >= 0:
            return f'\033[92m{value}\033[0m'
        return f'\033[1;31;48m{value}\033[0m'
if __name__ == '__main__':
    # Manual smoke test: point the database at the orders file and
    # uncomment one call at a time to exercise the API.
    database = Database('orders.txt')
    # database.write('This is index 0')
    # database.write('This is index 1')
    # database.write('This is index 2')
    # database.write('This is index 3')
    # database.read()
    # database.delete(0)
    # database.update(0, 'This is the new index 0')
    # print(database.get_serial())
|
17,333 | e7193fb0656cf8a07def59c7db3ae8498aebfdc4 | from flask import (
Blueprint, request, current_app, jsonify)
from werkzeug.exceptions import BadRequest
from network_simulator.controller.return_value import ReturnValues
libvirt_network_api_bp = Blueprint("libvirt_network_api", __name__)
@libvirt_network_api_bp.route("/create", methods=["POST"])
def add_and_create_new_network():
data = request.get_json()
current_app.libvirt_network_service.setup_new_network(data)
return ReturnValues.SUCCESS.value
@libvirt_network_api_bp.route("remove/<network_name>", methods=["DELETE"])
def remove_network(network_name):
current_app.libvirt_network_service.shutdown_and_remove_network(network_name)
return ReturnValues.SUCCESS.value
@libvirt_network_api_bp.errorhandler(KeyError)
@libvirt_network_api_bp.errorhandler(BadRequest)
def exception_wrapper(ex):
    """Translate KeyError/BadRequest into a JSON error body with HTTP 400."""
    # BadRequest carries a human-readable .description; KeyError does not
    message = str(ex.description) if hasattr(ex, "description") else str(ex)
    body = {
        "error": {
            "type": ex.__class__.__name__,
            "message": message
        }
    }
    return jsonify(body), 400
|
17,334 | b393ddfe720e84efb22a2a741b1e1880e50c192f | import codecademylib3_seaborn
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cluster import KMeans
# From sklearn.cluster, import KMeans class
# Cluster the iris measurements into 3 groups with k-means and print the
# cluster label assigned to every sample.
iris_bunch = datasets.load_iris()
feature_matrix = iris_bunch.data
# build a 3-cluster KMeans model
clusterer = KMeans(n_clusters=3)
# fit it to the samples, then report each sample's cluster
clusterer.fit(feature_matrix)
print(clusterer.predict(feature_matrix))
17,335 | 160eaae3449edcb7eaa3a5953c1064cd4ae07ae4 |
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
"""TI = 0.11"""
# #FAST
# FAST4 = np.array([1.35137908, 1.71541373, 1.98567876, 1.3984026 , 0.94911793,
# 0.67718382, 0.64275145, 0.77966465, 1.02635052])
# FAST7 = np.array([1.32492845, 1.52989691, 1.62069779, 1.34037397, 1.03365472,
# 0.84344457, 0.78222138, 0.83065681, 1.00642498])
# FAST10 = np.array([1.29584224, 1.39422187, 1.411617 , 1.22741363, 0.95970128,
# 0.90070624, 0.87901633, 0.87618861, 1.01573045])
#
# FASTfree = 1.0875922539496865
#
# CC4 = np.array([ 0.95747018, 0.95747027, 0.95747221, 0.95750427, 0.9578723 ,
# 0.96080659, 0.97694552, 1.03747398, 1.18489344, 1.37103782,
# 1.38923034, 1.10051281, 0.71421996, 0.47887851, 0.41644526,
# 0.49104844, 0.67557731, 0.84624982, 0.92904837, 0.95256209,
# 0.9568804 , 0.95742037, 0.95746722, 0.95747006, 0.95747018])
# CC7 = np.array([ 0.95747551, 0.95750993, 0.95771282, 0.95867984, 0.96238909,
# 0.97374232, 1.00102357, 1.05053285, 1.11074008, 1.13747552,
# 1.08080282, 0.94873053, 0.78677212, 0.66394124, 0.611998 ,
# 0.63875774, 0.71989056, 0.81568851, 0.89061773, 0.93245934,
# 0.94995787, 0.95563939, 0.95710602, 0.95741093, 0.95746229])
# CC10 = np.array([ 0.95763661, 0.95807379, 0.95938526, 0.96277369, 0.97024496,
# 0.98406951, 1.00479933, 1.02786568, 1.04114537, 1.02841865,
# 0.98072148, 0.9062183 , 0.82707527, 0.7660368 , 0.73680109,
# 0.74232147, 0.77623955, 0.82537443, 0.87442203, 0.91247844,
# 0.93640694, 0.94890693, 0.95443262, 0.95652688, 0.95721321])
"""TI = 0.056"""
#FAST
FAST4 = np.array([1.36776643, 1.89714315, 2.09654972, 1.47612022, 0.83967825,
0.61750543, 0.48535682, 0.60648216, 0.95753692])
FAST7 = np.array([1.32784831, 1.55496394, 1.63731782, 1.37968735, 1.00741318,
0.71084738, 0.63311522, 0.71779105, 0.90093821])
FAST10 = np.array([1.27216204, 1.37212241, 1.38079224, 1.27542983, 1.079142 ,
0.8413648 , 0.76696968, 0.76236284, 0.88142521])
FASTfree = 1.140557544276966
CC4 = np.array([ 0.95747018, 0.95747018, 0.95747021, 0.95747147, 0.95750966,
0.95820905, 0.96596451, 1.01777584, 1.22129889, 1.59890394,
1.68786962, 1.24089428, 0.67875156, 0.39057955, 0.32215792,
0.41805318, 0.66867037, 0.87807253, 0.94570862, 0.95644108,
0.95741522, 0.95746838, 0.95747015, 0.95747018, 0.95747018])
CC7 = np.array([ 0.95747019, 0.95747037, 0.95747392, 0.95752442, 0.95803436,
0.96167573, 0.9799473 , 1.04369043, 1.19090556, 1.37254094,
1.37708451, 1.10316531, 0.7439813 , 0.51576111, 0.4516839 ,
0.52299544, 0.6862808 , 0.84457932, 0.92636094, 0.95157261,
0.95667916, 0.9573943 , 0.95746497, 0.95746993, 0.95747017])
CC10 = np.array([ 0.95747086, 0.95747785, 0.95753859, 0.95794884, 0.96009639,
0.96875746, 0.99539508, 1.05643818, 1.15268574, 1.23000079,
1.19044521, 1.00936853, 0.78394253, 0.62480823, 0.56896169,
0.60658388, 0.70956206, 0.82490112, 0.90454021, 0.94147693,
0.95374604, 0.95679372, 0.95737385, 0.95745941, 0.95746924])
offsetFAST = np.array([-1.,-0.75,-0.5,-0.25,0.,0.25,0.5,0.75,1.])
offsetCC = np.linspace(-3.,3.,25)
fig = plt.figure(1,figsize=[6.,2.5])
ax1 = plt.subplot(131)
ax2 = plt.subplot(132)
ax3 = plt.subplot(133)
import matplotlib as mpl
label_size = 8
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size
fontProperties = {'family':'serif','size':8}
ax1.set_xticklabels(ax1.get_xticks(), fontProperties)
ax1.set_yticklabels(ax1.get_yticks(), fontProperties)
ax2.set_xticklabels(ax2.get_xticks(), fontProperties)
ax2.set_yticklabels(ax2.get_yticks(), fontProperties)
ax3.set_xticklabels(ax3.get_xticks(), fontProperties)
ax3.set_yticklabels(ax3.get_yticks(), fontProperties)
ax1.plot(offsetFAST,FAST4,'o',label='SOWFA+FAST',color='C1',markersize=4)
ax1.plot(offsetCC,CC4,'o',label='superimposed loads',color='C0',markersize=4)
ax1.plot(np.array([-3.,-1.]),np.array([1.,1.])*FASTfree,'--',color='C1',label='freestream loads')
ax1.plot(np.array([1.,3.]),np.array([1.,1.])*FASTfree,'--',color='C1')
# ax3.legend(loc=2,prop={'family':'serif', 'size':8})
ax1.set_title('7D',family='serif',fontsize=10)
ax1.set_ylabel('damage',family='serif',fontsize=10)
ax1.set_xlabel('offset (D)',family='serif',fontsize=10)
ax2.plot(offsetFAST,FAST7,'or',color='C1',markersize=4)
ax2.plot(offsetCC,CC7,'ob',color='C0',markersize=4)
ax2.plot(np.array([-3.,-1.]),np.array([1.,1.])*FASTfree,'--',color='C1')
ax2.plot(np.array([1.,3.]),np.array([1.,1.])*FASTfree,'--',color='C1')
ax2.set_title('7D',family='serif',fontsize=10)
ax2.set_xlabel('offset (D)',family='serif',fontsize=10)
#
ax3.plot(offsetFAST,FAST10,'or',color='C1',markersize=4,label='SOWFA+FAST')
ax3.plot(offsetCC,CC10,'ob',color='C0',markersize=4,label='CCBlade+gravity')
ax3.set_title('10D',family='serif',fontsize=10)
ax3.plot(np.array([-3.,-1.]),np.array([1.,1.])*FASTfree,'--',color='C1',label='freestream\nSOWFA+FAST')
ax3.plot(np.array([1.,3.]),np.array([1.,1.])*FASTfree,'--',color='C1')
ax3.set_xlabel('offset (D)',family='serif',fontsize=10)
ax3.legend(loc=1,prop={'family':'serif', 'size':7})
ax1.set_yticks((0.5,1.0,1.5,2.0,2.5))
ax1.set_yticklabels(('0.5','1','1.5','2','2.5'))
ax2.set_yticks((0.5,1.0,1.5,2.0,2.5))
ax2.set_yticklabels(('0.5','1','1.5','2','2.5'))
ax3.set_yticks((0.5,1.0,1.5,2.0,2.5))
ax3.set_yticklabels(('0.5','1','1.5','2','2.5'))
ax1.set_xlim(-3.1,3.1)
ax2.set_xlim(-3.1,3.1)
ax3.set_xlim(-3.1,3.1)
ax1.set_xticks((-3.,-2.,-1.,0.,1.,2.,3.))
ax1.set_xticklabels(('-3','-2','-1','0','1','2','3'))
ax2.set_xticks((-3.,-2.,-1.,0.,1.,2.,3.))
ax2.set_xticklabels(('-3','-2','-1','0','1','2','3'))
ax3.set_xticks((-3.,-2.,-1.,0.,1.,2.,3.))
ax3.set_xticklabels(('-3','-2','-1','0','1','2','3'))
# ax1.set_xticks((-1.,-0.5,0.,0.5,1.))
# ax1.set_xticklabels(('-1','-0.5','0','0.5','1'))
# ax2.set_xticks((-1.,-0.5,0.,0.5,1.))
# ax2.set_xticklabels(('-1','-0.5','0','0.5','1'))
# ax3.set_xticks((-1.,-0.5,0.,0.5,1.))
# ax3.set_xticklabels(('-1','-0.5','0','0.5','1'))
ax1.set_title('4D downstream',family='serif',fontsize=10)
ax2.set_title('7D downstream',family='serif',fontsize=10)
ax3.set_title('10D downstream',family='serif',fontsize=10)
plt.subplots_adjust(top = 0.8, bottom = 0.2, right = 0.98, left = 0.1,
hspace = 0, wspace = 0.2)
ax1.set_ylim(0.,2.6)
ax2.set_ylim(0.,2.6)
ax3.set_ylim(0.,2.6)
plt.suptitle('5.6% TI',family='serif',fontsize=10)
plt.savefig('make_plots/figures/fatigue_damage56REVISED.pdf',transparent=True)
plt.show()
|
17,336 | b0834b238d35de8e24453cb4e131b2f6509d17cd |
# Code Jam-style input file; path is machine-specific. The commented
# alternative was the sample/problem-statement input.
f = open(r"e:\downloads\a-small-attempt0.in", "r")
#f = open(r"e:\downloads\the_repeater.txt", "r")
def char_count(s):
    """Run-length encode `s` into a list of (char, run_length) tuples."""
    runs = []
    for ch in s:
        if runs and runs[-1][0] == ch:
            # Extend the current run.
            runs[-1] = (ch, runs[-1][1] + 1)
        else:
            # Start a new run.
            runs.append((ch, 1))
    return runs
def stem(a):
    """Collapse run-length pairs to the string of distinct run characters.

    `a` is a list of (char, count) tuples as produced by char_count.
    Uses str.join instead of the original quadratic `r += c` loop.
    """
    return ''.join(c for c, _ in a)
def min_cost(u):
    """Minimum total +1/-1 moves needed to make every count in `u` equal.

    Each move changes one count by 1, so equalizing to target t costs
    sum(|u_i - t|), minimized at the median. The original summed the
    adjacent gaps of the sorted list (which equals max - min) and
    under-counted whenever there are 4 or more values, e.g.
    [1, 3, 5, 7]: correct cost 8, adjacent-gap sum 6.
    Sorts `u` in place (as before; callers do not reuse the list).
    """
    u.sort()
    median = u[len(u) // 2]
    return sum(abs(x - median) for x in u)
def min_ops(a):
    """Total minimum moves to make all strings in `a` identical, or -1
    when their run-length skeletons differ (impossible)."""
    encoded = [char_count(s) for s in a]
    skeletons = {stem(e) for e in encoded}
    if len(skeletons) > 1:
        return -1
    total = 0
    # All skeletons match, so every encoding has the same length.
    for pos in range(len(encoded[0])):
        total += min_cost([e[pos][1] for e in encoded])
    return total
# Driver: T test cases; each case has N strings to equalize.
# Prints "Fegla Won" when the strings cannot be made identical.
T = int(f.readline())
for t in range(1, T+1):
    N = int(f.readline())
    a = []
    for _ in range(N):
        a.append(f.readline().strip())
    res = min_ops(a)
    if res == -1:
        print("Case #%d: Fegla Won" % t)
    else:
        print("Case #%d: %d" % (t, res))
|
17,337 | 0670325b9836545025df08d3c92caa672b4b1497 | from data import Data
class DraftSim:
    """Fantasy-football draft simulator backed by PFR fantasy totals."""

    def __init__(self, teams: int = 12, years_data: int = 2020):
        # NOTE(review): `years_data` looks like a single season year here —
        # confirm Data.get_pfr_fantasy_totals_df's expected argument.
        self.df = Data.get_pfr_fantasy_totals_df(years_data)
        self.number_of_teams = teams

    def run_sim(self):
        # Fixed: the original `def run_sim():` omitted `self`, so any
        # instance call (sim.run_sim()) raised TypeError.
        pass
|
17,338 | 37d6381dccfa0b90dfd9d32dd3bb57f20f595ba6 | import csv
import os
import fcntl
import config
#This module will hold the statuses of the LED's on the BPS.
#path name to file
# NOTE(review): `file` shadows the Python 2 builtin of the same name;
# consider renaming (kept for compatibility with existing callers).
file = config.directory_path + config.files['Lights']
#returns integer where 9 LSB are status of 9 LED's
def read():
    """Return the stored LED bitmask; 0 when the file is empty or new."""
    # Make sure the directory and the CSV file both exist before reading.
    os.makedirs(os.path.dirname(file), exist_ok=True)
    with open(file, 'a') as csvfile:
        pass
    rows = []
    with open(file, 'r') as csvfile:
        fcntl.flock(csvfile.fileno(), fcntl.LOCK_EX)  # exclusive lock while reading
        for row in csv.reader(csvfile):
            rows.append(row)
        fcntl.flock(csvfile.fileno(), fcntl.LOCK_UN)
    if rows:
        return int(rows[0][0])
    return 0
|
17,339 | 7da8ea6e852216e09ef2b9357afae9da8448fd0d | # Writes __init__.py file for animals/harmless folder
from .birds import Birds
|
17,340 | 53550298484249e123c3116cc4a74935bac2c38c | from rest_framework import serializers
from inventario.models import Pc
class PcSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the inventory `Pc` model fields verbatim."""
    class Meta:
        model=Pc
        fields=('id_pc','procesador_pc','velocidad_pc','memoria_ram','almacenamiento','tarjeta_video','marca','tipo_equipo','numero_serie','modelo','codigo_activo','sistema_operativo','estado','dni_empleado')
17,341 | 50e7c75cca7a96dec89ee96f3cc1eaca78bc9d9f | """
__ConditionSet1orMoreConditionBranchPart1_Complete_MDL.py_____________________________________________________
Automatically generated AToM3 Model File (Do not modify directly)
Author: gehan
Modified: Mon Mar 2 14:25:28 2015
______________________________________________________________________________________________________________
"""
from stickylink import *
from widthXfillXdecoration import *
from LHS import *
from MT_pre__ConditionSet import *
from graph_MT_pre__ConditionSet import *
from graph_LHS import *
from ATOM3Enum import *
from ATOM3String import *
from ATOM3BottomType import *
from ATOM3Constraint import *
from ATOM3Attribute import *
from ATOM3Float import *
from ATOM3List import *
from ATOM3Link import *
from ATOM3Connection import *
from ATOM3Boolean import *
from ATOM3Appearance import *
from ATOM3Text import *
from ATOM3Action import *
from ATOM3Integer import *
from ATOM3Port import *
from ATOM3MSEnum import *
def ConditionSet1orMoreConditionBranchPart1_Complete_MDL(self, rootNode, MT_pre__UMLRT2Kiltera_MMRootNode=None, MoTifRuleRootNode=None):
    """Auto-generated AToM3 model loader (do not edit by hand).

    Rebuilds the ConditionSet1orMoreConditionBranchPart1_Complete rule:
    one LHS node (obj42195) and one MT_pre__ConditionSet node (obj42196),
    both attached to `rootNode`.
    """
    # --- Generating attributes code for ASG MT_pre__UMLRT2Kiltera_MM ---
    if( MT_pre__UMLRT2Kiltera_MMRootNode ):
        # author
        MT_pre__UMLRT2Kiltera_MMRootNode.author.setValue('Annonymous')
        # description
        MT_pre__UMLRT2Kiltera_MMRootNode.description.setValue('\n')
        MT_pre__UMLRT2Kiltera_MMRootNode.description.setHeight(15)
        # name
        MT_pre__UMLRT2Kiltera_MMRootNode.name.setValue('')
        MT_pre__UMLRT2Kiltera_MMRootNode.name.setNone()
    # --- ASG attributes over ---

    # --- Generating attributes code for ASG MoTifRule ---
    if( MoTifRuleRootNode ):
        # author
        MoTifRuleRootNode.author.setValue('Annonymous')
        # description
        MoTifRuleRootNode.description.setValue('\n')
        MoTifRuleRootNode.description.setHeight(15)
        # name
        MoTifRuleRootNode.name.setValue('ConditionSet1orMoreConditionBranchPart1_Complete')
    # --- ASG attributes over ---

    self.obj42195=LHS(self)
    self.obj42195.isGraphObjectVisual = True

    if(hasattr(self.obj42195, '_setHierarchicalLink')):
        self.obj42195._setHierarchicalLink(False)

    # constraint
    self.obj42195.constraint.setValue('#===============================================================================\n# This code is executed after the nodes in the LHS have been matched.\n# You can access a matched node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# The given constraint must evaluate to a boolean expression:\n# returning True enables the rule to be applied,\n# returning False forbids the rule from being applied.\n#===============================================================================\n\nreturn True\n')
    self.obj42195.constraint.setHeight(15)

    self.obj42195.graphClass_= graph_LHS
    if self.genGraphics:
        new_obj = graph_LHS(40.0,60.0,self.obj42195)
        new_obj.DrawObject(self.UMLmodel)
        self.UMLmodel.addtag_withtag("LHS", new_obj.tag)
        new_obj.layConstraints = dict() # Graphical Layout Constraints
        new_obj.layConstraints['scale'] = [1.0, 1.0]
    else: new_obj = None
    self.obj42195.graphObject_ = new_obj

    # Add node to the root: rootNode
    rootNode.addNode(self.obj42195)
    self.globalAndLocalPostcondition(self.obj42195, rootNode)
    self.obj42195.postAction( rootNode.CREATE )

    self.obj42196=MT_pre__ConditionSet(self)
    self.obj42196.isGraphObjectVisual = True

    if(hasattr(self.obj42196, '_setHierarchicalLink')):
        self.obj42196._setHierarchicalLink(False)

    # MT_label__
    self.obj42196.MT_label__.setValue('1')
    # MT_pivotOut__
    self.obj42196.MT_pivotOut__.setValue('element1')
    # MT_subtypeMatching__
    self.obj42196.MT_subtypeMatching__.setValue(('True', 0))
    self.obj42196.MT_subtypeMatching__.config = 0
    # MT_pre__classtype
    self.obj42196.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
    self.obj42196.MT_pre__classtype.setHeight(15)
    # MT_pre__cardinality
    self.obj42196.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
    self.obj42196.MT_pre__cardinality.setHeight(15)
    # MT_pre__name
    self.obj42196.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
    self.obj42196.MT_pre__name.setHeight(15)
    # MT_pivotIn__
    self.obj42196.MT_pivotIn__.setValue('')
    self.obj42196.MT_pivotIn__.setNone()

    self.obj42196.graphClass_= graph_MT_pre__ConditionSet
    if self.genGraphics:
        new_obj = graph_MT_pre__ConditionSet(100.0,100.0,self.obj42196)
        new_obj.DrawObject(self.UMLmodel)
        self.UMLmodel.addtag_withtag("MT_pre__ConditionSet", new_obj.tag)
        new_obj.layConstraints = dict() # Graphical Layout Constraints
        new_obj.layConstraints['scale'] = [1.0, 1.0]
    else: new_obj = None
    self.obj42196.graphObject_ = new_obj

    # Add node to the root: rootNode
    rootNode.addNode(self.obj42196)
    self.globalAndLocalPostcondition(self.obj42196, rootNode)
    self.obj42196.postAction( rootNode.CREATE )

    # Connections for obj42195 (graphObject_: Obj4) of type LHS
    self.drawConnections(
)
    # Connections for obj42196 (graphObject_: Obj5) of type MT_pre__ConditionSet
    self.drawConnections(
)
# Registration metadata consumed by the AToM3 model loader.
newfunction = ConditionSet1orMoreConditionBranchPart1_Complete_MDL
loadedMMName = ['MT_pre__UMLRT2Kiltera_MM_META', 'MoTifRule_META']
atom3version = '0.3'
|
17,342 | b9dffba29e049d84ee9b2324985c626ebe2140ea | from etkinlik import *
# Draws a two-storey "house": two stacked 50-unit squares topped by a
# triangular roof. (Comments translated from Turkish.)
for i in range(2):
    for j in range(4):
        # square drawing
        ciz(50)        # draw a 50-unit edge
        solaDon()      # turn left
    # move to the point where the next storey
    # will start being drawn
    solaDon()          # face north
    git(50)            # go to the square's top-left corner
    sagaDon()          # face east again to start
                       # the next storey / the roof
# triangle (roof) drawing
ciz(50)                # draw the 50-unit
                       # triangle base
ucgenSag(50)           # draw the 50-unit
                       # right side of the triangle
ucgenSol(50)           # draw the 50-unit
                       # left side of the triangle
#dogu()
#git(50)
#kuzey()
#git(50)
#ciz(50)
#bati()
#ciz(50)
#guney()
#ciz(50)
#catiSag(50)
#git(50)
#turtle.seth(240)
#turtle.fd(50)
#catiSol(50)
|
17,343 | 683339d596782f411e8b2b963d5a0c3802264ff7 | from typing import Tuple, Sequence
def check_index_is_valid(shape: Tuple[int, ...], index: Tuple[int, ...]) -> None:
    """Validate that `index` addresses a cell inside an array of `shape`.

    Raises:
        ValueError: when the ranks differ or any coordinate is >= the
            corresponding axis size. Negative coordinates are not
            rejected, matching the original contract.
    """
    if len(shape) != len(index):
        raise ValueError("Length of desired index {} must match the length of the shape {}.".format(index, shape))
    # zip iteration replaces the original index-based range(len(...)) loop.
    for axis_size, coord in zip(shape, index):
        if coord >= axis_size:
            raise ValueError("Invalid index {} for shape {}".format(index, shape))
def check_all_shapes_match(shapes: Sequence[Tuple[int, ...]]) -> None:
    """Raise ValueError unless every shape in `shapes` is identical."""
    distinct = set(shapes)
    if len(distinct) != 1:
        raise ValueError("Shapes must match")
|
17,344 | cbd12fed3d2a24e9620d8de2183f4331cbd3cd51 | import sqlite3
class Conecta(object):
    """Thin wrapper around a local SQLite database ('db.db').

    Bug fixed: the class defined `executaConsulta` twice; the second
    definition -- `def executaConsulta(self, ):`, referencing undefined
    `sql`/`parms` -- shadowed the working one, so every call failed.
    The broken duplicate was removed, and `ledados` (an exact copy of
    the query helper) now delegates to it.
    """

    def __init__(self):
        self.conect()

    def conect(self):
        """Open (or create) the database file with a 1 s busy timeout."""
        self.con = sqlite3.connect('db.db', timeout=1)

    def fechaConexao(self):
        """Close the underlying connection."""
        self.con.close()

    def defineCursor(self):
        """(Re)bind self.cursor to a fresh cursor."""
        self.cursor = self.con.cursor()

    def insereDadosUsuarios(self, nome, login, senha, data_criacao):
        """Insert a row into `usuario` and commit."""
        self.defineCursor()
        self.cursor.execute("""
            INSERT INTO usuario(nome, login, senha, data_criacao)
            values(?,?,?,?)
            """, (nome, login, senha, data_criacao))
        self.comita()

    def ledados(self, sql, parms=None):
        """Run a query and return all rows (delegates to executaConsulta)."""
        return self.executaConsulta(sql, parms)

    def insereDadosVendas(self, cliente, saldo, data_modificacao):
        """Insert a row into `cliente_saldo` and commit."""
        self.defineCursor()
        self.cursor.execute("""
            INSERT INTO cliente_saldo(cliente, saldo, data_modificacao)
            values(?,?,?)
            """, (cliente, saldo, data_modificacao))
        self.comita()

    def insereDadosPedidos(self, nome_cliente, produto, quantidade, data_modificacao):
        """Insert a row into `pedidos` and commit."""
        self.defineCursor()
        self.cursor.execute("""
            INSERT INTO pedidos(nome_cliente, produto, quantidade, data_modificacao)
            values(?,?,?,?)
            """, (nome_cliente, produto, quantidade, data_modificacao))
        self.comita()

    def executaUpdatePedidos(self, nome_cliente, produto, quantidade, data_modificacao):
        """Update quantity/date of an existing `pedidos` row and commit."""
        self.defineCursor()
        self.cursor.execute("""
            UPDATE pedidos
            SET quantidade=?,
                data_modificacao=?
            WHERE nome_cliente=? AND produto=?
            """, (quantidade, data_modificacao, nome_cliente, produto))
        self.comita()

    def executaConsulta(self, sql, parms=None):
        """Run an arbitrary query and return cursor.fetchall()."""
        self.defineCursor()
        if parms is None:
            self.cursor.execute(sql)
        else:
            self.cursor.execute(sql, parms)
        return self.cursor.fetchall()

    def executaUpdate(self, sql, parms=None):
        """Run an arbitrary statement and commit."""
        self.defineCursor()
        if parms is None:
            self.cursor.execute(sql)
        else:
            self.cursor.execute(sql, parms)
        self.comita()

    def transformaResultados(self, value):
        """Return the first scalar of the first row of a fetchall() result
        (None for an empty result, as before)."""
        self.trata = value
        for i in self.trata:
            for b in i:
                return b

    def comita(self):
        """Commit the current transaction."""
        self.con.commit()
if __name__ == '__main__':
    # Smoke test: running the module directly just opens/creates db.db.
    c = Conecta()
17,345 | 08ead39776286102ca6488536e7c1afd9d43689b | from startiot import Startiot
from L76GNSS import L76GNSS
from pytrack import Pytrack
import pycom
import time
from machine import Pin
from lib.onewire import DS18X20
from lib.onewire import OneWire
from lib.deepsleep import DeepSleep
pycom.heartbeat(False) # disable the blue blinking
iot = Startiot()
pycom.rgbled(0xFF0000)  # red while connecting
iot.connect()
pycom.rgbled(0x0000FF)
pycom.rgbled(0x000000)

py = Pytrack()
l76 = L76GNSS(py, timeout=0)

# Temperature sensor
ow = OneWire(Pin('P9'))
temp = DS18X20(ow)

ds = DeepSleep()

# Main loop: sample GPS + temperature, send over the network, then sleep.
while True:
    # Get coordinates. Timeout in case of no coverage
    coord = l76.coordinates()

    # Get temperature
    tmp = temp.read_temp_async()
    temp.start_convertion()
    print(str(tmp))

    # send some coordinates
    pycom.rgbled(0x00FF00)  # green while transmitting
    #if not str(coord).split(", ")[0] == "(None" and not str(coord).split(", ")[1] == "None)":
    iot.send(str(coord) + " " + str(tmp))
    # continue
    pycom.rgbled(0x000000)
    #iot.send(str(py.read_battery_voltage()) + " " + str(coord) + " " + str(tmp))
    py.go_to_sleep(2)
    print("Waking up...")
17,346 | 4f2a5481f03fb59e27c33c414c117b0e6343d2d0 | import requests
import json
import base64
import os
# Converts a local .doc to .pdf via the convertio.co REST API:
# start a job, upload the file, poll for completion, download base64 result.
# NOTE(review): the API key is hard-coded; move it to config/env.
apikey = "4fc3a688d6aaf8c4368bad7acf78c9e7"
if (True):
    # NOTE(review): `input` shadows the builtin; rename when refactoring.
    input = "upload"
    outputformat = "pdf"
    headers = {'Content-Type': 'application/json'}
    urlbegin = "https://api.convertio.co/convert"
    data_begin = {
        "apikey": apikey,
        "input": "upload",
        "outputformat": outputformat,
        "filename": "123.doc"
    }
    # Start the conversion job and remember its id.
    req = requests.post(urlbegin, json.dumps(data_begin), headers=headers)
    result = json.loads(req.text)
    id = result['data']['id']
    print(id)

    # Upload the source document for this job.
    urlupload = "https://api.convertio.co/convert/" + id + "/123.doc"
    #file = open('123.doc', 'rb')
    files = {'file': open('E:/UnitAi-Project/client/experimentfile/文件在线转换api测试(完成修正,待验证)/123.doc', 'rb')}
    req = requests.put(urlupload, files=files)
    result = json.loads(req.text)
    print(result)
    print("begin!")

    # Poll job status until it reports "finish".
    # NOTE(review): busy-wait without a sleep hammers the API; add a delay.
    urlcheck = "https://api.convertio.co/convert/" + id + "/status"
    while 1:
        req = requests.get(urlcheck)
        result1 = json.loads(req.text)
        try:
            if (result1['data']['step'] == "finish"):
                break
            else:
                print("Waiting")
                # print(result['data']['step_percent'])
        except:  # NOTE(review): bare except hides real errors; narrow it.
            print(result1)

    # Fetch the converted file as base64 and write it to disk.
    urlfinish = "https://api.convertio.co/convert/" + id + "/dl/base64"
    req = requests.get(urlfinish, headers=headers)
    result = json.loads(req.text)
    try:
        base = result['data']['content']
        print(result)
    except:  # NOTE(review): on failure `base` stays undefined and the decode below raises.
        print(result)
    # print(base)
    imgdata = base64.b64decode(base)
    file = open('123.pdf', 'wb')
    file.write(imgdata)
    file.close()
|
17,347 | 9b460e24a802b1ba775509a8e98a58d825f0cbf6 | m,n = input().strip().split()
h = input().strip().split()
a = set(input().strip().split())
b = set(input().strip().split())
totalHappiness = 0
for i in h:
if i in a:
totalHappiness+=1
elif i in b:
totalHappiness-=1
else:
pass
print(totalHappiness)
|
17,348 | a03c9df4b937f3849dc5a52611285f8464447373 | # Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.HierBlock import HierBlock as hbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.ListBlock import ListBlock as lbk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
blocks = [
# Download from finelybook www.finelybook.com
# Nonrepresentative Training Data
# In order to generalize well, it is crucial that your training data be representative of the
# new cases you want to generalize to. This is true whether you use instance-based
# learning or model-based learning.
# For example, the set of countries we used earlier for training the linear model was not
# perfectly representative; a few countries were missing. Figure 1-21 shows what the
# data looks like when you add the missing countries.
#
#
#
#
# Figure 1-21. A more representative training sample
#
# If you train a linear model on this data, you get the solid line, while the old model is
# represented by the dotted line. As you can see, not only does adding a few missing
# countries significantly alter the model, but it makes it clear that such a simple linear
# model is probably never going to work well. It seems that very rich countries are not
# happier than moderately rich countries (in fact they seem unhappier), and conversely
# some poor countries seem happier than many rich countries.
# By using a nonrepresentative training set, we trained a model that is unlikely to make
# accurate predictions, especially for very poor and very rich countries.
# It is crucial to use a training set that is representative of the cases you want to general‐
# ize to. This is often harder than it sounds: if the sample is too small, you will have
# sampling noise (i.e., nonrepresentative data as a result of chance), but even very large
# samples can be nonrepresentative if the sampling method is flawed. This is called
# sampling bias.
#
#
# A Famous Example of Sampling Bias
# Perhaps the most famous example of sampling bias happened during the US presi‐
# dential election in 1936, which pitted Landon against Roosevelt: the Literary Digest
# conducted a very large poll, sending mail to about 10 million people. It got 2.4 million
# answers, and predicted with high confidence that Landon would get 57% of the votes.
#
#
# 24 | Chapter 1: The Machine Learning Landscape
#
# Download from finelybook www.finelybook.com
# Instead, Roosevelt won with 62% of the votes. The flaw was in the Literary Digest’s
# sampling method:
#
# • First, to obtain the addresses to send the polls to, the Literary Digest used tele‐
# phone directories, lists of magazine subscribers, club membership lists, and the
# like. All of these lists tend to favor wealthier people, who are more likely to vote
# Republican (hence Landon).
# • Second, less than 25% of the people who received the poll answered. Again, this
# introduces a sampling bias, by ruling out people who don’t care much about poli‐
# tics, people who don’t like the Literary Digest, and other key groups. This is a spe‐
# cial type of sampling bias called nonresponse bias.
#
# Here is another example: say you want to build a system to recognize funk music vid‐
# eos. One way to build your training set is to search “funk music” on YouTube and use
# the resulting videos. But this assumes that YouTube’s search engine returns a set of
# videos that are representative of all the funk music videos on YouTube. In reality, the
# search results are likely to be biased toward popular artists (and if you live in Brazil
# you will get a lot of “funk carioca” videos, which sound nothing like James Brown).
# On the other hand, how else can you get a large training set?
#
#
# Poor-Quality Data
# Obviously, if your training data is full of errors, outliers, and noise (e.g., due to poor-
# quality measurements), it will make it harder for the system to detect the underlying
# patterns, so your system is less likely to perform well. It is often well worth the effort
# to spend time cleaning up your training data. The truth is, most data scientists spend
# a significant part of their time doing just that. For example:
#
# • If some instances are clearly outliers, it may help to simply discard them or try to
# fix the errors manually.
# • If some instances are missing a few features (e.g., 5% of your customers did not
# specify their age), you must decide whether you want to ignore this attribute alto‐
# gether, ignore these instances, fill in the missing values (e.g., with the median
# age), or train one model with the feature and one model without it, and so on.
#
#
# Irrelevant Features
# As the saying goes: garbage in, garbage out. Your system will only be capable of learn‐
# ing if the training data contains enough relevant features and not too many irrelevant
# ones. A critical part of the success of a Machine Learning project is coming up with a
# good set of features to train on. This process, called feature engineering, involves:
#
#
#
# Main Challenges of Machine Learning | 25
#
]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
    """Leaf node holding the 'Nonrepresentative Training Data' section."""

    def __init__(self):
        super().__init__(
            "Nonrepresentative Training Data",
            # Stage.REMOVE_EXTRANEOUS,
            # Stage.ORIG_BLOCKS,
            # Stage.CUSTOM_BLOCKS,
            # Stage.ORIG_FIGURES,
            # Stage.CUSTOM_FIGURES,
            # Stage.CUSTOM_EXERCISES,
        )
        # Plain loop instead of the original side-effect list
        # comprehension ([self.add(a) for a in blocks]), which built a
        # throwaway list.
        for block in blocks:
            self.add(block)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class NonrepresentativeTraining(HierNode):
    """Hierarchy node wrapping the section's Content leaf."""
    def __init__(self):
        super().__init__("Nonrepresentative Training Data")
        self.add(Content(), "content")
# eof
|
17,349 | cfc821f584e2cdad1333d3b352915c368662aee9 | n = int(input())
s = input()
def solve(n, s):
    """Print the winner of the Anton ('A') vs Danik ('D') game series.

    n: number of games (== len(s)); s: per-game outcome string.
    Uses str.count instead of the original manual counting loop.
    """
    anton = s.count('A')
    danik = n - anton
    if anton > danik:
        print("Anton")
    elif anton < danik:
        print("Danik")
    else:
        print("Friendship")
# Entry point: decide the series from the values read above.
solve(n, s)
|
17,350 | 782d113145c05643b6068a5cbd510865d322662b | """Abstract away the terminal control sequences"""
from sys import stdin
import termios
TERMATTRS = None
def save():
    """Save the terminal state so that we can restore it at the end (called in game setup).

    Stores the attributes of stdin's terminal in the module-level
    TERMATTRS; raises termios.error when stdin is not a tty.
    """
    global TERMATTRS
    TERMATTRS = termios.tcgetattr(stdin)
def restore():
    """Restore the terminal attributes captured by save().

    Must be called after save(); otherwise TERMATTRS is still None and
    termios.tcsetattr raises.
    """
    # The original declared `global TERMATTRS`, which is unnecessary for
    # a read-only access and was removed.
    termios.tcsetattr(stdin, termios.TCSANOW, TERMATTRS)
|
17,351 | b96c25322568cc35b2981ac1a77713d9e5b79173 | # Generated by Django 2.1.7 on 2019-03-01 09:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: re-adds the `comment` text field to the Comment model."""

    dependencies = [
        ('posts', '0004_remove_comment_comment'),
    ]

    operations = [
        migrations.AddField(
            model_name='comment',
            name='comment',
            # default=1 only backfills existing rows during the migration;
            # preserve_default=False keeps it out of the final field definition.
            field=models.TextField(default=1, verbose_name=''),
            preserve_default=False,
        ),
    ]
|
17,352 | bf8db8c7acf6e9b6a6c75ab93d5dd13d4f0a5b2a | # coding=utf-8
import requests
import re, sys, os
import json
import threading
import pprint
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
class spider:
    """Tmall product-image scraper (Python 2).

    Fetches a product page by item id, extracts the Setup(...) JSON blob,
    then downloads main, per-colour and detail images into subfolders of
    `name`. (Comments translated from Chinese.)
    """

    def __init__(self, sid, name):
        self.id = sid
        self.headers = {"Accept": "text/html,application/xhtml+xml,application/xml;",
                        "Accept-Encoding": "gzip",
                        "Accept-Language": "zh-CN,zh;q=0.8",
                        "Referer": "https://www.example.com/",
                        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36"
                        }
        self.name = name  # output directory prefix

    def openurl(self, url):
        """GET `url`; return the response body text, or None on failure."""
        self.request = requests.get(url, headers=self.headers)
        if self.request.ok:
            return self.request.text

    def matchs(self):
        """Scrape the product page and build self.newdata (image URL index)."""
        tmall_exp = r"Setup\(([\s\S]+?)\);"  ### regex capturing the product Setup(...) JSON
        detail = r"src=\"(https://img\S+?[jpgifn]+?)\""  ### regex for detail-image URLs
        html = self.openurl("https://detail.tmall.com/item.htm?id=%s" % self.id)
        data = re.findall(tmall_exp, html)
        print('-----------------------------------------',data)
        data = json.loads(data[0])
        main_img = data['propertyPics']  ## holds both main and colour image addresses
        color_data = data['valItemInfo']['skuList']  ### colour info list: colour code, colour name, sku id
        print ('*******************************************',color_data)
        detail_html = self.openurl("http:" + data['api']["httpsDescUrl"])
        detail_image = re.findall(detail, detail_html)
        self.newdata = {"MAIN": main_img['default'], "DETAIL": detail_image, "id": self.id, }
        # Deduplicate colour variants by their pvs code.
        psvs = []
        self.newdata['COLOR'] = []
        for v in range(len(color_data)):
            if ";" in color_data[v]["pvs"]:
                psv = color_data[v]['pvs'][color_data[v]['pvs'].find(";") + 1:]
            else:
                psv = color_data[v]['pvs']
            if psv in psvs:
                continue
            psvs.append(psv)
            self.newdata['COLOR'].append({color_data[v]["names"]: main_img[";" + psv + ";"]})
        pprint.pprint(self.newdata)
        return self.newdata

    def download(self):
        """Spawn one download thread per main/colour/detail image."""
        if len(self.newdata) > 0:
            for x in range(len(self.newdata['MAIN'])):
                threading.Thread(target=self.download_main, args=(self.newdata['MAIN'][x], x)).start()
            for x in self.newdata['COLOR']:
                threading.Thread(target=self.download_color, args=(x,)).start()
            for x in range(len(self.newdata['DETAIL'])):
                threading.Thread(target=self.download_detail, args=(self.newdata['DETAIL'][x], x)).start()
        return

    def download_main(self, url, index):
        """Save one main image as <name>/main/<index>.jpg."""
        try:
            img = requests.get("http:" + url, stream=True, headers=self.headers, timeout=10)
        except:
            print(sys.exc_info())
            return
        if img.ok:
            if not os.path.exists(self.name + "/main"):
                try:
                    os.makedirs(self.name + "/main")
                except:
                    pass  # another thread may have created it first
            imgs = open(self.name + "/main/%s.jpg" % index, "wb")
            imgs.write(img.content)
            imgs.close()

    def download_color(self, url):
        """Save one colour-variant image as <name>/color/<colour name>.jpg."""
        try:
            img = requests.get("http:" + url[list(url.keys())[0]][0], stream=True, headers=self.headers, timeout=10)
        except:
            print(sys.exc_info())
            return
        if img.ok:
            if not os.path.exists(self.name + "/color"):
                try:
                    os.makedirs(self.name + "/color")
                except:
                    pass  # another thread may have created it first
            # Sanitize path separators in the colour name.
            if "/" in list(url.keys())[0]:
                color = list(url.keys())[0].replace("/", "_")
            elif "\\" in list(url.keys())[0]:
                color = list(url.keys())[0].replace("\\", "_")
            else:
                color = list(url.keys())[0]
            imgs = open(self.name + "/color/%s.jpg" % color, "wb")
            imgs.write(img.content)
            imgs.close()

    def download_detail(self, url, index):
        """Save one detail image as <name>/detail/<index>.jpg."""
        try:
            img = requests.get(url, stream=True, headers=self.headers, timeout=10)
        except:
            print(sys.exc_info())
            return
        if img.ok:
            if not os.path.exists(self.name + "/detail"):
                try:
                    os.makedirs(self.name + "/detail")
                except:
                    pass  # another thread may have created it first
            imgs = open(self.name + "/detail/%s.jpg" % index, "wb")
            imgs.write(img.content)
            imgs.close()
if __name__ == "__main__":
    sid = 562441235623  ## Tmall item id goes here
    taobao = spider(sid, "下载图片/T")
    taobao.matchs()
    taobao.download()
|
17,353 | 62326ff417cb9d48ce8caa83e3f42f46bdbbfd4e | def get_derivative_at_point(function, x, h = 1e-7):
return (function(x + h) - function(x)) / h
def square(x):
    """Return x squared."""
    return x * x
def get_left_derivative_at_point(f, x, h = 1e-7):
    """Backward-difference estimate of `f`'s derivative at x."""
    drop = f(x) - f(x - h)
    return drop / h
# Demo: forward and backward differences agree near x=4 for x**2 (slope 8);
# at x=0 with a tiny h, floating-point cancellation dominates the estimate.
print(get_derivative_at_point(square, 4))
print(get_left_derivative_at_point(square, 4))
print(get_derivative_at_point(square, 0, 1e-10))
17,354 | 4476734ddfa4b0a15e00e9985ed7fde36c2ee98b | #!/usr/bin/python2.7
import os
import shutil
def config_bashrc():
    """Append a PROMPT_COMMAND history-flush line to ~/.bashrc (backs it up first)."""
    try:
        shutil.copy2(os.path.expanduser("~/.bashrc"), os.path.expanduser("~/.bashrc.bak"))
        # NOTE(review): "r+a" is not a standard open() mode -- confirm the
        # intent (probably "a+").
        f = open(os.path.expanduser("~/.bashrc"), "r+a")
        lines = f.readlines()
        command = 'PROMPT_COMMAND="history -a;$PROMPT_COMMAND"'
        # NOTE(review): readlines() entries keep their trailing "\n", so this
        # membership test only matches an unterminated last line -- the
        # command may be appended repeatedly. Verify.
        if command not in lines:
            f.write(command)
            print '[+]', 'File .bashrc was updated ! Thehow saved the backup to .bashrc.bak'
        f.close()
    except Exception, e:
        print e
        raise Exception, "Config .bashrc file! Error when write file"
def get_zsh_last_command():
    """Return the second-to-last command recorded in ~/.zsh_history.

    Assumes zsh extended-history lines of the form ": <ts>:<dur>;<cmd>".
    """
    try:
        with open(os.path.expanduser("~/.zsh_history"), "r") as f:
            list_commands = f.readlines()
            f.close()
        last_command = list_commands[-2].split(';')[1]
        return last_command
    except Exception, e:
        print e
        raise Exception, "Cannot get zsh last command"
def get_bash_last_command():
    """Return the second-to-last line of ~/.bash_history (incl. newline)."""
    try:
        with open(os.path.expanduser("~/.bash_history"), "r") as f:
            list_commands = f.readlines()
            f.close()
        last_command = list_commands[-2]
        return last_command
    except Exception, e:
        print e
        raise Exception, "Cannot get bash last command"
def get_shell():
    """Return the user's shell name parsed from the SHELL env var.

    Uses the basename so nested paths work: the original indexed
    split('/')[2], which returned 'bin' for e.g. /usr/bin/zsh.
    """
    return os.environ["SHELL"].split('/')[-1]
def get_last_command():
    """Dispatch to the history reader matching the current shell."""
    config_bashrc()  # ensure bash flushes history before we read it
    shell = get_shell()
    if shell == 'bash':
        return get_bash_last_command()
    elif shell == 'zsh':
        return get_zsh_last_command()
    else:
        raise Exception, "Shell not found"
def get_prefix(command):
    """Return the first space-separated token of `command`, stripped."""
    try:
        prefix = command.split(' ')[0]
    except Exception, e:
        print e
        raise Exception, "Get prefix: error ! Let me check your last command !"
    return prefix.strip()
if __name__ == '__main__':
    """
    For testing
    """
    print get_last_command()
|
17,355 | 197efbeb290e4e8eabf8316dc355151d80a6b6b9 | class Cajero:
efectivo = 0
billetes = None
def __init__(self, efectivo, billetes):
self.efectivo = efectivo
self.billetes = billetes
    def dardinero(self, pedido, cliente):
        """Dispense `pedido` euros to `cliente`, interacting via stdin/stdout.

        Checks, in order: machine cash, client balance, then whether the
        stocked bills can cover the amount.  If only part of the amount is
        available the user is asked (SI/NO) whether to accept it.
        NOTE(review): calcularbilletes mutates self.billetes even when the
        user later declines the partial amount -- the bills are not put
        back; confirm this is intended.
        """
        if pedido > self.efectivo:
            print("No se dispone de efectivo suficiente. El efectivo disponible es de: {}€".format(self.efectivo))
        else:
            if pedido > cliente.saldo:
                print("El efectivo pedido ({}) es superior al saldo disponible ({}€).".format(pedido, cliente.saldo))
            else:
                # Amount the current bill stock can actually cover.
                vuelta = self.calcularbilletes(pedido)
                if vuelta == pedido:
                    cliente.saldo -= pedido
                    self.efectivo -= pedido
                    print("Su saldo es de: {}€".format(cliente.saldo))
                else:
                    if pedido > vuelta > 0:
                        # Partial amount available: offer it to the user.
                        print("El cajero solo dispone de {}€. ¿Quiere este dinero? (SI/NO)".format(vuelta))
                        resp = input()
                        while resp != "SI" and resp != "NO" and resp != "si" and resp != "no":
                            print("Respuesta inválida")
                            print("El cajero solo dispone de {}€. ¿Quiere este dinero? (SI/NO)".format(vuelta))
                            resp = input()
                        if resp == "SI" or resp == "si":
                            cliente.saldo -= vuelta
                            self.efectivo -= vuelta
                            print("Su saldo es de: {}€".format(cliente.saldo))
                        else:
                            print("Disculpa las molestias")
                    else:
                        # Nothing could be dispensed; list the remaining bills.
                        print("El cajero solo de los siguientes billetes:")
                        for i in range(len(self.billetes)):
                            valorbill = self.billetes[i][0]
                            totalbill = self.billetes[i][1]
                            if totalbill > 0:
                                print("Cajero dispone de {} billetes de {}€".format(totalbill, valorbill))
    def calcularbilletes(self, pedido):
        """Greedily take bills toward `pedido`; return the amount covered.

        Side effects: decrements the counts in self.billetes for every bill
        taken and prints how many bills of each denomination were used.
        Assumes self.billetes is ordered largest denomination first for the
        greedy walk to maximise coverage -- TODO confirm with callers.
        """
        acumvuleta = 0
        for i in range(len(self.billetes)):
            valorbill = self.billetes[i][0]
            totalbill = self.billetes[i][1]
            billvuelta = 0
            if valorbill <= pedido:
                # Take bills of this denomination while stock and need remain.
                while totalbill > 0 and pedido - valorbill >= 0:
                    pedido -= valorbill
                    acumvuleta += valorbill
                    totalbill -= 1
                    billvuelta += 1
                self.billetes[i][1] = totalbill
                # print("Cajero dispone de {} billetes de {}€".format(totalbill, valorbill))
                print("Vuelta: {} bille/s de {}€".format(billvuelta, valorbill))
        return acumvuleta
17,356 | 7f816521995f0ad353c360067aabc0f7ee5410ce | #encoding=utf8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import numpy as np
from DBUtils import *
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cross_validation import train_test_split
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.metrics import classification_report
from sklearn.externals import joblib
def train(X_train, X_test, y_train, y_test):
    """Fit TF-IDF + logistic regression, print a report, persist both models.

    Side effects: writes 'tfidf.model' and 'logistic.model' to the CWD and
    prints shapes/metrics (Python 2 print statements).
    """
    tfidf = TfidfVectorizer(max_features=30000)
    train_vector = tfidf.fit_transform(X_train)
    print train_vector.shape
    # Test data must reuse the fitted vocabulary: transform, not fit_transform.
    test_vector = tfidf.transform(X_test)
    joblib.dump(tfidf, 'tfidf.model')
    print '逻辑回归分类'
    model = LogisticRegression()
    model.fit(train_vector, y_train)
    predict_label = model.predict(test_vector)
    print (classification_report(y_test, predict_label))
    joblib.dump(model, 'logistic.model')
def cross(x,y):
    """Manual 10-fold cross-validation over (x, y), calling train() per fold."""
    # Shuffle x and y identically by replaying the same RNG state.
    rng_state = np.random.get_state()
    np.random.shuffle(x)
    np.random.set_state(rng_state)
    np.random.shuffle(y)
    train_data = np.array(x)
    train_label = np.array(y)
    # Split into 10 folds and evaluate each held-out fold in turn.
    num_folds = 10
    train_data_folds = np.array_split(train_data, num_folds)
    train_label_folds = np.array_split(train_label, num_folds)
    for i in xrange(num_folds):
        # All folds except i form the training set; fold i is held out.
        train_data = np.concatenate((train_data_folds[0:i] + train_data_folds[i + 1:num_folds]))
        train_label = np.concatenate((train_label_folds[0:i] + train_label_folds[i + 1:num_folds]))
        test_data = train_data_folds[i]
        test_label = train_label_folds[i]
        print "fold:" + str(i)
        train(train_data, test_data, train_label, test_label)
def getSeg(file):
    """Read segmented-text samples from *file*.

    Args:
        file: path to a text file, one sample per line.

    Returns:
        List of lines with surrounding whitespace stripped.
    """
    # Context manager guarantees the handle is closed even on error;
    # iterating the file directly avoids the intermediate readlines() list.
    with open(file, 'r') as f:
        return [line.strip() for line in f]
def getLabel(file):
    """Read integer class labels from *file* (one label per line).

    Raises ValueError if a line is not a valid integer, matching the
    original int(line.strip()) behaviour.
    """
    with open(file, 'r') as f:
        return [int(line.strip()) for line in f]
def main():
    """Entry point: read cached seg/label files and run one train/test split."""
    x = getSeg('seg.txt')
    y = getLabel('label.txt')
    # Alternative path (load from DB + 10-fold CV) kept for reference:
    # segs = selectBySql('select seg, label from sogou')
    # x = []
    # y = []
    # print len(segs)
    # for seg in segs:
    #     x.append(seg[0])
    #     y.append(seg[1])
    # x = np.array(x)
    # y = np.array(y)
    # cross(x, y)
    # Hold out 10% for testing; fixed seed for reproducibility.
    X_train, X_test, y_train,y_test = train_test_split(x,y, test_size=0.1,random_state=0)
    train(X_train, X_test, y_train,y_test)
if __name__ == '__main__':
main()
|
17,357 | 437ba5c847ab09303848b0030bd6c36e1c3220fe | #!/usr/bin/env python
# coding: utf-8
# ___
#
# <a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>
# ___
# # NumPy
#
# NumPy (or Numpy) is a Linear Algebra Library for Python, the reason it is so important for Data Science with Python is that almost all of the libraries in the PyData Ecosystem rely on NumPy as one of their main building blocks.
#
# Numpy is also incredibly fast, as it has bindings to C libraries. For more info on why you would want to use Arrays instead of lists, check out this great [StackOverflow post](http://stackoverflow.com/questions/993984/why-numpy-instead-of-python-lists).
#
# We will only learn the basics of NumPy, to get started we need to install it!
# ## Installation Instructions
#
# **It is highly recommended you install Python using the Anaconda distribution to make sure all underlying dependencies (such as Linear Algebra libraries) all sync up with the use of a conda install. If you have Anaconda, install NumPy by going to your terminal or command prompt and typing:**
#
# conda install numpy
#
# **If you do not have Anaconda and can not install it, please refer to [Numpy's official documentation on various installation instructions.](http://docs.scipy.org/doc/numpy-1.10.1/user/install.html)**
# ## Using NumPy
#
# Once you've installed NumPy you can import it as a library:
# In[1]:
import numpy as np
# Numpy has many built-in functions and capabilities. We won't cover them all but instead we will focus on some of the most important aspects of Numpy: vectors,arrays,matrices, and number generation. Let's start by discussing arrays.
#
# # Numpy Arrays
#
# NumPy arrays are the main way we will use Numpy throughout the course. Numpy arrays essentially come in two flavors: vectors and matrices. Vectors are strictly 1-d arrays and matrices are 2-d (but you should note a matrix can still have only one row or one column).
#
# Let's begin our introduction by exploring how to create NumPy arrays.
#
# ## Creating NumPy Arrays
#
# ### From a Python List
#
# We can create an array by directly converting a list or list of lists:
# In[19]:
my_list = [1,2,3]
print(my_list)
print(np.array(my_list))
my_matrix = [[1,2,3],[4,5,6],[7,8,9]]
print(my_matrix)
print(np.array(my_matrix))
# ## Built-in Methods
#
# There are lots of built-in ways to generate Arrays
# ### arange
#
# Return evenly spaced values within a given interval.
print(np.arange(0,10))
print(np.arange(0,11,2))
# ### zeros and ones
#
# Generate arrays of zeros or ones
print(np.zeros(3))
print(np.zeros((5,5)))
print(np.ones(3))
print(np.ones((3,3)))
# ### linspace
# Return evenly spaced numbers over a specified interval.
print(np.linspace(0,10,3))
print(np.linspace(0,10,50))
# ## eye
#
# Creates an identity matrix
print(np.eye(4))
# ## Random
#
# Numpy also has lots of ways to create random number arrays:
#
# ### rand
# Create an array of the given shape and populate it with
# random samples from a uniform distribution
# over ``[0, 1)``.
print(np.random.rand(2))
# for 2 dimensionl array:
print(np.random.rand(5,5))
# ### randn
#
# Return a sample (or samples) from the "standard normal" distribution. Unlike rand which is uniform:
print(np.random.randn(2))
# for 2 dimensionl array:
print(np.random.randn(5,5))
# ### randint
# Return random integers from `low` (inclusive) to `high` (exclusive).
print(np.random.randint(1,100))
# will give 10 integers
print(np.random.randint(1,100,10))
# ## Array Attributes and Methods
#
# Let's discuss some useful attributes and methods or an array:
arr = np.arange(25)
ranarr = np.random.randint(0,50,10)
print(arr)
print(ranarr)
# ## Reshape
# Returns an array containing the same data with a new shape.
# rows * cols must equal number of elements in the array
print(arr.reshape(5,5))
# ### max,min,argmax,argmin
#
# These are useful methods for finding max or min values. Or to find their index locations using argmin or argmax
# argmax and argmin return the index of the max and min vals
print(ranarr)
print(ranarr.max())
print(ranarr.argmax())
print(ranarr.min())
print(ranarr.argmin())
# ## Shape
#
# Shape is an attribute that arrays have (not a method):
# Vector
print(arr.shape)
# Notice the two sets of brackets
print(arr.reshape(1,25))
print(arr.reshape(1,25).shape)
print(arr.reshape(25,1))
print(arr.reshape(25,1).shape)
# ### dtype
#
# You can also grab the data type of the object in the array:
print(arr.dtype)
|
17,358 | 293fd73802efb587fad1601ca0beffd992933124 | # Copyright 2015 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.tests.loader import TestLoader, LoaderException
from ducktape.tests.runner import SerialTestRunner
from ducktape.tests.reporter import SimpleStdoutReporter, SimpleFileReporter, HTMLReporter
from ducktape.tests.session import SessionContext
from ducktape.cluster.vagrant import VagrantCluster
from ducktape.command_line.config import ConsoleConfig
from ducktape.tests.session import generate_session_id, generate_results_dir
from ducktape.utils.local_filesystem_utils import mkdir_p
import argparse
import os
import sys
def parse_args():
    """Parse in command-line options.
    :rtype: argparse.Namespace
    """
    parser = argparse.ArgumentParser(description="Discover and run your tests")
    parser.add_argument('test_path', metavar='test_path', type=str, nargs='+',
                        help='one or more space-delimited strings indicating where to search for tests')
    # All remaining options are simple boolean flags.
    flag_specs = (
        ("--collect-only", "display collected tests, but do not run"),
        ("--debug", "pipe more verbose test output to stdout"),
        ("--exit-first", "exit after first failure"),
    )
    for flag, description in flag_specs:
        parser.add_argument(flag, action="store_true", help=description)
    return parser.parse_args()
def extend_import_paths(paths):
    """Extends sys.path with top-level packages found based on a set of input paths. This only adds top-level packages
    in order to avoid naming conflict with internal packages, e.g. ensure that a package foo.bar.os does not conflict
    with the top-level os package.
    Adding these import paths is necessary to make importing tests work even when the test modules are not available on
    PYTHONPATH/sys.path, as they normally will be since tests generally will not be installed and available for import
    :param paths:
    :return:
    """
    for path in paths:
        # Directory containing the path (or the path itself when it is a dir).
        dir = os.path.abspath(path if os.path.isdir(path) else os.path.dirname(path))
        # Walk up while still inside a package (__init__.py present) so we
        # append the directory *containing* the top-level package.
        while(os.path.exists(os.path.join(dir, '__init__.py'))):
            dir = os.path.dirname(dir)
        sys.path.append(dir)
def setup_results_directory(results_dir, session_id):
    """Make directory in which results will be stored.

    :param results_dir: per-session directory to create (must not exist yet)
    :param session_id: used only in the error message on collision
    :raises Exception: if a directory for this session already exists
    """
    if os.path.isdir(results_dir):
        raise Exception(
            "A test results directory with session id %s already exists. Exiting without overwriting..." % session_id)
    mkdir_p(results_dir)

    # Repoint the convenience "latest" symlink at this run's results.
    # lexists (not exists) so a *dangling* symlink left by a deleted results
    # dir is still removed; exists() follows the link, returns False, and
    # would leave the stale link in place, making os.symlink below fail.
    latest_test_dir = os.path.join(ConsoleConfig.RESULTS_ROOT_DIRECTORY, "latest")
    if os.path.lexists(latest_test_dir):
        os.unlink(latest_test_dir)
    os.symlink(results_dir, latest_test_dir)
def main():
    """Ducktape entry point. This contains top level logic for ducktape command-line program which does the following:
    Discover tests
    Initialize cluster for distributed services
    Run tests
    Report a summary of all results
    """
    args = parse_args()

    # Make .ducktape directory where metadata such as the last used session_id is stored
    if not os.path.isdir(ConsoleConfig.METADATA_DIR):
        os.makedirs(ConsoleConfig.METADATA_DIR)

    # Generate a shared 'global' identifier for this test run and create the directory
    # in which all test results will be stored
    session_id = generate_session_id(ConsoleConfig.SESSION_ID_FILE)
    results_dir = generate_results_dir(session_id)
    setup_results_directory(results_dir, session_id)
    session_context = SessionContext(session_id, results_dir, cluster=None, args=args)

    # Discover and load tests to be run
    extend_import_paths(args.test_path)
    loader = TestLoader(session_context)
    try:
        test_classes = loader.discover(args.test_path)
    except LoaderException as e:
        print "Failed while trying to discover tests: {}".format(e)
        sys.exit(1)

    if args.collect_only:
        print test_classes
        sys.exit(0)

    # Initializing the cluster is slow, so do so only if
    # tests are sure to be run
    session_context.cluster = VagrantCluster()

    # Run the tests
    runner = SerialTestRunner(session_context, test_classes)
    test_results = runner.run_all_tests()

    # Report results
    # TODO command-line hook for type of reporter
    reporter = SimpleStdoutReporter(test_results)
    reporter.report()
    reporter = SimpleFileReporter(test_results)
    reporter.report()

    # Generate HTML reporter
    reporter = HTMLReporter(test_results)
    reporter.report()

    # Exit nonzero when any test failed so callers/CI can detect failure.
    if not test_results.get_aggregate_success():
        sys.exit(1)
|
17,359 | a23bdfd3e1d2168bfa58590fd3851fcdfb0fe479 | # Generated by Django 3.0.4 on 2020-05-10 20:42
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='WordEng',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('word', models.CharField(help_text='Enter the word', max_length=200)),
('word_type', models.CharField(choices=[('noun', 'Noun'), ('verb', 'Verb'), ('adjective', 'Adjective'), ('adverb', 'Adverb'), ('other', 'Other')], help_text='Select type of word', max_length=30)),
],
options={
'ordering': ['word'],
'abstract': False,
},
),
migrations.CreateModel(
name='WordPol',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('word', models.CharField(help_text='Enter the word', max_length=200)),
('word_type', models.CharField(choices=[('noun', 'Noun'), ('verb', 'Verb'), ('adjective', 'Adjective'), ('adverb', 'Adverb'), ('other', 'Other')], help_text='Select type of word', max_length=30)),
('wordsEng', models.ManyToManyField(to='wordslearn.WordEng')),
],
options={
'ordering': ['word'],
'abstract': False,
},
),
]
|
17,360 | 9f7b602d943d10a3e39eb9e2d36a6990f3a7ce72 | names=['bob','alice','tom','jerry']
print(names[0])
print(names[1].title())
print(names[2].upper())
print(names[-1])
message=names[0]+",nice to meet u!"
print(message)
brands=("stusst","supreme","palace")
print("I would like to own a "+brands[1].title()+" pailide\n")
name_list=['dingding','lala','dixi','po']
invited=[name_list.pop(),name_list.pop()]
print(name_list)
print(invited)
print("I invite "+invited[0]+" and "+invited[1] +" to my party.")
print("but "+invited[0]+" can't come.")
name_list.append(invited.pop(0))
invited.append(name_list.pop(1))
print(name_list)
print(invited)
print("So I invite "+invited[0]+" and "+invited[1]+" come to my party.")
print("I find bigger restaurant.")
houbu=name_list.pop(0)
print(houbu)
invited.insert(0,houbu)
print(invited)
print("So I invite "+houbu+" to my party too.")
print("But the restuarant tell me that they have no extra table for me.")
name_list.append(invited.pop(0))
print(name_list)
print("sorry,"+name_list[-1]+".")
print(invited)
print("the num of invited people:"+str(len(invited)))
print("Let's go "+invited[0]+" and "+invited[1]+".")
del invited[0]
invited.remove('lala')
print(invited)
print("\n")
local=['xian','shanghai','tokyo','osaka','nagoya']
print(local)
print(sorted(local))
print(local)
print(sorted(local,reverse=True))
print(local)
local.reverse()
print(local)
local.reverse()
print(local)
local.sort()
print(local)
local.sort(reverse=True)
print(local)
print("\n")
Luxury=['gucci','ysl','lv','chanel','armani']
print(Luxury)
print(Luxury[0].title())
print(Luxury[-1].upper())
print("My friend like "+Luxury[2].upper()+".")
Luxury[3]='caoch'
print(Luxury)
Luxury.append('dior')
print(Luxury)
Luxury.insert(1,'hermes')
print(Luxury)
del Luxury[2]
print(Luxury)
buy=[Luxury.pop(-3)]
print(buy)
print(Luxury)
Luxury.remove('armani')
print(Luxury)
print(sorted(Luxury))
print(sorted(Luxury,reverse=True))
print(Luxury)
Luxury.sort()
print(Luxury)
Luxury.sort(reverse=True)
print(Luxury)
Luxury.reverse()
print(Luxury)
Luxury.reverse()
print(Luxury)
print("I like these "+str(len(Luxury))+" Luxury brands.") |
17,361 | 384afaafe357119fd0bea9b4e57349f67ea6965b | from SQL import querys as qy
import sqlite3
# Initialise the three tables (block, tx, filtered OP_RETURN) if missing.
def initTabel(connection):
    """Create the database schema via queries from the qy module and commit."""
    cursor = connection.cursor()
    __execute_command(cursor,qy.get_create_table_query())
    __execute_command(cursor,qy.get_create_tx_table_query())
    __execute_command(cursor,qy.get_create_filtered_OP())
    # never forget this, if you want that the changes are saved
    connection.commit()
# adding a block to table-block with corresponding information
def addBlock(connection, block_number, create_date):
    """Insert one row into the block table and commit."""
    cursor = connection.cursor()
    print("Add block to table")
    __execute_command(cursor,qy.get_add_block_query(block_number,create_date))
    # never forget this, if you want that the changes are saved
    connection.commit()
# adding a transaction to table-tx with corresponding information
def addTrans(connection, block_number, transaction_id, version, tx_size ,vin_size, vout_size, tx_time, tx_value, op_return):
    """Insert one transaction row into the tx table and commit."""
    cursor = connection.cursor()
    print("Add trans to table")
    __execute_command(cursor,qy.get_add_tx_query(transaction_id, version, tx_size ,vin_size, vout_size, tx_time, tx_value, op_return, block_number))
    # never forget this, if you want that the changes are saved
    connection.commit()
# adding a op_return field that is undefinable
def addOP(connection, block_number, transaction_id, prev_tx, tx_value, op_return, op_length, s_address, r_address, address_number, tx_time):
    """Insert one filtered OP_RETURN row and commit.

    NOTE(review): the log message says "Add trans to table" (copied from
    addTrans); kept verbatim since it is runtime output.
    """
    cursor = connection.cursor()
    print("Add trans to table")
    __execute_command(cursor,qy.get_add_filtered_OP( transaction_id, block_number, prev_tx, tx_value, op_return, op_length, s_address, r_address, address_number, tx_time))
    # never forget this, if you want that the changes are saved
    connection.commit()
# catch error if a block was added in table-block before
# catch error if a transaction was added in table-tx with corresponding information before
def __execute_command(cursor, sql_command):
    """Execute one SQL statement, suppressing sqlite3.IntegrityError.

    An IntegrityError here is typically a duplicate PRIMARY KEY on
    re-insert; it is logged and swallowed so bulk imports can continue.
    """
    try:
        sql_command = str(sql_command)
        cursor.execute(sql_command)
    except sqlite3.IntegrityError:
        print('Oops! That was no valid number: '+ str(sql_command) + 'Maybe the PRIMARY KEY exist!')
|
17,362 | cd28d526c6772ce02d9eb985e13a918fec3973e5 | # -*- coding:utf-8 -*-
from scrapy.spider import Spider, Request
from bs4 import BeautifulSoup
class MySpider(Spider):
    """Scrapy spider that crawls a CSDN blog and prints every post title."""

    name = "csdn"
    allowed_domains = ["blog.csdn.net"]
    start_urls = [
        "http://blog.csdn.net/temanm"
    ]

    # Collect the paginated article-list URLs and follow each one.
    def parse(self, response):
        based_url = "http://blog.csdn.net"
        list_result = ["http://blog.csdn.net/Temanm/article/list/1"]
        soup = BeautifulSoup(response.body, 'html.parser')
        pages = soup.find("div", "list_item_new").find("div", "pagelist").find_all("a")
        for i in range(len(pages)):
            href = based_url + pages[i].get("href")
            # Dedupe: the pager repeats links to pages already collected.
            if href not in list_result:
                list_result.append(href)
        for link in list_result:
            yield Request(link, callback=self.parse_link)

    # Extract each blog-post link from a list page.
    def parse_link(self, response):
        based_url = "http://blog.csdn.net"
        soup = BeautifulSoup(response.body, 'html.parser')
        blog = soup.find_all("div", "list_item article_item")
        for item in blog:
            # print item.find("span", "link_title").find("a").get("href"), item.find("span", "link_title").find("a").get_text()
            href = based_url + item.find("span", "link_title").find("a").get("href")
            yield Request(href, callback=self.parse_get_blog_title)

    # Print the article title from a single post page (Python 2 print).
    def parse_get_blog_title(self, response):
        soup = BeautifulSoup(response.body, 'html.parser')
        title = soup.find("div", "details").find("div", "article_title").find("span", "link_title").find("a")
        print title.get_text()
17,363 | 57e9c51eeef9bc0a16f2d5c44d2a4527b6abe7ef | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 15 18:30:50 2019
@author: Akhil
"""
import random
class StudentQueue:
    """FIFO queue of students.

    Each entry is a list [id, arrival_time, burst_time].  New entries are
    inserted at index 0, so the *end* of self.items is the front of the
    queue (hence the [-1] accessors below).
    """

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """Return True when the queue holds no students."""
        return self.items == []

    def enqueue(self, item, AT, BT):
        """Add a student (id `item`, arrival AT, burst BT) to the back.

        Uses a local record instead of the original `self.lst` attribute,
        which needlessly leaked the last-enqueued entry as instance state.
        """
        self.items.insert(0, [item, AT, BT])

    def dequeue(self):
        """Remove and return the front entry [id, AT, BT]."""
        return self.items.pop()

    def size(self):
        """Number of queued students."""
        return len(self.items)

    def head(self):
        """Arrival time of the front student."""
        return self.items[-1][1]

    def burst_time(self):
        """Burst time of the front student."""
        return self.items[-1][2]

    def id_no(self):
        """Id of the front student."""
        return self.items[-1][0]
class TeacherQueue:
    """FIFO queue of teachers; entries are [id, arrival_time, burst_time].

    Inserts happen at index 0, so the front of the queue is the *last*
    element of self.items.
    """

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """True when no teacher is waiting."""
        return self.items == []

    def enqueue(self, item, AT, BT):
        """Append a teacher record (id, arrival, burst) to the back."""
        self.lst = [item, AT, BT]
        self.items.insert(0, self.lst)

    def dequeue(self):
        """Pop and return the front record."""
        return self.items.pop()

    def size(self):
        """Number of waiting teachers."""
        return len(self.items)

    def head(self):
        # Front of the queue lives at the end of the backing list.
        return self.items[-1][1]

    def burst_time(self):
        return self.items[-1][2]

    def id_no(self):
        return self.items[-1][0]
SQ = StudentQueue()
TQ = TeacherQueue()
print("STUDENT TEACHER PROBLEM")
print()
print("Select Mode")
print("0. Pre defined mode")
print("1. Automatic Mode")
print("2. Mannual Mode")
print("Any other key to exit ONLY NUMERICS")
while True:
try:
ip_var = int(input("--> "))
break
except ValueError:
pass
#ip_var = 1
if ip_var == 0:
print("Predefined Mode Selected")
print()
Tat1, Tbt1 = 1, 2
Tat2, Tbt2 = 2, 2
Tat3, Tbt3 = 3, 2
Tat4, Tbt4 = 14, 3
TQ.enqueue(1, Tat1, Tbt1)
TQ.enqueue(2, Tat2, Tbt2)
TQ.enqueue(3, Tat3, Tbt3)
TQ.enqueue(4, Tat4, Tbt4)
Sat1, Sbt1 = 1, 2
Sat2, Sbt2 = 2, 2
SQ.enqueue(1, Sat1, Sbt1)
SQ.enqueue(2, Sat2, Sbt2)
teachers = TQ.size()
students = SQ.size()
elif ip_var == 1:
print("Automatic Mode Selected")
auto = 1
if auto == 1:
tchr = random.randint(1, 51)
lston = 0
for xx in range (tchr):
arrival_time = random.randint(1, 51)
if arrival_time < lston:
while True:
arrival_time = random.randint(1, 51)
if arrival_time >= lston:
break
lston = arrival_time
burst_time = random.randint(1, 51)
idno = xx+1
TQ.enqueue(idno, arrival_time, burst_time)
print("Teacher",idno, " AT:",arrival_time, " BT:",burst_time)
stdnt = random.randint(1, 51)
lston = 0
for xx in range (stdnt):
arrival_time = random.randint(1, 51)
if arrival_time < lston:
while True:
arrival_time = random.randint(1, 51)
if arrival_time >= lston:
break
lston = arrival_time
burst_time = random.randint(1, 51)
idno = xx+1
SQ.enqueue(idno, arrival_time, burst_time)
print("Student",idno, " AT:",arrival_time, " BT:",burst_time)
teachers = TQ.size()
students = SQ.size()
print("Teachers: ", teachers)
print("Students: ", students)
elif ip_var == 2:
print("User Mode Selected")
print()
while True:
try:
teachers = int(input("Enter the number of Teachers in the queue: ", ))
break
except ValueError:
pass
t_data = []
last_time = 0
if teachers >= 0:
for i in range(teachers):
print("Enter Arrival Time for Teacher ",i+1, end = "")
while True:
try:
AT = int(input())
break
except ValueError:
pass
if (AT < last_time):
while True:
print("AT can't be less then previous arrival time")
print("Enter Arrival Time for Teacher ",i+1, end = "")
while True:
try:
AT = int(input())
break
except ValueError:
pass
if last_time <= AT:
break
last_time = AT
print("Enter Burst Time for Teacher ",i+1, end = "")
while True:
try:
BT = int(input())
break
except ValueError:
pass
if BT <= 0:
while True:
print("Error: BT can't be less than 1 ##Min BT req: 1")
print("Enter Burst Time for Teacher ",i+1, end = "")
while True:
try:
BT = int(input())
break
except ValueError:
pass
if BT > 0:
break
temp_list = []
temp_list.append(AT)
temp_list.append(BT)
t_data.append(temp_list)
else:
while True:
print("Number of Teachers can't be less than 0")
while True:
try:
teachers = int(input("Pleas re-enter the number of Teachers: "))
break
except ValueError:
pass
if teachers >= 0:
for i in range(teachers):
print("Enter Arrival Time for Teacher ",i+1, end = "")
while True:
try:
AT = int(input())
break
except ValueError:
pass
if (AT < last_time):
while True:
print("AT can't be less then previous arrival time")
print("Enter Arrival Time for Teacher ",i+1, end = "")
while True:
try:
AT = int(input())
break
except ValueError:
pass
if last_time <= AT:
break
last_time = AT
print("Enter Burst Time for Teacher ",i+1, end = "")
while True:
try:
BT = int(input())
break
except ValueError:
pass
if BT <= 0:
while True:
print("Error: BT can't be less than 1 ##Min BT req: 1")
print("Enter Burst Time for Teacher ",i+1, end = "")
while True:
try:
BT = int(input())
break
except ValueError:
pass
if BT > 0:
break
temp_list = []
temp_list.append(AT)
temp_list.append(BT)
t_data.append(temp_list)
break
while True:
try:
students = int(input("Enter the nubers of Students in the queue: ", ))
break
except ValueError:
pass
s_data = []
last_time = 0
if students >= 0:
for i in range(students):
print("Enter Arrival Time for Student ",i+1, end = "")
while True:
try:
AT = int(input())
break
except ValueError:
pass
if (AT < last_time):
while True:
print("AT can't be less then previous arrival time")
print("Enter Arrival Time for Student ",i+1, end = "")
while True:
try:
AT = int(input())
break
except ValueError:
pass
if last_time <= AT:
break
last_time = AT
print("Enter Burst Time for Student ",i+1, end = "")
while True:
try:
BT = int(input())
break
except ValueError:
pass
if BT <= 0:
while True:
print("Error: BT can't be less than 1 ##Min BT req: 1")
print("Enter Burst Time for Student ",i+1, end = "")
while True:
try:
BT = int(input())
break
except ValueError:
pass
if BT > 0:
break
temp_list = []
temp_list.append(AT)
temp_list.append(BT)
s_data.append(temp_list)
else:
while True:
print("Number of Students can't be less than 0")
while True:
try:
students = int(input("Pleas re-enter the number of Students: "))
break
except ValueError:
pass
if students >= 0:
for i in range(students):
print("Enter Arrival Time for Student ",i+1, end = "")
while True:
try:
AT = int(input())
break
except ValueError:
pass
if (AT < last_time):
while True:
print("AT can't be less then previous arrival time")
print("Enter Arrival Time for Student ",i+1, end = "")
while True:
try:
AT = int(input())
break
except ValueError:
pass
if last_time <= AT:
break
last_time = AT
print("Enter Burst Time for Student ",i+1, end = "")
while True:
try:
BT = int(input())
break
except ValueError:
pass
if BT <= 0:
while True:
print("Error: BT can't be less than 1 ##Min BT req: 1")
print("Enter Burst Time for Student ",i+1, end = "")
while True:
try:
BT = int(input())
break
except ValueError:
pass
if BT > 0:
break
temp_list = []
temp_list.append(AT)
temp_list.append(BT)
s_data.append(temp_list)
break
for i in range (teachers):
TQ.enqueue(i+1, t_data[i][0], t_data[i][1])
for i in range(students):
SQ.enqueue(i+1, s_data[i][0], s_data[i][1])
else:
exit()
maxlen =teachers+students
student_priority = 0
if SQ.isEmpty() or TQ.isEmpty():
if SQ.isEmpty() and TQ.isEmpty():
print("Teacher and Student Queues are EMPTY")
elif SQ.isEmpty():
if TQ.isEmpty() != True:
curr_time = TQ.head()
else:
print("Both the Queues are EMPTY")
elif TQ.isEmpty():
curr_time = SQ.head()
else:
curr_time = min(SQ.head(), TQ.head())
t = teachers
s = students
j = 0
k = 0
'''
print("No of teachers: ", t)
print("No of students: ", s)
print("AT of first student is ", SQ.items[s-1][1])
print("AT of first teacher is ", TQ.items[t-1][1])
'''
for i in range(maxlen):
if (SQ.isEmpty()):
for i in range (teachers):
if TQ.isEmpty() == False:
print("Teacher ",TQ.id_no()," issued book")
curr_time += TQ.burst_time()
TQ.dequeue()
break
elif TQ.isEmpty():
for i in range (students):
if SQ.isEmpty() == False:
print("Student ",SQ.id_no()," issued book")
curr_time += SQ.burst_time()
SQ.dequeue()
break
elif student_priority == 2:
print("Student ",SQ.id_no()," issued book")
curr_time += SQ.burst_time()
student_priority = 0
SQ.dequeue()
else:
tchr = TQ.head()
stdnt = SQ.head()
if tchr <= stdnt:
if curr_time >= stdnt:
student_priority += 1
print("Teacher ", TQ.id_no()," issued book. Student Priority: ", student_priority)
curr_time += TQ.burst_time()
TQ.dequeue()
elif tchr > stdnt:
if curr_time >= tchr:
student_priority += 1
curr_time += TQ.burst_time()
print("Teacher ", TQ.id_no()," issued book. Student Priority: ", student_priority)
TQ.dequeue()
else:
curr_time += SQ.burst_time()
print("Student ", SQ.id_no()," issued book")
student_priority = 0
SQ.dequeue()
|
17,364 | 96ceb4ac7335111bf3873b52b036421fc425f913 | import sqlite3 as lite
SCHEMA = {
'settings':
[
"key PRIMARY KEY",
"value_1",
"value_2"
],
'torrents':
[
'id PRIMARY KEY',
'artist',
'album',
'release',
'quality',
'added DATETIME',
'downloaded BOOLEAN'
],
'user':
[
'username PRIMARY KEY',
'upload',
'download',
'ratio',
'requiredratio',
'class',
'notifications',
'newSubscriptions',
'messages',
'newBlog'
],
'subscriptions':
[
'id INTEGER PRIMARY KEY AUTOINCREMENT',
'search_type',
'term',
'quality',
'release_type'
]
}
DEFAULT_SETTINGS = [
['what_credentials', '', ''],
['webui_credentials', '', ''],
['network', '0.0.0.0', '2020'],
['torrent', '/torrents/', ''],
['domain', 'https://apollo.rip', ''],
['discord', '', '']
]
DB = 'config/data.sqlite3'
def init():
    """Create schema tables, seed default settings, and ensure one user row."""
    con = lite.connect(DB)
    for k in list(SCHEMA.keys()):
        con.cursor().execute("create table if not exists " + k + "(" + ", ".join(SCHEMA[k]) + ");")
    for setting in DEFAULT_SETTINGS:
        # Insert each default only when its key is not present yet.
        con.cursor().execute("insert into settings(key, value_1, value_2) select '" + "', '".join(setting) + "' where not exists(select 1 from settings where key = '" + setting[0] + "')")
    con.commit()
    # The user table is a singleton row; create it empty on first run.
    if (con.cursor().execute("select count(1) from user").fetchall() == [(0,)]):
        con.cursor().execute("insert into user(username) select ''")
        con.commit()
def update(query):
    """Execute a write query against the app database; always returns True."""
    con = lite.connect(DB)
    con.cursor().execute(query)
    con.commit()
    return True
def fetch(query):
    """Run *query* against the app database and return all result rows as tuples."""
    connection = lite.connect(DB)
    return connection.cursor().execute(query).fetchall()
def row_fetch(query):
    """Like fetch(), but rows support name-based access (sqlite3.Row factory)."""
    con = lite.connect(DB)
    con.row_factory = lite.Row
    return con.cursor().execute(query).fetchall()
def userinfo():
    """Return the single row of the user table (created by init())."""
    return fetch('select * from user')[0]
def subscriptions():
    """Return all saved subscriptions as a list of dicts.

    Each dict has keys: search_type, term, quality, release_type, id --
    matching the column order of the underlying SELECT.
    """
    keys = ('search_type', 'term', 'quality', 'release_type', 'id')
    rows = fetch('select search_type, term, quality, release_type, id from subscriptions')
    # Pair each row with the column names instead of indexing positionally,
    # so the mapping cannot drift out of sync with the SELECT list.
    return [dict(zip(keys, row)) for row in rows]
def delete_sub(id):
    """Delete the subscription with the given id.

    Coerces *id* to int before interpolating it into the SQL so an
    arbitrary string can never be injected into the statement (the
    original concatenated str(id) directly).  Raises ValueError/TypeError
    for non-numeric input instead of issuing a malformed query.
    """
    update("delete from subscriptions where id = " + str(int(id)))
|
17,365 | 88fb868b7adae0654b4941660e8e3e9f9162a2fb | #!/usr/bin/python
# Sample download of large blob file from Google Cloud Storage
# using (chunked) resumable downloads
# https://googlecloudplatform.github.io/google-resumable-media-python/latest/google.resumable_media.requests.html#chunked-downloads
import io
import urllib3
import google.auth.transport.requests as tr_requests
from google.oauth2 import service_account
from google.resumable_media.requests import ChunkedDownload
urllib3.disable_warnings()
keyfile = '<yourkeyfile.json>'
bucket = 'edl-west'
blob = '1G-test.bin'
outfile = '/tmp/output-test.bin'
credentials = service_account.Credentials.from_service_account_file(
keyfile,
scopes=['https://www.googleapis.com/auth/devstorage.read_only']
)
transport = tr_requests.AuthorizedSession(credentials)
url_template = (
u'https://www.googleapis.com/download/storage/v1/b/'
u'{bucket}/o/{blob_name}?alt=media')
media_url = url_template.format(
bucket=bucket, blob_name=blob)
chunk_size = 50 * 1024 * 1024 # 50MB
stream = io.BytesIO()
download = ChunkedDownload(
media_url, chunk_size, stream)
fd = open(outfile, 'wb')
while not download.finished:
response = download.consume_next_chunk(transport)
print download.bytes_downloaded
fd.write(response.content)
|
17,366 | 1af32d90f37a51337209e146de2a18d53c25a3f7 | import json
def get_stored_username():
    """Return the username stored in username.json, or None if no file exists."""
    try:
        with open('username.json') as f:
            return json.load(f)
    except FileNotFoundError:
        # First run: nothing has been stored yet.
        return None
def get_new_username():
    """Prompt for a username, persist it to username.json, and return it."""
    name = input('What\'s your name: ')
    with open('username.json', 'w') as fp:
        json.dump(name, fp)
    return name
def greet_user():
    """Greet a returning user, or register a new one.

    Fix vs. the original: when no username was stored,
    get_stored_username() returned None and the f-string called
    None.title(), raising AttributeError.  We now go straight to
    registration in that case.
    """
    username = get_stored_username()
    if username is None:
        # Nothing stored yet -- register a new user instead of crashing.
        username = get_new_username()
        print(f'We\'ll remember you when you come back {username.title()}')
        return
    correct = input(f'Hi are you {username.title()}? (y/n): ')
    if correct == 'y':
        print(f'Welcome back {username.title()}!')
    else:
        username = get_new_username()
        print(f'We\'ll remember you when you come back {username.title()}')
# Entry point: greet the stored user or prompt for a new name.
greet_user()
|
17,367 | 925388fb236a065e16a1f54dee2c1007feccc8f5 | from __future__ import division
import numpy as np
import soundfile as sf
from scipy.signal import spectrogram
import scipy.stats
from sklearn import linear_model
from . import timbral_util
def warm_region_cal(audio_samples, fs):
    """
    Function for calculating various warmth parameters.

    :param audio_samples: numpy.array, one-dimensional array of audio samples.
    :param fs: int, sample rate of the audio file.
    :return: four outputs: mean warmth region, weighted-average warmth region,
      mean high frequency level, weighted-average high frequency level.
    """
    def _band_weights(lo_band, hi_band):
        # Normalised (0..1) Gaussian-shaped weighting emphasising bark bands
        # [lo_band, hi_band) within a fixed 240-band array.
        centre = (lo_band + hi_band) / 2.0
        bands = np.arange(lo_band, hi_band)
        weights = timbral_util.normal_dist(bands, theta=0.01, mean=centre)
        weights -= np.min(weights)
        weights /= np.max(weights)
        out = np.zeros(240)
        out[lo_band:hi_band] = weights
        return out

    # window the audio
    windowed_samples = timbral_util.window_audio(audio_samples)

    # Warmth-region weighting (bark bands 10-40) and high-frequency
    # weighting (bark bands 80-240).
    wr_array = _band_weights(10, 40)
    hf_array = _band_weights(80, 240)

    windowed_loud_spec = []
    windowed_rms = []
    wr_vals = []
    hf_vals = []
    for frame_idx in range(windowed_samples.shape[0]):
        frame = windowed_samples[frame_idx, :]
        N_entire, N_single = timbral_util.specific_loudness(frame, Pref=100.0, fs=fs, Mod=0)
        # keep the specific-loudness spectrum and the frame RMS (used as the
        # averaging weight below)
        windowed_loud_spec.append(N_single)
        windowed_rms.append(np.sqrt(np.mean(frame * frame)))
        wr_vals.append(np.sum(wr_array * N_single))
        hf_vals.append(np.sum(hf_array * N_single))

    mean_wr = np.mean(wr_vals)
    mean_hf = np.mean(hf_vals)
    weighted_wr = np.average(wr_vals, weights=windowed_rms)
    weighted_hf = np.average(hf_vals, weights=windowed_rms)
    return mean_wr, weighted_wr, mean_hf, weighted_hf
def timbral_warmth(fname, dev_output=False, phase_correction=False, clip_output=False, max_FFT_frame_size=8192,
                   max_WR = 12000, fs=0):
    """
      This function estimates the perceptual Warmth of an audio file.

      This model of timbral_warmth contains self loudness normalising methods and can accept arrays as an input
      instead of a string filename.

      Version 0.4

      Required parameter
       :param fname:                 string, Audio filename to be analysed, including full file path and extension.

      Optional parameters
       :param dev_output:            bool, when False return the warmth, when True return all extracted features in a
                                     list.
       :param phase_correction:      bool, if the inter-channel phase should be estimated when performing a mono sum.
                                     Defaults to False.
       :param clip_output:           bool, when True clip the returned warmth into the 0-100 range.
       :param max_FFT_frame_size:    int, Frame size for calculating spectrogram, default to 8192.
       :param max_WR:                float, maximun allowable warmth region frequency, defaults to 12000.
       :param fs:                    int, sample rate; 0 lets timbral_util.file_read determine it from the file.

      :return:                       Estimated warmth of audio file.

      Copyright 2018 Andy Pearce, Institute of Sound Recording, University of Surrey, UK.
      Licensed under the Apache License, Version 2.0 (the "License");
      you may not use this file except in compliance with the License.
      You may obtain a copy of the License at
      http://www.apache.org/licenses/LICENSE-2.0
      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      See the License for the specific language governing permissions and
      limitations under the License.
    """
    '''
      Read input
    '''
    audio_samples, fs = timbral_util.file_read(fname, fs, phase_correction=phase_correction)

    # get the weighted high frequency content
    mean_wr, _, _, weighted_hf = warm_region_cal(audio_samples, fs)

    # calculate the onsets
    envelope = timbral_util.sample_and_hold_envelope_calculation(audio_samples, fs, decay_time=0.1)
    # NOTE(review): envelope_time is computed but never used below.
    envelope_time = np.arange(len(envelope)) / float(fs)

    # calculate the onsets
    nperseg = 4096
    original_onsets = timbral_util.calculate_onsets(audio_samples, envelope, fs, nperseg=nperseg)
    # If onsets don't exist, set it to time zero
    if not original_onsets:
        original_onsets = [0]
    # set to start of file in the case where there is only one onset
    if len(original_onsets) == 1:
        original_onsets = [0]
    '''
      Initialise lists for storing features
    '''
    # set defaults for holding
    all_rms = []
    all_ratio = []
    all_SC = []
    # NOTE(review): all_WR_Ratio is never populated or read below.
    all_WR_Ratio = []
    all_decay_score = []

    # calculate metrics for each onset
    for idx, onset in enumerate(original_onsets):
        if onset == original_onsets[-1]:
            # this is the last onset
            segment = audio_samples[onset:]
        else:
            segment = audio_samples[onset:original_onsets[idx+1]]

        segment_rms = np.sqrt(np.mean(segment * segment))
        all_rms.append(segment_rms)

        # get FFT of signal
        segment_length = len(segment)
        if segment_length < max_FFT_frame_size:
            freq, time, spec = spectrogram(segment, fs, nperseg=segment_length, nfft=max_FFT_frame_size)
        else:
            freq, time, spec = spectrogram(segment, fs, nperseg=max_FFT_frame_size, nfft=max_FFT_frame_size)

        # flatten the audio to 1 dimension. Catches some strange errors that cause crashes
        if spec.shape[1] > 1:
            spec = np.sum(spec, axis=1)
            spec = spec.flatten()

        # normalise for this onset
        spec = np.array(list(spec)).flatten()
        this_shape = spec.shape
        spec /= max(abs(spec))

        '''
          Estimate of fundamental frequency
        '''
        # peak picking algorithm
        peak_idx, peak_value, peak_x = timbral_util.detect_peaks(spec, freq=freq, fs=fs)
        # find lowest peak
        fundamental = np.min(peak_x)
        fundamental_idx = np.min(peak_idx)

        '''
          Warmth region calculation
        '''
        # estimate the Warmth region as 3.5x the fundamental, capped at max_WR
        WR_upper_f_limit = fundamental * 3.5
        if WR_upper_f_limit > max_WR:
            WR_upper_f_limit = 12000
        tpower = np.sum(spec)
        WR_upper_f_limit_idx = int(np.where(freq > WR_upper_f_limit)[0][0])
        if fundamental < 260:
            # find frequency bin closest to 260Hz
            top_level_idx = int(np.where(freq > 260)[0][0])
            # sum energy up to this bin
            low_energy = np.sum(spec[fundamental_idx:top_level_idx])
            # sum all energy
            tpower = np.sum(spec)
            # take ratio
            ratio = low_energy / float(tpower)
        else:
            # make exception where fundamental is greater than
            ratio = 0
        all_ratio.append(ratio)

        '''
          Spectral centroid of the segment
        '''
        # spectral centroid
        # NOTE(review): `top` and `bottom` duplicate the quantities recomputed
        # for SC on the next line and are otherwise unused.
        top = np.sum(freq * spec)
        bottom = float(np.sum(spec))
        SC = np.sum(freq * spec) / float(np.sum(spec))
        all_SC.append(SC)

        '''
          HF decay
          - linear regression of the values above the warmth region
        '''
        above_WR_spec = np.log10(spec[WR_upper_f_limit_idx:])
        above_WR_freq = np.log10(freq[WR_upper_f_limit_idx:])
        # NOTE(review): the result of this bare expression is discarded.
        np.ones_like(above_WR_freq)
        metrics = np.array([above_WR_freq, np.ones_like(above_WR_freq)])

        # create a linear regression model
        model = linear_model.LinearRegression(fit_intercept=False)
        model.fit(metrics.transpose(), above_WR_spec)
        decay_score = model.score(metrics.transpose(), above_WR_spec)
        all_decay_score.append(decay_score)

    '''
      get mean values
    '''
    mean_SC = np.log10(np.mean(all_SC))
    mean_decay_score = np.mean(all_decay_score)
    weighted_mean_ratio = np.average(all_ratio, weights=all_rms)

    if dev_output:
        return mean_SC, weighted_hf, mean_wr, mean_decay_score, weighted_mean_ratio
    else:
        '''
          Apply regression model
        '''
        # 5 features plus a constant term (all_metrics[5] stays 1.0 and pairs
        # with the intercept coefficient below).
        all_metrics = np.ones(6)
        all_metrics[0] = mean_SC
        all_metrics[1] = weighted_hf
        all_metrics[2] = mean_wr
        all_metrics[3] = mean_decay_score
        all_metrics[4] = weighted_mean_ratio

        coefficients = np.array([-4.464258317026696,
                                 -0.08819320850778556,
                                 0.29156539973575546,
                                 17.274733561081554,
                                 8.403340066029507,
                                 45.21212125085579])

        warmth = np.sum(all_metrics * coefficients)

        # clip output between 0 and 100
        if clip_output:
            warmth = timbral_util.output_clip(warmth)

        return warmth
|
17,368 | 086be2ec13dcd7af28fb06290ca287c375a9fa6f | #!/usr/bin/env python
# encoding: utf-8
import os
import time
import argparse
import sys
sys.path.append('..')
sys.path.append('.')
import tensorflow as tf
from sklearn.utils import shuffle
from keras.callbacks import TensorBoard
from keras.models import Model, load_model
from keras.utils.vis_utils import plot_model
try:
from dataset import load_tax_data, prepare_tax_dual
from utils import llprint
from model_keras_new import build
from metrics import metrics_non_multi, roc_auc_non_multi, prc_auc_non_multi
except ImportError:
from .dataset import load_tax_data, prepare_tax_dual
from .utils import llprint
from .model_keras_new import build
from .metrics import metrics_non_multi, roc_auc_non_multi, prc_auc_non_multi
# Build the parser once, with its description (the original constructed a
# throw-away ArgumentParser() that was immediately overwritten).
parser = argparse.ArgumentParser(description="attention_and_memory_augmented_networks")
parser.add_argument('--datapath', type=str, default='../tax-data/records.csv', help='data path')
parser.add_argument('--run_mode', type=str, default='test', choices=['train','test'], help='run mode')
parser.add_argument('--debug', action='store_true', help='debug')
parser.add_argument('--no_tensorboard', action='store_true', help='not use tensorboard')
parser.add_argument('--no_embed_trainable', action='store_true', help='embed not trainable')
parser.add_argument('--embed_size', type=int, default=100, help='embed size')
parser.add_argument('--no_position_embed', action='store_true', help='use position embed or not')
parser.add_argument('--position_embed_size', type=int, default=100, help='position embed size')
parser.add_argument('--position_embed_mode', type=str, default='sum', choices=['sum','concat'], help='position embed mode[sum,concat]')
parser.add_argument('--self_attention_units', type=int, default=64, help='self attention units')
parser.add_argument('--self_attention_num_heads', type=int, default=4, help='self attention num heads')
parser.add_argument('--no_history', action='store_true', help='use history attention or not')
parser.add_argument('--no_interaction', action='store_true', help='use interaction attention or not')
parser.add_argument('--no_memory', action='store_true', help='remove memory or not')
parser.add_argument('--memory_word_num', type=int, default=256, help='memory word num')
parser.add_argument('--memory_word_size', type=int, default=64, help='memory word size')
parser.add_argument('--memory_read_heads', type=int, default=4, help='memory read heads')
parser.add_argument('--feature_size', type=int, default=256, help='feature size')
parser.add_argument('--multi', action='store_true', help='multi-label classification or not')
parser.add_argument('--epochs', type=int, default=10, help='epochs')
parser.add_argument('--focal_loss', action='store_false', help='use focal loss')
parser.add_argument('--focal_loss_alpha', type=float, default=0.6, help='focal loss alpha')
parser.add_argument('--focal_loss_gamma', type=float, default=2.0, help='focal loss gamma')
parser.add_argument('--optimizer', type=str, default='adam', help='optimizer')
parser.add_argument('--lr', type=float, default=0.00005, help='learning rate')
parser.add_argument('--lr_decay', type=float, default=1e-6, help='learning rate decay')
parser.add_argument('--model_path', type=str, help='model path')
args = parser.parse_args()
model_name = "AMANet-tax"
# time
time_str = time.strftime("%Y%m%d%H%M%S", time.localtime())
def write_log(callback, names, logs, epoch_no):
    """Write one scalar summary per (name, value) pair into the TensorBoard
    callback's event writer, tagged with the epoch number."""
    for tag, value in zip(names, logs):
        summary = tf.Summary()
        entry = summary.value.add()
        entry.simple_value = value
        entry.tag = tag
        callback.writer.add_summary(summary, epoch_no)
        callback.writer.flush()
CallBack = TensorBoard(log_dir=('../tb-logs/tax-task/%s/%s' %(model_name, time_str)), # log dir
histogram_freq=0,
write_graph=True,
write_grads=True,
write_images=True,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None)
train_names = ['train_loss']
val_names = ["val_acc", "val_prec", "val_recall", "val_f1", "val_prauc", "val_roc_auc"]
def train(config):
    """Train the model described by `config`, evaluating on the validation
    set each epoch, checkpointing every epoch, and renaming the best-f1
    checkpoint with a `_best` suffix at the end."""
    # model save path
    model_save_dir = os.path.join("../model/tax-task", model_name, time_str)
    if not os.path.exists(model_save_dir):
        os.makedirs(model_save_dir)
    # log save path
    log_save_dir = os.path.join("../logs/tax-task", model_name, time_str)
    if not os.path.exists(log_save_dir):
        os.makedirs(log_save_dir)
    # load data
    data_train, data_valid, data_test, voc_size = load_tax_data(config["datapath"])
    # input1 vocab size
    config["vocab_size1"] = voc_size[0]
    # input2 vocab size
    config["vocab_size2"] = voc_size[1]
    # output vocab size
    config["output_size"] = voc_size[2]
    # build model
    model = build(config)
    # plot model graph
    model_graph_file = os.path.join(model_save_dir, ("model_%s.png" % time_str))
    plot_model(model, to_file=model_graph_file)
    # model summary
    model.summary()
    # model tensorboard logs
    CallBack.set_model(model)
    # eval logs
    # NOTE(review): `file` shadows the builtin of the same name for the rest
    # of this function.
    file = open(os.path.join(log_save_dir, "statistic_%s.txt" % time_str), "w+")
    file.write(str(config)+"\n")
    model.summary(print_fn=lambda x: file.write(x + '\n'))
    train_size = len(data_train)
    best_f1 = 0.0
    best_epoch = 0
    best_model = ""
    # train
    for epoch in range(config["epochs"]):
        # new epoch: reshuffle the training set
        data_train = shuffle(data_train)
        start_time = time.time()
        llprint("Epoch %d/%d\n" % (epoch + 1, config["epochs"]))
        losses = []
        train_pred_output_prob = []
        train_pred_output = []
        train_real_output = []
        file.write("Epoch: %d/%d\n" % ((epoch + 1), config["epochs"]))
        for patient_index in range(train_size):
            llprint("\rBatch %d/%d" % (patient_index + 1, train_size))
            # fetch the dual input sequence for the index-th company
            input_vec1, input_vec2, output_vec, o = prepare_tax_dual(data_train, index=patient_index)
            train_real_output.append(o[0])
            res = model.train_on_batch([input_vec1, input_vec2], output_vec)
            losses.append(res[0])
            prob = res[1][0][0]
            train_pred_output_prob.append(prob)
            # 0.5 decision threshold for the binary label
            if prob >= 0.5:
                train_pred_output.append(1)
            else:
                train_pred_output.append(0)
        end_time = time.time()
        elapsed_time = (end_time - start_time) / 60
        train_acc, train_prec, train_recall, train_f1 = metrics_non_multi(train_real_output, train_pred_output)
        train_roc_auc = roc_auc_non_multi(train_real_output, train_pred_output_prob)
        train_prauc = prc_auc_non_multi(train_real_output, train_pred_output_prob)
        if config["use_tensorboard"]:
            train_logs = [sum(losses)/len(losses)]
            write_log(CallBack, train_names, train_logs, epoch+1)
        print('')
        acc, pre, recall, f1, prauc, roc_auc = model_eval(model, data_valid, config)
        if config["use_tensorboard"]:
            val_logs = [acc, pre, recall, f1, prauc, roc_auc]
            write_log(CallBack, val_names, val_logs, epoch+1)
        file.write("spend time to train: %.2f min\n" % elapsed_time)
        file.write("train loss: %f\n" % (sum(losses)/ len(losses)))
        file.write("valid acc: %f, prec: %f, recall: %f, f1: %f, prauc: %f, roc_auc: %f\n" % (acc, pre, recall, f1, prauc, roc_auc))
        print("spend time to train: %.2f min" % elapsed_time)
        print("train loss: %f, acc: %f, prec: %f, recall: %f, f1: %f, prauc: %f, roc_auc: %f" % ((sum(losses)/ len(losses)), train_acc, train_prec, train_recall, train_f1, train_prauc, train_roc_auc))
        print("valid acc: %f, prec: %f, recall: %f, f1: %f, prauc: %f, roc_auc: %f" % (acc, pre, recall, f1, prauc, roc_auc))
        # checkpoint every epoch; track the best validation f1
        model_save_path = os.path.join(model_save_dir, 'model_%d_%s_%.4f.h5' % ((epoch+1), time_str, f1))
        model.save(model_save_path)
        if best_f1 < f1:
            best_f1 = f1
            best_epoch = epoch + 1
            best_model = model_save_path
        acc, pre, recall, f1, prauc, roc_auc = model_eval(model, data_test, config, type="test")
        print("test acc: %f, prec: %f, recall: %f, f1: %f, prauc: %f, roc_auc: %f" % (acc, pre, recall, f1, prauc, roc_auc))
        file.write("test acc: %f, prec: %f, recall: %f, f1: %f, prauc: %f, roc_auc: %f\n" % (acc, pre, recall, f1, prauc, roc_auc))
        file.write("###############################################################\n")
        print("###############################################################\n")
        file.flush()
    # NOTE(review): the messages below report the pre-rename path, while the
    # file on disk now carries the `_best` suffix.
    os.rename(best_model, best_model.replace(".h5", "_best.h5"))
    print("train done. best epoch: %d, best: f1: %f, model path: %s" % (best_epoch, best_f1, best_model))
    file.write("train done. best epoch: %d, best: f1: %f, model path: %s\n" % (best_epoch, best_f1, best_model))
    CallBack.on_train_end(None)
    file.close()
# evaluate
def model_eval(model, dataset, config, type="eval"):
    """Evaluate `model` on `dataset` and return the tuple
    (acc, prec, recall, f1, prauc, roc_auc)."""
    real = []
    probs = []
    preds = []
    n_samples = len(dataset)
    # Read predictions from the layer named 'output'.
    layer_model = Model(inputs=model.input, outputs=[model.get_layer('output').output])
    print("#####################%s#####################" % type)
    for sample_idx in range(n_samples):
        llprint("\rBatch: %d/%d" % (sample_idx + 1, n_samples))
        input_vec1, input_vec2, output_vec, o = prepare_tax_dual(dataset, index=sample_idx)
        prob = layer_model.predict([input_vec1, input_vec2])[0][0]
        real.append(o[0])
        probs.append(prob)
        # 0.5 decision threshold for the binary label
        preds.append(1 if prob >= 0.5 else 0)
    print('')
    acc, prec, recall, f1 = metrics_non_multi(real, preds)
    roc_auc = roc_auc_non_multi(real, probs)
    prauc = prc_auc_non_multi(real, probs)
    return acc, prec, recall, f1, prauc, roc_auc
if __name__ == "__main__":
print("#####################args#####################")
print(args)
config = {
"datapath": args.datapath,
"run_mode": args.run_mode,
"debug": args.debug,
"use_tensorboard": not args.no_tensorboard,
"has_position_embed": not args.no_position_embed,
"has_memory": not args.no_memory,
"has_history": False, #not args.no_history,
"has_interaction": not args.no_interaction,
"vocab_size1": 2000,
"vocab_size2": 2000,
"output_size": 1,
"embed_trainable": not args.no_embed_trainable,
"embed_size": args.embed_size,
"position_embed_size": args.position_embed_size,
"position_embed_mode": args.position_embed_mode,
"self_attention_units": args.self_attention_units,
"self_attention_num_heads": args.self_attention_num_heads,
"memory_word_num": args.memory_word_num,
"memory_word_size": args.memory_word_size,
"memory_read_heads": args.memory_read_heads,
"feature_size": args.feature_size,
"multi": args.multi,
"epochs": args.epochs,
"optimizer": args.optimizer,
"focal_loss": args.focal_loss,
"focal_loss_alpha": args.focal_loss_alpha,
"focal_loss_gamma": args.focal_loss_gamma,
"lr":args.lr,
"lr_decay":args.lr_decay
}
print("#####################config#####################")
print(config)
if config["run_mode"] == "train":
config["datapath"]='../tax-data/records.csv'
train(config=config)
else:
model = build(config)
model.load_weights(os.path.abspath(args.model_path))
data_train, data_valid, data_test, voc_size = load_tax_data(data_path='../tax-data/records.csv')
acc, pre, recall, f1, prauc, roc_auc = model_eval(model, data_test, config, type="test")
print("test acc: %f, prec: %f, recall: %f, f1: %f, prauc: %f, roc_auc: %f" % (acc, pre, recall, f1, prauc, roc_auc))
|
17,369 | 8912ae21856ce4c73cce10739595938b9dc4947d | #!/usr/bin/env python
# SUMMARY: hey'
# COMPLETE
# START HELP
# this is an example of a simple script written in python.
# END HELP
import sys

# When the shell requests autocompletion it re-invokes the script with the
# current argument list followed by "--complete"; in that mode we emit the
# completion candidates instead of running normally.
args = sys.argv[1:]
if args and args[0] == "--complete":
    sys.stdout.write("file autocomplete example")
else:
    print("hello")
|
17,370 | 1f4d8294919fedd85500291554319f9b2c14b021 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 30 14:12:14 2019
@author: jaimiecapps
"""
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
import matplotlib.pyplot as plt
# Load the sample and build the feature matrix X / target vector Y
# (assessed land value).
df = pd.read_csv('sample_0.011.csv')
df.drop(['block', 'lot', 'assesstot'], axis=1, inplace=True)
categorical_vars = ['cd', 'schooldist', 'council', 'zipcode', 'firecomp',
       'policeprct', 'healtharea', 'sanitboro', 'sanitsub', 'zonedist1',
       'spdist1', 'ltdheight', 'landuse', 'ext', 'proxcode', 'irrlotcode', 'lottype',
       'borocode','edesignum', 'sanitdistrict', 'healthcenterdistrict', 'pfirm15_flag']
df_dummies = pd.get_dummies(df[categorical_vars], drop_first=False) #keep all dummies to evaluate importance, for the prediction should say drop_first=True
df.drop(categorical_vars, axis=1, inplace=True)
df = pd.concat([df, df_dummies], axis=1)
# NOTE(review): X = df[df.columns] may return a view; the in-place drop below
# can raise SettingWithCopyWarning -- consider X = df.drop('assessland', axis=1).
X = df[df.columns]
X.drop('assessland', axis=1, inplace=True)
predictors = X.columns
X = X.values
Y = df['assessland'].values
# sigmoid function
# A sigmoid function maps any value to a value between 0 and 1.
# We use it to convert numbers to probabilities.
def nonlin(x, deriv=False):
    """Sigmoid activation.  With deriv=True, x is assumed to already be a
    sigmoid output and the derivative x*(1-x) is returned instead."""
    if deriv:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))
# input dataset: X (features) and Y (targets) are built above from the
# dataframe.

# seed random numbers to make the calculation deterministic (good practice)
np.random.seed(1)

# initialize weights randomly with mean 0.
# Fixes vs. the original:
#  * the hard-coded (3, 1) shape only worked for exactly 3 input features --
#    size the weight matrix from the actual feature count instead
#  * the duplicated initialisation line is removed
syn0 = 2 * np.random.random((X.shape[1], 1)) - 1

# Targets as a column vector; the original subtracted the (undefined) name
# `y` from l1, and a 1-D Y would broadcast to an (n, n) error matrix.
targets = Y.reshape(-1, 1)

for iteration in range(10000):  # range(): xrange() does not exist on Python 3
    # forward propagation: l0 is the input layer, l1 the prediction
    l0 = X
    l1 = nonlin(np.dot(l0, syn0))

    # how much did we miss?
    l1_error = targets - l1

    # scale the error by the slope of the sigmoid at the values in l1
    l1_delta = l1_error * nonlin(l1, True)

    # update weights
    syn0 += np.dot(l0.T, l1_delta)

print("Output After Training:")
print(l1)
|
17,371 | 435e1a3ef0cdd91a0933d63347497647f623074b | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-07-09 00:34
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the `data_criacao_*` fields on Duvida and Monitoria to the
    common name `data_criacao`."""

    dependencies = [
        ('tasabido', '0016_auto_20160708_1158'),
    ]

    operations = [
        # Duvida.data_criacao_duvida -> Duvida.data_criacao
        migrations.RenameField(
            model_name='duvida',
            old_name='data_criacao_duvida',
            new_name='data_criacao',
        ),
        # Monitoria.data_criacao_monitoria -> Monitoria.data_criacao
        migrations.RenameField(
            model_name='monitoria',
            old_name='data_criacao_monitoria',
            new_name='data_criacao',
        ),
    ]
|
17,372 | 6f0d4f340cf33738886ed807a66d47feed5e96a2 | import string
import math
import readline
# Mapping from the leading letter of a Taiwanese national ID to the
# two-digit numeric code used by the checksum algorithm.
locationCode = {
    'A': 10, 'B': 11, 'C': 12, 'D': 13, 'E': 14, 'F': 15, 'G': 16,
    'H': 17, 'I': 34, 'J': 18, 'K': 19, 'L': 20, 'M': 21, 'N': 22,
    'O': 35, 'P': 23, 'Q': 24, 'R': 25, 'S': 26, 'T': 27, 'U': 28,
    'V': 29, 'W': 32, 'X': 30, 'Y': 31, 'Z': 33,
}


def check(id):
    """Validate a Taiwanese national ID number.

    Expected format: one letter followed by nine digits, where the first
    digit (the gender code) is 1 or 2.  Returns True when the weighted
    checksum is divisible by 10, False otherwise.

    Fixes vs. the original: the format test indexed the `int` type
    (`int[...]`, a TypeError on non-digit input), compared the gender
    character against integers, and fell through to the checksum after
    printing the format error.  Malformed input now returns False.
    """
    if (len(id) != 10 or not id[0].isalpha()
            or not id[1:].isdigit() or id[1] not in '12'):
        print('Error: wrong format')
        return False
    # Convert 1st Alphabet to Numeric code
    encodeID = list(str(locationCode[id[0].upper()]))
    encodeID.extend(list(id[1:]))
    # First digit has weight 1; the remaining digits are weighted
    # 9, 8, ..., 1 and the final digit gets weight 1 again.
    checkSum = int(encodeID[0])
    para = 9
    for n in encodeID[1:]:
        para = [para, 1][para == 0]
        checkSum += int(n) * para
        para -= 1
    # Valid IDs have a checksum divisible by 10.
    return checkSum % 10 == 0
ID=input("輸入身份證(A123??6789):")
digit=ID.count('?')
poss = 0
for i in range(0,int(math.pow(10,digit))):
tmpID=ID.replace("?"*digit,str(i).zfill(digit))
if(check(tmpID)):
poss+=1
print(tmpID)
print('\n共有 %s 種可能' % poss)
|
17,373 | 3287e33e29f7abb8fcb038554c9fe6c39c54c879 | def expand_volume(self):
is_thin = self.volume_detail['thinProvisioned']
if is_thin:
self.debug('expanding thin volume')
thin_volume_expand_req = dict(newVirtualSize=self.size, sizeUnit=self.size_unit)
try:
(rc, resp) = request((self.api_url + ('/storage-systems/%s/thin-volumes/%s/expand' % (self.ssid, self.volume_detail['id']))), data=json.dumps(thin_volume_expand_req), headers=self._post_headers, method='POST', url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
except Exception:
err = get_exception()
self.module.fail_json(msg=('Failed to expand thin volume. Volume [%s]. Array Id [%s]. Error[%s].' % (self.name, self.ssid, str(err))))
else:
self.debug('expanding volume')
volume_expand_req = dict(expansionSize=self.size, sizeUnit=self.size_unit)
try:
(rc, resp) = request((self.api_url + ('/storage-systems/%s/volumes/%s/expand' % (self.ssid, self.volume_detail['id']))), data=json.dumps(volume_expand_req), headers=self._post_headers, method='POST', url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
except Exception:
err = get_exception()
self.module.fail_json(msg=('Failed to expand volume. Volume [%s]. Array Id [%s]. Error[%s].' % (self.name, self.ssid, str(err))))
self.debug('polling for completion...')
while True:
try:
(rc, resp) = request((self.api_url + ('/storage-systems/%s/volumes/%s/expand' % (self.ssid, self.volume_detail['id']))), method='GET', url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs)
except Exception:
err = get_exception()
self.module.fail_json(msg=('Failed to get volume expansion progress. Volume [%s]. Array Id [%s]. Error[%s].' % (self.name, self.ssid, str(err))))
action = resp['action']
percent_complete = resp['percentComplete']
self.debug(('expand action %s, %s complete...' % (action, percent_complete)))
if (action == 'none'):
self.debug('expand complete')
break
else:
time.sleep(5) |
17,374 | 5dc5c2e1fb5274960874ef90a484d5c32e8bddea |
# DAY 001
# PART 001
input_array = []
with open('Data/input001.txt', 'r') as raw_data:
for line in raw_data:
input_array.append(int(line))
freq = 0
for adjust in input_array:
freq += adjust
print('1-loop freq: %i' % freq)
# PART 002
freq = 0
freqs = []
dup_found = False
while not dup_found:
for adjust in input_array:
freq += adjust
if freq in freqs:
print('Duplicate freq: %i' % freq)
dup_found = True
freqs.append(freq)
|
17,375 | 24872930c925604876d516b8e10560aa04fa553a | file = open("input1","r")
i = int(input('Digit the number of the line that you want to change: '))-1
text = input('Write the text you want to append: ')

# Build a numbered copy of the file's lines, then replace line i with the
# new text.  Fix vs. the original: `counter` was reset to 1 inside the loop,
# so every line was numbered "1."; enumerate() numbers them correctly.
list_of_lines = []
for counter, line in enumerate(file, start=1):
    list_of_lines.append(str(counter) + '. ' + line.strip('\n'))
list_of_lines[i] = text
print(list_of_lines)
file.close()
""" file = open("input1","w")
readfile.writelines(readfile)
file.close() """
17,376 | 8c8044af8676dccb1596ba1ead010fa5407e8a94 | """
Logic for dashboard related routes
"""
from flask import Blueprint, render_template
from .forms import LogUserForm, secti,masoform
from ..data.database import db
from ..data.models import LogUser, Stats
from datetime import datetime, timedelta
import urllib2
import json
import os
blueprint = Blueprint('public', __name__)
os.environ['no_proxy']='*'
@blueprint.route('/', methods=['GET'])
def index():
    # Public landing page.
    return render_template('public/index.tmpl')
@blueprint.route('/loguserinput',methods=['GET', 'POST'])
def InsertLogUser():
    # Render the LogUser form; on a valid POST, persist a new LogUser row
    # and re-render the (now empty) form.
    form = LogUserForm()
    if form.validate_on_submit():
        LogUser.create(**form.data)
    return render_template("public/LogUser.tmpl", form=form)
@blueprint.route('/loguserlist',methods=['GET'])
def ListuserLog():
    """List every LogUser row."""
    rows = db.session.query(LogUser).all()
    return render_template("public/listuser.tmpl", data=rows)
@blueprint.route('/secti', methods=['GET','POST'])
def scitani():
    """Add two numbers submitted through the `secti` form."""
    form = secti()
    if not form.validate_on_submit():
        return render_template('public/secti.tmpl', form=form)
    first = form.hodnota1.data
    second = form.hodnota2.data
    return render_template('public/vystup.tmpl', hod1=first, hod2=second, suma=first + second)
@blueprint.route('/maso', methods=['GET','POST'])
def masof():
    """Add two numbers submitted through the `masoform` form."""
    form = masoform()
    if not form.validate_on_submit():
        return render_template('public/maso.tmpl', form=form)
    first = form.hodnota1.data
    second = form.hodnota2.data
    return render_template('public/masovystup.tmpl', hod1=first, hod2=second, suma=first + second)
def getData():
    # Fetch the JSON feed from the internal data host.
    # NOTE(review): urllib2 is Python 2 only; this module appears to target
    # Python 2 (see the top-level urllib2 import).
    r = urllib2.urlopen('http://192.168.10.1:5001/data.json')
    data = json.load(r)
    return data
@blueprint.route('/stats', methods=['GET','POST'])
def stats():
    """Show Stats rows recorded during the last 10 minutes."""
    cutoff = datetime.now() - timedelta(minutes=10)
    recent = list(db.session.query(Stats).filter(Stats.cas > cutoff).all())
    return render_template('public/stats.tmpl', data=recent)
|
17,377 | 645315bc923aa5c22c7f89d9abbbf3b5fe13c29d | # -*- coding: utf-8 -*-
"""
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import pi
try:
_ = np.use_fastnumpy
from numpy.fft import fft, ifft, rfft, irfft
except AttributeError:
from scipy.fftpack import fft, ifft
from numpy.fft import rfft, irfft
from scipy.optimize import leastsq
from scipy.stats import norm
class IdealObs(object):
    """Statistical ideal observer.

    Converts input values (usually SNRenv) to a percentage.

    Parameters
    ----------
    k : float, optional
         (Default value = sqrt(1.2)
    q : float, optional
        (Default value = 0.5)
    sigma_s : float, optional
        (Default value = 0.6)
    m : int, optional
        Number of words in the vocabulary. (Default value = 8000)

    Examples
    --------
    Converting SNRenv values to percent correct using the default parameters
    of the ideal observer:

    >>> from pambox import central
    >>> obs = central.IdealObs()
    >>> obs.snrenv_to_pc((0, 1, 2, 3))

    References
    ----------
    .. [JD11] Jørgensen, Søren, and Torsten Dau. "Predicting speech
        intelligibility based on the signal-to-noise envelope power ratio
        after modulation-frequency selective processing." The Journal of the
        Acoustical Society of America 130.3 (2011): 1475-1487.
    """
    def __init__(self, k=np.sqrt(1.2), q=0.5, sigma_s=0.6, m=8000.):
        self.k = k
        self.q = q
        self.sigma_s = sigma_s
        self.m = m

    def get_params(self):
        """Returns the parameters of the ideal observer as dict.

        Parameters
        ----------
        None

        Returns
        -------
        params : dict
            Dictionary of internal parameters of the ideal observer.
        """
        return {'k': self.k, 'q': self.q, 'sigma_s': self.sigma_s, 'm': self.m}

    def fit_obs(self, snrenv, pcdata, sigma_s=None, m=None):
        """Finds the parameters of the ideal observer.

        Finds the paramaters `k`, `q`, and `sigma_s`, that minimize the
        least-square error between a data set and transformed SNRenv.

        By default the `m` parameter is fixed and the property `m` is used.
        It can also be defined as an optional parameter.

        It is also possible to fix the `sigma_s` parameter by passing it as
        an optional argument. Otherwise, it is optimized with `k` and `q`.

        Parameters
        ----------
        snrenv : ndarray
            The linear SNRenv values that are to be converted to percent
            correct.
        pcdata : ndarray
            The data, in percentage between 0 and 1, of correctly understood
            tokens. Must be the same shape as `snrenv`.
        sigma_s : float, optional
             (Default value = None)
        m : float, optional
             (Default value = None)

        Returns
        -------
        self
        """
        if not m:
            m = self.m
        else:
            self.m = m

        # NOTE(review): the `snr` lambda parameter is unused; the
        # closed-over `snrenv` array is referenced instead.  Behaviour is
        # the same because leastsq passes `snrenv` as that argument.
        if sigma_s:
            # sigma_s fixed: optimise k and q only.
            errfc = lambda p, snr, data: self._snrenv_to_pc(snrenv,
                                                            p[0],
                                                            p[1],
                                                            sigma_s,
                                                            m) - data
            p0 = [self.k, self.q]
        else:
            # Optimise k, q and sigma_s together.
            errfc = lambda p, snr, data: self._snrenv_to_pc(snrenv,
                                                            p[0],
                                                            p[1],
                                                            p[2],
                                                            m) - data
            p0 = [self.k, self.q, self.sigma_s]
        res = leastsq(errfc, p0, args=(snrenv, pcdata))[0]
        if sigma_s:
            self.k, self.q = res
            self.sigma_s = sigma_s
        else:
            self.k, self.q, self.sigma_s = res
        return self

    @staticmethod
    def _snrenv_to_pc(snrenv, k=None, q=None, sigma_s=None, m=None):
        """Converts SNRenv values to percent correct using an ideal observer.

        Parameters
        ----------
        snrenv : array_like
            linear values of SNRenv
        k : float
            k parameter (Default value = None)
        q : float
            q parameter (Default value = None)
        sigma_s : float
            sigma_s parameter (Default value = None)
        m : float
            m parameter, number of words in the vocabulary. (Default value =
            None)

        Returns
        -------
        pc : ndarray
            Array of intelligibility percentage values, of the same shape as
            `snrenv`.
        """
        # `un` is the observer's decision criterion derived from the
        # vocabulary size m; `sn` its spread.  The numeric constants come
        # from the ideal-observer derivation in [JD11].
        un = norm.ppf(1.0 - 1.0 / m)
        sn = 1.28255 / un
        un += 0.577 / un
        # Map SNRenv to d-prime, then to percent correct via the normal CDF.
        dp = k * snrenv ** q
        return norm.cdf(dp, un, np.sqrt(sigma_s ** 2 + sn ** 2)) * 100

    def snrenv_to_pc(self, snrenv):
        """Converts SNRenv values to a percent correct.

        Parameters
        ----------
        snrenv : array_like
            linear values of SNRenv

        Returns
        -------
        pc : ndarray
            Array of intelligibility percentage values, of the same shape as
            `snrenv`.
        """
        snrenv = np.asarray(snrenv)
        return self._snrenv_to_pc(snrenv, self.k, self.q, self.sigma_s, self.m)
def mod_filterbank(signal, fs, modf):
    """Implementation of the EPSM-filterbank.

    The first entry of ``modf`` is the cutoff of a third-order Butterworth
    low-pass filter; the remaining entries are center frequencies of
    band-pass modulation filters.  Filtering is done in the frequency
    domain.  NOTE(review): ``fft``, ``ifft`` and ``pi`` are expected to be
    imported at module level (numpy/scipy) -- not visible in this chunk.

    Parameters
    ----------
    signal : ndarray
        Temporal envelope of a signal
    fs : int
        Sampling frequency of the signal.
    modf : array_like
        List of the center frequencies of the modulation filterbank.

    Returns
    -------
    tuple of ndarray
        Integrated power spectrum at the output of each filter, and the
        filtered time signals.
    """
    modf = np.asarray(modf)
    fcs = modf[1:]
    fcut = modf[0]
    # Make signal odd length (required so positive/negative frequency
    # halves pair up symmetrically below)
    signal = signal[0:-1] if (len(signal) % 2) == 0 else signal
    q = 1.  # Q-factor of band-pass filters
    lp_order = 3.  # order of the low-pass filter
    n = signal.shape[-1]  # length of envelope signals
    X = fft(signal)
    X_mag = np.abs(X)
    X_power = np.square(X_mag) / n  # power spectrum
    X_power_pos = X_power[0:np.floor(n / 2).astype('int') + 1]
    # take positive frequencies only and multiply by two to get the same total
    # energy
    X_power_pos[1:] = X_power_pos[1:] * 2
    pos_freqs = np.linspace(0, fs / 2, X_power_pos.shape[-1])
    # Concatenate vector of 0:fs and -fs:1
    freqs = np.concatenate((pos_freqs, -1 * pos_freqs[-1:0:-1]))
    # Initialize transfer function
    TFs = np.zeros((len(fcs) + 1, len(fcs) + 1 == 0 or len(freqs))).astype('complex')
    # Calculating frequency-domain transfer function for each center frequency:
    # (DC bin is skipped to avoid division by zero)
    for k in range(len(fcs)):
        TFs[k + 1, 1:] = 1. / (1. + (1j * q * (freqs[1:] / fcs[k] - fcs[k] /
                                               freqs[1:])))  # p287 Hambley.
    # squared filter magnitude transfer functions
    Wcf = np.square(np.abs(TFs))
    # Low-pass filter squared transfer function, third order Butterworth filter
    # TF from:
    # http://en.wikipedia.org/wiki/Butterworth_filter
    Wcf[0, :] = 1 / (1 + ((2 * pi * freqs / (2 * pi * fcut)) ** (2 * lp_order)))
    # Transfer function of low-pass filter
    TFs[0, :] = np.sqrt(Wcf[0, :])
    # initialize output product:
    vout = np.zeros((len(fcs) + 1, len(pos_freqs)))
    powers = np.zeros(len(modf))
    # ------------ DC-power, --------------------------
    # here divide by two such that a fully modulated tone has an AC-power of 1.
    dc_power = X_power_pos[0] / n / 2
    # ------------------------------------------------
    X_filt = np.zeros((Wcf.shape[0], X.shape[-1]), dtype='complex128')
    filtered_envs = np.zeros_like(X_filt, dtype='float')
    for k, (w, TF) in enumerate(zip(Wcf, TFs)):
        # Power at the output of each filter (positive frequencies only).
        vout[k] = X_power_pos * w[:np.floor(n / 2).astype('int') + 1]
        # Integration estimated as a sum from f > 0
        # integrate envelope power in the passband of the filter. Index goes
        # from 2:end since integration is for f>0
        powers[k] = np.sum(vout[k, 1:]) / n / dc_power
        # Filtering and inverse Fourier transform to get time signal.
        X_filt[k] = X * TF
        filtered_envs[k] = np.real(ifft(X_filt[k]))
    return powers, filtered_envs
|
17,378 | 0d8fb5c2707f94821641c1d58682beea5ae54341 | from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from .models import *
from .serializers import *
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
@api_view(['GET', 'PUT', 'POST', 'DELETE'])
def gene_detail(request, pk):
    """
    Retrieve, update, create or delete a Gene instance.

    Returns 404 when no Gene with the given primary key exists.
    """
    try:
        gene = Gene.objects.get(pk=pk)
    except Gene.DoesNotExist:
        # BUG FIX: `HttpResponse` was never imported in this module, so this
        # branch raised a NameError instead of returning a 404.  Use DRF's
        # Response with an explicit status instead.
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'POST':
        serializer = GeneSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == 'DELETE':
        gene.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
    elif request.method == 'PUT':
        serializer = GeneSerializer(gene, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    # GET: serialize and return the existing instance.
    if request.method == 'GET':
        serializer = GeneSerializer(gene)
        return Response(serializer.data)
@api_view(['GET'])
def genes_list(request):
    """Return the serialized list of all Gene instances."""
    if request.method == 'GET':
        serializer = GeneListSerializer(Gene.objects.all(), many=True)
        return Response(serializer.data)
|
17,379 | 29aa554943dd05bcce36dc3b837970a933226667 | # https://www.hackerrank.com/challenges/correlation-and-regression-lines-6/problem
import fileinput
def compute_correlation_coefficient(xs, ys):
    """Return Pearson's correlation coefficient of two equal-length
    numeric sequences.

    Raises ZeroDivisionError if either sequence has zero variance.
    """
    n = float(len(xs))
    sum_x = sum(xs)
    sum_y = sum(ys)
    sum_xx = sum(x ** 2 for x in xs)
    sum_yy = sum(y ** 2 for y in ys)
    # BUG FIX: `lambda (x, y): x * y` is Python-2-only tuple-parameter
    # unpacking and a SyntaxError under Python 3; unpack inside the
    # generator expression instead.
    sum_xy = sum(x * y for x, y in zip(xs, ys))
    numer = n * sum_xy - sum_x * sum_y
    denom = ((n * sum_xx - sum_x ** 2) * (n * sum_yy - sum_y ** 2)) ** 0.5
    return numer / denom
# Read two whitespace-separated rows of numbers from stdin (or file args),
# then print their Pearson correlation coefficient to three decimals.
rows = [[float(token) for token in line.split()] for line in fileinput.input()]
xs = rows[0]
ys = rows[1]
cc = compute_correlation_coefficient(xs, ys)
print(f"{cc:.3f}")
# Regression-line coefficients (unused here, kept for reference):
# beta = (n * sum_XY - sum_X * sum_Y) / (n * sum_XX - sum_X ** 2)
# alpha = (1.0 / n) * sum_Y - beta * (1.0 / n) * sum_Y
|
17,380 | e8d74c4e5f4460c2e78d7e0d050efd7db3c39c34 | import cv2
import numpy as np
from src.config import get_algorithm_params
import dlib
# Status labels returned (and drawn onto the frame) by FaceTracker.
NO_FACE_IN_FRAME = "NO_FACE_IN_FRAME"
FACE_DETECTED = "FACE_DETECTED"
# Key used to look up this algorithm's settings in the project config.
FACE_TRACKER = "FACE_TRACKER"
class FaceTracker:
    """Detects the most central face in a frame with dlib's HOG-based
    frontal-face detector and annotates the frame with the result."""

    def __init__(self):
        # Algorithm settings loaded from the project config under the
        # "face_tracker" key.
        self.params = get_algorithm_params(FACE_TRACKER.lower())
        # load dlib's default HOG + SVM frontal face detector
        self.detector = dlib.get_frontal_face_detector()
    def run(self, frame):
        """Run detection on a BGR frame.

        Returns the ``(feedback, output_frame, detected_face)`` triple of
        :meth:`compute_bounding_box`.
        """
        # compute the bounding box
        # this algorithm requires grayscale frames
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        return self.compute_bounding_box(gray_frame)
    def compute_bounding_box(self, gray_frame):
        """
        Computes the bounding box for an image (the rectangle in which the
        face is), draws the status text (and, on success, the box) onto an
        RGB copy of the frame, and crops the detected face.

        :return: ``(feedback, output_frame, detected_face)`` where
            ``feedback`` is FACE_DETECTED or NO_FACE_IN_FRAME and
            ``detected_face`` is None when no face was found.
        """
        feedback = NO_FACE_IN_FRAME
        # The second argument is the number of times we will upscale the image (in this case we don't, as
        # it increase computation time)
        # The third argument to run is an optional adjustment to the detection threshold,
        # where a negative value will return more detections and a positive value fewer.
        candidate_bounding_boxes, scores, idx = self.detector.run(gray_frame, 1, -0.3)
        if len(candidate_bounding_boxes) > 0:
            feedback = FACE_DETECTED
            # find best bounding box:
            best_bounding_box = self.find_best_bounding_box(
                candidate_bounding_boxes, scores, gray_frame
            )
        # Green annotation when a face is found, red otherwise.
        color = (0, 255, 0) if feedback == FACE_DETECTED else (0, 0, 255)
        output_frame = cv2.putText(
            cv2.cvtColor(gray_frame, cv2.COLOR_GRAY2RGB),
            feedback,
            (gray_frame.shape[1] // 2, 50),
            cv2.FONT_HERSHEY_SIMPLEX,
            1,
            color,
            2,
            cv2.LINE_AA,
        )
        detected_face = None
        if feedback == FACE_DETECTED:
            output_frame = cv2.rectangle(
                output_frame,
                (best_bounding_box.left(), best_bounding_box.top()),
                (best_bounding_box.right(), best_bounding_box.bottom()),
                (0, 255, 0),
                1,
            )
            # Get the detected face, but use the initial gray frame to avoid
            # having the text on top of the detected face.
            detected_face = cv2.cvtColor(gray_frame, cv2.COLOR_GRAY2RGB)[
                best_bounding_box.top(): best_bounding_box.bottom(),
                best_bounding_box.left():best_bounding_box.right()
            ]
        return feedback, output_frame, detected_face
    def find_best_bounding_box(self, candidate_bounding_boxes, scores, gray_frame):
        """Return the candidate box whose centre is closest to the frame
        centre, normalized by the box diagonal (``scores`` is unused)."""
        # computes the size of the bounding box diagonal
        mean_sizes = (
            np.sum(
                np.array(
                    [
                        [rect.top() - rect.bottom(), rect.left() - rect.right()]
                        for rect in candidate_bounding_boxes
                    ]
                )
                ** 2,
                axis=-1,
            )
            ** 0.5
        )
        # computes the position of the middle of bounding boxes with respect to the middle of the image
        mean_points = np.array(
            [
                [(rect.top() + rect.bottom()) / 2.0, (rect.left() + rect.right()) / 2.0]
                for rect in candidate_bounding_boxes
            ]
        ) - np.array([gray_frame.shape[0] / 2.0, gray_frame.shape[1] / 2.0])
        # computes the distances to center, divided by the bounding box diagonal
        prop_dist = np.sum(mean_points ** 2, axis=-1) ** 0.5 / mean_sizes
        # gets the closer bounding box to the center
        best_bounding_box_id = np.argmin(prop_dist)
        # compute best bounding box (re-wrap coordinates as a plain
        # dlib.rectangle)
        best_bounding_box = dlib.rectangle(
            int(candidate_bounding_boxes[best_bounding_box_id].left()),
            int(candidate_bounding_boxes[best_bounding_box_id].top()),
            int(candidate_bounding_boxes[best_bounding_box_id].right()),
            int(candidate_bounding_boxes[best_bounding_box_id].bottom()),
        )
        return best_bounding_box
|
17,381 | bec3cb9699efcda931317facac82485fa90d2234 | def expand(self, obj, items):
block = [item.raw for item in obj.parents]
block.append(obj.raw)
current_level = items
for b in block:
if (b not in current_level):
current_level[b] = collections.OrderedDict()
current_level = current_level[b]
for c in obj.children:
if (c.raw not in current_level):
current_level[c.raw] = collections.OrderedDict() |
17,382 | a3cc31c80a54748d0c03dfdce7b6fe0435e8c3e2 | ##
import pandas as pd
from datetime import timedelta
import re
# NOTE(review): hard-coded local path -- consider making this configurable.
df = pd.read_csv('Downloads/data_file (1).csv')
tstamp_df = pd.DataFrame(columns=['TIME STAMP'])
timestamps=[]
# Month-abbreviation -> month-number lookup for OPD_DATE strings.
dic={'JAN':'01', 'FEB':'02', 'MAR':'03', 'APR':'04','MAY':'05','JUN':'06',
    'JUL':'07','AUG':'08','SEP':'09','OCT':'10','NOV':'11','DEC':'12'}
t_stamp= df.filter(['OPD_DATE','ACT_TIME'])
print(t_stamp['OPD_DATE'].head())
from datetime import datetime
# Build one timestamp per row from OPD_DATE ("dd-MON-yy" layout assumed --
# TODO confirm) plus ACT_TIME (seconds since midnight, may exceed 24h).
for index in range(len(df['OPD_DATE'])):
    date=df['OPD_DATE'][index][0:2]
    month=dic[df['OPD_DATE'][index][3:6]]
    year=df['OPD_DATE'][index][7:10]
    time=df['ACT_TIME'][index]
    str_time = str(timedelta(seconds=int(time)))
    # str time can be '1 day 00:00:34'
    if 'day' in str_time:
        add_days = ''
        for i in range(len(str_time)):
            if str_time[i] == ' ':
                s = str_time[i + 1:]
                break
            add_days += str_time[i]
        # NOTE(review): adding days by int arithmetic can roll past the end
        # of the month (e.g. 31 + 1); strptime would then fail -- verify.
        date = int(date) + int(add_days)
        # remove the 'day'/'days' text, keeping only digits and colons
        str_time = re.sub("[^0-9:]", "", s)
    dt=str(date)+month+year+str_time
    tstamp=datetime.strptime(dt, '%d%m%y%H:%M:%S')
    timestamps.append(tstamp)
# Replace the two source columns with the combined timestamp column.
df.insert(2, "TIME STAMP", timestamps)
df.drop(columns=['OPD_DATE','ACT_TIME'],inplace=True, axis=1)
## For each trip id, keep a single (route_id, direction, service_key) label
## combination -- the most frequent one within the trip.
## NOTE(review): `stopdf` is never defined in this file; presumably created
## elsewhere (notebook-style code) -- confirm before running standalone.
groupby_trip = stopdf.groupby('trip_id')
groups = groupby_trip.groups.keys()
column_names = ['trip_id','vehicle_id','route_id', 'direction', 'service_key']
finaldf = pd.DataFrame(columns = column_names)
for group in groups:
    group_df = groupby_trip.get_group(group)
    groupby_labels =group_df.groupby(['route_id', 'direction', 'service_key'])
    size=max(groupby_labels.size())
    # Keep only the label groups of maximal size (majority label)
    groupby_labels=groupby_labels.filter(lambda x: len(x) == size, dropna=True)
    finaldf=finaldf.append(groupby_labels, ignore_index = True)
finaldf = finaldf.drop_duplicates()
invalid_trip_id_list = []
# TRANSFORMATION 4: map direction codes to labels ('1' -> 'Out',
# '0' -> 'Back'); null/unknown values become '' and the trip is recorded
# as invalid.  NOTE(review): chained indexing like
# finaldf['direction'][index] = ... triggers pandas' SettingWithCopy
# behavior; .loc would be safer.
for index in range(len(finaldf['direction'])):
    if (pd.isnull(finaldf['direction'][index])):
        finaldf['direction'][index]=''
        invalid_trip_id_list.append(finaldf['trip_id'][index])
        #finaldf = finaldf.drop(finaldf.index[index])
    elif finaldf['direction'][index]=='1':
        finaldf['direction'][index]='Out'
    elif finaldf['direction'][index]=='0':
        finaldf['direction'][index]='Back'
    else:
        finaldf['direction'][index]=''
        invalid_trip_id_list.append(finaldf['trip_id'][index])
print(finaldf.head())
# TRANSFORMATION 5: Change W to Weekday, S to Saturday and U to Sunday.
# Null or unrecognized codes become '' and the trip is recorded as invalid.
for index in range(len(finaldf['service_key'])):
    if (pd.isnull(finaldf['service_key'][index])):
        # BUG FIX: was `== ''` (a no-op comparison); assign the empty string.
        finaldf['service_key'][index] = ''
        invalid_trip_id_list.append(finaldf['trip_id'][index])
    # BUG FIX: was a second independent `if`, so null rows fell through to
    # the `else` branch and were appended to the invalid list twice.
    elif finaldf['service_key'][index]=='W':
        finaldf['service_key'][index]='Weekday'
    elif finaldf['service_key'][index]=='S':
        finaldf['service_key'][index]='Saturday'
    elif finaldf['service_key'][index]=='U':
        finaldf['service_key'][index]='Sunday'
    else:
        finaldf['service_key'][index]=''
        invalid_trip_id_list.append(finaldf['trip_id'][index])
# Attach the per-trip labels to the trip table, then drop the now-redundant
# unlabeled columns.  NOTE(review): `tripdf` is never defined in this file --
# presumably created elsewhere; confirm.
newdf=tripdf.merge(finaldf, on=['trip_id','vehicle_id'], how='left')
newdf = newdf.drop(newdf.columns[[2, 3, 4]], axis=1)
print("NEWDF: \n",newdf.head())
17,383 | 03ba7cd194b99a794938fb5c1501c96e7695bccc | import numpy as np
from scipy.ndimage import median_filter
class AdvancedFilter:
    """Small image-filter helpers operating on H x W x 3 numpy arrays."""

    def mean_blur(self, arr):
        """Apply a 10x10 median filter to each of the three channels of
        *arr* and return the filtered channels re-stacked on the last axis.
        """
        window = (10, 10)
        channels = [median_filter(arr[:, :, ch], size=window) for ch in range(3)]
        return np.stack(channels, axis=2)

    def gaussian_blur(self, arr, kernel=6):
        """Return an unnormalized Gaussian kernel of side
        ``2 * int((kernel - 1) / 2) + 1``.

        Note: *arr* is unused -- the method only builds the weight grid
        ``exp(-(x**2 + y**2) / 4)`` on integer coordinates centred at 0.
        """
        half = int((kernel - 1) / 2)
        coords = np.arange(-half, half + 1)
        xx, yy = np.meshgrid(coords, coords)
        return np.exp(-(xx ** 2 + yy ** 2) / 4)
|
17,384 | f3093c65a53cf77dfbebd0fd34bc9235ec018662 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from starlette.status import HTTP_403_FORBIDDEN, HTTP_401_UNAUTHORIZED
from epicteller.core.error.base import EpictellerError
# Domain-specific errors for account/authentication handling.  Each class
# only overrides `message` (and, where set, `status_code`/`code`) on the
# shared EpictellerError base.  The message strings are user-facing and in
# Chinese; English translations are given in the comments.
class IncorrectEMailPasswordError(EpictellerError):
    # "Incorrect e-mail address or password"
    message = '邮箱或密码不正确'
class UnauthorizedError(EpictellerError):
    # "Login credentials expired" -- returned with HTTP 401
    status_code = HTTP_401_UNAUTHORIZED
    message = '登录凭据失效'
    code = 401
class EMailUsedError(EpictellerError):
    # "E-mail address already taken"
    message = '邮箱已被占用'
class ExternalIDUsedError(EpictellerError):
    # "External account already taken"
    message = '外部帐号已被占用'
class EMailValidateError(EpictellerError):
    # "E-mail validation failed"
    message = '邮箱验证失败'
class InvalidValidateTokenError(EpictellerError):
    # "Invalid e-mail validation token"
    message = '无效的邮箱验证凭据'
class InvalidExternalTypeError(EpictellerError):
    # "Unknown external account type"
    message = '未知外部帐号类型'
class InvalidExternalIDError(EpictellerError):
    # "Invalid external account format"
    message = '无效的外部帐号格式'
class AlreadyBindExternalError(ExternalIDUsedError):
    # "An external account is already bound"
    message = '已经绑定过外部帐号'
|
17,385 | bab2b2260beed6996a5c9171c5378f7514d6e051 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
import re,sys,commands, getopt, subprocess
from subprocess import Popen, PIPE
# Python 2 Nagios-style plugin: print the comment attached to a host entry
# in the DHCP hosts file.  Comment lines start with this marker:
detec_co ="#"
# Path of the hosts file whose per-host comments are displayed.
_PFAD_host = "/common/usr/dhcp/hosts.conf"
# check_snmp status codes.  NOTE(review): _unknow/_warning/_critical are
# never used -- the script always exits with _ok, even for UNKNOWN.
_unknow = -1
_ok = 0
_warning = 1
_critical= 2
# ------------------------
# main
# ------------------------
# Host address is taken from the first command-line argument.
_hostadress = str(sys.argv[1])
# open host file; on failure report UNKNOWN (but still exit 0 -- see above)
try:
    datei = open(_PFAD_host,"r")
except Exception:
    print "UNKNOWN"
    sys.exit(_ok)
# Search the hosts file for the host address.  For the first non-comment
# matching line, print the text after the '#' marker (the host's comment).
for line in datei:
    if _hostadress in line:
        if not (line.startswith(detec_co)):
            line=line.split(detec_co)
            print line[1]
            sys.exit(_ok)
print "UNKNOWN"
sys.exit(_ok)
|
17,386 | c8b8dfcacb391da815b941d406ff7c34c3ea6183 | #
# @lc app=leetcode id=566 lang=python3
#
# [566] Reshape the Matrix
#
# @lc code=start
from typing import List
class Solution:
    def matrixReshape(self, mat: List[List[int]], r: int, c: int) -> List[List[int]]:
        """Reshape `mat` into an r x c matrix in row-major order.

        Per LeetCode 566, the original matrix is returned unchanged when
        the element counts differ or the target shape equals the current
        shape.
        """
        m, n = len(mat), len(mat[0])
        # Check feasibility BEFORE allocating the result: the original code
        # built the r*c result first and threw it away on this branch.
        if (m == r and n == c) or m * n != r * c:
            return mat
        res = [[0] * c for _ in range(r)]
        for i in range(m):
            for j in range(n):
                x = i * n + j          # linear (row-major) index
                res[x // c][x % c] = mat[i][j]
        return res
# @lc code=end
# Ad-hoc test inputs.  NOTE(review): each reassignment overwrites the
# previous one, so only the last (mat=[[1,2]], r=1, c=1) case actually runs.
mat = [[1,2],[3,4]]
r = 1
c = 4
mat = [[1,2],[3,4]]
r = 2
c = 4
mat = [[1,2]]
r = 1
c = 1
Sol = Solution()
ans = Sol.matrixReshape(mat, r, c)
print(f'ans: {ans}')
17,387 | 72246040950324182a943ea88ab9a63c6d563872 | # -*- coding: utf-8 -*-
"""
The darshan.common module provides common functionality required by multiple modules.
"""
|
17,388 | 580acc794a6fdd9f4a452151ec503606cbb03929 | # coding=utf-8
# ---- settings used by start_vpn.py ----
# File recording the currently connected server.
connected_server = 'tmp_server.log'
# File recording the history of connected servers.
# NOTE(review): the filename is misspelled ("histroy"); other code may
# already depend on it, so it is left unchanged here.
server_history = 'server_histroy.log'
# Generated OpenVPN configuration file.
openvpn_config_file = 'tmp_openvpn_config.ovpn'
# ---- settings used by get_ip.py ----
# http/https proxy server url
proxy_url = {'http': '127.0.0.1:8087'}
# ---- settings used by get_location.py ----
# IP geography lookup endpoint; '%s' is filled with the queried IP.
location_post_url = 'http://opendata.baidu.com/api.php?query=%s&resource_id=6006&oe=utf-8'
# ---- settings used by MyVPN.py ----
# Temporary account config file and account parameters.
# DeviceName is defined in vpncmd as the virtual adapter name; it shows up
# in "ifconfig" output with the prefix "vpn_" (max total length 15).
device_name = 'vg'
tmp_cfg_file = 'vgp.vpn'
account_name = 'vgp1'
virtual_adapter = 'vpn_' + device_name
max_connections = '1'
# Local network settings (gateway and physical adapter).
my_gateway = '192.168.0.1'
my_adapter = 'eth0'
# Template written to tmp_cfg_file (defined below).
tmp_cfg_file_template = '''# VPN Client VPN Connection Setting File
#
# This file is exported using the VPN Client Manager.
# The contents of this file can be edited using a text editor.
#
# When this file is imported to the Client Connection Manager
# it can be used immediately.
declare root
{
bool CheckServerCert false
uint64 CreateDateTime 0
uint64 LastConnectDateTime 0
bool StartupAccount false
uint64 UpdateDateTime 0
declare ClientAuth
{
uint AuthType 1
byte HashedPassword H8N7rT8BH44q0nFXC9NlFxetGzQ=
string Username vpn
}
declare ClientOption
{
string AccountName %s
uint AdditionalConnectionInterval 1
uint ConnectionDisconnectSpan 0
string DeviceName %s
bool DisableQoS false
bool HalfConnection false
bool HideNicInfoWindow false
bool HideStatusWindow false
string Hostname %s
string HubName VPNGATE
uint MaxConnection %s
bool NoRoutingTracking false
bool NoTls1 false
bool NoUdpAcceleration false
uint NumRetry 4294967295
uint Port %s
uint PortUDP 0
string ProxyName $
byte ProxyPassword $
uint ProxyPort 0
uint ProxyType 0
string ProxyUsername $
bool RequireBridgeRoutingMode false
bool RequireMonitorMode false
uint RetryInterval 15
bool UseCompress false
bool UseEncrypt true
}
}
'''
|
17,389 | ea5d7f976206e3a487ebfea588efb9961be915f6 | class Solution:
def canFinish(self, numCourses: int, prerequisites: [[int]]) -> bool:
adj_list = defaultdict(list)
vertices_with_no_incoming_edges = []
stack = deque()
indegree_graph = defaultdict(int)
for i in range(numCourses):
indegree_graph[i] = 0
for i in range(len(prerequisites)):
source = prerequisites[i][1]
dest = prerequisites[i][0]
adj_list[source].append(dest)
indegree_graph[dest] += 1
for key, val in indegree_graph.items():
if indegree_graph[key] == 0:
stack.append(key)
vertices_with_no_incoming_edges.append(key)
while stack:
vert = stack.pop()
if vert in adj_list:
for v in adj_list[vert]:
indegree_graph[v] -= 1
if indegree_graph[v] == 0:
vertices_with_no_incoming_edges.append(v)
stack.append(v)
return len(vertices_with_no_incoming_edges) == numCourses
|
17,390 | 107d33bced5de679ed44f77f662dcbd1595f3edb | # 11.6 (Algebra: multiply two matrices) Write a function to multiply two matrices. The
# header of the function is:
# def multiplyMatrix(a, b)
# To multiply matrix a by matrix b, the number of columns in a must be the same as
# the number of rows in b, and the two matrices must have elements of the same or
# compatible types. Let c be the result of the multiplication. Assume the column size
# of matrix a is n. Each element cij is For
# example, ai1 * b1j + ai2 * b2j + c cij + ain * bnj for two matrices a and b, c is
# where cij = ai1 * b1j + ai2 * b2j + ai3 * b3j. Write a test program that prompts the user to enter two matrices and displays
# their product.
import math
def multiplyMatrix(a, b):
    """Return the matrix product a x b as a new list-of-lists.

    Requires len(a[0]) == len(b) (columns of `a` equal rows of `b`).

    BUG FIX: the original ignored its parameters and multiplied the global
    matrices m1/m2; it now uses `a` and `b`, so it works for any compatible
    (not just the globally defined square) matrices.
    """
    rows, inner, cols = len(a), len(b), len(b[0])
    result = [[0] * cols for _ in range(rows)]
    for i in range(rows):
        for j in range(cols):
            for k in range(inner):
                result[i][j] += a[i][k] * b[k][j]
    return result
# Read two matrices as flat whitespace-separated token lists.
l1 = input("Enter matrix 1: ").split()
l2 = input("Enter matrix 2: ").split()
# Shape the flat input into square matrices.  NOTE(review): assumes both
# inputs have the same perfect-square token count -- no validation is done.
m1 = []
m2 = []
counter = 0
size = int(math.sqrt(len(l1)))
for i in range(size):
    m1.append([0] * size)
    m2.append([0] * size)
    for j in range(size):
        m1[i][j] = float(l1[counter])
        m2[i][j] = float(l2[counter])
        counter += 1
res = multiplyMatrix(m1, m2)
res = [["%.1f" % x for x in member] for member in res]  # format values
# Pretty-print "m1 * m2 = res" row by row; the '*' and '=' signs are only
# emitted on the middle (i == 1) row.
for i in range(size):
    print(m1[i], end=' ')
    if i == 1:
        print(' *', end='')
        print(' ', end='')
    else:
        print('\t', end='')
    print(m2[i], end=' ')
    if i == 1:
        print(' =', end='')
        print(' ', end='')
    else:
        print('\t', end='')
    print(res[i], end=' ')
    print()
17,391 | 73e8cdb89460becdf389b5ea83812f92ff340def | # full_name = lambda first, last: f'I am {first} {last}'
#
# result = full_name('Guido', 'van Rossum')
#
# print(result)
def x(a, b):
    """Return the product of a and b.

    (PEP 8 / flake8 E731: use a def instead of assigning a lambda to a
    name -- same callable interface, better tracebacks and repr.)
    """
    return a * b


print(x(3, 4))
17,392 | 70f053c976389a131db70720b7981e4b94e01aa6 | from .generate_service import InfrastructureGMLGraph, ServiceGMLGraph
class VolatileResourcesMapping(dict):
# key to store a bool for indicating whether it is a successful mapping (taken from the AbstractMapper's earlier realizations)
WORKED = 'worked'
# key to store a dict of AP names, for each time instance which is selected to serve the mobility cluster.
AP_SELECTION = 'AP_selection'
OBJECTIVE_VALUE = 'Objective_value'
RUNNING_TIME = 'Running_time'
EPSILON = 1e-6
def __init__(self, *args, **kwargs):
"""
Class to store edge mappings to paths and node mappings. Every item is a node name or tuple of node names.
Contains 'worked' keyed bool to indicate the success.
:param args:
:param kwargs:
"""
super(VolatileResourcesMapping, self).__init__(*args, **kwargs)
if VolatileResourcesMapping.WORKED not in self:
self[VolatileResourcesMapping.WORKED] = False
if VolatileResourcesMapping.AP_SELECTION not in self:
# keyed by subinterval index and value is AP name
self[VolatileResourcesMapping.AP_SELECTION] = {}
if VolatileResourcesMapping.OBJECTIVE_VALUE not in self:
self[VolatileResourcesMapping.OBJECTIVE_VALUE] = None
if VolatileResourcesMapping.RUNNING_TIME not in self:
self[VolatileResourcesMapping.RUNNING_TIME] = None
def __repr__(self):
return "VolatileResourcesMapping(Feasible: {}, Obj.value: {}, Runtime: {})".\
format(self[self.WORKED], self[self.OBJECTIVE_VALUE], self[self.RUNNING_TIME])
def __str__(self):
return self.__repr__()
def add_access_point_selection(self, subinterval : int, ap_name):
self[VolatileResourcesMapping.AP_SELECTION][int(subinterval)] = ap_name
def get_access_point_selection(self, subinterval : int):
return self[VolatileResourcesMapping.AP_SELECTION][int(subinterval)]
def get_hosting_infra_node_id(self, ns : ServiceGMLGraph, infra : InfrastructureGMLGraph, vnf_id):
"""
Returns the infra node id, where the given VNF id is hosted. Could be cached...
:param ns:
:param infra:
:param vnf_id:
:return:
"""
for vnf_name, host_name in self.items():
if ns.nodes[vnf_id][ns.node_name_str] == vnf_name:
for host_id, data in infra.nodes(data=True):
if data[infra.node_name_str] == host_name:
return host_id
def validate_mapping(self, ns: ServiceGMLGraph, infra: InfrastructureGMLGraph,
time_interval_count, coverage_threshold, battery_threshold, **kwargs):
"""
Checks whether the mapping task defined by the ns and infra is solved by this mapping object
:param ns:
:param infra:
:param kwargs: some optimization parameters of the solution provided by the heuristic are irrelevant for the validation
:return:
"""
if self[VolatileResourcesMapping.WORKED]:
# if not all service nodes are mapped
if not all({d[ns.node_name_str] in self for n, d in ns.nodes(data=True)}):
return False
if len(self[VolatileResourcesMapping.AP_SELECTION]) != infra.time_interval_count:
return False
# check AP selection
for subinterval in range(1, time_interval_count+1):
ap_name = self.get_access_point_selection(subinterval)
# find the AP_id for this AP name
for ap_id in infra.access_point_ids:
if infra.nodes[ap_id][infra.node_name_str] == ap_name:
for master_mobile_id in infra.ap_coverage_probabilities.keys():
if infra.ap_coverage_probabilities[master_mobile_id][subinterval][ap_id] < coverage_threshold:
return False
# check delay constraints in each interval for all subchains
for sfc_delay, sfc_path in ns.sfc_delays_list:
actual_sfc_delay = 0.0
for nfu, nfv in sfc_path:
host_u, host_v = self.get_hosting_infra_node_id(ns, infra, nfu), self.get_hosting_infra_node_id(ns, infra, nfv)
actual_sfc_delay += infra.delay_distance(host_u, host_v, subinterval, coverage_threshold, ap_id)
if sfc_delay + self.EPSILON < actual_sfc_delay:
return False
# go to next subinterval
break
else:
raise Exception("No AP id found for name {} in subinterval {}".format(ap_name, subinterval))
# location constraints
for nf, data in ns.nodes(data=True):
if ns.location_constr_str in data:
location_constr_name = map(lambda x: infra.nodes[x][infra.node_name_str], data[ns.location_constr_str])
if self[data[ns.node_name_str]] not in location_constr_name:
return False
mobile_ids = list(infra.mobile_ids)
# check capacity constraints
for infra_node_id in infra.nodes():
total_capacity = infra.nodes[infra_node_id][infra.infra_node_capacity_str]
infra_node_name = infra.nodes[infra_node_id][infra.node_name_str]
allocated_load = 0.0
for vnf_id, data in ns.nodes(data=True):
if self[data[ns.node_name_str]] == infra_node_name:
allocated_load += data[ns.nf_demand_str]
# check if load matches
if allocated_load > total_capacity + self.EPSILON:
return False
# check battery constraints
if infra_node_id in infra.mobile_ids:
mobile_ids.remove(infra_node_id)
linear_coeff = infra.unloaded_battery_alive_prob - infra.full_loaded_battery_alive_prob
probability = infra.unloaded_battery_alive_prob - allocated_load / total_capacity * linear_coeff
if probability < battery_threshold - self.EPSILON:
return False
if len(mobile_ids) > 0:
raise Exception("Not all mobile nodes have been checked for battery constraints!")
# if we didnt return yet, all constraints are correct
return True
else:
# if mapping is not 'worked' then it is valid.
return True
|
17,393 | 254701d5a080eb8cb40ace40f6478a4efd5a2a51 | from transformers import BertModel, BertTokenizer
from functools import wraps
import numpy as np
from utils.base_classes import BaseEmbedder
from utils.indexer_utils import get_text_by_ind, get_new_ind_by_ind, prepare_indexer, test_queries
from text_utils.utils import create_logger
from config import logger_path, ModelNames
import torch
logger = create_logger(__name__, logger_path['bert'])
MAX_TEXT_LEN = 512
FEATURE_SIZE = 768
def singleton(cls):
    """Decorator: lazily build one shared instance of *cls* and return it
    on every call.  Only the first call's arguments are ever used.
    """
    cached = None

    @wraps(cls)
    def wrapper(*args, **kwargs):
        nonlocal cached
        if cached is None:
            cached = cls(*args, **kwargs)
        return cached

    return wrapper
class BertEmbedder(BaseEmbedder):
    """
    Embedding wrapper on multilingual BERT: encodes a text into a single
    FEATURE_SIZE vector by mean-pooling the last hidden layer over tokens.
    """
    def __init__(self, model_spec='bert-base-multilingual-uncased'):
        self.model_spec = model_spec
        self.model = self.bert_model()
        self.tokenizer = self.bert_tokenizer()
        # Running counters of successful / failed embedding calls.
        self.success_count = 0
        self.error_count = 0
    @singleton
    def bert_model(self):
        # NOTE: @singleton caches the FIRST model ever built, shared across
        # all BertEmbedder instances (later model_spec values are ignored).
        model = BertModel.from_pretrained(self.model_spec).eval()
        return model
    @singleton
    def bert_tokenizer(self):
        # Lowercase input only for the "uncased" model variants.
        do_lower_case = False
        if 'uncased' in self.model_spec:
            do_lower_case = True
        tokenizer = BertTokenizer.from_pretrained(self.model_spec, do_lower_case=do_lower_case)
        return tokenizer
    def sentence_embedding(self, text):
        """Return a FEATURE_SIZE numpy vector for `text`; a zero vector on
        failure (the exception is logged and error_count incremented).
        """
        try:
            # NOTE(review): unpacking .values() relies on the dict's key
            # order being (input_ids, token_type_ids, attention_mask) --
            # confirm against the installed transformers version.
            inputs_ids, token_type_ids, attention_mask = self.tokenizer.encode_plus(text, add_special_tokens=True,
                                                                                    max_length=MAX_TEXT_LEN,
                                                                                    padding=False,
                                                                                    truncation=True,
                                                                                    return_tensors='pt').values()
            with torch.no_grad():
                encoded_layers, _ = self.model(inputs_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
            # Mean-pool token embeddings of the (single) sequence.
            sent_embedding = encoded_layers.mean(dim=1)
            vect = sent_embedding[0].numpy()
            self.success_count += 1
            return vect
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit and KeyboardInterrupt; narrow it so the process
            # stays interruptible while still degrading gracefully.
            logger.exception('exception msg %s', text)
            self.error_count += 1
            return np.zeros(FEATURE_SIZE)
def check_indexer():
    """Print retrieval results for the canned test queries (smoke test)."""
    for query in test_queries:
        print('____', query)
        answers = get_answer(query)
        for answer in answers:
            print('\t\t', answer['text'].replace('\n', ''))
        print()
        print()
def get_answer(query):
    """Return the texts of the 4 indexed documents closest to `query`."""
    print('use bert model')
    return [get_text_by_ind(ind) for _, ind in indexer.return_closest(query, k=4)]
def get_answer_ind(query):
    """Return the remapped indices of the 4 documents closest to `query`."""
    return [get_new_ind_by_ind(ind) for _, ind in indexer.return_closest(query, k=4)]
from config import used_models
# Build the BERT index at import time, but only when the deployment config
# enables the BERT model.  NOTE(review): `indexer` and `df` only exist when
# this branch runs; get_answer()/get_answer_ind() will raise NameError
# otherwise -- confirm callers guard on the config as well.
if ModelNames.BERT in used_models:
    logger.info('bert indexer started')
    indexer, df = prepare_indexer('bert', logger)
    logger.info('bert indexer ready')
    check_indexer()
|
17,394 | 06d803ccbe313e65584759e0f91decd7fc458f35 | from plone import api
def setoAuthTokenFromCASSAMLProperties(event):
    """Event subscriber: store the CAS-provided OAuth token on the member.

    Looks up the Plone member named in the CAS/SAML event properties and
    writes the event's ``oauthToken`` into the member's ``oauth_token``
    property.
    """
    props = event.properties
    member = api.user.get(props['username'])
    member.setMemberProperties(mapping={'oauth_token': props['oauthToken']})
|
17,395 | fc9865c0b25b494f3ad25eecc1a9704a8089d56f | # called by demo_global_writer
from tensorboardX import GlobalSummaryWriter
writer = GlobalSummaryWriter.getSummaryWriter()
writer.add_text('my_log', 'greeting from global1')
for i in range(100):
writer.add_scalar('global1', i)
for i in range(100):
writer.add_scalar('common', i) |
17,396 | c75df5c5cf6f39fdd7a78b1eb02878b93a02f2e9 | import os
import argparse
import sys
import pickle
from pathlib import Path
from jax import random
from sklearn.decomposition import PCA
from generate_data import gen_source_data
from models import init_invertible_mlp_params, invertible_mlp_fwd
from train import train
def parse():
    """Argument parser for all configs.

    Returns
    -------
    argparse.Namespace
        Parsed command-line arguments for data generation, seeding,
        training/optimization, CUDA and output location.
    """
    parser = argparse.ArgumentParser(description='')
    # data generation args
    parser.add_argument('-n', type=int, default=5,
                        help="number of latent components")
    parser.add_argument('-k', type=int, default=11,
                        help="number of latent states")
    parser.add_argument('-t', type=int, default=100000,
                        help="number of time steps")
    parser.add_argument('--mix-depth', type=int, default=4,
                        help="number of mixing layers")
    parser.add_argument('--prob-stay', type=float, default=0.99,
                        help="probability of staying in a state")
    # NOTE(review): store_true with default=True means --whiten can never
    # be turned off from the command line -- the flag is a no-op.
    parser.add_argument('--whiten', action='store_true', default=True,
                        help="PCA whiten data as preprocessing")
    # set seeds
    parser.add_argument('--data-seed', type=int, default=0,
                        help="seed for initializing data generation")
    parser.add_argument('--mix-seed', type=int, default=0,
                        help="seed for initializing mixing mlp")
    parser.add_argument('--est-seed', type=int, default=7,
                        help="seed for initializing function estimator mlp")
    parser.add_argument('--distrib-seed', type=int, default=7,
                        help="seed for estimating distribution paramaters")
    # training & optimization parameters
    parser.add_argument('--hidden-units', type=int, default=10,
                        help="num. of hidden units in function estimator MLP")
    parser.add_argument('--learning-rate', type=float, default=3e-4,
                        help="learning rate for training")
    parser.add_argument('--num-epochs', type=int, default=100,
                        help="number of training epochs")
    parser.add_argument('--subseq-len', type=int, default=100,
                        help="length of subsequences")
    parser.add_argument('--minibatch-size', type=int, default=64,
                        help="number of subsequences in a minibatch")
    parser.add_argument('--decay-rate', type=float, default=1.,
                        help="decay rate for training (default to no decay)")
    parser.add_argument('--decay-interval', type=int, default=15000,
                        help="interval (in iterations) for full decay of LR")
    # CUDA settings
    # NOTE(review): same no-op pattern as --whiten (always True).
    parser.add_argument('--cuda', action='store_true', default=True,
                        help="use GPU training")
    # saving
    parser.add_argument('--out-dir', type=str, default="output/",
                        help="location where data is saved")
    args = parser.parse_args()
    return args
def main():
    """End-to-end experiment driver: generate HM-nICA data, train the
    model, and pickle the results dictionary to disk.

    Returns:
        None — the return value is forwarded to sys.exit by the
        __main__ guard.
    """
    args = parse()
    # Identifiability requires strictly more latent states than twice
    # the number of latent components.
    assert args.k > 2 * args.n, "K not set high enough for given N"

    # Generate the latent sources and the hidden Markov state sequence.
    s_data, state_seq, mu, D, A = gen_source_data(args.n, args.k, args.t,
                                                  args.prob_stay,
                                                  random_seed=args.data_seed)

    # Mix the sources through a random invertible MLP to obtain the
    # observable signals.
    mix_key = random.PRNGKey(args.mix_seed)
    mix_params = init_invertible_mlp_params(mix_key, args.n,
                                            args.mix_depth)
    x_data = invertible_mlp_fwd(mix_params, s_data)

    # Optional PCA whitening as preprocessing.
    if args.whiten:
        pca = PCA(whiten=True)
        x_data = pca.fit_transform(x_data)

    # Bundle variables for the trainer.
    data_dict = {'x_data': x_data,
                 's_data': s_data,
                 'state_seq': state_seq}
    train_dict = {'mix_depth': args.mix_depth,
                  'hidden_size': args.hidden_units,
                  'learning_rate': args.learning_rate,
                  'num_epochs': args.num_epochs,
                  'subseq_len': args.subseq_len,
                  'minib_size': args.minibatch_size,
                  'decay_rate': args.decay_rate,
                  'decay_steps': args.decay_interval}
    seed_dict = {'est_mlp_seed': args.est_seed,
                 'est_distrib_seed': args.distrib_seed}

    # Record the full configuration alongside results for later analysis.
    results_dict = {}
    results_dict['data_config'] = {'N': args.n, 'K': args.k, 'T': args.t,
                                   'mix_depth': args.mix_depth,
                                   'p_stay': args.prob_stay,
                                   'data_seed': args.data_seed,
                                   'mix_seed': args.mix_seed}
    results_dict['train_config'] = {'train_vars': train_dict,
                                    'train_seeds': seed_dict}
    results_dict['results'] = []

    # Train the HM-nICA model.
    s_est, sort_idx, results_dict, est_params = train(
        data_dict, train_dict, seed_dict, results_dict
    )

    # Save. Fixes two defects in the original:
    #  * exists()-then-mkdir() was a TOCTOU race -> mkdir(exist_ok=True)
    #  * "args.out_dir + filename" broke whenever --out-dir lacked a
    #    trailing separator -> os.path.join
    Path(args.out_dir).mkdir(parents=True, exist_ok=True)
    with open(os.path.join(args.out_dir, "all_results.pickle"), 'ab') as out:
        pickle.dump(results_dict, out, pickle.HIGHEST_PROTOCOL)
if __name__ == '__main__':
    # Equivalent to sys.exit(main()): propagate main()'s return value
    # as the process exit status.
    raise SystemExit(main())
|
17,397 | 0de8988a8329ad56340b81d93794a79363e18815 | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import views
from backend.api.mixins import ExceptionHandlerMixin
from backend.apps.organization.tasks import sync_organization
from backend.apps.role.tasks import sync_system_manager
from backend.biz.org_sync.syncer import Syncer
from backend.biz.role import RoleBiz
from backend.common.authentication import BasicAppCodeAuthentication
class InitializationView(ExceptionHandlerMixin, views.APIView):
    """One-time bootstrap endpoint invoked after first deployment."""

    authentication_classes = [BasicAppCodeAuthentication]
    permission_classes = [IsAuthenticated]

    biz = RoleBiz()

    def post(self, request):
        """Run the first-deployment initialization sequence."""
        username = "admin"
        # Step 1: organization sync for the single "admin" user.
        Syncer().sync_single_user(username)
        # Step 2: add admin to the super-manager members; the grade
        # manager itself was already created by the deploy migration.
        self.biz.add_super_manager_member(username, True)
        # Step 3: best-effort initialization of managers for systems
        # that already exist.
        sync_system_manager()
        # Step 4: async task — full organization sync.
        sync_organization.delay(username)
        return Response({})
|
17,398 | 99676e2960761432327a9ab01a9259ae19766355 | import multiprocessing as mp
import sys
def add_print(num):
total.value += 1
print total.value
#print "\r" + str(total.value)
# sys.stdout.write("Num: " + str(total.value) + "\r")
# sys.stdout.flush()
def setup(t):
    # Pool initializer: runs once in each worker process and publishes
    # the shared counter as a module-level global so add_print can use it.
    global total
    total = t
if __name__ == "__main__":
    # Process-shared signed-int counter ('i'); passed to the workers via
    # the pool initializer so it is inherited correctly on all platforms.
    total = mp.Value('i', 0)
    nums = range(20)
    # 20 worker processes; setup() installs the shared counter in each.
    pool = mp.Pool(processes=20, initializer=setup, initargs=[total])
    pool.map(add_print, nums)
    print "final: " + str(total.value)
17,399 | 60fcbdbd6546af4eb739d15217ef6fb1f8edec9c | import json
from cloudant.client import CouchDB
# Connection settings. NOTE(review): credentials are hard-coded here —
# move to environment variables or a config file before real use.
USERNAME = 'admin'
PASSWORD = 'password'
DATABASE = 'dbname'
client = CouchDB(USERNAME, PASSWORD, url='http://127.0.0.1:5984', connect=True)
# Open an existing database
db = client[DATABASE]
# Define the end point and parameters
endpoint = DATABASE + '/_find'
# Mango query; execution_stats=True asks CouchDB to include timing and
# scan statistics in the response.
params = {'selector': {'source': 'CNN', 'category': 'news'}, 'execution_stats': True}
def endpointAccess(params, endpoint):
    """POST *params* as JSON to *endpoint* on the CouchDB server and
    return the decoded JSON response body."""
    url = '{0}/{1}'.format(client.server_url, endpoint)
    # Serialize with the client's encoder so Cloudant document types
    # round-trip correctly.
    payload = json.dumps(params, cls=db.client.encoder)
    resp = db.r_session.post(url,
                             headers={'Content-Type': 'application/json'},
                             data=payload)
    return resp.json()
response = endpointAccess(params, endpoint)
# BUG FIX: the original print call was missing its closing parenthesis,
# which made the entire script a SyntaxError.
print(response['execution_stats']['results_returned'])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.