index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
4,000 | 4d43e470144a6284d85902b495dc19dc150eb681 | # -*- coding: utf-8 -*-
import time
import errno
from gi.repository import GLib
from ..async import (FutureSourcePair, FutureCanceled, SucceededFuture,
BrokenPipeError, ConnectionError)
__all__ = ('GCore',)
#------------------------------------------------------------------------------#
# GLib Core #
#------------------------------------------------------------------------------#
class GCore (object):
    """Asynchronous core implemented on top of the GLib main loop.

    Helper methods return futures that resolve on timers, idle callbacks
    or file-descriptor events; every outstanding future is tracked in
    self.sources so Dispose () can fail them when the core stops.
    """
    def __init__ (self, context = None):
        # NOTE(review): `context` is accepted but never used — iteration
        # always happens on GLib's default main context (see Iterator).
        self.sources = set ()
    #--------------------------------------------------------------------------#
    # Time                                                                     #
    #--------------------------------------------------------------------------#
    def Time (self, resume, cancel = None):
        """Future resolved at absolute unix time `resume` (seconds)."""
        return self.TimeDelay (resume - time.time (), cancel)
    def TimeDelay (self, delay, cancel = None):
        """Future resolved after `delay` seconds (immediately if negative)."""
        resume = time.time () + delay
        if delay < 0:
            return SucceededFuture (resume)
        # GLib timers are specified in milliseconds.
        return self.source_create (lambda source: source.TrySetResult (resume),
            cancel, GLib.timeout_add, (int (delay * 1000),))
    #--------------------------------------------------------------------------#
    # Idle                                                                     #
    #--------------------------------------------------------------------------#
    def Idle (self, cancel = None):
        """Future resolved on the next idle iteration of the main loop."""
        return self.source_create (lambda source: source.TrySetResult (None), cancel, GLib.idle_add)
    #--------------------------------------------------------------------------#
    # Poll                                                                     #
    #--------------------------------------------------------------------------#
    # Condition masks exposed to callers (GLib IO condition flags).
    READ = GLib.IO_IN
    WRITE = GLib.IO_OUT
    URGENT = GLib.IO_PRI
    DISCONNECT = GLib.IO_HUP
    ERROR = GLib.IO_ERR | GLib.IO_NVAL | GLib.IO_HUP
    def Poll (self, fd, mask, cancel = None):
        """Future resolved when `fd` is ready for the conditions in `mask`.

        Resolves with the ready condition bits; fails with BrokenPipeError
        on hang-up and ConnectionError on other error conditions.
        """
        if mask is None:
            return # no clean up for closed file descriptors
        def resolve (source, fd, cond):
            if cond & ~self.ERROR:
                # at least one requested (non-error) condition fired
                source.TrySetResult (cond)
            else:
                source.TrySetException (BrokenPipeError (errno.EPIPE, 'Broken pipe')
                    if cond & self.DISCONNECT else ConnectionError ())
        # Always watch error conditions too, so failures are reported.
        return self.source_create (resolve, cancel, GLib.io_add_watch, (fd, mask | self.ERROR))
    #--------------------------------------------------------------------------#
    # Execute                                                                  #
    #--------------------------------------------------------------------------#
    def __call__ (self): return self.Execute ()
    def Execute (self):
        """Run the loop until no tracked sources remain, then dispose."""
        try:
            for none in self.Iterator ():
                if not self.sources:
                    return
        finally:
            self.Dispose ()
    #--------------------------------------------------------------------------#
    # Iterator                                                                 #
    #--------------------------------------------------------------------------#
    def __iter__ (self): return self.Iterator ()
    def Iterator (self, block = True):
        """Yield once per iteration of the default GLib main context.

        `block` is forwarded to context.iteration: when true, each step
        waits until at least one event has been dispatched.
        """
        context = GLib.main_context_default ()
        while True:
            context.iteration (block)
            yield
    #--------------------------------------------------------------------------#
    # Private                                                                  #
    #--------------------------------------------------------------------------#
    def source_create (self, resolve, cancel, enqueue, args = None):
        """Create and enqueue future
        enqueue (*args, resolve) -> source_id
        resolve (source, *resolve_args) -> None
        """
        future, source = FutureSourcePair ()
        def resolve_internal (*resolve_args):
            self.sources.discard (source)
            resolve (source, *resolve_args)
            return False # remove from event loop
        if cancel:
            def cancel_cont (result, error):
                # `source_id` is assigned below, before this continuation
                # can run from the main loop.
                GLib.source_remove (source_id)
                self.sources.discard (source)
                source.TrySetCanceled ()
            cancel.Await ().OnCompleted (cancel_cont)
        source_id = enqueue (*(args + (resolve_internal,))) if args else enqueue (resolve_internal)
        self.sources.add (source)
        return future
    #--------------------------------------------------------------------------#
    # Disposable                                                               #
    #--------------------------------------------------------------------------#
    def Dispose (self, error = None):
        """Fail every outstanding future with `error` (default: canceled)."""
        error = error or FutureCanceled ('Core has been stopped')
        # resolve futures
        sources, self.sources = self.sources, set ()
        for source in list (sources):
            source.TrySetException (error)
    def __enter__ (self):
        return self
    def __exit__ (self, et, eo, tb):
        # Dispose with the in-flight exception (if any); never suppress it.
        self.Dispose (eo)
        return False
# vim: nu ft=python columns=120 :
|
4,001 | 8b7fb0789d197e50d7bdde2791b6fac964782469 | from flask import Flask
from flask_mongoengine import MongoEngine
db = MongoEngine()
def create_app(**config_overrides):
    """Application factory: build and configure the Flask app.

    Keyword arguments override values loaded from settings.py.
    """
    flask_app = Flask(__name__)
    # Base settings first, then any caller-supplied overrides.
    flask_app.config.from_pyfile('settings.py')
    flask_app.config.update(config_overrides)
    db.init_app(flask_app)
    # Blueprints are imported lazily to avoid circular imports.
    from user.views import user_app
    flask_app.register_blueprint(user_app)
    from workflow.views import workflow_app
    flask_app.register_blueprint(workflow_app)
    return flask_app
4,002 | e989f73011559080f96802dba4db30361d5626f9 | # the main program of this project
import log
import logging
import os
from ast_modifier import AstModifier
from analyzer import Analyzer
class Demo():
    """Run the parse -> simplify -> analyse pipeline on one source file."""

    def __init__(self):
        # Logger named after the concrete class for readable log lines.
        self.log = logging.getLogger(self.__class__.__name__)

    def start(self, filename: str):
        """Parse *filename*, simplify its AST and run the analyzer."""
        self.log.debug('analyse file: ' + filename)
        modifier = AstModifier(filename)
        # Dump the untouched AST first, for reference.
        original_tree = modifier.origin()
        self.log.info('origin: ' + modifier.dump(original_tree))
        # Reduce it to the simplified form the analyzer expects.
        modifier.simplify()
        self.log.info('simplified: ' + modifier.dump(modifier.simpast))
        # Finally run the semantic analysis pass.
        checker = Analyzer()
        checker.analyze(modifier.simpast)
def main(args):
    """Entry point: analyse the file named in args[1], or a default sample."""
    runner = Demo()
    # Fall back to the bundled test file when no argument is given.
    target = args[1] if len(args) > 1 else './test/apple.py'
    runner.start(os.path.abspath(target))
if __name__ == "__main__":
import sys
main(sys.argv) |
4,003 | e769e930ab8f0356116679bc38a09b83886eb8f6 | # -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
"""
The main service module
MIT License
Copyright (c) 2017-2020, Leo Moll
"""
# -- Imports ------------------------------------------------
from resources.lib.service import MediathekViewService
# -- Main Code ----------------------------------------------
if __name__ == '__main__':
    # Run the full service lifecycle: construct, initialise, main loop,
    # orderly shutdown, then drop the reference.
    SERVICE = MediathekViewService()
    SERVICE.init()
    SERVICE.run()
    SERVICE.exit()
    del SERVICE
|
def longest_word(s, d):
    """Return the longest word in dictionary *d* that is a subsequence of *s*.

    Ties are broken lexicographically; returns '' when no word matches.
    """
    # Try candidates longest-first, alphabetically within each length.
    for candidate in sorted(d, key=lambda w: (-len(w), w)):
        chars = iter(s)
        # `ch in chars` advances the iterator, so this checks that every
        # character of the candidate appears in s, in order (subsequence).
        if all(ch in chars for ch in candidate):
            return candidate
    return ''

print(longest_word("abpcplea", ["a", "b", "c"]))
print(longest_word("abpcplea", ["ba", "ab", "a", "b"]))
print(longest_word('abpcplea', ["ale","apple","monkey","plea"]))
|
4,005 | ccb6973910dba5897f6a12be23c74a35e848313b | # Generated by Django 2.1 on 2018-12-05 00:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates Comment, Reply and User tables.

    NOTE(review): generated by Django 2.1; edits here should normally be
    limited to comments so the migration history stays reproducible.
    """

    dependencies = [
        ('PleniApp', '0006_auto_20181203_1144'),
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('body', models.TextField()),
                ('date', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Reply',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('body', models.TextField()),
                ('date', models.DateTimeField(auto_now_add=True)),
                # Each reply belongs to exactly one comment; deleting the
                # comment cascades to its replies.
                ('comment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='PleniApp.Comment')),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=50)),
                ('password', models.CharField(max_length=50)),
                ('user_type', models.CharField(default='regular', max_length=20)),
            ],
        ),
        # Added after User is created so the FK target already exists.
        migrations.AddField(
            model_name='comment',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='PleniApp.User'),
        ),
    ]
|
4,006 | c33aedbd5aaa853131c297a9382b72c3c646a319 | import os
import base64
from binascii import hexlify
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives import hashes, hmac
from cryptography.hazmat.backends import default_backend
backend = default_backend()
# Llave falsa (dummy test key): first 16 bytes give an AES-128 key.
# Fixed the redundant double assignment (`key = key = ...`).
key = b"vcOqXPg==lz3M0IH4swwYCR/"[:16]
def decrypt(message):
    """Decrypt a urlsafe-base64 token laid out as IV | 20-byte tag | ciphertext.

    Uses AES-CBC with the module-level `key`/`backend`, strips trailing
    NUL padding, and returns the plaintext as str when it is valid UTF-8,
    otherwise as raw bytes.  Debug details are printed along the way.
    NOTE(review): the 20-byte signature slice is printed but never verified.
    """
    message = base64.urlsafe_b64decode(message)
    iv = message[:16]
    signed_data = message[16:36]
    encrypted_data = message[36:]
    cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend)
    print(f"iv {len(iv)} {hexlify(iv).decode('ascii')}")
    print(f"signed {len(signed_data)} {signed_data}")
    print(
        f"encrypted_data {len(encrypted_data)} {hexlify(encrypted_data).decode('ascii')}"
    )
    decryptor = cipher.decryptor()
    plaintext_message = decryptor.update(encrypted_data) + decryptor.finalize()
    # Remove null padding if it exists
    plaintext_message = plaintext_message.split(b"\x00")[0]
    print("result")
    print(hexlify(plaintext_message).decode("ascii"))
    try:
        plaintext_message = plaintext_message.decode("utf-8")
    except UnicodeDecodeError:
        # Was a bare `except:` — only decode failures are expected here;
        # anything else should propagate instead of being swallowed.
        print("could not decode")
    return plaintext_message
# Demo: decrypt a sample token produced with the dummy key above.
print(
    decrypt(
        "JW8iuMPmRApsR43iR//gxUdukchHGWhMm4hyummPuI9IT4xuRxh74uP2j6QPgcZYy1lzfBDEHlTFYHMLwII+Ye5t4hLdfuyMWMle8SHFdtWMei/6y8O8dXU6oCjUM2T1vOeb/XoyoAh9sAsYCdkDdo8DrfBtVGSVutz36RopgJL3NilDgTf6FPn7cBYetLPrago5fMuCG6ygr5iMVMkBDMAX7nzL/Z6NGIbbbpBPNyYIi3LbttjoQIeyRfI4lOg2b1fUnw=="
    )
)
|
4,007 | a61f351391ca1b18359323fd9e49f1efa4c7513c |
# website = urlopen("https://webservices.ulm.edu/forms/forms-list")
# data = bs(website, "lxml")
# forms = data.findAll("span", {"class": "file"})
# forms_list = []
# names = []
# for f in forms:
# forms_list.append(f.find("a")["href"])
# names.append(f.get_text())
# # print(forms_list)
# for f in forms_list:
# webbrowser.open(f)
from urllib.request import urlopen
from bs4 import BeautifulSoup as bs
import lxml
import urllib.request
import webbrowser
# download function
def downloader(url, div, classTag, className, specificData1, specificData2):
    """Scrape *url* and collect attribute values from matching tags.

    Finds every *div* tag whose *classTag* attribute equals *className*,
    then for each match takes the first nested *specificData1* tag and
    reads its *specificData2* attribute (e.g. an <a> tag's "href").
    Returns the list of collected values (None where the nested tag is
    missing) and prints it as a side effect.
    """
    website = urlopen(url)
    data = bs(website, "lxml")
    # BUG FIX: BeautifulSoup expects an {attribute: value} dict here; the
    # old code built a one-element *set* out of a concatenated string, so
    # the filter never matched as intended.
    contents = data.findAll(div, {classTag: className})
    contents_list = []
    names_list = []
    for file in contents:
        # BUG FIX: find(tag) locates the nested element, then indexing it
        # reads the attribute.  The old code indexed a Python *string*
        # with a string, which raises TypeError at runtime.
        nested = file.find(specificData1)
        contents_list.append(nested[specificData2] if nested else None)
        names_list.append(file.get_text())
    print(contents_list)
    return contents_list
def main():
    """Interactively gather scraping parameters and run the downloader."""
    website = input("Enter the website you want to download file from: ")
    div = input("Enter the div/span (be as specific as you can): ")
    classTag = input("Enter the class/id tag you want to extract link from: ")
    className = input("Enter the class/id name: ")
    specific1 = input("Enter specific tag a, li, : ")
    specific2 = input("Enter specific tag inside specific1 : ")
    # download the content
    contents = downloader(website, div, classTag, className, specific1, specific2)
    print(contents)

main()
|
4,008 | a847fc32af2602db3b5545c15186c0209eb8ae8d | # -*- coding: utf-8 -*-
__author__ = 'virtual'
# Status code -> display attributes.  Several names are intentionally
# empty placeholders; -1 is the generic "unknown" entry.
statuses = {
    None: {'name': 'None', },
    -1: { 'name': 'unknown', },
    0: { 'name': '',},
    1: { 'name': 'Новый',},
    2: { 'name': '',},
    3: { 'name': 'Активный', },
    4: { 'name': 'Приостановленный',},
    5: { 'name': 'Заблокированный', },
    6: { 'name': 'Удаленный', },
    7: { 'name': 'Закрытый', },
    8: { 'name': '', },
}

def get_status_name(status):
    """Return '[<code>]<name>' for a status code.

    Unrecognised codes fall back to the 'unknown' (-1) entry instead of
    raising KeyError, and %s formatting tolerates the None code that the
    table itself contains (the old %d crashed on it).  Output for known
    integer codes is unchanged.
    """
    info = statuses.get(status, statuses[-1])
    return '[%s]%s' % (status, info['name'])
|
class StartStateImpl:
    """Conversation state that asks the user for a corporate e-mail address."""
    # NOTE(review): these two adjacent literals concatenate with no
    # separator, so the prompt reads "...E-mailАдрес..." — a missing
    # '\n' or space is likely intended; confirm before changing the text.
    start_message = "Для продолжения мне необходим ваш корпоративный E-mail"\
        "Адрес вида: <адрес>@edu.hse.ru (без кавычек)"
    thank_you = "Спасибо за ваш адрес. Продолжаем."

    def __init__(self):
        pass

    def enter_state(self, message, user):
        # Prompt for the e-mail address when this state becomes active.
        user.send_message(StartStateImpl.start_message)

    def exit_state(self, message, user):
        # Acknowledge the address on the way out of the state.
        user.send_message(StartStateImpl.thank_you)

    def update_state(self, message, user):
        # No per-message handling in this state.
        pass
class StartState(StartStateImpl):
    """Singleton accessor for the shared start state.

    NOTE(review): __new__ caches and returns a StartStateImpl instance,
    so StartState() is an instance of the *base* class, not of StartState.
    """
    obj = None  # lazily created shared instance

    def __new__(cls, *args, **kwargs):
        if cls.obj is None:
            cls.obj = StartStateImpl()
        return cls.obj
|
4,010 | adec7efceb038c0ecb23c256c23c2ea212752d64 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# Multi-layer perceptron trained with backpropagation (2-2-1 network).
import numpy as np
import theano
import matplotlib.pyplot as plt
# In[2]:
# Training set: XNOR truth table (target is 1 when the inputs are equal).
inputs=[[0,0],
        [1,0],
        [0,1],
        [1,1]]
outputs=[1,0,0,1]
# In[3]:
# Symbolic input matrix: one row per training example.
x=theano.tensor.matrix(name='x')
# In[4]:
# Hidden layer: each neuron takes 2 inputs; w1/w2 are hidden weights,
# w3 is the output neuron's weights.  All start from random normals.
w1val=np.asarray([np.random.randn(),np.random.randn()])#weight of synapse
w1=theano.shared(w1val,name='w1')
w2val=np.asarray([np.random.randn(),np.random.randn()])#weight of synapse
w2=theano.shared(w2val,name='w2')
w3val=np.asarray([np.random.randn(),np.random.randn()])#weight of synapse
w3=theano.shared(w3val,name='w3')
# In[5]:
# One bias per neuron (arbitrary non-zero starting values).
b1 = theano.shared(1.1,name='b1')
b2 = theano.shared(1.2,name='b2')
b3 = theano.shared(1.3,name='b3')
# In[6]:
# Forward pass: sigmoid activation for every neuron.
#hidden layer
a1sum=theano.tensor.dot(x,w1)+b1
a2sum=theano.tensor.dot(x,w2)+b2
a1=1/(1+theano.tensor.exp(-1*a1sum))
a2=1/(1+theano.tensor.exp(-1*a2sum))
#output layer neuron
# stack combines the two hidden activations column-wise so they can be
# fed to the output layer as a (n_examples, 2) matrix
x2 = theano.tensor.stack([a1,a2],axis=1)
# In[7]:
'''if we write
[[a11,a12,a21,a22],[a33,a34,a43,a44]]-> inputs
what stack will do is
[a11,a33],[a12,a34],[a21,a43],[a22,a44]'''
a3sum=theano.tensor.dot(x2,w3)+b3
a3=1/(1+theano.tensor.exp(-1*a3sum))
#final output (network prediction)
ahat=a3
#actual output (training targets)
a=theano.tensor.vector(name='a')
# In[8]:
# Binary cross-entropy cost for the sigmoid output.
cost=-(a*theano.tensor.log(ahat)+(1-a)*theano.tensor.log(1-ahat)).sum()# defined for sigmoid 1/(1+e^-z)
# Gradient descent: symbolic gradients of the cost w.r.t. every parameter.
dcostdw1 = theano.tensor.grad(cost,w1)
dcostdw2 = theano.tensor.grad(cost,w2)
dcostdw3 = theano.tensor.grad(cost,w3)
dcostdb1=theano.tensor.grad(cost,b1)
dcostdb2=theano.tensor.grad(cost,b2)
dcostdb3=theano.tensor.grad(cost,b3)
# Gradient-descent update rules, learning rate 0.02.
wn1=w1-0.02*dcostdw1
wn2=w2-0.02*dcostdw2
wn3=w3-0.02*dcostdw3
wb1=b1-0.02*dcostdb1
wb2=b2-0.02*dcostdb2
wb3=b3-0.02*dcostdb3
# Compile one training step: returns (predictions, cost) and applies all
# parameter updates in place.
train=theano.function([x,a],[ahat,cost],updates=[(w1,wn1),(w2,wn2),(w3,wn3),(b1,wb1),(b2,wb2),(b3,wb3)])
cost1=[]
val1=[]
# Full-batch training for 25000 epochs, recording cost per epoch.
for i in range(25000):
    pval,costval=train(inputs,outputs)
    print(costval)
    val1.append(pval)
    cost1.append(costval)
# In[9]:
# Report the final predictions and plot the learning curve.
print('the final outputs are:')
for i in range(len(inputs)):
    print("the output of x1=%d | x2=%d is %.2f"%(inputs[i][0],inputs[i][1],pval[i]))
plt.plot(cost1,color='red')
plt.show()
# In[ ]:
# In[ ]:
|
4,011 | c4aa5869d5f916f13aa924c19dc9792337619b31 | from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
import random
def sim_data():
    """Simulate a random regression problem and split it for training.

    Generator settings (sample count, feature counts, noise) are drawn at
    random; returns (X_train, y_train, X_test, y_test, params) where
    params records the settings used.
    """
    # Draw the generator settings (same draw order as before).
    params = {"n_samples": random.randint(500, 5000),
              "n_features": random.randint(5, 25)}
    params["n_informative"] = random.randint(5, params["n_features"])
    params["noise"] = random.uniform(0.5, 2)
    # Build the synthetic regression data set.
    X, y = make_regression(n_samples=params["n_samples"],
                           n_features=params["n_features"],
                           n_informative=params["n_informative"],
                           noise=params["noise"])
    # Default 75/25 train/test split.
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    return X_train, y_train, X_test, y_test, params
|
4,012 | a9a60d4bee45a4012d004bacac7812160ed4241c | #!/usr/bin/env python
# coding: utf-8
import pika
# Connect to the broker and publish one message on the 'hello' queue.
connection = pika.BlockingConnection(pika.ConnectionParameters(
    host='192.168.10.28'
))
channel = connection.channel()
# Declaring the queue is idempotent; ensures it exists before publishing.
channel.queue_declare(queue='hello')
# Default exchange routes by queue name via routing_key.
channel.basic_publish(exchange='',
                      routing_key='hello',
                      body='Hello World!')
# BUG FIX: Python 2 print statement (a SyntaxError on Python 3) replaced
# with the print() function; also closed the unbalanced quote in the text.
print("[x] Sent 'Hello World!'")
connection.close()
4,013 | c73bea686786a30f298500968cfd01e2d5125d75 | import copy
import six
from eclcli.common import command
from eclcli.common import utils
from eclcli.storage.storageclient import exceptions
class ListVolumeType(command.Lister):
    """List available storage volume types, optionally filtered by name."""

    def get_parser(self, prog_name):
        parser = super(ListVolumeType, self).get_parser(prog_name)
        parser.add_argument(
            "--name",
            metavar="<string>",
            help="Filter results by virtual storage name")
        return parser

    def take_action(self, parsed_args):
        storage_client = self.app.client_manager.storage
        # Server-side filter by display name (None means no filter).
        search_opts = {
            'display_name': parsed_args.name,
        }
        columns = ['ID', 'Name', 'available_volume_size',
                   'available_volume_throughput',
                   'available_iops_per_gb']
        column_headers = copy.deepcopy(columns)
        data = storage_client.volume_types.list(search_opts=search_opts)
        # Filter client-side as well, in case the server ignored the opt.
        if parsed_args.name is not None:
            data = utils.filter_list_with_property(data, "name", parsed_args.name)
        # Flatten each type's extra_specs onto the object so the spec
        # values can be rendered as ordinary columns.
        for vtype in data:
            for key, value in vtype.extra_specs.items():
                setattr(vtype, key, value)
        return (column_headers,
                (utils.get_item_properties(
                    s, columns,
                ) for s in data))
class ShowVolumeType(command.ShowOne):
    """Show the details of a single volume type, looked up by ID."""

    def get_parser(self, prog_name):
        parser = super(ShowVolumeType, self).get_parser(prog_name)
        parser.add_argument(
            "volume_type",
            metavar="VOLUME_TYPE_ID",
            help="volume type to display (ID)")
        return parser

    def take_action(self, parsed_args):
        storage_client = self.app.client_manager.storage
        try:
            volume_type = storage_client.volume_types.get(parsed_args.volume_type)
            printout = volume_type._info
            # Flatten extra_specs into the top-level mapping for display,
            # then drop the nested dict itself.
            for key, value in printout.get("extra_specs").items():
                printout[key] = copy.copy(value)
            del printout["extra_specs"]
        except exceptions.ClientException as clientexp:
            # Surface API errors as a (message, details, code) row set
            # instead of crashing the CLI.
            printout = {"message": clientexp.message,
                        "details": clientexp.details,
                        "code": clientexp.code}
        return zip(*sorted(six.iteritems(printout)))
|
4,014 | 20f0de097fdd8f2a435c06a73c6a90cc7ebc69ad | from django.contrib import admin
# Register your models here.
from blog.models import Post,Category,Profile
# Expose the blog models in the Django admin site.
admin.site.register(Profile)
admin.site.register(Category)
admin.site.register(Post)
# Read five integers, one per line.
arr = []
for i in range(5):
    arr.append(int(input()))
# Cheapest of the first three plus the cheaper of the last two, with a
# flat 50 discount applied to the total.
print(min(arr[0],arr[1],arr[2])+min(arr[3],arr[4])-50)
4,016 | 24fa41f916b54345e4647354f972bd22e130decf | #YET TO COMMENT.
import numpy as np
from functools import reduce
class ProbabilityNetwork:
    """A Bayesian network over boolean nodes 0..n-1.

    edges is a list of (parent, child) pairs; probs(node, values) must
    return P(node=True | parents), where values maps node -> bool/None.
    """

    def __init__(self, n, edges, probs):
        self.nodes = list(range(n))
        self.edges = edges
        self.probs = probs

    def parents(self, node):
        """Return the direct parents of *node*.

        BUG FIX: previously read the module-level `edges` global instead
        of self.edges, so it broke (or silently used the wrong graph) for
        any network not built from that global.
        """
        return [a for a, b in self.edges if b == node]

    def ancestralOrder(self):
        """Topological order: each node appears after all of its parents."""
        order = []
        while len(order) < len(self.nodes):
            for node in self.nodes:
                if node in order:
                    continue
                # Append once every incoming edge's source is already placed.
                if not any((edge[0] not in order) and (edge[1] == node)
                           for edge in self.edges):
                    order.append(node)
        return order

    def logicSampling(self, evidences, targetNode, niters=10000000):
        """Estimate P(targetNode=True | evidences) by rejection sampling."""
        ancestralOrder = self.ancestralOrder()
        hits = 0
        total = 0
        for it in range(niters):
            fail = False
            # True: present. False: not present. None: not yet sampled.
            values = dict([[i, None] for i in self.nodes])
            for node in ancestralOrder:
                pNode = self.probs(node, values)
                values[node] = np.random.random() < pNode
                if node in evidences and evidences[node] != values[node]:
                    fail = True  # sample contradicts the evidence: reject it
                    break
            if fail:
                continue
            total += 1
            if values[targetNode]:
                hits += 1
        return hits / total

    def weightedLikelihood(self, evidences, targetNode, niters=10000000):
        """Estimate P(targetNode=True | evidences) by likelihood weighting."""
        evidenceNodes = evidences.keys()
        # Evidence nodes are clamped, so only sample the remaining ones.
        ancestralOrder = [node for node in self.ancestralOrder()
                          if node not in evidenceNodes]
        cumsumHit = 0
        cumsumTotal = 0
        for it in range(niters):
            values = dict([[i, None] for i in ancestralOrder])
            for evNode in evidenceNodes:
                values[evNode] = evidences[evNode]
            for node in ancestralOrder:
                pNode = self.probs(node, values)
                values[node] = np.random.random() < pNode
            # Sample weight = likelihood of the evidence under this sample.
            currProb = reduce(lambda x, y: x * y,
                              [self.probs(i, values) if values[i]
                               else 1 - self.probs(i, values)
                               for i in evidenceNodes])
            if values[targetNode]:
                cumsumHit += currProb
            cumsumTotal += currProb
        return cumsumHit / cumsumTotal
# Example 6-node network: 0 is the root, 1 and 2 are its children, etc.
edges=[(0,1),(0,2),(1,3),(1,4),(2,4),(2,5)]

def probs(node, evidences):
    """Conditional probability table for the example network.

    Returns P(node=True) given the parent values in *evidences*.
    """
    if node == 0:
        return 0.3  # root prior
    if node == 1:
        return 0.9 if evidences[0] else 0.2
    if node == 2:
        return 0.75 if evidences[0] else 0.25
    if node == 3:
        return 0.6 if evidences[1] else 0.1
    if node == 4:
        # Node 4 has two parents (1 and 2): noisy-OR style table.
        one, two = evidences[1], evidences[2]
        if one and two:
            return 0.8
        if one:
            return 0.6
        if two:
            return 0.5
        return 0
    if node == 5:
        return 0.4 if evidences[2] else 0.1
# Example query: P(node 0 = True | 3=T, 4=T, 5=F), estimated with both
# samplers.  NOTE(review): the default niters (10 million) makes these
# two calls very slow.
pn=ProbabilityNetwork(6, edges, probs)
evidences=dict([[3,True],[4,True],[5,False]])
print(pn.logicSampling(evidences, 0))
print(pn.weightedLikelihood(evidences,0))
|
4,017 | 873a53983e3aeb66bd290450fb9c15a552bd163c | #!/usr/bin/env python
import os
import sys
import click
import logging
from signal import signal, SIGPIPE, SIG_DFL
from ..helpers.file_helpers import return_filehandle
from ..helpers.sequence_helpers import get_seqio_fastq_record
signal(SIGPIPE, SIG_DFL)
def subset_fastq(fastq, subset):
    '''Subset a FASTQ stream: write every 1/subset reads to STDOUT.

    fastq: path to a (possibly compressed) FASTQ file, or falsy to read
    from STDIN.  Returns a summary string with the number of reads kept.
    '''
    # The stdin and file branches previously duplicated the same loop
    # body verbatim; pick the input stream once and loop once instead.
    stream = return_filehandle(fastq) if fastq else sys.stdin
    count = 0
    total = 0
    for record in get_seqio_fastq_record(stream):  # get SeqIO records
        count += 1
        if count == subset:  # keep one read out of every `subset`
            count = 0
            total += 1
            sys.stdout.write(record.format('fastq'))
            sys.stdout.flush()
    return 'Output {} reads'.format(total)
@click.command()
@click.option('--fastq',
              help='''FASTQ file to subset, can be compressed''')
@click.option('--subset', metavar = '<INT>',
              help='''Take every N reads (default:10)''', default=10)
@click.option('--log_file', metavar = '<FILE>', default='./subset_fastq.log',
              help='''File to write log to.  (default:./subset_fastq.log)''')
@click.option('--log_level', default='INFO',
              help='''Log level: DEBUG, INFO, WARNING, ERROR, CRITICAL (default:INFO)''')
def main(fastq, subset, log_file, log_level):
    '''Subset FASTQ Files.

        cat input*.fastq | subset_fastq.py

        or

        subset_fastq.py --fastq input.fastq
    '''
    # Resolve the textual level to its logging constant (INFO fallback).
    log_level = getattr(logging, log_level.upper(), logging.INFO)
    msg_format = '%(asctime)s|%(name)s|[%(levelname)s]: %(message)s'
    logging.basicConfig(format=msg_format, datefmt='%m-%d %H:%M',
                        level=log_level)
    # Mirror log records into the requested log file as well.
    log_handler = logging.FileHandler(log_file, mode='w')
    formatter = logging.Formatter(msg_format)
    log_handler.setFormatter(formatter)
    logger = logging.getLogger('subset_fastq')
    logger.addHandler(log_handler)
    if fastq:
        fastq = os.path.abspath(fastq)  # None means: read from STDIN
    # Run the subsetter and log its summary line.
    logger.info(subset_fastq(fastq, subset))

if __name__ == '__main__':
    main()
|
4,018 | 9767014992981001bd2e8dece67525650c05a2a8 | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import sublime
import sublime_plugin
"""
Copy and Paste selinium module and urllib3 module of Python in
"sublime-text-3/Lib/Python3.3" folder of sublime-text3
"""
def process(string):
    """Submit the currently open file to Codeforces via an attached Chrome.

    *string* is "<contestid> <problemid>", e.g. "1100 A".  Relies on a
    Chrome instance already running with remote debugging enabled.
    """
    # Get active file name
    filename = sublime.active_window().active_view().file_name()
    contestid, problem = string.strip().split()
    # Change executor_url according to your preference
    executor_url = "127.0.0.1:9222"  # change 9222 to the port you have used.
    url = "codeforces.com/contest/" + contestid + "/problem/" + problem
    _chrome_options = Options()
    _chrome_options.add_argument('disable-infobars')
    _chrome_options.add_argument("--start-maximized")
    # Attach to the already-running Chrome via its debugging address.
    _chrome_options.add_experimental_option("debuggerAddress", executor_url)
    try:
        driver = webdriver.Chrome(options=_chrome_options)
        driver.implicitly_wait(30)
        try:
            driver.get("http://" + url.rstrip())
            driver.find_element_by_name("sourceFile")
            driver.find_element_by_css_selector('input[type="file"]').clear()
            # Send File to Codeforces
            driver.find_element_by_css_selector(
                'input[type="file"]').send_keys(filename.rstrip())
            # Click on submit button
            driver.find_element_by_class_name("submit").click()
        except Exception:
            # In case Codeforces is too busy or File is untitled.
            sublime.error_message('Either Codeforces is too busy or \
File is Untitled.')
    except Exception:
        # In case Server is not active.
        sublime.error_message('Server is not active.')
class SolveItCommand(sublime_plugin.TextCommand):
    """
    Submit solution from sublime by getting contest ID and problem ID
    from the user
    """
    def run(self, _):
        window = self.view.window()
        # Input Panel to get Contest ID and Problem ID from the user
        window.show_input_panel(
            "Enter ContestID & ProblemID : ",
            "",
            self.on_done,
            self.on_change,
            self.on_cancel)

    def on_done(self, input_data):
        # Panel confirmed: hand the "<contest> <problem>" text to process().
        process(input_data)

    def on_change(self, input_data):
        # No live validation while the user is typing.
        pass

    def on_cancel(self):
        # Nothing to clean up on cancel.
        pass
|
4,019 | 9620479e9ac27c1c7833c9a31b9cb18408b8d361 | import time
inputStr = """crruafyzloguvxwctqmphenbkd
srcjafyzlcguvrwctqmphenbkd
srijafyzlogbpxwctgmphenbkd
zrijafyzloguvxrctqmphendkd
srijabyzloguvowcqqmphenbkd
srijafyzsoguvxwctbmpienbkd
srirtfyzlognvxwctqmphenbkd
srijafyzloguvxwctgmphenbmq
senjafyzloguvxectqmphenbkd
srijafyeloguvxwwtqmphembkd
srijafyzlogurxtctqmpkenbkd
srijafyzlkguvxictqhphenbkd
srijafgzlogunxwctqophenbkd
shijabyzloguvxwctqmqhenbkd
srjoafyzloguvxwctqmphenbwd
srijafyhloguvxwmtqmphenkkd
srijadyzlogwvxwctqmphenbed
brijafyzloguvmwctqmphenhkd
smijafyzlhguvxwctqmphjnbkd
sriqafvzloguvxwctqmpheebkd
srijafyzloguvxwisqmpuenbkd
mrijakyuloguvxwctqmphenbkd
srnfafyzloguvxwctqmphgnbkd
srijadyzloguvxwhfqmphenbkd
srijafhzloguvxwctdmlhenbkd
srijafyzloguvxwcsqmphykbkd
srijafyzlogwvxwatqmphhnbkd
srijafyzlozqvxwctqmphenbku
srijafyzloguvxwcbamphenbgd
srijafyzlfguvxwctqmphzybkd
srijafyzloguqxwetqmphenkkd
srijafyylogubxwttqmphenbkd
srijafyzloguvxzctadphenbkd
srijafyzloguoxwhtqmchenbkd
srijafyzloguvxwcvqmzhenbko
srijnfyzloguvxwctqmchenjkd
srijaryzloggvxwctqzphenbkd
srijafhzleguvxwcxqmphenbkd
ssijafyzllguvxfctqmphenbkd
srijafyzloguvxdctqmfhenbcd
srijafyzloguvxfctqmplynbkd
srijaftzlogavxwcrqmphenbkd
sriwaoyzloguvxwctqmphenbtd
srijahyzlogunxwctqmphenbvd
srjjafyzloguzxwctumphenbkd
nrijafyzlxguvxwctqmphanbkd
srijafezlqguyxwctqmphenbkd
srijafygloguvxwjtqcphenbkd
erijafyzloguvxoctqmnhenbkd
ssijafyzllguvxwbtqmphenbkd
sriaafyzloguvxwctqqphenbkv
frijafyzloguvswctwmphenbkd
srijafyzyogkvxwctqmprenbkd
syijafyzuoguvxwctqmkhenbkd
srijafyzloganxwctqmphenbkf
srijafyzloguvxwftqmxhenbkq
srijafyflogxvxwctqmghenbkd
srijafyzsoguvxwctqmpjenwkd
srujafylloguvxwctqmphenckd
srijafyzlpzuvxwctqmphenbud
srijafyzlogfvxwctqmhhenbwd
srijafjzlogusxwctqmphepbkd
srijlfyzloguvxwctqfphenzkd
srijafyzlogwvxwctqyphenbqd
srijafyzloluvxwctqtphenukd
srizafyzlowuvxwctqmphqnbkd
sritafkzlkguvxwctqmphenbkd
sbijafdzloguvxgctqmphenbkd
crijafyeloguvxwctqmpsenbkd
srijafyvlogulxwctqmphenbkk
srijafyologuvxwctqmehegbkd
siijafyzloguvxwctjmphenbmd
srijafyzlupuvxwctqmpheabkd
srijafyzlogumxwctqqphanbkd
srijxfyzlogujxwcqqmphenbkd
irijafizeoguvxwctqmphenbkd
sgijafyzloguvtwctqmpfenbkd
srijzfyzloguvmwctnmphenbkd
srijafyzwohuvxwctqmthenbkd
srijafyzlhguvxoctqwphenbkd
srgjafyplogxvxwctqmphenbkd
srijafyqlogovxwctqzphenbkd
srijafjzloguvlnvtqmphenbkd
srijafyzooguvxwctqmphenvud
srijafyzgoguvxwctumphgnbkd
srijaffzloguvxwdqqmphenbkd
srijafyzlogugxwctqxphenbkr
srijafyzlogutxwctqmmcenbkd
srifafyzlhguwxwctqmphenbkd
mrimajyzloguvxwctqmphenbkd
sriyafyzloguvxwcthmphejbkd
srieakyzlokuvxwctqmphenbkd
srisafyzloguhxwctqmphecbkd
srijanyzloguvxcctqmxhenbkd
srijafyzypguvxwctqmqhenbkd
sryjtfyzlvguvxwctqmphenbkd
srijafyzlsguvxwctqmqfenbkd
srijafyzlogudxwbtqwphenbkd
srijysyzloguvxwctqmpvenbkd
srijafyzloggvxwjtqmphegbkd
srijgfyzloguvxwctqmbhdnbkd
ssijufyzloguvawctqmphenbkd
skojafyzloguvxwctqmphenbnd
srijafylloguvxwcqqmpienbkd
trioafyzloguvqwctqmphenbkd
srijafydloguvxwctqmpzjnbkd
saijafvzloguvxwcqqmphenbkd
srhjapyzloguvxwctqmbhenbkd
srijafyzlfguvxwcsqmpwenbkd
shijafyzboguvxwctqmphenbmd
srizafysloguvxwrtqmphenbkd
srijafyzloguvxwciqmwhenbkj
qrijafyzloduvxwctqmphenbko
srijefyuloguvxwctqmphenbed
srijafyzlobuvxwctqmphenhbd
srijafyzloxuvxwctqmpheabkq
srijafyzloguvrwctqmghenkkd
sfisafywloguvxwctqmphenbkd
srgjafyzlogurxwctqmphenbkp
srijafhzloguvxwcjqmphenhkd
srijafyylogufxwrtqmphenbkd
srijafyzvoguvxwzkqmphenbkd
sqijafyzloguvxwctqmpheqbxd
srijafyvloguvxwctqzpherbkd
srijufyzloguvxlcsqmphenbkd
srijafykloguvxlccqmphenbkd
srijafyzloguexwcrqmphenzkd
sridifyzloguyxwctqmphenbkd
srijafyzlogfvxwctqlphenbkl
srijafyzlodqdxwctqmphenbkd
srijafyzloruvxactqmphenekd
grijafyzloguvxpctmmphenbkd
srsjakyzloguvxwctqmphvnbkd
srikafyvloguvxwrtqmphenbkd
srijafyzloguvxwctqjpserbkd
jrijafyzloguvxwctqmpgesbkd
swijafyzluguvxwctqmfhenbkd
srijanynlogovxwctqmphenbkd
jrijafyzloguvxwctymphrnbkd
srinafyzloguvewctqmphenbzd
srijakyzloguvxwctqmphcnbka
srijafyhlobuvxwctqmphenbka
srijafyzcogusxwctqmphwnbkd
srijavyzlosuvxwctqmphjnbkd
orijafyzxoguvxwcnqmphenbkd
srijafyzlogcvxwvtqmthenbkd
srijapyzloauvxwctqmphenvkd
srijaflzloguhxwctqmphenbwd
smijafyzlonuvxwctqmphenbkw
jrijafyzloguvxwclqmnhenbkd
srijaqyzloguvqwctqmphenskd
srijasyzloguvxwctqmvhenbku
crijtfyzloguvxwctqmthenbkd
srrkafyzvoguvxwctqmphenbkd
srijatyzloguvewctqmphenbld
srfjafyyloguvnwctqmphenbkd
srijafyzloguvxwctqjpbenbkt
hrijafyzooguvxwctqmphenbld
srijafbzlogscxwctqmphenbkd
srinafyzlogxvxwctqqphenbkd
slijafyzloglvxwctqmphenbdd
srijafyzlogjvxwcsqmphenbld
sryjcfyzloguvewctqmphenbkd
srijafyzloguexwctqmohknbkd
jaijafyzlogevxwctqmphenbkd
srijafbzlogavxwctqmphenbki
srijafozlogpvxwctqmphgnbkd
srijdfyzloguvxwczqmphenbkm
srijafyzlobuvxwctqmphxndkd
mrijifyzlhguvxwctqmphenbkd
srijafyzloguvxbctumphjnbkd
srijafyzloyuvxwptqmphlnbkd
arijafyzloguvxwcsqmohenbkd
srijaftzioguvxwttqmphenbkd
srijafyzlqsuvxwctqmphxnbkd
srijafyzioguvxwctqnphetbkd
prijafbzloguvxdctqmphenbkd
srijaeyzlnguvxwmtqmphenbkd
srijofyzloguvqwctqmphonbkd
srixaryzpoguvxwctqmphenbkd
srijafyzlowuvxwcwhmphenbkd
srijafydloguvxwctqmptenikd
srijqfyzlogtvfwctqmphenbkd
srijafyzloguvxlctqmpvenbgd
srijafyzlbguvxwjtqgphenbkd
srijafyzlohuqxwctqmphenbka
srijafyzroguvxictqmphynbkd
srijafyzloguvxdctjmphenjkd
srijaoczloguvxwctqmphenbjd
srajafhzloguvxwctqmphenbke
srijofyzloduvxwctqmphanbkd
srijafytloguvxwmtnmphenbkd
srijafyzuoguvxwceqmpgenbkd
rrijafyzloyuvxwctqmphlnbkd
srljafyzloguvxictqmohenbkd
srijafyzlogulxwcrqrphenbkd
srajafyzloguvxwctqmphanbke
srijafyzlhguvxwxtqmpheabkd
sxijafyzloggwxwctqmphenbkd
srijafyultguvxwctqmphinbkd
srijafyzloguvtwctqmfhvnbkd
srijafwzloruvxwctquphenbkd
srbjafyzxoguuxwctqmphenbkd
erijafyzlxguvxbctqmphenbkd
srijagyzlojubxwctqmphenbkd
srijafyzloguvxwdtqmchenakd
srijafkzlogukxwctqiphenbkd
mridafyzloguvxwctqmphenrkd
szqjafyzloguvxwctqmpheibkd
srijahyzloguvxwctcmphenekd
srijafyzloguvxwczpuphenbkd
srijafyzcoguvfwctqmphenbkq
qriiafyzloguvxwctqmpheebkd
srijpfyzloguvxlctqmphenokd
srijzfyzlotuvxwcjqmphenbkd
srinafyqloguvxwctfmphenbkd
srijafyzlogjvxpltqmphenbkd
srijafyzlotuvxwutqmphenbtd
sridafyzloguvxwctqmpyenokd
srxjafyzqogyvxwctqmphenbkd
ssijafyzzoguvxwctqmphenbad
srijafrzloguvxwctqmphekpkd
srijafyzlfgrvxactqmphenbkd
srijafyzroguvxwttqmphekbkd
srijefyzloguvxwctqmpqenbrd
srijefycloguvxwctqmchenbkd
srzjafyzloguvxwcqqmphanbkd
srijauyzlhguvxwctqmphenbgd
srijafyzloguvmwvnqmphenbkd
srihafyzloguvlwotqmphenbkd
srigafyzloguvxwctqmphennsd
sriuafzzloguvxwcuqmphenbkd
srijavuzllguvxwctqmphenbkd
srijafjzloguvlnctqmphenbkd
lrirafyzloguvxwctqmphenbld
soijarxzloguvxwctqmphenbkd
srijapyzlnguvxwctqmdhenbkd
srijafyzkogujxmctqmphenbkd
srijafuzloguvxwcsqvphenbkd
srijagyzzoguvxwctqmpvenbkd
srijafyzlovuvxwctqmrhenbxd
srijafyzqoguvxwctwmpienbkd
sxijafyzloguvxwutqmphenlkd
srijafyzlhgzvxwctqmphqnbkd
srijajyzloguvxwcbwmphenbkd
srijazyzloguvxwhtqmphenbkx
srgjafyzloguvvwctqmphdnbkd
rrivafyzloguvxjctqmphenbkd
srijifyzdoguvxwctqmphenbka
hrijafyzloguvxectqmpheybkd"""
# Advent-of-Code style checksum: count IDs containing any letter exactly
# twice, and IDs containing any letter exactly three times, then multiply.
from collections import Counter

startTime = time.time()
inputList = inputStr.splitlines()
doubleDupes = 0
tripleDupes = 0
for box_id in inputList:
    # One Counter pass per ID replaces the old per-character str.count
    # (O(len^2) per ID) and its flag bookkeeping, whose early-exit
    # `elif hasDoubleDupes and hasTripleDupes: break` sat on the wrong
    # branch and only ran when the current char's count was neither 2
    # nor 3.  Each ID still counts at most once toward each tally.
    letter_counts = Counter(box_id).values()
    if 2 in letter_counts:
        doubleDupes += 1
    if 3 in letter_counts:
        tripleDupes += 1
print(doubleDupes)
print(tripleDupes)
checkSum = doubleDupes * tripleDupes
print('Checksum: ' + str(checkSum))
print("%s seconds" % (time.time() - startTime))
4,020 | 1de46ee2818b4cb2ae68ef5870581c341f8d9b04 | # coding=utf-8
from datetime import datetime, timedelta
from flask import current_app as app
from flask_script import Command
from main import db
from models.payment import Payment
from models.product import ProductGroup, Product, PriceTier, Price, ProductView, ProductViewProduct
from models.purchase import Purchase
def create_product_groups():
top_level_groups = [
# name, capacity, expires
('admissions', datetime(2018, 9, 3), app.config.get('MAXIMUM_ADMISSIONS')),
('parking', datetime(2018, 9, 3), None),
('campervan', datetime(2018, 9, 3), None),
('merchandise', datetime(2018, 8, 12), None),
]
for name, expires, capacity in top_level_groups:
if ProductGroup.get_by_name(name):
continue
pg = ProductGroup(name=name, type=name, capacity_max=capacity, expires=expires)
db.session.add(pg)
db.session.flush()
allocations = [
# name, capacity
('vendors', 100),
('sponsors', 200),
('speakers', 100),
('general', 800),
]
admissions = ProductGroup.get_by_name('admissions')
for name, capacity in allocations:
if ProductGroup.get_by_name(name):
continue
ProductGroup(name=name, capacity_max=capacity, parent=admissions)
view = ProductView.get_by_name('main')
if not view:
view = ProductView('main', 'tickets')
db.session.add(view)
db.session.flush()
general = ProductGroup.get_by_name('general')
products = [
# name, display name, transferable, badge, capacity, description, (std cap, gbp eur), (early cap, gbp, eur), (late cap, gbp, eur)
('full', 'Full Camp Ticket', True, True, None, 'Full ticket',
((1500, 115, 135), (250, 105, 125), (None, 125, 145))
),
('full-s', 'Full Camp Ticket (Supporter)', True, True, None, 'Support this non-profit event by paying a bit more. All money will go towards making EMF more awesome.',
((None, 150, 180),)
),
('full-sg', 'Full Camp Ticket (Gold Supporter)', True, True, None, 'Support this non-profit event by paying a bit more. All money will go towards making EMF more awesome.',
((None, 200, 240),)
),
('u18', 'Under-18', True, False, 150, 'For visitors born after August 30th, 2000. All under-18s must be accompanied by an adult.',
((None, 55, 63),)
),
('u12', 'Under-12', True, False, 50, 'For children born after August 30th, 2006. All children must be accompanied by an adult.',
((None, 0, 0),)
),
]
order = 0
for name, display_name, has_xfer, has_badge, capacity, description, prices in products:
if Product.get_by_name('general', name):
continue
product = Product(name=name, display_name=display_name, capacity_max=capacity,
description=description, parent=general,
attributes={'is_transferable': has_xfer,
'has_badge': has_badge})
for index, (price_cap, gbp, eur) in enumerate(prices):
if len(prices) == 1 or index == 0:
tier_name = name + '-std'
active = True
elif index == 1:
tier_name = name + '-early-bird'
active = False
elif index == 2:
tier_name = name + '-late'
active = False
if PriceTier.get_by_name('general', 'name', tier_name):
continue
pt = PriceTier(name=tier_name, capacity_max=price_cap, personal_limit=10, parent=product, active=active)
Price(currency='GBP', price_int=gbp * 100, price_tier=pt)
Price(currency='EUR', price_int=eur * 100, price_tier=pt)
ProductViewProduct(view, product, order)
order += 1
db.session.flush()
misc = [
# name, display_name, cap, personal_limit, gbp, eur, description
('parking', 'Parking Ticket', 1700, 4, 15, 21, "We're trying to keep cars to a minimum. Please take public transport or car-share if you can."),
('campervan', 'Caravan/\u200cCampervan Ticket', 60, 2, 30, 42, "If you bring a caravan, you won't need a separate parking ticket for the towing car."),
]
for name, display_name, cap, personal_limit, gbp, eur, description in misc:
if Product.get_by_name(name, name):
continue
group = ProductGroup.get_by_name(name)
product = Product(name=name, display_name=display_name, description=description, parent=group)
pt = PriceTier(name=name, personal_limit=personal_limit, parent=product)
db.session.add(pt)
db.session.add(Price(currency='GBP', price_int=gbp * 100, price_tier=pt))
db.session.add(Price(currency='EUR', price_int=eur * 100, price_tier=pt))
ProductViewProduct(view, product, order)
order += 1
db.session.commit()
# ('t-shirt', 'T-Shirt', 200, 10, 10, 12, "Pre-order the official Electromagnetic Field t-shirt. T-shirts will be available to collect during the event."),
class CreateTickets(Command):
def run(self):
create_product_groups()
class CancelReservedTickets(Command):
def run(self):
# Payments where someone started the process but didn't complete
payments = Purchase.query.filter(
Purchase.state == 'reserved',
Purchase.modified < datetime.utcnow() - timedelta(days=3),
~Purchase.payment_id.is_(None),
).join(Payment).with_entities(Payment).group_by(Payment)
for payment in payments:
payment.lock()
app.logger.info('Cancelling payment %s', payment.id)
assert payment.state == 'new' and payment.provider in {'gocardless', 'stripe'}
payment.cancel()
# Purchases that were added to baskets but not checked out
purchases = Purchase.query.filter(
Purchase.state == 'reserved',
Purchase.modified < datetime.utcnow() - timedelta(days=3),
Purchase.payment_id.is_(None),
)
for purchase in purchases:
app.logger.info('Cancelling purchase %s', purchase.id)
purchase.cancel()
db.session.commit()
class SendTransferReminder(Command):
def run(self):
pass
# users_to_email = User.query.join(Ticket, TicketType).filter(
# TicketType.admits == 'full',
# Ticket.paid == True, # noqa: E712
# Ticket.transfer_reminder_sent == False,
# ).group_by(User).having(func.count() > 1)
# for user in users_to_email:
# msg = Message("Your Electromagnetic Field Tickets",
# sender=app.config['TICKETS_EMAIL'],
# recipients=[user.email])
# msg.body = render_template("emails/transfer-reminder.txt", user=user)
# app.logger.info('Emailing %s transfer reminder', user.email)
# mail.send(msg)
# for ticket in user.tickets:
# ticket.transfer_reminder_sent = True
# db.session.commit()
class SendTickets(Command):
def run(self):
pass
# paid_items = Ticket.query.filter_by(paid=True).join(TicketType).filter(or_(
# TicketType.admits.in_(['full', 'kid', 'car', 'campervan']),
# TicketType.fixed_id.in_(range(14, 24))))
# users = (paid_items.filter(Ticket.emailed == False).join(User) # noqa: E712
# .group_by(User).with_entities(User).order_by(User.id))
# for user in users:
# user_tickets = Ticket.query.filter_by(paid=True).join(TicketType, User).filter(
# TicketType.admits.in_(['full', 'kid', 'car', 'campervan']),
# User.id == user.id)
# plural = (user_tickets.count() != 1 and 's' or '')
# msg = Message("Your Electromagnetic Field Ticket%s" % plural,
# sender=app.config['TICKETS_EMAIL'],
# recipients=[user.email])
# msg.body = render_template("emails/receipt.txt", user=user)
# attach_tickets(msg, user)
# app.logger.info('Emailing %s receipt for %s tickets', user.email, user_tickets.count())
# mail.send(msg)
# db.session.commit()
|
4,021 | 07b05093b630fc0167532884ec69a00420ed70b4 | # -*- coding: utf-8 -*-
###########################
# CSCI 573 Data Mining - Eclat and Linear Kernel SVM
# Author: Chu-An Tsai
# 12/14/2019
###########################
import fim
import numpy as np
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
f = open('house-votes-84.data','r')
lines = f.readlines()
X = []
label = []
for line in lines:
strpline = line.rstrip()
arr = strpline.split(',')
newline = [];
for i in range(len(arr)):
if arr[i] == 'y':
newline.append(i)
if arr[0] == 'republican':
newline.append(100)
label.append(0)
else:
newline.append(200)
label.append(1)
#print(*newline, sep=',')
X.append(newline)
################################# a.
print('a. Run the itemset mining algorithm with 20% support. How many frequent itemsets are there?')
a = np.array(fim.eclat(X, supp=20))
print(len(a))
################################# b.
b1 = fim.eclat(X, supp=20, report='a')
b2 = np.array(b1)
b3 = b2[b2[:,1].argsort()][::-1]
print('\nb. Write top 10 itemsets (in terms of highest support value).')
for i in range(10):
print(b3[i])
################################# c.
print('\nc. How many frequent itemsets have 100 as part of itemsets?')
c1 = []
a=np.array(a)
for i in range(len(a)):
if 100 in a[i][0]:
c1.append(a[i].tolist())
c2 = np.array(c1)
c3 = c2[c2[:,1].argsort()][::-1].tolist()
print(len(c3))
################################## d.
print('\nd. How many frequent itemsets have 200 as part of itemsets?')
d1 = []
for i in range(len(a)):
if 200 in a[i][0]:
d1.append(a[i].tolist())
d2 = np.array(d1)
d3 = d2[d2[:,1].argsort()][::-1].tolist()
print(len(d3))
################################## e.
print('\ne. Write top 10 association rules (in terms of highest confidence value) where the rule''s head is 100.')
e1 = fim.eclat(X, supp=20, target='r', report='c', conf=75.0001)
e2 = np.array(e1)
e3 = e2[e2[:,2].argsort()][::-1]
e4 = []
for i in range(len(e3)):
if e3[i][0] == 100:
e4.append(e3[i].tolist())
e5 = np.array(e4)
for i in range(10):
print('confidence value:',e5[i][2],' association rule:', e5[i][1], '→', e5[i][0],)
################################## f.
print('\nf. How many rules with head 100 are there for which the confidence value is more than 75%? List them.')
f1 = e5.copy()
count_100 = 0
for i in range(len(f1)):
if (f1[i][2]) > 0.75:
count_100 = count_100 + 1
print('confidence value:', f1[i][2], ' association rule:', f1[i][1], '→', f1[i][0],)
print('Total:',count_100)
################################## g.
print('\ng. Write top 10 association rules (in terms of highest confidence value) where the rule''s head is 200.')
g2 = np.array(e1)
g3 = g2[g2[:,2].argsort()][::-1]
g4 = []
for i in range(len(g3)):
if g3[i][0] == 200:
g4.append(g3[i].tolist())
g5 = np.array(g4)
for i in range(10):
print('confidence value:',g5[i][2],' association rule:', g5[i][1], '→', g5[i][0],)
################################## h.
print('\nh. How many rules with head 200 are there for which the confidence value is more than 75%? List them.')
h1 = g5.copy()
count_200 = 0
for i in range(len(h1)):
if (h1[i][2]) > 0.75:
count_200 = count_200 + 1
print('confidence value:', h1[i][2], ' association rule:', h1[i][1], '→', h1[i][0],)
print('Total:',count_200)
################################### i.
print('\ni. soft-margin SVM with linear kernel')
i1 = e3[:,1].copy()
i2 = list(dict.fromkeys(i1))
i3 = np.zeros((len(X),len(i2))).astype(int)
for i in range(len(X)):
for j in range(len(i2)):
if (set(i2[j]).issubset(set(X[i]))) == True:
i3[i][j] = 1
else:
i3[i][j] = 0
# Training set = first 75% data, Tuning set = 25% from training set, Test set = last 25% data
data_train_lin_1, data_test_lin_1, data_train_label_lin_1, data_test_label_lin_1 = train_test_split(i3, label, train_size=0.75, random_state = 0, stratify = label)
#C = np.arange(0.01, 2, 0.01)
#parameters_linear = [{'C':C}]
parameters_linear = [{'C':[0.5, 0.7, 0.9, 1.0, 1.5]}]
model_linear = GridSearchCV(SVC(kernel='linear'), parameters_linear, cv=3).fit(data_train_lin_1, data_train_label_lin_1)
print('The best parameters: ', model_linear.best_params_)
#print("Scores for crossvalidation:")
#for mean, params in zip(model_linear.cv_results_['mean_test_score'], model_linear.cv_results_['params']):
#print("Accuracy: %0.6f for %r" % (mean, params))
predicted_label_lin_1 = model_linear.predict(data_test_lin_1)
accuracy_lin_1 = accuracy_score(data_test_label_lin_1, predicted_label_lin_1)
print('accurac:',accuracy_lin_1)
# Training set = last 75% data, Tuning set = 25% from training set, Test set = first 25% data
data_test_lin_2, data_train_lin_2, data_test_label_lin_2, data_train_label_lin_2 = train_test_split(i3, label, train_size=0.25, random_state = 0, stratify = label)
model_linear = GridSearchCV(SVC(kernel='linear'), parameters_linear, cv=3).fit(data_train_lin_2, data_train_label_lin_2)
print('The best parameters: ', model_linear.best_params_)
#print("Scores for crossvalidation:")
#for mean, params in zip(model_linear.cv_results_['mean_test_score'], model_linear.cv_results_['params']):
#print("Accuracy: %0.6f for %r" % (mean, params))
predicted_label_lin_2 = model_linear.predict(data_test_lin_2)
accuracy_lin_2 = accuracy_score(data_test_label_lin_2, predicted_label_lin_2)
print('accurac:',accuracy_lin_2)
# Training set = first 37.5% and last 37.5%, Tuning set = 25% from training set, Test set = first 25% data
data_temp1_lin_3, data_temp2_lin_3, data_temp1_label_lin_3, data_temp2_label_lin_3 = train_test_split(i3, label, train_size=0.375, random_state = 0, stratify = label)
data_test_lin_3, data_temp3_lin_3, data_test_label_lin_3, data_temp3_label_lin_3 = train_test_split(data_temp2_lin_3, data_temp2_label_lin_3, train_size=0.4, random_state = 0, stratify = data_temp2_label_lin_3)
data_train_lin_3 = np.vstack((data_temp1_lin_3, data_temp3_lin_3))
data_train_label_lin_3 = np.hstack((data_temp1_label_lin_3, data_temp3_label_lin_3))
model_linear = GridSearchCV(SVC(kernel='linear'), parameters_linear, cv=3).fit(data_train_lin_3, data_train_label_lin_3)
print('The best parameters: ', model_linear.best_params_)
#print("Scores for crossvalidation:")
#for mean, params in zip(model_linear.cv_results_['mean_test_score'], model_linear.cv_results_['params']):
#print("Accuracy: %0.6f for %r" % (mean, params))
predicted_label_lin_3 = model_linear.predict(data_test_lin_3)
accuracy_lin_3 = accuracy_score(data_test_label_lin_3, predicted_label_lin_3)
print('accurac:',accuracy_lin_3)
scores_lin = np.array([accuracy_lin_1, accuracy_lin_2, accuracy_lin_3])
print('Average 3-fold classification accuracy(along with standard deviation):', scores_lin.mean(), '(+/-',scores_lin.std(),')')
|
4,022 | 17b3fb44d9e7a09fe3b807b47bdc0248b6960634 | from datapackage_pipelines.wrapper import ingest, spew
params, datapackage, res_iter = ingest()
columns = params['columns']
for resource in datapackage['resources']:
fields = resource.get('schema', {}).get('fields')
if fields is not None:
fields = [
field for field in fields
if field['name'] not in columns
]
resource['schema']['fields'] = fields
def process_resources(_res_iter):
for rows in _res_iter:
def process_rows(_rows):
for row in _rows:
for column in columns:
if column in row:
del row[column]
yield row
yield process_rows(rows)
spew(datapackage, process_resources(res_iter))
|
4,023 | c7ca8235864ce5de188c4aa2feb9ad82d4fa9b0f | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, ForeignKey, Float
from sqlalchemy.orm import relationship, backref
ORMBase = declarative_base()
def create_all(engine):
ORMBase.metadata.create_all(engine)
|
4,024 | a1df804325a074ed980ec864c72fe231e2968997 | """ GetState
Usage:
get_state.py <pem-file> <ip-file> [options]
Options:
-h, --help print help message and exit
--output DIR set the output directory [default: logs]
"""
from docopt import docopt
import paramiko
import os
def get_logs(ip_addr, pem_file, log_dir):
pem = paramiko.RSAKey.from_private_key_file(pem_file)
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname=ip_addr, username="ec2-user", pkey=pem)
ftp = client.open_sftp()
logs = sorted(ftp.listdir('/home/ec2-user/logs/'))
for l in logs:
if l.endswith('.txt'):
print(l)
client.exec_command(f'cat /home/ec2-user/logs/{l} > /home/ec2-user/logs/tmp')
ftp.get(f'/home/ec2-user/logs/tmp', f"{log_dir}/{l}")
client.exec_command('rm /home/ec2-user/logs/tmp')
ftp.close()
client.close()
if __name__ == '__main__':
args = docopt(__doc__)
for ip in open(args['<ip-file>']):
os.system(f"scp -i {args['<pem-file>']} ec2-user@{ip.strip()}:~/logs/*.txt {args['--output']}")
#get_logs(ip.strip(), args['<pem-file>'], args['--output'])
|
4,025 | da2e388c64bbf65bcef7d09d7596c2869f51524a | import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
import numpy as np
x = 2
y = 3
add_op = tf.add(x, y)
mul_op = tf.multiply(x, y)
output_1 = tf.multiply(x, add_op)
output_2 = tf.pow(add_op, mul_op)
with tf.Session() as sess:
output_1, output_2 = sess.run([output_1, output_2])
print(output_1, output_2)
|
4,026 | d853964d424e628d6331b27123ad045f8d945dc0 | # coding: utf-8
num = int(input())
str = input().split()
table = [int(i) for i in str]
list.sort(table)
print(table[num-1] - table[0]) |
4,027 | 2dcb02ea2f36dd31eda13c1d666201f861c117e7 | from django.db import models
from django.utils import timezone
# Create your models here.
class URL(models.Model):
label = models.CharField(null=True, blank=True, max_length=30)
address = models.URLField()
slug = models.SlugField(unique=True, max_length=8)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.label |
4,028 | 35cd1c45294b826784eab9885ec5b0132624c957 | from kivy.uix.progressbar import ProgressBar
from kivy.animation import Animation
from kivy.uix.textinput import TextInput
from kivy.uix.button import Button
from kivy.uix.image import Image
from kivy.graphics import Color, Rectangle
from kivy.core.window import Window
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.gridlayout import GridLayout
from kivy.core.window import Window
from kivy.uix.dropdown import DropDown
Window.clearcolor = (1, 1, 1, 1)
class _BoxLayout(BoxLayout):
def __init__(self, **kwargs):
super(_BoxLayout, self).__init__(**kwargs)
with self.canvas.before:
Color(0.878, 0.941, 0.784)
self.rect = Rectangle(size=self.size, pos=self.pos)
self.bind(size=self._update_rect, pos=self._update_rect)
def _update_rect(self, instance, value):
self.rect.pos = instance.pos
self.rect.size = instance.size
class KaliteUI(object):
def __init__(self, kaliteApp):
dropdown = DropDown()
dropdown_btn = Button(text='menu', size_hint_x=None, size_hint_y=None, size=(150, 40), font_size=18
, color=(.06, .6, .2, 1), bold=True, background_color=(1, 1, 1, 0.2))
dropdown_btn.bind(on_release=dropdown.open)
self.root_layout = GridLayout(cols=1)
logo_holder = _BoxLayout(orientation='horizontal')
logo_img = Image(source='horizontal-logo.png', size_hint_x=None, width=360)
logo_holder.padding = [10,10,10,10]
logo_holder.add_widget(logo_img)
self.content_reload_btn= Button(text='Reload Content', size_hint_x=None, size_hint_y=None, size=(150, 40), font_size=18
, color=(1, 1, 1, 1), bold=True)
self.content_reload_btn.bind(on_press=kaliteApp.reload_content)
space_holder = _BoxLayout(orientation='horizontal', pos_hint={'x': .8})
logo_holder.add_widget(space_holder)
buttons_holder = AnchorLayout(anchor_x='center', anchor_y='center')
dropdown.add_widget(self.content_reload_btn)
logo_holder.add_widget(dropdown_btn)
logo_holder.spacing = [300, 0]
self.root_layout.add_widget(logo_holder)
self.img_holder = BoxLayout(orientation='vertical', size=(200,200), size_hint=(1, None))
self.img_holder.padding = [0,80,0,10]
self.root_layout.add_widget(self.img_holder)
self.progress_bar = ProgressBar()
self.messages = BoxLayout(orientation='vertical')
self.root_layout.add_widget(self.messages)
self.root_layout.add_widget(buttons_holder)
self.root_layout.add_widget(self.progress_bar)
def disable_reload_bnt(self):
self.content_reload_btn.disabled = True
def get_root_Layout(self):
return self.root_layout
def add_messages(self, message):
self.messages.add_widget(message)
def remove_messages(self, message):
self.messages.remove_widget(message)
def add_loading_gif(self):
self.gif_img = Image(source='loading.zip', anim_delay = 0.15)
self.img_holder.add_widget(self.gif_img)
def remove_loading_gif(self):
self.img_holder.remove_widget(self.gif_img)
def start_progress_bar(self, anim_value):
self.anim = Animation(value = anim_value, duration = 3)
self.anim.start(self.progress_bar)
def animation_bind(self, bindFunction):
self.anim.bind(on_complete = bindFunction)
|
4,029 | 9c3ca2fa43c6a34d7fe06517812a6d0bf5d6dbe1 | #!/usr/bin/python
"""
Create a 1024-host network, and run the CLI on it.
If this fails because of kernel limits, you may have
to adjust them, e.g. by adding entries to /etc/sysctl.conf
and running sysctl -p. Check util/sysctl_addon.
This is a copy of tree1024.py that is using the Containernet
constructor. Containernet overrides the buildFromTopo
functionality and adds Docker hosts instead.
"""
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.node import OVSSwitch
from mininet.topolib import TreeContainerNet
if __name__ == '__main__':
setLogLevel( 'info' )
network = TreeContainerNet( depth=2, fanout=100, switch=OVSSwitch )
network.run( CLI, network )
|
4,030 | 883a50cf380b08c479c30edad3a2b61a6f3075cc | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import unittest
from selenium import webdriver
from appium import webdriver
from time import sleep
import os
from PublicResour import Desired_Capabilities
"""
登录状态下检查“我的”界面的所有的功能模块
大部分执行用例时在“我的”界面
"""
#Return ads path relative to this file not cwd
PATH = lambda p: os.path.abspath(
os.path.join(os.path.dirname(__file__), p)
)
class My(unittest.TestCase):
def setUp(self):
desired_caps = Desired_Capabilities.startdevices()
self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
print u'设备配置成功'
sleep(5)
def test_myFavorite(self):
print u'进入首页了----'
make_commic = self.driver.find_elements_by_class_name("android.view.View")
make_commic[0].click()
sleep(5)
favorite_comic = self.driver.find_element_by_id("com.manboker.headportrait:id/comic_praise_iv")
if (favorite_comic.is_selected() == True):
pass
else:
favorite_comic.click()
sleep(1)
print u'漫画已收藏'
main_entry = self.driver.find_element_by_id("com.manboker.headportrait:id/comics_main_top_view_to_entry_iv")
main_entry.click()
sleep(1)
print u'返回到主界面'
head_icon = self.driver.find_element_by_id("com.manboker.headportrait:id/entry_album_set_icon")
head_icon.click()
sleep(1)
select_myfavorite = self.driver.find_element_by_id("com.manboker.headportrait:id/set_favorite_tv")
select_myfavorite.click()
sleep(3)
print u'进入我的收藏'
edit_favorite = self.driver.find_element_by_id("com.manboker.headportrait:id/edit_iv")
edit_favorite.click()
sleep(1)
item_comic_favorite= self.driver.find_element_by_id("com.manboker.headportrait:id/item_layout_0_iv")
item_comic_favorite.click()
sleep(1)
delete_comic_favorite = self.driver.find_element_by_id("com.manboker.headportrait:id/delete_tv")
delete_comic_favorite.click()
sleep(1)
confirm_delete = self.driver.find_element_by_id("android:id/button1")
confirm_delete.click()
sleep(1)
print u'你已经把漫画删除了, 表情改版没做好暂时不过表情模块'
back_my = self.driver.find_element_by_id("com.manboker.headportrait:id/iv_back")
back_my.click()
sleep(2)
self.driver.find_element_by_id("com.manboker.headportrait:id/set_set_goback").click()
sleep(2)
def test_aboutMe(self):
head_icon = self.driver.find_element_by_id("com.manboker.headportrait:id/entry_album_set_icon")
head_icon.click()
sleep(1)
print u'进入个人空间'
select_aboutme = self.driver.find_element_by_name("我的空间")
select_aboutme.click()
sleep(3)
user_headicon = self.driver.find_element_by_id("com.manboker.headportrait:id/specific_user_headicon")
user_headicon.click()
sleep(3)
self.driver.get_screenshot_as_file('C:\Pycharm\Manboker\MainMy\Screenshot\userhead' + '.jpg')
sleep(1)
self.driver.find_element_by_id("com.manboker.headportrait:id/community_comment_adjust_imageview").click()
sleep(1)
self.driver.swipe(1000,600,1000,900,1000)
sleep(1)
self.driver.get_screenshot_as_file('C:\Pycharm\Manboker\MainMy\Screenshot\AboutMe' + '.jpg')
print u'-----个人空间检查完毕-----'
go_backmy = self.driver.find_element_by_id("com.manboker.headportrait:id/topic_specific_user_goback")
go_backmy.click()
sleep(2)
self.driver.find_element_by_id("com.manboker.headportrait:id/set_set_goback").click()
sleep(2)
def test_myFollowing(self):
head_icon = self.driver.find_element_by_id("com.manboker.headportrait:id/entry_album_set_icon")
head_icon.click()
sleep(1)
print u'进入我的关注'
select_myfollowing = self.driver.find_element_by_name("我的关注")
select_myfollowing.click()
sleep(2)
#添加关注
add_following = self.driver.find_element_by_id("com.manboker.headportrait:id/t_fans_image")
add_following.click()
sleep(2)
#刷新后再次关注好友和取消关注
self.driver.swipe(1000, 600, 1000, 900, 1000)
sleep(3)
add_following.click()
sleep(2)
cancel_following = add_following
cancel_following.click()
sleep(1)
find_follows = self.driver.find_element_by_id("com.manboker.headportrait:id/t_follows_find")
find_follows.click()
sleep(2)
#换一换
refresh_friends = self.driver.find_element_by_name("换一换")
refresh_friends.click()
sleep(2)
add_follow = self.driver.find_element_by_id("com.manboker.headportrait:id/add_follow")
add_follow.click()
#返回到我的界面
self.driver.find_element_by_id("com.manboker.headportrait:id/t_find_back").click()
sleep(2)
go_backmy = self.driver.find_element_by_id("com.manboker.headportrait:id/t_follows_back")
go_backmy.click()
sleep(2)
self.driver.find_element_by_id("com.manboker.headportrait:id/set_set_goback").click()
sleep(2)
def test_Followers(self):
head_icon = self.driver.find_element_by_id("com.manboker.headportrait:id/entry_album_set_icon")
head_icon.click()
sleep(1)
print u'进入我的粉丝'
select_followers = self.driver.find_element_by_name("我的粉丝")
select_followers.click()
sleep(2)
self.driver.swipe(1000, 600, 1000, 900, 1000)
sleep(2)
self.driver.swipe(1000, 900, 1000, 600, 1000)
sleep(2)
go_backmy = self.driver.find_element_by_id("com.manboker.headportrait:id/topic_paise_list_goback")
go_backmy.click()
self.driver.find_element_by_id("com.manboker.headportrait:id/set_set_goback").click()
sleep(2)
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(My)
unittest.TextTestRunner(verbosity=2).run(suite)
# unittest.main()
|
4,031 | 466ffbd1f25423e4209fa7331d8b824b2dd3cd70 | # Code
import json
import os
import pandas
from pathlib import Path
from asyncio import sleep
# Import default websocket conection instance
from channels.generic.websocket import AsyncJsonWebsocketConsumer
# Global variable ----------
timeout = 0.5
# Get curent working directory
cwd = os.getcwd() # Get the current working directory (cwd)
# Get the MAIN directory
rootDir = Path(cwd).parent
# Get the data directory
dataDir = f"{rootDir}/DataBehandling/Data/"
"""
Make a object that is used to store menu state
"""
class menu:
nr = ""
menu1 = menu()
menu2 = menu()
menu1.nr = "10min"
menu2.nr = "24h"
"""
Create a instance that inherits from AsyncJsonWebsocketConsumer
This creates a websocket conection betwene server and clinet that can handle loads of information transferr simultaniously
"""
class graphLevel(AsyncJsonWebsocketConsumer):
"""
This method will define wat will happen when you get a conection to a user passed down
self is just itself object, the class gets a user conection as a object
When the user is conected acept the conection
"async def connect" is a inbuilt method in AsyncJsonWebsocketConsumer object
We change the method in AsyncJsonWebsocketConsumer, and overide it to modify what is inside the method
We await for a respons from the user conection to syncronise the conection
We need to wait before the signal is acepted and cunfirmed
If the conection confirmation takes to long cut the conection and move on
"""
async def connect(self):
# Wait and accept the inncoming connection
await self.accept()
# Endless loop
while True:
# Variables -----
level1 = {
"height": [],
"time": []
}
level2 = {
"height": [],
"time": []
}
level3 = {
"height": [],
"time": []
}
prices = {
"prices": [],
"time": []
}
# Get data frame
df = pandas.read_csv(dataDir + "Readings.csv", sep="\\t")
# Function -----
async def getTime(menuObject):
# Get latest time
time0 = list(map(int, df["Time"][len(df) - 1].split(":")))
date0 = list(map(int, df["Date"][len(df) - 1].split("-")))
# Get time
timeListLocal = []
for i in range(len(df) - 1, 0, -1):
# Get data
timeNow = list(map(int, df["Time"][i].split(":")))
dateNow = list(map(int, df["Date"][i].split("-")))
#print(date0, dateNow)
# Calculate in unit hh/mm/ss
year = date0[0] - dateNow[0]
month = date0[1] - dateNow[1]
day = date0[2] - dateNow[2]
h = time0[0] - timeNow[0] + (year * 9125 + month * 730 + day * 24)
m = time0[1] - timeNow[1]
s = time0[2] - timeNow[2]
#print("Date: ", year, month, day)
#print("Time: ", h, m, s)
# Calculate in seconds
if menuObject.nr == "1min":
timeDelta = h * 3600 + m * 60 + s
# Check if time fits in
if timeDelta <= 60.0:
timeListLocal += [str(round(timeDelta, 2)) + " s"]
# Calculate in minutes
elif menuObject.nr == "10min":
timeDelta = h * 60 + m + s/60
# Check if time fits in
if timeDelta <= 10.0:
timeListLocal += [str(round(timeDelta, 2)) + " min"]
# Calculate in minutes
elif menuObject.nr == "1h":
timeDelta = h * 60 + m + s/60
# Check if time fits in
if timeDelta <= 60.0:
timeListLocal += [str(round(timeDelta, 2)) + " min"]
# Calculate in hours
elif menuObject.nr == "24h":
timeDelta = h + m/60 + s/3600
# Check if time fits in
if timeDelta <= 24.0:
timeListLocal += [str(round(timeDelta, 2)) + " h"]
# Calculate in hours
elif menuObject.nr == "ALL":
timeListLocal += [str(round((h + m/60 + s/3600), 2)) + " h"]
return timeListLocal
# Wait til you get time
timeList1 = await getTime(menu1)
timeList2 = await getTime(menu2)
# Sort data for level height
for i in range(len(df) - len(timeList1), len(df)):
# Level 1
level1["height"] += [str(df["Level1"][i])]
# Level 2
level2["height"] += [str(df["Level2"][i])]
# Level 3
level3["height"] += [str(df["Level3"][i])]
"""
Give time data for level graphs
We use reversed for loop because we calculated values backwards
"""
for t in reversed(timeList1):
level1["time"] += [t]
level2["time"] += [t]
level3["time"] += [t]
# Sost data for prices
for i in range(len(df) - len(timeList2), len(df)):
prices["prices"] += [str(df["Price"][i])]
# Give time data for price graph
for t in reversed(timeList2):
prices["time"] += [t]
"""
Send data back to the other side of the conection as string
package it as json file
Wait for response
"""
data = {
"level1": level1,
"level2": level2,
"level3": level3,
"prices": prices
}
await self.send(json.dumps(data))
# Wait and sleep for 1 second
await sleep(timeout)
# Recomendation graph websocket insatnce
class recomend(AsyncJsonWebsocketConsumer):
# On first conect
async def connect(self):
# Wait and accept the inncoming connection
await self.accept()
# Endless loop
while True:
# Get data frame
df = pandas.read_csv(dataDir + "Readings.csv", sep="\\t")
# Get latest recomendations Re1 Re2 Re3"
recommendation1 = float(df["Recommendation1"][len(df) - 1])
recommendation2 = float(df["Recommendation2"][len(df) - 1])
recommendation3 = float(df["Recommendation3"][len(df) - 1]) # DELETE The last value is special because it was saved as a string with extra " at the end, and so we need to get rid of the " BASICALY: A smal bug XD
# Set values inside data
data = {
"recommend1": recommendation1,
"recommend2": recommendation2,
"recommend3": recommendation3
}
# send data to client
await self.send(json.dumps(data))
# Wait and sleep for 1 second
await sleep(timeout)
# Send control state (manual[1]/auto[0]) mode
class controlState(AsyncJsonWebsocketConsumer):
# Send iformation
async def connect(self):
# Acept the client conection
await self.accept()
# Endless lopp
while True:
# Get data frame
df = pandas.read_csv(dataDir + "Readings.csv", sep="\\t")
# Get latest state of controll
controlState1 = str(df["ESP_control1"][len(df) - 1])
controlState2 = str(df["ESP_control2"][len(df) - 1])
controlState3 = str(df["ESP_control3"][len(df) - 1])
# Set values inside data
data = {
"controlState1": controlState1,
"controlState2": controlState2,
"controlState3": controlState3
}
# send data to client
await self.send(json.dumps(data))
# Wait and sleep for 1 second
await sleep(timeout)
pass
"""
Receive data from user
Receive button states and alocate signal comands to the right place in data "SCADA.txt" file
"""
class receiveButtonState(AsyncJsonWebsocketConsumer):
    """Websocket consumer that receives button states from the client and
    writes the matching control signals into the "SCADA.txt" file.
    """

    async def receive(self, text_data):
        """Handle one button message and rewrite the control file.

        The payload encodes a button name plus a one-digit button number.
        Position 0 of the file is kept at "1" to indicate that a client is
        connected and asking for control.
        """
        buttonName = text_data[1:-2]
        buttonNumber = int(text_data[-2])
        # Read the current control string.
        with open(cwd + "/VMB_GUSTAV/data/SCADA.txt", "+r") as file:
            dataOld = str(file.readline())
        # The addressed position becomes "1" for buttonON and "0" otherwise;
        # every other position is copied through unchanged.
        pressed = "1" if buttonName == "buttonON" else "0"
        dataNew = "1"
        for i in range(1, len(dataOld)):
            dataNew += pressed if i == buttonNumber else dataOld[i]
        # Persist the updated control string.
        with open(cwd + "/VMB_GUSTAV/data/SCADA.txt", "+w") as file:
            file.write(dataNew)

    async def disconnect(self, code):
        """On client disconnect, switch every control off, including the
        connection flag in position 0.
        """
        with open(cwd + "/VMB_GUSTAV/data/SCADA.txt", "+w") as file:
            file.write("0000")
# Instance for websocket that handles timeline menu selections for level graphs
class receiveMenuTimeline1(AsyncJsonWebsocketConsumer):
    """Websocket consumer handling the timeline menu for the level graphs."""

    async def receive(self, text_data):
        # Payload looks like '"<prefix>-<selection>"': strip the quote
        # characters, then keep the token after the dash.
        payload = text_data[1:-1]
        menu1.nr = payload.split("-")[1]
# Instance for websocket that handles timeline menu selections for price graphs
class receiveMenuTimeline2(AsyncJsonWebsocketConsumer):
    """Websocket consumer handling the timeline menu for the price graphs."""

    async def receive(self, text_data):
        # Same protocol as the level-graph menu: keep the part after the dash.
        payload = text_data[1:-1]
        menu2.nr = payload.split("-")[1]
|
4,032 | db49313d2bc8b9f0be0dfd48c6065ea0ab3294cb | """empty message
Revision ID: 3e4ee9eaaeaa
Revises: 6d58871d74a0
Create Date: 2016-07-25 15:30:38.008238
"""
# revision identifiers, used by Alembic.
revision = '3e4ee9eaaeaa'
down_revision = '6d58871d74a0'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add non-unique lookup indexes on the account table."""
    for column in ('interface', 'mac', 'sub_int'):
        op.create_index(op.f('ix_account_' + column), 'account', [column], unique=False)
def downgrade():
    """Drop the account lookup indexes, in reverse order of upgrade()."""
    for column in ('sub_int', 'mac', 'interface'):
        op.drop_index(op.f('ix_account_' + column), table_name='account')
|
4,033 | ba486b64b1da3dc1775bee0980d5236516e130d4 | import time
import math
from random import randrange
import multilineMAX7219 as LEDMatrix
from multilineMAX7219_fonts import CP437_FONT, SINCLAIRS_FONT, LCD_FONT, TINY_FONT
from multilineMAX7219 import DIR_L, DIR_R, DIR_U, DIR_D
from multilineMAX7219 import DIR_LU, DIR_RU, DIR_LD, DIR_RD
from multilineMAX7219 import DISSOLVE, GFX_ON, GFX_OFF, GFX_INVERT
import datetime,ephem
from myfont import f
def utlst():
    """Return (UT, LST) time strings for the GTC observatory site on La Palma.

    Creates a fresh ephem.Observer pinned to the telescope coordinates; the
    sidereal time is truncated to whole seconds.
    """
    gtc = ephem.Observer()
    gtc.lat, gtc.lon, gtc.elevation = '28.7565187', '-17.8919956', 2175.0
    # Formats to "<date> <ut-time> <sidereal>": index 1 is the UT time of
    # day, index 2 the sidereal time.
    t = "%s %s" % (gtc.date,gtc.sidereal_time())
    p = t.split(" ")
    # Drop fractional seconds from the sidereal time.
    lst=p[2].split(".")
    ut=p[1]
    return ut,lst[0]
def at(x,y,string,state=GFX_ON):
    """Draw *string* on the LED matrix starting at (x, y) using custom font *f*.

    Glyphs are indexed by ord(c) - 48 (i.e. digits map to 0..9).  The cursor
    advances by each glyph's pixel width, with kerning tweaks: ':' is drawn
    narrow (-7) and letters are tightened by one pixel.
    """
    for c in string:
        LEDMatrix.gfx_sprite_array(f[ord(c)-48],x,y,state)
        x+=len(f[ord(c)-48][0])
        if c == ":" : x-=7
        if c >= "A" : x-=1
# Initialise the library and the MAX7219/8x8LED arrays (Python 2 script).
LEDMatrix.init()
LEDMatrix.brightness(5)
sun, moon = ephem.Sun(), ephem.Moon()
gtc = ephem.Observer()
gtc.lat, gtc.lon, gtc.elevation = '28.7565187', '-17.8919956', 2175.0
print gtc.date, gtc.sidereal_time()
print gtc.lon, gtc.lat
# Main display loop: show UT on the lower row and sidereal time (ST) on the
# upper row, refreshing ten times per second until Ctrl-C.
try:
    while 1:
        ut,lst=utlst()
        sut="%s" % ut
        slst="%s" % lst
        # Zero-pad the sidereal time to HH:MM:SS width.
        if len(slst) < 8: slst = "0"+slst
        at(0,16,"UT%s" % sut)
        at(0, 0,"ST%s" % slst)
        LEDMatrix.gfx_render()
        time.sleep(0.1)
except KeyboardInterrupt:
    # Blank the display before exiting.
    LEDMatrix.clear_all()
|
4,034 | 4ecf9c03750a31ecd113a7548df4e2a700e775e0 | from django.utils.html import strip_tags
from django.core.mail import send_mail
from django.urls import reverse
from django.http import HttpResponseRedirect
def Email(doctorFullName,password,otp,email,id):
    """Email a newly registered doctor their password, OTP and verification link.

    NOTE(review): the plaintext password is sent by email and the sender
    address is hard-coded — confirm both are acceptable for the deployment.
    """
    print("\n== UTILS ===")
    html_message='''
    <html>
    <body>
    <p>Welcome %s and pass is %s and %d</p>
    <p>http://127.0.0.1:8000/varificationpage/%d<p>
    </body>
    </html>
    '''%(doctorFullName,password,otp,id)
    # Plain-text fallback for clients that do not render HTML.
    plain_message =strip_tags(html_message)
    send_mail("my subjects",plain_message,'pragneshchauhan00798@gmail.com',[email],html_message=html_message)
def emailpatient(firstname,lastname,password,otp,email,id):
    """Email a newly registered patient their password, OTP and verification link.

    NOTE(review): same caveats as Email() — plaintext password and a
    hard-coded sender address.
    """
    print("\n== UTILS ===")
    html_message='''
    <html>
    <body>
    <p>Welcome %s %s and pass is %s and otp is %d</p>
    <p>http://127.0.0.1:8000/varificationpage/%d<p>
    </body>
    </html>
    '''%(firstname,lastname,password,otp,id)
    # Plain-text fallback for clients that do not render HTML.
    plain_message =strip_tags(html_message)
    send_mail("my subjects",plain_message,'pragneshchauhan00798@gmail.com',[email],html_message=html_message)
def forgotPassword(otp,email,id):
    """Email a fresh OTP and password-reset verification link to *email*."""
    # NOTE(review): email_subject is assigned but never used — the send_mail
    # call below passes "my subjects" instead.  Presumably this string was
    # the intended subject line; confirm before changing runtime behavior.
    email_subject = "This is your new OTP"
    print("\n== UTILS ===")
    html_message='''
    <html>
    <body>
    <p>Welcome %s Your Otp is %d </p>
    <p>http://127.0.0.1:8000/forgetpwdvarification/%d<p>
    </body>
    </html>
    '''%(email,otp,id)
    print(otp)
    # Plain-text fallback for clients that do not render HTML.
    plain_message =strip_tags(html_message)
    send_mail("my subjects",plain_message,'pragneshchauhan00798@gmail.com',[email],html_message=html_message)
# return HttpResponseRedirect(reverse(login))
# link = "https://localhost:8000/example?email="+email+"&otp="+otp+"&random="+random
# send_mail(email_subject, 'mail_template','pragneshchauhan00798@gmail.com', [email], {'otp': otp}) |
4,035 | 73d02615863826d77d65fbf0314dc71acb97ef28 | '''a,b = input().split()
a, b = [int(a),int(b)]
List = set()
ArrayA = list(map(int, input().split()))
temp = 1
ArrayB = list(map(int, input().split()))
for i in range(max(ArrayA), min(ArrayB)+1):
for j in ArrayA:
if i%j is 1:
temp += 1
if temp is len(ArrayA):
List.add(i)
temp=1
newList = list(List)
temp = 1
newSet = set()
for i in newList:
for j in ArrayB:
if j%i==1:
temp+=1
if temp is len(ArrayB):
newSet.add(i)
temp=1
print(len(list(newSet)))
'''
'''nm = input().split( "-" )
a = (nm[1])
b = (nm[1])
print(nm)'''
'''x1, v1, x2, v2 = input().split()
x1, v1, x2, v2 = [int(x1),int(v1),int(x2),int(v2)]
if (x1<x2 and v1<v2) or (x2>x1 and v2>v1) or v1 is v2:
print("NO")
exit(1)
diff = 1
while True:
x1 += v1
x2 += v2
diff = x2 - x1
if diff < 1:
print("NO")
break
elif diff is 1:
print("YES")
break'''
#Graph Explaorartion
'''
import numpy as np
import matplotlib.pyplot as plt
N = 5
menMeans = (20, 35, 30, 35, 27)
menStd = (2, 3, 4, 1, 2)
ind = np.arange(N) # the x locations for the groups
width = 1.35 # the width of the bars
fig = plt.figure()
ax = fig.add_subplot(111)
rects1 = ax.bar(ind, menMeans, width, color='royalblue', yerr=menStd)
womenMeans = (25, 32, 34, 20, 25)
womenStd = (3, 5, 2, 3, 3)
rects2 = ax.bar(ind+width, womenMeans, width, color='seagreen', yerr=womenStd)
# add some
ax.set_ylabel('Scores')
ax.set_title('Scores by group and gender')
ax.set_xticks(ind + width / 2)
ax.set_xticklabels( ('G1', 'G2', 'G3', 'G4', 'G5') )
ax.legend( (rects1[1], rects2[1]), ('Men', 'Women') )
plt.show()
'''
from math import gcd
# from functools import reduce
# for _ in range(int(input())):
# N = int(input())
# print(reduce(lambda x,y: x*y//gcd(x,y), range(1,N+1)))
import numpy as np
# Queen's-attack puzzle: count the board squares a queen can reach in the
# eight compass directions before hitting the board edge or an obstacle.
#
# Improvements over the original:
#   * obstacles are stored in a set, so each membership test is O(1)
#     (the list version re-scanned all obstacles on every step);
#   * the eight copy-pasted while-loops are replaced by one loop over
#     direction vectors.
# Input format and printed output are unchanged.
nk = input().split()
board = int(nk[0])
numberOfObs = int(nk[1])
roco = input().split()
row = int(roco[0])
col = int(roco[1])
obstacle = set()
for _ in range(numberOfObs):
    obs = input().split()
    obstacle.add((int(obs[0]), int(obs[1])))
# Walk a ray from the queen's square in each direction, counting squares
# until leaving the board or reaching an obstacle.  The starting square is
# counted once per direction, hence the final "- 8" correction (matching
# the original algorithm).
boxes = 0
for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1),
               (1, 1), (-1, -1), (-1, 1), (1, -1)):
    r, c = row, col
    while 1 <= r <= board and 1 <= c <= board and (r, c) not in obstacle:
        boxes += 1
        r += dr
        c += dc
print(boxes - 8)
|
4,036 | 2d9d66ea8a95285744b797570bfbeaa17fdc922a | numbers = [3, 7, 5]
# Track the largest value seen so far.  Seed with the first element rather
# than 0 so the scan is also correct when every value is negative (a 0 seed
# would silently win in that case).
maxNumber = numbers[0]
for number in numbers:
    if maxNumber < number:
        maxNumber = number
print maxNumber |
4,037 | f4715a1f59ceba85d95223ef59003410e35bfb7f | #!/usr/bin/python
import os
# http://stackoverflow.com/questions/4500564/directory-listing-based-on-time
def sorted_ls(path):
    """Return the entries of *path* sorted by modification time, oldest first."""
    mtime = lambda f: os.stat(os.path.join(path, f)).st_mtime
    # sorted() already returns a list; the original's extra list() call only
    # made a redundant copy.
    return sorted(os.listdir(path), key=mtime)
def main():
    """CGI entry point (Python 2): emit an HTML page showing the 25 most
    recent bias-frame images, newest first."""
    print "Content-type: text/html\n\n"
    print "<html><head><title>title</title></head>"
    print "<body>"
    path='../html/biasframes/'
    # print '<img width=100% src=\"../biasframes/'+file+'\" alt=\"'+file+'\" /><br>'
    # Newest files first (sorted_ls returns oldest-first order).
    files = sorted_ls(path)
    files.reverse()
    # print files
    nfiles=0
    for file in files:
        print '<img width=100% src=\"../biasframes/'+file+'\" alt=\"'+file+'\" /><br>'
        nfiles+=1
        # Cap the page at 25 images.
        if nfiles>24:
            break
    print "</body>"
    print "</html>"
if __name__ == "__main__":
    main()
|
4,038 | 925e1a1a99b70a8d56289b72fa0e16997e12d854 | from bs4 import BeautifulSoup
import requests
import pandas as pd
import json
# Scrape CoinMarketCap: the front page embeds its full listing state in the
# __NEXT_DATA__ JSON blob, which we reuse to fetch per-coin historical data.
cmc = requests.get('https://coinmarketcap.com/')
soup = BeautifulSoup(cmc.content, 'html.parser')
data = soup.find('script', id="__NEXT_DATA__", type="application/json")
coins = {}
slugs = {}
coin_data = json.loads(data.contents[0])
listings = coin_data['props']['initialState']['cryptocurrency']['listingLatest']['data']
historical_list = []
# Map numeric coin ids <-> URL slugs.
for i in listings:
    coins[str(i['id'])] = i['slug']
    slugs[i['slug']] = str(i['id'])
# https://coinmarketcap.com/currencies/[slug]/historical-data/?start=[YYYYMMDD]&end=[YYYYMMDD]
for i in coins:
    page = requests.get(f'https://coinmarketcap.com/currencies/{coins[i]}/historical-data/?start=20200101&end=20200630')
    soup = BeautifulSoup(page.content, 'html.parser')
    data = soup.find('script', id="__NEXT_DATA__", type="application/json")
    if data is not None:
        historical_data = json.loads(data.contents[0])
        if str(i) in historical_data['props']['initialState']['cryptocurrency']['ohlcvHistorical']:
            quotes = historical_data['props']['initialState']['cryptocurrency']['ohlcvHistorical'][i]['quotes']
            name = historical_data['props']['initialState']['cryptocurrency']['ohlcvHistorical'][i]['name']
            symbol = historical_data['props']['initialState']['cryptocurrency']['ohlcvHistorical'][i]['symbol']
            historical_list.append((quotes, name, symbol))
# Column accumulators for the output CSV.
market_cap = []
volume = []
high = []
low = []
# Renamed from `open`: that name shadowed the builtin open() for the rest
# of the module.
open_prices = []
timestamp = []
name = []
symbol = []
# slug = []
for data in historical_list:
    quotes, curr_name, curr_symbol = data
    # curr_slug = slugs[curr_name.lower()]
    for j in quotes:
        market_cap.append(j['quote']['USD']['market_cap'])
        volume.append(j['quote']['USD']['volume'])
        high.append(j['quote']['USD']['high'])
        low.append(j['quote']['USD']['low'])
        open_prices.append(j['quote']['USD']['open'])
        timestamp.append(j['quote']['USD']['timestamp'])
        name.append(curr_name)
        symbol.append(curr_symbol)
        # slug.append(curr_slug)
df = pd.DataFrame(columns=['marketcap', 'volume', 'high', 'low', 'open', 'timestamp', 'name', 'symbol'])
df['marketcap'] = market_cap
df['volume'] = volume
df['high'] = high
df['low'] = low
df['open'] = open_prices
df['timestamp'] = timestamp
df['name'] = name
df['symbol'] = symbol
# df['slug'] = slug
df.to_csv('cryptos.csv', index=False)
|
4,039 | f8c30f8ccd1b901fd750a2c9e14cab78e1d12a14 | from nose.tools import assert_equal
def rec_coin(target, coins):
    '''
    INPUT: Target change amount and list of coin values
    OUTPUT: Minimum coins needed to make change

    Naive exponential recursion (no memoization); see rec_coin_dynam for the
    memoized variant.
    '''
    # A single coin equal to the target is the best possible answer.
    if target in coins:
        return 1
    # Upper bound: `target` coins of value 1.
    best = target
    for coin in (c for c in coins if c <= target):
        # Use this coin, then solve the remainder recursively.
        best = min(best, 1 + rec_coin(target - coin, coins))
    return best
# consider using decorators to encapsulate memoization
def rec_coin_dynam(target, coins, known_results):
    '''
    INPUT: target amount, list of coin values, and known_results — a memo
    table of previously computed answers, initialised as [0] * (target + 1).
    OUTPUT: Minimum number of coins needed to make the target.
    '''
    # Exact coin match: one coin is optimal; record and return.
    if target in coins:
        known_results[target] = 1
        return 1
    # A positive entry means this subproblem was already solved.
    if known_results[target] > 0:
        return known_results[target]
    # Upper bound: `target` coins of value 1.
    best = target
    for coin in (c for c in coins if c <= target):
        best = min(best, 1 + rec_coin_dynam(target - coin, coins, known_results))
    # Memoize the answer for this target.
    known_results[target] = best
    return best
def bottom_up_solution(n, coins):
    """Iterative (bottom-up) minimum-coin count for amount *n*.

    best[v] holds the fewest coins summing to v; n acts as the
    "not yet reachable" sentinel, matching the recursive variants.
    """
    best = [0] + [n] * n
    for value in range(1, n + 1):
        best[value] = min(
            [n] + [best[value - coin] + 1 for coin in coins if coin <= value]
        )
    return best[n]
class TestCoins(object):
    """Sanity checks for a memoized coin-change solver."""

    def check(self, solution):
        # Each case: (target amount, expected minimum coin count).
        coins = [1, 5, 10, 25]
        for target, expected in ((45, 3), (23, 5), (74, 8)):
            assert_equal(solution(target, coins, [0] * (target + 1)), expected)
        print('Passed all tests.')
# Run Test
# test = TestCoins()
# test.check(rec_coin_dynam)
# print(bottom_up_solution(6, [1, 2, 5]))
# dynamic solution
# Demo: minimum coins for 23 with denominations {1, 2, 5, 10, 20}
# (20 + 2 + 1 -> prints 3).
target = 23
coins = [1, 2, 5, 10, 20]
known_results = [0]*(target+1)
print(rec_coin_dynam(target, coins, known_results))
|
4,040 | acc39044fa1ae444dd4a737ea37a0baa60a2c7bd | from Stack import Stack
from Regex import Regex
from Symbol import Symbol
class Postfix:
    """Convert a regular expression to postfix (RPN) form.

    Makes concatenation explicit with '.' via modRegex, then applies the
    shunting-yard algorithm using the project's Stack and Symbol helpers.
    """
    def __init__(self, regex):
        # `regex` is a project Regex object exposing `.expression`.
        self.__regex = regex.expression
        self.__modr = Postfix.modRegex(self.__regex)
        self.__pila = Stack()
        self.__postfix = self.convertInfixToPostfix()
    def getRegex(self):
        # Original (unmodified) expression.
        return self.__regex
    def getExtendedRegex(self):
        # NOTE(review): self.__extended is never assigned anywhere in this
        # class, so calling this raises AttributeError — confirm whether the
        # method is dead code or a missing feature.
        return self.__extended
    def getModifiedRegex(self):
        # Expression with explicit '.' concatenation operators.
        return self.__modr
    def getPostfix(self):
        return self.__postfix
    @staticmethod
    def isConcat(character, nextCharacter):
        # True when an implicit concatenation sits between the two symbols
        # (operand/star/right-paren followed by operand/left-paren, etc.).
        if Symbol.isOperand(character) and Symbol.isOperand(nextCharacter):
            return True
        elif Symbol.isRightParenthesis(character) and Symbol.isLeftParenthesis(nextCharacter):
            return True
        elif Symbol.isStar(character) and Symbol.isOperand(nextCharacter):
            return True
        elif Symbol.isStar(character) and Symbol.isLeftParenthesis(nextCharacter):
            return True
        elif Symbol.isOperand(character) and Symbol.isLeftParenthesis(nextCharacter):
            return True
        elif Symbol.isRightParenthesis(character) and nextCharacter == "#":
            return True
        elif Symbol.isRightParenthesis(character) and Symbol.isOperand(nextCharacter):
            return True
        else:
            return False
    @staticmethod
    def modRegex(reg):
        # Insert explicit '.' between implicitly concatenated symbols.
        # '$' is appended as a sentinel so the last real symbol has a
        # "next" character to compare against.
        # NOTE(review): the local name `list` shadows the builtin list type
        # inside this method.
        list = [char for char in reg+'$']
        nlist = []
        for i in range(len(list)-1):
            if Postfix.isConcat(list[i], list[i+1]) and list[i+1] != '$':
                nlist.append(list[i])
                nlist.append('.')
            elif(list[i] != list[-1] and list[i+1] != '$'):
                nlist.append(list[i])
            else:
                nlist.append(list[i])
        return "".join(nlist)
    def convertInfixToPostfix(self):
        # Shunting-yard: a virtual '(' ... ')' pair wraps the expression.
        self.__pila.push('(')
        tempr = self.__modr+')'
        auxpost = ""
        for i in range(len(tempr)):
            if Symbol.isOperand(tempr[i]):
                # Operands go straight to the output.
                auxpost += tempr[i]
            elif Symbol.isLeftParenthesis(tempr[i]):
                self.__pila.push(tempr[i])
            elif Symbol.isOperator(tempr[i]):
                # Pop operators of higher-or-equal precedence first.
                while not self.__pila.isEmpty() and Symbol.isOperator(self.__pila.peek()) and (Symbol.checkPrecedence(self.__pila.peek()) >= Symbol.checkPrecedence(tempr[i])):
                    auxpost += self.__pila.pop()
                self.__pila.push(tempr[i])
            elif Symbol.isRightParenthesis(tempr[i]):
                # Flush operators back to the matching '(' and discard it.
                while not self.__pila.isEmpty() and not Symbol.isLeftParenthesis(self.__pila.peek()):
                    auxpost += self.__pila.pop()
                self.__pila.pop()
return auxpost |
4,041 | 6375ac80b081b7eafbc5c3fc7e84c4eff2604848 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import pandas as pd
# Screenshot the first Google Images result for "<game> cover ps4" for every
# game listed in games_data.csv.
df = pd.read_csv('games_data.csv')
names = df['game']
driver = webdriver.Chrome('D:/chromedriver.exe')
driver.get('https://www.google.ca/imghp?hl=en&tab=ri&authuser=0&ogbl')
k = 0
for name in names:
    # Type the query into the Google Images search box and submit.
    box = driver.find_element_by_xpath('//*[@id="sbtc"]/div/div[2]/input')
    box.send_keys(name + str(' cover ps4'))
    box.send_keys(Keys.ENTER)
    for i in range(0,1):
        try:
            # Save the first thumbnail; k numbers the output images.
            driver.find_element_by_xpath('//*[@id="islrg"]/div[1]/div[1]/a[1]/div[1]/img').screenshot('C:/Users/AAYUSH/OneDrive/Desktop/labels/images/image('+str(k)+').png')
            k = k+1
        # NOTE(review): bare except silently skips any failure (missing
        # element, I/O error) — consider catching NoSuchElementException.
        except:
            pass
    # Return to the search page for the next title.
    driver.get('https://www.google.ca/imghp?hl=en&tab=ri&authuser=0&ogbl')
|
4,042 | b52429f936013ac60659950492b67078fabf3a13 | """
======================
@author:小谢学测试
@time:2021/9/8:8:34
@email:xie7791@qq.com
======================
"""
import pytest
# @pytest.fixture()
# def login():
# print("登录方法")
# def pytest_conftest(config):
# marker_list = ["search","login"]
# for markers in marker_list:
# config.addinivalue_line("markers",markers) |
4,043 | 362c4e572f0fe61b77e54ab5608d4cd052291da4 | import io
from flask import Flask, send_file
app = Flask(__name__)
@app.route('/')
def index():
    """Serve an in-memory text file ("testing.txt") as a download.

    Fixed: io.BytesIO only accepts bytes — writing the str 'hello world'
    raised TypeError on every request; the payload is now a bytes literal.
    """
    buf = io.BytesIO()
    buf.write(b'hello world')
    buf.seek(0)
    return send_file(buf,
                     attachment_filename="testing.txt",
                     as_attachment=True)
|
4,044 | b8d45a0028cb4e393ddca9dd6d246289328d1791 | from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as keras
unet_feature_n = 512
unet_feature_nstep_size = 1e-4
unet_input_image_size = 128
def unet(pretrained_weights=None, input_size=(unet_input_image_size, unet_input_image_size, 1)):
    """Build the classic 4-level U-Net for single-channel image segmentation.

    Channel widths are fractions of the module-level ``unet_feature_n``.
    Returns a compiled Model (Adam + binary cross-entropy, sigmoid output);
    optionally loads ``pretrained_weights`` first.
    """
    inputs = Input(input_size)
    # ---- Encoder: two 3x3 convs per level, then 2x2 max-pool ----
    conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
    conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
    conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
    conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
    conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
    # ---- Bottleneck ----
    conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
    conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
    drop5 = Dropout(0.5)(conv5)
    # ---- Decoder: upsample, concat the matching encoder skip, two convs ----
    up6 = Conv2D(unet_feature_n // 2, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(drop5))
    merge6 = concatenate([drop4, up6], axis=3)
    conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
    conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
    up7 = Conv2D(unet_feature_n // 4, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(conv6))
    merge7 = concatenate([conv3, up7], axis=3)
    conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
    up8 = Conv2D(unet_feature_n // 8, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(conv7))
    merge8 = concatenate([conv2, up8], axis=3)
    conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
    up9 = Conv2D(unet_feature_n // 16, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(conv8))
    merge9 = concatenate([conv1, up9], axis=3)
    conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
    conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
    conv9 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
    # Single-channel sigmoid head for binary segmentation.
    conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
    model = Model(inputs=inputs, outputs=conv10)
    model.compile(optimizer=Adam(lr=unet_feature_nstep_size), loss='binary_crossentropy', metrics=['accuracy'])
    if (pretrained_weights):
        model.load_weights(pretrained_weights)
    return model
def small_unet(pretrained_weights=False, patch_size=128):
    """Build a compact strided-conv U-Net variant for patch_size x patch_size
    single-channel inputs; returns a compiled model (optionally preloaded
    from ``pretrained_weights``)."""
    input_ = Input((patch_size, patch_size, 1))
    skips = []
    output = input_
    # Encoder: each strided conv halves the resolution; the pre-conv tensor
    # is saved as a skip connection (7 levels).
    for shape, filters in zip([5, 3, 3, 3, 3, 3, 3], [16, 32, 64, 64, 64, 64, 64]):
        skips.append(output)
        print(output.shape)
        output= Conv2D(filters, (shape, shape), strides=2, padding="same", activation="relu")(output)
        #output = BatchNormalization()(output)
        #if shape != 7:
        #    output = BatchNormalization()(output)
    # Decoder: upsample, concat the matching skip, conv.  The final (2-filter)
    # layer uses softmax and skips batch-norm.
    # NOTE(review): the first zip argument has 8 entries but the second only
    # 7, so zip truncates to 7 iterations — presumably intentional, since it
    # exactly drains the 7 saved skips.
    for shape, filters in zip([4, 4, 4, 4, 4, 4, 4, 4], [64, 64, 64, 64,32, 16, 2]):
        output = UpSampling2D()(output)
        skip_output = skips.pop()
        output = concatenate([output, skip_output], axis=3)
        if filters != 2:
            activation = "relu"
        else:
            activation = "softmax"
        output = Conv2D(filters if filters != 2 else 2, (shape, shape), activation=activation, padding="same")(output)
        if filters != 2:
            output = BatchNormalization(momentum=.9)(output)
    # Every skip connection must have been consumed.
    assert len(skips) == 0
    m = Model([input_], [output])
    if pretrained_weights:
        m.load_weights(pretrained_weights)
    m.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=['accuracy'])
return m |
4,045 | c2f82cf73d095979d1da346b7dd7779bcc675805 | # 1 use the operators to solve for the following equation:
# (a) evaluate ((30*39) + 300) ** 10 using arithmetic operators.
number = ((30*39) + 300) **10
print(number)
# (b) find the value of C where X + Y = C, given:
x = 0.0050
y = 0.1000
c = x + y
print(c)
"""
what is the result of the following:
(a) take the sentence:
the study or use of the systems
(especially computers and communications)
for storing, retrieving, and sending information
"""
"""
strore each word in a separate variable, then print out the sentence on the one line using the print function
"""
# Each phrase is held in its own variable and printed on a single line.
word1 = "the study or use of the systems"
word2 = "especially computers and communications"
word3 = "for storing, retrieving, and sending information"
print(word1, " " + word2, " " + word3)
# (b) what is output ? (slicing keeps the first four characters)
word = "Mystery"
print(word[:4])
|
4,046 | 0d98472d1c04bfc52378aa6401a47d96582696a2 | from sklearn import datasets, svm
import matplotlib.pyplot as plt
# Minimal scikit-learn demo: fit an SVM on all digit images but the last,
# then predict the held-out sample.
digits = datasets.load_digits()
X, y = digits.data[:-1], digits.target[:-1]
clf = svm.SVC(gamma=0.1, C=100)
clf.fit(X, y)
# Predict the single held-out image and show it alongside the true label.
prediction = clf.predict(digits.data[-1:])
actual = digits.target[-1:]
print("prediction = " + str(prediction) + ", actual = " + str(actual))
plt.matshow(digits.images[-1])
plt.show()
|
4,047 | 9dccc19abb6dac9e9606dc1fd83a227b4da9bf1f | # -*- coding: utf-8 -*-
"""
Neverland2 Colorscheme
~~~~~~~~~~~~~~~~~~~~~~
Converted by Vim Colorscheme Converter
"""
from pygments.style import Style
from pygments.token import Token, Keyword, Comment, Number, Generic, Operator, Name, String
class Neverland2Style(Style):
    """Pygments style converted from the Vim "Neverland2" colorscheme."""
    # Page/background colour behind all tokens.
    background_color = '#121212'
    # Token -> colour/format rules (Pygments style-string syntax).
    styles = {
        Token: '#ffffff',
        Name.Function: '#ff005f',
        Operator.Word: '#00ff00',
        Name.Label: 'noinherit #ffffaf',
        Generic.Subheading: '#0000ff',
        Generic.Traceback: '#ff00af bg:#121212 bold',
        Generic.Error: '#ffafff bg:#121212',
        Comment: '#87875f',
        Name.Attribute: '#ff005f',
        Name.Constant: '#af5fff bold',
        Number.Float: '#af5fff',
        Generic.Inserted: 'bg:#121212',
        Keyword.Type: 'noinherit #5fd7ff',
        String: '#d7af5f',
        Generic.Deleted: '#d70087 bg:#080808',
        Comment.Preproc: '#ffafd7',
        Keyword: '#ffff87 bold',
        Name.Exception: '#87ff00 bold',
        Name.Variable: '#d75f00',
        Generic.Heading: '#0000ff',
        Name.Tag: '#ffff87 bold',
        Number: '#0087ff',
        Generic.Output: '#121212 bg:#121212',
        Name.Entity: '#5fd7ff bg:#080808',
        Generic.Emph: '#808080 underline',
    }
|
4,048 | 099396a75060ad0388f5a852c4c3cb148febd8a3 | from network import WLAN
import machine
import pycom
import time
import request
def wifiConnect():
    """Join the configured WLAN in station mode, blinking the Pycom RGB LED
    red while waiting and setting it green once associated."""
    wlan = WLAN(mode=WLAN.STA)
    # Disable the heartbeat blink so we control the LED colour ourselves.
    pycom.heartbeat(False)
    # SECURITY(review): the SSID and WPA2 passphrase are hard-coded in
    # source — move them to device config/NVRAM instead of version control.
    wlan.connect(ssid="telenet-4D87F74", auth=(WLAN.WPA2, "x2UcakjTsryz"))
    while not wlan.isconnected():
        time.sleep(1)
        print("WiFi not connected")
        pycom.rgbled(0xFF0000)
    print("WiFi connected succesfully")
    pycom.rgbled(0x00FF00)
    print("test")
    # Show the acquired IP configuration.
    print(wlan.ifconfig())
    print("hond")
    # Defensive re-check: normally unreachable because the loop above only
    # exits once connected.
    while not wlan.isconnected():
        print("WiFi not connected2.0")
        pycom.rgbled(0xFF0000)
|
4,049 | f77df47fdb72ba50331b8b5d65984efaec474057 | # -*- coding: utf-8 -*-
import threading
import time
def work():
    # Print a heartbeat ten times, half a second apart (Python 2 syntax).
    i = 0
    while i < 10:
        print 'I am working..'
        time.sleep(0.5)
        i += 1
t = threading.Thread(target=work)
# Configure as a daemon thread: the process exits without waiting for it.
#t.setDaemon(True)
t.daemon = True # this attribute form works as well
t.start()
print 'main thread finished'
|
4,050 | c9b76fed088b85cf68e96778016d8974fea84933 | #!/usr/bin/python
import os, sys
# Assuming /tmp/foo.txt exists and has read/write permissions.
ret = os.access("/tmp/foo.txt", os.F_OK)
print "F_OK - return value %s"% ret
ret = os.access("/tmp/foo.txt", os.R_OK)
print "R_OK - return value %s"% ret
ret = os.access("/tmp/foo.txt", os.W_OK)
print "W_OK - return value %s"% ret
ret = os.access("/tmp/foo.txt", os.X_OK)
print "X_OK - return value %s"% ret
This produces following result:
F_OK - return value True R_OK - return value True W_OK - return value True X_OK - return value False
|
4,051 | 1cc9a7bbe1bda06ce76fa8ec1cdc17c7b2fde73b |
a = 1
b = a
print(a)
print(b)
a = 2
print(a)
print(b)
# 全部大写字符代表常量
USER_NAME = "常量"
print(USER_NAME)
print(USER_NAME) |
4,052 | 7f2489aa440441568af153b231420aa2736716ca | print ("Welcome to the Guessing Game 2.0\n")
print ("1 = Easy\t(1 - 10)")
print ("2 = Medium\t(1 - 50)")
print ("3 = Hard\t(1 - 100)")
# Player: Input user's choice
# while: Check if user enters 1 or 2 or 3
# CPU: Generate a random number
# Player: Input user's number
# Variable: Add a variable 'attempt' and assign 1
# while: Check user number is wrong
# Conditional Statement: Check if user number is whether higher or lower.
# Player: Input user's number
# Variable: Add 1 to 'attempt'
# Result with attempts
# Player: Input user's choice
# Print: Thank you for playing the game. |
4,053 | c40bb410ad68808c2e0cc636820ec6a2ec2739b8 | # Importing the random library for random choice.
import random
# Desired password length.
getnum = int(input("Pick a number greater than 7: "))
# Error checking.
# NOTE(review): the prompt says "greater than 7" but the check accepts 7
# itself (< 7) — confirm the intended minimum length.
if (getnum < 7):
    print("Error 205: Too little characters entered")
    print("Run again using python passwordgenerator.py, or click the run button on your IDE.")
    exit()
# Character pool: lowercase letters, digits and a few symbols.
lista = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','1','2','3','4','5','6','7','8','9','0','#', '@', '!', '%','^', '//', '\\']
# Main function takes two params, lista and get num.
def main(lista, getnum):
    """Generate and print a password of *getnum* characters drawn from *lista*,
    then ask whether to produce another one.

    NOTE(review): `random` is not cryptographically secure — consider the
    `secrets` module for real passwords.
    """
    password = ''.join(random.choice(lista) for _ in range(getnum))
    print(password)
    passwordagain()
#Password again.
def passwordagain():
    """Ask whether to generate another password; re-prompts (recursively)
    until the answer is 'y' or 'n'."""
    again = input("Do you want to generate another password(y/n)?: ")
    if (again == 'y'):
        # Reuse the module-level pool and length chosen at startup.
        main(lista,getnum)
    elif(again == 'n'):
        exit()
    else:
        print("Sorry, couldn't understand what you were saying.")
        passwordagain()
main(lista, getnum)
|
4,054 | 681788ffe7672458e8d334316aa87936746352b1 | # CSE 415 Winter 2019
# Assignment 1
# Jichun Li 1531264
# Part A
# 1
def five_x_cubed_plus_1(x):
    """Evaluate the polynomial 5*x**3 + 1."""
    cubed = x ** 3
    return 5 * cubed + 1
#2
def pair_off(ary):
    """Group *ary* into consecutive pairs; a trailing odd element becomes a
    one-element list."""
    grouped = [list(ary[i:i + 2]) for i in range(0, len(ary) - 1, 2)]
    if len(ary) % 2 == 1:
        grouped.append([ary[-1]])
    return grouped
#3
def mystery_code(input_string):
    """Letter-substitution cipher.

    Letters A-G (either case) shift forward 19 code points, H-Z shift back
    7; every letter's case is flipped.  Non-letters pass through unchanged.
    """
    out = []
    for ch in input_string:
        if not str.isalpha(ch):
            out.append(ch)
            continue
        shift = 19 if ch.upper() < 'H' else -7
        shifted = chr(ord(ch) + shift)
        # Flip the case of the substituted character.
        out.append(shifted.upper() if ch.islower() else shifted.lower())
    return ''.join(out)
#4
def past_tense(words):
    """Return the past-tense form of each (lower-cased) verb in *words*.

    Rules, applied in order:
      * irregular verbs come from a lookup table;
      * verbs ending in 'e' take 'd';
      * consonant + 'y' becomes 'ied';
      * consonant-vowel-consonant (final letter not w/y) doubles the final
        letter and adds 'ed';
      * everything else takes 'ed'.

    Fixed: the original compared one-character strings with ``is``, which
    only worked because CPython happens to intern them; ``==`` is the
    correct, portable operator.
    """
    result = []
    irregular_dict = {'have': 'had',
                      'be': 'was',
                      'eat': 'ate',
                      'go': 'went'}
    for word in words:
        word = str.lower(word)
        if word in irregular_dict:
            result.append(irregular_dict[word])
        elif word[-1] == 'e':
            result.append(word + 'd')
        elif word[-1] == 'y' and word[-2] not in 'aeiou':
            result.append(word[:-1] + 'ied')
        elif word[-2] in 'aeiou' and word[-1] not in 'aeiouwy' and word[-3] not in 'aeiou':
            result.append(word + word[-1] + 'ed')
        else:
            result.append(word + 'ed')
    return result
|
4,055 | 18e76df1693d4fc27620a0cf491c33197caa5d15 | '''
Created on Dec 2, 2013
A reference entity implementation for Power devices
that can be controlled via RF communication.
@author: rycus
'''
from entities import Entity, EntityType
from entities import STATE_UNKNOWN, STATE_OFF, STATE_ON
from entities import COMMAND_ON, COMMAND_OFF
class GenericPower(Entity):
    ''' This type of entites are able to report their states as logical
    on (0x01) or off (0x00) state, and accept commands to switch this state. '''
    def __init__(self, unique_id, entity_type=EntityType.find(100), name='Unnamed entity', state=STATE_UNKNOWN, state_value=None, last_checkin=0):
        # Entity type 100 is the registered "Power" type (see bottom of file).
        Entity.__init__(self, unique_id, entity_type, name=name, state=state, state_value=state_value, last_checkin=last_checkin)
    def state_changed(self, state_message):
        """Interpret the first byte of a state report (0x00=off, 0x01=on).

        Returns True when the stored state actually changed, False otherwise
        (including unknown byte values, which are ignored).
        """
        Entity.state_changed(self, state_message)
        state = state_message[0]
        if state == 0x00:
            # Only update (and report a change) when the value differs.
            if 0 != self.state_value:
                self.set_state(STATE_OFF, 0)
                return True
        elif state == 0x01:
            if 1 != self.state_value:
                self.set_state(STATE_ON, 1)
                return True
        return False
    def control(self, controller, command, value=None):
        """Translate ON/OFF commands into the 2-byte RF message
        [0x00, 0x01|0x00]; anything else is delegated to the base class."""
        if command.id == COMMAND_ON.id:
            controller.send_message(self.unique_id, [ chr(0x00), chr(0x01) ])
            self.log_command('Turning the power on')
            return
        elif command.id == COMMAND_OFF.id:
            controller.send_message(self.unique_id, [ chr(0x00), chr(0x00) ])
            self.log_command('Turning the power off')
            return
        Entity.control(self, command, value=value)
    def describe_state(self):
        # Human-readable form of the current logical state.
        return str(self.state)
# register type
EntityType.register(100, 'Power', GenericPower, [COMMAND_ON, COMMAND_OFF], '#99CC00', 'power.png')
|
4,056 | e60d57e8884cba8ce50a571e3bd0affcd4dcaf68 | import requests
import re
from bs4 import BeautifulSoup
r = requests.get("https://terraria.fandom.com/wiki/Banners_(enemy)")
soup = BeautifulSoup(r.text, 'html.parser')
list_of_banners = soup.find_all('span', {'id': re.compile(r'_Banner')})
x_count = 1
y_count = 1
for banner_span in list_of_banners:
print(f"{banner_span['id']}, {x_count}, {y_count}")
x_count += 1
if x_count == 51:
x_count = 1
y_count += 1
print("\n\n-----------------")
|
4,057 | f3a34d1c37165490c77ccd21f428718c8c90f866 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import random
import sys
def sequential_search(my_list, search_elt):
    """Timed linear scan of *my_list* for *search_elt*.

    Returns (elapsed_seconds, found).
    """
    found = False
    start_time = time.time()
    for item in my_list:
        if item == search_elt:
            found = True
            break
    return (time.time() - start_time), found
def ordered_sequential_search(my_list, search_elt):
    """Timed linear scan of an ascending-sorted list with early exit.

    Returns (elapsed_seconds, found).

    Bug fixed: the early-exit condition was inverted (``search_elt > elt``),
    which aborted the scan at the first element *smaller* than the target —
    so anything other than the head of the list was reported as not found.
    The scan must stop once an element larger than the target is reached.
    """
    found = False
    start_time = time.time()
    for elt in my_list:
        if search_elt == elt:
            found = True
            break
        elif search_elt < elt:
            # Past the insertion point in a sorted list: cannot appear later.
            break
    return (time.time() - start_time), found
def binary_search_iterative(my_list, search_elt):
    """Timed iterative binary search over a sorted list.

    Returns (elapsed_seconds, found).
    """
    lo, hi = 0, len(my_list) - 1
    found = False
    start_time = time.time()
    while lo <= hi and not found:
        mid = (lo + hi) // 2
        if my_list[mid] == search_elt:
            found = True
        elif search_elt < my_list[mid]:
            hi = mid - 1
        else:
            lo = mid + 1
    return (time.time() - start_time), found
def binary_search_rec(a_list, item):
    """Recursive binary search on a sorted list; returns True/False.

    Note: slicing copies the sub-list on each call, so this is O(n) space.
    """
    if not a_list:
        return False
    mid = len(a_list) // 2
    if a_list[mid] == item:
        return True
    if item < a_list[mid]:
        return binary_search_rec(a_list[:mid], item)
    return binary_search_rec(a_list[mid + 1:], item)
def binary_search_recursive(my_list, search_elt, start_time=None):
    """Timed wrapper around binary_search_rec.

    Returns a tuple (elapsed_seconds, found).
    The `start_time` parameter is accepted only for backward compatibility and
    is ignored; timing always starts inside the call. (The original default
    was the function object `time.time`, which was misleading and immediately
    shadowed anyway.)
    """
    t0 = time.time()
    result = binary_search_rec(my_list, search_elt)
    return (time.time() - t0), result
def generate_random_nb_my_list(nb, amount_my_list, maxNumber=sys.maxsize):
    """Build `amount_my_list` lists, each of `nb` random ints in [0, maxNumber].

    BUG FIX: the default was `sys.maxint`, which no longer exists in
    Python 3 and raised NameError whenever the default was used (as it is
    from __main__). `sys.maxsize` is the Python 3 equivalent.
    """
    return [
        [random.randint(0, maxNumber) for _ in range(nb)]
        for _ in range(amount_my_list)
    ]
def functionTimerAggregator(timeAggregator, fn, amt_of_nb, rnd_list):
    """Run one timed search and fold its elapsed time into `timeAggregator`.

    `fn` is a (display_name, callable, list_index) triple; `rnd_list` is a
    pair (unsorted_list, sorted_list) indexed by that triple. The search
    target is -1 so it is never found (worst case). The aggregator is a
    nested dict: {amt_of_nb: {fn_name: total_seconds}}.
    """
    fn_name, fn_function, fn_list_indx = fn
    timing, _ = fn_function(rnd_list[fn_list_indx], -1)
    bucket = timeAggregator.setdefault(amt_of_nb, {})
    bucket[fn_name] = bucket.get(fn_name, 0) + timing
def printTimerAggregator(timeAggregator, list_size):
    """Print the average runtime per strategy for each benchmarked list size.

    `timeAggregator` is {list_length: {strategy_name: total_seconds}} and each
    total is divided by `list_size` (the number of runs) to get the average.

    BUG FIX: `dict.iteritems()` is Python 2 only and raises AttributeError on
    Python 3; `items()` is the portable spelling.
    """
    for amount_of_number, fn_type in timeAggregator.items():
        print('For %s size of list:' % amount_of_number)
        for fn_name, consumedTime in fn_type.items():
            print('\t%s took %10.7f seconds to run, on average'
                  % (fn_name, consumedTime / list_size))
if __name__ == '__main__':
    # Benchmark each search strategy over batches of random lists.
    timeAggregator = {}
    amount_of_numbers = [500, 1000, 10000]  # list lengths to benchmark
    # (display name, callable, index into the (unsorted, sorted) pair)
    function_list = [
        ('Sequential Search', sequential_search, 0),
        ('Ordered Sequential Search', ordered_sequential_search, 1),
        ('Binary Search Iterative', binary_search_iterative, 1),
        ('Binary Search Recursive', binary_search_recursive, 1),
    ]
    list_size = 100  # number of random lists generated per length
    for amount_of_number in amount_of_numbers:
        my_randoms = generate_random_nb_my_list(amount_of_number, list_size)
        for unsorted_list in my_randoms:
            sorted_list = unsorted_list[:]
            sorted_list.sort()
            for fn in function_list:
                # The aggregator searches for -1, which randint(0, ...) never
                # produces, forcing the worst case for every strategy.
                functionTimerAggregator(
                    timeAggregator, fn, amount_of_number,
                    (unsorted_list, sorted_list))
    # NOTE(review): indentation reconstructed from a whitespace-mangled dump;
    # the report is printed once, after all sizes have been benchmarked.
    printTimerAggregator(timeAggregator, list_size)
|
4,058 | 800edfc61635564abf8297c4f33c59d48cc99960 | import heapq as heap
import networkx as nx
import copy
import random
def remove_jumps(moves):
    """Expand multi-step moves into sequences of unit moves.

    A move is (from, to, length, path). Unit moves (length <= 1) pass through
    untouched; longer moves have their path reversed IN PLACE (as before) and
    are replaced by the unit moves along that path.
    """
    flattened = []
    for move in moves:
        if move[2] <= 1:
            flattened.append(move)
        else:
            move[3].reverse()
            flattened.extend(make_moves_from_path(move[3]))
    return flattened
def make_moves_from_path(path):
    """Turn a node path into unit moves.

    Each consecutive pair (a, b) in `path` yields the move
    (b, a, 1, [b, a]) — note the deliberate reversal, matching how callers
    feed already-reversed paths in.
    """
    steps = path[:]
    return [(steps[i + 1], steps[i], 1, [steps[i + 1], steps[i]])
            for i in range(len(steps) - 1)]
def find_nearest_hole(o, r, graph, start):
    """Breadth-first search from `start` through obstacle-occupied nodes.

    Walks only across nodes that are obstacles, recording every adjacent
    free node (a "hole", excluding the robot `r`) it can reach. Returns a
    list of candidate moves (start, hole, path_length, path), in BFS order.
    """
    seen = []
    frontier = [(start, [start])]
    hole_paths = []
    while frontier:
        node, path = frontier.pop(0)
        if node in seen:
            continue
        seen.append(node)
        for neighbor in graph.adj[node]:
            if neighbor in o:
                # Still inside the obstacle cluster: keep expanding.
                if neighbor not in seen:
                    frontier.append((neighbor, path + [neighbor]))
            elif neighbor != r:
                # Found a free node that is not the robot: record the path.
                hole_paths.append(path + [neighbor])
    return [(p[0], p[-1], len(p) - 1, p) for p in hole_paths]
def move_robot(o, r, graph, node_from, node_to):
    """Move the robot from `node_from` to `node_to`.

    Returns a new (obstacles, robot) state; raises RuntimeError if
    `node_from` is not the robot or `node_to` is blocked by an obstacle.
    """
    if node_from != r:
        raise RuntimeError('node_from is not robot ' + node_from)
    if node_to in o:
        raise RuntimeError('node_to is obstacle ' + node_to)
    return (o[:], node_to)
def move_obstacle(o, r, graph, node_from, node_to):
    """Slide an obstacle from `node_from` to `node_to`.

    Returns a new (obstacles, robot) state; raises RuntimeError when the
    source is not an obstacle, or the destination is occupied by another
    obstacle or by the robot.
    """
    if node_from not in o:
        raise RuntimeError('node_from is not obstacle ' + node_from)
    if node_to in o:
        raise RuntimeError('node_to is obstacle ' + node_to)
    if node_to == r:
        raise RuntimeError('node_to is robot' + node_to)
    updated = o[:]
    updated.append(node_to)
    updated.remove(node_from)
    return (updated, r)
def make_move(o, r, graph, node_from, node_to):
    """Dispatch a single move to the robot or an obstacle.

    A `node_from` of None is a no-op and returns the state unchanged.
    Raises RuntimeError if `node_from` is neither the robot nor an obstacle.

    FIX: compare against None with `is`, not `==` (identity is the correct
    and idiomatic test; `==` can be hijacked by operand __eq__).
    """
    if node_from is None:
        return (o, r)
    if r == node_from:
        return move_robot(o, r, graph, node_from, node_to)
    if node_from in o:
        return move_obstacle(o, r, graph, node_from, node_to)
    raise RuntimeError('Cant move from ' + node_from)
def make_moves(o, r, graph, moves):
    """Apply a sequence of moves to the (obstacles, robot) state.

    Each move is consumed as (from, to, ...); returns the final state tuple.
    """
    state = (o[:], r)
    for step in moves:
        state = make_move(state[0], state[1], graph, step[0], step[1])
    return state
def is_hole(o, r, node):
    """Return True when `node` is not occupied by any obstacle.

    `r` (the robot position) is accepted for signature compatibility but is
    not consulted — callers check collision with the robot separately.

    FIX: collapse the `if ...: return True / return False` pattern into a
    single boolean expression.
    """
    return node not in o
def possible_robot_moves(o, r, graph):
    """Enumerate single-step robot moves into adjacent free nodes."""
    # is_hole() inlined: a neighbor is free when no obstacle occupies it.
    return [(r, neighbor, 1, [r, neighbor])
            for neighbor in graph.adj[r]
            if neighbor not in o]
def possible_obstacle_moves(o, r, graph, obstacle):
    """Enumerate moves for one obstacle.

    Adjacent free nodes (other than the robot) give direct unit moves;
    adjacent obstacle nodes are searched via find_nearest_hole for multi-step
    moves through the obstacle cluster.

    FIX: find_nearest_hole was called twice per blocked neighbor (once for
    the emptiness check, once for the extend); compute it once and reuse.
    """
    moves = []
    for neighbor in graph.adj[obstacle]:
        if is_hole(o, r, neighbor) and neighbor != r:
            moves.append((obstacle, neighbor, 1, [obstacle, neighbor]))
        elif neighbor != r:
            nh = find_nearest_hole(o, r, graph, neighbor)
            if nh:
                moves.extend(nh)
    return moves
def possible_obstacles_moves(o, r, graph):
    """Collect candidate moves for every obstacle on the board."""
    all_moves = []
    for obs in o:
        all_moves += possible_obstacle_moves(o, r, graph, obs)
    return all_moves
def possible_moves(o, r, graph):
    """All legal moves for this state: robot moves first, then obstacles."""
    return possible_robot_moves(o, r, graph) + possible_obstacles_moves(o, r, graph)
def color(o, r, graph, node, target, start):
    """Map a node to a display colour code.

    'c' obstacle sitting on the target, 'r' obstacle, 'b' robot,
    'y' start node, 'g' target node, 'w' anything else.
    Precedence matches the original check order.
    """
    if node in o:
        return 'c' if node == target else 'r'
    if node == r:
        return 'b'
    if node == start:
        return 'y'
    if node == target:
        return 'g'
    return 'w'
def create_state(o, r):
    """Build a canonical string description of the board state.

    Side effect (preserved from the original): sorts the obstacle list
    `o` in place so equal states always serialize identically.
    """
    o.sort()
    joined = '-'.join(o)
    return joined + ' ___ R = ' + r
#__________________________________________________________________________________
def fitness_fun_heap(graph, obstacles, robot, target, num_of_moves):
    """Heap priority for a search state (lower is better).

    Combines the robot-to-target shortest-path length, the number of moves
    already made, and one penalty point per obstacle sitting on that path.
    """
    path = nx.shortest_path(graph, robot, target)
    blocking = sum(1 for obstacle in obstacles if obstacle in path)
    return len(path) + num_of_moves + blocking
def solve_heap(o, r, graph, t):
    """Best-first search for a move sequence that brings the robot to `t`.

    States are (score, moves, obstacles, robot) tuples on a min-heap, with
    score supplied by fitness_fun_heap (lower = more promising). Returns the
    winning move list, or None if the heap is exhausted without reaching `t`.
    NOTE(review): indentation reconstructed from a whitespace-mangled dump.
    """
    round = 0
    visited = set([])
    # Seed priority -1000 guarantees the initial state is expanded first.
    queue = [(-1000, [], o, r)]
    while queue:
        score, moves, obstacles, robot = heap.heappop(queue)
        # Canonicalize the state (sorted obstacles + robot) for deduplication.
        obstacles.sort()
        st = ('#'.join(obstacles), robot)
        if (st not in visited):
            visited.add(st)
            score = fitness_fun_heap(graph, obstacles, robot, t, len(moves))
            pm = possible_moves(obstacles, robot, graph)
            for move in pm:
                new_moves = moves[:]
                new_moves.append(move)
                newobstacles, newrobot = make_moves(obstacles, robot, graph, [move])
                if t == newrobot:
                    print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
                    return new_moves
                round = round + 1
                # Progress indicator for long searches.
                if (round % 100000 == 0):
                    print("Visited = " + str(len(visited)))
                # NOTE(review): all children inherit the parent's score rather
                # than their own fitness -- confirm this is intentional.
                heap.heappush(queue, (score, new_moves, newobstacles, newrobot))
def solve_brute_force(o, r, graph, t):
    """Exhaustive BFS over all reachable states; returns a shortest solution.

    Every move sequence that puts the robot on `t` is collected, then the
    shortest is returned.
    NOTE(review): if no solution exists, `min` raises ValueError on the empty
    list. Indentation reconstructed from a whitespace-mangled dump.
    """
    num_of_solutions = 0
    all_solutions = []
    round = 0
    visited = set([])
    queue = [([], o, r)]
    while queue:
        moves, obstacles, robot = queue.pop(0)
        # Canonical state key for deduplication.
        obstacles.sort()
        st = ('#'.join(obstacles), robot)
        if (st not in visited):
            visited.add(st)
            pm = possible_moves(obstacles, robot, graph)
            for move in pm:
                new_moves = moves[:]
                new_moves.append(move)
                newobstacles, newrobot = make_moves(obstacles, robot, graph, [move])
                if t == newrobot:
                    all_solutions.append(new_moves)
                round = round + 1
                # Progress indicator for long searches.
                if (round % 100000 == 0):
                    print("Visited = " + str(len(visited)))
                queue.append((new_moves, newobstacles, newrobot))
    print('Number of solutions: ' + str(len(all_solutions)))
    best = min(all_solutions, key=lambda x: len(x))
    return best
|
4,059 | a3cfd507e30cf232f351fbc66d347aaca99a0447 | from pyramid.view import view_config, view_defaults
from ecoreleve_server.core.base_view import CRUDCommonView
from .individual_resource import IndividualResource, IndividualsResource, IndividualLocationsResource
@view_defaults(context=IndividualResource)
class IndividualView(CRUDCommonView):
    """Pyramid CRUD view bound to a single IndividualResource."""

    @view_config(name='equipment', request_method='GET', renderer='json', permission='read')
    def getEquipment(self):
        # GET <individual>/equipment -- delegate to the resource object,
        # which knows how to fetch its own equipment; rendered as JSON.
        return self.context.getEquipment()
|
4,060 | 37d079ca6a22036e2660507f37442617d4842c4e | import arcade
import os
SPRITE_SCALING = 0.5  # NOTE(review): defined but never used in this file
SCREEN_WIDTH = 800    # window width in pixels
SCREEN_HEIGHT = 600   # window height in pixels
SCREEN_TITLE = "Raymond Game"
MOVEMENT_SPEED = 50   # ball velocity (pixels per update) while an arrow key is held
class Ball:
    """A filled circle that moves by a per-update velocity and is clamped to
    the screen rectangle defined by SCREEN_WIDTH / SCREEN_HEIGHT."""

    def __init__(self, position_x, position_y, change_x, change_y, radius):
        # Store the construction parameters as instance state.
        self.position_x = position_x
        self.position_y = position_y
        self.change_x = change_x
        self.change_y = change_y
        self.radius = radius
        self.player_color = arcade.color.AMETHYST

    def draw(self):
        """Render the ball at its current position."""
        arcade.draw_circle_filled(self.position_x, self.position_y,
                                  self.radius, self.player_color)

    def update(self):
        """Advance one step, then clamp each axis so the ball stays on screen."""
        self.position_x += self.change_x
        self.position_y += self.change_y
        for attr, limit in (("position_x", SCREEN_WIDTH), ("position_y", SCREEN_HEIGHT)):
            value = getattr(self, attr)
            if value < self.radius:
                value = self.radius
            if value > limit - self.radius:
                value = limit - self.radius
            setattr(self, attr, value)
class MyGame(arcade.Window):
    """A tiny escape-room game: move the ball with the arrow keys, hold a
    letter key near a piece of furniture to inspect it, learn the code from a
    book, fetch the key from the drawer, then open the door to win."""

    def __init__(self, width, height, title):
        super().__init__(width, height, title)
        # Interaction flags: 1 while the matching key is held, 0 otherwise.
        self.drawer = 0
        self.wardrobe = 0
        self.bookshelves = 0
        self.door = 0
        self.bed = 0
        self.book_1 = 0
        self.book_2 = 0
        self.book_3 = 0
        # Progress flags, latched once achieved.
        self.endscreen = 0
        self.movement_tutorial = 0
        self.code = 0
        self.exit_key = 0
        arcade.set_background_color(arcade.color.BROWN)
        # Player avatar, starting at the screen centre with zero velocity.
        self.ball = Ball(400,300, 0, 0, 15)

    def on_draw(self):
        """Render the room, the player, contextual prompts and dialog text."""
        arcade.start_render()
        self.ball.draw()
        #door
        arcade.draw_rectangle_filled(35,560,60,80,arcade.color.AMAZON)
        arcade.draw_rectangle_filled(7,560,4,80,arcade.color.GRAY)
        arcade.draw_rectangle_filled(17,560,4,80,arcade.color.GRAY)
        arcade.draw_rectangle_filled(27,560,4,80,arcade.color.GRAY)
        arcade.draw_rectangle_filled(37,560,4,80,arcade.color.GRAY)
        arcade.draw_rectangle_filled(47,560,4,80,arcade.color.GRAY)
        arcade.draw_rectangle_filled(57,560,4,80,arcade.color.GRAY)
        arcade.draw_rectangle_filled(67,560,4,80,arcade.color.GRAY)
        arcade.draw_rectangle_filled(57,560,20,15,arcade.color.GRAY)
        arcade.draw_circle_filled(62,563,2,arcade.color.BLACK)
        arcade.draw_triangle_filled(62,562,60,559,64,559,arcade.color.BLACK)
        #bed
        arcade.draw_rectangle_filled (740,80,70,120,arcade.color.GRAY)
        arcade.draw_rectangle_filled (740,120,60,30,arcade.color.WHITE)
        arcade.draw_rectangle_filled (740,60,70,80,arcade.color.WHITE)
        #bookshelves
        arcade.draw_rectangle_filled (365,550,60,90,arcade.color.GRAY)
        arcade.draw_rectangle_filled (365,570,50,30,arcade.color.BLACK)
        arcade.draw_rectangle_filled (365,530,50,30,arcade.color.BLACK)
        arcade.draw_rectangle_filled (345,567,6,24,arcade.color.RED)
        arcade.draw_rectangle_filled (353,567,6,24,arcade.color.ORANGE)
        arcade.draw_rectangle_filled (361,567,6,24,arcade.color.BLUE)
        arcade.draw_rectangle_filled (369,567,6,24,arcade.color.RED)
        arcade.draw_rectangle_filled (377,567,6,24,arcade.color.ORANGE)
        arcade.draw_rectangle_filled (385,567,6,24,arcade.color.BLUE)
        arcade.draw_rectangle_filled (345,527,6,24,arcade.color.RED)
        arcade.draw_rectangle_filled (353,527,6,24,arcade.color.ORANGE)
        arcade.draw_rectangle_filled (361,527,6,24,arcade.color.BLUE)
        arcade.draw_rectangle_filled (369,527,6,24,arcade.color.RED)
        arcade.draw_rectangle_filled (377,527,6,24,arcade.color.ORANGE)
        arcade.draw_rectangle_filled (385,527,6,24,arcade.color.BLUE)
        arcade.draw_rectangle_filled (435,550,60,90,arcade.color.GRAY)
        arcade.draw_rectangle_filled (435,570,50,30,arcade.color.BLACK)
        arcade.draw_rectangle_filled (435,530,50,30,arcade.color.BLACK)
        arcade.draw_rectangle_filled (415,567,6,24,arcade.color.RED)
        arcade.draw_rectangle_filled (423,567,6,24,arcade.color.ORANGE)
        arcade.draw_rectangle_filled (431,567,6,24,arcade.color.BLUE)
        arcade.draw_rectangle_filled (439,567,6,24,arcade.color.RED)
        arcade.draw_rectangle_filled (447,567,6,24,arcade.color.ORANGE)
        arcade.draw_rectangle_filled (455,567,6,24,arcade.color.BLUE)
        arcade.draw_rectangle_filled (415,527,6,24,arcade.color.RED)
        arcade.draw_rectangle_filled (423,527,6,24,arcade.color.ORANGE)
        arcade.draw_rectangle_filled (431,527,6,24,arcade.color.BLUE)
        arcade.draw_rectangle_filled (439,527,6,24,arcade.color.RED)
        arcade.draw_rectangle_filled (447,527,6,24,arcade.color.ORANGE)
        arcade.draw_rectangle_filled (455,527,6,24,arcade.color.BLUE)
        #drawer
        arcade.draw_rectangle_filled (30,30,50,50,arcade.color.GRAY)
        arcade.draw_rectangle_filled (30,30,42,42,arcade.color.WHITE)
        #wardrobe
        arcade.draw_rectangle_filled (750,540,80,100,arcade.color.GRAY)
        arcade.draw_rectangle_filled (750,540,4,100,arcade.color.BLACK)
        arcade.draw_circle_filled (740,540,3,arcade.color.YELLOW)
        arcade.draw_circle_filled (760,540,3,arcade.color.YELLOW)
        # Contextual prompts when the ball is near a piece of furniture.
        if self.ball.position_x < 115 and self.ball.position_y > 470:
            arcade.draw_text("Hold D to interact", 235, 338, arcade.color.WHITE, font_size=18)
            arcade.draw_text("with Door", 235, 314, arcade.color.WHITE, font_size=18)
        if self.ball.position_x > 635 and self.ball.position_y < 210:
            arcade.draw_text("Hold E to interact", 235, 338, arcade.color.WHITE, font_size=18)
            arcade.draw_text("with Bed", 235, 314, arcade.color.WHITE, font_size=18)
        if self.ball.position_x > 255 and self.ball.position_x < 535 and self.ball.position_y > 435:
            arcade.draw_text("Hold O to interact", 235, 338, arcade.color.WHITE, font_size=18)
            arcade.draw_text("with Bookshelves", 235, 314, arcade.color.WHITE, font_size=18)
        if self.ball.position_x < 105 and self.ball.position_y < 105:
            arcade.draw_text("Hold R to interact", 235, 338, arcade.color.WHITE, font_size=18)
            arcade.draw_text("with Drawer", 235, 314, arcade.color.WHITE, font_size=18)
        if self.ball.position_x > 660 and self.ball.position_y > 440:
            arcade.draw_text("Hold W to interact", 235, 338, arcade.color.WHITE, font_size=18)
            arcade.draw_text("with Wardrobe", 235, 314, arcade.color.WHITE, font_size=18)
        if self.movement_tutorial == 0:
            arcade.draw_text("Use arrow keys to move", 235, 368, arcade.color.WHITE, font_size=18)
        # Dialog text while an interaction key is held.
        if self.drawer == 1:
            if self.code == 1:
                arcade.draw_text("Congratulations!", 435, 338, arcade.color.WHITE, font_size=18)
                arcade.draw_text("You got a key", 435, 314, arcade.color.WHITE, font_size=18)
                self.exit_key = 1
            else:
                arcade.draw_text("It seems I need", 435, 338, arcade.color.WHITE, font_size=18)
                arcade.draw_text("a code to open this", 435, 314, arcade.color.WHITE, font_size=18)
        if self.bed == 1:
            arcade.draw_text("It's just a bed", 435, 338, arcade.color.WHITE, font_size=18)
        if self.wardrobe == 1:
            arcade.draw_text("There are many outfits here", 435, 338, arcade.color.WHITE, font_size=18)
        if self.bookshelves == 1:
            arcade.draw_text("There are many books in here", 435, 338, arcade.color.WHITE, font_size=18)
            arcade.draw_text("which one should I read? A, B, C", 435, 314, arcade.color.WHITE, font_size=18)
        if self.book_1 == 1:
            arcade.draw_text("There is a key in the", 435, 338, arcade.color.WHITE, font_size=18)
            arcade.draw_text("drawer... huh", 435, 314, arcade.color.WHITE, font_size=18)
        if self.book_2 == 1:
            arcade.draw_text("Congratulations!", 435, 338, arcade.color.WHITE, font_size=18)
            arcade.draw_text("You got a code", 435, 314, arcade.color.WHITE, font_size=18)
            # NOTE(review): game state is mutated inside on_draw; it works,
            # but logic updates conventionally belong in on_update.
            self.code = 1
        if self.book_3 == 1:
            arcade.draw_text("It's the Bible", 435, 338, arcade.color.WHITE, font_size=18)
        if self.door == 1:
            if self.exit_key == 1:
                self.endscreen = 1
            else:
                arcade.draw_text("It seems that I need", 435, 338, arcade.color.WHITE, font_size=18)
                arcade.draw_text("a key to open this", 435, 314, arcade.color.WHITE, font_size=18)
        if self.endscreen == 1:
            # Victory screen: blackout, message and a decorative sword.
            arcade.draw_rectangle_filled(400,300,800,600,arcade.color.BLACK)
            arcade.draw_text("Congratulations! you beat the game", 235, 468, arcade.color.WHITE, font_size=18)
            #sword
            arcade.draw_rectangle_filled (290,190,20,180,arcade.color.WHITE_SMOKE)
            arcade.draw_rectangle_filled (270,190,20,180,arcade.color.GRAY)
            arcade.draw_triangle_filled (260,100,280,100,280,70,arcade.color.GRAY)
            arcade.draw_triangle_filled (300,100,280,100,280,70, arcade.color.WHITE)
            arcade.draw_rectangle_filled (280,184,4,196,arcade.color.BLACK)
            arcade.draw_rectangle_filled (280,300,40,40,arcade.color.PURPLE)
            arcade.draw_triangle_filled (280,265,270,280,290,280,arcade.color.GOLD)
            arcade.draw_rectangle_filled (240,290,50,20,arcade.color.PURPLE,30)
            arcade.draw_rectangle_filled (320,290,50,20,arcade.color.PURPLE,330)
            arcade.draw_rectangle_filled (220,283,50,2,arcade.color.BLACK,30)
            arcade.draw_rectangle_filled (220,275,59,2,arcade.color.BLACK,30)
            arcade.draw_rectangle_filled (340,283,50,2,arcade.color.BLACK,330)
            arcade.draw_rectangle_filled (340,275,59,2,arcade.color.BLACK,330)
            arcade.draw_rectangle_filled (280,340,15,50,arcade.color.PURPLE)
            arcade.draw_triangle_filled (260,320,280,320,280,340,arcade.color.PURPLE)
            arcade.draw_triangle_filled (265,320,280,320,280,365,arcade.color.PURPLE)
            arcade.draw_triangle_filled (300,320,280,320,280,340,arcade.color.PURPLE)
            arcade.draw_triangle_filled (295,320,280,320,280,365,arcade.color.PURPLE)
            arcade.draw_circle_filled (280,375,15,arcade.color.LIGHT_BROWN)

    def on_update(self, delta_time):
        """Advance the simulation: move and clamp the ball."""
        self.ball.update()

    def on_key_press(self, key, modifiers):
        """Arrow keys set the ball's velocity; letter keys raise interaction flags."""
        if key == arcade.key.LEFT:
            self.ball.change_x = -MOVEMENT_SPEED
            self.movement_tutorial = 1
        elif key == arcade.key.RIGHT:
            self.ball.change_x = MOVEMENT_SPEED
            self.movement_tutorial = 1
        elif key == arcade.key.UP:
            self.ball.change_y = MOVEMENT_SPEED
            self.movement_tutorial = 1
        elif key == arcade.key.DOWN:
            self.ball.change_y = -MOVEMENT_SPEED
            self.movement_tutorial = 1
        if key == arcade.key.R:
            self.drawer = 1
        if key == arcade.key.W:
            self.wardrobe = 1
        if key == arcade.key.D:
            self.door = 1
        if key == arcade.key.O:
            self.bookshelves = 1
        if key == arcade.key.E:
            self.bed = 1
        if key == arcade.key.A:
            self.book_1 = 1
        if key == arcade.key.B:
            self.book_2 = 1
        if key == arcade.key.C:
            self.book_3 = 1

    def on_key_release(self, key, modifiers):
        """Stop movement / clear interaction flags when keys are released."""
        if key == arcade.key.LEFT or key == arcade.key.RIGHT:
            self.ball.change_x = 0
        elif key == arcade.key.UP or key == arcade.key.DOWN:
            self.ball.change_y = 0
        if key == arcade.key.R:
            self.drawer = 0
        if key == arcade.key.W:
            self.wardrobe = 0
        if key == arcade.key.D:
            self.door = 0
        if key == arcade.key.O:
            self.bookshelves = 0
        if key == arcade.key.E:
            self.bed = 0
        if key == arcade.key.A:
            self.book_1 = 0
        if key == arcade.key.B:
            self.book_2 = 0
        if key == arcade.key.C:
            self.book_3 = 0
def main():
    """Create the game window and start the arcade event loop."""
    window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
    arcade.run()
if __name__ == "__main__":
    # Launch the game only when executed as a script.
    main()
|
4,061 | 3a678f9b5274f008a510a23b2358fe2a506c3221 | import logging
import argparse
import getpass
import errno
import re
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import dns.resolver
class Mail(object):
    """A plain-text email delivered directly to the recipient's MX hosts.

    Attributes:
        recipient: destination address ("user@domain.tld").
        sender: from-address; defaults to "<local user>@example.com".
        subject: subject line; defaults to a whimsical placeholder.
        body: plain-text body.
        verbose: when True, smtplib debug output is enabled during send().
    """

    def __init__(self, recipient=None, sender=None, subject=None, body=None):
        self.recipient = recipient
        self.sender = sender or '{}@example.com'.format(getpass.getuser())
        self.subject = subject or 'Sir! My sir!'
        self.body = body or 'A message from their majesty.'
        self.verbose = False

    @property
    def domain(self):
        """The recipient's domain part.

        Raises ValueError when the recipient does not look like an address.
        NOTE(review): the pattern only accepts two-label domains (no
        sub-domains or hyphens) -- confirm this restriction is intended.
        """
        m = re.match(r'.+@(\w+\.\w+)', self.recipient)
        if m:
            return m.group(1)
        raise ValueError('Unable to get recipient domain')

    @property
    def message(self):
        """The MIME message: multipart/alternative with one text/plain part."""
        m = MIMEMultipart('alternative')
        m['Subject'] = self.subject
        m['From'] = self.sender
        m['To'] = self.recipient
        m.attach(MIMEText(self.body, 'plain'))
        return m

    def send(self):
        """
        Sends an email to a single recipient straight to his MTA.
        Looks up for the MX DNS records of the recipient SMTP server and attempts the delivery through them.
        """
        answers = dns.resolver.query(self.domain, 'MX')
        try:
            # NOTE(review): this hands the message to EVERY MX host, which can
            # deliver duplicates -- confirm whether it should stop after the
            # first successful hand-off.
            for answer in answers:
                ex = answer.exchange.to_text()
                server = smtplib.SMTP(ex)
                server.set_debuglevel(self.verbose)
                server.sendmail(self.sender, [self.recipient], self.message.as_string())
                server.quit()
        except OSError as e:
            # BUG FIX: errno values must be compared with ==, not identity
            # (`is`), which only appeared to work via CPython's small-int
            # cache and is not guaranteed by the language.
            if e.errno == errno.ENETUNREACH:
                print('Looks like port 25 is blocked')
            raise e
class App(object):
    """Command-line front end: parse arguments into a Mail and send it."""

    def run(self):
        """Build a Mail from CLI arguments, then deliver it."""
        mail = Mail()
        self.parse(mail)
        mail.send()

    @classmethod
    def parse(cls, mail):
        """Populate `mail` in place from the command line (argparse namespace)."""
        parser = argparse.ArgumentParser(prog='lumpy', description=mail.send.__doc__)
        add = parser.add_argument
        add('--from', '-f', nargs='?', dest='sender')
        add('recipient')
        add('--subject', '-s', nargs='?')
        add('--body', '-b', nargs='?')
        add('--verbose', '-v', action='store_true')
        # Writing straight into the Mail object sets recipient/sender/etc.
        parser.parse_args(namespace=mail)
if __name__ == "__main__":
    # CLI entry point: parse arguments and send the message.
    App().run()
|
4,062 | ae0547aa1af2d4dd73bb60154574e64e74107a58 | import numpy as np
import cv2
def optical_flow_from_video():
    """Track Shi-Tomasi corners with Lucas-Kanade sparse optical flow over a
    video file, drawing each corner's trajectory until the stream ends or
    'q' is pressed."""
    cap = cv2.VideoCapture("/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi")
    # Parameters for Shi-Tomasi corner detection.
    feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)
    # Parameters for Lucas-Kanade optical flow;
    # maxLevel is the number of image-pyramid levels used.
    lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    # Random colours, one per tracked corner.
    color = np.random.randint(0, 255, (100, 3))
    # Grab the first frame and detect the initial corners in it.
    _, old_frame = cap.read()
    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
    # Mask image the corner trajectories are drawn onto.
    mask = np.zeros_like(old_frame)
    while True:
        ret, frame = cap.read()
        if ret:
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Compute the new positions of the tracked corners.
            p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
            # Select good points
            good_new = p1[st == 1]
            good_old = p0[st == 1]
            # Draw the corner trajectories.
            for i, (new, old) in enumerate(zip(good_new, good_old)):
                a, b = new.ravel()
                c, d = old.ravel()
                mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
                frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
            img = cv2.add(frame, mask)
            cv2.imshow('frame', img)
            if cv2.waitKey(30) & 0xff == ord("q"):
                break
            # Advance: the current frame/corners become the previous ones.
            old_gray = frame_gray.copy()
            p0 = good_new.reshape(-1, 1, 2)
        else:
            break
            pass
    cv2.destroyAllWindows()
    cap.release()
    pass
def optical_flow_from_camera():
    """Track Shi-Tomasi corners with Lucas-Kanade sparse optical flow from the
    default camera (mirrored for a webcam), drawing corner trajectories until
    the stream ends or 'q' is pressed."""
    cap = cv2.VideoCapture(0)
    # Parameters for Shi-Tomasi corner detection.
    feature_params = dict(maxCorners=100, qualityLevel=0.3,
                          minDistance=7, blockSize=7)
    # Parameters for Lucas-Kanade optical flow;
    # maxLevel is the number of image-pyramid levels used.
    lk_params = dict(winSize=(15, 15), maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    # Random colours, one per tracked corner.
    color = np.random.randint(0, 255, (100, 3))
    # Grab the first frame and detect the initial corners in it.
    _, old_frame = cap.read()
    old_frame = cv2.flip(old_frame, 1)
    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
    # Mask image the corner trajectories are drawn onto.
    mask = np.zeros_like(old_frame)
    while True:
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        if ret:
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Compute the new positions of the tracked corners.
            p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
            # Select good points
            good_new = p1[st == 1]
            good_old = p0[st == 1]
            # Draw the corner trajectories.
            for i, (new, old) in enumerate(zip(good_new, good_old)):
                a, b = new.ravel()
                c, d = old.ravel()
                mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
                frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
            img = cv2.add(frame, mask)
            cv2.imshow('frame', img)
            if cv2.waitKey(30) & 0xff == ord("q"):
                break
            # Advance: the current frame/corners become the previous ones.
            old_gray = frame_gray.copy()
            p0 = good_new.reshape(-1, 1, 2)
        else:
            break
            pass
    cv2.destroyAllWindows()
    cap.release()
    pass
def optical_flow_from_camera_farneback2():
    """Dense Farneback optical flow from the default camera, visualised as an
    HSV colour wheel (hue = flow direction, value = flow magnitude)."""
    cap = cv2.VideoCapture(0)
    cap.set(3, 640)   # CAP_PROP_FRAME_WIDTH
    cap.set(4, 480)   # CAP_PROP_FRAME_HEIGHT
    ret, frame1 = cap.read()
    frame1 = cv2.flip(frame1, 1)  # mirror for a webcam
    prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    hsv = np.zeros_like(frame1)
    hsv[..., 1] = 255  # full saturation; hue/value carry the flow data
    while True:
        try:
            ret, frame2 = cap.read()
            frame2 = cv2.flip(frame2, 1)
        except Exception:
            # cap.read() returned None at end of stream -> flip raised; stop.
            break
        # `next_gray` (not `next`) avoids shadowing the builtin next().
        next_gray = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(prvs, next_gray, None, 0.5, 3, 15, 3, 5, 1.2, 1)
        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv[..., 0] = ang * 180 / np.pi / 2  # direction -> hue (OpenCV hue is 0-180)
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        result = np.concatenate((frame2, rgb), axis=1)
        cv2.imshow('result', result)
        # BUG FIX: waitKey() returns an int; comparing against the string "q"
        # was always False, so the quit key never worked. Compare to ord("q").
        if cv2.waitKey(1) & 0xff == ord("q"):
            break
        prvs = next_gray
    cap.release()
    cv2.destroyAllWindows()
def optical_flow_from_camera_farneback(flip=True, resize=True):
    """Dense Farneback optical flow over a video file, visualised as an HSV
    colour wheel (hue = flow direction, value = flow magnitude).

    @param flip    mirror each frame horizontally (useful for webcam input)
    @param resize  scale every frame to a fixed 640x480 before processing
    """
    cap = cv2.VideoCapture("/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi")
    width = 640
    height = 480
    cap.set(3, width)   # CAP_PROP_FRAME_WIDTH (no effect on files; kept for camera use)
    cap.set(4, height)  # CAP_PROP_FRAME_HEIGHT
    ret, frame1 = cap.read()
    if flip:
        frame1 = cv2.flip(frame1, 1)
    if resize:
        frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)
    prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    hsv = np.zeros_like(frame1)
    hsv[..., 1] = 255  # full saturation; hue/value carry the flow data
    while True:
        try:
            ret, frame2 = cap.read()
            if flip:
                frame2 = cv2.flip(frame2, 1)
            if resize:
                frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.INTER_CUBIC)
            cv2.imshow('frame1', frame2)
        except Exception:
            # cap.read() returned None at end of stream; stop cleanly.
            break
        # `next_gray` (not `next`) avoids shadowing the builtin next().
        next_gray = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(prvs, next_gray, None, 0.5, 3, 20, 3, 5, 1.2, 1)
        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv[..., 0] = ang * 180 / np.pi / 2  # direction -> hue (OpenCV hue is 0-180)
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        cv2.imshow('frame2', rgb)
        result = np.concatenate((frame2, rgb), axis=1)
        cv2.imshow('result', result)
        # BUG FIX: waitKey() returns an int; comparing against the string "q"
        # was always False, so the quit key never worked. Compare to ord("q").
        if cv2.waitKey(1) & 0xff == ord("q"):
            break
        prvs = next_gray
    cap.release()
    cv2.destroyAllWindows()
def optical_flow_from_camera_farneback_and_write_video():
    """Dense Farneback flow over a video file; shows each side-by-side
    frame+flow image and also writes it to test2/<n>.jpg."""
    cap = cv2.VideoCapture('./yaogan/chen_1.mp4')
    width = 640
    height = 480
    cap.set(3, width)   # CAP_PROP_FRAME_WIDTH (no effect on files)
    cap.set(4, height)  # CAP_PROP_FRAME_HEIGHT
    ret, frame1 = cap.read()
    frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)
    prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    hsv = np.zeros_like(frame1)
    hsv[..., 1] = 255  # full saturation; hue/value carry the flow data
    i = 0
    while True:
        try:
            ret, frame2 = cap.read()
            frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.INTER_CUBIC)
            # `next_gray` (not `next`) avoids shadowing the builtin next().
            next_gray = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
            flow = cv2.calcOpticalFlowFarneback(prvs, next_gray, None, 0.5, 3, 20, 3, 5, 1.2, 1)
            mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
            hsv[..., 0] = ang * 180 / np.pi / 2  # direction -> hue (0-180)
            hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
            rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
            result = np.concatenate((frame2, rgb), axis=1)
            cv2.imshow('result', result)
            i += 1
            cv2.imwrite("{}/{}.jpg".format("test2", str(i)), result)
            # BUG FIX: waitKey() returns an int; comparing against the string
            # "q" was always False, so the quit key never worked.
            if cv2.waitKey(1) & 0xff == ord("q"):
                break
            prvs = next_gray
        except Exception:
            # End of stream (cap.read() returned None) or a processing error.
            break
    cap.release()
    cv2.destroyAllWindows()
def optical_flow_farneback_and_write_video():
    """Dense Farneback flow on a fixed crop of a video, processing every
    second frame; shows each side-by-side frame+flow image and writes it to
    test2/<n>.jpg."""
    def crop(frame):
        # Fixed region of interest (rows 800:1300, cols 1500:2000).
        start_x = 800
        end_x = start_x + 500
        start_y = 1500
        end_y = start_y + 500
        return frame[start_x:end_x, start_y: end_y]
    cap = cv2.VideoCapture('./yaogan/chen_1.mp4')
    ret, frame1 = cap.read()
    frame1 = crop(frame1)
    prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    hsv = np.zeros_like(frame1)
    hsv[..., 1] = 255  # full saturation; hue/value carry the flow data
    i = 0
    while True:
        try:
            ret, frame2 = cap.read()
            i += 1
            if i % 2 != 0:
                # Skip every other frame to halve the workload.
                continue
            frame2 = crop(frame2)
            # `next_gray` (not `next`) avoids shadowing the builtin next().
            next_gray = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
            flow = cv2.calcOpticalFlowFarneback(prvs, next_gray, None, pyr_scale=0.5, levels=3,
                                                winsize=7, iterations=3, poly_n=5, poly_sigma=1.2, flags=1)
            mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
            hsv[..., 0] = ang * 180 / np.pi / 2  # direction -> hue (0-180)
            hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
            rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
            result = np.concatenate((frame2, rgb), axis=1)
            cv2.imshow('result', result)
            cv2.imwrite("{}/{}.jpg".format("test2", str(i // 3)), result)
            # BUG FIX: waitKey() returns an int; comparing against the string
            # "q" was always False, so the quit key never worked.
            if cv2.waitKey(1) & 0xff == ord("q"):
                break
            prvs = next_gray
        except Exception:
            # End of stream (cap.read() returned None) or a processing error.
            break
    cap.release()
    cv2.destroyAllWindows()
def optical_flow_from_camera_farneback_2(flip=False, resize=True):
    """Dense Farneback optical flow over a video file (ShanghaiTech clip),
    visualised as an HSV colour wheel at a slower 100 ms display rate.

    @param flip    mirror each frame horizontally
    @param resize  scale every frame to a fixed 800x500 before processing
    """
    cap = cv2.VideoCapture("/home/ubuntu/data1.5TB/异常dataset/ShanghaiTech/train/01_001.avi")
    width = 800
    height = 500
    cap.set(3, width)   # CAP_PROP_FRAME_WIDTH (no effect on files)
    cap.set(4, height)  # CAP_PROP_FRAME_HEIGHT
    ret, frame1 = cap.read()
    if flip:
        frame1 = cv2.flip(frame1, 1)
    if resize:
        frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)
    prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    hsv = np.zeros_like(frame1)
    hsv[..., 1] = 255  # full saturation; hue/value carry the flow data
    while True:
        try:
            ret, frame2 = cap.read()
            if flip:
                frame2 = cv2.flip(frame2, 1)
            if resize:
                frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.INTER_CUBIC)
            cv2.imshow('frame1', frame2)
        except Exception:
            # cap.read() returned None at end of stream; stop cleanly.
            break
        # `next_gray` (not `next`) avoids shadowing the builtin next().
        next_gray = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(prvs, next_gray, None, pyr_scale=0.5, levels=3, winsize=8,
                                            iterations=5, poly_n=5, poly_sigma=1.2, flags=1)
        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv[..., 0] = ang * 180 / np.pi / 2  # direction -> hue (0-180)
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        cv2.imshow('frame2', rgb)
        result = np.concatenate((frame2, rgb), axis=1)
        cv2.imshow('result', result)
        # BUG FIX: waitKey() returns an int; comparing against the string "q"
        # was always False, so the quit key never worked. Compare to ord("q").
        if cv2.waitKey(100) & 0xff == ord("q"):
            break
        prvs = next_gray
    cap.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Entry point: run the cropped Farneback demo that writes JPEG frames.
    optical_flow_farneback_and_write_video()
    pass
|
4,063 | f3d9e783491916e684cda659afa73ce5a6a5894a | import numpy as np
import os
import sys
# Directory containing the Million Song Dataset challenge files (CLI arg 1).
file_path = sys.argv[1]
# Rows are tab-separated (user id, song id, play count) string triplets.
triplets = np.loadtxt(os.path.join(file_path, "kaggle_visible_evaluation_triplets.txt"),
                      delimiter="\t", dtype="str")
enum_users = np.ndenumerate(np.unique(triplets[:, 0]))
print(enum_users)
# NOTE(review): `user_id` and `user_nr` are not defined anywhere in this file,
# so the next line raises NameError at runtime -- presumably it was meant to
# run inside a loop such as `for user_nr, user_id in enum_users:` that renumbers
# users with small integer ids; confirm the intended logic before fixing.
triplets[triplets[:, 0] == user_id[user_nr[0]], 0] = user_nr + 1
print(triplets)
|
4,064 | 612b1851ba5a07a277982ed5be334392182c66ef | import re # regex module
from ftplib import FTP, error_perm
from itertools import groupby
from typing import List, Tuple, Dict
import requests # HTTP requests module
from util import retry_multi, GLOBAL_TIMEOUT # from util.py
class ReleaseFile:
    """! A released file (mod or build) hosted on Nebula.

    `name`: str             -- mod (or build) name
    `url`: str              -- primary host URL
    `group`: str            -- mod group string
    `subgroup`: str         -- mod subgroup string (optional)
    `mirrors`: List[str]    -- URLs of FTP mirrors
    Derived: `base_url` (URL directory with trailing slash) and `filename`
    (last URL path component). `content_hashes`, `hash` and `size` start
    empty and are filled in later by external code.
    """

    def __init__(self, name, url, group, subgroup=None, mirrors=None):
        self.name = name
        self.url = url
        self.group = group
        self.subgroup = subgroup
        self.mirrors = [] if mirrors is None else mirrors
        url_parts = url.split('/')
        self.base_url = "/".join(url_parts[:-1]) + "/"
        self.filename = url_parts[-1]
        # A list of tuples of (filename, hash), populated later.
        self.content_hashes = None
        self.hash = None
        self.size = 0

    def __repr__(self):
        return repr(self.name)
class SourceFile:
    """! A source archive published on Nebula.

    `name`: str  -- file name
    `url`: str   -- download URL
    `group`: str -- source group identifier
    """

    def __init__(self, name, url, group):
        self.name = name
        self.url = url
        self.group = group
class FileGroup:
    """! Represents a file group
    `name`: str
        Name of this group
    `files`: List[ReleaseFile]
        List of files within this group
    `mainFile`: str
        If this FileGroup has a subgroup, `mainFile` is the head of that group
    `subFiles`: List[ReleaseFile]
        Files within a subgroup
    """
    def __init__(self, name, files: List[ReleaseFile]):
        self.files = files
        self.name = name
        if len(files) == 1:
            # A single file is trivially the main file; no subgroups.
            self.mainFile = files[0]
            self.subFiles = {}
        else:
            # NOTE(review): if no file has subgroup "SSE2", mainFile stays
            # None -- confirm callers tolerate that.
            self.mainFile = None
            subFiles = []
            for file in files:
                # We only have subcategories for Windows where SSE2 is the main group
                if file.subgroup == "SSE2":
                    self.mainFile = file
                else:
                    subFiles.append(file)
            # Maps subgroup -> FIRST file of each consecutive run.
            # NOTE(review): itertools.groupby only groups CONSECUTIVE equal
            # keys and next() keeps just the first element per run; if
            # subFiles is not ordered by subgroup, a later run with the same
            # key silently overwrites the earlier entry -- confirm the input
            # ordering makes this safe.
            self.subFiles = dict(((x[0], next(x[1])) for x in groupby(subFiles, lambda f: f.subgroup)))
def get_release_files(tag_name, config) -> Tuple[List[ReleaseFile], Dict[str, SourceFile]]:
    """! Brief Gets the binary and source files from the Github Release server
    @param[in] `tag_name` Git tag of the current release
    @param[in] `config` confi metadata set in main.py
    @returns `List[ReleaseFile]` List of release files
    @returns `Dict[str, SourceFile]` Dictionary of source files
    @details Sends an `HTTP GET` request to github using their REST API to retrieve metadata. The files are not
    actually downloaded here, just their metadata is gathered and organized in their respective container for later
    use.
    """
    @retry_multi(5)  # retry at most 5 times
    def execute_request(path):
        """!
        @brief Performs a GET request with the given path. To be used with Github's REST API.
        @returns If successful, returns a .JSON object
        """
        headers = {
            "Accept": "application/vnd.github.v3+json"
        }
        url = "https://api.github.com" + path
        # GET https://api.github.com/<path> Accept: "application/vnd.github.v3+json"
        response = requests.get(url, headers=headers, timeout=GLOBAL_TIMEOUT)
        response.raise_for_status()  # Raise a RequestException if we failed, and trigger retry
        return response.json()

    build_group_regex = re.compile("fs2_open_.*-builds-([^.-]*)(-([^.]*))?.*")  # regex for matching binary .zip's and .7z's
    source_file_regex = re.compile("fs2_open_.*-source-([^.]*)?.*")  # regex for matching source .zip's and .7z's
    # Get the github release metadata of the given tag name
    response = execute_request(
        "/repos/{}/releases/tags/{}".format(config["github"]["repo"], tag_name))
    # Extract the binary and source files from the response["asset"] metadata
    binary_files = []
    source_files = {}
    for asset in response["assets"]:
        url = asset["browser_download_url"]
        name = asset["name"]
        group_match = build_group_regex.match(name)
        if group_match is not None:
            platform = group_match.group(1)
            # x64 is the Visual Studio name but for consistency we need Win64
            if platform == "x64":
                platform = "Win64"
            binary_files.append(ReleaseFile(name, url, platform, group_match.group(3)))
        else:
            group_match = source_file_regex.match(name)
            if group_match is None:
                # Asset is neither a build nor a source archive; ignore it.
                continue
            group = group_match.group(1)
            source_files[group] = SourceFile(name, url, group)
    # Stable ordering by file name for deterministic output.
    # NOTE(review): the lambda parameter shadows the ReleaseFile class name.
    binary_files.sort(key=lambda ReleaseFile: ReleaseFile.name)
    return binary_files, source_files
def get_ftp_files(build_type, tag_name, config) -> List[ReleaseFile]:
    """!
    @brief Gets file metadata for nightlies hosted on FTP, as determined by config["ftp"] attributes
    @param [in] `build_type` build-type string substituted into the FTP path template
    @param [in] `tag_name` Github tag name of the release (expected to match "nightly_<version>")
    @param [in] `config` config metadata set in main.py
    @returns List[ReleaseFile] -- empty on a permanent FTP error
    """
    tag_regex = re.compile("nightly_(.*)")
    build_group_regex = re.compile("nightly_.*-builds-([^.]+).*")
    files = []
    try:
        with FTP(config["ftp"]["host"], config["ftp"]["user"], config["ftp"]["pass"]) as ftp:
            # extract version from the tag
            version_str = tag_regex.match(tag_name).group(1)
            # build the remote path from the configured template,
            # then list all ftp entries under that path
            path_template = config["ftp"]["path"]
            path = path_template.format(type=build_type, version=version_str)
            file_entries = list(ftp.mlsd(path, ["type"]))
            # keep only entries of type "file" (skip directories etc.)
            for entry in file_entries:
                if entry[1]["type"] == "file":
                    files.append(entry[0])
    except error_perm:
        print("Received permanent FTP error!")
        return []
    out_data = []
    for file in files:
        # from the file list, extract only nightly files
        file_match = build_group_regex.match(file)
        if file_match is None:
            print("Ignoring non nightly file '{}'".format(file))
            continue
        group_match = file_match.group(1)
        primary_url = None
        mirrors = []
        # x64 is the name Visual Studio uses but Win64 works better for us since that gets displayed in the nightly post
        if "x64" in group_match:
            group_match = group_match.replace("x64", "Win64")
        # construct the download URL list for all mirrors. The first listed ftp location is taken as the Primary
        for mirror in config["ftp"]["mirrors"]:
            download_url = mirror.format(type=build_type, version=version_str, file=file)
            if primary_url is None:
                primary_url = download_url
            else:
                mirrors.append(download_url)
        # Form the List[ReleaseFile] list with the download URL links
        out_data.append(ReleaseFile(file, primary_url, group_match, None, mirrors))
    return out_data
4,065 | ff20b65f35614415ad786602c0fc2cabd08124fb | from typing import Sequence
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
def plot3D(X, Y, Z, proporcao=1, espelharZ=False):
    """Plot Z (and optionally its mirror -Z) as a 3D surface coloured with a
    two-colour checkerboard whose cell size is `proporcao`.

    X, Y, Z: 2-D coordinate grids as accepted by plot_surface.
    proporcao: number of grid cells per checkerboard square.
    espelharZ: when True, additionally plot the mirrored surface -Z.
    """
    fig = plt.figure()
    # NOTE(review): gca(projection=...) is deprecated in newer Matplotlib;
    # kept for compatibility with the rest of this file.
    ax = fig.gca(projection='3d')
    ax.set_xlabel('X ')
    ax.set_ylabel('Y ')
    ax.set_zlabel('Z ')
    # Fixed: removed a stray no-op `np.floor` statement, and used to_rgba for
    # BOTH colours -- the original used to_rgb for the second one, silently
    # discarding the '88' alpha component its hex literal specifies.
    colortuple = (colors.to_rgba('#FFFF4488'), colors.to_rgba('#4444FF88'))
    colorsArray = np.empty([len(X), len(Y)], dtype=tuple)
    for y in range(len(Y)):
        for x in range(len(X)):
            # Alternate colours in blocks of `proporcao` cells.
            colorsArray[x, y] = colortuple[int(
                np.ceil(x / proporcao) + np.ceil(y / proporcao)) % len(colortuple)]
    surf = ax.plot_surface(X, Y, Z, facecolors=colorsArray, linewidth=0)
    if espelharZ:
        surf = ax.plot_surface(X, Y, -Z, facecolors=colorsArray, linewidth=0)
    #surf = ax.plot_wireframe(X, Y, Z, linewidth=1)
    #plt.show()
def limitZ(Z, limit=10):
    """Clamp extreme cells of the 2-D sequence Z in place: values above
    `limit` become +inf, values below -`limit` become -inf."""
    for row in range(len(Z)):
        for col in range(len(Z[row])):
            value = Z[row][col]
            if value > limit:
                Z[row][col] = np.inf
            if value < -limit:
                Z[row][col] = -np.inf
def plotPontos3D(X, Y, Z):
    """Scatter-plot the given 3-D point coordinates and show the figure."""
    figure = plt.figure()
    axes = figure.add_subplot(projection='3d')
    axes.scatter(X, Y, Z, marker='o')
    for setter, label in ((axes.set_xlabel, 'X'),
                          (axes.set_ylabel, 'Y'),
                          (axes.set_zlabel, 'Z')):
        setter(label)
    plt.show()
def curvaNivel(X, Y, Z, levels):
    """Draw filled contour levels of Z over the (X, Y) grid, with a colour
    bar, and show the figure."""
    figure = plt.figure()
    axes = figure.add_subplot()
    filled = axes.contourf(X, Y, Z, levels)
    axes.set_xlabel('X')
    axes.set_ylabel('Y')
    figure.colorbar(filled)
    plt.show()
|
def digitSum(x):
    """Return the sum of the decimal digits of the non-negative integer x."""
    total = 0
    while x >= 10:
        total += x % 10
        x //= 10
    return total + x
def solve(S, n):
    """Return the non-negative integer root x of x*x + S*x == n, or -1 if no
    such integer exists.

    The quadratic has discriminant S*S + 4*n; a valid x requires the
    discriminant to be a perfect square whose root has the same parity as S.
    """
    from math import isqrt  # local import keeps this script block standalone

    discriminant = S * S + 4 * n
    # Fixed: int(discriminant ** 0.5) loses precision for large n because it
    # goes through a float; isqrt is exact for arbitrary-size integers.
    root = isqrt(discriminant)
    if root * root != discriminant:
        return -1
    if root % 2 != S % 2:
        # (root - S) would be odd, so x would not be an integer.
        return -1
    return (root - S) // 2
# Driver: read n and print the smallest x > 0 with x*x + digitSum(x)*x == n,
# or -1 if none exists.  S ranges over every possible digit sum of a
# candidate root (1..162 covers the search space).
n = int(input())
ans = -1
for S in range(1, 163):
    x = solve(S, n)
    # Only accept roots whose actual digit sum matches the S we assumed.
    if x > 0 and digitSum(x) == S:
        if ans == -1:
            ans = x
        else:
            ans = min(ans, x)
print(ans)
|
4,067 | 255130082ee5f8428f1700b47dee717465fed72f | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 18 18:21:37 2021
@author: benoitdeschrynmakers
"""
import requests
url = 'http://127.0.0.1:8888/productionplan'
if __name__ == "__main__":
    # POST a sample payload to the local /productionplan endpoint and print
    # the JSON answer (or a short error notice).
    filename = "example_payloads/payload1.json"
    # Fixed: use a context manager so the payload file handle is closed;
    # the original leaked the handle from a bare open().
    with open(filename, 'rb') as payload_file:
        data = payload_file.read()
    headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
    response = requests.post(url, data=data, headers=headers)
    if response.ok:
        print(response.json())
    else:
        print("error!")
|
from django.db import models


# Create your models here.
class UserInfo(models.Model):
    """Site account: credentials, contact details and soft state flags."""
    uname = models.CharField('用户名', max_length=50, null=False)      # username
    upassword = models.CharField('密码', max_length=200, null=False)   # password (hashing done upstream -- TODO confirm)
    email = models.CharField('邮箱', max_length=50, null=True)         # e-mail, optional
    phone = models.CharField('手机号', max_length=20, null=False)      # mobile number
    # NOTE(review): auto_now updates the field on EVERY save; a registration
    # timestamp usually wants auto_now_add -- confirm intent.
    time = models.DateTimeField('注册时间', auto_now=True)
    isban = models.BooleanField('禁用', default=False)                 # account disabled flag
    isdelete = models.BooleanField('删除', default=False)              # soft-delete flag

    def __str__(self):
        return self.uname

    class Meta:
        verbose_name = '用户'
        verbose_name_plural = verbose_name
class Address(models.Model):
    """Shipping address owned by a UserInfo account."""
    aname = models.CharField('收货人', max_length=50, null=False)   # recipient name
    ads = models.CharField('地址', max_length=300, null=False)      # street address
    phone = models.CharField('电话', max_length=20, null=False)     # contact phone
    # Fixed: on_delete is mandatory on ForeignKey since Django 2.0 (and
    # accepted as a keyword on 1.9+); CASCADE removes a user's addresses
    # together with the user record.
    user = models.ForeignKey(UserInfo, on_delete=models.CASCADE)

    def __str__(self):
        return self.aname

    class Meta:
        verbose_name = '收货地址'
        verbose_name_plural = verbose_name
|
# Decimal order-of-magnitude constants (not binary units).
G = 10 ** 9
M = 10 ** 6
K = 10 ** 3
# File Name: create_data.py
from sqlalchemy.orm import sessionmaker
from faker import Faker
from db_orm import Base, engine, User, Course
# Fixed: the original line `from sqlalchemy import MedaData` misspelled
# MetaData (ImportError) and Table/Column/Integer/String were used below
# without being imported at all.
from sqlalchemy import MetaData, Table, Column, Integer, String

# Compat alias: the bottom of this file still references the old misspelling.
MedaData = MetaData

session = sessionmaker(engine)()
fake = Faker('zh-cn')

# Fixed: `metadata` was referenced without ever being created.
metadata = MetaData()

# Core-style table definition (kept from the original example).
users_table = Table('users', metadata,
                    Column('id', Integer, primary_key=True),
                    Column('name', String(64)),
                    Column('age', Integer),
                    Column('address', String(64))
                    )
def create_users():
    """Queue ten User rows with fake names and e-mails on the session.

    The primary key is assigned by the database on flush, so the objects
    have no id yet when added here.
    """
    for _ in range(10):
        session.add(User(name=fake.name(), email=fake.email()))
def create_courses():
    """For every persisted User, queue two Course rows on the session.

    Each course name is four random (Chinese-locale) words joined together.
    """
    for author in session.query(User).all():
        for _ in range(2):
            session.add(Course(name=''.join(fake.words(4)), user_id=author.id))
def main():
    """Populate the session with fake users and courses, then commit
    everything to the database in one transaction."""
    create_users()
    create_courses()
    session.commit()
if __name__ == '__main__':
    # main()
    # NOTE(review): `main()` is commented out and the line below merely
    # accesses the `tables` attribute without doing anything with it, so
    # running this script currently has no effect.  `MedaData` is also a
    # misspelling of MetaData carried over from the import section.
    MedaData.tables
4,071 | 01e60123ad87d9ff49812fe3a6f5d55bc85921c5 | """
-*- coding:utf-8 -*-
@ Time : 14:05
@ Name : handle_ini_file.py
@ Author : xiaoyin_ing
@ Email : 2455899418@qq.com
@ Software : PyCharm
...
"""
from configparser import ConfigParser
from Common.handle_path import conf_dir
import os
class HandleConfig(ConfigParser):
    """ConfigParser that loads a named .ini file from the project's
    configuration directory (``conf_dir``)."""

    def __init__(self, ini_file_neme):
        super().__init__()
        self.ini_file_neme = ini_file_neme

    def red_conf__(self):
        """Read ``conf_dir/<ini_file_neme>`` into this parser (UTF-8)."""
        self.read(os.path.join(conf_dir, self.ini_file_neme), encoding="utf-8")
# Module-level singleton: load xiaoyin.ini once at import time.
red_conf = HandleConfig("xiaoyin.ini")
red_conf.red_conf__()

# Values consumed by the logging module: [log] name, level, and whether to
# also log to a file.
log_data_list = [red_conf.get("log", "log_name"), red_conf.get("log", "log_level"), red_conf.getboolean("log", "file")]
# print(log_data_list)
|
import numpy as np
import time

# Python 2 script (see the final print statement).
# Create key based on timestamp.  NOTE(review): a timestamp-seeded PRNG is
# predictable -- this is obfuscation, not real encryption.
KEY = time.time()
np.random.seed(int(KEY))

# Read in message
with open('Message.txt', 'r') as f:
    Message = f.read()
f.close()  # NOTE(review): redundant -- the `with` block already closed the file

# Generate vector of random integers, one per message character.
# NOTE(review): np.random.random_integers is long deprecated (randint is the
# replacement) and was removed in modern NumPy.
Encoder = np.random.random_integers(300, size=len(Message))

# Map message to encoded array: each character code times its key value.
M = []
for i in range(len(Message)):
    M.append(ord(Message[i])*Encoder[i])

# Create or overwrite the file with the encoded message
with open('ENCODED.txt', 'w') as e:
    for m in M:
        e.write(str(m)+" ")

# Create or overwrite the file with the key
with open('KEY.txt', 'w') as f:
    f.write(str(KEY))

print "Your message has been encoded!"
4,073 | b2bb7393bf7955f5de30c59364b495b8f888e178 | import numpy as np
class Constants:
    """Default hyperparameters used to configure DNN training.

    Fixed: the original declared `class Constants():` with empty parentheses
    (non-idiomatic) and carried no documentation.
    """
    DNN_DEFAULT_ACTIVATION = 'relu'
    DNN_DEFAULT_KERNEL_REGULARIZATION = [0, 5e-5]
    DNN_DEFAULT_BIAS_REGULARIZATION = [0, 5e-5]
    DNN_DEFAULT_LOSS = 'mean_squared_error'
    DNN_DEFAULT_VALIDATION_SPLIT = 0.2
    DNN_DEFAULT_EPOCHS = 100
    DNN_DEFAULT_CHECKPOINT_PERIOD = 100
    DNN_DEFAULT_VALIDATION_PERIOD = 1
    DNN_DEFAULT_PATIENCE = 1000
    DNN_DEFAULT_BATCH_SIZE = 16
    DNN_DEFAULT_OPTIMIZER = 'adam'
    DNN_DEFAULT_DROPOUT_RATE = 0.02
    DNN_DEFAULT_DECAY = 0
    DNN_DEFAULT_BIAS = 0.1
    DNN_DEFAULT_OUTPUT_BIAS = 0.5
import numpy as np
import matplotlib.pyplot as plt

# image data: a 3x3 gradient from 0.1 to 0.9
a = np.array([0.1,0.2,0.3,
              0.4,0.5,0.6,
              0.7,0.8,0.9]).reshape(3,3)

# Render the matrix as an image: nearest-neighbour interpolation keeps each
# cell a solid block, 'bone' is a grayscale-like colormap, and origin='upper'
# places row 0 at the top.
plt.imshow(a,interpolation='nearest',cmap='bone',origin='upper')
plt.colorbar()
# Hide the axis tick marks.
plt.xticks(())
plt.yticks(())
plt.show()
#!/bin/python3
import socket

HOST = '127.0.0.1'
PORT = 4444

# Fixed: socket.socket is a context manager -- the connection is now closed
# deterministically instead of leaking the descriptor as the original did.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((HOST, PORT))
# 17219: store site/password pairs, then answer password lookup queries.
tot, inp = map(int, input().split())
ID_dict = {}
for _ in range(tot):
    # Fixed: the key variable was named `id`, shadowing the builtin id();
    # also dropped the redundant map(str, ...) on values that are already
    # strings from split().
    site, pw = input().split()
    ID_dict[site] = pw
for _ in range(inp):
    print(ID_dict[input()])
from oil_prices import *

# Run configuration: whether to retrain the LSTM or load the saved model,
# and whether to display the plots at the end.
with_without = 'without training'
show_plot = 'yes'

print('START')

# Defining the past and future sequences for the LSTM training
n_past = 8
n_future = 1
target_date = '2018-11-16'
past = ['t']+['t-'+str(i) for i in range(1,n_past)]
future = ['t+'+str(i) for i in range(1,n_future+1)]

# Importing and feature engineering data
print(' - Imports data and formats the data')
data = data_import()
df = data_imputing(data)
df_train, df_predict = train_predict_split(df, n_past, n_future)
scaler = data_scaler(df_train)
# NOTE(review): the return value is discarded -- presumably this mutates
# df_train in place; confirm in oil_prices.
timeseries_to_supervised(df_train, n_past, n_future)

# Training the model anew if needed, otherwise, just loaded a pre-trained model
model_name = 'WTI_oil_price.mdl'
if with_without == 'with training':
    print(' - Training the LSTM model')
    model_trainer(df_train, n_past, n_future, model_name)
print(' - Loading the LSTM model')
model = tf.keras.models.load_model(model_name, custom_objects=None, compile=True)

# Validating the neural net by predicting all of the set and comparing with the observed data
df_train = make_many_predictions(df_train, model, past, n_future)
df_train = real_price_prediction(df_train, scaler)

# Predicting the oil price on Friday, November 16th, 2018.
prediction_run_forward(df_predict, target_date, scaler, model)
target_WTI_price = df_predict[df_predict['DATE'] == target_date]['WTI'].values[0]
print('Price of WTI oil on {}: $ {}'.format(target_date, target_WTI_price))

if show_plot == 'yes':
    data_plot()
    plot_real_prediction(df_train)
    plot_prediction(df_predict, target_WTI_price, target_date)

print('END')
#!/usr/bin/env python
# coding: utf-8

# In[2]:

print(" sum of n numbers with help of for loop. ")
n = 10
# NOTE(review): `sum` shadows the builtin of the same name throughout this
# notebook-style script.
sum = 0
for num in range(0, n+1, 1):
    sum = sum+num
print("Output: SUM of first ", n, "numbers is: ", sum )

# In[3]:

print(" sum of n numbers with help of while loop. ")
num = int(input("Enter the value of n: "))
hold = num  # remember the original n; `num` is counted down below
sum = 0
if num <= 0:
    print("Enter a whole positive number!")
else:
    while num > 0:
        sum = sum + num
        num = num - 1;
# displaying output
print("Sum of first", hold, "natural number is: ",sum)

# In[4]:

print("Take an integer and find whether the number is prime or not")
#input from user
number = int(input("Enter any number: "))
# prime number is always greater than 1
if number > 1:
    # Trial division over 2..number-1; break on the first divisor found.
    for i in range(2, number):
        if (number % i) == 0:
            print(number, "is not a prime number")
            break
    # for/else: runs only when the loop completed without hitting `break`.
    else: print(number, "is a prime number")
# if the entered number is less than or equal to 1
# then it is not prime number
else: print(number, "is not a prime number")

# In[ ]:
class Solution:
    def getDescentPeriods(self, prices: List[int]) -> int:
        """Count the contiguous subarrays of `prices` in which every step
        decreases by exactly 1 (each single element counts as one period).
        """
        total, run_length = 1, 1  # the first element alone is one period
        for previous, current in zip(prices, prices[1:]):
            # Extend the current smooth-descent run, or start a new one.
            run_length = run_length + 1 if current == previous - 1 else 1
            total += run_length
        return total
# Read a radius and report the basic circle measurements.
# Fixed: the prompt said "cycle" instead of "circle".
radius = int(input("enter the value for the radius of the circle: "))
circumference = 2 * 3.14159 * radius
diameter = 2 * radius
area = 3.14159 * radius ** 2
print('circumference is ', circumference)
print('diameter is: ', diameter)
print('area is ', area)
#coding=UTF-8
# Python 2 exercise script (print statements, `<>` operator, raw_input).
import random
import random  # NOTE(review): duplicate import, harmless

# Exercise 1: all 3-digit strings from digits 1..4 with no equal neighbours.
list=[]
s=0
for i in range(1,5):
    for j in range(1,5):
        for k in range(1,5):
            if i!=j and j<>k:
                list.append(str(i)+str(j)+str(k))
                s=s+1
print len(list)
print s
if len(list)==s:
    print "是相等的!"
else:
    print "不相等!"
print list[random.randrange(1,len(list))]

# Exercise 2: NOTE(review): range(1,1) is empty, so this loop never runs.
import math
for n in range(1,1):
    i=math.sqrt(n+100)
    print i
    j=math.sqrt(n+268)
    print j
    if i/2.0==int(i/2) and j/2.0==int(j/2):
        print n
        break

import time
#print help(time.strftime)
print time.strftime("%Y")

# Exercise 3: sort with list.sort(), then repeat with a hand-rolled bubble sort.
list=[90,19,8,99,87,45,109]
list.sort()
print u"sort排序输出:",list
list=[90,19,8,99,87,45,109]
i=len(list)
for b in range(1,i):
    i=i-1
    for a in range(0,i):
        if list[a+1]<list[a]:
            temp=list[a+1]
            list[a+1]=list[a]
            list[a]=temp
print u"冒泡排序输出:",list

# Exercise 4: a simple ASCII box.
print '*'*10
for i in range(5):
    print "* *"
print '*'*10

import sys
#sys.stdout.write(chr(1))
# Exercise 5: rabbit population (Fibonacci-style with two maturity stages).
temp=0   # rabbits breeding normally
temp1=0  # rabbits one month away from breeding
temp2=1  # rabbits two months away from breeding
m=12#int(raw_input(u"请输入月份:"))
for i in range(1,m+1):
    temp=temp+temp1
    temp22=temp2
    temp2=temp
    temp1=temp22
print "24个月后的兔子数量:",temp+temp1+temp2

# Exercise 6: Fibonacci pairs, two per output line.
f1=1
f2=1
for i in range(1,24):
    #print "%12d%12d"%(f1,f1)
    if (i%2)==0:
        print ''
    f1=f1+f2
    f2=f1+f2

# Exercise 7: three-digit Armstrong (narcissistic) numbers.
for i in range(1,10):
    for j in range(0,10):
        for k in range(0,10):
            if i**3+j**3+k**3==int(str(i)+str(j)+str(k)):
                print int(str(i)+str(j)+str(k))

# Exercise 8: prime factorisation of n, printed as "p1*p2*...".
import sys
from sys import stdout
n=45
print '数值:n=%d'%n
list=[]
for i in range(2,n+1):
    while n!=0:
        if n%i==0:
            list.append(str(i))
            sys.stdout.write(str(i))
            sys.stdout.write("*")
            n=n/i
        else:
            break
print "%d"%n
for i in range(0,len(list)):
    if i<len(list)-1:
        sys.stdout.write(list[i]+"*")
    else:
        sys.stdout.write(list[i])

# Exercise 9: bouncing-ball total distance / final rebound height.
# NOTE(review): indentation reconstructed from a whitespace-mangled source;
# the placement of `sum=sum+h` under `if i==1` should be double-checked.
h=100
sum=0
for i in range(1,11):
    if i==1:
        print ''
        sum=sum+h
    h=h/2.0
    sum=sum+2*h
print h
print sum
4,082 | 5d4ef436c4ee5c31496977a5ae9b55db9ff34e79 |
class Donkey(object):
    """A donkey: walks slowly (manzou) and brays (jiao)."""
    def manzou(self):
        print('走路慢……')

    def jiao(self):
        print('驴在欢叫%……')
class Horse(object):
    """A horse: strong and enduring (naili), neighs (jiao)."""
    def naili(self):
        print('马力足,持久强……')

    def jiao(self):
        print('马在嘶鸣')
class Mule(Donkey, Horse):
    """Multiple-inheritance demo: a mule inherits Donkey's walk and Horse's
    stamina (MRO: Mule, Donkey, Horse, object) and overrides jiao().

    Fixed: removed a stray `pass` statement that preceded the method body.
    """
    def jiao(self):
        print('骡子在唱歌')
# Demo: the mule walks like a donkey, has a horse's stamina, and sings.
骡子一号 = Mule()
骡子一号.manzou()
骡子一号.naili()
骡子一号.jiao()
print(Mule.__mro__)  # show the method-resolution order
|
4,083 | c58f40d369388b94778e8583176f1ba8b81d0c5e | #!/usr/bin/env python
from program_class import Program
import tmdata
import os
def main():
""""""
args1 = {"progname" : "whoami",
"command" : "/usr/bin/whoami",
"procnum" : 1,
"autolaunch" : True,
"starttime" : 5,
"restart" : "never",
"retries" : 2,
"stopsig" : "SSIG",
"stoptime" : 10,
"exitcodes" : [0, 2, 4, 5],
"stdout" : "/usr/bin/whoami.stdout",
"stderr" : "/usr/bin/whoami.stderr",
"redout" : False,
"rederr" : False,
"envvars" : {"ENV1" : "VAL1", "ENV2" : "VAL2"},
"workingdir" : "/tmp",
"umask" : "077"}
args2 = {"progname" : "top",
"command" : "/usr/bin/top",
"procnum" : 1,
"autolaunch" : True,
"starttime" : 5,
"restart" : "never",
"retries" : 2,
"stopsig" : "SSIG",
"stoptime" : 10,
"exitcodes" : [0, 2, 4, 5],
"stdout" : "/usr/bin/whois.stdout",
"stderr" : "/usr/bin/whois.stderr",
"redout" : False,
"rederr" : False,
"envvars" : {"ENV1" : "VAL1", "ENV2" : "VAL2"},
"workingdir" : "/tmp",
"umask" : "077"}
# args1 = {"command" : "/C/Downloads/darkradiant-1.8.0-x64",
# "procnum" : 1,
# "autolaunch" : True,
# "starttime" : 5,
# "restart" : "never",
# "retries" : 2,
# "stopsig" : "SSIG",
# "stoptime" : 10,
# "exitcodes" : [0, 2, 4, 5],
# "stdout" : "/C/Downloads/darkradiant-1.8.0-x64.stdout",
# "stderr" : "/C/Downloads/darkradiant-1.8.0-x64.stderr",
# "redir" : "/C/Downloads/darkradiant-1.8.0-x64.redir",
# "envvars" : {"ENV1" : "VAL1", "ENV2" : "VAL2"},
# "workingdir" : "/tmp",
# "umask" : "077"}
#
# args2 = {"command" : "/C/UsbFix/UsbFix.exe",
# "procnum" : 1,
# "autolaunch" : True,
# "starttime" : 5,
# "restart" : "never",
# "retries" : 2,
# "stopsig" : "SSIG",
# "stoptime" : 10,
# "exitcodes" : [0, 2, 4, 5],
# "stdout" : "/C/UsbFix/UsbFix.exe.stdout",
# "stderr" : "/C/UsbFix/UsbFix.exe.stderr",
# "redir" : "/C/UsbFix/UsbFix.exe.redir",
# "envvars" : {"ENV1" : "VAL1", "ENV2" : "VAL2"},
# "workingdir" : "/tmp",
# "umask" : "077"}
prog1 = Program(args1)
prog2 = Program(args2)
tmdata.saveProgram(prog1, "./config.xml", False)
tmdata.saveProgram(prog2, "./config.xml", False)
# tmdata.saveProgram(prog1, "./config.json", False)
# tmdata.saveProgram(prog2, "./config.json", False)
if __name__ == "__main__":
    # Fixed: dropped the stray C-style trailing semicolon after the call.
    main()
|
import numpy
import cv2
from keras.models import model_from_json
from keras.layers import Dense
from keras.utils import np_utils
import os
from keras.optimizers import SGD, Adam

# Fixed seed so repeated runs give the same results.
numpy.random.seed(42)

# Rebuild the network from its serialized architecture plus trained weights.
file_json = open('model.json', "r")
model_json = file_json.read()
file_json.close()
model = model_from_json(model_json)
model.load_weights('weights.h5')
print('Model loaded')

# NOTE(review): `sgd` is built but unused -- Adam() is what gets compiled in.
sgd = SGD(lr=0.01, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['accuracy'])

# (debug dump of the inverted pixel values, kept for reference)
# for i in range(10):
#     img = cv2.imread(str(i) + '.png', 0)
#     img = cv2.resize(img, (28, 28))
#     for i in range(28):
#         for j in range(28):
#             img[i][j] = abs(img[i][j] - 255)
#             print('%4.f' % img[i][j], end='')
#         print()
#     print()
#     print()
#     print()

# Classify the digit images 0.png..9.png: invert to white-on-black,
# normalize to [0,1], add batch and channel axes, then print the argmax class
# next to the expected digit.
for i in range(10):
    img = cv2.imread(str(i) + '.png', 0)
    img = cv2.resize(img, (28, 28))
    for x in range(28):
        for y in range(28):
            img[x][y] = abs(img[x][y] - 255)
    img = img.astype('float32')
    img /= numpy.max(img)
    img = numpy.array([img[numpy.newaxis, :, :]])
    a = model.predict(img, batch_size=64)
    print(i, numpy.argmax(a, axis=None, out=None))
__author__ = 'ldd'
# -*- coding: utf-8 -*-
from view.api_doc import handler_define, api_define, Param
from view.base import BaseHandler,CachedPlusHandler


# Registered at import time via @handler_define; serves GET / with a fixed
# JSON payload (a trivial hello / health-check endpoint).
@handler_define
class HelloWorld(BaseHandler):
    @api_define("HelloWorld", r'/', [
    ], description="HelloWorld")
    def get(self):
        self.write({'status':"HelloWorld"})
4,086 | dc2cbbaca3c35f76ac09c93a2e8ad13eb0bdfce6 |
from xai.brain.wordbase.verbs._essay import _ESSAY


# class header (auto-generated wordbase entry)
class _ESSAYED(_ESSAY, ):
    """Word entry for the past-tense form of the verb 'essay'; inherits all
    behaviour from _ESSAY and only overrides the identifying fields."""
    def __init__(self,):
        _ESSAY.__init__(self)
        self.name = "ESSAYED"    # surface form
        self.specie = 'verbs'    # word category
        self.basic = "essay"     # base/lemma form
        self.jsondata = {}
# tkinter: Label / Button demo.
# References (original links kept):
# 1. os:      https://blog.csdn.net/xxlovesht/article/details/80913193
# 2. shutil:  https://www.jb51.net/article/157891.htm
# 3. tkinter: https://blog.csdn.net/mingshao104/article/details/79591965
#             https://blog.csdn.net/sinat_41104353/article/details/79313424
#             https://blog.csdn.net/Bugest/article/details/81557112
# import: with `import xx` you can rebind any attribute on the module object;
# with `from xx import x` only mutable objects can be changed in place
# (rebinding an immutable one raises an error).
#===========================================《import》======================================================
import re
import os
import shutil
import tkinter as tk
from tkinter import filedialog
import tkinter.messagebox  # message-box dialogs
import sys
import datetime
import socket

curPyDirect = os.getcwd()  # directory this fileOperation.py runs from
curSysTime = datetime.datetime.now().strftime('%F %T')  # current system time as str

#===========================================《window》===========================================
window=tk.Tk()  # main tkinter window
window.title('my window')
window.geometry('600x300')

#===========================================《Menu》===========================================
# (example) fmenu1.add_command(label=item, command=File_Deal_Event)
# adds drop-down entries when the menu hangs off the top-level menu bar.
#===========================================《Menu》 1st: the top-level menu bar
menubar=tk.Menu(window)
def File_Open_EventC():
    """Let the user pick a file and print every line that appears to start a
    /* or // comment.

    NOTE(review): the '//' detection is broken -- slash_char and slash_char2
    are both '/', so s.find() returns the SAME index for both and
    `slash_flag2 - slash_flag == 1` can never be true.  '/*' is only found
    when the '*' directly follows the FIRST '/' of the line.
    """
    # FolderPath = filedialog.askdirectory()  # (alternative: pick a folder)
    FilePath = filedialog.askopenfilename(filetypes=( ("C file", "*.c*"),("Text file", "*.txt*"),("HTML files", "*.html;*.htm")))
    fp = open(FilePath, 'r')
    flag_1 = 0  # unused
    slash_char = '/'
    slash_flag = 0
    slash_char2 = '/'
    slash_flag2 = 0
    star_char='*'
    star_flag = 0
    s1 = []  # unused
    for s in fp.readlines():
        # 1. look for /* */ comments
        slash_flag = s.find(slash_char)
        # 1.1 first '/'
        if (slash_flag != -1 ):
            # 1.2 '*'
            star_flag = s.find(star_char)
            if( star_flag!=-1):
                if(star_flag - slash_flag == 1):  # found "/*"
                    print(s)
                    star_flag = 0
                    slash_flag = 0
                    slash_flag2 = 0
                else:
                    star_flag = 0
            # 1.3 second '/'
            slash_flag2 = s.find(slash_char2)
            if (slash_flag2 != -1 ):
                if(slash_flag2 - slash_flag == 1):  # found "//" (unreachable, see note)
                    print(s)
                    star_flag = 0
                    slash_flag = 0
                    slash_flag2 = 0
                else:
                    slash_flag2 = 0
        else:
            slash_flag = 0
    fp.close()
#===========================================《Menu》 2nd: build the drop-down menus
#================= menu 1: File
fmenu1 = tk.Menu(window)
fmenu1.add_command(label='新建',command=None)
fmenu1.add_command(label='打开',command=File_Open_EventC)
fmenu1.add_command(label='保存',command=None)
fmenu1.add_command(label='另存为',command=None)
#================= menu 2: Edit
fmenu2 = tk.Menu(window)
for item in ['复制', '粘贴', '剪切']:
    fmenu2.add_command(label=item)
#================= menu 3: View
fmenu3 = tk.Menu(window)
for item in ['默认视图', '新式视图']:
    fmenu3.add_command(label=item)
#================= menu 4: About
fmenu4 = tk.Menu(window)
fmenu4.add_command(label='版权信息',command=None)
fmenu4.add_command(label='其他说明',command=None)
#===========================================《Menu》 3rd: attach the drop-downs
# add_cascade's important attribute is `menu`, naming which menu to attach;
# `label` is the visible caption of that menu-bar entry.
menubar.add_cascade(label="文件", menu=fmenu1)  # File
menubar.add_cascade(label="编辑", menu=fmenu2)  # Edit
menubar.add_cascade(label="视图", menu=fmenu3)  # View
menubar.add_cascade(label="关于", menu=fmenu4)  # About
#===========================================《Menu》 4th: install the menu bar
# Finally tell the window which menu is its top-level menu.
window.config(menu=menubar)
#=============================== run the event loop
window.mainloop()
4,088 | 7997efb00f24ecc5c4fbf3ca049eca6b5b178d53 | import pytest
from freezegun import freeze_time
from datetime import datetime
from khayyam import JalaliDatetime, TehranTimezone
from dilami_calendar import DilamiDatetime, dilami_to_jalali
def test_dilami_date():
    """Round-trip and now() sanity checks for DilamiDatetime."""
    # Known conversion: 2018-02-01 Gregorian == 1591-06-28 Dilami.
    gdate = datetime(2018, 2, 1)
    ddate = DilamiDatetime(gdate, tzinfo=TehranTimezone)
    assert ddate.year == 1591
    assert ddate.month == 6
    assert ddate.day == 28

    # Construction from explicit Dilami components.
    ddate = DilamiDatetime(1591, 6, 28, tzinfo=TehranTimezone)
    assert ddate

    # Copy-construction from another DilamiDatetime.
    ddate = DilamiDatetime(1592, 5, 1, tzinfo=TehranTimezone)
    dilami_date = DilamiDatetime(ddate)
    assert dilami_date

    # Check Dilami date return today: Dilami -> Jalali must match Khayyam's
    # notion of "today" in the Tehran timezone.
    ddate = DilamiDatetime().now()
    jy, jm, jd = dilami_to_jalali(ddate.year, ddate.month, ddate.day)
    today = JalaliDatetime.now(TehranTimezone())
    assert today.year == jy
    assert today.month == jm
    assert today.day == jd

    # With time frozen, the wall-clock time must survive the round trip.
    with freeze_time(datetime.now()):
        dilami_now = DilamiDatetime(datetime.now()).to_datetime()
        assert dilami_now.time() == datetime.now().time()

    now = datetime.now()
    dilami_date = DilamiDatetime(now)
    assert dilami_date.to_date() == now.date()
def test_limits():
    """The constructor must reject out-of-range years, months and days."""
    invalid_dates = [
        (194, 1, 1),    # below MinYear
        (3373, 1, 1),   # above MaxYear
        (1592, -1, 3),  # month too small
        (1592, 13, 1),  # month too large
        (1592, 1, 32),  # day too large
        (1592, 1, -1),  # day too small
        (1595, 0, 0),   # day-0 edge in a leap year
        (1593, 0, 6),   # day-6 edge in a non-leap year
    ]
    for year, month, day in invalid_dates:
        with pytest.raises(ValueError):
            DilamiDatetime(year, month, day)
|
# https://www.hackerrank.com/challenges/caesar-cipher-1/problem


def caesar_cipher(text, k):
    """Rotate every ASCII letter in `text` forward by k positions (wrapping
    within its case); all other characters are left untouched."""
    out = []
    for ch in text:
        if 'A' <= ch <= 'Z':
            out.append(chr((ord(ch) - 65 + k) % 26 + 65))
        elif 'a' <= ch <= 'z':
            out.append(chr((ord(ch) - 97 + k) % 26 + 97))
        else:
            out.append(ch)
    # join once instead of the original quadratic s += ch concatenation
    return ''.join(out)


if __name__ == '__main__':
    n = int(input())  # declared string length (unused by the cipher itself)
    stringy = input()
    k = int(input())
    print(caesar_cipher(stringy, k))
4,090 | 9cf32e127664cb4c3290e665e35245acc936e064 | # created by ahmad on 17-07-2019
# last updated on 21-07-2019
#recommended font size of console in pydroid is 12
from decimal import Decimal
def fromTen():
    """Print the step-by-step working for converting the global `num`
    (a base-10 Decimal) into base `base2`.

    When the global `count` is 1 (set by forBoth), the value converted is
    sum(milst) + sum(mdlst), i.e. the base-10 result left behind by toTen().
    Integral digits come from repeated division, fractional digits from
    repeated multiplication; digits >= 10 are shown as letters (A=10, ...).
    """
    global fin
    fin = num
    nnum = num
    base = base2
    if count == 1:
        nnum = sum(milst) + sum(mdlst)
    Ipart = int(nnum)
    Dpart = Decimal(nnum - Ipart)
    strDpart = str(Dpart)
    Ilist = []
    Dlist = []
    print("digits before . (dot) is {} ".format(Ipart))
    if strDpart == "0":
        print("digits after . (dot) is 0")
    else:
        print("digits after . (dot) is {}".format(strDpart[2:]))
    print(" --------------------------------------------------")
    print("| INTEGRAL PART |")
    print(" --------------------------------------------------")
    print(" {}|_{}".format(base, Ipart))
    # Repeated division: remainders are the base-`base` digits, least
    # significant first.
    while nnum >= base:
        rem = int(nnum % base)
        srem = str(rem)
        nnum = int(nnum / base)
        Ilist.append(rem)
        if nnum >= base:
            print(" {}|_".format(base) + str(nnum) + " --->{}".format(srem))
        else:
            print(" " + str(nnum) + " --->{}".format(srem))
    Ilist.append(nnum)
    print(" --------------------------------------------------")
    # NOTE(review): IIlist is an ALIAS of Ilist (not a copy), so the
    # letter substitution below mutates Ilist too -- which is what the
    # final print relies on.
    IIlist = Ilist
    for i in range(len(IIlist)):
        try:
            a = int(IIlist[i]) + 55  # 10 -> 'A', 11 -> 'B', ...
            if a > 64:
                IIlist[i] = chr(a)
        except:
            pass
    print(Ilist[::-1])
    print()
    print(" --------------------------------------------------")
    print("| DECIMAL PART |")
    print(" --------------------------------------------------")
    # Repeated multiplication: the integer part stripped off at each step is
    # the next fractional digit.
    k = 0
    while k < (len(strDpart) - 2) * 2:
        print("{} x {} = ".format(Dpart, base), end='')
        a = Dpart * base
        Dpart = a - int(a)
        print(a)
        a1 = int(a)
        Dlist.append(a1)
        k = k + 1
    print(" --------------------------------------------------")
    print("integer part:")
    print(Ilist[::-1])
    print("decimal part:")
    print(Dlist)
    dot = ["."]
    y = Ilist[::-1]
    y1 = y + dot + Dlist
    for i in range(len(y1)):
        y1[i] = str(y1[i])
    print("Final Answer = ", '(', ''.join(y1), ')', 'base', base2)
def toTen():
    """Print the step-by-step working for converting the global `num`
    (interpreted in base `base1`) to base 10.

    Results are left in the module globals `milst` (integral-part terms)
    and `mdlst` (fractional-part terms) for forBoth()/fromTen() to reuse.
    If any digit is >= the base, reports INVALID and restarts main().
    """
    mnum = num
    mbase = base1
    global fin
    # Split into fractional and integral digit strings; the integral digits
    # are reversed so that index == power of the base.
    mdnum = mnum - int(mnum)
    minum = int(mnum)
    strmdnum = str(mdnum)[2:]
    mdlen = len(strmdnum)
    strminum = str(minum)[::-1]
    milen = len(strminum)
    strnum = strmdnum + strminum
    # Validate: every digit must be smaller than the base.
    con = 0
    for i in range(len(strnum)):
        a = int(strnum[i])
        if a >= mbase:
            con = con + 1
    if con == 0:
        p = 0
        global milst, mdlst
        milst = []
        mdlst = []
        print(" --------------------------------------------------")
        print("| INTEGRAL PART |")
        print(" --------------------------------------------------")
        # Each integral digit contributes digit * base**position.
        for ii in range(milen):
            minum = int(strminum[ii])
            power1 = pow(mbase, p)
            print("""{} power {} is "{}" """.format(mbase, p, power1),
                  " --> {} x {} = {}".format(power1, minum, minum * power1))
            p = p + 1
            milst.append(minum * power1)
        print("___________________________________________________")
        print()
        print("ADDITION OF INTEGRAL PART ===> ", end='')
        for i in range(milen):
            if (i + 1) < (milen):
                print(" {} +".format(milst[i]), end='')
            if i + 1 == milen:
                print("{} = ".format(milst[i]), end='')
        print(sum(milst))
        print()
        print("___________________________________________________")
        print(" --------------------------------------------------")
        print("| DECIMAL PART |")
        print(" --------------------------------------------------")
        print()
        # Each fractional digit contributes digit * base**(-position);
        # Decimal keeps the negative powers exact for the printout.
        mbase = Decimal(mbase)
        for jj in range(mdlen):
            q = Decimal(pow(mbase, -(jj + 1)))
            print("{} power {} = {} ---> ".format(mbase, -(jj + 1), q))  # ,end='')
            print(" ", strmdnum[jj], " x ", q, " = ", q * int(strmdnum[jj]))
            mdlst.append(float(q * int(strmdnum[jj])))
        print(" --------------------------------------------------")
        print(sum(mdlst))
        print("___________________________________________________")
        print()
        print("ADDITION OF DECIMAL PART ===> ", end='')
        for i in range(mdlen):
            if (i + 1) < (mdlen):
                print(" {} +".format(mdlst[i]), end='')
            if i + 1 == mdlen:
                print("{} = ".format(mdlst[i]), end='')
        print(sum(mdlst))
        print("___________________________________________________")
        # print("---------------------------------------------------------------")
        print("SUM OF DECIMAL SUM AND INTEGRAL SUM ===> {} + {} = ".format(sum(milst), sum(mdlst)), sum(milst) + sum(mdlst))
        print(" --------------------------------------------------")
    else:
        try:
            print(" --------------------------------------------------")
            print(" ---------------------")
            print(" | INVALID |")
            print(" ---------------------")
            print()
            print("all the digits should be less than the base ")
            print("The base of {} should not be {}".format(mnum, mbase))
            print()
            main()
        except:
            pass
def forBoth():
    """Convert via base 10: show the to-decimal working first, then flag
    count=1 so fromTen() reuses the decimal value toTen() left in the
    module globals milst/mdlst."""
    toTen()
    global count
    count = 1
    fromTen()
def main():
    """Prompt for a number and the two bases, then dispatch to the right
    conversion routine (going via base 10 when neither base is 10)."""
    global num, base1, base2, count, fin
    count = 0
    num = Decimal(input("Enter a number :"))
    base1 = int(input("Enter base of {} :".format(num)))
    base2 = int(input("Enter the base of resulting number:"))
    print(num)
    if base1 == 10:
        fromTen()
    elif base2 == 10:
        toTen()
    else:
        forBoth()
# Run once immediately, then keep offering to convert more numbers.
# NOTE(review): `s` is always 1 when tested, so the `if` always fires; the
# flag adds nothing over a plain main() call.
s = 1
if s == 1:
    main()
    s = s + 1
while True:
    print("\n")
    condition = input("Do you want to continue ? (y/n):")
    if condition == "y":
        main()
    elif condition == "n":
        print()
        quit()
    else:
        print("Invalid input")
4,091 | 5d8d47d77fba9027d7c5ec4e672fc0c597b76eae | # models.py
from sentiment_data import *
from utils import *
import nltk
from nltk.corpus import stopwords
import numpy as np
from scipy.sparse import csr_matrix
class FeatureExtractor(object):
    """
    Feature extraction base type. Takes a sentence and returns an indexed list of features.
    """
    def get_indexer(self):
        # Abstract: subclasses return the Indexer mapping features to ids.
        raise Exception("Don't call me, call my subclasses")

    def extract_features(self, ex_words: List[str], add_to_indexer: bool=False) -> List[int]:
        """
        Extract features from a sentence represented as a list of words. Includes a flag add_to_indexer to
        control vocabulary growth.
        :param ex_words: words in the example to featurize
        :param add_to_indexer: True if we should grow the dimensionality of the featurizer if new features are encountered.
        At test time, any unseen features should be discarded, but at train time, we probably want to keep growing it.
        :return: indexed list of features for the sentence
        """
        raise Exception("Don't call me, call my subclasses")
class UnigramFeatureExtractor(FeatureExtractor):
    """
    Unigram bag-of-words features: each sentence becomes a 1 x vocab sparse
    count vector over lowercased, stopword-filtered words, normalized by the
    number of indexed words found in the sentence.
    """
    def __init__(self, indexer: Indexer, train_exs, stop_words):
        # Build the vocabulary from every training sentence.
        for sentimentExample in train_exs:
            for word in sentimentExample.words:
                lowercase = word.lower()
                if lowercase not in stop_words:
                    indexer.add_and_get_index(lowercase)
        self.indexer = indexer
        self.corpus_length = len(indexer)
        # Pre-compute one sparse feature row per training example.
        self.feats = [self.calculate_sentence_probability(ex.words)
                      for ex in train_exs]

    def calculate_sentence_probability(self, sentence):
        """Return the normalized sparse unigram count row for `sentence`."""
        col = [self.indexer.index_of(word.lower()) for word in sentence if self.indexer.contains(word.lower())]
        # Fixed: `np.int` was removed in NumPy 1.24 -- use the builtin int.
        row = np.zeros(len(col), dtype=int)
        data = np.ones(len(col), dtype=int)
        feat = csr_matrix((data, (row, col)), shape=(1, self.corpus_length))
        if len(col) > 0:
            feat = feat * (1. / len(col))
        return feat
class BigramFeatureExtractor(FeatureExtractor):
    """
    Bigram feature extractor analogous to the unigram one.

    Vocabulary entries are (previous_word, word) lowercased pairs; a pair is
    skipped only when *both* of its words are stop words.
    """

    def __init__(self, indexer: Indexer, train_exs, stop_words):
        """
        :param indexer: Indexer used to map lowercased bigram tuples to ids
        :param train_exs: SentimentExamples whose words populate the vocabulary
        :param stop_words: collection of stop words
        """
        for sentimentExample in train_exs:
            words = sentimentExample.words
            previous_word = None
            for word in words:
                if previous_word is not None:
                    # Keep the bigram unless both halves are stop words.
                    if not (previous_word.lower() in stop_words and word.lower() in stop_words):
                        indexer.add_and_get_index((previous_word.lower(), word.lower()))
                previous_word = word
        self.indexer = indexer
        self.corpus_length = len(indexer)
        # Precompute the feature row of each training sentence once.
        self.feats = []
        for i, sentimentExample in enumerate(train_exs):
            sentence = sentimentExample.words
            self.feats.append(self.calculate_sentence_probability(sentence))

    def calculate_sentence_probability(self, sentence):
        """Return a 1 x vocab csr_matrix of relative bigram frequencies for *sentence*."""
        col = []
        previous_word = None
        for word in sentence:
            if previous_word is not None:
                if self.indexer.contains((previous_word.lower(), word.lower())):
                    col.append(self.indexer.index_of((previous_word.lower(), word.lower())))
            previous_word = word
        # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int dtype.
        row = np.zeros(len(col), dtype=int)
        data = np.ones(len(col), dtype=int)
        feat = csr_matrix((data, (row, col)), shape=(1, self.corpus_length))
        if len(col) > 0:
            feat = feat * (1. / len(col))
        return feat
class BetterFeatureExtractor(FeatureExtractor):
    """
    Combined unigram + bigram feature extractor.

    The head of each feature row holds unigram entries, the tail bigram
    entries; each segment is normalized by its own token count.
    """

    def __init__(self, indexer: Indexer, train_exs, stop_words):
        """
        :param indexer: Indexer shared by unigram keys and bigram tuple keys
        :param train_exs: SentimentExamples whose words populate the vocabulary
        :param stop_words: collection of stop words
        """
        # Unigram vocabulary (stop words excluded).
        for sentimentExample in train_exs:
            words = sentimentExample.words
            for word in words:
                lowercase = word.lower()
                if lowercase not in stop_words:
                    indexer.add_and_get_index(lowercase)
        # Bigram vocabulary (a pair is skipped only when both words are stop words).
        for sentimentExample in train_exs:
            words = sentimentExample.words
            previous_word = None
            for word in words:
                if previous_word is not None:
                    if not (previous_word.lower() in stop_words and word.lower() in stop_words):
                        indexer.add_and_get_index((previous_word.lower(), word.lower()))
                previous_word = word
        self.indexer = indexer
        self.corpus_length = len(indexer)
        # Precompute the feature row of each training sentence once.
        self.feats = []
        for i, sentimentExample in enumerate(train_exs):
            sentence = sentimentExample.words
            self.feats.append(self.calculate_sentence_probability(sentence))

    def calculate_sentence_probability(self, sentence):
        """Return a 1 x vocab csr_matrix with per-type-normalized unigram and bigram counts."""
        col = [self.indexer.index_of(word.lower()) for word in sentence if self.indexer.contains(word.lower())]
        unigram_count = len(col)
        previous_word = None
        for word in sentence:
            if previous_word is not None:
                if self.indexer.contains((previous_word.lower(), word.lower())):
                    col.append(self.indexer.index_of((previous_word.lower(), word.lower())))
            previous_word = word
        bigram_count = len(col) - unigram_count
        # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int dtype.
        row = np.zeros(len(col), dtype=int)
        data = np.ones(len(col))
        # Normalize each segment by its own count. When a count is zero the
        # corresponding slice is empty, so the division touches no elements.
        data[:unigram_count] = data[:unigram_count] * 1. / unigram_count
        data[unigram_count:unigram_count + bigram_count] = data[unigram_count:unigram_count + bigram_count] * 1. / bigram_count
        feat = csr_matrix((data, (row, col)), shape=(1, self.corpus_length))
        return feat
class SentimentClassifier(object):
    """Abstract base type for sentiment classifiers."""

    def predict(self, ex_words: List[str]) -> int:
        """
        Classify one sentence.

        :param ex_words: words (List[str]) in the sentence to classify
        :return: Either 0 for negative class or 1 for positive class
        """
        raise Exception("Don't call me, call my subclasses")
class TrivialSentimentClassifier(SentimentClassifier):
    """Baseline classifier: unconditionally predicts the positive class."""

    def predict(self, ex_words: List[str]) -> int:
        """Ignore the words and return 1 (positive)."""
        return 1
class PerceptronClassifier(SentimentClassifier):
    """
    Perceptron-based sentiment classifier (assignment stub).

    Intended to wrap a weight vector together with a feature extractor and
    implement predict() from SentimentClassifier; the constructor may be
    modified to accept both.
    """

    def __init__(self):
        raise Exception("Must be implemented")
class LogisticRegressionClassifier(SentimentClassifier):
    """
    Logistic-regression sentiment classifier.

    Wraps a weight vector (updated in place by train_logistic_regression)
    together with the feature extractor that produced the training features.
    """

    def __init__(self, feat_size, feat_extractor):
        # Weights start at zero; training mutates self.w directly.
        self.w = np.zeros(feat_size)
        self.feat_extractor = feat_extractor

    def predict(self, sentence):
        """Return 1 when the sentence's feature row scores positive under w, else 0."""
        feat = self.feat_extractor.calculate_sentence_probability(sentence)
        score = feat.dot(np.expand_dims(self.w, axis=1))[0, 0]
        return int(score > 0)
def train_perceptron(train_exs: List[SentimentExample], feat_extractor: FeatureExtractor) -> PerceptronClassifier:
    """
    Train a perceptron classifier (assignment stub).

    :param train_exs: training set, List of SentimentExample objects
    :param feat_extractor: feature extractor to use
    :return: trained PerceptronClassifier model
    """
    raise Exception("Must be implemented")
def train_logistic_regression(train_exs: List[SentimentExample], feat_extractor: FeatureExtractor) -> LogisticRegressionClassifier:
    """
    Train a logistic regression model with plain per-example SGD.

    :param train_exs: training set, List of SentimentExample objects
    :param feat_extractor: feature extractor to use; must expose the
        precomputed per-example sparse rows in feat_extractor.feats
    :return: trained LogisticRegressionClassifier model
    """
    lr = LogisticRegressionClassifier(feat_extractor.corpus_length, feat_extractor)
    alpha = 1e0  # learning rate
    # beta = 1e-4  # L2 regularization strength (currently disabled)
    for epoch in range(8):
        loss = 0.
        acc = 0
        indices = np.arange(len(train_exs))
        np.random.shuffle(indices)
        for i in indices:
            feat = feat_extractor.feats[i]
            sentimentExample = train_exs[i]
            y = sentimentExample.label
            # Compute the linear score once and reuse it for the sigmoid,
            # the loss, and the hard prediction (it was computed twice before).
            score = feat.dot(np.expand_dims(lr.w, axis=1))[0, 0]
            z = 1 / (1 + np.exp(-score))
            loss += -y * np.log(z) - (1 - y) * np.log(1 - z)
            # + beta * w.w  (regularization term, disabled)
            predict = int(score > 0)
            acc += (predict == y)
            grad = (z - y) * feat.toarray()[0]  # + 2 * beta * lr.w
            lr.w = lr.w - alpha * grad
        print("epoch {:d}, loss: {:f}, accuracy: {:f}".format(epoch, loss / len(train_exs), acc / len(train_exs)))
    # BUG FIX: reset the accumulator before the final reporting pass; the
    # last epoch's running loss previously leaked into the reported value,
    # roughly doubling the printed "training loss".
    loss = 0.
    for i in indices:
        feat = feat_extractor.feats[i]
        sentimentExample = train_exs[i]
        y = sentimentExample.label
        z = 1 / (1 + np.exp(-feat.dot(np.expand_dims(lr.w, axis=1))))[0, 0]
        loss += -y * np.log(z) - (1 - y) * np.log(1 - z)
    print("training loss: {:f}".format(loss / len(train_exs)))
    return lr
def train_model(args, train_exs: List[SentimentExample]) -> SentimentClassifier:
    """
    Entry point for training: builds the requested feature extractor, then
    trains and returns the model selected by the args bundle.

    :param args: args bundle from sentiment_classifier.py
    :param train_exs: training set, List of SentimentExample objects
    :return: trained SentimentClassifier model, of whichever type is specified
    """
    # Stop words are needed by every real feature extractor.
    nltk.download('stopwords')
    stop_words = set(stopwords.words('english'))
    # Feature extractor selection (the trivial model needs none).
    if args.model == "TRIVIAL":
        feat_extractor = None
    else:
        extractor_classes = {
            "UNIGRAM": UnigramFeatureExtractor,
            "BIGRAM": BigramFeatureExtractor,
            "BETTER": BetterFeatureExtractor,
        }
        if args.feats not in extractor_classes:
            raise Exception("Pass in UNIGRAM, BIGRAM, or BETTER to run the appropriate system")
        feat_extractor = extractor_classes[args.feats](Indexer(), train_exs, stop_words)
    # Model training.
    if args.model == "TRIVIAL":
        model = TrivialSentimentClassifier()
    elif args.model == "PERCEPTRON":
        model = train_perceptron(train_exs, feat_extractor)
    elif args.model == "LR":
        model = train_logistic_regression(train_exs, feat_extractor)
    else:
        raise Exception("Pass in TRIVIAL, PERCEPTRON, or LR to run the appropriate system")
    return model
4,092 | 0b2a036b806cca6e7f58008040b3a261a8bc844d | PROJECT_ID = "aaet-geoscience-dev"
# The tmp folder is for lasio I/O purposes.
DATA_PATH = "/home/airflow/gcs/data/tmp"

# Credential JSON key for accessing other projects.
# CREDENTIALS_JSON = "gs://aaet_zexuan/flow/keys/composer_las_merge.json"
CREDENTIALS_JSON = "keys/composer_las_merge.json"

# Bucket names for merged las files and spliced las files.
BUCKET_LAS_MERGE = "las_merged"
BUCKET_LAS_SPLICE = "us-central1-lithos-dev-94beb3d4-bucket"

# las_splice.py writes its output to the composer data folder, which is the
# input of logqc.
COMPOSER_FOLDER = "data/logqc_landing"
TMP_FOLDER = "data/tmp"

# For the GCP web UI and the BigQuery job status report.
BUCKET_JOB = "log_splice_tool_jobs"
BIGQUERY_DATASET_ID = "urc_jobs"
BIGQUERY_TABLE_ID = "jobs"

# Workflow type identifiers.
tpt_workflow_type = "tpt"
logsplice_workflow_type = "logsplice"
logqc_workflow_type = "logqc"
geomech_workflow_type = "geomech"

# Number of processors for las_merge_MP (multiprocessing).
N_PROCESSORS = 16

# The window size for moving average, e.g. 11 means the window covers a
# point and 5 adjacent points on both sides.
MOVING_AVG_WINDOW_SIZE = 11

# Default value for missing data, usually it is either -999.25 or -999.0.
MISSING = -999.0

# COL_DICT: a dictionary of aliased curve names for log splicing. Keys
# correspond to measurements (e.g., 'density', 'gamma', 'resistivity', ...)
# and are the standard curve names after splicing; each value is the list of
# aliased column names that could potentially correspond to that
# measurement before splicing.
COL_DICT = {
    # Caliper
    "cal": ["CAL", "CALI", "CALX", "HCAL", "TGS_CALX", "RAW_CALX"],
    # Compressional Sonic Slowness
    "dtc": ["DT", "DT24", "DTC", "TGS_DT", "TGS_DTC", "RAW_DT", "RAW_DTC"],
    # Deep Resistivity
    # 'rdeep' includes 'rdeep_ltrl' (laterolog), 'rdeep_indct' (induction),
    # 'rdeep_unknown'. A final 'rdeep' will be generated with an additional
    # 'rdeep_type' curve to denote the log type.
    # BUG FIX: 'ILT90' and 'LLD' each appeared twice in this list; the
    # duplicates are removed (membership semantics are unchanged).
    "rdeep": ["ILT90", "LLD", "RDEEP", "RES", "RES_DEEP", "AHT90", "AT90", "ILD", "ILO90", "ILF90", "LLMD"],
    # Density (Bulk)
    "rhob": ["DEN", "RHOB", "RHOZ", "ZDEN", "ZDNC", "TGS_RHOB", "RAW_RHOB"],
    # Density (Correction)
    "drho": ["DRHO", "HDRA", "ZCOR"],
    # Gamma Ray
    "gr": ["APC_GR_NRM", "GAMM", "GR", "GR_R", "GRR", "SGR", "SGRR", "CGR"],
    # Neutron Porosity
    "nphil": ["CNCF", "NEU", "NPOR", "NPHI", "NPHIL", "TNPH", "TGS_NPHI", "NPHI_LS", "TNPH_LS", "RAW_NPHI"],
    # Photoelectric effect
    "pe": ["PE", "PEF", "PEFZ", "TGS_PE", "RAW_PE"],
}

# Resistivity log types: LLD is a laterolog, most other aliases are
# inductions, and RDEEP / RES / RES_DEEP are of unknown origin.
RDEEP_TYPE_LIST = ["rdeep_ltrl", "rdeep_indct", "rdeep_unknown"]
RDEEP_TYPE_DICT = {"rdeep_ltrl": 1, "rdeep_indct": 2, "rdeep_unknown": 3}

# Curve description dictionary (curve mnemonic -> human readable label).
CURVE_DESC = {
    "DEPT": "Depth",
    "CAL": "Caliper",
    "DRHO": "Density Correction",
    "DTC": "Compressional Wave Slowness",
    "DTS": "Shear Wave Slowness",
    "GR": "Gamma Ray",
    "NPHI": "Neutron Porosity",
    "NPHIL": "Neutron Porosity",
    "PE": "Photoelectric Effect",
    "RDEEP": "Deep Resistivity",
    "RDEEP_LTRL": "Laterolog Resistivity",
    "RDEEP_INDCT": "Induction Resistivity",
    "RDEEP_UNKNOWN": "Unknown Resistivity (Laterolog or Induction)",
    "RDEEP_TYPE": "RDEEP Type 1:Laterolog 2:Induction 3:Unknown",
    "RHOB": "Bulk Density",
    "RUGOSITY": "Borehole Rugosity",
    "RUGOSITY_BHF": "Rugosity Bad Hole Flag",
    "DRHO_BHF": "Density Correction Bad Hole Flag",
    "DTC_BHF": "Sonic Bad Hole Flag",
    "GR_BHF": "Gamma Ray Bad Hole Flag",
    "NPHIL_BHF": "Neutron Bad Hole Flag",
    "RHOB_BHF": "Density Bad Hole Flag",
    "LOG_RDEEP_BHF": "Resistivity Bad Hole Flag",
    "PE_BHF": "PE Bad Hole Flag",
    "RHOB_MCF": "Density Corrected from Multiwell Flag",
    "RHOB_SYN": "Density Estimation from Ensemble of Learners",
    "NPHI_MCF": "Neutron Corrected from Multiwell Flag",
    "NPHI_SYN": "Neutron Estimation from Ensemble of Learners",
    "DTC_MCF": "Sonic Corrected from Multiwell Flag",
    "DTC_SYN": "Sonic Estimation from Ensemble of Learners",
    "PE_MCF": "PE Corrected from Multiwell Flag",
    "PE_SYN": "PE Estimation from Ensemble of Learners",
    "RHOB_NCF": "Density No Correction Flag",
    "RHOB_CORR": "Density Corrected",
    "NPHI_NCF": "Neutron No Correction Flag",
    "NPHI_CORR": "Neutron Corrected",
    "DTC_NCF": "Sonic No Correction Flag",
    "DTC_CORR": "Sonic Corrected",
    "PE_NCF": "PE No Correction Flag",
    "PE_CORR": "PE Corrected"
}
|
4,093 | 7ff7da216bdda5c30bf7c973c82886035b31247c | #!/usr/bin/python
class Bob(object):
    """Bob, a teenager with exactly four canned replies."""

    def __init__(self):
        # Canned responses, keyed by the kind of prompt Bob hears.
        self.question_response = "Sure."
        self.yell_response = "Woah, chill out!"
        self.silent_response = "Fine. Be that way!"
        self.whatever = "Whatever."

    def hey(self, question):
        """Reply to *question*: silence, yelling, a question, or anything else."""
        if not question or not question.strip():
            return self.silent_response
        if question.isupper():
            return self.yell_response
        if question.endswith("?"):
            return self.question_response
        return self.whatever
|
4,094 | 443ed24ab396e83dbf12558207376258124bca8b | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import numpy as np
import torch
from timm.data.transforms_factory import transforms_imagenet_eval
from torchvision import transforms
from PIL import Image
def preprocess(args, src_path, save_path):
    """
    Preprocess every image under *src_path* into a raw float32 ``.bin``
    tensor written to *save_path* (filename stem preserved).

    :param args: namespace carrying input_size, interpolation, use_prefetcher,
        mean, std and crop_pct (see main())
    :param src_path: directory containing the input images
    :param save_path: directory that receives the .bin outputs
    """
    if isinstance(args.input_size, tuple):
        img_size = args.input_size[-2:]
    else:
        img_size = args.input_size
    preprocesser = transforms_imagenet_eval(
        img_size,
        interpolation=args.interpolation,
        use_prefetcher=args.use_prefetcher,
        mean=args.mean,
        std=args.std,
        crop_pct=args.crop_pct)
    in_files = os.listdir(src_path)
    # enumerate replaces the manual i = i + 1 counter.
    for i, file in enumerate(in_files, start=1):
        print(file, "===", i)
        # BUG FIX: join paths instead of concatenating strings, which broke
        # whenever src_path lacked a trailing separator.
        input_image = Image.open(os.path.join(src_path, file)).convert('RGB')
        input_tensor = preprocesser(input_image)
        img = np.array(input_tensor).astype(np.float32)
        # Normalize with mean/std scaled to the 0-255 pixel range.
        # NOTE(review): with use_prefetcher=True the timm eval transform
        # skips its own normalization, which is presumably why it is done
        # here -- confirm against the timm version in use.
        img = (img - np.array([x * 255 for x in args.mean]).reshape(3, 1, 1)) / np.array(
            [x * 255 for x in args.std]).reshape(3, 1, 1)
        img = img.astype(np.float32)
        img.tofile(os.path.join(save_path, file.split('.')[0] + ".bin"))
def main():
    """Parse command-line arguments, fix the model constants, and run preprocessing."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--src_path', default='', type=str)
    parser.add_argument('--save_path', default='', type=str)
    parser.add_argument('--interpolation', default='bicubic', type=str, metavar='NAME',
                        help='Image resize interpolation type (overrides model)')
    # BUG FIX: 'use_prefetcher' was declared as a *positional* argument with
    # action='store_true', which argparse rejects (store_true implies
    # nargs=0, invalid for positionals). It must be an optional flag.
    parser.add_argument('--use_prefetcher', action='store_true', default=True,
                        help='enable fast prefetcher')
    parser.add_argument('--crop-pct', default=0.9, type=float,
                        metavar='N', help='Input image center crop percent (for validation only)')
    args = parser.parse_args()
    # Fixed ImageNet statistics and input geometry.
    args.mean = (0.485, 0.456, 0.406)
    args.std = (0.229, 0.224, 0.225)
    args.input_size = (3, 224, 224)
    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)
    preprocess(args, args.src_path, args.save_path)
if __name__ == '__main__':
main()
|
4,095 | 773fc4660def134410eca92886b2629be6977f74 | #
# Util for WebDriver
#
import sys
from string import Formatter
from functools import wraps
from numbers import Integral
from .locator import Locator
from .keys import Keys
PY3 = sys.version_info[0] == 3
class MemorizeFormatter(Formatter):
    """Formatter that remembers which kwargs each format call consumed."""

    def __init__(self):
        """Start with empty used/unused records."""
        Formatter.__init__(self)
        self._used_kwargs = {}
        self._unused_kwargs = {}

    def check_unused_args(self, used_args, args, kwargs):
        """Record every kwarg as either used or unused by the last format."""
        for key, value in kwargs.items():
            bucket = self._used_kwargs if key in used_args else self._unused_kwargs
            bucket[key] = value

    def vformat(self, format_string, args, kwargs):
        """Reset the used/unused records, then delegate to Formatter.vformat."""
        self._used_kwargs = {}
        self._unused_kwargs = {}
        return super(MemorizeFormatter, self).vformat(format_string, args, kwargs)

    def format_map(self, format_string, mapping):
        """Format *format_string* using *mapping*.

        Args:
            format_string(str): A format string
            mapping(dict): A map to format the string

        Returns:
            A formatted string.

        Raises:
            KeyError: if a referenced key is not provided by the given map.
        """
        return self.vformat(format_string, args=None, kwargs=mapping)

    def get_used_kwargs(self):
        """Kwargs consumed by the most recent formatting call."""
        return self._used_kwargs

    def get_unused_kwargs(self):
        """Kwargs ignored by the most recent formatting call."""
        return self._unused_kwargs
def add_element_extension_method(Klass):
    """Attach element_by_* aliases plus if_exists/or_none/wait_for variants to Klass."""

    def add_element_method(Klass, using):
        locator = using.name.lower()

        def make_alias(alias, target, passthrough):
            # Build one proxy method that forwards to the `target` method
            # with the locator strategy (`using.value`) pre-bound.
            if passthrough:
                def proxy(self, *args, **kwargs):
                    return getattr(self, target)(using.value, *args, **kwargs)
            else:
                def proxy(self, value):
                    return getattr(self, target)(using.value, value)
            proxy.__name__ = alias
            proxy.__doc__ = (
                "Set parameter 'using' to '{0}'.\n".format(using.value) +
                "See more in \'{0}\' method.".format(target)
            )
            setattr(Klass, alias, proxy)

        make_alias("element_by_" + locator, "element", False)
        make_alias("element_by_" + locator + "_if_exists", "element_if_exists", False)
        make_alias("element_by_" + locator + "_or_none", "element_or_none", False)
        make_alias("wait_for_element_by_" + locator, "wait_for_element", True)
        make_alias("elements_by_" + locator, "elements", False)
        make_alias("wait_for_elements_by_" + locator, "wait_for_elements", True)

    for locator in iter(Locator):
        add_element_method(Klass, locator)
def fluent(func):
    """Decorator enabling a fluent interface: a None result becomes the instance."""
    @wraps(func)
    def fluent_interface(instance, *args, **kwargs):
        ret = func(instance, *args, **kwargs)
        return instance if ret is None else ret
    return fluent_interface
def value_to_key_strokes(value):
    """Convert value to a list of key strokes
    >>> value_to_key_strokes(123)
    ['1', '2', '3']
    >>> value_to_key_strokes('123')
    ['1', '2', '3']
    >>> value_to_key_strokes([1, 2, 3])
    ['1', '2', '3']
    >>> value_to_key_strokes(['1', '2', '3'])
    ['1', '2', '3']

    Args:
        value(int|str|list)

    Returns:
        A list of string.
    """
    # An integer is first spelled out digit by digit.
    if isinstance(value, Integral):
        value = str(value)

    def to_stroke(item):
        if isinstance(item, Keys):
            return item.value
        if isinstance(item, Integral):
            return str(item)
        return item

    return [to_stroke(item) for item in value]
# Python 2/3 compatible exec_() helper (same approach as six.exec_).
if PY3:
    import builtins
    # On Python 3, exec is an ordinary builtin function.
    exec_ = getattr(builtins, "exec")
else:
    def exec_(code, globs=None, locs=None):
        """Execute code in a namespace."""
        # Default to the *caller's* globals/locals, mirroring the behavior
        # of the bare Python 2 exec statement.
        if globs is None:
            frame = sys._getframe(1)
            globs = frame.f_globals
            if locs is None:
                locs = frame.f_locals
            del frame  # break the reference cycle through the frame object
        elif locs is None:
            locs = globs
        # The Python 2 'exec ... in ...' statement is hidden inside a string
        # so this file still parses under Python 3; the branch only runs on
        # Python 2, where the string is executed as real syntax.
        exec("""exec code in globs, locs""")
|
4,096 | cb0df06ee474576b3024678fa0f63ce400d773ea | from flask.ext.wtf import Form
from wtforms import TextField
from wtforms.validators import Required
class VerifyHandphoneForm(Form):
    """Single-field form for submitting a phone verification code."""
    # Required text field holding the code the user types in.
    # NOTE(review): despite the name, this appears to carry the raw
    # verification code, not a hash -- confirm against the consuming view.
    handphone_hash = TextField('Enter verification code here', validators=[Required()])
4,097 | aec45936bb07277360ea1a66b062edc4c282b45a | import server_pb2
import atexit
from grpc.beta import implementations
from random import randint
from grpc._adapter._types import ConnectivityState
global _pool
_pool = dict()
class ChannelPool(object):
    """Fixed-size pool of insecure gRPC channels/stubs to one host:port."""

    def __init__(self, host, port, pool_size):
        self.host = host
        self.port = port
        self.pool_size = pool_size
        self.channels = []
        self.stubs = []
        # Indexes only, no references; channels[i] is the channel behind
        # stubs[i], so these are effectively stub ranks that were handed out.
        self.working_channel_indexs = set()
        self.connect()

    def flush_channels(self):
        """Probe connectivity of every handed-out channel and dispatch hooks.

        Call this periodically; channels whose connection failed or went
        idle can then be reconnected by the hook implementations.
        """
        channels = [self.channels[i] for i in self.working_channel_indexs]
        for channel in channels:
            # BUG FIX: state is initialized before the try block so that
            # on_exception() receives a defined value even when the
            # connectivity probe itself raises (previously a NameError).
            state = None
            try:
                state = channel._low_channel.check_connectivity_state(True)
                if state == ConnectivityState.CONNECTING:
                    self.on_channel_connection(channel, state)
                elif state == ConnectivityState.TRANSIENT_FAILURE:
                    self.on_transient_failure(channel, state)
                elif state == ConnectivityState.FATAL_FAILURE:
                    self.on_fatal_failure(channel, state)
                else:
                    self.on_success(channel, state)
            # BUG FIX: 'except Exception, e' is Python-2-only syntax; the
            # 'as' form is valid on Python 2.6+ and Python 3 alike.
            except Exception as e:
                self.on_exception(channel, state, e)

    # Connectivity hooks -- override in subclasses as needed.
    def on_channel_connection(self, channel, state):
        pass

    def on_transient_failure(self, channel, state):
        pass

    def on_fatal_failure(self, channel, state):
        pass

    def on_success(self, channel, state):
        pass

    def on_exception(self, channel, state, e):
        pass

    def connect(self):
        """Create pool_size channel/stub pairs (stubs[i] wraps channels[i])."""
        for i in range(self.pool_size):
            channel = implementations.insecure_channel(self.host, self.port)
            stub = server_pb2.beta_create_SimpleService_stub(channel)
            self.channels.append(channel)
            self.stubs.append(stub)

    def shutdown(self):
        """Drop every channel and stub; connect() can rebuild the pool later."""
        # The previous per-item `del channel` loop only deleted the loop
        # variable and had no effect; dropping the lists is sufficient.
        del self.channels
        del self.stubs
        self.channels = []
        self.stubs = []

    def get_stub(self):
        """Return a random stub and record its index as handed out."""
        index = randint(0, self.pool_size - 1)
        self.working_channel_indexs.add(index)
        return self.stubs[index]

    def __del__(self):
        self.shutdown()
class ClientImpl(object):
    """gRPC client wrapper owning a channel pool and self-registering in _pool."""

    def __init__(self, host='0.0.0.0', port=50051, size=1):
        # BUG FIX: ChannelPool.__init__ already calls connect(); calling
        # connect() a second time here doubled the channel/stub lists beyond
        # pool_size, leaving half of them unreachable through get_stub().
        self.pool = ChannelPool(host, port, size)
        self.register()

    def register(self):
        """Register this instance in the module-level _pool, keyed by its id."""
        # The previous dance of fetching the old object just to `del` a
        # local name had no effect; plain replacement is equivalent.
        key = str(id(self))
        _pool[key] = self

    def shutdown(self):
        self.pool.shutdown()

    @property
    def stub(self):
        return self.pool.get_stub()

    def hello(self, words, with_call=False):
        """Send a HelloRequest carrying *words* (3 second timeout)."""
        request = server_pb2.HelloRequest(say=words)
        return self.stub.Hello(request, 3, with_call=with_call)

    Hello = hello
def get_client():
    """Return an existing registered client, creating one on first use."""
    if _pool:
        # BUG FIX: dict.keys() is a non-indexable view on Python 3, so
        # _pool.keys()[0] raised TypeError; next(iter(...)) works on both
        # Python 2 and Python 3.
        key = next(iter(_pool))
        return _pool[key]
    client = ClientImpl()
    return client
def exit_handler():
    # The gRPC beta Python API does not always tear down cleanly (upstream
    # bug), so every registered client is shut down explicitly at exit.
    for client in _pool.values():
        client.shutdown()


atexit.register(exit_handler)
|
4,098 | 97eb599ae8bf726d827d6f8313b7cf2838f9c125 | import math
from chainer import cuda
from chainer import function
from chainer.functions import Sigmoid
from chainer.utils import type_check
import numpy
def _as_mat(x):
if x.ndim == 2:
return x
return x.reshape(len(x), -1)
class Autoencoder(function.Function):
def __init__(self, in_size, hidden_size, activation=Sigmoid,
wscale=1, bias=0,
initialW=None, initial_bias1=None, initial_bias2=None):
self.W = None
self.gW = None
self.b1 = None
self.b2 = None
self.gb1 = None
self.gb2 = None
self.activation = None
if initialW is not None:
assert initialW.shape == (hidden_size, in_size)
self.W = initialW
else:
self.W = numpy.random.normal(
0, wscale * math.sqrt(1. / in_size),
(hidden_size, in_size)).astype(numpy.float32)
xp = cuda.get_array_module(self.W)
self.gW = xp.full_like(self.W, numpy.nan)
if initial_bias1 is not None:
assert initial_bias1.shape == (hidden_size,)
self.b1 = initial_bias1
else:
self.b1 = numpy.repeat(numpy.float32(bias), hidden_size)
if initial_bias2 is not None:
assert initial_bias2.shape == (in_size,)
self.b2 = initial_bias2
else:
self.b2 = numpy.repeat(numpy.float32(bias), in_size)
self.gb1 = xp.empty_like(self.b1)
self.gb2 = xp.empty_like(self.b2)
if activation is not None:
if activation == Sigmoid:
self.activation = activation()
else:
self.activation = activation
def hidden(self, x):
h = _Encoder(self.W, self.b1)(x)
if self.activation is not None:
h = self.activation(h)
h.unchain_backward()
return h
@property
def parameter_names(self):
return 'W', 'b1', 'b2'
@property
def gradient_names(self):
return 'gW', 'gb1', 'gb2'
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
x_type, = in_types
type_check.expect(
x_type.dtype == numpy.float32,
x_type.ndim >= 2,
(type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==
type_check.Variable(self.W.shape[1], 'W.shape[1]')),
)
def check_type_backward(self, in_types, out_types):
type_check.expect(
in_types.size() == 1,
out_types.size() == 1,
)
x_type, = in_types
y_type, = out_types
type_check.expect(
y_type.dtype == numpy.float32,
y_type.ndim == 2,
y_type.shape[0] == x_type.shape[0],
y_type.shape[1] == type_check.Variable(self.W.shape[1],
'W.shape[1]'),
)
def zero_grads(self):
self.gW.fill(0)
self.gb1.fill(0)
self.gb2.fill(0)
def forward(self, x):
_x = _as_mat(x[0])
Wx = _x.dot(self.W.T)
Wx += self.b1
self.x_activation = Wx
if self.activation is not None:
h, = self.activation.forward([Wx])
else:
h = Wx
self.x_decode = h
y = h.dot(self.W)
y += self.b2
return y,
def backward(self, x, gy):
_x = self.x_decode
_gy = gy[0]
self.gW += _x.T.dot(_gy)
self.gb2 += _gy.sum(0)
_gy = _gy.dot(self.W.T).reshape(_x.shape)
if self.activation is not None:
_gy, = self.activation.backward([self.x_activation], [_gy])
_x = _as_mat(x[0])
self.gW += _gy.T.dot(_x)
self.gb1 += _gy.sum(0)
return _gy.dot(self.W).reshape(x[0].shape),
# undifferentiable Linear function
class _Encoder(function.Function):
    """Forward-only linear layer (x W^T + b) used by Autoencoder.hidden()."""

    def __init__(self, initialW, initial_Bias):
        self.W = initialW
        self.b = initial_Bias

    def check_type_forward(self, in_types):
        # Single float32 input whose trailing dims flatten to W.shape[1].
        type_check.expect(in_types.size() == 1)
        x_type, = in_types
        type_check.expect(
            x_type.dtype == numpy.float32,
            x_type.ndim >= 2,
            (type_check.Variable(numpy.prod, 'prod')(x_type.shape[1:]) ==
             type_check.Variable(self.W.shape[1], 'W.shape[1]')),
        )

    def forward(self, x):
        mat = _as_mat(x[0])
        out = mat.dot(self.W.T)
        out += self.b
        return out,
|
4,099 | 41f2a5ba0d7a726389936c1ff66a5724209ee99c | import torch
import torch.optim as optim
import torch.nn as nn
import torch.utils.data as data
from dataset import InsuranceAnswerDataset, DataEmbedding
from model import Matcher
from tools import Trainer, Evaluator
from tools import save_checkpoint, load_checkpoint, get_memory_use
def main():
    """Train the Matcher QA model on the insurance dataset, validate each
    epoch, optionally grow the negative-sample count, checkpoint/rollback
    near the end of training, and finally evaluate on the test split."""
    # --- Hyperparameters and run configuration ---
    batch_size = 64
    valid_batch_size = 8
    dataset_size = 500
    learning_rate = 0.001
    weight_decay = 1e-4
    epochs = 30
    show_frq = 20               # training progress print frequency
    negative_size = 10          # negatives per example, grown over time
    negative_expand = 1         # growth step per epoch
    negative_size_bound = 20    # hard cap on negative_size
    negative_retake = True      # rebuild the train set with more negatives
    load_read_model = False     # switches on checkpoint/rollback behavior
    save_dir = '/cos_person/data/'
    torch.backends.cudnn.benchmark = True
    # --- Data and model setup ---
    dm = DataEmbedding()
    dataset = InsuranceAnswerDataset(dataset_size=dataset_size, negative_size=negative_size, data_type='train')
    # Validation uses a much larger negative pool (400) than training.
    valid_dataset = InsuranceAnswerDataset(dataset_size=dataset_size, negative_size=400,
                                           data_type='valid')
    print(len(dataset))
    model = Matcher(embedding_dim=dm.embedding_dim, vocab_size=dm.embedding_size,
                    hidden_dim=150, tagset_size=50, negative_size=negative_size)
    embedding_matrix = torch.Tensor(dm.get_embedding_matrix())
    print('before model:' + get_memory_use())
    if torch.cuda.is_available():
        embedding_matrix = embedding_matrix.cuda()
        model = model.cuda()
    # Load pretrained embeddings into the encoder in place.
    model.encoder.embedding.weight.data.copy_(embedding_matrix)
    print('after model:' + get_memory_use())
    train_loader = data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=True)
    valid_loader = data.DataLoader(dataset=valid_dataset, batch_size=valid_batch_size, shuffle=True, drop_last=True)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay, amsgrad=True)
    train_accu_list = []
    train_loss_list = []
    valid_accu_list = []
    valid_loss_list = []
    trainer = Trainer(model=model, loader=train_loader, optimizer=optimizer, batch_size=batch_size,
                      data_size=len(train_loader), threshold_decay=True)
    valider = Evaluator(model=model, loader=valid_loader, batch_size=valid_batch_size)
    # --- Epoch loop: train, validate, persist metrics, grow negatives ---
    for epoch in range(1, epochs + 1):
        print('before:' + get_memory_use())
        print('Epoch {} start...'.format(epoch))
        # The model's negative count must match the active dataset's.
        model.reset_negative(dataset.negative_size)
        trainer.train(epoch=epoch, show_frq=show_frq, accu_list=train_accu_list, loss_list=train_loss_list)
        print('train after:' + get_memory_use())
        model.reset_negative(valid_dataset.negative_size)
        valider.evaluate(epoch=epoch, accu_list=valid_accu_list, loss_list=valid_loss_list)
        print('valid after:' + get_memory_use())
        # Persist running metrics every epoch so a crash loses little.
        torch.save(train_loss_list, save_dir + 'train_loss.pkl')
        torch.save(train_accu_list, save_dir + 'train_accu.pkl')
        if negative_retake:
            if negative_size + negative_expand <= negative_size_bound:
                # Rebuild the training set with one more negative per example.
                # NOTE(review): this reconstruction omits data_type='train'
                # unlike the initial dataset -- confirm the default matches.
                negative_size += negative_expand
                del dataset
                del train_loader
                dataset = InsuranceAnswerDataset(dataset_size=dataset_size, negative_size=negative_size)
                train_loader = data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=True)
                trainer.loader = train_loader
        # For the last 5 epochs, switch to checkpoint/rollback mode.
        if epochs - epoch <= 5:
            load_read_model = True
        if load_read_model:
            if epoch <= 1:
                save_checkpoint(save_dir=save_dir + 'check.pkl', model=model, optimizer=optimizer)
            elif valid_accu_list[-1] > valid_accu_list[-2] \
                    or (valid_accu_list[-1] == valid_accu_list[-2] and valid_loss_list[-1] < valid_loss_list[-2]):
                # Validation improved: keep this state as the new checkpoint.
                save_checkpoint(save_dir=save_dir + 'check.pkl', model=model, optimizer=optimizer)
            else:
                # Validation regressed: roll back to the checkpoint and
                # decay the learning rate before continuing.
                checkpoint = load_checkpoint(save_dir + 'check.pkl')
                model.load_state_dict(checkpoint['model_state_dict'])
                optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
                trainer.model = model
                trainer.optimizer = optimizer
                trainer._lr_decay(0.8)
                valider.model = model
        else:
            torch.save(model, save_dir + 'model.pkl')
    # --- Final persistence after the epoch loop ---
    # NOTE(review): the original indentation was lost; these saves are
    # placed after the loop (final metrics + model) -- confirm intent.
    torch.save(train_loss_list, save_dir + 'train_loss.pkl')
    torch.save(train_accu_list, save_dir + 'train_accu.pkl')
    torch.save(valid_loss_list, save_dir + 'valid_loss.pkl')
    torch.save(valid_accu_list, save_dir + 'valid_accu.pkl')
    torch.save(model, save_dir + 'model.pkl')
    # --- Test evaluation (400 negatives, like validation) ---
    test_dataset = InsuranceAnswerDataset(dataset_size=dataset_size, negative_size=400, data_type='test')
    test_loader = data.DataLoader(dataset=test_dataset, batch_size=valid_batch_size, shuffle=True, drop_last=True)
    tester = Evaluator(model=model, loader=test_loader, batch_size=valid_batch_size)
    test_accu_list = []
    test_loss_list = []
    model.reset_negative(test_dataset.negative_size)
    tester.evaluate(epoch=1, accu_list=test_accu_list, loss_list=test_loss_list)
    torch.save(test_loss_list, save_dir + 'test_loss.pkl')
    torch.save(test_accu_list, save_dir + 'test_accu.pkl')


if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.