index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
20,200 | 7c09478af2acb3513c9a9fc3ff206c344ee02d5e | from django.db import models
from common.models import baseModel
# Create your models here.
class parkingSpot(baseModel):
    """A single parking spot: location, optional address, hourly price and a reservation flag."""
    # NOTE(review): IntegerField cannot hold fractional degrees — latitude/longitude
    # normally need DecimalField/FloatField; confirm the intended precision before migrating.
    lat = models.IntegerField('latitude of spot', db_index=True, blank=False)
    longi = models.IntegerField('longitude of spot', db_index=True, blank=False)
    # Free-form street address; optional.
    spot_addr = models.TextField('spot address', blank=True)
    # Hourly price in whole USD (non-negative).
    cost_per_hr = models.PositiveIntegerField('cost of spot for parking in USD', blank=False)
    # Availability flag; new spots start unreserved.
    is_reserved = models.BooleanField('is spot reserved or not', default=False)
20,201 | 8f402b6b7cfb38225f02f82df2ce707c0b80db82 | """
Page 73
Given an image represented by an NxN matrix, where each pixel in the image is 4 bytes, write a method to rotate the image by 90 degrees. Can you do this in place?
ans: pg 179
"""
# layer by layer
def matrix_rotate(matrix, N):
    """Rotate the NxN matrix 90 degrees clockwise, in place.

    Works layer by layer from the outside in: for each position on the top
    edge of a layer, the four corresponding cells (top, left, bottom, right)
    are cycled using a single temporary.

    Returns the same matrix object for convenience.
    """
    # BUGFIX: use floor division so the code also runs on Python 3, where
    # N / 2 is a float and range() would raise TypeError.
    for layer in range(N // 2):
        last_col = N - 1 - layer
        for col_idx in range(layer, last_col):
            # copy the top value
            top = matrix[layer][col_idx]
            # rotate the left to the top
            matrix[layer][col_idx] = matrix[N - 1 - col_idx][layer]
            # rotate the bottom to the left
            matrix[N - 1 - col_idx][layer] = matrix[N - 1 - layer][N - 1 - col_idx]
            # rotate the right to the bottom
            matrix[N - 1 - layer][N - 1 - col_idx] = matrix[col_idx][N - 1 - layer]
            # rotate the top to the right
            matrix[col_idx][N - 1 - layer] = top
    return matrix


# print() works on Python 2 and 3 alike for a single argument.
print(matrix_rotate([[1, 2], [3, 4]], 2))
print(matrix_rotate([[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]], 4))
|
20,202 | 80e6f920cff24394ceffd4ea308d5bba14b3b97f | __author__ = 'dan'
import unittest
from mlb.parsers.player_list_parser import PlayerListParser
class TestPlayerListParser(unittest.TestCase):
    """Tests for PlayerListParser against the saved 'players_a.html' fixture."""

    player_list_parser = None

    def setUp(self):
        # Fresh parser per test so state cannot leak between tests.
        self.player_list_parser = PlayerListParser()

    def tearDown(self):
        self.player_list_parser = None

    def test_parse_all(self):
        """With active_only=False both active and retired ids are collected."""
        self.player_list_parser.active_only = False
        # BUGFIX: close the fixture file instead of leaking the handle.
        with open('test_files/players_a.html') as fixture:
            self.player_list_parser.parse(fixture, 'a')
        # assertEqual reports a useful diff on failure, unlike assertTrue(a == b).
        self.assertEqual(self.player_list_parser.active_player_ids[0], 'aardsda01')
        self.assertEqual(self.player_list_parser.active_player_ids[-1], 'aybarer01')
        self.assertEqual(self.player_list_parser.retired_player_ids[-1], 'azocaos01')

    def test_parse_active_only(self):
        """With active_only=True the active ids are still collected."""
        self.player_list_parser.active_only = True
        with open('test_files/players_a.html') as fixture:
            self.player_list_parser.parse(fixture, 'a')
        self.assertEqual(self.player_list_parser.active_player_ids[0], 'aardsda01')
        self.assertEqual(self.player_list_parser.active_player_ids[-1], 'aybarer01')


if __name__ == '__main__':
    unittest.main()
20,203 | 9c7aa76eb28bb77c21396e50190e69a0b4cc8680 | from Individual import Individual
import random
import data_loader
class Izzy(Individual):
    """Izhikevich spiking-neuron individual for an evolutionary algorithm.

    The genotype is a 66-bit integer encoding the five model parameters
    a, b, c, d and k; development() decodes the bits, simulates the neuron
    and stores the resulting spike train and spike positions.
    """

    nr_of_bits = 66   # genotype width in bits
    t = 10.0          # integration time constant
    I = 10.0          # constant input current
    threshold = 35    # membrane potential that registers a spike

    def __init__(self, genotype=None):
        if genotype is None:
            self.initial_genotype()
        else:
            self.genotype = genotype
        self.phenotype = []
        self.fitness = 0.0
        self.v = -60.0   # membrane potential
        self.u = 0.0     # recovery variable
        self.distance = 0

    # Creates an initial random genotype representing the
    # five variables: a, b, c, d, and K.
    def initial_genotype(self):
        self.genotype = random.getrandbits(66)

    # Perform mutation on the genotype.
    def mutate(self, mutation_prob, mutation_count):
        for _ in range(mutation_count):
            if random.random() < mutation_prob:
                # BUGFIX: randint is inclusive at both ends, so the upper
                # bound must be nr_of_bits - 1; the old bound could flip a
                # 67th bit outside the 66-bit encoding.
                self.genotype = self.genotype ^ (1 << random.randint(0, self.nr_of_bits - 1))

    # Develop the genotype into the set of neuron parameters (the phenotype).
    def development(self):
        # Convert the genotype integer to a list of '0'/'1' strings, MSB first.
        gtype = int(self.genotype)
        genome_list = []
        for _ in range(0, self.nr_of_bits):
            genome_list.insert(0, str(gtype % 2))
            # BUGFIX: floor division keeps gtype an int on Python 3 as well;
            # true division would produce floats and corrupt the bit extraction.
            gtype = gtype // 2
        # Develop 'a' parameter: | RANGE: [0.001, 0.2] *1000 -> [1, 200]
        self.a = (dev_parameter(genome_list, 0, 16, 65536, 200) + 1) / 1000.0
        # Develop 'b' parameter: | RANGE: [0.01, 0.3] *100 -> [1, 30]
        self.b = (dev_parameter(genome_list, 16, 26, 1024, 30) + 1) / 100.0
        # Develop 'c' parameter: | RANGE: [-80, -30] -30 -> [-50, 0]
        self.c = -dev_parameter(genome_list, 26, 38, 4096, 50) - 30.0
        # Develop 'd' parameter: | RANGE: [0.1, 10] *10 -> [1, 100]
        self.d = (dev_parameter(genome_list, 38, 52, 16384, 100) + 1) / 10.0
        # Develop 'k' parameter: | RANGE: [0.01, 1] *100 -> [1, 100]
        self.k = (dev_parameter(genome_list, 52, 66, 16384, 100) + 1) / 100.0
        # Simulate the neuron for 1001 steps to build the spike train.
        self.spiketrain = []
        for _ in range(1001):
            self.v += (1 / self.t) * (self.k * self.v ** 2 + 5 * self.v + 140 + self.I - self.u)
            self.u += (self.a / self.t) * (self.b * self.v - self.u)
            self.spiketrain.append(self.v)
            if self.v > self.threshold:
                # Spike: reset the potential and bump the recovery variable.
                self.v = self.c
                self.u += self.d
        # Locate the spikes: local maxima of the train above 0.
        self.spikes = find_spikes(self.spiketrain, 0)

    def set_distance(self, dist):
        self.distance = dist

    # Perform crossover on the genotype.
    def crossover(self, other, crossover_rate):
        """Return a child: with probability crossover_rate its genotype
        interleaves 2-5 bit chunks taken alternately from both parents,
        otherwise it is a copy of self's genotype."""
        if random.random() < crossover_rate:
            crossover_range = (2, 5)
            # BUGFIX: floor division so range() receives an int on Python 3.
            splits = [(i % 2, random.randint(*crossover_range))
                      for i in range(self.nr_of_bits // crossover_range[0])]
            # BUGFIX: pad both bitstrings to the full 66-bit genotype width;
            # the default width of 20 left small genotypes shorter than
            # nr_of_bits and misaligned the slice indices below.
            genotypes = (self.num_to_bitstring(self.genotype, self.nr_of_bits),
                         self.num_to_bitstring(other.genotype, self.nr_of_bits))
            new_genotype = []
            index = 0
            for individual, n_genes in splits:
                to_index = min(index + n_genes, self.nr_of_bits)
                new_genotype.append(genotypes[individual][index:to_index])
                if to_index >= self.nr_of_bits:
                    break
                index += n_genes
            return Izzy(int("".join(new_genotype), 2))
        else:
            return Izzy(self.genotype)

    def num_to_bitstring(self, n, l=20):
        """Binary representation of n, zero-padded on the left to at least l digits."""
        return bin(n)[2:].zfill(l)

    def __str__(self):
        return "IzzyPhenotype-a%sb%sc%sd%sk%s" % (self.a, self.b, self.c, self.d, self.k)

    def __repr__(self):
        return self.__str__()
# Develop a single parameter: decode the binary slice to an int and scale it.
def dev_parameter(glist, start, stop, binlim, lim):
    """Decode bits glist[start:stop] (MSB first) and scale the value into [0, lim],
    rounded, treating binlim as the full-scale denominator."""
    raw = int("".join(glist[start:stop]), 2)
    return round(raw * 1.0 / binlim * lim)
# Takes in the spike train and returns the indices where spikes occur.
def find_spikes(data, t):
    """Return every index m = i + 2 where data[m] is the maximum of the
    5-wide window data[i:i+5] and also exceeds the threshold t."""
    window = 5
    n = len(data)
    return [mid + 2
            for mid in range(n - window)
            if data[mid + 2] > t and data[mid + 2] == max(data[mid:mid + window])]
|
20,204 | be926cf737eca6535af30a3b4d04aa976f079034 | import mysql.connector
# Demo: basic MySQL administration with mysql-connector-python.
# Replace 'Your Username' / 'Your Password' with real credentials before running.

# 1) Create a new database.
conn= mysql.connector.connect(host='localhost',username='Your Username',password='Your Password')
print(conn) # the live connection object
my_cursor=conn.cursor() # cursor used to send SQL statements
query='CREATE DATABASE New_Database' # the query text
my_cursor.execute(query) # run the query on the server
conn.commit() # persist the change
conn.close() # close the connection

# 2) Reconnect and list every database on the server.
conn= mysql.connector.connect(host='localhost',username='Your Username',password='Your Password')
my_cursor=conn.cursor()
query='SHOW DATABASES'
my_cursor.execute(query)
# Iterating the cursor yields one tuple per database.
for i in my_cursor:
    print(i)
# print(my_cursor.fetchall()) # alternative: fetch all rows at once as a list
conn.commit()
conn.close()

# 3) Create a table inside the new database.
conn1= mysql.connector.connect(host='localhost',username='Your Username',password='Your Password',database='new_database') # this will open the "new_database"
myc=conn1.cursor()
query1='CREATE TABLE Students (First_Name VARCHAR(200),Last_Name VARCHAR(200))'
myc.execute(query1)
conn1.commit()
conn1.close()

# 4) Bulk-insert rows with executemany; each tuple fills the %s placeholders.
conn2= mysql.connector.connect(host='localhost',username='Your Username',password='Your Password',database='new_database')
myc1=conn2.cursor()
query2='INSERT INTO Students (First_Name,Last_Name) VALUES (%s,%s)' # %s placeholders are bound from the values list
values=[('Akshat','Bhatnagar'),('Preet','Kothari'),('Lekhansh','Bhatnagar'),('Kartikay','Maharshi')]
myc1.executemany(query2,values)
conn2.commit()
conn2.close()

# 5) Read the rows back.
conn3= mysql.connector.connect(host='localhost',username='Your Username',password='Your Password',database='new_database')
myc2=conn3.cursor()
query3='SELECT * FROM Students'
myc2.execute(query3)
print(myc2.fetchall())
# for i in myc2:
#     print(i)
conn3.commit()
conn3.close()
|
20,205 | 4ad35958806c604bba1c7b39a603ddb1aa415027 | from setuptools import setup
# Trove classifiers describing the package on PyPI.
classifiers = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Topic :: Software Development :: Libraries',
    # NOTE(review): this MIT classifier conflicts with the 'license' field
    # below, which declares Apache License 2.0 — confirm which one applies.
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Programming Language :: Python :: Implementation',
]
# Keyword arguments passed straight through to setuptools.setup().
kw = {
    'name': 'tornado-basic-auth',
    'version': '0.1.3',
    'description': 'basic authentication for tornado',
    # Long description is read from the README at build time.
    'long_description': open('README.rst').read(),
    'author': 'Yangjing Zhang',
    'author_email': 'zhangyangjing@gmail.com',
    'license': 'Apache License 2.0',
    'url': 'https://github.com/zhangyangjing/tornado-basic-auth',
    'keywords': 'tornado digest-auth basic-auth',
    'py_modules': ['tornado_basic_auth'],
    'classifiers': classifiers,
    'install_requires': ['tornado>=4.0.0'],
}
if __name__ == '__main__':
    setup(**kw)
|
20,206 | 1ad29a215389a9f6052e2433242ee893e9bd4abf | #!/usr/bin/python3
#
# LaunchBar Action Script
#
import sys
import json
import os
from devonthink import DEVONthink
from logger import logger
from config import UserConfig
from cache import DB_PATH as DB_PATH_CACHE
from frequency import DB_PATH as DB_PATH_FREQUENCY
# Tag used to exclude records from every search.
EXCLUDED_TAG = UserConfig.excluded_tag
# DEVONthink query skeleton: name terms plus the tag exclusion.
QUERY_TEMPLATE = 'name:({}) tags!={}'
# Accumulates the LaunchBar result items emitted by main().
items = []
class LaunchBarError(Exception):
    """Raised to surface a LaunchBar result item instead of a normal result.

    launchbar_item is the dict LaunchBar renders (title/icon keys); message
    is an optional human-readable detail.
    """

    def __init__(self, launchbar_item, message=None):
        # BUGFIX: forward the message to Exception so str(e) and tracebacks
        # carry it instead of being empty.
        super().__init__(message)
        self.message = message
        self.launchbar_item = launchbar_item
def clean_all_db():
    """Delete both the cache and the frequency databases from disk.

    NOTE(review): os.remove raises FileNotFoundError if a database file does
    not exist yet — confirm callers expect that.
    """
    os.remove(DB_PATH_CACHE)
    os.remove(DB_PATH_FREQUENCY)
def parse_query(arg):
    """Translate the raw LaunchBar argument into a DEVONthink search query.

    Syntax:
      '>clean'  - maintenance command: wipe the cache/frequency databases
      '`query'  - pass the rest of the argument through verbatim
      otherwise - each word gets DEVONthink's '~' fuzzy prefix and is wrapped
                  in QUERY_TEMPLATE with EXCLUDED_TAG filtered out

    Raises LaunchBarError both for invalid input and to report command results.
    """

    def prepend_tilde(word):
        # '~word' is the fuzzy-match operator; don't double it up.
        if word.startswith('~'):
            return word
        else:
            return '~' + word

    if arg.startswith('>'):
        arg = arg[1:].strip()
        if arg == 'clean':
            clean_all_db()
            # BUGFIX: this branch previously fell through and returned None,
            # making main() crash on "'query: ' + None".  Surface the result
            # as a LaunchBar item instead.
            raise LaunchBarError(dict(title='Cache cleaned', icon='character:✅'))
        else:
            raise LaunchBarError(dict(title='Invalid arguments', icon='character:🚫'))
    elif arg.startswith('`'):
        return arg[1:]
    elif len(arg.split()) == 1:
        return QUERY_TEMPLATE.format(prepend_tilde(arg), EXCLUDED_TAG)
    else:
        parts = arg.split(' ')
        parts = [prepend_tilde(p) for p in parts]
        return QUERY_TEMPLATE.format(' '.join(parts), EXCLUDED_TAG)
def main():
    """Entry point: search DEVONthink for sys.argv[1] and print LaunchBar JSON."""
    dt = DEVONthink()
    assert len(sys.argv) == 2
    arg = sys.argv[1]
    try:
        if arg:
            logger.debug('======================')
            logger.debug('before search')
            query = parse_query(arg)
            logger.debug('query: ' + query)
            items.extend(dt.search(query))
            logger.debug('after search')
            if not items:
                raise LaunchBarError({
                    'title': 'No record found',
                    'icon': 'character:☹️'
                })
        else:
            raise LaunchBarError({
                # BUGFIX: user-facing typo "inpu" -> "input".
                'title': 'Please input the query',
                'icon': 'character:⌨️'
            })
    except LaunchBarError as e:
        # A LaunchBarError carries the item to display instead of results.
        lb_item = e.launchbar_item
        if lb_item:
            items.append(lb_item)
        else:
            raise ValueError()
    else:
        logger.debug(f'Record amounts: {len(items)}')
    # Always emit the accumulated items (results or the error item) for LaunchBar.
    print(json.dumps(items))


if __name__ == "__main__":
    main()
|
20,207 | f19024b846d8a0f46668ebb08c13499180e013be | from errbot import BotPlugin, botcmd, arg_botcmd
class demo(BotPlugin):
    """Example 'Hello, world!' plugin for Errbot"""

    @botcmd
    def demo2(self, msg, args):
        """Help for Demo 2"""
        # Errbot sends each yielded value as a chat reply; this command
        # yields the walkthrough text below as a single message.
        demo2content = """
• Open VSCode
• Look at the `plugins` directory
• Look at `hello.py` - `hello` function
• Explain the help header
• Run `!help hello` to show the contextual help
• Run `!hello`
• Run `!saymyname matt`
• Open VSCode
• Look at `saymyname` function
"""
        yield demo2content
|
20,208 | 7cb0123aa13f477508697f2d2c91943a99cfc28e | # Fooling around with adding inputed things to a list
# Fooling around with adding inputted things to a list; written after reading
# the "adding elements to a list" section on pages 41-42.
# Read lines until the user types "stop" (any capitalisation), then show them.
# BUGFIX/cleanup: renamed the list so it no longer shadows the builtin `list`,
# and dropped the redundant str() conversion (input() already returns a str).
words = []
print('Enter "Stop" to stop')
while True:
    element = input()
    # Exit condition: an alphabetic word equal to "stop", ignoring case.
    if element.isalpha() and element.lower() == 'stop':
        break
    words.append(element)
print(words)
|
20,209 | 3aaae9234e42a3d58e76839e25de1d0dc0063147 | # -*- coding: utf-8 -*-
"""
@brief test log(time=1s)
"""
import io
import unittest
import pandas
import numpy
from pyquickhelper.pycode import ExtTestCase
from lightmlboard.metrics import l1_reg_max, multi_label_jaccard
class TestMetricsCustom(ExtTestCase):
    """Unit tests for lightmlboard's custom metrics l1_reg_max and multi_label_jaccard."""

    def test_l1_reg_max(self):
        """l1_reg_max accepts lists, numpy arrays and dicts; mismatched lengths raise."""
        exp = [50, 60, 100, 180, 200]
        val = [50, 60, 100, 180, 180]
        r = l1_reg_max(exp, val)
        # The only difference is on the maximum expected value -> score 0.
        self.assertEqual(r, 0)
        exp = [50, 60, 100, 180, 200]
        val = [50, 60, 100, 160, 180]
        r = l1_reg_max(exp, val)
        self.assertEqual(r, 0.02222222222222222)
        # nomax=True ignores entries with the maximum expected value.
        r = l1_reg_max(exp, val, nomax=True)
        self.assertEqual(r, 0)
        # Same expectations with numpy arrays...
        exp = numpy.array(exp)
        val = numpy.array(val)
        r = l1_reg_max(exp, val)
        self.assertEqual(r, 0.02222222222222222)
        r = l1_reg_max(exp, val, nomax=True)
        self.assertEqual(r, 0)
        # ...and with dicts keyed by row index.
        exp = {i: exp[i] for i in range(0, len(exp))}
        val = {i: val[i] for i in range(0, len(val))}
        r = l1_reg_max(exp, val)
        self.assertEqual(r, 0.02222222222222222)
        r = l1_reg_max(exp, val, nomax=True)
        self.assertEqual(r, 0)
        # Mismatched lengths raise ValueError; unsupported containers TypeError.
        exp = [50, 60, 100, 180, 200]
        val = [50, 60, 100, 160]
        self.assertRaise(lambda: l1_reg_max(exp, val), ValueError)
        exp = numpy.array(exp)
        val = numpy.array(val)
        self.assertRaise(lambda: l1_reg_max(exp, val), ValueError)
        exp = {i: exp[i] for i in range(0, len(exp))}
        val = {i: val[i] for i in range(0, len(val))}
        self.assertRaise(lambda: l1_reg_max(exp, val), ValueError)
        self.assertRaise(lambda: l1_reg_max(exp, tuple(val)), TypeError)

    def test_l1_reg_max_streams(self):
        """l1_reg_max also accepts ';'-separated CSV streams (index;value, no header)."""
        st1 = io.StringIO()
        st2 = io.StringIO()
        exp = [50, 60, 100, 180, 200]
        val = [50, 60, 100, 180, 180]
        d1 = pandas.DataFrame(dict(name=exp)).reset_index(drop=False)
        d2 = pandas.DataFrame(dict(name=val)).reset_index(drop=False)
        d1.to_csv(st1, index=False, header=None, sep=';')
        d2.to_csv(st2, index=False, header=None, sep=';')
        r = l1_reg_max(io.StringIO(st1.getvalue()),
                       io.StringIO(st2.getvalue()))
        self.assertEqual(r, 0)

    def test_classification_jaccard(self):
        """multi_label_jaccard normalises strings/lists/tuples/sets of labels."""
        exp = ["4", "5", "6,7", [6, 7], (6, 7), {6, 7}]
        val = ["4", ["5"], "6,7", [6, 7], (6, 7), {6, 7}]
        r = multi_label_jaccard(exp, val)
        self.assertEqual(r, 1)
        exp = ["4", "5", "6,7", [6, 7], (6, 7), {6, 7}]
        val = ["4", ["5"], "7", [7], (7,), {7}]
        r = multi_label_jaccard(exp, val)
        self.assertEqual(r, 0.6666666666666666)
        # Dicts keyed by row index behave the same as lists.
        dexp = {i: exp[i] for i in range(0, len(exp))}
        dval = {i: val[i] for i in range(0, len(val))}
        r = multi_label_jaccard(dexp, dval)
        self.assertEqual(r, 0.6666666666666666)
        # A shorter val raises ValueError; a tuple container raises TypeError.
        val = val[:-1]
        self.assertRaise(lambda: multi_label_jaccard(exp, val), ValueError)
        self.assertRaise(lambda: multi_label_jaccard(
            exp, tuple(val)), TypeError)

    def test_classification_jaccard_streams(self):
        """multi_label_jaccard also accepts CSV streams of comma-joined labels."""
        st1 = io.StringIO()
        st2 = io.StringIO()
        exp = ["4", "5", "6,7", "6,7", "6,7", "6,7"]
        val = ["4", "5", "7", "7", "7", "7,6"]
        d1 = pandas.DataFrame(dict(name=exp)).reset_index(drop=False)
        d2 = pandas.DataFrame(dict(name=val)).reset_index(drop=False)
        d1.to_csv(st1, index=False, header=None, sep=';')
        d2.to_csv(st2, index=False, header=None, sep=';')
        r = multi_label_jaccard(io.StringIO(st1.getvalue()),
                                io.StringIO(st2.getvalue()))
        self.assertEqual(r, 0.75)


if __name__ == "__main__":
    unittest.main()
|
20,210 | be2d62b5b28495f041ab0a086a734fc42aac3bcf | # Generated by Django 2.2 on 2019-06-14 07:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 2.2, 2019-06-14): add Seo.title and relax the
    desc/keywords fields to TextField."""

    dependencies = [
        ('index', '0008_seo'),
    ]

    operations = [
        # New nullable title column on the Seo model.
        migrations.AddField(
            model_name='seo',
            name='title',
            field=models.CharField(max_length=200, null=True),
        ),
        # desc becomes a nullable TextField.
        migrations.AlterField(
            model_name='seo',
            name='desc',
            field=models.TextField(null=True),
        ),
        # keywords becomes a (non-null) TextField.
        migrations.AlterField(
            model_name='seo',
            name='keywords',
            field=models.TextField(),
        ),
    ]
|
20,211 | b326cac03de59ba3b14d3e39a8f6ed2cdcba23c8 | from time import time
start = time()  # wall-clock start; reported by the final "Time Elapsed" print
# Problem Code
from collections import namedtuple
from heapq import heappop, heappush
node_heap = []      # heap of [priority, task] entries
entry_finder = {}   # task -> its live entry in the heap
REMOVED = -1        # sentinel marking a heap entry as deleted


def add_task(task, priority=0):
    """Add a new task or update the priority of an existing task."""
    if task in entry_finder:
        remove_task(task)
    entry_finder[task] = entry = [priority, task]
    heappush(node_heap, entry)


def remove_task(task):
    """Mark an existing task as REMOVED. Raise KeyError if not found."""
    entry_finder.pop(task)[-1] = REMOVED


def pop_task():
    """Remove and return the lowest priority task. Raise KeyError if empty."""
    while node_heap:
        _, task = heappop(node_heap)
        if task is REMOVED:
            # Stale entry left behind by remove_task/add_task; skip it.
            continue
        del entry_finder[task]
        return task
    raise KeyError('pop from an empty priority queue')
Edge = namedtuple('Edge', ['length','n1','n2'])
class Network:
    """Directed weighted graph stored as a dense adjacency matrix (0 = no edge)."""

    def __init__(self, num_nodes):
        # num_nodes x num_nodes matrix of edge weights, all absent initially.
        self._net = [[0] * num_nodes for _ in range(num_nodes)]

    def get_children(self, node):
        """Indices of nodes reachable from `node` through a nonzero edge."""
        row = self._net[node]
        return [idx for idx, weight in enumerate(row) if weight != 0]

    def get_edge(self, node1, node2):
        """Weight of the edge node1 -> node2 (0 when absent)."""
        return self._net[node1][node2]

    def set_edge(self, node1, node2, edge_length):
        """Set the weight of the directed edge node1 -> node2."""
        self._net[node1][node2] = edge_length

    def remove_edge(self, node1, node2):
        # Zero weight means "no edge" in this representation.
        self.set_edge(node1, node2, 0)

    def export_edges(self):
        """Collect Edge tuples from the upper triangle (n2 >= n1) of the matrix."""
        edges = []
        size = len(self._net)
        for n1 in range(size):
            for n2 in range(n1, size):
                weight = self._net[n1][n2]
                if weight != 0:
                    edges.append(Edge(weight, n1, n2))
        return edges
# Minimal path sum in an 80x80 grid, solved as a shortest-path problem with
# Dijkstra over the priority queue above.  Cell (r, c) is node r*n+c+1
# (node ids are shifted by one to make room for source node 0); node n*n+1
# is the sink.
# NOTE(review): edges are added in all four directions and only the top-left
# cell connects to the source / bottom-right to the sink — this matches
# Project Euler problem 83 rather than 82, yet the input file is p082.txt;
# confirm which problem this is meant to solve.
n = 80
net = Network(n**2+2)
mat = [ [ 0 for _ in range(n) ] for __ in range(n) ]
with open('inputs/p082.txt','r') as infile:
    for idx,line in enumerate(infile):
        mat[idx] = list(map(int,line.strip().split(',')))
# Wire the grid: entering cell (r, c) from any neighbour costs mat[r][c].
for r in range(n):
    for c in range(n):
        if c != 0:
            # from the left neighbour into (r, c)
            net.set_edge(r*n+c,r*n+c+1,mat[r][c])
        if c != n-1:
            # from the right neighbour into (r, c)
            net.set_edge(r*n+c+2,r*n+c+1,mat[r][c])
        if r != 0:
            # from the neighbour above into (r, c)
            net.set_edge( (r-1)*n+c+1, r*n+c+1,mat[r][c])
        if r != n-1:
            # from the neighbour below into (r, c)
            net.set_edge( (r+1)*n+c+1, r*n+c+1,mat[r][c])
# Source -> top-left cell (entering it costs mat[0][0]).
net.set_edge(0,1,mat[0][0])
# Bottom-right cell -> sink; a tiny epsilon because weight 0 means "no edge"
# in Network.  The '%d' print below truncates it away.
net.set_edge(n**2,n**2+1,0.000001)
#
prev = [ None for _ in range(n**2+2) ]
dist = [ float('Inf') for _ in range(n**2+2) ]
dist[0] = 0
add_task(0,0)
for i in range(1,n**2+2):
    add_task(i,float('Inf'))
traversed = set()
# Dijkstra main loop: settle the closest unvisited node, relax its children.
while node_heap:
    try:
        node = pop_task()
    except KeyError:
        break
    if node not in traversed:
        traversed.add(node)
        for c in net.get_children(node):
            if dist[c] > dist[node]+net.get_edge(node,c):
                dist[c] = dist[node]+net.get_edge(node,c)
                prev[c] = node
                add_task(c,dist[c])
print('%d' % (dist[n**2+1],))
print('Time Elapsed: %.2fs' % (time()-start,))
|
20,212 | 9847b2aca82a822ce2d98faa47463a28cc69dee2 | from ft232.wrapper import FT232
import logging
import time
from ft232.dll_h import *
class UART(FT232):
    """UART driver built on the FTDI D2XX wrapper (FT232).

    Opens the device matching `description`, resets it into serial mode and
    applies the requested line parameters.
    """

    def __init__(self, description, BaudRate, Parity, ByteSize, Stopbits):
        # Parity: 'n'/'o'/'e' (case-insensitive); ByteSize: 7 or 8; Stopbits: 1 or 2.
        FT232.__init__(self, description)
        self.BaudRate = BaudRate
        self.Parity = Parity
        self.ByteSize = ByteSize
        self.Stopbits = Stopbits
        self.open()
        self.config_to_uart()
        self.uart_config()

    def config_to_uart(self):
        """Reset the chip, drain stale RX data and set transfer/timeout/flow defaults."""
        self.FT_ResetDevice()
        self.check_status()
        # Leave any special bit mode and return to the default serial engine.
        self.FT_SetBitMode(0, FT_BITMODE_RESET)
        self.check_status()
        # 64 KiB USB transfer size in both directions.
        self.FT_SetUSBParameters(65536, 65536)
        self.check_status()
        # Drain anything already queued in the receive buffer.
        number_to_read = self.FT_GetQueueStatus()
        self.check_status()
        if number_to_read:
            number_read = self.FT_Read(number_to_read)
            logging.debug('FT_Read, %d, %d, %d' %
                          (number_read, self.status, self.inbytes))
        self.FT_SetChars(0, 0, 0, 0)
        self.check_status()
        # 100 ms read / 100 ms write timeouts.
        self.FT_SetTimeouts(100, 100)
        self.check_status()
        # 1 ms latency timer for responsive reads.
        self.FT_SetLatencyTimer(1)
        self.check_status()
        self.FT_SetFlowControl(FT_FLOW_NONE, 0, 0)
        self.check_status()

    def uart_config(self):
        """Apply baud rate, data bits, stop bits and parity; return False on bad settings."""
        self.FT_SetBaudRate(self.BaudRate)
        if self.ByteSize not in [7, 8]:
            logging.error('invalid data width')
            return False
        if self.Stopbits == 1:
            ftstopbit = FT_STOP_BITS_1
        elif self.Stopbits == 2:
            ftstopbit = FT_STOP_BITS_2
        else:
            logging.error('invalid Stopbits')
            return False
        if self.Parity in ['n', 'N']:
            ftparity = FT_PARITY_NONE
        elif self.Parity in ['O', 'o']:
            ftparity = FT_PARITY_ODD
        elif self.Parity in ['e', 'E']:
            ftparity = FT_PARITY_EVEN
        else:
            logging.error('invalid Parity')
            return False
        self.FT_SetDataCharacteristics(self.ByteSize, ftstopbit, ftparity)
        self.check_status()

    def uart_close(self):
        """Close the underlying FTDI handle."""
        self.close()

    def flushinbuff(self):
        """Discard everything currently waiting in the receive queue."""
        number_to_read = self.FT_GetQueueStatus()
        self.check_status()
        if number_to_read:
            number_read = self.FT_Read(number_to_read)
            if number_to_read != number_read:
                logging.warning('buffer free may fail %d in buff, but %d read' % (
                    number_to_read, number_read))
            self.check_status()
            # NOTE(review): logging.info('flush', x) treats x as a %-argument
            # for a message with no placeholders; probably meant
            # logging.info('flush %s', ...) — confirm and fix separately.
            logging.info('flush', str(self.inbytes))

    def uart_read(self, num, mtimeout=100):
        """Poll for `num` bytes for up to `mtimeout` milliseconds; return self.inbytes.

        NOTE(review): when enough bytes arrive the loop reads them but does not
        break/return, so polling (and reading) continues until the timeout
        expires, then whatever remains is read after the loop.  Confirm whether
        an early return after the first successful read was intended.
        """
        start = time.time()
        while(time.time() - start < mtimeout / 1000):
            num_in_queue = self.FT_GetQueueStatus()
            # print(num_in_queue)
            if num_in_queue >= num:
                self.FT_Read(num)
                self.check_status()
        if num_in_queue:
            self.FT_Read(num_in_queue)
            self.check_status()
        else:
            logging.warning('no data in queue')
        return self.inbytes

    def uart_readall(self):
        """Read and return everything currently queued (via self.inbytes)."""
        num_in_queue = self.FT_GetQueueStatus()
        if num_in_queue:
            self.FT_Read(num_in_queue)
            self.check_status()
        return self.inbytes

    def uart_write(self, bdata):
        """Write the bytes `bdata`; warn if the chip accepted fewer bytes than given."""
        num_wirtten = self.FT_Write(bdata)
        if num_wirtten != len(bdata):
            logging.warning('TX %d, %d wanted' % (num_wirtten, len(bdata)))
        self.check_status()
|
20,213 | 920955585741d01f1161ffa1e2f854ecd7a29142 | import sklearn.svm as svm
import numpy as np
from sklearn.metrics import accuracy_score
class Classifier:
    """Linear SVM wrapper with a two-sample distribution-shift validity test.

    Keeps a validation set and an energy-distance-style statistic comparing
    incoming training data against that validation set; the critical value
    can be fixed ('const') or adapted over time ('asc' / 'desc').
    """

    def __init__(self, train_dataset, train_labels, valid_dataset, valid_labels, C, crit_val, crit_val_alg='const'):
        self.svc = svm.SVC(kernel='linear', C=C).fit(train_dataset, train_labels)
        self.train_dataset = train_dataset
        self.train_labels = train_labels
        self.valid_dataset = valid_dataset
        self.valid_labels = valid_labels
        self.C = C
        self.val_errors = []
        self.val_errors.append(1 - accuracy_score(self.valid_labels, self.svc.predict(self.valid_dataset)))
        self.crit_val = crit_val
        self.crit_val_alg = crit_val_alg
        self.test_results = [False, False, False]
        # Precompute the within-validation part of the test statistic.
        # BUGFIX: np.where returns a 1-tuple of index arrays; iterating it
        # directly looped once over the whole array (taking the norm of a
        # matrix) instead of over individual indices.  get_test_stat already
        # used [0]; do the same here so both halves of the statistic agree.
        self.part_test_stat_h = 0.0
        h_indeces = np.where(valid_labels == 1)[0]
        v_indeces = np.where(valid_labels == -1)[0]
        self.part_test_stat_v = 0.0
        for i in h_indeces:
            for j in h_indeces:
                self.part_test_stat_h += np.linalg.norm(valid_dataset[i, :] - valid_dataset[j, :])
        self.part_test_stat_h /= 2 * (len(h_indeces) ** 2)
        for i in v_indeces:
            for j in v_indeces:
                self.part_test_stat_v += np.linalg.norm(valid_dataset[i, :] - valid_dataset[j, :])
        self.part_test_stat_v /= 2 * (len(v_indeces) ** 2)

    def predict(self, test_dataset):
        """Predict labels for test_dataset with the current SVM."""
        return self.svc.predict(test_dataset)

    def get_error(self, test_dataset, test_labels):
        """Misclassification rate on the given labelled data."""
        pred_labels = self.predict(test_dataset)
        return 1 - accuracy_score(test_labels, pred_labels)

    def is_valid(self, train_dataset, train_labels, test_stat=0):
        """True when the two-sample statistic stays below crit_val.

        test_stat=0 is a sentinel meaning "compute it from the data".  Under
        the 'asc' scheme, three consecutive rejections raise crit_val slightly
        before the decision is recorded.
        """
        if test_stat == 0:
            test_stat = self.get_test_stat(train_dataset, train_labels)
        if test_stat > self.crit_val and self.crit_val_alg == 'asc' and len(self.test_results) > 1:
            if not self.test_results[-1] and not self.test_results[-2] and not self.test_results[-3]:
                self.crit_val += 0.01
        self.test_results.append(test_stat < self.crit_val)
        return test_stat < self.crit_val

    def get_test_stat(self, train_dataset, train_labels):
        """Two-sample statistic between train and validation data, computed
        separately for the +1 ('harmless') and -1 ('virus') classes and summed."""
        test_stat_h = self.part_test_stat_h
        test_stat_v = self.part_test_stat_v
        train_h_indeces = np.where(train_labels == 1)[0]
        train_v_indeces = np.where(train_labels == -1)[0]
        valid_h_indeces = np.where(self.valid_labels == 1)[0]
        valid_v_indeces = np.where(self.valid_labels == -1)[0]
        # harmless points
        for i in train_h_indeces:
            for j in valid_h_indeces:
                test_stat_h += np.linalg.norm(train_dataset[i, :] - self.valid_dataset[j, :]) / (
                    len(train_h_indeces) * len(valid_h_indeces))
        for i in train_h_indeces:
            for j in train_h_indeces:
                test_stat_h += np.linalg.norm(train_dataset[i, :] - train_dataset[j, :]) / (
                    2 * len(valid_h_indeces) ** 2)
        test_stat_h *= len(train_h_indeces) * len(valid_h_indeces) / (len(train_h_indeces) + len(valid_h_indeces))
        # virus points
        for i in train_v_indeces:
            for j in valid_v_indeces:
                test_stat_v += np.linalg.norm(train_dataset[i, :] - self.valid_dataset[j, :]) / (
                    len(train_v_indeces) * len(valid_v_indeces))
        for i in train_v_indeces:
            for j in train_v_indeces:
                test_stat_v += np.linalg.norm(train_dataset[i, :] - train_dataset[j, :]) / (
                    2 * len(valid_v_indeces) ** 2)
        test_stat_v *= len(train_v_indeces) * len(valid_v_indeces) / (len(train_v_indeces) + len(valid_v_indeces))
        return test_stat_h + test_stat_v

    def partial_fit(self, new_train_dataset, new_train_labels):
        """Append new samples, refit the SVM and adapt crit_val ('desc'/'asc')."""
        self.train_dataset = np.append(self.train_dataset, new_train_dataset, axis=0)
        self.train_labels = np.append(self.train_labels, new_train_labels)
        self.svc = svm.SVC(kernel='linear', C=self.C).fit(self.train_dataset, self.train_labels)
        self.val_errors.append(1 - accuracy_score(self.valid_labels, self.svc.predict(self.valid_dataset)))
        # Cleanup: [-1]/[-2] instead of [len(x)-1]/[len(x)-2], matching fit().
        if self.crit_val_alg == 'desc' and self.val_errors[-1] < self.val_errors[-2]:
            self.crit_val = self.crit_val * 0.95
            print(' decreasing crit_val to ', self.crit_val)
        elif self.crit_val_alg == 'asc' and self.val_errors[-1] > self.val_errors[-2]:
            self.crit_val = self.crit_val * 1.05
            print(' increasing crit_val to ', self.crit_val)

    def fit(self, train_dataset, train_labels):
        """Refit the SVM from scratch on the given data and adapt crit_val.

        NOTE(review): crit_val is adjusted additively here but multiplicatively
        in partial_fit — confirm whether that asymmetry is intentional.
        """
        self.svc = svm.SVC(kernel='linear', C=self.C).fit(train_dataset, train_labels)
        self.val_errors.append(1 - accuracy_score(self.valid_labels, self.svc.predict(self.valid_dataset)))
        if self.crit_val_alg == 'desc' and self.val_errors[-1] < self.val_errors[-2]:
            self.crit_val -= .1 #*= * 0.9
            print(' decreasing crit_val to ', self.crit_val)
        elif self.crit_val_alg == 'asc' and self.val_errors[-1] > self.val_errors[-2]:
            self.crit_val += .01 #*= 1.1
            print(' increasing crit_val to ', self.crit_val)
|
20,214 | 421feefa98f63e77bf5275f8a4a3accb7a49d8c9 | from django.contrib import admin
from helpMe.models import User,User_profile
# Register your models here.
# Expose both models in the Django admin with the default ModelAdmin options.
admin.site.register(User)
admin.site.register(User_profile)
20,215 | 529e717e80cc3bef777063aaedf130e7d849b3a4 | import nukescripts
# Nuke node helper: bakes the averaged position of the selected 3D vertices
# into animated 'xPt<axis>' node translate keys over a frame range, using a
# throwaway CurveTool node to force per-frame evaluation.
axis = 1
# Run the node's embedded helper code (defines checkInput / getFrameRange).
nuke.thisNode()['code'].execute()
_input = checkInput()
if _input['cam'] and _input['geo']:
    ### checks how many vertices are selected
    i = 0
    for vertex in nukescripts.snap3d.selectedPoints():
        i += 1
    if i:
        first = int(nuke.thisNode()['firstFrame'].getValue())
        last = int(nuke.thisNode()['lastFrame'].getValue())
        FrameRange = getFrameRange(first, last)
        if FrameRange:
            # A single returned value means a single-frame range.
            first = int(FrameRange[0])
            if len(FrameRange) == 1:
                last = int(FrameRange[0])
            else:
                last = int(FrameRange[1])
            nuke.thisNode()['firstFrame'].setValue(first)
            nuke.thisNode()['lastFrame'].setValue(last)
            frames = [frame for frame in range(first, last+1)]
            pB = nuke.ProgressTask('PointsToCornerPin')
            pB.setMessage("Tracing points:")
            ct = nuke.nodes.CurveTool()
            for frame in frames:
                if pB.isCancelled():
                    break
                # Executing the CurveTool evaluates the comp at this frame so
                # the selected points are sampled at the right time.
                nuke.execute(ct, frame, frame)
                gen = nukescripts.snap3d.selectedPoints()
                points = [point for point in gen]
                # NOTE(review): only the first half of the selected points is
                # averaged (avgLen = len/2) — confirm this is intentional.
                avgLen = int(len(points)/2)
                x = 0; y = 0; z = 0
                for i in range(avgLen):
                    x += points[i][0]
                    y += points[i][1]
                    z += points[i][2]
                x /= avgLen; y /= avgLen; z /= avgLen
                # Key the averaged position on the axis node at this frame.
                nuke.toNode('xPt%d' %axis)['translate'].setAnimated()
                nuke.toNode('xPt%d' %axis)['translate'].setValueAt(x, frame,0)
                nuke.toNode('xPt%d' %axis)['translate'].setValueAt(y, frame,1)
                nuke.toNode('xPt%d' %axis)['translate'].setValueAt(z, frame,2)
                pB.setProgress(int((frame-first)*100/len(frames)))
            del pB
            nuke.delete(ct)
            nuke.thisNode()['generateBool'].setValue(1)
    else:
        nuke.message('Select some vertices first')
elif not _input['geo']:
    nuke.message('Geometry is not connected or recognized')
else:
    nuke.message('Camera is not connected or recognized')
|
20,216 | 6fd5984672278d5c53601372530930528597790a | # SELECT username FROM users WHERE username LIKE 'r%';
from flask_restful import Resource, reqparse
from flask import jsonify
from flask_login import current_user
from . import api
from flask_jwt_extended import jwt_required
from flask_jwt_extended import get_jwt_identity
from ..models import BlogsInfo, Users, gen_post_id, strip_image, gen_image_name
import werkzeug
import os
uploads_dir = "app/static/images/uploads"
allowed_types = ["image/png", "image/jpeg", "image/gif"]
class BlogCheck(Resource):
    """Check whether a blog name is still available."""

    @jwt_required()
    def get(self):
        """GET /blog/check?blog=<name> -> {"status": <name is free?>}"""
        parser = reqparse.RequestParser()
        parser.add_argument(
            "blog",
            dest="blog",
            location="args",
            required=True,
            help="The user's preferred blog name",
        )
        args = parser.parse_args()
        existing = BlogsInfo.query.filter_by(name=args.blog).first()
        # status is True exactly when no blog with that name exists yet.
        return {"status": existing is None}


api.add_resource(BlogCheck, "/blog/check")
class UserList(Resource):
    """List users whose username starts with the given prefix."""

    @jwt_required()
    def get(self):
        """GET /users/like?friend=<prefix> -> [{"id": ..., "username": ...}, ...]

        The requesting user (JWT identity) is excluded from the results.
        """
        arg_parser = reqparse.RequestParser()
        arg_parser.add_argument(
            "friend",
            dest="friend",
            location="args",
            required=True,
            help="The user's friend",
        )
        args = arg_parser.parse_args()
        response = []
        # Refuse characters that could abuse the LIKE pattern; a leading '_'
        # (single-character wildcard) is also rejected.
        chars = set("%$`")
        if (
            args.friend.strip() != ""
            and not any((c in chars) for c in args.friend.strip())
            and not args.friend.startswith("_")
        ):
            # Escape remaining underscores so they match literally in ilike().
            args.friend = args.friend.replace("_", "\\_")
            users_like = (
                Users.query.with_entities(Users.username, Users.id)
                .filter(Users.id != get_jwt_identity())
                .filter(Users.username.ilike(f"{args.friend}%"))
                .all()
            )
            response = [{"id": id, "username": value} for (value, id) in users_like]
        # Cleanup: removed leftover debug print() calls and the dead
        # "user_obj = {}" assignment that was immediately overwritten.
        return response


api.add_resource(UserList, "/users/like")
class BlogList(Resource):
    """Search blogs whose name or description contains the given text."""

    @jwt_required()
    def get(self):
        """GET /blogs/like?name=<text> -> {"blogs": [{"name", "desc", "id"}, ...]}"""
        arg_parser = reqparse.RequestParser()
        arg_parser.add_argument(
            "name",
            dest="name",
            location="args",
            required=True,
            help="The blog's name",
        )
        args = arg_parser.parse_args()
        response = []
        # Refuse characters that could abuse the LIKE pattern.  Unlike
        # UserList, a leading '_' is deliberately allowed here.
        chars = set("%$`")
        if (
            args.name.strip() != ""
            and not any((c in chars) for c in args.name.strip())
        ):
            # Escape underscores so they match literally in ilike().
            args.name = args.name.replace("_", "\\_")
            blogs_like = (
                BlogsInfo.query.with_entities(
                    BlogsInfo.name, BlogsInfo.description, BlogsInfo.id
                )
                .filter(
                    (
                        BlogsInfo.name.ilike(f"%{args.name}%")
                        | (BlogsInfo.description.ilike(f"%{args.name}%"))
                    )
                )
                .all()
            )
            response = [
                {"name": blog, "desc": description, "id": id}
                for (blog, description, id) in blogs_like
            ]
        # Cleanup: removed leftover debug print() calls.
        return {"blogs": response}


api.add_resource(BlogList, "/blogs/like")
class UploadFile(Resource):
    """Upload an image file into the uploads directory."""

    # Upper bound on accepted uploads (2 MB).
    MAX_UPLOAD_BYTES = 2000000

    @jwt_required()
    def post(self):
        """POST /upload/file (multipart field "file") -> {"status": bool, ...}

        On success the response carries the randomly generated stored file
        name; on failure a human-readable "reason".
        """
        arg_parser = reqparse.RequestParser()
        arg_parser.add_argument(
            "file", type=werkzeug.datastructures.FileStorage, location="files"
        )
        args = arg_parser.parse_args()
        content_type = args.file.mimetype
        # Measure the stream by seeking to its end, then rewind so the later
        # save() writes the whole file.  (Cleanup: dropped the unused read()
        # and the dead commented-out strip_image flow.)
        args.file.seek(0, os.SEEK_END)
        length = args.file.tell()
        args.file.seek(0)
        response = {"status": False}
        if content_type in allowed_types:
            if length > self.MAX_UPLOAD_BYTES:
                response["reason"] = "Max file size is 2MB"
            else:
                # Store under a random name with the extension derived from
                # the MIME type (e.g. image/png -> .png).
                ext = content_type.split("/")[1]
                rand_name = gen_image_name(path=uploads_dir)
                file = f"{rand_name}.{ext}"
                args.file.save(os.path.join(uploads_dir, file))
                response["status"] = True
                response["name"] = file
        else:
            response["reason"] = "Only JPEG/JPG, GIF and PNG files are allowed"
        return response


api.add_resource(UploadFile, "/upload/file")
|
20,217 | dd140aa3c8b361aa2b2369423ea05ee4edbae645 | """
Máme nějaká data (viz. níže), chceme je projít a spočítat
frekvenční tabulku (kolikrát se které číslo vyskytlo v datech).
Čísla v datech jsou pouze v rozmezí 0 až 9.
Správné počty (pro kontrolu) jsou:
0: 8x 5: 13x
1: 11x 6: 10x
2: 8x 7: 13x
3: 9x 8: 8x
4: 5x 9: 15x
"""
# The sample data: one hundred digits in the range 0-9.
data = [
    9, 7, 8, 9, 0, 6, 2, 0, 8, 4, 5, 2, 9, 7, 2, 8, 9, 1, 5, 2,
    1, 6, 0, 1, 7, 7, 9, 1, 1, 9, 4, 5, 3, 5, 0, 0, 7, 7, 4, 7,
    9, 5, 1, 5, 2, 6, 1, 3, 3, 8, 6, 1, 9, 9, 9, 6, 8, 1, 9, 1,
    3, 7, 6, 8, 3, 5, 5, 3, 8, 3, 3, 2, 9, 6, 7, 4, 6, 7, 0, 1,
    0, 4, 9, 5, 2, 9, 5, 0, 6, 7, 5, 2, 7, 7, 6, 9, 8, 5, 3, 5
]
# ===========
# read custom input from a single line instead
# (uncomment to use)
# given_input = input()
# data = []
# for i in given_input.split():
#     data.append(int(i))
# ===========
# Tally how often each digit occurs, then print the frequency table.
digit_counts = [0] * 10
for digit in data:
    digit_counts[digit] += 1
for digit, count in enumerate(digit_counts):
    print(str(digit) + ":", str(count) + "x")
|
20,218 | 04bc2c8d26720756522e405bbdacb5824324d89b | import Types
import struct
def standart_type_packet(data: bytes):
    """Frame *data* as a standard packet: a varint length prefix followed by the payload."""
    length_prefix = bytes(Types.varint(len(data)))
    return length_prefix + data
def handshake(server_address, server_port, next_state=2, protocol_version=754):
    """Build a Minecraft Handshaking packet (packet id 0x00).

    next state: 1 for status, 2 for login

    Fields: protocol version (varint), server address (length-prefixed ASCII
    string), server port (unsigned short), next state (varint).
    """
    packet = (
        bytes(Types.varint(0))                    # packet id 0x00
        + bytes(Types.varint(protocol_version))
        # a varint length is a single byte for addresses shorter than 128 chars
        + struct.pack('B', len(server_address))
        + bytes(server_address, 'ascii')
        # BUG FIX: 'H' packs in native byte order (little-endian on x86);
        # the protocol requires a big-endian (network order) unsigned short.
        + struct.pack('>H', server_port)
        + bytes(Types.varint(next_state))
    )
    return standart_type_packet(packet)
def login(username: str):
    """Build a Login Start packet (packet id 0x00 in the login state).

    Raises:
        Exception: if the UTF-8 encoded username is shorter than 3 bytes or
            longer than 16 bytes.
    """
    encoded_username = bytes(username, 'utf-8')
    if len(encoded_username) < 3 or len(encoded_username) > 16:
        # BUG FIX: corrected typos in the error message ("uesrname", "an 16").
        raise Exception('length of username must be between 3 and 16 bytes')
    # Packet id 0x00, then the username as a length-prefixed string
    # (a varint length equals a single byte for values below 128).
    packet = bytes(Types.varint(0)) + struct.pack('B', len(encoded_username)) + encoded_username
    return standart_type_packet(packet)
if __name__ == '__main__':
    # Smoke test: build and print one handshake packet and one login packet.
    data = handshake('127.0.0.1',25565)
    print(data)
    data = login('DrEenot')
    print(data)
20,219 | 6971c1f02e42fb0c4bd3ceba37bc263cacdfa548 | import numpy as np
import pybullet as p
import pybullet_data
from .panda import Panda
from .objects import YCBObject
from gym import spaces
import gym
import os
from .key import Key
import time
class PandaRawEnv(gym.Env):
    """Minimal gym environment around a PyBullet simulation of a Panda arm.

    Actions are interpreted as end-effector velocities; observations are the
    raw Panda state dict. Reward is always 0 and episodes never terminate —
    subclasses define actual tasks.
    """

    def __init__(self, engine='DIRECT'):
        # create simulation (GUI or DIRECT)
        self.urdfRootPath = pybullet_data.getDataPath()
        if engine == 'DIRECT':
            p.connect(p.DIRECT)
        elif engine == 'GUI':
            p.connect(p.GUI)
        else:
            # NOTE(review): falls through without a connection — a typo in
            # `engine` will make every later pybullet call fail.
            print('Panda-grasp ERROR: unknown engine')
        p.setGravity(0, 0, -9.81)
        # set up camera
        self._set_camera()
        # load some scene objects
        self.plane = p.loadURDF(os.path.join(self.urdfRootPath, "plane.urdf"), basePosition=[0, 0, -0.65])
        p.loadURDF(os.path.join(self.urdfRootPath, "table/table.urdf"), basePosition=[0.5, 0, -0.65])
        # load a panda robot
        self.panda = Panda()

    def reset(self):
        """Reset the robot to its home configuration and return its state dict."""
        self.panda.reset()
        return self.panda.state

    def close(self):
        """Disconnect from the physics server."""
        p.disconnect()

    def step(self, action):
        """Apply *action* as an end-effector velocity and advance one sim step."""
        # get current state
        state = self.panda.state
        # action in this example is the end-effector velocity
        self.panda.step(dposition=action)
        # take simulation step
        p.stepSimulation()
        # return next_state, reward, done, info
        next_state = self.panda.state
        reward = 0.0
        done = False
        info = {}
        return next_state, reward, done, info

    def render(self, mode='None'):
        """Return an RGB image of shape (camera_height, camera_width, 3)."""
        (width, height, pxl, depth, segmentation) = p.getCameraImage(width=self.camera_width,
                                                                     height=self.camera_height,
                                                                     viewMatrix=self.view_matrix,
                                                                     projectionMatrix=self.proj_matrix)
        rgb_array = np.array(pxl, dtype=np.uint8)
        # pybullet returns RGBA; drop the alpha channel
        rgb_array = np.reshape(rgb_array, (self.camera_height, self.camera_width, 4))
        rgb_array = rgb_array[:, :, :3]
        return rgb_array

    def _set_camera(self):
        """Configure the debug-visualizer camera and render view/projection matrices."""
        self.camera_width = 256
        self.camera_height = 256
        p.resetDebugVisualizerCamera(cameraDistance=1.2, cameraYaw=30, cameraPitch=-60,
                                     cameraTargetPosition=[0.5, -0.2, 0.0])
        self.view_matrix = p.computeViewMatrixFromYawPitchRoll(cameraTargetPosition=[0.5, 0, 0],
                                                               distance=1.0,
                                                               yaw=90,
                                                               pitch=-50,
                                                               roll=0,
                                                               upAxisIndex=2)
        self.proj_matrix = p.computeProjectionMatrixFOV(fov=60,
                                                        aspect=float(self.camera_width) / self.camera_height,
                                                        nearVal=0.1,
                                                        farVal=100.0)
class PandaMoveBoxEnv(PandaRawEnv):
    """Pick-and-place task: grasp a long box and move it onto a square base.

    Observation (16,): [ee_position*3, obj_location*3, obj_height, obj_width,
    target_location*3, target_height, target_width, dist_ee_obj, dist_obj_tar,
    grasp]. Action (3,): normalized end-effector velocity in [-1, 1], scaled
    by 2 inside step().
    """

    def __init__(self, engine='DIRECT', max_episode_steps=5000):
        super(PandaMoveBoxEnv, self).__init__(engine)
        p.getConnectionInfo()
        p.setPhysicsEngineParameter(enableFileCaching=0)
        self.max_episode_steps = max_episode_steps
        # set location of the object
        self.obj_location = np.asarray([0.6, 0., 0.12])
        # object is a long box with a square bottom
        self.obj = YCBObject('zsy_long_box')
        self.obj.load()
        p.resetBasePositionAndOrientation(self.obj.body_id, self.obj_location, [0, 0, 0, 1])
        self.obj_height = 0.24
        self.obj_width = 0.06
        # set the target location
        self.target = YCBObject('zsy_base')
        self.target.load()
        self.target_location = np.asarray([0.3, -0.3, 0])
        p.resetBasePositionAndOrientation(self.target.body_id, self.target_location, [0, 0, 0, 1])
        self.target_width = 0.12
        self.target_height = 0.02
        # todo: fix the target
        # load a panda robot
        self.seed(1234)
        self.arm_id = self.panda.panda
        self.obj_id = self.obj.body_id
        # open the gripper
        self.grasp = False
        # action space is the end-effector's normalized velocity
        self.action_space = spaces.Box(
            low=np.array([-1., -1., -1.]),
            high=np.array([1., 1., 1.]),
            dtype=np.float64
        )
        # observation space is
        # [ee_position*3, obj_location*3, obj_height, obj_width,
        # target_location*3, target_height, target_width, dist_ee_obj, dist_obj_tar, grasp]
        self.observation_space = spaces.Box(
            low=np.array([-np.inf] * 16),
            high=np.array([np.inf] * 16),
            dtype=np.float64
        )
        self.step_number = 0
        # episode flags: whether the box was lifted, placed, or knocked over
        self.catch = False
        self.move_to_target = False
        self.overturn_goal = False
        # connect to keyboard
        self.key = Key(scale=0.1)

    def reset(self):
        """Reset flags, object/target poses, and the robot; return the observation."""
        # reset the markers
        self.step_number = 0
        self.move_to_target = False
        self.overturn_goal = False
        self.catch = False
        self.grasp = False
        # reset the position of the object, the target and the robot
        p.resetBasePositionAndOrientation(self.obj_id, self.obj_location, [0, 0, 0, 1])
        p.resetBasePositionAndOrientation(self.target.body_id, self.target_location, [0, 0, 0, 1])
        self.panda.reset()
        # return the current state
        return_state = self.return_state()
        return return_state

    def reset_with_obs(self, obs):
        """Like reset(), but restore the robot to the joint configuration *obs*."""
        self.step_number = 0
        self.move_to_target = False
        self.overturn_goal = False
        self.catch = False
        self.grasp = False
        p.resetBasePositionAndOrientation(self.obj_id, self.obj_location, [0, 0, 0, 1])
        p.resetBasePositionAndOrientation(self.target.body_id, self.target_location, [0, 0, 0, 1])
        self.panda.reset_with_obs(obs)
        return_state = self.return_state()
        return return_state

    def seed(self, seed=None):
        """Seed the robot's RNG; returns the seed in a list (gym convention)."""
        self.panda.seed(seed)
        return [seed]

    def return_state(self):
        """Assemble the 16-dim observation vector described in the class docstring."""
        # grasp point is the top face of the box
        catch_position = self.obj.get_position() + np.asarray([0, 0, self.obj_height / 2])
        dist_ee_obj = np.linalg.norm(self.panda.state['ee_position'] - catch_position)
        target_position = self.target_location + np.asarray([0, 0, self.obj_height / 2 + self.target_height])
        dist_obj_tar = np.linalg.norm(self.obj.get_position() - target_position)
        return_state = np.concatenate(
            [self.panda.state['ee_position'],
             self.obj.get_position(),
             np.array([self.obj_height]),
             np.array([self.obj_width]),
             self.target.get_position(),
             np.array([self.target_height]),
             np.array([self.target_width]),
             np.array([dist_ee_obj]),
             np.array([dist_obj_tar]),
             np.array([self.grasp])]
        )
        return return_state

    def calculate_reward(self, state, action):
        """Shaped reward: distance penalties, energy penalty, and one-shot
        bonuses/penalties for grasping (+1000), overturning (-1000, ends the
        episode) and placing on the target (+5000, ends the episode)."""
        reward = 0
        done = False
        obj_position = self.obj.get_position()
        # punish the distance between the end-effector and the object
        catch_position = obj_position + np.asarray([0, 0, self.obj_height / 2])
        dist_ee_obj = np.linalg.norm(state['ee_position'] - catch_position)
        if dist_ee_obj > 0.01:
            reward -= dist_ee_obj
        # punish the energy cost
        reward -= np.linalg.norm(action[0:3]) * 0.1
        # punish the distance between the object and the target
        target_position = self.target_location + np.asarray([0, 0, self.obj_height / 2 + self.target_height])
        dist_obj_tar = np.linalg.norm(obj_position - target_position)
        reward -= dist_obj_tar
        # judge if the object is caught
        if np.linalg.norm(obj_position[0:2] - self.obj_location[0:2]) > 0.02 and self.grasp and not self.catch:
            self.catch = True
            reward += 1000
        # judge if the object is overturned
        if obj_position[2] < self.obj_height / 2 - 0.05 and not self.overturn_goal:
            self.overturn_goal = True
            reward -= 1000
            done = True
        # judge if the object has been moved to the target
        if abs(obj_position[0] - self.target_location[0]) < (self.target_width - self.obj_width) / 2 \
                and abs(obj_position[1] - self.target_location[1]) < (self.target_width - self.obj_width) / 2 \
                and obj_position[2] < self.target_height + self.obj_height / 2 and not self.move_to_target:
            self.move_to_target = True
            reward += 5000
            done = True
        return reward, done

    def close_gripper(self, state):
        """Latch self.grasp to True once the end-effector is close enough to
        the grasp point (slightly below the top of the box)."""
        catch_position = self.obj.get_position() + np.asarray([0, 0, self.obj_height / 2 - 0.01])
        if np.linalg.norm(state['ee_position'][0] - catch_position[0]) < 0.02 \
                and np.linalg.norm(state['ee_position'][1] - catch_position[1]) < 0.02 \
                and np.linalg.norm(state['ee_position'][2] - catch_position[2]) < 0.007:
            self.grasp = True

    def step(self, action):
        """Advance one control step. *action* is scaled by 2 before being applied."""
        # get real action
        action_real = action * 2
        # get current state
        state = self.panda.state
        self.step_number += 1
        # action in this example is the end-effector and grasp
        self.close_gripper(state)
        self.panda.step(dposition=action_real[0:3], grasp_open=not self.grasp)
        # take simulation step
        p.stepSimulation()
        # return next_state, reward, done, info
        next_state = self.panda.state
        info = next_state
        return_state = self.return_state()
        reward, done = self.calculate_reward(next_state, action_real)
        return return_state, reward, done, info

    def teleop_step(self):
        """
        use keyboard to control the robot
        :return: state, action, reward, next_state, done, info
        """
        # get current state
        state = self.panda.state
        self.step_number += 1
        return_state = self.return_state()
        # read in from keyboard
        key_input = self.key.get_controller_state()
        dpos, dquat, grasp, reset = (
            key_input["dpos"],
            key_input["dquat"],
            key_input["grasp"],
            key_input["reset"],
        )
        action = dpos
        self.close_gripper(state)
        # action[0:3] = dpos
        # action in this example is the end-effector velocity
        self.panda.step(dposition=dpos, dquaternion=dquat, grasp_open=not self.grasp)
        # take simulation step
        p.stepSimulation()
        # return next_state, reward, done, info
        next_state = self.panda.state
        return_next_state = self.return_state()
        reward, done = self.calculate_reward(next_state, action)
        print(f'step: {self.step_number}\treward: {reward}\tdone: {done}')
        if reset:
            done = True
        info = self.panda.state
        # self.grasp = grasp
        return return_state, action, reward, return_next_state, done, info

    def _set_camera(self):
        """Higher-resolution camera setup than the base class (512x512)."""
        self.camera_width = 512
        self.camera_height = 512
        p.resetDebugVisualizerCamera(cameraDistance=1, cameraYaw=20, cameraPitch=-30,
                                     cameraTargetPosition=[0.5, -0.2, 0.2])
        self.view_matrix = p.computeViewMatrixFromYawPitchRoll(cameraTargetPosition=[0.5, 0, 0],
                                                               distance=1.0,
                                                               yaw=90,
                                                               pitch=-50,
                                                               roll=0,
                                                               upAxisIndex=2)
        self.proj_matrix = p.computeProjectionMatrixFOV(fov=60,
                                                        aspect=float(self.camera_width) / self.camera_height,
                                                        nearVal=0.1,
                                                        farVal=100.0)
class PandaAvoidObstacleEnv(PandaRawEnv):
    """Transport task: carry an already-grasped box to a target while avoiding
    a lying obstacle box.

    Observation (39,): [ee_position*3, joint_position*7, joint_velocity*7,
    joint_torque*7, obj_location*3, obj_height, obj_width, obstacle_location*3,
    obstacle_height, obstacle_width, target_location*3, target_height,
    target_width]. Action (3,): normalized end-effector velocity, scaled by 2.
    """

    def __init__(self, engine='DIRECT', max_episode_steps=2000):
        super(PandaAvoidObstacleEnv, self).__init__(engine)
        p.getConnectionInfo()
        p.setPhysicsEngineParameter(enableFileCaching=0)
        self.max_episode_steps = max_episode_steps
        # set location of the object
        self.obj_location = np.asarray([0.7, 0., 0.12])
        # object is a long box with a square bottom
        self.obj = YCBObject('zsy_long_box')
        self.obj.load()
        p.resetBasePositionAndOrientation(self.obj.body_id, self.obj_location, [0, 0, 0, 1])
        self.obj_height = 0.24
        self.obj_width = 0.06
        # obstacle is a lying long box
        self.obstacle = YCBObject('xt_obstacle')
        self.obstacle.load()
        self.obstacle_location = np.asarray([0.5, -0.15, 0.05])
        p.resetBasePositionAndOrientation(self.obstacle.body_id, self.obstacle_location, [0, 0, 0, 1])
        self.obstacle_width = 0.3
        self.obstacle_height = 0.11
        # set the target location
        self.target = YCBObject('zsy_base')
        self.target.load()
        self.target_location = np.asarray([0.3, -0.3, 0.0025])
        p.resetBasePositionAndOrientation(self.target.body_id, self.target_location, [0, 0, 0, 1])
        self.target_width = 0.12
        self.target_height = 0.005
        # load a panda robot
        self.seed(1234)
        self.arm_id = self.panda.panda
        self.obj_id = self.obj.body_id
        # open the gripper
        self.grasp = False
        # action space is the end-effector's normalized velocity
        self.action_space = spaces.Box(
            low=np.array([-1., -1., -1.]),
            high=np.array([1., 1., 1.]),
            dtype=np.float64
        )
        # observation space is
        # [ee_position*3, joint_position*7, joint_velocity*7, joint_torque*7,
        # obj_location*3, obj_height, obj_width,
        # obstacle_location*3, obstacle_height, obstacle_width,
        # target_location*3, target_height, target_width]
        self.observation_space = spaces.Box(
            low=np.array([-np.inf] * 39),
            high=np.array([np.inf] * 39),
            dtype=np.float64
        )
        self.step_number = 0
        # connect to keyboard
        self.key = Key(scale=0.1)

    def reset(self):
        """Reset poses and move the arm to a pre-grasp joint configuration with
        the gripper closed (the episode starts with the box already held)."""
        self.step_number = 0
        self.grasp = True
        # fixed start configuration for the 7 arm joints
        obs = [0., 0.58, 0., -1.55, 0., 2.1, 0.]
        p.resetBasePositionAndOrientation(self.obj_id, self.obj_location, [0, 0, 0, 1])
        p.resetBasePositionAndOrientation(self.obstacle.body_id, self.obstacle_location, [0, 0, 0, 1])
        p.resetBasePositionAndOrientation(self.target.body_id, self.target_location, [0, 0, 0, 1])
        self.panda.reset_with_obs(obs)
        return_state = self.return_state()
        return return_state

    def seed(self, seed=None):
        """Seed the robot's RNG; returns the seed in a list (gym convention)."""
        self.panda.seed(seed)
        return [seed]

    def return_state(self):
        """Assemble the 39-dim observation vector described in the class docstring."""
        return_state = np.concatenate(
            [self.panda.state['ee_position'],
             self.panda.state['joint_position'][0:7],
             self.panda.state['joint_velocity'][0:7],
             self.panda.state['joint_torque'][0:7],
             self.obj.get_position(),
             np.array([self.obj_height]),
             np.array([self.obj_width]),
             self.obstacle.get_position(),
             np.array([self.obstacle_height]),
             np.array([self.obstacle_width]),
             self.target.get_position(),
             np.array([self.target_height]),
             np.array([self.target_width])]
        )
        return return_state

    def calculate_reward(self, state, action):
        """Shaped reward: energy and distance penalties; -2000 and episode end
        for dropping the box or disturbing the obstacle/target; +5000 and
        episode end for reaching the target."""
        reward = 0
        done = False
        obj_position = self.obj.get_position()
        obstacle_position = self.obstacle.get_position()
        # punish the energy cost
        reward -= np.linalg.norm(action) * 2
        # punish the distance between the object and the target
        target_position = self.target_location + np.asarray([0, 0, self.obj_height / 2 + self.target_height])
        dist_obj_tar = np.linalg.norm(obj_position - target_position)
        reward -= dist_obj_tar * 5
        # judge if the object is dropped
        if np.linalg.norm(state['ee_position'] - obj_position - np.asarray([0, 0, self.obj_height / 2])) > 0.1:
            reward -= 2000
            done = True
        # judge if the obstacle is moved
        if np.linalg.norm(obstacle_position[0:2] - self.obstacle_location[0:2]) > 0.02:
            reward -= 2000
            done = True
        # judge if the target is moved
        # NOTE(review): this compares the *computed* target_position (which
        # includes the fixed z offset) against target_location — confirm that
        # the intent was to compare the target body's actual position instead.
        if np.linalg.norm(target_position[0:2] - self.target_location[0:2]) > 0.02:
            reward -= 2000
            done = True
        # judge if the object has been moved to the target
        if abs(obj_position[0] - self.target_location[0]) < self.target_width / 2 - 0.02\
                and abs(obj_position[1] - self.target_location[1]) < self.target_width / 2 - 0.02\
                and obj_position[2] < self.target_height + self.obj_height / 2 + 0.1:
            reward += 5000
            done = True
        return reward, done

    def step(self, action):
        """Advance one control step. *action* is scaled by 2 before being applied."""
        # get real action
        action_real = action * 2
        # get current state
        state = self.panda.state
        self.step_number += 1
        # action in this example is the end-effector and grasp
        self.panda.step(dposition=action_real, grasp_open=not self.grasp)
        # take simulation step
        p.stepSimulation()
        # return next_state, reward, done, info
        next_state = self.panda.state
        info = next_state
        return_state = self.return_state()
        reward, done = self.calculate_reward(next_state, action_real)
        # self.grasp = grasp
        return return_state, reward, done, info

    def teleop_step(self):
        """
        use keyboard to control the robot
        :return: state, action, reward, next_state, done, info
        """
        # get current state
        state = self.panda.state
        self.step_number += 1
        return_state = self.return_state()
        # read in from keyboard
        key_input = self.key.get_controller_state()
        dpos, dquat, grasp, reset = (
            key_input["dpos"],
            key_input["dquat"],
            key_input["grasp"],
            key_input["reset"],
        )
        action = dpos
        # action in this example is the end-effector velocity
        self.panda.step(dposition=dpos, dquaternion=dquat, grasp_open=not self.grasp)
        # take simulation step
        p.stepSimulation()
        # return next_state, reward, done, info
        next_state = self.panda.state
        return_next_state = self.return_state()
        reward, done = self.calculate_reward(next_state, action)
        print(f'step: {self.step_number}\treward: {reward}\tdone: {done}')
        if reset:
            done = True
        info = self.panda.state
        return return_state, action, reward, return_next_state, done, info

    def _set_camera(self):
        """Higher-resolution camera setup than the base class (512x512)."""
        self.camera_width = 512
        self.camera_height = 512
        p.resetDebugVisualizerCamera(cameraDistance=1, cameraYaw=20, cameraPitch=-30,
                                     cameraTargetPosition=[0.5, -0.2, 0.2])
        self.view_matrix = p.computeViewMatrixFromYawPitchRoll(cameraTargetPosition=[0.5, 0, 0],
                                                               distance=1.0,
                                                               yaw=90,
                                                               pitch=-50,
                                                               roll=0,
                                                               upAxisIndex=2)
        self.proj_matrix = p.computeProjectionMatrixFOV(fov=60,
                                                        aspect=float(self.camera_width) / self.camera_height,
                                                        nearVal=0.1,
                                                        farVal=100.0)
|
20,220 | e0f567fb80c5bb055df4c50a380c7e3e24853723 | # Generated by Django 3.0.1 on 2020-02-13 20:07
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration: drops the Room/Rooms models and replaces the
    reservation's room FK with a room_type choice field.

    NOTE(review): the check_out default below is a datetime frozen at
    generation time (not a callable), and the room_type default is a
    (value, label) tuple rather than just the value — both are as generated;
    confirm against the current model before reusing.
    """

    dependencies = [
        ('core', '0004_auto_20200107_1735'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Rooms',
        ),
        migrations.RemoveField(
            model_name='reservation',
            name='guest',
        ),
        migrations.RemoveField(
            model_name='reservation',
            name='room',
        ),
        migrations.AddField(
            model_name='reservation',
            name='room_type',
            field=models.CharField(choices=[('single_room', 'Single Room'), ('double_room', 'Double Room'), ('executive_room', 'Executive Room')], default=('double_room', 'Double Room'), max_length=30),
        ),
        migrations.AlterField(
            model_name='reservation',
            name='check_out',
            field=models.DateField(default=datetime.datetime(2020, 2, 15, 20, 7, 4, 247628, tzinfo=utc)),
        ),
        migrations.DeleteModel(
            name='Room',
        ),
    ]
|
20,221 | 88d7f13eed2f22b289e23eec9814171c73ba1805 | -X FMLP -Q 0 -L 2 71 250
-X FMLP -Q 0 -L 2 62 200
-X FMLP -Q 0 -L 2 62 200
-X FMLP -Q 1 -L 2 60 175
-X FMLP -Q 1 -L 2 55 300
-X FMLP -Q 1 -L 2 50 300
-X FMLP -Q 2 -L 1 45 175
-X FMLP -Q 2 -L 1 41 300
-X FMLP -Q 3 -L 1 34 125
-X FMLP -Q 3 -L 1 31 150
25 175
22 300
22 125
20 100
20 150
13 125
13 150
10 125
10 300
|
20,222 | f4b8a0f02d40356b4ddafddca0e5f741c327973b | #!/usr/bin/python
# -*- coding: UTF-8 -*-
'''
Created on 2018年4月17日
@author: Administrator
'''
'''
Mysql 密码破解工具
其中连接数最好不要写太大,要不然会连不上数据库
'''
import argparse
try:
import pymysql.cursors
except:
print u'[>]没有安装mysql驱动,需要安装mysql驱动'
import thread
import os
import sys
import time
#from _mysql import result
defaultencoding = 'utf-8'
if sys.getdefaultencoding() != defaultencoding:
reload(sys)
sys.setdefaultencoding(defaultencoding)
threadNum = 0 # 当前线程数
FLAG=True
print u'''
这些要显示很NB的logo图片
'''
parser = argparse.ArgumentParser()
#parser.add_argument('-hh', '--host', help=u"主机IP,必输")
parser.add_argument('-p', '--port', help=u"mysql端口,看清楚 这里是非必输", default=3306, type=int)
parser.add_argument('-t', '--threads', help=u"线程数,这里不要设置太多,有可能会导致后面不能正常连接数据库" , default=5, type=int)
parser.add_argument('-n', '--name', help=u"用户名" , default='root')
parser.add_argument('-f', '--filepath', help=u"密码文件路径,必输")
parser.add_argument('-H', '--ipfilepath', help=u"ip文件路径,必输")
parser.add_argument('-db', '--databasename', help=u"要连接的数据库名" , default='information_schema')
parser.add_argument('-s', '--sleep', help=u"休眠时间,防止访问频繁被对方限制" , default=0,type=int)
args = parser.parse_args()
# 主机IP
#host = args.host
# mysql端口号
port = args.port
# ip地址文件路径
ipfilepath = args.ipfilepath
# 开启线程数
threadCount = args.threads
# 数据库用户
username = args.name
#
# 文件路径
filepath = args.filepath
#数据库名
databasename = args.databasename
sleeptime = 0.01
# 判断主机是否输入
# if str(type(host)) == 'NoneType':
# print u"请输入IP地址"
# sys.exit()
# 判断密码文件是否输入
# if str(type(host)) == 'NoneType':
# print u"请输入密码文件文件路径"
# exit()
def getconn(host,port,username,pwd,databasename):
global threadNum
global FLAG
# print '进入到线程中'
try:
pymysql.connect(host=host, port=port, user=username, password=pwd, db=databasename, charset='utf8mb4', autocommit=True, cursorclass=pymysql.cursors.DictCursor)
print u'密码破解成功 : \n',pwd
print u'主机IP:',host
print u'连接端口: ',port
print u'密码: ',pwd
# 破解成功
FLAG = False
except Exception ,es:
print str(es)
pass
finally:
threadNum = threadNum - 1
# print u'关闭一个线程'
pass
def dothreads(host,port,username,pwd,databasename):
global threadCount
global threadNum
global sleeptime
# if threadNum<threadCount:
thread.start_new_thread(getconn,(host,port,username,pwd,databasename))
threadNum = threadNum + 1
# print u'开启一个线程 '
# print u'当前线程数 '+str(threadNum)
time.sleep(sleeptime)
# else:
# print u'线程池超限'
def beforethread(host,port,username,s,databasename):
try:
#pass
dothreads(host,port,username,s,databasename)
# getconn(host,port,username,s,databasename)
except Exception,es:
print str(es)
with open(ipfilepath,'r') as ips:
pass
ip = ips.read()
# 获取所有的ip地址
ips = ip.split('\n')
with open(filepath,'r') as f:
start_time = time.time()
#pwd = f.readline().replace('\n','')
locks=[];
pwd = f.read()
pwds = pwd.split('\n')
x = 0
print ips
for host in ips:
for s in pwds:
if FLAG:
x =x + 1
print u'第: '+str(x)+u'个 '
print u'当前密码: '+s
if threadNum<threadCount:
beforethread(host,port,username,s,databasename)
else:
time.sleep(0.1)
beforethread(host,port,username,s,databasename)
print u'共耗时 :%d 秒'% (time.time()-start_time)
|
20,223 | 2429708225cfacace046448982175e5db1e8eb3c | from Instruction.Parameter import Parameter
from Instruction.Function import Function
from Expression.Primitive import Primitive
from Abstract.Instruction import Instruction
from Environment.Environment import Environment
from Abstract.Expression import Expression
from Enum.typeExpression import typeExpression
class CallFuncSt(Instruction):
    """Statement node that calls a user-defined function by name, discarding any result."""

    def __init__(self,id,parameters) -> None:
        # id: name of the function to call; parameters: argument expressions, positional
        self.id = id
        self.parameters = parameters

    def execute(self, environment: Environment):
        """Look up the function, bind each argument to the matching parameter,
        and run the body in a fresh environment parented to the global scope.

        NOTE(review): setValue mutates the Function's shared Parameter objects,
        so recursive or nested calls would overwrite each other's arguments —
        confirm whether per-call copies were intended. Also note the arguments
        are stored unevaluated; presumably evaluation happens inside
        executeFunction — verify.
        """
        tempFunc: Function = environment.getFunction(self.id)
        newEnvironment = Environment(environment.getGlobal())
        for x in range(0,len(tempFunc.parameters)):
            tempPar: Parameter = tempFunc.parameters[x]
            tempPar.setValue(self.parameters[x])
        tempFunc.executeFunction(newEnvironment)
|
20,224 | cf0a846186f8724e3c61ef3a16d2bae9119460a9 | '''
Created on Sep 27, 2016
@author: Dushyant sapra
'''
from org.ds.graph.DirectedGraph import DirectedGraph
from org.ds.graph.common.DFSApplicationUtil import DFSApplicationUtil
from org.ds.stack.Stack import StackUsingLinkedList
class TopoloicalSort:
    """Topological ordering of a directed graph via DFS and Kahn's algorithm."""

    def topologicalSortUsingDFS(self, graph):
        """DFS from every unvisited vertex, collecting the finish order on a stack,
        then print the vertices in reverse finish order."""
        visited = {vertex: False for vertex in graph.getVertexMap().values()}
        finish_stack = StackUsingLinkedList()
        for vertex, seen in visited.items():
            if not seen:
                DFSApplicationUtil.topologicalSortUsingDFSHelper(vertex, visited, finish_stack)
        print("Topological Sort Using DFS is : ")
        while finish_stack.getSize() > 0:
            print(finish_stack.pop())

    def topologicalSortUsingKhanAlgo(self, directedGraph):
        """Kahn's algorithm: repeatedly remove vertices whose in-degree is zero.
        Prints the ordering, or a message if the graph contains a cycle."""
        in_degree = {}
        ready = []       # vertices with no remaining incoming edges
        ordering = []
        for vertex in directedGraph.vertexMap.values():
            in_degree[vertex] = len(vertex.getInVerticesList())
            if in_degree[vertex] == 0:
                ready.append(vertex)
        while ready:
            vertex = ready.pop(0)
            ordering.append(vertex)
            for neighbor in vertex.getOutVerticesList():
                in_degree[neighbor] -= 1
                if in_degree[neighbor] == 0:
                    ready.append(neighbor)
        # if any vertex still has incoming edges, the graph has a cycle
        if all(count == 0 for count in in_degree.values()):
            print("Topological Sort Using Khan's Algo is : ")
            for v in ordering:
                print(v)
        else:
            print("Graph is not a Directed Acyclic Graph")
if __name__ == '__main__':
    obj = TopoloicalSort();
    # Demo graph 1: 7 vertices forming a DAG (sorts are commented out).
    g = DirectedGraph();
    g.addVertex("V1");
    g.addVertex("V2");
    g.addVertex("V3");
    g.addVertex("V4");
    g.addVertex("V5");
    g.addVertex("V6");
    g.addVertex("V7");
    g.addEdge("V1", "V2", "E1");
    g.addEdge("V1", "V3", "E2");
    g.addEdge("V1", "V4", "E3");
    g.addEdge("V2", "V5", "E4");
    g.addEdge("V5", "V6", "E5");
    g.addEdge("V3", "V6", "E6");
    g.addEdge("V4", "V6", "E7");
    g.addEdge("V6", "V7", "E8");
    # obj.topologicalSortUsingDFS(g);
    # obj.topologicalSortUsingKhanAlgo(g);
    # Demo graph 2: the classic 6-vertex CLRS example; run both algorithms.
    g = DirectedGraph();
    g.addVertex("V0");
    g.addVertex("V1");
    g.addVertex("V2");
    g.addVertex("V3");
    g.addVertex("V4");
    g.addVertex("V5");
    g.addEdge("V2", "V3", "E1");
    g.addEdge("V3", "V1", "E2");
    g.addEdge("V4", "V0", "E3");
    g.addEdge("V4", "V1", "E4");
    g.addEdge("V5", "V0", "E5");
    g.addEdge("V5", "V2", "E6");
    obj.topologicalSortUsingDFS(g);
    obj.topologicalSortUsingKhanAlgo(g);
20,225 | 10c9fdabc70c6a206f01d5ea47796714f9fee9ca | import cv2
# Load a photo, display it next to a grayscale conversion, and wait for a key.
print("OpenCV version: ", cv2.__version__)
img = cv2.imread("img/me.jpg")
# downscale to half size; note cv2.resize takes (width, height) while
# img.shape is (rows, cols, channels)
img = cv2.resize(img, (int(img.shape[1]/2),int(img.shape[0]/2)))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
print(gray)
cv2.imshow("Me", img)
cv2.imshow("Me gray", gray)
# block until any key is pressed, then close both windows
cv2.waitKey(0)
cv2.destroyAllWindows()
|
20,226 | 3f94811f394278eb1db9d793a4ecd441545cf77d | # -*- coding: utf-8 -*-
class User:
    """User account model exposing the methods Flask-Login expects."""

    def __init__(self, userId, name, rank, status, channel, location, auth=False):
        self.userId = userId
        self.name = name
        self.rank = rank
        self.status = status
        self.channel = channel
        self.location = location
        self.authenticated = auth

    def __repr__(self):
        keys = ('user_id', 'name', 'rank', 'status', 'channel',
                'location', 'authenticated')
        values = (self.userId, self.name, self.rank, self.status,
                  self.channel, self.location, self.authenticated)
        return str(dict(zip(keys, values)))

    def is_authenticated(self):
        """Return True when this user object has been authenticated."""
        return self.authenticated

    def is_active(self):
        """Every account is treated as active; override to support deactivation."""
        return True

    def is_anonymous(self):
        """This model never represents an anonymous user."""
        return False

    def get_id(self):
        """Return the identifier consumed by the user_loader callback."""
        return self.userId
|
20,227 | a8e8043f815350e79a936fdabf340afd12a0deef | import cgi
import json
import math
import urllib

import webapp2

from google.appengine.ext import ndb
class Rssi(ndb.Model):
    """Models a rssi data set with BSSID and rssi strength"""
    # Access point hardware address (e.g. "aa:bb:cc:dd:ee:ff").
    bssid = ndb.StringProperty()
    # Received signal strength; presumably dBm — confirm against the client.
    level = ndb.IntegerProperty()
class Location(ndb.Model):
    """Models a location signature entry with x,y coordinates and RSSI + other possible signatures"""
    # grid coordinates within a named map (the entity's ancestor key)
    xcoor = ndb.IntegerProperty()
    ycoor = ndb.IntegerProperty()
    # Wi-Fi fingerprint recorded at this grid point
    rssis = ndb.StructuredProperty(Rssi, repeated=True)
    # trust score for this fingerprint; comparisons elsewhere use 0.3/0.7
    # thresholds, so presumably in [0, 1] — confirm with the client
    credibility = ndb.FloatProperty()
    date = ndb.DateTimeProperty(auto_now_add=True)

    @classmethod
    def query_map(cls, ancestor_key):
        """Return a query for every Location under the given map's ancestor key."""
        # ancestor key must be place at the back because it is a "magic word"!
        return cls.query(ancestor=ancestor_key)
class ViewMap(webapp2.RequestHandler):
    """Debug page that lists up to 10 stored Location entries for a map."""

    def get(self):
        # View map location data here
        # NOTE(review): the <html><body> tags are never closed and only the
        # first 10 entries are shown — this looks like scaffolding output.
        self.response.out.write('<html><body>Hello World!')
        map_name = self.request.get('map_name')
        location_key = ndb.Key("Map", map_name or "*no_name*")
        locations = Location.query_map(location_key).fetch(10)
        self.response.out.write('Map: %s\n' % map_name)
        for location in locations:
            self.response.out.write('<blockquote>X:%s Y:%s Credibility:%s Date:%s</blockquote>' %
                                    (location.xcoor, location.ycoor, location.credibility, location.date ))
class UpdateLocation(webapp2.RequestHandler):
    """Reconciles a client's RSSI fingerprint for a grid point with the stored
    map data, updating whichever side is less credible."""

    def get(self):
        """Update the datastore entry for (xcoor, ycoor), or tell the mobile
        client to correct itself, depending on relative credibility.

        Query params: map_name, xcoor, ycoor, credibility, and rssi in the
        form "bssid!level|bssid!level|...". Responds with JSON whose status is
        NO_CHANGE, SERVER_UPDATE, or MOBILE_UPDATE.
        """
        self.response.headers['Content-Type'] = 'application/json'
        map_name = self.request.get('map_name')
        location_key = ndb.Key("Map", map_name or "*no_name*")
        local_x = int(self.request.get('xcoor'))
        local_y = int(self.request.get('ycoor'))
        locations = Location.query(Location.xcoor == local_x,
                                   Location.ycoor == local_y,
                                   ancestor=location_key).fetch(1)
        local_credibility = float(self.request.get('credibility'))
        # parse "bssid!level|bssid!level|..." into Rssi entities
        local_rssi_str_list = self.request.get('rssi').split('|')
        local_rssi_data_list = []
        for local_rssi_str in local_rssi_str_list:
            rssi_list = local_rssi_str.split('!')
            local_rssi_data_list.append(Rssi(bssid=rssi_list[0], level=int(rssi_list[1])))

        result = json.dumps({'status': 'NO_CHANGE'})
        if len(locations) > 0:
            # location found
            location = locations[0]
            if location.credibility <= local_credibility:
                # update database as local data is more trustworthy
                location.rssis = local_rssi_data_list
                location.credibility = local_credibility
                location.put()
                result = json.dumps({'status': 'SERVER_UPDATE'})
            elif local_credibility < 0.7:
                # update mobile client with database entry as local data is not trustworthy
                closest_matched_location = self.getClosestMatch(local_rssi_data_list, location_key)
                if closest_matched_location is not None:
                    if self.getDistance(local_x, local_y, closest_matched_location) <= 5:
                        if closest_matched_location.credibility > local_credibility:
                            # Update mobile client
                            result = json.dumps({'status': 'MOBILE_UPDATE',
                                                 'xcoor': closest_matched_location.xcoor,
                                                 'ycoor': closest_matched_location.ycoor,
                                                 'credibility': closest_matched_location.credibility})
        else:
            # no entry for this grid point yet; store it if reasonably credible
            if local_credibility >= 0.3:
                # BUG FIX: was "location = location = Location(...)"
                location = Location(parent=ndb.Key('Map', map_name),
                                    xcoor=local_x,
                                    ycoor=local_y,
                                    rssis=local_rssi_data_list,
                                    credibility=local_credibility)
                location.put()
                result = json.dumps({'status': 'SERVER_UPDATE'})
        self.response.out.write(result)

    def getClosestMatch(self, rssi_list, location_key):
        """Return a stored Location whose fingerprint matches *rssi_list*, or None.

        A candidate matches when all-but-one BSSIDs agree (for 3 or 4 scanned
        APs) or all BSSIDs agree (for 2), with levels within +/-10.
        """
        # Match 3 out of 4 BSSIDs, 2 out of 3, 2 out of 2...
        if len(rssi_list) == 4:
            locations = Location.query(ndb.OR(ndb.AND(Location.rssis.bssid == rssi_list[1].bssid,
                                                      Location.rssis.bssid == rssi_list[2].bssid,
                                                      Location.rssis.bssid == rssi_list[3].bssid),
                                              ndb.AND(Location.rssis.bssid == rssi_list[0].bssid,
                                                      Location.rssis.bssid == rssi_list[2].bssid,
                                                      Location.rssis.bssid == rssi_list[3].bssid),
                                              ndb.AND(Location.rssis.bssid == rssi_list[0].bssid,
                                                      Location.rssis.bssid == rssi_list[1].bssid,
                                                      Location.rssis.bssid == rssi_list[3].bssid),
                                              ndb.AND(Location.rssis.bssid == rssi_list[0].bssid,
                                                      Location.rssis.bssid == rssi_list[1].bssid,
                                                      Location.rssis.bssid == rssi_list[2].bssid)), ancestor=location_key).fetch(10)
        elif len(rssi_list) == 3:
            locations = Location.query(ndb.OR(ndb.AND(Location.rssis.bssid == rssi_list[1].bssid,
                                                      Location.rssis.bssid == rssi_list[2].bssid),
                                              ndb.AND(Location.rssis.bssid == rssi_list[0].bssid,
                                                      Location.rssis.bssid == rssi_list[2].bssid),
                                              ndb.AND(Location.rssis.bssid == rssi_list[0].bssid,
                                                      Location.rssis.bssid == rssi_list[1].bssid)), ancestor=location_key).fetch(10)
        elif len(rssi_list) == 2:
            # BUG FIX: this branch indexed rssi_list[1] and rssi_list[2],
            # raising IndexError for a 2-element list.
            locations = Location.query(ndb.AND(Location.rssis.bssid == rssi_list[0].bssid,
                                               Location.rssis.bssid == rssi_list[1].bssid), ancestor=location_key).fetch(10)
        else:
            # BUG FIX: previously `locations` was left undefined for any other
            # length, raising NameError below. Too few/many APs: no match.
            return None
        for location in locations:
            matches = [rssi for rssi in rssi_list if self.matchRssiData(rssi, location.rssis, 10)]
            if len(rssi_list) > 2:
                if len(matches) >= len(rssi_list) - 1:
                    return location
            else:
                if len(matches) >= len(rssi_list):
                    return location
        # No matches
        return None

    def matchRssiData(self, local_rssi, db_rssi_list, threshold):
        """Return True if db_rssi_list contains local_rssi's BSSID with a level
        within +/-threshold of the local reading."""
        for db_rssi in db_rssi_list:
            if local_rssi.bssid == db_rssi.bssid:
                if db_rssi.level - threshold <= local_rssi.level <= db_rssi.level + threshold:
                    return True
        return False

    def getDistance(self, local_x, local_y, db_loc):
        """Euclidean distance (in grid units) between a point and a stored Location."""
        return math.sqrt((local_x - db_loc.xcoor) ** 2 + (local_y - db_loc.ycoor) ** 2)
class InitMapData(webapp2.RequestHandler):
    """Seeds a rows x cols grid of empty Location entries for a map."""

    def get(self):
        # Initialize map data with all possible x,y with credibility set to 0
        map_name = self.request.get('map_name')
        rows = int(self.request.get('rows'))
        cols = int(self.request.get('cols'))
        for j in range(rows):
            for i in range(cols):
                # placeholder fingerprint with zero credibility; real scans
                # overwrite these through the /update handler
                location = Location(parent=ndb.Key('Map', map_name),
                                    xcoor = i,
                                    ycoor = j,
                                    rssis = [Rssi(bssid='empty', level=0)],
                                    credibility = 0.0)
                location.put()
        # back to the map view for this map
        self.redirect('/?' + urllib.urlencode({'map_name':map_name}))
class ClearMapData(webapp2.RequestHandler):
    """Deletes every Location entry stored under the named map."""

    def get(self):
        # Clear all map data
        map_name = self.request.get('map_name')
        location_key = ndb.Key("Map", map_name or "*no_name*")
        # keys_only avoids fetching full entities just to delete them
        ndb.delete_multi(Location.query(ancestor=location_key).fetch(keys_only=True))
# WSGI route table for the App Engine application.
app = webapp2.WSGIApplication([
    ('/', ViewMap),
    ('/init', InitMapData),
    ('/update', UpdateLocation),
    ('/clear', ClearMapData)
])
20,228 | 8be2398a2ccc1c75602c1ec7848a193ee1c1cc1f | import asyncio
import asynctest
from mimic.brokerage import *
from mimic.proxy_collection import *
REQUEST_URL_A = 'http://www.google.com/search'
class TestBrokerage(asynctest.ClockedTestCase):
    """Tests for Brokerage.acquire using a two-proxy collection."""

    def setUp(self):
        # two local HTTP proxies; proxy_a also carries country/anonymity metadata
        proxy_a = ProxyProps('http', 'localhost', 8888, 0.1,
                             'us', 'transparent')
        proxy_b = ProxyProps('http', 'localhost', 8889, 0.2)
        proxies = ProxyCollection()
        proxies.register_proxy(proxy_a.to_dict())
        proxies.register_proxy(proxy_b.to_dict())
        self.proxy_collection = proxies
        self.brokerage = Brokerage(proxies)

    async def test_acquire(self):
        """acquire() reports the request's host as broker and hands out a
        proxy from the registered collection."""
        proxies = {str(p) for p in self.proxy_collection.proxies}
        res = await self.brokerage.acquire(REQUEST_URL_A, [], 10.0)
        self.assertEqual(res['broker'], "www.google.com")
        self.assertIn(res['proxy'], proxies)
        del res['proxy']
        # a second acquisition for the same URL still resolves the same broker
        res = await self.brokerage.acquire(REQUEST_URL_A, [], 10.0)
        self.assertEqual(res['broker'], "www.google.com")
        del res['proxy']
|
20,229 | 64868e7b24982594f7a69c5b8ff9a92fbea3e581 | import os
from flask import Flask, jsonify
from flask_jwt_extended import JWTManager
from flask_cors import CORS
from .setup import revoked_store, init_db
import app.model
app = Flask(__name__)
# https://flask-cors.readthedocs.io/en/latest/api.html
# Allow the browser client (default http://localhost:51080) to call /api/*
# with credentials (cookies) included.
CORS(app, resources={
    r"/api/*": {
        "origins": os.getenv('CLIENT_ORIGIN', 'http://localhost:51080'),
        'supports_credentials': True
    }
})
# Pick the config class from FLASK_ENV (development -> DevConfig).
conf = 'DevConfig' if os.getenv('FLASK_ENV', 'development') == 'development' else 'ProConfig'
app.config.from_object('app.config.' + conf)
init_db(app)
jwt = JWTManager(app)
# Create our function to check if a token has been blacklisted. In this simple
# case, we will just store the tokens jti (unique identifier) in redis
# whenever we create a new token (with the revoked status being 'false'). This
# function will return the revoked status of a token. If a token doesn't
# exist in this store, we don't know where it came from (as we are adding newly
# created tokens to our store with a revoked status of 'false'). In this case
# we will consider the token to be revoked, for safety purposes.
@jwt.token_in_blacklist_loader
def check_if_token_is_revoked(decrypted_token):
    """Treat a token as revoked unless redis explicitly marks it 'false'."""
    entry = revoked_store.get(decrypted_token['jti'])
    # Unknown jti -> not issued by us (or expired out of the store): revoke.
    return entry is None or entry == 'true'
# Using the expired_token_loader decorator, we will now call
# this function whenever an expired but otherwise valid access
# token attempts to access an endpoint
@jwt.expired_token_loader
def my_expired_token_callback(expired_token):
    """401 payload telling the client which token type (access/refresh) expired."""
    payload = {
        'status': 401,
        'sub_status': 42,
        'msg': 'The {} token has expired'.format(expired_token['type']),
    }
    return jsonify(payload), 401
from .route import api
# All API endpoints are versioned under /api/v1.
app.register_blueprint(api, url_prefix='/api/v1')
print('app environment: %s' % (os.getenv('FLASK_ENV', 'development')))
print('client origins: %s' % (os.getenv('CLIENT_ORIGIN', 'http://localhost:51080')))
20,230 | 97dca00f4fc210e6024cd93dca3c179c67f14d27 | import tkinter as tk
import cv2 as cv
import numpy as np
from PIL import Image, ImageTk
import tkinter.filedialog
from numpy import fft
import math
import matplotlib.pyplot as graph
# --- application window and module-level state ---------------------------
window = tk.Tk()
window.title('毛玻璃清晰化处理软件')
window.geometry('815x790')
address0 = 'code_image//timg.jpg'  # path of the input image (left canvas)
address1 = 'code_image//timg.jpg'  # path of the processed image (right canvas)
folder = 1  # index of the output/<folder>/ working directory
photo0 = None  # PhotoImage refs kept global so Tk does not garbage-collect them
photo1 = None
contrast_data0 = 5  # CLAHE clip limit, first pass
contrast_data1 = 1.3  # CLAHE clip limit, second pass
denosing_data = 10  # fastNlMeansDenoising strength
expansion_data = 1  # kernel size of the morphological opening
rust_data = 1  # kernel size of the erosion
exposure = -5  # camera exposure offset
logic = 1  # 1 = auto exposure, 0 = manual
winner_data = 0.001  # Wiener filter eps
# NOTE(review): `global` at module level is a no-op; these two lines can go.
global img, img1
global point1, point2, point1_dis, point2_dis
def resizeImage(image, width=None, height=None, inter=cv.INTER_AREA):
    """Resize `image` to the given width OR height, keeping the aspect ratio.

    If both dimensions are None the image is returned unchanged.
    """
    h, w = image.shape[:2]
    if width is None and height is None:
        return image
    if width is None:
        # Scale by the requested height.
        scale = height / float(h)
        target = (int(scale * w), height)
    else:
        # Scale by the requested width.
        scale = width / float(w)
        target = (width, int(h * scale))
    return cv.resize(image, target, interpolation=inter)
def on_mouse(event, x, y, flags, param):
    """Mouse callback for the crop window: drag a rectangle, save the crop.

    The preview is shown at 1/4 scale, so display coordinates are multiplied
    by 4 to map back onto the full-resolution image before cropping.
    """
    global img, img1, point1, point2, point1_dis, point2_dis
    img2 = img1.copy()
    if event == cv.EVENT_LBUTTONDOWN:  # left button pressed: anchor point
        point1 = (x*4, y*4)
        point1_dis = (x, y)
        cv.circle(img2, point1_dis, 10, (0,255,0), 5)
        cv.imshow('image', img2)
    elif event == cv.EVENT_MOUSEMOVE and (flags & cv.EVENT_FLAG_LBUTTON):  # dragging: live rectangle
        cv.rectangle(img2, point1_dis, (x, y), (255, 0, 0), 5)
        cv.imshow('image', img2)
    elif event == cv.EVENT_LBUTTONUP:  # released: crop and save
        point2 = (x*4, y*4)
        point2_dis = (x,y)
        cv.rectangle(img2, point1_dis, point2_dis, (0,0,255), 5)
        cv.imshow('image', img2)
        min_x = min(point1[0], point2[0])
        min_y = min(point1[1], point2[1])
        width = abs(point1[0] - point2[0])
        height = abs(point1[1] -point2[1])
        cut_img = img[min_y:min_y+height, min_x:min_x+width]
        cv.imwrite("output/%s/cutted.jpg"%(folder), cut_img)
def screenshots():
    """Open the captured frame at 1/4 scale and let the user drag-select a crop."""
    global img, img1
    img = cv.imread("output/%s/input.jpg"%(folder))
    img1 = resizeImage(img, 816, 612)  # quarter-scale preview for display
    cv.namedWindow('image',1)
    cv.setMouseCallback('image', on_mouse)
    cv.imshow('image', img1)
    cv.waitKey(0)
def motion_process(image_size, motion_angle):
    """Build a normalized linear-motion point-spread function (PSF).

    image_size: (height, width) of the PSF array.
    motion_angle: blur direction in degrees; the blur length is fixed at 15 px.
    Returns an array whose entries sum to 1 (brightness preserving).

    Fix: removed leftover debug `print` statements that spammed stdout on
    every call.
    """
    PSF = np.zeros(image_size)
    center_position = (image_size[0] - 1) / 2
    slope_tan = math.tan(motion_angle * math.pi / 180)
    slope_cot = 1 / slope_tan
    # Step along whichever axis gives at most one pixel of offset per step.
    if slope_tan <= 1:
        for i in range(15):
            offset = round(i * slope_tan)
            PSF[int(center_position + offset), int(center_position - offset)] = 1
    else:
        for i in range(15):
            offset = round(i * slope_cot)
            PSF[int(center_position - offset), int(center_position + offset)] = 1
    # Normalize so the PSF preserves overall brightness.
    return PSF / PSF.sum()
def wiener(input,PSF,eps,K=0.01):
    """Wiener deconvolution of `input` with the given PSF.

    eps is added to the PSF spectrum to avoid division by zero; K is the
    regularization constant. Returns the magnitude of the restored image,
    fft-shifted back to the spatial layout.
    """
    spectrum = fft.fft2(input)
    psf_spectrum = fft.fft2(PSF) + eps
    # Wiener inverse filter: conj(H) / (|H|^2 + K)
    inverse = np.conj(psf_spectrum) / (np.abs(psf_spectrum) ** 2 + K)
    restored = fft.ifft2(spectrum * inverse)
    return np.abs(fft.fftshift(restored))
def wiener_change(image):
    """Wiener-deblur `image` and save the matplotlib figure.

    Writes output/<folder>/winner_out.jpg and shows the plot (blocks until
    the plot window is closed).
    """
    img_h = image.shape[0]
    img_w = image.shape[1]
    graph.figure(1)
    graph.gray()
    # PSF for a 60-degree motion blur sized to the input image.
    PSF = motion_process((img_h, img_w), 60)
    out = wiener(image, PSF, winner_data)
    graph.imshow(out)
    graph.axis('off')
    graph.savefig('output/%s/winner_out.jpg'%(folder))
    graph.show()
def image_out(image, x, y, word):
    """Show `image` in a resizable window named `word`, sized x by y."""
    cv.namedWindow(word, 0)
    cv.resizeWindow(word, x, y)
    cv.imshow(word, image)
def contrast(image):
    """Full enhancement pipeline for the grayscale crop.

    Saves a Wiener-deblurred "before" figure (winner_in.jpg), then applies —
    each step skipped when its global slider value is 0 — CLAHE pass 1,
    non-local-means denoising, CLAHE pass 2, morphological opening, erosion,
    and finally a Wiener "after" figure via wiener_change(). Returns the
    enhanced image.
    """
    dst = image
    img_h = image.shape[0]
    img_w = image.shape[1]
    graph.figure(1)
    graph.gray()
    # Deblur the raw input for before/after comparison.
    PSF = motion_process((img_h, img_w), 60)
    out = wiener(image, PSF, 1e-3)
    graph.imshow(out)
    graph.axis('off')
    graph.savefig('output/%s/winner_in.jpg'%(folder))
    graph.show()
    if contrast_data0 != 0:
        clache = cv.createCLAHE(clipLimit=contrast_data0, tileGridSize=(8, 8))
        dst = clache.apply(dst)
    if denosing_data != 0:
        dst = cv.fastNlMeansDenoising(dst,None ,denosing_data, 7, 21)
    if contrast_data1!=0:
        clache = cv.createCLAHE(clipLimit=contrast_data1, tileGridSize=(8, 8))
        dst = clache.apply(dst)
    if expansion_data != 0:
        kernel = cv.getStructuringElement(cv.MORPH_RECT, (expansion_data, expansion_data))
        dst = cv.morphologyEx(dst, cv.MORPH_OPEN, kernel)  # morphological opening
    if rust_data != 0:
        kernel = np.ones((rust_data, rust_data), np.uint8)
        dst = cv.erode(dst, kernel)  # erosion
    wiener_change(dst)
    return dst
def sharpen(image):
    """Sharpen `image` with a 3x3 Laplacian-style kernel and show the result."""
    kernel = np.array([[0,-1,0],[-1,5,-1],[0,-1,0]], np.float32)
    dst = cv.filter2D(image , -1 , kernel=kernel)
    cv.namedWindow("median", 0)
    cv.resizeWindow("median", 600, 600)
    cv.imshow("median",dst)
# --- Tk Scale callbacks: each stores its slider value in a module global ---

def chang_contrast0(input):
    """Slider callback: first-pass CLAHE clip limit."""
    global contrast_data0
    contrast_data0 = float(input)

def chang_contrast1(input):
    """Slider callback: second-pass CLAHE clip limit."""
    global contrast_data1
    contrast_data1 = float(input)

def chang_denosing_data(input):
    """Slider callback: denoising strength."""
    global denosing_data
    denosing_data = float(input)

def change_expansion_data(input):
    """Slider callback: opening kernel size."""
    global expansion_data
    expansion_data = int(input)

def change_rust_data(input):
    """Slider callback: erosion kernel size."""
    global rust_data
    rust_data = int(input)

def change_winner_data(input):
    """Slider callback: Wiener filter eps.

    Bug fix: this previously declared/assigned `rust_data`, silently
    clobbering the erosion size and never updating `winner_data`.
    """
    global winner_data
    winner_data = float(input)
def scale_creat():
    """Build the five parameter sliders and the button that starts processing."""
    # (label, max, tick interval, step, callback, initial value, y position)
    slider_specs = [
        ('对比度0', 50.0, 5, 1, chang_contrast0, contrast_data0, 310),
        ('对比度1', 2.5, 0.5, 0.1, chang_contrast1, contrast_data1, 390),
        ('去噪', 20.0, 5, 1, chang_denosing_data, denosing_data, 470),
        ('开运算系数', 50.0, 10, 1, change_expansion_data, expansion_data, 550),
        ('腐蚀系数', 50.0, 10, 1, change_rust_data, rust_data, 630),
    ]
    for label, upper, tick, step, callback, initial, ypos in slider_specs:
        scale = tk.Scale(window, label=label, from_=0.0, to=upper,
                         orient=tk.HORIZONTAL, length=250, showvalue=1,
                         tickinterval=tick, resolution=step, command=callback)
        scale.set(initial)
        scale.place(x=300, y=ypos)
    start = tk.Button(window, text='开始处理', width=20, height=3, command=opencv)
    start.place(x=340, y=710)
def resize(w, h, w_box, h_box, pil_image):
    """Scale a w x h PIL image to fit inside w_box x h_box, keeping aspect."""
    # Pick the tighter of the two scale factors so both sides fit.
    factor = min(w_box / float(w), h_box / float(h))
    new_size = (int(w * factor), int(h * factor))
    return pil_image.resize(new_size, Image.ANTIALIAS)
def file_open():
    """Ask the user to pick a .jpg file; the chosen path lands in global `a`."""
    global a
    a = tkinter.filedialog.askopenfilename(filetypes=[("图片", ".jpg")])
def _set_folder(value):
    """Store the selected output/<value>/ working-directory index."""
    global folder
    folder = value

# Button callbacks for the folder-chooser dialog. Previously six copy-pasted
# functions; each now delegates to the shared setter above.
def folder1():
    _set_folder(1)

def folder2():
    _set_folder(2)

def folder3():
    _set_folder(3)

def folder4():
    _set_folder(4)

def folder5():
    _set_folder(5)

def folder6():
    _set_folder(6)
def creat_bottom():
    """Pop up a dialog with buttons to choose the output folder (1-5 or test)."""
    top2 = tk.Toplevel()
    # Bug fix: the original did `top2.title = (...)`, which replaced the
    # title() method with a string instead of setting the window title.
    top2.title('设定存储文件夹')
    top2.geometry('400x220')
    # One button per folder choice, each wired to its module-level callback.
    choices = [('1', folder1), ('2', folder2), ('3', folder3),
               ('4', folder4), ('5', folder5), ('test', folder6)]
    for text, callback in choices:
        tk.Button(top2, text=text, width=10, command=callback).pack()
    tk.Button(top2, text='确认', width=20, command=top2.destroy).pack()
    top2.mainloop()
def creat_menu():
    """Build the menu bar: file operations (folder, camera, crop) and help."""
    menubar = tk.Menu(window)
    filemenu = tk.Menu(menubar, tearoff = 0)
    menubar.add_cascade(label='文件', menu=filemenu)
    helpmenu = tk.Menu(menubar, tearoff=0)
    menubar.add_cascade(label='帮助', menu=helpmenu)
    filemenu.add_cascade(label='选择文件夹', command=creat_bottom)
    filemenu.add_cascade(label='摄像头',command = photograph)
    filemenu.add_cascade(label='切割图像', command=screenshots)
    helpmenu.add_cascade(label='关于',command = about_creat)
    window.config(menu=menubar)
def exposure_change(input):
    """Trackbar callback: remember the requested exposure step (0-15)."""
    global exposure
    exposure = input
def logic_change(input):
    """Trackbar callback: 1 = auto exposure, 0 = manual exposure."""
    global logic
    logic = input
def photograph():
    """Capture a full-resolution frame from the default camera.

    'q' saves the frame as output/<folder>/input.jpg and refreshes the
    preview canvases; 'c' cancels. Trackbars control exposure mode/value.
    """
    global address0
    cap = cv.VideoCapture(0)
    cap.set(cv.CAP_PROP_FOURCC, 1196444237)  # MJPG fourcc encoded as an int
    cap.set(cv.CAP_PROP_FRAME_WIDTH, 3264)  # set capture resolution
    cap.set(cv.CAP_PROP_FRAME_HEIGHT, 2448)
    cv.namedWindow("摄像头", 0)
    cv.resizeWindow("摄像头", 800, 600)
    cv.createTrackbar("更改曝光","摄像头", 0, 15, exposure_change)
    switch = '0:OFF\n1:ON'
    cv.createTrackbar(switch, '摄像头', 0, 1, logic_change)
    # NOTE(review): passing a color-conversion constant as FOURCC looks wrong;
    # confirm which codec this was meant to select.
    cap.set(cv.CAP_PROP_FOURCC,cv.COLOR_YUV2BGR_YUY2)
    while (1):
        # get a frame
        if logic == 0:
            cap.set(cv.CAP_PROP_AUTO_EXPOSURE,logic)
            cap.set(cv.CAP_PROP_EXPOSURE,exposure-15)
        ret, frame = cap.read()
        # show a frame
        cv.imshow("摄像头", frame)
        if cv.waitKey(1) & 0xFF == ord('q'):
            cv.imwrite("output/%s/input.jpg"%(folder), frame)
            address0 = "output/%s/input.jpg"%(folder)
            cavans_creat()
            break
        elif cv.waitKey(1) & 0xFF == ord('c'):
            break
    cap.release()
    cv.destroyAllWindows()
def cavans_creat():
    """(Re)draw the two preview canvases: input on the left, result on the right."""
    global photo0
    global photo1
    img0 = Image.open(address0)
    img0 = resize(3264, 2448, 400, 300, img0)
    photo0 = ImageTk.PhotoImage(img0)  # global ref so Tk keeps the image alive
    img1 = Image.open(address1)
    img1 = resize(3264, 2448, 400, 300, img1)
    photo1 = ImageTk.PhotoImage(img1)
    canvas0 = tk.Canvas(window, bg ='white',height=300,width=400)
    canvas0.create_image(0,0,anchor = 'nw',image = photo0)
    canvas0.place(x= 0, y= 0)
    canvas1 = tk.Canvas(window, bg ='white',height=300,width=400)
    canvas1.create_image(0,0,anchor = 'nw',image = photo1)
    canvas1.place(x= 410, y= 0)
def about_creat():
    """Show the About dialog with program name, version and author."""
    top1=tk.Toplevel()
    top1.title('关于本程序')
    top1.geometry('300x200')
    image = Image.open('code_image\\111.jpg')
    img = ImageTk.PhotoImage(image)
    word_box = tk.Label(top1, text='毛玻璃清晰化处理软件\r版本:1.7\r编写者:张逸航')
    canvas1 = tk.Canvas(top1, width = 80 ,height = 80, bg = 'white')
    canvas1.create_image(0,0,image = img,anchor="nw")
    canvas1.create_image(image.width,0,image = img,anchor="nw")
    canvas1.pack()
    word_box.pack()
    top1.mainloop()
def opencv():
    """Run the enhancement pipeline on the cropped image and show the result.

    Reads output/<folder>/cutted.jpg, converts it to grayscale, enhances it
    via contrast(), writes output1.jpg and refreshes the preview canvases.
    """
    global address1
    src = cv.imread('output/%s/cutted.jpg'%(folder))
    src = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
    cv.imwrite('output/%s/gray.jpg' % (folder),src)
    out = contrast(src)
    cv.imwrite('output/%s/output1.jpg'%(folder),out)
    address1 = 'output/%s/output1.jpg' % (folder)
    cv.waitKey(0)
    cv.destroyAllWindows()
    cavans_creat()
# Build the UI and enter the Tk event loop.
creat_menu()
cavans_creat()
scale_creat()
window.mainloop()
20,231 | 18127ed04f5a3e0b12e242e0b900961253a72d1e | from framework import logger
class Vars:
    # NOTE(review): hard-coded secret committed to source control — consider
    # loading this from configuration or an environment variable instead.
    key = 'ldlofb5egsg5h22gj3zn77dhs4gdh5ss'
from .rclone_tool import RcloneTool
from .rclone_tool2 import RcloneTool2
|
20,232 | 646a14316e925ef8f8295a688e48c9fb1913ec95 | #This is the boot
import time
# First-boot detection: check.txt contains "1" once setup has completed.
# NOTE(review): files opened for reading below are never closed, and the
# read() comparisons break if the stored files end with a trailing newline.
check = "1"
checkFile = open("check.txt","r")
if check==checkFile.read():
    # --- normal boot: boot-target menu, then username/password login ---
    print ("Boot Menu")
    print ("Boot 1.Desktop")
    print ("boot 2.Servers")
    while 1:
        Booting = input('Enter the name were you would like to boot to: ')
        if Booting=="desktop" or Booting=="1" or Booting=="Desktop":
            print("Booting to desktop")
            break
        elif Booting=="servers" or Booting=="2" or Booting=="Servers":
            print("Booting to Servers")
            break
        else:
            print("There is no option to boot to", Booting)
    while 1:
        print("Enter Your Username")
        username = input("Username: ")
        usernameFile = open("username.txt","r")
        if username==usernameFile.read():
            break
        else:
            print("There is no user called", username)
    while 1:
        print("Enter Your Password")
        password = input("Password: ")
        passwordFile = open("password.txt","r")
        # NOTE(review): the password is stored and compared in plain text.
        if password==passwordFile.read():
            break
        else:
            print("That Password is incorrect")
    print("Welcome",username)
else:
    # --- first run: interactive setup wizard ---
    print("Hello and welcome to Trapdoor V1.0")
    print ("Let's begin what do you want your username to be?")
    username = input("Enter your desired username: ")
    print("Thats a nice username", username)
    print ("Now how old are you?")
    age = input('Enter your age: ')
    print ("Ok so you're", age)
    password = input("What would you like your password to be?: ")
    print("Ok lets setup your wifi")
    wifi = input("Whats your wifi name?: ")
    wifipass = input("Ok and whats the password?: ")
    print("Attempting to join", wifi)
    time.sleep(2.2)
    print("Joined", wifi,"!")
    # Confirmation loop: repeat the summary until the user accepts it.
    while 1:
        print("So your username is", username)
        print("And your age is", age)
        print("The wifi you connected to is", wifi)
        print("Your password will be", password)
        user = input("So all this information correct?: ")
        if user=="No" or user=="no" or user=="n" or user=="N":
            print("Okay then quit and try again")
        elif user=="Yes" or user=="yes" or user=="y" or user=="Y": break
    # Persist the credentials and mark setup as done (check.txt = "1").
    usernameFile = open("username.txt","w")
    usernameFile.write(username)
    usernameFile.close()
    passwordFile = open("password.txt","w")
    passwordFile.write(password)
    passwordFile.close()
    usernameFile = open("check.txt","w")
    usernameFile.write("1")
    usernameFile.close()
    print("Setting up Files")
    time.sleep(2.2)
    print("Ok close this window and re-run boot.py to log in!")
    time.sleep(7.7)
20,233 | a6e88ff624de1cfc70fae37c4dc7639564fedc20 | # -*- coding: utf-8 -*-
''' messages.propertiesからNgram.KANJI_*_*を取得して変換mapを作る
'''
import sys
import string
import re
def main():
    """Convert NGram.KANJI_* entries from a messages.properties file.

    Reads the properties file named by sys.argv[1], extracts the \\uXXXX
    escape sequences from every NGram.KANJI_* line, and prints a conversion
    map: the first code of each class on line one, the remaining codes of
    each class as a brace-wrapped group below.

    Fix: the input file is now closed deterministically via `with`.
    """
    spath = sys.argv[1]
    template = string.Template(u'''{$tgts}
{$sources}
''')
    maps = []
    with open(spath) as props:
        for line in props:
            line = line.strip()
            if not line.startswith('NGram.KANJI_'):
                continue
            codestr = line.split('=', 1)[1]
            # '\uXXXX' escape -> '0xXXXX' hex literal
            codes = ['0x' + x[2:] for x in re.findall(r'\\u[0-9a-fA-F]+', codestr)]
            maps.append(codes)
    tgtstr = ','.join(x[0] for x in maps)
    sourcestr = ',\n'.join('{' + ','.join(x[1:]) + '}' for x in maps)
    print(template.substitute(tgts=tgtstr, sources=sourcestr))
# Script entry point.
if __name__ == '__main__':
    main()
|
20,234 | 8b124b69735554bb1ed5f407f1659ef702b477b7 | def returns_five():
return 5
|
20,235 | f3c8622890c7e75090a485764c86d4d14a0a1344 | from scrapy.selector import HtmlXPathSelector
from scrapy.spider import BaseSpider
from w3lib.html import remove_tags
from XboxBC.items import WikipediaXB360ExclusiveItem
class WikipediaXB360Exclusive(BaseSpider):
    """Scrape Wikipedia's seventh-generation exclusives page for Xbox 360
    titles: game name, publisher, release date and exclusivity type.

    Fixes: removed the unused `base_link` local and the unused `enumerate`
    index.
    """
    name = "WikipediaXB360Exclusive"
    allowed_domains = ['en.wikipedia.org']
    start_urls = (
        "https://en.wikipedia.org/wiki/List_of_video_game_exclusives_(seventh_generation)",
    )

    def parse(self, response):
        # The fourth table on the page holds the Xbox 360 exclusives.
        rows_in_big_table = response.xpath('//*[@id="mw-content-text"]/table[4]/tr')
        for onerow in rows_in_big_table:
            item = WikipediaXB360ExclusiveItem()
            # Each field keeps the raw (empty) selector list when the cell is
            # missing, matching the original behavior.
            gameName = onerow.xpath('td/i/a/text()')
            if len(gameName) != 0:
                gameName = gameName[0].extract()
            publisher = onerow.xpath('td[3]/a[1]/text()')
            if len(publisher) != 0:
                publisher = publisher[0].extract()
            releaseDate = onerow.xpath('td[5]/span[1]/text()')
            if len(releaseDate) != 0:
                # Keeps characters 8..17 of the cell text — presumably the
                # YYYY-MM-DD part after a hidden sort prefix; TODO confirm.
                releaseDate = releaseDate[0].extract()[8:18]
            exclusiveType = onerow.xpath('td[4]/text()')
            if len(exclusiveType) != 0:
                exclusiveType = exclusiveType[0].extract()
            item['gameName'] = gameName
            item['publisher'] = publisher
            item['releaseDate'] = releaseDate
            item['exclusiveType'] = exclusiveType
            yield item
|
20,236 | 4a853ec19dc243e8202dedc2f8127487d8287bfa | # encoding: utf-8
from datetime import timedelta
from datetime import date, datetime
from openerp import models, fields, api, _
from openerp.tools import cache
from openerp.exceptions import Warning
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
class Product(models.Model):
    u'''产品'''  # "Product"; in old-API OpenERP the docstring can surface as the model description, so it is left untouched
    _name = 'sales.product'  # internal model name (required)
    name = fields.Char(u'名称', required=True, index=True)  # product name, indexed
    unit_price = fields.Float(u'单价', required=True)  # unit price
class Customer(models.Model):
    u'''客户信息'''  # "Customer information"
    _name = 'sales.customer'
    name = fields.Char(u'姓名', required=True)  # customer name
    # NOTE(review): unlike the other labels this one lacks the u'' prefix.
    address = fields.Char('联系地址')  # contact address
class Order(models.Model):
    u'''销售订单'''  # "Sales order"
    _name = 'sales.order'
    name = fields.Char(u'单号', required=True)  # order number
    customer = fields.Many2one('sales.customer', u'客户', required=True)
    # Bug fix: pass the function itself, not its result. With
    # `fields.Datetime.now()` the default was computed once at module import,
    # so every new order got the server start time instead of its creation
    # time.
    order_time = fields.Datetime(u'下单时间', required=True, default=fields.Datetime.now)
    lines = fields.One2many('sales.order.line', 'sales_order', u'订单明细')
    price_total = fields.Float(u'总价', compute='_sum_price')  # computed total
    note = fields.Text(u'备注')

    @api.one
    @api.depends('lines')
    def _sum_price(self):
        """Total price = sum of the line subtotals."""
        self.price_total = sum((line.subtotal for line in self.lines), 0.0)
class OrderLine(models.Model):
    u'''销售订单明细'''  # "Sales order line"
    _name = 'sales.order.line'
    sales_order = fields.Many2one('sales.order', u'订单', index=True, required=True, ondelete='cascade')
    name = fields.Many2one('sales.product', u'产品', required=True)  # the product
    quantity = fields.Float(u'数量', required=True)  # quantity ordered
    unit_price = fields.Float(u'单价', required=True)  # price per unit
    subtotal = fields.Float(u'小计', compute='_sum_subtotal')  # computed line total
    @api.one
    @api.depends('unit_price', 'quantity')
    def _sum_subtotal(self):
        # Line total = unit price x quantity.
        self.subtotal = self.unit_price * self.quantity
    @api.onchange('name')
    def _onchange_product(self):
        # Selecting a product pulls in its list price and refreshes the subtotal.
        self.unit_price = self.name.unit_price
        self.subtotal = self.unit_price * self.quantity
    @api.onchange('quantity', 'unit_price')
    def _onchange_qty_or_unit_price(self):
        # Keep the on-screen subtotal in sync while the user edits the line.
        self.subtotal = self.unit_price * self.quantity
|
20,237 | 741683baf04adaa538d20b22d3390d59da05d927 | from hashlib import md5
from base64 import b64decode
from base64 import b64encode
from Crypto.Cipher import AES
from copy import deepcopy
from os import urandom
from random import randint
def padding(block_size,s):
    """Pad `s` up to a multiple of block_size.

    NOTE: every pad byte is chr(block_size) rather than chr(pad length), so
    this is not strict PKCS#7; a full extra block is appended when `s`
    already fits evenly (behavior preserved from the original).
    """
    remainder = len(s) % block_size
    pad_len = block_size if remainder == 0 else block_size - remainder
    return s + chr(block_size) * pad_len
def xor(data, key):
    """Byte-wise XOR of two strings, truncated to the shorter length."""
    out = []
    for a, b in zip(data, key):
        out.append(chr(ord(a) ^ ord(b)))
    return ''.join(out)
def rand_AES_KEY(block_size):
    """Return `block_size` cryptographically random bytes for use as an AES key."""
    return urandom(block_size)
def encrypt_ecb(key,m):
    """ECB-encrypt `m` under `key` (block size = len(key)), padding first."""
    block = len(key)
    padded = padding(block, m)
    cipher = AES.new(key, AES.MODE_ECB)
    # Encrypt block by block and concatenate the ciphertext.
    chunks = [cipher.encrypt(padded[i:i + block]) for i in range(0, len(padded), block)]
    return "".join(chunks)
def enc_oracle(m):
    """Encryption oracle: ECB-encrypt attacker-chosen `m` under the fixed global key."""
    cipher_text = encrypt_ecb(key,m)
    return cipher_text
def get_blocks(cipher_text, blocksize=16):
    """Split `cipher_text` into consecutive blocksize-sized chunks."""
    chunks = []
    for start in range(0, len(cipher_text), blocksize):
        chunks.append(cipher_text[start:start + blocksize])
    return chunks
def detect_block_size():
    """Discover the cipher block size (byte-at-a-time ECB attack, step 1).

    Grows an 'A' prefix in front of the secret until the original ciphertext
    reappears inside the longer one; the number of added bytes is one block.
    """
    plaintext = 'A' + postfix
    cipher_text1 = enc_oracle(plaintext)
    for i in range(40):
        plaintext = 'A' + plaintext
        cipher_text = enc_oracle(plaintext)
        if cipher_text1 in cipher_text:
            blocksize = i+1
            break
    return blocksize
def create_dict(blocksize):
    """Build the lookup table: ciphertext of 'A'*(blocksize-1) + byte -> plaintext."""
    dict_1_block_cipher = {}
    for i in range(256):
        plaintext = 'A'*(blocksize - 1) + chr(i)
        dict_1_block_cipher[enc_oracle(plaintext)] = plaintext
    return dict_1_block_cipher
def decrypt_text(blocksize,dict_1_block_cipher):
    """Recover the secret one byte at a time.

    Encrypts each known secret byte behind the fixed 'A' prefix and looks the
    resulting first block up in the precomputed dictionary.
    """
    unknown_String = ""
    for i in range(len(postfix)):
        plaintext = 'A'*(blocksize - 1) + postfix[i]
        cipher_text = enc_oracle(plaintext)
        unknown_String = unknown_String + dict_1_block_cipher[cipher_text][-1]
    return unknown_String
# --- driver (Python 2): random key, base64-decoded secret, run the attack ---
block_size = 16
key = rand_AES_KEY(block_size)
postfix = '''Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkgaGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBqdXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUgYnkK'''
postfix = postfix.decode('base64')
blocksize = detect_block_size()
dict_1_block_cipher = create_dict(blocksize)
print decrypt_text(blocksize,dict_1_block_cipher)
20,238 | 529acbfc1f4fb210862a87c79f6d0abc3e9bb5d1 | import sys
from antlr4 import *
from DataGeneratorLexer import DataGeneratorLexer
from DataGeneratorParser import DataGeneratorParser
from DataGeneratorParserListener import DataGeneratorParserListener
from proto import data_pb2
from google.protobuf import message_factory
from google.protobuf import descriptor
from google.protobuf.internal.containers import MessageMap
class DataProtoGenerator:
    """Fills a protobuf message from a textual config using the ANTLR-generated
    DataGenerator lexer/parser and a parse-tree walk listener."""
    def __init__(self):
        # NOTE(review): this assigns a local, not self._tree; the attribute is
        # only ever created by configurate().
        _tree = None
        pass
    # NOTE(review): this method is shadowed by the second `generateDataProto`
    # definition below — at runtime only the config-free variant is callable.
    def generateDataProto(
        self, config: str, output_proto, new_repeats: dict=None, var_values: dict={}):
        """new_repeats: repeat fields that need to be newly created."""
        lexer = DataGeneratorLexer(InputStream(config))
        stream = CommonTokenStream(lexer)
        parser = DataGeneratorParser(stream)
        tree = parser.generateDataProto()
        walker = ParseTreeWalker();
        listener = self.Listener(
            output_proto=output_proto, new_repeats=new_repeats, var_values=var_values);
        walker.walk(listener, tree);
    def configurate(self, config: str):
        # Parse once and cache the tree so several protos can be generated from it.
        lexer = DataGeneratorLexer(InputStream(config))
        stream = CommonTokenStream(lexer)
        parser = DataGeneratorParser(stream)
        self._tree = parser.generateDataProto()
    def generateDataProto(self, output_proto, new_repeats: dict=None, var_values: dict={}):
        # Walk the tree cached by configurate() and populate output_proto.
        # NOTE(review): the mutable default for var_values is shared across calls.
        walker = ParseTreeWalker();
        listener = self.Listener(
            output_proto=output_proto, new_repeats=new_repeats, var_values=var_values);
        walker.walk(listener, self._tree);
    class Listener(DataGeneratorParserListener):
        """Parse-tree listener that resolves proto paths and assigns values."""
        def __init__(self, output_proto, new_repeats: dict=None, var_values: dict={}):
            self._output_proto = output_proto
            self._scopes = []  # stack of field names for nested assignSubProto blocks
            self._var_values = var_values
            # overwrite the last instead of append a new one for repeated fields.
            self._new_repeats = new_repeats
            self._map_key = None  # pending map key, set before its value is assigned
        def IsIntType(self, field_type):
            # True for every protobuf integer scalar type (signed/unsigned/fixed).
            return (field_type == descriptor.FieldDescriptor.TYPE_INT64 or
                    field_type == descriptor.FieldDescriptor.TYPE_UINT64 or
                    field_type == descriptor.FieldDescriptor.TYPE_INT32 or
                    field_type == descriptor.FieldDescriptor.TYPE_FIXED64 or
                    field_type == descriptor.FieldDescriptor.TYPE_FIXED32 or
                    field_type == descriptor.FieldDescriptor.TYPE_UINT32 or
                    field_type == descriptor.FieldDescriptor.TYPE_SFIXED32 or
                    field_type == descriptor.FieldDescriptor.TYPE_SFIXED64 or
                    field_type == descriptor.FieldDescriptor.TYPE_SINT32 or
                    field_type == descriptor.FieldDescriptor.TYPE_SINT64)
        def IsFloatType(self, field_type):
            # True for the two protobuf floating-point scalar types.
            return (field_type == descriptor.FieldDescriptor.TYPE_FLOAT or
                    field_type == descriptor.FieldDescriptor.TYPE_DOUBLE)
        def _getProtoPathMapFields(
            self, field, field_descriptor, field_names:list, is_all:bool, results:list):
            # Resolve a path that steps into a protobuf map field.
            assert isinstance(field, MessageMap)
            # The path is for key or non-message type value of the map.
            if len(field_names) == 1:
                results.append(field)
                return
            type_name = field_names[1]
            if type_name == "key":
                if is_all:
                    results.extend(field.keys())
                else:
                    results.append(field)
            elif type_name == "value":
                assert self._map_key
                if is_all:
                    for value in field.values():
                        self._getProtoPathFields(value, field_names[1:], is_all, results)
                else:
                    # skip the 'value' level
                    self._getProtoPathFields(field[self._map_key], field_names[2:], is_all, results)
                # self._map_key = None
            else:
                # NOTE(review): bare `raise` with no active exception raises
                # RuntimeError; an explicit ValueError would be clearer.
                raise
        def _getProtoPathListFields(
            self, field, field_descriptor, field_names:list, is_all:bool, results:list):
            # Resolve a path that steps into a repeated (list) field.
            if is_all:
                for f in field:
                    self._getProtoPathFields(f, field_names[1:], is_all, results)
                return
            field_name = field_names[0]
            # Create a fresh element when the list is empty or the caller asked
            # for a new entry via new_repeats; otherwise reuse the last element.
            if not len(field) or (self._new_repeats and field_name in self._new_repeats):
                assert field_descriptor.message_type
                message = message_factory.MessageFactory().GetPrototype(field_descriptor.message_type)()
                field.append(message)
            self._getProtoPathFields(field[-1], field_names[1:], is_all, results)
        def _getProtoPathFields(self, parent, field_names:list, is_all:bool, results:list):
            """A Recursive function to return all fields with the field name list.
            is_all: if set true, returns all contents in repeated fields. Will not create any
            new field if repeated field is empty.
            if set false, create a new message for repeated fields if they are empty or the
            field name is in 'self._new_repeats'. Returns the last one in the repeated fields.
            """
            if not field_names:
                results.append(parent)
                return
            field_name = field_names[0]
            field = getattr(parent, field_name)
            field_descriptor = parent.DESCRIPTOR.fields_by_name[field_name]
            if field_descriptor.label == descriptor.FieldDescriptor.LABEL_REPEATED:
                if isinstance(field, MessageMap):
                    self._getProtoPathMapFields(field, field_descriptor, field_names, is_all, results)
                else:
                    self._getProtoPathListFields(field, field_descriptor, field_names, is_all, results)
            else:
                self._getProtoPathFields(field, field_names[1:], is_all, results)
        def getProtoPathFields(self, parent, field_names:list, is_all:bool, results:list):
            # Public wrapper around the recursive resolver.
            self._getProtoPathFields(parent, field_names, is_all, results)
        def setFieldValue(self, parent, field_name, value):
            # Write `value` into parent.field_name, coercing it to the declared
            # protobuf scalar type; map key/value entries are handled specially.
            if isinstance(parent, MessageMap):
                if field_name == 'key':
                    # Delay setting of the key, value.
                    # Drops the first/last character (presumably string quotes)
                    # — confirm against the grammar's STRING token.
                    self._map_key = value[1:-1]
                elif field_name == 'value':
                    assert self._map_key
                    parent[self._map_key] = value
                    # self._map_key = None
                return
            field_descriptor = parent.DESCRIPTOR.fields_by_name[field_name]
            if self.IsIntType(field_descriptor.type):
                value = int(value)
            elif self.IsFloatType(field_descriptor.type):
                value = float(value)
            elif field_descriptor.type == descriptor.FieldDescriptor.TYPE_STRING:
                value = str(value)
            elif field_descriptor.type == descriptor.FieldDescriptor.TYPE_BYTES:
                value = bytes(value, 'utf-8')
            if field_descriptor.label == descriptor.FieldDescriptor.LABEL_REPEATED:
                getattr(parent, field_name).append(value)
            else:
                setattr(parent, field_name, value)
        def assignFieldValue(
            self, parent, scopes:list, proto_path:DataGeneratorParser.ProtoPathContext, value):
            # Walk scopes + proto_path down to the parent message, then set the
            # final NAME on it to `value`.
            field_names = []
            field_names.extend(scopes)
            pp = proto_path
            while pp.protoPath():
                field_names.append(pp.NAME().getText())
                pp = pp.protoPath()
            results = []
            self.getProtoPathFields(self._output_proto, field_names, is_all=False, results=results)
            assert len(results) == 1
            self.setFieldValue(results[0], pp.NAME().getText(), value)
        def pushScope(self, field_name):
            """push the field message as the current scope."""
            self._scopes.append(field_name)
        def popScope(self):
            # Leave the innermost sub-proto scope.
            self._scopes.pop()
        def evalExpr(self, expr):
            # Recursively evaluate an arithmetic/string/variable expression node.
            if expr.NUMBER():
                return float(expr.NUMBER().getText())
            if expr.STRING():
                return expr.STRING().getText()
            if expr.refVar():
                return self.evalRefVar(expr.refVar())
            if expr.PLUS():
                return self.evalExpr(expr.left) + self.evalExpr(expr.right)
            if expr.MINUS():
                return self.evalExpr(expr.left) - self.evalExpr(expr.right)
            if expr.MULTI():
                return self.evalExpr(expr.left) * self.evalExpr(expr.right)
            if expr.DIV():
                return self.evalExpr(expr.left) / self.evalExpr(expr.right)
            if expr.expr():
                # Parenthesized sub-expression.
                return self.evalExpr(expr.expr()[0])
            # NOTE(review): bare `raise` with no active exception raises RuntimeError.
            raise
        def evalRefVar(self, refVar):
            # Look up a previously assigned variable; optionally descend into a
            # proto path inside it. A single match unwraps to the bare value.
            result = self._var_values[refVar.protoPath().NAME().getText()]
            proto_path = refVar.protoPath().protoPath()
            if proto_path == None:
                return result
            fields = [];
            while proto_path:
                fields.append(proto_path.NAME().getText())
                proto_path = proto_path.protoPath()
            results = [];
            self.getProtoPathFields(result, fields, is_all=True, results=results)
            if len(results) == 1:
                return results[0]
            return results
        # Exit a parse tree produced by DataGeneratorParser#assignVariable.
        def exitAssignVariable(self, ctx:DataGeneratorParser.AssignVariableContext):
            self._var_values[ctx.NAME().getText()] = self.evalExpr(ctx.expr())
        # Enter a parse tree produced by DataGeneratorParser#assignSubProto.
        def enterAssignSubProto(self, ctx:DataGeneratorParser.AssignSubProtoContext):
            self.pushScope(ctx.NAME().getText())
        # Exit a parse tree produced by DataGeneratorParser#assignSubProto.
        def exitAssignSubProto(self, ctx:DataGeneratorParser.AssignSubProtoContext):
            self.popScope()
        # Exit a parse tree produced by DataGeneratorParser#assignField.
        def enterAssignField(self, ctx:DataGeneratorParser.AssignFieldContext):
            value = self.evalExpr(ctx.expr())
            if isinstance(value, list):
                # A multi-valued reference assigns each element in turn.
                for v in value:
                    self.assignFieldValue(self._output_proto, self._scopes, ctx.protoPath(), v)
            else:
                self.assignFieldValue(self._output_proto, self._scopes, ctx.protoPath(), value)
|
20,239 | 897cb731d1887a0fc79627c238a530f8ced4507f | """
Maps: ComboMaps
===============
Invert synthetic magnetic data with variable background values
and a single block anomaly buried at depth. We will use the Sum Map
to invert for both the background values and a heterogeneous susceptibility
model.
.. code-block:: python
:linenos:
"""
from discretize import TensorMesh
from discretize.utils import active_from_xyz
from SimPEG import (
utils,
maps,
regularization,
data_misfit,
optimization,
inverse_problem,
directives,
inversion,
)
from SimPEG.potential_fields import magnetics
import numpy as np
import matplotlib.pyplot as plt
def run(plotIt=True):
    """Build a synthetic magnetic survey and invert it with a SumMap.

    A homogeneous (two-unit background) model space is combined with a
    voxel (heterogeneous) model space via maps.SumMap, and both are
    recovered together with a sparse-norm (IRLS) inversion.

    Parameters
    ----------
    plotIt : bool
        When True, open 3D slicer plots of the true and recovered models.
    """
    # Inducing field parameters: (amplitude [nT], inclination, declination).
    H0 = (50000.0, 90.0, 0.0)

    # Create a mesh
    dx = 5.0
    hxind = [(dx, 5, -1.3), (dx, 10), (dx, 5, 1.3)]
    hyind = [(dx, 5, -1.3), (dx, 10), (dx, 5, 1.3)]
    hzind = [(dx, 5, -1.3), (dx, 10)]
    mesh = TensorMesh([hxind, hyind, hzind], "CCC")

    # Lets create a simple Gaussian topo and set the active cells
    [xx, yy] = np.meshgrid(mesh.nodes_x, mesh.nodes_y)
    zz = -np.exp((xx**2 + yy**2) / 75**2) + mesh.nodes_z[-1]

    # We would usually load a topofile
    topo = np.c_[utils.mkvc(xx), utils.mkvc(yy), utils.mkvc(zz)]

    # Go from topo to array of indices of active cells
    actv = active_from_xyz(mesh, topo, "N")
    nC = int(actv.sum())

    # Create and array of observation points
    xr = np.linspace(-20.0, 20.0, 20)
    yr = np.linspace(-20.0, 20.0, 20)
    X, Y = np.meshgrid(xr, yr)

    # Move the observation points 5m above the topo
    Z = -np.exp((X**2 + Y**2) / 75**2) + mesh.nodes_z[-1] + 5.0

    # Create a MAGsurvey
    rxLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)]
    rxLoc = magnetics.Point(rxLoc)
    srcField = magnetics.SourceField([rxLoc], parameters=H0)
    survey = magnetics.Survey(srcField)

    # We can now create a susceptibility model and generate data
    model = np.zeros(mesh.nC)

    # Change values in half the domain
    model[mesh.gridCC[:, 0] < 0] = 0.01

    # Add a block in half-space
    model = utils.model_builder.addBlock(
        mesh.gridCC, model, np.r_[-10, -10, 20], np.r_[10, 10, 40], 0.05
    )

    model = utils.mkvc(model)
    model = model[actv]

    # Create active map to go from reduce set to full
    actvMap = maps.InjectActiveCells(mesh, actv, np.nan)

    # Create reduced identity map
    idenMap = maps.IdentityMap(nP=nC)

    # Create the forward model operator
    prob = magnetics.Simulation3DIntegral(
        mesh,
        survey=survey,
        chiMap=idenMap,
        ind_active=actv,
        store_sensitivities="forward_only",
    )

    # Compute linear forward operator and compute some data
    data = prob.make_synthetic_data(
        model, relative_error=0.0, noise_floor=1, add_noise=True
    )

    # Create a homogenous maps for the two domains
    domains = [mesh.gridCC[actv, 0] < 0, mesh.gridCC[actv, 0] >= 0]
    homogMap = maps.SurjectUnits(domains)

    # Create a wire map for a second model space, voxel based
    wires = maps.Wires(("homo", len(domains)), ("hetero", nC))

    # Create Sum map
    sumMap = maps.SumMap([homogMap * wires.homo, wires.hetero])

    # Create the forward model operator
    # NOTE(review): `prob` is deliberately rebound here; the forward-only
    # simulation above was used solely to generate the synthetic data.
    prob = magnetics.Simulation3DIntegral(
        mesh, survey=survey, chiMap=sumMap, ind_active=actv, store_sensitivities="ram"
    )

    # Make sensitivity weighting
    # Take the cell number out of the scaling.
    # Want to keep high sens for large volumes
    wr = (
        prob.getJtJdiag(np.ones(sumMap.shape[1]))
        / np.r_[homogMap.P.T * mesh.cell_volumes[actv], mesh.cell_volumes[actv]] ** 2.0
    )

    # Scale the model spaces independently
    wr[wires.homo.index] /= np.max((wires.homo * wr)) * utils.mkvc(
        homogMap.P.sum(axis=0).flatten()
    )
    wr[wires.hetero.index] /= np.max(wires.hetero * wr)
    wr = wr**0.5

    ## Create a regularization
    # For the homogeneous model
    regMesh = TensorMesh([len(domains)])

    reg_m1 = regularization.Sparse(regMesh, mapping=wires.homo)
    reg_m1.cell_weights = wires.homo * wr
    reg_m1.norms = [0, 2]
    reg_m1.mref = np.zeros(sumMap.shape[1])

    # Regularization for the voxel model
    reg_m2 = regularization.Sparse(
        mesh, active_cells=actv, mapping=wires.hetero, gradient_type="components"
    )
    reg_m2.cell_weights = wires.hetero * wr
    reg_m2.norms = [0, 0, 0, 0]
    reg_m2.mref = np.zeros(sumMap.shape[1])

    reg = reg_m1 + reg_m2

    # Data misfit function
    dmis = data_misfit.L2DataMisfit(simulation=prob, data=data)

    # Add directives to the inversion
    opt = optimization.ProjectedGNCG(
        maxIter=100,
        lower=0.0,
        upper=1.0,
        maxIterLS=20,
        maxIterCG=10,
        tolCG=1e-3,
        tolG=1e-3,
        eps=1e-6,
    )
    invProb = inverse_problem.BaseInvProblem(dmis, reg, opt)
    betaest = directives.BetaEstimate_ByEig(beta0_ratio=1e-2)

    # Here is where the norms are applied
    # Use pick a threshold parameter empirically based on the distribution of
    # model parameters
    IRLS = directives.Update_IRLS(f_min_change=1e-3, minGNiter=1)
    update_Jacobi = directives.UpdatePreconditioner()
    inv = inversion.BaseInversion(invProb, directiveList=[IRLS, betaest, update_Jacobi])

    # Run the inversion
    m0 = np.ones(sumMap.shape[1]) * 1e-4  # Starting model
    prob.model = m0
    mrecSum = inv.run(m0)
    if plotIt:
        mesh.plot_3d_slicer(
            actvMap * model,
            aspect="equal",
            zslice=30,
            pcolor_opts={"cmap": "inferno_r"},
            transparent="slider",
        )

        mesh.plot_3d_slicer(
            actvMap * sumMap * mrecSum,
            aspect="equal",
            zslice=30,
            pcolor_opts={"cmap": "inferno_r"},
            transparent="slider",
        )
if __name__ == "__main__":
run()
plt.show()
|
20,240 | f8927824578a17f60033f183d097edbe0280f440 | from django.shortcuts import render,HttpResponse
# Create your views here.
#定义视图函数
from app01.models import BookInfo,HeroInfo
def index(request):
    """Render the booktest index page with the title of book #1.

    NOTE(review): ``filter(id=1)[0]`` raises IndexError when the book does
    not exist; ``get_object_or_404`` would be friendlier -- confirm intent.
    """
    # Do the processing here, interacting with the Model and Template layers.
    # return HttpResponse('okkkk')
    b = BookInfo.objects.filter(id=1)[0].btitle
    context = {'latest_question_list': b,'list':list(range(1,10))}
    return render(request,'booktest/index.html',context)
def index2(request):
    """Smoke-test view: return a fixed plain-text response."""
    return HttpResponse('hello python')
def show_books(request):
    '''Display the list of all books.'''
    books = BookInfo.objects.all()
    context = {'books':books}
    return render(request,'booktest/show_books.html',context)
def detail(request,id):
    '''Look up the heroes related to the given book.'''
    # NOTE(review): the parameter `id` shadows the builtin; `objects.get`
    # raises DoesNotExist for an unknown id.
    book = BookInfo.objects.get(id=id)
    heros = book.heroinfo_set.all()
    context = {'heros':heros,'book':book}
    return render(request,'booktest/detail.html',context)
20,241 | 7071504ca49b4c08480b895a5a77bb37f46483f1 | # https://docs.python.org/2.7/library/functions.html#dir
"""
dir() is more of a convenience function than a rigorously defined object inspector function. It does slightly different things depending on the type
of the object that is being inspected
"""
class OldClass():
    """Class declared without an explicit ``object`` base.

    Under Python 2 (which this demo script targets) this makes it an
    old-style class, which is the point of the comparison below.
    """

    def __init__(self, number):
        # Store the single constructor argument.
        self.number = number
class MyClass(object):
    """New-style class used to demonstrate dir() on instances and types."""

    # Shared attribute: shows up when dir()-ing instances and the class.
    class_property = "Nice class property"

    def __init__(self, name):
        self.name = name

    def say_hello(self):
        # Build the greeting first, then print it.
        greeting = "Hello my name is " + self.name
        print(greeting)
def inspect_oldclass_instance():
    """Old-style classes do no inherit from the Python 'object', therefore the attributes of 'object' won't be found"""
    obj = OldClass(11)
    print(dir(obj)) # ['__doc__', '__init__', '__module__', 'number']

    # Why isn't __class__ seen by dir()? I don't know but it exists
    print(obj.__class__) # __main__.OldClass

    print(obj.__dict__) # {'number': 11}
    # The attributes below exist on new-style instances (inherited from
    # 'object') but not on old-style instances under Python 2, hence the
    # AttributeErrors noted next to each line.
    #print(obj.__getattribute__) # AttributeError
    #print(obj.__hash__) # AttributeError
    #print(obj.__reduce__) # AttributeError
    #print(obj.__setattr__) # AttributeError
    #print(obj.__delattr__) # AttributeError
    #print(obj.__new__) # AttributeError
    #print(obj.__subclasshook__) # AttributeError
    #print(obj.__weakref__) # AttributeError
def inspect_instance():
    """
    When inspecting an object instance, dir() returns a list that contains:
    - the object's attribute names: 'name'
    - the names of its class's attributes: 'class_property', 'say_hello'
    - the attributes of its base classes: everything defined on 'object'

    As demonstrated by comparing with OldClass, the Python 'object' implements the following so that only its subclasses can use the methods without
    explicitly defining them:
    - __getattribute__
    - __hash__
    - __reduce__
    - __setattr__
    - __delattr__
    - __new__
    - __subclasshook__
    - __weakref__
    - others!

    The Python 'object' does not appear to have the following attributes:
    - __class__
    """
    obj = MyClass("Jello")
    print(dir(obj))
    print("")
    # Instance __dict__ only holds per-instance attributes, not class ones.
    print(obj.__dict__) # {'name': 'Jello'}
    #print(obj.__getattribute__) # <method-wrapper ...>
    #print(obj.__hash__) # <method-wrapper ...>
    #print(obj.__reduce__) # <method-wrapper ...>
    #print(obj.__setattr__) # <method-wrapper ...>
    #print(obj.__delattr__) # <method-wrapper ...>
    #print(obj.__new__) # <method-wrapper ...>
    #print(obj.__subclasshook__) # <method-wrapper ...>
    #print(obj.__weakref__) # None
    # I didn't expect this to be defined on 'object' anyway
    #print(obj.__getattr__) # AttributeError
def inspect_type():
    """
    When inspecting a type (class) object, dir() returns the attributes defined on the class and those of its base classes:
    - 'class_property'
    - 'say_hello'
    - everything defined on 'object'
    """
    obj = MyClass("Jello")
    #print(dir(MyClass))
    #print(MyClass.__class__) # <type 'type'> This must be a metaclass object
    #print(MyClass.__dict__)
    # NOTE(review): MyClass.__dict__ is a dictproxy/mappingproxy; accessing
    # its own __dict__ attribute likely raises AttributeError -- confirm.
    print(MyClass.__dict__.__dict__)
    #print(MyClass.__weakref__) # <attribute '__weakref__' ...>
if __name__ == "__main__":
#inspect_oldclass_instance()
#inspect_instance()
inspect_type() |
20,242 | 50b1b0ec120868e1ca3866a85c8da60da0c21190 | # pylint: disable=W0622
# type: ignore
import os
import sys
import warnings
import onnx
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
# -- Project information -----------------------------------------------------
author = "ONNX"
copyright = "2022"
project = "ONNX"
release = onnx.__version__
version = onnx.__version__
# -- General configuration ---------------------------------------------------
# Sphinx extension modules to load.
# Fix: "sphinx.ext.autodoc" was listed twice; the duplicate entry is removed.
extensions = [
    "sphinx.ext.intersphinx",
    "sphinx.ext.imgmath",
    "sphinx.ext.ifconfig",
    "sphinx.ext.viewcode",
    "sphinx.ext.autodoc",
    "sphinx.ext.githubpages",
    "sphinx.ext.graphviz",
    "sphinx.ext.napoleon",
    "sphinx.ext.autosummary",
    "sphinx.ext.coverage",
    "sphinx.ext.doctest",
    "sphinx_exec_code",
    "sphinx_tabs.tabs",
    "onnx_sphinx",
]
coverage_show_missing_items = True
exclude_patterns = []
graphviz_output_format = "svg"
html_css_files = ["css/custom.css"]
html_favicon = "onnx-favicon.png"
html_logo = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "../../onnx-horizontal-color.png"
)
html_sidebars = {}
html_static_path = ["_static"]
html_theme = "pydata_sphinx_theme"
language = "en"
mathdef_link_only = True
master_doc = "index"
onnx_doc_folder = os.path.join(os.path.abspath(os.path.dirname(__file__)), "operators")
pygments_style = "sphinx"
source_suffix = [".rst"]
templates_path = ["_templates"]
html_context = {
"default_mode": "auto", # auto: the documentation theme will follow the system default that you have set (light or dark)
}
html_theme_options = {
"collapse_navigation": True,
"external_links": [
{"name": "ONNX", "url": "https://onnx.ai/"},
{"name": "github", "url": "https://github.com/onnx/onnx"},
],
"github_url": "https://github.com/onnx/onnx",
"logo": {"image_dark": "onnx-horizontal-white.png"},
"navbar_center": [],
"navigation_depth": 5,
"page_sidebar_items": [], # default setting is: ["page-toc", "edit-this-page", "sourcelink"],
"show_nav_level": 0,
"show_prev_next": True,
"show_toc_level": 0,
}
intersphinx_mapping = {
"https://docs.python.org/": None,
"torch": ("https://pytorch.org/docs/stable/", None),
"numpy": ("https://docs.scipy.org/doc/numpy/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
"python": (f"https://docs.python.org/{sys.version_info.major}", None),
"scikit-learn": ("https://scikit-learn.org/stable/", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
"sklearn": ("https://scikit-learn.org/stable/", None),
}
sphinx_gallery_conf = {
"examples_dirs": ["examples"],
"gallery_dirs": ["auto_examples", "auto_tutorial"],
"capture_repr": ("_repr_html_", "__repr__"),
"ignore_repr_types": r"matplotlib.text|matplotlib.axes",
"binder": {
"org": "onnx",
"repo": ".",
"notebooks_dir": "auto_examples",
"binderhub_url": "https://mybinder.org",
"branch": "master",
"dependencies": "./requirements.txt",
},
}
warnings.filterwarnings("ignore", category=FutureWarning)
|
20,243 | 611c870d21dae32e6da10d799e14f613f348e555 | from lib.action import NapalmBaseAction
class NapalmGetConfig(NapalmBaseAction):
    """StackStorm action: fetch a device's configuration via NAPALM."""

    def run(self, retrieve, **std_kwargs):
        """Return ``(True, result)`` where result holds the raw config and,
        when HTML output is enabled, an HTML-rendered copy."""
        with self.get_driver(**std_kwargs) as device:
            # Pass `retrieve` through only when the caller supplied one.
            config_output = device.get_config(retrieve) if retrieve else device.get_config()
            result = {'raw': config_output}
            if self.htmlout:
                result['html'] = self.html_out(result['raw'])
            return (True, result)
|
20,244 | 034ea68f657d0a47aafaae003325654749d7fc25 | from flask import Blueprint
# Blueprint grouping all /detail routes.
detailBlp = Blueprint("detailBlp", __name__, url_prefix="/detail")
# Imported after the blueprint exists so the views module can import
# `detailBlp` without a circular-import error.
from .views import *
|
20,245 | f8dff2d7573c8c6a7fcbe3859d1ddf1af4a9ac9b | #!/usr/bin/python
import sys
# Read a Code-Jam style input file: first line is the number of cases; each
# case is a count line followed by a line of integers.
infile = open(sys.argv[1], 'r')
numCases = int(infile.readline())
caseNum = 0
for case in range(numCases):
    caseNum += 1
    numVals = int(infile.readline())
    nums = [int(i) for i in infile.readline().split()]
    numOff = 0
    # Count values not at their "home" position, i.e. nums[i] != i + 1.
    for i in range(len(nums)):
        if i != nums[i]-1:
            numOff += 1
    # NOTE(review): "%6f" is width-6 formatting; "%.6f" (six decimal places)
    # was probably intended -- confirm against the expected output format.
    print "Case #%s: %6f" % (caseNum, numOff)
|
20,246 | 4f2aee8a2d322fc4e9d2579bb1f36a02b992b9e7 | import logging
from dotenv import load_dotenv
import os
import requests
from requests.adapters import HTTPAdapter
from urllib3.util import Retry
# Start Logger
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(message)s',
level=logging.INFO)
log = logging.getLogger(__name__)
# Load API Key from .env
load_dotenv()
token = os.getenv("TOKEN")
class RequestsApi:
    """Thin convenience wrapper around a persistent ``requests.Session``.

    The session carries the API token loaded from the environment, retries
    idempotent requests on transient HTTP errors, and lets callers
    deep-merge extra session settings (e.g. headers) via ``**kwargs``.
    """

    # Status codes and HTTP methods that are safe to retry automatically.
    RETRY_STATUSES = [429, 500, 502, 503, 504]
    RETRY_METHODS = ["HEAD", "GET", "OPTIONS"]

    def __init__(self, base_url, **kwargs):
        self.base_url = base_url
        self.session = requests.Session()
        self.session.headers.update({'Authorization': f'Token {token}'})
        for arg in kwargs:
            if isinstance(kwargs[arg], dict):
                # Merge dict-valued settings into the session's defaults
                # instead of clobbering them wholesale.
                kwargs[arg] = self.__deep_merge(getattr(self.session, arg), kwargs[arg])
            setattr(self.session, arg, kwargs[arg])

        retry_kwargs = dict(
            total=3,
            backoff_factor=1,
            status_forcelist=self.RETRY_STATUSES,
        )
        try:
            # Fix: urllib3 >= 1.26 renamed 'method_whitelist' to
            # 'allowed_methods', and the old name was removed in urllib3 2.0
            # (the original code raised TypeError on modern urllib3).
            retry_strategy = Retry(allowed_methods=self.RETRY_METHODS, **retry_kwargs)
        except TypeError:
            # Fallback for urllib3 < 1.26, which only knows the old name.
            retry_strategy = Retry(method_whitelist=self.RETRY_METHODS, **retry_kwargs)
        adapter = HTTPAdapter(max_retries=retry_strategy)
        self.session.mount("https://", adapter)

    def request(self, method, url, **kwargs):
        """Issue a request with an arbitrary method, relative to base_url."""
        return self.session.request(method, self.base_url+url, **kwargs)

    def head(self, url, **kwargs):
        """HEAD request relative to base_url."""
        return self.session.head(self.base_url+url, **kwargs)

    def get(self, url, **kwargs):
        """GET request relative to base_url."""
        return self.session.get(self.base_url+url, **kwargs)

    def post(self, url, **kwargs):
        """POST request relative to base_url."""
        return self.session.post(self.base_url+url, **kwargs)

    def put(self, url, **kwargs):
        """PUT request relative to base_url."""
        return self.session.put(self.base_url+url, **kwargs)

    def patch(self, url, **kwargs):
        """PATCH request relative to base_url."""
        return self.session.patch(self.base_url+url, **kwargs)

    def delete(self, url, **kwargs):
        """DELETE request relative to base_url."""
        return self.session.delete(self.base_url+url, **kwargs)

    @staticmethod
    def __deep_merge(source, destination):
        """Recursively merge *source* into *destination* in place.

        Nested dicts are merged key-by-key; scalar values from *source*
        overwrite those in *destination*.  Returns *destination*.
        """
        for key, value in source.items():
            if isinstance(value, dict):
                node = destination.setdefault(key, {})
                RequestsApi.__deep_merge(value, node)
            else:
                destination[key] = value
        return destination
api = RequestsApi("https://localcoinswap.com/api/v2/")
|
20,247 | 11993b087d5e20ccebbef7e5241b14f147223cf6 | #!/usr/bin/env python
# coding: utf-8
# #Maximum Likelihood Estimates (MLEs)
#
# By Delaney Granizo-Mackenzie and Andrei Kirilenko developed as part of the Masters of Finance curriculum at MIT Sloan.
#
# Part of the Quantopian Lecture Series:
#
# * [www.quantopian.com/lectures](https://www.quantopian.com/lectures)
# * [github.com/quantopian/research_public](https://github.com/quantopian/research_public)
#
# Notebook released under the Creative Commons Attribution 4.0 License.
#
# ---
# In this tutorial notebook, we'll do the following things:
# 1. Compute the MLE for a normal distribution.
# 2. Compute the MLE for an exponential distribution.
# 3. Fit a normal distribution to asset returns using MLE.
# First we need to import some libraries
# In[13]:
import math
import matplotlib.pyplot as plt
import numpy as np
import scipy
import scipy.stats
# ##Normal Distribution
# We'll start by sampling some data from a normal distribution.
# In[14]:
TRUE_MEAN = 40
TRUE_STD = 10
X = np.random.normal(TRUE_MEAN, TRUE_STD, 1000)
# Now we'll define functions that, given our data, will compute the MLE for the $\mu$ and $\sigma$ parameters of the normal distribution.
#
# Recall that
#
# $$\hat\mu = \frac{1}{T}\sum_{t=1}^{T} x_t$$
#
# $$\hat\sigma = \sqrt{\frac{1}{T}\sum_{t=1}^{T}{(x_t - \hat\mu)^2}}$$
# In[15]:
def normal_mu_MLE(X):
    """Maximum-likelihood estimate of the mean of a normal sample."""
    # The MLE of mu is simply the sample mean.
    T = len(X)
    return 1.0 / T * sum(X)

def normal_sigma_MLE(X):
    """Maximum-likelihood estimate of the standard deviation of a normal
    sample (the biased 1/T estimator, matching the lecture notes)."""
    T = len(X)
    mu = normal_mu_MLE(X)
    # Mean squared deviation from the estimated mean...
    sigma_squared = 1.0 / T * sum(np.power(X - mu, 2))
    # ...whose square root is the sigma estimate.
    return math.sqrt(sigma_squared)
# Now let's try our functions out on our sample data and see how they compare to the built-in `np.mean` and `np.std`
# In[16]:
print "Mean Estimation"
print normal_mu_MLE(X)
print np.mean(X)
print "Standard Deviation Estimation"
print normal_sigma_MLE(X)
print np.std(X)
# Now let's estimate both parameters at once with scipy's built in `fit()` function.
# In[17]:
mu, std = scipy.stats.norm.fit(X)
print "mu estimate: " + str(mu)
print "std estimate: " + str(std)
# Now let's plot the distribution PDF along with the data to see how well it fits. We can do that by accessing the pdf provided in `scipy.stats.norm.pdf`.
# In[18]:
pdf = scipy.stats.norm.pdf
# We would like to plot our data along an x-axis ranging from 0-80 with 80 intervals
# (increments of 1)
x = np.linspace(0, 80, 80)
plt.hist(X, bins=x, normed='true')
plt.plot(pdf(x, loc=mu, scale=std))
plt.xlabel('Value')
plt.ylabel('Observed Frequency')
plt.legend(['Fitted Distribution PDF', 'Observed Data', ]);
# ##Exponential Distribution
# Let's do the same thing, but for the exponential distribution. We'll start by sampling some data.
# In[37]:
TRUE_LAMBDA = 5
X = np.random.exponential(TRUE_LAMBDA, 1000)
# `numpy` defines the exponential distribution as
# $$\frac{1}{\lambda}e^{-\frac{x}{\lambda}}$$
#
# So we need to invert the MLE from the lecture notes. There it is
#
# $$\hat\lambda = \frac{T}{\sum_{t=1}^{T} x_t}$$
#
# Here it's just the reciprocal, so
#
# $$\hat\lambda = \frac{\sum_{t=1}^{T} x_t}{T}$$
# In[38]:
def exp_lamda_MLE(X):
    """MLE of numpy's exponential scale parameter: the sample mean.

    numpy parameterizes the exponential as (1/lambda) * exp(-x/lambda),
    so the maximum-likelihood estimate of lambda is mean(X).
    """
    sample_total = sum(X)
    return sample_total / len(X)
# In[39]:
print "lambda estimate: " + str(exp_lamda_MLE(X))
# In[40]:
# The scipy version of the exponential distribution has a location parameter
# that can skew the distribution. We ignore this by fixing the location
# parameter to 0 with floc=0
_, l = scipy.stats.expon.fit(X, floc=0)
# In[41]:
pdf = scipy.stats.expon.pdf
x = range(0, 80)
plt.hist(X, bins=x, normed='true')
plt.plot(pdf(x, scale=l))
plt.xlabel('Value')
plt.ylabel('Observed Frequency')
plt.legend(['Fitted Distribution PDF', 'Observed Data', ]);
# ##MLE for Asset Returns
#
# Now we'll fetch some real returns and try to fit a normal distribution to them using MLE.
# In[42]:
prices = get_pricing('TSLA', fields='price', start_date='2014-01-01', end_date='2015-01-01')
# This will give us the number of dollars returned each day
absolute_returns = np.diff(prices)
# This will give us the percentage return over the last day's value
# the [:-1] notation gives us all but the last item in the array
# We do this because there are no returns on the final price in the array.
returns = absolute_returns/prices[:-1]
# Let's use `scipy`'s fit function to get the $\mu$ and $\sigma$ MLEs.
# In[43]:
mu, std = scipy.stats.norm.fit(returns)
pdf = scipy.stats.norm.pdf
x = np.linspace(-1,1, num=100)
h = plt.hist(returns, bins=x, normed='true')
l = plt.plot(x, pdf(x, loc=mu, scale=std))
# Of course, this fit is meaningless unless we've tested that they obey a normal distribution first. We can test this using the Jarque-Bera normality test. The Jarque-Bera test will reject the hypothesis of a normal distribution if the p-value is under a c.
# In[45]:
from statsmodels.stats.stattools import jarque_bera
jarque_bera(returns)
# In[46]:
jarque_bera(np.random.normal(0, 1, 100))
# *This presentation is for informational purposes only and does not constitute an offer to sell, a solicitation to buy, or a recommendation for any security; nor does it constitute an offer to provide investment advisory or other services by Quantopian, Inc. ("Quantopian"). Nothing contained herein constitutes investment advice or offers any opinion with respect to the suitability of any security, and any views expressed herein should not be taken as advice to buy, sell, or hold any security or as an endorsement of any security or company. In preparing the information contained herein, Quantopian, Inc. has not taken into account the investment needs, objectives, and financial circumstances of any particular investor. Any views expressed and data illustrated herein were prepared based upon information, believed to be reliable, available to Quantopian, Inc. at the time of publication. Quantopian makes no guarantees as to their accuracy or completeness. All information is subject to change and may quickly become unreliable for various reasons, including changes in market conditions or economic circumstances.*
|
20,248 | f43f54ccd8d231aaa7d580dc0d8fb28e80af9aa2 | from shoestore.shoeapp.models import Manufacturer, Shoe, ShoeColor, ShoeType
from rest_framework import viewsets
from shoestore.shoeapp.serializers import (
ManufacturerSerializer, ShoeSerializer, ShoeColorSerializer, ShoeTypeSerializer)
class ManufacturerViewSet(viewsets.ModelViewSet):
    """CRUD API endpoints for Manufacturer records."""
    queryset = Manufacturer.objects.all()
    serializer_class = ManufacturerSerializer
class ShoeViewSet(viewsets.ModelViewSet):
    """CRUD API endpoints for Shoe records."""
    queryset = Shoe.objects.all()
    serializer_class = ShoeSerializer
class ShoeColorViewSet(viewsets.ModelViewSet):
    """CRUD API endpoints for ShoeColor records."""
    queryset = ShoeColor.objects.all()
    serializer_class = ShoeColorSerializer
class ShoeTypeViewSet(viewsets.ModelViewSet):
    """CRUD API endpoints for ShoeType records."""
    queryset = ShoeType.objects.all()
    serializer_class = ShoeTypeSerializer
|
20,249 | b63d0d3176013d273f68bc02c50502fedc904f52 | from api.barriers.fields import BarrierReportStageListingField
from .base import BarrierSerializerBase
class BarrierReportSerializer(BarrierSerializerBase):
    """Serializer for barrier *reports* (draft barriers).

    Extends the shared base serializer with the report's progress stages.
    """
    # Read-only listing of the report's completion stages.
    progress = BarrierReportStageListingField(many=True, read_only=True)

    class Meta(BarrierSerializerBase.Meta):
        # Fields exposed on the report API.
        fields = (
            "admin_areas",
            "all_sectors",
            "caused_by_trading_bloc",
            "code",
            "country",
            "created_by",
            "created_on",
            "id",
            "is_summary_sensitive",
            "is_top_priority",
            "location",
            "modified_by",
            "modified_on",
            "next_steps_summary",
            "other_source",
            "product",
            "progress",
            "sectors",
            "main_sector",
            "sectors_affected",
            "source",
            "status",
            "status_date",
            "status_summary",
            "sub_status",
            "sub_status_other",
            "summary",
            "tags",
            "term",
            "title",
            "trade_direction",
            "trading_bloc",
            "categories",
            "commodities",
            "draft",
            "caused_by_admin_areas",
            "new_report_session_data",
            "companies",
            "related_organisations",
            "start_date",
            "is_start_date_known",
            "is_currently_active",
            "export_types",
            "export_description",
        )
|
20,250 | 7e04c77036cdf7b6ddf8a7127e59d9c4022b4a06 | import numpy as np
np.random.seed(0)
from nnfs.datasets import spiral_data
X,y = spiral_data(100,3)
class Layer_Dense:
    """A fully-connected layer: output = inputs . weights + biases."""

    def __init__(self, n_inputs, n_neurons):
        # Small random weights (scaled standard normal) and zero biases.
        self.weights = np.random.randn(n_inputs, n_neurons) * 0.20
        self.biases = np.zeros((1, n_neurons))

    def forward(self, inputs):
        # Affine transform of a batch of row-vector inputs.
        self.output = np.matmul(inputs, self.weights) + self.biases
class Activation_ReLU:
    """Rectified linear activation applied element-wise."""

    def forward(self, inputs):
        # max(x, 0) element-wise; result stored on the instance, matching
        # the Layer_Dense.forward convention.
        self.output = np.maximum(inputs, 0)
layer1 = Layer_Dense(2,5)
activation1 = Activation_ReLU()
layer1.forward(X)
activation1.forward(layer1.output)
#print (layer1.output)
print (activation1.output)
|
20,251 | fa23e8873c82526234debb68018cc8cd0d2dd576 | from .models import PersonMonoku
from rest_framework import serializers
class PersonaMonokuSerializer (serializers.HyperlinkedModelSerializer):
    """DRF serializer exposing the basic PersonMonoku fields."""
    class Meta:
        model = PersonMonoku
        # Fields exposed through the API.
        fields = ('id', 'name_person', 'email_person', 'age_person')
20,252 | c35e9d7fcd7c8b2f7ec90f14d5f7f65d1c46bb14 | #!/usr/bin/env python
from time import clock, sleep
import wx
from numpy import *
import os
from wx.lib.floatcanvas import FloatCanvas, NavCanvas
#import hazmat, TAP_mod
ID_ABOUT_MENU = wx.NewId()
ID_EXIT_MENU = wx.NewId()
ID_ZOOM_IN_MENU = wx.NewId()
ID_ZOOM_OUT_MENU = wx.NewId()
ID_ZOOM_TO_FIT_MENU = wx.NewId()
ID_DRAWTEST_MENU = wx.NewId()
ID_DRAWMAP_MENU = wx.NewId()
ID_CLEAR_MENU = wx.NewId()
ID_SET_FRAMERATE_MENU = wx.NewId()
ID_OPEN = wx.NewId()
ID_RUN_MOVIE = wx.NewId()
ID_RUNONTOP_MOVIE = wx.NewId()
ID_RERUN_MOVIE = wx.NewId()
ID_PAUSE_BUTTON = wx.NewId()
# Palette cycled through for successive trajectory overlays.
colorlist = ["BLACK", "RED", "CYAN", "GREEN", "SALMON", "VIOLET"]
# One-element list so the index is mutable module-level state.
CurrentColor = [0]

def GetColor():
    """Return the next color in ``colorlist``, wrapping at the end.

    Bug fix: the wrap test used '>' instead of '>=', so the index could
    reach len(colorlist) and the 7th call raised IndexError.
    """
    color = colorlist[CurrentColor[0]]
    CurrentColor[0] += 1
    if CurrentColor[0] >= len(colorlist):
        CurrentColor[0] = 0
    return color
def EVT_NEW_FRAME_EVENT( window, function ):
    """Bind *function* to the custom new-frame event on *window*.

    NOTE(review): NEW_FRAME_EVENT is not defined anywhere in this file
    (it would normally come from wx.NewEventType()); calling this raises
    NameError as-is -- confirm against the original module.
    """
    window.Connect( -1, -1, NEW_FRAME_EVENT, function )
class FrameEvent(wx.PyEvent):
    """Custom event posted per animation frame by the (commented-out)
    threaded playback path.

    NOTE(review): relies on NEW_FRAME_EVENT, which is not defined anywhere
    in this file -- instantiating this raises NameError as-is.
    """
    def __init__(self):
        wx.PyEvent.__init__(self)
        self.SetEventType(NEW_FRAME_EVENT)
class DrawFrame(wx.Frame):
    """Main application window: menu bar, a FloatCanvas, and timer-driven
    playback of trajectory "movies" (sequences of LE point sets)."""

    def __init__(self, *args, **kwargs):
        wx.Frame.__init__(self, *args, **kwargs)

        ## Set up the MenuBar
        MenuBar = wx.MenuBar()

        file_menu = wx.Menu()
        file_menu.Append(ID_OPEN, "&Open map","Open a bna file")
        wx.EVT_MENU(self, ID_OPEN, self.Open_bna)
        file_menu.AppendSeparator()
        file_menu.Append(ID_RUN_MOVIE, "Run &Movie","Run a movie of the trajectory")
        wx.EVT_MENU(self, ID_RUN_MOVIE, self.Run_Movie)
        file_menu.Append(ID_RUNONTOP_MOVIE, "Run On Top &Movie","Run a movie of the trajectory on top of existing")
        wx.EVT_MENU(self, ID_RUNONTOP_MOVIE, self.RunOnTop_Movie)
        file_menu.Append(ID_RERUN_MOVIE, "Re Run &Movie","Re-Run the existing movie of the trajectory")
        wx.EVT_MENU(self, ID_RERUN_MOVIE, self.ReRun_Movie)
        file_menu.AppendSeparator()
        file_menu.Append(ID_EXIT_MENU, "E&xit","Terminate the program")
        wx.EVT_MENU(self, ID_EXIT_MENU, self.OnQuit)
        # NOTE(review): ID_RUN_MOVIE is bound a second time here; the first
        # binding above already connects it to Run_Movie.
        wx.EVT_MENU(self, ID_RUN_MOVIE, self.Run_Movie)
        MenuBar.Append(file_menu, "&File")

        view_menu = wx.Menu()
        view_menu.Append(ID_ZOOM_TO_FIT_MENU, "Zoom to &Fit","Zoom to fit the window")
        wx.EVT_MENU(self, ID_ZOOM_TO_FIT_MENU,self.ZoomToFit)
        view_menu.Append(ID_SET_FRAMERATE_MENU, "Set Frame &Rate","Set the Frame Rate for Movie playback")
        wx.EVT_MENU(self, ID_SET_FRAMERATE_MENU,self.SetFrameRate)
        MenuBar.Append(view_menu, "&View")

        help_menu = wx.Menu()
        help_menu.Append(ID_ABOUT_MENU, "&About",
                         "More information About this program")
        wx.EVT_MENU(self, ID_ABOUT_MENU, self.OnAbout)
        MenuBar.Append(help_menu, "&Help")

        self.SetMenuBar(MenuBar)
        self.CreateStatusBar()
        self.SetStatusText("")

        wx.EVT_CLOSE(self, self.OnCloseWindow)

        # Add the Canvas
        self.NavCanvas = NavCanvas.NavCanvas(self,-1,(500,500),
                                             ProjectionFun = 'FlatEarth',
                                             Debug = 0,
                                             #BackgroundColor = "DARK SLATE BLUE")
                                             BackgroundColor = "WHITE",
                                             #UseBackground = 1,
                                             ).Canvas
        # NOTE(review): this reads the Canvas attribute off the NavCanvas
        # *module*, not the instance created above; it looks like it should
        # be `self.Canvas = self.NavCanvas` -- confirm against floatcanvas.
        self.Canvas = NavCanvas.Canvas
        self.Canvas.NumBetweenBlits = 20

        tb = self.NavCanvas.ToolBar
        tb.AddSeparator()

        RewindButton = wx.Button(tb, -1, "Rewind")
        tb.AddControl(RewindButton)
        wx.EVT_BUTTON(self, RewindButton.GetId() , self.Rewind)

        StopButton = wx.Button(tb, -1, "Stop")
        tb.AddControl(StopButton)
        wx.EVT_BUTTON(self, StopButton.GetId() , self.Stop)

        PlayButton = wx.Button(tb, -1, "Play")
        tb.AddControl(PlayButton)
        wx.EVT_BUTTON(self, PlayButton.GetId() ,self.Play)

        tb.Realize()

        self.Show(True)

        # Playback state: loaded movie frames, drawn point sets, position.
        self.LE_movie = None
        self.LEsObjects = []
        self.TimeStep = 0
        self.FrameDelay = 10 # milliseconds
        self.FileDialog = wx.FileDialog(self, "Pick a file",".","","*",wx.OPEN)
        self.Timer = wx.PyTimer(self.ShowFrame)

        return None

    def Open_bna(self, event):
        """Menu handler: prompt for a .bna map file and load it."""
        dlg = self.FileDialog
        if dlg.ShowModal() == wx.ID_OK:
            filename = dlg.GetPath()
            self.LoadMap(filename)

    def LoadMap(self, filename):
        """Clear the canvas and draw the shoreline polygons from a bna file."""
        self.Canvas.Clear()
        try:
            # NOTE(review): `hazmat` is only imported in a commented-out line
            # at the top of the file; this raises NameError as-is.
            shorelines = hazmat.read_bna(filename,polytype = "PolygonSet")
            for shoreline in shorelines:
                self.Canvas.AddPolygon(shoreline,
                                       LineWidth = 1,
                                       LineColor = "Black",
                                       FillColor = "Brown",
                                       FillStyle = 'Solid',
                                       Foreground = 0)
            self.Canvas.ZoomToBB()
        except:
            # NOTE(review): bare except hides the NameError above as well as
            # genuine file errors; narrow it once hazmat is restored.
            dlg = wx.MessageDialog(self, 'There was something wrong with the selected map file',
                                   'View Trajectories', wx.OK | wx.ICON_ERROR)
            dlg.ShowModal()
            dlg.Destroy()

    def Load_Movie(self, event):
        """Prompt for a trajectory file; return True on success, else None."""
        import glob
        dlg = self.FileDialog
        if dlg.ShowModal() == wx.ID_OK:
            filename = dlg.GetPath()
            # NOTE(review): TAP_mod is also only imported in a commented-out
            # line at the top of the file -- confirm before use.
            (self.LE_movie,(NumTimesteps,NumLEs),HeaderData,flags) = TAP_mod.ReadTrajectory(filename)
            wx.GetApp().Yield()
            return True
        else:
            return None

    def Run_Movie(self, event):
        """Load a movie, replace any drawn LE sets, and start playback."""
        if self.Load_Movie(None):
            if self.LEsObjects:
                self.Canvas.RemoveObjects(self.LEsObjects)
                self.LEsObjects = []
            self.LEsObjects.append(self.Canvas.AddPointSet(self.LE_movie[0], Color = "Black", Diameter = 1.5,Foreground = 1))
            CurrentColor[0] = 1
            self.ReRun_Movie(None)

    def RunOnTop_Movie(self, event):
        """Load a movie and overlay it on the already-drawn trajectories."""
        if self.Load_Movie(None):
            for object in self.LEsObjects:
                object.PutInBackground()
            self.LEsObjects.append(self.Canvas.AddPointSet(self.LE_movie[0], Color = GetColor(), Diameter = 1.5,Foreground = 1) )
            self.ReRun_Movie(None)

    def ReRun_Movie(self, event):
        """Replay the current movie, loading one first if none is loaded."""
        if not self.LE_movie:
            self.Run_Movie(None)
        else:
            self.Play(None)

    ## def UpdateThread(self):
    ##     try:
    ##         while hasattr(self, 'event') and not self.event.isSet():
    ##             wx.PostEvent(self, FrameEvent())
    ##             self.event.wait(self.FrameDelay)
    ##     except wx.PyDeadObjectError: # BUG: we were destroyed
    ##         return

    def Running(self):
        """Returns true if the animation is running"""
        return self.Timer.IsRunning()

    def Play(self,event):
        """Start the animation"""
        if not self.Running():
            if self.LE_movie:
                #self.event.clear()
                #thread = threading.Thread(target = self.UpdateThread)
                #thread.start()
                self.Timer.Start(self.FrameDelay)
            else:
                self.Run_Movie(None)

    def Stop(self,event):
        """Stop playback, leaving the current frame displayed."""
        self.Timer.Stop()

    def ShowFrame(self):
        """Timer callback: draw the next frame, or stop at the end."""
        if self.TimeStep < len(self.LE_movie):
            self.SetStatusText("Timestep # %i of %i"%(self.TimeStep+1,len(self.LE_movie)))
            # this sets the data for the next frame
            self.LEsObjects[-1].SetPoints(self.LE_movie[self.TimeStep])
            self.Canvas.Draw()
            self.TimeStep += 1
            wx.GetApp().Yield(True)
        else:
            self.Timer.Stop()

    def Rewind(self,event):
        """Jump back to the first frame of the current movie."""
        self.TimeStep = 0
        if self.LE_movie:
            self.LEsObjects[-1].SetPoints(self.LE_movie[self.TimeStep])
            self.SetStatusText("Timestep # %i of %i"%(self.TimeStep+1,len(self.LE_movie)))
            self.Canvas.Draw()

    def OnAbout(self, event):
        """Show the About dialog."""
        dlg = wx.MessageDialog(self, "This is a small program to demonstrate\n"
                                     "the use of the FloatCanvas\n",
                               "About Me", wx.OK | wx.ICON_INFORMATION)
        dlg.ShowModal()
        dlg.Destroy()

    def ZoomToFit(self,event):
        """Zoom the canvas to the bounding box of everything drawn."""
        self.Canvas.ZoomToBB()

    def Clear(self,event = None):
        """Remove everything from the canvas and redraw."""
        self.Canvas.Clear()
        self.Canvas.Draw()

    def OnQuit(self,event):
        """Menu handler for File->Exit."""
        self.Close(True)

    def OnCloseWindow(self, event):
        """Destroy the frame when the window is closed."""
        self.Destroy()

    def RunMovie(self,event = None):
        """Debug helper: jitter the LE points randomly and time the redraws.

        NOTE(review): references self.LEs and RandomArray (Numeric-era API);
        neither is defined/imported in this file, so this method is dead
        code as-is.
        """
        import RandomArray
        start = clock()
        shift = RandomArray.randint(0,0,(2,))
        NumFrames = 50
        for i in range(NumFrames):
            points = self.LEs.Points
            shift = RandomArray.randint(-5,5,(2,))
            points += shift
            self.LEs.SetPoints(points)
            self.Canvas.Draw()
        print "running the movie took %f seconds to disply %i frames"%((clock() - start),NumFrames)

    def SetFrameRate(self,event):
        """Prompt for the inter-frame delay in ms; ignore non-integer input."""
        dlg = wx.TextEntryDialog(self,
                                 'Please set the time between frames in milliseconds',
                                 'ViewTrajectories',
                                 "%i"%self.FrameDelay)
        dlg.SetValue("%i"%self.FrameDelay)
        if dlg.ShowModal() == wx.ID_OK:
            try:
                self.FrameDelay = int(dlg.GetValue())
            except:
                # Keep the previous delay on invalid input.
                pass
        dlg.Destroy()
class TrajectoryViewer(wx.App):
    """
    Any bugs, comments, feedback, questions, and especially code are welcome:
    -Chris Barker
    Chris.Barker@noaa.gov

    """
    def OnInit(self):
        # Create and register the main application window.
        frame = DrawFrame(None, title="Trajectory Viewer", size=(700,700))
        self.SetTopWindow(frame)
        return True
if __name__ == "__main__":
app = TrajectoryViewer(0)
app.MainLoop()
|
20,253 | d56ae143ae621bfc1643cd654489d8cbe0f2c634 | from setuptools import setup
# Package metadata for the crossrep distribution.
setup(name='crossrep',
      version='1.3',
      description='scripts for cross region replication',
      # url='https://github.com/sfc-gh-myang/crossrep.git',
      author='Snowflake Inc. ',
      author_email='minzhen.yang@snowflake.com',
      license='Snowflake',
      packages=['crossrep'],
      zip_safe=False)
20,254 | 4e794a17af6dea97fd86c9d8b4fd4181962914a0 | #! env python
# -*- coding: utf-8 -*-
# Date: 2017/09/16
# Filename: p38
__author__ = 'takutohasegawa'
__date__ = "2017/09/16"
# NOTE(review): pymc is imported twice in the original file; the duplicate
# line is harmless but should be removed.
import pymc as pm
import pymc as pm

# Prior distributions: two exponential rates and a discrete switchpoint.
lambda_1 = pm.Exponential("lambda_1", 1)
lambda_2 = pm.Exponential("lambda_2", 1)
tau = pm.DiscreteUniform("tau", lower=0, upper=10)

# The priors are stochastic variables; combining them yields a
# Deterministic node, as the printed types show.
print(type(lambda_1)) # <class 'pymc.distributions.Exponential'>
print(type(lambda_2)) # <class 'pymc.distributions.Exponential'>
print(type(lambda_1+lambda_2)) # <class 'pymc.PyMCObjects.Deterministic'>
|
20,255 | 5905ce145e555cc867b329dfcbe9e6588660676e | import torch
import torch.nn as nn
# Sum-mode EmbeddingBag: a 3-row table of 5-dimensional embeddings.
embed = nn.EmbeddingBag(3, 5, mode='sum')
# One bag containing indices 0 and 1, so the output is the sum of rows 0 and 1.
x = torch.LongTensor([[0, 1]])
y = embed(x)
print(embed.weight.data)
print(x)
print(y)
|
20,256 | c85d21fd3f348a785324ffeec064477ae75e5204 | import requests
from bs4 import BeautifulSoup
'''
用来获取form中的hidden
'''
def parse_form(url):
    """Fetch *url* and print the #password-form input fields.

    The first two inputs are skipped.  Inputs with no ``value`` attribute
    get the placeholder '23333333'; inputs with an empty value get
    '1111111'.
    """
    response = requests.get(url=url)
    response.encoding = 'utf-8'
    page = BeautifulSoup(response.text, 'lxml')
    fields = page.select('#password-form input')[2:]
    data = {}
    for field in fields:
        try:
            data[field['name']] = field['value']
            if len(field['value']) == 0:
                data[field['name']] = '1111111'
        except KeyError:
            data[field['name']] = '23333333'
    for name, value in data.items():
        print(name, value)
# Example run: dump the hidden form fields of the Battle.net login page.
url = 'https://www.battlenet.com.cn/login/zh/'
parse_form(url)
|
20,257 | 45cb97bbf20371a940966fdc340216076767848e | from contextlib import contextmanager
@contextmanager
def simple_cm(n):
    """Toy context manager: announce entry/exit and yield n + 1."""
    try:
        print('setup {}'.format(n))
        yield n + 1
    finally:
        print('wrap up {}'.format(n))
if __name__ == '__main__':
    # Demo: prints "setup 10", "do job 11", then "wrap up 10".
    with simple_cm(10) as n:
        print(f'do job {n}')
|
20,258 | 6bfbbcfd4bf6a9698a97ce370645bd916f0dd1b8 | import argparse
import logging
import os
import sys
class ConfigArgumentParser(argparse.ArgumentParser):
    """ An ArgumentParser wrapper that converts each line of a config file into
    an argument to be parsed by the ArgumentParser """

    def convert_arg_line_to_args(self, line):
        """Turn one config-file line into CLI-style arguments.

        The first token is the option name and gets a '--' prefix unless
        it already has one; remaining tokens are passed through unchanged
        as the option's values.  (The original prefixed '--' onto every
        token, turning 'log-level DEBUG' into '--log-level --DEBUG'.)
        Blank lines and lines whose first token starts with '#' yield
        nothing.
        """
        tokens = line.split()
        if not tokens or tokens[0].startswith('#'):
            # blank line or commented line: nothing to parse
            return
        if not tokens[0].startswith('--'):
            # add '--' to simulate a cli option
            tokens[0] = "--%s" % tokens[0]
        for token in tokens:
            yield token
class CliArgumentParser(argparse.ArgumentParser):
    # Parser that layers a config file (via --config) under the CLI options
    # and always offers --log / --log-level.  Config options live on an
    # inner parser that is re-attached as a parent after every change.
    def __init__(self, *args, **kwargs):
        """ Process command line arguments with a system of tubes """
        # This is a non-positional argument parser that can be used for
        # --config processing
        self.parser = argparse.ArgumentParser(*args, **kwargs)
        self.parser.add_argument("--config", metavar="FILE",
                                 help="specify a configuration file")
        self.parser.add_argument("--log", metavar="FILE",
                                 help="specify a log file")
        self.parser.add_argument("--log-level", metavar="LEVEL",
                                 choices=["DEBUG", "INFO", "WARNING", "ERROR",
                                          "CRITICAL"],
                                 default="INFO",
                                 help="{DEBUG,INFO,WARNING,ERROR,CRITICAL} "
                                      "(default=INFO)")
        # Save in case they are needed for reinitialization
        self.kwargs = kwargs
        self.kwargs['add_help'] = False
        self.kwargs['parents'] = [self.parser]
        argparse.ArgumentParser.__init__(self, *args, **self.kwargs)
    def add_config_argument(self, *args, **kwargs):
        # Register an option that may also appear in the config file.
        # Modifying parent parser requires reinitialization
        self.parser.add_argument(*args, **kwargs)
        argparse.ArgumentParser.__init__(self, **self.kwargs)
    def parse_args(self):
        # NOTE(review): overrides the base parse_args() without the optional
        # (args, namespace) parameters -- callers passing those will break.
        if len(sys.argv) == 1:
            # n00bs need help!
            args = argparse.ArgumentParser.parse_args(self, ['--help'])
        else:
            args = argparse.ArgumentParser.parse_args(self)
        # Configuration error found, aborting
        error = False
        # Process config file if one is specified in the cli options
        if args.config is not None:
            args.config = os.path.abspath(os.path.expandvars(
                os.path.expanduser(args.config)))
            if os.access(args.config, os.R_OK):
                # Re-parse the config file lines on top of the CLI namespace.
                configParser = ConfigArgumentParser(add_help=False,
                                                   fromfile_prefix_chars='@',
                                                   parents=[self.parser])
                args = configParser.parse_args(args=["@%s" % args.config],
                                               namespace=args)
            else:
                logging.error("Unable to read config file")
                error = True
        # I pity the fool who doesn't keep a log file!
        if args.log is not None:
            args.log = os.path.abspath(os.path.expandvars(os.path.expanduser(
                args.log)))
            if not os.access(os.path.dirname(args.log), os.W_OK):
                logging.error("Unable to write to log file")
                error = True
        if error:
            sys.exit(2)
        return args
|
20,259 | b505b2fb6f7d63af322f37bf02e4a89aafdbdd7a | from selenium import webdriver
import re
x = 0;
y = 0;
driver = webdriver.Chrome();
driver.get("http://www.yale.edu/ymso/dummy/svg-edit-2.6/svg-editor.html?extensions=ext-beacon.js");
# Test 1
driver.find_element_by_id("ibeacon").click();
driver.find_element_by_id("svgroot").click();
test1 = driver.execute_script("return svgCanvas.getSvgString();");
print("Test 1, verify iBeacon button works in svg-edit.");
if "svg_1" in test1:
print("Test 1 passed.");
x = x + 1;
else:
print("Test 1 failed.");
y = y + 1;
print "Summary:"
print "{0} tests passed".format(x)
print "{0} tests failed".format(y)
driver.quit();
|
20,260 | bcfd6cf64001cf3a440efda340d31c21768e2092 | #!/bin/python3
# -*- coding: utf-8 -*-
import re
from django.shortcuts import HttpResponse
from django.utils.deprecation import MiddlewareMixin
from django.conf import settings
class RbacMidleware(MiddlewareMixin):
    # RBAC middleware: before each view, match the request path against the
    # permission URLs stored in the session and record which menu to select.
    # NOTE(review): class name is misspelled ("Midleware") but is probably
    # referenced from settings.MIDDLEWARE -- left unchanged.
    def process_request(self,request):
        """
        Check the current request URL against the user's permissions.
        :param request: incoming HttpRequest
        :return: None to continue processing, or an HttpResponse to block.
        """
        current_url=request.path_info
        # 1. Whitelist handling: URLs matching settings.WITH_LIST bypass RBAC.
        #    NOTE(review): "WITH_LIST" looks like a typo for WHITE_LIST --
        #    confirm against the settings module.
        for valid in settings.WITH_LIST:
            if re.match(valid,current_url):
                return None
        # 2. Fetch the permission info from the session; example shape:
        """
        permission_dict = {
            'user_list': {'url': '/app01/user/', 'menu_id': 1, 'parent_name': None},
            'user_add': {'url': '/app01/user/add/', 'menu_id': None, 'parent_name': 'user_list'},
            'user_edit': {'url': '/app01/user/edit/(\\d+)', 'menu_id': None, 'parent_name': 'user_list'},
            'order': {'url': '/app01/order/', 'menu_id': 2, 'parent_name': None}
        }
        """
        permissions_dict=request.session.get(settings.PERSSION_SESSION_KEY)
        match=False
        if not permissions_dict:
            # User is not logged in (no permission data in the session).
            return HttpResponse("当前用户无权限信息,请重新登录")
        # 3. Permission matching: anchor each permission URL and record the
        #    menu entry (itself or its parent) to highlight in the template.
        for k,v in permissions_dict.items():
            reg="^%s$" % v["url"]
            if re.match(reg,current_url):
                if v["menu_id"]:
                    request.default_select_menu_name=k
                else:
                    request.default_select_menu_name=v['parent_name']
                match=True
                break
        if not match:
            return HttpResponse("无权访问")
|
20,261 | 241ae5b27e853dad629cb7cadd7b27e4508cb920 | class SchemeObject:
pass
class SchemeNumber(SchemeObject):
    """Numeric Scheme value wrapping a Python int or float.

    Two numbers are equal only when their underlying values have the same
    Python type and compare equal, so SchemeNumber(1) != SchemeNumber(1.0).
    """

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        return f"{type(self).__name__}({self.value!r})"

    def __eq__(self, other):
        return isinstance(other, SchemeNumber) and isinstance(self.value,
                                                              type(other.value)) and self.value == other.value

    def __hash__(self):
        # Defining __eq__ alone made instances unhashable; hash on
        # (type, value) to stay consistent with the strict equality above.
        return hash((type(self.value), self.value))
class SchemeChar(SchemeObject):
    """Character Scheme value (a single-character Python string)."""

    def __init__(self, value):
        self.value = value

    def __eq__(self, other):
        return isinstance(other, SchemeChar) and self.value == other.value

    def __hash__(self):
        # Required alongside __eq__ so characters stay hashable.
        return hash((SchemeChar, self.value))

    def __str__(self):
        # Scheme character literals are written "#\x"; the original emitted
        # the malformed "\#x".  Whitespace characters print as themselves.
        return self.value if self.value.isspace() else f"#\\{self.value}"
class SchemeBool(SchemeObject):
    """Boolean Scheme value; #t and #f are interned singletons."""

    scheme_true = None
    scheme_false = None

    def __new__(cls, value):
        # Reuse the single instance for each truth value.
        if value:
            if cls.scheme_true is None:
                cls.scheme_true = super().__new__(cls)
                cls.scheme_true.value = True
            return cls.scheme_true
        if cls.scheme_false is None:
            cls.scheme_false = super().__new__(cls)
            cls.scheme_false.value = False
        return cls.scheme_false

    def __eq__(self, other):
        return isinstance(other, SchemeBool) and self.value == other.value

    def __hash__(self):
        # Needed alongside __eq__ so booleans work as dict/set members.
        return hash(self.value)

    def __str__(self):
        return "#t" if self.value else "#f"

    def __bool__(self):
        return self.value
class SchemeString(SchemeObject):
    """Scheme string value; printed with surrounding double quotes."""

    def __init__(self, value):
        self.value = value

    def __eq__(self, other):
        if not isinstance(other, SchemeString):
            return False
        return self.value == other.value

    def __str__(self):
        return '"%s"' % self.value
class SchemeSymbol(SchemeObject):
    """Interned Scheme symbol: one shared instance per distinct name."""

    instances = {}

    def __new__(cls, value):
        # Interning: return the existing instance for this name if any.
        if value not in cls.instances:
            instance = super().__new__(cls)
            instance.value = value
            cls.instances[value] = instance
        else:
            instance = cls.instances[value]
        return instance

    def __eq__(self, other):
        return isinstance(other, SchemeSymbol) and self.value == other.value

    def __hash__(self):
        # __eq__ without __hash__ made symbols unhashable, which breaks
        # using them as dict keys (e.g. environment bindings); hash by name.
        return hash(self.value)

    def __str__(self):
        return self.value
class SchemeEmptyList:
    """The unique empty list '(); implemented as a singleton."""

    instance = None

    def __new__(cls):
        if cls.instance is None:
            cls.instance = super().__new__(cls)
        return cls.instance

    def __iter__(self):
        # The empty list iterates over nothing.
        return iter(())

    def __str__(self):
        return "()"

    def size(self):
        """The empty list has zero elements."""
        return 0

    def is_list(self):
        """The empty list is a proper list."""
        return True
class SchemePair(SchemeObject):
    # Cons cell (first . second).  A pair is a proper list when its cdr
    # chain ends in the empty list.
    def __init__(self, first, second):
        self.first = first
        self.second = second
    def __str__(self):
        # Proper lists print as "( a b c )", improper pairs as "( a . b )".
        if self.is_list():
            return f"( {' '.join(str(element) for element in self)} )"
        else:
            return f"( {self.first} . {self.second} )"
    def __iter__(self):
        if not self.is_list():
            raise Exception("trying to iterate a non list pair")
        return SchemeListIterator(self)
    def __eq__(self, other):
        # Structural (deep) equality.  NOTE(review): __eq__ without
        # __hash__ makes pairs unhashable.
        return isinstance(other, SchemePair) and self.car() == other.car() and self.cdr() == other.cdr()
    def size(self):
        # Number of elements; only valid for proper lists.
        if not self.is_list():
            raise Exception("trying to iterate a non list pair")
        return scheme_list_length(self)
    def car(self):
        return self.first
    def cdr(self):
        return self.second
    def set_car(self, value):
        self.first = value
    def set_cdr(self, value):
        self.second = value
    def is_list(self):
        # Recursive walk down the cdr chain; O(n) on every call.
        return isinstance(self.cdr(), SchemePair) and self.cdr().is_list() or self.cdr() == SchemeEmptyList()
class SchemeListIterator:
    """Iterator over a proper Scheme list built from SchemePair cells."""

    def __init__(self, pair):
        self.current = pair

    def __next__(self):
        node = self.current
        if node is SchemeEmptyList():
            raise StopIteration
        self.current = node.cdr()
        return node.first
def is_scheme_list(scheme_object):
    """Return True when *scheme_object* is a proper Scheme list."""
    if scheme_object is SchemeEmptyList():
        return True
    return isinstance(scheme_object, SchemePair) and scheme_object.is_list()
def make_scheme_list(elements):
    """Build a proper Scheme list from a Python sequence.

    Constructs the list iteratively from right to left, so long inputs do
    not hit Python's recursion limit (the original recursed once per
    element and also copied the remaining slice on every step).
    """
    result = SchemeEmptyList()
    for element in reversed(elements):
        result = SchemePair(element, result)
    return result
def scheme_list_length(scheme_list):
    """Return the number of elements in a proper Scheme list.

    Iterative, so long lists do not overflow the recursion limit as the
    original recursive version could.
    """
    length = 0
    while scheme_list is not SchemeEmptyList():
        length += 1
        scheme_list = scheme_list.cdr()
    return length
def scheme_list_tail(scheme_list):
    """Return the final pair of *scheme_list* (or the empty list itself)."""
    if scheme_list is SchemeEmptyList():
        return scheme_list
    node = scheme_list
    while node.cdr() is not SchemeEmptyList():
        node = node.cdr()
    return node
class SchemeProcedure(SchemeObject):
    """Base class for callable Scheme values.

    Keyword args:
        variadic: True when the procedure takes a single rest-list
            parameter (arity is then fixed at 1).
        arity: number of fixed parameters (defaults to 0).
    """

    def __init__(self, **kwargs):
        is_variadic = kwargs.get('variadic')
        arity = kwargs.get('arity')
        self.is_variadic = is_variadic if is_variadic is not None else False
        if self.is_variadic:
            self.arity = 1
        else:
            self.arity = arity if arity is not None else 0

    def __str__(self):
        # The original returned f"procedure {self}", which calls __str__ on
        # itself and recurses forever; describe the procedure instead.
        return f"procedure (arity={self.arity}, variadic={self.is_variadic})"
class BuiltInProcedure(SchemeProcedure):
    """Procedure implemented by a host (Python) callable."""

    def __init__(self, implementation, **kwargs):
        super().__init__(**kwargs)
        self.implementation = implementation

    def __str__(self):
        return "built in procedure"

    def call(self, args):
        """Apply the wrapped Python callable to the argument list."""
        return self.implementation(*args)
class UserDefinedProcedure(SchemeProcedure):
    # Lambda created by user code: parameter names, body expressions, and
    # the lexical environment captured at definition time.
    def __init__(self, formal_parameters, body, surrounding_environment):
        # formal_parameters is expected to expose fixed_parameters,
        # has_list_parameter and list_parameter_name -- TODO confirm shape
        # against the parser module.
        super().__init__(arity=len(formal_parameters.fixed_parameters), variadic=formal_parameters.has_list_parameter)
        self.parameters = formal_parameters.fixed_parameters if not formal_parameters.has_list_parameter else [
            formal_parameters.list_parameter_name]
        self.body = body
        self.environment = surrounding_environment
    def __str__(self):
        return f"scheme user defined procedure"
class SchemePromise(SchemeObject):
    """Delayed computation: wraps a thunk and memoizes its result.

    A private sentinel marks "not yet forced"; the original used None as
    the marker, so a promise whose legitimate value was None would be
    re-evaluated on every force.
    """

    _UNSET = object()

    def __init__(self, procedure):
        self.procedure = procedure
        self.result = self._UNSET

    def __str__(self):
        return "scheme promise"

    def has_result(self):
        """True once set_result has been called, whatever the value."""
        return self.result is not self._UNSET

    def get_result(self):
        return self.result

    def set_result(self, value):
        self.result = value
class UnAssigned(SchemeObject):
    """Placeholder for variables that are declared but not yet bound.

    NOTE(review): defines __eq__ without __hash__, so instances are
    unhashable -- confirm they are never used as dict/set members.
    """
    def __eq__(self, other):
        return isinstance(other, UnAssigned)
class SchemeRuntimeError(Exception):
    """Error raised for failures during Scheme evaluation."""

    def __init__(self, message=""):
        # Forward to Exception so args/str/pickling behave normally; the
        # original skipped the super call, a known exception-subclass
        # pitfall that breaks pickling.
        super().__init__(message)
        self.message = message
|
20,262 | 91740e880024ba77c054837082ebd35a7b251644 | def password_menu():
from pw_generator import generate_password
length = int(input("Enter length of password: "))
password = generate_password(length)
print("Your password is: ",password)
def enc_decrypt_menu():
    # Interactive Caesar-style encrypt/decrypt helper (pw_generator module).
    from pw_generator import encrypt
    from pw_generator import decrypt
    message=input("Enter your message: ")
    key=int(input("Enter your key (1-26): "))
    choice=input("Encrypt or Decrypt? (E/D): ")
    if choice.lower().startswith('e'):
        print("Encrypted message: ", encrypt(message,key))
    elif choice.lower().startswith('d'):
        print("Decrypted message: ", decrypt(message,key))
# Greeting shown once, at import time.
print("\nHello, Welcome to Security System!")
def main():
    # One pass of the top-level menu: dispatch a single user choice.
    choice = input("\nEnter choice: \n1. Generate Password\n2.Encrypt/Decrypt\n3.Exit:\n")
    if choice == '1':
        try:
            password_menu()
        except Exception as e:
            print(e)
    elif choice == '2':
        try:
            enc_decrypt_menu()
        except Exception as e:
            print(e)
    elif choice=='3':
        print("Thank you!")
        exit()
    else:
        print("Invalid choice!")
        exit()
if __name__=='__main__':
    # Repeat the menu until the user exits (choices 3 or invalid).
    while True:
        main()
20,263 | f6c40bbaf0ab5a8d475e9595e92d92df801493db | #coding: latin-1
''' inicializa o ambiente para captura de informacoes do clipping '''
import MySQLdb
import Identify
import mdNeural
import base64
import calendar
import os
import rfc822
import sys
import tempfile
import textwrap
import time
import urllib
import urllib2
import urlparse
import thread
import umisc
import mdLayout
import mdER
import mdNeural
import subprocess
import string
# Database connections (a single copy each; the original repeated these
# four connect() calls verbatim, leaking the first set of handles).
# NOTE(review): credentials are hard-coded in source -- move them to a
# config file or environment variables.
conn= MySQLdb.connect(host='dbmy0023.whservidor.com', user='mindnet' , passwd='acc159753', db='mindnet')
connTrace= MySQLdb.connect(host='dbmy0032.whservidor.com', user='mindnet_2' , passwd='acc159753', db='mindnet_2')
conn4=MySQLdb.connect(host='dbmy0050.whservidor.com', user='mindnet_4' , passwd='acc159753', db='mindnet_4')
conn3= MySQLdb.connect(host='dbmy0035.whservidor.com', user='mindnet_3' , passwd='acc159753', db='mindnet_3')
def config_conns(conn_):
    """Raise the session wait_timeout on the given DB connection."""
    session_cursor = conn_.cursor()
    session_cursor.execute('SET SESSION wait_timeout = 90000')
# Lengthen the idle timeout on every connection.
config_conns(conn)
config_conns(connTrace)
config_conns(conn4)
config_conns(conn3)
cursorTrace = connTrace.cursor ()
# Share the connections with the helper modules.
mdLayout.conn=conn
mdER.conn=conn
mdER.conn3=conn3
mdER.conn4=conn4
mdNeural.conn=conn
mdNeural.conn3=conn3
mdNeural.conn4=conn4
def clean_Trace(usr):
    """Remove all progress rows for *usr* from status_index.

    cursorTrace belongs to connTrace, so the delete must be committed on
    connTrace; the original committed conn instead, leaving this delete
    pending on its own connection.
    """
    cursorTrace.execute("delete from status_index where USERNAME = %s ", (usr,))
    connTrace.commit()
def finish_Trace(usr):
    # Append the terminal progress marker for *usr*.
    # NOTE(review): no commit here -- relies on a later commit/autocommit;
    # confirm the connection settings.
    cursorTrace.execute ("insert into status_index(USERNAME,MSG) values ( %s , 'OK-FINAL' ) ",(usr))
def traceQ(progress,usr,pg_numer,job,address,msg ):
    # Record one progress row ("percent|message,Pg:n,Process:p,Address:a")
    # in status_index and echo it to stdout.
    linha=str(progress)+'|'+ msg+ ',Pg:'+str(pg_numer)+',Process:'+str(job)+',Address:'+address
    cursorTrace.execute ("insert into status_index(USERNAME,MSG) values ( %s , %s ) ",(usr,linha))
    print 'TraceQ:===================='
    print linha
    print '==========================='
def mount_node(term,id,purpose):
    # Build the ontology node for *term* via the Identify module; returns
    # [ontology, collected_pages].  Presumably allp is filled in-place by
    # prepare_data_by_ask -- TODO confirm against Identify.
    l=Identify.prepare_layout(id,purpose)
    allp=[]
    onto=Identify.prepare_data_by_ask(l, term,id,purpose,allp )
    return [onto,allp]
# Debug corpus: when non-empty, real searches/fetches are bypassed and these
# strings are used as the fetched document (see prepare_search / pg_open).
entry_doc =[]
#===================
def get_typ(obj,usr2):
    """Return the last TYP flag for (obj, usr2) from DATA_BEHAVIOUR_PY, or 0.

    Uses a parameterized query; the original interpolated obj/usr2 straight
    into the SQL string, which is SQL-injection prone.
    """
    cursor = conn.cursor()
    cursor.execute("select TYP from DATA_BEHAVIOUR_PY where OBJETO=%s and USERNAME=%s order by i",
                   (obj, usr2))
    typ = 0
    for results in cursor.fetchall():
        typ = results[0]
    return typ
#===============================================
def prepare_search_customlayouts(purposes,dts,usr):
def clean_s(strc):
strc=strc.replace('<subsign>','-')
strc=strc.replace('<addsign>','+')
strc=strc.replace('<divsign>','/')
strc=strc.replace('<mulsign>','*')
strc=strc.replace('<cite>','')
strc=strc.replace('<em>','')
strc=strc.replace('</em>','')
strc=strc.replace('</cite>','')
strc=strc.replace('<strong>','')
strc=strc.replace('<string>','')
strc=strc.replace('</string>','')
strc=strc.replace('</strong>','')
strc=strc.replace('<span style=\"text-decoration: underline;\">','')
strc=strc.replace('<span','')
strc=strc.replace('id=\"\">','')
strc=strc.replace('</span>','')
strc=strc.replace('<br>','\n')
strc=strc.replace('<s>','')
strc=strc.replace('</s>','')
return strc
#chamar os codigos
codes_Result=[]
for dt in purposes:
cursor = conn.cursor ()
cursor.execute ("select CODE from DATA_BEHAVIOUR_CODE_PY where OBJETO='"+dt+"' and USERNAME='"+usr+"' order by i")
resultSet = cursor.fetchall()
for results in resultSet:
typ=get_typ(dt,usr)
o=clean_s(results[0])
code=(o)
sr_int_cmd_param=dts
if typ == 1: #executavel
code+=' \n\nretorno_srt=run(sr_int_cmd_param)'
else: # executa somente o codigo
pass
#==================================
try:
exec(code, locals(), locals())
except Exception,e:
print 'Exec Error:',e
if typ == 1: #executavel
# adicionar ao codes_Result o retorno_srt(lines->[endereco,dados] )
if retorno_srt != None:
codes_Result.append( retorno_srt )
#===================
return codes_Result
def prepare_search(dts):
    # Query the mind-net web search endpoint for the given terms and parse
    # the reply: fields are separated by '^', records by '|'.
    # NOTE(review): prepare_search_video/news/reputacao/people below are
    # near copies of this function differing only in the &t= parameter --
    # candidates for a single shared helper.
    if len( entry_doc ) > 0 : # direct debug: skip the network entirely
        return [[['debug-title','debug-url']]]
    rets=[]
    if True :
        qry=''
        for d in dts:
            qry+=d+' '
        query=urllib.quote(qry)
        url_q='http://www.mind-net.com/Neural/request_md3.py/entry?query='+query+'&t=web'
        opener = urllib2.build_opener()
        data_Res = opener.open(url_q, '' ).read()
        lines=[]
        cl=[]
        tmp='';
        kind=1
        for ds in data_Res:
            if ds == '|':
                # record separator
                if len(tmp) > 0 :
                    cl.append(tmp)
                    tmp=''
                lines.append(cl)
                cl=[]
            elif ds == '^':
                # field separator
                cl.append(tmp)
                tmp=''
            else:
                tmp+=ds
        rets.append(lines)
    #
    return rets
def prepare_search_video(dts):
rets=[]
if True :
qry=''
for d in dts:
qry+=d+' '
query=urllib.quote(qry)
url_q='http://www.mind-net.com/Neural/request_md3.py/entry?query='+query+'&t=video'
opener = urllib2.build_opener()
data_Res = opener.open(url_q, '' ).read()
lines=[]
cl=[]
tmp='';
for ds in data_Res:
if ds == '|':
if len(tmp) > 0 :
cl.append(tmp)
tmp=''
lines.append(cl)
cl=[]
elif ds == '^':
cl.append(tmp)
tmp=''
else:
tmp+=ds
rets.append(lines)
#
return rets
def prepare_search_news(dts):
rets=[]
if True :
qry=''
for d in dts:
qry+=d+' '
query=urllib.quote(qry)
url_q='http://www.mind-net.com/Neural/request_md3.py/entry?query='+query+'&t=news'
opener = urllib2.build_opener()
data_Res = opener.open(url_q, '' ).read()
lines=[]
cl=[]
tmp='';
for ds in data_Res:
if ds == '|':
if len(tmp) > 0 :
cl.append(tmp)
tmp=''
lines.append(cl)
cl=[]
elif ds == '^':
cl.append(tmp)
tmp=''
else:
tmp+=ds
rets.append(lines)
#
return rets
def prepare_search_reputacao(dts):
'''
search all data
'''
rets=[]
if True :
qry=''
for d in dts:
qry+=d+' '
query=urllib.quote(qry)
url_q='http://www.mind-net.com/Neural/request_md3.py/entry?query='+query+'&t=blog'
opener = urllib2.build_opener()
data_Res = opener.open(url_q, '' ).read()
lines=[]
cl=[]
tmp='';
for ds in data_Res:
if ds == '|':
if len(tmp) > 0 :
cl.append(tmp)
tmp=''
lines.append(cl)
cl=[]
elif ds == '^':
cl.append(tmp)
tmp=''
else:
tmp+=ds
rets.append(lines)
#
return rets
def prepare_search_people(dts):
'''
search all data
'''
rets=[]
if True :
qry=''
for d in dts:
qry+=d+' '
query=urllib.quote(qry)
url_q='http://www.mind-net.com/Neural/request_md3.py/entry?query='+query+'&t=social'
opener = urllib2.build_opener()
data_Res = opener.open(url_q, '' ).read()
lines=[]
cl=[]
tmp='';
for ds in data_Res:
if ds == '|':
if len(tmp) > 0 :
cl.append(tmp)
tmp=''
lines.append(cl)
cl=[]
elif ds == '^':
cl.append(tmp)
tmp=''
else:
tmp+=ds
rets.append(lines)
#
return rets
class thread_cntl:
    """Per-thread completion flag polled by the main collector loop."""

    def __init__(self):
        self.finished = False
class Task_C:
    """Three-slot record used to pass page data between worker threads."""

    def __init__(self, Dt1=None, Dt2=None, Dt3=None):
        self.dt1 = Dt1
        self.dt2 = Dt2
        self.dt3 = Dt3
cursorpostp = conn.cursor ()
cursorpostl = conn.cursor ()
def post_pagina(endereco,conteudo_i,termo,usr,purp):
    # Persist one fetched page (URL, joined text lines, search term) into
    # WEB_CACHE.  Errors are deliberately swallowed (best-effort insert).
    try:
        conteudo=''
        for l in conteudo_i:
            conteudo+=(l+'\n')
        if umisc.trim(conteudo) != '':
            sql1="insert into WEB_CACHE (URL,PG,TERMO,PURPOSE,USR,SEMA_RESUME) values(%s,%s,%s,%s,%s,'')"
            cursorpostp.execute (sql1,(MySQLdb.escape_string(endereco),MySQLdb.escape_string(conteudo),MySQLdb.escape_string(termo),purp,usr))
    except :
        pass
def fecha_busca3(arrc):
    #==
    # Bulk-delete processed rows from WEB_CACHE_LINKS; each arrc entry must
    # match the (USR, url) placeholder order.
    # NOTE(review): post_pagina2 builds entries as [url, user] -- that maps
    # url->USR here; confirm the intended order against the schema.
    sql1=" delete from WEB_CACHE_LINKS where USR = %s and url= %s "
    cursor = conn.cursor ()
    cursor.executemany (sql1,arrc)
    #==
def post_pagina2(arrc):
    # Bulk insert of fetched pages into WEB_CACHE (best-effort), then drop
    # the corresponding pending rows from WEB_CACHE_LINKS.
    try:
        sql1="insert into WEB_CACHE (URL,PG,TERMO,USR,PURPOSE,SEMA_RESUME) values(%s,%s,%s,%s,%s,'')"
        #cursorpostp.execute (sql1,(MySQLdb.escape_string(endereco),MySQLdb.escape_string(conteudo),MySQLdb.escape_string(termo),usr,purp))
        cursorpostp.executemany(sql1,arrc)
    except:
        pass
    fecha_buscas=[]
    for ed in arrc:
        # endereco appears twice in each entry (URL and TERMO share it).
        [endereco,conteudo_pg,endereco,usr,purp]=ed
        endereco=MySQLdb.escape_string(endereco);
        fecha_buscas.append([endereco,usr])
    fecha_busca3(fecha_buscas)
def post_links2(arrc):
    # Bulk insert of discovered links into WEB_CACHE_LINKS, flagged as
    # unprocessed ('N'); best-effort, errors swallowed.
    try:
        sql1="insert into WEB_CACHE_LINKS (URL,TERMO,USR,PURPOSE,PROCESSED) values(%s,%s,%s,%s,'N')"
        #cursorpostl.execute (sql1,(MySQLdb.escape_string(endereco),MySQLdb.escape_string(termo),usr,purp))
        cursorpostl.executemany(sql1,arrc)
    except:
        pass
def post_links(endereco,termo,usr,purp):
    # Insert a single discovered link as unprocessed ('N'); best-effort.
    try:
        sql1="insert into WEB_CACHE_LINKS (URL,TERMO,PURPOSE,USR,PROCESSED) values(%s,%s,%s,%s,'N')"
        if umisc.trim(endereco) != '':
            cursorpostl.execute (sql1,(MySQLdb.escape_string(endereco),MySQLdb.escape_string(termo),purp,usr))
    except:
        pass
def call_text(address):
    """Extract page text for *address* via the get_Text.php helper.

    The command is passed as an argv list with shell=False: the original
    concatenated the (untrusted) address into a shell string with
    shell=True, allowing shell injection.
    """
    proc = subprocess.Popen(["php", "/home/mindnet/public_html/get_Text.php", "q=" + address],
                            stdout=subprocess.PIPE)
    script_response = proc.stdout.read()
    # Strip the CGI header the PHP helper prepends.
    script_response = script_response.replace('Content-type: text/html\r\n\r\n', '')
    return script_response
def call_links(address):
    """Extract the outbound links of *address* via the get_links.php helper.

    Uses an argv list with shell=False instead of the original shell-string
    command built from the untrusted address (shell-injection risk).
    """
    proc = subprocess.Popen(["php", "/home/mindnet/public_html/get_links.php", "q=" + address],
                            stdout=subprocess.PIPE)
    script_response = proc.stdout.read()
    # Strip the CGI header the PHP helper prepends.
    script_response = script_response.replace('Content-type: text/html\r\n\r\n', '')
    return script_response
def process_termo2(termos,usr,purp,start_c,path_j,layout):
purposes=[]
purposes.append(purp)
#==================================
objs_search=(termos)
#==================================
if len(objs_search)> 0 :
def pg_open(addresss,th,pages,pgind,ind_emit,start_c):
print 'Start read page:',addresss
try:
for address in addresss:
lines_doc=[]
links_k2=[]
if address != 'debug-url':
#======================
#opener = urllib2.build_opener()
address=urllib.quote(address)
#url='http://www.mind-net.com/get_Text.php?q='+address
pg_add=address
#content = opener.open(url, '' ).read()
content=call_text(address)
tmpd=''
for d in content:
if d == '\n':
tmpd=umisc.trim(tmpd)
lines_doc.append(tmpd)
tmpd=''
else:
tmpd+=d
#======================
#opener = urllib2.build_opener()
#url='http://www.mind-net.com/get_links.php?q='+address
#content = opener.open(url, '' ).read()
content=call_links(address)
tmpd=''
for d in content:
if d == '\n':
tmpd=umisc.trim(tmpd)
links_k2.append(tmpd)
tmpd=''
else:
tmpd+=d
#============
pages.append(Task_C(pg_add,lines_doc,links_k2))
print 'Get content for page:',pgind,' was finished.Len:',len(lines_doc),' links count:',len(links_k2)
pgind+=1
else:
for line_deb in entry_doc:
lines_doc.append(line_deb)
pages.append(Task_C(pg_add,lines_doc,links_k2))
print 'Get content for page:',pgind,' was finished.Len:',len(lines_doc)
pgind+=1
th.finished=True
except Exception,er :
print er,'................'
th.finished=True
#=====================================================
cind=0
pages=[]
kind=0
print 'Init Collect...'
#=====================================
indres=0
ths=[]
ths2=[]
#========================
ind_emit=0
thrsd=0
tmp_pages=[]
for res in objs_search:
if thrsd > 200 : continue
pg_add=res
ind_emit+=1
if len(tmp_pages) >=10 :
ths.append(thread_cntl())
ic=ind_emit
tmp_pages2=tmp_pages
tmp_pages=[]
thrsd+=1
try:
thread.start_new_thread(pg_open,(tmp_pages2,ths[len(ths)-1],pages,kind,ic,start_c) )
except:
ths[len(ths)-1].finished=True
pass
print 'Thread :',thrsd
else:
tmp_pages.append(pg_add)
if len(tmp_pages) > 0 :
ths.append(thread_cntl())
ic=ind_emit
tmp_pages2=tmp_pages
tmp_pages=[]
thrsd+=1
print 'Thread :',thrsd
try:
thread.start_new_thread(pg_open,(tmp_pages2,ths[len(ths)-1],pages,kind,ic,start_c) )
except:
ths[len(ths)-1].finished=True
pass
ind_col=0
#=============================
while True:
print 'wait for pages...',len(ths)-ind_col
fnds_t=False
ind_col=0
for ths1 in ths:
if not ths1.finished:fnds_t=True
if ths1.finished: ind_col+=1
if fnds_t:
time.sleep(10)
continue
else:
break
print 'Collect OK!!'
cind2=0
threads_fd=[]
print 'Init Process pages:',len(pages)
pg_insert=[]
pg_links=[]
for pagina in pages:
cind2+=1
if pagina.dt1 == None: continue
print 'Processing page:',cind2
endereco=pagina.dt1
lines_doc=pagina.dt2
links=pagina.dt3
conteudo_pg=''
for l in lines_doc:
conteudo_pg+=(l+'\n')
endereco=MySQLdb.escape_string(endereco)
conteudo_pg=MySQLdb.escape_string(conteudo_pg)
if umisc.trim(conteudo_pg) != '':
pg_insert.append([endereco,conteudo_pg,endereco,usr,purp])
if len(pg_insert)>20:
post_pagina2(pg_insert)
post_links2(pg_links)
pg_insert=[]
pg_links=[]
for ln in links:
ln=MySQLdb.escape_string(ln)
pg_links.append([ln,endereco,usr,purp])
if len(pg_insert)> 0 :
post_pagina2(pg_insert)
post_links2(pg_links)
def process_termo(termo,usr,purp,start_c,path_j,layout):
objs_search=[]
purposes=[]
purposes.append(purp)
#==================================
objs_search.append(termo)
#==================================
if len(objs_search) > 0 :
def pg_open(addresss,th,pages,pgind,ind_emit,start_c):
print 'Start read page:',addresss
try:
for address in addresss:
lines_doc=[]
links_k2=[]
if address != 'debug-url':
#======================
opener = urllib2.build_opener()
address=urllib.quote(address)
url='http://www.mind-net.com/get_Text.php?q='+address
content = opener.open(url, '' ).read()
tmpd=''
for d in content:
if d == '\n':
tmpd=umisc.trim(tmpd)
lines_doc.append(tmpd)
tmpd=''
else:
tmpd+=d
#======================
opener = urllib2.build_opener()
url='http://www.mind-net.com/get_links.php?q='+address
content = opener.open(url, '' ).read()
tmpd=''
for d in content:
if d == '\n':
tmpd=umisc.trim(tmpd)
links_k2.append(tmpd)
tmpd=''
else:
tmpd+=d
#============
pages.append(Task_C(pg_add,lines_doc,links_k2))
print 'Get content for page:',pgind,' was finished.Len:',len(lines_doc),' links count:',len(links_k2)
pgind+=1
else:
for line_deb in entry_doc:
lines_doc.append(line_deb)
pages.append(Task_C(pg_add,lines_doc,links_k2))
print 'Get content for page:',pgind,' was finished.Len:',len(lines_doc)
pgind+=1
th.finished=True
except Exception,er :
print er,'................'
th.finished=True
#=====================================================
results_search=[]
results_reputac=[]
results_peop=[]
results_news=[]
results_videos=[]
custom_layouts=[]
#=====================================================
if 'web' in purposes:
results_search=prepare_search(objs_search)
elif 'reputacao' in purposes:
results_reputac=prepare_search_reputacao(objs_search)
elif 'people' in purposes:
results_peop=prepare_search_people(objs_search)
elif 'news' in purposes:
results_news=prepare_search_news(objs_search)
elif 'video' in purposes:
results_videos=prepare_search_video(objs_search)
elif 'url' in purposes:
results_search = [[['',objs_search[0]]]] #busca direta por url
else:
custom_layouts=prepare_search_customlayouts(purposes,objs_search,usr)
#---
result_onto_tree_er=[] # caracteristicas,descricoes,etc...
result_onto_tree_bpm=[] # fluxo de acoes, sequencia de actions
result_linked=[]
# montar ontologia dos resultados
cind=0
pages=[]
kind=0
print 'Init Collect...'
#=====================================
indres=0
ths=[]
ths2=[]
#========================
ind_emit=0
tmp_pages=[]
for res in results_search:
indaddrs=0
for addrs in res:
pg_add=addrs[1]
ind_emit+=1
if len(tmp_pages) >=5 :
ths.append(thread_cntl())
ic=ind_emit
tmp_pages2=tmp_pages
tmp_pages=[]
thread.start_new_thread(pg_open,(tmp_pages2,ths[len(ths)-1],pages,kind,ic,start_c) )
else:
tmp_pages.append(pg_add)
cind+=1
indres+=1
if len(tmp_pages) > 0 :
ths.append(thread_cntl())
ic=ind_emit
tmp_pages2=tmp_pages
tmp_pages=[]
thread.start_new_thread(pg_open,(tmp_pages2,ths[len(ths)-1],pages,kind,ic,start_c) )
ind_col=0
#=============================
while True:
print 'wait for pages...',len(ths)-ind_col
fnds_t=False
ind_col=0
for ths1 in ths:
if not ths1.finished:fnds_t=True
if ths1.finished: ind_col+=1
if fnds_t:
time.sleep(10)
continue
else:
break
#=====================================
cind=0
for res in results_reputac:
for addrs in res:
pg_add=addrs[1]
pages.append(Task_C(pg_add,addrs[2]))
#==============
cind+=1
#=====================================
cind=0
for res in results_peop:
for addrs in res:
pg_add=addrs[1]
pages.append(Task_C(pg_add,addrs[2]))
#==============
cind+=1
#=====================================
cind=0
for res in results_news:
for addrs in res:
pg_add=addrs[1]
pages.append(Task_C(pg_add,addrs[2]))
#==============
cind+=1
#=====================================
cind=0
for res in results_videos:
for addrs in res:
pg_add=addrs[1]
pages.append(Task_C(pg_add,addrs[2]))
#==============
cind+=1
#=====================================
cind=0
for res in custom_layouts:
for addrs in res:
pg_add=addrs[0]
pages.append(Task_C(pg_add,addrs[1]))
#==============
cind+=1
#=====================================
print 'Collect OK!!',len(results_reputac),',',len(results_peop),',',len(results_news),',',len(results_videos),',',len(custom_layouts)
cind2=0
threads_fd=[]
print 'Init Process pages:',len(pages)
for pagina in pages:
cind2+=1
if pagina.dt1 == None: continue
endereco=pagina.dt1
lines_doc=pagina.dt2
links=pagina.dt3
post_pagina(endereco,lines_doc,endereco,usr,purp)
for ln in links:
post_links(ln,endereco,usr,purp)
progress=(cind2/len(pages))
traceQ(progress,usr,cind2,1,endereco,'Processed page:' )
#===
clean_Trace(usr)
finish_Trace(usr)
def fecha_busca(termo,username):
    #==
    # Mark one knowledge_manager term as searched (srched='S').
    sql1=" update knowledge_manager set srched='S' where USERNAME = %s and dt= %s "
    cursor = conn.cursor ()
    cursor.execute (sql1,( username,MySQLdb.escape_string(termo)))
    #==
def fecha_busca2(termo,username):
    #==
    # Delete one pending link row for (username, url=termo).
    sql1=" delete from WEB_CACHE_LINKS where USR = %s and url= %s "
    cursor = conn.cursor ()
    cursor.execute (sql1,( username,MySQLdb.escape_string(termo)))
    #==
def limpa_resultados_anteriores(termo,username,purp):
    #==
    # Drop previously cached results for this (term, user, purpose) before
    # re-running the search.
    sql1="delete from WEB_CACHE where TERMO= %s and USR=%s and PURPOSE= %s "
    cursor = conn.cursor ()
    cursor.execute (sql1,(MySQLdb.escape_string(termo),username,purp))
    #==
def abre_buscas():
    # Reset the pipeline: re-open every typ=2 search and wipe both caches.
    cursor = conn.cursor ()
    cursor.execute ("update knowledge_manager set srched='N' where typ=2 ")
    #================
    cursor = conn.cursor ()
    cursor.execute ("delete from WEB_CACHE ")
    #================
    cursor = conn.cursor ()
    cursor.execute ("delete from WEB_CACHE_LINKS ")
def process_sentences(start_c):
    # Pull up to 50 pending search terms (typ=2, srched='N') from
    # knowledge_manager, clear their old results, run the search pipeline
    # for each, and mark them done.
    cursor = conn.cursor ()
    cursor.execute ("SELECT USERNAME,DT,LAYOUT_ONTO,DEST FROM knowledge_manager where typ=2 and srched='N' LIMIT 0 , 50 ") # 50 rows per batch
    resultSet = cursor.fetchall()
    for results in resultSet:
        username=results[0]
        termo=results[1]
        layout=results[2]
        dest=results[3]
        #r1.append([username,termo,layout,dest])
        limpa_resultados_anteriores(termo,username,dest)
        process_termo(termo,username,dest,start_c,sys.argv[0],layout)
        #=== close out the term just processed
        fecha_busca(termo,username)
def process_sentences2(start_c):
    # Second pass: for each user owning typ=2 terms, fetch up to 1000
    # pending link URLs from WEB_CACHE_LINKS and run the bulk pipeline.
    def get_dist_usr():
        # Distinct usernames that own typ=2 search terms.
        rt=[]
        cursor = conn.cursor ()
        cursor.execute ("SELECT distinct USERNAME FROM knowledge_manager where typ=2 ") #
        resultSet = cursor.fetchall()
        for results in resultSet:
            username=results[0]
            rt.append(username)
        return rt
    usrs=get_dist_usr()
    if True:
        for us in usrs:
            url=[]
            cursor = conn.cursor ()
            cursor.execute ("SELECT USR,URL,TERMO FROM WEB_CACHE_LINKS where USR=%s LIMIT 0 , 1000 ",MySQLdb.escape_string(us)) # 1000 rows per batch
            resultSet = cursor.fetchall()
            username=''
            for results in resultSet:
                username=results[0]
                url.append(results[1])
                layout=results[2]
            dest='url'
            if len(url) > 0 :
                process_termo2(url,username,dest,start_c,sys.argv[0],layout)
            #=== close out the links just processed
# CLI: argv[1] > 0 -> rerun the term searches from scratch;
#      argv[2] > 0 -> also index the collected WEB_CACHE_LINKS URLs.
start_c=0
reindex=False
index_all_of=False  # whether to also index the web_cache_url entries
if len(sys.argv) > 1:
    reindex= int(sys.argv[1]) >0
if len(sys.argv) > 2:
    index_all_of= int(sys.argv[2]) >0
if reindex:
    abre_buscas()
    process_sentences(start_c)
if index_all_of:
    process_sentences2(start_c)
|
20,264 | cadcdd56c237c0206fa1502a2d4c4e7fd951c127 | from flask import Flask, render_template, request, redirect, flash
import ftplib
from pathlib import Path
from helpers.index import ls_trim
app = Flask(__name__)
# NOTE(review): hard-coded secret key -- load from config/env in production.
app.secret_key = '1234'
# Module-level FTP session shared by all routes (single-user app).
client = None
@app.route('/')
def index():
    # Landing page.
    return render_template('index.html')
@app.route('/login', methods=['POST', 'GET'])
def login():
    # POST: open an FTP session with the submitted credentials and go to
    # the browser view; GET: show the login form.
    global client
    if request.method == 'POST':
        form = request.form
        client = ftplib.FTP(form['host'], form['user'], form['password'])
        return redirect('/main')
    else:
        return render_template('login.html')
@app.route('/main', methods=['GET'])
def main():
    """Render the current working directory of the active FTP session."""
    current_dir = client.pwd()
    # Synthetic first entry so the template always offers "..".
    entries = ['d back to parent dir ..']
    client.dir(entries.append)
    trimmed = ls_trim(entries)
    return render_template('main.html', pwd=current_dir, ls=trimmed)
@app.route('/command', methods=['GET'])
def command():
    """Run one FTP action chosen by the ?cmd= query parameter.

    Supported cmds: cwd (change dir), rmdir, rm (delete file),
    get (download ?arg= into ~/Downloads). Unknown cmds are ignored.
    Always redirects back to /main.
    """
    arg = request.args['arg']
    cmd = request.args['cmd']
    if cmd == 'cwd':
        client.cwd(arg)
    elif cmd == 'rmdir':
        client.rmd(arg)
    elif cmd == 'rm':
        client.delete(arg)
    elif cmd == 'get':
        # SECURITY FIX: `arg` comes straight from the query string; use only
        # its basename for the local path so "../" sequences cannot escape
        # ~/Downloads (the remote RETR still receives the original arg).
        local_name = Path(arg).name
        with open(f'{Path.home()}/Downloads/{local_name}', 'wb') as f:
            client.retrbinary(f'RETR {arg}', f.write)
        flash(f'successfully get {arg} at ~/Downloads/')
    return redirect('/main')
20,265 | c35d128bb9258b160b43b82e22471271a75fea2e | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import gamer_pb2 as gamer__pb2
# Generated client stub for the gamer.Gamer service — do not hand-edit;
# regenerate from the .proto instead.
class GamerStub(object):
  # missing associated documentation comment in .proto file
  pass

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    # Each callable sends one request message and waits for one response.
    self.Ping = channel.unary_unary(
        '/gamer.Gamer/Ping',
        request_serializer=gamer__pb2.PingMessage.SerializeToString,
        response_deserializer=gamer__pb2.PingMessage.FromString,
        )
    self.Action = channel.unary_unary(
        '/gamer.Gamer/Action',
        request_serializer=gamer__pb2.ActionInput.SerializeToString,
        response_deserializer=gamer__pb2.ActionOutput.FromString,
        )
# Generated base servicer for the gamer.Gamer service; subclass and override
# the methods to implement the server side.
class GamerServicer(object):
  # missing associated documentation comment in .proto file
  pass

  def Ping(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    # Default implementation: report UNIMPLEMENTED to the client.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def Action(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
# Generated registration helper: wires a GamerServicer implementation into a
# grpc.Server under the 'gamer.Gamer' service name.
def add_GamerServicer_to_server(servicer, server):
  rpc_method_handlers = {
      'Ping': grpc.unary_unary_rpc_method_handler(
          servicer.Ping,
          request_deserializer=gamer__pb2.PingMessage.FromString,
          response_serializer=gamer__pb2.PingMessage.SerializeToString,
      ),
      'Action': grpc.unary_unary_rpc_method_handler(
          servicer.Action,
          request_deserializer=gamer__pb2.ActionInput.FromString,
          response_serializer=gamer__pb2.ActionOutput.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'gamer.Gamer', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
|
20,266 | d3c3838b74d7a26faffc833f9e3e8167b7b8434c |
# Merge Complete Genomics and Illumina "testvariants" calls into one file,
# tagging every row with its originating platform (cg / il).
#cg = "/work/projects/isbsequencing/luhmes_cell_line/users/akrishna/analysis/snv/gvcf/GS000022396.list.tested.LP6008092-DNA_G09.genome.testvariants.genotype"
cg = "/work/projects/isbsequencing/luhmes_cell_line/users/akrishna/analysis/snv/gvcf/GS000022396.list.tested.vqhigh.LP6008092-DNA_G09.genome.testvariants.genotype"
il = "/work/projects/isbsequencing/luhmes_cell_line/users/akrishna/analysis/snv/LP6008092-DNA_G09.genome.vcf.filt.testvariants.tested"
output = "/work/projects/isbsequencing/luhmes_cell_line/users/akrishna/analysis/snv/merged/merged.testvariants.tested"

# FIX: the output handle was never closed (and `varid` was never used);
# a with-block guarantees the buffer is flushed even on error.
with open(output, 'w') as out:
    with open(cg) as a:
        # Header row from the CG file, extended with a "platform" column.
        out.write(next(a)[:-1] + "\tplatform\n")
        for line in a:
            out.write(line[:-1] + "\tcg\n")
    with open(il) as b:
        next(b)  # skip the Illumina header; the CG header is already written
        for line in b:
            line = line[:-1].split("\t")
            # Swap the last two columns so they line up with the CG layout.
            out.write("\t".join(line[:-2] + line[-1:-3:-1] + ["il"]) + "\n")
|
20,267 | 1a86a40275b3a6eead1bd271d99204d22d4ee5f3 | #! Python 3.7.1
# 2018/12/29
# 開発担当者 川本孝太朗
# WelIcingの距離の管理
import sqlite3
from contextlib import closing
import requests
import re
# -------------------------------------------------------------------------------------
# この形式だとデータベースはすっきりする代わりに通信量が増え,遅延が1秒程度発生する.
# 探索の計算オーダーはO(n)登録するデータはn個
# 登録の計算オーダーはO(1)
# 全組み合わせを登録する場合は探索の計算オーダーはO(n*n)登録するデータはn*(n-1)/2個
# 登録の計算オーダーはO(n*n)データベースは複雑になり,登録に恐ろしく時間がかかる
# マルチスレッドにすれば登録時間を改善できるが,それこそ通信料がバカにならない
# -------------------------------------------------------------------------------------
# sqliteの使い方: https://qiita.com/kawa-Kotaro/items/9933f56abd53a09826d9
# ToDo 変な入力の時の例外処理
# ToDo エラー回避のtry except
# Create the device table.
def create_table(dbname, table_name):
    """Create the device table (id, postal_code, name) in *dbname*.

    BUG FIX: SQL parameter markers (?) can only bind values, never
    identifiers, so 'create table ?' always raised OperationalError.
    The table name is validated and interpolated instead.
    """
    if not table_name.isidentifier():
        raise ValueError('invalid table name: {}'.format(table_name))
    with closing(sqlite3.connect(dbname)) as connection:
        cursor = connection.cursor()
        sql = ('create table {} (id int, postal_code varchar(64), '
               'name varchar(1024))'.format(table_name))
        cursor.execute(sql)
        connection.commit()
# Fetch one device's rows from the database.
def get_data(dbname, table_name, idnum):
    """Return all rows of *table_name* whose id equals *idnum*.

    BUG FIX: the table name was bound as a ? parameter, which SQLite does
    not support for identifiers; it is now validated and interpolated,
    while idnum stays a bound parameter.
    """
    if not table_name.isidentifier():
        raise ValueError('invalid table name: {}'.format(table_name))
    records = []
    with closing(sqlite3.connect(dbname)) as connection:
        cursor = connection.cursor()
        select_sql = 'select * from {} where id = ?'.format(table_name)
        for row in cursor.execute(select_sql, (idnum,)):
            records.append(row)
    return records
# Fetch every registered device from the database.
def get_all_data(dbname, table_name):
    """Return every row of *table_name* as a list of tuples.

    BUG FIX: 'select * from ?' cannot bind a table name; the identifier is
    validated and interpolated instead.
    """
    if not table_name.isidentifier():
        raise ValueError('invalid table name: {}'.format(table_name))
    records = []
    with closing(sqlite3.connect(dbname)) as connection:
        cursor = connection.cursor()
        select_sql = 'select * from {}'.format(table_name)
        for row in cursor.execute(select_sql):
            records.append(row)
    return records
# Update an existing device's record.
def update_data(dbname, table_name, idnum, postal_code, place_name):
    """Update the postal_code and name of device *idnum*.

    BUG FIX: the original SQL ('update ? postal_code=? name=? where id=?')
    was missing the SET keyword and the comma between assignments, and
    tried to bind the table name as a parameter — it could never execute.
    """
    if not table_name.isidentifier():
        raise ValueError('invalid table name: {}'.format(table_name))
    with closing(sqlite3.connect(dbname)) as connection:
        cursor = connection.cursor()
        sql = 'update {} set postal_code=?, name=? where id=?'.format(table_name)
        cursor.execute(sql, (postal_code, place_name, idnum))
        connection.commit()
# Register a new device in the database.
def insert_data(dbname, table_name, idnum, postal_code, place_name):
    """Insert a device row (id, postal_code, name) into *table_name*.

    BUG FIX: 'insert into ? ...' cannot bind the table name; the
    identifier is validated and interpolated, values stay parameterized.
    """
    if not table_name.isidentifier():
        raise ValueError('invalid table name: {}'.format(table_name))
    with closing(sqlite3.connect(dbname)) as connection:
        cursor = connection.cursor()
        sql = 'insert into {} (id, postal_code, name) values (?,?,?)'.format(table_name)
        cursor.execute(sql, (idnum, postal_code, place_name))
        connection.commit()
# Remove a device from the database.
def delete_data(dbname, table_name, idnum):
    """Delete the device row whose id equals *idnum*.

    BUG FIXES: the WHERE clause referenced a nonexistent column `idnum`
    (the schema column is `id`), and the table name was bound as a ?
    parameter, which SQLite rejects for identifiers.
    """
    if not table_name.isidentifier():
        raise ValueError('invalid table name: {}'.format(table_name))
    with closing(sqlite3.connect(dbname)) as connection:
        cursor = connection.cursor()
        sql = 'delete from {} where id = ?'.format(table_name)
        cursor.execute(sql, (idnum,))
        connection.commit()
# Fetch a Google Maps directions page and scrape the route distance from it.
def request_distance(map_url):
    """Return the route distance in metres scraped from *map_url*.

    NOTE(review): this screen-scrapes the Google Maps HTML; it assumes the
    first "<number> m" / "<number> km" match on the page is the route
    distance — confirm against the current page markup before relying on it.
    Raises IndexError when no distance-looking text is found.
    """
    res = requests.get(map_url)
    # Matches e.g. "1.2 km" or "350 m".
    pattern = r'\d\d*\.?\d* k?m'
    regex = re.compile(pattern)
    matches = re.findall(regex, res.text)
    if 'k' in matches[0]:
        distance = float(matches[0][0:-3])
        distance *= 1000  # convert a km reading to metres
    else:
        distance = float(matches[0][0:-2])
    return distance
# Given two device ids, return the distance between them in metres.
def get_distance(idnum1, idnum2):
    """Look up both devices in check_point.db, build a Google Maps
    directions URL from their names and postal codes, and return the
    scraped distance in metres."""
    db_file = 'check_point.db'
    table = 'check_point'
    src = get_data(db_file, table, idnum1)[0]
    dst = get_data(db_file, table, idnum2)[0]
    url_template = 'https://www.google.co.jp/maps/dir/{}、〒{}/{}、〒{}/'
    route_url = url_template.format(src[2], src[1], dst[2], dst[1])
    return request_distance(route_url)
if __name__ == '__main__':
    # Interactive maintenance menu for the device (check point) database.
    db = 'check_point.db'
    table = 'check_point'
    print('端末の設定を開始します')
    while True:
        # Re-read the device list every iteration so edits are visible.
        devices = get_all_data(db, table)
        print('\n\n何をしますか?数字で入力してください')
        print('1.端末情報の表示\n'
              '2.端末の追加\n'
              '3.端末情報の更新\n'
              '4.端末の削除\n'
              '5.終了')
        choice = int(input('選択:'))
        if choice == 1:
            # Show every registered device.
            if devices:
                print('登録している端末は以下の通りです')
                for device in devices:
                    print('\n端末番号{}'.format(device[0]))
                    print('〒{} 場所:{}'.format(device[1], device[2]))
            else:
                print('登録している端末はありません')
        elif choice == 2:
            # Register a new device.
            device_num = int(input('追加する端末の端末番号を入力してください:'))
            post = input('郵便番号を入力してください. (入力形式:123-4567):〒')
            place = input('登録場所の名前を入力してください:')
            insert_data(db, table, device_num, post, place)
            print('端末を登録しました')
        elif choice == 3:
            # Update an existing device; list first so the user can pick.
            if devices:
                print('登録している端末は以下の通りです')
                for device in devices:
                    print('\n端末番号{}'.format(device[0]))
                    print('〒{} 場所:{}'.format(device[1], device[2]))
            else:
                print('登録している端末はありません')
                continue
            device_num = int(input('\n更新する端末の端末番号を入力してください:'))
            post = input('郵便番号を入力してください. (入力形式:123-4567):〒')
            place = input('登録場所の名前を入力してください:')
            update_data(db, table, device_num, post, place)
            print('端末情報を更新しました')
        elif choice == 4:
            # Delete a device; list first so the user can pick.
            if devices:
                print('登録している端末は以下の通りです')
                for device in devices:
                    print('\n端末番号{}'.format(device[0]))
                    print('〒{} 場所:{}'.format(device[1], device[2]))
            else:
                print('登録している端末はありません')
                continue
            print('削除する端末の端末番号を入力してください')
            device_num = int(input('番号:'))
            delete_data(db, table, device_num)
            print('データを削除しました')
        else:
            # Any other number exits the menu.
            print('設定を終了します')
            break
|
20,268 | e7160f4c5bc67db7b3aa60321a451edf79d887f5 | from flask import Flask, request, render_template_string, stream_template_string
app = Flask(__name__)
@app.route("/test_taint/<name>/<int:number>") # $routeSetup="/test_taint/<name>/<int:number>"
def test_taint(name = "World!", number="0", foo="foo"): # $requestHandler routedParameter=name routedParameter=number
    """CodeQL taint-tracking test fixture.

    The trailing `# $ ...` comments are test annotations consumed by the
    CodeQL test harness — they must not be edited or removed.
    """
    ensure_tainted(name, number) # $ tainted
    ensure_not_tainted(foo)

    # Manually inspected all fields of the Request object
    # https://flask.palletsprojects.com/en/1.1.x/api/#flask.Request
    ensure_tainted(
        request.environ, # $ tainted
        request.environ.get('HTTP_AUTHORIZATION'), # $ tainted

        request.path, # $ tainted
        request.full_path, # $ tainted
        request.base_url, # $ tainted
        request.url, # $ tainted

        # These request.accept_* properties are instances of subclasses of werkzeug.datastructures.Accept
        request.accept_charsets.best, # $ MISSING: tainted
        request.accept_charsets.best_match(["utf-8", "utf-16"]), # $ MISSING: tainted
        request.accept_charsets[0], # $ tainted
        request.accept_encodings, # $ tainted
        request.accept_languages, # $ tainted
        request.accept_mimetypes, # $ tainted

        # werkzeug.datastructures.HeaderSet (subclass of collections_abc.MutableSet)
        request.access_control_request_headers, # $ tainted
        request.access_control_request_method, # $ tainted

        request.access_route, # $ tainted
        request.access_route[0], # $ tainted

        # By default werkzeug.datastructures.ImmutableMultiDict -- although can be changed :\
        request.args, # $ tainted
        request.args['key'], # $ tainted
        request.args.get('key'), # $ tainted
        request.args.getlist('key'), # $ tainted

        # werkzeug.datastructures.Authorization (a dict, with some properties)
        request.authorization, # $ tainted
        request.authorization['username'], # $ tainted
        request.authorization.username, # $ tainted
        request.authorization.password, # $ tainted
        request.authorization.realm, # $ tainted
        request.authorization.nonce, # $ tainted
        request.authorization.uri, # $ tainted
        request.authorization.nc, # $ tainted
        request.authorization.cnonce, # $ tainted
        request.authorization.response, # $ tainted
        request.authorization.opaque, # $ tainted
        request.authorization.qop, # $ tainted

        # werkzeug.datastructures.RequestCacheControl
        request.cache_control, # $ tainted
        # These should be `int`s, but can be strings... see debug method below
        request.cache_control.max_age, # $ MISSING: tainted
        request.cache_control.max_stale, # $ MISSING: tainted
        request.cache_control.min_fresh, # $ MISSING: tainted

        request.content_encoding, # $ tainted
        request.content_md5, # $ tainted
        request.content_type, # $ tainted

        # werkzeug.datastructures.ImmutableTypeConversionDict (which is basically just a dict)
        request.cookies, # $ tainted
        request.cookies['key'], # $ tainted

        request.data, # $ tainted

        # a werkzeug.datastructures.MultiDict, mapping [str, werkzeug.datastructures.FileStorage]
        request.files, # $ tainted
        request.files['key'], # $ tainted
        request.files['key'].filename, # $ tainted
        request.files['key'].stream, # $ tainted
        request.files['key'].read(), # $ tainted
        request.files['key'].stream.read(), # $ tainted
        request.files.get('key'), # $ tainted
        request.files.get('key').filename, # $ tainted
        request.files.get('key').stream, # $ tainted
        request.files.getlist('key'), # $ tainted
        request.files.getlist('key')[0].filename, # $ tainted
        request.files.getlist('key')[0].stream, # $ tainted

        # By default werkzeug.datastructures.ImmutableMultiDict -- although can be changed :\
        request.form, # $ tainted
        request.form['key'], # $ tainted
        request.form.get('key'), # $ tainted
        request.form.getlist('key'), # $ tainted

        request.get_data(), # $ tainted
        request.get_json(), # $ tainted
        request.get_json()['foo'], # $ tainted
        request.get_json()['foo']['bar'], # $ tainted

        # werkzeug.datastructures.EnvironHeaders,
        # which has same interface as werkzeug.datastructures.Headers
        request.headers, # $ tainted
        request.headers['key'], # $ tainted
        request.headers.get('key'), # $ tainted
        request.headers.get_all('key'), # $ tainted
        request.headers.getlist('key'), # $ tainted
        # popitem returns `(key, value)`
        request.headers.popitem(), # $ tainted
        request.headers.popitem()[0], # $ tainted
        request.headers.popitem()[1], # $ tainted
        # two ways to get (k, v) lists
        list(request.headers), # $ tainted
        request.headers.to_wsgi_list(), # $ tainted

        request.json, # $ tainted
        request.json['foo'], # $ tainted
        request.json['foo']['bar'], # $ tainted

        request.method, # $ tainted
        request.mimetype, # $ tainted
        request.mimetype_params, # $ tainted
        request.origin, # $ tainted

        # werkzeug.datastructures.HeaderSet (subclass of collections_abc.MutableSet)
        request.pragma, # $ tainted

        request.query_string, # $ tainted
        request.referrer, # $ tainted
        request.remote_addr, # $ tainted
        request.remote_user, # $ tainted

        # file-like object
        request.stream, # $ tainted
        request.input_stream, # $ tainted

        request.url, # $ tainted
        request.user_agent, # $ tainted

        # werkzeug.datastructures.CombinedMultiDict, which is basically just a werkzeug.datastructures.MultiDict
        request.values, # $ tainted
        request.values['key'], # $ tainted
        request.values.get('key'), # $ tainted
        request.values.getlist('key'), # $ tainted

        # dict
        request.view_args, # $ tainted
        request.view_args['key'], # $ tainted
        request.view_args.get('key'), # $ tainted
    )

    ensure_not_tainted(
        request.script_root,
        request.url_root,

        # The expected charset for parsing request data / urls. Can not be changed by client.
        # https://github.com/pallets/werkzeug/blob/4dc8d6ab840d4b78cbd5789cef91b01e3bde01d5/src/werkzeug/wrappers/base_request.py#L71-L72
        request.charset,
        request.url_charset,

        # request.date is a parsed `datetime`
        # https://github.com/pallets/werkzeug/blob/4dc8d6ab840d4b78cbd5789cef91b01e3bde01d5/src/werkzeug/wrappers/common_descriptors.py#L76-L83
        request.date,

        # Assuming that endpoints are not created by user-input seems fair
        request.endpoint,

        # In some rare circumstances a client could spoof the host, but by default they
        # should not be able to. See
        # https://werkzeug.palletsprojects.com/en/1.0.x/wrappers/#werkzeug.wrappers.BaseRequest.trusted_hosts
        request.host,
        request.host_url,

        request.scheme,
        request.script_root,
    )

    # Testing some more tricky data-flow still works
    a = request.args
    b = a
    gl = b.getlist
    files = request.files
    ensure_tainted(
        request.args, # $ tainted
        a, # $ tainted
        b, # $ tainted

        request.args['key'], # $ tainted
        a['key'], # $ tainted
        b['key'], # $ tainted

        request.args.getlist('key'), # $ tainted
        a.getlist('key'), # $ tainted
        b.getlist('key'), # $ tainted
        gl('key'), # $ tainted

        files.get('key').filename, # $ tainted
    )

    # aliasing tests
    req = request
    gd = request.get_data
    ensure_tainted(
        req.path, # $ tainted
        gd(), # $ tainted
    )

    # ----------------------------------
    # non-request related taint-steps
    # ----------------------------------

    # render_template_string
    source = TAINTED_STRING
    ensure_tainted(source) # $ tainted
    res = render_template_string(source)
    ensure_tainted(res) # $ tainted

    # since template variables are auto-escaped, we don't treat result as tainted
    # see https://flask.palletsprojects.com/en/2.3.x/api/#flask.render_template_string
    res = render_template_string("Hello {{ foo }}", foo=TAINTED_STRING)
    ensure_not_tainted(res)

    # stream_template_string
    source = TAINTED_STRING
    ensure_tainted(source) # $ tainted
    res = stream_template_string(source)
    for x in res:
        ensure_tainted(x) # $ tainted

    # since template variables are auto-escaped, we don't treat result as tainted
    # see https://flask.palletsprojects.com/en/2.3.x/api/#flask.stream_template_string
    res = stream_template_string("Hello {{ foo }}", foo=TAINTED_STRING)
    for x in res:
        ensure_not_tainted(x)
@app.route("/debug/<foo>/<bar>", methods=['GET']) # $routeSetup="/debug/<foo>/<bar>"
def debug(foo, bar): # $requestHandler routedParameter=foo routedParameter=bar
    """Fixture endpoint: dump selected request attributes to stdout."""
    print("request.view_args", request.view_args)
    print("request.headers {!r}".format(request.headers))
    print("request.headers['accept'] {!r}".format(request.headers['accept']))
    print("request.pragma {!r}".format(request.pragma))
    return 'ok' # $HttpResponse
@app.route("/stream", methods=['POST']) # $routeSetup="/stream"
def stream(): # $requestHandler
    """Fixture endpoint: read the (safe, length-limited) request stream."""
    print(request.path)
    s = request.stream
    print(s)
    # just works :)
    print(s.read())
    return 'ok' # $HttpResponse
@app.route("/input_stream", methods=['POST']) # $routeSetup="/input_stream"
def input_stream(): # $requestHandler
    """Fixture endpoint: read the raw WSGI input stream (unbounded)."""
    print(request.path)
    s = request.input_stream
    print(s)
    # hangs until client stops connection, since max number of bytes to read must
    # be handled manually
    print(s.read())
    return 'ok' # $HttpResponse
@app.route("/form", methods=['POST']) # $routeSetup="/form"
def form(): # $requestHandler
    """Fixture endpoint: echo the parsed form data to stdout."""
    print(request.path)
    print("request.form", request.form)
    return 'ok' # $HttpResponse
@app.route("/cache_control", methods=['POST']) # $routeSetup="/cache_control"
def cache_control(): # $requestHandler
    """Fixture endpoint: show that Cache-Control numeric fields may arrive as strings."""
    print(request.path)
    print("request.cache_control.max_age", request.cache_control.max_age, type(request.cache_control.max_age))
    print("request.cache_control.max_stale", request.cache_control.max_stale, type(request.cache_control.max_stale))
    print("request.cache_control.min_fresh", request.cache_control.min_fresh, type(request.cache_control.min_fresh))
    return 'ok' # $HttpResponse
@app.route("/file_upload", methods=['POST']) # $routeSetup="/file_upload"
def file_upload(): # $requestHandler
    """Fixture endpoint: print metadata for every uploaded file."""
    print(request.path)
    for k,v in request.files.items():
        print(k, v, v.name, v.filename, v.stream)
    return 'ok' # $HttpResponse
@app.route("/args", methods=['GET']) # $routeSetup="/args"
def args(): # $requestHandler
    """Fixture endpoint: echo the query-string arguments to stdout."""
    print(request.path)
    print("request.args", request.args)
    return 'ok' # $HttpResponse
# curl --header "My-Header: some-value" http://localhost:5000/debug/fooval/barval
# curl --header "Pragma: foo, bar" --header "Pragma: stuff, foo" http://localhost:5000/debug/fooval/barval
# curl -X POST --data 'wat' http://localhost:5000/stream
# curl -X POST --data 'wat' http://localhost:5000/input_stream
# curl --form foo=foo --form foo=123 http://localhost:5000/form
# curl --header "Cache-Control: max-age=foo, max-stale=bar, min-fresh=baz" http://localhost:5000/cache_control
# curl --header "Cache-Control: max-age=1, max-stale=2, min-fresh=3" http://localhost:5000/cache_control
# curl -F myfile=@<some-file> localhost:5000/file_upload
# curl http://localhost:5000/args?foo=42&bar=bar
if __name__ == "__main__":
    # Development server only — this module is a test fixture.
    app.run(debug=True)
|
20,269 | b389d17584bfa47424fe070d8389122da4e53952 | import requests
def main():
    """Fetch the USD→EUR exchange rate from fixer.io and print the JSON payload."""
    res = requests.get("http://api.fixer.io/latest/?base=USD&symbols=EUR")
    if res.status_code != 200:
        # FIX: previously the error status was printed but the body was
        # parsed anyway; bail out instead of crashing on a non-JSON error
        # response.
        print(res.status_code)
        return
    data = res.json()
    print(data)
# Run only when executed as a script.
if __name__ == '__main__':
    main()
|
20,270 | b50314f433744a8b1ddc5431cf239d1edf97b2d4 | #!/usr/bin/python3
import socket
# The main method: connect to the CTF server, answer ten decryption
# challenges, and print the final flag.
def main():
    """Drive the challenge protocol over a raw TCP socket.

    NOTE(review): each recv(1024) assumes a full server message arrives in
    one read — confirm the protocol framing before reuse.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(("capoditutticapi01.3dsctf.org", 8001)) # Connect to host
    data = s.recv(1024).decode("utf-8")
    print(data, end='')
    s.send(str.encode("start"))
    print("start")
    # Iterate through the Pages
    for _ in range(10):
        data = s.recv(1024).decode("utf-8")
        print(data, end='')
        data = s.recv(1024).decode("utf-8") # Get the question
        print(data, end='')
        crt = extractTuple(data) # Get the tuple [c, r, t]
        ptxt = substituteCipher(caesarShift(crt))[2] # Apply decryption algo
        print(ptxt)
        s.send(str.encode(ptxt)) # Send the answer
        data = s.recv(1024).decode("utf-8")
        print(data, end='')
    data = s.recv(1024).decode("utf-8")
    print(data, end='') # Print the flag
# Rotate the cipher alphabet (ROT-style shift).
def caesarShift(s):
    """Given [alphabet, shift, ciphertext], rotate the alphabet left by
    `shift` positions and return [rotated_alphabet, 0, ciphertext]."""
    alphabet, shift, ciphertext = s
    rotated = alphabet[shift:] + alphabet[:shift]
    return [rotated, 0, ciphertext]
# The Substitution function: map the cipher alphabet s[0] onto a-z and apply
# the same mapping to the ciphertext s[2]; the second pass upper-cases both.
def substituteCipher(s):
    """Mutate and return s = [cipher_alphabet, shift, ciphertext].

    NOTE(review): the replacements are applied sequentially in place, so a
    character produced by an earlier replace can be re-replaced by a later
    one when the cipher and plain alphabets overlap — the order dependence
    appears intentional for this challenge; do not reorder.
    """
    alpha = "abcdefghijklmnopqrstuvwxyz"
    for i in range(26):
        s[2] = s[2].replace(s[0][i], alpha[i])
        s[0] = s[0].replace(s[0][i], alpha[i])
    for i in range(26):
        s[2] = s[2].replace(s[0][i], s[0][i].upper())
        s[0] = s[0].replace(s[0][i], s[0][i].upper())
    return s
# Parse the [c, r, p] tuple out of the server banner text.
def extractTuple(text):
    """Return [c, r, p] from a banner containing "[c, r, p]: [..., ..., ...]";
    r is converted to int, c and p stay strings."""
    inner = text.split("[c, r, p]: [", 2)[1].split("]")[0]
    c, r, p = inner.split(", ", 2)
    return [c, int(r), p]
# Run the solver only when executed directly.
if __name__ == "__main__":
    main()
|
20,271 | fdcaf42488b06878f93f4cc3e53a81837a84cfa5 | import time
import imageGenerator
import textAnalyzer
def mainGUI(text):
    """Validate *text*, extract entity/hair/eyes, and generate the image.

    Returns the saved file name on success, or the error codes "404"
    (no entity/features found) / "403" (entity found, features missing)
    passed through from the analyzer.
    """
    entity, hair, eyes = textAnalyzer.startTextBreakdown(text)
    if entity == "404":
        print("Invalid text, couldnt locate enity/features to generate")
        return "404"
    if entity == "403":
        print("entity has been identified but the features could not")
        return "403"
    print(entity + " " + hair + " " + eyes)
    saved_name = imageGenerator.generateImage(eye_color=eyes, hair_color=hair, entity_name=entity)
    print("image Saved as: " + saved_name)
    return saved_name
if __name__ == '__main__':
    # Sample prompts kept around for manual testing:
    # text= "Akira is a girl with hair that is blue as the ocean, and eyes as red as the red sea"
    # text = "Bill Gates is a person
    text=""
    # text= "she had blue eyes"
    # text= "she had blue hair"
    # ent,hair,eyes = nlpBreakdown.startTextBreakdown(text)
    # print(ent+" "+hair+" "+eyes)
    # fileName = ent+str((round(time.time() * 1000)))
    # print("image Saved as: "+fileName)
    mainGUI(text)
|
20,272 | 82245fb3eec4cdc1fd78b7708099b0d5af83cb92 | class Solution:
def findTheLongestSubstring(self, s):
"""
前缀和 + 状态压缩
"""
ans, status, n = 0, 0, len(s)
pos = [-1] * (1 << 5)
pos[0] = 0
for i in range(n):
if s[i] == 'a':
status ^= 1 << 0
elif s[i] == 'e':
status ^= 1 << 1
elif s[i] == 'i':
status ^= 1 << 2
elif s[i] == 'o':
status ^= 1 << 3
elif s[i] == 'u':
status ^= 1 << 4
if pos[status] != -1:
ans = max(ans, i + 1 - pos[status])
else:
pos[status] = i + 1
return ans |
20,273 | 1094999c4f3cd7d0b141bd5e89f7ecb892e24544 | import gc
import os
import random
import warnings
from collections import defaultdict
import lmdb
import msgpack_numpy
import numpy as np
import torch
import tqdm
from habitat import logger
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.common.environments import get_env_class
from habitat_baselines.common.obs_transformers import (
apply_obs_transforms_batch,
apply_obs_transforms_obs_space,
get_active_obs_transforms,
)
from habitat_baselines.common.tensorboard_utils import TensorboardWriter
from habitat_baselines.utils.common import batch_obs
from vlnce_baselines.common.aux_losses import AuxLosses
from vlnce_baselines.common.base_il_trainer import BaseVLNCETrainer
from vlnce_baselines.common.env_utils import construct_envs
from vlnce_baselines.common.env_utils import construct_envs_auto_reset_false
from vlnce_baselines.common.utils import extract_instruction_tokens
from transformers import (BertConfig, BertTokenizer)
from r2r_src.r2r_agent import r2r_agent
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import tensorflow as tf # noqa: F401
class ObservationsDict(dict):
    """A dict of tensors that supports in-place pinning for fast
    host-to-device transfer (DataLoader pin_memory protocol)."""

    def pin_memory(self):
        # Replace every value with its pinned counterpart; returns self so
        # the object can be used where DataLoader expects the pinned batch.
        for key in self:
            self[key] = self[key].pin_memory()
        return self
def collate_fn(batch):
    """Each sample in batch: (
        obs,
        prev_actions,
        oracle_actions,
        inflec_weight,
    )

    Pads every trajectory in the batch to the longest one, then stacks
    along a new time-major dim and flattens (time, batch) -> (time*batch).
    """

    def _pad_helper(t, max_len, fill_val=0):
        # Pad tensor t along dim 0 up to max_len by repeating a filled
        # copy of its first element's shape.
        pad_amount = max_len - t.size(0)
        if pad_amount == 0:
            return t

        pad = torch.full_like(t[0:1], fill_val).expand(
            pad_amount, *t.size()[1:]
        )
        return torch.cat([t, pad], dim=0)

    # Turn the list of samples into parallel per-field lists.
    transposed = list(zip(*batch))

    observations_batch = list(transposed[0])
    prev_actions_batch = list(transposed[1])
    corrected_actions_batch = list(transposed[2])
    weights_batch = list(transposed[3])
    B = len(prev_actions_batch)

    # Regroup observations by sensor: sensor -> [tensor per batch element].
    new_observations_batch = defaultdict(list)
    for sensor in observations_batch[0]:
        for bid in range(B):
            new_observations_batch[sensor].append(
                observations_batch[bid][sensor]
            )

    observations_batch = new_observations_batch

    # Pad all trajectories to the longest one in the batch.
    max_traj_len = max(ele.size(0) for ele in prev_actions_batch)
    for bid in range(B):
        for sensor in observations_batch:
            observations_batch[sensor][bid] = _pad_helper(
                observations_batch[sensor][bid], max_traj_len, fill_val=1.0
            )

        prev_actions_batch[bid] = _pad_helper(
            prev_actions_batch[bid], max_traj_len
        )
        corrected_actions_batch[bid] = _pad_helper(
            corrected_actions_batch[bid], max_traj_len
        )
        weights_batch[bid] = _pad_helper(weights_batch[bid], max_traj_len)

    # Stack time-major (dim=1 is batch), then flatten the first two dims.
    for sensor in observations_batch:
        observations_batch[sensor] = torch.stack(
            observations_batch[sensor], dim=1
        )
        observations_batch[sensor] = observations_batch[sensor].view(
            -1, *observations_batch[sensor].size()[2:]
        )

    prev_actions_batch = torch.stack(prev_actions_batch, dim=1)
    corrected_actions_batch = torch.stack(corrected_actions_batch, dim=1)
    weights_batch = torch.stack(weights_batch, dim=1)
    not_done_masks = torch.ones_like(
        corrected_actions_batch, dtype=torch.uint8
    )
    # First timestep of every episode is marked "done" (mask 0).
    not_done_masks[0] = 0

    observations_batch = ObservationsDict(observations_batch)

    return (
        observations_batch,
        prev_actions_batch.view(-1, 1),
        not_done_masks.view(-1, 1),
        corrected_actions_batch,
        weights_batch,
    )
def _block_shuffle(lst, block_size):
blocks = [lst[i: i + block_size] for i in range(0, len(lst), block_size)]
random.shuffle(blocks)
return [ele for block in blocks for ele in block]
class IWTrajectoryDataset(torch.utils.data.IterableDataset):
    """Iterable dataset over expert trajectories stored in an LMDB file,
    with inflection weighting for imitation learning.

    Entries are preloaded in blocks, sorted by trajectory length (to keep
    batches of similar length together) and block-shuffled for randomness.
    """

    def __init__(
        self,
        lmdb_features_dir,
        use_iw,
        inflection_weight_coef=1.0,
        lmdb_map_size=1e9,
        batch_size=1,
    ):
        super().__init__()
        self.lmdb_features_dir = lmdb_features_dir
        self.lmdb_map_size = lmdb_map_size
        # Number of trajectories fetched from LMDB per preload round.
        self.preload_size = batch_size * 100
        self._preload = []
        self.batch_size = batch_size

        # Weight lookup indexed by "is this step an inflection point" (0/1).
        if use_iw:
            self.inflec_weights = torch.tensor([1.0, inflection_weight_coef])
        else:
            self.inflec_weights = torch.tensor([1.0, 1.0])

        # Open read-only just to count the stored trajectories.
        with lmdb.open(
            self.lmdb_features_dir,
            map_size=int(self.lmdb_map_size),
            readonly=True,
            lock=False,
        ) as lmdb_env:
            self.length = lmdb_env.stat()["entries"]

    def _load_next(self):
        # Refill the preload buffer when empty; raise StopIteration once
        # this worker's share of indices is exhausted.
        if len(self._preload) == 0:
            if len(self.load_ordering) == 0:
                raise StopIteration

            new_preload = []
            lengths = []
            with lmdb.open(
                self.lmdb_features_dir,
                map_size=int(self.lmdb_map_size),
                readonly=True,
                lock=False,
            ) as lmdb_env, lmdb_env.begin(buffers=True) as txn:
                for _ in range(self.preload_size):
                    if len(self.load_ordering) == 0:
                        break

                    new_preload.append(
                        msgpack_numpy.unpackb(
                            txn.get(str(self.load_ordering.pop()).encode()),
                            raw=False,
                        )
                    )

                    lengths.append(len(new_preload[-1][0]))

            # Sort by length (random tiebreak), then block-shuffle so each
            # consecutive batch contains similar-length trajectories.
            sort_priority = list(range(len(lengths)))
            random.shuffle(sort_priority)

            sorted_ordering = list(range(len(lengths)))
            sorted_ordering.sort(key=lambda k: (lengths[k], sort_priority[k]))

            for idx in _block_shuffle(sorted_ordering, self.batch_size):
                self._preload.append(new_preload[idx])

        return self._preload.pop()

    def __next__(self):
        # One sample: (obs dict, prev_actions, oracle_actions, weights).
        obs, prev_actions, oracle_actions = self._load_next()

        for k, v in obs.items():
            obs[k] = torch.from_numpy(np.copy(v))

        prev_actions = torch.from_numpy(np.copy(prev_actions))
        oracle_actions = torch.from_numpy(np.copy(oracle_actions))

        # An inflection is any step where the oracle action changes; the
        # first step always counts as one.
        inflections = torch.cat(
            [
                torch.tensor([1], dtype=torch.long),
                (oracle_actions[1:] != oracle_actions[:-1]).long(),
            ]
        )

        return (
            obs,
            prev_actions,
            oracle_actions,
            self.inflec_weights[inflections],
        )

    def __iter__(self):
        # Split the index range evenly across DataLoader workers.
        worker_info = torch.utils.data.get_worker_info()
        if worker_info is None:
            start = 0
            end = self.length
        else:
            per_worker = int(np.ceil(self.length / worker_info.num_workers))

            start = per_worker * worker_info.id
            end = min(start + per_worker, self.length)

        # Reverse so we can use .pop()
        self.load_ordering = list(
            reversed(
                _block_shuffle(list(range(start, end)), self.preload_size)
            )
        )

        return self
@baseline_registry.register_trainer(name="dagger")
class DaggerTrainer(BaseVLNCETrainer):
def __init__(self, config=None):
self.lmdb_features_dir = config.IL.DAGGER.lmdb_features_dir.format(
split=config.TASK_CONFIG.DATASET.SPLIT
)
super().__init__(config)
def _make_dirs(self) -> None:
self._make_ckpt_dir()
os.makedirs(self.lmdb_features_dir, exist_ok=True)
if self.config.EVAL.SAVE_RESULTS:
self._make_results_dir()
def _update_dataset(self, data_it):
if torch.cuda.is_available():
with torch.cuda.device(self.device):
torch.cuda.empty_cache()
envs = construct_envs(self.config, get_env_class(self.config.ENV_NAME))
expert_uuid = self.config.IL.DAGGER.expert_policy_sensor_uuid
rnn_states = torch.zeros(
envs.num_envs,
self.policy.net.num_recurrent_layers,
self.config.MODEL.STATE_ENCODER.hidden_size,
device=self.device,
)
prev_actions = torch.zeros(
envs.num_envs,
1,
device=self.device,
dtype=torch.long,
)
not_done_masks = torch.zeros(
envs.num_envs, 1, dtype=torch.uint8, device=self.device
)
observations = envs.reset()
observations = extract_instruction_tokens(
observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, self.obs_transforms)
episodes = [[] for _ in range(envs.num_envs)]
skips = [False for _ in range(envs.num_envs)]
# Populate dones with False initially
dones = [False for _ in range(envs.num_envs)]
# https://arxiv.org/pdf/1011.0686.pdf
# Theoretically, any beta function is fine so long as it converges to
# zero as data_it -> inf. The paper suggests starting with beta = 1 and
# exponential decay.
p = self.config.IL.DAGGER.p
# in Python 0.0 ** 0.0 == 1.0, but we want 0.0
beta = 0.0 if p == 0.0 else p ** data_it
ensure_unique_episodes = beta == 1.0
def hook_builder(tgt_tensor):
def hook(m, i, o):
tgt_tensor.set_(o.cpu())
return hook
rgb_features = None
rgb_hook = None
if self.config.MODEL.RGB_ENCODER.cnn_type == "TorchVisionResNet50":
rgb_features = torch.zeros((1,), device="cpu")
rgb_hook = self.policy.net.rgb_encoder.layer_extract.register_forward_hook(
hook_builder(rgb_features)
)
depth_features = None
depth_hook = None
if self.config.MODEL.DEPTH_ENCODER.cnn_type == "VlnResnetDepthEncoder":
depth_features = torch.zeros((1,), device="cpu")
depth_hook = self.policy.net.depth_encoder.visual_encoder.register_forward_hook(
hook_builder(depth_features)
)
collected_eps = 0
ep_ids_collected = None
if ensure_unique_episodes:
ep_ids_collected = {
ep.episode_id for ep in envs.current_episodes()
}
with tqdm.tqdm(
total=self.config.IL.DAGGER.update_size, dynamic_ncols=True
) as pbar, lmdb.open(
self.lmdb_features_dir,
map_size=int(self.config.IL.DAGGER.lmdb_map_size),
) as lmdb_env, torch.no_grad():
start_id = lmdb_env.stat()["entries"]
txn = lmdb_env.begin(write=True)
while collected_eps < self.config.IL.DAGGER.update_size:
current_episodes = None
envs_to_pause = None
if ensure_unique_episodes:
envs_to_pause = []
current_episodes = envs.current_episodes()
for i in range(envs.num_envs):
if dones[i] and not skips[i]:
ep = episodes[i]
traj_obs = batch_obs(
[step[0] for step in ep],
device=torch.device("cpu"),
)
del traj_obs[expert_uuid]
for k, v in traj_obs.items():
traj_obs[k] = v.numpy()
if self.config.IL.DAGGER.lmdb_fp16:
traj_obs[k] = traj_obs[k].astype(np.float16)
transposed_ep = [
traj_obs,
np.array([step[1] for step in ep], dtype=np.int64),
np.array([step[2] for step in ep], dtype=np.int64),
]
txn.put(
str(start_id + collected_eps).encode(),
msgpack_numpy.packb(
transposed_ep, use_bin_type=True
),
)
pbar.update()
collected_eps += 1
if (
collected_eps
% self.config.IL.DAGGER.lmdb_commit_frequency
) == 0:
txn.commit()
txn = lmdb_env.begin(write=True)
if ensure_unique_episodes:
if (
current_episodes[i].episode_id
in ep_ids_collected
):
envs_to_pause.append(i)
else:
ep_ids_collected.add(
current_episodes[i].episode_id
)
if dones[i]:
episodes[i] = []
if ensure_unique_episodes:
(
envs,
rnn_states,
not_done_masks,
prev_actions,
batch,
_,
) = self._pause_envs(
envs_to_pause,
envs,
rnn_states,
not_done_masks,
prev_actions,
batch,
)
if envs.num_envs == 0:
break
actions, rnn_states = self.policy.act(
batch,
rnn_states,
prev_actions,
not_done_masks,
deterministic=False,
)
actions = torch.where(
torch.rand_like(actions, dtype=torch.float) < beta,
batch[expert_uuid].long(),
actions,
)
for i in range(envs.num_envs):
if rgb_features is not None:
observations[i]["rgb_features"] = rgb_features[i]
del observations[i]["rgb"]
if depth_features is not None:
observations[i]["depth_features"] = depth_features[i]
del observations[i]["depth"]
episodes[i].append(
(
observations[i],
prev_actions[i].item(),
batch[expert_uuid][i].item(),
)
)
skips = batch[expert_uuid].long() == -1
actions = torch.where(
skips, torch.zeros_like(actions), actions
)
skips = skips.squeeze(-1).to(device="cpu", non_blocking=True)
prev_actions.copy_(actions)
outputs = envs.step([a[0].item() for a in actions])
observations, _, dones, _ = [list(x) for x in zip(*outputs)]
observations = extract_instruction_tokens(
observations,
self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID,
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, self.obs_transforms)
not_done_masks = torch.tensor(
[[0] if done else [1] for done in dones],
dtype=torch.uint8,
device=self.device,
)
txn.commit()
envs.close()
envs = None
if rgb_hook is not None:
rgb_hook.remove()
if depth_hook is not None:
depth_hook.remove()
    def train(self) -> None:
        """Prepare the (shared, in-place) trainer config and launch
        DAgger-style training through the R2R agent.

        Mutates ``self.config`` directly (defrost → edit → freeze), builds the
        vectorized environments, and hands both to ``r2r_agent``.
        """
        split = self.config.TASK_CONFIG.DATASET.SPLIT
        self.config.defrost()
        # nDTW/SDTW must be scored against the same split we train on.
        self.config.TASK_CONFIG.TASK.NDTW.SPLIT = split
        self.config.TASK_CONFIG.TASK.SDTW.SPLIT = split
        # DAgger needs the expert-action sensor; add it if the task config
        # does not already list it.
        if (
            self.config.IL.DAGGER.expert_policy_sensor
            not in self.config.TASK_CONFIG.TASK.SENSORS
        ):
            self.config.TASK_CONFIG.TASK.SENSORS.append(
                self.config.IL.DAGGER.expert_policy_sensor
            )
        # if doing teacher forcing, don't switch the scene until it is complete
        if self.config.IL.DAGGER.p == 1.0:
            self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = (
                -1
            )
        # Disabled experiment kept for reference (stripping non-instruction
        # sensors/measurements).
        """
        self.config.TASK_CONFIG.TASK.MEASUREMENTS = []
        self.config.TASK_CONFIG.TASK.SENSORS = [
            s for s in self.config.TASK_CONFIG.TASK.SENSORS if
            "INSTRUCTION" in s
        ]
        """
        # Deterministic episode order during data collection.
        self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False
        self.config.freeze()
        envs = construct_envs(self.config, get_env_class(self.config.ENV_NAME))
        # expert_uuid = self.config.IL.DAGGER.expert_policy_sensor_uuid
        # observations = envs.reset()
        # Disabled manual warm-up step kept for reference.
        """
        batch = batch_obs(observations, self.device)
        actions = batch[expert_uuid].long()
        outputs = envs.step([a[0].item() for a in actions])
        observations, _, dones, _ = [list(x) for x in zip(*outputs)]
        """
        print("r2r_src.r2r_agent")
        # The agent owns the actual training loop; this trainer only sets up
        # config and environments.
        curr_r2r_agent = r2r_agent(self, self.device, self.config, envs)
        curr_r2r_agent.train_listener()
"""
def eval(self) -> None:
print("eval")
"""
    def inference(self) -> None:
        """Run inference on the INFERENCE split via the R2R agent.

        Works on a clone of ``self.config`` so the trainer's config is not
        mutated, strips all ground-truth measurements, and loads the
        checkpoint named in the inference section.
        """
        config = self.config.clone()
        config.defrost()
        config.TASK_CONFIG.DATASET.SPLIT = self.config.INFERENCE.SPLIT
        config.TASK_CONFIG.DATASET.ROLES = ["guide"]
        config.TASK_CONFIG.DATASET.LANGUAGES = config.INFERENCE.LANGUAGES
        # Deterministic episode order; never rotate scenes mid-episode.
        config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False
        config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = (
            -1
        )
        config.IL.ckpt_to_load = config.INFERENCE.CKPT_PATH
        # No ground truth at inference time: drop measurements and keep only
        # instruction sensors.
        config.TASK_CONFIG.TASK.MEASUREMENTS = []
        config.TASK_CONFIG.TASK.SENSORS = [
            s for s in config.TASK_CONFIG.TASK.SENSORS if "INSTRUCTION" in s
        ]
        config.ENV_NAME = "VLNCEInferenceEnv"
        config.freeze()
        #envs = construct_envs_auto_reset_false(config,get_env_class(config.ENV_NAME))
        envs = construct_envs(config,get_env_class(config.ENV_NAME))
        # NOTE(review): the agent receives the original self.config, not the
        # modified clone used to build the envs — confirm this is intentional.
        curr_r2r_agent = r2r_agent(self, self.device, self.config, envs)
        curr_r2r_agent.inference()
    def eval(self):
        """Configure the EVAL split on a cloned config and delegate
        evaluation to the R2R agent (which constructs its own envs)."""
        config = self.config.clone()
        config.defrost()
        config.TASK_CONFIG.DATASET.SPLIT = config.EVAL.SPLIT
        config.TASK_CONFIG.DATASET.ROLES = ["guide"]
        config.TASK_CONFIG.DATASET.LANGUAGES = config.EVAL.LANGUAGES
        # nDTW/SDTW must score against the split being evaluated.
        config.TASK_CONFIG.TASK.NDTW.SPLIT = config.EVAL.SPLIT
        config.TASK_CONFIG.TASK.SDTW.SPLIT = config.EVAL.SPLIT
        # Deterministic episode order; never rotate scenes mid-episode.
        config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False
        config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = (
            -1
        )
        if len(config.VIDEO_OPTION) > 0:
            # Video rendering requires map and collision measurements.
            config.defrost()
            config.TASK_CONFIG.TASK.MEASUREMENTS.append("TOP_DOWN_MAP_VLNCE")
            config.TASK_CONFIG.TASK.MEASUREMENTS.append("COLLISIONS")
            config.freeze()
        # NOTE(review): when VIDEO_OPTION is empty the config is never
        # re-frozen after the defrost above — confirm downstream code
        # tolerates (or expects) a mutable config here.
        curr_r2r_agent = r2r_agent(self, self.device, config, None)
        #curr_r2r_agent = r2r_agent(self, self.device, None, None)
        curr_r2r_agent.eval()
|
20,274 | 34c0055b2c462a9631a4da5040691bbf2b848594 | import cv2
if __name__ == '__main__':
    # BUG FIX: cv2.imread() decodes a single still image and requires a
    # filename argument; it never yields a stream with a .read() method.
    # Frame streams come from cv2.VideoCapture — index 0 opens the default
    # camera (pass a video filename instead to play a file).
    video = cv2.VideoCapture(0)
    ok, frame = video.read()
    if ok:
        cv2.imshow("tracker", frame)
    while True:
        ok, frame = video.read()
        if not ok:
            # End of stream / camera failure: stop instead of spinning.
            break
        cv2.imshow("tracker", frame)
        k = cv2.waitKey(1) & 0xff
        if k == 27:  # ESC quits
            break
    # Release the capture device and close the display window.
    video.release()
    cv2.destroyAllWindows()
20,275 | 2f31eabb889f70b70f3e636e589e51a93b7d62c7 | from enum import Enum
class NodePosition(Enum):
    """
    Enum of possible node positions in the network
    """
    # Values are explicit (rather than auto()) so they remain stable if
    # members are reordered; external code may rely on these integers.
    CENTER = 0
    TOP = 1
    BOTTOM = 2
    LEFT = 3
    RIGHT = 4
|
20,276 | 4672c6ff5c928f63bfd642feccadd21b6afe5436 | # Importing flask module in the project is mandatory
# An object of Flask class is our WSGI application.
from flask import Flask, request, jsonify
from utils import *
# Flask constructor takes the name of
# current module (__name__) as argument.
app = Flask(__name__)
num_elements = 16000
# Setting the maximum value in the randomly generated list.
max_num = 1000
# Instantiate a manager object to manage the data chunks, num of chunks = (nprocs - 1)
data_manager_master = DataManager(num_elements, 16 , max_num)
merger = Merger([])
# The route() function of the Flask class is a decorator,
# which tells the application which URL should call
# the associated function.
@app.route('/')
# GET / — simple greeting/health endpoint; Flask serializes the dict to JSON.
def get_id():
    """Return a static greeting payload."""
    return {'Message':'Hello'}
@app.route('/get_data')
# GET /get_data — hand out the next unprocessed chunk from the shared manager.
def getData():
    """Return the next data chunk and its chunk index as JSON."""
    # Assumes get_next_chunk() returns (array-like with .tolist(), int) —
    # confirm against utils.DataManager.
    arr_to_send, chunk_num = data_manager_master.get_next_chunk()
    # .tolist() converts the array to a JSON-serializable plain list.
    return {'Data':arr_to_send.tolist(), 'ID': chunk_num}
@app.route('/post_data', methods=['POST'])
# POST /post_data — workers report back their chunk results here.
def postData():
    """Acknowledge a JSON payload posted by a worker and log its chunk ID."""
    payload = request.get_json()
    print(f"Received Data from {payload['ID']}")
    return {'Data':'Thanks..'}
# main driver function
if __name__ == '__main__':
# run() method of Flask class runs the application
# on the local development server.
app.run()
|
20,277 | 9ad24de62d5b5dfca43e4d4d69140ae2f60b319e | '''
Return true if you can partition the array into two subsets that have equal sum
[1,12,14,3,2] returns true
Because 1+12+3 = 16 and 14+2 = 16
'''
def partition(array, index=0, part_1=None, part_2=None):
    """Return True if *array* can be split into two subsets with equal sums.

    Exhaustive backtracking: each element is tried in partition 1, then in
    partition 2 (O(2^n)).  ``index``, ``part_1`` and ``part_2`` are internal
    recursion state; callers normally pass only ``array``.  The original
    ``partition(arr, 0, [], [])`` call form keeps working.
    """
    # Fresh accumulators per top-level call (avoids mutable default args).
    if part_1 is None:
        part_1 = []
    if part_2 is None:
        part_2 = []
    # Base case: every element has been assigned — compare the two sums.
    if index == len(array):
        return sum(part_1) == sum(part_2)
    # Try the current element in partition 1, then backtrack.
    part_1.append(array[index])
    if partition(array, index + 1, part_1, part_2):
        return True
    part_1.pop()
    # Try it in partition 2, then backtrack.
    part_2.append(array[index])
    if partition(array, index + 1, part_1, part_2):
        return True
    part_2.pop()
    return False
# Sample inputs with their expected results for a quick manual check.
a = [1,12,14,3,2] # true
b = [1,3,5,7,1] # false
c = [1,2,3] # true
print(partition(b,0,[],[]))
|
20,278 | 4396077ee0b49947bcca87ae62ffe769565f9630 | #!/usr/bin/env python
# core.py
#
# Copyright (C) 2016 Diamond Light Source, Karl Levik
#
# 2016-11-30
#
# Methods to store and retrieve data in the core tables
#
try:
import mysql.connector
except ImportError, e:
print 'MySQL API module not found'
raise e
import string
import logging
import time
import os
import sys
import datetime
from logging.handlers import RotatingFileHandler
from ispyb.ExtendedOrderedDict import ExtendedOrderedDict
import copy
class Core:
    '''Core provides methods to store and retrieve data in the core tables.

    All methods take an open mysql.connector cursor; SQL goes through the
    ispyb stored functions/procedures, never through inline table access.
    (Python 2 code: note iteritems()/.next() usage.)
    '''
    def __init__(self):
        pass
    def first_item_in_cursor(self, cursor):
        """Return the first column of the first row, as int when possible.

        Returns None for an empty row; for dict cursors, returns the value
        of the first key/value pair instead of a positional column.
        """
        rs = cursor.fetchone()
        if len(rs) == 0:
            return None
        elif isinstance(cursor, mysql.connector.cursor.MySQLCursorDict):
            # Dict cursor: grab the value of the (arbitrary) first item.
            return rs.iteritems().next()[1]
        else:
            # Bare except is deliberate best-effort: fall back to the raw
            # value when it is not int-convertible.
            try:
                return int(rs[0])
            except:
                return rs[0]
    # Template of sample columns, in the order expected by
    # ispyb.upsert_sample; callers copy this via get_sample_params().
    _sample_params =\
        ExtendedOrderedDict([('id',None), ('crystalid',None), ('containerid',None), ('name',None), ('code',None),
            ('location',None), ('holder_length',None), ('loop_length',None), ('loop_type',None),
            ('wire_width',None), ('comments',None), ('status',None), ('is_in_sc',None)])
    def get_sample_params(self):
        # Deep copy so callers can fill in values without mutating the template.
        return copy.deepcopy(self._sample_params)
    # NOTE(review): defined without `self`, so this is broken as an instance
    # method (core.str_format_ops(x) passes the instance as `l`).  It appears
    # unused within this class — consider @staticmethod or removal.
    def str_format_ops(l):
        return ','.join(['%s'] * len(l))
    def put_sample(self, cursor, values):
        """Insert or update a sample depending on whether values[0] (the id)
        is set; return the integer sample id, or None."""
        id = None
        if values[0] is not None:
            self.update_sample(cursor, values)
            id = values[0]
        else:
            id = self.insert_sample(cursor, values)
        if id != None:
            return int(id)
        return None
    def insert_sample(self, cursor, values):
        '''Store new sample.'''
        # One '%s' placeholder per value; the stored function decides insert
        # vs. update server-side.
        cursor.execute('select ispyb.upsert_sample(%s)' % ','.join(['%s'] * len(values)), values)
        return self.first_item_in_cursor( cursor )
    def update_sample(self, cursor, values):
        '''Update existing sample.'''
        # Same stored function as insert_sample; the id in values selects
        # update behaviour.
        cursor.execute('select ispyb.upsert_sample(%s)' % ','.join(['%s'] * len(values)), values)
        return self.first_item_in_cursor( cursor )
    def retrieve_visit_id(self, cursor, visit):
        '''Get the database ID for a visit on the form mx1234-5.'''
        cursor.execute('select ispyb.retrieve_visit_id(%s)', [visit])
        return self.first_item_in_cursor( cursor )
    def retrieve_datacollection_id(self, cursor, img_filename, img_fileloc):
        '''Get the database ID for the data collection corresponding to the given diffraction image file.'''
        cursor.execute('select ispyb.retrieve_datacollection_id(%s,%s)', [img_filename, img_fileloc])
        return self.first_item_in_cursor( cursor )
    def retrieve_current_sessions(self, cursor, beamline, tolerance_mins=0):
        '''Get a result-set with the currently active sessions on the given beamline.'''
        cursor.callproc(procname='ispyb.retrieve_current_sessions', args=(beamline,tolerance_mins))
        # callproc exposes its result sets via stored_results(); keep the
        # last one fetched.
        for result in cursor.stored_results():
            rs = result.fetchall()
        cursor.nextset()
        return rs
    def retrieve_current_sessions_for_person(self, cursor, beamline, fed_id, tolerance_mins=0):
        '''Get a result-set with the currently active sessions on the given beamline.'''
        cursor.callproc(procname='ispyb.retrieve_current_sessions_for_person', args=(beamline, fed_id, tolerance_mins))
        for result in cursor.stored_results():
            rs = result.fetchall()
        cursor.nextset()
        return rs
    def retrieve_most_recent_session(self, cursor, beamline, proposal_code):
        '''Get a result-set with the most recent session on the given beamline for the given proposal code '''
        cursor.callproc(procname='ispyb.retrieve_most_recent_session', args=(beamline, proposal_code))
        for result in cursor.stored_results():
            rs = result.fetchall()
        cursor.nextset()
        return rs
    def retrieve_current_cm_sessions(self, cursor, beamline):
        '''Get a result-set with the currently active commissioning (cm) sessions on the given beamline.'''
        cursor.callproc(procname='ispyb.retrieve_current_cm_sessions', args=(beamline,))
        for result in cursor.stored_results():
            rs = result.fetchall()
        cursor.nextset()
        return rs
    def retrieve_active_plates(self, cursor, beamline):
        '''Get a result-set with the submitted plates not yet in local storage on a given beamline'''
        cursor.callproc(procname="ispyb.retrieve_containers_submitted_non_ls", args=(beamline,))
        for result in cursor.stored_results():
            rs = result.fetchall()
        cursor.nextset()
        return rs
    def retrieve_proposal_title(self, cursor, proposal_code, proposal_number):
        '''Get the title of a given proposal'''
        cursor.execute('select ispyb.retrieve_proposal_title(%s,%s)', [proposal_code, proposal_number])
        return self.first_item_in_cursor( cursor )
core = Core()
|
20,279 | 77e232eba39eaa1cfedc10efbf197cce71f8523b | ../../lib_os_firewall/library/os_firewall_manage_iptables.py |
20,280 | a4bce951edbaf9911ca3933cc257dc7b460191cb | import getpass
import time
from typing import TYPE_CHECKING, Any
from fabric.connection import Connection
from paramiko.ssh_exception import (
AuthenticationException,
PasswordRequiredException,
SSHException,
)
if TYPE_CHECKING:
from ..model import AutojailConfig
global_context = None
def connect(
    config: "AutojailConfig",
    context: Any,
    passwd_retries: int = 5,
    timeout_retries=10,
) -> Connection:
    """Open a connection to the target board described by *config*.

    SSH targets: first try the configured credentials, retrying transient
    SSH/EOF failures up to *timeout_retries* times with 5 s backoff.  If
    authentication ultimately fails and no password was configured, prompt
    interactively up to *passwd_retries* times, each attempt with its own
    timeout-retry loop.  Non-SSH targets are delegated to the *context*
    board provider.

    Raises the underlying paramiko exception when all retries are exhausted.
    """
    login = config.login
    connection = None
    if login.is_ssh:
        try:
            # Attempt with configured (possibly password-less) credentials.
            for _timeout_retry in range(timeout_retries):
                try:
                    connect_kwargs = None
                    if config.password is not None:
                        connect_kwargs = {"password": config.password}
                    connection = Connection(
                        login.host,
                        user=login.user,
                        port=login.port,
                        connect_kwargs=connect_kwargs,
                    )
                    connection.open()
                except SSHException as e:
                    # Out of retries: propagate whatever went wrong.
                    if _timeout_retry == timeout_retries - 1:
                        raise e
                    if isinstance(e, AuthenticationException):
                        if config.password is not None:
                            # Configured password rejected — maybe transient
                            # (e.g. service still booting); back off and retry.
                            time.sleep(5)
                            continue
                        else:
                            # No password configured: escalate to the
                            # interactive-prompt path below.
                            raise e
                    # Other SSH failures: back off and retry.
                    time.sleep(5)
                    continue
                except EOFError as e:
                    if _timeout_retry == timeout_retries - 1:
                        raise e
                    time.sleep(5)
                    continue
                break
        except (
            AuthenticationException,
            PasswordRequiredException,
            SSHException,
            EOFError,
        ) as e:
            if config.password is not None:
                # A configured password that still fails is a hard error;
                # don't prompt over it.
                raise e
            # Interactive fallback: ask for a password, retrying both the
            # prompt (passwd_retries) and transient failures (timeout_retries).
            for _retry in range(passwd_retries):
                password = getpass.getpass(
                    prompt="Password for {}@{}:{}: ".format(
                        login.user, login.host, login.port
                    )
                )
                try:
                    for _timeout_retry in range(timeout_retries):
                        try:
                            connection = Connection(
                                user=login.user,
                                host=login.host,
                                port=login.port,
                                connect_kwargs={"password": password},
                            )
                            connection.open()
                        except SSHException as e:
                            if isinstance(e, AuthenticationException):
                                # Wrong password: break out to re-prompt.
                                raise e
                            time.sleep(5)
                            continue
                        except EOFError:
                            time.sleep(5)
                            continue
                        break
                except (
                    AuthenticationException,
                    PasswordRequiredException,
                    SSHException,
                ):
                    # Re-prompt with a fresh password.
                    continue
                break
    else:
        # Non-SSH target: the surrounding context must supply the board.
        assert context is not None
        connection = context.board(login.host).connect()
    return connection
|
20,281 | 34f00a3e8e5f6934b5b02b807177019abcb29036 | #!/usr/bin/env python3
import argparse
import csv
import perf
import six
from itertools import chain
def export_csv(args, suite):
    """Write one CSV row per benchmark in *suite* to args.csv_filename.

    Benchmarks are emitted in name order; each row is the concatenation of
    every run's value list (runs without values are skipped).
    """
    rows = []
    for bench in sorted(suite.get_benchmarks(), key=lambda x: x.get_name()):
        print("converting: {0}".format(bench.get_name()))
        runs = bench.get_runs()
        # Warm-up/calibration runs may carry no values; drop them.
        runs_values = [run.values for run in runs if run.values]
        rows.append(list(chain(*runs_values)))
    # Python 3's csv writer needs a newline-controlled text file; Python 2
    # takes a plain 'w' handle.
    # NOTE(review): the csv docs recommend newline='' (not '\n') for writer
    # files — confirm no stray blank lines on Windows.
    if six.PY3:
        fp = open(args.csv_filename, 'w', newline='\n', encoding='ascii')
    else:
        fp = open(args.csv_filename, 'w')
    with fp:
        writer = csv.writer(fp)
        writer.writerows(rows)
def parse_args():
    """Parse the two positional CLI arguments: input perf-JSON path and
    output CSV path."""
    parser = argparse.ArgumentParser()
    for positional in ("json_filename", "csv_filename"):
        parser.add_argument(positional)
    return parser.parse_args()
def main():
    """CLI entry point: load the benchmark suite and export it to CSV."""
    cli_args = parse_args()
    suite = perf.BenchmarkSuite.load(cli_args.json_filename)
    export_csv(cli_args, suite)
if __name__ == "__main__":
main()
|
20,282 | 46d34d415aa8167fcf98311d1d918d3538307d21 | number = 786
# Sum the decimal digits of `number` and print the result.
number = 786

print()
# Walk the string form right-to-left, accumulating each digit.
inputNumberAsString = str(number)
digitSum = 0
i = len(inputNumberAsString) - 1
while i >= 0:
    digitSum = digitSum + int(inputNumberAsString[i])
    i = i - 1
# BUG FIX: the original ended with a bare print(), so the computed digit
# sum was never shown; print the result.
print(digitSum)
20,283 | a52668eef1f311e56c11c941d08723ef2a162fc5 | import re
# Read one line and print the sum of every integer embedded in it.
n = input()
# Raw string for the regex: '\d' in a plain string is an invalid escape
# (DeprecationWarning, SyntaxWarning in newer Pythons).
print(sum(map(int, re.findall(r'\d+', n))))
20,284 | 76006fa6eecbe465002ecd7abe484787e92ed460 | __all__ = ["profiles","findhalos","MassFunctions"]
|
20,285 | d59edb111f0670149744e26958f42596ed671cc7 | # cock_tail sort
from typing import List
def cocktail_list(numbers: List[int]) -> List[int]:
    """Sort *numbers* in place with cocktail (bidirectional bubble) sort.

    Alternates a left-to-right bubbling pass with a right-to-left pass,
    shrinking the unsorted window from both ends.  Returns the same list
    object, sorted ascending.
    """
    lo, hi = 0, len(numbers) - 1
    dirty = True
    while dirty:
        dirty = False
        # Forward pass: float the largest remaining value toward `hi`.
        for idx in range(lo, hi):
            if numbers[idx] > numbers[idx + 1]:
                numbers[idx], numbers[idx + 1] = numbers[idx + 1], numbers[idx]
                dirty = True
        if not dirty:
            break
        dirty = False
        hi -= 1
        # Backward pass: sink the smallest remaining value toward `lo`.
        for idx in range(hi - 1, lo - 1, -1):
            if numbers[idx] > numbers[idx + 1]:
                numbers[idx], numbers[idx + 1] = numbers[idx + 1], numbers[idx]
                dirty = True
        lo += 1
    return numbers
if __name__ == '__main__':
    import random

    # Smoke test: sort ten random values.  (The old hard-coded sample list
    # shadowed the builtin `list` and was never used — removed.)
    nums = [random.randint(0, 1000) for _ in range(10)]
    print(cocktail_list(nums))
|
20,286 | 03157779bf30a2815dbc7543c39d840612d6fa58 | import pygame
from pygame.locals import *
import sys, os, time, random
from itertools import cycle
pygame.init()
#GameDisplay
display_height = 750
display_width = 1000
gameDisplay = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('Turf Wars')
clock = pygame.time.Clock()
#paths
current_path = os.path.dirname(__file__) # Where your .py file is located
resource_path = os.path.join(current_path, 'resources') # The resource folder path
image_path = os.path.join(resource_path, 'images') # The image folder path
font_path = os.path.join(resource_path, 'fonts')
#fonts
DEFAULT_FONT = "freesansbold.ttf"
SMALL_FONT = pygame.font.Font(os.path.join(font_path, "COMIC.ttf"),40)
TINY_FONT = pygame.font.Font(os.path.join(font_path, "COMIC.ttf"),20)
LARGE_FONT = pygame.font.Font(os.path.join(font_path, "COMIC.ttf"),80)
#colors
purple = (128, 0, 128)
blue = (0,0,200)
red = (200,0,0)
green = (0,200,0)
black = (0,0,0)
white = (255,255,255)
bright_red = (255,0,0)
bright_blue = (0,0,255)
bright_green = (0,255,0)
#images
backgroundImg = pygame.image.load(os.path.join(image_path, 'logo.jpg'))
logoImg = pygame.image.load(os.path.join(image_path, 'logo.jpg'))
#imagesize
logoImg = pygame.transform.scale(logoImg, (250,250))
def enter_text(max_length, lower = False, upper = False, title = False):
"""
returns user name input of max length "max length and with optional
string operation performed
"""
pressed = ""
finished = False
# create list of allowed characters using ascii values
# numbers 1-9, letters a-z
all_chars = [i for i in range(97, 123)] +\
[i for i in range(48,58)]
# create blinking underscore
BLINKING_UNDERSCORE = pygame.USEREVENT + 0
pygame.time.set_timer(BLINKING_UNDERSCORE, 800)
blinky = cycle(["_", " "])
next_blink = next(blinky)
while not finished:
pygame.draw.rect(gameDisplay, red, (125,175,200,40))
print_text(TINY_FONT, 125, 150, "Enter Name:")
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit()
if event.type == BLINKING_UNDERSCORE:
next_blink = next(blinky)
# if input is in list of allowed characters, add to variable
elif event.type == pygame.KEYUP and event.key in all_chars \
and len(pressed) < max_length:
# caps entry?
if pygame.key.get_mods() & pygame.KMOD_SHIFT or pygame.key.get_mods()\
& pygame.KMOD_CAPS:
pressed += chr(event.key).upper()
# lowercase entry
else:
pressed += chr(event.key)
# otherwise, only the following are valid inputs
elif event.type == pygame.KEYUP:
if event.key == pygame.K_BACKSPACE:
pressed = pressed[:-1]
elif event.key == pygame.K_SPACE:
pressed += " "
elif event.key == pygame.K_RETURN:
finished = True
# only draw underscore if input is not at max character length
if len(pressed) < max_length:
print_text(TINY_FONT, 130, 180, pressed + next_blink)
else:
print_text(TINY_FONT, 130, 180, pressed)
pygame.display.update()
# perform any selected string operations
if lower: pressed = pressed.lower()
if upper: pressed = pressed.upper()
if title: pressed = pressed.title()
return pressed
def print_text(TINY_FONT, x, y, text, color = white):
    """Draws a text image to display surface"""
    # NOTE(review): the first parameter is a font object despite its
    # TINY_FONT name — callers pass SMALL_FONT/TINY_FONT alike; consider
    # renaming to `font` (all in-file calls are positional).
    text_image = TINY_FONT.render(text, True, color)
    gameDisplay.blit(text_image, (x,y))
def logo(x,y):
gameDisplay.blit(logoImg, (x,y))
x = (display_width * 0.10)
y = (display_height * 0.10)
def exit():
    """Shut pygame down and terminate the process (never returns).

    NOTE(review): shadows the builtin exit() throughout this module.
    """
    pygame.quit()
    quit()
def textblock(text, font):
    """Render *text* in *font* (black) and return (surface, bounding rect)."""
    textSurface = font.render(text, True, black)
    return textSurface, textSurface.get_rect()
def textdisplay(text):
TextSurf, TextRectangle = textblock(text, LARGE_FONT)
TextRectangle.center = ((display_width/2),(display_height/2))
gameDisplay.blit(TextSurf, TextRectangle)
def button(msg, x,y,w,h,ic,ac,action=None):
    """Draw a clickable rectangle button and run *action* on left-click."""
    # This function has the parameters of:
    # msg: Message you want to display
    # x: The x location of the top left of the button box.
    # y: The y location of the top left of the button box.
    # w: horizontal width.
    # h: vertical height.
    # ic: Inactive color (when a mouse is not hovering).
    # ac: Active color (when a mouse is hovering).
    mouse = pygame.mouse.get_pos()
    click = pygame.mouse.get_pressed()
    # Hovering: draw active color and, if left button is down, fire action.
    # NOTE(review): action fires on every frame the button is held down,
    # not once per click — confirm that is acceptable.
    if x+w > mouse[0] > x and y+h > mouse[1] > y:
        pygame.draw.rect(gameDisplay, ac,(x,y,w,h))
        if click[0] == 1 and action != None:
            action()
    else:
        pygame.draw.rect(gameDisplay, ic,(x,y,w,h))
    # Center the label inside the button rectangle.
    textSurf, textRectangle = textblock(msg, SMALL_FONT)
    textRectangle.center = ( (x+(w/2)), (y+(h/2)) )
    gameDisplay.blit(textSurf, textRectangle)
def game_intro():
intro = True
while intro:
for event in pygame.event.get():
print(event)
if event.type == pygame.QUIT:
exit()
#background
gamemap = pygame.image.load(os.path.join(image_path, 'map.jpg'))
gamemap = pygame.transform.scale(gamemap,(1000,750))
gameDisplay.blit(gamemap, (0,0))
# logo
logo(x, y)
# title
TextSurf, TextRectangle = textblock("Digital Component", LARGE_FONT)
TextRectangle.center = ((display_width / 2),((display_height *0.5)))
gameDisplay.blit(TextSurf, TextRectangle)
#buttons
button('START!',150,550,300,150,bright_blue,blue,gameloop)
button('EXIT!', 550,550,300,150,red,bright_red,exit)
pygame.display.update()
clock.tick(15)
def gameloop():
gameDisplay.fill(white)
gameExit = False
fpsclock = pygame.time.Clock()
fps = 30
PurplePlayer = False
RedPlayer = False
GreenPlayer = False
BlackPlayer = False
while not gameExit:
fpsclock.tick(fps)
pressed = None
for event in pygame.event.get():
print(event)
if event.type == pygame.KEYUP:
print(pygame.key.name(event.key))
print(ord(pygame.key.name(event.key)))
if event.type == pygame.QUIT:
exit()
if not PurplePlayer:
PurplePlayer = enter_text(15)
pygame.draw.rect(gameDisplay, purple, (90,500,150,40))
print_text(TINY_FONT, 100, 500, PurplePlayer)
if not RedPlayer:
RedPlayer = enter_text(15)
pygame.draw.rect(gameDisplay, red, (290,500,150,40))
print_text(TINY_FONT, 300, 500, RedPlayer)
if not GreenPlayer:
GreenPlayer = enter_text(15)
pygame.draw.rect(gameDisplay, green, (490,500,150,40))
print_text(TINY_FONT, 500, 500, GreenPlayer)
if not BlackPlayer:
BlackPlayer = enter_text(15)
pygame.draw.rect(gameDisplay, black, (690,500,150,40))
print_text(TINY_FONT, 700, 500, BlackPlayer)
button('PROCEED!',300,300,300,150,bright_green,black,game_start)
pygame.draw.rect(gameDisplay, white, (125,175,200,40))
pygame.display.update()
clock.tick(15)
def game_start():
start = True
gameDisplay.fill(white)
while start:
for event in pygame.event.get():
print(event)
if event.type == pygame.QUIT:
exit()
gameDisplay.fill(white)
pygame.display.update()
clock.tick(15)
game_intro()
exit() |
20,287 | 961c4fc013e9d089672bb50c364b1df39001501e | # ******************************************
# Author : Ali Azhari
# Created On : Thu Jul 18 2019
# File : app.py
# *******************************************/
import time
import datetime
# first solution. it takes O(n**2)
class Solution(object):
    """Two-sum: find indices of two numbers in ``nums`` summing to ``target``."""

    def twoSum1(self, nums, target):
        """Brute force — check every pair.  O(n^2) time, O(1) space.

        :type nums: List[int]
        :type target: int
        :rtype: List[int] or None (when no pair sums to target)
        """
        for i in range(0, len(nums) - 1):
            for j in range(i + 1, len(nums)):
                if nums[i] + nums[j] == target:
                    return [i, j]
        return None

    def twoSum2(self, nums, target):
        """Single pass with a complement map.  O(n) time, O(n) space.

        :type nums: List[int]
        :type target: int
        :rtype: List[int] or None (when no pair sums to target)
        """
        # BUG FIX: the original pre-seeded the map with nums[0] before the
        # loop, which raised IndexError for an empty `nums`.  Checking the
        # current value before inserting its complement handles n == 0 and
        # n == 1 cleanly and returns identical answers otherwise.
        complement = {}
        for i, value in enumerate(nums):
            if value in complement:
                return [complement[value], i]
            complement[target - value] = i
        return None
# flag = 10000000
# t1 = time.time()
# Time both implementations on the same input with wall-clock timestamps.
start = datetime.datetime.now()
solution1 = Solution()
print(solution1.twoSum1([2, 7, 3, 8, 5], 9))
elapsed = datetime.datetime.now() - start
# BUG FIX: user-facing typo "fo" -> "for" in both messages.
print('elapsed time for solution 1 is: ')
print(elapsed)

start = datetime.datetime.now()
solution2 = Solution()
print(solution2.twoSum2([2, 7, 3, 8, 5], 9))
elapsed = datetime.datetime.now() - start
print('elapsed time for solution 2 is: ')
print(elapsed)
20,288 | a4da1c3b7da197f6b198f609e8770ef7894f7ef2 | import pgf
import itertools
import ttutils
####################################
# Prerequisites:
# - L4 file, from which we generate
# * GF grammar, with dynamically constructed lexicon
# * s(CASP) code: L4 rules and declarations translated into s(CASP) rules and facts
# Pipeline:
# - Generate the files in Haskell, on the client side.
# - Create an expert system, and move the generated files to Docassemble server.
# - Run the interview, get answers from user, and run a query.
# - Query produces answers: a set of s(CASP) models. Parse those answers in Python.
# - Initial transformation of the s(CASP) (Python) AST into GF trees.
# We need to match the s(CASP) statements in the answer (is_player(A)) to the GF funs (Player : Arg -> Statement).
# Orthographic restrictions:
# * L4 predicates and GF cats+funs can be anything
# * s(CASP) atoms must be lowercase and variables uppercase
# Maybe this needs to be addressed already when we generate the grammar. Make everything lowercase.
# - TODO: figure out if someone else wants to take over the L4toSCASP translation. Does Martin have a normalised format that is easier to use in transformation?
# - Tree transformations from GF trees to other GF trees.
# That's what I'm doing in this file. The rest of the pipeline depends on other people's work.
####################################
## Dummy data for testing
## Real data is parsed from s(CASP) models
# TODO: replace these with some good way to get the data
gr = pgf.readPGF("/tmp/RPSTop.pgf")
R = gr.embed("RPSTop")
eng = gr.languages["RPSTopEng"]
aRock_cScissors = [
"A wins RPS",
"RPS is a game",
"A is a participant in RPS",
"A is a player",
"A throws rock",
"C is a player",
"C is a participant in RPS",
"C throws scissors",
"rock beats scissors",
]
aScissors_cPaper = [
"A wins RPS",
"RPS is a game",
"A is a participant in RPS",
"A is a player",
"A throws scissors",
"C is a player",
"C is a participant in RPS",
"C throws paper",
"scissors beats paper",
]
aPaper_cRock = [
"A wins RPS",
"RPS is a game",
"A is a participant in RPS",
"A is a player",
"A throws paper",
"C is a player",
"C is a participant in RPS",
"C throws rock",
"paper beats rock",
]
testCorpus = [aRock_cScissors, aScissors_cPaper, aPaper_cRock]
def getExpr(sentence):
i = eng.parse(sentence)
prob,expr = next(i)
return expr
parsedTestCorpus = [
[getExpr(s) for s in model]
for model in testCorpus]
####################################
## Translating the Haskell functions
def aggregateBy(exprs,
                sortf=lambda e : e,
                groupf=lambda e : e,
                name='',
                debug=False):
    """Generic aggregation function.

    Sorts *exprs* by the linearized (string) form of ``sortf(e)``, groups
    equal keys, and collapses each multi-element group with ``groupf``;
    singleton groups are kept as-is.  *name* only labels debug/error output.
    """
    # Key on the string rendering so GF expressions compare by content
    # rather than object identity.
    sortByShow = lambda e : ttutils.showExprs(sortf(e))
    results = []
    if debug:
        print("debug: aggregateBy"+name)
        for e in [sortByShow(e) for e in exprs]:
            print(e)
        print("---------")
    # itertools.groupby only merges *adjacent* equal keys, so sort first
    # with the same key function.
    exprs = sorted(exprs, key=sortByShow)
    for _, g in itertools.groupby(exprs, sortByShow):
        grp = list(g)
        l = len(grp)
        if l==0:
            # groupby never yields empty groups; defensive guard.
            raise Exception("aggregateBy" + name + ": empty input")
        elif l==1:
            results.append(grp[0])
        else:
            results.append(groupf(grp))
    return results
def aggregateByPredicate(exprs):
# internal aggregation fun
def aggregate(es):
subjs = [ttutils.getSubj(e) for e in es]
fullExpr = es[0]
aggrSubjs = listArg(subjs)
c, args = fullExpr.unpack()
if c=="App1":
pr, _ = args
return R.AggregateSubj1(pr, aggrSubjs)
elif c=="App2":
pr, _, obj = args
return R.AggregateSubj2(pr, obj, aggrSubjs)
else:
raise Exception("aggregatebyPredicate: expected simple expr, got instead", show(fullExpr))
return aggregateBy(exprs, ttutils.getPred, aggregate, name="Predicate", debug=False)
def aggregateBySubject(exprs):
# Internal aggregate fun
def aggregate(es):
preds = [ttutils.getPred(e) for e in es]
fullExpr = es[0] # we can take any expr from group, they all have same subject
if len(preds)==2: # GF grammar works for only two -- TODO make more generic!
pr1, pr2 = preds
_, args = fullExpr.unpack()
subjs = args[-1]
return R.AggregatePred(mkPred(pr1), mkPred(pr2), subjs)
else:
raise Exception("aggregateBySubject: expected 2 preds, got instead", show(preds))
return aggregateBy(exprs, ttutils.getSubj, aggregate, name="Subject", debug=False)
def aggregateAll(exprs, typography):
"""Takes a list of expressions and typography (R.Bullets or R.Inline).
Returns the expressions aggregated and put in a single
"""
aggr = aggregateBySubject(aggregateByPredicate(exprs))
return wrapStatement(typography, aggr)
### Manipulate arguments to become input to aggregation funs
def mkPred(args):
    """Build a GF predicate from a [verb] or [verb, object] argument list."""
    if len(args)<1:
        raise Exception("mkPred: too short list", args)
    elif len(args)==2:
        # Two elements: transitive predicate (verb + object).
        p, o = args
        return R.TransPred(p,o)
    else:
        # One element: intransitive predicate.
        # NOTE(review): lists longer than 2 also land here and silently
        # drop the extra elements — confirm that cannot occur upstream.
        p = args[0]
        return R.IntransPred(p)
### Specialised versions of generic functions from ttutils
def wrapStatement(typography, statements):
return R.ConjStatement(typography, listStatement(statements))
def listArg(args):
return ttutils.listGeneric(args, R.BaseArg, R.ConsArg)
def listStatement(args):
return ttutils.listGeneric(args, R.BaseStatement, R.ConsStatement)
def show(e):
return ttutils.showExprs(e)
def prettyLin(e):
return ttutils.pretty(eng.linearize(e))
### Main function
def nlgModels(models):
concls = [m[0] for m in models]
evidence = [m[1:] for m in models]
if all(x == concls[0] for x in concls):
conclusion = concls[0]
else:
raise Exception("nlgModels: expected identical conclusions, got", show(concls))
allEvidence = [e for es in evidence for e in es] # flatten to find duplicates
sharedEvidence = [
g[0]
for g in aggregateBy(allEvidence)
if isinstance(g, list)]
aggrShared = aggregateAll(sharedEvidence, R.Bullets)
uniques = [
aggregateAll([e for e in es if e not in sharedEvidence], R.Inline)
for es in evidence]
aggrUniques = R.DisjStatement(R.Bullets, listStatement(uniques))
## Final NLG
result = [
prettyLin(conclusion) + ",",
"\nif all of the following hold:",
prettyLin(aggrShared),
"\nand one of the following holds:",
prettyLin(aggrUniques)
]
return '\n'.join(result)
###### Main
if __name__ == "__main__":
print(type(parsedTestCorpus[0]))
print(nlgModels(parsedTestCorpus))
################# TESTS #################
## Some rudimentary tests.
## TODO: use a real testing framework?
def samePred(expr1, expr2):
e1p = ttutils.getPred(expr1)
e2p = ttutils.getPred(expr2)
return e1p == e2p
samePredTrue = samePred(getExpr("A is a player"), getExpr("C is a player"))
assert samePredTrue == True
samePredFalse = samePred(getExpr("A is a player"), getExpr("C is a game"))
assert samePredFalse == False
def sameSubj(expr1, expr2):
e1s = ttutils.getSubj(expr1)
e2s = ttutils.getSubj(expr2)
return e1s == e2s
sameSubjSimpleTrue = sameSubj(getExpr("A is a participant in RPS"), getExpr("A is a player"))
assert sameSubjSimpleTrue == True
sameSubjSimpleFalse = sameSubj(getExpr("A is a participant in RPS"), getExpr("B is a player"))
assert sameSubjSimpleFalse == False
sameSubjComplexTrue = sameSubj(getExpr("A and C are participants in RPS"), getExpr("A and C are players"))
assert sameSubjComplexTrue == True
sameSubjComplexFalse = sameSubj(getExpr("A and C are participants in RPS"), getExpr("A and B are players"))
assert sameSubjComplexFalse == False
def nlgSingleModel(model):
conclusion, evidence = model[0], model[1:]
firstAggr = aggregateByPredicate(evidence)
secondAggr = aggregateBySubject(firstAggr)
finalExpr = R.IfThen(conclusion, (wrapStatement(R.Bullets, secondAggr)))
return prettyLin(finalExpr)
aRock_cScissors_gold = """A wins RPS if
* A throws rock,
* C throws scissors,
* RPS is a game,
* rock beats scissors and
* A and C are players and participants in RPS"""
# print(type(parsedTestCorpus))
# print(type(parsedTestCorpus[0]))
aRock_cScissors_system = nlgSingleModel(parsedTestCorpus[0])
with open('/tmp/gold', 'w') as g:
g.write(aRock_cScissors_gold)
with open('/tmp/system', 'w') as s:
s.write(aRock_cScissors_system)
assert aRock_cScissors_system == aRock_cScissors_gold
|
20,289 | 586bd6bbfe8468832cabeff51054f3a4df82f120 | # def countdown(x):
# while x >= 0:
# print(x)
# x = x - 1
# countdown(5)
# def printAndReturn(list):
# print(list[0])
# return(list[1])
# print(printAndReturn([1,2]))
# def firstPlusLength(list):
# return((list[0])+(len(list)))
# print(firstPlusLength([1,2,3,4,5]))
# def valuesGreaterThanSecond(list):
# i = 0
# count = 0
# arr = []
# while i < len(list):
# if(list[i] > list[1]):
# count = count + 1
# arr.append(list[i])
# i = i + 1
# print(count)
# return arr
# print(valuesGreaterThanSecond([5,2,3,2,1,4]))
# def thisLengthThatValue(l, v):
# i = 0
# arr = []
# while i < l:
# arr.append(v)
# i = i + 1
# return arr
# print(thisLengthThatValue(4,7)) |
20,290 | 1a789513a11db68b96c5a2c675df1cac83225efd | # Generated by Django 3.0 on 2021-07-14 14:16
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds the Post model and refreshes the Cart
    timestamp defaults."""

    dependencies = [
        ('addtocart', '0026_auto_20210713_1504'),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('head', models.CharField(max_length=50)),
                ('body', models.CharField(max_length=200)),
            ],
        ),
        # NOTE(review): these defaults are fixed timestamps captured when
        # makemigrations ran — the model likely evaluates datetime.now() at
        # class-definition time instead of using a callable default; confirm.
        migrations.AlterField(
            model_name='cart',
            name='arrived_at',
            field=models.DateTimeField(blank=True, default=datetime.datetime(2021, 7, 14, 14, 16, 29, 81969), null=True),
        ),
        migrations.AlterField(
            model_name='cart',
            name='finished_at',
            field=models.DateTimeField(blank=True, default=datetime.datetime(2021, 7, 16, 14, 16, 29, 78228), null=True),
        ),
    ]
|
20,291 | 457284f4c13f3562f48eeed8d6a7134a2032f315 | # Initialize a variable with a user-specified value
user = input('I am Python. What is your name? : ')
# Output a string and a variable value
print('Welcome', user)
# Initialize another variable with a user-specified value
lang = input('Favourite programming language? : ')
# Output a string a variable value
print(lang, 'Is', 'Fun', sep = ' * ', end = '!\n')
|
20,292 | 413745261cfc5044eb10bc5cce753dce8aafab82 | n=int(input())
# Tally football results.  `n` (read above) gives the number of result
# lines; each line has the form "home;home_goals;away;away_goals".
# Per team we keep a row of [played, wins, draws, losses, points].
d = {}


def _record(team, goals_for, goals_against):
    """Fold one match result into *team*'s row in the shared table `d`."""
    row = d.setdefault(team, [0, 0, 0, 0, 0])
    row[0] += 1  # games played
    if goals_for > goals_against:
        row[1] += 1  # win
        row[4] += 3
    elif goals_for == goals_against:
        row[2] += 1  # draw
        row[4] += 1
    else:
        row[3] += 1  # loss


for _ in range(n):
    home, home_goals, away, away_goals = input().split(';')
    # BUG FIX: the original compared score fields as strings, so e.g.
    # '10' < '9' lexicographically; convert to int before comparing.
    home_goals, away_goals = int(home_goals), int(away_goals)
    _record(home, home_goals, away_goals)
    _record(away, away_goals, home_goals)

# Output one line per team: "name:played wins draws losses points".
for team, (played, wins, draws, losses, points) in d.items():
    print(team + ':' + str(played), wins, draws, losses, points)
20,293 | cef42e676040b4e2c9939085b186ec3523c27944 | import luigi
from tasks.process.log_analysis import GenerateCrawlLogReports
from tasks.common import logger
class GenerateCrawlReport(luigi.Task):
    """Placeholder Luigi task for generating a crawl report.

    The requires() dependency on GenerateCrawlLogReports is currently
    commented out, so the task has no upstream dependencies.
    NOTE(review): task_namespace is 'scan' but the runner below invokes
    'report.GenerateCrawlReport' -- confirm the namespace is correct.
    """
    task_namespace = 'scan'
    # Crawl job identifier and launch timestamp, passed on the command line.
    job = luigi.Parameter()
    launch = luigi.Parameter()
    #def requires(self):
    #    return GenerateCrawlLogReports(self.job, self.launch)
if __name__ == '__main__':
    # Run with the in-process scheduler (no luigid needed).
    luigi.run(['report.GenerateCrawlReport', '--local-scheduler'])
    #luigi.run(['GenerateCrawlReport', '--date-interval', "2017-01-13-2017-01-18", '--local-scheduler'])
|
20,294 | ff4b72af33cde370cdedbba778b376ccdad62ef9 | import pandas as pd
import numpy as np
import nltk
import re
import string
import matplotlib.pyplot as plt
import matplotlib
import pdb
import seaborn as sns
from nltk.corpus import stopwords
from nltk.corpus import brown
# --- Module-level state for single-tweet prediction ----------------------
# Pre-trained word / bigram frequency tables, loaded at import time
# (the CSV files must exist on disk).
df_single_word = pd.read_csv('./df_word.csv')
df_bigrm = pd.read_csv('./df_bigrm.csv')
# Prior probability that a tweet is fake (presumably estimated from the
# training set -- TODO confirm against the training pipeline).
fake_p_prior = 0.32
word_set = set(df_single_word['word'])
bigrm_set = set(df_bigrm['two_words'])
# NLTK English stopword list (requires the 'stopwords' corpus download).
stopwordslist = set(stopwords.words('english'))
# Strips punctuation from strings; '-' is absent from this class because
# it is replaced by a space separately during cleaning.
punct_regex = re.compile('[%s]' % re.escape('!"#$%&\'()*+,./:;<=>?@[\\]^_`{|}~'))
# Number each universal POS tag 1..18 so tag bigrams map to array columns.
sing_tag_dict = {'ADJ': 1, 'ADP':2, 'ADV': 3, 'AUX': 4, 'CCONJ': 5, 'DET': 6, 'INTJ': 7, 'NOUN': 8, 'NUM': 9, 'PART': 10, 'PRON': 11, 'PROPN': 12, 'PUNCT': 13, 'SCONJ': 14, 'SYM': 15, 'VERB': 16, 'X': 17, '.': 18}
bi_tags_dict = {}
# 18 * 18 possible tag bigrams -> 1-based column index in [1, 324].
for i in sing_tag_dict.keys():
    for j in sing_tag_dict.keys():
        bi_tags_dict[tuple([i,j])] = (sing_tag_dict[i] - 1 ) * 18 + (sing_tag_dict[j])
#1 make a function to clean the tweets text
def wash_pandas_str( input_df ):
    """Clean the 'text' column of *input_df* in place and return the frame.

    Strips ellipses, curly apostrophes/quotes, URLs, "RT " retweet markers,
    @-mentions, trailing whitespace, and dashes.

    Fix: ``regex=True``/``regex=False`` is now passed explicitly.  Since
    pandas 2.0 ``Series.str.replace`` treats patterns as literal strings by
    default, which would silently disable every regex substitution below.
    """
    text = input_df['text']
    # Literal character cleanups.
    text = text.str.replace('…', '', regex=False)
    text = text.str.replace('\u2019', '', regex=False)
    # URLs: mid-string (leave one space), then end-of-string.
    text = text.str.replace(r'https\S*?\s', ' ', regex=True)
    text = text.str.replace(r'https\S*?$', '', regex=True)
    # Retweet marker and trailing whitespace.
    text = text.str.replace(r'RT\s', '', regex=True)
    text = text.str.replace(r'\s$', '', regex=True)
    # @-mentions: mid-string, then end-of-string.
    text = text.str.replace(r'@\S*?\s', '', regex=True)
    text = text.str.replace(r'@\S*?$', '', regex=True)
    # Remaining literal cleanups; '-' becomes a word separator.
    text = text.str.replace('“', '', regex=False)
    text = text.str.replace('--', '', regex=False)
    text = text.str.replace('-', ' ', regex=False)
    input_df['text'] = text
    return input_df
#2 Data analysis, train the data by using Naive Bayes
#2.1 To save testing time, I've set a default limit 2000 words, it can also be changed to higher value to get higher accuracy.
def naive_bayes_train(X_train, Y_train, limit = 2000):
    """Train per-word frequency statistics for a Naive Bayes classifier.

    X_train: DataFrame with a 'text' column of tweets.
    Y_train: Series of labels aligned with X_train (1 = fake, 0 = true).
    limit:   keep only the *limit* most frequent words.

    Returns (train_df, fake_prob_prior) where train_df has columns
    ['word', 'cnt_in_true', 'cnt_in_fake', 'freq_true', 'freq_fake',
    'total_cnt'] and fake_prob_prior is the prior P(fake).
    """
    #count the true tweets and fake tweets numbers.
    fake_cnt = len(Y_train[Y_train == 1].index)
    true_cnt = len(Y_train[Y_train == 0].index)
    #get the priori probability of fake tweet.
    fake_prob_prior = fake_cnt / (fake_cnt + true_cnt)
    #{word:(cnt_in_true, cnt_in_fake),}, cnt_in_true/fake means the number of word occurrences in true/fake tweet
    ret_dict = {}
    for ind in X_train.index:
        twit_txt = punct_regex.sub('', X_train['text'][ind])
        #use set() so each word counts at most once per tweet
        for i in set(twit_txt.lower().split()):
            if i not in stopwordslist:
                if i not in ret_dict.keys(): # new word found
                    if Y_train[ind] == 0: # new word found in true tweet
                        # counts start at (2,1)/(1,2) instead of (1,0)/(0,1):
                        # add-one smoothing so later products never hit zero.
                        ret_dict[i] = [2,1]
                    else: # new word found in fake tweet
                        ret_dict[i] = [1,2]
                else: # old word found
                    if Y_train[ind] == 0: # old word found in true tweet
                        ret_dict[i][0] += 1
                    else: # old word found in fake tweet
                        ret_dict[i][1] += 1
    #[word, cnt_in_true, cnt_in_fake, freq_true, freq_fake, total_cnt]
    train_df = pd.DataFrame.from_dict(ret_dict, orient = 'index')
    train_df = train_df.reset_index()
    train_df.columns = ['word', 'cnt_in_true', 'cnt_in_fake']
    train_df['freq_true'] = train_df['cnt_in_true'] / true_cnt
    train_df['freq_fake'] = train_df['cnt_in_fake'] / fake_cnt
    train_df['total_cnt'] = train_df['cnt_in_true'] + train_df['cnt_in_fake']
    #sort by total occurrences and keep only the top *limit* words.
    train_df = train_df.sort_values(by = ['total_cnt'],ascending=False).iloc[0:limit,:]
    return train_df, fake_prob_prior
#2.2 generate the high frequency words map
#if use ipython --pylab in Visual Studio Code can get the high quality image for word frequency
def plot_word_map(train_df, word_count = 50, xlimit = 0.3):
    """Scatter-plot word frequencies in true vs fake tweets.

    Each word from *train_df* is a bubble sized by its total count and
    labelled with slight random jitter to reduce overlap; the red diagonal
    marks equal frequency in both classes.  *word_count* is currently
    unused -- the whole of *train_df* is plotted.
    """
    X = train_df['freq_true'].tolist()
    Y = train_df['freq_fake'].tolist()
    # Bubble area scaled down so large counts stay readable.
    s = (train_df['total_cnt']/40).tolist()
    labels = train_df['word'].tolist()
    # Reference diagonal y = x.
    assist_x = [0, 0.3]
    assist_y = [0, 0.3]
    fig, ax = plt.subplots(figsize=(8, 8))
    ax.scatter(X, Y, s = s)
    plt.plot(assist_x, assist_y, color = 'r')
    for i in range(len(labels)):
        #pdb.set_trace()
        # Random jitter so overlapping labels separate visually.
        plt.text(X[i] + 0.01 * (np.random.rand()-0.4), Y[i] + 0.01 * (np.random.rand()-0.4), labels[i])
    ax.set_aspect('equal')
    plt.xlim(0,xlimit)
    plt.ylim(0,xlimit)
    plt.xlabel('Frequency in true tweets')
    plt.ylabel('Frequency in fake tweets')
    matplotlib.rcParams.update({'font.size': 16})
    fig.show()
#3 Use the train df from above function, generate the "words feature"
def naive_bayes_generate_feature(train_df, fake_prob_prior,X_input,Y_input):
    """Compute the per-tweet Naive Bayes P(fake) word feature.

    For each tweet in *X_input*, starts from the prior and folds in the
    per-word true/fake frequencies from *train_df*, renormalising after
    every word to avoid underflow.  Prints running accuracy every 1000
    tweets (prediction = P(fake) > P(true), checked against *Y_input*).
    Returns the list of P(fake) values, aligned with X_input.index.
    """
    #the most frequently occurring words
    words_set = set(train_df['word'])
    accurate_count = 0
    ret_list=[]
    j = 0
    for ind in X_input.index:
        twit_txt = punct_regex.sub('', X_input['text'][ind])
        fake_prob = fake_prob_prior #priori probability of fake tweet
        true_prob = 1 - fake_prob_prior #priori probability of true tweet
        for i in set(twit_txt.lower().split()):
            if i in words_set:
                #train_df columns (positional): word, cnt_in_true, cnt_in_fake, freq_true(3), freq_fake(4), total_cnt
                #Probability of being a true tweet, and a fake tweet (naive Bayes product)
                true_prob_temp = true_prob * train_df[train_df['word'] == i].iloc[0,3]
                fake_prob_temp = fake_prob * train_df[train_df['word'] == i].iloc[0,4]
                #renormalise after each factor so the products do not underflow
                true_prob = true_prob_temp / (fake_prob_temp + true_prob_temp)
                fake_prob = fake_prob_temp / (fake_prob_temp + true_prob_temp)
        ret_list.append(fake_prob)
        #if the probability of being a fake tweet is larger, predict fake (1)
        pred = int(fake_prob > true_prob)
        #if the prediction is correct, count it towards the accuracy
        accurate_count += (Y_input[ind] == pred)
        j += 1
        #progress indicator: this function takes minutes on the full set
        if j % 1000 == 0:
            print ('{0} processed, {1:3f}'.format( j, accurate_count/j) )
    return ret_list
def conv_array(ret_list):
    """Convert a flat list of feature values into an (n, 1) numpy column vector."""
    return np.array(ret_list).reshape(-1, 1)
#4 Bigram frequency feature:
#4.1 To save testing time, I've set a default limit 2000 two_words, it can also be changed to higher value to get higher accuracy.
def naive_bayes_bigrm_train(X_train, Y_train, limit = 2000):
    """Train per-bigram frequency statistics for a Naive Bayes classifier.

    Same scheme as naive_bayes_train but on NLTK word bigrams; returns
    (train_df_bigrm, fake_prob_prior) where the frame is keyed by
    'two_words' tuples and limited to the *limit* most frequent bigrams.
    """
    #count the true tweets and fake tweets numbers.
    fake_cnt = len(Y_train[Y_train == 1].index)
    true_cnt = len(Y_train[Y_train == 0].index)
    #get the priori probability of fake tweet.
    fake_prob_prior = fake_cnt / (fake_cnt + true_cnt)
    #{two_word:(cnt_in_true, cnt_in_fake),}, cnt_in_true/fake means the number of two_word occurrences in true/fake tweet
    ret_dict = {}
    for ind in X_train.index:
        tweet = punct_regex.sub('', X_train['text'][ind])
        tweet = tweet.lower()
        tokens = nltk.word_tokenize(tweet)
        bigrm = list(nltk.bigrams(tokens))
        # NOTE(review): unlike the word version, repeated bigrams within one
        # tweet are counted multiple times here (no set()) -- confirm intended.
        for i in bigrm:
            if i not in ret_dict.keys(): # new two_words found
                if Y_train[ind] == 0: # new two_words found in true tweet
                    # add-one style start values so products never hit zero
                    ret_dict[i] = [2,1]
                else: # new two_words found in fake tweet
                    ret_dict[i] = [1,2]
            else: # old two_words found
                if Y_train[ind] == 0: # old two_words found in true tweet
                    ret_dict[i][0] += 1
                else: # old two_words found in fake tweet
                    ret_dict[i][1] += 1
    #[two_words, cnt_in_true, cnt_in_fake, freq_true, freq_fake, total_cnt]
    train_df_bigrm = pd.DataFrame.from_dict(ret_dict, orient = 'index')
    train_df_bigrm = train_df_bigrm.reset_index()
    train_df_bigrm.columns = ['two_words', 'cnt_in_true', 'cnt_in_fake']
    train_df_bigrm['freq_true'] = train_df_bigrm['cnt_in_true'] / true_cnt
    train_df_bigrm['freq_fake'] = train_df_bigrm['cnt_in_fake'] / fake_cnt
    train_df_bigrm['total_cnt'] = train_df_bigrm['cnt_in_true'] + train_df_bigrm['cnt_in_fake']
    #sort by the bigram occurrences and keep the top *limit* bigrams.
    train_df_bigrm = train_df_bigrm.sort_values(by = ['total_cnt'],ascending=False).iloc[0:limit,:]
    return train_df_bigrm, fake_prob_prior
#4.2 Use the train df from above function, generate the "bigram feature" of train data
def naive_bayes_generate_feature_bigrm(train_df_bigrm, fake_prob_prior,X_input,Y_input):
    """Compute the per-tweet Naive Bayes P(fake) bigram feature.

    Bigram analogue of naive_bayes_generate_feature: folds the per-bigram
    frequencies from *train_df_bigrm* into the prior, renormalising after
    each factor.  Prints running accuracy every 1000 tweets and returns
    the list of P(fake) values aligned with X_input.index.
    """
    #the most frequently occurring two_words
    words_set = set(train_df_bigrm['two_words'])
    accurate_count = 0
    #the "bigram feature"--the probability of fake tweet--will be saved here
    ret_list=[]
    j = 0
    for ind in X_input.index:
        tweet = punct_regex.sub('', X_input['text'][ind])
        tweet = tweet.lower()
        tokens = nltk.word_tokenize(tweet)
        bigrm = list(nltk.bigrams(tokens))
        fake_prob = fake_prob_prior #priori probability of fake tweet
        true_prob = 1 - fake_prob_prior #priori probability of true tweet
        for i in bigrm:
            if i in words_set:
                #frame columns (positional): two_words, cnt_in_true, cnt_in_fake, freq_true(3), freq_fake(4), total_cnt
                #Probability of being a true tweet, and a fake tweet
                true_prob_temp = true_prob * train_df_bigrm[train_df_bigrm['two_words'] == i].iloc[0,3]
                fake_prob_temp = fake_prob * train_df_bigrm[train_df_bigrm['two_words'] == i].iloc[0,4]
                #renormalise after each factor so the products do not underflow
                true_prob = true_prob_temp / (fake_prob_temp + true_prob_temp)
                fake_prob = fake_prob_temp / (fake_prob_temp + true_prob_temp)
        ret_list.append(fake_prob)
        #if the probability of being a fake tweet is larger, predict fake (1)
        pred = int(fake_prob > true_prob)
        #if the prediction is correct, count it towards the accuracy
        accurate_count += (Y_input[ind] == pred)
        j += 1
        #progress indicator: this function takes minutes on the full set
        if j % 1000 == 0:
            print ('{0} processed {1:3f}'.format( j, accurate_count/j) )
    return ret_list
#5 Bigram with Tagging feature:
def tags_bigram_generate_features(X_train):
    """Build a (len(X_train), 324) count matrix of POS-tag bigrams for SVM.

    Each tweet is cleaned, tokenized, split into word bigrams; each bigram
    is POS-tagged (universal tagset) and the corresponding tag-pair column
    (via the module-level bi_tags_dict, 18 * 18 = 324 columns) is
    incremented.  Prints progress every 1000 tweets.
    """
    # 18 tags x 18 tags = 324 possible tagged bigrams.
    ret_array = np.zeros((len(X_train), 324))
    cnt = 0
    for ind in X_train.index:
        tweet = punct_regex.sub('', X_train['text'][ind])
        tweet = tweet.lower()
        tokens = nltk.word_tokenize(tweet)
        bigrm = list(nltk.bigrams(tokens))
        for i in bigrm:
            # POS-tag the two words of the bigram together.
            j = nltk.pos_tag([i[0], i[1]], tagset='universal')
            tags_bigrm = tuple([j[0][1], j[1][1]])
            if tags_bigrm in bi_tags_dict.keys(): #insurance only
                #pdb.set_trace()
                # bi_tags_dict values are 1-based, hence the -1.
                ret_array[cnt, bi_tags_dict[tags_bigrm] - 1] += 1
        cnt += 1
        if cnt%1000 == 0:
            print(str(cnt) + " processed")
    return ret_array
#6 functions for single tweet test:
#function to clean the single tweet text.
def clean_str( input_string ):
    """Clean a single tweet string: strip ellipses, curly apostrophes,
    URLs, "RT " markers, @-mentions, trailing whitespace, dashes and
    punctuation, then lowercase.

    Fix: the regex patterns (URL / RT / mention / trailing-space) were
    previously passed to plain ``str.replace``, which performs literal
    substring replacement, so none of them ever matched.  They now go
    through ``re.sub``.  The punctuation class mirrors the module-level
    ``punct_regex`` (it deliberately omits '-', handled separately).
    """
    text = input_string.replace('…', '').replace('\u2019', '')
    # URLs: mid-string (leave one space), then end-of-string.
    text = re.sub(r'https\S*?\s', ' ', text)
    text = re.sub(r'https\S*?$', '', text)
    # Retweet marker and trailing whitespace.
    text = re.sub(r'RT\s', '', text)
    text = re.sub(r'\s$', '', text)
    # @-mentions: mid-string, then end-of-string.
    text = re.sub(r'@\S*?\s', '', text)
    text = re.sub(r'@\S*?$', '', text)
    text = text.replace('“', '').replace('--', '').replace('-', ' ')
    # Strip punctuation (same character class as the module-level regex).
    text = re.sub('[%s]' % re.escape('!"#$%&\'()*+,./:;<=>?@[\\]^_`{|}~'), '', text)
    return text.lower()
def generate_feature_1( input_string ):
    """Single-tweet word feature: Naive Bayes P(fake) for one cleaned tweet.

    Uses the module-level pre-trained table df_single_word (columns
    positional: freq_true at 3, freq_fake at 4), word_set and the prior
    fake_p_prior.  Renormalises after each word to avoid underflow.
    """
    fake_prob = fake_p_prior #priori probability of fake tweet
    true_prob = 1 - fake_p_prior #priori probability of true tweet
    for i in set(input_string.lower().split()):
        if i in word_set:
            true_prob_temp = true_prob * df_single_word[df_single_word['word'] == i].iloc[0,3]
            fake_prob_temp = fake_prob * df_single_word[df_single_word['word'] == i].iloc[0,4]
            #renormalise after each factor so the products do not underflow
            true_prob = true_prob_temp / (fake_prob_temp + true_prob_temp)
            fake_prob = fake_prob_temp / (fake_prob_temp + true_prob_temp)
    return fake_prob
def generate_feature_2( text ):
    """Single-tweet bigram feature: Naive Bayes P(fake) for one cleaned tweet.

    Uses the module-level df_bigrm / bigrm_set loaded from CSV; bigrams
    there are stored as their str() representation (tuples round-tripped
    through CSV), hence the str(i) lookups below.
    """
    tokens = nltk.word_tokenize(text)
    bigrm = list(nltk.bigrams(tokens))
    fake_prob = fake_p_prior #priori probability of fake tweet
    true_prob = 1 - fake_p_prior #priori probability of true tweet
    for i in bigrm:
        #pdb.set_trace()
        if str(i) in bigrm_set:
            true_prob_temp = true_prob * df_bigrm[df_bigrm['two_words'] == str(i)].iloc[0,3]
            fake_prob_temp = fake_prob * df_bigrm[df_bigrm['two_words'] == str(i)].iloc[0,4]
            #renormalise after each factor so the products do not underflow
            true_prob = true_prob_temp / (fake_prob_temp + true_prob_temp)
            fake_prob = fake_prob_temp / (fake_prob_temp + true_prob_temp)
    return fake_prob
def generate_feature_3(text):
    """Single-tweet POS-tag-bigram feature: a length-324 count vector.

    Same scheme as tags_bigram_generate_features but for one tweet:
    each word bigram is POS-tagged (universal tagset) and its tag-pair
    column (1-based index from bi_tags_dict, hence the -1) is incremented.
    """
    ret_array = np.zeros((324))
    #simple feature from bigrams of tags for SVM.
    tokens = nltk.word_tokenize(text)
    bigrm = list(nltk.bigrams(tokens))
    for i in bigrm:
        j = nltk.pos_tag([i[0], i[1]], tagset='universal')
        tags_bigrm = tuple([j[0][1], j[1][1]])
        if tags_bigrm in bi_tags_dict.keys(): #insurance only
            #pdb.set_trace()
            ret_array[bi_tags_dict[tags_bigrm] - 1] += 1
    return ret_array
|
20,295 | 4a32fc00a2b8fd0a8b35b1db3fc325da23bfc2cf | import numpy as np
# 8x8 Hill-cipher key matrix, used modulo 256.
HILL_KEY = [[21, 109, 119, 23, 88, 15, 116, 66], [22, 119, 70, 118, 111, 82, 121, 98], [79, 86, 2, 96, 90, 54, 95, 83], [22, 100, 113, 122, 92, 6, 52, 60], [1, 9, 9, 4, 112, 13, 26, 74],
            [3, 100, 92, 83, 51, 122, 102, 63], [71, 110, 92, 74, 26, 96, 92, 24], [30, 10, 85, 92, 47, 91, 114, 108]]
# Decryption matrix -- presumably the inverse of HILL_KEY mod 256 (used as
# such by hill() below); verify before changing either table.
HILL_KEY_REVERSE = [[138, 124, 28, 104, 136, 176, 193, 182], [65, 229, 101, 214, 103, 57, 4, 224], [140, 138, 214, 71, 46, 62, 148, 184], [77, 64, 202, 44, 119, 246, 60, 86],
                    [69, 173, 41, 8, 106, 175, 255, 119], [105, 45, 131, 23, 116, 193, 29, 114], [190, 79, 82, 26, 81, 22, 187, 253], [70, 99, 51, 2, 221, 248, 152, 59]]
DES_KEY = [65, 66, 67, 68, 69, 70, 71, 72]  # "ABCDEFGH" as byte values
def get_content():
    """Prompt on stdin for the text to encrypt and return it as a string."""
    content = input("Enter the word to encrypt:")
    return content
def string_to_ascii_list(content):
    """Map each character of *content* to its ordinal (code point) value."""
    return [ord(ch) for ch in content]
def ascii_list_to_bin_list(asciiList, binLen=8):
    """Flatten a list of non-negative integers into a single list of bits.

    Each value is zero-padded on the left to *binLen* bits; values that
    need more than binLen bits keep all of their bits (no truncation).
    """
    bits = []
    for value in asciiList:
        bits.extend(int(digit) for digit in format(value, 'b').zfill(binLen))
    return bits
def bin_to_string(binList, binFormatLen=8):
    """Decode a flat bit list into a string, *binFormatLen* bits per char.

    Bits are read big-endian within each group; a trailing partial group
    (fewer than binFormatLen bits) is silently dropped.
    """
    chars = []
    whole = len(binList) - len(binList) % binFormatLen
    for start in range(0, whole, binFormatLen):
        code = 0
        for bit in binList[start:start + binFormatLen]:
            code = code * 2 + bit
        chars.append(chr(code))
    return "".join(chars)
def ascii_list_to_string(list):
    """Decode a list of ordinal values back into a string.

    NOTE(review): the parameter name shadows the builtin ``list``; kept
    unchanged for interface compatibility with existing callers.
    """
    return "".join(chr(code) for code in list)
def padding_content(content, blocksize=64):
    """Zero-pad *content* in place up to the next multiple of *blocksize*.

    A list whose length is already a multiple of blocksize (including the
    empty list) is returned unchanged.  Returns the same list object.
    """
    content.extend([0] * (-len(content) % blocksize))
    return content
def drop_padding(content):
    """Truncate *content* at the first zero value (the padding marker).

    If no zero is present the original list is returned unchanged.
    """
    try:
        return content[:content.index(0)]
    except ValueError:
        return content
def content_to_block_array(content):
    """Split a flat list into 8x8 blocks (each a list of 8 rows of 8 values).

    Any trailing remainder shorter than 64 values is dropped.
    """
    blocks = []
    usable = len(content) // 64 * 64
    for base in range(0, usable, 64):
        block = [content[base + row * 8: base + row * 8 + 8] for row in range(8)]
        blocks.append(block)
    return blocks
def content_to_des_block_array(content):
    """Split a flat bit list into flat 64-bit blocks; trailing remainder
    shorter than 64 bits is dropped."""
    usable = len(content) // 64 * 64
    return [content[base:base + 64] for base in range(0, usable, 64)]
def block_array_to_content(contentBlockArray, block_height=8, block_length=8):
    """Flatten a list of blocks (each a list of rows) back into one flat
    list.  The size parameters are unused; kept for interface compatibility."""
    return [item
            for block in contentBlockArray
            for row in block
            for item in row]
def des_block_array_to_content(contentBlockArray):
    """Flatten a list of flat 64-bit blocks into one flat bit list."""
    return [bit for block in contentBlockArray for bit in block]
def block_to_content(contentBlock, block_height=8, block_length=8):
    """Flatten one block (a list of rows) into a flat list.  The size
    parameters are unused; kept for interface compatibility."""
    return [item for row in contentBlock for item in row]
def hill_encrypt_block_array(contentBlockArray, keyBlock, field):
    """Hill-encrypt every 8x8 block with *keyBlock*, reducing mod *field*.

    Fix: removed the unused ``keyBlockNum`` local left over from an
    earlier multi-key design.
    """
    return [hill_encrypt_block(contentBlock, keyBlock, field)
            for contentBlock in contentBlockArray]
def hill_decrypt_block_array(contentBlockArray, keyBlock, field):
    """Hill-decrypt every 8x8 block with the inverse key matrix mod *field*."""
    return [hill_decrypt_block(block, keyBlock, field)
            for block in contentBlockArray]
def hill_encrypt_block(contentBlock, keyBlock, field):
    """Hill encryption of one block: row-vector matrix product mod *field*.

    Returns the result as a plain nested list.
    """
    product = np.dot(np.array(contentBlock), np.array(keyBlock))
    return (product % field).tolist()
def hill_decrypt_block(contentBlock, keyBlock, field):
    """Hill decryption of one block: same matrix product mod *field*,
    with *keyBlock* expected to be the inverse key matrix."""
    result = np.matmul(np.array(contentBlock), np.array(keyBlock)) % field
    return result.tolist()
def des_string_proc(content):
    """Prepare a plaintext string for DES: chars -> ordinals -> 8-bit
    groups -> zero-padded -> flat 64-bit blocks."""
    return content_to_des_block_array(padding_content(ascii_list_to_bin_list(string_to_ascii_list(content))))
def des_ascii_list_proc(content, formatBase=8):
    """Same as des_string_proc but starting from a list of integers,
    each encoded on *formatBase* bits."""
    return content_to_des_block_array(padding_content(ascii_list_to_bin_list(content, formatBase)))
# def des_encypt_block_array(content,keyBlock):
# cipherBlockArray = []
# contentBlockArray=des_content_proc(content)
# keyBlockNum = 0
# for contentBlock in contentBlockArray:
# outMetrix = des_encypt_block(contentBlock, keyBlock)
# cipherBlockArray.append(outMetrix)
# return cipherBlockArray
def des_encypt_block_array(contentBlockArray, keyBlock, keyBlockFormatBase=8):
    """DES-encrypt every 64-bit block in *contentBlockArray*.

    The 16 round subkeys are derived once from *keyBlock* (a list of
    integers, each *keyBlockFormatBase* bits wide) and reused for all
    blocks.  A trace of the subkeys is appended to ``debug.txt``.
    """
    cipherBlockArray = []
    subKeyArray = get_sub_key(keyBlock, keyBlockFormatBase)
    # Debug trace: dump the encryption subkeys (the label text is Chinese,
    # "encryption subkeys").
    file = open("debug.txt", "a")
    file.write("\n加密子密钥:\n")
    file.writelines(str(subKeyArray))
    file.close()
    for contentBlock in contentBlockArray:
        outMetrix = des_encypt_block(contentBlock, subKeyArray, keyBlockFormatBase)
        cipherBlockArray.append(outMetrix)
    return cipherBlockArray
def des_decrypt_block_array(contentBlockArray, keyBlock, keyBlockFormatBase=8):
    """DES-decrypt every 64-bit block in *contentBlockArray*.

    Decryption reuses the encryption routine with the 16 round subkeys
    applied in reverse order.  A trace of the reversed subkeys is
    appended to ``debug.txt``.
    """
    cipherBlockArray = []
    subKeyArray = get_sub_key(keyBlock, keyBlockFormatBase)
    # DES decryption = encryption with the subkey schedule reversed.
    subDecryptKeyArray = subKeyArray[::-1]
    # Debug trace (the label text is Chinese, "decryption subkeys").
    file = open("debug.txt", "a")
    file.write("\n解密子密钥:\n")
    file.writelines(str(subDecryptKeyArray))
    file.close()
    for contentBlock in contentBlockArray:
        outMetrix = des_encypt_block(contentBlock, subDecryptKeyArray, keyBlockFormatBase)
        cipherBlockArray.append(outMetrix)
    return cipherBlockArray
def list_xor(list1, list2):
    """Element-wise XOR of two bit lists; *list2* must be at least as long
    as *list1* (indexing raises IndexError otherwise)."""
    return [list1[i] ^ list2[i] for i in range(len(list1))]
# def des_key_proc(keyBlock):
# return ascii_list_to_bin_list(keyBlock)
def get_sub_key(keyBlock, keyBlockFormatBase=8):
    """Derive the 16 DES round subkeys (48 bits each) from *keyBlock*.

    *keyBlock* is a list of integers, each *keyBlockFormatBase* bits wide;
    it is expanded to bits, reduced to 56 bits via PC-1, then rotated and
    compressed to 48-bit round keys via PC-2.  The raw key bits are
    appended to ``debug.txt`` (the label text is Chinese, "key").
    """
    key = ascii_list_to_bin_list(keyBlock, keyBlockFormatBase)
    file = open("debug.txt", "a")
    file.write("\n密钥:\n")
    file.writelines(str(key))
    file.close()
    key56 = des_key_do_pc_1(key)
    keyBlock = des_key_do_shift_pc_2(key56)
    return keyBlock
def des_do_extend_permutation(content32List):
    """Expansion permutation E: expand the 32-bit right half to 48 bits.

    Applied to R before XOR with the round subkey; the table duplicates
    edge bits of each 4-bit group (output is 8 groups of 6 bits).
    """
    E = [32, 1, 2, 3, 4, 5, 4, 5, 6, 7, 8, 9, 8, 9, 10, 11, 12, 13, 12, 13, 14, 15, 16, 17, 16, 17, 18, 19, 20, 21, 20, 21, 22, 23, 24, 25, 24, 25, 26, 27, 28, 29, 28, 29, 30, 31, 32, 1]
    expanded = []
    for position in E:       # table entries are 1-based bit positions
        expanded.append(content32List[position - 1])
    return expanded
def des_key_do_pc_1(keyList):
    """Permuted choice 1 (PC-1): reduce the 64-bit key to 56 bits by
    dropping every byte's 8th (parity) bit and permuting the rest."""
    PC = [
        57, 49, 41, 33, 25, 17, 9, 1, 58, 50, 42, 34, 26, 18, 10, 2, 59, 51, 43, 35, 27, 19, 11, 3, 60, 52, 44, 36, 63, 55, 47, 39, 31, 23, 15, 7, 62, 54, 46, 38, 30, 22, 14, 6, 61, 53, 45, 37, 29,
        21, 13, 5, 28, 20, 12, 4
    ]
    # Table entries are 1-based bit positions into the 64-bit key.
    return [keyList[position - 1] for position in PC]
def des_key_do_shift_pc_2(keyList):
    """Produce all 16 DES round subkeys from the 56-bit PC-1 output.

    Each round, the two 28-bit halves are rotated left by the round's
    shift amount (MOV), then PC-2 compresses the recombined 56 bits to a
    48-bit subkey.  Returns a list of 16 48-bit subkeys.
    """
    PC = [14, 17, 11, 24, 1, 5, 3, 28, 15, 6, 21, 10, 23, 19, 12, 4, 26, 8, 16, 7, 27, 20, 13, 2, 41, 52, 31, 37, 47, 55, 30, 40, 51, 45, 33, 48, 44, 49, 39, 56, 34, 53, 46, 42, 50, 36, 29, 32]
    # Per-round left-rotation amounts (16 rounds).
    MOV = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]
    result = []
    key56=keyList
    for i in range(16):
        # Rotate each 28-bit half independently; the rotation accumulates
        # across rounds because key56 is carried forward.
        key0 = des_do_shift(key56[:28], MOV[i])
        key1 = des_do_shift(key56[28:], MOV[i])
        key56 = key0 + key1
        # PC-2: compress the 56-bit state to this round's 48-bit subkey.
        key48 = [key56[PC[j] - 1] for j in range(48)]
        result.append(key48)
    return result
def des_do_shift(keyList, mov):
    """Rotate the sequence *keyList* left by *mov* positions."""
    head, tail = keyList[:mov], keyList[mov:]
    return tail + head
def des_do_s_box(list48):
    """S-box substitution: map 48 input bits to 32 output bits.

    The input is split into 8 groups of 6 bits.  For group i, the outer
    bits (first and last) select the S_BOX[i] row and the middle four bits
    select the column; the looked-up 4-bit value is emitted.  The eight
    4-bit results are flattened to 32 bits via ascii_list_to_bin_list.
    """
    # The 8 standard DES S-boxes, each 4 rows x 16 columns.
    S_BOX = [[
        [14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],
        [0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],
        [4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],
        [15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13],
    ],
        [
        [15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],
        [3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],
        [0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],
        [13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9],
    ],
        [
        [10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],
        [13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1],
        [13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],
        [1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12],
    ],
        [
        [7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],
        [13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],
        [10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],
        [3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14],
    ],
        [
        [2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9],
        [14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],
        [4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],
        [11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3],
    ],
        [
        [12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],
        [10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],
        [9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],
        [4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13],
    ],
        [
        [4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],
        [13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6],
        [1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2],
        [6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12],
    ],
        [
        [13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7],
        [1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2],
        [7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8],
        [2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11],
    ]]
    result = []
    for i in range(0, 8):
        temp = list48[i * 6:i * 6 + 6]
        # Outer two bits -> row; inner four bits -> column.
        row = int(str(temp[0]) + str(temp[-1]), 2)
        column = int(str(temp[1]) + str(temp[2]) + str(temp[3]) + str(temp[4]), 2)
        letter = S_BOX[i][row][column]
        result.append(letter)
    # Re-expand the eight 4-bit values into a flat list of 32 bits.
    return ascii_list_to_bin_list(result, 4)
def des_do_p_box(list32):
    """P permutation applied after the S-boxes: reorder the 32 bits."""
    P_BOX = [16, 7, 20, 21, 29, 12, 28, 17, 1, 15, 23, 26, 5, 18, 31, 10, 2, 8, 24, 14, 32, 27, 3, 9, 19, 13, 30, 6, 22, 11, 4, 25]
    # Table entries are 1-based bit positions.
    return [list32[position - 1] for position in P_BOX]
def des_do_right32(left32, right32, subKey):
    """One application of the DES Feistel function.

    Computes f(R, K) = P(S(E(R) XOR K)) and returns L XOR f(R, K),
    i.e. the next round's right half.
    """
    right48 = des_do_extend_permutation(right32)   # E: 32 -> 48 bits
    right48tmp = list_xor(right48, subKey)         # mix in the round key
    right32tmp = des_do_s_box(right48tmp)          # S-boxes: 48 -> 32 bits
    right32tmp = des_do_p_box(right32tmp)          # P permutation
    right32 = list_xor(left32, right32tmp)         # L XOR f(R, K)
    return right32
def des_encypt_block(contentBlock, subKeyArray, keyBlockFormatBase=8):
    """Encrypt one 64-bit block with the given 16 round subkeys.

    Standard DES structure: initial permutation IP, 16 Feistel rounds,
    final 32-bit half swap, inverse permutation IP^-1.  Passing the
    subkeys reversed performs decryption.  Each round's state is appended
    to ``debug.txt`` (Chinese label: "round N output").
    """
    # step1
    '''Initial permutation IP'''
    text = des_do_ip(contentBlock)
    # step2
    '''16 Feistel rounds'''
    # subKeyArray=get_sub_key(keyBlock,keyBlockFormatBase)
    for i in range(16):
        l,r=text[:32], text[32:]
        lNext=r
        rNext=des_do_right32(l, r, subKeyArray[i])
        text=lNext+rNext
        file = open("debug.txt", "a")
        file.write("\n第" + str(i + 1) + "轮输出:\n")
        file.writelines(str(text))
        file.close()
        # print("第"+str(i+1)+"轮输出:")
        # print(round[i])
    # step3
    '''Swap the halves, then apply the inverse initial permutation IP^-1'''
    text = text[32:] + text[:32]
    out = des_do_ip_inverse(text)
    return out
def des_do_ip(contentBlock):
    """Initial permutation IP: reorder the 64 input bits per the DES table."""
    IP = [
        58, 50, 42, 34, 26, 18, 10, 2, 60, 52, 44, 36, 28, 20, 12, 4, 62, 54, 46, 38, 30, 22, 14, 6, 64, 56, 48, 40, 32, 24, 16, 8, 57, 49, 41, 33, 25, 17, 9, 1, 59, 51, 43, 35, 27, 19, 11, 3, 61, 53,
        45, 37, 29, 21, 13, 5, 63, 55, 47, 39, 31, 23, 15, 7
    ]
    permuted = []
    for position in IP:      # table entries are 1-based bit positions
        permuted.append(contentBlock[position - 1])
    return permuted
def des_do_ip_inverse(contentBlock):
    """Final permutation IP^-1: undo the initial permutation on 64 bits."""
    IP_INVERSE = [
        40, 8, 48, 16, 56, 24, 64, 32, 39, 7, 47, 15, 55, 23, 63, 31, 38, 6, 46, 14, 54, 22, 62, 30, 37, 5, 45, 13, 53, 21, 61, 29, 36, 4, 44, 12, 52, 20, 60, 28, 35, 3, 43, 11, 51, 19, 59, 27, 34, 2,
        42, 10, 50, 18, 58, 26, 33, 1, 41, 9, 49, 17, 57, 25
    ]
    permuted = []
    for position in IP_INVERSE:   # table entries are 1-based bit positions
        permuted.append(contentBlock[position - 1])
    return permuted
def hill():
    """Demo: Hill-encrypt a sample message with HILL_KEY mod 256, print
    the ciphertext, then decrypt with HILL_KEY_REVERSE and print the
    recovered plaintext."""
    # text = content_to_block_array(padding_content(get_content()))
    message = "Typora will give you a seamless experience as both a reader and a writer. It removes the preview window, mode switcher, syntax symbols of markdown source code, and all other unnecessary distractions.."
    text = content_to_block_array(padding_content(string_to_ascii_list(message)))
    print("明文数组")
    print(text)
    # Hill encryption
    cipher = hill_encrypt_block_array(text, HILL_KEY, 256)
    # NOTE(review): drop_padding truncates at the first zero byte, but a
    # ciphertext byte can legitimately be 0 -- this may shorten the output.
    cipher = drop_padding(block_array_to_content(cipher))
    print("HILL 密文:")
    # print(cipher)
    print(ascii_list_to_string(cipher))
    # Hill decryption
    cipher = content_to_block_array(padding_content(cipher))
    plain = hill_decrypt_block_array(cipher, HILL_KEY_REVERSE, 256)
    plain = drop_padding(block_array_to_content(plain))
    print("HILL 解密文:")
    # print(plain)
    print(ascii_list_to_string(plain))
def des():
    """Demo: DES-encrypt a fixed 8-char message with DES_KEY, print the
    ciphertext, then decrypt it and print the recovered plaintext.
    Round-by-round traces are appended to ``debug.txt``."""
    message = "aaaaaaaa"
    # message=[15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14]
    # key=[5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5]z
    message = des_string_proc(message)
    file = open("debug.txt", "a")
    file.write("DES 文本:\n")
    file.writelines(str(message))
    file.write("DES 加密开始\n")
    file.close()
    # DES encryption
    cipher = des_encypt_block_array(message, DES_KEY)
    cipher = des_block_array_to_content(cipher)
    # print("DES 密文数组:")
    # print(cipher)
    print("DES 密文 ASCII:")
    print(bin_to_string(cipher))
    file = open("debug.txt", "a")
    file.write("\n\nDES 密文数组:\n")
    file.writelines(str(cipher))
    file.write("\n\nDES 解密开始\n")
    file.close()
    # DES decryption
    cipher = content_to_des_block_array(cipher)
    plain = des_decrypt_block_array(cipher, DES_KEY)
    # NOTE(review): 'palin' is a typo for 'plain' but it is used
    # consistently below, so behavior is unaffected.
    palin = des_block_array_to_content(plain)
    # print("DES 明文数组:")
    # print(palin)
    print("DES 明文 ASCII:")
    print(bin_to_string(palin))
    file = open("debug.txt", "a")
    file.write("\n\nDES 明文数组:\n")
    file.writelines(str(palin))
    file.close()
if __name__ == "__main__":
hill()
des() |
20,296 | a2f9c5dc6a79e5c3691d14115b2f324042c11a23 | A_0203_8 = {0: {'A': 0.09260914508442766, 'C': -0.055842410332572236, 'E': -0.3237738512971216, 'D': -0.3237738512971216, 'G': -4.0, 'F': 0.5917842377663532, 'I': 0.7378292207178623, 'H': -0.432906376367205, 'K': -0.432906376367205, 'M': 0.5432476943183139, 'L': 0.35845112187102407, 'N': 0.16009500333529633, 'Q': -0.01855040550629643, 'P': -4.0, 'S': 0.639648423026295, 'R': -0.5603995474769126, 'T': -0.5058245686981333, 'W': 0.4454052228054611, 'V': -0.1818823898945458, 'Y': 0.6611209064731303}, 1: {'A': -0.2672333895677309, 'C': -0.4387840922483413, 'E': -0.4034109306495243, 'D': -0.47264480959625693, 'G': -4.0, 'F': -0.060389530160822776, 'I': 0.8134150443683076, 'H': -0.6981830595546019, 'K': -0.6981830595546019, 'M': 0.8134150443683076, 'L': 1.284933344625203, 'N': -0.2843470728996994, 'Q': -0.2843470728996994, 'P': -0.8509836054074027, 'S': -0.4387840922483413, 'R': -0.6602253929302401, 'T': -0.5051824995083194, 'W': -0.060389530160822776, 'V': -0.27167556428897965, 'Y': -0.060389530160822776}, 2: {'A': 0.5944967243530065, 'C': -0.1331071980408997, 'E': -0.44991829196421135, 'D': -0.44991829196421135, 'G': -4.0, 'F': 0.0480091971149035, 'I': -0.060910697021088235, 'H': -0.2960414820391224, 'K': -0.16214418685980708, 'M': 0.31870745030596187, 'L': 0.17249942369917873, 'N': -0.21323348425458497, 'Q': -0.21323348425458497, 'P': 0.013500177056832884, 'S': 0.3392036140548539, 'R': -0.28684854054720765, 'T': -0.4047432021454702, 'W': 0.09213686661979076, 'V': 0.024167506418157222, 'Y': 0.23320436941780093}, 3: {'A': -0.45119238630881936, 'C': -0.017753603886303048, 'E': -0.3024559078237844, 'D': 0.1979379544185194, 'G': 0.5187730968633795, 'F': 0.40301061188302617, 'I': 0.044071190879106645, 'H': -0.5790212159745092, 'K': -0.7190773351082351, 'M': 0.07306025317323778, 'L': 0.5521037532261205, 'N': 0.6683477977546922, 'Q': -0.4731044582458323, 'P': 0.25790621987206236, 'S': -0.18512153706813617, 'R': 
-0.2736458903737848, 'T': -0.017753603886303048, 'W': 0.40301061188302617, 'V': -0.4024734320254284, 'Y': 0.40301061188302617}, 4: {'A': -0.21166324861165306, 'C': 0.4038774903975255, 'E': -4.0, 'D': -4.0, 'G': -4.0, 'F': 0.128146179675162, 'I': 0.39616299739384064, 'H': -0.4961438373508598, 'K': -0.22592258747230348, 'M': -0.26897097310492857, 'L': 0.5110235159547518, 'N': -0.5744385554381883, 'Q': -0.5744385554381883, 'P': -0.24348070301476793, 'S': 0.2714059323833559, 'R': -0.4327101711686107, 'T': 0.31407213371612497, 'W': 0.0700533530880546, 'V': 1.1224851556845803, 'Y': 0.04810907958553632}, 5: {'A': 0.06126330113553939, 'C': -0.24726893217963136, 'E': -0.2524981572064032, 'D': -0.05947400252871723, 'G': -0.14531564904288058, 'F': 0.3031427936619408, 'I': 0.1734039731710738, 'H': -0.20065830621643388, 'K': -0.13128090933290837, 'M': 0.6084101278060998, 'L': 0.7123749318815351, 'N': -0.7355694854078886, 'Q': -0.549397255101923, 'P': -4.0, 'S': -0.15250637237103232, 'R': -0.1597094699718068, 'T': -0.5021838995482352, 'W': 0.06509043782539037, 'V': 0.6084101278060998, 'Y': 0.06509043782539037}, 6: {'A': 0.18885660137286675, 'C': 0.32103563700866794, 'E': 0.7470030835769148, 'D': -0.13994263832237933, 'G': 0.4958040595465809, 'F': -0.5158972753996656, 'I': -0.2603316309203583, 'H': -0.3451104237986978, 'K': -0.3451104237986978, 'M': -0.43856789104712984, 'L': -0.44549036746130627, 'N': -0.2194109175293774, 'Q': -0.45461463096632054, 'P': 0.9108004345404269, 'S': -0.1377162864029968, 'R': -0.28237775462120024, 'T': 0.8038535015561439, 'W': -0.5158972753996656, 'V': -0.43856789104712984, 'Y': -0.5158972753996656}, 7: {'A': 0.01986027046527329, 'C': -4.0, 'E': -4.0, 'D': -4.0, 'G': -4.0, 'F': -4.0, 'I': -0.47798394667218896, 'H': -4.0, 'K': -4.0, 'M': 0.46422218178701863, 'L': 0.27049422758067765, 'N': -4.0, 'Q': -4.0, 'P': -4.0, 'S': -4.0, 'R': -4.0, 'T': -4.0, 'W': -4.0, 'V': 0.8112761915974741, 'Y': -4.0}, -1: {'slope': 0.17023328482584466, 'intercept': 
-0.6108292920939158}} |
20,297 | 52415f516f6d3af07b66264ffd5ec732a0e9c09c | import time
def f():
    """Sleep inside a multi-line list comprehension.

    Regression fixture for py-spy (see the linked PR): the comprehension
    is deliberately split across multiple lines to reproduce the reported
    error, so do not reformat it onto one line.
    """
    [
        # Must be split over multiple lines to see the error.
        # https://github.com/benfred/py-spy/pull/208
        time.sleep(1)
        for _ in range(1000)
    ]
f()
|
20,298 | 29c68db38468c739b058ffe0d9194741ea7c7ab4 | #!/usr/bin/python
#coding:utf-8
#2016年9月9日
#用sprite的知识来改写snake游戏,注意有关snake的坐标都是y在前,x在后
#修改了初始界面,点击可变色按钮再进入游戏
#修改了点击按钮Restart游戏的界面,删除了is_over标志
#由于pygame的sprite group add方法是类似于字典的add,无法像list一样便捷操作
#仅仅是将每个snake方块变成class snake的实例,再统一放到snake_group(list)中
#尝试用碰撞检测的方法来判断蛇头和蛇身碰撞的问题
import pygame
import sys
from pygame.locals import *
from random import randrange
import time
def print_text(text_surface,x,y):
    """Blit a pre-rendered text surface onto the module-level ``screen``
    at pixel position (x, y)."""
    screen.blit(text_surface,(x,y))
class Button(object):
    """A clickable image button; clicking it sets a state flag.

    *position* is the button's center; the image is drawn centered there.
    """
    def __init__(self,position,up_image_file,down_image_file=None):
        self.position = position
        self.image = pygame.image.load(up_image_file)
        # Optional "pressed/hover" image, used by render2().
        if down_image_file == None:
            pass
        else:
            self.down_image_file = pygame.image.load(down_image_file)
        self.game_start = False
        self.game_restart = False
    def render(self,surface):
        """Draw the button centered on self.position."""
        x, y = self.position
        w, h = self.image.get_size()
        x -= w/2
        y -= h/2
        surface.blit(self.image,(x,y))
    def is_push(self):
        """Return True if the mouse cursor is inside the button's rectangle."""
        point_x, point_y = pygame.mouse.get_pos()
        x, y = self.position
        w, h = self.image.get_size()
        x -= w/2
        y -= h/2
        if_in_x = point_x >= x and point_x < x+w
        if_in_y = point_y >= y and point_y < y+h
        return if_in_x and if_in_y
    def is_start(self):
        """Set game_start when the left mouse button is pressed over the button."""
        if self.is_push():
            b1,b2,b3 = pygame.mouse.get_pressed()
            if b1 == True:
                self.game_start = True
    def is_restart(self):
        """Set game_restart when the left mouse button is pressed over the button."""
        if self.is_push():
            b1,b2,b3 = pygame.mouse.get_pressed()
            if b1 == True:
                self.game_restart = True
    def render2(self,surface):
        """Draw the button, using the 'down' image while hovered."""
        x,y = self.position
        w,h = self.image.get_size()
        x -= w/2
        y -= h/2
        if self.is_push(): # hover: show the alternate image
            surface.blit(self.down_image_file,(x,y))
        else:
            surface.blit(self.image,(x,y))
    def get_image_size(self):
        """Return (width, height) of the button's up image."""
        return self.image.get_size()
#精灵类————食物
class Food(pygame.sprite.Sprite):
    """A 20x20 food square sprite placed at a fixed pixel position."""

    def __init__(self, color, position):
        # The Sprite base initialiser is required before group membership.
        super(Food, self).__init__()
        square = pygame.Surface([20, 20])
        square.fill(color)
        self.image = square
        self.rect = self.image.get_rect()
        self.rect.topleft = position

    def getFoodPosition(self):
        """Return the (x, y) pixel coordinates of the food's top-left corner."""
        return self.rect.x, self.rect.y
class Snake(pygame.sprite.Sprite):
    """One 20x20 segment of the snake's body."""

    def __init__(self, color, position):
        # The Sprite base initialiser must run before using sprite groups.
        super(Snake, self).__init__()
        segment = pygame.Surface([20, 20])
        segment.fill(color)
        self.image = segment
        self.rect = self.image.get_rect()
        self.rect.topleft = position

    def draw(self):
        """Blit this segment onto the module-level screen surface."""
        screen.blit(self.image, self.rect)

    def getpos(self):
        """Return this segment's top-left pixel coordinates."""
        return self.rect.topleft
def SnakeMove(snake_list, dir):
    """Return a new snake body advanced one 20px cell in direction *dir*.

    *dir* must be one of the module-level direction callables ``up``,
    ``down``, ``left`` or ``right`` (the parameter name shadows the builtin
    ``dir`` but is kept for interface compatibility).  A new blue head
    sprite is created in front of the current head and the tail segment is
    dropped, so the body length stays constant.  The input list is not
    mutated; a new list is returned.

    Raises KeyError for an unknown direction (the original repeated
    if-chain died with a NameError on an unbound local in that case).
    """
    # Grid deltas (in 20px cells) keyed by the direction callables.
    deltas = {up: (0, -1), down: (0, 1), left: (-1, 0), right: (1, 0)}
    dx, dy = deltas[dir]
    head = snake_list[0].rect
    # Snap to the 20px grid like the original arithmetic; positions are
    # always multiples of 20 in this game, so this is value-identical.
    head_x = (head.x//20 + dx)*20
    head_y = (head.y//20 + dy)*20
    new_head = Snake((0,0,255), [head_x, head_y])
    return [new_head] + snake_list[:-1]
def SnakeGrow(snake_list, dir):
    """Return a new snake body grown by one 20px cell in direction *dir*.

    Like ``SnakeMove`` a new blue head sprite is created in front of the
    current head, but the tail is kept, so the returned body is one
    segment longer.  The input list is not mutated.

    Raises KeyError for an unknown direction (the original repeated
    if-chain died with a NameError on an unbound local in that case).
    """
    # Grid deltas (in 20px cells) keyed by the direction callables.
    deltas = {up: (0, -1), down: (0, 1), left: (-1, 0), right: (1, 0)}
    dx, dy = deltas[dir]
    head = snake_list[0].rect
    # Positions are always multiples of 20, so grid-snapping is value-identical
    # to the original per-direction arithmetic.
    head_x = (head.x//20 + dx)*20
    head_y = (head.y//20 + dy)*20
    new_head = Snake((0,0,255), [head_x, head_y])
    return [new_head] + snake_list
# Direction helpers operating on (row, col) grid cells, plus the original
# list-based move/grow helpers (still referenced by commented-out legacy code).
def up(x):
    """Cell one row above x."""
    return (x[0] - 1, x[1])

def down(x):
    """Cell one row below x."""
    return (x[0] + 1, x[1])

def left(x):
    """Cell one column to the left of x."""
    return (x[0], x[1] - 1)

def right(x):
    """Cell one column to the right of x."""
    return (x[0], x[1] + 1)

def move(x, y):
    """Shift body x one step in direction y: prepend the new head, drop the tail."""
    return [y(x[0])] + x[:-1]

def grow(x, y):
    """Grow body x one step in direction y: prepend the new head, keep the tail."""
    return [y(x[0])] + x
d = up       # current movement direction (one of the direction callables)
init_d = up  # direction restored on restart
# Build the initial snake body as a list of Snake sprites.  Grid cells are
# multiplied by the 20px cell size to get pixel positions.
snake_group = []
for init_snake_pos in [(5,10),(5,11),(5,12)]:
    temp = Snake([0,0,255],(init_snake_pos[0]*20,init_snake_pos[1]*20))
    snake_group.append(temp)
# Kept so the body can be restored on restart.  NOTE(review): this is an
# alias, not a copy -- it only works because movement rebinds snake_group to
# new lists instead of mutating this one; verify on restart paths.
init_snake_group = snake_group
print type(snake_group[0])
#food = randrange(0,30),randrange(0,40)  # old food code; lacked the *20 scaling applied below
# Spawn the food sprite, retrying until it does not overlap the snake's
# starting cells.  (During play the food may respawn on the body; only a
# head collision counts as "eaten".)
# NOTE(review): food_position is in pixels (multiples of 20) but the start
# cells below are grid coordinates, so this overlap check can never match.
is_food_snake_overlap = True
while is_food_snake_overlap:
    food_position = randrange(0,40)*20,randrange(0,30)*20  # pixel coords; board is 40x30 cells of 20px
    if food_position not in [(5,10),(5,11),(5,12)]:
        is_food_snake_overlap = False
food = Food((0,255,0),food_position)
food_group = pygame.sprite.Group()
food_group.add(food)
FPSCLOCK = pygame.time.Clock()
pygame.init()
pygame.display.set_mode((800,600))
pygame.mouse.set_visible(0)
screen = pygame.display.get_surface()
screen.fill((255,255,255))
times = 0.0  # millisecond accumulator driving the snake's step rate
# Game-state flags
is_over = False    # True once the snake has died
is_collide = None  # head-vs-body collision result from spritecollideany
snake_no_head = pygame.sprite.Group()  # body segments (head excluded) for collision tests
# Start screen: title text plus a clickable Start button
welcome_font = pygame.font.SysFont("arial",64)
welcome_text = welcome_font.render("Welcome To Play The Snake!",True,(0,255,0),(255,255,255))
button_x = 400
button_y = 400
up_image_file = "game_start_up.png"
down_image_file = "game_start_down.png"
buttons = {}
buttons["Start"] = Button((button_x,button_y),up_image_file,down_image_file)
# "GAME OVER!" banner shown when the snake hits a wall or itself
gameover_font = pygame.font.SysFont("arial",64)
gameover_text = gameover_font.render("GAME OVER!",True,(255,0,0),(255,255,255))
# A Restart button added to the game-over screen
button_x = 400
button_y = 400
button_width = 30
buttons["Restart"] = Button((button_x,button_y),"Restart.png",None)
my_font = pygame.font.SysFont("arial",24)
restart_text = my_font.render("Restart",True,(0,0,0),(255,255,255))
w, h =buttons["Restart"].get_image_size()
# Label position for the "Restart" text relative to the button image.
restart_font_x = button_x + button_width - w/2
restart_font_y = button_y - h/2
while True:
    flag = False          # NOTE(review): unused
    restart_game = False  # NOTE(review): unused
    screen.fill((255,255,255))
    time_passed = FPSCLOCK.tick(30)
    # Step the snake one cell every ~100 ms; tweaking this threshold
    # controls the snake's speed (and the drawing response rate).
    if times>100:
        times = 0.0
        #s = move(s,d)
        snake_group = SnakeMove(snake_group,d)
    else:
        times += time_passed
    # Input: quit, and arrow keys change the current direction.
    for event in pygame.event.get():
        if event.type == QUIT:
            sys.exit()
        if event.type == KEYDOWN and event.key == K_UP:
            d = up
        if event.type == KEYDOWN and event.key == K_LEFT:
            #d = dire[turn_left(dire.index(d))]
            d = left
        if event.type == KEYDOWN and event.key == K_RIGHT:
            #d =dire[turn_right(dire.index(d))]
            d =right
        if event.type == KEYDOWN and event.key == K_DOWN:
            d = down
    # Start screen drawn every frame; once game_start latches, the branches
    # below overdraw it on a fresh fill.
    pygame.mouse.set_visible(True)
    x = screen.get_width()/2-welcome_text.get_width()/2
    y = screen.get_height()/2-welcome_text.get_height()/2
    print_text(welcome_text,x,y)
    buttons["Start"].render2(screen)
    # NOTE(review): is_start() returns None; the latched game_start flag
    # read below is what actually drives the state change.
    is_start = buttons["Start"].is_start()
    if buttons["Start"].game_start == True:
        if is_over:
            # Game-over screen with the Restart button.
            screen.fill((255,255,255))  # wipes everything drawn so far this frame
            x = screen.get_width()/2-gameover_text.get_width()/2
            y = screen.get_height()/2-gameover_text.get_height()/2
            print_text(gameover_text,x,y)
            pygame.mouse.set_visible(True)
            buttons["Restart"].render(screen)
            print_text(restart_text,restart_font_x,restart_font_y)
            snake_group = init_snake_group  # restore the snake's starting body
            d = init_d  # otherwise the restarted snake keeps its dying direction
            buttons["Restart"].is_restart()
            if buttons["Restart"].game_restart == True:
                is_over = False
                buttons["Restart"].game_restart = False  # must be reset for the next game over
        else:
            screen.fill((255,255,255))
            # Head reached the food: grow the snake and respawn the food.
            if snake_group[0].rect.topleft == food.getFoodPosition():
                print food.getFoodPosition()
                snake_group = SnakeGrow(snake_group,d)
                food_group.remove(food)
                food_position = randrange(0,40)*20,randrange(0,30)*20  # respawned only after being eaten
                food = Food((0,255,0),food_position)
                food_group.add(food)
            # Head-vs-body self collision via sprite collision detection:
            # rebuild the headless group each frame, then test the head.
            snake_no_head.empty()
            for snake_part in snake_group[1:]:
                snake_no_head.add(snake_part)
            is_collide = None
            is_collide = pygame.sprite.spritecollideany(snake_group[0],snake_no_head)
            if is_collide != None:
                is_over = True
            snake_no_head.empty()
            is_collide = None  # clear collision state before the next frame
            # Wall collision: the board is 40x30 cells of 20px each.
            if snake_group[0].rect[0]<0 or snake_group[0].rect[0]//20 >= 40 or snake_group[0].rect[1]//20<0 or snake_group[0].rect[1]//20>=30:
                is_over = True
            #for r,c in s:
            #    pygame.draw.rect(screen,(255,0,0),(c*20,r*20,20,20))
            for aa in snake_group:
                aa.draw()
            food_group.draw(screen)
    pygame.display.update()
class Node:
    """A binary-tree node holding *value* with optional left/right children."""

    def __init__(self, value):
        self.value = value
        # Children start empty; callers attach Node instances directly.
        self.left = None
        self.right = None
def inorder(root):
    """Print node values in-order: left subtree, root, right subtree."""
    if root is None:
        return
    inorder(root.left)
    print(root.value)
    inorder(root.right)
def preorder(root):
    """Print node values pre-order: root, left subtree, right subtree."""
    if root is None:
        return
    print(root.value)
    preorder(root.left)
    preorder(root.right)
def postorder(root):
    """Print node values post-order: left subtree, right subtree, root."""
    if root is None:
        return
    postorder(root.left)
    postorder(root.right)
    print(root.value)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.