index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
14,400 | 49ebc9445040e93dbac8e7871f51cfd1c7c511a1 | print("Hello World from Leonela Comina /n ESPE /n GEO")
# Read the two addends from the user (fixed: the builtin is `input`, not `imput`).
addend1 = input("Enter The first number --> ")
addend2 = input("Enter The second number --> ")
# Add the two numbers (fixed: the original had an unbalanced parenthesis;
# `total` also avoids shadowing the builtin `sum`).
total = float(addend1) + float(addend2)
# Display the sum (fixed: str.format placeholders are {0}, not (0)).
print("the sum of the {0} and {1} is {2}".format(addend1, addend2, total))
print("The sum is %.if" %(float(imput("Enter first number: "))+float(imput("Enter second number: ")))) |
14,401 | 43c0f511c44ba5b901a7b011722a408329916716 | #anand python problem 4 3:4
# Write a program to print directory tree. The program should take path of a directory as argument and print all the files in it recursively as a tree.
import os
# Tree-drawing markers.
a = '|--'   # intermediate entry at a level
b = '| '    # per-level indentation unit
c = '\\--'  # last entry at a level (raw backslash; escaped to avoid the
            # invalid-escape warning the bare '\--' literal produces)

def directory_tree(dire, v=0):
    """Print the files and directories under *dire* recursively as a tree.

    v is the current nesting depth and controls indentation.
    Fixes: Python 3 print(); directories were previously printed twice
    (once inside the isdir branch and again unconditionally afterwards).
    """
    names = os.listdir(dire)
    for name in names:
        # '\--' marks the final entry at this level, '|--' all others.
        marker = c if name == names[-1] else a
        print(b * v + marker + name)
        full_path = os.path.join(dire, name)
        if os.path.isdir(full_path):
            directory_tree(full_path, v + 1)
if __name__ == '__main__':
directory_tree('/home/shuhaib/anand_python/modules_ch_3/')
|
14,402 | 7eb3f9952b6d7aa1e658976f6949ea0a50adcee7 | import tkinter as tk
from responsechatbot import ChatbotResponse as cr
from posTag_spellCheck import check, pos_tagging
def show_entry_fields():
    """Debug helper: print the current entry-field text to stdout."""
    # NOTE(review): the label says "First Name" but e1 holds the chat
    # message — looks like a leftover from a tutorial; confirm intent.
    print("First Name: %s" % (e1.get()))
def insertContent():
    """Send-button handler: spell-check the entry text, fetch a chatbot
    reply, and append both to the transcript widget, then clear the entry.
    """
    reply = ""
    cor = ""
    try:
        cor = checkSpelling()
        print(cor)
        reply = cr.getResponse( cor )
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; fallback reply on any failure.
        reply = "I'm sorry, I didn't quite understand that.\nTry asking for help to see the scope of my functionality, or try asking another question.\n"
        print(reply)
    # "odd"/"even" tags give user and bot lines different backgrounds.
    T.insert(tk.END, cor + '\n', "odd")
    T.insert(tk.END, reply + '\n', "even")
    e1.delete(0, tk.END)
def checkSpelling():
    """Spell-check the entry text and report mistakes in the transcript.

    Returns the sentence with each misspelled word replaced by its
    suggested correction.
    """
    sentence = e1.get()
    #print(pos_tagging(sentence))
    dic = check(sentence)  # presumably misspelled word -> suggestion; from posTag_spellCheck — confirm
    tips = 'here are some spelling mistakes:\n'
    for k in dic:
        tips = tips + str(k) + ' -> ' + str(dic[k]) + '\n'
        # Apply the correction in place (plain substring replace).
        sentence = sentence.replace(k, dic[k])
    if len(dic) == 0:
        tips = 'it seems all spelling are correct\n'
    T.insert(tk.END, tips + '\n', "tip")
    print(tips)
    return sentence
def posTagging():
    """Show part-of-speech tags for the entry text in the transcript."""
    sentence = e1.get()
    dic = pos_tagging(sentence)  # presumably token -> POS tag; from posTag_spellCheck — confirm
    tips = 'here are the POS tagging:\n'
    for k in dic:
        tips = tips + str(k) + ' -> ' + str(dic[k]) + '\n'
    if len(dic) == 0:
        tips = 'it is empty\n'
    T.insert(tk.END, tips + '\n', "tip")
    print(tips)
    return
# --- main window ----------------------------------------------------------
master = tk.Tk()
master.geometry("1000x400")
tk.Label(master, text="Say something to Chatbot below then press 'Send'").grid(row=0, column=0)
# Single-line input for the user's message.
e1 = tk.Entry(master, width=160)
e1.grid(row=1, column=0, padx=5, pady=5)
# Action buttons wired to the handlers above.
send = tk.Button(master, text='Send', command=insertContent)
send.grid(row=3, column=0, sticky=tk.W, pady=5)
checkSpell = tk.Button(master, text='check spelling', command=checkSpelling)
checkSpell.grid(row=4, column=0, sticky=tk.W, pady=5)
posTag = tk.Button(master, text='POS tagging', command=posTagging)
posTag.grid(row=5, column=0, sticky=tk.W, pady=5)
# Chat transcript; tags give user/bot/tip lines distinct backgrounds.
record = "==========================================================Chat Log==========================================================\n"
T = tk.Text(master, height=20, width=140)
T.grid(row=6, column=0, sticky=tk.W, pady=4, padx=4)
T.tag_configure("even", background="#ffffff")
T.tag_configure("odd", background="#7bbfea")
T.tag_configure("tip", background="#ccffff")
T.insert(tk.END, record)
tk.mainloop()
|
14,403 | 27fc2ece806720fa87e994230001d2a7a3c01c32 | import re
def parse(xml):
    """Yield (tag, text) pairs for each flat <tag>text</tag> element in *xml*.

    Elements named 'tokens' (the wrapper element) are skipped.
    """
    # Raw string fixes the invalid-escape warning for \w in a plain literal.
    for element in re.findall(r'<\w*?>.*?</\w*?>', xml):
        # '<a> hi </a>' -> ['', 'a', ' hi ', '/a', ''] after the split.
        data = element.replace('>', '<').split('<')
        if not data[1] == 'tokens':
            yield (data[1], data[2].strip())
def unparse(data):
    """Serialize (type, text) pairs back into '<type> text </type>' lines."""
    pieces = []
    for element in data:
        pieces.append('<{type}> {text} </{type}>\n'.format(type=element[0], text=element[1]))
    return ''.join(pieces)
|
14,404 | 6834ecc52bf8cf497c0084ecc2fb7f3394444e1f | # Generated by Django 3.2 on 2021-05-05 19:21
from django.db import migrations
class Migration(migrations.Migration):
    # Renames Safety.opertion_status (typo) to operation_status.
    dependencies = [
        ('acc', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='safety',
            old_name='opertion_status',
            new_name='operation_status',
        ),
    ]
|
14,405 | e42e70dea00d09b3732c8339c2188cea86bfca6d | import sys
import pandas
import argparse
from intervaltree import Interval, IntervalTree
class gitree(object):
def __init__(self, parameter_list):
pass
def build_from_bed(self, bedfile): |
14,406 | 545f6acbba781f76cc638dab68e67805da794c4c | #!/usr/bin/env python
import os
import sys
import socket
import gtk.glade
import cloudfiles
from config import CloudFilesGconf
from constants import GLADE_DIR
from show_container import ShowContainersList
CF_CONNECTION = None
USERNAME = None
API_KEY = None
class CheckUsernameKey(object):
    """
    Check Username and Key
    """
    def __init__(self):
        self.dialog_error = None
        self.init_dialog_error()
    def hide_widget(self, widget, *args, **kwargs):
        # Hide (not destroy) so the dialog can be reused; returning True
        # stops further signal propagation.
        widget.hide()
        return True
    def check(self, username, api_key):
        """Try to authenticate against Cloud Files.

        On success caches the connection in the CF_CONNECTION global and
        returns True; on failure shows an error dialog and returns False.
        """
        global CF_CONNECTION
        try:
            CF_CONNECTION = cloudfiles.get_connection(username, api_key)
        except(cloudfiles.errors.AuthenticationError,
               cloudfiles.errors.AuthenticationFailed):
            self.dialog_error.set_markup(
                'Your username (%s) or API Key (%s) does not seem to match or are incorrect.' % \
                (username, api_key)
            )
            self.dialog_error.run()
            return False
        except(socket.gaierror):
            # DNS/network failure rather than bad credentials.
            self.dialog_error.set_markup(
                'Cannot connect to Rackspace Cloud Files.')
            self.dialog_error.run()
            return False
        return True
    def init_dialog_error(self, parent=None):
        # Pre-build a reusable modal error dialog.
        self.dialog_error = gtk.MessageDialog(parent=parent,
            type=gtk.MESSAGE_ERROR,
            flags=gtk.DIALOG_MODAL,
            buttons=gtk.BUTTONS_CLOSE,
            message_format='Error.')
        self.dialog_error.set_title("Cloud Files Uploader")
        self.dialog_error.connect('delete-event', self.hide_widget)
        self.dialog_error.connect('response', self.hide_widget)
class AskUsernameKey(object):
    """Dialog asking the user for their Rackspace username and API key."""
    def __init__(self, username=None):
        self.username = username
        self.api_key = None
        self.auth_window = None
        self.entry_username = None
        self.entry_api_key = None
        self.entry_message = None
        self.gconf_key = CloudFilesGconf()
        self.authenticated = False
    def clicked(self, *kargs, **kwargs):
        """OK-button handler: validate the fields, then try to authenticate.

        Returns True on successful authentication (and persists the
        credentials in gconf); otherwise shows a message and returns None.
        """
        self.username = self.entry_username.get_text()
        self.api_key = self.entry_api_key.get_text()
        if not self.username:
            self.entry_message.set_text("You have not entered a Username")
            self.entry_message.show()
            return  # fixed: previously fell through and authenticated anyway
        if not self.api_key:
            self.entry_message.set_text("You have not entered an API Key")
            self.entry_message.show()
            return  # fixed: same fall-through with an empty key
        check_username = CheckUsernameKey()
        if check_username.check(self.username, self.api_key):
            self.authenticated = True
            self.auth_window.destroy()
            self.gconf_key.set_entry("username", self.username, "string")
            self.gconf_key.set_entry("api_key", self.api_key, "string")
            return True
        self.entry_message.set_text("Authentication has failed")
        self.entry_message.show()
    def quit(self, *args, **kwargs):
        """Close the dialog."""
        self.auth_window.destroy()
    def show(self):
        """Build and run the authentication dialog from the Glade file."""
        gladefile = os.path.join(GLADE_DIR, 'dialog_authentication.glade')
        window_tree = gtk.glade.XML(gladefile)
        self.entry_username = window_tree.get_widget("entry_username")
        self.entry_api_key = window_tree.get_widget("entry_api_key")
        self.entry_message = window_tree.get_widget("entry_message")
        if not self.entry_message.get_text():
            self.entry_message.hide()
        if self.username:
            self.entry_username.set_text(self.username)
            self.entry_api_key.grab_focus()
        if self.api_key:
            # Bug fix: this previously wrote the API key into the
            # *username* entry, clobbering the pre-filled username.
            self.entry_api_key.set_text(self.api_key)
        self.auth_window = window_tree.get_widget('dialog_authentication')
        button_ok = window_tree.get_widget('button1')
        button_cancel = window_tree.get_widget('button2')
        button_ok.connect('clicked', self.clicked)
        button_cancel.connect('clicked', self.quit)
        self.auth_window.connect('destroy', self.quit)
        self.auth_window.run()
class CloudFileUploader(object):
    """Top-level controller: authenticate (prompting if needed), then show
    the container list so the given files can be uploaded."""
    def __init__(self, stuff_to_upload):
        self.gconf_key = CloudFilesGconf()
        self.stuff_to_upload = stuff_to_upload
    def main(self):
        """Run the upload flow.

        Returns True on success, None if the user cancelled authentication.
        """
        global USERNAME, API_KEY
        # Try previously saved credentials first.
        username = self.gconf_key.get_entry("username", "string")
        api_key = self.gconf_key.get_entry("api_key", "string")
        check_username = CheckUsernameKey()
        if not(all([username, api_key]) and \
            check_username.check(username, api_key)):
            ask = AskUsernameKey(username=username)
            ask.show()
            if not ask.authenticated:
                #make sure it has been destroyed
                ask.auth_window.destroy()
                return
            # Re-read the credentials the dialog just saved.
            username = self.gconf_key.get_entry("username", "string")
            api_key = self.gconf_key.get_entry("api_key", "string")
        USERNAME = username
        API_KEY = api_key
        self.gconf_key.set_entry("username", username, "string")
        self.gconf_key.set_entry("api_key", api_key, "string")
        container_list = ShowContainersList(CF_CONNECTION, self.stuff_to_upload)
        container_list.show()
        return True
|
14,407 | 9b44e6048816465abfe8044aa0e1c74fdc08de26 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetCertificateIssuanceConfigResult',
'AwaitableGetCertificateIssuanceConfigResult',
'get_certificate_issuance_config',
'get_certificate_issuance_config_output',
]
@pulumi.output_type
class GetCertificateIssuanceConfigResult:
def __init__(__self__, certificate_authority_config=None, create_time=None, description=None, key_algorithm=None, labels=None, lifetime=None, name=None, rotation_window_percentage=None, update_time=None):
if certificate_authority_config and not isinstance(certificate_authority_config, dict):
raise TypeError("Expected argument 'certificate_authority_config' to be a dict")
pulumi.set(__self__, "certificate_authority_config", certificate_authority_config)
if create_time and not isinstance(create_time, str):
raise TypeError("Expected argument 'create_time' to be a str")
pulumi.set(__self__, "create_time", create_time)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if key_algorithm and not isinstance(key_algorithm, str):
raise TypeError("Expected argument 'key_algorithm' to be a str")
pulumi.set(__self__, "key_algorithm", key_algorithm)
if labels and not isinstance(labels, dict):
raise TypeError("Expected argument 'labels' to be a dict")
pulumi.set(__self__, "labels", labels)
if lifetime and not isinstance(lifetime, str):
raise TypeError("Expected argument 'lifetime' to be a str")
pulumi.set(__self__, "lifetime", lifetime)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if rotation_window_percentage and not isinstance(rotation_window_percentage, int):
raise TypeError("Expected argument 'rotation_window_percentage' to be a int")
pulumi.set(__self__, "rotation_window_percentage", rotation_window_percentage)
if update_time and not isinstance(update_time, str):
raise TypeError("Expected argument 'update_time' to be a str")
pulumi.set(__self__, "update_time", update_time)
@property
@pulumi.getter(name="certificateAuthorityConfig")
def certificate_authority_config(self) -> 'outputs.CertificateAuthorityConfigResponse':
"""
The CA that issues the workload certificate. It includes the CA address, type, authentication to CA service, etc.
"""
return pulumi.get(self, "certificate_authority_config")
@property
@pulumi.getter(name="createTime")
def create_time(self) -> str:
"""
The creation timestamp of a CertificateIssuanceConfig.
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter
def description(self) -> str:
"""
One or more paragraphs of text description of a CertificateIssuanceConfig.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="keyAlgorithm")
def key_algorithm(self) -> str:
"""
The key algorithm to use when generating the private key.
"""
return pulumi.get(self, "key_algorithm")
@property
@pulumi.getter
def labels(self) -> Mapping[str, str]:
"""
Set of labels associated with a CertificateIssuanceConfig.
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter
def lifetime(self) -> str:
"""
Workload certificate lifetime requested.
"""
return pulumi.get(self, "lifetime")
@property
@pulumi.getter
def name(self) -> str:
"""
A user-defined name of the certificate issuance config. CertificateIssuanceConfig names must be unique globally and match pattern `projects/*/locations/*/certificateIssuanceConfigs/*`.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="rotationWindowPercentage")
def rotation_window_percentage(self) -> int:
"""
Specifies the percentage of elapsed time of the certificate lifetime to wait before renewing the certificate. Must be a number between 1-99, inclusive.
"""
return pulumi.get(self, "rotation_window_percentage")
@property
@pulumi.getter(name="updateTime")
def update_time(self) -> str:
"""
The last update timestamp of a CertificateIssuanceConfig.
"""
return pulumi.get(self, "update_time")
class AwaitableGetCertificateIssuanceConfigResult(GetCertificateIssuanceConfigResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCertificateIssuanceConfigResult(
certificate_authority_config=self.certificate_authority_config,
create_time=self.create_time,
description=self.description,
key_algorithm=self.key_algorithm,
labels=self.labels,
lifetime=self.lifetime,
name=self.name,
rotation_window_percentage=self.rotation_window_percentage,
update_time=self.update_time)
def get_certificate_issuance_config(certificate_issuance_config_id: Optional[str] = None,
location: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCertificateIssuanceConfigResult:
"""
Gets details of a single CertificateIssuanceConfig.
"""
__args__ = dict()
__args__['certificateIssuanceConfigId'] = certificate_issuance_config_id
__args__['location'] = location
__args__['project'] = project
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('google-native:certificatemanager/v1:getCertificateIssuanceConfig', __args__, opts=opts, typ=GetCertificateIssuanceConfigResult).value
return AwaitableGetCertificateIssuanceConfigResult(
certificate_authority_config=pulumi.get(__ret__, 'certificate_authority_config'),
create_time=pulumi.get(__ret__, 'create_time'),
description=pulumi.get(__ret__, 'description'),
key_algorithm=pulumi.get(__ret__, 'key_algorithm'),
labels=pulumi.get(__ret__, 'labels'),
lifetime=pulumi.get(__ret__, 'lifetime'),
name=pulumi.get(__ret__, 'name'),
rotation_window_percentage=pulumi.get(__ret__, 'rotation_window_percentage'),
update_time=pulumi.get(__ret__, 'update_time'))
@_utilities.lift_output_func(get_certificate_issuance_config)
def get_certificate_issuance_config_output(certificate_issuance_config_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetCertificateIssuanceConfigResult]:
"""
Gets details of a single CertificateIssuanceConfig.
"""
...
|
14,408 | fa58a76929ebb6cedb4e453027d5c6c6da2af0d3 | import sys
import termios
import tty
import cv2
import numpy as np
from PIL import Image
from nes_py.wrappers import JoypadSpace
import gym_super_mario_bros
env = gym_super_mario_bros.make('SuperMarioBros-v2', apply_api_compatibility=True, render_mode="human")
MY_MARIO_MOVEMENT = [
['right', 'B'],
#['right'],
['A'],
['left'],
]
env = JoypadSpace(env, MY_MARIO_MOVEMENT)
from auto_everything.cryptography import Encryption_And_Decryption, Password_Generator
password_generator = Password_Generator(base_secret_string="yingshaoxo is the strongest person in this world.")
from auto_everything.disk import Disk
disk = Disk()
current_folder = disk.get_directory_path(__file__)
def get_char_input() -> tuple[str, int]:
    """Read one raw keypress from stdin; return (char, ASCII code).

    Temporarily switches the terminal to raw mode so a single key is read
    without waiting for Enter; the previous settings are always restored.
    """
    #https://www.physics.udel.edu/~watson/scen103/ascii.html
    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)
    try:
        tty.setraw(sys.stdin.fileno())
        char = sys.stdin.read(1)
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return char, ord(char)
def crop_image_to_pieces(numpy_image, height, width):
    """Yield height x width tiles of *numpy_image* in row-major order.

    Edge remainders that do not fill a whole tile are discarded
    (floor division on both axes).
    """
    im = Image.fromarray(numpy_image)
    imgwidth, imgheight = im.size
    for i in range(imgheight//height):
        for j in range(imgwidth//width):
            # PIL box is (left, upper, right, lower).
            box = (j*width, i*height, (j+1)*width, (i+1)*height)
            yield np.array(im.crop(box))
def save_numpy_image(image, path):
    """Write *image* to *path*, creating the parent folder first if needed."""
    disk.create_a_folder(disk.get_directory_path(path))
    cv2.imwrite(path, image)
def image_process(frame):
    """Convert a raw emulator frame into the RGB crop used as training data.

    Resizes to 225x225, then crops off the top 40 rows (HUD/score area),
    returning a (185, 225, 3) uint8 array. A None frame yields zeros of the
    same shape.
    """
    side_length = 225
    if frame is not None:
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = cv2.resize(frame, (side_length, side_length)).astype(np.uint8)
    else:
        # Bug fix: this was np.zeros((3, side, side)) (channel-first), so the
        # crop below sliced the 3-element channel axis and returned an empty
        # array. Use HWC to match the cv2 branch.
        frame = np.zeros((side_length, side_length, 3), dtype=np.uint8)
    width, height = frame.shape[1], frame.shape[0]
    # Drop the top 40 rows (HUD) from the frame.
    frame = frame[40:height, 0:width]
    # frame = frame / 255
    return frame
i = 0
state,_ = env.reset()
state = image_process(state)
last_state = state
while True:
try:
input_char, input_char_id = get_char_input()
action = env.action_space.sample()
if input_char == '0':
action = 0
elif input_char == '1':
action = 1
elif input_char == '2':
action = 2
# elif input_char == '3':
# action = 3
state, reward, terminated, truncated, info = env.step(action) # type: ignore
# state, reward, done, info = env.step(env.action_space.sample()) # type: ignore
state = image_process(state)
done = terminated or truncated
# height, width, _ = state.shape
# sub_height = height // (16//2)
# sub_width = width // (16//2)
# sub_image_list = crop_image_to_pieces(state, sub_height, sub_width)
save_numpy_image(last_state, disk.join_paths(current_folder, "raw_images", f"{action}", f"{password_generator.get_random_password(length=12)}.jpg"))
# save_numpy_image(state, disk.join_paths(current_folder, "raw_images", f"{i}.jpg"))
# for index, sub_image in enumerate(sub_image_list):
# save_numpy_image(sub_image, disk.join_paths(current_folder, "raw_seperate_images", f"{i}_{index}.jpg"))
if done:
state, _ = env.reset()
state = image_process(state)
last_state = np.copy(state)
env.render()
i += 1
except Exception as e:
print(e)
env.close() |
14,409 | 1d6c3c7f9106d62826651d5071def16427fbd034 | """Screening test questions for the Facebook Production Engineering Apprenticeship
role."""
# My Code:
# Complete the stickers_for function below.
def stickers_for(phrase):
    """Return the number of 'instagram' stickers needed to spell *phrase*.

    Letters in *phrase* that do not appear on the sticker (spaces, other
    letters) are ignored. Returns 0 when the phrase shares no letters with
    the sticker.

    Fixes: previously returned a debug tuple (dicts, ratios, count) instead
    of the documented count, printed every letter while counting, and used
    round() — which undercounts (e.g. a ratio of 2.5 needs 3 stickers).
    """
    # Letter frequencies on one sticker.
    sticker_counts = {}
    for ltr in 'instagram':
        sticker_counts[ltr] = sticker_counts.get(ltr, 0) + 1
    stickers_needed = 0
    for ltr, per_sticker in sticker_counts.items():
        wanted = phrase.count(ltr)
        # Ceiling division: a partially used sticker is still a whole one.
        stickers_needed = max(stickers_needed, -(-wanted // per_sticker))
    return stickers_needed
if __name__ == '__main__':
# TEST 1
test_1 = 'artisan martians'
print(test_1)
print(stickers_for(test_1))
test_2 = 'taming giant gnats'
print(test_2)
print(stickers_for(test_2))
"""All test cases pass in hacker rank""" |
14,410 | 6918c586805d21fc3e11f90a8604c4fe7b7a03f8 | from ichnaea.models.content import (
decode_datamap_grid,
encode_datamap_grid,
DataMap,
RegionStat,
Stat,
StatKey,
)
from ichnaea.tests.base import (
DBTestCase,
TestCase,
)
from ichnaea import util
class TestDataMapCodec(TestCase):
def test_decode_datamap_grid(self):
self.assertEqual(
decode_datamap_grid(b'\x00\x00\x00\x00\x00\x00\x00\x00'),
(-90000, -180000))
self.assertEqual(
decode_datamap_grid(b'AAAAAAAAAAA=', codec='base64'),
(-90000, -180000))
self.assertEqual(
decode_datamap_grid(b'\x00\x01_\x90\x00\x02\xbf '),
(0, 0))
self.assertEqual(
decode_datamap_grid(b'AAFfkAACvyA=', codec='base64'),
(0, 0))
self.assertEqual(
decode_datamap_grid(b'\x00\x02\xbf \x00\x05~@'),
(90000, 180000))
self.assertEqual(
decode_datamap_grid(b'\x00\x02\xbf \x00\x05~@', scale=True),
(90.0, 180.0))
self.assertEqual(
decode_datamap_grid(b'AAK/IAAFfkA=', codec='base64'),
(90000, 180000))
self.assertEqual(
decode_datamap_grid(b'AAK/IAAFfkA=', scale=True, codec='base64'),
(90.0, 180.0))
def test_encode_datamap_grid(self):
self.assertEqual(encode_datamap_grid(-90000, -180000),
b'\x00\x00\x00\x00\x00\x00\x00\x00')
self.assertEqual(encode_datamap_grid(-90000, -180000, codec='base64'),
b'AAAAAAAAAAA=')
self.assertEqual(encode_datamap_grid(0, 0),
b'\x00\x01_\x90\x00\x02\xbf ')
self.assertEqual(encode_datamap_grid(0, 0, codec='base64'),
b'AAFfkAACvyA=')
self.assertEqual(encode_datamap_grid(90.0, 180.0, scale=True),
b'\x00\x02\xbf \x00\x05~@')
self.assertEqual(encode_datamap_grid(90000, 180000),
b'\x00\x02\xbf \x00\x05~@')
self.assertEqual(encode_datamap_grid(90000, 180000, codec='base64'),
b'AAK/IAAFfkA=')
class TestDataMap(DBTestCase):
def test_fields(self):
today = util.utcnow().date()
lat = 12345
lon = -23456
model = DataMap.shard_model(lat, lon)
self.session.add(model(grid=(lat, lon), created=today, modified=today))
self.session.flush()
result = self.session.query(model).first()
self.assertEqual(result.grid, (lat, lon))
self.assertEqual(result.created, today)
self.assertEqual(result.modified, today)
def test_scale(self):
self.assertEqual(DataMap.scale(-1.12345678, 2.23456789),
(-1123, 2235))
def test_shard_id(self):
self.assertEqual(DataMap.shard_id(None, None), None)
self.assertEqual(DataMap.shard_id(85000, 180000), 'ne')
self.assertEqual(DataMap.shard_id(36000, 5000), 'ne')
self.assertEqual(DataMap.shard_id(35999, 5000), 'se')
self.assertEqual(DataMap.shard_id(-85000, 180000), 'se')
self.assertEqual(DataMap.shard_id(85000, -180000), 'nw')
self.assertEqual(DataMap.shard_id(36000, 4999), 'nw')
self.assertEqual(DataMap.shard_id(35999, 4999), 'sw')
self.assertEqual(DataMap.shard_id(-85000, -180000), 'sw')
def test_grid_bytes(self):
lat = 12000
lon = 34000
grid = encode_datamap_grid(lat, lon)
model = DataMap.shard_model(lat, lon)
self.session.add(model(grid=grid))
self.session.flush()
result = self.session.query(model).first()
self.assertEqual(result.grid, (lat, lon))
def test_grid_none(self):
self.session.add(DataMap.shard_model(0, 0)(grid=None))
with self.assertRaises(Exception):
self.session.flush()
def test_grid_length(self):
self.session.add(DataMap.shard_model(0, 9)(grid=b'\x00' * 9))
with self.assertRaises(Exception):
self.session.flush()
def test_grid_list(self):
lat = 1000
lon = -2000
self.session.add(DataMap.shard_model(lat, lon)(grid=[lat, lon]))
with self.assertRaises(Exception):
self.session.flush()
class TestRegionStat(DBTestCase):
    """Round-trip a RegionStat row through the database."""
    def test_fields(self):
        self.session.add(RegionStat(
            region='GB', gsm=1, wcdma=2, lte=3, blue=4, wifi=5))
        self.session.flush()
        result = self.session.query(RegionStat).first()
        # Every per-radio counter must survive the round trip unchanged.
        self.assertEqual(result.region, 'GB')
        self.assertEqual(result.gsm, 1)
        self.assertEqual(result.wcdma, 2)
        self.assertEqual(result.lte, 3)
        self.assertEqual(result.blue, 4)
        self.assertEqual(result.wifi, 5)
class TestStat(DBTestCase):
    """Round-trip a Stat row and verify the StatKey enum column."""
    def test_fields(self):
        utcday = util.utcnow().date()
        self.session.add(Stat(key=StatKey.cell, time=utcday, value=13))
        self.session.flush()
        result = self.session.query(Stat).first()
        self.assertEqual(result.key, StatKey.cell)
        self.assertEqual(result.time, utcday)
        self.assertEqual(result.value, 13)
    def test_enum(self):
        utcday = util.utcnow().date()
        self.session.add(Stat(key=StatKey.cell, time=utcday, value=13))
        self.session.flush()
        result = self.session.query(Stat).first()
        # The column round-trips as a StatKey enum, preserving int and name.
        self.assertEqual(result.key, StatKey.cell)
        self.assertEqual(int(result.key), 1)
        self.assertEqual(result.key.name, 'cell')
|
14,411 | 2313bfe65cb87840251463fc943d2a425f797d08 | """Lambda that reads from a table and sends emails out for new music within the last 7 days. Invoked daily"""
# Standard library imports
import base64
import datetime
import json
import logging.config
import os
import traceback
# Third party library imports
import boto3
import requests
USER_FAVORITES_TABLE = boto3.resource('dynamodb').Table(os.environ['USER_FAVORITES_TABLE'])
SES_CLIENT = boto3.client('ses')
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.INFO)
HTML_START = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Title</title>
</head>
<body>
"""
HTML_END = """
</body>
</html>
"""
def handler(event, context):
    """Lambda entry point: send a new-music email to every stored user.

    One Spotify lookup per distinct artist, then one email per user built
    from the shared lookup results.
    """
    LOGGER.debug(event)
    LOGGER.debug(context)
    records = get_users()
    artists = get_artists(records)
    if not artists:
        return {'message': 'Nothing to search :/'}
    spotify_authorization = authorize()
    spotify_responses = get_new_music_from_spotify(artists, spotify_authorization)
    for record in records:
        email_body = build_email_body_for_user(record['artists'], spotify_responses)
        send_email(email_body, record['email'])
    return {'message': 'all done :)'}
def get_users():
    """Scan table for all users to get favorite artists' new music"""
    # NOTE(review): Table.scan returns a single page (up to 1 MB); there is
    # no LastEvaluatedKey pagination here — confirm the table stays small.
    table_response = USER_FAVORITES_TABLE.scan()
    return table_response['Items']
def get_artists(records):
    """Return the deduplicated union of every user's favorite artists,
    so Spotify is queried only once per artist."""
    return set().union(*(record['artists'] for record in records))
def authorize():
    """Get spotify authorization token via the client-credentials flow.

    Returns the parsed token response dict (contains 'access_token').
    """
    # HTTP Basic auth header: base64("client_id:client_secret").
    encoded_auth = base64.b64encode(
        (os.environ["SPOTIFY_CLIENT_ID"] + ':' + os.environ["SPOTIFY_CLIENT_SECRET"]).encode())
    headers = {
        'Authorization': 'Basic {}'.format(encoded_auth.decode("utf-8"))
    }
    response = requests.post(os.environ['SPOTIFY_AUTH_URL'], data={'grant_type': 'client_credentials'},
        headers=headers).text
    return json.loads(response)
def get_new_music_from_spotify(artists, spotify_authorization):
    """Get new music for all artists.

    Returns {artist_name: [release dicts from the last seven days]}.
    """
    spotify_responses = dict()
    spotify_artists = get_artist_ids_from_spotify(artists, spotify_authorization)
    for artist_id, artist in spotify_artists.items():
        new_music = get_new_music_for_artist(artist_id, spotify_authorization)
        spotify_responses.update({artist: new_music})
    return spotify_responses
def get_artist_ids_from_spotify(artists, spotify_authorization):
    """Look up each artist on Spotify search; return {artist_id: artist_name}.

    Fixes: artists with no search result were previously stored as
    {artist_name: None}, and the caller then used the artist *name* as a
    Spotify id and keyed the result under None — they are now omitted.
    The query is also passed via `params` so requests URL-encodes names
    containing '&', '#' or non-ASCII characters.
    """
    spotify_artists = dict()
    url = os.environ['SPOTIFY_SEARCH_URL']
    headers = {
        'Authorization': f'Bearer {spotify_authorization["access_token"]}'
    }
    for artist in artists:
        params = {'q': artist, 'type': 'artist', 'items': 1}
        response = json.loads(requests.get(url, params=params, headers=headers).text)
        if response['artists'].get('items', []):
            spotify_artists.update({response['artists']['items'][0]['id']: artist})
    return spotify_artists
def get_new_music_for_artist(artist_id, spotify_authorization):
    """Use Spotify ID to collect new singles for artists.

    Queries the artist's albums endpoint restricted to singles and keeps
    only releases from the last seven days.
    """
    url = os.environ['SPOTIFY_ARTISTS_URL'].format(artist_id)
    headers = {
        'Authorization': f'Bearer {spotify_authorization["access_token"]}'
    }
    params = '?include_groups=single'
    response = json.loads(requests.get(url + params, headers=headers).text)
    return filter_music_for_last_seven_days(response)
def filter_music_for_last_seven_days(spotify_response):
    """Keep only releases from the past seven days, reshaped for the email.

    Returns a list of dicts with name/type/releaseDate/url/thumbnail keys;
    thumbnail holds only the 64x64 images.
    """
    cutoff = (datetime.datetime.now() - datetime.timedelta(days=7)).strftime('%Y-%m-%d')
    recent = []
    for item in spotify_response['items']:
        # ISO dates compare correctly as strings.
        if item['release_date'] < cutoff:
            continue
        thumbnail = [img for img in item['images'] if is_image_size_64(img)]
        recent.append({
            'name': item.pop('name'),
            'type': item.pop('type'),
            'releaseDate': item['release_date'],
            'url': item['external_urls'].pop('spotify'),
            'thumbnail': thumbnail,
        })
    return recent
def is_image_size_64(image):
    """Checks for 64x64 pixel image, returns boolean"""
    return image['height'] == 64 and image['width'] == 64
def build_email_body_for_user(artists, spotify_responses):
    """Concatenate the new-music HTML lines for this user's artists;
    artists missing from the lookup results are skipped."""
    return ''.join(
        create_artist_new_music_line(spotify_responses[artist])
        for artist in artists
        if artist in spotify_responses
    )
def create_artist_new_music_line(spotify_artist_music):
    """Render one HTML <p> line per release that has a thumbnail."""
    template = '<p><img src="{}" width="{}" height="{}" /> {} released on {}--{}</p>\n'
    rendered = []
    for release in spotify_artist_music:
        if not release['thumbnail']:
            continue
        thumb = release['thumbnail'][0]
        rendered.append(template.format(thumb['url'], thumb['width'],
                                        thumb['height'], release['name'],
                                        release['releaseDate'], release['url']))
    return ''.join(rendered)
def send_email(email_body, email_to):
    """Send the HTML digest to *email_to* through SES.

    Returns True on success, False if SES raised (the error is logged via
    traceback, not re-raised — this keeps one bad address from aborting
    the whole fan-out in handler()).
    """
    html = f'{HTML_START}{email_body}{HTML_END}'
    try:
        SES_CLIENT.send_email(
            Source=os.environ['SENDER_EMAIL'],
            Destination={
                'ToAddresses': [
                    email_to
                ]
            },
            Message={
                'Subject': {
                    'Data': 'Newest Music in Last 7 Days (Spotify)',
                },
                'Body': {
                    'Html': {
                        'Data': html,
                    }
                }
            }
        )
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate.
        traceback.print_exc()
        return False
    return True
|
14,412 | add34816606b6c5a629643610ff9c837e3f97158 | import ipaddress
import socket
from urllib.parse import urlparse
from django.shortcuts import reverse as django_reverse
from django.utils.translation import ugettext as _
from django.conf import settings
URL_TEMPLATE = "{scheme}://{domain}/{path}"

def build_url(path, scheme="http", domain="localhost"):
    """Assemble scheme, domain and path into a URL.

    Leading slashes are stripped from *path* so the template's single
    separator slash is never doubled.
    """
    normalized_path = path.lstrip("/")
    return URL_TEMPLATE.format(scheme=scheme, domain=domain, path=normalized_path)
def is_absolute_url(path):
    """Return True when *path* already looks like an absolute URL.

    This is a simple prefix test: any string starting with 'http'
    (which includes 'https://...') is treated as absolute.
    """
    return path.startswith("http")
def get_absolute_url(path):
    """Return a path as an absolute url.

    Already-absolute URLs are returned unchanged; relative paths are
    resolved against the configured 'front' site.
    """
    if is_absolute_url(path):
        return path
    site = settings.SITES['front']
    return build_url(path, scheme=site['scheme'], domain=site['domain'])
def reverse(viewname, *args, **kwargs):
    """Same behavior as django's reverse but uses django_sites to compute absolute url."""
    return get_absolute_url(django_reverse(viewname, *args, **kwargs))
class HostnameException(Exception):
    """Raised when the target host cannot be resolved or reached."""
    pass
class IpAddresValueError(ValueError):
    """Raised for invalid or disallowed (e.g. private) IP addresses."""
    # NOTE(review): name is missing the second 's' of 'Address'; renaming
    # would break existing except clauses, so it is left as-is.
    pass
def validate_private_url(url):
host = urlparse(url).hostname
port = urlparse(url).port
try:
socket_args, *others = socket.getaddrinfo(host, port)
except Exception:
raise HostnameException(_("Host access error"))
destination_address = socket_args[4][0]
try:
ipa = ipaddress.ip_address(destination_address)
except ValueError:
raise IpAddresValueError(_("IP Address error"))
if ipa.is_private:
raise IpAddresValueError("Private IP Address not allowed") |
14,413 | 9ae5295a04a838a829a31ec1ddffd33c2e0c279c | from enum import Enum
import cv2
import numpy as np
from scipy.stats.stats import pearsonr
def get_bounds(x, y, descriptor_offset, search_offset):
    """Return the (top, bottom, left, right) bounds of the window centered
    on (x, y).

    For descriptor blocks pass search_offset=1; larger values widen the
    sliding-search window proportionally.
    """
    half_span = int(descriptor_offset * search_offset)
    top, bottom = y - half_span - 1, y + half_span
    left, right = x - half_span - 1, x + half_span
    return top, bottom, left, right
def adjust_bounds(top, bottom, left, right, image_height, image_width):
    """Clamp window bounds so they lie inside the image."""
    return (max(top, 0), min(bottom, image_height),
            max(left, 0), min(right, image_width))
def custom_descriptor(intensities):
    """Build a patch descriptor: [intensities | gradient magnitude |
    gradient direction], concatenated column-wise (axis=1)."""
    # Compute image derivatives
    dX = cv2.Sobel(intensities, cv2.CV_64F, 1, 0, ksize=1)
    dY = cv2.Sobel(intensities, cv2.CV_64F, 0, 1, ksize=1)
    # Compute gradient magnitude
    GM = np.sqrt(dX ** 2 + dY ** 2)
    # Compute gradient direction
    GD = np.arctan2(dY, dX)
    # Binned Gradient Orientation
    return np.concatenate([intensities, GM, GD], axis=1)
def compute_similarity(x_prev, y_prev, curr_frame, curr_keypoints, descriptor_offset, prev_frame_descriptor,
                       similarity_mode):
    """Return the keypoint (x, y) whose descriptor best matches the previous frame's.

    similarity_mode is "ssd" (minimize sum of squared differences) or
    "ncc" (maximize normalized cross correlation via pearsonr). Falls
    back to (x_prev, y_prev) when no candidate beats the initial score.
    """
    # Fix: validate the mode once up front (the original printed a typo'd
    # "poisson_mode" message once per keypoint).
    if similarity_mode not in ("ssd", "ncc"):
        print("Please enter valid similarity mode")
        return x_prev, y_prev
    min_sum_squares = float('inf')  # want to minimize this
    max_ncc = 0  # want to maximize this
    x_curr = x_prev
    y_curr = y_prev
    # Fix: the previous-frame descriptor is loop-invariant; flatten (and,
    # for ncc, normalize) it once instead of on every iteration. The
    # repeated normalization was idempotent, so behavior is unchanged.
    prev_descriptor = prev_frame_descriptor.flatten()
    if similarity_mode == "ncc":
        prev_descriptor = (prev_descriptor - np.mean(prev_descriptor)) / (
            np.std(prev_descriptor))
    for (i, j) in curr_keypoints.tolist():
        top, bottom, left, right = get_bounds(i, j, descriptor_offset, 1)
        curr_frame_intensities = curr_frame[top:bottom, left:right]
        curr_descriptor = custom_descriptor(curr_frame_intensities).flatten()
        if similarity_mode == "ssd":
            # Sum of squared differences: smaller is a better match.
            sum_squares_tmp = np.sum((curr_descriptor - prev_descriptor) ** 2)
            if sum_squares_tmp < min_sum_squares:
                min_sum_squares = sum_squares_tmp
                x_curr = i
                y_curr = j
        else:  # "ncc"
            # Normalize the candidate, then correlate: larger is better.
            curr_descriptor = (curr_descriptor - np.mean(curr_descriptor)) / (
                np.std(curr_descriptor))
            ncc_tmp = pearsonr(curr_descriptor, prev_descriptor)
            if ncc_tmp[0] > max_ncc:
                max_ncc = ncc_tmp[0]
                x_curr = i
                y_curr = j
    return x_curr, y_curr
def find_points(prev_frame, curr_frame, prev_frame_points, detector, similarity_mode):
    """Track each point in prev_frame_points into curr_frame.

    For every tracked point: build a descriptor around it in the previous
    frame, detect candidate keypoints ('harris' or 'orb') in a smaller
    search window of the current frame, then pick the best-matching
    candidate via compute_similarity. Returns the new point list and also
    fills the module-level display_keypoints for visualization.
    """
    global curr_points, display_keypoints
    curr_points = []
    display_keypoints = []
    for idx, (x_prev, y_prev) in enumerate(prev_frame_points):
        # Create block of image intensities
        # using neighboring pixels around each
        # previously identified corner point
        #20 works for bed scene
        descriptor_offset = 20
        search_offset = .5
        # Get bounds of block
        top, bottom, left, right = get_bounds(x_prev, y_prev, descriptor_offset, 1)
        # Adjust the bounds
        # top, bottom, left, right = adjust_bounds(top,bottom,left,right,prev_frame.shape[0], prev_frame.shape[1])
        # Get descriptor for previous image
        prev_frame_intensities = prev_frame[top:bottom, left:right]
        prev_frame_descriptor = custom_descriptor(prev_frame_intensities)
        print("SHAPE",prev_frame_descriptor.shape)
        # Define bounds of search area
        top, bottom, left, right = get_bounds(x_prev, y_prev, descriptor_offset, search_offset)
        # Adjust the bounds
        # top, bottom, left, right = adjust_bounds(top,bottom,left,right, prev_frame.shape[0], prev_frame.shape[1])
        # Get search window
        search_window = curr_frame[top:bottom, left:right]
        # Compute keypoints
        keypoints = None
        if detector == 'harris':
            harris_corners = compute_harris(search_window)
            # Threshold harris corners
            keypoints = np.argwhere(harris_corners > .7 * harris_corners.max())
            # Recall numpy arrays use y,x indexing
            keypoints = np.flip(keypoints, axis=1)
        elif detector == 'orb':
            keypoints = compute_orb(search_window)
        # A lost point is dropped entirely, so the returned list can be
        # shorter than prev_frame_points.
        if len(keypoints) == 0:
            print("No keypoints could be found near ({},{})".format(x_prev, y_prev))
            continue
        # Convert window-local keypoint coordinates back to full-frame coords.
        keypoints_adjusted = np.zeros_like(keypoints)
        keypoints_adjusted[:, 0] = x_prev - int(search_offset * descriptor_offset) + keypoints[:, 0]
        keypoints_adjusted[:, 1] = y_prev - int(search_offset * descriptor_offset) + keypoints[:, 1]
        # Visualize all keypoints
        display_keypoints.extend(keypoints_adjusted.tolist())
        # Slide window throughout search area of size equal
        # to feature descriptor block
        x_curr, y_curr = compute_similarity(x_prev, y_prev, curr_frame, keypoints_adjusted, descriptor_offset,
                                            prev_frame_descriptor, similarity_mode)
        curr_points.append([x_curr, y_curr])
    return curr_points
def compute_harris(window):
    """Return the dilated Harris corner response of a BGR image window."""
    gray = cv2.cvtColor(window, cv2.COLOR_BGR2GRAY)
    response = cv2.cornerHarris(gray, 5, 3, 0.04)
    # Dilation only thickens the response for marking corners.
    response = cv2.dilate(response, None)
    return response
def compute_orb(window):
    """Detect ORB keypoints in *window*; return an (N, 2) int array of (x, y)."""
    detector = cv2.ORB_create(edgeThreshold=0)
    raw_keypoints = detector.detect(window, None)  # list of cv2.KeyPoint objects
    # Extract integer pixel coordinates from each KeyPoint.
    keypoints = [[int(kp.pt[0]), int(kp.pt[1])] for kp in raw_keypoints]
    # Typo fix: "keypoins" -> "keypoints".
    print("Number of ORB keypoints found: {}".format(len(keypoints)))
    return np.array(keypoints)
def draw_point(frame, x, y, color, radius):
    """Draw a filled circle of *radius* at (x, y) on *frame* in place."""
    cv2.circle(frame, (x, y), radius, color, -1)
def draw_points(frame, points, color, radius):
    """Draw every (x, y) in *points* as a filled circle on *frame* in place."""
    for point in points:
        draw_point(frame, point[0], point[1], color, radius)
# Shared GUI state: the frame shown in the point-selection window, the
# user-clicked corner points, and the candidate keypoints for display.
current_frame_gui = None
clicked_points = []
display_keypoints = []
class Modes:
    # Composite-source types. Plain int constants (not enum.Enum) because
    # they are compared with == against ints elsewhere in the script.
    MOVIE = 1
    IMAGE = 2
    OTHER = 3
# POINTS SHOULD BE ADDED IN THE FOLLOWING ORDER:
#
# TOP LEFT, TOP RIGHT, BOTTOM LEFT, BOTTOM RIGHT
def click(event, x, y, flags, param):
    """Mouse callback: record a left-click and mark it on the GUI frame."""
    if event != cv2.EVENT_LBUTTONDOWN:
        return
    clicked_points.append((x, y))
    draw_point(current_frame_gui, x, y, (0, 255, 0), 5)
def apply_point_offset(points):
    """Push the four corners (TL, TR, BL, BR order) outward by a fixed
    margin so the composite hides the tracked frame markers."""
    offset = 20
    # Per-corner (x, y) sign directions, in TL, TR, BL, BR order.
    directions = ((-1, -1), (1, -1), (-1, 1), (1, 1))
    points_offset = []
    for idx, (sign_x, sign_y) in enumerate(directions):
        x, y = points[idx]
        points_offset.append([x + sign_x * offset, y + sign_y * offset])
    return points_offset
def create_text_bubble(points, frame, bubble_text_queue, bubble_text_bin):
    """Draw a speech bubble near the centroid of *points* and alpha-blend
    it onto *frame*.

    bubble_text_queue is a list of (speaker, message) pairs; bubble_text_bin
    selects the active pair. Returns a float array (caller converts to uint8).
    """
    # Height and width
    H = frame.shape[0]
    W = frame.shape[1]
    # Find centroid of points
    c_x = 0
    c_y = 0
    for p in points:
        c_x += p[0]
        c_y += p[1]
    c_x = c_x//len(points)
    c_y = c_y//len(points)
    cv2.circle(frame, (c_x,c_y), 20, (255,0,0), thickness=-1, lineType=8, shift=0)
    # Ellipse size
    ellipse_vertical_offset = -140
    ellipse_horizontal_offset = -70
    ellipse_major_axis_size = 200
    ellipse_minor_axis_size = 100
    # Centroid offset
    c_x += ellipse_horizontal_offset
    c_y += ellipse_vertical_offset
    # Adjust bounds (if needed) so the ellipse stays fully on screen.
    if c_x - ellipse_major_axis_size < 0:
        c_x = ellipse_major_axis_size
    elif c_x + ellipse_major_axis_size > W:
        c_x = W - ellipse_major_axis_size
    if c_y - ellipse_minor_axis_size < 0:
        c_y = ellipse_minor_axis_size
    elif c_y + ellipse_minor_axis_size > H:
        c_y = H - ellipse_minor_axis_size
    # ###### MANUALLY OVERRIDE CENTROID LOCATION
    # # i.e. no tracking, text stays in fixed location
    # c_x = 400
    # c_y = 700
    # Create overlay
    overlay = frame.copy()
    # https://docs.opencv.org/4.1.2/d6/d6e/group__imgproc__draw.html
    cv2.circle(overlay, (c_x, c_y), 20, (0, 0, 255), -1)
    # Change speaker bubble color based on who is speaking/texting
    speaker = bubble_text_queue[bubble_text_bin][0]
    message = bubble_text_queue[bubble_text_bin][1]
    bubble_color = (255, 255, 51)
    if(speaker == "John"):
        bubble_color = (100,0,255)
    cv2.ellipse(overlay, (c_x, c_y), (ellipse_major_axis_size, ellipse_minor_axis_size), 0, 0, 360, bubble_color, -1)
    cv2.ellipse(overlay, (c_x, c_y), (ellipse_major_axis_size, ellipse_minor_axis_size), 0, 0, 360, (0, 0, 255), 4)
    # https://stackoverflow.com/questions/27647424/opencv-puttext-new-line-character
    text = "{}:\n{}".format(speaker,message)
    text_vertical_offset = int(-ellipse_minor_axis_size * .55)
    text_horizontal_offset = int(-ellipse_major_axis_size * .6)
    fontFace = cv2.FONT_HERSHEY_SIMPLEX
    fontScale = .7
    thickness = 1
    textHeight = cv2.getTextSize(text,fontFace,fontScale,thickness)[0][1]
    # For simulating newlines
    dy = textHeight + 10
    # Insert text
    c_x += text_horizontal_offset
    c_y += text_vertical_offset
    for i, line in enumerate(text.split('\n')):
        # BUG FIX: cv2.putText's signature is (img, text, org, fontFace,
        # fontScale, color, thickness); the original omitted `color`, so
        # `thickness` (1) was silently interpreted as a near-black color
        # value and no thickness was actually set. Black text is assumed
        # to be the intent here — TODO confirm the desired color.
        cv2.putText(overlay, line, (c_x, c_y + i * dy), fontFace, fontScale, (0, 0, 0), thickness)
    # alpha blend overlay with frame
    alpha = 0.8
    frame = alpha * overlay + (1-alpha) * frame
    return frame
def create_warp_comosite(composite_image,curr_frame_points_offset,current_frame):
    """Warp *composite_image* onto the quad given by the four offset points
    and superimpose the result on *current_frame*.

    NOTE(review): the name's "comosite" typo is kept because callers use it.
    Requires current_frame and composite_image to have identical shapes.
    """
    # Create point correspondences for perspective transformation
    curr_frame_points_offset_array = np.array(curr_frame_points_offset).astype(np.float32)
    input_image_boundary_points_array = np.array(
        [(0, 0), (composite_image.shape[1], 0), (0, composite_image.shape[0]),
         (composite_image.shape[1], composite_image.shape[0])], dtype=np.float32)
    M = cv2.getPerspectiveTransform(input_image_boundary_points_array, curr_frame_points_offset_array)
    maxWidth = current_frame.shape[1]
    maxHeight = current_frame.shape[0]
    # Warp composite image using perspective transformation matrix
    warped = cv2.warpPerspective(composite_image, M, (maxWidth, maxHeight))
    # use warped as mask to superimpose warped on current background
    # frame
    # Pixels still pure black after the warp lie outside the destination quad.
    mask = (warped == [0, 0, 0]).all(-1)
    assert (current_frame.shape == composite_image.shape)
    current_frame_output_composite = np.where(mask[..., None], current_frame, warped)
    return current_frame_output_composite
def main():
    """End-to-end driver: let the user click 4 corner points on the first
    frame, then track them through the video, optionally compositing an
    image/video onto the tracked quad and drawing a speech bubble.
    Writes two output movies (composite and tracking) to disk.
    """
    # Open and save video files using unique path id
    scene = "bed_scene"
    warp_flag = False
    bubble_flag = True
    # Used for text scene
    #bubble_text_queue = [("Hayley","Evil interdimensional\nmonsters are attacking\ncampus"),("Hayley","Snevy needs us to\ndefeat their boss,\nThe GOLIATH"),("Hayley","So the monsters can\ngo back to their\nown dimension"),("John","I'm in! (For Snevy)\n"),("Hayley","Great! Okay, run\nto the VCC! Be careful\n...monsters around")]
    bubble_text_queue = [("Snevy","A giant scary\nmonster is attacking!\nCan you help me\ndefeat it?"),("Snevy","Thank you!")]
    ###### TRACKING VIDEO
    # Open video stream to input movie
    tracking_video = "inputs/tracking_videos/{}.MOV".format(scene)
    track_cap = cv2.VideoCapture(tracking_video)
    start_frame = 0
    # Get metadata from input movie
    track_cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
    fps = track_cap.get(cv2.CAP_PROP_FPS)
    frame_count = int(track_cap.get(cv2.CAP_PROP_FRAME_COUNT))
    frame_width = int(track_cap.get(3))
    frame_height = int(track_cap.get(4))
    ###### OUTPUT VIDEO
    # Define the codec and create VideoWriter object
    # to write a new movie to disk
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out_composite = cv2.VideoWriter('outputs/video_output_composite_{}.MOV'.format(scene), fourcc, fps,
                                    (frame_width, frame_height))
    out_tracking = cv2.VideoWriter('outputs/video_output_tracking_{}.MOV'.format(scene), fourcc, fps,
                                   (frame_width, frame_height))
    ###### Composite Input
    mode = Modes.MOVIE
    # Choose to composite a video or image into the tracked planar object
    composite_cap = None
    composite_image = None
    if mode == Modes.IMAGE:
        composite_image = cv2.imread("inputs/composite_images/brick_wall.JPG")
    elif mode == Modes.MOVIE:
        composite_cap = cv2.VideoCapture("inputs/composite_videos/space.mp4")
    ####### MANUALLY SELECT POINTS
    # Set mouse clip
    cv2.namedWindow("Create Features To Track")
    cv2.setMouseCallback("Create Features To Track", click)
    # Get a video frame and select 4 points
    # From here on out all frames will be tracked
    ret, current_frame = track_cap.read()
    if not ret:
        print("Unable to process first frame...terminating script")
        return 1
    # Save copy of frame into global variable (for use in callback function)
    global current_frame_gui
    current_frame_gui = current_frame.copy()
    # Allow user to select bounding box points
    while True:
        # Display image
        cv2.imshow('Create Features To Track', current_frame_gui)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            print("Quit")
            cv2.destroyAllWindows()
            break
        if key == ord('u'): # erase points
            global clicked_points
            clicked_points = []
            current_frame_gui = current_frame.copy()
    if len(clicked_points) != 4:
        print("In order to apply the perspective transform you must select exactly 4 points")
    # Write first frame to disk
    out_composite.write(current_frame_gui)
    out_tracking.write(current_frame_gui)
    ###### FEATURE TRACKING FRAMES
    print("User added the following points {}".format(clicked_points))
    print("Starting Harris Corner Tracking...")
    prev_frame = current_frame
    prev_frame_points = clicked_points
    frame_index = 0
    ### Required for text bubble
    bubble_text_bin = 0
    swap_text_index = frame_count//max(len(bubble_text_queue),1) #avoid division by 0
    while track_cap.isOpened():
        # Get frame from Tracking video
        ret, current_frame = track_cap.read()
        if not ret:
            print("Unable to process frame...terminating script")
            return 1
        # Get frame from composite video
        if mode == Modes.MOVIE:
            ret, composite_image = composite_cap.read()
        # Compute sum of squared diff between current and prev harris images
        curr_frame_points = find_points(prev_frame, current_frame, prev_frame_points, "orb", "ncc")
        # Display points and keypoints
        current_frame_output = current_frame.copy()
        draw_points(current_frame_output, display_keypoints, (0, 0, 255), 5)
        draw_points(current_frame_output, curr_frame_points, (0, 255, 0), 3)
        # Apply perspective transform and composite image/video on top of tracked points
        current_frame_output_composite = current_frame.copy()
        if len(curr_frame_points) == 4 and warp_flag:
            # add small constant to each pixel in input image to ensure that
            # the image has no pure black pixels
            # this is neccessary to utilize the input image
            # as a mask when inserting it onto a movie
            # frame
            # ENSURE THE IMAGE PIXEL INTENSITIES DO NOT OVERFLOW (UINT8)
            composite_image[composite_image != 255] += 1
            # Resize image such that it is the same as the video resolution
            composite_image = cv2.resize(composite_image, (frame_width, frame_height))
            # Apply offset to cover frame markers
            curr_frame_points_offset = apply_point_offset(curr_frame_points)
            current_frame_output_composite = create_warp_comosite(composite_image,curr_frame_points_offset,current_frame_output_composite)
        # Create text bubble
        if bubble_flag and len(bubble_text_queue) >= 1 and len(curr_frame_points) >= 1:
            # if (frame_index % swap_text_index == 0) and (bubble_text_bin != len(bubble_text_queue) -1):
            # NOTE(review): hard-coded frame number for the bed scene —
            # presumably a scene-specific tuning; confirm before reuse.
            if frame_index == 240:
                bubble_text_bin += 1
            current_frame_output_composite = create_text_bubble(curr_frame_points,
                                                                current_frame_output_composite, bubble_text_queue, bubble_text_bin)
        # Convert frame to uint8
        current_frame_output_composite = current_frame_output_composite.astype(np.uint8)
        # Display the frame for diagnostic purposes
        cv2.imshow('Final Frame', current_frame_output_composite)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
        # Write frame to disk
        out_composite.write(current_frame_output_composite)
        out_tracking.write(current_frame_output)
        # Set current frame to previous frame
        prev_frame = current_frame
        prev_frame_points = curr_frame_points
        frame_index += 1
        print(frame_index)
    cv2.destroyAllWindows()
    print("Finished Processing All Frames")
    return 0
if __name__ == "__main__":
main()
|
14,414 | 8e91bba5053c114a34ba879aa9d207bf68849034 | # flake8: noqa
from . import history
|
14,415 | 80f0a2d25669bbd7d4f46ab38877f453d6ee267b | """
Utility functions for Brisera
"""
##########################################################################
## Imports
##########################################################################
import os
import time
##########################################################################
## Module Constants
##########################################################################
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
FIXTURES = os.path.join(BASE_DIR, "fixtures")
##########################################################################
## Utility Functions
##########################################################################
def fixture(fname, label="reference"):
    """
    Return the path of fixture *fname* inside the *label* subdirectory
    of the package's fixtures directory.
    """
    return os.path.join(FIXTURES, label, fname)
def fasta(path):
    """
    Parse a FASTA-format file, yielding (label, sequence) tuples.
    """
    label, sequence = None, None
    with open(path, 'r') as data:
        for raw in data:
            raw = raw.strip()
            if raw.startswith('>'):
                # Emit the record we were accumulating before starting a new one.
                if label and sequence:
                    yield (label, sequence)
                label, sequence = raw[1:], ""
            else:
                sequence += raw
    # Emit the final record, if any.
    if label and sequence:
        yield (label, sequence)
def timeit(func):
    """
    Decorator: each call returns (result, elapsed_seconds).

    Fix: applies functools.wraps so the wrapped function keeps its
    __name__/__doc__ (the original wrapper hid them).
    """
    from functools import wraps  # local import keeps module deps unchanged

    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        finit = time.time()
        delta = finit-start
        return result, delta
    return wrapper
def revc(sequence):
    """
    Return the reverse complement of the (uppercased) DNA sequence.
    Raises KeyError for characters outside A/T/C/G/N/'.'.
    """
    complements = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C', 'N': 'N', '.': '.'}
    return ''.join(complements[base] for base in reversed(sequence.upper()))
|
14,416 | 870b8d97a1c2cf83d24ae15ad96ad7e8506d2050 |
# coding: utf-8
# In[4]:
import regex, os
# In[104]:
f1 = "37) Alfa 31. Bis.jpg" #folder 1
f2 = "38) Palmira. T. di Bel, temenos. 20 marzo '93. Bis.jpg" #folder 1
f3 = "149 Apamea. Alfa 31. Bis.jpg" # folder 2
f4 = "138 Apamea. 20 marzo '93 Bis.jpg"# folder 2
f5 = "148 Santuario San Simeone. Chiesa esterna. 20 marzo '93. Bis.jpg" # folder 2
f6 = "183 Apamea. Antiquarium. 20 marzo '93. Bis..jpg"
f7 = "170 San Simeone. Nartece della facciata.20 marzo '93. Bis.jpg"
fold1 = "1_SIRIA - Palmira"
fold2 = "2_SIRIA - Damasco e Bosra e Apamea e S. Simeone e Sergiopolis e Aleppo"
fold3 = "3_SIRIA - Zenobia e Dura Europos e Mari"
foldernames = [fold1, fold2, fold3]
foldfnames = [(fold1, f1),(fold1,f2),(fold2, f3),(fold2, f4), (fold2, f5), (fold2, f6), (fold2, f7)]
# In[144]:
def create_filename(fold, fobj):
    import os
    # NOTE(review): the string below is NOT a docstring (it follows the
    # import statement, so it is a no-op expression), and its claim about
    # a DataFrame row does not match the (folder, filename) arguments.
    """Takes a DataFrame row from the 'merged' dataframe and returns a string filename."""
    # remove all underscores
    no_underscores = regex.sub(r"_", " ", fobj)
    # <Filename0>, dummy, <Filename1> = <Filename>.partition(" ")
    # Splits the leading photo number off the rest of the name.
    filename0, dummy, filename1 = no_underscores.partition(" ")
    #print("filename0: {}\n dummy: {}\n filename1: {}\n".format(filename0, dummy, filename1))
    # Grab the extension for later use
    ext = regex.findall(r"\.\w+",filename1)[-1]
    #print(ext)
    # <Filename_1_clean> = <Filename1> with "20 marzo..." removed, any dubble spaces replaced by single ones
    # and finally the spaces substituted by underscores (_).
    # Patterns match the various hand-typed "20 marzo '93" date spellings.
    yeardate_patt = regex.compile(r"[\. ]?\d+[o']? m? ?arz[o0p] ?'?\d+[\. ]?",flags=regex.I)
    year_patt = regex.compile(r" '\d\d[\.,]? ",flags=regex.I)
    date_patt = regex.compile(r"20 marzo", flags=regex.I)
    yeardate_match = yeardate_patt.search(filename1)
    year_match = year_patt.search(filename1)
    date_match = date_patt.search(filename1)
    print("filename1: {}".format(filename1))
    if yeardate_match:
        filename_1_clean = regex.sub(yeardate_patt, " ",filename1)
        print("yeardate_match. filename_1_clean is:\n {:<30}".format(filename_1_clean))
    elif year_match and not yeardate_match:
        filename_1_clean = regex.sub(year_patt, " ",filename1)
        print("year_match and not yeardate_match. filename_1_clean is:\n {:<30}".format(filename_1_clean))
    #elif date_match and not yeardate_match:
    #    print("date_match: {} in filename1: {}".format(date_match,filename1))
    else:
        filename_1_clean = filename1
        print("no year_match or yeardate_match. filename_1_clean is:\n {:<30}".format(filename_1_clean))
    # Remove the extension from filename_1_clean
    fname, extension = os.path.splitext(filename_1_clean)
    filename_1_clean = fname
    #print(filename_1_clean)
    # Remove all 'Bis' from end of filename_1_clean
    filename_1_clean = regex.sub(r"Bis", "", filename_1_clean, flags=regex.I)
    print("filename_1_clean: {}".format(filename_1_clean))
    # Remove all leading and trailing whitespace from end of filename_1_clean
    # Ensure no double spaces left
    filename_1_clean = filename_1_clean.strip(". ").replace(" ", "_").replace("__", "_")
    #print(repr(filename_1_clean))
    # <Filename_0_clean> = <Filename0> with any trailing brackets ()) removed.
    filename_0_clean = regex.sub(r"\).?","",filename0)
    #print(filename_0_clean)
    # <Folder_#>, dummy, dummy = <Folder>.partition("_")
    folder_no, dummy, dummy = fold.partition("_")
    #print("Folder number is: {}".format(folder_no))
    ################## Final piecing together of filename ####################################
    #Filename: <Filename_1_clean>_-_DecArch_-_<Folder_no>-<Filename_0_clean>.<ext>
    #Filename example:
    #So for 49) Palmira. Via colonnata presso il teatro. 20 marzo '93. Bis.jpg end result is
    #Palmira._Via_colonnata_presso_il_teatro._-_DecArch_-1-49.jpg
    #Palmira._Via_colonnata_presso_il_teatro._-_DecArch_-1-49.jpg
    # NOTE(review): `ext` is computed above but never appended here —
    # presumably the extension was meant to be part of the result; confirm.
    filename = filename_1_clean + "_-_DecArch_-_" + folder_no + "-" + filename_0_clean
    print("filename: {}\n".format(filename))
    # Ensure no multiple spaces left
    #filename = regex.sub(r" +"," ", filename)
    #print("filename is: {}".format(filename))
    return filename
# In[145]:
# Exercise create_filename on every sample (folder, filename) pair.
for fold, fobj in foldfnames:
    create_filename(fold, fobj)
# In[139]:
f7 = "170 San Simeone. Nartece della facciata.20 marzo '93. Bis.jpg"
date_patt = regex.compile(r"20 marzo", flags=regex.I)
date_match = date_patt.search(f7)
# NOTE(review): a regex Match object is never `is True`, so this expression
# always evaluates to False — `date_match is not None` was probably intended.
date_match is True
# In[ ]:
|
14,417 | 80b45e2772a277d953ded8dfbd05124883e83f33 | # Generated by Django 2.0.1 on 2018-02-08 13:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds Tool.pmneeded and adjusts help texts
    and null/unique constraints on several tool fields. Do not hand-edit
    beyond what Django's migration framework expects."""
    dependencies = [
        ('catalog', '0049_auto_20180201_1259'),
    ]
    operations = [
        migrations.AddField(
            model_name='tool',
            name='pmneeded',
            field=models.BooleanField(default=False, help_text='Will this tool have PM checks done?'),
        ),
        migrations.AlterField(
            model_name='tool',
            name='name',
            field=models.CharField(max_length=200, null=True, verbose_name='tool name'),
        ),
        migrations.AlterField(
            model_name='tool',
            name='num',
            field=models.CharField(max_length=100, null=True, unique=True, verbose_name='tool no.'),
        ),
        migrations.AlterField(
            model_name='toolsn',
            name='pm',
            field=models.BooleanField(default=False, help_text='Is PM complete for this tool?'),
        ),
        migrations.AlterField(
            model_name='toolsn',
            name='repair',
            field=models.BooleanField(default=False, help_text='Does this tool need to be repaired?'),
        ),
    ]
|
14,418 | 6a6553098e8a23076fab8f4a3aba065dd5a214f8 | '''
Created on Feb 2, 2015
@author: daqing_yi
'''
from TrajectoryReader import *
from WorldMapMgr import *
import copy
import numpy as np
class HomotopyMgr(object):
    """Tracks homotopy classes of paths on a world map's topological graph.

    Python 2 code (print statements). Paths are represented as lists of
    subsegment-name strings; a reference path defines the homotopy class
    other paths are compared against.
    """
    def __init__(self, world_map, reader):
        self.world_map = world_map
        self.reader = reader
        self.refStrPath = None
        # Subsegments lying on the dividing reference line (see getDividingRefs).
        self.dividingRefs = []
        self.startReg = None
        self.endReg = None
        self.allHomotopyClasses = []
    def init(self, refStrPath, startReg, endReg):
        """Set the reference string path and enumerate all start->end paths by BFS."""
        self.refStrPath = refStrPath
        self.refStrPathShort = self.reader.shortenString(refStrPath)
        self.startReg = startReg
        self.endReg = endReg
        self.allHomotopyClasses = self.world_map.getTopologicalGraph().findAllPathsByBFS(self.startReg.getName(), self.endReg.getName())
        print "REF"
        print self.refStrPath
        print "ALL " + str(len(self.allHomotopyClasses))
        print self.refStrPath in self.allHomotopyClasses
        print self.allHomotopyClasses
    def inSameHomotopy(self, path):
        """True when *path* is string-equivalent to the reference path."""
        return self.reader.compareStringPath(path, self.refStrPath)
    def extendPath(self, currentPath, start, end):
        """Return (newPath, newBit): currentPath extended with the names of
        subsegments crossed between *start* and *end* (skipping immediate
        repeats of the last element)."""
        newPath = copy.deepcopy(currentPath)
        newBit = []
        subseg_list = self.world_map.getCrossingSubsegments(start, end)
        if len(subseg_list) > 0:
            for subseg in subseg_list:
                if len(newPath) > 0:
                    if subseg.name != newPath[len(newPath)-1]:
                        newPath.append(subseg.name)
                        newBit.append( subseg.name )
                else:
                    newPath.append(subseg.name)
                    newBit.append( subseg.name )
        return newPath, newBit
    def isCrossingDividingRefs(self, start, end):
        """True when the segment start->end crosses any dividing reference subsegment."""
        intseg = self.world_map.getCrossingSubsegList(start, end, self.dividingRefs)
        if intseg == None:
            return False
        return True
    def getDividingRefs(self, start, end):
        """Pick a reference angle between start and end (relative to the map's
        central point) and collect subsegments near that angle or its
        opposite (within precisionDelta radians) as dividing references."""
        rad_start = np.arctan2(start[1]-self.world_map.centralPoint[1], start[0]-self.world_map.centralPoint[0])
        if rad_start < 0:
            rad_start += 2*np.pi
        rad_end = np.arctan2(end[1]-self.world_map.centralPoint[1], end[0]-self.world_map.centralPoint[0])
        if rad_end < 0:
            rad_end += 2*np.pi
        if rad_start < rad_end:
            rad1 = rad_start
            rad2 = rad_end
        else:
            rad1 = rad_end
            rad2 = rad_start
        between_rads = []
        for rr in self.world_map.rad_list:
            if rr >= rad1 and rr < rad2:
                between_rads.append(rr)
        between_rads.sort(reverse=False)
        # Take the middle of the candidate angles as the reference.
        ref_rad = None
        if len(between_rads) > 0:
            ref_rad = between_rads[int(len(between_rads)/2)]
        print "GET DIVIDING REFS "
        print ref_rad
        # NOTE(review): if no angle falls in [rad1, rad2), ref_rad stays None
        # and the comparisons below raise TypeError — confirm callers
        # guarantee a non-empty range.
        precisionDelta = 0.07
        self.dividingRefs = []
        for subseg in self.world_map.subsegments:
            if (subseg.rad>=ref_rad-precisionDelta and subseg.rad<ref_rad+precisionDelta) \
                or (subseg.rad>=ref_rad+np.pi-precisionDelta and subseg.rad<ref_rad+np.pi+precisionDelta) \
                or (subseg.rad>=ref_rad-np.pi-precisionDelta and subseg.rad<ref_rad-np.pi+precisionDelta):
                self.dividingRefs.append(subseg)
        return self.dividingRefs
|
14,419 | 952863a616535d0e45b6567088bc301ccf584b05 | #set fileencoding=utf-8
"""
00. 文字列の逆順
文字列"stressed"の文字を逆に(末尾から先頭に向かって)並べた文字列を得よ.
"""
string="stressed" #逆順にする文字列
print (string[::-1])
###############################################
# rev="" #答えを入れる変数
# for i in reversed(range(len(string))):
# rev+=string[i]
# print rev
|
14,420 | 02976e22d4801ed89d58acb936bd2b5621d25ec9 | import os
from itertools import zip_longest
from string import ascii_lowercase
# Complete the makingAnagrams function below.
def makingAnagrams(s1, s2):
    """Return the minimum number of character deletions needed to make
    s1 and s2 anagrams of each other.

    Generalized: counts every character (the original only tallied ascii
    lowercase) and avoids the None padding that zip_longest injected into
    the frequency dicts for unequal-length inputs.
    """
    from collections import Counter  # local import keeps module deps unchanged
    count_1 = Counter(s1)
    count_2 = Counter(s2)
    # Multiset difference in each direction = characters that must be deleted.
    return sum((count_1 - count_2).values()) + sum((count_2 - count_1).values())
if __name__ == "__main__":
    # HackerRank harness: read the two strings from stdin and write the
    # answer to the file path given by the OUTPUT_PATH environment variable.
    fptr = open(os.environ["OUTPUT_PATH"], "w")
    s1 = input()
    s2 = input()
    result = makingAnagrams(s1, s2)
    fptr.write(str(result) + "\n")
    fptr.close()
|
14,421 | 79203fed3ec345f5036df0d75359bad7312c79a6 | """
Test cases for the core Models
"""
from django.test import TestCase
from core.models import Person, Story, CuratedStory, Gift
from common import LhrTests
class ModelTests(LhrTests):
    """
    Test case for Models
    """
    def setUp(self):
        """No per-test fixtures needed; the _create_* helpers build data on demand."""
        pass
    def tearDown(self):
        # Reminder, this will delete any created models
        super(ModelTests, self).tearDown()
    def testSingleCuratedStoryCreation(self):
        """A CuratedStory created from a Story copies every shared field."""
        p = self._create_person()
        s = self._create_story(p)
        res = CuratedStory.objects.create_from_story(s)
        self.assertTrue(res)
        c = CuratedStory.objects.get(story=s)
        self.assertEquals(c.story, s)
        story_fields = Story._meta.get_fields()
        for field in story_fields:
            # BUG FIX: the original compared Field objects against strings
            # (always False) and then referenced an undefined `attr_name`,
            # so no field values were ever compared and the test passed
            # vacuously. Skip id/status (CuratedStories don't have them)
            # and compare everything else by field name.
            if field.name in ('id', 'status'):
                continue
            curated_value = getattr(c, field.name)
            story_value = getattr(s, field.name)
            self.assertEquals(curated_value, story_value)
    def testMultipleCuratedStoryCreation(self):
        """Bulk creation curates every story exactly once with no problems."""
        p1 = self._create_person()
        p2 = self._create_person()
        s1 = self._create_story(p1)
        s2 = self._create_story(p2)
        stories = Story.objects.all()
        created, problems = CuratedStory.objects.create_from_stories(stories)
        curated_stories_from_db = CuratedStory.objects.all()
        self.assertEquals(created, 2)
        self.assertEquals(problems, 0)
    def testDuplicateCuratedStoryCreation(self):
        """Re-curating an already-curated story is skipped, not an error."""
        p1 = self._create_person()
        p2 = self._create_person()
        s1 = self._create_story(p1)
        s2 = self._create_story(p2)
        CuratedStory.objects.create_from_story(s1)
        stories = Story.objects.all()
        created, problems = CuratedStory.objects.create_from_stories(stories)
        self.assertEquals(created, 1)
        self.assertEquals(problems, 0)
|
14,422 | fa469e3ab126adbe7ce779f569852865ae7b6f7e | import trimesh
import numpy as np
class Camera():
    # camera coordinates: y up, z forward, x right.
    # consistent with blender definitions.
    # res = [w,h]
    def __init__(self):
        """Default camera at (1.6, 0, 0) looking down -x, 50mm focal length,
        600x600 resolution, 35mm-film sensor diagonal."""
        self.position = np.array([1.6, 0, 0])
        self.rx = np.array([0, 1, 0])
        self.ry = np.array([0, 0, 1])
        self.rz = np.array([1, 0, 0])
        self.focal_length = 0.05
        self.res = [600,600]
        # set the diagnal to be 35mm film's diagnal
        self.set_diagal((0.036**2 + 0.024**2)**0.5)
    def rotate(self, rot_mat):
        """Set the camera basis vectors from the columns of a 3x3 rotation matrix."""
        self.rx = rot_mat[:, 0]
        self.ry = rot_mat[:, 1]
        self.rz = rot_mat[:, 2]
    def move_cam(self, new_pos):
        """Move the camera to *new_pos* without changing its orientation."""
        self.position = new_pos
    def set_pose(self, inward, up):
        """Build an orthonormal basis from *inward* (camera -view direction)
        and *up*, normalizing each axis."""
        # print(inward)
        # print(up)
        self.rx = np.cross(up, inward)
        self.ry = np.array(up)
        self.rz = np.array(inward)
        self.rx = self.rx / np.linalg.norm(self.rx)
        self.ry = self.ry/np.linalg.norm(self.ry)
        self.rz = self.rz/np.linalg.norm(self.rz)
    def set_diagal(self, diag):
        """Set sensor width from a sensor diagonal, keeping the res aspect ratio."""
        h_relative = self.res[1] / self.res[0]
        self.sensor_width = np.sqrt(diag**2 / (1 + h_relative**2))
    def lookat(self, orig, target, up):
        """Place the camera at *orig* and orient it toward *target*.

        The given *up* is re-orthogonalized against the view direction.
        """
        self.position = np.array(orig)
        target = np.array(target)
        inward = self.position - target
        right = np.cross(up, inward)
        up = np.cross(inward, right)
        self.set_pose(inward, up)
    def generate_rays(self):
        """Return (origins, directions, sensor_points) for one ray per pixel.

        Each array has shape (res[0], res[1], 3); directions are unit vectors
        from the camera position through points on the sensor plane placed
        focal_length behind the position along -rz.
        """
        orig = np.zeros([self.res[0],self.res[1],3])
        orig[:,:,0] = self.position[0]
        orig[:,:,1] = self.position[1]
        orig[:,:,2] = self.position[2]
        w = self.sensor_width
        h_linspace = np.linspace(-w / 2, w / 2, self.res[0])
        w_linspace = np.linspace(-w / 2, w / 2, self.res[1])
        H,W = np.meshgrid(h_linspace,w_linspace)
        H = H[...,None]
        W = W[..., None]
        ends = (self.position-self.rz*self.focal_length)+H*self.ry[None,None,:] + W*self.rx[None,None,:]
        direction = (ends-orig)/np.linalg.norm(ends-orig,axis=2,keepdims=True)
        return orig,direction,ends
14,423 | 45e82b7ab8dad14b76a633d800a91ddb1ea4c867 | import numpy as np
import matplotlib.pyplot as plt
import cv2
image = cv2.imread("Assets/Demo.jpg")
#image[0:100,] = image[0:100, 50:100]
# Car Coords = # Car Coords = 142:220, 80:130
# Copy the 78x50-pixel "car" region into the image's top-left corner.
image[0:78, 0:50] = image[142:220, 80:130]
cv2.imshow("image", image)
# plt.imshow(image)
# plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
|
14,424 | 8143e443c86cdcf853b8755c3ef95b670798a78a | import tkinter
from tkinter import messagebox
import string
import random
from ArbolBinario import ArbolBinario
from tkinter import *
class VentanaArbolAle:
    """Tkinter window for building a random binary tree of unique uppercase
    letters and querying per-node information (children, parent, sibling,
    ancestors, grandparent, uncle, left/right position)."""
    def __init__(self):
        def informacionNodo():
            """Look up the node typed in text_nodo and show its details in a dialog."""
            charNodo.set(text_nodo.get())
            # NOTE(review): the Arbol* lookups receive the StringVar itself,
            # not its string value — confirm ArbolBinario expects that.
            flag = arbol.buscarNodo(arbol.root, charNodo)
            hijos = arbol.buscarHijos(arbol.root, charNodo)
            papa = arbol.buscarPapa(arbol.root, charNodo)
            hermano = arbol.buscarHermano(arbol.root, charNodo)
            ancestros = arbol.buscarAncestros(arbol.root, charNodo)[:-1]
            abuelo = arbol.buscarAbuelo(arbol.root,charNodo)
            tio = arbol.buscarTio(arbol.root, charNodo)
            posicion = arbol.buscarDerechoIzquierdo(arbol.root, charNodo)
            nodoPosicion = 'No es izquierdo ni derecho'
            if posicion == 1:
                nodoPosicion = 'derecho'
            if posicion == 0:
                nodoPosicion = 'izquierdo'
            if flag:
                # NOTE(review): messagebox._show is a private tkinter API;
                # messagebox.showinfo would be the supported equivalent.
                messagebox._show(title="Información del Nodo " + charNodo.get(),
                                 message= "La información del Nodo " + charNodo.get()+" es: "+
                                          "\n\nNúmero de hijos: "+str(len(hijos))+" | Hijos: "+','.join(hijos)+
                                          "\nPapá: "+ str(papa)+"\nHermano: " + str(hermano)+
                                          "\nAncestros: "+','.join(ancestros)+"\nAbuelos: "+','.join(abuelo)+
                                          "\nTio: "+ str(tio) +
                                          "\n Posición nodo: " + nodoPosicion)
            else:
                messagebox.showinfo(title="Manual de Usuario", message="No se encontro el nodo")
        def buildTree():
            """Build a tree of N distinct random uppercase letters (N from text_box)."""
            cadenaTamano.set(text_box.get())
            arbolTamano = int(cadenaTamano.get())
            ListaAbc = [char for char in string.ascii_uppercase]
            cadenaArbol = []
            # Draw letters without replacement so every node label is unique.
            for _ in range(arbolTamano):
                numeroAleatorio = random.randint(0, len(ListaAbc) - 1)
                cadenaArbol.append(ListaAbc[numeroAleatorio])
                del ListaAbc[numeroAleatorio]
            stringArbol = tkinter.StringVar(None,','.join(cadenaArbol))
            arbol.buildTree(stringArbol)
            iniciarComponentesABA()
        def iniciarComponentesABA():
            """Show the traversal/summary panel and the node-query widgets."""
            # Result panel properties
            result_box = Label(windowRandomTree, font=("Consolas", 18), bg="#005e35", fg="#ffffff", width="63",
                               height="7")
            result_box.config(text="PreOrder: "+','.join(arbol.PreOrder(arbol.root))+"\nInOrder: "+','.join(arbol.InOrder(arbol.root))+
                                   "\nPostOrder: "+','.join(arbol.PosOrder(arbol.root))+"\nAltura: "+str(arbol.hallarAltura())+
                                   "\nHojas: "+','.join(arbol.encontrarHoja(arbol.root))+"\nGrado: "+str(arbol.grado(arbol.root)))
            result_box.place(x=12, y=145)
            # Node text-entry properties
            text_nodo.place(x=14, y=370)
            # Node submit-button properties
            button_nodo = Button(windowRandomTree, text="Enviar", font=("Consolas", 10),
                                 bg="#005e35", fg="#ffffff", width="14", height="2", command = informacionNodo)
            button_nodo.place(x=12, y=400)
        # Variables
        charNodo = StringVar()
        cadenaTamano = StringVar()
        arbol = ArbolBinario()
        # Window properties
        windowRandomTree = Tk()
        windowRandomTree.resizable(False,False)
        windowRandomTree.geometry("850x450")
        windowRandomTree.title("Ingreso de Árbol Binario Aleatorio")
        windowRandomTree.config(background="#757574")
        # Label properties
        label = Label(windowRandomTree, text="Escribe el tamaño del árbol y luego da click al botón 'Crear' para generar el árbol",
                      font=("Consolas", 9), bg="#005e35", fg="#ffffff", width="117", height="3")
        label.place(x=12, y=10)
        # Text-box properties
        text_box = Entry(windowRandomTree, font=("Consolas", 12), width="11",justify=tkinter.CENTER)
        text_box.place(x=392, y=65)
        text_nodo = Entry(windowRandomTree, font=("Consolas", 12), width="11", justify=tkinter.CENTER,
                          textvariable=charNodo)
        # Button properties
        button_send = Button(windowRandomTree, text="Crear", command=buildTree, font=("Consolas", 10),
                             bg="#005e35", fg="#ffffff", width="14", height="2")
        button_send.place(x=390, y=95)
        button_end = Button(windowRandomTree, text="Cerrar", font=("Consolas", 10), command=windowRandomTree.destroy,
                            bg="#005e35", fg="#ffffff", width="14", height="2")
        button_end.place(x=730, y=400)
        windowRandomTree.mainloop()
14,425 | e2f6c567c5ca47ddd9bcec2de64d1c6c596e9c54 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
from glob import glob
import sys
from datetime import datetime, timedelta
from collections import OrderedDict
import time
import settings
import queries
from utils import *
# --------------------------------------------------------------------------------
# Registry of the queries that must run to send the dictionary data and the
# receipts changed since the previous script run to the server.
# q[key]  = SQL template, fn[key] = CSV file name inside the iteration dir.
# --------------------------------------------------------------------------------
q = {}
fn = {}
## units of measure:
fn['unit'] = '1-unit.csv'
q['unit'] = queries.unit_query
# shops and their terminals (cash registers):
fn['terminal'] = '2-terminal.csv'
q['terminal'] = queries.terminal_query
# shops and their names (currently disabled):
#fn['shop'] = '3-shop.csv'
#q['shop'] = queries.shop_query
# cashiers (code, full name):
fn['cashier'] = '4-cashier.csv'
q['cashier'] = queries.cashier_query
# category tree:
fn['category'] = '5-category.csv'
q['category'] = queries.category_query
# products:
fn['product'] = '6-product.csv'
q['product'] = queries.product_query
# receipts (one file per shop, %i = shop id):
fn['receipt'] = '7-receipt-%i.csv'
q['receipt'] = queries.receipt_query
#-----------------------------------------------------------------------------
# 2. завантажуємо дельту по одиницям виміру (unit): //////////////////////////
#-----------------------------------------------------------------------------
def load_units(cursor):
    """Export the measurement units changed since the previous run.

    Selects the `unit` delta between the previous and current `pack`
    updatenum, dumps it to a CSV file inside the iteration directory and
    POSTs the same records to the Datawiz API.  On an API error the whole
    script is aborted via exit_and_send_error_message().
    """
    log('ОБРОБКА `Unit`')
    print (updatenum['pack'][PREV], updatenum['pack'][CUR])
    query = q['unit'] % (
        updatenum['pack'][PREV], updatenum['pack'][CUR]
    )
    #print query
    # Column positions in the result set.
    ID=0
    NAME=1
    PACKED=2
    PACK_CAPACITY=3
    rows = load_sql(cursor, query)
    if rows:
        t1 = time.time()
        filename = iter_dir_name + '/' + fn['unit']
        print filename
        write_csv(rows, filename )
        t2 = time.time()
        log('Файл %s збережено за %s sec. Записів: %i'
            % (filename, str(timedelta(seconds=t2-t1)), len(rows))
        )
        ii=0
        t1 = time.time()
        data_list = []
        # Re-shape the raw rows into the JSON payload the API expects.
        for row in rows:
            ii += 1
            data = {
                'unit_id': row[ID],
                'name': row[NAME],
                'packed': row[PACKED],
                'pack_capacity': row[PACK_CAPACITY]
            }
            data_list.append(data)
        retcode, result = POST(
            'http://bi.datawiz.io/api/v1/units/',
            data=data_list,
            key_id=KEY_ID,
            secret=SECRET
        )
        if retcode == ERROR:
            errlog(
                u'Помилка передачі даних через API для `Unit`'
            )
            errlog(
                'При наступному запуску ітерація #%i буде повторена'
                % iteration_number
            )
            errlog('Програма припинена з кодом помилки 1')
            exit_and_send_error_message()
        t2 = time.time()
        log('На сервер bi.datawiz.io/api за %s sec передано %i записів по '
            'довіднику `Unit` (~%s зап/сек)' %
            (str(timedelta(seconds=t2-t1)), ii, round(ii/(t2-t1),2))
        )
    else:
        log('Довідник `unit` не змінювався. Нічого зберігати не потрібно.')
#-----------------------------------------------------------------------------
# 3. завантажуємо дельту по касирам (cashier): ///////////////////////////////
#-----------------------------------------------------------------------------
def load_cashiers(cursor):
    """Export the cashiers changed since the previous run.

    Dumps the `cashier` delta to CSV and POSTs {cashier_id, name} records
    to the Datawiz API; aborts the script on an API failure.
    """
    log('ОБРОБКА `Cashier`')
    print (updatenum['cashier'][PREV], updatenum['cashier'][CUR])
    query = q['cashier'] % (
        updatenum['cashier'][PREV], updatenum['cashier'][CUR]
    )
    #print query
    rows = load_sql(cursor, query)
    if rows:
        t1 = time.time()
        filename = iter_dir_name + '/' + fn['cashier']
        print filename
        write_csv(rows, filename )
        t2 = time.time()
        log('Файл %s збережено за %s sec. Записів: %i'
            % (filename, str(timedelta(seconds=t2-t1)), len(rows))
        )
        ii=0
        t1 = time.time()
        data_list = []
        # row[0] = cashier code, row[1] = full name.
        for row in rows:
            ii += 1
            data = { 'cashier_id': row[0], 'name': row[1] }
            data_list.append(data)
        retcode, result = POST(
            'http://bi.datawiz.io/api/v1/cashiers/',
            data=data_list,
            key_id=KEY_ID,
            secret=SECRET
        )
        if retcode == ERROR:
            errlog(
                u'Помилка передачі даних через API для `Cashier` '
                #'{id=%i, name=%s}' % (row[0], row[1])
            )
            errlog(
                'При наступному запуску ітерація #%i буде повторена'
                % iteration_number
            )
            errlog('Програма припинена з кодом помилки 1')
            exit_and_send_error_message()
        t2 = time.time()
        log('На сервер bi.datawiz.io/api за %s sec передано %i записів по '
            'довіднику `Cashier` (~%s зап/сек)' %
            (str(timedelta(seconds=t2-t1)), ii, round(ii/(t2-t1),2))
        )
    else:
        log('Довідник `Cashier` не змінювався. Нічого зберігати не потрібно.')
def _order_categories(cat, cdict, ordict):
cid = cat['category_id']
parent_id = cat['parent_id']
if parent_id in cdict:
_order_categories(cdict[parent_id], cdict, ordict)
if cid not in ordict:
ordict[cid] = cat
#-----------------------------------------------------------------------------
# 4. завантажуємо дельту по категоріям (categories): /////////////////////////
#-----------------------------------------------------------------------------
def load_category(cursor):
    """Export the category-tree delta, parents before children.

    The raw delta rows are re-ordered via _order_categories so a parent
    category is always sent to the API before any of its descendants.
    Aborts the script on an API failure.
    """
    log('ОБРОБКА Category')
    # Column positions in the result set.
    ID=0
    PARENTID=1
    NAME=2
    query = q['category'] % (
        updatenum['category'][PREV],
        updatenum['category'][CUR]
    )
    rows = load_sql(cursor, query)
    if rows:
        ii=0
        cat_dict = {}
        orddict = OrderedDict()
        for row in rows:
            ii += 1
            cid = row[ID]
            parent_id = row[PARENTID]
            name = row[NAME]
            cat = { 'category_id': cid, 'parent_id': parent_id, 'name': name }
            cat_dict[cid] = cat
        # Topologically order the categories (ancestors first).
        for cat in cat_dict.values():
            _order_categories(cat, cat_dict, orddict)
        cat_list = orddict.values()
        t1 = time.time()
        filename = iter_dir_name + '/' + fn['category']
        write_csv(rows, filename)
        t2 = time.time()
        log('Файл %s збережено за %s sec. Записів: %i' %
            (filename, str(timedelta(seconds=t2-t1)), len(rows))
        )
        t1 = time.time()
        #for cat in cat_list:
        #    #log(cat)
        retcode, result = POST(
            'http://bi.datawiz.io/api/v1/categories/',
            data=cat_list,
            key_id=KEY_ID,
            secret=SECRET
        )
        if retcode == ERROR:
            errlog(
                u'Помилка передачі даних через API для category '
                #'{id=%(category_id)i, parent_id=%(parent_id)s, name=%(name)s}' % cat
            )
            errlog(
                'При наступному запуску ітерація #%i буде повторена'
                % iteration_number
            )
            errlog('Програма припинена з кодом помилки 1')
            exit_and_send_error_message()
        t2 = time.time()
        log('Через API на сервер bi.datawiz.io/api за %s sec було передано %i записів по '
            'довіднику Category (~%s зап/сек)' %
            (str(timedelta(seconds=t2-t1)), ii, round(ii/(t2-t1),2))
        )
    else:
        log('Довідник `%s` не змінювався. Нічого зберігати не потрібно.' % 'category')
##-----------------------------------------------------------------------------
## 5. завантажуємо дельту по продуктам (products): ////////////////////////////
##-----------------------------------------------------------------------------
def load_product(cursor):
    """Export the products changed since the previous run.

    The delta covers both `product` and `pack` updatenum ranges; rows are
    dumped to CSV and POSTed to the Datawiz API.  Aborts the script on an
    API failure.
    """
    log('ОБРОБКА Product')
    # Column positions in the result set.
    ID=0
    CATEGORYID=1
    UNITID=2
    NAME=3
    query = q['product'] % (
        updatenum['product'][PREV],
        updatenum['product'][CUR],
        updatenum['pack'][PREV],
        updatenum['pack'][CUR],
    )
    rows = load_sql(cursor, query)
    if rows:
        t1 = time.time()
        filename = iter_dir_name + '/' + fn['product']
        write_csv(rows, filename)
        t2 = time.time()
        log('Файл %s збережено за %s sec. Записів: %i' %
            (filename, str(timedelta(seconds=t2-t1)), len(rows))
        )
        t1 = time.time()
        ii=0
        prod_list = []
        # Re-shape the raw rows into the JSON payload the API expects.
        for row in rows:
            ii += 1
            pid = row[ID]
            category_id = row[CATEGORYID]
            unit_id = row[UNITID]
            name = row[NAME]
            prod = {
                'product_id': pid,
                'category_id': category_id,
                'unit_id': unit_id,
                'name': name
            }
            prod_list.append(prod)
        retcode, result = POST(
            'http://bi.datawiz.io/api/v1/products/',
            data=prod_list,
            key_id=KEY_ID,
            secret=SECRET
        )
        if retcode == ERROR:
            #prod['ii'] = ii
            errlog(
                u'Помилка передачі даних через API для product' #%(ii)i '
                #'{id=%(product_id)s, category_id=%(category_id)s, '
                #'unit_id=%(unit_id)s, name=%(name)s}' % prod
            )
            errlog(
                'При наступному запуску ітерація #%i буде повторена'
                % iteration_number
            )
            errlog('Програма припинена з кодом помилки 1')
            exit_and_send_error_message()
        t2 = time.time()
        log('На сервер bi.datawiz.io/api за %s sec було передано %i записів по '
            'довіднику Product (~%f зап/сек)' %
            (str(timedelta(seconds=t2-t1)), ii, round(ii/(t2-t1),2))
        )
    else:
        log('Довідник `Product` не змінювався. Нічого зберігати не потрібно.')
##-----------------------------------------------------------------------------
## 6. завантажуємо дельту по чекам (receipts): ////////////////////////////////
##-----------------------------------------------------------------------------
def load_receipts(cursor):
    """Export the new receipts per shop and upload them in blocks.

    For every shop in sales_lastdate, selects the receipt rows between the
    previous and current last-sale dates, dumps them to CSV and groups the
    flat rows into receipt objects with nested cartitems, uploading BLOCK
    receipts per API call.  Aborts the script on an API failure.
    """
    #import pudb; pudb.set_trace()
    # Row layout:
    # sales_dt, terminal_identifier, receipt_identifier,
    # posnum, price, total_price, qty, product_identifier, packed,
    # cashier_identifier
    log('ОБРОБКА Receipts')
    DATE = 0
    TERM_ID = 1
    RECEIPT_ID = 2
    POSNUM = 3
    PRICE = 4
    TOTAL_PRICE = 5
    QTY = 6
    PRODUCT_ID = 7
    PACKED = 8
    CASHIER_ID = 9
    BLOCK = 1000 # how many receipts to upload per API call
    # process each shop separately:
    for shop_id, lastdates in sales_lastdate.items():
        print 'shop_id=', shop_id, 'sales_lastdate[shop_id][PREV]=',\
            sales_lastdate[shop_id][PREV], 'sales_lastdate[shop_id][CUR]=',\
            sales_lastdate[shop_id][CUR]
        query = q['receipt'] % {
            'shop_id': shop_id,
            'date_from': sales_lastdate[shop_id][PREV],
            'date_to': sales_lastdate[shop_id][CUR]
        }
        ii=0 # running count of cartitems
        iii=0 # running count of receipts
        beg_iii=0 # index of the first receipt in the current upload block
        receipt = None
        rows = load_sql(cursor, query)
        if rows:
            t1 = time.time()
            filename = iter_dir_name + '/' + fn['receipt'] % shop_id
            write_csv(rows, filename)
            t2 = time.time()
            log('Файл %s збережено за %s sec. Записів: %i' %
                (filename, str(timedelta(seconds=t2-t1)), len(rows))
            )
            t1=time.time()
            receipt_list = []
            for row in rows:
                ii += 1
                #shop_identifier = shop_id #row[SHOP_ID]
                term_identifier = row[TERM_ID]
                order_id = row[RECEIPT_ID]
                posnum = row[POSNUM]
                cashier_identifier = row[CASHIER_ID]
                dt = datetime.strptime(row[DATE], '%Y%m%d%H%M%S')\
                    .strftime('%Y-%m-%dT%H:%M:%S')
                pid = row[PRODUCT_ID]
                price = float(row[PRICE])
                base_price = price
                qty = float(row[QTY])
                total_price = float(row[TOTAL_PRICE])
                # A change of terminal/order/date means a new receipt starts.
                if (not receipt or (
                        receipt and (
                            receipt['terminal_id'] != term_identifier or
                            receipt['order_id'] != order_id or
                            receipt['date'] != dt
                        )
                    )
                ):
                    if receipt:
                        #log( 'save receipt: %s_%s_%s' % ( term_identifier, dt, order_id ))
                        receipt_list.append(receipt)
                        if len(receipt_list) >= BLOCK:
                            retcode, new_product = POST(
                                'http://bi.datawiz.io/api/v1/receipts/',
                                data=receipt_list,
                                key_id=KEY_ID,
                                secret=SECRET
                            )
                            if retcode == ERROR:
                                #receipt['ii'] = ii
                                errlog(
                                    u'Помилка передачі даних через API для receipt' #%(ii)i '
                                    #'{date=%(date)s, order_id=%(order_id)s, '
                                    #'terminal_id=%(terminal_id)s, cashier_id=%(cashier_id)s}' % receipt
                                )
                                errlog(
                                    'При наступному запуску ітерація #%i буде повторена'
                                    % iteration_number
                                )
                                errlog('Програма припинена з кодом помилки 1')
                                exit_and_send_error_message()
                            # batch sent - reset the list:
                            log(u'Передано чеків %i - %i' % (beg_iii, iii))
                            beg_iii = iii+1
                            receipt_list = []
                    iii += 1
                    receipt = {}
                    receipt['date'] = dt
                    receipt['order_id'] = order_id
                    receipt['terminal_id'] = term_identifier
                    receipt['cashier_id'] = cashier_identifier
                    receipt['cartitems'] = []
                # Skip degenerate positions (zero quantity or total).
                if qty == 0:
                    log( "QTY == 0 !" )
                    log( '><>>>>', row )
                    log( 'ROW #', ii )
                    continue
                if total_price == 0:
                    log( "TOTAL_PRICE == 0 !" )
                    log( '><>>>>', row )
                    log( 'ROW #', ii )
                    continue
                # NOTE(review): this comparison looks inverted -- one would
                # expect round(total_price,2) <> round(qty*price,2); confirm
                # against the source data before changing it.
                if round(price,2) <> round(qty*total_price,2):
                    price = round( total_price / qty, 2)
                cartitem = {}
                cartitem['product_id'] = pid
                cartitem['order_no'] = posnum
                cartitem['base_price'] = base_price
                cartitem['price'] = price
                cartitem['qty'] = qty
                cartitem['total_price']= total_price
                receipt['cartitems'].append(cartitem)
            # Flush the last (partial) receipt and the remaining block.
            #log( 'save receipt: %s_%s_%s' % ( term_identifier, dt, order_id ))
            receipt_list.append(receipt)
            retcode, new_product = POST(
                'http://bi.datawiz.io/api/v1/receipts/',
                data=receipt_list,
                key_id=KEY_ID,
                secret=SECRET
            )
            if retcode == ERROR:
                #prod['ii'] = ii
                errlog(
                    u'Помилка передачі даних через API для receipt' #%(ii)i '
                    #'{date=%(date)s, order_id=%(order_id)s, '
                    #'terminal_id=%(terminal_id)s, cashier_id=%(cashier_id)s}' % receipt
                )
                errlog(
                    'При наступному запуску ітерація #%i буде повторена'
                    % iteration_number
                )
                errlog('Програма припинена з кодом помилки 1')
                exit_and_send_error_message()
            # batch sent - reset the list:
            log(u'Передано чеків %i - %i' % (beg_iii, iii))
            t2=time.time()
            log('На сервер bi.datawiz.io за %s sec було '
                'передано %i позицій(ю) по магазину %s (~%s позицій/сек)'
                % (str(timedelta(seconds=t2-t1)), ii, shop_id, round(ii/(t2-t1),2) )
            )
        else:
            log('Нових чеків по магазину %s нема. Пропускаємо...' % shop_id)
# ----------------------------------------------------------------------------------
# BEGIN
# ----------------------------------------------------------------------------------
if __name__ == '__main__':
    init_log()
    #------------------------------------------------------------------------------
    # CONNECTION STRING:
    #------------------------------------------------------------------------------
    dsn = settings.DSN
    db_user = settings.DB_USER
    db_password = settings.DB_PASSWORD
    database = settings.DATABASE
    con_string = 'DSN=%s;UID=%s;PWD=%s;DATABASE=%s;' % (
        dsn, db_user, db_password, database
    )
    try:
        cnxn = pyodbc.connect(con_string)
        cursor = cnxn.cursor()
    except Exception as e:
        errlog('Exception: %s' % e)
        exit_and_send_error_message()
    #-------------------------------------------------------------------------------
    # the user's API key (must match API_KEY from api1_authinfo)
    KEY_ID = settings.KEY_ID
    # the user's secret string (must match SECRET from api1_authinfo)
    SECRET = settings.SECRET
    # file that stores the number of the last data-transfer cycle between the
    # servers (client and Datawiz); the number is incremented by 1 each run:
    iteration_number_filename = 'iteration_number.txt'
    # the transfer-cycle number (iteration_number) is used to log the process.
    # Logging principle:
    # on each iteration a directory named after iteration_number is created;
    # the delta files (CSV) that are sent to the server go into it, together
    # with the `datepump_filename` and `sales-lastdate` files the deltas were
    # produced from (those files are described below)
    iter_dir = '%06i'
    # updatenum of every dictionary, to export only changes since last cycle:
    datapump_filename = 'datapump.csv'
    datapump_query = queries.datapump_query
    # read the dates of the latest receipts in the database for every shop.
    sales_lastdate_filename = 'sales-lastdate.csv'
    sales_lastdate_query = queries.sales_lastdate_query
    # Check whether the script is already running: a 'RUNNING' marker file is
    # kept in the current directory.  If it already exists, a previous
    # instance is still working - treat it as a collision, report the error
    # and send an e-mail.  If it does not exist, create it and continue.
    if os.path.isfile(RUN_LABEL):
        errlog(
            'При спробі запуску скрипта було виявлено, що попередній '
            'примірник скрипта ще не закінчив свою роботу'
        )
        #еrrlog(u"(виявлено файл-мітку 'RUNNING', створений попередньою ітерацією).")
        errlog("Чекаємо завершення роботи попереднього примірника скрипта.")
        errlog(
            "Якщо скрипт довго не завершується - перевірте лог файл і при "
            "потребі вилучте файл-мітку 'RUNNING' з поточного каталога скрипта"
        )
        errlog('При наступній ітерації спроба запуску скрипта буде повторена')
        errlog('Програма припинена з кодом помилки 1')
        exit_and_send_error_message(keep_running=True)
    # create our own 'RUNNING' marker and continue:
    touch(RUN_LABEL)
    ##-----------------------------------------------------------------------------
    ## export the receipts delta: /////////////////////////////////////////////////
    ##-----------------------------------------------------------------------------
    # 1. load the latest receipt-change dates per shop:
    log('Load the last sales dates per shop...')
    cur_sales_lastdate_rows = load_sql(cursor, sales_lastdate_query)
    # wait 30 sec so that all data up to the cut-off date we just picked is
    # fully loaded
    log('Wait 30 sec...')
    time.sleep(30)
    # 2. load the previous receipt-change dates per shop (if any):
    log('Load previous sales dates per shop...')
    if os.path.isfile(sales_lastdate_filename):
        prev_sales_lastdate_rows = [
            [ int(i[0]), int(i[1]) ] for i in read_csv(sales_lastdate_filename) if i
        ]
    else:
        prev_sales_lastdate_rows = []
    print '='*100
    print "prev_sales_lastdate_rows=", prev_sales_lastdate_rows
    print '-'*100
    print "cur_sales_lastdate_rows=", cur_sales_lastdate_rows
    print '='*100
    # join prev sales_lastdate with cur sales_lastdate and store the result
    # in a dictionary of the form:
    # sales_lastdate['shopid'] = (prev_sales_lastdate, cur_sales_lastdate)
    PREV=0
    CUR=1
    KEY=0
    DT=1
    sales_lastdate = dict(
        [ (row[CUR][KEY], (int(row[PREV][DT]), int(row[CUR][DT])))
            for row in full_outer_join(
                prev_sales_lastdate_rows,
                cur_sales_lastdate_rows,
                0,
                (0, 0)
            )
        ]
    )
    print '>>>>>> Sales_lastdate:', sales_lastdate
    print '='*100
    # read the data-transfer cycle number
    if os.path.isfile(iteration_number_filename):
        try:
            with open(iteration_number_filename, 'r') as f:
                line = f.readline()
                if line:
                    try:
                        iteration_number = int(line)
                    except:
                        iteration_number = 1
        except IOError, e:
            errlog(e)
            errlog('Програма припинена з кодом помилки 1')
            exit_and_send_error_message()
    else:
        iteration_number = 1
    # try writing iteration_number back to the file (access-rights check):
    try:
        with open(iteration_number_filename, 'w') as f:
            print >>f, iteration_number
    except IOError, e:
        errlog(e)
        errlog('Не можу писати у файл `%s`. Перевірте вільний простір на диску або '
            'права доступу до файлу' % iteration_number_filename
        )
        errlog('Програма припинена з кодом помилки 1')
        exit_and_send_error_message()
    iter_dir_name = iter_dir % iteration_number
    if not glob(iter_dir_name):
        try:
            os.mkdir(iter_dir_name)
        except IOError, e:
            errlog(e)
            errlog(
                'Не можу створити каталог `%s`. Перевірте права доступу'
                % iter_dir_name
            )
    log(em('-- BEGIN ITERATION #%s ' %iteration_number +'-'*60))
    subject = 'ERROR message (ITERATION #%s) from Kolos Datawiz API'\
        %iteration_number
    # 1. load the latest updatenum for the dictionaries:
    cur_updatenum_rows = load_sql(cursor, datapump_query)
    # 2. load the previous updatenum for the dictionaries (if any):
    if os.path.isfile(datapump_filename):
        prev_updatenum_rows = read_csv(datapump_filename)
    else:
        prev_updatenum_rows = []
    print '='*100
    print 'prev_updatenum_rows=', prev_updatenum_rows
    print '-'*100
    print 'cur_updatenum_rows=', cur_updatenum_rows
    print '='*100
    # join prev updatenum with cur updatenum and store the result in a
    # dictionary of the form:
    # updatenum['dictname'] = (prev_updatenum, cur_updatenum)
    PREV=0
    CUR=1
    KEY=0
    UPDATENUM=1
    updatenum = dict(
        [ (row[CUR][KEY], (int(row[PREV][UPDATENUM]), int(row[CUR][UPDATENUM])))
            for row in full_outer_join(
                prev_updatenum_rows,
                cur_updatenum_rows,
                0,
                (0, 0)
            )
        ]
    )
    print 'UPDATENUM:', updatenum
    # export the dictionary deltas:
    load_units(cursor)
    load_cashiers(cursor)
    load_category(cursor)
    load_product(cursor)
    ##-----------------------------------------------------------------------------
    ## dictionaries saved: store cur updatenum as prev updatenum //////////////////
    ##-----------------------------------------------------------------------------
    if os.path.isfile(datapump_filename):
        os.rename(datapump_filename, iter_dir_name+'/'+datapump_filename)
    write_csv(cur_updatenum_rows, datapump_filename)
    # export the receipts delta:
    load_receipts(cursor)
    ##-----------------------------------------------------------------------------
    ## receipts saved: store cur sales_lastdate as prev sales_lastdate ////////////
    ##-----------------------------------------------------------------------------
    if os.path.isfile(sales_lastdate_filename):
        # move the previous file into the iteration folder
        os.rename(sales_lastdate_filename, iter_dir_name+'/'+sales_lastdate_filename)
    write_csv(cur_sales_lastdate_rows, sales_lastdate_filename)
    # save the NEXT iteration_number
    with open(iteration_number_filename, 'w') as f:
        print >>f, iteration_number + 1
    t2=time.time()
    log('-- ITERATION #%s DONE SUCCESSFULLY! (скрипт працював %s sec) '
        % (iteration_number, str(timedelta(seconds=t2-t0)))
        +'-'*30
    )
    # if the previous iteration ended with an error and this one finished
    # successfully, send an e-mail reporting the success:
    if get_last_retcode() != 0:
        subject = 'SUCCESSFUL message (ITERATION #%s) from Kolos Datawiz API' \
            % iteration_number
        msg_list = [
            'Cкрипт %s, запущенний в %s, відпрацював успішно!' % (
                FULL_SCRIPT_NAME,
                str(datetime.fromtimestamp(int(t0)))
            ),
            # NOTE(review): no comma after '' below, so the two literals are
            # concatenated into one list element -- likely a missing comma.
            ''
            'Скрипт працював %s sec' % str(timedelta(seconds=t2-t0))
        ]
        message = '\n'.join(msg_list)
        try:
            send_mail(message, subject)
        except Exception as e:
            errlog('Невдача при відправці ел.пошти:')
            errlog('Exception: %s' %e)
            errlog('Ігноруємо відправку. :-(')
    # store the last return code - 0:
    retcode = 0
    save_last_retcode(retcode)
    # the program finished successfully - remove the 'RUNNING' marker file:
    if os.path.isfile(RUN_LABEL):
        os.remove(RUN_LABEL)
    # return exit code 0
    sys.exit(retcode)
|
14,426 | 1f624b03b9f5f09f75ca7bd8d7ec1faf161b92ae | from flask import Flask, flash, render_template, redirect, url_for, request, session
from dao.DAOUsuario import DAOUsuario, DAOProducto, DAOCheckout, DAOLista
app = Flask(__name__)
# NOTE(review): the session secret is hard-coded; load it from configuration
# or an environment variable before deploying.
app.secret_key = "mys3cr3tk3y"
# Module-wide DAO singletons shared by every request handler below.
db = DAOUsuario()
dbProducto = DAOProducto()
dbCheckout = DAOCheckout()
dbLista = DAOLista()
@app.route('/')
def principal():
    """Landing page: show the login form."""
    return render_template('login.html')


@app.route('/registro')
def index():
    """Render the sign-up form."""
    return render_template('registro.html')


@app.route('/login')
def login():
    """Render the login form."""
    return render_template('login.html')
@app.route('/productor/contacto/<string:usuario>')
def contactanos(usuario):
    """Producer 'contact us' page for the connected user."""
    current = db.sesion(usuario)
    return render_template('productor/contactanos.html', user=current)
@app.route('/nuevo_inicio', methods=['POST', 'GET'])
def ninicio():
    """Handle the sign-up form: create the account and route by role."""
    # Guard: anything but a submitted registration shows the form again.
    if request.method != 'POST' or not request.form['registrar']:
        return render_template('registro.html')
    if not db.create(request.form):
        flash("ERROR al crear usuario, emplee otro usuario")
        return render_template('registro.html')
    destino = 'cliente' if request.form['tipo'] == 'cliente' else 'productor'
    return redirect(url_for(destino, usuario=request.form['usuario']))
@app.route('/iniciar', methods = ['POST'])
def iniciar():
    """Validate login credentials and route the user by account type.

    ``db.validate`` returns None when validation fails; otherwise the user
    row where index 2 is the username and index 5 is the account type.
    """
    if request.method == 'POST' and request.form['iniciar']:
        data = db.validate(request.form)
        if data is None:  # was `== None`
            flash("ERROR, usuario invalido")
            return redirect(url_for('ninicio'))
        if len(data) != 0:
            if data[5] == 'cliente':
                return redirect(url_for('cliente', usuario=data[2]))
            return redirect(url_for('productor', usuario=data[2]))
    # BUG FIX: this path previously fell off the end of the function and
    # returned None, making Flask raise a 500; always render the login form.
    return render_template('login.html')
@app.route('/productor/<string:usuario>')
def productor(usuario):
    """Producer dashboard: current stock plus the producer's products."""
    # db.sesion looks up the row of the currently connected user.
    current = db.sesion(usuario)
    stock = dbProducto.stock(current[0])
    productos = dbProducto.readProductor(current[0])
    return render_template('productor/productor_index.html',
                           user=current, stock=stock, productos=productos)
@app.route('/cliente/<string:usuario>')
def cliente(usuario):
    """Client landing page: go straight to the shop for this user.

    The previous body fetched the session row and the full product list but
    never used either before redirecting, so those dead queries were removed.
    """
    return redirect(url_for('tienda', usuario=usuario))
@app.route('/productor/producto/<string:usuario>')
def productor_producto(usuario):
    """List every product that belongs to this producer.

    (A leftover debug ``print(usuario)`` was removed.)
    """
    user = db.sesion(usuario)
    data = dbProducto.readProductor(user[0])
    return render_template('productor/productos.html', data=data, user=user)
@app.route('/productor/add/<string:usuario>')
def productor_add(usuario):
    """Render the 'new product' form for this producer."""
    current = db.sesion(usuario)
    existentes = dbProducto.readProductor(current[0])
    return render_template('productor/add.html', data=existentes, user=current)


@app.route('/productor/update/<string:usuario>/<int:producto>')
def producto_update(usuario, producto):
    """Render the edit form pre-filled with one product's data."""
    current = db.sesion(usuario)
    fila = dbProducto.read(producto)
    return render_template('productor/update.html', data=fila, user=current)
@app.route('/productor/anadirProducto/<string:usuario>', methods=['POST', 'GET'])
def anadirProducto(usuario):
    """Persist a new product submitted from the 'add product' form.

    Debug prints removed; both branches redirected to the same place, so
    the duplicated redirect was collapsed into a single trailing return.
    """
    user = db.sesion(usuario)
    if request.method == 'POST' and request.form['guardar']:
        if dbProducto.insert(request.form):
            flash("Nuevo producto creado")
        else:
            flash("ERROR, al crear producto")
    return redirect(url_for('productor_add', usuario=user[2]))
@app.route('/productor/update_lista/<string:usuario>/<int:producto>', methods=['POST', 'GET'])
def producto_update_lista(usuario, producto):
    """Apply the edit form to an existing product, then return to the list.

    BUG FIX: the flash messages were copy-pasted from the create handler and
    claimed a product was *created* after an update; they now say updated.
    Debug prints removed and the duplicated redirect collapsed.
    """
    user = db.sesion(usuario)
    if request.method == 'POST' and request.form['update']:
        if dbProducto.update(producto, request.form):
            flash("Producto actualizado")
        else:
            flash("ERROR, al actualizar producto")
    return redirect(url_for('productor_producto', usuario=user[2]))
@app.route('/productor/delete/<string:usuario>/<int:producto>')
def eliminarProducto(usuario, producto):
    """Delete one product and return to the producer's product list.

    NOTE(review): deletion over GET is vulnerable to CSRF / link prefetch;
    consider restricting this route to POST.  (Debug print removed.)
    """
    user = db.sesion(usuario)
    dbProducto.delete(producto)
    return redirect(url_for('productor_producto', usuario=user[2]))
########################################################################################
#INICIA PARTE MARITA MIRELLA
@app.route('/anadircarrito/<string:usuario>/<int:idproducto>')
def anadircarrito(usuario, idproducto):
    """Add a product to the user's cart, then go back to the shop.

    The previous body fetched the full product list into an unused local,
    so that dead query was removed.
    """
    user = db.sesion(usuario)
    dbLista.insert(idproducto, user[0])
    return redirect(url_for('tienda', usuario=usuario))
@app.route('/Acercade/<string:usuario>')
def Acercade(usuario):
    """'About us' page for the connected user."""
    current = db.sesion(usuario)
    return render_template('/about.html', user=current)


@app.route('/Contacto/<string:usuario>')
def contactos(usuario):
    """Contact page for the connected user."""
    current = db.sesion(usuario)
    return render_template('/contact.html', user=current)


@app.route('/Index/<string:usuario>')
def iniciousuario(usuario):
    """Logged-in home page."""
    current = db.sesion(usuario)
    return render_template('/index.html', user=current)


@app.route('/Registrarse/')
def registrarse():
    """Sign-up form (no session required)."""
    return render_template('/registro.html')
@app.route('/Tienda/<string:usuario>/')
def tienda(usuario):
    """Shop page listing the whole product catalogue."""
    current = db.sesion(usuario)
    catalogo = dbProducto.read(None)
    return render_template('/shop.html', user=current, data=catalogo)
@app.route('/Carrito/<string:usuario>/')
def carrito(usuario):
    """Render the user's cart as [name, price, qty] rows."""
    user = db.sesion(usuario)
    datalist=dbLista.readUser(user[0])
    rows=len(datalist)
    # Pre-size a rows x 3 matrix: [product name, price, quantity].
    data = [[0 for x in range(3)] for y in range(rows)]
    prod = dbProducto.read(None)
    for i in range(rows):
        # datalist[i][1] is the product id of this cart entry.
        dat=datalist[i][1]
        print(dat)
        # NOTE(review): indexing prod[dat-1] assumes product ids are 1-based
        # and contiguous with the SELECT order -- confirm; this breaks after
        # a product is deleted.  Columns 8 and 3 appear to be name and price.
        data[i][0]=prod[int(dat-1)][8]
        data[i][1]=prod[int(dat-1)][3]
        data[i][2]=1
    print(data)
    #return render_template('/index.html')
    return render_template('/cart.html',user= user ,data=data)
# @app.route('/Carrito/')
# def carrito():
# datalist=dbLista.read(None)
# rows=len(datalist)
# data = [[0 for x in range(3)] for y in range(rows)]
# prod = dbProducto.read(None)
# print(prod)
# print(datalist)
# for i in range(rows):
# dat=datalist[i][1]
# print(dat)
# print(prod[int(dat-1)][1])
# print(data)
@app.route('/Checkout/<string:usuario>/')
def checkout(usuario):
    """Checkout form for the connected user."""
    current = db.sesion(usuario)
    return render_template('checkout.html', user=current)
@app.route('/productSingle/<string:usuario>/<int:idproducto>')
def productSingle(usuario,idproducto):
    """Product detail page.

    NOTE(review): merely viewing the page also inserts the product into the
    user's cart list (dbLista.insert) -- confirm this side effect is intended.
    """
    user = db.sesion(usuario)
    data = dbProducto.read(idproducto)
    dbLista.insert(idproducto,user[0])
    return render_template('/product-single.html', user = user, data = data)
@app.route('/QR/<string:usuario>')
def qr(usuario):
    """Payment QR page shown after a successful checkout."""
    current = db.sesion(usuario)
    return render_template('/QR.html', user=current)
@app.route('/guardarcheckout/<string:usuario>', methods=['POST', 'GET'])
def guardarcheckout(usuario):
    """Persist the checkout form; on success show the QR payment page."""
    current = db.sesion(usuario)
    # Guard: anything but a submitted form goes back to the checkout page.
    if request.method != 'POST' or not request.form['save']:
        return redirect(url_for('checkout', usuario=usuario))
    if not dbCheckout.insertUsertoo(request.form, current[0]):
        print("ERROR")
        return redirect(url_for('checkout', usuario=usuario))
    print("Se guardo el pedido")
    return redirect(url_for('qr', usuario=usuario))
@app.errorhandler(404)
def page_not_found(error):
    """Custom 404 page.

    BUG FIX: without the explicit status code the handler replied with
    HTTP 200, so clients could not tell the page was missing.
    """
    return render_template('error.html'), 404
if __name__ == '__main__':
    # NOTE(review): debug=True combined with host 0.0.0.0 exposes the
    # Werkzeug debugger to the whole network -- disable debug outside
    # local development.
    app.run(port=3000, host="0.0.0.0",debug=True)
|
14,427 | ea3fd69e4d2b35a3d363a54fdea085ed78ed7b2f | from flask_login import login_required
from flask import flash,render_template,redirect, abort, url_for
from . import aboutt
from .forms import AboutForm
from .. import db
from ..models import about
@aboutt.route('/info', methods=['GET', 'POST'])
@login_required
def info():
    """List every stored 'about' record on the admin page."""
    records = about.query.all()
    return render_template('admin/inform.html', aboutme=records)
@aboutt.route('/info/add', methods=['GET', 'POST'])
@login_required
def add_info():
    """Create a new 'about' record from the submitted form."""
    add_information = True
    form = AboutForm()
    if form.validate_on_submit():
        record = about()
        # Copy every editable field from the form onto the new row.
        for field in ('about_me', 'address', 'email',
                      'first_name', 'last_name', 'mobileno'):
            setattr(record, field, getattr(form, field).data)
        db.session.add(record)
        db.session.commit()
        flash('Info has been added')
        return redirect(url_for('aboutt.info'))
    return render_template('admin/info.html', form=form,
                           add_information=add_information, action="Add")
@aboutt.route('/info/edit/<int:id>', methods=['GET', 'POST'])
@login_required
def edit_info(id):
    """Update an existing 'about' record; 404 when the id is unknown."""
    add_information = False
    record = about.query.get_or_404(id)
    form = AboutForm(obj=record)
    if form.validate_on_submit():
        # Copy every editable field from the form back onto the row.
        for field in ('about_me', 'address', 'email',
                      'first_name', 'last_name', 'mobileno'):
            setattr(record, field, getattr(form, field).data)
        db.session.commit()
        flash('Info has been updated')
        return redirect(url_for('aboutt.info'))
    return render_template('admin/info.html', form=form,
                           add_information=add_information, action="Edit")
@aboutt.route('/info/delete/<int:id>',methods=['GET','POST'])
@login_required
def delete_info(id):
    """Delete one 'about' record and return to the list.

    NOTE(review): reachable via GET, so a crafted link can trigger the
    delete -- consider restricting to POST with CSRF protection.
    """
    result=about.query.get_or_404(id)
    db.session.delete(result)
    db.session.commit()
    flash('Info has been deleted')
    return redirect(url_for('aboutt.info'))
|
14,428 | f1a56ab35d5543615988f084bd703f22aa5cf941 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the two daily FOREX snapshots and parse the Date column.
forex_11 = pd.read_csv('FOREX_20180111.csv', sep=',', parse_dates=['Date'])
forex_12 = pd.read_csv('FOREX_20180112.csv', sep=',', parse_dates=['Date'])
time_format = '%d-%m-%Y'
# BUG FIX: DataFrame.append was deprecated and removed in pandas 2.0;
# pd.concat is the supported replacement and yields the same row order.
forex = pd.concat([forex_11, forex_12], ignore_index=False)
forex['Date'] = forex['Date'].dt.strftime(time_format)
forex = forex.reset_index(drop=True)
# For every currency symbol keep the row with the highest opening price.
forex = forex.loc[forex.groupby("Symbol")["Open"].idxmax()]
print(forex)
print(forex[forex['Date'] == '12-01-2018'])
|
14,429 | 48db83d807b50519f3f6fddd84f478928bed0b0b | # db models for bolchal
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import func
db = SQLAlchemy()
# Association table for the many-to-many "profile administers page" relation.
admins = db.Table('admins', db.Column('page_id', db.Integer, db.ForeignKey('page.id', ondelete='CASCADE')),
                  db.Column('profile_id', db.Integer, db.ForeignKey('profile.id', ondelete='CASCADE')))
# Association table for the many-to-many "profile is member of page" relation.
members = db.Table('members', db.Column('page_id', db.Integer, db.ForeignKey('page.id', ondelete='CASCADE')),
                   db.Column('profile_id', db.Integer, db.ForeignKey('profile.id', ondelete='CASCADE')))
class Profile(db.Model):
    """A registered user account."""
    __tablename__ = "profile"
    id = db.Column(db.Integer, primary_key=True)
    first_name = db.Column(db.String(100), nullable=False)
    last_name = db.Column(db.String(100), nullable=False)
    mobile_no = db.Column(db.String(10))
    email = db.Column(db.String(100), nullable=False, unique=True)
    username = db.Column(db.String(25), nullable=False, unique=True)
    # NOTE(review): stored as plain Text; no hashing is visible in this
    # module -- confirm it happens before construction.
    password = db.Column(db.Text, nullable=False)
    created_at = db.Column(db.DateTime, default=func.now())
    # relations
    posts = db.relationship("Post", backref="profile",
                            lazy=True, cascade='all,delete')
    # NOTE(review): singular `like` while Page uses plural `likes`; renaming
    # would break callers, so only flagging the inconsistency here.
    like = db.relationship("Like", backref="profile", lazy=True)
    def __init__(self, first_name, last_name, email, username, password, mobile_no=""):
        self.first_name = first_name
        self.last_name = last_name
        self.email = email
        self.username = username
        self.password = password
        self.mobile_no = mobile_no
    def __repr__(self):
        return f'<Profile {self.username}>'
    def __str__(self):
        return f'{self.username}'
class Page(db.Model):
    """A community page with admin and member profiles."""
    __tablename__ = "page"
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), nullable=False, unique=True)
    desc = db.Column(db.Text, nullable=False)
    category = db.Column(db.String(50))
    views = db.Column(db.Integer, default=0)
    # relations
    posts = db.relationship("Post", backref="page", lazy=True)
    likes = db.relationship("Like", backref="page", lazy=True)
    # many-to-many via the module-level `admins` / `members` join tables;
    # the backrefs expose profile.adminof / profile.memberof.
    admins = db.relationship("Profile", secondary=admins,
                             lazy=True, backref=db.backref('adminof', lazy=True))
    members = db.relationship("Profile", secondary=members,
                              lazy=True, backref=db.backref('memberof', lazy=True))
    def __init__(self, name, desc, category=""):
        self.name = name
        self.desc = desc
        self.category = category
class Post(db.Model):
    """A post authored by either a profile or a page (never both)."""
    __tablename__ = "post"
    id = db.Column(db.Integer, primary_key=True)
    post_type = db.Column(db.String(10), nullable=False)
    body = db.Column(db.Text, nullable=False)
    created_at = db.Column(db.DateTime, default=func.now())
    # relationships
    comments = db.relationship("Comment", backref="post", lazy=True)
    likes = db.relationship("Like", backref="post", lazy=True)
    profile_id = db.Column(db.Integer, db.ForeignKey(
        "profile.id", ondelete='CASCADE'))
    page_id = db.Column(db.Integer, db.ForeignKey(
        "page.id", ondelete='CASCADE'))
    def __init__(self, post_type, body, profile_id, page_id):
        self.post_type = post_type
        self.body = body
        # Only one owner is recorded: any falsy profile_id (including 0)
        # attributes the post to page_id instead.
        if profile_id:
            self.profile_id = profile_id
        else:
            self.page_id = page_id
class Comment(db.Model):
    """A comment on a post, authored by either a profile or a page."""
    __tablename__ = 'comment'
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.Text, nullable=False)
    created_at = db.Column(db.DateTime, default=func.now())
    # relations
    post_id = db.Column(db.Integer, db.ForeignKey(
        "post.id", ondelete='CASCADE'))
    profile_id = db.Column(db.Integer, db.ForeignKey(
        "profile.id", ondelete='CASCADE'))
    page_id = db.Column(db.Integer, db.ForeignKey(
        "page.id", ondelete='CASCADE'))
    def __init__(self, body, post_id, profile_id, page_id):
        self.body = body
        self.post_id = post_id
        # Same one-owner rule as Post: falsy profile_id -> page ownership.
        if profile_id:
            self.profile_id = profile_id
        else:
            self.page_id = page_id
class Like(db.Model):
    """A like on a post, owned by either a profile or a page."""
    __tablename__ = 'like'
    id = db.Column(db.Integer, primary_key=True)
    # relations
    post_id = db.Column(db.Integer, db.ForeignKey(
        "post.id", ondelete='CASCADE'))
    profile_id = db.Column(db.Integer, db.ForeignKey(
        "profile.id", ondelete='CASCADE'))
    page_id = db.Column(db.Integer, db.ForeignKey(
        "page.id", ondelete='CASCADE'))
    def __init__(self, post_id, profile_id, page_id):
        self.post_id = post_id
        # Same one-owner rule as Post/Comment.
        if profile_id:
            self.profile_id = profile_id
        else:
            self.page_id = page_id
|
14,430 | f869c1bef7571f021c0c71cbcea95bffad84b8d2 | import json
import random
def terrainlists():
    """Return a fresh terrain -> empty-list mapping (one key per known terrain).

    A plain def instead of a named lambda (PEP 8 E731); every call returns
    brand-new lists so each caller can mutate its copy independently.
    """
    return {"plains": [], "farm": [], "forest": [], "mountain": [], "waste": [],
            "swamp": [], "cave": [], "water": [], "deepwater": [], "coast": []}
class Random4Indies:
def __init__(self, rarechance, strength):
self.rarechance = rarechance
self.strength = strength // 100
self.poptypes = terrainlists()
self.rares = terrainlists()
self.throne_guards = {key: terrainlists() for key in xrange(1, 4)}
self.allpop = []
self.index = 25
random.seed()
def readIndies(self, sourcefile):
with open(sourcefile, 'r') as infile:
data = json.load(infile)
if data.get('poptypes') is not None:
for entry in data['poptypes']:
entry['index'] = str(self.index)
for terrain in entry['terrain']:
if self.poptypes.get(terrain) is None:
print "Error: found unknown terrain " + terrain + " in " + sourcefile
else:
self.poptypes[terrain].append(entry)
self.allpop.append(entry)
self.index += 1
if data.get('rares') is not None:
for entry in data['rares']:
for terrain in entry['terrain']:
if self.rares.get(terrain) is None:
print "Error: found unknown terrain " + terrain + " in " + sourcefile
else:
self.rares[terrain].append(entry)
if data.get('throne_guards') is not None:
for level in data['throne_guards']:
for entry in data['throne_guards'][level]:
for terrain in entry['terrain']:
if self.throne_guards[int(level)].get(terrain) is None:
print "Error: found unknown terrain " + terrain + " in " + sourcefile
else:
self.throne_guards[int(level)][terrain].append(entry)
def getPoptypeForTerrain(self, terrains):
choices = []
for terrain in terrains:
if self.poptypes[terrain] is not None:
choices = choices + self.poptypes[terrain]
result = None
while True:
poptype = random.choice(choices)
if poptype.get('rare') is not None:
if random.random() < poptype['rare']:
result = poptype
break
else:
continue
else:
result = poptype
break
return result
def getRaresForTerrain(self, terrains):
choices = []
for terrain in terrains:
if self.rares[terrain] is not None:
choices = choices + self.rares[terrain]
result = None
while True:
rare = random.choice(choices)
if rare.get('rare') is not None:
if random.random() < rare['rare']:
result = rare
break
else:
continue
else:
result = rare
break
return result
def getThroneGuardsForTerrain(self, terrains, throne_level, throne_tags):
choices = [choice for terrain in terrains for choice in self.throne_guards[throne_level][terrain]]
filtered = [f for tag in throne_tags for f in choices if f.get('tags') is not None and tag in f['tags']]
if len(filtered) != 0:
choices = filtered
result = None
while True:
guards = random.choice(choices)
if guards.get('rare') is not None:
if random.random() < guards['rare']:
result = guards
break
else:
continue
else:
result = guards
break
return result
def getUnitsForEntry(self, entry):
stringList = []
for commander in entry['commander']:
for i in xrange(1 if commander.get('count') is None else commander['count']):
# generate one each time due to possible random attributes
stringList += self.getCommanderString(commander)
for unit in entry['unit']:
count = max(int(random.randrange(int(unit['count']*0.7), int(unit['count']*1.3)) * self.strength), 1)
stringList += '#units ' + str(count) + ' ' + self.getString(unit['type']) + '\n'
return ''.join(stringList)
def getIndiesFor(self, terrains, throne_level, throne_tags):
poptype = self.getPoptypeForTerrain(terrains)
stringList = []
stringList += '#poptype ' + poptype['index'] + '\n'
stringList += self.getUnitsForEntry(poptype)
if random.random() * 100 < self.rarechance:
rare = self.getRaresForTerrain(terrains)
stringList += self.getUnitsForEntry(rare)
if throne_level > 0:
throneguards = self.getThroneGuardsForTerrain(terrains, throne_level, throne_tags)
stringList += self.getUnitsForEntry(throneguards)
return ''.join(stringList)
def getCommanderString(self, commander):
stringList = []
stringList += '#commander ' + self.getString(commander['type']) + '\n'
if commander.get('items') is not None:
stringList += '#randomequip ' + str(commander['items']) + '\n'
if commander.get('name') is not None:
stringList += '#comname "' + random.choice(commander['name']) + '"\n'
if commander.get('xp') is not None:
stringList += '#xp ' + str(commander['xp']) + '\n'
if commander.get('magic') is not None:
for path in commander['magic']:
stringList += '#mag_' + path + ' ' + str(commander['magic'][path]) + '\n'
return ''.join(stringList)
def writeModFile(self, modfile):
with open(modfile + '.dm', 'w') as ofile:
self.writeHeader(ofile)
self.writePoptype(ofile)
def writeHeader(self, ofile, modfile):
ofile.write('#modname "Random indies for ' + modfile + '"\n')
ofile.write('#description "Random indies for ' + modfile + '"\n')
ofile.write('#version 100\n')
ofile.write('#domversion 350\n')
def writePoptype(self, ofile):
for poptype in self.allpop:
ofile.write('\n')
ofile.write('#selectpoptype ' + str(poptype['index']) + '\n')
ofile.write('#clearrec\n')
ofile.write('#cleardef\n')
ofile.write('#defcom1 ' + self.getString(poptype['pd_commander']) + '\n')
prefix = ['', 'b', 'c']
for pd in xrange(min(len(poptype['pd']), 3)):
ofile.write('#defunit1' + prefix[pd] + ' ' + self.getString(poptype['pd'][pd]['type']) + '\n')
ofile.write('#defmult1' + prefix[pd] + ' ' + str(poptype['pd'][pd]['count']) + '\n')
for reccom in poptype['recruitable_commanders']:
ofile.write('#addreccom ' + self.getString(reccom) + '\n')
for recunit in poptype['recruitable_units']:
ofile.write('#addrecunit ' + self.getString(recunit) + '\n')
ofile.write('#end\n')
def getString(self, item):
if isinstance(item, basestring):
return '"' + item + '"'
return str(item)
if __name__ == "__main__":
    # Library module: callers are expected to import and drive Random4Indies.
    print "This file is not intended to be run as stand alone!"
14,431 | f170a00182adff18819faba24c1994fbf68259a9 | import os
import math
import random
import datetime
import functools
from app.models import *
# from app import csrf
from hashlib import md5
from .forms import TaskForm
from app import api
from . import main
from flask_restful import Resource
from flask import jsonify # flask 封装后的json方法
from flask_sqlalchemy import Pagination
from flask import render_template, request, redirect, session
def set_pwd(pwd):
    """Return the MD5 hex digest of a plaintext password.

    NOTE(review): MD5 is a weak choice for password storage -- confirm
    whether a slow KDF is required before changing stored hashes.
    """
    return md5(pwd.encode(encoding='utf-8')).hexdigest()
# def back_page(pages, current_page): # 返回页数
# if pages <= 5:
# return range(1, pages + 1)
# if current_page <= 3:
# return range(1, 6)
# elif current_page + 3 >= pages:
# return range(pages - 4, pages + 1)
# else:
# return range(current_page - 2, current_page + 2)
def loginValid(fun):
    """Decorator: run the view only when cookie and session identities agree.

    Loads the user from the 'id' cookie and requires the 'username' cookie
    to match both the DB record and the session entry; otherwise redirects
    to /login/.
    """
    @functools.wraps(fun)
    def inner(*args, **kwargs):
        id = request.cookies.get('id', 0)  # missing cookie -> 0 -> query returns None
        username = request.cookies.get('username')
        session_username = session.get('username')
        user = User.query.get(int(id))
        if user:
            if user.username == username and username == session_username:
                return fun(*args, **kwargs)
        return redirect('/login/')
    return inner
class Calendar:  # calendar helper
    """Builds a month grid; each day is paired with a random subject name."""
    def __init__(self, year=datetime.datetime.now().year, month=datetime.datetime.now().month):
        # NOTE(review): the defaults are evaluated once at import time, so a
        # long-running process keeps the start-up year/month -- confirm intended.
        assert int(month) <= 12
        date = datetime.datetime(year, month, 1, 0, 0)  # first day of the month
        self.start_day = date.weekday()  # weekday index of the 1st
        self.days = list(self.back_days(year, month))  # remaining day numbers (consumed by pops below)
        self.work = ['语文', '数学', '英语', '物理', '化学', '地理', '生物']
    def back_days(self, year, month):  # return the day numbers of the month
        big_month = [1, 3, 5, 7, 8, 10, 12]
        small_month = [4, 6, 9, 11]
        two_month = 28
        # Gregorian leap-year rule for February.
        if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:
            two_month = 29
        assert int(month) <= 12
        if month in big_month:
            return range(1, 32)
        elif month in small_month:
            return range(1, 31)
        else:
            return range(1, two_month + 1)
    def first_list(self, start_day, days):  # first week row of the calendar
        # Consumes days from self.days (mutating pop) and left-pads with 'empty'
        # so the row starts on the correct weekday column.
        ca_list = [{self.days.pop(0): random.choice(self.work)} for i in range(1, 8 - start_day)]
        [ca_list.insert(0, 'empty') for j in range(7 - len(ca_list))]
        return ca_list
    def return_calendar(self):  # full calendar as a list of week rows
        first_line = self.first_list(self.start_day, self.days)  # first row
        lines = [first_line]  # accumulated rows
        while self.days:  # build each remaining row
            line = [{self.days.pop(0): random.choice(self.work)} for i in range(7) if self.days]
            [line.append('empty') for j in range(7 - len(line))]  # right-pad the final short row
            lines.append(line)
        return lines
class Paginator:
    """Five-page-window paginator over a SQLAlchemy-style query object."""
    def __init__(self, datas, page_size):
        self.datas = datas
        self.page_size = page_size
        # Total number of pages, rounding the last partial page up.
        self.all_pages = math.ceil(self.datas.count() / self.page_size)
    def back_page(self, current_page):
        """Return the window of page numbers (at most 5) to display."""
        if self.all_pages <= 5:
            return range(1, self.all_pages + 1)
        if current_page <= 3:
            return range(1, 6)
        elif current_page + 3 >= self.all_pages:
            return range(self.all_pages - 4, self.all_pages + 1)
        else:
            # BUG FIX: the upper bound was current_page + 2, which produced a
            # 4-page window; + 3 keeps the window at 5 pages like the other
            # branches.
            return range(current_page - 2, current_page + 3)
    def back_data(self, current_page):
        """Return the rows belonging to 1-based `current_page`."""
        datas = self.datas.offset((current_page - 1) * self.page_size).limit(self.page_size)
        return datas
@main.route('/') # route
def base(): # view: public landing page
    # c = Curriculum(c_id='0001', c_name='python', c_time=datetime.datetime.now())
    # c.save()
    return render_template('base.html')
@main.route('/index/') # route
@loginValid
def index(): # view: home page shown after login
    return render_template('index.html')
@main.route('/register/', methods=['GET', 'POST']) # route
def register(): # view
    """Create a new user; on success redirect to /login/, else re-render
    the form with err_msg (passed via **locals())."""
    err_msg = ''
    if request.method == 'POST':
        username = request.form.get('username')
        password = request.form.get('password')
        email = request.form.get('email')
        # Nested presence checks; the innermost success path saves the user.
        if username:
            if email:
                if password:
                    user = User()
                    user.username = username
                    user.email = email
                    user.password = set_pwd(password)  # MD5 hash (see set_pwd)
                    user.save()
                    return redirect('/login/')
                else:
                    err_msg = '密码不可为空'
            else:
                err_msg = '邮箱不可为空'
        else:
            err_msg = '用户名不可为空'
    return render_template('register.html', **locals())
@main.route('/login/', methods=['GET', 'POST']) # route
def login(): # view
    """Authenticate by email + hashed password; on success set the auth
    cookies (email/id/username) and session entry, then go to /index/."""
    err_msg = ''
    if request.method == 'POST':
        email = request.form.get('email')
        password = request.form.get('password')
        user = User.query.filter_by(email=email).first()
        if user:
            if set_pwd(password) == user.password:
                response = redirect('/index/')
                response.set_cookie('email', user.email)
                response.set_cookie('id', str(user.id))
                response.set_cookie('username', user.username)
                print(user.username)
                session['username'] = user.username
                return response
            else:
                err_msg = '密码错误'
        else:
            err_msg = '该账号未注册'
    return render_template('login.html', **locals())
@main.route('/logout/')
def logout(): # log out: clear auth cookies and session, back to login
    response = redirect('/login/')
    response.delete_cookie('email')
    response.delete_cookie('id')
    response.delete_cookie('username')
    # NOTE(review): session.pop('username') raises KeyError when the session
    # entry is absent -- confirm this route is only reachable logged in.
    session.pop('username')
    return response
@main.route('/user_info/') # route
@loginValid
def user_info(): # personal home page
    """Render the profile page with a randomly-filled month calendar."""
    c = Calendar()
    datas = c.return_calendar()
    day = datetime.datetime.now().day  # highlight today's date in the template
    return render_template('user_info.html', **locals())
@main.route('/leave/', methods=['get', 'post'])
# @csrf.exempt
@loginValid
def leave():
    """Submit a leave-of-absence request; all fields are required.

    The requester id comes from the 'id' cookie; new requests start with
    leave_status '0'.
    """
    err_msg = ''
    if request.method == 'POST':
        leave_name = request.form.get('leave_name')
        leave_type = request.form.get('leave_type')
        leave_start = request.form.get('leave_start')
        leave_end = request.form.get('leave_end')
        leave_desc = request.form.get('leave_desc')
        leave_phone = request.form.get('leave_phone')
        if leave_name and leave_type and leave_start and leave_end and leave_desc and leave_phone:
            id = int(request.cookies.get('id'))
            lea = Leave()
            lea.leave_id = id
            lea.leave_name = leave_name
            lea.leave_type = leave_type
            lea.leave_start = leave_start
            lea.leave_end = leave_end
            lea.leave_desc = leave_desc
            lea.leave_phone = leave_phone
            lea.leave_status = '0'
            lea.save()
        else:
            err_msg = '请填写全部内容'
    return render_template('leave.html', **locals())
@main.route('/leave_list/<p>/', methods=['get', 'post'])
@loginValid
def leave_list(p):
    """Paginated list of the current user's leave requests (page `p`)."""
    page_size = 5
    p = int(p)
    id = int(request.cookies.get('id'))  # current user from the auth cookie
    leaves = Leave.query.filter_by(leave_id=id)
    pagin = Paginator(leaves, page_size)
    pages = pagin.back_page(p)  # page-number window for the template
    leaves = pagin.back_data(p)  # rows for this page
    return render_template('leave_list.html', **locals())
@main.route('/cancel/')
def cancel():
    """AJAX-style delete of a leave request; id arrives as a GET parameter.

    NOTE(review): unlike the other leave views this one is not protected by
    @loginValid -- confirm that is intentional.
    """
    id = request.args.get('id') # GET data is read from request.args
    leave = Leave.query.get(int(id))
    leave.delete()
    return jsonify({'data': '删除成功'})
@main.route('/add_task/', methods=['get', 'post'])
def add_task():
    '''
    Validate a TaskForm submission and re-render with any errors.

    task.errors # form validation errors
    task.validate_on_submit() # whether this is a valid POST submission
    task.validate() # whether the form data validates
    task.data # the submitted data
    :return:
    '''
    errors = {}
    task = TaskForm()
    if request.method == 'POST':
        if task.validate_on_submit():
            formData = task.data
        else:
            errors_list = list(task.errors.keys())
            errors = task.errors
            print(errors)
    return render_template('add_task.html', **locals())
@api.resource('/Api/Leave/')
class LeaveApi(Resource):
    """REST resource exposing CRUD operations on Leave rows."""
    def __init__(self): # define the response envelope
        super(LeaveApi, self).__init__()
        self.result = {
            'version': '1.0',
            'data': ''
        }
    def set_data(self, leave): # serialize one Leave row to a plain dict
        result_data = {
            'leave_name': leave.leave_name,
            'leave_type': leave.leave_type,
            'leave_start': leave.leave_start,
            'leave_end': leave.leave_end,
            'leave_desc': leave.leave_desc,
            'leave_phone': leave.leave_phone,
        }
        return result_data
    def get(self):
        """Return one leave (when ?id= is given) or the full list."""
        data = request.args # request query parameters
        id = data.get('id')
        if id:
            leave = Leave.query.get(int(id))
            result_data = self.set_data(leave)
        else:
            leaves = Leave.query.all()
            result_data = []
            for leave in leaves:
                result_data.append(self.set_data(leave))
        self.result['data'] = result_data
        return self.result
    def post(self):
        """Create a leave row from form fields; new rows start at status '0'."""
        data = request.form
        leave_id = data.get("leave_id")
        leave_name = data.get("leave_name")
        leave_type = data.get("leave_type")
        leave_start = data.get("leave_start")
        leave_end = data.get("leave_end")
        leave_desc = data.get("leave_desc")
        leave_phone = data.get("leave_phone")
        leave = Leave()
        leave.leave_id = leave_id
        leave.leave_name = leave_name
        leave.leave_type = leave_type # leave type
        leave.leave_start = leave_start # start time
        leave.leave_end = leave_end # end time
        leave.leave_desc = leave_desc # reason for leave
        leave.leave_phone = leave_phone # contact phone
        leave.leave_status = "0" # status flag
        leave.save()
        self.result["data"] = self.set_data(leave)
        return self.result
    def put(self):
        """Update any submitted fields of the leave identified by form id."""
        data = request.form
        id = data.get('id')
        leave = Leave.query.get(int(id))
        for key, value in data.items():
            if key != 'id':
                setattr(leave, key, value)
        leave.save()
        self.result['data'] = self.set_data(leave)
        return self.result
    def delete(self):
        """Delete the leave identified by form id."""
        data = request.form
        id = data.get('id')
        leave = Leave.query.get(int(id))
        leave.delete()
        self.result['data'] = 'ID为%s的数据,删除成功' % id
        return self.result
|
14,432 | d4ad8019c7fbb870d45d97ec5c46e4213a2f88d8 | """
字典dict
练习:exercise01~03
"""
# 创建
dict01 = {10001: "金海", 10002: "铁林", 10003: "徐天"}
# 序列 --> 字典:一分为二
# dict02 = dict([(10001,"金海"), [10002, "铁林"], "徐天"])
list01 = [(10001, "金海"), (10002, "铁林"), (10003, "徐天")]
dict02 = dict(list01)
# 添加(字典不存在该key)
if 10004 not in dict01:
dict01[10004] = "田丹"
# 定位 根据key
# -- 读取
print(dict01[10004])
# -- 修改(字典存在该key)
if 10004 in dict01:
dict01[10004] = "丹丹"
# 删除
del dict01[10002]
# 循环
# -- 遍历所有key
for key in dict01:
print(key)
print(dict01[key])
# -- 遍历所有value
for value in dict01.values():
print(value)
# -- 遍历所有key,value
# for item in dict01.items():
# print(item[0])
# print(item[1])
for k,v in dict01.items():
print(k)
print(v)
|
14,433 | 422747bb20f1e8b65b0b37ec56937a7991630acc | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 22 02:20:27 2018
@author: keshav
"""
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 21 23:11:34 2018
@author: keshav
"""
import pyaudio
import wave
from array import array
#from matplotlib import pyplot as plt
# Recording parameters.
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 16000          # sample rate in Hz
CHUNK = 1024          # frames per buffer
RECORD_SECONDS = 6
FILE_NAME = "sentence"

# Open a microphone input stream.
audio = pyaudio.PyAudio()
stream = audio.open(format=FORMAT, channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)

# Capture RECORD_SECONDS worth of audio, one CHUNK at a time.
# (The unused locals i1/way and the dead per-chunk array('h') conversion
# were removed; frames keeps the raw bytes for the WAV writer.)
frames = []
for _ in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
    frames.append(stream.read(CHUNK))

# Persist the captured frames as a WAV file.
wavfile = wave.open(FILE_NAME + ".wav", 'wb')
wavfile.setnchannels(CHANNELS)
wavfile.setsampwidth(audio.get_sample_size(FORMAT))
wavfile.setframerate(RATE)
wavfile.writeframes(b''.join(frames))
wavfile.close()

# Tear the stream down cleanly.
stream.stop_stream()
stream.close()
audio.terminate()
|
14,434 | cf739da1cde151bb00ae9b22026a5081d4a29580 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
from aerialassist.scouting_data import GetScoutingData, PostScoutingData, PostBenchmarkingData
class MainHandler(webapp2.RequestHandler):
    """Root endpoint: plain-text banner confirming the service is up."""
    def get(self):
        self.response.write('2016 Sparx 1126 Scouting Service!')
# URL routing table: REST-style scouting/benchmarking endpoints dispatched to
# GetScoutingData methods via webapp2's handler_method option; more specific
# paths (team/event/match) are listed after the broader ones.
app = webapp2.WSGIApplication([
    ('/', MainHandler),
    webapp2.Route(r'/api/2016/v1/ScoutingData/all',
                  GetScoutingData,
                  methods=['GET'],
                  handler_method='getAll'),
    webapp2.Route(r'/api/2016/v1/BenchmarkingData/all',
                  GetScoutingData,
                  methods=['GET'],
                  handler_method='getBenchmarkingAll'),
    webapp2.Route(r'/api/2016/v1/ScoutingData', PostScoutingData, methods=['POST']),
    webapp2.Route(r'/api/2016/v1/BenchmarkingData', PostBenchmarkingData, methods=['POST']),
    webapp2.Route(r'/api/2016/v1/ScoutingData/<team_key>',
                  GetScoutingData,
                  methods=['GET'],
                  handler_method='getTeamLevel'),
    webapp2.Route(r'/api/2016/v1/ScoutingData/<team_key>/<event_key>',
                  GetScoutingData,
                  methods=['GET'],
                  handler_method='getEventLevel'),
    webapp2.Route(r'/api/2016/v1/ScoutingData/<team_key>/<event_key>/<match_key>',
                  GetScoutingData,
                  methods=['GET'],
                  handler_method='getMatchLevel'),
    webapp2.Route(r'/api/2016/v1/BenchmarkingData/<team_key>/<event_key>',
                  GetScoutingData,
                  methods=['GET'],
                  handler_method='getBenchmarkingEventLevel')
], debug=True)
|
14,435 | da4b7154e3780c37ffbb5bb437216c71f30fc466 | #
# @lc app=leetcode id=494 lang=python3
#
# [494] Target Sum
#
# @lc code=start
class Solution:
    def findTargetSumWays(self, nums: List[int], S: int) -> int:
        """Count +/- sign assignments over nums whose signed sum equals S.

        Dynamic programming over reachable totals: ``ways`` maps every
        achievable running sum to the number of sign choices producing it.
        """
        ways = {0: 1}
        for value in nums:
            updated = {}
            for total, count in ways.items():
                for candidate in (total + value, total - value):
                    updated[candidate] = updated.get(candidate, 0) + count
            ways = updated
        return ways.get(S, 0)
# @lc code=end
|
14,436 | a1cb98a5d7c83ef66b456e7b286ba72476420d62 | import heapq
from typing import List
class Solution:
    def smallestRange(self, nums: List[List[int]]) -> List[int]:
        """Return the smallest [lo, hi] covering >=1 number from every list.

        Min-heap sweep: the heap holds exactly one entry per source list.
        Repeatedly pop the global minimum and advance within its list
        (lists are sorted, so moving forward can only tighten the window),
        tracking the best [min, max] range seen.  Stops once any list is
        exhausted, because no further window could cover that list.
        """
        pq = []
        lo, hi = float('inf'), float('-inf')
        # Seed the heap with the first element of every list.
        for idx in range(len(nums)):
            heapq.heappush(pq, (nums[idx][0], 0, idx))
            lo = min(lo, nums[idx][0])
            hi = max(hi, nums[idx][0])
        best = [lo, hi]
        while True:
            value, pos, src = heapq.heappop(pq)
            if pos == len(nums[src]) - 1:
                break  # popped the last element of its list: done
            nxt = nums[src][pos + 1]
            heapq.heappush(pq, (nxt, pos + 1, src))
            hi = max(hi, nxt)       # new candidate for the window max
            lo = pq[0][0]           # heap root is the current window min
            span, best_span = hi - lo, best[1] - best[0]
            # Prefer a strictly smaller span; on ties prefer the smaller lo.
            if span < best_span or (span == best_span and lo < best[0]):
                best = [lo, hi]
        return best
|
14,437 | c1bc79589f4eb9ee9c2590f74185f7db6cd37e04 | # programmers L2 : 소수 찾기
# solved by JY
# DATE : 2021.02.21
# 완전탐색, 순열
# 소수 구하는 방법 (시간복잡도는 O(√N))
# 약수들의 곱으로 봤을때 루트를 씌운 값이 중간값임
# 2에서부터 √N의 값까지 검색하면 이후의 값은 확인할 필요가 없음
import itertools
def find(num):
    """Return True when num has no divisor in [2, sqrt(num)].

    Trial division up to the square root: any composite has a factor there.
    Note: values below 2 (0 and 1) also return True; solution() filters
    those out explicitly before counting.
    """
    divisor = 2
    while divisor * divisor <= num:
        if num % divisor:
            divisor += 1
        else:
            return False
    return True

def solution(numbers):
    """Count the distinct primes formed from digit permutations of `numbers`."""
    # Collect every integer obtainable from permutations of 1..len digits;
    # the set de-duplicates values such as "011" vs "11".
    candidates = set()
    for length in range(1, len(numbers) + 1):
        for perm in itertools.permutations(numbers, length):
            candidates.add(int(''.join(perm)))
    # 0 and 1 are not prime, so exclude them before the primality test.
    return sum(1 for n in candidates if n not in (0, 1) and find(n))
# run test
print(solution("17")) # 3
|
14,438 | a28df42234e7906f4c5c3409674a3694a955eb8e | '''
Created on 2015/11/25
@author: FZY
'''
#the baseline of mase of two vlaue is
'''
Created on 2015/11/4
@author: FZY
'''
import random
from sklearn.linear_model import Ridge,Lasso,LogisticRegression
from hyperopt import hp
import numpy as np
import pandas as pd
from hyperopt.pyll_utils import hyperopt_param
from sklearn.cross_validation import train_test_split
from sklearn.metrics import mean_squared_error,explained_variance_score,mean_absolute_error,auc
from sklearn.metrics import average_precision_score
from hyperopt import Trials,tpe,fmin
from hyperopt.base import STATUS_OK
from ml_metrics import accuracy_model,Ret_Plus_error,Ret_Plus_error_xgb
from sklearn.datasets import dump_svmlight_file
import os
import xgboost as xgb
from sklearn.svm import SVR,LinearSVR
import time
from CVModel import loadCVIndex
#fit the param
debug = False
xgb_random_seed = 2015
if debug:
hyperopt_param = {}
hyperopt_param['lasso_max_evals'] = 2
hyperopt_param['ridge_max_evals'] = 2
hyperopt_param['lr_max_evals'] = 2
hyperopt_param["xgb_max_evals"] = 2
hyperopt_param['svr_max_evals'] = 2
hyperopt_param['lsvr_max_evals']=2
xgb_min_num_round = 2
xgb_max_num_round = 4
xgb_nthread=32
xgb_num_round_step = 1
else:
hyperopt_param = {}
hyperopt_param['ridge_max_evals'] = 400
hyperopt_param['lasso_max_evals'] = 1
hyperopt_param['lr_max_evals'] = 400
hyperopt_param["xgb_max_evals"] = 400
hyperopt_param['svr_max_evals'] = 400
hyperopt_param['lsvr_max_evals'] = 100
xgb_min_num_round = 200
xgb_max_num_round = 300
xgb_nthread= 8
xgb_num_round_step = 10
Ridge_param= {
'task':'skl_ridge',
'alpha': hp.loguniform("alpha", np.log(4), np.log(20)),
'random_state':2015,
'max_evals':hyperopt_param['ridge_max_evals']
}
Lasso_param = {
'task':'skl_lasso',
'alpha': hp.loguniform("alpha", np.log(0.00001), np.log(20)),
'random_state':2015,
'max_evals':hyperopt_param['lasso_max_evals']
}
"""
xgb_regression_param = {
'task': 'regression',
'booster': 'gblinear',
'objective': 'reg:linear',
'lambda' : hp.quniform('lambda', 0.001, 0.005, 0.001),
'alpha' : hp.loguniform("alpha", np.log(1), np.log(5)),
'num_round' : hp.quniform('num_round', xgb_min_num_round, xgb_max_num_round, xgb_num_round_step),
'nthread': xgb_nthread,
'silent' : 1,
'seed': xgb_random_seed,
"max_evals": hyperopt_param["xgb_max_evals"],
}
"""
xgb_regression_param = {
'task': 'regression',
'booster': 'gblinear',
'objective': 'reg:linear',
'lambda' : 0.1,
'alpha' : 0.0,
'num_round' : 120,
'lambda_bias':0.2,
'nthread': xgb_nthread,
'silent' : 1,
'seed': xgb_random_seed,
"max_evals": hyperopt_param["xgb_max_evals"],
}
xgb_regression_param_by_tree = {
'task': 'regression',
'booster': 'gbtree',
'objective': 'reg:linear',
'eta' : hp.quniform('eta', 0.1, 1.0, 0.1),
'eta':0.1,
'gamma':hp.quniform('gamma',4,5,0.1),
'gamma':4,
'max_depth':12.0,
'max_depth':hp.quniform('max_depth',6,15,1),
'min_child_weight':hp.quniform('min_child_weight',1,5,1),
'colsample_bytree':hp.quniform('colsample_bytree',0.5,1,0.1),
'num_round' : hp.quniform('num_round', xgb_min_num_round, xgb_max_num_round, xgb_num_round_step),
'nthread': xgb_nthread,
'silent' : 1,
'seed': xgb_random_seed,
"max_evals": hyperopt_param["xgb_max_evals"],
}
xgb_tree_param = {
'task': 'class',
'booster': 'gbtree',
'objective': 'multi:softmax',
'eta' : hp.quniform('eta', 0.1, 1, 0.1),
'gamma': hp.quniform('gamma',0.1,2,0.1),
'num_round' : hp.quniform('num_round', xgb_min_num_round, xgb_max_num_round, xgb_num_round_step),
'nthread': xgb_nthread,
'silent' : 1,
'seed': xgb_random_seed,
"max_evals": hyperopt_param["xgb_max_evals"],
"num_class": 9,
'max_depth': hp.quniform('max_depth', 6, 12, 1),
}
skl_lr_param = {
'task' : 'skl_lr',
'C' : hp.quniform('C',1,20,0.1),
'seed':xgb_random_seed,
'max_evals':hyperopt_param['lr_max_evals']
}
skl_LibSVM_param = {
'task':'skl_LibSVM',
'kernel':hp.choice('kernel',['rbf']),
#'epsilon':hp.quniform('epsilon',0.2,0.6,0.1),
'epsilon':hp.quniform('epsilon',0.01,0.02,0.01),
'cache_size':1024.0,
'gamma':hp.quniform('gamma',0.05,0.1,0.01),
'max_evals':hyperopt_param['svr_max_evals']
}
skl_linearSVR_param= {
'task' : 'skl_linearSVR',
'epsilon':hp.quniform('epsilon',0.0001,0.0005,0.0001),
'C' : hp.quniform('C',5,15,1),
'loss':'squared_epsilon_insensitive',
'seed':2015,
'dual':False,
'max_evals':hyperopt_param['lsvr_max_evals']
}
def dumpMessage(bestParams, loss, loss_std, f_name, start, end, feature):
    """Append the best hyper-parameters and CV loss to a per-task log file.

    bestParams -- dict of tuned parameter values
    loss, loss_std -- mean CV loss and its recorded spread
    f_name, feature -- used to build the log file name
    start, end -- wall-clock timestamps of the tuning run
    """
    path = "../../data/feature/weight/%s_%s_bestParamodel_log.txt" % (f_name, feature)
    # `with` guarantees the handle is closed even if a write raises,
    # unlike the previous bare open()/close() pair.
    with open(path, "wb") as f:
        f.write('loss:%.6f \nStd:%.6f \n' % (loss, loss_std))
        for (key, value) in bestParams.items():
            f.write("%s:%s\n" % (key, str(value)))
        f.write("start_time:%s\n" % (start))
        f.write("end_time:%s\n" % (end))
def trainModel(param,data,features,feature):
    """Run 3-fold CV for the model selected by param['task'] and return the
    mean CV loss in hyperopt's result format.

    param    -- hyperopt-sampled parameter dict; 'task' picks the branch below
    data     -- full training DataFrame
    features -- list of input feature column names
    feature  -- list/name of target column(s)
    """
    #we just judge our model
    #so we do not use bagging ,just one loop of CV
    train_feature = features
    pred_label = feature
    feature_valid = ['Ret_PlusOne','Ret_PlusTwo','Weight_Daily']
    #create CV
    err_cv = []
    std_cv = []
    for run in range(0,3):
        print "this is run:%d"%(run+1)
        # Fold membership comes from pre-generated index files on disk.
        train_index = loadCVIndex("../../data/cv/train.run%d.txt"%(run+1))
        test_index = loadCVIndex("../../data/cv/valid.run%d.txt"%(run+1))
        error_data = data.iloc[test_index]
        X_train = data.iloc[train_index][train_feature]
        X_test = data.iloc[test_index][train_feature]
        Y_train = data.iloc[train_index][pred_label]
        Y_test = data.iloc[test_index][pred_label]
        if param['task'] == 'skl_ridge':
            ridge = Ridge(alpha=param['alpha'],normalize=True)
            ridge.fit(X_train,Y_train)
            pred_value = ridge.predict(X_test)
            pd.DataFrame(ridge.coef_,columns=train_feature).to_csv("ridge.csv")
            pred_value = pd.DataFrame(pred_value,columns=['1','2'])
            train_data = data.iloc[test_index]
            print train_data.shape
            error_train = Ret_Plus_error(pred_value,train_data[feature_valid])/(40000*0.7*62)
            print error_train
            variance = 0
            err_cv.append(error_train)
            std_cv.append(variance)
        elif param['task'] == 'skl_lasso':
            lasso = Lasso(alpha=param['alpha'],normalize=True,fit_intercept=True,tol=0.00000000001)
            lasso.fit(X_train,Y_train)
            pred_value = lasso.predict(X_test)
            pred_value = pd.DataFrame(pred_value,columns=['1','2'])
            train_data = data.iloc[test_index]
            error_train = Ret_Plus_error(pred_value,train_data[feature_valid])
            print error_train
            variance = 0
            err_cv.append(error_train)
            std_cv.append(variance)
        elif param['task'] == 'skl_lr':
            clf = LogisticRegression(C=param['C'])
            clf.fit(X_train,Y_train)
            pred_value = clf.predict(X_test)
            error_train = 1 - accuracy_model(pred_value, Y_test)
            variance = error_train
            err_cv.append(error_train)
            std_cv.append(variance)
        elif param['task'] == 'regression':
            train_data = xgb.DMatrix(X_train,label=np.array(Y_train))
            valid_data = xgb.DMatrix(X_test,label=np.array(Y_test))
            watchlist = [(train_data,'train'),(valid_data,'valid')]
            bst = xgb.train(param, train_data, int(param['num_round']),watchlist)
            valid_data = xgb.DMatrix(X_test)
            pred_value = bst.predict(valid_data)
            tmp_data = error_data[feature_valid]
            for feat in pred_label:
                print tmp_data.shape
                print pred_value.shape
                error_train = Ret_Plus_error_xgb(tmp_data,feat,list(pred_value))/(40000*0.3*62)
                variance = 0
                err_cv.append(error_train)
                std_cv.append(variance)
                print error_train
        elif param['task'] == 'class':
            train_data = xgb.DMatrix(X_train,label=Y_train)
            valid_data = xgb.DMatrix(X_test,label=Y_test)
            watchlist = [(train_data,'train'),(valid_data,'valid')]
            bst = xgb.train(param, train_data, int(param['num_round']),watchlist)
            valid_data = xgb.DMatrix(X_test)
            pred_value = bst.predict(valid_data)
            error_train = 1 - accuracy_model(pred_value, Y_test)
            variance = 0
            err_cv.append(error_train)
            std_cv.append(variance)
            print error_train
        elif param['task'] == 'skl_LibSVM':
            # NOTE(review): param['tol'] is read here but skl_LibSVM_param
            # does not define 'tol' -- confirm, this would KeyError.
            svr = SVR(epsilon=param['epsilon'],tol=param['tol'],cache_size=param['cache_size'],gamma=param['gamma'])
            svr.fit(X_train,Y_train['Ret_PlusOne'])
            pred_value1 = svr.predict(X_test)
            svr.fit(X_train,Y_train['Ret_PlusTwo'])
            pred_value2 = svr.predict(X_test)
            if param['kernel'] == 'linear':
                pd.DataFrame(svr.coef_,columns=train_feature).to_csv("svr.csv")
            pred_value = pd.DataFrame({'1':pred_value1,'2':pred_value2})
            train_data = data.iloc[test_index]
            error_train = Ret_Plus_error(pred_value,train_data[feature_valid])
            print error_train/(40000*0.3*62)
            variance = 0
            err_cv.append(error_train)
            std_cv.append(variance)
        elif param['task'] == 'skl_linearSVR':
            print param['epsilon']
            print param['C']
            svr = LinearSVR(C=param['C'],epsilon=param['epsilon'],dual=param['dual'],loss=param['loss'],random_state=param['seed'])
            svr.fit(X_train,Y_train['Ret_PlusOne'])
            pred_value1 = svr.predict(X_test)
            svr.fit(X_train,Y_train['Ret_PlusTwo'])
            pred_value2 = svr.predict(X_test)
            pred_value = pd.DataFrame({'1':pred_value1,'2':pred_value2})
            train_data = data.iloc[test_index]
            error_train = Ret_Plus_error(pred_value,train_data[feature_valid])
            print error_train/(40000*0.3*62)
            variance = 0
            err_cv.append(error_train)
            std_cv.append(variance)
    #print "error.train:%f error.test:%f"%(error_train,error)
    # NOTE(review): std_cv is overwritten with the MEAN of err_cv (not a
    # std), and `variance` leaks from the last CV branch into the returned
    # attachment -- confirm whether np.std(err_cv) was intended.
    error = np.mean(err_cv)
    std_cv = np.mean(err_cv)
    print "error:%f"%(error)
    return {'loss':error,'attachments':{'std':variance},'status':STATUS_OK}
def TunningParamter(param,data,features,feature):
    """Run a hyperopt search over *param* and persist the best trial.

    param    -- hyperopt search space; must contain 'max_evals' and 'task'
    data     -- training DataFrame forwarded to trainModel
    features -- input feature column names
    feature  -- target label column name(s)
    """
    ISOTIMEFORMAT='%Y-%m-%d %X'
    start = time.strftime(ISOTIMEFORMAT, time.localtime())
    trials = Trials()
    objective = lambda p : trainModel(p, data, features,feature)
    best_parameters = fmin(objective, param, algo =tpe.suggest,max_evals=param['max_evals'],trials= trials)
    #now we need to get best_param
    print best_parameters
    trials_loss = np.asanyarray(trials.losses(),dtype=float)
    best_loss = min(trials_loss)
    # index of the first trial that achieved the best loss
    ind = np.where(trials_loss==best_loss)[0][0]
    # 'std' attachment is stored by trainModel for each trial
    best_loss_std = trials.trial_attachments(trials.trials[ind])['std']
    end = time.strftime(ISOTIMEFORMAT,time.localtime())
    # record parameters, loss and wall-clock timing for later comparison
    dumpMessage(best_parameters, best_loss, best_loss_std,param['task'],start,end,feature)
if __name__ == "__main__":
    # Merge engineered price features into the cleaned training set,
    # then tune the configured model with hyperopt.
    train_price = pd.read_csv("../../data/train_ts_price.csv")
    train = pd.read_csv("../../data/train_clean_fill_all_2.csv")
    #add price message to train
    features = ["Ret_%d_price"%(i) for i in range(2,121)]
    train[features] = train_price[features]
    all_features= []
    #add the feature to predict
    features = [ "Feature_%d"%(i) for i in range(1,26)]
    all_features.extend(features)
    features =["Ret_%d"%(i) for i in range(2,121)]
    all_features.extend(features)
    features = ["Ret_%d_price"%(i) for i in range(2,121)]
    all_features.extend(features)
    # Aggregate statistics computed upstream over the return series.
    all_features.append('Ret_total_price')
    all_features.append('Ret_mean')
    all_features.append('Ret_mean_price')
    all_features.append('Ret_max_price')
    all_features.append('Ret_max')
    all_features.append('Ret_min_price')
    all_features.append('Ret_min')
    all_features.append('Ret_var')
    all_features.append('Ret_var_price')
    all_features.append('Ret_120')
    # NOTE: 'lables' (sic) keeps the project's existing spelling.
    lables = ['Ret_PlusOne','Ret_PlusTwo']
    TunningParamter(Ridge_param, train, all_features, lables)
    # Alternative model runs, kept for reference:
    #lables = ['Ret_PlusOne','Ret_PlusTwo']
    #TunningParamter(skl_LibSVM_param, train, all_features, lables)
    #lables = ['Ret_PlusOne']
    #TunningParamter(xgb_regression_param, train, all_features,lables)
    #lables = ['Ret_PlusTwo']
    #TunningParamter(xgb_regression_param, train, all_features,lables)
    #lables = ['Ret_PlusOne']
    #TunningParamter(skl_linearSVR_param, train, all_features,lables)
|
14,439 | 4920e8fb269de84db586cb789b72a9f48ba2d353 | import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')
prompt_text = input("text: ")
num_results = int(input("num results: "))
encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=False, return_tensors="pt")
# NOTE(review): tuple-unpacking forward()'s return assumes an older
# transformers release (v4+ returns a ModelOutput object) — confirm the
# pinned transformers version before running.
prediction_scores, past = model.forward(encoded_prompt)
num = 0
predictability = 0
# Score how "predictable" each actual next token was: if it appears in
# the model's top-k predictions, credit it by its rank, scaled by the
# GPT-2 vocabulary size (50257).
for x in range(0, encoded_prompt.numel()-1):
    prediction_list = [(index.item()) for index in prediction_scores[0, num].topk(num_results).indices]
    print([tokenizer.decode(index.item()) for index in prediction_scores[0, num].topk(num_results).indices])
    if encoded_prompt[0, num+1] in prediction_list:
        predictability = predictability + ((50257 - prediction_list.index(encoded_prompt[0, num+1]))/50257)
    num += 1
# Average credit over all token transitions in the prompt.
print(predictability/(encoded_prompt.numel()-1))
14,440 | 79b114cf58e7bb95ee1337d1c94b241d985c1dd4 | import sys
import mysql.connector
# Usage: $ python testMysql.py 'root' 'password'
# Smoke-test script: connects to a local MySQL server, creates the
# ies_test database and customers table if missing, inserts sample rows
# and lists them.
# Validate arguments
if len(sys.argv)!=3:
    print("USAGE: $ python testMYSql.py <user> <password>")
    exit()
# Connect to database
print("1. Connecting to db...")
mydb = mysql.connector.connect(
    port=3306,
    user=sys.argv[1],
    passwd=sys.argv[2]
)
print("Connected!")
print(mydb)
# Create table and insert data
print("\n2. Testing db...")
mycursor = mydb.cursor()
print("\n2.1. Databases list is...")
mycursor.execute('SHOW DATABASES')
dblist = []
for x in mycursor.fetchall():
    print(x[0])
    dblist.append(x[0])
if 'ies_test' in dblist:
    print("\n2.2. There is no need to create ies_test for tests, as it is already created!")
else:
    print("\n2.2. Creating database ies_test...")
    mycursor.execute("CREATE DATABASE ies_test")
print("Changing context to ies_test...")
mycursor.execute("USE ies_test")
print("\n2.3. Tables list is...")
mycursor.execute("SHOW TABLES")
tableslist = []
for x in mycursor.fetchall():
    print(x[0])
    tableslist.append(x[0])
if 'customers' in tableslist:
    print("\n2.4. Table customers is already created!")
    print("\n2.5. Not adding sample data, as it is expected to already exist!")
else:
    print("\n2.4. Creating table customers...")
    mycursor.execute("CREATE TABLE customers (name VARCHAR(255), address VARCHAR(255))")
    print("\n2.5. Adding sample data to db...")
    # Parameterized INSERTs — values are bound by the driver, not
    # interpolated into the SQL string.
    mycursor.execute("INSERT INTO customers(name, address) VALUES (%s, %s)", ("Person1", "Aveiro"))
    mycursor.execute("INSERT INTO customers(name, address) VALUES (%s, %s)", ("Person2", "Coimbra"))
    mycursor.execute("INSERT INTO customers(name, address) VALUES (%s, %s)", ("Person3", "Porto"))
print("\n2.6. The data on that table is...")
mycursor.execute("SELECT * FROM customers")
for x in mycursor.fetchall():
    print(x)
mydb.commit()
print("\nALL TESTS DONE!")
mycursor.close()
mydb.close()
14,441 | 8263f254c29b1e5ebea1315c2602f5b59fd8c6e6 | from app import cfg
from app import db
from app import util
from flask import Blueprint
from flask import jsonify
from flask import render_template
from flask import request
import math
bp_bans = Blueprint('bans', __name__)
@bp_bans.route("/bans")
def page_bans():
    """Render (or return as JSON) one page of the grouped ban list.

    Query params: page (1-based page number), q (search string), json
    (non-zero to get a JSON array instead of the HTML template).
    """
    page = request.args.get('page', type=int, default=1)
    search_query = request.args.get('q', type=str, default="")
    json_format = request.args.get('json', type=int, default=0)
    page = max(min(page, 1000000), 1) # Arbitrary number. We probably won't ever have to deal with 1,000,000 pages of bans. Hopefully..
    query = db.query_grouped_bans(search_query=search_query)
    length = query.count()
    # Slice the requested page out of the full result set.
    displayed_bans = query.offset((page-1)*cfg.WEBSITE["items-per-page"]).limit(cfg.WEBSITE["items-per-page"])
    # [has_previous_page, has_next_page] for the pager controls.
    buttons = [page > 1, page < length / cfg.WEBSITE["items-per-page"]]
    if json_format:
        # Datetimes are stringified; unset optional fields become null.
        return jsonify([
            {
                "id": ban.id,
                "bantime": str(ban.bantime),
                "round_id": ban.round_id,
                "expiration_time": str(ban.expiration_time) if ban.expiration_time else None,
                "reason": ban.reason,
                "ckey": ban.ckey,
                "a_ckey": ban.a_ckey,
                "unbanned_datetime": str(ban.unbanned_datetime) if ban.unbanned_datetime else None,
                "unbanned_ckey": ban.unbanned_ckey,
                "roles": ban.roles.split(","),
                "server_name": ban.server_name,
                "global_ban": ban.global_ban
            } for ban in displayed_bans
        ])
    return render_template("bans.html", bans=displayed_bans, buttons=buttons, page=page, search_query=search_query, pages=math.ceil(length / cfg.WEBSITE["items-per-page"]))
|
14,442 | a2f99be0c94a4152dd6b028db82327d2ffba75f1 | '''
i/p:['five plus three', 'seven minus two', 'two plus eight minus five', 'eight divide four']
o/p:['eight', 'five', 'five', 'two']'''
# Translate each worded arithmetic expression into digits/operators,
# evaluate it, then map each numeric result back to its word form.
list1 = ['five plus three', 'seven minus two', 'two plus eight minus five', 'eight divide four']
dict1 = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6, 'seven': 7, 'eight': 8, 'nine': 9, 'plus': '+', 'minus': '-', 'divide': '/'}
list2 = []
list3 = []
list4 = []
for expression in list1:
    list2 = expression.split(' ')
    string1 = ''.join(str(dict1[word]) for word in list2)
    list3.append(int(eval(string1)))
for result in list3:
    list4.extend(word for word, mapped in dict1.items() if result == mapped)
print(list4)
'''
ip_list = ['five plus three', 'seven minus two', 'two plus eight minus five', 'eight divide four']
ref_dict = {'plus': '+', 'minus': '-', 'divide': '/', 'two': '2', 'three': '3', 'four': '4', 'five': '5', 'seven': '7',
'eight': '8'}
p_list = []
for i in ip_list:
str_list = i.split(' ')
string = ''
for j in str_list:
string = string + ref_dict[j]
p_list.append(string)
op_list = []
for i in p_list:
for j, k in ref_dict.items():
if str(int(eval(i))) == k:
op_list.append(j)
print(op_list)''' |
14,443 | 6aec0f87e9138a07fd3cc970564e39b8bfa66b15 | from copy import deepcopy
INPUT = """165
78
151
15
138
97
152
64
4
111
7
90
91
156
73
113
93
135
100
70
119
54
80
170
139
33
123
92
86
57
39
173
22
106
166
142
53
96
158
63
51
81
46
36
126
59
98
2
16
141
120
35
140
99
121
122
58
1
60
47
10
87
103
42
132
17
75
12
29
112
3
145
131
18
153
74
161
174
68
34
21
24
85
164
52
69
65
45
109
148
11
23
129
84
167
27
28
116
110
79
48
32
157
130"""
def get_difference(input, start=0):
    """Count joltage gaps between consecutive adapters.

    Sorts *input*, appends the built-in adapter (max + 3), then tallies
    the difference between each adapter and its predecessor (seeded by
    *start*).  Returns a dict mapping gap size -> occurrence count.
    """
    adapters = sorted(input)
    adapters.append(adapters[-1] + 3)  # the device is always max + 3
    previous = start
    gaps = {}
    for rating in adapters:
        step = rating - previous
        gaps[step] = gaps.get(step, 0) + 1
        previous = rating
    return gaps
def get_possible_next(input, current):
    """Return the values of up to three following adapters (indices
    current+1 .. current+3) whose rating is within 3 jolts of
    input[current].  *input* must be sorted ascending.
    """
    base = input[current]
    limit = len(input)
    return [
        input[current + offset]
        for offset in (1, 2, 3)
        if current + offset < limit and input[current + offset] - base <= 3
    ]
def add_to_ways(ways, current, solutions):
    """Propagate the path count of *current* onto each reachable adapter.

    Mutates *ways* in place: every entry in *solutions* gains
    ways[current] paths, after which the fully-processed *current* key
    is dropped.  An empty *solutions* (dead end) leaves *ways* as-is.
    Returns the same dict for chaining.
    """
    if not solutions:
        return ways
    for target in solutions:
        ways[target] = ways.get(target, 0) + ways[current]
    ways.pop(current, None)
    return ways
def get_ways(input):
    """Count distinct adapter chains from the outlet (0) to the device.

    Extends *input* with the outlet (0) and the device (max + 3), then
    sweeps the sorted chain left to right, forwarding path counts via
    add_to_ways.  The device's entry in the returned dict holds the
    total number of arrangements.
    """
    chain = sorted(input + [0, max(input) + 3])
    ways = {0: 1}
    for index, rating in enumerate(chain):
        ways = add_to_ways(ways, rating, get_possible_next(chain, index))
    return ways
if __name__ == '__main__':
    # Parse the puzzle input into a list of adapter ratings.
    input = INPUT.split("\n")
    input = [int(i) for i in input]
    # Part 1 (kept for reference):
    # differences = get_difference(input)
    # print(differences[1] * differences[3])
    # Part 2: total number of distinct adapter arrangements.
    print(get_ways(input))
|
14,444 | 6a4e827de9ff591744cf6585d149a60712680f19 | from aylienapiclient import textapi
import api_access
# Article-extraction demo for the AYLIEN Text API.
# NOTE(review): assumes the local api_access module defines `id` and
# `key` credentials — confirm.
client = textapi.Client(api_access.id, api_access.key)
url = "http://techcrunch.com/2015/04/06/john-oliver-just-changed-the-surveillance-reform-debate"
extract = client.Extract({"url": url, "best_image": True})
print(type(extract))
print(extract["title"])
|
14,445 | 9cbf2b076446c58351f84a81403de8c6d7d0aee8 | import time
# Demo of time/datetime basics: various datetime constructions, then the
# components of the current local time.
localtime = time.localtime(time.time())
print("Local current time :", localtime)
from datetime import datetime
dt1 = datetime(year=2020, month=3, day=31)
print(dt1)
dt1 = datetime(year=2020, month=3, day=31, hour=21, minute=21, second=21)
print(dt1)
dt1 = datetime(2020, 3, 31)
print(dt1)
dt1 = datetime(2020, 3, 31, 21, 21, 20)
print(dt1)
print(dt1.year); print(dt1.month); print(dt1.day)
print(dt1.hour); print(dt1.minute); print(dt1.second)
print("=====================================================")
print(datetime.now())
print(datetime.today())
print("=====================================================")
print("=====================================================")
ct = datetime.now()
ct_y = ct.year
ct_m = ct.month
ct_d = ct.day
ct_h = ct.hour
# BUG FIX: the minute was previously stored in ct_m as well, clobbering
# the month — the tuple below then carried the minute twice and lost the
# month entirely.  Use a distinct name for the minute.
ct_min = ct.minute
ct_s = ct.second
ct_ms = ct.microsecond
# (year, month, day, hour, minute, second, microsecond)
ct_m_y = ct_y, ct_m, ct_d, ct_h, ct_min, ct_s, ct_ms
print(ct_m_y)
print("=====================================================san")
ct = datetime.now()
print(ct.microsecond)
14,446 | 47b6c95137869509c14152ea0d92125995a44bdd | # pymonome - library for interfacing with monome devices
#
# Copyright (c) 2011-2014 Artem Popov <artfwo@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import asyncio, aiosc
import itertools
__all__ = ['SerialOsc', 'Monome', 'BitBuffer']
def pack_row(row):
    """Pack eight 0/1 LED states into one byte, row[0] being the LSB."""
    packed = 0
    for bit in range(8):
        packed |= row[bit] << bit
    return packed
def unpack_row(val):
    """Expand a packed byte back into eight 0/1 values, LSB first."""
    return [(val >> bit) & 1 for bit in range(8)]
class Monome(aiosc.OSCProtocol):
    """OSC protocol for one monome grid/arc attached via serialosc.

    On connection it announces itself to the device, requests /sys/info,
    and calls ready() once id, size and rotation are all known.  LED and
    tilt commands are thin wrappers over the corresponding OSC messages
    under *prefix*.
    """
    def __init__(self, prefix='python'):
        self.prefix = prefix.strip('/')
        # Filled in from the /sys/info replies; see sys_info().
        self.id = None
        self.width = None
        self.height = None
        self.rotation = None
        super().__init__(handlers={
            # BUG FIX: actually invoke disconnect() — the previous
            # handler (lambda *args: self.disconnect) only evaluated to
            # the bound method and never called it.
            '/sys/disconnect': lambda *args: self.disconnect(),
            '/sys/{id,size,host,port,prefix,rotation}': self.sys_info,
            '/{}/grid/key'.format(self.prefix): lambda addr, path, x, y, s: self.grid_key(x, y, s),
            '/{}/tilt'.format(self.prefix): lambda addr, path, n, x, y, z: self.tilt(n, x, y, z),
        })
    def connection_made(self, transport):
        super().connection_made(transport)
        self.host, self.port = transport.get_extra_info('sockname')
        self.connect()
    def connect(self):
        # Register our address with the device and request its metadata.
        self.send('/sys/host', self.host)
        self.send('/sys/port', self.port)
        self.send('/sys/prefix', self.prefix)
        self.send('/sys/info', self.host, self.port)
    def disconnect(self):
        self.transport.close()
    def sys_info(self, addr, path, *args):
        """Collect /sys/* replies; fire ready() once metadata is complete."""
        if path == '/sys/id':
            self.id = args[0]
        elif path == '/sys/size':
            self.width, self.height = (args[0], args[1])
        elif path == '/sys/rotation':
            self.rotation = args[0]
        # TODO: refine conditions for reinitializing
        # in case rotation, etc. changes
        # Note: arc will report 0, 0 for its size
        if all(x is not None for x in [self.id, self.width, self.height, self.rotation]):
            self.ready()
    def ready(self):
        """Hook: called once device metadata is complete."""
        pass
    def grid_key(self, x, y, s):
        """Hook: key event at (x, y); s is 1 for press, 0 for release."""
        pass
    def tilt(self, n, x, y, z):
        """Hook: tilt event for sensor *n*."""
        pass
    def led_set(self, x, y, s):
        self.send('/{}/grid/led/set'.format(self.prefix), x, y, s)
    def led_all(self, s):
        self.send('/{}/grid/led/all'.format(self.prefix), s)
    def led_map(self, x_offset, y_offset, data):
        # data is an 8x8 block; each row is packed into one byte.
        args = [pack_row(data[i]) for i in range(8)]
        self.send('/{}/grid/led/map'.format(self.prefix), x_offset, y_offset, *args)
    def led_row(self, x_offset, y, data):
        args = [pack_row(data[i*8:(i+1)*8]) for i in range(len(data) // 8)]
        self.send('/{}/grid/led/row'.format(self.prefix), x_offset, y, *args)
    def led_col(self, x, y_offset, data):
        args = [pack_row(data[i*8:(i+1)*8]) for i in range(len(data) // 8)]
        self.send('/{}/grid/led/col'.format(self.prefix), x, y_offset, *args)
    def led_intensity(self, i):
        self.send('/{}/grid/led/intensity'.format(self.prefix), i)
    def led_level_set(self, x, y, l):
        self.send('/{}/grid/led/level/set'.format(self.prefix), x, y, l)
    def led_level_all(self, l):
        self.send('/{}/grid/led/level/all'.format(self.prefix), l)
    def led_level_map(self, x_offset, y_offset, data):
        self.send('/{}/grid/led/level/map'.format(self.prefix), x_offset, y_offset, *data)
    def led_level_row(self, x_offset, y, data):
        self.send('/{}/grid/led/level/row'.format(self.prefix), x_offset, y, *data)
    def led_level_col(self, x, y_offset, data):
        self.send('/{}/grid/led/level/col'.format(self.prefix), x, y_offset, *data)
    def tilt_set(self, n, s):
        self.send('/{}/tilt/set'.format(self.prefix), n, s)
class BitBuffer:
    """Off-screen monome LED state, one 0/1 cell per LED.

    Cells are stored row-major: ``leds[y][x]`` with ``0 <= x < width``
    and ``0 <= y < height``.  (BUG FIX: the previous implementation
    mixed ``[x][y]`` and ``[y][x]`` indexing — led_set/get_map used one,
    led_all/the operators the other — which corrupted state and raised
    IndexError on non-square grids; __xor__/__or__ also referenced
    undefined ``row``/``col`` names, and __and__ iterated columns over
    ``height``.  All accesses are now consistently row-major.)
    """
    def __init__(self, width, height):
        self.leds = [[0 for col in range(width)] for row in range(height)]
        self.width = width
        self.height = height
    def _combine(self, other, op):
        # Cell-wise combination of two equally sized buffers.
        result = BitBuffer(self.width, self.height)
        for y in range(self.height):
            for x in range(self.width):
                result.leds[y][x] = op(self.leds[y][x], other.leds[y][x])
        return result
    def __and__(self, other):
        return self._combine(other, lambda a, b: a & b)
    def __xor__(self, other):
        return self._combine(other, lambda a, b: a ^ b)
    def __or__(self, other):
        return self._combine(other, lambda a, b: a | b)
    def led_set(self, x, y, s):
        # Silently ignore out-of-range writes, matching hardware behavior.
        if x < self.width and y < self.height:
            self.leds[y][x] = s
    def led_all(self, s):
        for y in range(self.height):
            for x in range(self.width):
                self.leds[y][x] = s
    def led_map(self, x_offset, y_offset, data):
        for r, row in enumerate(data):
            self.led_row(x_offset, y_offset + r, row)
    def led_row(self, x_offset, y, data):
        for x, s in enumerate(data):
            self.led_set(x_offset + x, y, s)
    def led_col(self, x, y_offset, data):
        for y, s in enumerate(data):
            self.led_set(x, y_offset + y, s)
    def get_map(self, x_offset, y_offset):
        """Return the 8x8 quadrant at (x_offset, y_offset) as a list of rows."""
        return [
            [self.leds[y][x] for x in range(x_offset, x_offset + 8)]
            for y in range(y_offset, y_offset + 8)
        ]
class Page:
    """One virtual grid surface multiplexed onto a physical monome.

    Every LED write is mirrored into an off-screen BitBuffer so the
    owning Pages app can restore this page's state when it becomes
    current.  Writes reach the hardware only while this page is the
    current one and the chooser overlay is not active.
    NOTE(review): self.width/self.height are assigned externally (by
    Pages.ready) before ready() is called here — confirm that ordering.
    """
    def __init__(self, app):
        self.app = app
        self.intensity = 15
    def ready(self):
        # Allocate the shadow buffer once the grid size is known.
        self._buffer = BitBuffer(self.width, self.height)
    def led_set(self, x, y, s):
        self._buffer.led_set(x, y, s)
        if self is self.app.current_page and not self.app.switching:
            self.app.led_set(x, y, s)
    def led_all(self, s):
        self._buffer.led_all(s)
        if self is self.app.current_page and not self.app.switching:
            self.app.led_all(s)
    def led_map(self, x_offset, y_offset, data):
        self._buffer.led_map(x_offset, y_offset, data)
        if self is self.app.current_page and not self.app.switching:
            self.app.led_map(x_offset, y_offset, data)
    def led_row(self, x_offset, y, data):
        self._buffer.led_row(x_offset, y, data)
        if self is self.app.current_page and not self.app.switching:
            self.app.led_row(x_offset, y, data)
    def led_col(self, x, y_offset, data):
        self._buffer.led_col(x, y_offset, data)
        if self is self.app.current_page and not self.app.switching:
            self.app.led_col(x, y_offset, data)
    def led_intensity(self, i):
        # Intensity is remembered but only forwarded while visible.
        self.intensity = i
        if self is self.app.current_page and not self.app.switching:
            self.app.led_intensity(i)
from enum import Enum
class PageCorner(Enum):
    """Which physical corner key toggles the Pages chooser overlay."""
    top_left = 1
    top_right = 2
    bottom_left = 3
    bottom_right = 4
class Pages(Monome):
    """Multiplexes several Page apps onto one physical monome.

    Holding the configured corner key shows a one-row page chooser;
    pressing a column while holding selects that page.  Key events are
    otherwise forwarded to the current page.
    """
    def __init__(self, pages, switch=PageCorner.top_right):
        super().__init__('/pages')
        self.pages = pages
        self.current_page = self.pages[0]
        self.switching = False
        # Keys currently held, so matching key-ups can be flushed when
        # the chooser opens.
        self.pressed_buttons = []
        self.switch = switch
    def ready(self):
        # Propagate the physical grid size to every page, then resolve
        # the chooser key to concrete coordinates.
        for p in self.pages:
            p.width = self.width
            p.height = self.height
            p.ready()
        if self.switch == PageCorner.top_left:
            self.switch_button = (0, 0)
        elif self.switch == PageCorner.top_right:
            self.switch_button = (self.width - 1, 0)
        elif self.switch == PageCorner.bottom_left:
            self.switch_button = (0, self.height - 1)
        elif self.switch == PageCorner.bottom_right:
            self.switch_button = (self.width - 1, self.height - 1)
        else:
            raise RuntimeError
    def disconnect(self, *args):
        # NOTE(review): pages are assumed to expose disconnect() —
        # plain Page instances do not define one; confirm expected type.
        for p in self.pages:
            p.disconnect()
    def grid_key(self, x, y, s):
        if (x, y) == self.switch_button:
            if s == 1:
                # flush remaining presses
                for x, y in self.pressed_buttons:
                    self.current_page.grid_key(x, y, 0)
                # render selector page and set choose mode
                self.switching = True
                self.display_chooser()
            else:
                self.switching = False
                # TODO: ideally we only need to send key-ups if page changed
                # but if non-page mode key-up happened during switching,
                # it still has to be sent to original page
                self.leave_chooser()
            return
        if self.switching:
            pass # set current page based on coords
            if x < len(self.pages):
                self.current_page = self.pages[x]
            self.display_chooser()
            return
        # remember pressed buttons so we can flush them later
        if s == 1:
            self.pressed_buttons.append((x, y))
        else:
            # TODO: still getting x not in list errors here
            self.pressed_buttons.remove((x, y))
        self.current_page.grid_key(x, y, s)
    def display_chooser(self):
        # Bottom row: one lit cell per available page; a lit column
        # marks the currently selected page.
        self.led_all(0)
        page_row = [1 if i < len(self.pages) else 0 for i in range(self.width)]
        page_num = self.pages.index(self.current_page)
        self.led_row(0, self.height - 1, page_row)
        self.led_col(page_num, 0, [1] * self.height)
    def leave_chooser(self):
        # Restore the selected page's buffered LED state quadrant by
        # quadrant.  NOTE(review): the map is always sent to (0, 0)
        # even for non-zero offsets — looks wrong for grids larger than
        # 8x8; confirm against hardware.
        for x_offset in [i * 8 for i in range(self.width // 8)]:
            for y_offset in [i * 8 for i in range(self.height // 8)]:
                led_map = self.current_page._buffer.get_map(x_offset, y_offset)
                self.led_map(0, 0, led_map)
class BaseSerialOsc(aiosc.OSCProtocol):
    """Minimal serialosc client: maintains the id -> port mapping of
    attached monome devices via /serialosc notifications."""
    def __init__(self):
        super().__init__(handlers={
            '/serialosc/device': self.serialosc_device,
            '/serialosc/add': self.serialosc_add,
            '/serialosc/remove': self.serialosc_remove,
        })
        self.devices = {}
    def connection_made(self, transport):
        super().connection_made(transport)
        self.host, self.port = transport.get_extra_info('sockname')
        # Request the current device list and subscribe to changes.
        self.send('/serialosc/list', self.host, self.port)
        self.send('/serialosc/notify', self.host, self.port)
    def device_added(self, id, type, port):
        """Hook: a device appeared (also called once per /serialosc/device)."""
        self.devices[id] = port
    def device_removed(self, id, type, port):
        """Hook: a device disappeared."""
        del self.devices[id]
    def serialosc_device(self, addr, path, id, type, port):
        self.device_added(id, type, port)
    def serialosc_add(self, addr, path, id, type, port):
        self.device_added(id, type, port)
        # /serialosc/notify is one-shot, so re-arm after every event.
        self.send('/serialosc/notify', self.host, self.port)
    def serialosc_remove(self, addr, path, id, type, port):
        self.device_removed(id, type, port)
        self.send('/serialosc/notify', self.host, self.port)
class SerialOsc(BaseSerialOsc):
    """serialosc client that auto-connects app protocols to devices.

    *apps* maps a device id to a protocol factory; the key ``'*'`` acts
    as a wildcard matching any device.
    """
    def __init__(self, apps, loop=None):
        super().__init__()
        self.apps = apps
        if loop is None:
            loop = asyncio.get_event_loop()
        self.loop = loop
    def device_added(self, id, type, port):
        super().device_added(id, type, port)
        # BUG FIX: asyncio.async(...) is a SyntaxError on Python 3.7+
        # ('async' became a keyword); ensure_future (available since
        # 3.4.4) is the supported replacement.
        if id in self.apps:
            asyncio.ensure_future(self.autoconnect(self.apps[id], port))
        elif '*' in self.apps:
            asyncio.ensure_future(self.autoconnect(self.apps['*'], port))
    async def autoconnect(self, app, port):
        """Open a UDP endpoint from *app*'s factory to the device port."""
        transport, app = await self.loop.create_datagram_endpoint(
            app,
            local_addr=('127.0.0.1', 0),
            remote_addr=('127.0.0.1', port)
        )
    def device_removed(self, id, type, port):
        super().device_removed(id, type, port)
        # NOTE(review): removing any device tears down the wildcard app;
        # verify this is intended when several devices share '*'.
        if id in self.apps:
            self.apps[id].disconnect()
            del self.apps[id]
        elif '*' in self.apps:
            self.apps['*'].disconnect()
            del self.apps['*']
@asyncio.coroutine
def create_serialosc_connection(app_or_apps, loop=None):
    """Create a UDP endpoint talking to serialosc on localhost:12002.

    app_or_apps -- either a dict {device_id: app_factory} or a single
    app factory, which is applied to every device via the '*' wildcard.
    Returns the SerialOsc protocol instance.
    NOTE(review): generator-based coroutines (@asyncio.coroutine /
    yield from) were removed in Python 3.11 — port to async/await
    before upgrading.
    """
    if isinstance(app_or_apps, dict):
        apps = app_or_apps
    else:
        apps = {'*': app_or_apps}
    if loop is None:
        loop = asyncio.get_event_loop()
    transport, serialosc = yield from loop.create_datagram_endpoint(
        lambda: SerialOsc(apps),
        local_addr=('127.0.0.1', 0),
        remote_addr=('127.0.0.1', 12002)
    )
    return serialosc
|
14,447 | f43fd29de45fb088f5ab751cdeccc60ef387a554 | import src.simple_arules.simple_arules as ar
transactions = [["bli", "bla", "blubb"], ["bla"]]
support_dictionaries_expected = [
{("bla",): 2, ("bli",): 1, ("blubb",): 1},
{("bla", "bli"): 1, ("bla", "blubb"): 1, ("bli", "blubb"): 1},
{("bla", "bli", "blubb"): 1},
]
association_rules_expected_7 = [
(("bla",), ("bli",), 2, 1, 0.5),
(("bli",), ("bla",), 1, 1, 1.0),
(("bla",), ("blubb",), 2, 1, 0.5),
(("blubb",), ("bla",), 1, 1, 1.0),
(("bli",), ("blubb",), 1, 1, 1.0),
(("blubb",), ("bli",), 1, 1, 1.0),
(("bla",), ("bli", "blubb"), 2, 1, 0.5),
]
def test_tuple_reduce():
    """_tuple_reduce unwraps 1-tuples to a scalar, leaves others alone."""
    assert ar._tuple_reduce((1,)) == 1
    assert ar._tuple_reduce(("bla", "blubb")) == ("bla", "blubb")
def test_get_support_dictionary_for_set_size():
    """Support counts for all 3-item sets, minimum support 1."""
    assert (
        ar._get_support_dictionary_for_set_size(transactions, 3, 1)
        == support_dictionaries_expected[2]
    )
def test_get_support_dictionaries():
    """Support dictionaries for set sizes 1..3, minimum support 1."""
    assert (
        ar._get_support_dictionaries(transactions, 3, 1)
        == support_dictionaries_expected
    )
def test_all_sub_combinations():
    """All proper non-empty sub-combinations, ordered by size."""
    assert list(ar._all_sub_combinations([1, 2, 3])) == [
        (1,),
        (2,),
        (3,),
        (1, 2),
        (1, 3),
        (2, 3),
    ]
def test_get_association_rules():
    """The first seven generated rules match the expected fixtures.

    BUG FIX: the comparison result was previously discarded (no
    ``assert``), so this test could never fail.
    """
    assert (
        ar._get_association_rules(support_dictionaries_expected)[:7]
        == association_rules_expected_7
    )
|
14,448 | 71c99be60201d5df09cc695e73a7674466b09587 | from abc import ABC, abstractclassmethod
from abc import abstractmethod

import imghdr
import os
import pathlib
from typing import Union, Tuple, List, Callable, Any, Dict
class dataLoaderABC(ABC):
    """Abstract interface for directory-based data loaders."""

    @abstractmethod
    def __init__(self, file_directory: Union[str, pathlib.Path, os.DirEntry], fileType: str = 'image'):
        # BUG FIX: these were decorated with @abstractclassmethod, which
        # turned __init__ into a classmethod — super().__init__() then
        # bound `self` to the *class*, so every instance shared one
        # file_list.  They are instance methods; use @abstractmethod.
        self.fileType = fileType if fileType in ["image", 'csv', 'excel'] else None
        self.file_directory: Union[str, pathlib.Path, os.DirEntry] = file_directory
        self.file_list: List[pathlib.Path] = []
        # BUG FIX: the default filter took no arguments but is always
        # called with the candidate file; accept and ignore any args.
        self.file_filter: Callable = lambda *_args: True

    @abstractmethod
    def LoadFileList(self, file_type: str = None) -> List[pathlib.Path]:
        """Populate and return self.file_list for file_directory."""

    @abstractmethod
    def check_file_is_image(
        self,
        file_path: Union[str, pathlib.Path, os.DirEntry],
        footer: Union[str, List[str], Tuple[str]] = None
    ) -> bool:
        """Return True when *file_path* is an image with a matching suffix."""


class dataLoader(dataLoaderABC):
    """Concrete loader that lists image files in a directory."""

    def __init__(self, file_directory: Union[str, pathlib.Path, os.DirEntry]):
        super().__init__(file_directory)

    def LoadFileList(self, file_type: str = None, fileNameList: List = None) -> List[pathlib.Path]:
        """Scan file_directory and return the files passing the filter.

        fileNameList -- optional file names (relative to the directory)
        to restrict the scan to.  Raises TypeError for an invalid
        directory.
        """
        dir_path = pathlib.Path(self.file_directory)
        if not dir_path.is_dir():
            raise TypeError(f'Not a valid directory: {dir_path}')
        if self.fileType == "image":
            self.file_filter = self.check_file_is_image
        if fileNameList is None:
            self.file_list = [
                pathlib.Path(file) for file in os.scandir(dir_path) if self.file_filter(file)
            ]
        else:
            # BUG FIX: rebuild the list instead of appending, so
            # repeated calls don't accumulate duplicates (mirrors the
            # branch above).
            candidates = (pathlib.Path(dir_path) / fileName for fileName in fileNameList)
            self.file_list = [
                file_path for file_path in candidates
                if file_path.exists() and self.file_filter(file_path)
            ]
        return self.file_list

    def check_file_is_image(
        self,
        file_path: Union[str, pathlib.Path, os.DirEntry],
        footer: Union[str, List[str], Tuple[str]] = 'jpg'
    ) -> bool:
        """True when *file_path* exists, decodes as an image, and its
        (lower-cased) name ends with *footer* (str or tuple of suffixes).

        NOTE(review): imghdr is deprecated since Python 3.11 and removed
        in 3.13 — plan a replacement before upgrading.
        """
        if not pathlib.Path(file_path).is_file():
            return False
        if imghdr.what(file_path) is None:
            return False
        if footer is not None:
            return pathlib.Path(file_path).name.lower().endswith(footer)
        return True
|
14,449 | b715a9d7405765e6ab02d81248293f5d39a49efe | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_hypothesisapi
----------------------------------
Tests for `hypothesisapi` module.
"""
import unittest
from hypothesisapi import hypothesisapi
class TestHypothesisapi(unittest.TestCase):
    """Skeleton test case for the hypothesisapi module."""

    def setUp(self):
        """No fixtures needed yet."""

    def test_something(self):
        """Placeholder; add real assertions against hypothesisapi here."""

    def tearDown(self):
        """Nothing to clean up."""


if __name__ == '__main__':
    unittest.main()
|
14,450 | 027d4993312a1cd9f8c320f100e76622ae926deb | import numpy as np
from .strategy import Strategy
from sklearn.cluster import KMeans
from dataset.data import MoleculeDataset
class KMeansSampling(Strategy):
    """Active-learning query strategy: cluster the unlabeled pool's
    embeddings with k-means and pick the sample nearest each centroid."""
    def query(self, n):
        """Return indices (into the full pool) of *n* samples to label."""
        # Indices of samples not yet labeled.
        idxs_unlabeled = np.arange(self.n_pool)[~self.idxs_lb]
        # Optionally subsample the pool to bound the embedding cost.
        if self.args.data_pool is not None:
            idxs_unlabeled = np.random.choice(idxs_unlabeled, self.args.data_pool, replace=False)
        embedding = self.get_embedding(MoleculeDataset(self.data[idxs_unlabeled]))
        cluster_learner = KMeans(n_clusters=n)
        cluster_learner.fit(embedding)
        cluster_idxs = cluster_learner.predict(embedding)
        # Squared distance of each sample to its assigned centroid.
        centers = cluster_learner.cluster_centers_[cluster_idxs]
        dis = (embedding - centers)**2
        dis = dis.sum(axis=1)
        # For each cluster, take the sample closest to its centroid.
        q_idxs = np.array(
            [
                np.arange(embedding.shape[0])[cluster_idxs == i][dis[cluster_idxs == i].argmin()]
                for i in range(n)
            ]
        )
        return idxs_unlabeled[q_idxs]
|
14,451 | 464ba03f787a4b255a7c3b295bc75ec215aee4ae | # coding: utf-8
"""
tweak-api
Tweak API to integrate with all the Tweak services. You can find out more about Tweak at <a href='https://www.tweak.com'>https://www.tweak.com</a>, #tweak.
OpenAPI spec version: 1.0.8-beta.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class TeamBuilderConfig(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, name=None, debug_mode=False, features=None, ui=None, is_default=False, created=None, modified=None, id=None, team_id=None, team=None, portals=None, product_groups=None, product_types=None, product_sizes=None, product_size_materials=None, product_size_materials_rel=None):
"""
TeamBuilderConfig - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'name': 'str',
'debug_mode': 'bool',
'features': 'object',
'ui': 'object',
'is_default': 'bool',
'created': 'datetime',
'modified': 'datetime',
'id': 'str',
'team_id': 'str',
'team': 'Team',
'portals': 'list[Portal]',
'product_groups': 'list[ProductGroup]',
'product_types': 'list[ProductType]',
'product_sizes': 'list[ProductSize]',
'product_size_materials': 'list[ProductSizeMaterial]',
'product_size_materials_rel': 'list[TeamBuilderConfigProductSizeMaterial]'
}
self.attribute_map = {
'name': 'name',
'debug_mode': 'debugMode',
'features': 'features',
'ui': 'ui',
'is_default': 'isDefault',
'created': 'created',
'modified': 'modified',
'id': 'id',
'team_id': 'teamId',
'team': 'team',
'portals': 'portals',
'product_groups': 'productGroups',
'product_types': 'productTypes',
'product_sizes': 'productSizes',
'product_size_materials': 'productSizeMaterials',
'product_size_materials_rel': 'productSizeMaterialsRel'
}
self._name = name
self._debug_mode = debug_mode
self._features = features
self._ui = ui
self._is_default = is_default
self._created = created
self._modified = modified
self._id = id
self._team_id = team_id
self._team = team
self._portals = portals
self._product_groups = product_groups
self._product_types = product_types
self._product_sizes = product_sizes
self._product_size_materials = product_size_materials
self._product_size_materials_rel = product_size_materials_rel
@property
def name(self):
"""
Gets the name of this TeamBuilderConfig.
:return: The name of this TeamBuilderConfig.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this TeamBuilderConfig.
:param name: The name of this TeamBuilderConfig.
:type: str
"""
self._name = name
@property
def debug_mode(self):
"""
Gets the debug_mode of this TeamBuilderConfig.
:return: The debug_mode of this TeamBuilderConfig.
:rtype: bool
"""
return self._debug_mode
@debug_mode.setter
def debug_mode(self, debug_mode):
"""
Sets the debug_mode of this TeamBuilderConfig.
:param debug_mode: The debug_mode of this TeamBuilderConfig.
:type: bool
"""
self._debug_mode = debug_mode
@property
def features(self):
"""
Gets the features of this TeamBuilderConfig.
:return: The features of this TeamBuilderConfig.
:rtype: object
"""
return self._features
@features.setter
def features(self, features):
"""
Sets the features of this TeamBuilderConfig.
:param features: The features of this TeamBuilderConfig.
:type: object
"""
self._features = features
@property
def ui(self):
"""
Gets the ui of this TeamBuilderConfig.
:return: The ui of this TeamBuilderConfig.
:rtype: object
"""
return self._ui
@ui.setter
def ui(self, ui):
"""
Sets the ui of this TeamBuilderConfig.
:param ui: The ui of this TeamBuilderConfig.
:type: object
"""
self._ui = ui
@property
def is_default(self):
"""
Gets the is_default of this TeamBuilderConfig.
:return: The is_default of this TeamBuilderConfig.
:rtype: bool
"""
return self._is_default
@is_default.setter
def is_default(self, is_default):
"""
Sets the is_default of this TeamBuilderConfig.
:param is_default: The is_default of this TeamBuilderConfig.
:type: bool
"""
self._is_default = is_default
@property
def created(self):
"""
Gets the created of this TeamBuilderConfig.
:return: The created of this TeamBuilderConfig.
:rtype: datetime
"""
return self._created
@created.setter
def created(self, created):
"""
Sets the created of this TeamBuilderConfig.
:param created: The created of this TeamBuilderConfig.
:type: datetime
"""
self._created = created
@property
def modified(self):
"""
Gets the modified of this TeamBuilderConfig.
:return: The modified of this TeamBuilderConfig.
:rtype: datetime
"""
return self._modified
@modified.setter
def modified(self, modified):
"""
Sets the modified of this TeamBuilderConfig.
:param modified: The modified of this TeamBuilderConfig.
:type: datetime
"""
self._modified = modified
@property
def id(self):
"""
Gets the id of this TeamBuilderConfig.
:return: The id of this TeamBuilderConfig.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this TeamBuilderConfig.
:param id: The id of this TeamBuilderConfig.
:type: str
"""
self._id = id
@property
def team_id(self):
"""
Gets the team_id of this TeamBuilderConfig.
:return: The team_id of this TeamBuilderConfig.
:rtype: str
"""
return self._team_id
@team_id.setter
def team_id(self, team_id):
"""
Sets the team_id of this TeamBuilderConfig.
:param team_id: The team_id of this TeamBuilderConfig.
:type: str
"""
self._team_id = team_id
@property
def team(self):
"""
Gets the team of this TeamBuilderConfig.
:return: The team of this TeamBuilderConfig.
:rtype: Team
"""
return self._team
@team.setter
def team(self, team):
"""
Sets the team of this TeamBuilderConfig.
:param team: The team of this TeamBuilderConfig.
:type: Team
"""
self._team = team
@property
def portals(self):
"""
Gets the portals of this TeamBuilderConfig.
:return: The portals of this TeamBuilderConfig.
:rtype: list[Portal]
"""
return self._portals
@portals.setter
def portals(self, portals):
"""
Sets the portals of this TeamBuilderConfig.
:param portals: The portals of this TeamBuilderConfig.
:type: list[Portal]
"""
self._portals = portals
@property
def product_groups(self):
"""
Gets the product_groups of this TeamBuilderConfig.
:return: The product_groups of this TeamBuilderConfig.
:rtype: list[ProductGroup]
"""
return self._product_groups
@product_groups.setter
def product_groups(self, product_groups):
"""
Sets the product_groups of this TeamBuilderConfig.
:param product_groups: The product_groups of this TeamBuilderConfig.
:type: list[ProductGroup]
"""
self._product_groups = product_groups
@property
def product_types(self):
"""
Gets the product_types of this TeamBuilderConfig.
:return: The product_types of this TeamBuilderConfig.
:rtype: list[ProductType]
"""
return self._product_types
@product_types.setter
def product_types(self, product_types):
"""
Sets the product_types of this TeamBuilderConfig.
:param product_types: The product_types of this TeamBuilderConfig.
:type: list[ProductType]
"""
self._product_types = product_types
@property
def product_sizes(self):
"""
Gets the product_sizes of this TeamBuilderConfig.
:return: The product_sizes of this TeamBuilderConfig.
:rtype: list[ProductSize]
"""
return self._product_sizes
@product_sizes.setter
def product_sizes(self, product_sizes):
"""
Sets the product_sizes of this TeamBuilderConfig.
:param product_sizes: The product_sizes of this TeamBuilderConfig.
:type: list[ProductSize]
"""
self._product_sizes = product_sizes
@property
def product_size_materials(self):
"""
Gets the product_size_materials of this TeamBuilderConfig.
:return: The product_size_materials of this TeamBuilderConfig.
:rtype: list[ProductSizeMaterial]
"""
return self._product_size_materials
@product_size_materials.setter
def product_size_materials(self, product_size_materials):
"""
Sets the product_size_materials of this TeamBuilderConfig.
:param product_size_materials: The product_size_materials of this TeamBuilderConfig.
:type: list[ProductSizeMaterial]
"""
self._product_size_materials = product_size_materials
@property
def product_size_materials_rel(self):
"""
Gets the product_size_materials_rel of this TeamBuilderConfig.
:return: The product_size_materials_rel of this TeamBuilderConfig.
:rtype: list[TeamBuilderConfigProductSizeMaterial]
"""
return self._product_size_materials_rel
@product_size_materials_rel.setter
def product_size_materials_rel(self, product_size_materials_rel):
"""
Sets the product_size_materials_rel of this TeamBuilderConfig.
:param product_size_materials_rel: The product_size_materials_rel of this TeamBuilderConfig.
:type: list[TeamBuilderConfigProductSizeMaterial]
"""
self._product_size_materials_rel = product_size_materials_rel
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
14,452 | d3cd2f591dfd8cf8c4a4e3151270083edce94cac | from flask import Flask, request
import ml
import pandas as pd
import numpy as np
import json
from utils import allowed_file
app = Flask(__name__)
# data = pd.read_csv('DELL EMEA TEST DATASET.csv')
# df = pd.DataFrame(data)
# df["slow_dispatch"] = np.where(df["velocity"].astype('int64') >= 30, 1, 0)
# df = df.dropna()
# df = df.drop(["slow_dispatch", "velocity"], axis=1)
# @app.route("/")
# def test():
# data = encode.encode(df)
# preds = predict.predict(data)
# preds = np.array(preds)
# preds_flat = preds.flatten()
# x = [str(x) for x in preds_flat]
# return json.dumps(x)
@app.route("/predict", methods=["GET", "POST"])
def predict():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
return {"message": "no file selected"}
file = request.files['file']
# if user does not select file, browser also
# submit an empty part without filename
if file.filename == '':
return {"message": "no file selected"}
if file and allowed_file(file.filename):
df = pd.read_csv(file)
df["slow_dispatch"] = np.where(df["velocity"].astype('int64') >= 30, 1, 0)
df = df.dropna()
df = df.drop(["slow_dispatch", "velocity"], axis=1)
data = ml.encode(df)
preds = ml.predict(data)
preds = np.array(preds)
preds_flat = preds.flatten()
x = [str(x) for x in preds_flat]
return json.dumps(x)
return '''
<!doctype html>
<title>Upload new File</title>
<h1>Upload new File</h1>
<form method=post enctype=multipart/form-data>
<input type=file name=file>
<input type=submit value=Upload>
</form>
'''
if __name__ == '__main__':
app.run(debug=True) |
14,453 | 60014a1371e938704aa59b4132ae5fd2dfe29f15 | from pymongo import MongoClient
import requests
import json
client = MongoClient("localhost", 27017)
URL_decrypt_dev = "http://10.0.5.4:8108/crypto/v1/decrypt?env=dev&orgname=hire&domain_name=scikey"
#URL_decrypt_qa = "http://10.0.4.4:8108/crypto/v1/decrypt?env=dev&orgname=hire&domain_name=scikey"
db = client.hire_scikey
collection = db.candidates
main_arr = []
location = ["Rajkot"]
skill_name = ["Ubuntu"]
for i in location:
for j in skill_name:
pipeline = [
{"$match" : {"$and" : [ {"professional_skills" : {"$exists" : True}} , {"preferred_job_locations" : {"$exists" : True}} ]}},
{"$unwind" : "$preferred_job_locations"},
{"$unwind" : "$professional_skills"},
{"$match" : {"$and" : [ {"professional_skills.name" : j}, {"preferred_job_locations.city_name" : i} ]}},
{"$project" : {"_id" : 0, "email" : "$email", "name" : "$name"}}
]
#[
# {"$match" : {"$and" : [ {"professional_skills" : {"$exists" : True}} , {"preferred_job_locations" : {"$exists" : True}} ]}},
# {"$unwind" : "$preferred_job_locations"},
# {"$unwind" : "$professional_skills"},
# {"$match" : {"$and" : [ {"professional_skills.name" : j}, {"preferred_job_locations.city_name" : i} ]}},
# {"$project" : {"_id" : 0, "email" : "$email", "contact" : "$contact", "skill_name" : "$professional_skills.name", "city_name" : "$preferred_job_locations.city_name"}}
#]
my_list = list(db.candidates.aggregate(pipeline))
email_list = [li['email'] for li in my_list]
name_list = [li['name'] for li in my_list]
print("Data for skill_name " + j + " and for location " + i + " Fetching ..." )
# a = 0
temp_arr = []
for z,y in zip(email_list,name_list):
decrypt_data = [{
"resources": {
"email": z
}
}]
result = requests.post(url=URL_decrypt_dev, data=json.dumps(decrypt_data),
headers={"Content-Type": "application/json"})
temp_dict = {"email" : result.json()['data'][0]['resources']['email'], "name" : y,"location" : i,"skill" : j}
temp_arr.append(temp_dict)
final_list = [dict(t) for t in {tuple(d.items()) for d in temp_arr}]
print(final_list)
print("\n")
print("\n") |
14,454 | e0950ad92b2b39650b21215c9a5f0278ac62f4a1 | # Django
from django.apps import AppConfig
class ClassroomsAppConfig(AppConfig):
name = "asistance.classrooms"
verbose_name = "Classrooms" |
14,455 | abc8762a3aa2ae308c07327716fdf2fd4a98b98c | from flask import Flask, render_template, jsonify, request
app = Flask(__name__)
API_RESULTS = {
"Arby's": {"lng":-122.429763, "lat":37.680394},
"Hackbright":{"lng":-122.411540, "lat":37.788862},
"Barney's": {'lat': 37.8781148,'lng': -122.2693882}
}
@app.route("/")
def show_homepage():
"""show the index page"""
return render_template("blank-slate.html")
@app.route("/find-lat-long")
def find_lat_long():
# get what the user typed
user_val = request.args.get("place_to_eat")
print '\n\n\nHERE IS WHAT THE USER TYPED'
print user_val
print '********************\n\n\n\n\n'
# use an api to find lat long
something_found = API_RESULTS.get(user_val)
if something_found:
return jsonify({'result': something_found})
else:
return jsonify({'result': 'Your result was not found.', 'meggie': 'is cool. :)'})
@app.route("/profile/<place_name>")
def show_profile(place_name):
latlong = API_RESULTS.get(place_name)
if latlong:
return render_template("another.html", latlong=latlong)
else:
print "\n\n\nNOT FOUND!!\n\n\n"
return render_template("blank-slate.html")
@app.route('/todo')
def show_todo_app():
    """Render the todo single-page app template."""
    return render_template('todo.html')
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0")
|
14,456 | 380166d306e568505ab753eb5ec61a331f76ddb6 | """
Get a full absolute path a file
"""
import os
def full_path(file):
return os.path.abspath(os.path.expanduser(file))
|
14,457 | e289a5dbf3cd87ac61ed89c45775951c7caf65b1 | var1=3
var2=34
var3=112
newfile=open("storedata.txt","w")
newfile.write(str(var1)+"\n")
newfile.write(str(var2)+"\n")
newfile.write(str(var3)+"\n")
newfile.close()
s1=["david","John","sara","michael","lucy","Brian"]
newfile=open("dostoredata.txt","w")
for name in s1:
newfile.write(name+"\n")
newfile.close()
s1=["david","John","sara","michael","lucy","Brian"]
newfile=open("usestoredata.txt","w")
newfile.writelines(s1)
newfile.close()
s1=["david","John","sara","michael","lucy","Brian"]
newfile=open("dousestoredata.txt","w")
newfile.write("\n".join(s1))
newfile.close()
s1=["david","John","sara","michael","lucy","Brian"]
newfile=open("1usestoredata.txt","w")
for name in s1:
print(name, file = newfile)
newfile.close()
|
14,458 | e6ddae835eb1ba4f32c4839d14e61042cc690040 | from DbConnector_MySQL import DbConnector_MySQL
from decouple import config
from tabulate import tabulate
from haversine import haversine
import datetime
import numpy as np
import pandas as pd
class QueryExecutor:
def __init__(self):
self.connection = DbConnector_MySQL()
self.db_connection = self.connection.db_connection
self.cursor = self.connection.cursor
def query_one(
self, table_name_users, table_name_activities, table_name_trackpoints
):
"""
How many users, activities and trackpoints are there in the dataset (after it is inserted into the database).
"""
query = (
"SELECT UserCount.NumUsers, ActivitiesCount.NumActivities, TrackpointCount.NumTrackpoints FROM "
"(SELECT COUNT(*) as NumUsers FROM %s) AS UserCount,"
"(SELECT COUNT(*) as NumActivities FROM %s) AS ActivitiesCount,"
"(SELECT COUNT(*) as NumTrackpoints FROM %s) AS TrackpointCount"
)
self.cursor.execute(
query % (table_name_users, table_name_activities,
table_name_trackpoints)
)
rows = self.cursor.fetchall()
print(tabulate(rows, headers=self.cursor.column_names))
return rows
def query_two(self, table_name):
"""
Find the average, minimum and maximum number of activities per user.
"""
query = (
"SELECT MAX(count) as Maximum,"
"MIN(count) as Minimum,"
"AVG(count) as Average "
"FROM (SELECT COUNT(*) as count FROM %s GROUP BY user_id) as c"
)
self.cursor.execute(query % (table_name))
rows = self.cursor.fetchall()
print("Data from table %s, tabulated:" % table_name)
print(tabulate(rows, headers=self.cursor.column_names))
return rows
def query_three(self, table_name_activities):
"""
Find the top 10 users with the highest number of activities
"""
query = (
"SELECT user_id, COUNT(*) as Count "
"FROM %s "
"GROUP BY user_id "
"ORDER BY Count DESC "
"LIMIT 10"
)
self.cursor.execute(query % table_name_activities)
rows = self.cursor.fetchall()
print(tabulate(rows, headers=self.cursor.column_names))
return rows
def query_four(self, table_name):
"""
Find the number of users that have started the activity in one day and ended the activity the next day.
NOTE : We assuming counting number of distinct users
"""
query = (
"SELECT user_id, COUNT(*) as NumActivites "
"FROM %s "
"WHERE DATEDIFF(start_date_time, end_date_time) = -1 "
"GROUP BY user_id "
)
self.cursor.execute(query % (table_name))
rows = self.cursor.fetchall()
print(tabulate(rows, headers=self.cursor.column_names))
return rows
def query_five(self, table_name_activities):
"""
Find activities that are registered multiple times. You should find the query
even if you get zero results.
NOTE: We inlcude transportation_mode
"""
query = (
"SELECT user_id, transportation_mode, start_date_time, end_date_time, COUNT(*) AS NumDuplicates "
"FROM %s "
"GROUP BY user_id, transportation_mode, start_date_time, end_date_time "
"HAVING NumDuplicates >1 "
)
self.cursor.execute(query % table_name_activities)
rows = self.cursor.fetchall()
print(tabulate(rows, headers=self.cursor.column_names))
return rows
def query_six(self, table_name_activities, table_name_trackpoints):
"""
Find the number of users which have been close to each other in time and
space (Covid-19 tracking). Close is defined as the same minute (60 seconds)
and space (100 meters).
"""
query = (
"SELECT t1.user_id, t1.lat, t1.lon, t2.user_id, t2.lat, t2.lon "
"FROM (SELECT user_id, lat, lon, date_time FROM %s inner join %s on Activity.id=TrackPoint.activity_id) as t1, "
"(SELECT user_id, lat, lon, date_time FROM Activity inner join TrackPoint on Activity.id=TrackPoint.activity_id) as t2 "
"where t1.user_id != t2.user_id "
"AND ABS(TIMESTAMPDIFF(SECOND,t1.date_time, t2.date_time)) <= 60"
)
self.cursor.execute(
query % (table_name_activities, table_name_trackpoints))
rows = self.cursor.fetchall()
print(tabulate(rows, headers=self.cursor.column_names))
user_dict = dict()
for row in rows:
if haversine((row[1], row[2]), (row[4], row[5]), unit="km") <= 0.1:
if row[0] in user_dict:
user_dict[row[0]].append(row[3])
else:
user_dict[row[0]] = [row[3]]
users = 0
for value in users_dict.values():
users += len(value)
users = users / 2
print(users)
return users
def query_seven(self, table_name_activities):
"""
Find all users that have never taken a taxi.
NOTE: We only consider labeled activities, but not all activities for that user have to be labeled to consider that user to never have taken a taxi
"""
query = (
"SELECT user_id "
"FROM %s "
"WHERE transportation_mode != 'taxi' AND transportation_mode <> 'None' "
"GROUP BY user_id "
)
self.cursor.execute(query % table_name_activities)
rows = self.cursor.fetchall()
print(tabulate(rows, headers=self.cursor.column_names))
return rows
def query_eight(self, table_name):
"""
Find all types of transportation modes and count how many distinct users that
have used the different transportation modes. Do not count the rows where the
transportation mode is null.
"""
query = (
"SELECT transportation_mode as TransportationMode, COUNT(DISTINCT user_id) as NumDistinctUsers "
"FROM %s "
"WHERE transportation_mode <> 'None' "
"GROUP BY transportation_mode"
)
self.cursor.execute(query % (table_name))
rows = self.cursor.fetchall()
print(tabulate(rows, headers=self.cursor.column_names))
return rows
def query_nine_a(self, table_name_activities):
"""
a) Find the year and month with the most activities.
NOTE: We assume that if activities start in one month (year) and end the next month (year)
(e.g., start 30th december and end 1st january), they are counted regarding to the start_date_time
"""
query = (
"SELECT YEAR(start_date_time) as Year, MONTH(start_date_time) as Month, COUNT(*) AS ActivityCount "
"FROM %s "
"GROUP BY YEAR(start_date_time), MONTH(start_date_time) "
"ORDER BY ActivityCount DESC "
"LIMIT 1 "
)
self.cursor.execute(query % table_name_activities)
rows = self.cursor.fetchall()
print(tabulate(rows, headers=self.cursor.column_names))
return rows
def query_nine_b(self, table_name_activities):
"""
b) Which user had the most activities this year and month, and how many
recorded hours do they have? Do they have more hours recorded than the user
with the second most activities?
"""
query = (
"SELECT user_id, COUNT(*) AS ActivityCount"
", SUM(TIMESTAMPDIFF(HOUR, start_date_time, end_date_time)) as HoursActive "
"FROM %s "
"WHERE YEAR(start_date_time) = '2008' AND MONTH(start_date_time) = '11' "
"GROUP BY user_id "
"ORDER BY ActivityCount DESC "
"LIMIT 10"
)
self.cursor.execute(query % table_name_activities)
rows = self.cursor.fetchall()
print(tabulate(rows, headers=self.cursor.column_names))
not_var = ""
if rows[0][0][0] < rows[1][0][0]:
not_var = "NOT"
print(
"The user with the most activities does",
not_var,
"have more hours than the user with the second most activities.",
)
return rows
def query_ten(self, table_name_activities, table_name_trackpoints):
"""
Find the total distance (in km) walked in 2008, by user with id=112.
"""
query = (
"SELECT Activity.id,lat,lon "
"FROM %s INNER JOIN %s on Activity.id = TrackPoint.activity_id "
"WHERE user_id='112' and "
"EXTRACT(YEAR FROM date_time) = 2008 "
"and transportation_mode='walk' "
"ORDER BY date_time ASC"
)
self.cursor.execute(
query % (table_name_activities, table_name_trackpoints))
rows = self.cursor.fetchall()
activity_dict = dict()
for row in rows:
if row[0] in activity_dict:
activity_dict[row[0]].append((row[1], row[2]))
else:
activity_dict[row[0]] = [(row[1], row[2])]
distance = 0
for value in activity_dict.values():
for i in range(len(value) - 1):
distance += haversine(value[i], value[i + 1], unit="km")
print(distance)
def query_eleven(self, table_name_activities, table_name_trackpoints):
"""
Find the top 20 users who have gained the most altitude meters
"""
query = (
"SELECT user_id, SUM(AltitudeTPTable.altitudeGained)*0.3048 AS MetersGained "
"FROM %s INNER JOIN "
" (SELECT id, activity_id, altitude, "
" LAG(altitude) OVER (PARTITION BY activity_id) as PreviousAltitude, "
" altitude - LAG(altitude) OVER(PARTITION BY activity_id) AS altitudeGained "
" FROM %s "
" WHERE altitude != -777 "
" ) as AltitudeTPTable "
"ON Activity.id = AltitudeTPTable.activity_id "
"WHERE altitudeGained > 0 "
"GROUP BY user_id "
"ORDER BY MetersGained DESC "
"LIMIT 20"
)
self.cursor.execute(
query % (table_name_activities, table_name_trackpoints))
rows = self.cursor.fetchall()
print(tabulate(rows, headers=self.cursor.column_names))
return rows
def query_twelve(self, table_name_activity, table_name_trackpoint):
"""
Find all users who have invalid activities, and the number of invalid activities per user
- An invalid activity is defined as an activity with consecutive trackpoints where the timestamps deviate with at least 5 minutes.
"""
query = (
"WITH data as (SELECT user_id, date_time, TrackPoint.id as tid, activity_id, LEAD(date_time) OVER(PARTITION BY activity_id ORDER BY TrackPoint.id ASC) AS next_date_time, TIMESTAMPDIFF(MINUTE, date_time, LEAD(date_time) OVER(PARTITION BY activity_id ORDER BY TrackPoint.id ASC)) as difference FROM %s INNER JOIN %s on Activity.id = TrackPoint.activity_id ) "
"SELECT user_id, COUNT(DISTINCT activity_id) AS NumInvalid "
"FROM data "
"WHERE difference >= 5 "
"GROUP BY user_id HAVING NumInvalid >= 1 "
)
self.cursor.execute(
query % (table_name_activity, table_name_trackpoint))
rows = self.cursor.fetchall()
print(tabulate(rows, headers=self.cursor.column_names))
return rows
def show_tables(self):
self.cursor.execute("SHOW TABLES")
rows = self.cursor.fetchall()
print(tabulate(rows, headers=self.cursor.column_names))
def main():
executor = None
try:
executor = QueryExecutor()
executor.show_tables()
print("Executing Queries: ")
_ = executor.query_one(
table_name_users="User",
table_name_activities="Activity",
table_name_trackpoints="TrackPoint",
)
_ = executor.query_two(table_name="Activity")
_ = executor.query_three(table_name_activities="Activity")
_ = executor.query_four(table_name="Activity")
_ = executor.query_five(table_name_activities="Activity")
_ = executor.query_six(
table_name_activities="Activity", table_name_trackpoints="TrackPoint"
)
_ = executor.query_seven(table_name_activities="Activity")
_ = executor.query_eight(table_name="Activity")
_ = executor.query_nine_a(table_name_activities="Activity")
_ = executor.query_nine_b(table_name_activities="Activity")
_ = executor.query_ten(
table_name_activities="Activity", table_name_trackpoints="TrackPoint"
)
_ = executor.query_eleven(
table_name_activities="Activity", table_name_trackpoints="TrackPoint"
)
_ = executor.query_twelve(
table_name_activity="Activity", table_name_trackpoint="TrackPoint"
)
except Exception as e:
print("ERROR: Failed to use database:", e)
finally:
if executor:
executor.connection.close_connection()
if __name__ == "__main__":
main()
|
14,459 | c4243cac20963cca5a097fe09fb8c2dd67268760 |
class FileSorter(unittest.TestCase):
def_sort_file(files):
|
14,460 | 93d2fbc99db7a8b42bfb120ab70e4ddc15c90a05 | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 11 07:58:19 2020
@author: migue
"""
import pandas as pd
path_guardado = "C:/Users/migue/OneDrive/Documentos/EPN/Sexto Semestre/Desarrollo web con python/Github/py-reina-gamboa-miguel-esteban/04 - Pandas/data/artwork_data.pickle"
df = pd.read_pickle(path_guardado)
filtrado_horizontal = df.loc[1035] # Serie
print(filtrado_horizontal)
print(filtrado_horizontal['artist'])
print(filtrado_horizontal.index)
serie_vertical = df['artist']
print(serie_vertical)
print(serie_vertical.index)
print(df[['artist']])
# Filtrado por indice
df_1035 = df [df.index == 1035]
segundo = df.loc[1035] #Flitrar por indice (1)
segundo = df.loc[[1035, 1036]] # Filtrar por arr indices
segundo = df.loc[3:5] # Filtrando desdex indice hasta indice
segundo = df.loc[df.index == 1035] #filtrar po arreglo -> T or F
segundo = df.loc[1035, 'artist'] # 1 Indice
segundo = df.loc[1035, ['artist', 'medium']] #Varios indices
#print(df.loc[0]) # Indice dentro del DataFrame
#print(df[0]) #Indice dentro del DataFrame
#iloc -> acceder grupo filas y columnas - indices en 0
tercero = df.iloc[0]
tercero = df.iloc[[0, 1]]
tercero = df.iloc[0: 10]
tercero = df.iloc[df.index == 1035]
tercero = df.iloc[0: 10, 0: 4] # Filtrado indices por rango de indices 0: 4
##############################################################################
datos = {
"nota 1": {
"Pepito":7,
"Juanita":8,
"Maria":9
},
"nota 2": {
"Pepito":7,
"Juanita":8,
"Maria":9
},
"disciplina": {
"Pepito":4,
"Juanita":9,
"Maria":2
}
}
notas = pd.DataFrame(datos)
condicion_nota = notas["nota 1"] <= 7
condicion_nota_dos = notas["nota 2"] <= 7
condicion_disc = notas["disciplina"] <= 7
mayores_siete_completo = notas.loc[condicion_nota]
mayores_siete = notas.loc[condicion_nota, ["nota 1"]]
pasaron = notas.loc[condicion_nota][condicion_disc][condicion_nota_dos]
notas.loc["Maria", "disciplina"] = 7
notas.loc[:, "disciplina"] = 7
########################## Promedio de las tres notas.########################
promedio_nota_uno = (notas["nota 1"][0] + notas["nota 1"][1] + notas["nota 1"][2]) / 3
promedio_nota_dos = (notas["nota 2"][0] + notas["nota 2"][1] + notas["nota 2"][2]) / 3
promedio_disciplina = (notas["disciplina"][0] + notas["disciplina"][1] + notas["disciplina"][2]) / 3
promedio_general = (promedio_nota_uno + promedio_nota_dos + promedio_disciplina) / 3
##############################################################################
|
14,461 | 1caef96820af02fdf1be4ca43d335d0ea2290699 | maximum = 0
def maxPathSum(self, A):
global maximum
if not A:
return 0
val = A.val + self.maxPathSum(A.left) + self.maxPathSum(A.right)
if val > maximum:
maximum = val
return maximum
|
14,462 | 9f3f402411600c9421a62369882cfb689d067748 | # -----------------------------------------------------------------------
#
# nir library v0.1.3
# ------------------
# (Newman, Ian R = nir)
# https://github.com/irnewman/nir
#
# This is a library of tools for the Cognitive Science Lab at the
# University of Saskatchewan. Please note that this is a work in progress
# and is written by an amateur; use at your own risk!
#
# All correspondence should be directed to:
#
# Ian R. Newman
# University of Saskatchewan
# ian.newman@usask.ca
#
# -----------------------------------------------------------------------
# ---------------------------------------------
# ---- Import Libraries
# ---------------------------------------------
from psychopy import event #visual, data, sound, gui, core, logging
import os
# import sys
# import csv
# from itertools import product
import pandas
# ---------------------------------------------
# ---------------------------------------------
# ---- Save Data to File
# ---------------------------------------------
def save_to_file(data_frame, header, file_name, p_num):
"""
Function: save data frame to file
Arguments:
data_frame = list of arrays, each array is a variable/column
header = list of length data, column names for the data frame
file_name = full path to save the file
Note: if the arrays are of different lengths, this will not work properly
"""
# create directory for participant
folder = 'data' + os.sep + str(p_num)
if not os.path.isdir(folder):
os.makedirs(folder)
# transpose the data and add header
df_temp = pandas.DataFrame(data_frame)
df = df_temp.transpose()
df.columns = header
# save to file
save_file = folder + os.sep + file_name
df.to_csv(save_file, index=False)
return
# ---------------------------------------------
# ---------------------------------------------
# ---- Save Screen to Image File
# ---------------------------------------------
def save_to_image(win, image_name):
"""
Function: save stimuli screenshot as png
Arguments:
win = psychopy window
image_name = name of image file to save
Note: may change to save to particular folder in the future
"""
# file name as png
save_name = image_name + ".png"
# save the image
win.getMovieFrame(buffer='front')
win.saveMovieFrames(save_name)
# clear buffer
event.clearEvents()
return
# ---------------------------------------------
|
14,463 | 9854a3bf9e56cee33a3baa260962236e7b0c808b | '''
# not used, no time yo.
This file stores constants and global configuration variables
'''
import pickle
# constants
MODELS_DIR = 'models/'
MAIN_RESULTS_DIR = 'results/'
ALL_RESULTS_FILE = MAIN_RESULTS_DIR + 'all_results.pkl'
# settable vars
hyperparams = {
'preprocessing': {},
"dataloader": {
"params": {
'batch_size': 32,
'shuffle': True
}
},
"optimizer": {},
"loss_function": {},
"model": {}
}
def empty_results():
output = open(ALL_RESULTS_FILE, 'wb')
pickle.dump([], output)
output.close() |
14,464 | 07b651e2563358ec7008d52b10d44664c9bac4c4 | #!/bin/python
#*******************************************************************************
# @File's name : auto_test_command.py
# @Company : Light Co.
# @Author : Hung.Bui - Light's QC team
# @Revision : 1.0.0
# @Date : 16-August-2016
#*******************************************************************************
#*******************************************************************************
# REVISION HISTORY
#*******************************************************************************
# * 1.0.0 16-August-2016 Initial revision
#
#*******************************************************************************
#*******************************************************************************
# Copyright
# Light Co.'s copyright 2016
#*******************************************************************************
#*******************************************************************************
# @Brief:
# This script is use to execute whole LCC COMMAND for Light.
#*******************************************************************************
#*******************************************************************************
# IMPORT STANDARD MODULES
#*******************************************************************************
import os
import sys
import multiprocessing
import getpass
import subprocess
import pexpect
#*******************************************************************************
# ADD INCLUDE PATHS
#*******************************************************************************
sys.path.append(os.path.join(os.getcwd(), "../../../04_COMMON/qc_tools/common"))
#sys.path.append(os.path.join(os.getcwd(), "../hal"))
#*******************************************************************************
# IMPORT DEVELOMENT MODULES
#*******************************************************************************
import serial_port
import spec_proc
#*******************************************************************************
# NONE-VOLATILE VARIABLES
#*******************************************************************************
modules_list = {
'UCID' : '1' ,
}
current_dir = os.getcwd()
pw = getpass.getpass()
#*******************************************************************************
# INPUTS PROCESSING
#*******************************************************************************
#*******************************************************************************
# LOCAL FUNCTION
#*******************************************************************************
def modules_test(testlist={}):
print "********************************************************************"
print "EXECUTE LCC COMMAND AUTOMATION TEST"
print "********************************************************************"
# Create logs folder
os.system("mkdir logs")
for module in testlist.keys():
module_test(testlist, module)
# Rename report file
os.system("mv -f test_report.xls Test_Reports_" + module_name + ".xls")
# Create log folder
os.system("mkdir logs/" + module)
# Move all log file into log folder
os.system("mv -f " + module + "*.txt logs/" + module)
# End of run_test()
#*******************************************************************************
def module_test(tests={}, module=""):
    """Run every test case from the module's spec sheet on the target board.

    For each test id the board is reset over `make reset`, the test binary is
    executed, and the serial output is streamed into <test_id>.txt; the logs
    are then collected and written into a report via spec_proc.report().

    NOTE(review): `tests` is never used and its mutable {} default is shared
    across calls -- confirm whether the parameter can be dropped.
    Relies on module-level globals: current_dir, spec_proc, serial_port,
    pexpect, pw, multiprocessing, os.
    """
    print "*******************************************************************"
    print "Welcome to Light LCC COMMAND verification"
    print "*******************************************************************"
    test_ids = []
    scenarios = []
    handlers = []
    approaches = []
    cflags = []
    logs = []
    results = []
    # STEP 1: Read test specification (spec/test_command_<module>.xls)
    spec_path = current_dir + "/../spec/test_" + "command_" + module.lower() + ".xls"
    # Read specification (fills the list arguments in place)
    spec_proc.read_spec(spec_path, test_ids, scenarios, \
        handlers, approaches, cflags)
    # Configure serial port
    serial_obj = serial_port.xSerialInit("/dev/ttyUSB1", "115200")
    # STEP 2: Run all tests
    index = 0
    while index < len(test_ids):
        print '%4s' % index, " | ", '%6s' % test_ids[index], " | ",\
            '%8s' % handlers[index]
        # Event used to stop the serial streaming task
        xTerminateEvent = multiprocessing.Event()
        # Indicate result file
        xResult = "./result.txt"
        # Remove stale result file from a previous run
        if os.path.exists(xResult) == True:
            os.system("rm -f " + xResult)
        # Create streaming task capturing serial output into xResult
        xStreamingTask = multiprocessing.Process(name='xStreamingTask',
            target=serial_port.xSerialStreamTask,
            args=(serial_obj, xResult, xTerminateEvent,))
        # Start streaming task
        xStreamingTask.start()
        # Go to workspace
        os.chdir(current_dir + "/../../../05_WORKSPACE")
        # Reset the ASIC board (prompts for a password)
        child = pexpect.spawn('make reset')
        child.expect_exact('Password:')
        # Send password (module-level global)
        child.sendline(pw)
        # Go to test module
        os.chdir(current_dir + "/test_ucid")
        # Give the board a moment to settle (3s)
        os.system("sleep 3")
        # Execute test case binary
        child = pexpect.spawn("./" + handlers[index])
        # Get back to the tool directory
        os.chdir(current_dir)
        # Wait for the test to finish (20s)
        os.system("sleep 20")
        # Stop the streaming task
        xTerminateEvent.set()
        # Rename the captured output after the test id
        os.system("mv ./result.txt " + test_ids[index] + ".txt")
        # Move to next test case
        index += 1
    # Deinitialize serial port
    serial_port.xSerialDeinit(serial_obj)
    # Step 3: Write to report
    #spec_proc.report(spec_path, test_ids, logs, results)
    # Step 4: Collect per-test logs
    index = 0
    while index < len(test_ids):
        # Read test result file
        f = open(test_ids[index] + ".txt", "r")
        record_start = False
        data = ""
        for line in f.readlines():
            # Start capturing at the marker line; keep everything after it.
            if (record_start == False):
                if ("Start task_cam_ctrl: c5" in line):
                    record_start = True;
                    data += line
            else:
                data += line
        # Save log
        logs.append(data)
        # Result is not evaluated automatically here
        results.append("NA")
        # Move to next instance
        index += 1
    # Write the collected logs into the report spreadsheet
    spec_proc.report(spec_path, test_ids, logs, results)
#*******************************************************************************
if __name__ == '__main__':
    # Module name comes from the first CLI argument (case-insensitive).
    module_name = str(sys.argv[1]).upper()
    # Execute module test; modules_list is presumably defined earlier in
    # this file (TODO confirm -- not visible in this chunk).
    module_test(modules_list, module_name)
    # Rename report file so each module keeps its own report
    os.system("mv -f test_report.xls Test_Reports_Module_" + module_name + ".xls")
#*******************************************************************************
# END OF FILE
#*******************************************************************************
|
14,465 | d7c8e2d3f60377a7e92e8bff3245ad83f2ce729e | #!/usr/bin/env python3
# Written by mohlcyber v.0.1 (17.02.2021)
# based on a process name script will automatically launch MVISION EDR query
import sys
import getpass
import time
import requests
import logging
import json
from argparse import ArgumentParser, RawTextHelpFormatter
class EDR():
    """Client for the MVISION EDR REST API.

    Searches for processes by name across managed hosts and can optionally
    kill the matching processes via a remediation "reaction".
    Relies on the module-level ``args`` namespace parsed in ``__main__``
    (region, credentials, process name, reaction flag, log level).
    """

    def __init__(self):
        # Console logger configured from --loglevel.
        self.logger = logging.getLogger('logs')
        loglevel = args.loglevel
        self.logger.setLevel(loglevel)
        ch = logging.StreamHandler()
        formatter = logging.Formatter("%(asctime)s;%(levelname)s;%(message)s")
        ch.setFormatter(formatter)
        self.logger.addHandler(ch)

        # Region selects the regional API endpoint.
        if args.region == 'EU':
            self.base_url = 'https://api.soc.eu-central-1.mcafee.com'
        elif args.region == 'US':
            self.base_url = 'https://api.soc.mcafee.com'
        elif args.region == 'SY':
            self.base_url = 'https://api.soc.ap-southeast-2.mcafee.com'

        self.verify = True
        # Session carries the bearer token after auth().
        self.request = requests.Session()

        user = args.user
        pw = args.password
        creds = (user, pw)
        self.auth(creds)

        # Process name to search for.
        self.pname = args.process

    def auth(self, creds):
        """Authenticate with basic credentials and install the bearer token
        on the shared session; exits the process on failure."""
        r = requests.get(self.base_url + '/identity/v1/login', auth=creds)
        res = r.json()
        if r.status_code == 200:
            token = res['AuthorizationToken']
            headers = {'Authorization': 'Bearer {}'.format(token)}
            self.request.headers.update(headers)
            self.logger.info('Successfully authenticated.')
        else:
            self.logger.error('Something went wrong during the authentication')
            sys.exit()

    def search(self):
        """Start an active-response search for processes whose name contains
        self.pname; return the search id, or exit on unexpected errors."""
        queryId = None
        payload = {
            "projections": [
                {
                    "name": "HostInfo",
                    "outputs": ["hostname", "ip_address"]
                }, {
                    "name": "Processes",
                    "outputs": ["name", "id", "parentimagepath", "started_at"]
                }
            ],
            "condition": {
                "or": [{
                    "and": [{
                        "name": "Processes",
                        "output": "name",
                        "op": "CONTAINS",
                        "value": str(self.pname)
                    }]
                }]
            }
        }

        res = self.request.post(self.base_url + '/active-response/api/v1/searches', json=payload)

        try:
            if res.status_code == 200:
                queryId = res.json()['id']
                self.logger.info('MVISION EDR search got started successfully')
            else:
                self.logger.error('Could not find the query ID.')
        except Exception as e:
            self.logger.error('Could not find the query ID. Error: {}'.format(e))
            sys.exit()

        # None when the API returned a non-200 without raising.
        return queryId

    def search_status(self, queryId):
        """Return True once the search has status FINISHED, else False."""
        status = False
        res = self.request.get(self.base_url + '/active-response/api/v1/searches/{}/status'.format(str(queryId)))
        if res.status_code == 200:
            if res.json()['status'] == 'FINISHED':
                status = True
            else:
                self.logger.info('Search still in process. Status: {}'.format(res.json()['status']))
        return status

    def search_result(self, queryId):
        """Return a list of {row_id: process_id} dicts for the finished
        search; exits the process on any retrieval error."""
        res = self.request.get(self.base_url + '/active-response/api/v1/searches/{}/results'.format(str(queryId)))
        if res.status_code == 200:
            try:
                items = res.json()['totalItems']
                react_summary = []
                for item in res.json()['items']:
                    react_dict = {}
                    # Map the result row id to the PID needed for killProcess.
                    react_dict[item['id']] = item['output']['Processes|id']
                    react_summary.append(react_dict)
                self.logger.debug(json.dumps(res.json()))
                self.logger.info('MVISION EDR search got {} responses for this process name. {} '
                                 .format(items, len(react_summary)))
                return react_summary
            except Exception as e:
                self.logger.error('Something went wrong to retrieve the results. Error: {}'.format(e))
                sys.exit()
        else:
            self.logger.error('Something went wrong to retrieve the results.')
            sys.exit()

    def get_reactions(self):
        """Return the catalog of available reactions (debug helper)."""
        res = self.request.get(self.base_url + '/active-response/api/v1/catalog/reactions')
        if res.status_code == 200:
            return res.json()
        else:
            self.logger.error('Something went wrong to retrieve reactions.: {0}'.format(str(res.text)))

    def reaction_execution(self, queryId, systemId, pid):
        """Execute a killProcess reaction against one search-result row;
        return the reaction id or None, exiting on response parse errors."""
        reactionId = None
        payload = {
            "action":"killProcess",
            "searchResultsArguments": {
                "searchId": int(queryId),
                "rowsIds": [str(systemId)],
                "arguments": {}
            },
            "provider": "AR",
            "actionInputs": [
                {
                    "name": "pid",
                    "value": str(pid)
                }
            ]
        }

        res = self.request.post(self.base_url + '/remediation/api/v1/actions/search-results-actions',
                                json=payload)
        if res.status_code == 201:
            try:
                reactionId = res.json()['id']
                self.logger.info('MVISION EDR reaction got executed successfully')
            except Exception as e:
                self.logger.error('Something went wrong to create reaction. Error: {}'.format(e))
                sys.exit()
        return reactionId

    def main(self):
        """Drive the full flow: search, poll until finished, then optionally
        kill every matching process when --reaction True was given."""
        #Retrieve all reactions
        # reactions = self.get_reactions()
        # self.logger.info(json.dumps(reactions))
        # sys.exit()

        queryId = self.search()
        if queryId is None:
            sys.exit()

        # Poll every 30 seconds until the search completes.
        while self.search_status(queryId) is False:
            time.sleep(30)

        results = self.search_result(queryId)
        if results == []:
            self.logger.info('No Further Actions need to take place.')
            sys.exit()

        if args.reaction == 'True':
            for result in results:
                for systemId, pid in result.items():
                    reaction_id = self.reaction_execution(queryId, systemId, pid)
                    if reaction_id is None:
                        self.logger.error('Could not create new MVISION EDR reaction')
if __name__ == '__main__':
    # CLI entry point: parse options, prompt for the password if omitted,
    # then run the EDR search/react flow.
    usage = """Usage: mvision_edr_search_process.py -U <username> -P <password> -PN <process name> """
    title = 'McAfee EDR Python API'
    parser = ArgumentParser(description=title, usage=usage, formatter_class=RawTextHelpFormatter)
    parser.add_argument('--region', '-R', required=True,
                        choices=['EU', 'US', 'SY'], type=str,
                        help='MVISION EDR Tenant Region')
    parser.add_argument('--user', '-U', required=True,
                        type=str, help='MVISION EDR User Account')
    parser.add_argument('--password', '-P', required=False,
                        type=str, help='MVISION EDR Password')
    # Fix: the description was passed as `default=` instead of `help=`, so a
    # required flag carried a bogus default value and showed no help text.
    parser.add_argument('--process', '-PN', required=True,
                        type=str, help='Process Name to search for')
    parser.add_argument('--reaction', '-RE', required=False,
                        type=str, choices=['True', 'False'],
                        default='False', help='Kill Process.')
    parser.add_argument('--loglevel', '-L', required=False,
                        type=str, choices=['INFO', 'DEBUG'],
                        default='INFO', help='Specify log level.')
    args = parser.parse_args()

    # Prompt interactively when the password was not given on the command line.
    if not args.password:
        args.password = getpass.getpass()

    EDR().main()
14,466 | 3dc30d2955955bb0a8cd588338d675d4bcace502 | #!/home/frappe/frappe-bench/env/bin/python
from __future__ import annotations
import argparse
import os
import sys
from pathlib import Path
from typing import TYPE_CHECKING, Any, List, cast
import boto3
import frappe
from frappe.utils.backups import BackupGenerator
if TYPE_CHECKING:
from mypy_boto3_s3.service_resource import _Bucket
class Arguments(argparse.Namespace):
    """Typed argparse namespace for the CLI options accepted by parse_args()."""
    site: str
    bucket: str
    region_name: str
    endpoint_url: str
    aws_access_key_id: str
    aws_secret_access_key: str
    bucket_directory: str
def _get_files_from_previous_backup(site_name: str) -> list[Path]:
    """Return paths of the site's backup files taken within the last 24 hours.

    Connects to the Frappe site to read its DB credentials, asks
    BackupGenerator for the most recent backup set, then tears the
    connection down again. Empty entries are filtered out.
    """
    frappe.connect(site_name)
    conf = cast(Any, frappe.conf)
    backup_generator = BackupGenerator(
        db_name=conf.db_name,
        user=conf.db_name,
        password=conf.db_password,
        db_host=frappe.db.host,
        db_port=frappe.db.port,
        db_type=conf.db_type,
    )
    # 24 = look-back window in hours.
    recent_backup_files = backup_generator.get_recent_backup(24)
    frappe.destroy()
    return [Path(f) for f in recent_backup_files if f]
def get_files_from_previous_backup(site_name: str) -> list[Path]:
    """Fetch the recent backup file list, warning on stdout when it is empty."""
    backup_files = _get_files_from_previous_backup(site_name)
    if not backup_files:
        print("No backup found that was taken <24 hours ago.")
    return backup_files
def get_bucket(args: Arguments) -> _Bucket:
    """Build the destination S3 Bucket resource from the parsed CLI arguments."""
    s3 = boto3.resource(
        service_name="s3",
        endpoint_url=args.endpoint_url,
        region_name=args.region_name,
        aws_access_key_id=args.aws_access_key_id,
        aws_secret_access_key=args.aws_secret_access_key,
    )
    return s3.Bucket(args.bucket)
def upload_file(
    path: Path, site_name: str, bucket: _Bucket, bucket_directory: str | None = None
) -> None:
    """Upload one backup file to S3 and remove the local copy.

    The object key is ``[<bucket_directory>/]<site_name>/<filename>``.

    Args:
        path: Local backup file to push.
        site_name: Site folder used as the S3 key prefix.
        bucket: Destination S3 bucket resource.
        bucket_directory: Optional extra top-level prefix inside the bucket.
            Fix: annotated as ``str | None`` -- the old ``str = None``
            annotation contradicted the actual default.
    """
    filename = str(path.absolute())
    key = str(Path(site_name) / path.name)
    if bucket_directory:
        key = bucket_directory + "/" + key
    print(f"Uploading {key}")
    bucket.upload_file(Filename=filename, Key=key)
    # The local copy is removed only after a successful upload.
    os.remove(path)
def push_backup(args: Arguments) -> None:
    """Get latest backup files using Frappe utils, push them to S3 and remove local copy"""
    backup_files = get_files_from_previous_backup(args.site)
    s3_bucket = get_bucket(args)
    for backup_file in backup_files:
        upload_file(
            path=backup_file,
            site_name=args.site,
            bucket=s3_bucket,
            bucket_directory=args.bucket_directory,
        )
    print("Done!")
def parse_args(args: list[str]) -> Arguments:
    """Parse CLI options into an Arguments namespace.

    AWS credentials fall back to the standard environment variables
    (AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY) and are only mandatory on
    the command line when the environment does not provide them.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--site", required=True)
    parser.add_argument("--bucket", required=True)
    parser.add_argument("--region-name", required=True)
    parser.add_argument("--endpoint-url", required=True)
    # Fix: these flags were `required=True` *and* carried env-var defaults,
    # which made the defaults unreachable (argparse ignores `default` for
    # required arguments). Require them only when the variable is unset.
    parser.add_argument(
        "--aws-access-key-id",
        required=os.getenv("AWS_ACCESS_KEY_ID") is None,
        default=os.getenv("AWS_ACCESS_KEY_ID"),
    )
    parser.add_argument(
        "--aws-secret-access-key",
        required=os.getenv("AWS_SECRET_ACCESS_KEY") is None,
        default=os.getenv("AWS_SECRET_ACCESS_KEY"),
    )
    parser.add_argument("--bucket-directory")
    return parser.parse_args(args, namespace=Arguments())
def main(args: list[str]) -> int:
    """CLI entry point: parse arguments, push the backup, return exit code 0."""
    push_backup(parse_args(args))
    return 0
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    raise SystemExit(main(sys.argv[1:]))
|
14,467 | bbdc30ed2dd590b2834f0a357208276590b6d71f | # This file is for param search (optimizing)
'''
'''
import numpy as np
import pandas as pd
import os, sys, csv
from progress.bar import Bar
# Implementation part of HW2
def myStrategy(pastData, currPrice, stockType, l, s, au, bl):
    """Suggest a trading action from a two-window RSI crossover.

    Returns 1 (buy), -1 (sell) or 0 (hold). Two smoothed RSI values are
    computed over a long and a short trailing window whose sizes -- and the
    alpha/beta thresholds -- are tuned per stock; ``au``/``bl`` act as upper
    and lower guards on the short RSI. ``l``, ``s`` and ``currPrice`` are
    accepted for interface compatibility but not used.
    """
    # Per-stock tuned parameters: (long window, short window, alpha, beta).
    ticker = stockType[0:3]
    if ticker == 'SPY':
        w_l, w_s, alpha, beta = 2, 101, 0.447, 0.04
    elif ticker == 'DSI':
        w_l, w_s, alpha, beta = 6, 4, 0.9, 0.0
    elif ticker == 'IAU':
        w_l, w_s, alpha, beta = 11, 5, 0.95, 0.0
    elif ticker == 'LQD':
        w_l, w_s, alpha, beta = 6, 4, 0.8, 0.0

    # Not enough history to fill the larger window: hold.
    if len(pastData) < max(w_s, w_l):
        return 0

    def smoothed_rsi(window):
        # (gain + 1) / (gain + loss + 1): add-one smoothed RSI ratio.
        gain = sum(nxt - cur for cur, nxt in zip(window, window[1:]) if nxt > cur)
        loss = sum(cur - nxt for cur, nxt in zip(window, window[1:]) if cur > nxt)
        return float((gain + 1) / (gain + loss + 1))

    rsi_l = smoothed_rsi(pastData[-w_l:])
    rsi_s = smoothed_rsi(pastData[-w_s:])

    # Buy on a bullish crossover, or when the short RSI sits in (alpha, au);
    # sell on the bearish mirror image; otherwise hold.
    if rsi_s > rsi_l or (alpha < rsi_s < au):
        return 1
    if rsi_s < rsi_l or (bl < rsi_s < beta):
        return -1
    return 0
# Compute the return rate of my strategy, this code is from TA
# Compute return rate over a given price vector, with 3 modifiable parameters
def computeReturnRate(priceVec, stockType, l, s, a, b):
    """Simulate trading priceVec day by day with myStrategy and return the
    overall return rate relative to the initial capital.

    A suggested buy only executes when no stock is held; a suggested sell
    only executes when stock is held (all-in / all-out positions).
    Returns a length-1 numpy array since `total` has shape (dataCount, 1).
    """
    capital=1000  # Initial available capital
    capitalOrig=capital  # original capital
    dataCount=len(priceVec)  # day size
    suggestedAction=np.zeros((dataCount,1))  # Vec of suggested actions
    stockHolding=np.zeros((dataCount,1))  # Vec of stock holdings
    total=np.zeros((dataCount,1))  # Vec of total asset
    realAction=np.zeros((dataCount,1))  # Real action, which might be different from suggested action. For instance, when the suggested action is 1 (buy) but you don't have any capital, then the real action is 0 (hold, or do nothing).
    # Run through each day
    for ic in range(dataCount):
        currentPrice=priceVec[ic]  # current price
        # Strategy only sees history strictly before today.
        suggestedAction[ic]=myStrategy(priceVec[0:ic], currentPrice, stockType, l, s, a, b)  # Obtain the suggested action
        # get real action by suggested action
        if ic>0:
            stockHolding[ic]=stockHolding[ic-1]  # The stock holding from the previous day
        if suggestedAction[ic]==1:  # Suggested action is "buy"
            if stockHolding[ic]==0:  # "buy" only if you don't have stock holding
                stockHolding[ic]=capital/currentPrice  # Buy stock using cash
                capital=0  # Cash
                realAction[ic]=1
        elif suggestedAction[ic]==-1:  # Suggested action is "sell"
            if stockHolding[ic]>0:  # "sell" only if you have stock holding
                capital=stockHolding[ic]*currentPrice  # Sell stock to have cash
                stockHolding[ic]=0  # Stock holding
                realAction[ic]=-1
        elif suggestedAction[ic]==0:  # No action
            realAction[ic]=0
        else:
            assert False
        total[ic]=capital+stockHolding[ic]*currentPrice  # Total asset, including stock holding and cash
    returnRate=(total[-1]-capitalOrig)/capitalOrig  # Return rate of this run
    return returnRate
if __name__=='__main__':
    # Exhaustive grid search over the RSI parameters (l, s, a, b), keeping
    # the combination with the best average return rate across fileList.
    returnRateBest=-1.00  # Init best return rate
    fileList = ['SPY.csv']  # Init file names
    fileCount=len(fileList)
    # MA search algorithm (kept for reference, disabled)
    '''
    # Config search range
    windowSizeMin=200; windowSizeMax=500;  # Range of windowSize to explore
    alphaMin=0; alphaMax=30;  # Range of alpha to explore
    betaMin=0; betaMax=20;  # Range of beta to explore
    # Start exhaustive search
    for windowSize in range(windowSizeMin, windowSizeMax+1):  # For-loop for windowSize
        for alpha in range(alphaMin, alphaMax+1):  # For-loop for alpha
            for beta in range(betaMin, betaMax+1):  # For-loop for beta
    '''
    # RSI search algorithm
    lmin = 101; lmax = 101;
    lbest = 0; sbest = 0;
    # Candidate alpha/beta threshold values.
    alist = np.arange(0.448, 1.0, 0.001)
    blist = np.arange(0.05, 1.0, 0.001)
    abest = 0; bbest = 0;
    for l in range(lmin, lmax + 1, 2):
        for s in range(2, 3):
            for a in alist:
                for b in blist:
                    rr=np.zeros((fileCount,1))
                    for ic in range(fileCount):
                        file=fileList[ic];
                        df=pd.read_csv(file)
                        adjClose=df["Adj Close"].values  # Get adj close as the price vector
                        stockType=file[-7:-4]  # Get stock type
                        rr[ic]=computeReturnRate(adjClose, stockType, l, s, a, b)  # Compute return rate
                        #print("File=%s ==> rr=%f" %(file, rr[ic]));
                    returnRate = np.mean(rr)
                    if returnRate > returnRateBest:  # Keep the best parameters
                        lbest = l
                        sbest = s
                        abest = a
                        bbest = b
                        returnRateBest=returnRate
                    # print("Current best settings: l=%d, s=%d ==> avgReturnRate=%f" %(lbest, sbest, returnRateBest))
                    # print("Current best settings: a=%f, b=%f ==> avgReturnRate=%f" %(abest, bbest, returnRateBest))
                    print("Current best settings: l=%d, s=%d, a=%f, b=%f ==> avgReturnRate=%f" %(lbest, sbest, abest, bbest, returnRateBest))
    # print("Overall best settings: l=%d, s=%d ==> bestAvgReturnRate=%f" %(lbest, sbest, returnRateBest))
    # print("Overall best settings: a=%f, b=%f ==> bestReturnRate=%f" %(abest, bbest, returnRateBest))
    print("Overall best settings: l=%d, s=%d, a=%f, b=%f ==> avgReturnRate=%f" %(lbest, sbest, abest, bbest, returnRateBest))
    # with open('1011_weighted_ma.txt', 'w') as f:
    #     f.write()
|
14,468 | 5c537228c18656cc65e23fa28333de1769174c1c | import os
from PyQt5.QtCore import QTimer, pyqtSignal
from PyQt5.QtWidgets import QWidget
import Const
from QFlowLayout.ItemWidget import ItemWidget
from QFlowLayout.FlowLayout import FlowLayout
from QFlowLayout.SqlUtils import SqlUtils
# Paged listing URL for Tencent Video movies ({0} is the item offset).
Url = "http://v.qq.com/x/list/movie?pay=-1&offset={0}"
# Lead-actor link template used when rendering the "starring" line.
Actor = '''<a href="{href}" target="_blank" title="{title}" style="text-decoration: none;font-size: 12px;color: #999999;">{title}</a> '''
class GridWidget(QWidget):
    """Grid of video ItemWidgets arranged with a custom FlowLayout.

    Items are loaded from the local SQL database; the old network-based
    loading path is kept below as commented-out reference code.
    """

    Page = 0  # next page index; -1 means "no more pages" (legacy pagination)
    loadStarted = pyqtSignal(bool)  # emitted True at load start, False when done

    def __init__(self, *args, **kwargs):
        super(GridWidget, self).__init__(*args, **kwargs)
        self._layout = FlowLayout(self)  # use the custom flow layout
        # Async network download manager (legacy path, disabled)
        # self._manager = QNetworkAccessManager(self)
        # self._manager.finished.connect(self.onFinished)

    def load(self):
        """Kick off a load unless pagination is exhausted."""
        if self.Page == -1:
            return
        self.loadStarted.emit(True)
        # Defer the call so the progress indicator gets a chance to show
        # QTimer.singleShot(1000, self._load)
        QTimer.singleShot(1, self.loadFromSQL)

    def loadFromSQL(self):
        """Schedule an asynchronous (re)load from the SQL database."""
        if self.Page == -1:
            return
        self.loadStarted.emit(True)
        # Defer the call so the progress indicator gets a chance to show
        QTimer.singleShot(1, self._loadFromSQL)

    def _loadFromSQL(self):
        """Populate the grid with one ItemWidget per video row from SQL."""
        # self._layout.__del__()  # clear child widgets before loading
        video_list = SqlUtils.select_videos(Const.Gl_Refresh_Sql)
        for video in video_list:
            # GIF covers live in a separate cache directory from JPEGs.
            if video.img_type == Const.GL_gif_type:
                cover_path = "cache/covergif/" + video.identifier + ".gif"
            else:
                # cover_path = "cache/coverimg/IMG_20180729_110141.jpg"
                cover_path = "cache/coverimg/" + video.identifier + ".jpg"
            iwidget = ItemWidget(cover_path, video.custom_tag, video.video_name_local,
                                 video.resolution, video.actor_name, video.like_stars, video.video_path,
                                 video.hash,video.title,video.intro,
                                 self)
            self._layout.addWidget(iwidget)
        self.loadStarted.emit(False)
        # print("id = ", row[0])
        # print("type = ", row[1])
        # print("video_name = ", row[2])
        # print("actor_name = ", row[3])
        # print("tag = ", row[4])
        # print("country = ", row[5])
        # print("company = ", row[6])
        # print("series = ", row[7])
        # print("hash = ", row[8], "\n")

    # Legacy network loading path kept for reference:
    # def _load(self):
    #     print("load url:", Url.format(self.Page * 30))
    #     url = QUrl(Url.format(self.Page * 30))
    #     self._manager.get(QNetworkRequest(url))
    #
    # def onFinished(self, reply):
    #     # called when a request completes
    #     req = reply.request()  # get the request
    #     iwidget = req.attribute(QNetworkRequest.User + 1, None)
    #     path = req.attribute(QNetworkRequest.User + 2, None)
    #     html = reply.readAll().data()
    #     reply.deleteLater()
    #     del reply
    #     if iwidget and path and html:
    #         # a cover image finished downloading
    #         open(path, "wb").write(html)
    #         iwidget.setCover(path)
    #         return
    #     # parse the page
    #     self._parseHtml(html)
    #     self.loadStarted.emit(False)
    #
    # def _parseHtml(self, html):
    #     # encoding = chardet.detect(html) or {}
    #     # html = html.decode(encoding.get("encoding","utf-8"))
    #     html = HTML(html)
    #     # find all li list_item elements
    #     lis = html.xpath("//li[@class='list_item']")
    #     if not lis:
    #         self.Page = -1  # no more pages
    #         return
    #     self.Page += 1
    #     self._makeItem(lis)
    #
    # def _makeItem(self, lis):
    #     for li in lis:
    #         a = li.find("a")
    #         video_url = a.get("href")  # video playback URL
    #         img = a.find("img")
    #         cover_url = "http:" + img.get("r-lazyload")  # cover image
    #         figure_title = img.get("alt")  # movie name
    #         figure_info = a.find("div/span")
    #         figure_info = "" if figure_info is None else figure_info.text  # movie info
    #         figure_score = "".join(li.xpath(".//em/text()"))  # rating
    #         # lead actors
    #         figure_desc = "<span style=\"font-size: 12px;\">starring:</span>" + \
    #                       "".join([Actor.format(**dict(fd.items()))
    #                                for fd in li.xpath(".//div[@class='figure_desc']/a")])
    #         # play count
    #         figure_count = (
    #             li.xpath(".//div[@class='figure_count']/span/text()") or [""])[0]
    #         path = "cache/{0}.jpg".format(
    #             os.path.splitext(os.path.basename(video_url))[0])
    #         cover_path = "Data/pic_v.png"
    #         if os.path.isfile(path):
    #             cover_path = path
    #         iwidget = ItemWidget(cover_path, figure_info, figure_title,
    #                              figure_score, figure_desc, figure_count, video_url, cover_url, path, self)
    #         self._layout.addWidget(iwidget)
|
14,469 | b7164164588a7a9c3a4fff3b932f0413341d8fe6 | import yfinance as yf
from datetime import datetime
class Yahoo(object):
    """Thin wrapper around yfinance for fetching price history and events."""

    @staticmethod
    def history(stockCode, interval, startDate):
        """Return history rows for stockCode as a list of dicts.

        Each row carries date, OHLCV data plus dividend and split amounts.
        Today's (still incomplete) bar is excluded.
        """
        endDate = datetime.now().strftime('%Y-%m-%d')
        stock = yf.Ticker(stockCode)
        historyData = stock.history('max', interval, startDate, endDate,
                                    auto_adjust = False)
        outputData = [
            {
                'history_date': date.strftime('%Y-%m-%d'),
                'open': historyData['Open'][i],
                'close': historyData['Close'][i],
                'high': historyData['High'][i],
                'low': historyData['Low'][i],
                'volume': historyData['Volume'][i],
                'dividend': historyData['Dividends'][i],
                'split': historyData['Stock Splits'][i]
            }
            for i, date in enumerate(historyData.index)
            # Fix: `date` is a pandas Timestamp, so comparing it directly to
            # the 'YYYY-MM-DD' string was unreliable (always unequal for a
            # tz-aware index); compare formatted dates instead.
            if date.strftime('%Y-%m-%d') != endDate  # remove today data
        ]
        return outputData

    @staticmethod
    def event(stockCode, interval, startDate):
        """Return only the history rows carrying a dividend or a split."""
        history = Yahoo.history(stockCode, interval, startDate)
        outputData = [
            data for data in history
            if data['dividend'] != 0 or data['split'] != 0
        ]
        return outputData
if __name__ == '__main__':
    # Demo: dump the full history and the dividend/split events of one ticker.
    stock = 'BBCA.JK'
    interval = '1D'
    startDate = '1990-01-01'

    print()
    print('===== HISTORY =====')
    for history in Yahoo.history(stock, interval, startDate):
        print(history)

    print()
    print('===== EVENT =====')
    for event in Yahoo.event(stock, interval, startDate):
        print(event)
|
14,470 | f2dd161dd33c5db71fe77acd33ba0fa13d8d68df | import json
import os.path
import firebase_admin
from firebase_admin import credentials, firestore
def main():
    """Sync places from data.json into the Firestore 'places' collection.

    Reads the service-account key from ~/.config/serviceAccountKey.json,
    then creates one document per entry of data.json, skipping ids that
    already exist in the collection. Each created item is printed.
    """
    key_json = os.path.expanduser("~/.config/serviceAccountKey.json")
    cred = credentials.Certificate(key_json)
    firebase_admin.initialize_app(cred)

    with open('data.json', 'r') as f:
        items = json.load(f)

    db = firestore.client()
    coll = db.collection('places')

    # Collect existing document ids once; a set gives O(1) membership tests
    # (the previous list made each check O(n)).
    existing_ids = {doc.id for doc in coll.get()}

    for item in items:
        if item['id'] in existing_ids:
            continue
        doc = coll.document(item['id'])
        doc.set({
            'name': item['name'],
            'position': firestore.GeoPoint(
                item['position']['lat'], item['position']['lng']
            )
        })
        print(item)
if __name__ == '__main__':
    # Script entry point.
    main()
|
14,471 | 10e5bfdb2a51c7907ad50fab8b0ec035db2515ea | from contextlib import contextmanager
import sys
import os
import os.path as path
import subprocess
import argparse
import json
import logging
import errno
import copy
import re
# File names for the dependency specification and the freeze/lock file.
dependency_file = 'subprojects.quark'
freeze_file = 'freeze.quark'
logger = logging.getLogger(__name__)
# Cache of parsed catalog JSON keyed by URL, plus user-supplied URL overrides
# (a None key acts as a placeholder for the first catalog encountered).
catalog_cache = {}
catalog_urls_overrides = {}
def workaround_url_read(url):
    """
    Tries to perform an urlopen(url).read(), with workarounds for broken
    certificate stores.

    On many Python installations, urllib has problems using the system
    certificate stores; this seems to be particularly true on macOS, it is so
    in a twisted way on Win32 and can be a problem on some Linux distros (where
    in general the very definition of "system certificates store" is somewhat
    confused). For the horrible details, see:

    https://stackoverflow.com/a/42107877/214671
    https://stackoverflow.com/q/52074590/214671

    A possibility could be to require the certifi package and use the
    certificates it provides (as requests does), but it's yet another thing to
    install and, most importantly, to keep updated (those pesky certificates do
    love to expire).

    On the other hand, on pretty much every macOS (and Linux) machine there's
    some system-provided cURL command-line tool that should work fine with the
    system certificate store; so, if urllib fails due to SSL errors, we try
    that route as well.
    """
    from urllib.request import urlopen
    from urllib.error import URLError
    # Declared so the rebinding below replaces the module-level function;
    # without `global` the assignment would only create a dead local.
    global workaround_url_read
    try:
        return urlopen(url).read()
    except URLError as ex:
        import ssl
        if len(ex.args) and isinstance(ex.args[0], ssl.SSLError):
            # logger.warn is a deprecated alias of logger.warning
            logger.warning("SSL error reading catalog file %s, trying with command-line curl..." % url)

            def curl_url_read(url):
                return log_check_output(["curl", "-s", url])

            # Try with command-line cURL
            try:
                data = curl_url_read(url)
            except Exception:
                # Re-raise original exception - maybe SSL _is_ broken after all
                raise ex
            # It worked out fine, don't waste time with urllib in the future.
            # Fix: this rebinding previously sat after a `return` inside the
            # try block (unreachable) and lacked the `global` declaration, so
            # the memoization never happened.
            workaround_url_read = curl_url_read
            return data
        raise
def load_conf(folder):
    """Load <folder>/subprojects.quark and fill in defaults from its catalog.

    Returns the parsed dict with per-dependency options defaulted from the
    shared catalog JSON, or None when the folder has no dependency file.
    Catalog downloads are memoized in the module-level catalog_cache, and
    catalog URLs may be remapped through catalog_urls_overrides.
    Raises json.decoder.JSONDecodeError (after logging) on a malformed file.
    """
    filepath = path.join(folder, dependency_file)
    if path.exists(filepath):
        jsonfile = path.join(folder, dependency_file)
        try:
            with open(jsonfile, 'r') as f:
                result = json.load(f)
                if isinstance(result, dict) and "catalog" in result:
                    # Fill-in with default options from catalog
                    catalog_url = result["catalog"]
                    # None is used as placeholder for the first fetched catalog
                    if None in catalog_urls_overrides:
                        catalog_urls_overrides[catalog_url] = catalog_urls_overrides[None]
                        del catalog_urls_overrides[None]
                    # If we have an override, use the overridden URL
                    if catalog_url in catalog_urls_overrides:
                        catalog_url = catalog_urls_overrides[catalog_url]
                    # The catalog is often the same for all dependencies, don't
                    # hammer the server *and* make sure we have a coherent view
                    if catalog_url not in catalog_cache:
                        catalog_cache[catalog_url] = json.loads(workaround_url_read(catalog_url).decode('utf-8'))
                    cat = catalog_cache[catalog_url]

                    def filldefault(depends):
                        # Copy catalog defaults for options the spec leaves unset.
                        for module, opts in depends.items():
                            name = opts.get("name", module.split("/")[-1])
                            if name in cat:
                                for opt, value in cat[name].items():
                                    if opt not in opts:
                                        opts[opt] = copy.deepcopy(value)

                    if "depends" in result:
                        filldefault(result["depends"])
                    if "optdepends" in result:
                        for option, deps in result["optdepends"].items():
                            for d in deps:
                                if "depends" in d:
                                    filldefault(d["depends"])
                return result
        except json.decoder.JSONDecodeError as err:
            logger.error("Error parsing '%s'" % jsonfile)
            raise err
    else:
        return None
def print_cmd(cmd, comment="", stream=sys.stdout):
    """Echo a command line, shell-prompt style, colorized when stream is a TTY."""
    if comment:
        comment = " (" + comment + ")"
    if os.isatty(stream.fileno()):
        yellow, green, blue = "\x1b[33m", "\x1b[32m", "\x1b[34m"
        reset = "\x1b[30m\x1b(B\x1b[m"
    else:
        # Not a terminal: emit no escape sequences at all.
        yellow = green = blue = reset = ""
    pieces = [yellow, "quark: ",
              green, os.getcwd(),
              reset, "$ ", " ".join(cmd),
              blue, comment,
              reset, "\n"]
    stream.write("".join(pieces))
    stream.flush()
def fork(*args, **kwargs):
    """Echo the command being run, then execute it with subprocess.check_call.

    Raises subprocess.CalledProcessError on a non-zero exit status.
    """
    print_cmd(args[0])
    return subprocess.check_call(*args, **kwargs)
def log_check_output(*args, **kwargs):
    """Echo the command (marked "captured"), then return its stdout bytes.

    Raises subprocess.CalledProcessError on a non-zero exit status.
    """
    print_cmd(args[0], "captured")
    return subprocess.check_output(*args, **kwargs)
def parse_option(s):
    """Parse a 'KEY[:TYPE]=VALUE' option string into a (key, value) pair.

    Boolean-looking values ('yes'/'no'/'on'/'off'/'1'/'0'/...) are converted
    to bool; every other value is returned as the raw string after '='. An
    optional CMake-style ':TYPE' tag (e.g. 'KEY:BOOL=ON') is stripped from
    the key. Raises ValueError when the string contains no '='.
    """
    eq = s.find('=')
    if eq < 0:
        raise ValueError('Unable to parse option: "%s"' % s)
    colon = s.find(':')
    if colon < 0:
        colon = eq
    key, value = s[:colon], s[eq + 1:]
    try:
        # Fix: the old code compared the lowercased type tag against 'BOOL'
        # (which could never match) and then truncated the value to the
        # single character s[eq + 1]. Converting here and keeping the full
        # raw string otherwise restores the intended behaviour.
        value = str2bool(value)
    except argparse.ArgumentTypeError:
        pass
    return key, value
def mkdir(path):
    """Create path (with parents), tolerating an already-existing directory."""
    try:
        os.makedirs(path)
    except OSError as err:
        # Swallow the error only when the target already exists as a directory.
        already_a_dir = err.errno == errno.EEXIST and os.path.isdir(path)
        if not already_a_dir:
            raise
def str2bool(v):
    """Convert a yes/no-style string to bool, raising ArgumentTypeError otherwise."""
    truthy = {'yes', 'true', 't', 'y', '1', 'on'}
    falsy = {'no', 'false', 'f', 'n', '0', 'off'}
    lowered = v.lower()
    if lowered in truthy:
        return True
    if lowered in falsy:
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def cmake_escape(s):
    """Wrap s in a CMake bracket literal whose delimiter cannot occur in s."""
    # One '=' more than the longest run of '=' in s guarantees the closing
    # fence never appears inside the payload.
    longest_run = max(re.findall('=+', s), key=len, default='')
    fence = longest_run + '='
    return '[{0}[{1}]{0}]'.format(fence, s)
@contextmanager
def DirectoryContext(newdir):
    """Temporarily chdir into newdir, restoring the previous cwd on exit."""
    saved_cwd = os.getcwd()
    os.chdir(newdir)
    try:
        yield
    finally:
        # Restore even when the body raised.
        os.chdir(saved_cwd)
|
14,472 | 96f1a08bb77bee81970cd9da4df0a7a2539a0846 | class Logger:
    # Simple print-based logger that tags each line with the plugin name,
    # the severity level and the emitting module (derived from `prefix`).
    _TAG = "VectraTvOnlinePlugin"
    _DEBUG = 'Debug'
    _INFO = 'Info'
    _WARNING = 'Warning'
    _ERROR = 'Error'

    def __init__(self, prefix):
        # e.g. prefix "_foo" becomes source label "foo.py".
        self.prefix = prefix.replace('_', '') + '.py'

    def i(self, message):
        # Info-level message.
        self.__log__(message, self._INFO)

    def d(self, message):
        # Debug-level message.
        self.__log__(message, self._DEBUG)

    def e(self, message):
        # Error-level message.
        self.__log__(message, self._ERROR)

    def w(self, message):
        # Warning-level message.
        self.__log__(message, self._WARNING)

    def __log__(self, message, level):
        # Python 2 print statement: "TAG:Level (source.py): message".
        print '%s:%s (%s): %s' % (self._TAG, level, self.prefix, message)
|
14,473 | 55d546806dca82abb54695c9bbb22ff0decb007e | # @Time : 2020/7/20 19:18
# @File : Deeplabv3plus
# @Email : dean0731@qq.com
# @Software : PyCharm
# @Desc : Deeplabv3plus + Xception
# 参考:https://github.com/bubbliiiing/Semantic-Segmentation/tree/master/deeplab_Xception/nets
# @History :
# 2020/7/20 Dean First Release
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras import layers
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Lambda
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Softmax,Reshape
from tensorflow.keras.layers import Concatenate
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D,SeparableConv2D
from tensorflow.keras.layers import DepthwiseConv2D
from tensorflow.keras.layers import ZeroPadding2D
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras import backend as K
class Xception:
    """Modified Xception backbone used as the DeepLabV3+ encoder.

    Built from entry/middle/exit flows of separable convolutions with
    residual shortcuts; the entry flow also exposes a low-level feature
    map ("skip") consumed by the DeepLabV3+ decoder.
    """

    def _conv2d_same(self,x, filters,stride=1, kernel_size=3, rate=1,name=""):
        """2-D conv keeping 'same'-style spatial alignment for any stride."""
        if stride == 1:
            return Conv2D(filters,(kernel_size, kernel_size),strides=(stride, stride),padding='same', use_bias=False,dilation_rate=(rate, rate),name=name)(x)
        else:
            # Manual symmetric padding so strided/dilated convs stay aligned.
            kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
            pad_total = kernel_size_effective - 1
            pad_beg = pad_total // 2
            pad_end = pad_total - pad_beg
            x = ZeroPadding2D((pad_beg, pad_end))(x)
            return Conv2D(filters,(kernel_size, kernel_size),strides=(stride, stride),padding='valid', use_bias=False,dilation_rate=(rate, rate),name=name)(x)

    def _sepConv_BN(self,x, filters,stride=1, kernel_size=3, rate=1, epsilon=1e-3,name=''):
        """Depthwise-separable conv block: depthwise conv + BN, pointwise
        1x1 conv + BN, then ReLU.

        NOTE(review): the `name` parameter is accepted but not applied to
        any layer -- confirm whether named layers were intended.
        """
        if stride == 1:
            depth_padding = 'same'
        else:
            kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
            pad_total = kernel_size_effective - 1
            pad_beg = pad_total // 2
            pad_end = pad_total - pad_beg
            x = ZeroPadding2D((pad_beg, pad_end))(x)
            depth_padding = 'valid'
        x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(rate, rate),padding=depth_padding, use_bias=False)(x)  # dilation_rate: atrous (dilated) depthwise convolution
        x = BatchNormalization(epsilon=epsilon)(x)
        x = Conv2D(filters, (1, 1), padding='same',use_bias=False)(x)
        x = BatchNormalization(epsilon=epsilon)(x)
        x = Activation('relu')(x)
        return x

    def _entry_flow(self,x):
        """Entry flow; returns (features, skip) where `skip` is the
        low-level 256-channel map used by the decoder."""
        x = Conv2D(filters=32,kernel_size=(3,3),strides=(2,2),use_bias=False,padding="same",name="Xception_1")(x)
        x = BatchNormalization(name="Xception_2")(x)
        x = Activation('relu',name="Xception_3")(x)
        x = Conv2D(filters=64,kernel_size=(3,3),use_bias=False,padding="same",name="Xception_4")(x)
        x = BatchNormalization(name="Xception_5")(x)
        x = Activation('relu',name="Xception_6")(x)

        # Residual block to 128 channels, downsampling by 2.
        residual = self._conv2d_same(x,128,2,1,name="Xception_7")
        residual = BatchNormalization(name="Xception_8")(residual)
        x = self._sepConv_BN(x,128,stride=1,rate=1,name="Xception_9")
        x = self._sepConv_BN(x,128,stride=1,rate=1,name="Xception_10")
        x = self._sepConv_BN(x,128,stride=2,rate=1,name="Xception_11")
        x = layers.add([x,residual])

        # Residual block to 256 channels; its middle activation is the skip.
        residual = self._conv2d_same(x,256,2,1,name="Xception_12")
        residual = BatchNormalization(name="Xception_13")(residual)
        x = self._sepConv_BN(x,256,stride=1,rate=1,name="Xception_14")
        skip = self._sepConv_BN(x,256,stride=1,rate=1,name="Xception_15")
        x = self._sepConv_BN(skip,256,stride=2,rate=1,name="Xception_16")
        x = layers.add([x,residual])

        # Residual block to 728 channels.
        residual = self._conv2d_same(x,728,2,1,name="Xception_17")
        residual = BatchNormalization(name="Xception_18")(residual)
        x = self._sepConv_BN(x,728,stride=1,rate=1,name="Xception_19")
        x = self._sepConv_BN(x,728,stride=1,rate=1,name="Xception_20")
        x = self._sepConv_BN(x,728,stride=2,rate=1,name="Xception_21")
        x = layers.add([x,residual])
        return x,skip

    def _middle_flow(self,x):
        """16 identical residual blocks of three 728-channel separable convs.

        NOTE(review): this calls the module-level SepConv_BN (whose ReLU
        placement differs from self._sepConv_BN) -- confirm intentional.
        """
        for i in range(16):
            residual = x
            shortcut = SepConv_BN(residual,728,stride=1,rate=1,name="Xception_22_{}_1".format(i))
            shortcut = SepConv_BN(shortcut,728,stride=1,rate=1,name="Xception_22_{}_2".format(i))
            shortcut = SepConv_BN(shortcut,728,stride=1,rate=1,name="Xception_22_{}_3".format(i))
            x = layers.add([shortcut,residual])
        return x

    def _exit(self,x):
        """Exit flow: one downsampling residual block, then three separable
        convs widening to 2048 channels."""
        residual = self._conv2d_same(x,1024,2,1,name="Xception_23")
        residual = BatchNormalization(name="Xception_24")(residual)
        x = self._sepConv_BN(x,728,stride=1,rate=1,name="Xception_25")
        x = self._sepConv_BN(x,1024,stride=1,rate=1,name="Xception_26")
        x = self._sepConv_BN(x,1024,stride=2,rate=1,name="Xception_27")
        x = layers.add([x,residual])
        x = self._sepConv_BN(x,1536,stride=1,rate=1,name="Xception_28")
        x = self._sepConv_BN(x,1536,stride=1,rate=1,name="Xception_29")
        x = self._sepConv_BN(x,2048,stride=1,rate=1,name="Xception_30")
        return x

    def xception(self,x,OS=16):
        """Run the full backbone; returns (features, atrous_rates, skip).

        OS is the output stride (8 or 16) and selects the ASPP atrous rates.
        NOTE(review): entry_block3_stride, middle_block_rate and
        exit_block_rates are computed but never applied to the flows --
        confirm whether OS=8 is actually supported.
        """
        if OS == 8:
            entry_block3_stride = 1
            middle_block_rate = 2  # ! Not mentioned in paper, but required
            exit_block_rates = (2, 4)
            atrous_rates = (12, 24, 36)
        else:
            entry_block3_stride = 2
            middle_block_rate = 1
            exit_block_rates = (1, 2)
            atrous_rates = (6, 12, 18)
        x,skip = self._entry_flow(x)
        x = self._middle_flow(x)
        x = self._exit(x)
        return x,atrous_rates,skip
def SepConv_BN(x, filters,stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=1e-3,name=''):
    """Separable conv block: [pre-ReLU] -> depthwise conv -> BN [-> ReLU]
    -> 1x1 pointwise conv -> BN [-> ReLU].

    With depth_activation=False a single pre-activation ReLU is applied;
    with True, a ReLU follows each BatchNormalization instead.  `rate` is
    the atrous (dilation) rate of the depthwise convolution.

    NOTE(review): the `name` parameter is accepted but never used.
    """
    # Work out the padding: stride 1 keeps H/W ('same'); otherwise pad
    # explicitly for the effective (dilated) kernel and use 'valid'.
    if stride == 1:
        depth_padding = 'same'
    else:
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = ZeroPadding2D((pad_beg, pad_end))(x)
        depth_padding = 'valid'
    # Pre-activation when per-layer activations are disabled.
    if not depth_activation:
        x = Activation('relu')(x)
    # Separable convolution: depthwise 3x3 first, then pointwise 1x1.
    x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(rate, rate),
                        padding=depth_padding, use_bias=False)(x) # dilation_rate makes this an atrous depthwise conv
    x = BatchNormalization(epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)
    x = Conv2D(filters, (1, 1), padding='same',use_bias=False)(x)
    x = BatchNormalization(epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)
    return x
def Deeplabv3(width,height,channel = 3, n_labels=2):
    """Build a DeepLabV3+ segmentation model (Xception backbone, OS=16).

    Args:
        width, height, channel: input image dimensions.
        n_labels: number of segmentation classes (softmax over channels).
    Returns:
        A Keras Model mapping (width, height, channel) images to per-pixel
        class probabilities at the same spatial size.
    """
    img_input = Input(shape=(width,height,channel))
    # Backbone network.
    x,atrous_rates,skip1 = Xception().xception(img_input,OS=16)
    # ASPP head. The rates depend on the output stride; SepConv_BN runs a
    # 3x3 atrous depthwise conv followed by a 1x1 compression, where the
    # dilation is the given rate.
    b0 = Conv2D(256, (1, 1), padding='same', use_bias=False, )(x)
    b0 = BatchNormalization(epsilon=1e-5)(b0)
    b0 = Activation('relu')(b0)
    # rate = 6 (12)
    b1 = SepConv_BN(x, 256,rate=atrous_rates[0], depth_activation=True, epsilon=1e-5)
    # rate = 12 (24)
    b2 = SepConv_BN(x, 256,rate=atrous_rates[1], depth_activation=True, epsilon=1e-5)
    # rate = 18 (36)
    b3 = SepConv_BN(x, 256,rate=atrous_rates[2], depth_activation=True, epsilon=1e-5)
    b4 = GlobalAveragePooling2D()(x) # image-level pooling branch
    b4 = Lambda(lambda x: K.expand_dims(x, 1))(b4) # add back one spatial dim
    b4 = Lambda(lambda x: K.expand_dims(x, 1))(b4) # and the other: 1*1*channels
    b4 = Conv2D(256, (1, 1), padding='same',use_bias=False)(b4) # channel compression to 1*1*256
    b4 = BatchNormalization( epsilon=1e-5)(b4)
    b4 = Activation('relu')(b4)
    size_before = tf.keras.backend.int_shape(x)
    b4 = Lambda(lambda x: tf.image.resize(x, size_before[1:3]))(b4) # broadcast back to the backbone feature size
    x = Concatenate()([b4,b0, b1, b2, b3])
    x = Conv2D(256, (1, 1), padding='same',
               use_bias=False)(x)
    x = BatchNormalization(epsilon=1e-5)(x)
    x = Activation('relu')(x)
    x = Dropout(0.1)(x)
    # Decoder: upsample to the low-level feature size and fuse with it.
    x = Lambda(lambda xx: tf.image.resize(xx, skip1.shape[1:3]))(x)
    dec_skip1 = Conv2D(48, (1, 1), padding='same',use_bias=False)(skip1)
    dec_skip1 = BatchNormalization(epsilon=1e-5)(dec_skip1)
    dec_skip1 = Activation('relu')(dec_skip1)
    x = Concatenate()([x, dec_skip1])
    x = SepConv_BN(x, 256,depth_activation=True, epsilon=1e-5)
    x = SepConv_BN(x, 256,depth_activation=True, epsilon=1e-5)
    # Per-pixel class logits, upsampled back to the input resolution.
    x = Conv2D(n_labels, (1, 1), padding='same')(x)
    size_before3 = tf.keras.backend.int_shape(img_input)
    x = Lambda(lambda xx:tf.image.resize(xx,size_before3[1:3]))(x)
    # x = Reshape((-1,n_labels))(x)
    x = Softmax()(x)
    inputs = img_input
    model = Model(inputs, x)
    return model
if __name__ == '__main__':
    # Smoke test: build a 512x512 RGB DeepLabV3+ model and print its layout.
    model = Deeplabv3(512,512,3)
    model.summary(line_length=200)
14,474 | b9e4266519b95e5cdff676e4561f977c34db2ab0 | #!/usr/bin/python
import sys
import csv
import string
import os
import numpy as np
def calcGamesToCooperate(fname, label):
    """Aggregate every <attr>_<g>_<b>/agentRewards.csv run found under fname.

    Writes one combined raw-data CSV plus seven summary CSVs (time to
    cooperate/learn, and counts and settle times for the GN and NG
    outcomes), each a numAgents x numAgents matrix indexed by the two
    agents' preference types.  (Python 2 script: note the print statements
    and the 'rb' csv mode.)
    """
    types = ["ABCD", "ABDC", "BACD", "BADC", "ABCxD","BACxD", "AxBCD", "AxBDC", "AxBCxD"]
    numAgents = len(types)
    #create and open all files
    data = fname+label+"_rawData/"+label+"_combinedOverLearnReward.csv"
    dataOut = open(data, 'w')
    writer = csv.writer(dataOut)
    learn = fname+label+'_timeToLearn.csv'
    learnOut = open(learn, 'w')
    coop = fname+label+'_timeToCoop.csv'
    coopOut = open(coop, 'w')
    numcoop = fname+label+'_numCoop.csv'
    numCoopOut = open(numcoop, 'w')
    numGNf = fname+label+'_numGN.csv'
    numGNOut = open(numGNf, 'w')
    numNGf = fname+label+'_numNG.csv'
    numNGOut = open(numNGf, 'w')
    settleGNf = fname+label+'_settleGN.csv'
    settleGNOut = open(settleGNf, 'w')
    settleNGf = fname+label+'_settleNG.csv'
    settleNGOut = open(settleNGf, 'w')
    #print fname
    numfound = 0
    #create matrices for data and results (one cell per agent-type pair)
    foundMatrix = [[0.0 for x in range(numAgents)] for x in range(numAgents)]
    numCoop = [[0.0 for x in range(numAgents)] for x in range(numAgents)]
    numGN = [[0.0 for x in range(numAgents)] for x in range(numAgents)]
    numNG = [[0.0 for x in range(numAgents)] for x in range(numAgents)]
    firstCoop = [[[] for x in range(numAgents)] for x in range(numAgents)]
    learnTime = [[[] for x in range(numAgents)] for x in range(numAgents)]
    GNTime = [[[] for x in range(numAgents)] for x in range(numAgents)]
    NGTime = [[[] for x in range(numAgents)] for x in range(numAgents)]
    coopMatrix = [[0.0 for x in range(numAgents)] for x in range(numAgents)]
    learnMatrix = [[0.0 for x in range(numAgents)] for x in range(numAgents)]
    GNMatrix = [[0.0 for x in range(numAgents)] for x in range(numAgents)]
    NGMatrix = [[0.0 for x in range(numAgents)] for x in range(numAgents)]
    percCoop = [[0.0 for x in range(numAgents)] for x in range(numAgents)]
    percGN = [[0.0 for x in range(numAgents)] for x in range(numAgents)]
    percNG = [[0.0 for x in range(numAgents)] for x in range(numAgents)]
    # for all agentRewards files, pull the data into one file and add data to calculate
    # learn and "converge" points
    for subdir in os.listdir(fname):
        if os.path.isdir(fname+subdir):
            if os.path.isfile(fname+subdir+"/agentRewards.csv"):
                outLineG=[]
                outLineB=[]
                #pull out the attribute number from file name (<attr>_<g>_<b>)
                attNum = subdir.split('_')[0]
                outLineG.append(attNum)
                outLineG.append('0')
                outLineB.append(attNum)
                outLineB.append('1')
                #print attNum
                toRead = fname+subdir+"/agentRewards.csv"
                numfound+=1
                #print subdir
                # pull out agent preferences from file name
                gname = subdir.split('_')[1]
                bname = subdir.split('_')[2]
                # add names to two outfile lines that will be written
                outLineG.append(gname)
                outLineG.append(bname)
                outLineB.append(gname)
                outLineB.append(bname)
                # get the index of the agent's type
                gLoc = types.index(gname)
                bLoc = types.index(bname)
                foundMatrix[gLoc][bLoc]+=1;
                lines = []
                # read in the file and split up the data into the right out lines
                with open(toRead, 'rb') as csvfile:
                    f = csv.reader(csvfile, delimiter=',', quotechar='|')
                    for row in f:
                        lines.append(row)
                # reward data starts at column 2 of each agent's row
                for i in range(2,len(lines[0])):
                    outLineG.append(lines[0][i])
                    outLineB.append(lines[1][i])
                timeToCoop = findFirstCoop(lines)
                timeToLearn = findTimeToLearn(lines)
                timeToGN = findTimeToGN(lines)
                timeToNG = findTimeToNG(lines)
                #print gname+" "+bname+"coop "+str(timeToCoop)+" learn "+str(timeToLearn)
                # if the conditions were found (-1 means "never"), add the data
                if timeToCoop>-1:
                    firstCoop[gLoc][bLoc].append(timeToCoop)
                if timeToLearn>-1:
                    numCoop[gLoc][bLoc]+=1;
                    learnTime[gLoc][bLoc].append(timeToLearn)
                if timeToGN>-1:
                    numGN[gLoc][bLoc]+=1;
                    GNTime[gLoc][bLoc].append(timeToGN)
                if timeToNG>-1:
                    numNG[gLoc][bLoc]+=1;
                    NGTime[gLoc][bLoc].append(timeToNG)
                writer.writerow(outLineG)
                writer.writerow(outLineB)
            else:
                # flag run directories that are missing their results file
                print fname+subdir+"/agentRewards.csv"
    # calculate the means across the learning trials
    for i in range(0,len(firstCoop)):
        for j in range(0, len(firstCoop[0])):
            if len(firstCoop[i][j])>0:
                coopMatrix[i][j] = np.mean(firstCoop[i][j])
            if len(learnTime[i][j])>0:
                learnMatrix[i][j] = np.mean(learnTime[i][j])
            if len(GNTime[i][j])>0:
                GNMatrix[i][j] = np.mean(GNTime[i][j])
            if len(NGTime[i][j])>0:
                NGMatrix[i][j] = np.mean(NGTime[i][j])
            if foundMatrix[i][j]>0:
                percGN[i][j] = numGN[i][j]/foundMatrix[i][j]
                percNG[i][j] = numNG[i][j]/foundMatrix[i][j]
                percCoop[i][j] = numCoop[i][j]/foundMatrix[i][j]
                #print percCoop[i][j]
    # write each summary matrix to its own CSV file
    outputMatrix(percCoop, numCoopOut, types, label+" perc. Coop")
    outputMatrix(coopMatrix, coopOut, types, label+" avg. 1st Coop")
    outputMatrix(learnMatrix, learnOut, types, label+" avg. learn time")
    outputMatrix(percGN, numGNOut, types, label+" perc GN")
    outputMatrix(GNMatrix, settleGNOut, types, label+" avg. learn GN")
    outputMatrix(percNG, numNGOut, types, label+" perc NG")
    outputMatrix(NGMatrix, settleNGOut, types, label+" avg. learn NG")
def outputMatrix(matrix, outFile, types, label):
    """Write one labelled matrix as a CSV table to outFile.

    Layout: a label row, a header row (a 6-space corner cell followed by
    the agent type names), one row per agent with values formatted to two
    decimals, then two blank separator rows.

    Unlike the original, the caller's `types` list is never mutated
    (the old code inserted a padding cell and deleted it afterwards,
    which broke on any exception in between).
    """
    writer = csv.writer(outFile)
    writer.writerow([label])
    # Header: padding for the row-label column, then the column names.
    writer.writerow(["{0:6s}".format(" ")] + list(types))
    for i in range(0, len(matrix)):
        row = [types[i]]
        for j in range(0, len(matrix[0])):
            row.append("{0:6.2f}".format(matrix[i][j]))
        writer.writerow(row)
    # Two blank rows separate consecutive matrices in the same file.
    writer.writerow([])
    writer.writerow([])
def findFirstCoop(lines):
    """Return the first column index (>= 2) where both agents' rewards are
    positive, or -1 if they never cooperate.

    lines[0] and lines[1] hold the two agents' per-round reward strings;
    the reward data starts at column 2.
    """
    for col in range(2, len(lines[1])):
        reward_a = float(lines[0][col])
        reward_b = float(lines[1][col])
        if reward_a > 0 and reward_b > 0:
            return col
    return -1
def findTimeToLearn(lines):
    """Return the column index from which both agents' rewards stay positive
    through the end of the trial, or -1 if they never settle on cooperation.

    lines[0]/lines[1] are the two agents' per-round reward strings; data
    starts at column 2.
    """
    # Initialize to -1: the original left learnTime unbound (raising
    # UnboundLocalError) when the loop body never matched either condition
    # (e.g. an empty trial, or all-negative rewards).
    learnTime = -1
    found = False
    for i in range(2, len(lines[1])):
        # First positive-positive round after a reset marks a candidate.
        if float(lines[0][i]) > 0 and float(lines[1][i]) > 0 and not found:
            learnTime = i
            found = True
        # Any zero reward breaks the streak; start looking again.
        if float(lines[0][i]) == 0 or float(lines[1][i]) == 0:
            found = False
            learnTime = -1
    return learnTime
def findTimeToGN(lines):
    """Return the column index from which agent 0 earns positive reward while
    agent 1 earns none through the end of the trial ("GN" outcome), or -1.

    lines[0]/lines[1] are the two agents' per-round reward strings; data
    starts at column 2.
    """
    # Initialize to -1: the original left learnTime unbound (raising
    # UnboundLocalError) when the loop body never matched either condition.
    learnTime = -1
    found = False
    for i in range(2, len(lines[1])):
        # First GN round after a reset marks a candidate settle point.
        if float(lines[0][i]) > 0 and float(lines[1][i]) == 0 and not found:
            learnTime = i
            found = True
        # Leaving the GN pattern breaks the streak; start looking again.
        if float(lines[0][i]) == 0 or float(lines[1][i]) > 0:
            found = False
            learnTime = -1
    return learnTime
def findTimeToNG(lines):
    """Return the column index from which agent 1 earns positive reward while
    agent 0 earns none through the end of the trial ("NG" outcome), or -1.

    lines[0]/lines[1] are the two agents' per-round reward strings; data
    starts at column 2.
    """
    # Initialize to -1: the original left learnTime unbound (raising
    # UnboundLocalError) when the loop body never matched either condition.
    learnTime = -1
    found = False
    for i in range(2, len(lines[1])):
        # First NG round after a reset marks a candidate settle point.
        if float(lines[0][i]) == 0 and float(lines[1][i]) > 0 and not found:
            learnTime = i
            found = True
        # Leaving the NG pattern breaks the streak; start looking again.
        if float(lines[0][i]) > 0 or float(lines[1][i]) == 0:
            found = False
            learnTime = -1
    return learnTime
def main(filename,label):
    """Entry point: aggregate all agentRewards.csv runs under filename."""
    calcGamesToCooperate(filename,label)

if __name__ == "__main__":
    # Usage: script.py <results_dir/> <label>
    main(sys.argv[1], sys.argv[2])
|
14,475 | b7b0d863ce8ea91d0c5ea89b93c6f97e33108d81 | # Functions
"""
Functions give us the ability to make our programs much more powerful and clean
while also saving us time. We use functions is because of the ability to write once and call repeatedly.
Overview
• How to use functions and what they are
• Passing data around using parameters
• Returning data from functions
• Understanding scope and its importance
• Creating a shopping cart program
"""
# Creating and Calling Functions
"""
A function is a block of code which only runs when it is called.
You can pass data, known as parameters, into a function.
A function can return data as a result.
"""
# writing a function
def printInfo():
    """Print a hard-coded name and age, one per line."""
    for detail in ("Name: John Smith", "Age: 45"):
        print(detail)

printInfo()  # the body above only runs when the function is called
printInfo()  # calling again repeats the output
# Function Stages
"""
There are two stages: function definition and function call.
Function definition is where you define the function name, any parameters it's supposed to accept,
and what it's supposed to do in the block of code associated with it.
The second stage is known as the function call. Functions will never run until called,
so you can define as many functions as you’d like, but if you never call one of them, then
nothing will happen. When you call a function, it will run the block of code within the
definition
"""
# UDF vs. Built-in
"""
Built-in functions are included in Python to serve a specific purpose to help build applications.
UDFs are user-defined functions.
"""
# performing a calculation in a function
def calc():
    """Add two fixed local numbers and print their sum (15)."""
    first, second = 5, 10
    total = first + second
    print(total)

calc()  # runs the block of code within calc and outputs 15
# Exercises
"""
1. Print Name: Define a function called myName, and have it print out your name
when called.
"""
# solution
def myName():
    """Print the author's first name."""
    print("Nick")

myName()
"""
2. Pizza Toppings: Define a function that prints out all your favorite pizza toppings
called pizzaToppings. Call the function three times.
"""
# solution
def pizzaToppings():
    """Print three favorite pizza toppings on a single line."""
    toppings = ("Cheese,", "Pepperoni,", "Chicken")
    print(*toppings)

pizzaToppings()
pizzaToppings()
pizzaToppings()
# Parameters
"""
Parameters are temporary variables declared on the function definition. To call a function with
different values, you need to use parameters. This is an arbitrary variable name that you use to reference the value within the function block; however, you usually want it to
be relevant to the data that you're working with. When calling the function, you would pass in the necessary value to run the block of code with.
"""
# Passing a Single Parameter
def printName(full_name):
    """Print the supplied name inside a short sentence."""
    message = "Your name is: {}".format(full_name)
    print(message)

printName("John Smith")
printName("Amanda")
# Multiple parameters
# passing multiple parameters into a function
def addNums(num1, num2):
    """Print an equation showing the sum of the two arguments."""
    total = num1 + num2
    print("{} + {} = {}".format(num1, num2, total))

addNums(5, 8)
addNums(3.5, 5.5)
# passing a list
# using a function to square all information
numbers1 = [2, 4, 5, 10]
numbers2 = [1, 3, 6]
# def squares(nums):
#     for num in nums:
#         print(num**2)
# squares(numbers1)
# squares(numbers2)
# Default Parameters
"""
A parameter can be associated with a default value. Take the value of pi for instance, it will always be 3.14,
so we can set a parameter called pi to that exact value to allow us to call the function with an already defined value for pi.
"""
# setting default parameter values
def calcArea(r, pi=3.14):
    """Print the area of a circle of radius r, using the default pi."""
    print("Area: {}".format(pi * (r ** 2)))

calcArea(2)  # radius of 2 -> Area: 12.56
"""
Note: Default parameters must always go after non-default parameters.
"""
# Making Parameters Optional
"""
Sometimes you need to make functions that take optional arguments. The best example
is always middle names; some people have them, and some don’t. If we wanted to write
a function that would print out properly for both situations, we would need to make the
middle name an optional parameter. We do this by assigning an empty string value as
the default:
"""
# setting default parameter values
def printName(first, last, middle=""):
    """Print a full name; the middle name is optional (default empty)."""
    if not middle:
        print("{} {}".format(first, last))
    else:
        print("{} {} {}".format(first, middle, last))

printName("John", "Smith")
printName("John", "Smith", "Paul")  # outputs with the middle name
# Keep in mind the order of our parameters! Parameters must line up from left to right according to the function definition.
# Named Parameter Assignment
"""
During the function call, you can explicity assign values into parameter names. This is useful
when you don’t want to mix up the order of values being passed in, as they work from left
to right by default. You can use parameter names to assign values for every parameter if
you choose, but it’s not necessary most of the time. Let’s check out an example:
"""
# explicity assigning values to parameters by referencing the name
def addNums(num1, num2):
    """Print num2 first, then num1 (demonstrates keyword assignment)."""
    for value in (num2, num1):
        print(value)

addNums(5, num2=2.5)
# *args
"""
The use of *args allows you to pass a variable number of arguments into a function. This
allows you to make functions more modular. The magic isn’t the “args” keyword here
though; it’s really the unary operator ( * ) that allows us to perform this feature. You could
theoretically replace the word args with anyone, like “ *data”, and it would still work.
However, args is the default and general standard throughout the industry.
"""
# using args parameter to take in a tuple of arbitrary value
def outputData(name, *args):
    """Show that *args collects extra positional values into a tuple."""
    print(type(args))
    for extra in args:
        print(extra)

outputData("John Smith", 5, True, "Jess")
# **kwargs
"""
Like args, kwargs allows us to take in an arbitrary number of values in a function;
however, it works as a dictionary with keyword arguments instead. Keyword arguments
are values passed in with keys, which allow us to access them easily within the function
block. Again, the magic here is in the two unary operators ( ** ) not the keyword of
kwargs. Let’s check it out:
"""
# using kwargs parameter to take in a dictionary of arbitrary value
def outputData(**kwargs):
    """Show that **kwargs collects keyword arguments into a dict."""
    print(type(kwargs))
    for key in ("name", "num"):
        print(kwargs[key])

outputData(name="John Smith", num=5, b=True)
# Exercises
"""
1. User Input: Ask the user to input a word, and pass that word into a function
that checks if the word starts with an uppercase. If it does output “True”,
otherwise “False”.
"""
# solution
def checkCaps(name):
    """Print "True" if name starts with an uppercase letter, else "False".

    Uses a one-character slice (name[:1]) so an empty string prints
    "False" instead of raising IndexError, and drops the redundant
    `== True` comparison.
    """
    print("True" if name[:1].isupper() else "False")
# Read the user's name and run the capitalisation check on it.
name = input("Enter your name")
checkCaps(name)
"""
2. No Name: Define a function that takes in two arguments, first_name and last_
name, and makes both optional. If no values are passed into the parameters, it
should output “No name passed in”; otherwise, it should print out the name.
"""
# solution
# def checkName(first_name, last_name):
# print("{} {}".format(first_name, last_name))
# checkName()
# Return Statement - is used to send info back to where the function call occured.
# using return keyword to return the sum of two numbers
def addNums(num1, num2):
    """Return (rather than print) the sum of the two arguments."""
    total = num1 + num2
    return total

num = addNums(5.5, 4.5)  # saves returned value into num
print(num)
print(addNums(10, 10))  # doesn't save returned value
# Ternary Operator
"""
A ternary operator is a shorthand python branching statement.
These operations can be used to assign values into a variable, or, in this case, to decide what a function returns.
"""
# shorthand syntax using a ternary operator
def searchList(aList, el):
    """Return True if el occurs in aList, otherwise False.

    `el in aList` is already a bool, so the original
    `True if ... else False` ternary was redundant.
    """
    return el in aList

result = searchList(["one", 2, "three"], 2)  # result = True
print(result)
# Exercises
"""
Full Name: Create a function that takes in a first and last name and returns the
two names joined together.
"""
# solution
def joinNames(firstName, lastName):
    """Return the two name parts concatenated together."""
    joined = firstName + lastName
    return joined

firstName, lastName = "Nick ", "Mwangemi"
print(joinNames(firstName, lastName))
"""
2. User Input: Within a function, ask for user input. Have this function return that
input to be stored in a variable outside of the function. Then print out the input.
"""
# solution
def userInput(userInput):
    """Prompt for the user's favourite sport and return the typed answer.

    NOTE(review): the `userInput` parameter shadows the function name and
    is never used -- the value passed in ("rugby") has no effect on the
    result.
    """
    faveSport = input("What's your favourite sport?")
    return faveSport

faveSport = userInput("rugby")
print(faveSport)
# Scope -deals with the accessibility of variables declared within a program
# Types: global, function, class
# Global - when you declare a variable to be accessible to an entire file or application
# Function scope is in reference to variables being declared and accessible only withi functions
# Global Scope Access - accessible to the rest of the file
# where global variables can be accessed
# number = 5
# def scopeTest():
# number += 1 # not accessible due to function level scope
# scopeTest()
"""
Note: When passed in, it only passes the value, not the variable
"""
# Handling Function Scope
"""
When dealing with variables declared in a function, you generally won't need to access
it outside of the function. However, in order to access that value, best practice is to return it:
"""
# accessing variables defined in a function
def scopeTest():
    """Return a function-local string to demonstrate function scope."""
    local_word = "function"
    return local_word

value = scopeTest()
print(value)
# In-Place Algorithms
"""
When passing variables into a function, you're simply passing the value of that variable and not the variable
itself. Such that the following will not alter the variable num
"""
num = 5

def changeNum(n):
    """Add 5 to the local copy n, then print the *global* num.

    Demonstrates that only the value is passed in: the caller's variable
    is untouched, so this always prints 5.
    """
    n = n + 5
    print(num)

changeNum(num)
# Exercises
"""
1. Names: Create a function that will change the list passed in with a parameter
of name at a given index. Such that if I were to pass in “Bill” and index 1,
it would change “Rich” to “Bill.” Use the list and function definition in the
following:
>>> names = ['Bob', 'Rich', 'Amanda']
>>> def changeValue(aList, name, index):
"""
# solution
# names = ['Bob', 'Rich', 'Amanda']
# def changeValue(aList, name, index):
# for name in names:
# names.insert(1, "Bill")
# return names
# print(changeValue(names, "Bill", 1))
|
14,476 | 69ebbe69f5f7d982b4125eb1aa4784fce62ef173 | from dot import Dot
class Dots:
    """A collection of dots lining the four corridors of the maze."""
    def __init__(self, WIDTH, HEIGHT,
                 LEFT_VERT, RIGHT_VERT,
                 TOP_HORIZ, BOTTOM_HORIZ):
        self.WIDTH = WIDTH
        self.HEIGHT = HEIGHT
        self.TH = TOP_HORIZ      # y position of the top corridor
        self.BH = BOTTOM_HORIZ   # y position of the bottom corridor
        self.LV = LEFT_VERT      # x position of the left corridor
        self.RV = RIGHT_VERT     # x position of the right corridor
        self.SPACING = 75        # distance between neighbouring dots
        self.EAT_DIST = 50       # how close pacman must be to eat a dot
        # Initialize four rows of dots, based on spacing and size of the maze
        self.top_row = [Dot(self.SPACING * i, self.TH)
                        for i in range(self.WIDTH//self.SPACING + 1)]
        self.bottom_row = [Dot(self.SPACING * i, self.BH)
                           for i in range(self.WIDTH//self.SPACING + 1)]
        self.left_col = [Dot(self.LV, self.SPACING * i)
                         for i in range(self.HEIGHT//self.SPACING + 1)]
        self.right_col = [Dot(self.RV, self.SPACING * i)
                          for i in range(self.HEIGHT//self.SPACING + 1)]

    def display(self):
        """Calls each dot's display method"""
        for group in (self.top_row, self.bottom_row,
                      self.left_col, self.right_col):
            for dot in group:
                dot.display()

    # PROBLEM 3: dot eating
    # # BEGIN CODE CHANGES
    def eat(self, x, y):
        """Given the location of pacman, remove every dot within EAT_DIST.

        x, y: coordinates in the pacman game, not pixel coordinates.
        """
        for group in (self.top_row, self.bottom_row,
                      self.left_col, self.right_col):
            # Iterate over a snapshot: the original removed elements from
            # the list it was iterating, which skips the element that
            # follows each removal and leaves adjacent dots uneaten.
            for dot in list(group):
                if (dot.x in range(x-self.EAT_DIST, x+self.EAT_DIST) and
                        dot.y in range(y-self.EAT_DIST, y+self.EAT_DIST)):
                    group.remove(dot)
    # # END CODE CHANGES

    def dots_left(self):
        """Returns the number of remaining dots in the collection"""
        return (len(self.top_row) +
                len(self.bottom_row) +
                len(self.left_col) +
                len(self.right_col))
|
14,477 | a42da82e4f13a061bf2588809ea5b41a03ae1c1a | import random
# Pick a random top-left coordinate for the enemy, then check whether a
# 2x4 rectangle anchored at that (x, y) is free of walls, bridges and enemies.
class Enemy1:
    """A 2-row by 4-column enemy that wanders randomly around the grid.

    The enemy occupies a 2x4 block of cells stamped 'E' in matrix; 'X' and
    '/' cells are obstacles, and 'E' cells belong to other enemies.
    """
    # (dx, dy) for each direction code: 1=right, 2=left, 3=down, 4=up.
    # One cell is 2 rows tall and 4 columns wide.
    _DELTAS = {1: (0, 4), 2: (0, -4), 3: (2, 0), 4: (-2, 0)}

    def __init__(self, matrix):
        """Pick a random aligned top-left corner and, if the 2x4 block there
        is free and not reserved, stamp the enemy into matrix."""
        x = random.randrange(2, 42, 2)
        y = random.randrange(4, 80, 4)
        self.x = x
        self.y = y
        # Unusable if any covered cell is an obstacle or another enemy...
        blocked = any(matrix[x + k][y + l] in ("X", "/", "E")
                      for k in range(2) for l in range(4))
        # ...or if the corner is one of the cells reserved near the start.
        if (x, y) in ((2, 4), (2, 8), (4, 4), (6, 4), (2, 12)):
            blocked = True
        if not blocked:
            for k in range(2):
                for l in range(4):
                    matrix[x + k][y + l] = "E"
            self.x = x
            self.y = y

    def move(self, matrix):
        """Try to move one cell in a random direction; stay put if blocked.

        The four direction branches of the original were identical except
        for the offsets, so they are collapsed into one via _DELTAS.
        """
        dx, dy = self._DELTAS[random.choice([1, 2, 3, 4])]
        # Probe the top-left cell of the destination block (same check the
        # original made for each direction).
        if matrix[self.x + dx][self.y + dy] in ("X", "/", "E"):
            return
        x = self.x
        y = self.y
        # Stamp the destination block, then clear the old one (the two
        # regions never overlap for a full-cell move).
        for i in range(x + dx, x + dx + 2):
            for j in range(y + dy, y + dy + 4):
                matrix[i][j] = "E"
        for i in range(x, x + 2):
            for j in range(y, y + 4):
                matrix[i][j] = " "
        self.x = x + dx
        self.y = y + dy
class Enemy2:
    """Second enemy type: identical 2x4 random-walk behaviour to Enemy1.

    NOTE(review): this class duplicated Enemy1 line for line; kept as a
    separate class to preserve the existing interface, but consider a
    shared base class.
    """
    # (dx, dy) for each direction code: 1=right, 2=left, 3=down, 4=up.
    _DELTAS = {1: (0, 4), 2: (0, -4), 3: (2, 0), 4: (-2, 0)}

    def __init__(self, matrix):
        """Pick a random aligned top-left corner and, if the 2x4 block there
        is free and not reserved, stamp the enemy into matrix."""
        x = random.randrange(2, 42, 2)
        y = random.randrange(4, 80, 4)
        self.x = x
        self.y = y
        # Unusable if any covered cell is an obstacle or another enemy...
        blocked = any(matrix[x + k][y + l] in ("X", "/", "E")
                      for k in range(2) for l in range(4))
        # ...or if the corner is one of the cells reserved near the start.
        if (x, y) in ((2, 4), (2, 8), (4, 4), (6, 4), (2, 12)):
            blocked = True
        if not blocked:
            for k in range(2):
                for l in range(4):
                    matrix[x + k][y + l] = "E"

    def move(self, matrix):
        """Try to move one cell in a random direction; stay put if blocked.

        The four direction branches of the original were identical except
        for the offsets, so they are collapsed into one via _DELTAS.
        """
        dx, dy = self._DELTAS[random.choice([1, 2, 3, 4])]
        # Probe the top-left cell of the destination block.
        if matrix[self.x + dx][self.y + dy] in ("X", "/", "E"):
            return
        x = self.x
        y = self.y
        # Stamp the destination block, then clear the old one.
        for i in range(x + dx, x + dx + 2):
            for j in range(y + dy, y + dy + 4):
                matrix[i][j] = "E"
        for i in range(x, x + 2):
            for j in range(y, y + 4):
                matrix[i][j] = " "
        self.x = x + dx
        self.y = y + dy
|
14,478 | 46db70c9076a2a1b68660847d5a828898b8c74fd | #!/usr/bin/python
def main():
    """Python 2 solution to "Designer PDF Viewer": read 26 letter heights
    and a word, then print the highlight-rectangle area (tallest letter
    height times word length)."""
    # Heights of letters 'a'..'z', one int each (py2 map returns a list).
    heights = map(int, raw_input().strip().split(' '))
    assert len(heights) == 26
    word = str(raw_input().strip())
    curr_height = 0
    # Track the tallest letter that appears in the word.
    for c in word:
        indice = ord(c) - 97  # 0-based alphabet index ('a' -> 0)
        height = heights[indice]
        if height > curr_height:
            curr_height = height
    print curr_height * len(word)

if __name__ == '__main__':
    main()
|
14,479 | 15fc914f7ad25bc46eb0a2431067fb21f853904b | #Params object for mono_dram
class DramParams(object):
    """Static configuration for the monocular DRAM (recurrent attention
    model) experiment: bookkeeping paths/periods, glimpse- and
    recurrent-network sizes, and the training schedule.

    Fields set to None are filled in at runtime (device selection and
    dataset-derived values such as num_train_examples / original_size).
    """
    ##Bookkeeping params
    #Base output directory
    out_dir = "/home/slundquist/mountData/ram/"
    #Inner run directory
    run_dir = out_dir + "/mono_dram/"
    #Save parameters
    save_period = 100000
    #output plots directory
    plot_period = 10000
    eval_period = 1718 # 1 epoch
    #Progress step
    progress = 100
    #Controls how often to write out to tensorboard
    write_step = 300
    #Flag for loading weights from checkpoint
    load = False
    load_file = ""
    #Device to run on
    device = None #device
    #data params
    num_train_examples = None #dataObj.num_train_examples
    #RAM params
    win_size = 12 #The size of each glimpse in pixels in both x and y dimension
    batch_size = 128 #Batch size of training
    eval_batch_size = 100 #Batch size of testing
    loc_std = 0.03 #Standard deviation of random noise added to locations
    det_eval = False #If true, use only mean of location network for evaluation
    original_size = None #dataObj.inputShape #Size of the input image in (y, x, f)
    glimpse_scales = 2 #Number of glimpse scales (resolutions) per fixation
    sensor_size = win_size**2 * glimpse_scales #Total size of input glimpse
    hg_size = 256 #Number of features in hidden layer for glimpse encode
    #hl_size = 256 #Number of features in hidden layer for location encode
    g_size = 256 #Number of features in second layer (combine g and l)
    l_size = 256 #Number of features in hidden layer for location network
    loc_dim = 2 #Number of dimensions used in the location output
    cell_size_0 = 512 #Size of hidden latent space of first LSTM layer
    cell_size_1 = 512 #Size of hidden latent space of second LSTM layer
    classnet_size = 256
    num_glimpses = 4 #Number of total glimpses
    num_classes = 10 #Number of output classes
    max_grad_norm = 5. #Clipping norm for gradient clipping
    reinforce_lambda = 1
    loc_pixel_ratio = .15 #Ratio of coord unit to image width unit
    num_steps = 500001 #Number of total steps
    lr_start = 1e-3 #Starting learning rate for lr decay
    lr_min = 0 #Minimum learning rate for lr decay
    lr_decay = .97 #Learning rate decay multiplier
    # Monte Carlo sampling
    M = 10
|
14,480 | 383a97a4d2803b4a5f970acc4691f6c59f362255 | from sqlalchemy import Column, Integer, String, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy import create_engine
Base = declarative_base()
# Place your database schema code here
# Example code:
class Student(Base):
    """ORM model for one row of the "students" table."""
    __tablename__ = "students"

    id = Column(Integer, primary_key=True)
    name = Column(String)
    year = Column(Integer)

    def __repr__(self):
        return "Student name: {}, Student year:{}".format(self.name, self.year)
14,481 | e47620f0637185999605b19204f479cde71587ed | # Ty Bergstrom
# remove_duplicates.py
# September 2020
# CSCE A401
# Software Engineering Project
# Input a dataset and find duplicates.
# Generate hashes for each image to find duplcate hashes.
# Important for ML projects because duplicates can cause bias, and you can get a lot of duplicates from scraping etc.
#
# python3 remove_duplicates.py -d ../original_dataset/mask -r tru
# python3 remove_duplicates.py -d ../original_dataset/without_mask -r tru
#
# optional arg -s to display the duplicates for assurance
# optional arg -r to actually remove duplicates for safety
#
# This is only set up to process one directory at a time, which is a good thing.
# Use preprocess.sh to easily run it on all the directories you need
#
# Note: I made it so that while a duplicate is being displayed,
# you have like 3 seconds to press the "s" key to pass deleting it
import argparse
import os
import sys

import cv2
import numpy as np
from imutils import paths
# Command-line interface: the dataset directory is required; removal and
# display of duplicates are opt-in for safety.
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True)
# NOTE(review): type=bool treats ANY non-empty string (even "false") as
# True, which is why the usage examples pass "-r tru"; store_true would
# be conventional but is left unchanged to keep the documented CLI.
ap.add_argument("-r", "--remove", type=bool, default=False)
ap.add_argument("-s", "--show", type=bool, default=False)
args = vars(ap.parse_args())

img_paths = list(paths.list_images(args["dataset"]))
if len(img_paths) < 1:
    # Pass the path as its own argument: the original concatenated the
    # image count onto the path string, printing e.g. ".../mask0 was empty".
    print("Err: The directory", args["dataset"], "was empty")
    sys.exit(1)

hashes = {}  # maps image hash -> list of paths that produced it
hash_size = 8
total_duplicates = 0
# First part: loop through the input images and compute a hash for each.
print("Generating hashes...")
for img_path in img_paths:
    img = cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv2.resize(img, (hash_size + 1, hash_size))
    # Difference hash (dHash): horizontal gradient between adjacent columns.
    diff = img[:, 1:] > img[:, :-1]
    # Pack the boolean gradient into a single integer hash.
    img_hash = sum([2 ** i for (i, v) in enumerate(diff.flatten()) if v])
    # Group this path with any earlier images that produced the same hash.
    # setdefault replaces the original get/assign pair, whose local name
    # `paths` shadowed the imported imutils `paths` module.
    hashes.setdefault(img_hash, []).append(img_path)
# Second part: loop through the hashes and find duplicates.
print("Finding duplicates...")
for (img_hash, hashed_paths) in hashes.items():
    # More than one image with this hash -> they are (near-)duplicates.
    if len(hashed_paths) > 1:
        # Optionally display all duplicates of this hash side by side.
        if args["show"]:
            montage = None
            for path in hashed_paths:
                image = cv2.imread(path)
                image = cv2.resize(image, (150, 150))
                if montage is None:
                    montage = image
                else:
                    montage = np.hstack([montage, image])
            cv2.imshow("Duplicates", montage)
            # Pressing "s" within the waitKey(5) window skips the removal
            # block below (the continue moves on to the next hash).
            # NOTE(review): the message names only the last montage path,
            # but ALL duplicates of this hash are kept -- confirm intended.
            if cv2.waitKey(5) == ord("s"):
                print("Duplicate image", path, "was not deleted")
                continue
        # Remove every duplicate except the first occurrence of this hash.
        if args["remove"]:
            for path in hashed_paths[1:]:
                os.remove(path)
                total_duplicates += 1
                print("Deleted duplicate image:", path)
        # End remove all duplicates of this one hash
        # End if removing
    # End if there are duplicates of this one hash
# End loop thru all hashes
print(total_duplicates, "duplicates were removed")
##
14,482 | f2e8a7da050b44ddb2632ecf9783e2f4e00e0f40 | import os
# Demo of the three basic file modes ('rb', 'a', 'w') on Windows paths.
p="E:\\办公资料\\python\\pythontest\\test.txt"
p1="E:\办公资料\python\pythontest\\test1.txt"
p2="D:\Log\\b.txt"
# Open the file and read every line as raw bytes first.
data=open(p,'rb')
s = data.readlines()
data.close()
# Process each line: decode from UTF-8 and strip the newline.
for lines in s:
    decodedLine = lines.decode('utf8')
    print(decodedLine.replace("\n",""))
writedata=open(p,'a',encoding= 'utf8')# mode 'a': append -- if the file exists the write position starts at the end, so new content follows the existing content; otherwise the file is created.
writedata.write("adf啊都是废话\n") # append this line to the file
writedata.close()
# Open again and read line by line, with an explicit text encoding.
writedata=open(p,'r',encoding= 'utf8')
s = writedata.readlines()
writedata.close()
# Process each line.
for lines in s:
    lines= lines.replace("\n","")# drop the trailing newline
    print(lines)
writedata1=open(p1,'w',encoding='utf8')# mode 'w': write-only -- truncates the file if it exists, otherwise creates it.
writedata1.write("adf啊都是废话\n") # write this line to the file
writedata1.close()
# Open and read line by line with an explicit encoding.
writedata1=open(p1,'r',encoding='utf8')
s = writedata1.readlines()
writedata1.close()
# Print each line (newline kept, so output appears double-spaced).
for lines in s:
    print(lines)
|
14,483 | 44804eda1ee17336531c27f64a1aa208f4b0062c | rows = int(input('Digit the # of rows: '))
collums = int(input('Digit the # of collums: '))
def tiktaktoe(rows, collums):
    """Print a tic-tac-toe style grid of the given size.

    Odd-numbered rows alternate cell spaces (odd columns) and '|' separators
    (even columns); even-numbered rows are a full line of '-' characters.

    :param rows: number of text rows to draw
    :param collums: number of characters per row
    :return: True when the grid fits the assumed terminal size
        (rows <= 30 and collums <= 108), False otherwise.
        The grid is drawn either way.
    """
    # The original duplicated the entire drawing loop in both branches of the
    # size check even though the drawing was identical; draw once and let only
    # the return value depend on the size.
    for r in range(1, rows + 1):
        if r % 2 != 0:
            # Cell row: a space for each odd column, '|' for each even one.
            print("".join(" " if c % 2 != 0 else "|" for c in range(1, collums + 1)))
        else:
            # Divider row.
            print("-" * collums)
    return rows <= 30 and collums <= 108
tiktaktoe(rows, collums)
print(tiktaktoe(rows, collums))
|
14,484 | 79b7f000712698a2a1cdf1030ea2d66005b39a65 | from my_library.modules.seq2vec_encoders.weighted_boe_encoder import WeightedBagOfEmbeddingsEncoder |
14,485 | 34c515d0e7263037ae926a5ca2698f72ef5ef87a | """
This test will run for annotator_mixin.py
"""
import unittest
from lxml import etree
from xmodule.annotator_mixin import get_extension, get_instructions, html_to_text
class HelperFunctionTest(unittest.TestCase):
    """
    Exercise the annotation-tool helpers: get_instructions, get_extension
    and html_to_text.
    """
    # Fixture data fed to the helpers under test.
    sample_xml = '''
<annotatable>
<instructions><p>Helper Test Instructions.</p></instructions>
</annotatable>
'''
    sample_sourceurl = "http://video-js.zencoder.com/oceans-clip.mp4"
    sample_youtubeurl = "http://www.youtube.com/watch?v=yxLIu-scR9Y"
    sample_html = '<p><b>Testing here</b> and not bolded here</p>'

    def test_get_instructions(self):
        """
        get_instructions() should wrap the <instructions> payload in a <div>,
        and return None when the element is absent.
        """
        tree = etree.fromstring(self.sample_xml)
        rendered = get_instructions(tree)
        assert rendered is not None
        assert "<div><p>Helper Test Instructions.</p></div>".strip() == rendered.strip()
        # An <annotatable> without an <instructions> child yields no markup.
        tree = etree.fromstring('<annotatable>foo</annotatable>')
        assert get_instructions(tree) is None

    def test_get_extension(self):
        """
        get_extension() maps YouTube links to 'video/youtube' and other video
        URLs to a MIME type derived from the file extension.
        """
        assert 'video/youtube' == get_extension(self.sample_youtubeurl)
        assert 'video/mp4' == get_extension(self.sample_sourceurl)

    def test_html_to_text(self):
        """html_to_text() strips the markup and keeps the text content."""
        assert "Testing here and not bolded here" == html_to_text(self.sample_html)
|
14,486 | af045f9620f87127e94e099ac4d02a09d30426ab | #
-
*
-
coding
:
utf
-
8
-
*
-
"
"
"
h2
/
events
~
~
~
~
~
~
~
~
~
Defines
Event
types
for
HTTP
/
2
.
Events
are
returned
by
the
H2
state
machine
to
allow
implementations
to
keep
track
of
events
triggered
by
receiving
data
.
Each
time
data
is
provided
to
the
H2
state
machine
it
processes
the
data
and
returns
a
list
of
Event
objects
.
"
"
"
import
binascii
from
.
settings
import
ChangedSetting
_setting_code_from_int
class
Event
(
object
)
:
"
"
"
Base
class
for
h2
events
.
"
"
"
pass
class
RequestReceived
(
Event
)
:
"
"
"
The
RequestReceived
event
is
fired
whenever
request
headers
are
received
.
This
event
carries
the
HTTP
headers
for
the
given
request
and
the
stream
ID
of
the
new
stream
.
.
.
versionchanged
:
:
2
.
3
.
0
Changed
the
type
of
headers
to
:
class
:
HeaderTuple
<
hpack
:
hpack
.
HeaderTuple
>
.
This
has
no
effect
on
current
users
.
.
.
versionchanged
:
:
2
.
4
.
0
Added
stream_ended
and
priority_updated
properties
.
"
"
"
def
__init__
(
self
)
:
#
:
The
Stream
ID
for
the
stream
this
request
was
made
on
.
self
.
stream_id
=
None
#
:
The
request
headers
.
self
.
headers
=
None
#
:
If
this
request
also
ended
the
stream
the
associated
#
:
:
class
:
StreamEnded
<
h2
.
events
.
StreamEnded
>
event
will
be
available
#
:
here
.
#
:
#
:
.
.
versionadded
:
:
2
.
4
.
0
self
.
stream_ended
=
None
#
:
If
this
request
also
had
associated
priority
information
the
#
:
associated
:
class
:
PriorityUpdated
<
h2
.
events
.
PriorityUpdated
>
#
:
event
will
be
available
here
.
#
:
#
:
.
.
versionadded
:
:
2
.
4
.
0
self
.
priority_updated
=
None
def
__repr__
(
self
)
:
return
"
<
RequestReceived
stream_id
:
%
s
headers
:
%
s
>
"
%
(
self
.
stream_id
self
.
headers
)
class
ResponseReceived
(
Event
)
:
"
"
"
The
ResponseReceived
event
is
fired
whenever
response
headers
are
received
.
This
event
carries
the
HTTP
headers
for
the
given
response
and
the
stream
ID
of
the
new
stream
.
.
.
versionchanged
:
:
2
.
3
.
0
Changed
the
type
of
headers
to
:
class
:
HeaderTuple
<
hpack
:
hpack
.
HeaderTuple
>
.
This
has
no
effect
on
current
users
.
.
.
versionchanged
:
:
2
.
4
.
0
Added
stream_ended
and
priority_updated
properties
.
"
"
"
def
__init__
(
self
)
:
#
:
The
Stream
ID
for
the
stream
this
response
was
made
on
.
self
.
stream_id
=
None
#
:
The
response
headers
.
self
.
headers
=
None
#
:
If
this
response
also
ended
the
stream
the
associated
#
:
:
class
:
StreamEnded
<
h2
.
events
.
StreamEnded
>
event
will
be
available
#
:
here
.
#
:
#
:
.
.
versionadded
:
:
2
.
4
.
0
self
.
stream_ended
=
None
#
:
If
this
response
also
had
associated
priority
information
the
#
:
associated
:
class
:
PriorityUpdated
<
h2
.
events
.
PriorityUpdated
>
#
:
event
will
be
available
here
.
#
:
#
:
.
.
versionadded
:
:
2
.
4
.
0
self
.
priority_updated
=
None
def
__repr__
(
self
)
:
return
"
<
ResponseReceived
stream_id
:
%
s
headers
:
%
s
>
"
%
(
self
.
stream_id
self
.
headers
)
class
TrailersReceived
(
Event
)
:
"
"
"
The
TrailersReceived
event
is
fired
whenever
trailers
are
received
on
a
stream
.
Trailers
are
a
set
of
headers
sent
after
the
body
of
the
request
/
response
and
are
used
to
provide
information
that
wasn
'
t
known
ahead
of
time
(
e
.
g
.
content
-
length
)
.
This
event
carries
the
HTTP
header
fields
that
form
the
trailers
and
the
stream
ID
of
the
stream
on
which
they
were
received
.
.
.
versionchanged
:
:
2
.
3
.
0
Changed
the
type
of
headers
to
:
class
:
HeaderTuple
<
hpack
:
hpack
.
HeaderTuple
>
.
This
has
no
effect
on
current
users
.
.
.
versionchanged
:
:
2
.
4
.
0
Added
stream_ended
and
priority_updated
properties
.
"
"
"
def
__init__
(
self
)
:
#
:
The
Stream
ID
for
the
stream
on
which
these
trailers
were
received
.
self
.
stream_id
=
None
#
:
The
trailers
themselves
.
self
.
headers
=
None
#
:
Trailers
always
end
streams
.
This
property
has
the
associated
#
:
:
class
:
StreamEnded
<
h2
.
events
.
StreamEnded
>
in
it
.
#
:
#
:
.
.
versionadded
:
:
2
.
4
.
0
self
.
stream_ended
=
None
#
:
If
the
trailers
also
set
associated
priority
information
the
#
:
associated
:
class
:
PriorityUpdated
<
h2
.
events
.
PriorityUpdated
>
#
:
event
will
be
available
here
.
#
:
#
:
.
.
versionadded
:
:
2
.
4
.
0
self
.
priority_updated
=
None
def
__repr__
(
self
)
:
return
"
<
TrailersReceived
stream_id
:
%
s
headers
:
%
s
>
"
%
(
self
.
stream_id
self
.
headers
)
class
_HeadersSent
(
Event
)
:
"
"
"
The
_HeadersSent
event
is
fired
whenever
headers
are
sent
.
This
is
an
internal
event
used
to
determine
validation
steps
on
outgoing
header
blocks
.
"
"
"
pass
class
_ResponseSent
(
_HeadersSent
)
:
"
"
"
The
_ResponseSent
event
is
fired
whenever
response
headers
are
sent
on
a
stream
.
This
is
an
internal
event
used
to
determine
validation
steps
on
outgoing
header
blocks
.
"
"
"
pass
class
_RequestSent
(
_HeadersSent
)
:
"
"
"
The
_RequestSent
event
is
fired
whenever
request
headers
are
sent
on
a
stream
.
This
is
an
internal
event
used
to
determine
validation
steps
on
outgoing
header
blocks
.
"
"
"
pass
class
_TrailersSent
(
_HeadersSent
)
:
"
"
"
The
_TrailersSent
event
is
fired
whenever
trailers
are
sent
on
a
stream
.
Trailers
are
a
set
of
headers
sent
after
the
body
of
the
request
/
response
and
are
used
to
provide
information
that
wasn
'
t
known
ahead
of
time
(
e
.
g
.
content
-
length
)
.
This
is
an
internal
event
used
to
determine
validation
steps
on
outgoing
header
blocks
.
"
"
"
pass
class
_PushedRequestSent
(
_HeadersSent
)
:
"
"
"
The
_PushedRequestSent
event
is
fired
whenever
pushed
request
headers
are
sent
.
This
is
an
internal
event
used
to
determine
validation
steps
on
outgoing
header
blocks
.
"
"
"
pass
class
InformationalResponseReceived
(
Event
)
:
"
"
"
The
InformationalResponseReceived
event
is
fired
when
an
informational
response
(
that
is
one
whose
status
code
is
a
1XX
code
)
is
received
from
the
remote
peer
.
The
remote
peer
may
send
any
number
of
these
from
zero
upwards
.
These
responses
are
most
commonly
sent
in
response
to
requests
that
have
the
expect
:
100
-
continue
header
field
present
.
Most
users
can
safely
ignore
this
event
unless
you
are
intending
to
use
the
expect
:
100
-
continue
flow
or
are
for
any
reason
expecting
a
different
1XX
status
code
.
.
.
versionadded
:
:
2
.
2
.
0
.
.
versionchanged
:
:
2
.
3
.
0
Changed
the
type
of
headers
to
:
class
:
HeaderTuple
<
hpack
:
hpack
.
HeaderTuple
>
.
This
has
no
effect
on
current
users
.
.
.
versionchanged
:
:
2
.
4
.
0
Added
priority_updated
property
.
"
"
"
def
__init__
(
self
)
:
#
:
The
Stream
ID
for
the
stream
this
informational
response
was
made
#
:
on
.
self
.
stream_id
=
None
#
:
The
headers
for
this
informational
response
.
self
.
headers
=
None
#
:
If
this
response
also
had
associated
priority
information
the
#
:
associated
:
class
:
PriorityUpdated
<
h2
.
events
.
PriorityUpdated
>
#
:
event
will
be
available
here
.
#
:
#
:
.
.
versionadded
:
:
2
.
4
.
0
self
.
priority_updated
=
None
def
__repr__
(
self
)
:
return
"
<
InformationalResponseReceived
stream_id
:
%
s
headers
:
%
s
>
"
%
(
self
.
stream_id
self
.
headers
)
class
DataReceived
(
Event
)
:
"
"
"
The
DataReceived
event
is
fired
whenever
data
is
received
on
a
stream
from
the
remote
peer
.
The
event
carries
the
data
itself
and
the
stream
ID
on
which
the
data
was
received
.
.
.
versionchanged
:
:
2
.
4
.
0
Added
stream_ended
property
.
"
"
"
def
__init__
(
self
)
:
#
:
The
Stream
ID
for
the
stream
this
data
was
received
on
.
self
.
stream_id
=
None
#
:
The
data
itself
.
self
.
data
=
None
#
:
The
amount
of
data
received
that
counts
against
the
flow
control
#
:
window
.
Note
that
padding
counts
against
the
flow
control
window
so
#
:
when
adjusting
flow
control
you
should
always
use
this
field
rather
#
:
than
len
(
data
)
.
self
.
flow_controlled_length
=
None
#
:
If
this
data
chunk
also
completed
the
stream
the
associated
#
:
:
class
:
StreamEnded
<
h2
.
events
.
StreamEnded
>
event
will
be
available
#
:
here
.
#
:
#
:
.
.
versionadded
:
:
2
.
4
.
0
self
.
stream_ended
=
None
def
__repr__
(
self
)
:
return
(
"
<
DataReceived
stream_id
:
%
s
"
"
flow_controlled_length
:
%
s
"
"
data
:
%
s
>
"
%
(
self
.
stream_id
self
.
flow_controlled_length
_bytes_representation
(
self
.
data
[
:
20
]
)
)
)
class
WindowUpdated
(
Event
)
:
"
"
"
The
WindowUpdated
event
is
fired
whenever
a
flow
control
window
changes
size
.
HTTP
/
2
defines
flow
control
windows
for
connections
and
streams
:
this
event
fires
for
both
connections
and
streams
.
The
event
carries
the
ID
of
the
stream
to
which
it
applies
(
set
to
zero
if
the
window
update
applies
to
the
connection
)
and
the
delta
in
the
window
size
.
"
"
"
def
__init__
(
self
)
:
#
:
The
Stream
ID
of
the
stream
whose
flow
control
window
was
changed
.
#
:
May
be
0
if
the
connection
window
was
changed
.
self
.
stream_id
=
None
#
:
The
window
delta
.
self
.
delta
=
None
def
__repr__
(
self
)
:
return
"
<
WindowUpdated
stream_id
:
%
s
delta
:
%
s
>
"
%
(
self
.
stream_id
self
.
delta
)
class
RemoteSettingsChanged
(
Event
)
:
"
"
"
The
RemoteSettingsChanged
event
is
fired
whenever
the
remote
peer
changes
its
settings
.
It
contains
a
complete
inventory
of
changed
settings
including
their
previous
values
.
In
HTTP
/
2
settings
changes
need
to
be
acknowledged
.
hyper
-
h2
automatically
acknowledges
settings
changes
for
efficiency
.
However
it
is
possible
that
the
caller
may
not
be
happy
with
the
changed
setting
.
When
this
event
is
received
the
caller
should
confirm
that
the
new
settings
are
acceptable
.
If
they
are
not
acceptable
the
user
should
close
the
connection
with
the
error
code
:
data
:
PROTOCOL_ERROR
<
h2
.
errors
.
ErrorCodes
.
PROTOCOL_ERROR
>
.
.
.
versionchanged
:
:
2
.
0
.
0
Prior
to
this
version
the
user
needed
to
acknowledge
settings
changes
.
This
is
no
longer
the
case
:
hyper
-
h2
now
automatically
acknowledges
them
.
"
"
"
def
__init__
(
self
)
:
#
:
A
dictionary
of
setting
byte
to
#
:
:
class
:
ChangedSetting
<
h2
.
settings
.
ChangedSetting
>
representing
#
:
the
changed
settings
.
self
.
changed_settings
=
{
}
classmethod
def
from_settings
(
cls
old_settings
new_settings
)
:
"
"
"
Build
a
RemoteSettingsChanged
event
from
a
set
of
changed
settings
.
:
param
old_settings
:
A
complete
collection
of
old
settings
in
the
form
of
a
dictionary
of
{
setting
:
value
}
.
:
param
new_settings
:
All
the
changed
settings
and
their
new
values
in
the
form
of
a
dictionary
of
{
setting
:
value
}
.
"
"
"
e
=
cls
(
)
for
setting
new_value
in
new_settings
.
items
(
)
:
setting
=
_setting_code_from_int
(
setting
)
original_value
=
old_settings
.
get
(
setting
)
change
=
ChangedSetting
(
setting
original_value
new_value
)
e
.
changed_settings
[
setting
]
=
change
return
e
def
__repr__
(
self
)
:
return
"
<
RemoteSettingsChanged
changed_settings
:
{
%
s
}
>
"
%
(
"
"
.
join
(
repr
(
cs
)
for
cs
in
self
.
changed_settings
.
values
(
)
)
)
class
PingReceived
(
Event
)
:
"
"
"
The
PingReceived
event
is
fired
whenever
a
PING
is
received
.
It
contains
the
'
opaque
data
'
of
the
PING
frame
.
A
ping
acknowledgment
with
the
same
'
opaque
data
'
is
automatically
emitted
after
receiving
a
ping
.
.
.
versionadded
:
:
3
.
1
.
0
"
"
"
def
__init__
(
self
)
:
#
:
The
data
included
on
the
ping
.
self
.
ping_data
=
None
def
__repr__
(
self
)
:
return
"
<
PingReceived
ping_data
:
%
s
>
"
%
(
_bytes_representation
(
self
.
ping_data
)
)
class
PingAcknowledged
(
Event
)
:
"
"
"
Same
as
PingAckReceived
.
.
.
deprecated
:
:
3
.
1
.
0
"
"
"
def
__init__
(
self
)
:
#
:
The
data
included
on
the
ping
.
self
.
ping_data
=
None
def
__repr__
(
self
)
:
return
"
<
PingAckReceived
ping_data
:
%
s
>
"
%
(
_bytes_representation
(
self
.
ping_data
)
)
class
PingAckReceived
(
PingAcknowledged
)
:
"
"
"
The
PingAckReceived
event
is
fired
whenever
a
PING
acknowledgment
is
received
.
It
contains
the
'
opaque
data
'
of
the
PING
+
ACK
frame
allowing
the
user
to
correlate
PINGs
and
calculate
RTT
.
.
.
versionadded
:
:
3
.
1
.
0
"
"
"
pass
class
StreamEnded
(
Event
)
:
"
"
"
The
StreamEnded
event
is
fired
whenever
a
stream
is
ended
by
a
remote
party
.
The
stream
may
not
be
fully
closed
if
it
has
not
been
closed
locally
but
no
further
data
or
headers
should
be
expected
on
that
stream
.
"
"
"
def
__init__
(
self
)
:
#
:
The
Stream
ID
of
the
stream
that
was
closed
.
self
.
stream_id
=
None
def
__repr__
(
self
)
:
return
"
<
StreamEnded
stream_id
:
%
s
>
"
%
self
.
stream_id
class
StreamReset
(
Event
)
:
"
"
"
The
StreamReset
event
is
fired
in
two
situations
.
The
first
is
when
the
remote
party
forcefully
resets
the
stream
.
The
second
is
when
the
remote
party
has
made
a
protocol
error
which
only
affects
a
single
stream
.
In
this
case
Hyper
-
h2
will
terminate
the
stream
early
and
return
this
event
.
.
.
versionchanged
:
:
2
.
0
.
0
This
event
is
now
fired
when
Hyper
-
h2
automatically
resets
a
stream
.
"
"
"
def
__init__
(
self
)
:
#
:
The
Stream
ID
of
the
stream
that
was
reset
.
self
.
stream_id
=
None
#
:
The
error
code
given
.
Either
one
of
:
class
:
ErrorCodes
#
:
<
h2
.
errors
.
ErrorCodes
>
or
int
self
.
error_code
=
None
#
:
Whether
the
remote
peer
sent
a
RST_STREAM
or
we
did
.
self
.
remote_reset
=
True
def
__repr__
(
self
)
:
return
"
<
StreamReset
stream_id
:
%
s
error_code
:
%
s
remote_reset
:
%
s
>
"
%
(
self
.
stream_id
self
.
error_code
self
.
remote_reset
)
class
PushedStreamReceived
(
Event
)
:
"
"
"
The
PushedStreamReceived
event
is
fired
whenever
a
pushed
stream
has
been
received
from
a
remote
peer
.
The
event
carries
on
it
the
new
stream
ID
the
ID
of
the
parent
stream
and
the
request
headers
pushed
by
the
remote
peer
.
"
"
"
def
__init__
(
self
)
:
#
:
The
Stream
ID
of
the
stream
created
by
the
push
.
self
.
pushed_stream_id
=
None
#
:
The
Stream
ID
of
the
stream
that
the
push
is
related
to
.
self
.
parent_stream_id
=
None
#
:
The
request
headers
sent
by
the
remote
party
in
the
push
.
self
.
headers
=
None
def
__repr__
(
self
)
:
return
(
"
<
PushedStreamReceived
pushed_stream_id
:
%
s
parent_stream_id
:
%
s
"
"
headers
:
%
s
>
"
%
(
self
.
pushed_stream_id
self
.
parent_stream_id
self
.
headers
)
)
class
SettingsAcknowledged
(
Event
)
:
"
"
"
The
SettingsAcknowledged
event
is
fired
whenever
a
settings
ACK
is
received
from
the
remote
peer
.
The
event
carries
on
it
the
settings
that
were
acknowedged
in
the
same
format
as
:
class
:
h2
.
events
.
RemoteSettingsChanged
.
"
"
"
def
__init__
(
self
)
:
#
:
A
dictionary
of
setting
byte
to
#
:
:
class
:
ChangedSetting
<
h2
.
settings
.
ChangedSetting
>
representing
#
:
the
changed
settings
.
self
.
changed_settings
=
{
}
def
__repr__
(
self
)
:
return
"
<
SettingsAcknowledged
changed_settings
:
{
%
s
}
>
"
%
(
"
"
.
join
(
repr
(
cs
)
for
cs
in
self
.
changed_settings
.
values
(
)
)
)
class
PriorityUpdated
(
Event
)
:
"
"
"
The
PriorityUpdated
event
is
fired
whenever
a
stream
sends
updated
priority
information
.
This
can
occur
when
the
stream
is
opened
or
at
any
time
during
the
stream
lifetime
.
This
event
is
purely
advisory
and
does
not
need
to
be
acted
on
.
.
.
versionadded
:
:
2
.
0
.
0
"
"
"
def
__init__
(
self
)
:
#
:
The
ID
of
the
stream
whose
priority
information
is
being
updated
.
self
.
stream_id
=
None
#
:
The
new
stream
weight
.
May
be
the
same
as
the
original
stream
#
:
weight
.
An
integer
between
1
and
256
.
self
.
weight
=
None
#
:
The
stream
ID
this
stream
now
depends
on
.
May
be
0
.
self
.
depends_on
=
None
#
:
Whether
the
stream
*
exclusively
*
depends
on
the
parent
stream
.
If
it
#
:
does
this
stream
should
inherit
the
current
children
of
its
new
#
:
parent
.
self
.
exclusive
=
None
def
__repr__
(
self
)
:
return
(
"
<
PriorityUpdated
stream_id
:
%
s
weight
:
%
s
depends_on
:
%
s
"
"
exclusive
:
%
s
>
"
%
(
self
.
stream_id
self
.
weight
self
.
depends_on
self
.
exclusive
)
)
class
ConnectionTerminated
(
Event
)
:
"
"
"
The
ConnectionTerminated
event
is
fired
when
a
connection
is
torn
down
by
the
remote
peer
using
a
GOAWAY
frame
.
Once
received
no
further
action
may
be
taken
on
the
connection
:
a
new
connection
must
be
established
.
"
"
"
def
__init__
(
self
)
:
#
:
The
error
code
cited
when
tearing
down
the
connection
.
Should
be
#
:
one
of
:
class
:
ErrorCodes
<
h2
.
errors
.
ErrorCodes
>
but
may
not
be
if
#
:
unknown
HTTP
/
2
extensions
are
being
used
.
self
.
error_code
=
None
#
:
The
stream
ID
of
the
last
stream
the
remote
peer
saw
.
This
can
#
:
provide
an
indication
of
what
data
if
any
never
reached
the
remote
#
:
peer
and
so
can
safely
be
resent
.
self
.
last_stream_id
=
None
#
:
Additional
debug
data
that
can
be
appended
to
GOAWAY
frame
.
self
.
additional_data
=
None
def
__repr__
(
self
)
:
return
(
"
<
ConnectionTerminated
error_code
:
%
s
last_stream_id
:
%
s
"
"
additional_data
:
%
s
>
"
%
(
self
.
error_code
self
.
last_stream_id
_bytes_representation
(
self
.
additional_data
[
:
20
]
if
self
.
additional_data
else
None
)
)
)
class
AlternativeServiceAvailable
(
Event
)
:
"
"
"
The
AlternativeServiceAvailable
event
is
fired
when
the
remote
peer
advertises
an
RFC
7838
<
https
:
/
/
tools
.
ietf
.
org
/
html
/
rfc7838
>
_
Alternative
Service
using
an
ALTSVC
frame
.
This
event
always
carries
the
origin
to
which
the
ALTSVC
information
applies
.
That
origin
is
either
supplied
by
the
server
directly
or
inferred
by
hyper
-
h2
from
the
:
authority
pseudo
-
header
field
that
was
sent
by
the
user
when
initiating
a
given
stream
.
This
event
also
carries
what
RFC
7838
calls
the
"
Alternative
Service
Field
Value
"
which
is
formatted
like
a
HTTP
header
field
and
contains
the
relevant
alternative
service
information
.
Hyper
-
h2
does
not
parse
or
in
any
way
modify
that
information
:
the
user
is
required
to
do
that
.
This
event
can
only
be
fired
on
the
client
end
of
a
connection
.
.
.
versionadded
:
:
2
.
3
.
0
"
"
"
def
__init__
(
self
)
:
#
:
The
origin
to
which
the
alternative
service
field
value
applies
.
#
:
This
field
is
either
supplied
by
the
server
directly
or
inferred
by
#
:
hyper
-
h2
from
the
:
authority
pseudo
-
header
field
that
was
sent
#
:
by
the
user
when
initiating
the
stream
on
which
the
frame
was
#
:
received
.
self
.
origin
=
None
#
:
The
ALTSVC
field
value
.
This
contains
information
about
the
HTTP
#
:
alternative
service
being
advertised
by
the
server
.
Hyper
-
h2
does
#
:
not
parse
this
field
:
it
is
left
exactly
as
sent
by
the
server
.
The
#
:
structure
of
the
data
in
this
field
is
given
by
RFC
7838
Section
3
#
:
<
https
:
/
/
tools
.
ietf
.
org
/
html
/
rfc7838
#
section
-
3
>
_
.
self
.
field_value
=
None
def
__repr__
(
self
)
:
return
(
"
<
AlternativeServiceAvailable
origin
:
%
s
field_value
:
%
s
>
"
%
(
self
.
origin
.
decode
(
'
utf
-
8
'
'
ignore
'
)
self
.
field_value
.
decode
(
'
utf
-
8
'
'
ignore
'
)
)
)
class
UnknownFrameReceived
(
Event
)
:
"
"
"
The
UnknownFrameReceived
event
is
fired
when
the
remote
peer
sends
a
frame
that
hyper
-
h2
does
not
understand
.
This
occurs
primarily
when
the
remote
peer
is
employing
HTTP
/
2
extensions
that
hyper
-
h2
doesn
'
t
know
anything
about
.
RFC
7540
requires
that
HTTP
/
2
implementations
ignore
these
frames
.
hyper
-
h2
does
so
.
However
this
event
is
fired
to
allow
implementations
to
perform
special
processing
on
those
frames
if
needed
(
e
.
g
.
if
the
implementation
is
capable
of
handling
the
frame
itself
)
.
.
.
versionadded
:
:
2
.
7
.
0
"
"
"
def
__init__
(
self
)
:
#
:
The
hyperframe
Frame
object
that
encapsulates
the
received
frame
.
self
.
frame
=
None
def
__repr__
(
self
)
:
return
"
<
UnknownFrameReceived
>
"
def
_bytes_representation
(
data
)
:
"
"
"
Converts
a
bytestring
into
something
that
is
safe
to
print
on
all
Python
platforms
.
This
function
is
relatively
expensive
so
it
should
not
be
called
on
the
mainline
of
the
code
.
It
'
s
safe
to
use
in
things
like
object
repr
methods
though
.
"
"
"
if
data
is
None
:
return
None
hex
=
binascii
.
hexlify
(
data
)
#
This
is
moderately
clever
:
on
all
Python
versions
hexlify
returns
a
byte
#
string
.
On
Python
3
we
want
an
actual
string
so
we
just
check
whether
#
that
'
s
what
we
have
.
if
not
isinstance
(
hex
str
)
:
#
pragma
:
no
cover
hex
=
hex
.
decode
(
'
ascii
'
)
return
hex
|
14,487 | 37329224c1ed4cb5cef3614fe3a1e2c303b893ba | #-*- coding:utf-8 -*-
###########################################################################################
# author: luu
# info: 获取查询信息,除icd、伤害、故障和评价信息
# Revision: 1.0
###########################################################################################
"""
功能说明: 获取查询信息,包含以下数据:
#患者姓名
#报告编码
#报告单位
#时间后果
#产品名称
#报告日期
#系统接受日期
#评价状态
#补充材料
#退回状态
用于主循环函数中
逻辑: 首先,取得所提供查询时间段字典参数的值,调用gather_report_total获得查询页数,进入循环遍历整个查询页面
向http://www.adrs.org.cn/MDR/scripts/mdr/smdr/queryCondition.jsp发送post请求,获得站点请求后,返回输出
输入参数: 所提供查询时间段字典参数
输出参数: 返回一个list对象,包含如下结构的字典:
filters = {
"obj_id": obj_id,
"r_u_name": obj_name,
"info": info,
"bs": _bs_,
"create_date": create_date,
"bccl": bccl,
"report_id": report_id,
"sned_date": sned_date
},
view对象、报告单位名称、评价状态、退回状态、接受日期 、补充材料、报告编码、报告日期
"""
from gather import mdr_get_page_count
from utils import *
from config import *
import codecs
import time
import os
from deletedance import dodelete
import simplejson as json
import mdr_parse
import login2
def MDR_Report_Query(_timedict):
"""Crawl every MDR report in the given time window and import each record.
_timedict: dict with 'beginTime' and 'endTime' bounds for the query.
NOTE: Python 2 code (print statements, u'' literals).
"""
#
t = _timedict
t_start = t['beginTime']
t_end = t['endTime']
# Fetch the total number of result pages for this time window.
pageTotal = mdr_get_page_count(_timedict)
print u'数据总页数:', pageTotal
# Walk every page, pull its records and hand each one to the importer.
for pageIdx in range(0, pageTotal):
print u'抓取第%s页数据!' % pageIdx
records = mdr_get_records_by_page_index(pageIdx, t_start, t_end)
for rec in records:
mdr_parse.mdr_import_report(rec)
print u'本次抓取任务完成!'
def mdr_get_records_by_page_index(pageIdx, t_start, t_end ):
'''Yield the record dicts found on result page pageIdx (100 records per page).'''
startPos = pageIdx*100 # pageIdx is the page number; startPos is the first record of this page
jim = {"funcID":"QUERY_DATA", "userID":78919, "operations":[{"actionName":"query", "operationDatas":[{"PROD_NAME_1540":"", "REG_NO_1540":"", "DEVICE_CLASS_ID_1540":"", "DEVICE_CLASS_NAME_1540":"", "REPORT_DATE_START":t_start, "REPORT_DATE_END":t_end, "REPORT_NUMBER_1540":"", "CREATE_DATE_START":"", "CREATE_DATE_END":"", "SUPERVISE_ORG_ID_1540":"","SUPERVISE_ORG_NAME_1540":"","MAN_NAME_1540":"","REPORT_UNIT_NAME_1540":"","PATIENT_NAME_1540":"","EVALUATE_DATE_START":"","EVALUATE_DATE_END":"","MANAGE_CATEGORY_1540":"","listid":"1540","start":startPos,"limit":100}]}]}
# POST the query for the current page and parse the JSON response.
_searchdata = send_post_json_me(totalHome, jim)
_data = json.loads(_searchdata)
# Number of records returned by this query.
searchdataid = _data['ResponseMessage']['operations'][0]['count']
# NOTE(review): 'records' is never used — the function yields 'filters' directly.
records = []
for i in range(0, searchdataid):
# report id
_report_id = _data['ResponseMessage']['operations'][0]['operationDatas'][i]['es'][4]['v']
# reporting unit name
_r_u_name = _data['ResponseMessage']['operations'][0]['operationDatas'][i]['es'][27]['v']
# report date
_sned_date = _data['ResponseMessage']['operations'][0]['operationDatas'][i]['es'][2]['v']
# acceptance date
_create_date = _data['ResponseMessage']['operations'][0]['operationDatas'][i]['es'][6]['v']
# evaluation status
_info = _data['ResponseMessage']['operations'][0]['operationDatas'][i]['es'][16]['v']
# view object
_fd_obj = _data['ResponseMessage']['operations'][0]['operationDatas'][i]['es'][14]['v']
# return (rejection) status
_bs = _data['ResponseMessage']['operations'][0]['operationDatas'][i]['es'][21]['v']
# supplementary material
_bccl = _data['ResponseMessage']['operations'][0]['operationDatas'][i]['es'][24]['v']
filters = {
"ViewID": unicode_to_str(data_check_type(_fd_obj)),
"ReportUnitName": unicode_to_str( data_check_type(_r_u_name)),
"ValueState": data_check_type(_info),
"BackState": data_check_type(_bs),
"ReceiveDate": data_check_type(_create_date),
"AddSource": data_check_type(_bccl),
"ReportID": data_check_type(_report_id),
"SendDate": data_check_type(_sned_date),
"CreateDate": data_check_type(_create_date)
}
yield filters
14,488 | 7b5bb8bd9082dedcee68d25f4f595424d457cfb9 | import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import numpy as np
from sklearn.cluster import KMeans

# make_blobs moved: sklearn.datasets.samples_generator was deprecated in
# scikit-learn 0.22 and removed in 0.24; the public import path is
# sklearn.datasets.
from sklearn.datasets import make_blobs

# Generate a 2-D dataset made of four well-separated blobs.
X, y_true = make_blobs(n_samples=500, centers=4,
                       cluster_std=0.40, random_state=0)
# Show the raw, unlabelled points.
plt.scatter(X[:, 0], X[:, 1], s=50)
plt.show()
# Fit k-means with 4 clusters on the data and label every point.
kmeans = KMeans(n_clusters=4)
kmeans.fit(X)
y_kmeans = kmeans.predict(X)
plt.scatter(X[:, 0], X[:, 1], c=y_kmeans, s=50, cmap='viridis')
# Overlay the learned cluster centres on the coloured scatter plot.
centers = kmeans.cluster_centers_
plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5)
plt.show()
14,489 | 1fddbd93f11efa9764877199935d77a4a26826d8 | # Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from pykube.config import KubeConfig
from pykube.http import HTTPClient
from pykube.http import HTTPError
from pykube.objects import Pod
from pykube.objects import Event
except ImportError:
raise RuntimeError('pykube is not installed. KubernetesJobTask requires pykube.')
class PodModel:
    """Lightweight view of a Kubernetes pod API object.

    Exposes the pod name, its run id, and — when the API object carries a
    status — the pod phase and IP.
    """

    def __init__(self, obj, run_id):
        """Extract the fields of interest from a raw pod dict.

        :param obj: pod object as returned by the Kubernetes API
            (a dict with at least a 'metadata' entry).
        :param run_id: pipeline run id this pod was selected by.
        """
        self.run_id = run_id
        self.name = obj['metadata']['name']
        # Default both to None so callers can read them safely even when the
        # API object has no 'status' yet (e.g. a pod that was just created);
        # previously the attributes were left undefined in that case and any
        # access raised AttributeError.
        self.status = None
        self.ip = None
        status = obj.get('status', {})
        if 'phase' in status:
            self.status = status['phase']
        if 'podIP' in status:
            self.ip = status['podIP']
class Kubernetes:
    """Thin wrapper around an in-cluster pykube HTTP client."""

    def __init__(self):
        # Authenticate with the service-account credentials mounted into
        # the pod; TLS verification is disabled for the cluster endpoint.
        self.__kube_api = HTTPClient(KubeConfig.from_service_account())
        self.__kube_api.session.verify = False

    def get_pod(self, run_id):
        """Return the first pod labelled with runid=run_id, or None.

        :param run_id: value of the 'runid' label selector.
        :return: a PodModel for the first matching pod, or None when no
            pod carries that label.
        """
        matching = Pod.objects(self.__kube_api).filter(selector={'runid': run_id})
        items = matching.response['items']
        if not items:
            return None
        return PodModel(items[0], run_id)
14,490 | 4a961ba5a2d087fd7487643522c1023b00bb6a5d | import jieba
from collections import defaultdict
def cut(s: str) -> list:
    """Segment Chinese text *s* into a list of tokens.

    jieba.cut returns a lazy generator; materialise it so the annotated
    return type (list) is actually honoured and the result can be
    re-iterated and indexed by callers.
    """
    return list(jieba.cut(s))
# Maps each segmented word to the list of source lines it appeared in.
word_map = defaultdict(list)


if __name__ == '__main__':
    # Index every line of the corpus by the words it contains.
    with open("zhwiki_pinyin.sc", encoding="utf-8") as src:
        for raw in src:
            sentence = raw.strip()
            for token in cut(sentence):
                word_map[token].append(sentence)
    # Write "<count>:<word>:<lines>" for every multi-character word.
    with open("result.txt", mode="w", encoding="utf-8") as out:
        for token, sentences in word_map.items():
            if len(token) == 1:
                continue
            out.write(f"{len(sentences)}:{token}:{sentences}\n")
14,491 | e048c92df027293401af4db26df6944e4dc99974 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\Programming\Soundexy\Soundexy\GUI\DesignerFiles\Gui.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1346, 755)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("../../smith/Desktop/Downloads/Player_-_Audio_Full-512.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.player = QtWidgets.QFrame(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.player.sizePolicy().hasHeightForWidth())
self.player.setSizePolicy(sizePolicy)
self.player.setMinimumSize(QtCore.QSize(0, 200))
self.player.setSizeIncrement(QtCore.QSize(0, 0))
self.player.setBaseSize(QtCore.QSize(0, 0))
self.player.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.player.setFrameShadow(QtWidgets.QFrame.Raised)
self.player.setObjectName("player")
self.gridLayout_2 = QtWidgets.QGridLayout(self.player)
self.gridLayout_2.setVerticalSpacing(0)
self.gridLayout_2.setObjectName("gridLayout_2")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout.setObjectName("verticalLayout")
self.playerAlbumImageLbl = QtWidgets.QLabel(self.player)
self.playerAlbumImageLbl.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.playerAlbumImageLbl.sizePolicy().hasHeightForWidth())
self.playerAlbumImageLbl.setSizePolicy(sizePolicy)
self.playerAlbumImageLbl.setMinimumSize(QtCore.QSize(180, 180))
self.playerAlbumImageLbl.setSizeIncrement(QtCore.QSize(64, 0))
self.playerAlbumImageLbl.setText("")
self.playerAlbumImageLbl.setScaledContents(True)
self.playerAlbumImageLbl.setObjectName("playerAlbumImageLbl")
self.verticalLayout.addWidget(self.playerAlbumImageLbl)
self.currentTimeLabel = QtWidgets.QLabel(self.player)
self.currentTimeLabel.setObjectName("currentTimeLabel")
self.verticalLayout.addWidget(self.currentTimeLabel)
self.gridLayout_2.addLayout(self.verticalLayout, 1, 0, 1, 1)
self.gridLayout.addWidget(self.player, 5, 0, 1, 1)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.searchLineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.searchLineEdit.setAcceptDrops(False)
self.searchLineEdit.setObjectName("searchLineEdit")
self.horizontalLayout.addWidget(self.searchLineEdit)
self.topbarLibraryLocalCheckbox = QtWidgets.QCheckBox(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.topbarLibraryLocalCheckbox.setFont(font)
self.topbarLibraryLocalCheckbox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.topbarLibraryLocalCheckbox.setChecked(True)
self.topbarLibraryLocalCheckbox.setObjectName("topbarLibraryLocalCheckbox")
self.horizontalLayout.addWidget(self.topbarLibraryLocalCheckbox)
self.line = QtWidgets.QFrame(self.centralwidget)
self.line.setWindowModality(QtCore.Qt.ApplicationModal)
self.line.setFrameShadow(QtWidgets.QFrame.Plain)
self.line.setFrameShape(QtWidgets.QFrame.VLine)
self.line.setObjectName("line")
self.horizontalLayout.addWidget(self.line)
self.topbarLibraryFreeCheckbox = QtWidgets.QCheckBox(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.topbarLibraryFreeCheckbox.setFont(font)
self.topbarLibraryFreeCheckbox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.topbarLibraryFreeCheckbox.setChecked(False)
self.topbarLibraryFreeCheckbox.setObjectName("topbarLibraryFreeCheckbox")
self.horizontalLayout.addWidget(self.topbarLibraryFreeCheckbox)
self.line_2 = QtWidgets.QFrame(self.centralwidget)
self.line_2.setFrameShadow(QtWidgets.QFrame.Plain)
self.line_2.setFrameShape(QtWidgets.QFrame.VLine)
self.line_2.setObjectName("line_2")
self.horizontalLayout.addWidget(self.line_2)
self.topbarLibraryPaidCheckbox = QtWidgets.QCheckBox(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.topbarLibraryPaidCheckbox.setFont(font)
self.topbarLibraryPaidCheckbox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.topbarLibraryPaidCheckbox.setChecked(False)
self.topbarLibraryPaidCheckbox.setTristate(False)
self.topbarLibraryPaidCheckbox.setObjectName("topbarLibraryPaidCheckbox")
self.horizontalLayout.addWidget(self.topbarLibraryPaidCheckbox)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.gridLayout.addLayout(self.horizontalLayout, 2, 0, 1, 1)
self.mainWidget = QtWidgets.QHBoxLayout()
self.mainWidget.setSizeConstraint(QtWidgets.QLayout.SetFixedSize)
self.mainWidget.setObjectName("mainWidget")
self.sidebar = QtWidgets.QTabWidget(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.sidebar.sizePolicy().hasHeightForWidth())
self.sidebar.setSizePolicy(sizePolicy)
self.sidebar.setMinimumSize(QtCore.QSize(300, 0))
self.sidebar.setTabPosition(QtWidgets.QTabWidget.South)
self.sidebar.setTabShape(QtWidgets.QTabWidget.Triangular)
self.sidebar.setElideMode(QtCore.Qt.ElideNone)
self.sidebar.setMovable(True)
self.sidebar.setObjectName("sidebar")
self.metaTab = QtWidgets.QWidget()
self.metaTab.setObjectName("metaTab")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.metaTab)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.metaAlbumImageLbl = QtWidgets.QLabel(self.metaTab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.metaAlbumImageLbl.sizePolicy().hasHeightForWidth())
self.metaAlbumImageLbl.setSizePolicy(sizePolicy)
self.metaAlbumImageLbl.setMinimumSize(QtCore.QSize(100, 100))
self.metaAlbumImageLbl.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.metaAlbumImageLbl.setSizeIncrement(QtCore.QSize(0, 0))
self.metaAlbumImageLbl.setBaseSize(QtCore.QSize(0, 0))
self.metaAlbumImageLbl.setText("")
self.metaAlbumImageLbl.setScaledContents(True)
self.metaAlbumImageLbl.setObjectName("metaAlbumImageLbl")
self.verticalLayout_2.addWidget(self.metaAlbumImageLbl)
self.metaArea = QtWidgets.QScrollArea(self.metaTab)
self.metaArea.setWidgetResizable(True)
self.metaArea.setObjectName("metaArea")
self.metaAreaContents = QtWidgets.QWidget()
self.metaAreaContents.setGeometry(QtCore.QRect(0, 0, 274, 258))
self.metaAreaContents.setObjectName("metaAreaContents")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.metaAreaContents)
self.verticalLayout_3.setSizeConstraint(QtWidgets.QLayout.SetFixedSize)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.metaArea.setWidget(self.metaAreaContents)
self.verticalLayout_2.addWidget(self.metaArea)
self.sidebar.addTab(self.metaTab, "")
self.playlistsTab = QtWidgets.QWidget()
self.playlistsTab.setObjectName("playlistsTab")
self.gridLayout_3 = QtWidgets.QGridLayout(self.playlistsTab)
self.gridLayout_3.setObjectName("gridLayout_3")
self.playlistDeleteBtn = QtWidgets.QPushButton(self.playlistsTab)
self.playlistDeleteBtn.setObjectName("playlistDeleteBtn")
self.gridLayout_3.addWidget(self.playlistDeleteBtn, 1, 1, 1, 1)
self.playlistAddBtn = QtWidgets.QPushButton(self.playlistsTab)
self.playlistAddBtn.setObjectName("playlistAddBtn")
self.gridLayout_3.addWidget(self.playlistAddBtn, 1, 0, 1, 1)
self.playlistWidgetContainer = QtWidgets.QGridLayout()
self.playlistWidgetContainer.setObjectName("playlistWidgetContainer")
self.gridLayout_3.addLayout(self.playlistWidgetContainer, 0, 0, 1, 2)
self.sidebar.addTab(self.playlistsTab, "")
self.mainWidget.addWidget(self.sidebar)
self.gridLayout.addLayout(self.mainWidget, 4, 0, 1, 1)
self.lowerPlayer = QtWidgets.QGridLayout()
self.lowerPlayer.setHorizontalSpacing(10)
self.lowerPlayer.setObjectName("lowerPlayer")
spacerItem1 = QtWidgets.QSpacerItem(0, 25, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.lowerPlayer.addItem(spacerItem1, 0, 5, 1, 1)
self.volumeSlider = QtWidgets.QSlider(self.centralwidget)
self.volumeSlider.setMinimumSize(QtCore.QSize(185, 0))
self.volumeSlider.setMaximumSize(QtCore.QSize(185, 16777215))
self.volumeSlider.setAutoFillBackground(False)
self.volumeSlider.setStyleSheet("")
self.volumeSlider.setMaximum(125)
self.volumeSlider.setProperty("value", 100)
self.volumeSlider.setOrientation(QtCore.Qt.Horizontal)
self.volumeSlider.setObjectName("volumeSlider")
self.lowerPlayer.addWidget(self.volumeSlider, 0, 0, 1, 1)
self.loopCheckBox = QtWidgets.QCheckBox(self.centralwidget)
self.loopCheckBox.setTristate(False)
self.loopCheckBox.setObjectName("loopCheckBox")
self.lowerPlayer.addWidget(self.loopCheckBox, 0, 2, 1, 1)
self.line_3 = QtWidgets.QFrame(self.centralwidget)
self.line_3.setFrameShape(QtWidgets.QFrame.VLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.lowerPlayer.addWidget(self.line_3, 0, 1, 1, 1)
self.messageLabel = QtWidgets.QLabel(self.centralwidget)
self.messageLabel.setText("")
self.messageLabel.setObjectName("messageLabel")
self.lowerPlayer.addWidget(self.messageLabel, 0, 7, 1, 1)
spacerItem2 = QtWidgets.QSpacerItem(0, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.lowerPlayer.addItem(spacerItem2, 0, 6, 1, 1)
self.selectedChannelsLayout = QtWidgets.QHBoxLayout()
self.selectedChannelsLayout.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
self.selectedChannelsLayout.setSpacing(0)
self.selectedChannelsLayout.setObjectName("selectedChannelsLayout")
self.lowerPlayer.addLayout(self.selectedChannelsLayout, 0, 4, 1, 1)
self.line_4 = QtWidgets.QFrame(self.centralwidget)
self.line_4.setFrameShape(QtWidgets.QFrame.VLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.lowerPlayer.addWidget(self.line_4, 0, 3, 1, 1)
self.gridLayout.addLayout(self.lowerPlayer, 6, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1346, 21))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuEdit = QtWidgets.QMenu(self.menubar)
self.menuEdit.setObjectName("menuEdit")
self.menuHelp = QtWidgets.QMenu(self.menubar)
self.menuHelp.setObjectName("menuHelp")
self.menuTools = QtWidgets.QMenu(self.menubar)
self.menuTools.setObjectName("menuTools")
self.menuPlayer = QtWidgets.QMenu(self.menubar)
self.menuPlayer.setObjectName("menuPlayer")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionSearch = QtWidgets.QAction(MainWindow)
self.actionSearch.setShortcutVisibleInContextMenu(True)
self.actionSearch.setObjectName("actionSearch")
self.actionPlay = QtWidgets.QAction(MainWindow)
self.actionPlay.setObjectName("actionPlay")
self.actionImport_Directory = QtWidgets.QAction(MainWindow)
self.actionImport_Directory.setObjectName("actionImport_Directory")
self.actionImport_Audio_File = QtWidgets.QAction(MainWindow)
self.actionImport_Audio_File.setObjectName("actionImport_Audio_File")
self.menuFile.addAction(self.actionImport_Directory)
self.menuFile.addAction(self.actionImport_Audio_File)
self.menuTools.addAction(self.actionSearch)
self.menuPlayer.addAction(self.actionPlay)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuEdit.menuAction())
self.menubar.addAction(self.menuTools.menuAction())
self.menubar.addAction(self.menuPlayer.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
self.sidebar.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Sound Library"))
self.currentTimeLabel.setText(_translate("MainWindow", "Current Time: 00:00:000"))
self.searchLineEdit.setPlaceholderText(_translate("MainWindow", "Search"))
self.topbarLibraryLocalCheckbox.setText(_translate("MainWindow", "Local"))
self.topbarLibraryLocalCheckbox.setShortcut(_translate("MainWindow", "Ctrl+L"))
self.topbarLibraryFreeCheckbox.setText(_translate("MainWindow", "Free"))
self.topbarLibraryFreeCheckbox.setShortcut(_translate("MainWindow", "Ctrl+F"))
self.topbarLibraryPaidCheckbox.setText(_translate("MainWindow", "Paid"))
self.topbarLibraryPaidCheckbox.setShortcut(_translate("MainWindow", "Ctrl+P"))
self.sidebar.setTabText(self.sidebar.indexOf(self.metaTab), _translate("MainWindow", "Metadata"))
self.playlistDeleteBtn.setText(_translate("MainWindow", "Delete"))
self.playlistAddBtn.setText(_translate("MainWindow", "Add"))
self.sidebar.setTabText(self.sidebar.indexOf(self.playlistsTab), _translate("MainWindow", "Playlists"))
self.loopCheckBox.setText(_translate("MainWindow", "Loop"))
self.menuFile.setTitle(_translate("MainWindow", "File"))
self.menuEdit.setTitle(_translate("MainWindow", "Edit"))
self.menuHelp.setTitle(_translate("MainWindow", "Help"))
self.menuTools.setTitle(_translate("MainWindow", "Tools"))
self.menuPlayer.setTitle(_translate("MainWindow", "Player"))
self.actionSearch.setText(_translate("MainWindow", "Search"))
self.actionSearch.setShortcut(_translate("MainWindow", "Return"))
self.actionPlay.setText(_translate("MainWindow", "Play"))
self.actionPlay.setShortcut(_translate("MainWindow", "Space"))
self.actionImport_Directory.setText(_translate("MainWindow", "Import Directory"))
self.actionImport_Audio_File.setText(_translate("MainWindow", "Import Audio File"))
|
14,492 | 1c667813a814275e6accc1a39fb10c619e0658ad | # coding: utf-8
import tweepy
import json
import os
from datetime import datetime
import pandas as pd
import credentials.credentials_twitter as cred
class Twitter_Analysis:
    """ copyright© 2019 — Luc Bertin - License MIT

    Search Twitter for the "eco_responsable" keyword dictionary, optionally
    restricted to one or several company names, dump the raw tweets to
    <fName>.json and flatten them into <fName>.csv.
    """
    # App credentials and API handle are shared by all instances.
    __consumer_key = cred.CONSUMER_KEY
    __token = cred.TOKEN
    __api = None
    def __init__(self, dico_file, maxTweets, filename, company_name, companies_CSV_file):
        # Load the keyword dictionary; dico_found tells whether it was readable.
        self.dico_file, self.dico_found = self.open_dico(dico_file)
        self.max_tweets = maxTweets
        self.tweetsPerQry = 100 # Can't change that (Twitter API page-size cap)
        self.fName = filename
        self.companies_list = self.open_csv_companies(companies_CSV_file)
        self.company_name = company_name
        self.__class__.__api, self.apifound = self.__class__.authentificator()
        if self.dico_found:
            # This is what we're searching for :
            self.searchQuery = " OR ".join(['\"' + item + '\"' for item in self.dico_file["eco_responsable"]])
            self.searchQuery += " -filter:retweets AND -filter:replies"
    @staticmethod
    def open_dico(dico_file):
        """Read the dictionary file; return (dict, True) or (0, False) on failure."""
        # NOTE(review): eval() on file content executes arbitrary code — only
        # safe with trusted, locally controlled dictionary files.
        try:
            with open(dico_file) as dico:
                return(eval(dico.read()), True)
        except:
            return(0, False)
    @staticmethod
    def open_csv_companies(companies_CSV_file):
        """Return the 'companies' column of the ;-separated CSV ([] on failure)."""
        try:
            if companies_CSV_file is not None:
                df = pd.read_csv(companies_CSV_file, encoding='utf-8', delimiter=';')
                companies_list = df["companies"].tolist()
                #companies_with_twitter_account = [str(x) for x in companies_list if str(x)!='nan']
                ##companies_with_twitter_account = [str(x) for x in companies_list if str(x)!='nan']
                return(companies_list)
            else:
                return([])
        except:
            print('No dataset found')
            return([])
    @classmethod
    def authentificator(cls):
        """Application-auth against the Twitter API; return (api, True) or (None, False)."""
        try:
            auth = tweepy.AppAuthHandler(cls.__consumer_key, cls.__token)
            api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
            print("Authentification success !")
            return api, True
            #cls.__api = api
            #return("sucess")
        except:
            print("Impossible to Authentifiate !")
            #return("fail")
            return None, False
    def search(self):
        """Run the retrieval: once per CSV company, once for company_name,
        or once with the bare keyword query when neither is provided."""
        if self.dico_found:
            try:
                print('Liste des companies : '+str(self.companies_list)+'\n')
            except:
                pass
            if len(self.companies_list)>0:
                for companie in self.companies_list:
                    print('\n'+repr(companie)+'\n')
                    parameter = " AND " + repr(companie)
                    print('Requete Finale : '+self.searchQuery+parameter)
                    self.twitter_retrieval(addParam=parameter)
            elif len(str(self.company_name))>0:
                parameter = " AND " + repr(self.company_name)
                print('Requete Finale : '+self.searchQuery+parameter)
                self.twitter_retrieval(addParam=parameter)
            else:
                print('Requete Finale : '+self.searchQuery)
                self.twitter_retrieval()
            return ('JSON disponible!', True)
        else:
            return ('Fichier de dictionnaire manquant', False)
    def twitter_retrieval(self, max_id=-1, sinceId=None, addParam=''):
        """Page backwards through search results and append them to <fName>.json.

        BUGFIX: ``addParam`` previously defaulted to ``None``, so the
        no-argument call made by search() (the "no company" path) crashed on
        ``self.searchQuery + addParam`` (str + NoneType).  An empty string
        leaves the query unchanged and is backward compatible for every
        caller that passed an explicit value.
        """
        # default to no upper limit, start from the most recent tweet matching the search query.
        tweetCount = 0
        ##print(max_id)
        ##if (not sinceId): print(2)
        print("Downloading max {0} tweets".format(self.max_tweets))
        with open(str(self.fName + '.json'), 'a',encoding='utf-8') as f:
            while tweetCount < self.max_tweets:
                try:
                    if (max_id <= 0):
                        if (not sinceId):
                            new_tweets = self.__class__.__api.search(q=self.searchQuery+addParam, count=self.tweetsPerQry)
                        else:
                            new_tweets = self.__class__.__api.search(q=self.searchQuery+addParam, count=self.tweetsPerQry,
                                                                     since_id=sinceId)
                    else:
                        if (not sinceId):
                            new_tweets = self.__class__.__api.search(q=self.searchQuery+addParam, count=self.tweetsPerQry,
                                                                     max_id=str(max_id - 1))
                        else:
                            new_tweets = self.__class__.__api.search(q=self.searchQuery+addParam, count=self.tweetsPerQry,
                                                                     max_id=str(max_id - 1),
                                                                     since_id=sinceId)
                    if not new_tweets:
                        print("No more tweets found")
                        break
                    for tweet in new_tweets:
                        # Persist a reduced dict of each tweet, one per line.
                        f.write(str({k:str(tweet._json.get(k, None)) for k in ('id_str', 'created_at', 'text', 'retweeted', 'user',
                                                                              'entities', 'lang', 'retweet_count', 'geo')})+"\n")
                    tweetCount += len(new_tweets)
                    print("Downloaded {0} tweets".format(tweetCount))
                    max_id = new_tweets[-1].id
                except tweepy.TweepError as e:
                    # Just exit if any error
                    print("some error : " + str(e))
                    break
        print ("Downloaded {0} tweets, Saved to {1}".format(tweetCount, self.fName+'.json'))
    def tweets_to_dataframe(self):
        """Flatten <fName>.json into <fName>.csv with derived date/user columns."""
        ##### tweet retrieval ######
        if self.dico_found:
            json_found=True
            try:
                lines = [line.rstrip('\n') for line in open(self.fName+'.json', 'r', encoding='utf-8')]
                new = [json.loads(json.dumps(eval(item))) for item in lines]
                df=pd.DataFrame(new)
            except:
                json_found=False
            if json_found:
                df.shape
                #final = [datetime.strptime(re.sub(r" \+[0-9]+", "", x), "%a %b %d %H:%M:%S %Y").date() for x in df.created_at.astype(str)]
                df.created_at = [datetime.strptime(x, "%a %b %d %H:%M:%S %z %Y") for x in df.created_at.astype(str)]
                df.retweeted = df.retweeted.astype(bool)
                df.id_str = df.id_str.astype(str)
                df['day'] = [x.day for x in df.created_at]
                df['month'] = [x.month for x in df.created_at]
                df['year'] = [x.year for x in df.created_at]
                #info from the tweet itself
                df['hashtags'] = list(map(lambda z:
                        [x.get('text') for x in eval(json.loads(json.dumps(z)))['hashtags']], df.entities))
                """df['hastags_occurencies'] = list(map(lambda z:
                        [len(x.get('indices')) for x in eval(json.loads(json.dumps(z)))['hashtags']], df.entities))"""
                df['user_mentions'] = list(map(lambda z:
                        [x.get('screen_name') for x in eval(json.loads(json.dumps(z)))['user_mentions']], df.entities))
                #info from the user who posts the tweet
                df['user_name'] = list(map(lambda z: eval(json.loads(json.dumps(z)))['name'], df.user))
                #df.where(df.user_name!=df.user_name2)
                df['user_location'] = list(map(lambda z: eval(json.loads(json.dumps(z)))['location'], df.user))
                df['user_followers_count'] = list(map(lambda z: eval(json.loads(json.dumps(z)))['followers_count'], df.user))
                df['user_friends_count'] = list(map(lambda z: eval(json.loads(json.dumps(z)))['friends_count'], df.user))
                df['tweet_coordinates'] = list(map(lambda z: tuple(eval(json.loads(json.dumps(z)))['coordinates']) if z != 'None' else 'None', df.geo))
                df['valeur_dico'] = list(map(lambda z: [x for x in ' '.join(self.dico_file['eco_responsable']).split(' ') if x.lower() in str(z)], df.text.str.lower()))
                del(df['entities'])
                del(df['created_at'])
                del(df['user'])
                del(df['geo'])
                df.to_csv(self.fName+'.csv', encoding='utf-8', index=False, sep=';')
                return('Dataframe disponible !'), True
            else:
                return('Fichier JSON non existant...'), False
        else:
            return 'Fichier de dictionnaire manquant', False
    def strengthen_dico(self):
        """Collect hashtag frequencies from matching tweets, most frequent first,
        as candidates for extending the keyword dictionary."""
        #my_set = set()
        b={}
        liste_hashtags=[]
        tweetCount=0
        while tweetCount < self.max_tweets:
            new_tweets = self.__class__.__api.search(q=self.searchQuery, count=self.tweetsPerQry, lang='fr')
            for tweet in new_tweets:
                a = tweet._json.get('entities', None).get('hashtags', None)
                if len(a)>0:
                    text_each_hashtag_in_tweet = [i.get('text', None).lower() for i in a]
                    liste_hashtags.extend(text_each_hashtag_in_tweet)
            tweetCount += len(new_tweets)
            print("Downloaded {0} tweets".format(tweetCount))
        for word in liste_hashtags:
            b[word] = b.get(word, 0) + 1
        sorted_dico = sorted(b.items(), key=lambda d: d[1], reverse=True)
        return(sorted_dico)
"""
analysis = Twitter_Analysis(dico_file='dico_file.txt',filename='tweets44',maxTweets=10000)
analysis.fName
dico_strengthen2 = analysis.strengthen_dico()
analysis.twitter_retrieval()
analysis.tweets_to_dataframe()
"""
|
14,493 | 1e2d818b6b30b51f6256f2ce55e34b4de360e93d | #!/usr/bin/env python
# -*- coding=utf-8 -*-
class DbTools:
    # Database task base class: copies connection and query settings out of
    # a task dict into private attributes for subclasses to use.
    def __init__(self, task):
        """Extract the connection, index and query settings from *task*.

        Raises KeyError if any expected key is missing, exactly like the
        original field-by-field assignments.
        """
        for attr, key in (
            ('_host', 'dbhost'),
            ('_port', 'dbport'),
            ('_name', 'dbname'),
            ('_user', 'dbuser'),
            ('_pass', 'dbpass'),
            ('_index_name', 'index_name'),
            ('_main_type', 'main_fld_type'),
            ('_time_fld', 'time_fld'),
            ('_task_type', 'task_type'),
            ('_fields', 'fields'),
            ('_task_name', 'task_name'),
            ('_sql', 'sql'),
        ):
            setattr(self, attr, task[key])
|
14,494 | 74dacb847802b87037e785bff3d020f4a6def5e1 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2018/10/21 21:13
# @Author : Administrator
# @Site :
# @File : Python练习实例8.py
# @Software: PyCharm
"""
题目:
输出 9*9 乘法口诀表
-----------------------------------------------------
思路:
分行与列考虑,共9行9列,i控制行,j控制列
-----------------------------------------------------
"""
# Print the 9x9 multiplication table, one row per value of i.
# Fix: the original used Python 2 `print` statements, which are a
# SyntaxError under Python 3 (the file header dates it to 2018/PyCharm,
# i.e. the Python 3 era).  The py3 equivalents below keep the same layout:
# a bare print() starts each row on a fresh line, and end=' ' keeps the
# row's entries on one line like the Py2 trailing comma.
for i in range(1, 10):
    print()
    for j in range(1, i + 1):
        print('%d*%d=%d' % (i, j, i * j), end=' ')
14,495 | 04008080ec9e22b6ba96d9f1b1c34947928c3408 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import jenkins
import time
import configparser
# Jenkins connection settings (server URL, login user, API token).
# SECURITY(review): credentials are hard-coded in source — move them to an
# environment variable or a config file outside version control.
JKS_CONFIG = {
    'jenkins_server_url': 'http://jks.pre.gomeplus.com:8787/jenkins',
    'user_id': 'admin',
    'api_token': '9d9fe1b771538d889550aa686d526ee2'
}
# Module-level Jenkins client: connects to the remote Jenkins master once
# at import time and is shared by all functions below.
server = jenkins.Jenkins(JKS_CONFIG['jenkins_server_url'], username=JKS_CONFIG['user_id'],
                         password=JKS_CONFIG['api_token'], timeout=3000)
# Poll whether build `num` of job `name` is still running.
def get_runing_build_status(name, num):
    """Return True while the given build is still in progress.

    Any API failure (for example the build record not existing yet) is
    treated as "still building" so the caller keeps polling.
    """
    try:
        return server.get_build_info(name, num)['building']
    except Exception:
        print(' ...waiting...')
        return True
# Trigger one parameterized build and wait for it; takes the job name and
# a dict of build parameters (the version number).
def build_job(name, parameter):
    """Interactively confirm, launch and wait for one Jenkins build.

    Blocks until the build finishes and returns its result string
    (e.g. 'SUCCESS' / 'FAILURE').
    """
    # Ask the operator to confirm before releasing this project.
    input("-----确认发%s,请回车-----" % name)
    # Remember the last build number so the new build is last+1.
    last_build_num = server.get_job_info(name)['lastBuild']['number']
    print ('[%s]开始构建....' % name)
    '''
    #构建job名为job_name的job(不带构建参数)
    # 构建 #构建job名为job_name的job(parameters是我穿进来的构建参数)
    #String参数化构建job名为job_name的job, 参数param_dict为字典形式,\
    # 如:param_dict= {"param1":“value1”, “param2”:“value2”}
    #server.build_job(job_name, parameters=param_dict)
    '''
    # Launch the parameterized build.
    server.build_job(name, parameters=parameter)
    # Poll the new build (last+1) every 5 s until it is no longer running.
    # NOTE(review): assumes no other trigger raced us to build last+1.
    while get_runing_build_status(name, last_build_num + 1):
        print( ' ...building...')
        time.sleep(5)
    # Fetch the finished build's result status.
    is_success = server.get_build_info(name, last_build_num + 1)['result']
    print ('[%s]构建结束,构建结果: 【%s】' % (name, is_success))
    return is_success
# Main driver (object-oriented): walks the full project list in deploy
# order and releases every project present in the deploy list.
class PlanList:
    # Class-level config parsers, shared by all instances.
    deploy_order = configparser.ConfigParser()
    deploy_list = configparser.ConfigParser()
    def __init__(self):
        # Read both ini files: the ordered catalogue of all projects and
        # the list of projects (with versions) to release this round.
        self.deploy_order.read('online_list.conf')# full project catalogue
        self.deploy_list.read('deployList.conf')# projects to deploy
    def get_plan_dict(self):
        """Release every listed project in catalogue order, retrying failures."""
        deploy_order, plan_order = [], []
        # Flatten every section of the catalogue into one ordered list.
        # NOTE(review): plan_order is never used.
        for section in self.deploy_order.sections():
            [deploy_order.append(x) for x in self.deploy_order.options(section)]
        print(deploy_order)
        # Walk the catalogue so deployments happen in the defined order.
        for order in deploy_order:
            # Only deploy projects that appear in the deploy list.
            if self.deploy_list.has_option('projects', order):
                print(order)
                # The option's value is the version to build.
                order_version = self.deploy_list.get('projects', order)
                print(order_version)
                param_dict = {"VERSION": order_version}
                # Retry until the build succeeds, unless the operator skips.
                while build_job(order, param_dict) != 'SUCCESS':
                    input_str = input("发版失败,输入【K】跳过,否则重发此项目:")
                    if 'K' == input_str:
                        break
                # Remove the project from the pending deploy list.
                self.deploy_list.remove_option('projects', order)
        # Anything still listed was not found in the catalogue — report it.
        if len(self.deploy_list.options('projects')) > 0:
            print ('下列工程没有发版')
            print (self.deploy_list.options('projects'))
if __name__ == '__main__':
    # Entry point: deploy every project listed in deployList.conf.
    plane = PlanList()
    plane.get_plan_dict()
|
14,496 | 02e4e0fa118fe95635b1cd38797547f050e6d053 | # Prolog
# Author: David Yurek, Stanley McClister, Evan Whitmer
# Team 3
# Section: 012
# Nov. 1, 2013
# Lab 10 Team
def main():
    """Read ten numbers and report whether any fall outside [15, 30]."""
    below_lower = False
    above_upper = False
    for count in range(1, 11):
        value = float(input('Enter number ' + str(count) + ' '))
        # A single value cannot be both above 30 and below 15.
        above_upper = above_upper or value > 30
        below_lower = below_lower or value < 15
    print('There was at least one number below the lower bound'
          if below_lower else
          'There were no numbers below the lower bound')
    print('There was at least one number above the upper bound'
          if above_upper else
          'There were no numbers above the upper bound')


main()
|
14,497 | 97579c9c6c507c3c4431865083e51a9de2f2d6f0 | class Position:
def __init__(self, seeds_value, bot_statu):
self.cells_player = [4 for i in range(seeds_value)]
self.cells_computer = [4 for i in range(seeds_value)]
self.computer_play = bot_statu
self.seeds_player = seeds_value
self.seeds_computer = seeds_value
def get_computer_seed(self):
return self.seeds_computer
def get_player_seeds(self):
return self.seeds_player
def illegal_moove(self,bot, indice):
if bot:
if self.cells_computer[indice] == 0:
raise ValueError("Case vide")
else:
if self.cells_player[indice] == 0:
raise ValueError("Case vide")
def play_move(self, bot, indice):
try:
game.illegal_moove(game.computer_play, indice)
except ValueError:
print("Case vide!")
exit(-1)
if bot:
seeds = self.cells_computer[indice]
self.cells_computer[indice] = 0
else:
seeds = self.cells_player[indice]
self.cells_player[indice] = 0
if bot:
taken_cell = indice + 12
i = indice + 12
else:
i = indice
taken_cell = indice
while seeds != 0:
if i == taken_cell:
i += 1
else:
i += 1
if i > 23:
i = 0
if i < 12:
self.cells_player[i] += 1
seeds -= 1
else:
self.cells_computer[i - 12] += 1
seeds -= 1
while True:
if i >= 12:
if bot and 1 < self.cells_computer[i - 12] < 4:
self.seeds_computer += self.cells_computer[i - 12]
self.cells_computer[i - 12] = 0
elif not bot and 1 < self.cells_computer[i - 12] < 4:
self.seeds_player += self.cells_computer[i - 12]
self.cells_computer[i - 12] = 0
else:
break
else:
if 1 < self.cells_player[i] < 4 and not bot:
self.seeds_player += self.cells_player[i]
self.cells_player[i] = 0
elif 1 < self.cells_player[i] < 4 and bot:
self.seeds_computer += self.cells_player[i]
self.cells_player[i] = 0
else:
break
if i <= 0:
i = 24
else:
i -= 1
def __repr__(self):
return f"Etat plateau joueur: {self.cells_player}\nEtat plateau CPU: {self.cells_computer[len(self.cells_computer)::-1]}\n"
# Interactive game loop: play until 8 or fewer seeds remain on the board.
game = Position(12, False)
while sum(game.cells_player) + sum(game.cells_computer) > 8:
    # Ask which pit to play.  NOTE(review): the "bot" turn is also chosen
    # by human input — presumably a placeholder until an AI is written.
    take = int(input(f"{'Bot turn : ' if game.computer_play else ''}Quel case?\n"))
    # Both branches are identical apart from flipping the turn flag.
    if not game.computer_play:
        game.play_move(game.computer_play, take)
        game.computer_play = True
    else:
        game.play_move(game.computer_play, take)
        game.computer_play = False
    print(game)
|
14,498 | 223f8f3af858838a3577025a99ba1dea21423613 |
import threading
import time
import random as r
class simDataCapture(object):
    """this class is a simulation for Zach J's MAV interface.
    It will spawn a thread that will wait a random amount of time, and
    then fill the fields of a mav-data struct which will be later paired with
    images. This data will be loaded into a hashtable keyed on time."""
    # NOTE(review): class-level list — shared by ALL instances.  Fine while
    # only one instance exists; per-instance state belongs in __init__.
    # NOTE(review): the list is mutated from the producer thread without a
    # lock; CPython list append/pop are atomic, but confirm this is intended.
    samples = []
    def getData(self):
        # Producer loop (runs forever on the thread started in __init__):
        # append one random pose sample, then sleep 0..1 s.
        while True:
            # 6-DOF pose simulated as six random ints in [-128, 128].
            pose = (r.randint(-128, 128), r.randint(-128, 128), r.randint(-128, 128), r.randint(-128, 128), r.randint(-128, 128), r.randint(-128, 128))
            gpsTime = time.time()
            sysTime = time.time()
            # Sample layout: (pose, gpsTime, sysTime).
            sample = (pose, gpsTime, sysTime)
            self.samples.append(sample)
            time.sleep(r.random())
    def getNextSample(self):
        # Pops the NEWEST sample (LIFO), despite the name suggesting FIFO.
        return self.samples.pop() #return a sample
    def getClosestSample(self, time):#this should eventually trim the data.
        # Linear scan for the sample whose system timestamp (index 2) is
        # closest to `time`; the match is removed from the buffer.
        sample = min(self.samples, key=lambda x:abs(x[2] - time))
        self.samples.remove(sample)
        return sample
    def __init__(self):
        # Start the background producer.  NOTE(review): not a daemon thread,
        # so the process will not exit while it runs.  (Python 2 syntax.)
        dataThread = threading.Thread(target=self.getData, name="dataGen", args=())
        print "spawning data thread"
        dataThread.start()
|
14,499 | 2f6c3383f3857a533e88376bab2984723b9ad629 | import json
import utils
import accepts
from urllib import request
from trans_order import TransportOrder
from order_sequence_head import OrderSequenceHead
class OrderTask(object):
    '''
    define how to create an order task: a named bundle of order-sequence
    heads and transport orders, serializable to JSON.
    attention: no try-catch in this class — errors are handled by the
    mb_default_catch_exception decorator on each method.
    '''
    def __init__(self, logname = 'order task'):
        '''
        Constructor: set up the logger, a default task name and an empty
        task payload.
        '''
        # _log must be defined for the decorator mb_default_catch_exception
        self._log = utils.logger().getLogger(logname)
        self._name = 'order_task_default_name'
        # Payload skeleton mirroring the wire format expected by encode().
        self._task = {
            "sequences" : [],
            "transports" : []
        }
    @utils.mb_default_catch_exception
    @accepts.mb_accepts(str)
    def setName(self, name):
        # Task name; used as the prefix for generated order/sequence names.
        self._name = name
    @utils.mb_default_catch_exception
    def getName(self):
        # Return the current task name.
        return self._name
    @utils.mb_default_catch_exception
    def getOrdersNum(self):
        # Number of transport orders added so far.
        return len(self._task.get('transports'))
    @utils.mb_default_catch_exception
    @accepts.mb_accepts(TransportOrder, int)
    def addTransportOrder(self, to, seqIndex, *orderIndex):
        '''
        there is at least one transport order in a task.
        seqIndex < 0 means "no wrapping sequence"; any extra *orderIndex
        values become dependencies on previously added orders.
        '''
        if seqIndex >= 0:
            to.setWrappingSequence(self.getSequenceNameByIndex(seqIndex))
        if len(orderIndex) > 0:
            for index in orderIndex:
                to.addOrderDependencies(self.getOrderNameByIndex(index))
        self._task.get('transports').append(to.getOrder())
    @utils.mb_default_catch_exception
    @accepts.mb_accepts(OrderSequenceHead)
    def addOrderSequenceHead(self, os):
        '''
        there is at least one order sequence in a task
        '''
        self._task.get('sequences').append(os.getSequence())
    @utils.mb_default_catch_exception
    @accepts.mb_accepts(int)
    def getOrderNameByIndex(self, idx):
        '''
        return an order's name, for dependencies
        '''
        if idx >= 0 and idx < len(self._task.get('transports')):
            return "%s_order_%d" % (self._name, idx)
        else:
            raise IndexError('illegal order index')
        # NOTE(review): unreachable — kept for the decorator's error path?
        return 'illegal order index'
    @utils.mb_default_catch_exception
    @accepts.mb_accepts(int)
    def getSequenceNameByIndex(self, idx):
        '''
        return an sequence's name, for warpping sequence
        '''
        if idx >= 0 and idx < len(self._task.get('sequences')):
            return "%s_seq_%d" % (self._name, idx)
        else:
            raise IndexError('illegal sequence index')
        # NOTE(review): unreachable — same pattern as getOrderNameByIndex.
        return 'illegal sequence index'
    @utils.mb_default_catch_exception
    def encode(self):
        '''
        Serialize the task payload to a JSON string.
        need verify!! use jsonschema TODO
        '''
        return json.dumps(self._task)
if __name__ == '__main__':
    # Smoke test: construct a default TransportOrder.
    t = TransportOrder()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.