text stringlengths 38 1.54M |
|---|
def get_steps(a, b):
    """Moves needed to reach cell (a, b) from (1, 1), one step at a time."""
    return (a - 1) + (b - 1)


n = int(input())
best = n - 1  # upper bound: the trivial 1 x N factorisation
for small in range(1, int(n ** 0.5) + 1):
    if n % small == 0:
        best = min(best, get_steps(small, n // small))
print(best)
import requests
from bs4 import BeautifulSoup
import os

# Fetch the forum thread and parse it with BeautifulSoup.
page_html = requests.get('http://tieba.baidu.com/p/3181528205').text
soup = BeautifulSoup(page_html, 'lxml')
print(soup)

# Only <img> tags with pic_type="0" are the actual post pictures.
images = soup.find_all('img', pic_type="0")

save_dir = os.path.join(os.getcwd(), '北理校花')
if not os.path.exists(save_dir):
    os.makedirs(save_dir)

for index, img_tag in enumerate(images):
    img_url = img_tag['src']
    img_content = requests.get(img_url).content
    # Fixed: write through an absolute path instead of calling os.chdir()
    # on every loop iteration (a process-global side effect).
    file_name = os.path.join(save_dir, '{}.jpg'.format(index))
    with open(file_name, 'wb') as wf:
        wf.write(img_content)
|
#-*- coding: UTF-8 -*-
__author__ = 'Childe'
#数组最大和
#输入一个数组A,求其连续子数组的最大和
def max_sub_array(A):
    """Kadane's algorithm: largest sum of a contiguous subarray of A.

    Returns 0 for an empty list.
    """
    if not A:
        return 0
    best_ending_here = best_overall = A[0]
    for value in A[1:]:
        best_ending_here = max(value, best_ending_here + value)
        best_overall = max(best_overall, best_ending_here)
    return best_overall


print(max_sub_array([-3, 1, -3, 4, -1, 2, 1]))
from openerp.osv import osv, fields
class res_users(osv.Model):
    """Adds an instant-messaging user search to res.users (old Odoo osv API)."""
    _inherit = "res.users"

    def im_search(self, cr, uid, name, limit=20, context=None):
        """ search users with a name and return its id, name and im_status """
        result = [];
        # find the employee group
        group_employee = self.pool['ir.model.data'].get_object_reference(cr, uid, 'base', 'group_user')[1]
        # base filter: active users only; optional ILIKE filter on the partner
        # name (passed as a query parameter, so no SQL injection via `name`)
        where_clause_base = " U.active = 't' "
        query_params = ()
        if name:
            where_clause_base += " AND P.name ILIKE %s "
            query_params = query_params + ('%'+name+'%',)
        # NOTE(review): `result` is still empty here, so this guard is always
        # true -- presumably a leftover from a multi-query version; confirm.
        if(len(result) < limit):
            # current user is excluded; only members of group_user are returned
            cr.execute('''SELECT U.id as id, P.name as name, COALESCE(S.status, 'offline') as im_status
                FROM res_users U
                    LEFT JOIN im_chat_presence S ON S.user_id = U.id
                    LEFT JOIN res_partner P ON P.id = U.partner_id
                WHERE '''+where_clause_base+'''
                    AND U.id != %s
                    AND EXISTS (SELECT 1 FROM res_groups_users_rel G WHERE G.gid = %s AND G.uid = U.id)
                ORDER BY P.name
                LIMIT %s
            ''', query_params + (uid, group_employee, limit))
            result = result + cr.dictfetchall()
        return result
# -*- encoding: utf-8 -*-
"""
@File : prac1371.py
@Time : 2020/5/20 7:53 下午
@Author : zhengjiani
@Email : 936089353@qq.com
@Software: PyCharm
不重复遍历子串的前提下,快速求出区间字母出现的次数->前缀和
一个区间可以用两个前缀和的差值,得到某个字母的出现次数
[(00000)2,(11111)2]
"""
class Solution:
    def findTheLongestSubstring(self, s: str) -> int:
        """Length of the longest substring of s in which every vowel
        ('aeiou') appears an even number of times.

        Prefix-parity technique: a 5-bit mask tracks the parity of each
        vowel's count; two equal prefix masks bound a valid substring.
        """
        vowel_bit = {v: 1 << i for i, v in enumerate('aeiou')}
        first_seen = [-1] * 32  # first prefix length at which a mask occurred
        first_seen[0] = 0       # the empty prefix has mask 0
        mask = 0
        longest = 0
        for pos, ch in enumerate(s):
            bit = vowel_bit.get(ch)
            if bit is not None:
                mask ^= bit
            if first_seen[mask] == -1:
                first_seen[mask] = pos + 1
            else:
                longest = max(longest, pos + 1 - first_seen[mask])
        return longest
if __name__ == '__main__':
    # Quick smoke test; expected output is 13.
    sample = "eleetminicoworoep"
    solver = Solution()
    print(solver.findTheLongestSubstring(sample))
class Scene(object):
def enter(self):
pass
class Engine(object):
def __init__(self, scene_map):
pass
def play(self):
pass
class Death(Scene):
r
def enter(self):
pass
class CentralCorridor(Scene):
def enter(self):
print "You wake up after being knocked out by an alien."
print "directly in front of you is a tall alien with a hammer."
print "He says 'Tell me a joke and you can pass'."
print "tell him a joke? y or n."
joke = raw_input()
if joke == "y":
print "You tell himna dirty joke"
print "the goblin dies"
#engine moves the player to next scene
else:
#engine moves player to death scene
class LaserWeaponArmory(Scene):
def enter(self):
print "you enter the amroury and grab the bomb"
#engine moves the player to the next scene
class TheBridge(Scene):
def enter(Scene):
print "you plant the bomb on the bridge"
print " a goblin pulls out a gun and trys to shoot you"
print "what do you do?"
print "1. dodge."
print "2. take it like a man."
print "3. give up."
print ">please enter 1-3."
action = str(raw_input())
class EscapePod(Scene):
def enter(Scene):
pass
class Map(object):
def __init__(self, start_scene):
pass
def next_scene(self, scene_name):
pass
a_map = Map('central_corridor')
a_game = Engine(a_map)
a_game.play() |
def LED(number_repeat, teste_one, teste_two, teste_three):
res_teste_one = list(map(int, str(teste_one)))
sum = 0
for item_one in res_teste_one:
if((item_one == 1)):
sum += 2
elif((item_one == 2)):
sum += 5
elif(item_one == 3):
sum += 5
elif (item_one == 4):
sum += 4
elif (item_one == 5):
sum += 5
elif (item_one == 6):
sum += 6
elif (item_one == 7):
sum += 3
elif (item_one == 8):
sum += 7
elif (item_one == 9):
sum += 6
else:
sum += 6
print("leds" + " " + str(sum))
res_teste_two = list(map(int, str(teste_two)))
sum_two = 0
for item_two in res_teste_one:
if((item_two == 1)):
sum_two += 2
elif((item_two == 2)):
sum_two += 5
elif(item_two == 3):
sum_two += 5
elif (item_two == 4):
sum_two += 4
elif (item_two == 5):
sum_two += 5
elif (item_two == 6):
sum_two += 6
elif (item_two == 7):
sum_two += 3
elif (item_two == 8):
sum_two += 7
elif (item_two == 9):
sum_two += 6
else:
sum += 6
print("leds" + " " + str(sum_two))
res_teste_three = list(map(int, str(teste_three)))
sum_three = 0
for item_three in res_teste_three:
if((item_three == 1)):
sum_three += 2
elif((item_three == 2)):
sum_three += 5
elif(item_three == 3):
sum_three += 5
elif (item_three == 4):
sum_three += 4
elif (item_three == 5):
sum_three += 5
elif (item_three == 6):
sum_three += 6
elif (item_three == 7):
sum_three += 3
elif (item_three == 8):
sum_three += 7
elif (item_three == 9):
sum_three += 6
else:
sum += 6
print("leds" + " " + str(sum_three))
# LED(3, 1234567890, 344, 234)
# Per-digit LED cost on a seven-segment display (as strings, per original).
dic = {
    '0': '6',
    '1': '2',
    '2': '5',
    '3': '5',
    '4': '4',
    '5': '5',
    '6': '6',
    '7': '3',
    '8': '7',
    '9': '6'
}

number_of_cases = int(input())
for _ in range(number_of_cases):
    cases = int(input())
    print(cases)
    str_cases = str(cases)
    # Fixed: `print type(x)` is Python 2 syntax (a SyntaxError here); the
    # rest of this script already uses Python 3 print() calls.
    print(type(cases))
    print(type(str_cases))
    # `total` avoids shadowing the builtin `sum`
    total = 0
    for digit_char in str_cases:
        total += int(dic[digit_char])
    print("%i leds" % total)
from torchvision import datasets, models, transforms
import torch
import os
from dataloader import get_loader
from model import *
import pickle
import numpy as np
from tqdm import tqdm
import matplotlib
import cv2
import matplotlib.pyplot as plt
from PIL import Image
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
SAVE_STEP = 2000
MODEL_DIR = 'models/'
EMBED_SIZE = 200
LEARNING_RATE = 5e-4
EPOCH = 5
# decide your checkpoint loader
ENCODER_PATH = './models/encoder-4-82000.ckpt'
DECODER_PATH = './models/decoder-4-82000.ckpt'
LOAD_FROM_CHECKPOINT = False
# simple test help func
def sentence(vocab, sampled_ids):
    """Print the list of words for `sampled_ids`, looked up in vocab.idx_to_word."""
    words = [vocab.idx_to_word[word_id] for word_id in sampled_ids]
    print(words)
# a simple test
def sample(encoder, decoder, vocab):
    """Caption a few hard-coded training images and print each caption.

    Runs the encoder in eval mode, decodes greedily with decoder.predict,
    and stops after the '<end>' token (which is kept in the caption).
    """
    image_paths = [
        './data/resizedTrain2014/COCO_train2014_000000000659.jpg',
        './data/resizedTrain2014/COCO_train2014_000000000034.jpg',
        './data/resizedTrain2014/COCO_train2014_000000000801.jpg',
    ]
    for path in image_paths:
        raw_image = Image.open(path)
        # assumes images were resized to 256x256 RGB -- TODO confirm
        image_tensor = torch.Tensor(np.asarray(raw_image)).view((1, 256, 256, 3)).to(device)
        encoder.eval()
        feature = encoder(image_tensor)
        token_ids = decoder.predict(feature)[0].cpu().numpy()
        words = []
        for token_id in token_ids:
            word = vocab.idx_to_word[token_id]
            words.append(word)
            if word == '<end>':
                break
        print(' '.join(words))
def main():
    """Train the encoder/decoder image-captioning model on COCO data."""
    # load vocabulary and pre-trained word embeddings from disk
    f = open('./data/vocab.pkl', 'rb')
    vocab = pickle.load(f)
    f.close()
    f = open('./data/embed.pkl','rb')
    embed = pickle.load(f)
    f.close()
    data_loader = get_loader(vocab=vocab, batch_size=5, shuffle=True)
    total_length = len(data_loader)
    # Encoder
    encoder = EncoderNet(EMBED_SIZE).to(device)
    # vgg16 = models.vgg16(pretrained=True)
    # vgg16.cuda()
    # encoder.copy_params_from_vgg16(vgg16)
    # Decoder
    decoder = DecoderNet(embed_size=EMBED_SIZE, hidden_size=128, embeddic=embed, vocab_size=len(vocab.word_to_idx)).to(device)
    if LOAD_FROM_CHECKPOINT:
        encoder.load_state_dict(torch.load(ENCODER_PATH))
        decoder.load_state_dict(torch.load(DECODER_PATH))
    # decide loss function and optimizer
    criterion = nn.CrossEntropyLoss()
    # only the decoder plus the encoder's final fc layer are optimized
    params = list(decoder.parameters())+list(encoder.fc.parameters())
    optimizer = torch.optim.Adam(params, lr=LEARNING_RATE)
    # start training
    for epoch in range(EPOCH):
        for i, (images, captions, lengths) in enumerate(tqdm(data_loader)):
            images = images.to(device)
            captions = captions.to(device)
            # flatten padded captions to align with the decoder's packed output
            # (pack_padded_sequence presumably comes via `from model import *`
            # -- confirm)
            targets = pack_padded_sequence(captions, lengths, batch_first=True)[0]
            features = encoder(images)
            outputs = decoder(features, captions, lengths)
            loss = criterion(outputs, targets)
            encoder.zero_grad()
            decoder.zero_grad()
            loss.backward()
            optimizer.step()
            if i % 50 == 0:
                # perplexity = exp(cross-entropy), logged for readability
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Perplexity: {:5.4f}'.format(epoch + 1, EPOCH, i+1, total_length, loss.item(), np.exp(loss.item())))
            if i % 100 == 0 and i != 0:
                # periodic qualitative check: print sample captions
                sample(encoder, decoder, vocab)
            if (i + 1) % SAVE_STEP == 0:
                torch.save(decoder.state_dict(), os.path.join(
                    MODEL_DIR, 'decoder-{}-{}.ckpt'.format(epoch + 1, i + 1)))
                torch.save(encoder.state_dict(), os.path.join(
                    MODEL_DIR, 'encoder-{}-{}.ckpt'.format(epoch + 1, i + 1)))
    # targets=pack_padded_sequence(captions,lengths=lengths,batch_first=True)[0]
if __name__ == '__main__':
    # Make sure the checkpoint directory exists before training starts.
    os.makedirs(MODEL_DIR, exist_ok=True)
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.append("../")
import rospy
import socket
from std_msgs.msg import Int32, ColorRGBA
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import PointCloud2
import geometry_msgs.msg
import moveit_msgs.msg
import moveit_commander
from supervisor.msg import Force
from interactive_markers.interactive_marker_server import *
from interactive_markers.menu_handler import *
from visualization_msgs.msg import Marker
from kernel import InverseMultiquadricKernelPytouch
from gpis import MultiTaskGaussianProcessImplicitSurfaces, GaussianProcessImplicitSurfaces
import numpy as np
import math
import torch
import matplotlib.pyplot as plt
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from sklearn.metrics import mean_squared_error
import time
import tf
from geometry_msgs.msg import Quaternion
from geometry_msgs.msg import Vector3
import numpy
import math
class MoveUR5:
    """Thin MoveIt wrapper for driving a UR5 arm to target poses (Python 2 / ROS)."""

    def __init__(self):
        # MoveIt commander for the 'manipulator' planning group;
        # velocity/acceleration capped at 10% of maximum.
        self.manipulator = moveit_commander.MoveGroupCommander('manipulator')
        self.manipulator.set_max_acceleration_scaling_factor(0.1)
        self.manipulator.set_max_velocity_scaling_factor(0.1)
        print "========== Printing robot position =========="
        print self.manipulator.get_current_pose()
        print "============================================="
        rospy.sleep(1)

    def move_func(self, pose):
        # Plan to the target pose, execute the plan, then stop and clear targets.
        self.manipulator.set_pose_target(pose)
        plan = self.manipulator.plan()
        self.manipulator.execute(plan)
        rospy.sleep(0.1)
        self.manipulator.stop()
        self.manipulator.clear_pose_targets()

    def euler_to_quaternion(self, euler):
        """Convert Euler Angles to Quaternion
        euler: geometry_msgs/Vector3
        quaternion: geometry_msgs/Quaternion
        """
        q = tf.transformations.quaternion_from_euler(euler.x, euler.y, euler.z)
        return Quaternion(x=q[0], y=q[1], z=q[2], w=q[3])

    def main(self):
        # Build a target pose at the current position with an orientation
        # derived from a hard-coded surface normal.
        current_pose = self.manipulator.get_current_pose().pose
        self.start_pose = geometry_msgs.msg.Pose()
        self.start_pose.position = current_pose.position
        normal = np.array([0, 0, 1])
        # normal = np.array([1, 0.2, 1])
        normal = normal / np.linalg.norm(normal)
        print(normal)
        # normal2 = np.array([0.1, 0.1, -0.9])
        # normal2 /= abs(normal2)
        # NOTE(review): these atan2 pairings look like an ad-hoc mapping of
        # the normal onto roll/pitch/yaw -- confirm the intended convention.
        n_x = math.atan2(normal[1], normal[0])
        n_y = math.atan2(normal[2], normal[1])
        n_z = math.atan2(normal[0], normal[2])
        print(n_x, n_y, n_z)
        print(math.degrees(n_x))
        print(math.degrees(n_y))
        print(math.degrees(n_z))
        # print quaternion_a_b(normal, normal2)
        alpha, beta, gamma = n_x, n_y, n_z
        pose = current_pose.orientation
        print(pose)
        # current orientation, printed for debugging only
        angles = tf.transformations.euler_from_quaternion([pose.x, pose.y, pose.z, pose.w])
        print "angle:", angles
        print angles
        print angles[0]/3.14*180
        print angles[1]/3.14*180
        print angles[2]/3.14*180
        # alpha = angles[0]
        # beta = angles[1]
        # gamma = angles[2]
        q = tf.transformations.quaternion_from_euler(alpha, beta, gamma)
        print(q)
        # print aaa
        self.start_pose.position = current_pose.position
        # self.start_pose.position.y = 0.221169067786
        # self.start_pose.position.z = 0.0855258019136
        self.start_pose.orientation.x = q[0]
        self.start_pose.orientation.y = q[1]
        self.start_pose.orientation.z = q[2]
        self.start_pose.orientation.w = q[3]
        print(self.start_pose)
        self.move_func(self.start_pose)
if __name__ == '__main__':
    # disable_signals: the script handles shutdown itself rather than
    # letting rospy install its signal handlers.
    rospy.init_node('ur5_ik_velo', anonymous=True, disable_signals=True)
    ur5 = MoveUR5()
    ur5.main()
|
# Generated by Django 3.0.5 on 2020-04-25 22:56
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds `amount` and `shopper` fields to
    # the Payment model and constrains `status` to a fixed set of choices.
    # The one-off defaults (10 / 'eddy') only back-fill existing rows;
    # preserve_default=False removes them from the final schema.

    dependencies = [
        ('payment_gateway', '0002_auto_20200425_1228'),
    ]

    operations = [
        migrations.AddField(
            model_name='payment',
            name='amount',
            field=models.FloatField(default=10, help_text='Transaction amount'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='payment',
            name='shopper',
            field=models.CharField(default='eddy', help_text='User who initiated the transaction', max_length=100),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='payment',
            name='status',
            field=models.CharField(choices=[('CREATED', 'created'), ('SUCCESSFUL', 'successful'), ('FAILED', 'failed'), ('DISPUTED', 'disputed')], help_text='Payment status', max_length=20),
        ),
    ]
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
""" 项目启动入口 """
import sys, os.path
from wsgiref.simple_server import make_server
sys.path.append(os.path.dirname(__file__))
from kyger.kgcms import App
if __name__ == '__main__':
    # Serve the CMS WSGI app on every interface, port 8000, until killed.
    server = make_server('', 8000, App())
    server.serve_forever()
|
import kNN
"""
test_data_set = [[0, 0], [1, 1], [2, 3], [5,-1]]
data_set, labels = kNN.create_simple_data_set()
for i in range(len(test_data_set)):
t = kNN.classify0(test_data_set[i], data_set, labels, 3)
print(t)
"""
# Load the dating data set and show the feature matrix and label vector.
data_matrix, labels = kNN.file2matrix('datingTestSet2.txt')
print(data_matrix)
print(labels)
from pytest import mark
from leetcode.wiggle_sort_ii import Solution
from . import read_csv
@mark.timeout(2)
@mark.parametrize('nums', read_csv(__file__, parser=eval))
def test_wiggle_sort(nums):
    """After wiggleSort, nums must satisfy nums[0] < nums[1] > nums[2] < ...

    Even indices must be strict local minima, odd indices strict maxima.
    """
    Solution().wiggleSort(nums)
    for i, num in enumerate(nums):
        if i % 2 == 0:
            if i == 0:
                continue
            if nums[i - 1] <= num:
                print(i, num, nums[i - 1])  # context for the failing index
            assert nums[i - 1] > num
        else:
            if nums[i - 1] >= num:
                print(i, num, nums[i - 1])
            assert nums[i - 1] < num
@mark.parametrize(
    'nums, expect', (
        ([1], 1),
        ([1, 2], 1),
        ([1, 2, 1], 1),
        ([1, 2, 3], 2),
        ([1, 2, 3, 4], 2),
        ([1, 2, 3, 4, 5], 3),
        ([1, 1, 2, 2, 2, 1], 1),
        ([1, 1, 1, 1, 2, 2, 2], 1),
        ([1, 1, 1, 2, 2, 2, 2], 2),
    )
)
def test_kth_max_value(nums, expect):
    """Selecting k = len(nums) // 2 over the whole range must yield the median."""
    assert expect == Solution.kth_max_value(
        nums, 0,
        len(nums) - 1,
        len(nums) // 2
    )
@mark.parametrize(
    'i, expect', (
        (0, 0),
        (1, -1),
        (2, 1),
        (3, -2),
        (4, 2),
        (5, -3),
    )
)
def test_median_index(i, expect):
    """median_index maps 0,1,2,3,... to alternating offsets 0,-1,1,-2,..."""
    assert expect == Solution.median_index(i)
@mark.parametrize(
    'i, length, expect', (
        (0, 4, 1),
        (1, 4, 3),
        (2, 4, 0),
        (3, 4, 2),
        (0, 5, 1),
        (1, 5, 3),
        (2, 5, 0),
        (3, 5, 2),
        (4, 5, 4),
    )
)
def test_map_index(i, length, expect):
    """map_index must implement the wiggle-sort virtual index mapping."""
    assert expect == Solution.map_index(i, length)
@mark.parametrize(
    'nums, median', (
        ([1], 1),
        ([1, 2], 2),
        ([1, 1, 2], 1),
        ([1, 2, 3], 2),
        ([1, 2, 3, 4], 2),
        ([1, 2, 3, 4, 5], 3),
        ([1, 3, 3, 4, 5], 3),
        ([1, 3, 3, 4, 3], 3),
        ([1, 3, 3, 3, 3], 3),
        ([1, 1, 1, 1, 2, 2, 2], 1),
        ([3, 3, 3, 2, 2, 2, 3, 2, 1, 1, 2, 1, 2, 3, 3, 3, 1, 2], 2),
    )
)
def test_place_same_median(nums, median):
    """Every copy of the median must land on the virtual slots around mid."""
    Solution.place_same_median(nums, median)
    length = len(nums)
    mid = length // 2
    # the i-th duplicate of the median belongs at offset median_index(i)
    # from mid, translated through the virtual index mapping
    for i in range(nums.count(median)):
        mid_i = mid + Solution.median_index(i)
        map_i = Solution.map_index(mid_i, length)
        assert nums[map_i] == median
|
from abc import ABCMeta, abstractmethod
import csv
import json
from ast import literal_eval as le
class Searchclient:
    """Search-mode client used by Interface.

    NOTE(review): Interface.__modes refers to `SearchClient` (capital C),
    which does not match this class's name -- confirm the intended spelling.
    NOTE(review): execute() takes no `self`; Interface calls execute() on the
    class object itself (not an instance), so adding `self` would break it.
    """
    def execute():
        a = input('Search by author, title, or year? -> ')
        b = input()
class Interface:
    """Console dispatcher: asks for a mode and delegates to a client class."""

    def __init__(self):
        # NOTE(review): AddClient and DeleteClient are not defined in this
        # chunk, and 'SearchClient' differs from the Searchclient class
        # above -- confirm they exist elsewhere in the project.
        self.__modes = {'none': None, 'add': AddClient, 'search': SearchClient, 'delete': DeleteClient}
        self.__mode = self.__modes['none']

    def __set_mode(self):
        # Returns True when the user's input names a known mode.
        mode = input('add, search, or delete book? -> ')
        try:
            self.__mode = self.__modes[mode]
            return True
        except Exception:
            return False

    def execute(self):
        """Prompt for a mode and run it; raises ValueError on bad input."""
        if not self.__set_mode():
            raise ValueError('Invalid input')
        # NOTE(review): __mode holds a class object, so this calls execute()
        # on the class itself rather than on an instance.
        self.__mode.execute()
# Fixed: the guard read `if name == '__main__'`, which raises NameError --
# the dunder variable is `__name__`.
if __name__ == '__main__':
    a = Interface()
    a.execute()
# Can we find the lowest common ancestor of two nodes given the root of a binary tree?
# Definition for a Node.
class Node:
    """Binary-tree node with a parent pointer, plus three LCA strategies."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
        self.parent = None

    def lowestCommonAncestor(self, root, p, q):
        """Classic recursive LCA of p and q under root (None if absent)."""
        if root is None:
            return None
        if root is p or root is q:
            return root
        left_hit = self.lowestCommonAncestor(root.left, p, q)
        right_hit = self.lowestCommonAncestor(root.right, p, q)
        if left_hit and right_hit:
            # p and q sit in different subtrees: root is their ancestor
            return root
        return left_hit or right_hit

    def lowestCommonAncestor2(self, p, q):
        """LCA without a known root: climb parent links to find it first.

        Space: O(1) beyond the recursion of the inner search.
        """
        top = p
        while top.parent:
            top = top.parent

        def _lca(node, a, b):
            if node is None:
                return None
            if node is a or node is b:
                return node
            left_hit = _lca(node.left, a, b)
            right_hit = _lca(node.right, a, b)
            if left_hit and right_hit:
                return node
            return left_hit or right_hit

        return _lca(top, p, q)

    def lowestCommonAncestor3(self, p, q):
        """Two-pointer LCA (linked-list intersection trick), O(1) space.

        Each walker climbs to the root, then restarts from the *other*
        node; both meet at the lowest common ancestor.
        """
        walker_a = p
        walker_b = q
        while walker_a is not walker_b:
            walker_a = walker_a.parent if walker_a.parent else q
            walker_b = walker_b.parent if walker_b.parent else p
        return walker_a
|
import codecs
import functools
import json
import os
from dataclasses import dataclass
from typing import Optional, Dict, Any
from pyconfr_2019.grpc_nlp.protos import TweetFeaturesService_pb2_grpc
from pyconfr_2019.grpc_nlp.tools.find_free_port import find_free_port
from tweet_features.tweet_features_server import serve
try:
import grpc
except ImportError:
raise ModuleNotFoundError("grpc is needed in order to "
"launch RPC server (`pip install .[grpc]`)")
import mongomock
import pytest
from bson import json_util
# Code highly inspired from pytest-mongodb
# https://github.com/mdomke/pytest-mongodb/blob/develop/pytest_mongodb/plugin.py
_cache = {}
_cache_grpc = {
"server_instance": None, # type: Optional[grpc.Server]
"grpc_host_and_port": None, # type: str
} # type: Dict[str, Any]
@pytest.fixture(scope="function")
def mongodb(pytestconfig):
    """Per-test wrapper around an in-memory mongomock client.

    Usage: `mongodb.get_db('<fixture_name>')` returns a clean database,
    optionally pre-seeded from <mongodb_fixture_dir>/<fixture_name>.json.
    """
    def make_mongo_client():
        return mongomock.MongoClient()

    @dataclass
    class MongoWrapper:
        def get_db(self, fixture_name: str = None,
                   dbname: str = 'twitter_analyzer'):
            # fresh client per call; wipe the db, then optionally seed it
            client = make_mongo_client()
            db = client[dbname]
            self.clean_database(db)
            if fixture_name is not None:
                self.load_fixtures(db, fixture_name)
            return db

        @staticmethod
        def load_fixtures(db: mongomock.Database, fixture_name: str):
            # fixture dir: CLI option first, falling back to the ini setting
            basedir = (pytestconfig.getoption('mongodb_fixture_dir') or
                       pytestconfig.getini('mongodb_fixture_dir'))
            fixture_path = os.path.join(pytestconfig.rootdir, basedir,
                                        '{}.json'.format(fixture_name))
            if not os.path.exists(fixture_path):
                raise FileNotFoundError(fixture_path)
            # json_util.object_hook revives BSON types (dates, ObjectIds)
            loader = functools.partial(json.load,
                                       object_hook=json_util.object_hook)
            try:
                collections = _cache[fixture_path]
            except KeyError:
                # parse each fixture file only once per test session
                with codecs.open(fixture_path, encoding='utf-8') as fp:
                    _cache[fixture_path] = collections = loader(fp)
            for collection, docs in collections.items():
                mongo_collection = db[collection]  # type: mongomock.Collection
                mongo_collection.insert_many(docs)

        @staticmethod
        def clean_database(db):
            # NOTE(review): collection_names() is deprecated in pymongo 3+
            # (list_collection_names); works with mongomock -- confirm version.
            for name in db.collection_names(include_system_collections=False):
                db.drop_collection(name)

    return MongoWrapper()
@pytest.fixture(scope="session", autouse=True)
def close_tweet_features_server(request):
    """Session-wide finalizer: stop the cached gRPC server if one was started."""
    def stop_server():
        if _cache_grpc["server_instance"]:
            # stop(0) shuts the server down without a grace period
            _cache_grpc["server_instance"].stop(0)
            _cache_grpc["server_instance"] = None

    request.addfinalizer(stop_server)
@pytest.fixture(scope='module')
def free_port_for_grpc_server():
    """Reserve one unused TCP port per test module for the gRPC server."""
    return find_free_port()
@pytest.fixture(scope='function')
def mocked_tweet_features_rpc_server(mocker, free_port_for_grpc_server):
    """
    Spawn an instance of the tweet features service,
    only if one is not already available
    :param mocker:
    :param free_port_for_grpc_server:
    :return:
    """
    class Wrapper(object):
        @staticmethod
        def start(database=None):
            # Mock the database first
            mock_tweet_features = mocker.patch('tweet_features.tweet_features_service.StorageDatabase')
            if database:
                # Mock methods
                # `with StorageDatabase(...) as db:` in the service now yields
                # the supplied (mongomock) database
                mock_tweet_features.return_value.__enter__.return_value = database
            # start the module-cached server only once; later tests reuse it
            if _cache_grpc["server_instance"] is None:
                _cache_grpc["grpc_host_and_port"] = "localhost:{}".format(free_port_for_grpc_server)
                _cache_grpc["server_instance"] = serve(block=False,
                                                       grpc_host_and_port=_cache_grpc["grpc_host_and_port"])
            assert _cache_grpc["server_instance"] is not None

    return Wrapper()
@pytest.fixture(scope="function")
def tweet_features_rpc_stub(mongodb, mocked_tweet_features_rpc_server):
    """
    Create a new tweet features rpc stub and connect to the server
    Args:
        mongodb:
        mocked_tweet_features_rpc_server:
    Returns:
    """
    # Prepare mongo database
    db = mongodb.get_db('tweets')
    # Start storage server
    mocked_tweet_features_rpc_server.start(db)
    # plain (insecure) channel to the locally-spawned test server
    channel = grpc.insecure_channel(_cache_grpc["grpc_host_and_port"])
    stub = TweetFeaturesService_pb2_grpc.TweetFeaturesServiceStub(channel)
    return stub
|
# coding: utf-8
from procset import ProcSet
from oar.lib.hierarchy import find_resource_hierarchies_scattered
from oar.kao.slot import intersec_itvs_slots, Slot
def find_resource_hierarchies_job(itvs_slots, hy_res_rqts, hy):
    '''
    Find resources in interval for all resource subrequests of a moldable
    instance of a job
    '''
    result = ProcSet()
    for hy_res_rqt in hy_res_rqts:
        # one subrequest: counts to take at each hierarchy level, plus a
        # constraint interval set restricting where they may come from
        (hy_level_nbs, constraints) = hy_res_rqt
        hy_levels = []
        hy_nbs = []
        for hy_l_n in hy_level_nbs:
            (l_name, n) = hy_l_n
            hy_levels.append(hy[l_name])
            hy_nbs.append(n)
        # candidate intervals = requested constraints ∩ free slot intervals
        itvs_cts_slots = constraints & itvs_slots
        # union the per-subrequest allocations into one result set
        result = result | find_resource_hierarchies_scattered(itvs_cts_slots,
                                                              hy_levels,
                                                              hy_nbs)
    return result
def find_first_suitable_contiguous_slots(slots_set, job, res_rqt, hy):
    '''find first_suitable_contiguous_slot '''
    # res_rqt: one moldable request (moldable id, walltime, hierarchy requests)
    (mld_id, walltime, hy_res_rqts) = res_rqt
    itvs = ProcSet()
    slots = slots_set.slots
    cache = slots_set.cache
    # updated_cache = False
    # to not always begin by the first slots ( O(n^2) )
    # TODO:
    if job.key_cache and (job.key_cache[mld_id] in cache):
        sid_left = cache[job.key_cache[mld_id]]
    else:
        sid_left = 1
    # sid_left = 1 # TODO no cache
    sid_right = sid_left
    slot_e = slots[sid_right].e
    # print 'first sid_left', sid_left
    while True:
        # find next contiguous slots_time
        slot_b = slots[sid_left].b
        # print "slot_e, slot_b, walltime ", slot_e, slot_b, walltime
        # widen the window rightwards until it spans at least `walltime`
        while ((slot_e - slot_b + 1) < walltime):
            sid_right = slots[sid_right].next
            slot_e = slots[sid_right].e
        # if not updated_cache and (slots[sid_left].itvs != []):
        #    cache[walltime] = sid_left
        #    updated_cache = True
        # resources free across the whole [sid_left, sid_right] window
        itvs_avail = intersec_itvs_slots(slots, sid_left, sid_right)
        itvs = find_resource_hierarchies_job(itvs_avail, hy_res_rqts, hy)
        if (len(itvs) != 0):
            break
        # nothing fits here: slide the window start one slot to the right
        sid_left = slots[sid_left].next
    if job.key_cache:
        # remember where this request found room, to speed up similar jobs
        cache[job.key_cache[mld_id]] = sid_left
    return (itvs, sid_left, sid_right)
def assign_resources_mld_job_split_slots(slots_set, job, hy):
    '''
    Assign resources to a job and update by spliting the concerned slots -
    moldable version
    '''
    prev_t_finish = 2 ** 32 - 1  # large enough
    prev_res_set = ProcSet()
    # NOTE(review): initialised to a ProcSet but later unpacked as a 3-tuple;
    # this assumes job.mld_res_rqts is never empty -- confirm.
    prev_res_rqt = ProcSet()
    slots = slots_set.slots
    prev_start_time = slots[1].b
    # evaluate every moldable instance; keep the one finishing earliest
    for res_rqt in job.mld_res_rqts:
        (mld_id, walltime, hy_res_rqts) = res_rqt
        (res_set, sid_left, sid_right) = \
            find_first_suitable_contiguous_slots(slots_set, job, res_rqt, hy)
        # print("after find fisrt suitable")
        t_finish = slots[sid_left].b + walltime
        if (t_finish < prev_t_finish):
            prev_start_time = slots[sid_left].b
            prev_t_finish = t_finish
            prev_res_set = res_set
            prev_res_rqt = res_rqt
            prev_sid_left = sid_left
            prev_sid_right = sid_right
    # commit the winning moldable instance onto the job
    (mld_id, walltime, hy_res_rqts) = prev_res_rqt
    job.moldable_id = mld_id
    job.res_set = prev_res_set
    job.start_time = prev_start_time
    job.walltime = walltime
    # Take avantage of job.starttime = slots[prev_sid_left].b
    # print prev_sid_left, prev_sid_right, job.moldable_id , job.res_set,
    # job.start_time , job.walltime, job.mld_id
    slots_set.split_slots(prev_sid_left, prev_sid_right, job)
def schedule_id_jobs_ct(slots_sets, jobs, hy, id_jobs, security_time):
    '''
    Schedule loop with support for jobs container - can be recursive
    (recursivity has not be tested)
    '''
    # for k,job in iteritems(jobs):
    #    print "*********j_id:", k, job.mld_res_rqts[0]
    for jid in id_jobs:
        job = jobs[jid]
        # "inner" jobs are scheduled inside their container's slot set,
        # everything else in the 'default' one
        ss_id = 'default'
        if "inner" in job.types:
            ss_id = job.types["inner"]
        slots_set = slots_sets[ss_id]
        # slots_set.show_slots()
        assign_resources_mld_job_split_slots(slots_set, job, hy)
        if "container" in job.types:
            # NOTE(review): this Slot is constructed but never stored or
            # registered anywhere -- confirm whether it should be added to a
            # slots set for the container's inner jobs.
            Slot(1, 0, 0, job.res_set, job.start_time,
                 job.start_time + job.walltime - security_time)
|
import logging
import textwrap
import time
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict, Iterator, List
import feedparser
from bs4 import BeautifulSoup
from feedparser import FeedParserDict as FeedDict
from .enums import PostStatus
from .models import Post, Feed
DataDict = Dict[str, Any]
logger = logging.getLogger(__name__)
def get_text_from_html(html: str):
    """Strip markup from an HTML fragment and return its visible text.

    Script/style contents are removed, and whitespace is normalised so the
    result carries one non-empty chunk per line.
    """
    soup = BeautifulSoup(html, features='html.parser')
    # drop non-visible elements entirely
    for tag in soup(["script", "style"]):
        tag.extract()
    raw_text = soup.get_text()
    # strip each line, then split lines into space-separated chunks
    stripped_lines = (line.strip() for line in raw_text.splitlines())
    chunks = (piece.strip() for line in stripped_lines for piece in line.split(" "))
    # keep only non-empty chunks, one per output line
    return '\n'.join(chunk for chunk in chunks if chunk)
def time_struct_to_datetime(struct: time.struct_time) -> datetime:
    """Convert a local-time time.struct_time to a naive datetime.

    Fixed return annotation: the function returns a `datetime`; the old
    `-> dataclass` annotation was incorrect (it named the decorator).
    """
    return datetime.fromtimestamp(time.mktime(struct))
# Markdown-significant characters to strip from feed summaries.
_MARKDOWN_CHARS = str.maketrans('', '', '_*`[]()')


def parse_content(item: FeedDict) -> str:
    """Extract a short plain-text summary (max 300 chars) from a feed item.

    Markdown control characters are removed so downstream renderers do not
    interpret them.
    """
    content = get_text_from_html(item.get('summary') or '')
    content = textwrap.shorten(content, width=300, placeholder="...")
    # single C-level pass instead of seven chained str.replace() calls
    return content.translate(_MARKDOWN_CHARS)
def parse_date(item: FeedDict) -> datetime:
    """Best-available timestamp of a feed item: published, else updated."""
    struct = item.get('published_parsed') or item.get('updated_parsed')
    return time_struct_to_datetime(struct)
def get_posts(feed: Feed) -> Iterator[Post]:
    """Lazily yield a Post for every parseable entry of `feed`.

    Fixed annotation: this is a generator function, so callers receive an
    iterator of posts, not a list. Note the body runs lazily -- the fetch
    (and its exception handling) only happens on first iteration.
    Entries whose content or date cannot be parsed are skipped.
    """
    try:
        _feed = feedparser.parse(feed.url)
    except Exception as exc:
        logger.exception('Can not get feed url', exc_info=exc)
        # end the iteration; callers simply see no posts
        return
    for item in _feed.entries:
        try:
            content = parse_content(item)
        except Exception as exc:
            logger.exception('Can not parse feed', exc_info=exc)
            continue
        try:
            date = parse_date(item)
        except Exception:
            # entries without a usable date are silently dropped
            continue
        yield Post(
            title=item.title,
            body=content,
            link=item.link,
            date=date,
            feed=feed,
            status=PostStatus.new,
        )
|
import scipy.io as sio
import matplotlib.pyplot as plt
import seaborn as sns
import os
os.environ['CUDA_DEVICE_ORDER'] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from sklearn.decomposition import PCA
def run(ratio):
input_data =np.load('./data/noisy_sensor_data.npy').item()
test_input = input_data['test_input']
test_output = input_data['test_output']
train_input = input_data['train_input']
train_output = input_data['train_output']
idx = np.arange(500)
np.random.shuffle(idx)
train_input=train_input[idx]
train_output=train_output[idx]
train_input_max = []
train_output_max = []
test_input_max = []
test_output_max = []
for i, data_i in enumerate(train_input):
# normalize with training data
train_input_max += [np.max(data_i)]
train_input[i] = data_i/np.max(data_i)
data_i = train_output[i]
train_output_max += [np.max(data_i)]
train_output[i] = data_i/np.max(data_i)
for i, data_i in enumerate(test_input):
# normalize with testing data
test_input_max += [np.max(data_i)]
test_input[i] = data_i/np.max(data_i)
data_i = test_output[i]
test_output_max += [np.max(data_i)]
test_output[i] = data_i/np.max(data_i)
train_input_mag = np.expand_dims(np.asarray(train_input_max),1)
train_output_mag = np.expand_dims(np.asarray(train_output_max),1)
test_input_mag = np.expand_dims(np.asarray(test_input_max),1)
test_output_mag = np.expand_dims(np.asarray(test_output_max),1)
pca = PCA(n_components=500)
pca.fit(train_input)
train_input = pca.transform(train_input)
test_input = pca.transform(test_input)
batch_size = 16
num_time_steps = 500
input_pl = tf.placeholder(tf.float32, [batch_size, num_time_steps])
output_pl = tf.placeholder(tf.float32, [batch_size, 3000])
input_mag_pl = tf.placeholder(tf.float32, [batch_size, 1])
output_mag_pl = tf.placeholder(tf.float32, [batch_size, 1])
net = {}
net['enc1'] = x = slim.fully_connected(input_pl, 512, scope='enc/fc1')
net['enc2'] = x = slim.fully_connected(x, 512, scope='enc/fc2')
net['enc3'] = x = slim.fully_connected(x, 256, scope='enc/fc3')
net['enc4'] = x = slim.fully_connected(x, 256, scope='enc/fc4')
net['enc5'] = x = slim.fully_connected(x, 128, scope='enc/fc5')
net['enc6'] = x = slim.fully_connected(x, 128, scope='enc/fc6')
z = x
net['dec1'] = x = slim.fully_connected(x, 256, scope='dec/fc1')
net['dec2'] = x = slim.fully_connected(x, 256, scope='dec/fc2')
net['dec3'] = x = slim.fully_connected(x, 1024, scope='dec/fc3')
net['dec4'] = x = slim.fully_connected(x, 1024, scope='dec/fc4')
net['dec5'] = x = slim.fully_connected(x, 3000, scope='dec/fc5')
net['dec6'] = x = slim.fully_connected(x, 3000, activation_fn=None, scope='dec/fc6')
net['denoised'] = net['dec6'] #+ input_pl
net['cls1'] = x = slim.fully_connected(z, 64, scope='ppn/fc1')
net['cls11'] = x = slim.fully_connected(x, 64, scope='ppn/fc11')
# extra = tf.tile(input_mag_pl, (1,32))
# extra = tf.concat([net['cls1'], extra], 1)
net['cls2'] = x = slim.fully_connected(x, 16, scope='ppn/fc2')
net['cls21'] = x = slim.fully_connected(x, 16, scope='ppn/fc21')
# extra = tf.tile(input_mag_pl, (1,8))
# extra = tf.concat([net['cls2'], extra], 1)
# extra = net['cls2']
net['cls3'] = x = slim.fully_connected(x, 8, scope='ppn/fc3')
x = tf.concat([x, input_mag_pl], 1)
net['cls31'] = x = slim.fully_connected(x, 8, scope='ppn/fc31')
net['cls32'] = x = slim.fully_connected(x, 8, scope='ppn/fc32')
# extra = tf.tile(input_mag_pl, (1,2))
# extra = tf.concat([net['cls3'], extra], 1)
# extra = net['cls3']
net['cls4'] = x = slim.fully_connected(x, 4, scope='ppn/fc4')
net['cls4'] = x = slim.fully_connected(x, 2, activation_fn=None, scope='ppn/fc41')
net['cls4'] = x = slim.fully_connected(x, 1, activation_fn=None, scope='ppn/fc42')
net['mag'] = x+input_mag_pl
loss_l2 = tf.reduce_mean(tf.abs(net['denoised'] - output_pl))
loss_l1 = tf.Variable(0.)#tf.reduce_max(tf.abs(net['denoised'] - output_pl))#
loss_mag = tf.reduce_mean(tf.abs( (net['mag'] - output_mag_pl)))
loss_main = loss_l2 + loss_l1
## OPTIMIZER ## note: both optimizer and learning rate is not found in the paper
main_optimizer = tf.train.AdamOptimizer(1e-3, beta1=0.5)
enc_vars = tf.get_collection(tf.GraphKeys.VARIABLES, scope='enc')
dec_vars = tf.get_collection(tf.GraphKeys.VARIABLES, scope='dec')
ppn_vars = tf.get_collection(tf.GraphKeys.VARIABLES, scope='ppn')
main_grads = main_optimizer.compute_gradients(loss_main, enc_vars+dec_vars)
main_train_op = main_optimizer.apply_gradients(main_grads)
ppn_optimizer = tf.train.AdamOptimizer(1e-4, beta1=0.5)
ppn_grads = ppn_optimizer.compute_gradients(loss_mag, ppn_vars)
ppn_train_op = ppn_optimizer.apply_gradients(ppn_grads)
train_op = tf.group(main_train_op, ppn_train_op)
## training starts ###
FLAGS = tf.app.flags.FLAGS
tfconfig = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=True,
)
tfconfig.gpu_options.allow_growth = True
sess = tf.Session(config=tfconfig)
init = tf.global_variables_initializer()
sess.run(init)
max_epoch = 2000
train_loss_l1_val_hist = []
test_loss_l1_val_hist = []
train_loss_l2_val_hist = []
test_loss_l2_val_hist = []
train_loss_mag_val_hist = []
test_loss_mag_val_hist = []
print(train_input.shape, test_input.shape)
for eq_i in range(max_epoch):
# training data, for optimization
num_itr = train_input.shape[0] / batch_size
ave_loss_l1_val_train = []
ave_loss_l2_val_train = []
ave_loss_mag_val_train = []
for i in range(num_itr):
#np.save('train_input_mag',train_input_mag[i*batch_size:(i+1)*batch_size])
#np.save('train_output_mag',train_output_mag[i*batch_size:(i+1)*batch_size])
#exit(0)
feed_dict_train = {input_pl: train_input[i*batch_size:(i+1)*batch_size],
output_pl: train_output[i*batch_size:(i+1)*batch_size],
input_mag_pl: train_input_mag[i*batch_size:(i+1)*batch_size],
output_mag_pl: train_output_mag[i*batch_size:(i+1)*batch_size],
}
loss_l2_val, loss_l1_val, loss_mag_val, _ = sess.run([loss_l2, loss_l1, loss_mag, train_op], feed_dict_train)
ave_loss_l1_val_train += [loss_l1_val]
ave_loss_l2_val_train += [loss_l2_val]
ave_loss_mag_val_train += [loss_mag_val]
train_loss_l1_val_hist += [np.mean(ave_loss_l1_val_train)]
train_loss_l2_val_hist += [np.mean(ave_loss_l2_val_train)]
train_loss_mag_val_hist += [np.mean(ave_loss_mag_val_train)]
# testing data
ave_loss_l1_val_test = []
ave_loss_l2_val_test = []
ave_loss_mag_val_test = []
num_itr = test_input.shape[0] / batch_size
for i in range(num_itr):
feed_dict_test = {input_pl: test_input[i*batch_size:(i+1)*batch_size],
output_pl: test_output[i*batch_size:(i+1)*batch_size],
input_mag_pl: test_input_mag[i*batch_size:(i+1)*batch_size],
output_mag_pl: test_output_mag[i*batch_size:(i+1)*batch_size],
}
loss_l2_val, loss_l1_val, loss_mag_val = sess.run([loss_l2, loss_l1, loss_mag], feed_dict_test)
ave_loss_l2_val_test += [loss_l2_val]
ave_loss_l1_val_test += [loss_l1_val]
ave_loss_mag_val_test += [loss_mag_val]
test_loss_l1_val_hist += [np.mean(ave_loss_l1_val_test)]
test_loss_l2_val_hist += [np.mean(ave_loss_l2_val_test)]
test_loss_mag_val_hist += [np.mean(ave_loss_mag_val_test)]
print(eq_i, np.mean(ave_loss_l1_val_train), np.mean(ave_loss_l1_val_test), np.mean(ave_loss_l2_val_train), np.mean(ave_loss_l2_val_test), np.mean(ave_loss_mag_val_train), np.mean(ave_loss_mag_val_test))
er1 = []
er2 = []
for i in range(10):
input_data_val = test_input[i * batch_size:(i + 1) * batch_size]
denoised_data_val = sess.run(net['denoised'], {input_pl: input_data_val})
denoised_mag_val = sess.run(net['mag'], {input_pl: input_data_val,
input_mag_pl: test_input_mag[i * batch_size:(i + 1) * batch_size]})
denoised_data_val = denoised_data_val * denoised_mag_val
reference_data_val = test_output[i * batch_size:(i + 1) * batch_size] * test_output_mag[
i * batch_size:(i + 1) * batch_size]
er1 += [np.abs(denoised_mag_val - test_output_mag[i * batch_size:(i + 1) * batch_size]) / np.abs(
test_output_mag[i * batch_size:(i + 1) * batch_size])]
er2 += [np.sum(np.abs(denoised_data_val - reference_data_val), 1) / np.max(reference_data_val, 1)]
print(np.mean(er1), np.mean(er2))
plt.figure()
plt.subplot(3,1,1)
plt.plot(train_loss_l2_val_hist[3:], label='training l2 loss')
plt.plot(test_loss_l2_val_hist[3:], label='testing l2 loss')
plt.legend()
plt.subplot(3,1,2)
plt.plot(train_loss_l1_val_hist[3:], label='training l1 loss')
plt.plot(test_loss_l1_val_hist[3:], label='testing l1 loss')
plt.legend()
plt.show()
plt.subplot(3,1,3)
plt.plot(train_loss_mag_val_hist[3:], label='training mag loss')
plt.plot(test_loss_mag_val_hist[3:], label='testing mag loss')
plt.legend()
plt.show()
#plt.plot(sess.run(net['output'], feed_dict_train)[idx], label='true')
i = 0
input_data_val = test_input[i*batch_size:(i+1)*batch_size]
input_mag_val = test_input_mag[i*batch_size:(i+1)*batch_size]
residual_data_val = sess.run(net['denoised'], {input_pl: input_data_val})
denoised_data_val = sess.run(net['denoised'], {input_pl: input_data_val})
denoised_mag_val = sess.run(net['mag'], {input_pl: input_data_val, input_mag_pl: input_mag_val})
reference_data_val = test_output[i*batch_size:(i+1)*batch_size]
if 1:
test_max_val = test_input_mag[i*batch_size:(i+1)*batch_size,0:1]
test_min_val = test_input_mag[i*batch_size:(i+1)*batch_size,1:]
input_data_val = (input_data_val) * (test_max_val - test_min_val) + test_min_val
test_max_val = denoised_mag_val[:,0:1]
test_min_val = denoised_mag_val[:,1:]
residual_data_val = (residual_data_val) * (test_max_val - test_min_val) + test_min_val
denoised_data_val = (denoised_data_val) * (test_max_val - test_min_val) + test_min_val
test_max_val = test_output_mag[i*batch_size:(i+1)*batch_size,0:1]
test_min_val = test_output_mag[i*batch_size:(i+1)*batch_size,1:]
reference_data_val = (reference_data_val) * (test_max_val - test_min_val) + test_min_val
np.save('input_data_val_ch2.npy', input_data_val)
np.save('output_data_val_ch2.npy', reference_data_val)
np.save('denoised_data_val_ch2.npy', denoised_data_val)
for idx in range(5):
plt.figure(figsize=(7,15))
plt.subplot(5,1,1)
plt.plot(input_data_val[idx],label='bad sensor')
plt.legend()
plt.subplot(5,1,2)
plt.plot(residual_data_val[idx], label='residual sensor')
plt.legend()
plt.subplot(5,1,3)
plt.plot(denoised_data_val[idx], label='denoised signal')
plt.legend()
plt.subplot(5,1,4)
plt.plot(reference_data_val[idx], label='good sensor')
plt.legend()
plt.subplot(5,1,5)
plt.plot(reference_data_val[idx]-denoised_data_val[idx], label='error')
plt.legend()
plt.show()
if __name__ == '__main__':
    # for ratio in range(0.,1,10):
    # Single run with ratio 0.5 — presumably a noise/mixing ratio consumed by
    # run(), which is defined earlier in this file; TODO confirm its meaning.
    run(0.5)
|
from frmwk import flask_framework, orm_db
from flask.ext.login import login_required, current_user
from flask.ext.babel import gettext
from flask import render_template, flash, request, redirect, url_for, g
from frmwk import administrator_permission
# from flask import Response
from frmwk.model.mdRole import Role
from frmwk.forms.fmRole import RoleForm
@flask_framework.route('/roles')
@flask_framework.route('/roles/<int:page>')
@login_required
def roles(page = 1):
    """List all roles; ``page`` is forwarded but pagination is not yet wired up."""
    print 'roles or roles with ' + str(page)
    return renderThem({'pageNum': page})
@flask_framework.route('/newrole', methods = ['GET', 'POST'])
@login_required
def newrole():
    """Create a new role (administrators only): GET shows the form, POST saves."""
    print 'newrole'
    if administrator_permission.can():
        role = Role()
        form = RoleForm(role)
        if form.validate_on_submit():
            print 'saving * * * * '
            return saveIt(role, form)
        # GET, or POST that failed validation: render the (new) role form.
        return renderIt({'key': 'new', 'form': form})
    else:
        flash(gettext('You are not authorised to create new roles. You can request permission below.'), 'error')
        return redirect(url_for('roles'))
@flask_framework.route('/role/<role_id>', methods = ['GET', 'POST'])
@login_required
def role(role_id = None):
    """Edit an existing role (administrators only)."""
    print 'role/id with ' + str(role_id)
    if administrator_permission.can():
        role = Role.query.filter_by(id = role_id).first()  # note: shadows this view function's name
        form = RoleForm(role)
        if form.validate_on_submit():
            print "Saving {} with key {}.".format(form.name.data, form.role_id.data)
            return saveIt(role, form)
        elif request.method != "POST":
            # Initial GET: pre-fill the form from the stored role.
            form.name.data = role.name
            form.role_id.data = role.id
        return renderIt({'key': role_id, 'form': form})
    else:
        flash(gettext('You are not authorised to edit roles. You can request permission below.'), 'error')
        return redirect(url_for('edit', nickname = g.user.nickname))
def saveIt(role, form):
    """Copy the form's name/id onto ``role``, commit, flash success, go to list."""
    role.name = form.name.data
    role.id = form.role_id.data
    orm_db.session.add(role)
    orm_db.session.commit()
    flash(gettext('Your changes have been saved.'), 'success')
    return redirect(url_for('roles'))
def renderIt(pyld):
    """Render a single-role page; the role list underneath always shows page 1."""
    pyld['pageNum'] = 1
    return renderThem(pyld)
def renderThem(pyld):
    """Render the role list template with the page title and all Role records.

    ``pyld`` is mutated in place and passed to the template as ``payload``.
    """
    pyld['page'] = 'Role'
    # TODO: honour pyld['pageNum'] via .paginate(page, POSTS_PER_PAGE, False)
    pyld['records'] = Role.query.all()
    return render_template('role.html', payload = pyld)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 25 19:39:55 2020
@author: juliu
"""
from os import chdir, mkdir, path
from urllib.request import urlopen
from datetime import date


def build_archive():
    """Download SEC EDGAR quarterly company indexes into a local SEC_Archive dir.

    One local file per quarter, named ``index_<YYYY-M-D>.idx`` after the
    "Last Data Received" stamp found on the second line of each remote index.

    Fixes over the original:
    - ``from datetime.datetime import today`` is not a valid import (the module
      crashed on load); we use ``datetime.date.today`` instead.
    - ``os.path`` was used without being imported.
    - the month list misspelled "Novemeber", so November stamps never parsed.
    """
    months = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
              'August', 'September', 'October', 'November', 'December']
    if not path.exists('SEC_Archive'):
        mkdir('SEC_Archive')
    chdir('SEC_Archive')
    base = 'https://www.sec.gov/Archives/edgar/full-index/'
    # EDGAR full indexes start in 1993: -7 + 2000 == 1993.
    for year in range(-7, date.today().year - 1999):
        for qtr in range(1, 5):
            url = base + str(year + 2000) + '/QTR' + str(qtr) + '/company.idx'
            with urlopen(url) as archive:
                header_lines = []
                stamp = None
                for i, raw in enumerate(archive):
                    header_lines.append(raw)
                    if i == 1:
                        # Second line: "Last Data Received: <Month> <day>, <year>"
                        # NOTE(review): format assumed from the original parsing —
                        # confirm against a live company.idx file.
                        text = raw.decode('utf-8', 'replace').split(':', 1)[1].strip()
                        mon = str(months.index(text[:text.find(' ')]) + 1)
                        day = text[text.find(' ') + 1:text.find(',')]
                        yr = text[text.find(', ') + 2:]
                        stamp = '-'.join([yr, mon, day])
                        break
                with open('index_' + stamp + '.idx', 'a+', encoding='utf-8') as out:
                    # Write the already-consumed header, then stream the rest.
                    for raw in header_lines:
                        out.write(raw.decode('utf-8', 'replace').rstrip('\r\n') + '\n')
                    for raw in archive:
                        out.write(raw.decode('utf-8', 'replace').rstrip('\r\n') + '\n')
    chdir('../')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 4 21:35:16 2019
@author: george
"""
import numpy as np
def make_supervised(states_matrix, value=0):
    """Return a copy of ``states_matrix`` with entries equal to ``value``
    replaced by ``-inf``; all other entries are preserved.

    :param states_matrix: array of values (in general 0 or 1)
    :param value: the entry value to mask out (default 0)
    :return: a new float array; the input is left untouched

    The original implementation assigned through row views
    (``rowi = states_matrix[i, :]; rowi[...] = -inf``), which silently
    mutated the caller's matrix; copying first fixes that, and a single
    boolean-mask assignment replaces the per-row loop.
    """
    new_mat = np.array(states_matrix, dtype=float, copy=True)
    new_mat[new_mat == value] = -np.inf
    return new_mat
def compute_forw(hmm, data):
    """Compute forward probabilities for every sequence in ``data``.

    :param hmm: model exposing ``log_forward(sequence)`` for a 2-state HMM
    :param data: array of shape (n_sequences, seq_len, ...)
    :return: two flat arrays of length n_sequences * seq_len — the forward
        probabilities of state 0 and state 1 respectively
    """
    n_seq, seq_len = data.shape[0], data.shape[1]
    state0 = np.zeros(shape=[n_seq, seq_len])
    state1 = np.zeros(shape=[n_seq, seq_len])
    for idx in range(n_seq):
        forward = np.exp(hmm.log_forward(data[idx, :, :]))
        state0[idx] = forward[0, :]
        state1[idx] = forward[1, :]
    return state0.reshape(n_seq * seq_len), state1.reshape(n_seq * seq_len)
|
def findShift(encrypted):
    """Brute-force the Caesar shift of ``encrypted`` using an English word list.

    Tries all 26 shifts via the module-level setShift()/encrypt() helpers and
    keeps the shift whose decryption contains the most dictionary words.

    NOTE(review): returns the result of setShift(max_shift), which may well be
    None — confirm setShift()'s return value is meaningful to callers.
    """
    import os.path
    word_file = os.path.join("data", "lowerwords.txt")
    # ``with`` closes the word list (the original leaked the handle); a set
    # makes each membership test O(1) instead of scanning a list.
    with open(word_file) as f:
        known_words = set(w.strip() for w in f.read().split())
    max_shift = 0
    max_value = 0
    for sh in range(26):
        setShift(sh)
        decoded = encrypt(encrypted)  # hoisted: was computed twice per shift
        print(sh, decoded)
        n = sum(1 for word in decoded.split() if word in known_words)
        if n > max_value:
            max_value = n
            max_shift = sh
    decryption = setShift(max_shift)
    return decryption
Chat Conversation End
Type a message...
|
from fastdtw import fastdtw, dtw
from scipy.spatial.distance import euclidean
import scipy.io.wavfile as wav
from DTW.extract_features import extract
from utility import *
from proj_paths import *
models = dict()
def load_models():
    """Fill the module-level ``models`` mapping from the DTW model directory."""
    for model_name, stored_path in collect_files(DTW_MODELS_PATH):
        models[model_name] = load(stored_path)
def real_time():
    """Poll REAL_TIME_PATH forever; DTW-score each new recording, then delete it.

    Each wav file found is feature-extracted and compared against every model
    in the module-level ``models`` dict; distances are printed.
    """
    while True:
        try:
            for raw_file, joined_file in collect_files(REAL_TIME_PATH):
                _, sig = wav.read(joined_file)
                test_feats = extract(sig)
                results = dict()  # NOTE(review): never populated — dead variable?
                for model_name, model_feats in models.items():
                    dist, path = dtw(test_feats, model_feats, euclidean)
                    print(dist)
                # Processed files are removed so they are not scored again.
                # ``os`` is not imported here — presumably provided by the
                # star-imports above (utility/proj_paths); verify.
                os.remove(joined_file)
        except Exception as e:
            # print(e.__str__())
            # Appears to be a deliberate best-effort loop (partially written
            # files are retried on the next pass) — TODO confirm, and log.
            pass
if __name__ == "__main__":
    # Load every reference model once, then poll forever for new recordings.
    load_models()
    real_time()
|
from django import forms
class AddForm(forms.Form):
    """Form for recording production figures for a product."""
    # Only one product is currently selectable.
    product=forms.ChoiceField(choices=[('valve','Valve')])
    oee = forms.IntegerField()  # presumably "overall equipment effectiveness" — confirm
    quality = forms.IntegerField()
    volume = forms.IntegerField()
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2021, Spyder Bot
#
# Licensed under the terms of the MIT license
# ----------------------------------------------------------------------------
"""
Custom toolbar plugin.
"""
# Third-party imports
from qtpy.QtGui import QIcon
# Spyder imports
from spyder.api.plugin_registration.decorators import on_plugin_available
from spyder.api.plugins import Plugins, SpyderPluginV2
from spyder.api.translations import get_translation
# Local imports
from custom_toolbar.spyder.container import CustomToolbarContainer
_ = get_translation("custom_toolbar.spyder")
class CustomToolbar(SpyderPluginV2):
    """
    Custom toolbar plugin.
    """

    NAME = "custom_toolbar"
    REQUIRES = [Plugins.Toolbar]  # this plugin depends on Spyder's Toolbar plugin
    CONTAINER_CLASS = CustomToolbarContainer
    CONF_SECTION = NAME

    # --- Signals

    # --- SpyderPluginV2 API
    # ------------------------------------------------------------------------
    def get_name(self):
        """Human-readable, translated plugin name."""
        return _("Custom toolbar")

    def get_description(self):
        """Short translated description of the plugin."""
        return _("Example that shows how to add a custom toolbar to Spyder")

    def get_icon(self):
        """Plugin icon (an empty icon is used)."""
        return QIcon()

    def on_initialize(self):
        # No extra setup needed; the container builds the toolbar itself.
        pass

    @on_plugin_available(plugin=Plugins.Toolbar)
    def on_toolbar_available(self):
        """Register the container's toolbar once the Toolbar plugin is available."""
        container = self.get_container()
        toolbar = self.get_plugin(Plugins.Toolbar)
        toolbar.add_application_toolbar(container.toolbar)

    def on_close(self, cancellable=True):
        # Nothing to tear down; always allow closing.
        return True
|
# Solution to https://leetcode.com/problems/roman-to-integer/
class Solution:
    """Convert a Roman numeral string to an integer (LeetCode #13)."""

    def romanToInt(self, s):
        """
        Sum symbol values left to right; when a valid subtractive pair
        (IV, IX, XL, XC, CD, CM) is seen, compensate by subtracting twice
        the previously added smaller value.

        :type s: str
        :rtype: int
        """
        values = {'I': 1, 'V': 5, 'X': 10, 'L': 50,
                  'C': 100, 'D': 500, 'M': 1000}
        subtractive = {'IV', 'IX', 'XL', 'XC', 'CD', 'CM'}
        total = 0
        previous = ''
        for ch in s:
            total += values[ch]
            if previous + ch in subtractive:
                total -= 2 * values[previous]
            previous = ch
        return total
|
import lxml.etree
from file_path_collect import feed_broken_xml_path as path
# First attempt: strict parsing fails because the feed uses the HTML entity
# &hellip; which is not declared in XML (see the expected output below).
try:
    tree1 = lxml.etree.parse(path)
except lxml.etree.XMLSyntaxError as err:
    print(err)
print()

# Second attempt: a recovering parser skips the undeclared entities and
# records the problems in its error_log instead of raising.
parser = lxml.etree.XMLParser(recover=True)
tree = lxml.etree.parse(path, parser)
print(parser.error_log)
print()

# Atom elements must be looked up with their full namespace URI.
print(tree.findall('{http://www.w3.org/2005/Atom}title'))
title = tree.findall('{http://www.w3.org/2005/Atom}title')[0]
print(title.text)
print()

# Serialize the recovered tree back to text.
print(lxml.etree.tounicode(tree.getroot()))
"""
Entity 'hellip' not defined, line 3, column 28 (feed-broken.xml, line 3)
../resource/xml/feed-broken.xml:3:28:FATAL:PARSER:ERR_UNDECLARED_ENTITY: Entity 'hellip' not defined
[<Element {http://www.w3.org/2005/Atom}title at 0x1108a6e60>]
dive into
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en">
<title>dive into </title>
<subtitle>currently between addictions</subtitle>
<id>tag:diveintomark.org,2001-07-29:/</id>
<updated>2009-03-27T21:56:07Z</updated>
<link rel="alternate" type="text/html" href="http://diveintomark.org/"/>
<entry>
<author>
<name>Mark</name>
<uri>http://diveintomark.org/</uri>
</author>
<title>Dive into history, 2009 edition</title>
<link rel="alternate" type="text/html" href="http://diveintomark.org/archives/2009/03/27/dive-into-history-2009-edition"/>
<id>tag:diveintomark.org,2009-03-27:/archives/20090327172042</id>
<updated>2009-03-27T21:56:07Z</updated>
<published>2009-03-27T17:20:42Z</published>
<category scheme="http://diveintomark.org" term="diveintopython"/>
<category scheme="http://diveintomark.org" term="docbook"/>
<category scheme="http://diveintomark.org" term="html"/>
<summary type="html">Putting an entire chapter on one page sounds
bloated, but consider this mdash; my longest chapter so far
would be 75 printed pages, and it loads in under 5 secondshellip;
On dialup.</summary>
</entry>
<entry>
<author>
<name>Mark</name>
<uri>http://diveintomark.org/</uri>
</author>
<title>Accessibility is a harsh mistress</title>
<link rel="alternate" type="text/html" href="http://diveintomark.org/archives/2009/03/21/accessibility-is-a-harsh-mistress"/>
<id>tag:diveintomark.org,2009-03-21:/archives/20090321200928</id>
<updated>2009-03-22T01:05:37Z</updated>
<published>2009-03-21T20:09:28Z</published>
<category scheme="http://diveintomark.org" term="accessibility"/>
<summary type="html">The accessibility orthodoxy does not permit people to
question the value of features that are rarely useful and rarely used.</summary>
</entry>
<entry>
<author>
<name>Mark</name>
</author>
<title>A gentle introduction to video encoding, part 1: container formats</title>
<link rel="alternate" type="text/html" href="http://diveintomark.org/archives/2008/12/18/give-part-1-container-formats"/>
<id>tag:diveintomark.org,2008-12-18:/archives/20081218155422</id>
<updated>2009-01-11T19:39:22Z</updated>
<published>2008-12-18T15:54:22Z</published>
<category scheme="http://diveintomark.org" term="asf"/>
<category scheme="http://diveintomark.org" term="avi"/>
<category scheme="http://diveintomark.org" term="encoding"/>
<category scheme="http://diveintomark.org" term="flv"/>
<category scheme="http://diveintomark.org" term="GIVE"/>
<category scheme="http://diveintomark.org" term="mp4"/>
<category scheme="http://diveintomark.org" term="ogg"/>
<category scheme="http://diveintomark.org" term="video"/>
<summary type="html">These notes will eventually become part of a
tech talk on video encoding.</summary>
</entry>
</feed>
"""
|
# Price-lookup program (Indonesian UI strings kept verbatim).
# Fixes over the original:
# - the loop flag was ``ulang = str`` (the *type object*, always truthy);
#   a plain ``while True`` + ``break`` expresses the intent.
# - an invalid size left ``harga`` unbound (NameError) or, on later
#   iterations, printed the *previous* price; invalid input now gets the
#   existing "pilihan tidak tersedia" message instead.
PRICE_LIST = {
    1: ("IMP", {"s": 150000, "m": 200000, "l": 250000}),
    2: ("Prada", {"s": 150000, "m": 160000, "l": 170000}),
    3: ("Gucci", {"s": 200000, "m": 210000, "l": 230000}),
    4: ("Louis Vuitton", {"s": 300000, "m": 300000, "l": 350000}),
}
while True:
    print("PROGRAM CEK HARGA\n")
    print("MERK YANG TERSEDIA\n 1.IMP\n 2.Prada\n 3.Gucci\n 4.Louis Vuitton\n")
    print("Size yang tersedia: s, m, l\n")
    print("masukan pilihan anda: ")
    pilihan = int(input())
    print("masukan size yang diinginkan: ")
    size = input()
    if pilihan in PRICE_LIST and size in PRICE_LIST[pilihan][1]:
        merk, sizes = PRICE_LIST[pilihan]
        harga = sizes[size]
        print("merk: ", merk)
        print("size: ", size)
        print("harga: ", harga)
        print()
    else:
        print("pilihan tidak tersedia")
        print()
    print("Apakah anda ingin mengulang program? (Y/T)")
    ulang1 = input()
    print("\n")
    if ulang1 == "T":
        break
|
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from unittest.mock import patch
from extensions.front.caffe.grn_ext import GRNFrontExtractor
from extensions.ops.grn import GRNOp
from mo.front.common.partial_infer.elemental import copy_shape_infer
from mo.ops.op import Op
from mo.utils.unittest.extractors import FakeMultiParam
from mo.utils.unittest.graph import FakeNode
class FakeGRNProtoLayer:
    """Minimal stand-in for a caffe proto layer exposing only ``grn_param``."""
    def __init__(self, val):
        self.grn_param = val
class TestGRNExt(unittest.TestCase):
    """Unit tests for the caffe GRN front extractor."""

    @classmethod
    def setUpClass(cls):
        # Register the GRN op so the extractor can resolve it by name.
        Op.registered_ops['GRN'] = GRNOp

    def test_grn_no_pb_no_ml(self):
        """Extraction without a protobuf layer must raise AttributeError."""
        self.assertRaises(AttributeError, GRNFrontExtractor.extract, None)

    @patch('extensions.front.caffe.grn_ext.merge_attrs')
    def test_grn_ext_ideal_numbers(self, merge_attrs_mock):
        """A well-formed grn_param must yield type/bias/infer attrs on the node."""
        params = {
            'bias': 0.7
        }
        merge_attrs_mock.return_value = {
            **params
        }
        fake_pl = FakeGRNProtoLayer(FakeMultiParam(params))
        fake_node = FakeNode(fake_pl, None)
        GRNFrontExtractor.extract(fake_node)
        exp_res = {
            'type': "GRN",
            'bias': 0.7,
            'infer': copy_shape_infer
        }
        for key in exp_res.keys():
            self.assertEqual(fake_node[key], exp_res[key])
|
import numpy as np
class DataGen:
    """
    Mini-batch generator over an in-memory dataset.

    Supports iteration in (optionally shuffled) batches, random batch
    sampling via next(), and tuple-style access: gen[0] is X, gen[1] is y.
    """

    def __init__(self, X: np.ndarray, y: np.ndarray, shuffle=False, batch=32) -> None:
        """
        :param X: data of shape (n_samples, n_attributes, ...)
        :param y: labels aligned with X along axis 0
        :param shuffle: shuffle the sample order on each iteration
        :param batch: batch size
        """
        self.X: np.ndarray = X
        self.y: np.ndarray = y
        self.shuffle: bool = shuffle
        self.batch: int = batch

    def __iter__(self):
        # Fix the (optionally shuffled) visiting order once per iteration.
        n_samples = self.X.shape[0]
        order = np.arange(n_samples)
        if self.shuffle:
            np.random.shuffle(order)
        step = self.batch
        return ((self.X[order[start:start + step]], self.y[order[start:start + step]])
                for start in range(0, n_samples, step))

    def __next__(self):
        """
        :return: a single random batch (X, y), sampled without replacement
        """
        chosen = np.random.choice(self.X.shape[0], self.batch, replace=False)
        return self.X[chosen], self.y[chosen]

    def __getitem__(self, key):
        if key == 0:
            return self.X
        if key == 1:
            return self.y
        raise ValueError('key is 0 or 1: DataGen[0]=X,DataGen[1]=y')

    def __setitem__(self, key, value: np.ndarray):
        if key == 0:
            self.X = value
        elif key == 1:
            self.y = value
        else:
            raise ValueError('key is 0 or 1: DataGen[0]=X,DataGen[1]=y')

    def __len__(self):
        return self.X.shape[0]
class DataGenRNN(DataGen):
    """
    DataGen variant for RNN inputs: X is [sequence_data, extra_features] and
    every batch also carries a per-sample mask.
    """

    def __init__(self, X: list, y: np.ndarray, mask: np.ndarray, shuffle=False, batch=32) -> None:
        super().__init__(X[0], y, shuffle, batch)
        self.feature = X[1]  # auxiliary per-sample features, aligned with X[0]
        self.mask = mask     # per-sample mask, aligned with X[0]

    def __iter__(self):
        n_samples = self.X.shape[0]
        order = np.arange(n_samples)
        if self.shuffle:
            np.random.shuffle(order)
        step = self.batch

        def batches():
            for start in range(0, n_samples, step):
                sel = order[start:start + step]
                yield [self.X[sel], self.feature[sel]], self.y[sel], self.mask[sel]
        return batches()

    def __next__(self):
        """
        :return: one random batch ([X, feature], y, mask), sampled without replacement
        """
        chosen = np.random.choice(self.X.shape[0], self.batch, replace=False)
        return [self.X[chosen], self.feature[chosen]], self.y[chosen], self.mask[chosen]

    def __getitem__(self, key):
        if key == 0:
            return [self.X, self.feature]
        if key == 1:
            return self.y
        if key == 2:
            return self.mask
        raise ValueError('key is 0 or 1 or 2: DataGen[0]=X,DataGen[1]=y')

    def __setitem__(self, key, value: np.ndarray):
        if key == 0:
            self.X = value[0]
            self.feature = value[1]
        elif key == 1:
            self.y = value
        elif key == 2:
            self.mask = value
        else:
            raise ValueError('key is 0 or 1 or 2: DataGen[0]=X,DataGen[1]=y')

    def __len__(self):
        return self.X.shape[0]
|
from django.db.models import Q
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import permissions, status
from django.contrib.auth.models import User
from main.serializers import UserSerializer
from rest_framework.authtoken.models import Token
from main.models import Room, Chat, Products, Category, ListOrders, Comments
from main.serializers import (RoomSerializers, ChatSerializers, ChatPostSerializers, UsersSerializer)
from main.serializers import ProductsSerializer, CategorySerializer, CommentsSerializer
"""======================Product View======================="""
class CategoryViews(APIView):
    """List all product categories."""
    # permission_classes = [permissions.IsAuthenticated, ]
    permission_classes = [permissions.AllowAny, ]

    def get(self, request):
        """Return every Category under a top-level "data" key."""
        categorys = Category.objects.all()
        serializer = CategorySerializer(categorys, many=True)
        return Response({"data": serializer.data})
class ProductsViews(APIView):
    """List all products (per-category filtering is currently commented out)."""
    permission_classes = [permissions.AllowAny, ]

    def get(self,request):
        #category = request.GET.get("category")
        #products = Products.objects.filter(category_id=category)
        products = Products.objects.all()
        serializer = ProductsSerializer(products, many=True)
        return Response({"data": serializer.data})
"""======================Comments============================"""
class CommentsViews(APIView):
    """List comments for a product (?product=<id>)."""
    permission_classes = [permissions.AllowAny, ]

    def get(self,request):
        product = request.GET.get("product")
        # NOTE(review): this filters *Products* but serializes with
        # CommentsSerializer — it likely should query Comments instead;
        # confirm against the models before changing.
        products = Products.objects.filter(product_id=product)
        #products = Products.objects.all()
        serializer = CommentsSerializer(products, many=True)
        return Response({"data": serializer.data})
"""======================Chat======================="""
class Rooms(APIView):
    """Chat rooms."""
    # permission_classes = [permissions.IsAuthenticated, ]
    permission_classes = [permissions.AllowAny, ]

    def get(self, request):
        """List rooms where the requester is the creator or an invitee."""
        rooms = Room.objects.filter(Q(creater=request.user) | Q(invited=request.user))
        serializer = RoomSerializers(rooms, many=True)
        return Response({"data": serializer.data})

    def post(self, request):
        """Create a new room owned by the requester."""
        Room.objects.create(creater=request.user)
        return Response(status=201)
class Dialog(APIView):
    """Chat dialog: list a room's messages and post new ones."""
    # permission_classes = [permissions.IsAuthenticated, ]
    permission_classes = [permissions.AllowAny, ]

    def get(self, request):
        """Return the messages of the room given by ?room=<id>."""
        room = request.GET.get("room")
        chat = Chat.objects.filter(room=room)
        serializer = ChatSerializers(chat, many=True)
        return Response({"data": serializer.data})

    def post(self, request):
        """Save a posted message; 201 on success, 400 on validation failure."""
        # room = request.data.get("room")
        dialog = ChatPostSerializers(data=request.data)
        if dialog.is_valid():
            dialog.save(user=request.user)
            return Response(status=201)
        else:
            return Response(status=400)
class AddUsersRoom(APIView):
    """Add users to a chat room."""

    def get(self, request):
        """List all users that can be invited."""
        users = User.objects.all()
        serializer = UsersSerializer(users, many=True)
        return Response(serializer.data)

    def post(self, request):
        """Invite ``user`` into ``room``; 201 on success, 400 on any failure."""
        room = request.data.get("room")
        user = request.data.get("user")
        try:
            room = Room.objects.get(id=room)
            room.invited.add(user)
            room.save()
            return Response(status=201)
        # NOTE(review): bare except hides the failure cause — consider
        # narrowing (e.g. Room.DoesNotExist, ValueError) and logging.
        except:
            return Response(status=400)
"""=======================CreateUser================"""
class UserCreate(APIView):
    """
    Creates the user.
    """
    permission_classes = [permissions.AllowAny, ]

    def post(self, request, format='json'):
        """Register a user and return their serialized data plus a new auth token."""
        serializer = UserSerializer(data=request.data)
        if serializer.is_valid():
            user = serializer.save()
            if user:
                token = Token.objects.create(user=user)
                json = serializer.data  # note: shadows the stdlib ``json`` name (local only)
                json['token'] = token.key
                return Response(json, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
#library managment system
class Library:
    """Tiny in-memory library: list, lend, and (eventually) add books."""

    def __init__(self,listofbooks):
        # Titles currently on the shelf.
        self.listofbooks = listofbooks

    def books_availabel(self):
        """Print every available title. (Name kept as-is: it is public API.)"""
        print("Books available")
        print()
        for books in self.listofbooks:
            print(books)

    def lend_books(self, requestedbook=None):
        """Lend ``requestedbook`` if it is on the shelf.

        Fix: the original referenced an undefined *global* ``requestedbook``,
        so calling lend_books() raised NameError. The title is now a
        parameter; the default (None) keeps the old no-argument call working
        and simply reports the book as unavailable.
        """
        if requestedbook in self.listofbooks:
            print("You have borrowed the books: ")
            self.listofbooks.remove(requestedbook)
        else:
            print("The book is not available in list")

    def add_books(self):
        """Not implemented yet."""
        pass
class customer():
    """A borrower who can be prompted for a book title."""
    def requestedbook(self):
        """Ask the user for a book title; remember it on ``self`` and return it."""
        self.books = input("Enter the name of the book: ")
        return self.books
# Demo driver.
library = Library(["the god of war" , "the journey" , "around the world" , "last of us"])
library.books_availabel()
# NOTE(review): lend_books() is called without selecting a book — in the class
# above this reads an undefined global ``requestedbook`` and raises NameError.
library.lend_books()
Customer = customer()
Customer.requestedbook()
|
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
def plot_series(time,series,format="-",start=0,end=None):
    """Plot ``series`` against ``time`` on a fresh matplotlib figure.

    :param format: matplotlib line-format string (note: shadows the builtin)
    :param start: first index of the window to plot
    :param end: one-past-last index of the window (None = to the end)
    """
    plt.figure(figsize=(10,6))
    plt.plot(time[start:end],series[start:end],format)
    plt.xlabel("time")
    plt.ylabel("value")
    plt.grid(True)
def trend(time, slope=0):
    """Linear trend component: ``slope * time`` (flat by default)."""
    return time * slope
def seasonal_pattern(season_time):
    """Arbitrary seasonal shape on [0, 1): cosine before 0.4, exp decay after."""
    rising = np.cos(season_time * 2 * np.pi)
    decaying = 1 / np.exp(3 * season_time)
    return np.where(season_time < 0.4, rising, decaying)

def seasonality(time, period, amplitude=1, phase=0):
    """Repeat ``seasonal_pattern`` every ``period`` steps, scaled by ``amplitude``."""
    season_time = ((time + phase) % period) / period
    return amplitude * seasonal_pattern(season_time)
def noise(time, noise_level=1, seed=None):
    """Gaussian noise, one sample per time step, scaled by ``noise_level``.

    A seeded RandomState makes the draw reproducible.
    """
    rng = np.random.RandomState(seed)
    return noise_level * rng.randn(len(time))
# Build a synthetic 4-year daily series: baseline + linear trend + yearly
# seasonality + seeded noise.
time = np.arange(4*365+1,dtype='float32')
baseline =10
series = trend(time,0.1)
amplitude = 40
slope = 0.05
noise_level =5
series = baseline+trend(time,slope)+seasonality(time,period=365,amplitude=amplitude)
series +=noise(time,noise_level,seed=42)
# plt.figure(figsize=(10,6))
# plot_series(time,series)
# plt.show
# Train/validation split at day 1000.
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
# plt.figure(figsize=(10,6))
# plot_series(time_train,x_train)
# # plt.show()
#
# # plt.figure(figsize=(10,6))
# plot_series(time_valid,x_valid)
#
# plt.show()
# Naive baseline: predict each day with the previous day's value.
naive_forecast = series[split_time-1:-1]
# plt.figure(figsize=(10,6))
# plot_series(time_valid,x_valid,start=0,end=150)
# plot_series(time_valid,naive_forecast,start=1,end=151)
# plt.show()
tf.compat.v1.enable_eager_execution()
# MSE / MAE of the naive forecast on the validation window.
print(keras.metrics.mean_squared_error(x_valid,naive_forecast).numpy())
print(keras.metrics.mean_absolute_error(x_valid,naive_forecast).numpy())
def moving_average_forecast(series, window_size):
    """Forecast each step as the mean of the preceding ``window_size`` values.

    Returns len(series) - window_size predictions.
    """
    means = [series[start:start + window_size].mean()
             for start in range(len(series) - window_size)]
    return np.array(means)
# Plain 30-day moving average over the validation window.
moving_avg = moving_average_forecast(series,30)[split_time-30:]
# plt.figure(figsize=(10,6))
# plot_series(time_valid,x_valid)
# plot_series(time_valid,moving_avg)
# plt.show()
print(keras.metrics.mean_squared_error(x_valid,moving_avg).numpy())
print(keras.metrics.mean_absolute_error(x_valid,moving_avg).numpy())
# Remove yearly seasonality by differencing at lag 365, then smooth.
diff_series = (series[365:]-series[:-365])
diff_time=time[365:]
# plt.figure(figsize=(10,6))
# plot_series(diff_time,diff_series)
# plt.show()
diff_moving_avg = moving_average_forecast(diff_series,50)[split_time-365-50:]
# plt.figure(figsize=(10,6))
# plot_series(time_valid,diff_series[split_time-365:])
# plot_series(time_valid,diff_moving_avg )
# plt.show()
# Undo the differencing by adding back last year's raw values.
diff_moving_avg_plus_past = series[split_time-365:-365]+diff_moving_avg
# plt.figure(figsize=(10,6))
# plot_series(time_valid,x_valid)
# plot_series(time_valid,diff_moving_avg_plus_past)
# plt.show()
print(keras.metrics.mean_squared_error(x_valid,diff_moving_avg_plus_past).numpy())
print(keras.metrics.mean_absolute_error(x_valid,diff_moving_avg_plus_past).numpy())
# Variant: smooth last year's values too before adding them back.
# diff_moving_avg_plus_smooth_past = moving_average_forecast(series[split_time-370:-360],5) +diff_moving_avg
diff_moving_avg_plus_smooth_past = moving_average_forecast(series[split_time-370:-360],10) +diff_moving_avg
print(keras.metrics.mean_squared_error(x_valid,diff_moving_avg_plus_smooth_past).numpy())
print(keras.metrics.mean_absolute_error(x_valid,diff_moving_avg_plus_smooth_past).numpy())
import heapq
def kthSmallest(mat, n, k):
    """Return the k-th smallest value in the n x n matrix ``mat`` (1-based k).

    Fixes: the original returned an unbound local (NameError) for k <= 0 and
    heapified all n*n elements; ``heapq.nsmallest`` keeps only k candidates.

    :raises ValueError: if k is outside [1, n*n]
    """
    if not 1 <= k <= n * n:
        raise ValueError("k must be in [1, n*n]")
    flat = (mat[i][j] for i in range(n) for j in range(n))
    return heapq.nsmallest(k, flat)[-1]
|
"""Bài 08: Viết chương trình đếm số lần xuất hiện
các từ đơn trong một đoạn văn bản"""
str=input("Nhập đoạn văn: ")
dem=0
a=str.split(" ")
for i in range(len(a)):
if len(a[i])==1: dem+=1
print( dem)
str.split() |
from django.conf.urls import url
from . import views
# URL namespace: reverse with "pdf_reducer:index" / "pdf_reducer:test_upload".
app_name ='pdf_reducer'
urlpatterns = [
    url(r'^$', views.FileUploadView.as_view(), name="index"),
    url(r'^test_upload', views.test_upload, name="test_upload")
]
|
from manager import *
if __name__ == '__main__':
    # Instantiate the zoo manager and run its built-in test routine.
    manager = ZooManager()
    manager.test_zoo()
|
from __future__ import print_function
from datetime import date
import os
import unittest
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# noinspection PyPep8Naming
class BaseTestCase(unittest.TestCase):
    """
    Base class that all e2e tests should extend. It provides several useful
    helper functions, sets up the selenium webdriver, and provides a common
    interface for logging in/out a user. Each test then only really needs to
    override user_id, user_name, and user_password as necessary for a
    particular testcase and this class will handle the rest to setup the test.
    """
    # Defaults; TEST_URL can be overridden via the TEST_URL environment
    # variable (see __init__), the user_* values per-test via constructor args.
    TEST_URL = "http://192.168.56.101"
    USER_ID = "student"
    USER_NAME = "Joe"
    USER_PASSWORD = "student"
    def __init__(self, testname, user_id=None, user_password=None, user_name=None, log_in=True):
        """
        :param testname: name of the test method to run (passed to unittest)
        :param user_id: login id; defaults to BaseTestCase.USER_ID
        :param user_password: password; defaults to user_id when one was given,
            else to BaseTestCase.USER_PASSWORD
        :param user_name: display name expected in the header after login
        :param log_in: when True, setUp() logs the user in automatically
        """
        super().__init__(testname)
        # The TEST_URL env var overrides the hard-coded default target host.
        if "TEST_URL" in os.environ and os.environ['TEST_URL'] is not None:
            self.test_url = os.environ['TEST_URL']
        else:
            self.test_url = BaseTestCase.TEST_URL
        self.driver = None
        """ :type driver: webdriver.Chrome """
        # Headless Chrome configuration shared by every test.
        self.options = Options()
        self.options.add_argument('--headless')
        self.options.add_argument("--disable-extensions")
        self.options.add_argument('--hide-scrollbars')
        self.options.add_argument('--disable-gpu')
        self.options.add_argument('--no-proxy-server')
        self.user_id = user_id if user_id is not None else BaseTestCase.USER_ID
        self.user_name = user_name if user_name is not None else BaseTestCase.USER_NAME
        # Site convention: a user's password equals their id unless given.
        if user_password is None and user_id is not None:
            user_password = user_id
        self.user_password = user_password if user_password is not None else BaseTestCase.USER_PASSWORD
        self.semester = BaseTestCase.get_current_semester()
        self.logged_in = False
        self.use_log_in = log_in
    def setUp(self):
        """Start a headless Chrome driver and, unless disabled, log in."""
        self.driver = webdriver.Chrome(options=self.options)
        if self.use_log_in:
            self.log_in()
    def tearDown(self):
        """Shut the browser down after each test."""
        self.driver.quit()
def get(self, url):
if url[0] != "/":
url = "/" + url
self.driver.get(self.test_url + url)
def log_in(self, url=None, title="Submitty", user_id=None, user_password=None, user_name=None):
"""
Provides a common function for logging into the site (and ensuring
that we're logged in)
:return:
"""
if url is None:
url = "/index.php"
if user_password is None:
user_password = user_id if user_id is not None else self.user_password
if user_id is None:
user_id = self.user_id
if user_name is None:
user_name = self.user_name
self.get(url)
self.assertIn(title, self.driver.title)
self.driver.find_element_by_name('user_id').send_keys(user_id)
self.driver.find_element_by_name('password').send_keys(user_password)
self.driver.find_element_by_name('login').click()
# print(self.driver.page_source)
self.assertEqual(user_name, self.driver.find_element_by_id("login-id").text)
self.logged_in = True
def log_out(self):
if self.logged_in:
self.logged_in = False
self.driver.find_element_by_id('logout').click()
self.driver.find_element_by_id('login-guest')
def click_class(self, course, course_name):
self.driver.find_element_by_id(self.get_current_semester() + '_' + course).click()
WebDriverWait(self.driver, 10).until(EC.title_is(course_name))
# see Navigation.twig for css selectors
# loaded_selector must recognize an element on the page being loaded (test_simple_grader.py has xpath example)
def click_nav_gradeable_button(self, gradeable_category, gradeable_id, button_name, loaded_selector):
self.driver.find_element_by_xpath("//tbody[@id='{}_tbody']/tr[@id='{}']/td/a[@name='{}_button']".format(gradeable_category, gradeable_id, button_name)).click()
WebDriverWait(self.driver, 10).until(EC.presence_of_element_located(loaded_selector))
# clicks the navigation header text to 'go back' pages
# for homepage, selector can be gradeable list
def click_header_link_text(self, text, loaded_selector):
self.driver.find_element_by_xpath("//div[@id='header-text']/h2[2]/a[text()='{}']".format(text)).click()
WebDriverWait(self.driver, 10).until(EC.presence_of_element_located(loaded_selector))
@staticmethod
def wait_user_input():
"""
Causes the running selenium test to pause until the user has hit the enter key in the
terminal that is running python. This is useful for using in the middle of building tests
as then you cna use the javascript console to inspect the page, get the name/id of elements
or other such actions and then use that to continue building the test
"""
input("Hit enter to continue...")
@staticmethod
def get_current_semester():
"""
Returns the "current" academic semester which is in use on the Vagrant/Travis machine (as we
want to keep referring to a url that is "up-to-date"). The semester will either be spring
(prefix "s") if we're in the first half of the year otherwise fall (prefix "f") followed
by the last two digits of the current year. Unless you know you're using a course that
was specifically set-up for a certain semester, you should always be using the value
generated by this function in the code.
:return:
"""
today = date.today()
semester = "f" + str(today.year)[-2:]
if today.month < 7:
semester = "s" + str(today.year)[-2:]
return semester
|
from PySide import QtCore, QtGui
from p4_q2_ui import *
from library import *
import re, datetime
class MagazineRow(object):
    """One table row binding a magazine record (an item dict plus a magazine
    dict) to editable QTableWidget cells, with per-cell regex validation and
    revert-on-invalid-input."""
    def __init__(self, index, table, item, magazine):
        self.index = index
        self.item = item
        self.magazine = magazine
        self.iid = self.item['iid']
        # One editable cell per column; values coerced to unicode for display.
        self.titleField = QtGui.QTableWidgetItem(self.item['title'])
        self.dateField = QtGui.QTableWidgetItem(unicode(self.item['date']))
        self.issnField = QtGui.QTableWidgetItem(unicode(self.magazine['issn']))
        self.issueField = QtGui.QTableWidgetItem(unicode(self.magazine['issue']))
        self.pagesField = QtGui.QTableWidgetItem(unicode(self.magazine['pages']))
        # Column spec: (widget, validation regex, backing record, key, parser).
        self.cells = [
            (self.titleField, r'^.+$', self.item, 'title', unicode),
            (self.dateField, r'^\d\d\d\d-\d\d-\d\d$', self.item, 'date', self.getDate),
            (self.issnField, r'^\d+$', self.magazine, 'issn', int),
            (self.issueField, r'^\d+$', self.magazine, 'issue', int),
            (self.pagesField, r'^\d+$', self.magazine, 'pages', int)
        ]
        self.table = table
        # Install every cell widget into this row of the table.
        for i, c in enumerate(self.cells):
            self.table.setItem(self.index, i, c[0])
    def getDate(self, date):
        # Parse an ISO 'YYYY-MM-DD' string into a datetime.date.
        return datetime.date(*map(int, date.split('-')))
    def revert(self, c):
        # Restore the cell text from the backing record (discard the edit).
        c[0].setText(unicode(c[2][c[3]]))
    def needUpdate(self):
        """Validate every cell, write parsed values into the backing records,
        and report whether either record now needs a database sync."""
        for c in self.cells:
            if not re.match(c[1], c[0].text()):
                # NOTE(review): no `continue` after this revert -- the loop
                # falls through and re-parses the reverted (known-good) text.
                # Appears harmless but looks unintended; confirm before changing.
                self.revert(c)
            try:
                t = c[4](c[0].text())
            except:
                self.revert(c)
                continue
            # NOTE(review): compares the stored value (often int/date) with the
            # cell's unicode text, so non-string columns rarely match here and
            # get reassigned even when unchanged -- verify intent.
            if c[2][c[3]] == c[0].text():
                continue
            c[2][c[3]] = t
        return self.item.needSync() or self.magazine.needSync()
    def update(self):
        # Persist both backing records to the database.
        self.item.sync()
        self.magazine.sync()
class Option3(UIOption):
    """'Magazines editor' screen: an editable table of magazines whose edits
    are polled once a second and flushed to the database in a transaction."""
    def __init__(self):
        UIOption.__init__(self)
    def getTitle(self):
        # Window/option title shown by the UI framework.
        return 'Magazines editor'
    def initUI(self, layout):
        """Build the label + table UI and start the periodic sync timer."""
        columns = ('Title', 'Date', 'ISSN', 'Issue', 'Pages')
        self.label = QtGui.QLabel('The following table contains a list of magazines\nIt can be interactively edited by double-clicking on a cell.\nAll modifications will be replicated on the database server automatically.')
        layout.addWidget(self.label)
        # Join items with magazines and wrap each result row in record objects.
        magResults = sqlQuery("""SELECT * FROM items NATURAL JOIN magazines""", asDict=True).fetchall()
        allItems = itemsTable.fromResults(magResults)
        allMagazines = magazinesTable.fromResults(magResults)
        self.table = QtGui.QTableWidget(len(magResults), len(columns))
        self.table.setHorizontalHeaderLabels(columns)
        self.rows = [MagazineRow(i, self.table, allItems[i], allMagazines[i]) for i in xrange(len(allItems))]
        layout.addWidget(self.table)
        # Poll for pending edits once per second.
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.updateDatabase)
        self.timer.start(1000)
    def updateDatabase(self):
        """Flush all dirty rows in one transaction; on failure roll back and
        surface the error message in the header label."""
        needUpdate = False
        for r in self.rows:
            needUpdate = r.needUpdate() or needUpdate
        if needUpdate:
            try:
                transactionStart()
                for r in self.rows:
                    r.update()
                transactionCommit()
            except Exception, e:
                transactionRollback()
                self.label.setText(unicode(e))
if __name__ == '__main__':
    # Entry point: launch the shared UI runner with this option's index.
    runApp(2)
|
import datetime
import json
class Curtida:
    """A 'like' (curtida) record linking a user to a post, with audit dates."""

    def __init__(self):
        self.id = 0
        # Both audit timestamps default to construction time.
        self.data_insercao = datetime.datetime.now()
        self.data_alteracao = datetime.datetime.now()
        self.usuario_id = ''
        self.postagem_id = ''
        self.operacao = ''

    def definir_por_tupla(self, tupla_notificacao):
        """Populate fields from a DB row tuple laid out as
        (id, data_insercao, data_alteracao, usuario_id, postagem_id, operacao)."""
        self.id = tupla_notificacao[0]
        self.data_insercao = tupla_notificacao[1]
        self.data_alteracao = tupla_notificacao[2]
        self.usuario_id = tupla_notificacao[3]
        self.postagem_id = tupla_notificacao[4]
        self.operacao = tupla_notificacao[5]

    def definir_por_json(self, curtida_json):
        """Populate fields from a dict parsed from JSON."""
        self.id = curtida_json['id']
        self.data_insercao = curtida_json['data_insercao']
        self.data_alteracao = curtida_json["data_alteracao"]
        self.postagem_id = curtida_json['postagem_id']
        self.usuario_id = curtida_json['usuario_id']
        self.operacao = curtida_json['operacao']

    def json_string(self):
        """Return the record serialized as a valid JSON string.

        BUG FIX: the previous hand-built serialization emitted postagem_id,
        usuario_id and operacao without quotes, so whenever those fields held
        strings (including the '' defaults) the output was not valid JSON and
        json()/str() raised. Datetimes are rendered via str(), as before.
        """
        data = {
            'id': self.id,
            'data_insercao': str(self.data_insercao),
            'data_alteracao': str(self.data_alteracao),
            'postagem_id': self.postagem_id,
            'usuario_id': self.usuario_id,
            'operacao': self.operacao,
        }
        return json.dumps(data, ensure_ascii=False, indent='\t')

    def __str__(self):
        return self.json_string()

    def json(self):
        """Return the record as a dict (round-trips through json_string)."""
        return json.loads(self.json_string())
import DiskUsage as DU
import datetime
import importlib.util
# Load the Teamcity helper module directly from a sibling checkout by file
# path (no package install). NOTE(review): the backslashes make this path
# Windows-only; a raw string / pathlib would be safer if edited.
spec = importlib.util.spec_from_file_location("Teamcity", "..\TA_TestReport\Teamcity.py")
TC = importlib.util.module_from_spec(spec)
spec.loader.exec_module(TC)

if __name__ == '__main__':
    # NOTE(review): DiskUsage is "initialised" by calling the unbound method
    # with the class object itself as `self`, so all state lives on the class
    # rather than an instance. It works, but an ordinary instance would be the
    # conventional pattern.
    # SECURITY: credentials are hard-coded in source -- move them to an
    # environment variable or config file.
    DU.DiskUsage.__init__(DU.DiskUsage, username="adacc",
                          password="jd7gowuX",
                          server="http://vmsk-tc-prm.paragon-software.com",
                          port=None,
                          jsonname="Prm_Sandbox_2Sintsov"
                          )
    #builds = TC.Teamcity.getLastBuildsByDate(TC.Teamcity,"Prm_Tests_Sdkl1_723sdkFastWixFullLi", "20170727T000000%2B0300")
    #project = TC.Teamcity.getSubprojectsFromProject(TC.Teamcity, "PrmF100716")
    #print(project)
    #DU.DiskUsage.buildTree(DU.DiskUsage, "PrmBackup")
    # Time-stamped progress markers around the (slow) artifact-size crawl.
    print ("Work begins :" + str(datetime.datetime.now()))
    t = DU.DiskUsage.getAllBuildsArtsize(DU.DiskUsage, "Prm_Sandbox_2Sintsov")
    #print (t)
    DU.DiskUsage.buildCustomTree(DU.DiskUsage, "Prm_Sandbox_2Sintsov")
    DU.DiskUsage.projectToJSON(DU.DiskUsage, "Prm_Sandbox_2Sintsov")
    #for k, v in DU.DiskUsage.projectsArtSize.items():
    #    print(k +" : "+ "%.3f" %(v/1048576) + " Mb")
    print("Work ends :" + str(datetime.datetime.now()))
|
from django import forms
from blog.models import Article
# Create the form class.
class Article(forms.ModelForm):
    """Model form for creating/editing blog Articles (title + text only).

    NOTE(review): this class shadows the imported `Article` model at module
    level. Inside Meta the name still resolves to the model (the form class
    is not yet bound while its own body executes), so it works -- but after
    this definition, `Article` in this module is the form. Renaming it to
    `ArticleForm` would be clearer; that would change the public name used by
    importers, so it is flagged here rather than changed.
    """
    class Meta:
        model = Article
        fields = ('title', 'text')
        # Bootstrap styling on both inputs.
        widgets = {
            'title': forms.TextInput(attrs={'class': 'form-control'}),
            'text': forms.Textarea(attrs={'class': 'form-control'})
        }
class Search(forms.Form):
    """Single-field keyword search form with Bootstrap styling."""
    keyword = forms.CharField(
        min_length=1,
        max_length=20,
        widget=forms.TextInput(attrs={'class': 'form-control'}),
    )
|
import math


def count_divisors(n):
    """Return the number of positive divisors of n (n >= 1) in O(sqrt(n))."""
    root = int(math.sqrt(n))
    count = 0
    for d in range(1, root + 1):
        if n % d == 0:
            count += 2  # counts both d and n // d
    if root * root == n:
        count -= 1  # perfect square: d == n // d was double-counted
    return count


def first_triangular_with_more_divisors(target=500):
    """Return the first triangular number k*(k+1)//2 with more than `target`
    divisors (Project Euler 12; target generalizes the classic 500)."""
    k = 1
    while True:
        tri = k * (k + 1) // 2
        if count_divisors(tri) > target:
            return tri
        k += 1


# Rewrite of the original brute force, which (a) materialized ~76.5 million
# triangular numbers in a list (gigabytes of memory), (b) printed that whole
# list (with Python 2 print syntax), (c) trial-divided every candidate by
# every integer up to itself, and (d) used the exact-count test
# `len(c) == 499` which never matches the actual answer (76576500 has 576
# divisors), so the search could not terminate correctly.
if __name__ == "__main__":
    print(first_triangular_with_more_divisors(500))
import asyncio
import io
from datetime import datetime, timedelta
import pandas as pd
import requests
import streamlit as st
from openbb_terminal.core.plots.plotly_helper import OpenBBFigure, theme
from openbb_terminal.core.session.current_system import set_system_variable
from openbb_terminal.dashboards.stream import streamlit_helpers as st_helpers
pd.options.plotting.backend = "plotly"

# Suppressing sdk logs
set_system_variable("LOGGING_SUPPRESS", True)

st.set_page_config(
    layout="wide",
    page_title="Short Data",
    initial_sidebar_state="expanded",
)

st.sidebar.markdown(
    "<h2 style='text-align: center;'>Finra Short Data</h2>",
    unsafe_allow_html=True,
)

# Sidebar slots that build_app() fills with the input widgets later.
TICKER = st.sidebar.container()
DAYS = st.sidebar.container()
COUNT = st.sidebar.container()
load_button, ticker_button, show_button = st.sidebar.columns([1, 1, 1])
# Placeholders: output1 holds the aggregate chart, output2 the per-ticker one.
output1 = st.empty()
output2 = st.empty()

title_html = """
<p><strong style="color: #00ACFF">Load Data:</strong> <br>
This widget downloads the consolidated NMS short data from FINRA and aggregates
the data by summing over the entire time period.</p>
<p>Note that clicking the this button will reload all data.
This can get time consuming, so if you pick a few hundred days,
expect a few minutes for loading time.</p>
"""

middle_html = """
<strong style="color: #00ACFF">Plot Ticker:</strong> <br>Query for a single stock. This will work with the loaded data.
Note that if you want to reload the data, this will once again take some time.
"""

st.markdown(title_html, unsafe_allow_html=True)
st.markdown(middle_html, unsafe_allow_html=True)

# NOTE(review): never assigned anywhere visible in this file -- looks vestigial.
MAIN_LOOP: asyncio.AbstractEventLoop = None  # type: ignore

st_helpers.set_current_page("Short Data")
st_helpers.set_css()
class FinraShortData:
    """Streamlit controller for downloading FINRA consolidated NMS short-sale
    daily files and charting aggregate and per-ticker short volume."""
    def __init__(self, days_slider=30, count_slider=10):
        # DataFrame and slider values survive Streamlit reruns via session state.
        self.df = st_helpers.load_state("df", pd.DataFrame())
        self.days_slider = st_helpers.load_state("days_slider", days_slider)
        self.count_slider = st_helpers.load_state("count_slider", count_slider)
        self.ticker_button = None
        self.load_button = None
        self.show_button = None
        self.loaded = False
    def activate_buttons(self):
        """Create the 'Plot Ticker' and 'Show' buttons once data exists."""
        if not self.loaded:
            with ticker_button.container():
                self.ticker_button = ticker_button.button(
                    "Plot Ticker", key="ticker_button"
                )
            with show_button.container():
                self.show_button = show_button.button("Show", key="show_button")
            self.loaded = True
    def show_button_click(self):
        """Redraw the aggregate chart from the already-loaded data."""
        output1.empty()
        self.update()
    def load_button_click(self):
        """Re-download data for the selected window, then redraw the chart."""
        output1.empty()
        output2.empty()
        with st.spinner(f"Loading data for {self.days_slider} days"):
            self.fetch_new_data()
        self.update()
    def ticker_button_click(self):
        """Draw the per-ticker time-series chart."""
        output2.empty()
        self.ticker_plot()
    def fetch_new_data(self):
        """Download one pipe-delimited daily file per calendar day in the
        window and concatenate them into self.df."""
        self.df = pd.DataFrame()
        today = datetime.now().date()
        start_date = today - timedelta(days=self.days_slider)
        dates = pd.date_range(start_date, today)
        for date in dates:
            r = requests.get(
                f"https://cdn.finra.org/equity/regsho/daily/CNMSshvol{date.strftime('%Y%m%d')}.txt",
                timeout=30,
            )
            # Non-200 responses (weekends/holidays, outages) are silently skipped.
            if r.status_code == 200:
                self.df = pd.concat(
                    [self.df, pd.read_csv(io.StringIO(r.text), sep="|")], axis=0
                )
        # Drop malformed/footer rows, then parse the integer YYYYMMDD dates.
        self.df = self.df[self.df.Date > 20100101]
        self.df.Date = self.df["Date"].apply(
            lambda x: datetime.strptime(str(x), "%Y%m%d")
        )
        st_helpers.save_state("df", self.df)
    def update(self):
        """Render a stacked bar chart of the top-N tickers by summed short
        volume over the loaded window."""
        if not self.df.empty:
            # [::-1] reverses so the largest bar ends up at the top of the chart.
            temp = (
                self.df.groupby("Symbol")[["ShortVolume", "TotalVolume"]]
                .agg("sum")
                .sort_values(by="ShortVolume", ascending=False)
                .head(self.count_slider)[::-1]
            )
            fig = OpenBBFigure()
            fig.add_bar(
                x=temp.TotalVolume,
                y=temp.index,
                orientation="h",
                name="Total Volume",
                marker_color=theme.up_color,
            )
            fig.add_bar(
                x=temp.ShortVolume,
                y=temp.index,
                orientation="h",
                name="Short Volume",
                marker_color=theme.down_color,
            )
            fig.update_layout(
                title=f"Top {self.count_slider} Short Volume in Last {self.days_slider} Days",
                margin=dict(l=30),
                xaxis_title="Volume",
                yaxis_title="Ticker",
                barmode="stack",
                bargap=0.1,
                hovermode="y unified",
                legend=dict(
                    orientation="h",
                    yanchor="bottom",
                    y=1.02,
                    xanchor="right",
                    x=1,
                ),
            )
            fig.show(external=True)
            with output1:
                st.plotly_chart(fig, use_container_width=True)
    def ticker_plot(self):
        """Plot total vs short volume over time for the entered ticker.
        NOTE(review): reads self.stock_input, which is only set in
        build_app() -- must not be called before the UI is built."""
        stock_data = self.df.copy().loc[
            self.df.Symbol == self.stock_input.upper(),
            ["Date", "ShortVolume", "TotalVolume"],
        ]
        fig2 = OpenBBFigure()
        fig2.add_scatter(
            x=stock_data.Date,
            y=stock_data.TotalVolume,
            name="Total Volume",
            marker_color=theme.up_color,
        )
        fig2.add_scatter(
            x=stock_data.Date,
            y=stock_data.ShortVolume,
            name="Short Volume",
            marker_color=theme.down_color,
        )
        fig2.update_layout(
            title=f"Stock Volume and Short Volume for {self.stock_input.upper()}",
            margin=dict(l=30),
            xaxis_title="Date",
            yaxis_title="Volume",
        )
        fig2.show(external=True)
        with output2:
            st.plotly_chart(fig2)
    def build_app(self):
        """Wire up the sidebar widgets and dispatch whichever button was
        pressed during this Streamlit rerun."""
        with TICKER:
            self.stock_input = TICKER.text_input("Ticker", "GME", key="ticker")
        with DAYS:
            self.days_slider = DAYS.slider(
                "Days", 1, 1000, 100, help="Number of days to load"
            )
        with COUNT:
            self.count_slider = COUNT.slider(
                "Count", 1, 100, 20, help="Number of stocks to plot"
            )
        with load_button.container():
            self.load_button = load_button.button("Load Data", key="load")
        if not self.df.empty:
            self.activate_buttons()
        if self.load_button:
            self.load_button_click()
        if self.show_button:
            self.show_button_click()
        if self.ticker_button:
            self.ticker_button_click()
        # Data may have just been loaded above; make sure the buttons appear.
        if not self.df.empty:
            self.activate_buttons()
if __name__ == "__main__":
    # Streamlit re-executes this script top-to-bottom on every interaction.
    app = FinraShortData()
    app.build_app()
|
#!/usr/bin/python3
import pygame
from classes.wall import Wall
from classes.block import Block
from classes.player import Player
from classes.ghost import Ghost
# RGB color constants used throughout the game.
black = (0,0,0)
white = (255,255,255)
blue = (0,0,255)
green = (0,255,0)
red = (255,0,0)
purple = (255,0,255)
yellow = (255,255,0)

# Commands
print("")
print("\033[36m📚 HOW TO PLAY?\033[0m")
print("\033[32m🟢 Move Pacman using UP KEY 🔼, DOWN KEY 🔽, LEFT KEY ◀️ and RIGHT KEY ▶️ \033[0m")
print("\033[31m🔴 Press the \"ESCAPE\" KEY on the PACMAN GAME OVER screen to end the game! \033[0m")
print("")

#Add music (looped forever)
pygame.mixer.init()
pygame.mixer.music.load('pacman.mp3')
pygame.mixer.music.play(-1, 0.0)

# Default locations for Pacman and Ghosts
w = 303-16 #Width
pacman_height = (7*60)+19
monster_height = (4*60)+19
blinky_height = (3*60)+19
inky_width = 303-16-32
clyde_width = 303+(32-16)

# Scripted ghost patrol routes. Each entry appears to be
# [x-step, y-step, repeat-count] consumed by Ghost.changespeed --
# TODO confirm against classes/ghost.py.
#Pinky movements
Pinky_directions = [
[0,-30,4],
[15,0,9],
[0,15,11],
[-15,0,23],
[0,15,7],
[15,0,3],
[0,-15,3],
[15,0,19],
[0,15,3],
[15,0,3],
[0,15,3],
[15,0,3],
[0,-15,15],
[-15,0,7],
[0,15,3],
[-15,0,19],
[0,-15,11],
[15,0,9]
]

#Blinky movements
Blinky_directions = [
[0,-15,4],
[15,0,9],
[0,15,11],
[15,0,3],
[0,15,7],
[-15,0,11],
[0,15,3],
[15,0,15],
[0,-15,15],
[15,0,3],
[0,-15,11],
[-15,0,3],
[0,-15,11],
[-15,0,3],
[0,-15,3],
[-15,0,7],
[0,-15,3],
[15,0,15],
[0,15,15],
[-15,0,3],
[0,15,3],
[-15,0,3],
[0,-15,7],
[-15,0,3],
[0,15,7],
[-15,0,11],
[0,-15,7],
[15,0,5]
]

#Inky movements
Inky_directions = [
[30,0,2],
[0,-15,4],
[15,0,10],
[0,15,7],
[15,0,3],
[0,-15,3],
[15,0,3],
[0,-15,15],
[-15,0,15],
[0,15,3],
[15,0,15],
[0,15,11],
[-15,0,3],
[0,-15,7],
[-15,0,11],
[0,15,3],
[-15,0,11],
[0,15,7],
[-15,0,3],
[0,-15,3],
[-15,0,3],
[0,-15,15],
[15,0,15],
[0,15,3],
[-15,0,15],
[0,15,11],
[15,0,3],
[0,-15,11],
[15,0,11],
[0,15,3],
[15,0,1],
]

#Clyde movements
Clyde_directions = [
[-30,0,2],
[0,-15,4],
[15,0,5],
[0,15,7],
[-15,0,11],
[0,-15,7],
[-15,0,3],
[0,15,7],
[-15,0,7],
[0,15,15],
[15,0,15],
[0,-15,3],
[-15,0,11],
[0,-15,7],
[15,0,3],
[0,-15,11],
[15,0,9],
]

# Index of the last waypoint in each route (used to wrap the patrols).
pinky_movements_list = len(Pinky_directions)-1
blinky_movements_list = len(Blinky_directions)-1
inky_movements_list = len(Inky_directions)-1
clyde_movements_list = len(Clyde_directions)-1

pygame.init()

# Create an 606x606 sized screen
screen = pygame.display.set_mode([606, 606])

# Window Title
pygame.display.set_caption('Pacman')

# Surface Creation
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill(black)

clock = pygame.time.Clock()

pygame.font.init()
font = pygame.font.Font("freesansbold.ttf", 24)
def run():
    """Run the game until the player quits, then shut pygame down."""
    startGame()
    pygame.quit()
def startGame():
    """Set up one game session (walls, pellets, Pacman, the four ghosts) and
    run the 10-FPS main loop until the player wins, loses, or quits."""
    all_sprites_list = pygame.sprite.RenderPlain()
    block_list = pygame.sprite.RenderPlain()
    monsta_list = pygame.sprite.RenderPlain()
    pacman_collide = pygame.sprite.RenderPlain()
    wall_list = setupRoomOne(all_sprites_list)
    gate = setupGate(all_sprites_list)
    # Per-ghost progress through its scripted route: (waypoint index, steps taken).
    pinky_turn = 0
    pinky_steps = 0
    blinky_turn = 0
    blinky_steps = 0
    inky_turn = 0
    inky_steps = 0
    clyde_turn = 0
    clyde_steps = 0
    Pacman = Player(w, pacman_height, "images/pacman.png" )
    all_sprites_list.add(Pacman)
    pacman_collide.add(Pacman)
    Blinky = Ghost(w, blinky_height, "images/Blinky.png" )
    monsta_list.add(Blinky)
    all_sprites_list.add(Blinky)
    Pinky = Ghost(w, monster_height, "images/Pinky.png" )
    monsta_list.add(Pinky)
    all_sprites_list.add(Pinky)
    Inky = Ghost(inky_width, monster_height, "images/Inky.png" )
    monsta_list.add(Inky)
    all_sprites_list.add(Inky)
    Clyde = Ghost(clyde_width, monster_height, "images/Clyde.png" )
    monsta_list.add(Clyde)
    all_sprites_list.add(Clyde)
    # Draw the grid of pellets, skipping the ghost house and occupied cells.
    for row in range(19):
        for column in range(19):
            if (row == 7 or row == 8) and (column == 8 or column == 9 or column == 10):
                continue
            else:
                block = Block(yellow, 4, 4)
                # Set a random location for the block
                block.rect.x = (30*column+6)+26
                block.rect.y = (30*row+6)+26
                b_collide = pygame.sprite.spritecollide(block, wall_list, False)
                p_collide = pygame.sprite.spritecollide(block, pacman_collide, False)
                if b_collide:
                    continue
                elif p_collide:
                    continue
                else:
                    # Add the block to the list of objects
                    block_list.add(block)
                    all_sprites_list.add(block)
    bll = len(block_list)  # total pellets; eating them all wins the game
    score = 0
    done = False
    i = 0
    while done == False:
        # ALL EVENT PROCESSING SHOULD GO BELOW THIS COMMENT
        # KEYDOWN adds a velocity component; the matching KEYUP cancels it.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = True
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT or event.key == ord('a'):
                    Pacman.changespeed(-30,0)
                if event.key == pygame.K_RIGHT or event.key == ord('d'):
                    Pacman.changespeed(30,0)
                if event.key == pygame.K_UP or event.key == ord('w'):
                    Pacman.changespeed(0,-30)
                if event.key == pygame.K_DOWN or event.key == ord('s'):
                    Pacman.changespeed(0,30)
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_LEFT or event.key == ord('a'):
                    Pacman.changespeed(30,0)
                if event.key == pygame.K_RIGHT or event.key == ord('d'):
                    Pacman.changespeed(-30,0)
                if event.key == pygame.K_UP or event.key == ord('w'):
                    Pacman.changespeed(0,30)
                if event.key == pygame.K_DOWN or event.key == ord('s'):
                    Pacman.changespeed(0,-30)
        # ALL GAME LOGIC SHOULD GO BELOW THIS COMMENT
        Pacman.update(wall_list,gate)
        # NOTE(review): each ghost's changespeed is called twice per frame with
        # identical arguments; the second call looks redundant -- confirm
        # against Ghost.changespeed before removing.
        returned = Pinky.changespeed(Pinky_directions, False, pinky_turn, pinky_steps, pinky_movements_list)
        pinky_turn = returned[0]
        pinky_steps = returned[1]
        Pinky.changespeed(Pinky_directions, False, pinky_turn, pinky_steps, pinky_movements_list)
        Pinky.update(wall_list, False)
        returned = Blinky.changespeed(Blinky_directions, False, blinky_turn, blinky_steps, blinky_movements_list)
        blinky_turn = returned[0]
        blinky_steps = returned[1]
        Blinky.changespeed(Blinky_directions, False, blinky_turn, blinky_steps, blinky_movements_list)
        Blinky.update(wall_list, False)
        returned = Inky.changespeed(Inky_directions, False, inky_turn, inky_steps, inky_movements_list)
        inky_turn = returned[0]
        inky_steps = returned[1]
        Inky.changespeed(Inky_directions, False, inky_turn, inky_steps, inky_movements_list)
        Inky.update(wall_list,False)
        returned = Clyde.changespeed(Clyde_directions, "clyde", clyde_turn, clyde_steps, clyde_movements_list)
        clyde_turn = returned[0]
        clyde_steps = returned[1]
        Clyde.changespeed(Clyde_directions, "clyde", clyde_turn, clyde_steps, clyde_movements_list)
        Clyde.update(wall_list,False)
        # See if the Pacman block has collided with anything.
        blocks_hit_list = pygame.sprite.spritecollide(Pacman, block_list, True)
        # Check the list of collisions.
        if len(blocks_hit_list) > 0:
            score += len(blocks_hit_list)
        # ALL CODE TO DRAW SHOULD GO BELOW THIS COMMENT
        screen.fill(black)
        wall_list.draw(screen)
        gate.draw(screen)
        all_sprites_list.draw(screen)
        monsta_list.draw(screen)
        text = font.render("Score: " + str(score) + "/" + str(bll), True, red)
        screen.blit(text, [10, 10])
        # Win when every pellet is eaten; lose on any ghost contact.
        if score == bll:
            doNext("Congratulations, you won!",145,all_sprites_list,block_list,monsta_list,pacman_collide,wall_list,gate)
        monsta_hit_list = pygame.sprite.spritecollide(Pacman, monsta_list, False)
        if monsta_hit_list:
            doNext("Game Over",235,all_sprites_list,block_list,monsta_list,pacman_collide,wall_list,gate)
        pygame.display.flip()
        clock.tick(10)
# This creates all level 1 walls
def setupRoomOne(all_sprites_list):
    """Build the level-1 wall layout and return it as a sprite group.

    Every wall is also registered in `all_sprites_list` so it is drawn along
    with everything else.
    """
    wall_list = pygame.sprite.RenderPlain()
    # Wall geometry, one entry per wall: [x, y, width, height].
    walls = [ [0,0,6,600],
              [0,0,600,6],
              [0,600,606,6],
              [600,0,6,606],
              [300,0,6,66],
              [60,60,186,6],
              [360,60,186,6],
              [60,120,66,6],
              [60,120,6,126],
              [180,120,246,6],
              [300,120,6,66],
              [480,120,66,6],
              [540,120,6,126],
              [120,180,126,6],
              [120,180,6,126],
              [360,180,126,6],
              [480,180,6,126],
              [180,240,6,126],
              [180,360,246,6],
              [420,240,6,126],
              [240,240,42,6],
              [324,240,42,6],
              [240,240,6,66],
              [240,300,126,6],
              [360,240,6,66],
              [0,300,66,6],
              [540,300,66,6],
              [60,360,66,6],
              [60,360,6,186],
              [480,360,66,6],
              [540,360,6,186],
              [120,420,366,6],
              [120,420,6,66],
              [480,420,6,66],
              [180,480,246,6],
              [300,480,6,66],
              [120,540,126,6],
              [360,540,126,6]
            ]
    # Instantiate each wall and register it in both sprite groups.
    for wall_x, wall_y, wall_w, wall_h in walls:
        new_wall = Wall(wall_x, wall_y, wall_w, wall_h, blue)
        wall_list.add(new_wall)
        all_sprites_list.add(new_wall)
    return wall_list
def setupGate(all_sprites_list):
    """Create the ghost-house gate (a thin white wall) and return its group."""
    gate_group = pygame.sprite.RenderPlain()
    gate_wall = Wall(282, 242, 42, 2, white)
    gate_group.add(gate_wall)
    all_sprites_list.add(gate_group)
    return gate_group
def doNext(message,left,all_sprites_list,block_list,monsta_list,pacman_collide,wall_list,gate):
    """Show the end-of-round overlay (`message` drawn `left` px from the left
    edge) and wait: ENTER restarts the game, ESCAPE or window-close quits.

    NOTE(review): restarting calls startGame() recursively instead of
    returning to it, so each replay grows the call stack.
    """
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    pygame.quit()
                if event.key == pygame.K_RETURN:
                    # Drop references to the old round's sprites before restarting.
                    del all_sprites_list
                    del block_list
                    del monsta_list
                    del pacman_collide
                    del wall_list
                    del gate
                    startGame()
        # Grey background (local `w` shadows the module-level width constant).
        w = pygame.Surface((400,200))  # the size of your rect
        w.set_alpha(10)                # alpha level
        w.fill((128,128,128))          # this fills the entire surface
        screen.blit(w, (100,200))      # (0,0) are the top-left coordinates
        # WON or QUIT
        text1 = font.render(message, True, white)
        screen.blit(text1, [left, 233])
        text2 = font.render("To play again, press ENTER.", True, white)
        screen.blit(text2, [135, 303])
        text3 = font.render("To quit, press ESCAPE.", True, white)
        screen.blit(text3, [165, 333])
        pygame.display.flip()
        clock.tick(10)
#1. Реализовать функцию, принимающую два числа (позиционные аргументы) и выполняющую их деление.
# Числа запрашивать у пользователя, предусмотреть обработку ситуации деления на ноль.
def my_dev(x, y):
    """Divide x by y; return None when y is zero (task 1)."""
    try:
        quotient = x / y
    except ZeroDivisionError:
        return None
    return quotient
# Read numerator and denominator from the user; prints None on division by
# zero (my_dev swallows ZeroDivisionError).
x = int(input('Введите числитель:'))
y = int(input('Введите знаменатель:'))
print(my_dev(x,y))
# 2. Реализовать функцию, принимающую несколько параметров, описывающих данные пользователя: имя,
# фамилия, год рождения, город проживания, email, телефон. Функция должна принимать параметры как
# именованные аргументы. Реализовать вывод данных о пользователе одной строкой.
def infant_school(surname, name, born_year, town, email, phone):
    """Print a single-line summary of a user's personal data (task 2)."""
    summary = f"Имя - {name}, Фамилия - {surname}. Год рождения - {born_year}, город - {town}, Email {email}, номер телефона {phone}"
    print(summary)

# Demo call with keyword arguments only.
infant_school(surname="Иванов", name="Иван", born_year=1999, town="Москва", email="abra@kadabra.com", phone="987-765-4321")
# 3. Реализовать функцию my_func(), которая принимает три позиционных аргумента,
# и возвращает сумму наибольших двух аргументов.
def my_func(x, y, z):
    """Return the sum of the two largest of the three arguments (task 3)."""
    total = x + y + z
    return total - min(x, y, z)

print(my_func(3, 4, 5))
# 4. Программа принимает действительное положительное число x и целое отрицательное число y. Необходимо выполнить
# возведение числа x в степень y. Задание необходимо реализовать в виде функции my_func(x, y).
# При решении задания необходимо обойтись без встроенной функции возведения числа в степень.
def my_func(x, y):
    """Compute x ** y for a negative integer y without the ** operator (task 4)."""
    product = x
    for _ in range(1, -y):
        product *= x
    return 1 / product

print(my_func(2, -5))
# 5. Программа запрашивает у пользователя строку чисел, разделенных пробелом.
# При нажатии Enter должна выводиться сумма чисел. Пользователь может продолжить ввод чисел,
# разделенных пробелом и снова нажать Enter. Сумма вновь введенных чисел будет добавляться к уже
# подсчитанной сумме. Но если вместо числа вводится специальный символ, выполнение программы завершается.
# Если специальный символ введен после нескольких чисел, то вначале нужно добавить сумму этих чисел к
# полученной ранее сумме и после этого завершить программу.
def infant():
    """Keep reading lines of space-separated integers, accumulating their sum,
    until the sentinel 'q' is entered; numbers preceding 'q' on the same line
    are still added before returning (task 5)."""
    summa = 0
    while True:
        x = input('Введите cтроку чисел, разделенных пробелом, для завершения введите \'q\':')
        if x == 'q':
            print('Сумма равна ', summa)
            return summa
        y = x.split()
        for i in range(len(y)):
            if y[i] == 'q':
                print('Сумма равна ', summa)
                return summa
            summa += int(y[i])
        print('Сумма равна ', summa)
        print('Продолжим данное увлекательное занятие')

# NOTE(review): this shadows the builtin `sum` for the rest of the module.
sum = infant()
# 6. Реализовать функцию int_func(), принимающую слово из маленьких латинских букв и возвращающую его же,
# но с прописной первой буквой. Например, print(int_func(‘text’)) -> Text.
# PEP 8 (E731): prefer a def over assigning a lambda to a name -- the function
# gets a proper __name__ for tracebacks and can carry a docstring.
def int_func(p):
    """Return `p` with its first letter upper-cased and the rest lower-cased
    (str.capitalize), e.g. int_func('text') -> 'Text' (task 6)."""
    return p.capitalize()
# Продолжить работу над заданием. В программу должна попадать строка из слов, разделенных пробелом. Каждое слово состо
# ит из латинских букв в нижнем регистре. Сделать вывод исходной строки, но каждое слово должно начинаться
# с заглавной буквы. Необходимо использовать написанную ранее функцию int_func().
# Demo: capitalize every word of a sentence using int_func().
s = 'hello guy how are you'
ss = s.split()
cap_s = ''
for i in range(len(ss)):
    # NOTE(review): string += in a loop leaves a trailing space;
    # ' '.join(map(int_func, ss)) would avoid both.
    cap_s = cap_s + int_func(ss[i]) +' '
print(cap_s)
###
|
# Generated by Django 2.2.6 on 2020-04-15 16:08
from decimal import Decimal
from django.conf import settings
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Products and Orders tables.

    Hand-adjusted from the generated file: the ArrayField defaults were
    changed from the shared mutable literal `[]` to the callable `list`,
    as Django's postgres system check requires (a literal default object
    would be shared between all model instances).
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Products',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('category', models.CharField(max_length=30)),
                ('name', models.CharField(max_length=50)),
                ('price', models.DecimalField(decimal_places=2, default=Decimal('0.00'), max_digits=10)),
                ('quantity', models.PositiveIntegerField()),
                ('rating', models.PositiveSmallIntegerField(default=0)),
                ('description', models.TextField(default='Product description.')),
                ('image_url', models.TextField(default='')),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'products',
            },
        ),
        migrations.CreateModel(
            name='Orders',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now_add=True)),
                # default=list (callable) instead of default=[] (shared mutable).
                ('products', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=50), default=list, size=None)),
                ('quantities', django.contrib.postgres.fields.ArrayField(base_field=models.PositiveIntegerField(), default=list, size=None)),
                ('total_price', models.DecimalField(decimal_places=2, default=Decimal('0.00'), max_digits=10)),
                ('delivery_method', models.CharField(default='', max_length=30)),
                ('payment_method', models.CharField(default='', max_length=30)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'orders',
            },
        ),
    ]
|
from generate_json_from_grid import findPath
import json
import numpy as np
# Build a CityFlow-style roadnet.json for the LA grid from three setting files
# (lane layout, intersection coordinates, traffic-light phases).
path = '../data/setting/LA/'
# Grid neighbor offsets indexed by direction 0..3 (E, N, W, S order implied
# by the road naming below -- TODO confirm against the data files).
moves = [(1,0), (0,1), (-1,0), (0,-1)]
typemap = {"l": "turn_left", "s": "go_straight", "r": "turn_right"}
# Turning left/straight/right rotates the direction index by +1/0/-1 (mod 4).
turnmap = {"l": 1, "s": 0, "r": -1}
LANE_WIDTH = 4
LANE_MAX_SPEED = 16.67
NMIDPOINTS = 5
BRANCH_LENGTH = 50
INTER_WIDTH = 30

# NOTE(review): these file handles are never closed; `with open(...)` blocks
# would be safer.
lane_setting = json.load(open(path + 'lane_setting.json'))
raw_intersections = json.load(open(path + 'inter_coor.json'))
phase_setting = json.load(open(path + 'phase_setting.json'))

# Expand each raw [x, y] coordinate into a full intersection record.
for inter in raw_intersections:
    raw_intersections[inter] = {
        "point": dict(zip(["x", "y"], raw_intersections[inter])),
        "roadLinks": [],
        "trafficLight": {
            "lightphases": []
        },
        "virtual": False,
        "width": INTER_WIDTH
    }

# Shallow copy so virtual border intersections can be added below while
# iterating over the raw (real) ones; the record dicts are shared.
intersections = raw_intersections.copy()
for inter in raw_intersections:
    _, x, y = inter.split('_')
    x = int(x)
    y = int(y)
    # Any missing neighbor becomes a zero-width virtual intersection placed
    # BRANCH_LENGTH away, terminating the dangling road.
    for i in range(4):
        inter2 = "intersection_%d_%d" % (x + moves[i][0], y + moves[i][1])
        if not inter2 in raw_intersections:
            intersections[inter2] = {
                "point": {
                    "x": raw_intersections[inter]["point"]["x"] + moves[i][0] * BRANCH_LENGTH,
                    "y": raw_intersections[inter]["point"]["y"] + moves[i][1] * BRANCH_LENGTH
                },
                "virtual": True,
                "width": 0
            }

# Build the road records; road ids encode start cell and direction.
roads = {}
for road in lane_setting:
    _, x, y, direction = road.split('_')
    x = int(x)
    y = int(y)
    direction = int(direction)
    roads[road] = {
        # NOTE(review): list-repeat shares ONE lane dict across all lanes --
        # safe only while lanes stay read-only; confirm before mutating.
        "lanes": [{"width": LANE_WIDTH, "maxSpeed": LANE_MAX_SPEED}] * len(lane_setting[road]),
        "startIntersection": "intersection_%d_%d" % (x, y),
        "endIntersection": "intersection_%d_%d" % (x+moves[direction][0], y+moves[direction][1])
    }
    roads[road]["points"] = [
        intersections[roads[road]["startIntersection"]]["point"],
        intersections[roads[road]["endIntersection"]]["point"]
    ]

# For each road, create a roadLink per allowed turn at its end intersection,
# with per-lane connection paths computed by findPath().
for road in lane_setting:
    _, x, y, direction = road.split('_')
    x = int(x)
    y = int(y)
    direction = int(direction)
    x += moves[direction][0]
    y += moves[direction][1]
    inter = "intersection_%d_%d" % (x, y)
    if intersections[inter]["virtual"]:
        continue
    for i, heads in enumerate(lane_setting[road]):
        for head in heads[1]:
            endRoad = "road_%d_%d_%d" % (x, y, (direction+turnmap[head]) % 4)
            laneLinks = []
            roadLink = {
                "type": typemap[head],
                "startRoad": road,
                "endRoad": endRoad,
                "laneLinks": laneLinks
            }
            # Connect lane i of the incoming road to every lane of endRoad.
            for j in range(len(roads[endRoad]["lanes"])):
                laneLinks.append({
                    "startLaneIndex": i,
                    "endLaneIndex": j,
                    "points": findPath(roads[road], i, roads[endRoad], j, intersections[inter]["width"], NMIDPOINTS),
                })
            intersections[inter]["roadLinks"].append(roadLink)

# generate traffic light
for inter in intersections:
    if intersections[inter]["virtual"]:
        continue
    roadLinks = intersections[inter]["roadLinks"]
    roadLinkIndices = [x for x in range(len(roadLinks))]
    # Partition the roadLink indices by turn type and by approach direction.
    leftLaneLinks = set(filter(lambda x: roadLinks[x]["type"] == "turn_left", roadLinkIndices))
    rightLaneLinks = set(filter(lambda x: roadLinks[x]["type"] == "turn_right", roadLinkIndices))
    straightLaneLinks = set(filter(lambda x: roadLinks[x]["type"] == "go_straight", roadLinkIndices))
    WELaneLinks = set(filter(lambda x: roadLinks[x]["startRoad"].split('_')[3] == "0", roadLinkIndices))
    NSLaneLinks = set(filter(lambda x: roadLinks[x]["startRoad"].split('_')[3] == "3", roadLinkIndices))
    EWLaneLinks = set(filter(lambda x: roadLinks[x]["startRoad"].split('_')[3] == "2", roadLinkIndices))
    SNLaneLinks = set(filter(lambda x: roadLinks[x]["startRoad"].split('_')[3] == "1", roadLinkIndices))
    directionLaneLinks = {
        "N": NSLaneLinks,
        "S": SNLaneLinks,
        "W": WELaneLinks,
        "E": EWLaneLinks
    }
    turnLaneLinks = {
        "s": straightLaneLinks,
        "l": leftLaneLinks,
        "r": rightLaneLinks
    }
    tlPhases = []
    for i, phase in phase_setting[inter].items():
        if phase == "all red":
            # Right turns stay permitted even during the all-red phase.
            tlPhases.append({
                "time": 5,
                "availableRoadLinks": rightLaneLinks
            })
        else:
            # A phase is a list of (direction, turn) pairs; intersect the
            # direction and turn sets to get the permitted roadLinks.
            availableRoadLinks = set(rightLaneLinks)
            for phase_pair in phase:
                direction, turn = phase_pair
                availableRoadLinks |= directionLaneLinks[direction] & turnLaneLinks[turn]
            tlPhases.append({
                "time": 30,
                "availableRoadLinks": availableRoadLinks
            })
    # Sets are not JSON-serializable; convert to lists before dumping.
    for tlPhase in tlPhases:
        tlPhase["availableRoadLinks"] = list(tlPhase["availableRoadLinks"])
    intersections[inter]["trafficLight"]["lightphases"] = tlPhases

roadnet = {
    "intersections": [{"id": inter, **intersections[inter]} for inter in intersections],
    "roads": [{"id": road, **roads[road]} for road in roads]
}
# NOTE(review): output handle is never closed; `with open(...)` would be safer.
json.dump(roadnet, open(path + "roadnet.json", "w"), indent=2)
|
from . import views
from django.urls import path
from .views import BlogPageView
#
#urlpatterns = [
# path('/blog/', BlogPageView.as_view(), name='blog'),
#]
# |
"""woof"""
class Dog:
    """A simple model of a pet dog that can sit and roll over."""
    def __init__(self, name, age):
        """Store the dog's name (title-cased for display) and its age."""
        self.name = name.title()
        self.age = age
    def sit(self):
        """Print a message saying the dog is sitting."""
        print(self.name + " is now sitting.")
    def roll_over(self):
        """Print a message saying the dog is rolling over."""
        print(self.name + " is now rolling over!")
# Demo: create one dog and exercise both behaviours.
my_dog = Dog("precious", 10)
print("My dog's name is " + my_dog.name)
my_dog.sit()
my_dog.roll_over()
|
from django.core.mail import send_mail
def email(message, message2):
    """Send a registration-code e-mail via Django's send_mail.

    Args:
        message: the e-mail address used BOTH as sender and recipient —
            NOTE(review): sender == recipient looks unintended; confirm
            the intended from-address.
        message2: the registration code placed in the message body.
    """
    messages = f'Код регистрации: \n{message2}'
    send_mail(
        'Код авторизации',
        messages,
        f'{message}', # почта куда  (used as the "from" address here)
        [f'{message}'], # Это поле Кому:  (the "To:" recipient list)
        fail_silently=False,
    )
|
import pylibmc as memcache
import logging
from django.core.urlresolvers import reverse
from django.conf import settings
from django.http import HttpResponsePermanentRedirect
from django.http import HttpResponseRedirect
from core.api.resources import Site
from core.api.exceptions import APIException
from requests import RequestException
logger = logging.getLogger('core.middleware.redirect')
class DomainRedirectMiddleware():
    """
    Where a site has a custom domain, the user should be permanently redirected to
    the custom domain from the microcosm subdomain.
    """

    def __init__(self):
        # Memcached client used to cache Site lookups, keyed by hostname.
        self.mc = memcache.Client(['%s:%d' % (settings.MEMCACHE_HOST, settings.MEMCACHE_PORT)])

    def process_request(self, request):
        """Return a redirect response for custom-domain sites, else None."""
        host = request.get_host()
        # Only look at requests to example.microco.sm
        if host.endswith(settings.API_DOMAIN_NAME):
            # Fetch site from cache
            try:
                site = self.mc.get(host)
            except memcache.Error as e:
                logger.error('Memcached GET error: %s' % str(e))
                site = None
            # Not in cache or retrieval failed
            if site is None:
                try:
                    site = Site.retrieve(host)
                    try:
                        self.mc.set(host, site, time=300)
                    except memcache.Error as e:
                        logger.error('Memcached SET error: %s' % str(e))
                # Fix: "except X, e" is Python-2-only syntax; "as" works on
                # Python 2.6+ and Python 3 alike.
                except APIException as e:
                    # HTTP 404 indicates a non-existent site.
                    if e.status_code == 404:
                        return HttpResponseRedirect('http://microco.sm')
                    logger.error('APIException: %s' % e.message)
                    return HttpResponseRedirect(reverse('server-error'))
                except RequestException as e:
                    logger.error('RequestException: %s' % e.message)
                    return HttpResponsePermanentRedirect(reverse('server-error')) if False else HttpResponseRedirect(reverse('server-error'))
            # Forum owner has configured their own domain, so 301 the client.
            if hasattr(site, 'domain') and site.domain:
                # We don't support SSL on custom domains yet, so ensure the scheme is http.
                location = 'http://' + site.domain + request.get_full_path()
                logger.debug('Redirecting subdomain to: %s' % location)
                return HttpResponsePermanentRedirect(location)
        return None
|
import pyautogui
import time # for timer
def invertMouse(seconds, minutes):
    """Jump the cursor to mirrored (swapped X/Y) coordinates as a distraction.

    Bug fix: the original read the attribute ``pyautogui.moveTo`` without
    calling it, so the function did nothing.
    """
    currentMouseX, currentMouseY = pyautogui.position()
    # Swap X and Y to "invert" the pointer — TODO confirm intended inversion.
    pyautogui.moveTo(currentMouseY, currentMouseX)
def toFirstTab(seconds, minutes): # This returns you to your first tab on chrome
    """Send Ctrl+1 to switch the browser to tab 1, then terminate the script."""
    pyautogui.hotkey('ctrl', '1')
    exit();
def alertMessages(seconds, minutes): # Opens two popups
    """Sleep one second, then show two nagging alert dialogs in sequence.

    NOTE(review): the seconds/minutes updates below mutate local copies only;
    the caller never sees them, so they appear to be dead code.
    """
    time.sleep(1)
    seconds += 1
    if seconds == 5:
        seconds = 0
        minutes += 1
    pyautogui.alert(text='So it looks like someone is slacking off still. Get back to work', title='Really? Again?', button='Sorry :(')
    pyautogui.alert(text='Do you promise?', title='Hmmm are you sure...?', button='I promise')
class MainScript:
    """Nags the user with escalating pranks the longer they stay tabbed away.

    All work happens in __init__: the loop runs until toFirstTab() calls
    exit() at "minutes == 3".
    """
    def __init__(self):
        # Truthy string, so the while loop below never exits on its own.
        tabbedTimer = 'true' # timer while not studying
        seconds = 0
        minutes = 0
        while tabbedTimer: # Generic timer
            time.sleep(1)
            seconds+= 1
            # NOTE(review): a "minute" here is 5 real seconds — presumably a
            # shortened value left over from testing; confirm.
            if seconds == 5:
                seconds = 0
                minutes += 1
            if minutes == 1: # If they are tabbed for 1 minute
                invertMouse(seconds, minutes)
            if minutes == 2: # If they are tabbed for 3 minutes
                invertMouse(seconds, minutes)
                alertMessages(seconds, minutes)
            if minutes == 3:
                toFirstTab(seconds, minutes)
# Importing/running the module starts the timer loop immediately.
test1 = MainScript()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2019-09-07 02:19
from __future__ import unicode_literals
from django.db import migrations
from chroma_core.migrations import (
build_tables,
forward_trigger_template,
backward_trigger_template,
join,
forward_function_str,
backward_function_str,
)
# Materialize with list(): on Python 3, ``map`` returns a one-shot iterator,
# so iterating ``tables`` while building the forward triggers would leave it
# exhausted (empty) for the backward triggers below.
tables = list(
    map(
        build_tables,
        [
            "managedfilesystem",
            "managedhost",
            "managedtarget",
            "managedtargetmount",
            "volume",
            "volumenode",
            "alertstate",
            "stratagemconfiguration",
            "lnetconfiguration",
        ],
    )
)
# Per-table trigger DDL for the forward and backward migration directions.
forward_trigger_list = map(forward_trigger_template, tables)
forward_trigger_str = join(forward_trigger_list)
backward_trigger_list = map(backward_trigger_template, tables)
backward_trigger_str = join(backward_trigger_list)
class Migration(migrations.Migration):
    """Install (forward) or remove (reverse) the table-change triggers."""
    dependencies = [("chroma_core", "0004_add_health_status_procedure")]
    operations = [
        # Forward: create the trigger functions then the per-table triggers;
        # reverse: drop the triggers first, then the functions.
        migrations.RunSQL(
            sql=forward_function_str + forward_trigger_str, reverse_sql=backward_trigger_str + backward_function_str
        )
    ]
|
#!/usr/bin/env python
from __future__ import print_function
import sys
from lxml import etree
import epitran
import epitran.vector
def main(fn):
    """Print a UTF-8 IPA transliteration for every <TOKEN> element in file *fn*."""
    epi = epitran.Epitran('uig-Arab')
    # NOTE(review): vwis is constructed but never used — confirm whether the
    # vector-space output was meant to be emitted as well.
    vwis = epitran.vector.VectorsWithIPASpace('uig-Arab', ['uig-Arab'])
    tree = etree.parse(fn)
    root = tree.getroot()
    for token in root.findall('.//TOKEN'):
        # print(token.text.encode('utf-8'))
        # Python 2 only: ``unicode`` does not exist on Python 3.
        print(epi.transliterate(unicode(token.text)).encode('utf-8'))
if __name__ == '__main__':
    main(sys.argv[1])
|
class Solution:
    """Top-k frequent elements via quickselect over the unique values."""

    def topKFrequent(self, nums: List[int], k: int) -> List[int]:
        """Return the k most frequent values in nums (average O(n))."""
        count = Counter(nums)
        unique = list(count.keys())
        n = len(unique)
        # Partially order `unique` so the k most frequent values occupy the
        # last k slots.
        self.quickselect(n - k, 0, n - 1, count, unique)
        return unique[n - k:]

    def quickselect(self, k, left, right, count, unique):
        """Place the value with the k-th smallest frequency at index k."""
        # Bug fix: guard with >= because the recursion below can now produce
        # an empty range (true_idx - 1 < left).
        if left >= right:
            return
        idx = randint(left, right)
        true_idx = self.partition(idx, left, right, count, unique)
        if true_idx == k:
            return
        if k < true_idx:
            # Bug fix: the pivot is already in its final position, so exclude
            # it from the recursion. The original recursed on [left, true_idx]
            # / [true_idx, right], which does not shrink when the pivot lands
            # on a boundary and could spin for many iterations.
            self.quickselect(k, left, true_idx - 1, count, unique)
        else:
            self.quickselect(k, true_idx + 1, right, count, unique)

    def partition(self, idx, left, right, count, unique):
        """Lomuto partition by frequency; return the pivot's final index."""
        pivot_frequency = count[unique[idx]]
        # Move the pivot out of the way, to the right end.
        unique[idx], unique[right] = unique[right], unique[idx]
        store_idx = left
        for i in range(left, right):
            if count[unique[i]] < pivot_frequency:
                unique[i], unique[store_idx] = unique[store_idx], unique[i]
                store_idx += 1
        # Restore the pivot between the smaller and not-smaller frequencies.
        unique[store_idx], unique[right] = unique[right], unique[store_idx]
        return store_idx
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumby/flask-thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2015 thumby.io dev@thumby.io
class FlaskThumbor:
    """Flask extension that exposes a ``thumbor`` helper to templates.

    The helper builds signed image URLs against the configured thumbor
    server (requires THUMBOR_SERVER and THUMBOR_KEY in the app config).
    """

    __name__ = "FlaskThumbor"

    def __init__(self, app=None):
        """Optionally bind to *app* right away (app-factory friendly)."""
        if app is None:
            return
        self.init_app(app)

    def init_app(self, app):
        """Validate configuration, attach crypto helpers, register the
        ``thumbor`` context processor on *app*."""
        from libthumbor import CryptoURL
        from flask import current_app

        server = app.config.get('THUMBOR_SERVER', None)
        key = app.config.get('THUMBOR_KEY', None)
        if server is None or key is None:
            raise RuntimeError(
                'Make sure both THUMBOR_SERVER (URL for the thumbor server that will serve your images) and '
                'THUMBOR_KEY (security key for the thumbor server you\'re connecting to) are set in your '
                'Flask configuration.'
            )
        app.thumbor_crypto = CryptoURL(key=key)
        app.thumbor_server = server.rstrip('/')

        @app.context_processor
        def utility_processor():
            def thumbor(**kw):
                signed_path = current_app.thumbor_crypto.generate(**kw)
                return '%s%s' % (current_app.thumbor_server, signed_path)
            return dict(thumbor=thumbor)
|
"""
A permutation is an ordered arrangement of objects. For example, 3124 is one
possible permutation of the digits 1,2,3 and 4. If all permutations are listed
numerically or alphabetically, we call it lexicographic order. The lexicographic
permutations of 0,1, and 2 are 012, 021, 102, 120, 201, 210. What is the
millionith lexicographic permutation of the digits 0,1,2,3,4,5,6,7,8,9?
"""
def word_finder(alphabet_size, place):
    """
    :param alphabet_size: The number of symbols (digits 0..alphabet_size-1) to permute
    :param place: The 1-based position in the lexicographically ordered list of permutations
    :return: The permutation of the digits 0,1,...,alphabet_size-1 at position
        `place`, as a string

    The original function was unfinished (and its `def` line was missing the
    colon).  Completed using the factorial number system: the leading digit is
    fixed every (alphabet_size-1)! permutations, the next every
    (alphabet_size-2)!, and so on.
    """
    import math
    digits = [str(d) for d in range(alphabet_size)]
    remaining = place - 1  # switch to a 0-based index
    result = []
    for positions_left in range(alphabet_size - 1, -1, -1):
        block = math.factorial(positions_left)  # permutations per leading digit
        idx, remaining = divmod(remaining, block)
        result.append(digits.pop(idx))
    return ''.join(result)
|
# Generated by Django 2.2b1 on 2019-02-14 03:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Character, Contract, Country, Script, Person and Movie,
    with Character acting as the Movie<->Person through model."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Character',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('additional_info', models.TextField(null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Contract',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, unique=True)),
                ('salary', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Country',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Script',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('content', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='Person',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, unique=True)),
                ('birthplace', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='people', to='core.Country')),
            ],
        ),
        migrations.CreateModel(
            name='Movie',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('additional_info', models.TextField(null=True)),
                # M2M routed through Character, which carries the role details.
                ('actors', models.ManyToManyField(related_name='movies', through='core.Character', to='core.Person')),
                ('script', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='movies', to='core.Script')),
            ],
        ),
        # Character's FKs are added after Person/Movie exist.
        migrations.AddField(
            model_name='character',
            name='actor',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='characters', to='core.Person'),
        ),
        migrations.AddField(
            model_name='character',
            name='contract',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='characters', to='core.Contract'),
        ),
        migrations.AddField(
            model_name='character',
            name='movie',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='characters', to='core.Movie'),
        ),
        migrations.AlterUniqueTogether(
            name='character',
            unique_together={('movie', 'actor', 'name', 'additional_info')},
        ),
    ]
|
# Read n and k for the binomial coefficient nCk from one input line.
N, K = map(int, input().split())
# All arithmetic is done modulo this prime.
MOD = 10 ** 9 + 7
# Modular exponentiation (repeated squaring).
def power(x, n):
    """Return (x ** n) % MOD for n >= 0.

    Replaces the hand-rolled recursive squaring with the built-in
    three-argument ``pow``, which performs the same binary exponentiation
    in C and avoids recursion-depth limits for huge n.
    """
    return pow(x, n, MOD)
# nCk
def nCk(n, k):
    """Compute C(n, k) modulo MOD using Fermat's little theorem."""
    # Falling factorial n * (n-1) * ... * (n-k+1), reduced mod MOD.
    numerator = 1
    for v in range(n - k + 1, n + 1):
        numerator = numerator * v % MOD
    # k!, reduced mod MOD.
    denominator = 1
    for v in range(1, k + 1):
        denominator = denominator * v % MOD
    # Division mod a prime = multiplication by the modular inverse
    # (denominator ** (MOD - 2)).
    return numerator * power(denominator, MOD - 2) % MOD
print(nCk(N, K))
from django.contrib import admin
from .models import Noticia,TipoNoticia
# Register your models here.
# Expose both news models in the admin with the default ModelAdmin options.
admin.site.register(Noticia)
admin.site.register(TipoNoticia)
"""Write a program which accepts a string as input to print "Yes" if the string is "yes", "YES" or "Yes", otherwise print "No".
Hint: Use input () to get the persons input"""
string_input = input("Enter the String: ")
# Bug fix: the original condition `string_input == "yes" or "YES" or "Yes"`
# was always true, because each bare non-empty string literal is truthy on
# its own.  Membership in a tuple tests all three spellings correctly.
if string_input in ("yes", "YES", "Yes"):
    print("Yes")
else:
    print("No")
|
#!/usr/bin/python
#coding:utf-8
"""
详细见:
https://ygobbs.com/t/lexusl%E4%B8%8E%E6%B8%B8%E6%88%8F%E7%8E%8B%EF%BC%881%EF%BC%89%EF%BC%9A%E5%8D%A1%E7%BB%84%E6%9E%84%E5%BB%BA%E7%9A%84%E6%A6%82%E7%8E%87%E5%AD%A6/102771
总数(Population Size)(N):卡组数量
成功数(Number of successes in population)(K):卡组里同名卡a的数量
样本大小(Sample Size)(n):抽卡数量
样本成功数(Number of successes in sample)(k):手卡里希望抽到多少张a
"""
import re
def parse_ydk(file_ydk):
    """
    Read a .ydk deck file and count how many copies of each card it lists.
    Return:
        dict : key is the card number (as a string) used by ygopro, value is
        how many times that number appears in the file (copies in the deck).
    """
    counts = {}
    previous_id = 0
    with open(file_ydk, 'r') as fp:
        lines = fp.readlines()
        lines.sort()
        for line in lines:
            card_id = re.search(r'[0-9]*', line).group()
            if not card_id:
                # Section markers like "#main" carry no leading digits.
                continue
            if card_id != previous_id:
                counts[card_id] = 1
                previous_id = card_id
            else:
                counts[card_id] += 1
    return counts
def combine(a, b):
    """
    Return the binomial coefficient C(a, b) = a! / (b! * (a-b)!).

    Bug fix: the original used true division (`/`), which on Python 3
    produces a float and loses precision once the factorials get large;
    floor division is exact here because the result is always an integer.
    """
    ai = 1
    for i in list(range(a+1))[1:]:
        ai *= i
    bi = 1
    for i in list(range(b+1))[1:]:
        bi *= i
    ambi = 1
    for i in list(range(a-b+1))[1:]:
        ambi *= i
    return ai // (bi*ambi)
def statistics(N=40, n=5, K=3, k=1):
    """
    Hypergeometric probability of the opening hand containing exactly ``k``
    of the wanted card.
    Args:
        N (int) : The total number of cards in the deck.
        n (int) : The number of cards drawn for the opening hand.
        K (int) : How many copies of the wanted card the deck contains.
        k (int) : How many copies we want to see in the opening hand.
    Returns:
        float : P(exactly k of the K copies among the n cards drawn).
    """
    # Favourable outcomes: choose k of the K copies and fill the rest of the
    # hand from the other N-K cards; divide by all possible hands.
    favourable = combine(K, k) * combine(N - K, n - k)
    return favourable / combine(N, n)
def deck_statistics(d_deck):
    """
    Calculate whole deck statistics, that is, each card statistics of first draw in.
    Args:
        d_deck (dict) : key is card name (OR card number), value is the number in the deck.
    Returns:
        dict : key is card name (OR card number), value is the statistics of this card thar first draw in the deck.
    """
    # NOTE(review): N is fixed at 40 rather than sum(d_deck.values()) —
    # confirm decks are always exactly 40 cards.
    N = 40
    n = 5
    K = 3
    k = 1
    d = {}
    for i in d_deck:
        # NOTE(review): the card's copy count is passed as k (copies wanted in
        # hand) while K stays 3 (copies in deck).  For cards with a count other
        # than 3 this looks inverted — presumably K should be d_deck[i] with
        # k=1.  Confirm intent before using the results.
        k = d_deck[i]
        d[i] = statistics(N,n,K,k)
    return d
if __name__ == "__main__":
    # Example: chance of drawing exactly 1 of 3 copies in a 5-card hand
    # from a 40-card deck.
    N = 40
    n = 5
    K = 3
    k = 1
    print(statistics(N,n,K,k))
    #d = parse_ydk('./a.ydk')
    #d2 = deck_statistics(d)
    #print(d2)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Required
- requests (必须)
- pillow (可选)
Info
- author : "ZachBergh"
- email : "berghzach@gmail.com"
- date : "2016.6.21"
'''
import requests
import re
import time
import sys
import json
import rsa
import os.path
import binascii
import datetime
from bs4 import BeautifulSoup
try:
import cookielib
except:
import http.cookiejar as cookielib
try:
from PIL import Image
except:
pass
session = requests.Session()
session.headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:50.0) Gecko/20100101 Firefox/50.0',
    'Connection': 'keep-alive',
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
}
# Reuse saved login cookie information.
# NOTE(review): this second assignment replaces the session created above and
# discards the custom headers just configured — confirm which was intended.
session = requests.session()
session.cookies = cookielib.LWPCookieJar(filename='cookies')
try:
    session.cookies.load(ignore_discard=True)
except:
    print("Cookie 未能加载")
def rsaEncrypt(password):
    """RSA-encrypt *password* with the public key fetched from the login API.

    Returns the base64-encoded ciphertext (bytes) on success, or False on
    any failure.
    """
    url = 'http://passport.bilibili.com/login?act=getkey'
    try:
        getKeyRes = session.get(url)
        token = json.loads(getKeyRes.content.decode('utf-8'))
        # The server expects the password salted with the returned hash prefix.
        pw = str(token['hash'] + password).encode('utf-8')
        key = token['key']
        key = rsa.PublicKey.load_pkcs1_openssl_pem(key)
        pw = rsa.encrypt(pw, key)
        password = binascii.b2a_base64(pw)
        return password
    # NOTE(review): the bare except hides the actual failure (network, JSON
    # and key-format errors alike) — consider narrowing and logging.
    except:
        return False
def get_vdcode():
    """Download the login captcha image and ask the user to type it in.

    The captcha is saved to captcha.jpg in the working directory and shown
    with Pillow when available; returns the user-entered string.
    """
    t = str(int(time.time()*1000))
    captcha_url = 'https://passport.bilibili.com/captcha.gif?r=' + t + "&type=login"
    r = session.get(captcha_url)
    # Fix: the explicit f.close() inside the with-block was redundant —
    # the context manager already closes the file on exit.
    with open('captcha.jpg', 'wb') as f:
        f.write(r.content)
    # Display the captcha with Pillow's Image when installed; otherwise tell
    # the user to open captcha.jpg manually and type what they see.
    try:
        im = Image.open('captcha.jpg')
        im.show()
        im.close()
    except Exception:
        print(u'请到 %s 目录找到captcha.jpg 手动输入' % os.path.abspath('captcha.jpg'))
    captcha = input("please input the captcha\n>")
    return captcha
def login(user, password):
    """POST the login form (with captcha), persist cookies, report the result.

    NOTE(review): the control flow here is inverted and fragile:
      * ``flash`` is not defined anywhere in this module, so reaching it
        raises NameError, which the final bare ``except`` converts into
        ``return True`` ("success").
      * ``payload["vdcode"] == None`` can never be true — get_vdcode()
        always returns the string from input().
    Confirm intent before trusting the return value.
    """
    post_url = 'https://passport.bilibili.com/login/dologin'
    payload = {
        'act': 'login',
        'gourl': '',
        'keeptime': '2592000',
        'userid': user,
        'pwd': password,
        'vdcode': get_vdcode(),
    }
    if payload["vdcode"] == None:
        return False
    try:
        resp = session.post(post_url, data=payload)
        session.cookies.save()
        # Scrape the human-readable status message from the response page.
        soup = BeautifulSoup(resp.content, 'lxml')
        s = str(soup.select('center')[0])
        s = s.replace('\n', '')
        s = s.replace('\r', '')
        s = s.replace(' ', '')
        s = s.split('>')
        s = s[2]
        s = s.replace('<br/', '')
        flash(s)
        return False
    except requests.exceptions.ConnectionError as e:
        flash(e)
        return False
    except:
        return True
def isLogin():
    """Return True when the saved cookies still identify a logged-in user."""
    url = 'https://account.bilibili.com/home/userInfo'
    resp = session.get(url, allow_redirects=False)
    # A redirect (or any non-200) means the session is no longer valid;
    # resp.json() is only evaluated after the status check short-circuits.
    return resp.status_code == 200 and resp.json()['code'] == 0
if __name__ == '__main__':
    # Skip the interactive login flow entirely when saved cookies still work.
    if isLogin():
        print('您已经登录')
    else:
        account = input('请输入你的用户名:')
        password = input('请输入密码:')
        login(account, rsaEncrypt(password))
|
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from load_data import load_train_data, load_test_data
# Path of the sample submission shipped with the competition input data.
SAMPLE_SUBMIT_FILE='../input/sample_submission.csv'
# Output prefix — NOTE(review): no trailing separator, so files are written
# as "result_tmp<name>" in the CWD; confirm a directory was intended.
DIR='result_tmp'
if __name__=='__main__':
    # Fit a plain logistic regression on the training features.
    df=load_train_data()
    x_train=df.drop('target', axis=1)
    y_train=df['target'].values
    use_cols=x_train.columns.values
    clf = LogisticRegression(random_state=0)
    clf.fit(x_train, y_train)
    df = load_test_data()
    x_test=df[use_cols].sort_values('id')
    # Bug fix: predict on the prepared feature frame (the original passed the
    # raw test frame, whose columns need not match the fitted model), and keep
    # only the positive-class column of predict_proba's (n_samples, 2) output
    # so a single 'target' column can be assigned below.
    pred_test=clf.predict_proba(x_test)[:, 1]
    df_submit = pd.read_csv(SAMPLE_SUBMIT_FILE).sort_values('id')
    df_submit['target']=pred_test
    df_submit.to_csv(DIR + 'submit.csv', index=False)
|
#Django rest_framework
from rest_framework import mixins, viewsets
#Serializers
from colegio.serializers.cursos import CursosModelSerializer
#Models
from colegio.models import Curso
class CursoViewSet(viewsets.ModelViewSet):
    """Curso view set.

    Standard ModelViewSet: full CRUD over every Curso, serialized with
    CursosModelSerializer.
    """
    queryset = Curso.objects.all()
    serializer_class = CursosModelSerializer
|
import itertools
import math
import os
import uuid
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from django.contrib.auth import get_user_model
from .conf import settings
from .managers import FolderManager, FolderQuerySet, DocumentQuerySet
def uuid_filename(instance, filename):
    """Build a collision-free upload path: document/<uuid4>.<original extension>."""
    extension = filename.rsplit(".", 1)[-1]
    unique_name = "%s.%s" % (uuid.uuid4(), extension)
    return os.path.join("document", unique_name)
class Folder(models.Model):
    """A named, nestable container for Documents with per-user sharing."""
    name = models.CharField(max_length=140)
    parent = models.ForeignKey("self", null=True, blank=True)
    author = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="+")
    created = models.DateTimeField(default=timezone.now)
    modified = models.DateTimeField(default=timezone.now)
    modified_by = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="+")
    objects = FolderManager.from_queryset(FolderQuerySet)()
    kind = "folder"
    icon = "folder-open"
    # NOTE(review): shadowed by the ``shared`` property defined below, so this
    # class attribute is effectively dead.
    shared = None
    @classmethod
    def shared_user_model(cls):
        # Indirection so code shared between Folder and Document can create
        # rows in the right through-model.
        return FolderSharedUser
    def __unicode__(self):
        # Python 2 string conversion (this module also uses izip_longest below).
        return self.name
    def save(self, **kwargs):
        # Refresh modified/modified_by on every save; commit=False prevents
        # touch() from calling save() recursively.
        self.touch(self.author, commit=False)
        super(Folder, self).save(**kwargs)
    def get_absolute_url(self):
        return reverse("documents_folder_detail", args=[self.pk])
    def unique_id(self):
        # Id unique across Folder and Document rows ("f-" vs "d-" prefix).
        return "f-%d" % self.id
    def members(self, **kwargs):
        return Folder.objects.members(self, **kwargs)
    def touch(self, user, commit=True):
        """Update the modification stamp, propagating up the parent chain."""
        self.modified = timezone.now()
        self.modified_by = user
        if commit:
            if self.parent:
                self.parent.touch(user)
            self.save()
    @property
    def size(self):
        """
        Return size of this folder.
        """
        return sum([m.size for m in self.members(direct=False) if m.kind == "document"])
    def breadcrumbs(self):
        """
        Produces a list of ancestors (excluding self).
        """
        crumbs = []
        if self.parent:
            crumbs.extend(self.parent.breadcrumbs())
            crumbs.append(self.parent)
        return crumbs
    def shared_queryset(self):
        """
        Returns queryset of this folder mapped into the shared user model.
        The queryset should only consist of zero or one instances (aka shared
        or not shared.) This method is mostly used for convenience.
        """
        model = self.shared_user_model()
        return model._default_manager.filter(**{model.obj_attr: self})
    @property
    def shared(self):
        """
        Determines if self is shared. This checks the denormalization and
        does not return whether self SHOULD be shared (based on parents.)
        """
        return self.shared_queryset().exists()
    def shared_ui(self):
        """
        Returns boolean based on whether self should show any shared UI.
        """
        return self.parent_id is None and self.shared
    def shared_with(self, user=None):
        """
        Returns a User queryset of users shared on this folder, or, if user
        is given optimizes the check and returns boolean.
        """
        User = get_user_model()
        qs = self.shared_queryset()
        if user is not None:
            return qs.filter(user=user).exists()
        if not qs.exists():
            return User.objects.none()
        return User.objects.filter(pk__in=qs.values("user"))
    def shared_parent(self):
        """
        Returns the folder object that is the shared parent (the root of
        a shared folder hierarchy) or None if there is no shared parent.
        """
        root = self
        # Walk ancestors from the top down in pairs (folder, its parent);
        # izip_longest is Python 2 itertools.
        a, b = itertools.tee(reversed(self.breadcrumbs()))
        next(b, None)
        for folder, parent in itertools.izip_longest(a, b):
            if folder.shared:
                root = folder
                if parent is None or not parent.shared:
                    break
        return root
    def can_share(self, user):
        """
        Determines the shared parent and checks if given user is the author
        of the folder.
        """
        # sp = self.shared_parent()
        # return sp.author_id == user.id and self == sp
        # only share folders that live at the top-level
        return self.parent_id is None and self.author_id == user.id
    def share(self, users):
        """
        Ensures self is shared with given users (can accept users who are
        already shared on self).
        """
        users = [u for u in users if not self.shared_with(user=u)]
        if users:
            # Share self plus every nested folder/document with each user,
            # creating all through-model rows in two bulk inserts.
            members = [self] + self.members(direct=False)
            FM, DM = self.shared_user_model(), Document.shared_user_model()
            fm, dm = [], []
            for member, user in itertools.product(members, users):
                if user.pk == member.author_id:
                    continue
                if isinstance(member, Folder):
                    fm.append(FM(**{FM.obj_attr: member, "user": user}))
                if isinstance(member, Document):
                    dm.append(DM(**{DM.obj_attr: member, "user": user}))
            FM._default_manager.bulk_create(fm)
            DM._default_manager.bulk_create(dm)
class Document(models.Model):
    """An uploaded file, optionally living inside a Folder, with sharing."""
    name = models.CharField(max_length=255)
    folder = models.ForeignKey(Folder, null=True)
    author = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="+")
    created = models.DateTimeField(default=timezone.now)
    modified = models.DateTimeField(default=timezone.now)
    modified_by = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="+")
    # Stored under a random name; see uuid_filename above.
    file = models.FileField(upload_to=uuid_filename)
    objects = DocumentQuerySet.as_manager()
    kind = "document"
    icon = "file"
    # NOTE(review): shadowed by the ``shared`` property below, as on Folder.
    shared = None
    @classmethod
    def shared_user_model(cls):
        return DocumentSharedUser
    def __unicode__(self):
        return self.name
    def save(self, **kwargs):
        # Refresh modified/modified_by; commit=False avoids recursive save().
        self.touch(self.author, commit=False)
        super(Document, self).save(**kwargs)
    def get_absolute_url(self):
        return reverse("documents_document_detail", args=[self.pk])
    def unique_id(self):
        # Id unique across Folder and Document rows ("f-" vs "d-" prefix).
        return "d-%d" % self.id
    def touch(self, user, commit=True):
        """Update the modification stamp, propagating to the parent folder."""
        self.modified = timezone.now()
        self.modified_by = user
        if commit:
            if self.folder:
                self.folder.touch(user)
            self.save()
    @property
    def size(self):
        return self.file.size
    def breadcrumbs(self):
        # Ancestor folders, ending with the direct parent (mirrors Folder).
        crumbs = []
        if self.folder:
            crumbs.extend(self.folder.breadcrumbs())
            crumbs.append(self.folder)
        return crumbs
    def shared_queryset(self):
        """
        Returns queryset of this folder mapped into the shared user model.
        The queryset should only consist of zero or one instances (aka shared
        or not shared.) This method is mostly used for convenience.
        """
        model = self.shared_user_model()
        return model._default_manager.filter(**{model.obj_attr: self})
    @property
    def shared(self):
        """
        Determines if self is shared. This checks the denormalization and
        does not return whether self SHOULD be shared (based on parents.)
        """
        return self.shared_queryset().exists()
    def shared_ui(self):
        # Documents never expose sharing UI of their own.
        return False
    def shared_with(self, user=None):
        """
        Returns a User queryset of users shared on this folder, or, if user
        is given optimizes the check and returns boolean.
        """
        User = get_user_model()
        qs = self.shared_queryset()
        if user is not None:
            return qs.filter(user=user).exists()
        if not qs.exists():
            return User.objects.none()
        return User.objects.filter(pk__in=qs.values("user"))
    def share(self, users):
        # Create a DocumentSharedUser row per user not already shared on self.
        users = [u for u in users if not self.shared_with(user=u)]
        if users:
            model = self.shared_user_model()
            objs = []
            for user in users:
                objs.append(self.shared_user_model()(**{model.obj_attr: self, "user": user}))
            model._default_manager.bulk_create(objs)
    def download_url(self):
        return reverse("documents_document_download", args=[self.pk, os.path.basename(self.file.name).lower()])
class MemberSharedUser(models.Model):
    """Abstract through-model linking one shared object to one user.

    Concrete subclasses set ``obj_attr`` to their FK field name so the
    sharing code on Folder/Document can build filters generically.
    """
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    # @@@ priviledges
    class Meta:
        abstract = True
    @classmethod
    def for_user(cls, user):
        """Return the ids of every object of this type shared with *user*."""
        qs = cls._default_manager.filter(user=user)
        return qs.values_list(cls.obj_attr, flat=True)
class FolderSharedUser(MemberSharedUser):
    """Concrete share row: one (folder, user) pair."""
    folder = models.ForeignKey(Folder)
    obj_attr = "folder"
    class Meta:
        unique_together = [("folder", "user")]
class DocumentSharedUser(MemberSharedUser):
    """Concrete share row: one (document, user) pair."""
    document = models.ForeignKey(Document)
    obj_attr = "document"
    class Meta:
        unique_together = [("document", "user")]
class UserStorage(models.Model):
    """Per-user storage quota, tracked in bytes."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name="storage")
    bytes_used = models.BigIntegerField(default=0)
    bytes_total = models.BigIntegerField(default=0)
    @property
    def percentage(self):
        """Used quota as an integer percentage, rounded up.

        NOTE(review): raises ZeroDivisionError when bytes_total == 0
        (possible for rows created outside ensure_userstorage) — confirm.
        """
        return int(math.ceil((float(self.bytes_used) / self.bytes_total) * 100))
    @property
    def color(self):
        """Map usage to a state name ("success"/"warning"/"danger")."""
        p = self.percentage
        if p >= 0 and p < 60:
            return "success"
        if p >= 60 and p < 90:
            return "warning"
        if p >= 90 and p <= 100:
            return "danger"
        # Over-quota (> 100%) deliberately has no colour mapping.
        raise ValueError("percentage out of range")
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def ensure_userstorage(sender, **kwargs):
    """Give every newly created user a 50 MB storage quota row."""
    if kwargs["created"]:
        user = kwargs["instance"]
        UserStorage.objects.create(user=user, bytes_total=(1024 * 1024 * 50))
|
import requests
import json
import time
from modules.redis_functions import set_data, set_volatile_data
from modules.misc import to_uuid
from modules.config import influx_read_users, influx_timeout, influx_database_batch_size
import logging
logger = logging.getLogger(__name__)
# Note: If two accounts share hosts with the same name this is going to make a mess
def get_tick_data():
    """
    Collects data for all influx users provided in the config file and returns
    it as a tuple, dictionary containing all servers as key server name, value
    server data and dictionary with meta data for checks returned
    """
    tick_data = {}
    tick_data_validity = {}
    tick_data_validity['failed_accounts'] = 0
    tick_data_validity['total_accounts'] = 0
    tick_data_validity['total_checks'] = 0
    for influx_user in influx_read_users:
        tick_data_validity['total_accounts'] += 1
        influx_query_api = '{}/query'.format(influx_user['influx_url'])
        try:
            list_of_databases_response = requests.get(influx_query_api, params={'u': influx_user['influx_user'], 'p': influx_user['influx_pass'], 'q': 'SHOW DATABASES', 'epoch': 'ms'}, timeout=influx_timeout)
            list_of_databases_response.raise_for_status()
        except requests.exceptions.RequestException as e:
            tick_data_validity['failed_accounts'] += 1
            logger.error('Could not get TICK data for {} - error listing databases from Influx: Error: {}'.format(influx_user['influx_user'], e))
            continue
        try:
            list_of_databases = json.loads(list_of_databases_response.text)['results'][0]['series'][0]['values']
        except Exception as e:
            tick_data_validity['failed_accounts'] += 1
            # NOTE(review): unlike the handler above, this one does not
            # ``continue`` — when parsing fails, list_of_databases is unbound
            # and the loop below raises NameError.  Confirm and add continue.
            logger.error('Could not parse TICK data for {}: Error: {}'.format(influx_user['influx_user'], e))
        queries = {}
        # The four metric queries limit data to the last hour but are
        # mostly interested in the most recent data this is because the
        # decision that the query is invalid is made based on timestamp
        # and will ultimately tie into the deadman alerts
        queries['cpu_query'] = 'SELECT 100 - LAST("usage_idle") AS "cpu" FROM "{}"."autogen"."cpu" WHERE time > now() - 1h GROUP BY "host";'
        queries['memory_query'] = 'SELECT LAST("used_percent") AS "memory" FROM "{}"."autogen"."mem" WHERE time > now() - 1h GROUP BY "host";'
        queries['fullest_disk_query'] = 'SELECT MAX("last_used_percent") AS "fullest_disk" FROM (SELECT last("used_percent") AS "last_used_percent" FROM "{}"."autogen"."disk" WHERE time > now() - 1h GROUP BY "path") GROUP BY "host";'
        # I'm not completly sold on using the last minute of data for
        # our rate of change (the GROUP BY time(1m) bit), we could
        # smooth the output by using 5 minutes or taking a moving
        # average. It depends on if we want the most recent data or
        # a fairly smooth value. If we did do this we would want to
        # move cpu usage over to a moving average.
        queries['disk_io_query'] = 'SELECT MAX(latest_delta_io) AS "disk_io" FROM (SELECT LAST("delta_io") AS "latest_delta_io" FROM (SELECT derivative(last("io_time"),1ms) * 100 AS "delta_io" FROM "{}"."autogen"."diskio" WHERE time > now() - 1h GROUP BY time(1m)) GROUP BY "name") GROUP BY "host"'
        # We don't have a tag key for memory, at the moment it is the
        # only thing without a tag so it will be seperate
        # We actually want to pull this query from all of time since it
        # gives the most recent alert status however the db isn't going
        # to appreciate that to I'll grab the last 28 days for now
        queries['alert_query'] = 'SELECT LAST("crit_duration") AS "crit_duration_before_alerting", LAST("warn_duration") AS "warn_duration_before_alerting", LAST("emitted") AS "deadman_status" FROM "{}"."autogen"."kapacitor_alerts" GROUP BY "host","metric","path"'
        list_of_queries = []
        # The next two for loops are a little funky, we want to make as
        # few requests to influx as possible whilst keeping the number
        # low enough that we don't go over any timeouts or max request
        # sizes
        for database_as_list in list_of_databases:
            # database is the list ["$database_name"], I can't see how
            # the list will have multiple values and would probably
            # rather break than loop through all of the returned values
            assert len(database_as_list) == 1
            database = database_as_list[0]
            for query in queries:
                list_of_queries.append(queries[query].format(database))
        # Collect in a list incase influx_database_batch_size is not a
        # multipe of the number of queries we are running per server
        batches_response_list = []
        for beginning_of_slice in range(0, len(list_of_queries), influx_database_batch_size):
            # NOTE(review): this increments 'total_accounts' once per BATCH,
            # inflating the account counter — confirm whether a batch counter
            # (or 'total_checks') was intended.
            tick_data_validity['total_accounts'] += 1
            batch_query = ';'.join(list_of_queries[beginning_of_slice:beginning_of_slice + influx_database_batch_size])
            try:
                metric_data_batch_response = requests.get(influx_query_api, params={'u': influx_user['influx_user'], 'p': influx_user['influx_pass'], 'q': batch_query, 'epoch': 'ms'}, timeout=influx_timeout)
                metric_data_batch_response.raise_for_status()
            except requests.exceptions.RequestException as e:
                tick_data_validity['failed_accounts'] += 1
                logger.error('Could not get TICK data for {} - error getting batch of data from Influx: Error: {}'.format(influx_user['influx_user'], e))
                continue
            try:
                batches_response_list.append(json.loads(metric_data_batch_response.text)['results'])
            # NOTE(review): bare except, and the log line below references
            # ``e`` which this handler never binds — it is either unbound
            # (NameError) or a stale value from an earlier exception.
            except:
                tick_data_validity['failed_accounts'] += 1
                logger.error('Could parse get TICK data for {} - error parsing data recieved from Influx: Error: {}'.format(influx_user['influx_user'], e))
        # Key = hostname, Value = data
        hosts_data = {}
        alerts = {}
        for batch in batches_response_list:
            for statement in batch:
                # If we don't get data back there will be no series
                if 'series' not in statement:
                    continue
                # Catch kapacitor alert data and set the health status
                # accordingly
                if statement['series'][0]['name'] == "kapacitor_alerts":
                    # We will create two lists and to store the
                    # crit_duration and warn_duration values in when an
                    # alert is a warning it's warn_duration will be an
                    # integer and it's crit_duration will be None, we
                    # will then grab to max to check if something is
                    # alerting since crit duration has value -1 when not
                    # alerting and x when alerting where x is the
                    # kapacitor variable critTime / warnTime
                    for each_measurement_with_an_alerting_status in statement['series']:
                        # Skip data that is reported without a metric tag
                        if each_measurement_with_an_alerting_status['tags']['metric'] == '':
                            continue
                        hostname = each_measurement_with_an_alerting_status['tags']['host']
                        if hostname not in alerts:
                            # Systems are assumed not to be alerting, if they have any critcal, warning or deadman alerts these
                            # fields will be updated. Once a field becomes true it will not be reset to false
                            alerts[hostname] = {}
                            alerts[hostname]['critical'] = False
                            alerts[hostname]['warning'] = False
                            alerts[hostname]['deadman_alerting'] = False
                        for tag_or_field_position_in_list, tag_or_field in enumerate(each_measurement_with_an_alerting_status['columns']):
                            if tag_or_field == "time":
                                continue
                            elif tag_or_field == "deadman_status":
                                assert len(each_measurement_with_an_alerting_status['values']) == 1
                                # Checking if a deadman alert is up or
                                # down isn't particularly clear. We are
                                # working on the basis that when the
                                # latest value is 0 the server is down
                                # and that any other value means the
                                # server is reporting. In my testing
                                # this seems to be true for every alert
                                # aside from the first one sent by a
                                # each host.
                                if each_measurement_with_an_alerting_status['values'][0][tag_or_field_position_in_list] == 0:
                                    alerts[hostname]['deadman_alerting'] = True
                            # The most recent x_duration_before_alerting tags in kapacitor will have a positive value if an alert is currently active
                            elif tag_or_field == "crit_duration_before_alerting":
                                assert len(each_measurement_with_an_alerting_status['values']) == 1
                                if each_measurement_with_an_alerting_status['values'][0][tag_or_field_position_in_list] and each_measurement_with_an_alerting_status['values'][0][tag_or_field_position_in_list] > 0:
                                    alerts[hostname]['critical'] = True
                            elif tag_or_field == "warn_duration_before_alerting":
                                assert len(each_measurement_with_an_alerting_status['values']) == 1
                                if each_measurement_with_an_alerting_status['values'][0][tag_or_field_position_in_list] and each_measurement_with_an_alerting_status['values'][0][tag_or_field_position_in_list] > 0:
                                    alerts[hostname]['warning'] = True
                            else:
                                logger.warning('Unexpected tag or field when parsing kapacitor alerts for host \'{}\': {}'.format(hostname, tag_or_field))
                # for all other data - cpu memory disk diskio
                else:
                    for host_data in statement['series']:
                        hostname = host_data['tags']['host']
                        if hostname not in hosts_data:
                            tick_data_validity['total_checks'] += 1
                            hosts_data[hostname] = {}
                            hosts_data[hostname]['name'] = hostname
                        if 'summary' not in hosts_data[hostname]:
                            hosts_data[hostname]['summary'] = {}
                        # cpu and fullest_disk will be the first non time
                        # column
                        hosts_data[hostname]['summary'][host_data['columns'][1]] = host_data['values'][0][1]
        # Fold alert status into each host record and compute the sort key.
        for host in hosts_data:
            tick_host_data = hosts_data[host]
            tick_host_data['health_status'] = 'green'
            if host in alerts:
                # Alert status can only be one colour, deadman alerts take precedence over critical alerts which in turn are prioritised over warnings.
                if alerts[host]['deadman_alerting']:
                    tick_host_data['health_status'] = 'blue'
                elif alerts[host]['critical']:
                    tick_host_data['health_status'] = 'red'
                elif alerts[host]['warning']:
                    tick_host_data['health_status'] = 'orange'
            try:
                tick_host_data['orderby'] = max(
                    tick_host_data['summary']['cpu'],
                    tick_host_data['summary']['memory'],
                    tick_host_data['summary']['fullest_disk'],
                    tick_host_data['summary']['disk_io'])
            except KeyError as e:
                # A host missing any metric is treated as not reporting.
                logger.warning('{} did not return data for all metrics, the first missing metric was {}'.format(host, e))
                tick_host_data['orderby'] = 0
                tick_host_data['health_status'] = 'blue'
            tick_data[tick_host_data['name']] = tick_host_data
    # Results are considered fresh for 5 minutes (epoch milliseconds).
    tick_data_validity['valid_until'] = time.time() * 1000 + 300000
    logger.debug('tick_data is valid until {}'.format(tick_data_validity['valid_until']))
    return tick_data, tick_data_validity
def store_tick_data(tick_data, tick_data_validity):
    """Persist the output of get_tick_data to redis as key/value pairs.

    Each host record goes under a volatile per-host key derived from the
    host name; the validity metadata goes under a single non-volatile key.
    """
    for hostname, record in tick_data.items():
        key = 'resources:tick#{}'.format(to_uuid(hostname))
        set_volatile_data(key, json.dumps([record]))
    set_data('resources_success:tick', json.dumps([tick_data_validity]))
|
from enum import Enum
class LabelMode(Enum):
    """How a feature selection is rendered by FactorUtils.create_label:
    AddFeature lists the features kept, IgnoreFeature lists those removed.
    """
    AddFeature = "Feature"
    IgnoreFeature = "Ignore"
class FactorUtils(object):
    """Helpers for parsing, grouping, and formatting sets of integer factors.

    The string format is ';'-separated units, each either a single integer
    ("5") or an inclusive range ("3-7").
    """
    @staticmethod
    def extract_factors(string_factors):
        """Parse a factor string such as '1;3-5' into a set of ints.

        Returns an empty set for blank input (BUG FIX: the original returned
        an empty dict here, type-inconsistent with the non-empty case).
        Raises AttributeError if a range unit does not have exactly two bounds
        (kept for backward compatibility with existing callers).
        """
        string_factors = string_factors.strip()
        if not string_factors:
            return set()
        factors = set()
        for unit in string_factors.split(';'):
            try:
                factors.add(int(unit))
            except ValueError:
                bounds = unit.split('-')
                if len(bounds) != 2:
                    raise AttributeError('Range need to contain exactly two numbers!')
                factors.update(range(int(bounds[0]), int(bounds[1]) + 1))
        return factors
    @staticmethod
    def factors_to_string(factors):
        """Render a factor set compactly, e.g. {1,2,3,5} -> '1-3;5'.

        Reuses group_factors_by_range / single_range_to_string instead of the
        original's duplicated hand-rolled run-length logic; output is identical.
        """
        if not factors:
            return ''
        groups = FactorUtils.group_factors_by_range(factors)
        return ';'.join(
            FactorUtils.single_range_to_string(group[0], group[-1])
            for group in groups)
    @staticmethod
    def compress_string_factors(string_factors):
        """Normalize a factor string by parsing and re-serializing it."""
        factors = FactorUtils.extract_factors(string_factors)
        return FactorUtils.factors_to_string(factors)
    @staticmethod
    def single_range_to_string(left, right):
        """Format one inclusive run: '3-7', or just '3' when left == right."""
        if left != right:
            return "{}-{}".format(left, right)
        else:
            return "{}".format(left)
    @staticmethod
    def group_factors_by_range(factors_set):
        """Split a factor set into sorted lists of consecutive runs,
        e.g. {1,2,4} -> [[1, 2], [4]]."""
        grouped_factors = []
        previous = None
        for value in sorted(factors_set):
            # A gap (or the very first value) starts a new run.
            if previous is None or value != previous + 1:
                grouped_factors.append([])
            grouped_factors[-1].append(value)
            previous = value
        return grouped_factors
    @staticmethod
    def factors_to_ranges_string(factors_set):
        """Like factors_to_string, but renders None/empty input as 'None'."""
        if factors_set is None or len(factors_set) == 0:
            return "None"
        grouped_factors = FactorUtils.group_factors_by_range(factors_set)
        return ';'.join([FactorUtils.single_range_to_string(min(x), max(x)) for x in grouped_factors])
    @staticmethod
    def create_label(all_eval_features, removed_features, label_mode):
        """Build a display label for either the kept features (AddFeature)
        or the removed ones (any other mode)."""
        eval_features = set(all_eval_features)
        if label_mode == LabelMode.AddFeature:
            add_features = eval_features - set(removed_features)
            return "Features: {}".format(FactorUtils.factors_to_ranges_string(add_features))
        else:
            return "Ignore: {}".format(FactorUtils.factors_to_ranges_string(set(removed_features)))
|
"""Lapis: an adaptable, performant, and interactive scheduling simulator."""
__version__ = "0.3.0"
|
import numpy as np

# Print a 4x4 identity matrix (ones on the main diagonal, zeros elsewhere).
identity = np.eye(4)
print(identity)
"""
Internals of an ndarray:
- a pointer to the data (a block of memory, or memory-mapped file data)
- the dtype, describing the fixed-size value cells of the array
- a shape tuple, giving the size of each dimension
- a strides tuple: the number of bytes to step to reach the next element
  along each dimension
"""
|
from django.urls import path
from . import views
from django.conf import settings
from django.contrib.auth import views as auth_views
# URL namespace for reverse()/{% url %} lookups, e.g. "account:user_login".
app_name = 'account'
urlpatterns= [
    # url(r'^login/$',views.user_login,name="user_login"),
    # url(r'^login/$',LoginView,name="user_login"),
    # Authentication: Django's built-in class-based views with app templates.
    path('login/', auth_views.LoginView.as_view(template_name='account/login.html'), name='user_login'),
    path('logout/', auth_views.LogoutView.as_view(template_name='account/logout.html'), name='user_logout'),
    path('register/', views.register, name='user_register'),
    # Password change flow (logged-in users).
    path('password-change/', auth_views.PasswordChangeView.as_view(template_name="account/password_change_form.html", success_url="/account/password-change-done/"), name='password_change'),
    path('password-change-done/', auth_views.PasswordChangeDoneView.as_view(template_name="account/password_change_done.html"), name='password_change_done'),
    # Password reset flow (email-based, for users locked out of their account).
    path('password-reset/', auth_views.PasswordResetView.as_view(template_name="account/password_reset_form.html",email_template_name="account/password_reset_email.html", success_url='/account/password-reset-done/'),name='password_reset'),
    path('password-reset-done/',auth_views.PasswordResetDoneView.as_view(template_name="account/password_reset_done.html"), name='password_reset_done'),
    path('password-reset-confirm/<uidb64>/<token>/', auth_views.PasswordResetConfirmView.as_view(template_name="account/password_reset_confirm.html",success_url='/account/password-reset-complete/'), name="password_reset_confirm"),
    path('password-reset-complete/',auth_views.PasswordResetCompleteView.as_view(template_name='account/password_reset_complete.html'), name='password_reset_complete'),
    # Profile pages.
    path('my-information/',views.myself,name='my_information'),
    path('edit-my-information/',views.myself_edit,name='edit_my_information'),
    path('my-image/',views.my_image,name='my_image'),
]
import requests
import json
from bs4 import BeautifulSoup
Steamkey = '9D8034447FC4F77028B94766E25A58C7'
def achieve(game):
    """Look up *game* on steamdb.info and return its Steam achievements.

    Takes the first app search result, queries the Steam GetSchemaForGame API
    for its achievement schema, and returns a newline-joined
    "DisplayName: description" string. Achievements without a description
    are skipped.
    """
    soup = BeautifulSoup(requests.get('https://steamdb.info/search/?a=app&q=' + game + '&type=1&category=0').content, 'lxml')
    # First search hit; its link text is the numeric appid.
    first = soup.find("tr", class_= "app")
    schema = json.loads(requests.get('http://api.steampowered.com/ISteamUserStats/GetSchemaForGame/v2/?key=' + Steamkey + '&appid='
                                     + first.find('a').contents[0]).content)
    achievements = schema['game']['availableGameStats']['achievements']
    achievearray = []
    for entry in achievements:
        try:
            achievearray.append(entry['displayName'] + ': ' + entry['description'])
        except KeyError:
            # Some achievements have no 'description' — skip them.
            # (Was a bare `except:` that also hid unrelated errors.)
            pass
    return '\n'.join(achievearray)
|
import flask
from flask import Flask, render_template, request
import numpy as np
import keras
from keras.models import load_model
from flask import Flask, request, jsonify
import pickle
app = Flask(__name__)
@app.route('/')
@app.route('/index.html')
def index():
    """Serve the landing page."""
    return render_template('index.html')
# Lazily-loaded, cached Keras model.
# BUG FIX: the original reloaded CROP_PRED_MODEL.h5 from disk on every request.
_model = None

def _get_model():
    """Load the crop-prediction model once and reuse it across requests."""
    global _model
    if _model is None:
        _model = load_model('CROP_PRED_MODEL.h5')
    return _model

@app.route('/predict', methods=['POST'])
def predict():
    """Predict a crop type from the posted soil measurements.

    Expects form fields: moisture, nitrogen, phosphorous, potassium.
    Renders predict.html with the predicted crop name.
    """
    model = _get_model()
    features = [float(request.form[name])
                for name in ('moisture', 'nitrogen', 'phosphorous', 'potassium')]
    test_vector = np.asanyarray(features)
    test_vector = np.reshape(test_vector, (1, 4))
    # Index -> crop name, matching the model's output classes.
    reverse_mapping = np.asarray(['Barley', 'Corn-Field for silage',
                                  'Corn-Field for stover', 'Millet',
                                  'Potato', 'Sugarcane'])
    # NOTE(review): predict_classes was removed in TF/Keras >= 2.6 — confirm
    # the pinned keras version still provides it.
    a = model.predict_classes(test_vector)
    prediction = reverse_mapping[a]
    return render_template("predict.html", prediction=prediction)
if __name__ == '__main__':
    # Run the Flask development server (not suitable for production).
    app.run()
|
__author__='lataman'
import utilities, scheme
import re, copy
class schemeContainer(object):
    """Loads a per-language regex 'scheme' file and exposes pattern lookups.

    The scheme file is a plain-text file where lines starting with '#KEY'
    open a named section and the following lines are regex patterns belonging
    to that section.
    """
    def __init__(self, lang):
        # Language code, e.g. "PL"; selects the scheme file below.
        self.lang=lang
        #self.schemeList={"PL": "E:\\Skrypty\\PyVer\\standardRegex1.txt"}
        # NOTE(review): hardcoded absolute Windows path — breaks on any other
        # machine; should come from configuration.
        self.schemeList={"PL": "C:\\Users\\lataman\\Documents\\OCR\\PyVer\\ABIscript\\standardRegex1.txt"}
        self.setScheme(lang)
    def setScheme(self, lang):
        """Parse the scheme file for *lang* into self.base (a scheme object)."""
        txtFile=utilities.fopen(self.schemeList[lang]).split('\n')
        currKey=""
        arrayReg=[]
        self.base=scheme.createObject()
        # print("setScheme")
        for line in txtFile:
            line=line.strip()
            # Section headers look like '#NAME' (uppercase first letter).
            takeKey=re.compile("^#([A-Z])\w+").search(line)
            if(utilities.hasValue((takeKey))):
                # print(takeKey.group())
                # Flush the previous section before starting a new one.
                # NOTE(review): on the very first header this adds an entry
                # under the empty key "" — presumably ignored downstream;
                # confirm scheme.createObject() tolerates it.
                self.base.add(currKey, copy.deepcopy(arrayReg))
                currKey=takeKey.group().replace('#', '')
                arrayReg=[]
            else:
                arrayReg.append(copy.copy(line))
        # print(self.base.dict.keys())
        # NOTE(review): the final section is only flushed if another header
        # follows it — the last section in the file appears to be dropped;
        # verify against the scheme file format.
    def getMethod(self, id):
        """Return the list of pattern groups registered under *id*."""
        return self.base.getMethod(id)
    def getAlg(self, id, iter):
        """Return the iter-th pattern group registered under *id*."""
        return self.base.getAlg(id, iter)
    def seekPattern(self, id, textArray, seekFirstMatch = True):
        """Search *textArray* lines with the patterns of method *id*.

        Tries each pattern group ("alg") in turn; with seekFirstMatch=True
        (default) stops after the first group that produced any matches.
        Returns the list of matched strings.
        """
        method=self.base.getMethod(id)
        matches=[]
        for alg in method:
            for pattern in alg:
                for line in textArray:
                    found=re.compile(pattern, flags = re.IGNORECASE).search(line)
                    if(utilities.hasValue(found)):
                        matches.append(found.group())
            # print(matches)
            # print("next alg")
            # print((seekFirstMatch and (utilities.isEmpty(matches))))
            if(seekFirstMatch and (not utilities.isEmpty(matches))):
                break
        return matches
def createObject(lang):
    """Factory wrapper: build a schemeContainer for the given language code."""
    container = schemeContainer(lang)
    return container
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 28 2018
@author: Phillip
"""
import scrapy
#import pandas as pd
#scrapy crawl craig -o items.csv -t csv
#Item class with listed fields to scrape
class CraigslistItem(scrapy.Item):
    """One craigslist apartment listing.

    Search-page fields (date, title, link, price, craigId) are filled in
    MySpider.parse; the rest are filled in MySpider.parse_item_page.
    """
    date = scrapy.Field()
    title = scrapy.Field()
    link = scrapy.Field()
    price = scrapy.Field()
    #area = scrapy.Field()
    beds = scrapy.Field()
    size = scrapy.Field()
    craigId = scrapy.Field()
    numPic = scrapy.Field()
    postDate = scrapy.Field()
    updateDate = scrapy.Field()
    baths = scrapy.Field()
    latitude = scrapy.Field()
    longitude = scrapy.Field()
    contentLen = scrapy.Field()
    reposts = scrapy.Field()
    # NOTE(review): zipcode is declared but never populated by the visible
    # spider code.
    zipcode = scrapy.Field()
class MySpider(scrapy.Spider):
    """Crawl New York craigslist apartment listings into CraigslistItem rows."""
    name = "craig"
    allowed_domains = ["craigslist.org"]
    # Base url for New York apartment rentals. Change if necessary.
    base_url = "https://newyork.craigslist.org/search/aap"
    start_urls = ["https://newyork.craigslist.org/search/aap"]
    # Pre-build the paginated result URLs (offsets 100, 200, 300, 400).
    # BUG FIX: the original concatenated base_url + "s=..." which produced
    # ".../aaps=100&" — the '?' query separator was missing.
    for i in range(1, 5):
        start_urls.append(base_url + "?s=" + str(i) + "00&")
    def parse(self, response):
        """Parse one search-results page and follow each posting link."""
        postings = response.xpath(".//p")
        # NOTE(review): the last <p> is deliberately skipped here — presumably
        # a footer element, confirm against the page markup.
        for i in range(0, len(postings)-1):
            item = CraigslistItem()
            # Craigslist's numeric listing id.
            item["craigId"] = int(''.join(postings[i].xpath("@data-pid").extract()))
            temp = postings[i].xpath("span[@class='txt']")
            info = temp.xpath("span[@class='pl']")
            # Title and date of the posting.
            item["title"] = ''.join(info.xpath("a/text()").extract())
            item["date"] = ''.join(info.xpath("time/text()").extract())
            # Strip the dollar sign so the price is a plain number string.
            price = ''.join(temp.xpath("span")[2].xpath("span[@class='price']").xpath("text()").extract())
            #item["area"] = ''.join(temp.xpath("span")[2].xpath("span[@class='pnr']").xpath("small/text()").extract())
            item["price"] = price.replace("$","")
            item["link"] = ''.join(info.xpath("a/@href").extract())
            follow = "http://newyork.craigslist.org" + item["link"]
            # Follow the posting link; the partially-filled item rides along.
            request = scrapy.Request(follow , callback=self.parse_item_page)
            request.meta['item'] = item
            yield request
    def parse_item_page(self, response):
        """Fill in the detail fields from an individual posting page."""
        item = response.meta["item"]
        maplocation = response.xpath("//div[contains(@id,'map')]")
        latitude = ''.join(maplocation.xpath('@data-latitude').extract())
        longitude = ''.join(maplocation.xpath('@data-longitude').extract())
        if latitude:
            item['latitude'] = float(latitude)
        if longitude:
            item['longitude'] = float(longitude)
        attr = response.xpath("//p[@class='attrgroup']")
        try:
            item["beds"] = int(attr.xpath("span/b/text()")[0].extract())
            bath = attr.xpath("span/b/text()")[1].extract()
            item["size"] = int(''.join(attr.xpath("span")[1].xpath("b/text()").extract()))
            # BUG FIX: baths was assigned twice — once behind a dead
            # isdigit() guard and then unconditionally. Net effect was always
            # float(bath); keep that, but tolerate non-numeric text without
            # aborting the remaining fields.
            try:
                item["baths"] = float(bath)
            except ValueError:
                pass
        except (IndexError, ValueError):
            # Listing lacks beds/size attributes — leave those fields unset.
            # (Was a bare `except:` that also hid unrelated errors.)
            pass
        item["contentLen"] = len(response.xpath("//section[@id='postingbody']").xpath("text()").extract())
        times = response.xpath("//p[@class = 'postinginfo reveal']").xpath("time/@datetime").extract()
        # BUG FIX: postDate and updateDate were both set to the full extracted
        # list, so they always compared equal and reposts was always 0 (the
        # original TODO flagged this). The first timestamp is the post time,
        # the last is the most recent update.
        item["postDate"] = times[0] if times else None
        item["updateDate"] = times[-1] if times else None
        item["reposts"] = 1 if item["updateDate"] != item["postDate"] else 0
        item["numPic"] = len(response.xpath("//div[@id='thumbs']").xpath("a"))
        return item
|
# argv[1]: Arquivo com todos os atributos.
# argv[2]: Arquivo com os atributos escolhidos.
# argv[3]: Arquivo com dados para triagem.
from sys import argv
import io
# Read a newline-separated file and return its lines (Python 2 syntax).
def ler_arquivo (path):
    """Return the lines of *path* as a list, dropping the trailing empty
    entry produced by the file's final newline. Exits the process on error."""
    try:
        with open (path, 'r') as content_file:
            content = content_file.read().split('\n')
            content.pop()
            return content
    except Exception as e:
        print 'Erro ao abrir arquivo: ', e
        exit (1)
# UTF-8 variant of ler_arquivo (Python 2: returns unicode lines).
def ler_arquivo_utf (path):
    """Return the lines of *path* decoded as UTF-8, dropping the trailing
    empty entry. Exits the process on error."""
    try:
        with io.open (path, encoding='utf-8') as content_file:
            content = content_file.read().split('\n')
            content.pop()
            return content
    except Exception as e:
        print 'Erro ao abrir arquivo: ', e
        exit (1)
# Cria mascara binaria de atributos utilizados.
def criar_mascara (atributos, at_utilizados):
    """Build a 0/1 mask over the full attribute list.

    atributos: path to the file listing every attribute, one per line.
    at_utilizados: path to the file listing the chosen attributes.
    Returns a list of ints the length of the full attribute list,
    1 where the attribute is among the chosen ones.
    """
    lista_atributos = ler_arquivo (atributos)
    # Membership tests against a set are O(1); the original probed a list,
    # making mask construction O(n*m).
    selecionados = set (ler_arquivo (at_utilizados))
    return [1 if a in selecionados else 0 for a in lista_atributos]
def extrair_dados (mascara, arquivo_dados):
    """Filter each ';'-separated line of *arquivo_dados* down to the masked
    attributes.

    Returns a 4-tuple:
      (kept rows, total encode failures, skipped line count,
       per-attribute failure counts).
    A row is dropped entirely if any selected column fails ASCII encoding.
    """
    dados = ler_arquivo (arquivo_dados)
    dados_finais = list()
    utfex = 0  # total number of encode failures seen
    linhas_ex = 0  # number of lines skipped because of a failure
    atributo_id = [0] * len (mascara)  # failure count per attribute position
    for linha in dados:
        tokens = linha.split (';')
        cont = 0
        temp = list ()
        excecao = False
        # Extract the values of the selected attributes.
        while cont < len (mascara):
            # Only attributes marked 1 in the mask are kept.
            if (mascara[cont] == 1):
                try:
                    # .encode('ascii') raises on non-ASCII data (Python 2).
                    att = tokens[cont].replace('\n','').replace ('\r','').encode ('ascii')
                    temp.append (att)#.iencode ('ascii'))
                except Exception as e:
                    excecao = True
                    utfex += 1
                    atributo_id[cont] += 1
                    #break
            cont += 1
        # Keep the row only when no exception occurred.
        if (not excecao):
            dados_finais.append (temp)
        else:
            linhas_ex += 1
    return dados_finais, utfex, linhas_ex, atributo_id
if __name__=='__main__':
    # CLI entry point (Python 2). argv[1]: all attributes, argv[2]: chosen
    # attributes, argv[3]: data file for triage.
    dados, utfex, linhas_ex, att = extrair_dados (criar_mascara (argv[1], argv[2]), argv[3])
    cont = 0  # (unused)
    print 'Qtde de excecoes: ', utfex
    print 'Qtde de linhas excedidas: ', linhas_ex
    j = 0
    # Per-attribute encode-failure counts.
    for i in att:
        print 'Atributo: ', j, ' - ', i
        j += 1
|
from service.slack_service import SlackService as Slack
from service.logging_service import LoggingService
from service.node_service import NodeService
# Wire up the node service with its Slack and logging dependencies, then
# trigger a one-shot node update as a side effect of running this module.
_node_service = NodeService(Slack(), LoggingService())
_node_service.update_node()
|
from . import TargetMatcher
from spacy.tokens import Token
class ConceptTagger:
    """ConceptTagger is a component for setting an attribute on tokens contained
    in spans extracted by TargetRules. This can be used for semantic labeling
    for normalizing tokens, making downstream extraction simpler.
    """
    name = "concept_tagger"
    def __init__(self, nlp, attr_name="concept_tag"):
        """Create a new ConceptTagger.
        Params:
            nlp: A spaCy Language model.
            attr_name (str): The name of the attribute to set to tokens.
        """
        self.nlp = nlp
        self.attr_name = attr_name
        self.target_matcher = TargetMatcher(nlp, add_ents=False)
        self.rules = []
        # Register the token extension once; spaCy raises ValueError when the
        # extension already exists (e.g. a second ConceptTagger instance),
        # which is safe to ignore.
        # BUG FIX: was a bare `except:` that also swallowed unrelated errors.
        try:
            Token.set_extension(attr_name, default="")
        except ValueError:
            pass
    def add(self, rules):
        """Register target rules with the matcher and record them on self.rules."""
        self.target_matcher.add(rules)
        self.rules.extend(rules)
    def __call__(self, doc):
        """Tag every token inside each matched span with the span's label."""
        spans = self.target_matcher(doc)
        for span in spans:
            for token in span:
                setattr(token._, self.attr_name, span.label_)
        return doc
|
#!/usr/bin/env python
import unittest, operator, random
import numpy as MATH
from numpy.random import randint, uniform
from CGAPreprocessing import Utilities
class DataFunction(object):
    """Simple container pairing printable/LaTeX names with data or a function.

    self.function holds either a constant (e.g. numpy.pi) or a callable
    (e.g. tanh); self.string and self.latex are its text representations.
    """
    def __init__(self, string, latex, dataOrFunction):
        self.string, self.latex, self.function = string, latex, dataOrFunction
class DataMethodFactory(dict):
    """Factory for named (or random) data constants, unary/binary operators,
    and scalarizing functions, each wrapped in a DataFunction."""
    def __init__(self):
        # the data terminals (constants plus one ephemeral random number)
        self.data = {}
        self.data['e'] = ("e", r' e', MATH.e)
        self.data['pi'] = ("pi", r'$pi', MATH.pi)
        # ephemeral random number
        rnum = randint(-1,2)*uniform() + randint(-3, 4)
        self.data[str(rnum)] = (str(rnum), r' %s'%str(rnum), rnum)
        self.data['1/N'] = ("(1/N)", r'$frac{1}{N}', 1./20.)
        self.data['p_i'] = ("p_i", r'$rho_{i}', -1.0)
        self.data['p_j'] = ("p_j", r'$rho_{j}', -1.0)
        self.data['p_ij'] = ("p_ij", r'$rho_{ij}', -1.0)
        self.DATA = len(self.data)
        # the unary functions
        self.unary = {}
        self.unary['exp'] = ("exp(%s)", r'$exp$left( %s$right) ', MATH.exp)
        self.unary['log'] = ("log(%s)", r'$log$left( %s$right) ', MATH.log)
        self.unary['tanh'] = ("tanh(%s)", r'$tanh$left( %s$right) ', MATH.tanh)
        self.unary['sinh'] = ("sinh(%s)", r'$sinh$left( %s$right) ', MATH.sinh)
        self.unary['cosh'] = ("cosh(%s)", r'$cosh$left( %s$right) ', MATH.cosh)
        self.unary['transpose'] = ("(%s)^T",r'$left( %s$right)^{T} ',MATH.transpose)
        self.unary['square'] = ("(%s)**2", r'$left( %s$right)^{2} ', self.sqr)
        self.UNARY = len(self.unary)
        # the binary functions
        self.binary = {}
        self.binary['add'] = ("(%s+%s)", r'$left( %s + %s$right) ', MATH.add)
        self.binary['subtract'] = ("(%s-%s)", r'$left( %s- %s$right) ', MATH.subtract)
        self.binary['multiply'] = ("(%s*%s)", r'$left(%s$cdot %s$right) ', MATH.multiply)
        self.binary['divide'] = ("(%s/%s)", r'$frac{%s}{%s}', MATH.divide)
        self.BINARY = len(self.binary)
        # the scalarizing functions
        self.scalars = {}
        self.scalars['tr'] = ("tr(%s)", r' {$mathrm{Tr}}$left( %s$right) ', self.nantrace)
        self.scalars['sum_ij'] = ("sum_ij(%s)", r'$Sigma_{ij}$left( %s$right) ', self.dsum)
        self.SCALARS = len(self.scalars)
        # reverse dicts (wait for it . . . ah) for accessing by .string
        self.atad = dict((self.data[k][0],k) for k in self.data)
        self.yranu = dict((self.unary[k][0],k) for k in self.unary)
        self.yranib = dict((self.binary[k][0],k) for k in self.binary)
        self.sralacs = dict((self.scalars[k][0],k) for k in self.scalars)
    @staticmethod
    def random(dictionary, size):
        """Return a uniformly random (string, latex, function) entry."""
        # BUG FIX: the original indexed dictionary.keys() directly, which
        # fails on Python 3 (dict views are not indexable); list(dictionary)
        # works on both Python 2 and 3.
        return dictionary[list(dictionary)[randint(0, size)]]
    @staticmethod
    def randomDF(name, dictionary, size):
        """Wrap the named entry of *dictionary* (or a random one if name is
        None) in a DataFunction."""
        assert name is None or name in dictionary
        if name in dictionary:
            a, b, c = dictionary[name]
            return DataFunction(a, b, c)
        a, b, c = DataMethodFactory.random(dictionary, size)
        return DataFunction(a, b, c)
    def getData(self, name=None):
        """Method to return a named data element, if None a random data element, or a copy of an
        ephemeral random number (either a float or int)"""
        if name in self.data or name is None:
            return DataMethodFactory.randomDF(name, self.data, self.DATA)
        elif name in self.atad:
            return DataMethodFactory.randomDF(self.atad[name], self.data, self.DATA)
        else:
            # asking for ephemeral random number (either float or int)
            if '.' in name:
                return DataFunction(name, name, float(name))
            return DataFunction(name, name, int(name))
    def getUnary(self, name=None):
        """Method to return a named unary operator or if None a random unary operator"""
        if name in self.unary or name is None:
            return DataMethodFactory.randomDF(name, self.unary, self.UNARY)
        return DataMethodFactory.randomDF(self.yranu[name], self.unary, self.UNARY)
    def getBinary(self, name=None):
        """Method to return a named binary operator or if None a random binary operator"""
        if name in self.binary or name is None:
            return DataMethodFactory.randomDF(name, self.binary, self.BINARY)
        return DataMethodFactory.randomDF(self.yranib[name], self.binary, self.BINARY)
    def getScalar(self, name=None):
        """Method to return a named scalarizing operator or if None a random scalarizing operator"""
        # BUG FIX: `in` replaces dict.has_key(), which no longer exists on
        # Python 3.
        if name in self.scalars or name is None:
            return DataMethodFactory.randomDF(name, self.scalars, self.SCALARS)
        return DataMethodFactory.randomDF(self.sralacs[name], self.scalars, self.SCALARS)
    @staticmethod
    def nantrace(x):
        """NaN-tolerant trace; returns x unchanged when it has no diagonal."""
        try:
            y = MATH.nansum(x.diagonal())
        except (AttributeError, ValueError):
            # non-array input, or an array with < 2 dimensions
            # (was a bare except)
            y = x
        return y
    @staticmethod
    def dsum(x):
        """NaN-tolerant sum of the elements of a matrix (scalars pass through)."""
        try:
            y = MATH.nansum(x)
        except IndexError:
            y = x
        return y
    @staticmethod
    def sqr(x):
        """Elementwise square."""
        return x**2
class CGAFunctionsTests(unittest.TestCase):
    """Sanity tests for DataMethodFactory lookup and reverse-string lookup."""
    def setUp(self):
        self.methodFactory = DataMethodFactory()
    def testName(self):
        # print(...) with one parenthesized argument behaves identically on
        # Python 2 and 3 (the originals were Python-2 print statements).
        print("\n----- testing function names -----")
        self.assertEquals(self.methodFactory.getData('pi').string, 'pi')
        self.assertEquals(self.methodFactory.getScalar('tr').string, 'tr(%s)')
        self.assertEquals(self.methodFactory.getUnary('log').string, 'log(%s)')
        self.assertEquals(self.methodFactory.getBinary('add').string, '(%s+%s)')
    def testFunctions(self):
        print("\n----- testing function evaluation -----")
        mynansum = self.methodFactory.getScalar('sum_ij')
        mylog = self.methodFactory.getUnary('log')
        mysum = self.methodFactory.getBinary('add')
        self.assertAlmostEquals(0.0, mylog.function(1.0))
        self.assertAlmostEquals(2.0, mysum.function(1.0, 1.0))
        self.assertAlmostEquals(2.0, mynansum.function([1.0, MATH.nan, 1.0, MATH.nan]))
        self.assertAlmostEquals(2.0, mynansum.function(MATH.eye(2)))
    def testReverseAccess(self):
        """A random entry looked up again by its .string must be the same entry.

        BUG FIX: the original compared each tuple against itself (binary vs
        binary, never binary2), so every assert passed vacuously — and the
        dead `else` branches compared identical tuples with assertNotEquals,
        which would always have failed had they ever run.
        """
        print("\n----- testing reverse dictionary access -----")
        N = 10
        for i in range(N):
            binary = self.methodFactory.getBinary()
            binary2 = self.methodFactory.getBinary(binary.string)
            self.assertEquals(
                (binary.string, binary.latex, binary.function),
                (binary2.string, binary2.latex, binary2.function))
        for i in range(N):
            unary = self.methodFactory.getUnary()
            unary2 = self.methodFactory.getUnary(unary.string)
            self.assertEquals(
                (unary.string, unary.latex, unary.function),
                (unary2.string, unary2.latex, unary2.function))
        for i in range(N):
            data = self.methodFactory.getData()
            data2 = self.methodFactory.getData(data.string)
            self.assertEquals(
                (data.string, data.latex, data.function),
                (data2.string, data2.latex, data2.function))
        for i in range(N):
            scalar = self.methodFactory.getScalar()
            scalar2 = self.methodFactory.getScalar(scalar.string)
            self.assertEquals(
                (scalar.string, scalar.latex, scalar.function),
                (scalar2.string, scalar2.latex, scalar2.function))
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
|
#!python3
# -*- coding: utf-8 -*-
"""
@author: yanbin
Any suggestion? Please contract yanbin_c@hotmail.com
"""
import os
import wx
import sys
import time,datetime
import numpy as np
import math
from time import clock
from threading import Thread
from wx.lib.embeddedimage import PyEmbeddedImage
class MyFrame(wx.Frame):
    """Top-level window: a notebook holding the calculator and version pages."""
    def __init__(self):
        wx.Frame.__init__(self,None,-1,'BER Confidence Level Calculator V0.2',size=(600,400))
        nb_main=wx.Notebook(self,-1,pos=(0,0),size=(600,400),style=wx.BK_DEFAULT)
        self.panel_c=panel_Calculator(nb_main,-1)
        self.panel_v=panel_version(nb_main,-1)
        nb_main.AddPage(self.panel_c,"BER Cal")
        nb_main.AddPage(self.panel_v,"Version")
        # The calculator's button triggers the calculation from this frame.
        self.panel_c.btn_run.Bind(wx.EVT_BUTTON,self.On_Run)
    def On_Run(self, event):
        # Run the calculation on a worker thread so the GUI stays responsive.
        thread = Thread(target = self.On_Run_cal, args = (), name = self.On_Run_cal.__name__)
        thread.start()
    def On_Run_cal(self):
        """Compute the BER confidence level and write it back to the panel.

        CL = 1 - exp(-N*BER) * sum_{i=0..E} (N*BER)^i / i!
        where N = datarate * measurement time (Poisson model).
        """
        basic_setting=self.panel_c.get_setting()
        bers=float(basic_setting["BER"])
        bps=float(basic_setting["BPS"])
        t=float(basic_setting["T"])
        error=int(basic_setting["E"])
        # Conversion factors for Seconds / Minutes / Hours.
        unit_list=(1,60,3600)
        unit=unit_list[int(basic_setting["U"]) ]
        p=0
        # Total number of transmitted bits.
        N=bps*unit*t
        # Partial sum of the Poisson terms up to the measured error count.
        for i in range (error+1):
            p+=math.pow(N*bers,i)/math.factorial(i)
        Pnk=math.exp(-N*bers)*p
        CL=1-Pnk
        self.panel_c.txt_N.SetValue (str(N))
        self.panel_c.txt_CL.SetValue (str(CL*100))
        print ('\n\n\t***Simulation Done.***')
        return()
class panel_Calculator(wx.Panel):
    """Notebook page with the BER confidence-level inputs, the Calculate
    button, the result fields, and the reference equation image."""
    def __init__(self,*args,**kwargs):
        wx.Panel.__init__(self,*args,**kwargs)
        self.sizer=wx.GridBagSizer(hgap=10,vgap=5)
        self.sizer.Add(wx.StaticText(self,-1,r'BER Confidence Level Calculator'),pos=(0,0),flag=wx.ALIGN_CENTER_VERTICAL)
        # --- input fields ---
        self.sizer.Add(wx.StaticText(self,-1,r'Specified BER (BERs)'),pos=(1,0),flag=wx.ALIGN_CENTER_VERTICAL)
        self.txt_ber=wx.TextCtrl(self,-1,"1e-16",size=(50,-1))
        self.sizer.Add(self.txt_ber,pos=(1,1),span=(1,1),flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_LEFT)
        self.sizer.Add(wx.StaticText(self,-1,r'Datarate in bits per second(BPS)'),pos=(2,0),flag=wx.ALIGN_CENTER_VERTICAL)
        self.txt_bps=wx.TextCtrl(self,-1,"4.8e9",size=(50,-1))
        self.sizer.Add(self.txt_bps,pos=(2,1),span=(1,1),flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_LEFT)
        self.sizer.Add(wx.StaticText(self,-1,r'Numbers of measured bit errors(E)'),pos=(3,0),flag=wx.ALIGN_CENTER_VERTICAL)
        self.txt_error=wx.TextCtrl(self,-1,"0",size=(50,-1))
        self.sizer.Add(self.txt_error,pos=(3,1),span=(1,1),flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_LEFT)
        self.sizer.Add(wx.StaticText(self,-1,r'Measurement time(T)'),pos=(4,0),flag=wx.ALIGN_CENTER_VERTICAL)
        self.txt_time=wx.TextCtrl(self,-1,"2000",size=(50,-1))
        self.sizer.Add(self.txt_time,pos=(4,1),span=(1,1),flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_LEFT)
        self.sizer.Add(wx.StaticText(self,-1,r'in units of:'),pos=(4,2),flag=wx.ALIGN_CENTER_VERTICAL)
        sampleList = ['Seconds', 'Minutes', 'Hours']
        self.u_choice = wx.ComboBox(self,-1,'Hours',(740,18),(80,20),sampleList, wx.CB_DROPDOWN)
        self.sizer.Add(self.u_choice,pos=(4,3),flag=wx.ALIGN_CENTER_VERTICAL)
        # --- action button (bound to MyFrame.On_Run by the parent frame) ---
        self.btn_run = wx.Button(self, 20, "Calculate", (20, 100))
        self.btn_run.SetToolTip("Run Analysis...")
        self.sizer.Add(self.btn_run,pos=(5,0),span=(1,1),flag=wx.ALIGN_CENTER_VERTICAL)
        # --- result fields, filled in by MyFrame.On_Run_cal ---
        self.sizer.Add(wx.StaticText(self,-1,r'Numbers of transmitted bits(N=BPS*T)'),pos=(6,0),flag=wx.ALIGN_CENTER_VERTICAL)
        self.txt_N=wx.TextCtrl(self,-1,"",size=(100,-1))
        self.sizer.Add(self.txt_N,pos=(6,1),span=(1,2),flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_LEFT)
        self.sizer.Add(wx.StaticText(self,-1,r'BER confidence level(CL*100%)'),pos=(7,0),flag=wx.ALIGN_CENTER_VERTICAL)
        self.txt_CL=wx.TextCtrl(self,-1,"",size=(100,-1))
        self.sizer.Add(self.txt_CL,pos=(7,1),span=(1,2),flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_LEFT)
        # Reference equation image, loaded from the working directory.
        # NOTE(review): assumes eqn_ber_cl.jpg ships next to the script —
        # confirm, otherwise wx.Image raises at startup.
        jpg_file = wx.Image('eqn_ber_cl.jpg', wx.BITMAP_TYPE_ANY).ConvertToBitmap()
        self.sizer.Add(wx.StaticBitmap(self, -1, jpg_file, (10 + jpg_file.GetWidth(), 5), (jpg_file.GetWidth(), jpg_file.GetHeight())),pos=(8,0),span=(1,1),flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_LEFT)
        self.SetSizer(self.sizer)
        self.sizer.Add(wx.StaticText(self,-1,r'Reference: JitterLabs website of "BER Confidence-level Calculator".'),pos=(9,0),span=(1,4))
        self.sizer.Add(wx.StaticText(self,-1,r'Link: https://www.jitterlabs.com/support/calculators/ber-confidence-level-calculator'),pos=(10,0),span=(1,4))
    def get_setting(self):
        """Collect the raw input values as a dict of strings, plus the
        selected unit's index under "U"."""
        res={}
        res["BER"]=self.txt_ber.GetValue()
        res["BPS"]=self.txt_bps.GetValue()
        res["T"]=self.txt_time.GetValue()
        res["U"]=self.u_choice.GetSelection ()
        res["E"]=self.txt_error.GetValue()
        return res
class panel_version(wx.Panel):
    """Static notebook page showing the version history and contact address."""
    def __init__(self,*args,**kwargs):
        wx.Panel.__init__(self,*args,**kwargs)
        self.sizer=wx.GridBagSizer(hgap=10,vgap=5)
        self.sizer.Add(wx.StaticText(self,-1,'version 0.1:Initial Release'),pos=(0,0))
        self.sizer.Add(wx.StaticText(self,-1,'yanbin_c@hotmail.com'),pos=(1,0))
        self.SetSizer(self.sizer)
        self.sizer.Fit(self)
        # BUG FIX: `self.Fit` was a bare attribute access (a no-op) — the
        # method was never actually called.
        self.Fit()
if __name__ == "__main__":
    # Create the wx application, show the main frame, and enter the event loop.
    app = wx.App()
    frame=MyFrame()
    frame.Show()
    app.MainLoop()
|
from __future__ import unicode_literals
import base64
import datetime
from django.db import models
from django.utils import timezone
from ckeditor.fields import RichTextField
# SAMPLE DATA
# Django `choices` tuples: every entry must be a (value, human-readable) 2-tuple.
PLATFORM_BRAND_POSITION = (
    ('0', 'Luxury'),
    ('1', 'Mid range'),  # typo fix: was 'Mid rage'
    ('2', 'Discount')
)
LOGISTICS_MODELS = (
    ('0', 'Dropshipping'),
    ('1', 'Warehousing'),
    ('2', 'Other')
)
# SAMPLE DATA
# Pulled from https://en.wikipedia.org/wiki/ISO_639
LISTING_LANGUAGES = (
    ('0', 'English (eng)'),
    ('1', 'Spanish (spa)'),
    # BUG FIX: was the malformed 3-tuple ('2', 'Chinese', ('cdo')) — note
    # ('cdo') is just a parenthesized string, and Django choices require
    # 2-tuples. Match the "Name (code)" format of the other entries.
    ('2', 'Chinese (cdo)')
)
BOOLEAN = (
    ('0', 'No'),
    ('1', 'Yes')
)
class ProductCategory(models.Model):
    """Industry-standard product category (referenced by Market.product_categories)."""
    name = models.CharField(max_length=200)
    def __str__(self):
        return "{0}".format(self.name)
    class Meta:
        # Alphabetical ordering by name.
        ordering = ('name',)
class Logo(models.Model):
    """Marketplace logo stored as base64 text (referenced by Market.logo)."""
    name = models.CharField(max_length=200)
    # Base64-encoded image payload; exposed via base64_logo().
    _encoded_data = models.TextField()
    def base64_logo(self):
        """Return the raw base64 string of the logo image."""
        return self._encoded_data
    def __str__(self):
        return "{0}".format(self.name)
class Region(models.Model):
    """Geographic region; Country rows point at a Region."""
    name = models.CharField(max_length=200, blank=True, null=True)
    def __str__(self):
        return "{0}".format(self.name)
    class Meta:
        # Reverse-alphabetical ordering by name.
        ordering = ('-name',)
class Country(models.Model):
    """Country within a Region (referenced by Market.countries_served)."""
    name = models.CharField(max_length=200, blank=True, null=True)
    region = models.ForeignKey(Region)
    def __str__(self):
        return "{0}".format(self.name)
    class Meta:
        # Reverse-alphabetical ordering by name.
        ordering = ('-name',)
class Market(models.Model):
    """An overseas e-marketplace profile with seller-facing research fields."""
    last_modified = models.DateTimeField(auto_now=True)
    # Trading name of the marketplace
    name = models.CharField(max_length=200, null=True, blank=True)
    # Description of the marketplace suitable for a seller.
    description = models.CharField(max_length=200, null=True, blank=True)
    # URL of the market
    web_address = models.URLField(max_length=200, blank=True, null=True)
    # Image of the marketplace logo
    logo = models.ForeignKey('Logo', null=True, blank=True)
    # Country where the marketplace is based
    country = models.CharField(max_length=200, blank=True, null=True)
    # That countries that have buyers for the marketplace
    countries_served = models.ManyToManyField(Country)
    # Industry standard for product categories.
    product_categories = models.ManyToManyField(ProductCategory)
    # Do they provide local customer services
    # BUG FIX: was `blank=0, default=False` — `blank` takes a bool, and the
    # default for a CharField with BOOLEAN ('0'/'1') choices must be a string,
    # not the bool False (which would be stored as "False").
    local_customer_service = models.CharField(choices=BOOLEAN, max_length=1, blank=False, default='0')
    local_customer_service_notes = models.CharField(max_length=200, blank=True, null=True, verbose_name='notes')
    # Structure of the logistics and fulfillment for the e-marketplace.
    logistics_structure = models.CharField(choices=LOGISTICS_MODELS, max_length=1, null=True, blank=True)
    # Product type
    product_type = models.CharField(choices=PLATFORM_BRAND_POSITION, max_length=1, null=True, blank=True)
    # Uses the field product_categories, for each category provides a demand value
    product_category_demand = models.CommaSeparatedIntegerField(max_length=500, blank=True, null=True)
    # The number of buyers, sellers on a marketplace.
    size = RichTextField(null=True, blank=True)
    # The number of buyers, sellers for a particular product/product category on a marketplace.
    product_category_size = models.CommaSeparatedIntegerField(max_length=10000, blank=True, null=True)
    # Number of users going to the website per day on average.
    web_traffic_to_site = RichTextField(null=True, blank=True)
    # Number of users bouncing from the website per day on average.
    web_traffic_to_bounce = RichTextField(null=True, blank=True)
    # Structure of the fees and costs for sellers on the marketplace.
    fee_pricing_structure = RichTextField(null=True, blank=True)
    # Terms in place for sellers to receive payment from e-marketplace
    payment_terms = RichTextField(null=True, blank=True)
    # Type of support offered to sellers on the e-marketplace.
    seller_support_structure = RichTextField(null=True, blank=True)
    # Translation services offered for communication between buyers and sellers
    # and/or translation of product/marketing material for a site.
    translation_services = RichTextField(null=True, blank=True)
    # Customer service offered to buyers on the e-marketplace
    buyers_customer_service = RichTextField(null=True, blank=True)
    # Details of the merchandising offer and associated costs involved
    # (fe. marketing, feature to bump your product up on listings)
    merchandising_offer_cost = RichTextField(null=True, blank=True)
    # The payment methods for buyers on the e-marketplace. (fe. Card, PayPal)
    payment_methods = RichTextField(null=True, blank=True)
    # Languages offered for listing products on the e-marketplace
    listing_languages = RichTextField(max_length=500, blank=True, null=True)
    # The number of other sellers for a product/product category on the e-marketplace.
    product_visibility = RichTextField(null=True, blank=True)
    # The types of sellers for product/product category on the e-marketplace.
    competitor_comparison = RichTextField(null=True, blank=True)
    # What terms has been negotiated on behalf of UK Businesses by UKTI
    ukti_terms = RichTextField(null=True, blank=True)
    # Marketplace contacts which are supplied from UKTI for sellers.
    contact_details = RichTextField(null=True, blank=True)
    # List of steps a seller needs to go through to sell on the platform.
    shop_analytics = RichTextField(null=True, blank=True)
    # Tailoring options, themes, etc.
    customization = RichTextField(null=True, blank=True)
    # Details of social media integrations
    social_media_integration = RichTextField(null=True, blank=True)
    # Details of product promotion options
    product_promotion_options = RichTextField(null=True, blank=True)
    # Reviews, ratings, etc.
    feedback_system = RichTextField(null=True, blank=True)
    # Revenue of the business
    revenue = RichTextField(null=True, blank=True)
    # Parent company name
    parent_company_name = RichTextField(null=True, blank=True)
    # Platform target market
    platform_target_market = RichTextField(null=True, blank=True)
    # Product feedback system
    product_feedback_system = RichTextField(null=True, blank=True)
    # The application process for signing up
    seller_application_process = RichTextField(null=True, blank=True)
    # The subscription fee of the platform
    subscription_fees = RichTextField(null=True, blank=True)
    # The registration fee of the platform
    registration_fees = RichTextField(null=True, blank=True)
    # Additional operating fees of the platform
    additional_fees = RichTextField(null=True, blank=True)
    # Referral fee of the platform
    referral_fees = RichTextField(null=True, blank=True)
    # Prohibited items of the platform
    prohibited_items = RichTextField(null=True, blank=True)
    # Logistics options
    logistics_options = RichTextField(null=True, blank=True)
    # Local laws related to the countries in which you want to ship to
    local_laws = RichTextField(null=True, blank=True)
    # Platform signup
    platform_signup = RichTextField(null=True, blank=True)
    # General things to consider
    things_to_consider = RichTextField(null=True, blank=True)
    # Platform type eg shopfront or catalogue
    platform_type = models.CharField(max_length=255, null=True, blank=True)
    web_traffic = models.CharField(max_length=30, null=True, blank=True)
    # Misc fields (repurposed ad hoc; see help_text for current usage)
    misc1 = RichTextField(null=True, blank=True, help_text='')
    misc2 = RichTextField(null=True, blank=True, help_text='')
    misc3 = RichTextField(null=True, blank=True, help_text='')
    misc4 = RichTextField(null=True, blank=True, help_text='')
    misc5 = RichTextField(null=True, blank=True, help_text='')
    misc6 = RichTextField(null=True, blank=True, help_text='')
    misc7 = RichTextField(null=True, blank=True, help_text='')
    misc8 = RichTextField(null=True, blank=True, help_text='')
    misc9 = RichTextField(null=True, blank=True, help_text='')
    misc10 = RichTextField(null=True, blank=True, help_text='Website traffic - grey box1')
    misc11 = RichTextField(null=True, blank=True, help_text='Website traffic - grey box2')
    misc12 = RichTextField(null=True, blank=True, help_text='Website traffic - grey box3')
    misc13 = RichTextField(null=True, blank=True, help_text='Website traffic - grey box4')
    misc14 = RichTextField(null=True, blank=True, help_text='Demographic profile')
    misc15 = RichTextField(null=True, blank=True, help_text='Product upload process')
    misc16 = RichTextField(null=True, blank=True, help_text='Customer support')
    misc17 = RichTextField(null=True, blank=True, help_text='Local return address (Yes/No)')
    misc18 = RichTextField(null=True, blank=True, help_text='Return rates')
    misc19 = RichTextField(null=True, blank=True, help_text='Marketing and merchandising')
    misc20 = RichTextField(null=True, blank=True, help_text='Local incorporation')
    misc21 = RichTextField(null=True, blank=True, help_text='Local bank account')
    misc22 = RichTextField(null=True, blank=True, help_text='Exclusivity')
    misc23 = RichTextField(null=True, blank=True, help_text='Translation')
    misc24 = RichTextField(null=True, blank=True, help_text='Payment time')
    misc25 = RichTextField(null=True, blank=True, help_text='Exchange rate')
    misc26 = RichTextField(null=True, blank=True, help_text='Bond required')
    misc27 = RichTextField(null=True, blank=True, help_text='')
    misc28 = RichTextField(null=True, blank=True, help_text='')
    misc29 = RichTextField(null=True, blank=True, help_text='')
    def __str__(self):
        return "{0} {1}".format(self.country, self.name)
    class Meta:
        ordering = ('country',)
class OldMarket(Market):
    """Proxy of Market labelled as deprecated (admin display only; no new table)."""
    class Meta:
        proxy = True
        verbose_name = "Market - deprecated"
        verbose_name_plural = "Markets - deprecated"
|
__author__ = 'Hannah'
# Given an odd number, tests whether or not it is a prime number
def is_prime_number(number):
    """Return True if `number` (expected odd, > 1) is prime."""
    # Only check odd candidates up to sqrt(number): a composite number always
    # has a factor no larger than its square root. (The original scanned all
    # the way up to `number`, which is O(n) instead of O(sqrt n) — same
    # result for every odd input, just much slower.)
    possible_factor = 3
    while possible_factor * possible_factor <= number:
        # If something divides evenly into the number, then it is NOT prime
        if number % possible_factor == 0:
            return False
        possible_factor += 2
    # If nothing divided evenly into the number, then it is prime
    return True
# Recursive version of finding factors.
# Deprecated because it exceeds the maximum recursion depth for larger numbers
#def find_factors(a, b):
# # Base case. Everything is divisible by 1, so stop here.
# if b == 1:
# return [1]
# # If the number divides evenly into the number, then record it and keep looking for factors
# elif a % b == 0:
# return [b] + find_factors(a, b - 1)
# # Else it's not a factor. Keep looking for more factors.
# else:
# return find_factors(a, b - 1)
# Finds the odd factors of a given number
# (ex. 3 * 7 = 21, so both 3 and 7 are part of the returned list)
def find_odd_factors(number):
    """Return the odd factors of `number`, excluding 1 and `number` itself.

    Fixes two defects in the previous version:
    * only the *small* factor of each pair was recorded, so e.g. 7 was
      missed for 21 (3 * 7) even though the comment promised the larger
      factor — which made the "largest prime factor" search below wrong;
    * the loop bound used a strict `<`, so the square root of a perfect
      square (e.g. 3 for 9) was never found.
    """
    odd_factors = []
    possible_factor = 3
    # Scan odd candidates up to and including sqrt(number); each hit yields
    # a (factor, cofactor) pair, so nothing above sqrt(number) is missed.
    while possible_factor * possible_factor <= number:
        if number % possible_factor == 0:
            odd_factors.append(possible_factor)
            cofactor = number // possible_factor
            # Don't record the square root twice for perfect squares.
            if cofactor != possible_factor:
                odd_factors.append(cofactor)
        # Increment by 2 to skip over even numbers
        possible_factor += 2
    return odd_factors
# Find the odd factors of this number (even numbers can't be prime),
# then pick out the largest factor that is itself prime.
number = 21
factors = find_odd_factors(number)
# Filter down to the prime factors and take the maximum (-1 if none exist).
prime_factors = [factor for factor in factors if is_prime_number(factor)]
max_prime_factor = max(prime_factors, default=-1)
print(max_prime_factor)
|
from fairseq.models.roberta import XLMRModel
import torch
import torch.nn as nn
import torch.nn.functional as F
class XLMRForTokenClassification(nn.Module):
    """Sequence tagger: pretrained XLM-R encoder + linear/ReLU/dropout/linear head."""
    def __init__(self, pretrained_path, n_labels, hidden_size, dropout_p=0.2, label_ignore_idx=0,
                 head_init_range=0.04, device='cuda'):
        """
        Args:
            pretrained_path: directory of the pretrained XLM-R checkpoint
            n_labels: number of output tag classes
            hidden_size: encoder output width (must match the checkpoint)
            dropout_p: dropout between the two head layers
            label_ignore_idx: label id excluded from the cross-entropy loss
            head_init_range: std of the normal init for the head weights
            device: stored for callers; not used inside this class directly
        """
        super().__init__()
        self.n_labels = n_labels
        self.linear_1 = nn.Linear(hidden_size, hidden_size)
        self.classification_head = nn.Linear(hidden_size, n_labels)
        self.label_ignore_idx = label_ignore_idx
        self.xlmr = XLMRModel.from_pretrained(pretrained_path)
        # Underlying fairseq model (used for the raw forward pass below).
        self.model = self.xlmr.model
        self.dropout = nn.Dropout(dropout_p)
        self.device = device
        # initializing classification head
        self.classification_head.weight.data.normal_(mean=0.0, std=head_init_range)
    def forward(self, inputs_ids, labels, labels_mask, valid_mask):
        '''
        Computes a forward pass through the sequence tagging model.
        Args:
            inputs_ids: tensor of size (bsz, max_seq_len). padding idx = 1
            labels: tensor of size (bsz, max_seq_len)
            labels_mask and valid_mask: indicate where loss gradients should be propagated and where
            labels should be ignored
        Returns :
            logits: unnormalized model outputs.
            loss: Cross Entropy loss between labels and logits
        '''
        # features_only=True returns hidden states instead of LM logits.
        transformer_out, _ = self.model(inputs_ids, features_only=True)
        out_1 = F.relu(self.linear_1(transformer_out))
        out_1 = self.dropout(out_1)
        logits = self.classification_head(out_1)
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss(ignore_index=self.label_ignore_idx)
            # Only keep active parts of the loss
            if labels_mask is not None:
                # valid_mask selects the positions that carry real labels.
                active_loss = valid_mask.view(-1) == 1
                active_logits = logits.view(-1, self.n_labels)[active_loss]
                active_labels = labels.view(-1)[active_loss]
                loss = loss_fct(active_logits, active_labels)
                #print("Preds = ", active_logits.argmax(dim=-1))
                #print("Labels = ", active_labels)
            else:
                loss = loss_fct(
                    logits.view(-1, self.n_labels), labels.view(-1))
            # NOTE: returns loss (not logits) when labels are supplied.
            return loss
        else:
            return logits
    def encode_word(self, s):
        """
        takes a string and returns a list of token ids
        """
        tensor_ids = self.xlmr.encode(s)
        # remove <s> and </s> ids
        return tensor_ids.cpu().numpy().tolist()[1:-1]
|
"""
WSGI config for DjangoDemo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# +++++++++++ DJANGO +++++++++++
# To use your own django app use code like this:
# Ensure the project root is importable (PythonAnywhere-style deployment).
path = '/home/gaivin/DjangoDemo'
if path not in sys.path:
    sys.path.append(path)
#
# Point Django at the project settings before creating the application.
os.environ['DJANGO_SETTINGS_MODULE'] = 'DjangoDemo.settings'
#
## then, for django >=1.5:
application = get_wsgi_application()
## or, for older django <=1.4
# import django.core.handlers.wsgi
# application = django.core.handlers.wsgi.WSGIHandler()
|
# Minimum number of problems needed to reach a total score of G.
# Input: "D G", then D lines of "count bonus" — difficulty d+1 is worth
# 100*(d+1) points per problem plus a completion bonus for solving them all.
D, G = map(int, input().split())
problems = []
problem_total = 0
for d in range(D):
    problems.append([int(tok) for tok in input().split()])
    problem_total += problems[d][0]
best = 1e9
# Enumerate every subset of difficulties that gets solved completely.
for mask in range(1 << D):
    score = 0
    solved = 0
    highest_rest = -1
    for d in range(D):
        if mask & (1 << d):
            # Fully clear difficulty d: base points plus the completion bonus.
            score += 100 * (d + 1) * problems[d][0] + problems[d][1]
            solved += problems[d][0]
        else:
            highest_rest = d
    if score < G:
        # Top up with problems from the hardest unfinished difficulty
        # (strictly fewer than all of them — no completion bonus applies).
        points_each = 100 * (highest_rest + 1)
        extra = (G - score + points_each - 1) // points_each
        if extra >= problems[highest_rest][0]:
            continue
        solved += extra
    best = min(best, solved)
print(best)
|
'''
Created on 06/03/2012
@author: Evandro
'''
class Poker(object):
    """Compares two five-card poker hands under simplified rules."""

    def __init__(self):
        """No state is required; the class only provides comparison helpers."""

    def _converte_para_numero(self, carta):
        """Map a face card to its numeric rank; other values pass through unchanged."""
        faces = {'J': 11, 'Q': 12, 'K': 13, 'A': 14}
        return faces.get(carta, carta)

    def ValidaJogada(self, jogador_1_numero, jogador_2_numero):
        """Return 1 or 2 for the winning hand, or 0 for a tie.

        A repeated value (pair or better) wins first, scanning ranks from
        high to low; otherwise the fifth card of each hand decides.
        """
        for valor in ('14', '13', '12', '11', '10', '9', '8', '7', '6', '5', '4', '3', '2'):
            repeticoes_1 = jogador_1_numero.count(valor)
            repeticoes_2 = jogador_2_numero.count(valor)
            if repeticoes_1 > repeticoes_2 and repeticoes_1 > 1:
                return 1
            if repeticoes_2 > repeticoes_1 and repeticoes_2 > 1:
                return 2
        carta_1 = int(self._converte_para_numero(jogador_1_numero[4]))
        carta_2 = int(self._converte_para_numero(jogador_2_numero[4]))
        if carta_1 > carta_2:
            return 1
        if carta_2 > carta_1:
            return 2
        return 0
|
"""
Image Converter class that takes an image in RGB/CMYK
and returns a similar image made of only ASCII chars.
alternatively one can opt to only pixelate the image.
Created by Trevor Dalton on 8/28/19
"""
from PIL import Image, ImageDraw, ImageFont
from concurrent.futures import ProcessPoolExecutor
import functools
import multiprocessing
import time
# The following 2 functions are taken from https://www.codementor.io
# Open an Image
def open_image(path):
    """Open the image file at `path` and return it as a PIL Image."""
    return Image.open(path)
class AsciiConverter:
    """Renders a PIL image as pixel blocks or ASCII art.

    The image is divided into blockSize x blockSize cells; each cell's
    average RGB colour drives either a solid rectangle (pixelate) or a
    character chosen by brightness (asciiConvert).
    """
    def __init__(self, image, blockSize):
        self.image = image
        self.oldWidth, self.oldHeight = image.size
        # Trim the working area so it is an exact multiple of blockSize.
        self.width = self.oldWidth - (self.oldWidth % blockSize)
        self.height = self.oldHeight - (self.oldHeight % blockSize)
        self.blockSize = blockSize
        self.blocks = self.getImageColor()
        self.newImage = None
    # Takes the list of blocks and
    # turns them into a pixelated image
    def pixelate(self):
        image = Image.new("RGB", (self.width, self.height), "white")
        pen = ImageDraw.Draw(image)
        for i in range(self.height//self.blockSize):
            for j in range(self.width//self.blockSize):
                pen.rectangle([(j*self.blockSize, i*self.blockSize), (j*self.blockSize+self.blockSize, i*self.blockSize+self.blockSize)], self.blocks[j+(i*self.width//self.blockSize)], self.blocks[j+(i*self.width//self.blockSize)], 0)
        self.newImage = image
    # Helper method that turns an RGB tuple to a Greyscale value.
    def RGBtoGreyscale(self, color):
        """Average the R, G and B channels into one 0-255 grey value."""
        val = color[0] + color[1] + color[2]
        return val // 3
    # Helper method to turn a greyscale value to a particular letter.
    def determineLetter(self, darkness):
        """Map a grey value (0=dark .. 255=light) to a dense-to-sparse glyph."""
        char = ""
        if darkness < 30:
            char = "W"
        elif darkness < 60:
            char = "X"
        elif darkness < 90:
            char = "A"
        elif darkness < 120:
            char = "S"
        elif darkness < 150:
            char = "C"
        elif darkness < 180:
            char = "T"
        elif darkness < 200:
            char = ")"
        elif darkness < 215:
            char = "!"
        elif darkness < 230:
            char = "^"
        else:
            char = "."
        return char
    # Takes the blocks and converts them to
    # ASCII characters.
    def asciiConvert(self):
        image = Image.new("RGB", (self.width, self.height), "white")
        pen = ImageDraw.Draw(image)
        # Monospace font sized to one block per character.
        font = ImageFont.truetype("joystix_monospace.ttf", self.blockSize)
        for i in range(self.height//self.blockSize):
            for j in range(self.width//self.blockSize):
                color = self.blocks[j+(i*self.width//self.blockSize)]
                darkness = self.RGBtoGreyscale(color)
                char = self.determineLetter(darkness)
                pen.text((j*self.blockSize, i*self.blockSize), char, "black", font)
        self.newImage = image
    # For a size x size group of pixels
    # get the average (r,g,b) between
    # (start.x,start.y) and (start.x+size,start.y+size)
    # Returns a tuple (r,g,b)
    def getBlockColor(self, start):
        totalRGB = [0,0,0]
        # Loops over every pixel and adds its RGB value to the list
        for i in range(self.blockSize):
            for j in range(self.blockSize):
                pixel = self.image.getpixel((start[0]+j, start[1]+i))
                totalRGB[0] += pixel[0]
                totalRGB[1] += pixel[1]
                totalRGB[2] += pixel[2]
        # Integer divides the total amount of RGB by self.blockSize**2 and puts it in a tuple
        averageRGB = ( totalRGB[0] // self.blockSize**2, totalRGB[1] // self.blockSize**2, totalRGB[2] // self.blockSize**2 )
        return averageRGB
    # Helper method so the program can assign
    # larger tasks to each thread
    def getRowColor(self, start):
        rowColors = []
        for j in range(self.width//self.blockSize):
            block = self.getBlockColor((j*self.blockSize, start))
            rowColors.append(block)
        return rowColors
    # Helper method to get a list
    # of the average RGB values.
    # Goes by width and then by height.
    def getImageColor(self):
        # Now threads are used to gather image data
        # about the average RGB of each blockSizexblockSize block.
        averageColors = []
        coords = []
        with ProcessPoolExecutor(multiprocessing.cpu_count()) as executor:
            for i in range(self.height//self.blockSize):
                coords.append(i*self.blockSize)
            for result in executor.map(self.getRowColor, coords):
                for block in result:
                    averageColors.append(block)
        return averageColors
    # Takes a list of average RGB values of an image
    # and returns an image of those shades.
    def display(self):
        """Show the converted image, or the original if nothing was converted yet."""
        # FIX: identity comparison with None (PEP 8 E711) instead of `== None`.
        if self.newImage is None:
            self.image.show()
        else:
            self.newImage.show()
    # Save the file to the name decided by user input
    def save(self, name):
        self.newImage.save(name)
def main():
    """Prompt for an image and block size, then show and save its ASCII rendering."""
    imageName = input("What is the name of the image you'd like converted? ex: egg.jpg\n")
    image = Image.open(imageName)
    # BUG FIX: Image.convert() returns a *new* image; the original call
    # discarded the result, so CMYK/paletted inputs were never converted.
    image = image.convert('RGB')
    blockSize = int(input("What size would you like the blocks?"))
    newImage = AsciiConverter(image, blockSize)
    newImage.asciiConvert()
    newImage.display()
    newImage.save("test.jpg")

if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
class Solution:
    def longestValidParentheses(self, s: str) -> int:
        """Return the length of the longest well-formed '()' substring of s.

        Classic O(n) stack-of-indices solution. BUG FIX: the previous
        version accumulated every matched pair since the last unmatched
        ')', so disjoint valid runs separated by an unmatched '(' were
        summed together — e.g. "()(()" returned 4 instead of the correct 2.
        """
        # Stack holds indices of unmatched characters; -1 is the sentinel
        # marking the position just before the current valid run.
        stack = [-1]
        longest = 0
        for i, ch in enumerate(s):
            if ch == '(':
                stack.append(i)
            else:
                stack.pop()
                if stack:
                    # Valid run spans from just after stack[-1] up to i.
                    longest = max(longest, i - stack[-1])
                else:
                    # Unmatched ')': becomes the new sentinel.
                    stack.append(i)
        return longest
# Quick manual check of the solver.
solver = Solution()
result = solver.longestValidParentheses("()(()")
print(result)
|
from cms.apps.media.models import File
from django.shortcuts import get_object_or_404
from django.views.generic import RedirectView
from sorl.thumbnail import get_thumbnail
class ImageView(RedirectView):
    """Redirect to a sorl thumbnail of a CMS File, sized/cropped per URL kwargs."""
    # If they change the source image, we don't want to be showing the old image.
    # Sorl uses memcached to retrieve images with the same args, so this should
    # be pretty quick.
    permanent = False
    def get_redirect_url(self, *args, **kwargs):
        """Build (or fetch) the thumbnail and return its URL.

        Expected kwargs, e.g.:
        {'pk': '9', 'width': '285', 'height': '400', 'format': 'webp', 'crop': 'None'}
        'auto' for width or height preserves the aspect ratio; crop='None'
        (case-insensitive) disables cropping; format='source' keeps the
        file's original format.
        """
        file_ = get_object_or_404(File, pk=kwargs['pk'])
        sorl_args = [
            file_.file,
        ]
        sorl_kwargs = {}
        dimensions = ''
        width = kwargs['width']
        height = kwargs['height']
        if width == 'auto':
            # sorl geometry 'xH' scales to the given height only.
            dimensions = f'x{height}'
        elif height == 'auto':
            # A bare width scales to that width only.
            dimensions = width
        else:
            dimensions = f'{width}x{height}'
        if 'crop' not in kwargs:
            # Default crop when the URL pattern omits it.
            kwargs['crop'] = 'center'
        sorl_args.append(dimensions)
        if kwargs['crop'].lower() != 'none':
            sorl_kwargs['crop'] = kwargs['crop']
        if kwargs['format'] != 'source':
            sorl_kwargs['format'] = kwargs['format'].upper()
        return get_thumbnail(
            *sorl_args,
            **sorl_kwargs
        ).url
|
# Read two integers from stdin and print base**exponent;
# warn instead of crashing on malformed input. (Python 2: raw_input)
try:
    base = int(raw_input())
    exponent = int(raw_input())
except ValueError:
    print("enter the integers only")
else:
    print(pow(base, exponent))
from hashlib import sha256
import json
import time
import os,ast
from flask import Blueprint,render_template,request,Response,jsonify
import base64
from Crypto.Cipher import AES
class Block:
    """A single block in the chain (index, timestamp, payload, link, nonce)."""
    def __init__(self,data,t=None,prev=0,index=0):
        # BUG FIX: the default was `t=time.time()`, which Python evaluates
        # ONCE at import time — every default-constructed block shared the
        # module-load timestamp. Compute the current time per call instead.
        if t is None:
            t = time.time()
        self.index=index
        self.timestamp=time.ctime(t)
        self.data=data
        self.prevhash=prev
        self.nonce=0
    def __eq__(self,other):
        """Blocks are equal when every attribute (incl. mined hash) matches."""
        return self.__dict__ == other.__dict__
class Blockchain:
    """In-memory chain of Blocks with a simple 2-leading-zeros proof of work."""
    def __init__(self):
        self.chain=[]
        self.create_genesis_block()
    def proof_of_work(self,block):
        """Search for a nonce whose hash starts with '00'.

        On success stamps block.hash/block.nonce and returns True.
        NOTE(review): the "time limit" compares tm_min of start and end
        times, which wraps at the hour boundary (d can go negative) —
        confirm the intended timeout semantics.
        """
        block.nonce=0
        cdata,cprevhash,cnonce=block.data,block.prevhash,block.nonce
        comp=self.compute_hash(cdata,cprevhash,cnonce)
        stime=time.time()
        while not comp.startswith("0"*2) :
            cnonce+=1
            comp=self.compute_hash(cdata,cprevhash,cnonce)
        etime=time.time()
        s=time.localtime(stime).tm_min
        e=time.localtime(etime).tm_min
        d=e-s
        if d<=10:
            block.hash,block.nonce=comp,cnonce
            return True
        else:
            return False
    def compute_hash(self,data,prevhash,nonce):
        """SHA-256 hex digest of the concatenated str() forms of the inputs."""
        blockstr=(str(data)+str(prevhash)+str(nonce)).encode()
        return sha256(blockstr).hexdigest()
    def create_genesis_block(self):
        """Seed the chain with a zeroed sensor-reading block."""
        block=Block( {
            "Temperature":"0",
            "Humidity":"0"
        })
        block.hash=self.compute_hash(block.data,block.prevhash,block.nonce)
        self.chain.append(block)
    def add_new_block(self,block):
        """Link `block` to the tip and mine it; returns True on success."""
        block.index=len(self.chain)
        prevblock=self.chain[-1]
        phash=prevblock.hash
        block.prevhash=phash
        b=self.proof_of_work(block)
        if b:
            self.chain.append(block)
            return True
        else:
            # Failed proof of work: stash the block in the module-level `cd`.
            cd.append(block.__dict__)
            return False
    def validate_block(self,block):
        """Recompute the block's hash and compare with the stored one."""
        chash,cdata,cphash,cnonce=block.hash,block.data,block.prevhash,block.nonce
        comp=self.compute_hash(cdata,cphash,cnonce)
        return comp==chash
    def displaydata(self):
        """Return the whole chain (newest first) as a compact JSON string."""
        dval=self.chain
        c=[]
        for ddata in reversed(dval):
            c.append(ddata.__dict__)
        return json.dumps(c,separators=(',',':'))
    def mine(self,obj):
        """Replace the data of block obj['index'] and re-mine the tail.

        Returns False when the submitted data equals the existing data.
        """
        no=int(obj['index'])
        h=obj['hash']
        d=obj['data']
        cchain=self.chain[:no]
        ccchain=self.chain[no:]
        # Copy the partial blocks before hand for the proof of work to perform
        prev=cchain[-1]
        if not self.comp_data(ccchain[0].data,d):
            for k in ccchain:
                block=Block("")
                if h==k.hash and k.index==no:
                    # This is the block being edited: take the new data.
                    block.data=d
                    block.index=k.index
                    block.prevhash=prev.hash
                else:
                    # Downstream block: keep its data, re-link and re-mine.
                    block.prevhash=cchain[-1].hash
                    block.index=len(cchain)
                    block.data=k.data
                q=self.proof_of_work(block)
                if q:
                    cchain.append(block)
                else:
                    cd.append(block.__dict__)
            self.chain=cchain
            return True
        else:
            return False
    def comp_data(self,obj1,obj2):
        """Debug-print and compare two data payloads for equality."""
        print(obj1,obj2)
        print(obj1==obj2)
        return obj1==obj2
# Starting line of the code
# `cd` collects blocks that failed proof-of-work (served by /dd below).
cd=[]
# Flask blueprint exposing the blockchain API, plus the single shared chain.
bd=Blueprint('blockdata',__name__)
bb=Blockchain()
# Create routes for the backend
@bd.route("/")
def index():
    """Serve the dashboard page."""
    return render_template('home.html')
# API router for home html
@bd.route("/display")
def dis():
    """Return the full chain (newest first) as JSON."""
    return bb.displaydata()
@bd.route("/valid/<int:i>")
def valid(i):
    """Validate the i-th (1-based) block against its stored hash."""
    print(len(bb.chain))
    if i==1:
        # Block 1 is the genesis block; it has no meaningful hash link.
        return "Enter value greater than 1"
    if 1<= i-1 <len(bb.chain) and bb.validate_block(bb.chain[i-1]):
        return "Yes it is"
    elif not (1<= i-1 <len(bb.chain)):
        # BUG FIX: was `i <= i-1 < len(...)`; `i <= i-1` is always False, so
        # this branch fired for every non-valid case and the "Not validated"
        # branch below was unreachable.
        return "No it is not in range"
    elif not (bb.validate_block(bb.chain[i-1])):
        return "Not validated"
    else:
        pass
# Dynamic adding of data from the script to the server
@bd.route("/add",methods=['POST'])
def addb():
    """Decrypt an AES-CBC payload posted by the sensor script and append it as a block."""
    hexa=['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f']
    key="mypasswordisramsmypasswordisrams"
    w = request.get_data(as_text=True)
    fi=ast.literal_eval(w)
    gdec,giv=fi['data'],fi['iv']
    b64dec=base64.b64decode(gdec)
    aes1=AES.new(key.encode('utf-8'),AES.MODE_CBC,giv.encode('utf-8'))
    d=aes1.decrypt(b64dec)
    f=d.decode('utf-8')
    # The sender pads the plaintext; the last character encodes the pad
    # length as a hex digit.
    trail=f[-1]
    v=0
    for ind,k in enumerate(hexa):
        if k == trail:
            v=ind
            break
    # BUG FIX: `f = f[:-v]` unconditionally — when v == 0, f[:-0] is f[:0],
    # i.e. the empty string, which wiped the whole payload.
    if v:
        f=f[:-v]
    dis=ast.literal_eval(f)
    sw=bb.add_new_block(Block(dis,time.time()))
    print(bb.chain[-1].timestamp)
    if sw:
        r={}
        r["description"]="Successfully added to the block"
        return jsonify(r),200
    else:
        return Response("Cannot added to the block")
# Shows the dynamically added data
@bd.route("/dd")
def dexceptdata():
    """Return the blocks that failed proof-of-work (collected in `cd`)."""
    return json.dumps(cd)
# Shows the individual block details
@bd.route("/block")
def showsingleblock():
    """Serve the single-block detail page."""
    return render_template('block.html')
# Shows the mining of the block
@bd.route("/mine",methods=['POST'])
def minedata():
    """Replace a block's data (by index/hash) and re-mine the tail of the chain."""
    newdata=request.get_json(force=True)
    w=bb.mine(newdata)
    if w:
        return "Successfully added to the block"
    else:
        return "unsuccessful"
from numpy import ndarray
from typing import List
from ..path.sampling import SamplingSetting
from .types import SolveMethod, CostFuntionType
class OptSettings:
    """
    Settings for the numerical optimization based planners.
    """

    def __init__(
        self,
        q_init: ndarray = None,
        max_iters: int = None,
        weights: List[float] = None,
        con_objective_weight=0.0,
    ):
        # q_init is resolved later, once the path length and the robot's
        # number of degrees of freedom are known.
        self.q_init = q_init
        self.weights = weights
        self.con_objective_weight = con_objective_weight
        # Fall back to a default iteration budget when none is supplied.
        self.max_iters = 100 if max_iters is None else max_iters
class SolverSettings:
    """Top-level solver configuration: method, cost type and method-specific settings."""
    def __init__(
        self,
        solve_method: SolveMethod,
        cost_function_type: CostFuntionType,
        sampling_settings: SamplingSetting = None,
        opt_settings: OptSettings = None,
    ):
        self.solve_method = solve_method
        self.cost_function_type = cost_function_type
        if solve_method == SolveMethod.sampling_based:
            # Sampling-based solving requires sampling settings.
            assert sampling_settings is not None
            self.sampling_settings = sampling_settings
            # fill in the correct cost function based on the type
        elif solve_method == SolveMethod.optimization_based:
            # Optimization-based solving requires optimizer settings.
            assert opt_settings is not None
            self.opt_settings = opt_settings
|
class Constants:
    """Static game-balance constants for the tree-growing simulation."""
    # Cell richness levels
    RICHNESS_NULL = 0
    RICHNESS_POOR = 1
    RICHNESS_OK = 2
    RICHNESS_LUSH = 3
    # Tree growth stages
    TREE_SEED = 0
    TREE_SMALL = 1
    TREE_MEDIUM = 2
    TREE_TALL = 3
    # Growth cost indexed by target stage, scaled by TREE_COST_SCALE
    TREE_BASE_COST = [ 0, 1, 3, 7 ]
    TREE_COST_SCALE = 1
    # Cost of the lifecycle-end action
    LIFECYCLE_END_COST = 4
    # Phase durations — presumably milliseconds; confirm with the game loop
    DURATION_ACTION_PHASE = 1000
    DURATION_GATHER_PHASE = 2000
    DURATION_SUNMOVE_PHASE = 1000
    STARTING_TREE_COUNT = 2
    # Extra points awarded for richer cells
    RICHNESS_BONUS_OK = 2
    RICHNESS_BONUS_LUSH = 4
|
from defs import *
# he's at 465k into rcl5 i'm at 161k into rcl 4
# 102k into rcl 5 i'm at 388k into rcl 4
# Global tunables for the Screeps AI, attached to js_global (Transcrypt bridge).
js_global.USERNAME = 'Lisp'
js_global.VERSION = 1842
js_global.CONTROLLER_SIGN = 'Territory of Lisp [' + str(js_global.VERSION) + ']'
js_global.CREEP_SAY = False
# Construction priority, highest first.
js_global.BUILD_ORDER = [STRUCTURE_SPAWN, STRUCTURE_TOWER, STRUCTURE_EXTENSION, STRUCTURE_STORAGE,
                         STRUCTURE_TERMINAL, STRUCTURE_LINK, STRUCTURE_CONTAINER,
                         STRUCTURE_EXTRACTOR, STRUCTURE_ROAD, STRUCTURE_LAB, STRUCTURE_RAMPART]
# Wall thickness keyed by controller level — presumably RCL; confirm usage.
js_global.WALL_WIDTH = {5: 1, 6: 2, 7: 3, 8: 4}
js_global.MIN_WALL_HITS = 50000
js_global.WALL_PLACEMENT_FREQUENCY = 50
js_global.MAX_WALL_DECAY = 10000
js_global.WALL_REINFORCEMENT = 50000
js_global.ROAD_RCL = 4
# Repair thresholds as fractions of max hits.
js_global.MIN_REPAIR = 0.7
js_global.TOWER_REPAIR = 0.5
js_global.MAX_DEATH_TIMER = 500
# Storage energy bands keyed by controller level.
js_global.STORAGE_MIN = {4: 20000, 5: 50000, 6: 50000, 7: 50000, 8: 50000}
js_global.STORAGE_MAX = {4: 50000, 5: 200000, 6: 200000, 7: 200000, 8: 200000}
js_global.RESOURCE_MAX_STORAGE = 10000
js_global.RESOURCE_MIN_TERMINAL = 5000
js_global.RESOURCE_MAX_TERMINAL = 10000
js_global.ENERGY_MAX_TERMINAL = 50000
js_global.MIN_RESERVE_TICKS = 2500
js_global.TOWER_MIN = 0.8
# Body-part assembly order for spawning creeps.
js_global.BODY_ORDER = [TOUGH, ATTACK, WORK, RANGED_ATTACK, CARRY, HEAL, MOVE]
|
# -*- coding:utf-8 -*-
import os
import copy
import json
from verify_new22 import stringdiffanalysis
# NOTE(review): placeholder temp-file path, apparently for an empty/missing
# side of a comparison — unused in this chunk; confirm against the module.
temp_file_name = "./~compaer_tools_null_file.tmp"
class Compare(object):
    """
    Comparison base class: accumulates same/different parts and counts,
    and assembles them into a JSON-friendly result.
    """
    def __init__(self, is_atom=False):
        """
        Init
        :param is_atom: whether this is an atomic comparison; if True, no
            lower-level (sub) comparison is performed
        """
        self._is_atom = is_atom
        self._same = True
        self._same_part = []
        self._same_count = 0
        self._different_part = []
        self._different_count = 0
        self._total_count = 0
        self._compare_result = {}
        self._left_only = []
        self._right_only = []
        self._sub_compare_result = {}
        # TBD
        self._type = "CompareAbstract"
        self._sub_type = "None"
    def get_compare_result(self):
        """
        Get the comparison result.
        :return: the comparison result assembled as a dict (JSON-shaped)
        """
        # Assemble the result
        self._compare_result["Type"] = self._type
        self._compare_result["SamePart"] = self._same_part
        self._compare_result["DifferentPart"] = self._different_part
        self._compare_result["LeftOnly"] = self._left_only
        self._compare_result["RightOnly"] = self._right_only
        if not self._is_atom:
            # Non-atomic comparisons also report their nested results.
            self._compare_result["SubType"] = self._sub_type
            self._compare_result["SubCompareResult"] = self._sub_compare_result
        self._compare_result["SameCount"] = self._same_count
        self._compare_result["DifferentCount"] = self._different_count
        self._compare_result["TotalCount"] = self._total_count
        self._compare_result["IsSame"] = self._same
        return self._compare_result
    def get_string_result(self, indent=0):
        """
        Get the comparison result as a JSON string.
        :param indent: indentation depth for json.dumps
        :return: JSON string (non-ASCII preserved)
        """
        return json.dumps(self.get_compare_result(), ensure_ascii=False, indent=indent)
    def get_statistical_result(self):
        """
        Get aggregate statistics (totals, matches, proportions).
        :return: statistics as a dict (JSON-shaped)
        """
        compare_result = self.get_compare_result()
        sub_type = compare_result["SubType"]
        statistical_result = {sub_type: {}}
        statistical_result[sub_type]["Total"] = compare_result["TotalCount"]
        statistical_result[sub_type]["Same"] = compare_result["SameCount"]
        if compare_result["TotalCount"] != 0:
            statistical_result[sub_type]["Proportion"] = "%.2f%%" % (float(compare_result["SameCount"]) /
                                                                     float(compare_result["TotalCount"]) * 100)
        else:
            statistical_result[sub_type]["Proportion"] = "%.2f%%" % 0
        # TODO: implement statistics over arbitrarily nested results; consider
        #       turning this into a static method?
        # TODO: temporary way of computing item accuracy; replace later
        item_total_count = 0
        item_same_count = 0
        word_total_count = 0
        word_same_count = 0
        # Walk two levels of nested sub-results, summing item and word counts.
        for sub_file in compare_result["SubCompareResult"]:
            for sub_file2 in compare_result["SubCompareResult"][sub_file]["SubCompareResult"]:
                item_total_count += compare_result["SubCompareResult"][sub_file]["SubCompareResult"][sub_file2][
                    "TotalCount"]
                item_same_count += compare_result["SubCompareResult"][sub_file]["SubCompareResult"][sub_file2][
                    "SameCount"]
                word_total_count += \
                    compare_result["SubCompareResult"][sub_file]["SubCompareResult"][sub_file2]["SubCompareResult"][
                        "WordCompareResult"]["TotalCount"]
                word_same_count += \
                    compare_result["SubCompareResult"][sub_file]["SubCompareResult"][sub_file2]["SubCompareResult"][
                        "WordCompareResult"]["SameCount"]
        statistical_result["Item"] = {}
        statistical_result["Item"]["Total"] = item_total_count
        statistical_result["Item"]["Same"] = item_same_count
        # NOTE(review): the zero-total fallbacks below emit "0.00" without a
        # percent sign, unlike the "%.2f%%" branches — confirm consumers.
        if item_total_count == 0:
            statistical_result["Item"]["Proportion"] = "0.00"
        else:
            statistical_result["Item"]["Proportion"] = "%.2f%%" % (
                float(item_same_count) / float(item_total_count) * 100)
        statistical_result["Word"] = {}
        statistical_result["Word"]["Total"] = word_total_count
        statistical_result["Word"]["Same"] = word_same_count
        if word_total_count == 0:
            statistical_result["Word"]["Proportion"] = "0.00"
        else:
            statistical_result["Word"]["Proportion"] = "%.2f%%" % (
                float(word_same_count) / float(word_total_count) * 100)
        return statistical_result
# TODO: implement character-level comparison
class WordCompare(Compare):
    """
    String comparison; computes per-character accuracy statistics. (Python 2)
    """
    def __init__(self, left_word, right_word):
        """
        Init
        :param left_word: reference (ground-truth) byte string
        :param right_word: candidate byte string to compare
        """
        super(WordCompare, self).__init__(is_atom=True)
        self._type = "Word"
        self._sub_type = None
        # Decode the byte strings to unicode (UTF-8 first, then GBK) and
        # split into per-character lists.
        try:
            left_tokens = list(left_word.decode("utf-8"))
        except UnicodeDecodeError:
            left_tokens = list(left_word.decode("GBK"))
        # NOTE(review): overwritten by stringdiff._allcount below.
        self._total_count = len(left_tokens)
        try:
            right_tokens = list(right_word.decode("utf-8"))
        except UnicodeDecodeError:
            try:
                right_tokens = list(right_word.decode("GBK"))
            except UnicodeDecodeError:
                # Undecodable input: dump it and treat as empty.
                print right_word
                # right_tokens = list(right_word.decode("GBK"))
                right_tokens = []
        # Re-encode every character in the lists back to UTF-8
        for i in range(len(left_tokens)):
            left_tokens[i] = left_tokens[i].encode("utf-8")
        for i in range(len(right_tokens)):
            right_tokens[i] = right_tokens[i].encode("utf-8")
        # Run the external diff to obtain the comparison result
        stringdiff = stringdiffanalysis(" ".join(left_tokens), " ".join(right_tokens))
        stringdiff.calclate_diff_lcs2()
        self._different_part = stringdiff._diffstring
        self._total_count = stringdiff._allcount
        self._same_count = stringdiff._samecount
        # The commented-out block below is a crude character-count fallback
        # i = 0
        # while i < len(left_tokens):
        #     token = left_tokens[i]
        #     if token in right_tokens:
        #         self._same_count += 1
        #         # self._same_part.append(token)
        #         left_tokens.remove(token)
        #         right_tokens.remove(token)
        #     else:
        #         i += 1
        # for token in left_tokens:
        #     self._left_only.append(token)
        # for token in right_tokens:
        #     self._right_only.append(token)
        # Decide whether the strings are equal
        # NOTE(review): _left_only/_right_only are never populated on the
        # active code path, so _same stays True here — confirm intent.
        if (len(self._left_only) + len(self._right_only)) > 0:
            self._same = False
class ItemCompare(Compare):
    """
    Compare two lists of values belonging to the same item key
    (membership-style match) and additionally run a character-level
    WordCompare over the concatenated values.
    """
    def __init__(self, left_item, right_item):
        """
        :param left_item: list of reference values for one key
        :param right_item: list of candidate values for the same key
        """
        # For now the item is the lowest-level compare unit; character
        # accuracy is handled by the nested WordCompare below.
        super(ItemCompare, self).__init__(is_atom=False)
        self._type = "Item"
        self._sub_type = "Word"
        self._total_count = len(left_item)
        # Count reference values that also appear on the candidate side.
        # NOTE(review): duplicates on the left are each counted if present
        # on the right; membership is not multiset-aware.
        for item in left_item:
            if item in right_item:
                self._same_part.append(item)
                self._same_count += 1
            else:
                self._different_part.append(item)
                self._different_count += 1
        # De-duplicate the matched values (set iteration order is arbitrary).
        self._same_part = list(set(self._same_part))
        if self._total_count == self._same_count:
            self._same = True
        else:
            self._same = False
        # Deep copies so the originals are not mutated; the compare-result
        # dict stores *references* to these lists, which are then trimmed
        # in place below — order of these statements matters.
        self._left_only = copy.deepcopy(left_item)
        self._right_only = copy.deepcopy(right_item)
        self._compare_result["LeftOnly"] = self._left_only
        self._compare_result["RightOnly"] = self._right_only
        for value in self._same_part:
            self._left_only.remove(value)
            self._right_only.remove(value)
        # Character-level accuracy over the concatenated values.
        left_word = "".join(left_item)
        right_word = "".join(right_item)
        self._sub_compare_result["WordCompareResult"] = WordCompare(left_word, right_word).get_compare_result()
class FileCompare(Compare):
    """
    Compare two key/value text files item by item.

    Each file is parsed into {key: [values]} by get_items(); keys present
    on the left drive the comparison (right-only keys are reported in
    RightOnly but not item-compared).
    """
    def __init__(self, left_file_path, right_file_path):
        """
        :param left_file_path: path of the reference file
        :param right_file_path: path of the candidate file
        """
        super(FileCompare, self).__init__()
        self._type = "File"
        self._sub_type = "Item"
        self._compare_result["ItemResult"] = {}
        left_items = self.get_items(left_file_path)
        self._total_count = len(left_items.keys())
        right_items = self.get_items(right_file_path)
        # TODO: comparison
        # Keys present on both sides.
        for key in left_items.keys():
            if key in right_items.keys():
                self._same_part.append(key)
        # Keys unique to each side.
        # NOTE(review): relies on Python 2 dict.keys() returning a list
        # (remove() is called on it below); would break under Python 3.
        self._left_only = left_items.keys()
        self._right_only = right_items.keys()
        for key in self._same_part:
            self._left_only.remove(key)
            self._right_only.remove(key)
        # Collect the per-item comparison for every left-side key; keys
        # missing on the right are compared against an empty value list.
        for key in left_items.keys():
            if key in self._same_part:
                compare_result = ItemCompare(left_items[key], right_items[key]).get_compare_result()
            else:
                compare_result = ItemCompare(left_items[key], []).get_compare_result()
            self._sub_compare_result[key] = compare_result
            if not compare_result["IsSame"]:
                self._different_part.append(key)
                self._different_count += 1
            else:
                self._same_count += 1
        # This part may be problematic (original author's note): same_part
        # is rebuilt as "all left keys minus the different ones".
        self._same_part = left_items.keys()
        for key in self._different_part:
            self._same_part.remove(key)
        if self._same_count != self._total_count:
            self._same = False

    @staticmethod
    def get_items(file_path):
        """
        Parse a file into a dict mapping key -> list of values.

        Each line is split at the first tab, ASCII colon, or fullwidth
        colon (in that order).  Values are normalised per key-specific
        rules (phone-like fields drop separators, date fields drop dots).

        :param file_path: path of the file to parse
        :return: dict of {key: [value, ...]}
        """
        items = {}
        with open(file_path, "r") as source_file:
            for line in source_file.readlines():
                # line = line.decode("GBK").encode("utf-8")
                # Locate the key/value separator. tag_size is the
                # separator's width in bytes.
                # NOTE(review): tag_size=2 for the fullwidth colon assumes
                # a 2-byte (GBK-style) encoding of the raw line — confirm
                # the input files' encoding.
                tag = line.find("\t")
                tag_size = 1
                if tag < 0:
                    tag = line.find(":")
                    if tag < 0:
                        tag = line.find(":")
                        tag_size = 2
                key = line[:tag]
                # Strip characters according to per-key rules.
                if key in ["tel", "tel_cell", "fax"]:
                    value = line[tag + tag_size:].replace("\n", "").replace("-", "").replace(" ", "").replace("(", "").replace(
                        ")", "")
                elif key in ["有效期限"]:
                    value = line[tag + tag_size:].replace("\n", "").replace(".", "").replace(" ", "")
                else:
                    value = line[tag + tag_size:].replace("\n", "").replace(" ", "")
                if key not in items.keys():
                    items[key] = []
                items[key].append(value)
        # TODO: read items out and write them into the dict
        return items
class PathCompare(Compare):
    """
    Compare two directory trees file by file using FileCompare.

    When not_skip_different_file is True, files present only on the left
    are still compared (against an empty temp file); otherwise only files
    present on both sides are compared.
    """
    def __init__(self, left_file_tree, right_file_tree, not_skip_different_file=True):
        """
        :param left_file_tree: dict with "path" and "file_list" for the reference side
        :param right_file_tree: dict with "path" and "file_list" for the candidate side
        :param not_skip_different_file: True to also compare left-only files
        """
        super(PathCompare, self).__init__()
        self._type = "Path"
        self._sub_type = "File"
        self._compare_result["FileCompareResult"] = {}
        # Prepare the empty temp file used as the "right side" for files
        # missing from the right tree (temp_file_name is a module global).
        if os.path.exists(temp_file_name):
            pass
        else:
            with open(temp_file_name, "w") as temp_file:
                temp_file.write("")
        # File lists for both sides.
        left_file_list = left_file_tree["file_list"]
        right_file_list = right_file_tree["file_list"]
        # Files present in both directories.
        for left_file in left_file_list:
            if left_file in right_file_list:
                self._same_part.append(left_file)
        # When comparing all files, the total is the left-side count.
        if not_skip_different_file:
            self._total_count = len(left_file_list)
        else:  # When comparing only shared files, the total is the overlap.
            self._total_count = len(self._same_part)
        if not_skip_different_file:  # Record the side-only files.
            # Deep copies: the compare-result dict stores references to
            # these lists, which are then trimmed in place — order matters.
            self._left_only = copy.deepcopy(left_file_list)
            self._right_only = copy.deepcopy(right_file_list)
            self._compare_result["LeftOnly"] = self._left_only
            self._compare_result["RightOnly"] = self._right_only
            # Remove shared files, leaving each side's unique files.
            for same_file in self._same_part:
                self._left_only.remove(same_file)
                self._right_only.remove(same_file)
            if len(self._left_only + self._right_only) > 0:
                self._same = False
        # Collect the per-file comparison result.
        # NOTE(review): paths are joined with a literal backslash, so this
        # is Windows-only as written.
        for each_file in left_file_list:
            if each_file in self._same_part:
                compare_result = FileCompare(left_file_tree["path"] + "\\" + each_file,
                                             right_file_tree["path"] + "\\" + each_file).get_compare_result()
            else:
                if not_skip_different_file:
                    # Missing on the right: compare against the empty temp file.
                    compare_result = FileCompare(left_file_tree["path"] + "\\" + each_file,
                                                 temp_file_name).get_compare_result()
                else:
                    continue
            self._sub_compare_result[each_file] = compare_result
            if not compare_result["IsSame"]:
                self._same = False
                self._different_part.append(each_file)
                self._different_count += 1
            else:
                self._same_count += 1
        if not_skip_different_file:
            # Rebuild same_part as "all left files minus the different ones".
            self._same_part = left_file_list
            for different_file in self._different_part:
                self._same_part.remove(different_file)
        # Remove the temp file.
        if os.path.exists(temp_file_name):
            os.remove(temp_file_name)
|
import os
import pickle

from CONFIG import *

# Load the pickled link-data structures produced by the preprocessing step
# and display the user -> item mapping.
_data_path = os.path.join(LINK_DATA, "data.picke")
with open(_data_path, "rb") as data_file:
    (user_dict, item_dict, event_dict,
     ui_dict, iu_dict, ur_dict, ir_dict) = pickle.load(data_file)
print(ui_dict)
import socket
# Resolve this machine's hostname and its IPv4 address; the server socket
# created below binds to this address.
hostname = socket.gethostname()
IPAddr = socket.gethostbyname(hostname)
def socket_create():
    """Create the server socket and initialise the shared globals.

    Sets the module-level ``host`` (from the module-level ``IPAddr``),
    ``port`` and socket object ``s`` used by socket_bind()/socket_accept().
    """
    global host
    global port
    global s
    try:
        host = IPAddr  # read-only access needs no global declaration
        port = 9999
        s = socket.socket()
    except socket.error:
        # Narrowed from a bare except: only socket-layer failures are
        # expected here; programming errors should surface, and
        # KeyboardInterrupt must not be swallowed.
        print('socket creation error')
def socket_bind():
    """Bind the server socket to (host, port) and start listening.

    Uses the globals set up by socket_create().  On a socket-level bind
    failure (e.g. address still in TIME_WAIT) it retries recursively.
    """
    global host
    global s
    global port
    try:
        print('binding socket to port : ', str(port))
        s.bind((host, port))
        s.listen(5)  # backlog of 5 pending connections
    except socket.error:
        # Narrowed from a bare except so programming errors (NameError,
        # KeyboardInterrupt, ...) are not silently retried forever.
        print('unable to make a connection')
        print('retrying...')
        socket_bind()
def socket_accept():
    """Accept one incoming connection and run the chat loop on it."""
    # Blocks until a client connects; address is an (ip, port) tuple.
    conn, address = s.accept()
    print('connection has been made')
    print('IP : ', address[0], 'with port : ', address[1])
    # Interactive send/receive loop; loops indefinitely, so s.close()
    # below is only reached if send_commands returns or raises.
    send_commands(conn)
    s.close()
def send_commands(conn):
    """Interactive chat loop: read a line from stdin, send it to the peer,
    then print the peer's reply.  Empty input lines are skipped.

    :param conn: a connected socket object
    """
    while True:
        text = input('YOU : ')
        data = str.encode(text)  # encode once instead of twice
        if len(data) > 0:
            # sendall() guarantees the whole payload is transmitted;
            # plain send() may write only a prefix of the buffer.
            conn.sendall(data)
            response = str(conn.recv(1024), 'utf-8')
            print('SENDER : ' + response)
        # NOTE(review): a closed peer makes recv() return '' repeatedly;
        # consider breaking out of the loop when response is empty.
if __name__ == '__main__':
    # Set up the listening socket and serve a single client chat session.
    socket_create()
    socket_bind()
    socket_accept()
|
from projects.inflection.scripts.lib.clear_dir import clear_out_dir
if __name__ == '__main__':
    # Clear the 'py' output directory (behaviour defined by the shared
    # clear_dir project helper).
    clear_out_dir('py')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.