repo_name stringclasses 400 values | branch_name stringclasses 4 values | file_content stringlengths 16 72.5k | language stringclasses 1 value | num_lines int64 1 1.66k | avg_line_length float64 6 85 | max_line_length int64 9 949 | path stringlengths 5 103 | alphanum_fraction float64 0.29 0.89 | alpha_fraction float64 0.27 0.89 |
|---|---|---|---|---|---|---|---|---|---|
sdotson/udacity-machine-learning-nanodegree | refs/heads/master | # third party imports
import argparse
import os

import torch
from torchvision import models

# local imports
from model import create_dataloaders, create_model, train_model
from utils import determine_device
from validation import validate_train_args

# CLI defaults
HIDDEN_UNITS_DEFAULT = 2048
ARCH_DEFAULT = "vgg16"
LEARNING_RATE_DEFAULT = 0.001
EPOCHS_DEFAULT = 8

# other settings
BATCH_SIZE = 60
DROPOUT_PROBABILITY = 0.5
# torchvision architectures whose classifier head create_model knows how to replace
ARCH_CHOICES = [
    "vgg16",
    "vgg16_bn",
    "vgg11",
    "vgg11_bn",
    "vgg13",
    "vgg13_bn",
    "vgg19",
    "vgg19_bn",
    "densenet121",
    "densenet161",
    "densenet169",
    "densenet201",
]

# configure argument parser
parser = argparse.ArgumentParser(description="Trains model and saves checkpoint")
parser.add_argument("data_directory", help="the directory for the training data")
parser.add_argument("--arch", choices=ARCH_CHOICES, default=ARCH_DEFAULT)
parser.add_argument("--gpu", action="store_true")
parser.add_argument("--learning_rate", type=float, default=LEARNING_RATE_DEFAULT)
parser.add_argument("--save_dir")
parser.add_argument("--epochs", type=int, default=EPOCHS_DEFAULT)
parser.add_argument("--hidden_units", type=int, default=HIDDEN_UNITS_DEFAULT)

# parse CLI args and do some additional validation on them
args = parser.parse_args()
validate_train_args(args)

# get dataloaders and class_to_idx map
print("Creating dataloaders...")
dataloaders, class_to_idx = create_dataloaders(args.data_directory, BATCH_SIZE)

# use gpu if available and requested in args
device = determine_device(args.gpu)
print("Using device {}...".format(device.type))

print("Creating model...")
# One output unit per class: classes are the subdirectories of <data>/train
# (torchvision ImageFolder convention).  os.path.join replaces the fragile
# string concatenation of path segments used previously.
training_directory = os.path.join(args.data_directory, "train")
output_units_size = sum(
    1
    for entry in os.listdir(training_directory)
    if os.path.isdir(os.path.join(training_directory, entry))
)
model, input_size = create_model(
    args.arch, args.hidden_units, DROPOUT_PROBABILITY, output_units_size, device
)

# train the model in place
print("Training model...")
train_model(model, dataloaders, args.epochs, args.learning_rate, device)

# save everything needed to rebuild the model later (see model.load_checkpoint)
print("Saving checkpoint...")
checkpoint = {
    "arch": args.arch,
    "batch_size": BATCH_SIZE,
    "class_to_idx": class_to_idx,
    "dropout_probability": DROPOUT_PROBABILITY,
    "hidden_size": args.hidden_units,
    "input_size": input_size,
    "output_size": output_units_size,
    "state_dict": model.state_dict(),
}
save_path = (
    os.path.join(args.save_dir, "checkpoint.pth") if args.save_dir else "checkpoint.pth"
)
torch.save(checkpoint, save_path)
print("Done. Checkpoint has been saved at {}".format(save_path))
| Python | 87 | 28.609196 | 84 | /classifying-flowers/train.py | 0.722438 | 0.705745 |
sdotson/udacity-machine-learning-nanodegree | refs/heads/master | import torch
def determine_device(gpu_flag_enabled):
    """Pick the torch device to run on.

    Returns a CUDA device only when the caller asked for the GPU *and*
    CUDA is actually available; otherwise falls back to the CPU.
    """
    if gpu_flag_enabled and torch.cuda.is_available():
        return torch.device("cuda")
    return torch.device("cpu")
| Python | 8 | 28.125 | 75 | /classifying-flowers/utils.py | 0.690987 | 0.690987 |
sdotson/udacity-machine-learning-nanodegree | refs/heads/master | import argparse
from collections import OrderedDict
from torchvision import datasets, models, transforms
import torch
from torch import nn, optim
from PIL import Image
import numpy as np
import pandas as pd
import time
def create_dataloaders(data_directory, batch_size):
    """Build train/valid/test DataLoaders plus the class-to-index mapping.

    The training split gets augmentation (random rotation, resized crop,
    horizontal flip); validation and test splits get a deterministic
    resize + center crop.  All splits are normalized with the same
    (ImageNet) channel statistics.
    """
    normalize_means = [0.485, 0.456, 0.406]
    normalize_stds = [0.229, 0.224, 0.225]
    crop_size = 224
    max_rotation = 30

    augmented_transform = transforms.Compose(
        [
            transforms.RandomRotation(max_rotation),
            transforms.RandomResizedCrop(crop_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(normalize_means, normalize_stds),
        ]
    )
    deterministic_transform = transforms.Compose(
        [
            transforms.Resize(crop_size),
            transforms.CenterCrop(crop_size),
            transforms.ToTensor(),
            transforms.Normalize(normalize_means, normalize_stds),
        ]
    )

    datasets_by_split = {
        "train": datasets.ImageFolder(
            data_directory + "/train", transform=augmented_transform
        ),
        "test": datasets.ImageFolder(
            data_directory + "/test", transform=deterministic_transform
        ),
        "valid": datasets.ImageFolder(
            data_directory + "/valid", transform=deterministic_transform
        ),
    }
    dataloaders = {
        "train": torch.utils.data.DataLoader(
            datasets_by_split["train"], batch_size=batch_size, shuffle=True
        ),
        "test": torch.utils.data.DataLoader(
            datasets_by_split["test"], batch_size=batch_size
        ),
        "valid": torch.utils.data.DataLoader(
            datasets_by_split["valid"], batch_size=batch_size
        ),
    }
    # ImageFolder assigns indices to the class subdirectories; callers
    # persist this mapping in the checkpoint.
    return dataloaders, datasets_by_split["train"].class_to_idx
def create_classifier(
    input_size, hidden_units_size, dropout_probability, output_units_size
):
    """Build the feed-forward classifier head.

    Layout: Linear -> ReLU -> Dropout -> Linear -> LogSoftmax, so the
    network emits log-probabilities (pairs with NLLLoss).
    """
    named_layers = OrderedDict()
    named_layers["fc1"] = nn.Linear(input_size, hidden_units_size)
    named_layers["relu"] = nn.ReLU()
    named_layers["dropout"] = nn.Dropout(p=dropout_probability)
    named_layers["fc2"] = nn.Linear(hidden_units_size, output_units_size)
    named_layers["output"] = nn.LogSoftmax(dim=1)
    return nn.Sequential(named_layers)
def determine_classifier_input_size(classifier):
    """Return the in_features of a pretrained model's classifier.

    Handles both shapes seen in torchvision models: an nn.Sequential
    head (first child holds in_features, e.g. VGG) and a single module
    exposing in_features directly (e.g. a lone Linear).
    """
    if isinstance(classifier, torch.nn.modules.container.Sequential):
        return classifier[0].in_features
    return classifier.in_features
def create_model(
    arch, hidden_units_size, dropout_probability, output_units_size, device
):
    """Load a pretrained torchvision model and swap in a custom classifier.

    The pretrained weights are frozen; only the replacement classifier's
    parameters remain trainable.  Returns the model together with the
    classifier's input size (persisted later in the checkpoint).
    """
    model = getattr(models, arch)(pretrained=True)

    # Freeze the pretrained feature extractor.
    for parameter in model.parameters():
        parameter.requires_grad = False

    input_size = determine_classifier_input_size(model.classifier)
    model.classifier = create_classifier(
        input_size, hidden_units_size, dropout_probability, output_units_size
    )
    model.to(device)
    return model, input_size
def train_model(model, dataloaders, epochs, learning_rate, device):
    """Train model and periodically log validation stats.

    Optimizes only the classifier's parameters with Adam + NLLLoss
    (which pairs with the LogSoftmax output layer).  Every `print_every`
    batches the model is scored against the full validation loader and
    train/validation loss plus accuracy are printed.
    """
    # NOTE(review): despite the name, this counts processed *batches*,
    # not individual images.
    images_trained = 0
    print_every = 5
    running_loss = 0
    optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
    criterion = nn.NLLLoss()
    train_start = time.time()
    for epoch in range(epochs):
        model.train()
        for inputs, labels in dataloaders["train"]:
            images_trained += 1
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            logps = model.forward(inputs)
            loss = criterion(logps, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if images_trained % print_every == 0:
                # Switch to eval mode (disables dropout) for validation.
                model.eval()
                # NOTE(review): model was already moved to `device` by
                # create_model; this .to() looks redundant but harmless.
                model.to(device)
                with torch.no_grad():
                    accuracy = 0
                    validation_loss = 0
                    for images, labels in dataloaders["valid"]:
                        images, labels = images.to(device), labels.to(device)
                        logps = model.forward(images)
                        validation_loss += criterion(logps, labels).item()
                        # exp() turns log-probabilities into probabilities.
                        ps = torch.exp(logps)
                        top_p, top_class = ps.topk(1, dim=1)
                        equals = top_class == labels.view(*top_class.shape)
                        # Mean of boolean matches = this batch's accuracy.
                        accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
                print(
                    f"Epoch {epoch+1}/{epochs} (image {images_trained}).. "
                    f"Train loss: {running_loss/print_every:.3f}.. "
                    f"Validation loss: {validation_loss/len(dataloaders['valid']):.3f}.. "
                    f"validation accuracy: {accuracy/len(dataloaders['valid']):.3f}"
                )
                running_loss = 0
                # Back to training mode after the validation pass.
                model.train()
    print("Training completed in {} seconds".format(time.time() - train_start))
def process_image(image_path):
    """ Scale, crop, and normalize a PIL image for a PyTorch model and
    return as Torch tensor (channels-first).
    """
    with Image.open(image_path) as image:
        shortest_side_length = 256
        width, height = image.size
        # Scale so the *shortest* side becomes 256 while preserving the
        # aspect ratio.  (Bug fix: the previous code only replaced the
        # short side and left the long side untouched, distorting the
        # image before cropping.)
        if width > height:
            new_size = (
                int(round(width * shortest_side_length / float(height))),
                shortest_side_length,
            )
        else:
            new_size = (
                shortest_side_length,
                int(round(height * shortest_side_length / float(width))),
            )
        resized_image = image.resize(new_size)
        width, height = resized_image.size
        # determine center crop bounding box (224x224 square)
        crop_size = 224
        left = (width - crop_size) / 2
        upper = (height - crop_size) / 2
        right = (width + crop_size) / 2
        lower = (height + crop_size) / 2
        # crop the image
        cropped_image = resized_image.crop((left, upper, right, lower))
        # transform to numpy array
        np_image = np.array(cropped_image)
        # squish to [0, 1] and normalize with the same ImageNet stats used
        # by the training transforms in create_dataloaders
        np_image_squished = np_image / 255
        means = np.array([0.485, 0.456, 0.406])
        # Bug fix: blue-channel std was 0.229 (typo); ImageNet uses 0.225,
        # matching create_dataloaders above.
        std_deviations = np.array([0.229, 0.224, 0.225])
        normalized_image = (np_image_squished - means) / std_deviations
        # we need to change order of dimensions to meet pytorch's
        # expectations: (H, W, C) -> (C, H, W)
        transposed_image = np.transpose(normalized_image, (2, 0, 1))
        return torch.from_numpy(transposed_image)
def predict(image_path, model, device, cat_to_name, top_k):
    """ Predict the class (or classes) of an image using a trained deep learning model.

    Returns a pandas DataFrame with the top_k predictions: a
    'probabilities' column and, when cat_to_name is supplied, a
    'flower_name' column.
    """
    predict_start = time.time()
    model.to(device)
    processed_image = process_image(image_path)
    # needs to be a float or computer gets angry with me
    # (also adds the batch dimension the model expects)
    image_float = processed_image.float().unsqueeze(0)
    # run image through model
    model.eval()
    model_output = model.forward(image_float.to(device))
    # model emits log-probabilities; exp() recovers probabilities
    predictions = torch.exp(model_output)
    # top predictions and top labels
    top_preds, top_labels = predictions.topk(top_k)
    # need to detach in order to call numpy
    top_preds = top_preds.detach()
    # numpy() only works on CPU tensors, so copy back from the GPU first
    if device.type != "cpu":
        top_preds = top_preds.cpu()
    top_preds = top_preds.numpy().tolist()
    top_labels = top_labels.tolist()
    # NOTE(review): this Series maps class folder name -> index; rows are
    # then selected positionally (iloc) by the model's output indices.
    # Verify the alignment — an inverted idx->class mapping may be the
    # actual intent here.
    data = {"class": pd.Series(model.class_to_idx)}
    # if there is cat_to_name translation dict around, we can add the flower_name column
    if cat_to_name:
        data["flower_name"] = pd.Series(cat_to_name)
    chart_data = pd.DataFrame(data)
    chart_data = chart_data.set_index("class")
    chart_data = chart_data.iloc[top_labels[0]]
    chart_data["probabilities"] = top_preds[0]
    print(
        "Processing and prediction completed in {} seconds".format(
            time.time() - predict_start
        )
    )
    return chart_data
def load_checkpoint(checkpoint_path, device):
    """Load checkpoint at checkpoint_path with device and return pretrained model with custom classifier."""
    # Below is a solution for loading checkpoint saved on a gpu device and I believe vice versa
    # https://discuss.pytorch.org/t/on-a-cpu-device-how-to-load-checkpoint-saved-on-gpu-device/349
    checkpoint = torch.load(checkpoint_path, map_location=lambda storage, loc: storage)
    # Rebuild the same pretrained architecture the checkpoint was trained with...
    model = getattr(models, checkpoint["arch"])(pretrained=True)
    input_size = determine_classifier_input_size(model.classifier)
    # ...and the same classifier head, then restore the trained weights.
    model.classifier = create_classifier(
        input_size,
        checkpoint["hidden_size"],
        checkpoint["dropout_probability"],
        checkpoint["output_size"],
    )
    model.load_state_dict(checkpoint["state_dict"])
    # class_to_idx is needed by predict() to map outputs back to classes.
    model.class_to_idx = checkpoint["class_to_idx"]
    model.to(device)
    return model
| Python | 270 | 32.25185 | 108 | /classifying-flowers/model.py | 0.614057 | 0.603252 |
therealpeterpython/gimp-average-layers | refs/heads/master | #!/usr/bin/env python
from gimpfu import *
from array import array
import time
import sys
import itertools
import operator
from collections import Counter
# Not sure if get_mode() or get_mode1() is faster
# but it looks like get_mode is despite its length the faster one
def get_mode1(lst):
    """Return the most common element of lst (Counter-based variant)."""
    tally = Counter(lst)
    most_common_value, _ = tally.most_common(1)[0]
    return most_common_value
# Returns the mode of the list.
# Ties on count are broken in favour of the value whose first occurrence
# in the original list comes earliest.
def get_mode(lst):
    # Sort (value, original-position) pairs so equal values become
    # adjacent, then walk the groups keeping the best-ranked value,
    # where rank = (occurrence count, -earliest position).
    indexed = sorted((value, position) for position, value in enumerate(lst))
    best_value = None
    best_rank = None
    for value, group in itertools.groupby(indexed, key=operator.itemgetter(0)):
        positions = [position for _, position in group]
        rank = (len(positions), -min(positions))
        if best_rank is None or rank > best_rank:
            best_value = value
            best_rank = rank
    return best_value
# Returns the median of the list as input type if the list has an odd length
# or the mean between the two middle elements as float.
# Note: sorts the given list in place.
def get_median(lst):
    lst.sort()
    middle = len(lst) // 2
    if len(lst) % 2 == 1:
        return lst[middle]
    return sum(lst[middle - 1:middle + 1]) / 2.0
# Returns the mean of the list as float
def get_mean(values):
    """Arithmetic mean of `values` as float.

    (Parameter renamed from `list`, which shadowed the builtin; all
    call sites in this script pass it positionally.)
    """
    return sum(values) / float(len(values))
# Returns the visible layers of the image as list
def get_visible_layers(img):
    """Collect the image's visible, non-group layers.

    Adds an alpha channel to each collected layer so every layer exposes
    the same channel count downstream (calc_avrg relies on this).
    Aborts the script run when no visible layer exists.
    """
    pdb.gimp_message("Get visible layers")
    gimp.progress_init('Getting visible layers')
    layers = img.layers
    layers_vis = []
    for layer in layers:
        if pdb.gimp_item_get_visible(layer):
            # Layer groups are skipped; only paintable layers are averaged.
            if not pdb.gimp_item_is_group(layer):
                pdb.gimp_layer_add_alpha(layer)
                layers_vis.append(layer)
    gimp.progress_update(1)
    if len(layers_vis) == 0:
        pdb.gimp_message("No visible layer found!")
        gimp.quit()
    pdb.gimp_message("Got visible layers")
    return layers_vis
# Calculates the mean layer of the image
# identically to the original script
def calc_mean(img):
    """Merge all visible layers into one whose pixels are the mean.

    Uses the compositing trick: with n layers, opacities are set to
    100/n, 100/(n-1), ..., 100 walking through img.layers, so the
    normal-mode merge of the stack works out to the per-pixel average.
    """
    layers_vis = get_visible_layers(img)
    pdb.gimp_message("mean")
    # Set oppacity of visible layers
    layers_left = len(layers_vis)
    gimp.progress_init('Setting layer opacities')
    for layer in layers_vis:
        layer.opacity = 100.0 / layers_left
        layers_left -= 1
        # NOTE(review): under Python 2 this is integer division, so the
        # progress bar stays at 0 until the loop finishes.
        gimp.progress_update((len(layers_vis) - layers_left) / len(layers_vis))
    gimp.progress_init('Merging layers')
    pdb.gimp_image_merge_visible_layers(img, CLIP_TO_IMAGE)
    gimp.progress_update(1)
# Calculates the average layer with the given average function 'avrg_fnc' of the image
# It just takes the visible layers into account
def calc_avrg(img, avrg_fnc):
    """Merge the visible layers into one by applying avrg_fnc per byte.

    For each byte position (one channel of one pixel), avrg_fnc receives
    the list of that byte across all visible layers and must return a
    number; the results form the merged layer's pixel data.
    """
    try:
        pdb.gimp_message("Calc average")
        image_x = img.width
        image_y = img.height
        layers_arrays = []
        num_channels = 0
        layers_vis = get_visible_layers(img)
        # get pixel arrays
        # layers_arrays contains the arrays of the layers
        # an array contains the pixel values of one layer as [pixel1_r, pixel1_g, pixel1_b, pixel1_A, pixel2_r, ...]
        gimp.progress_init('Getting pixel values')
        for i,layer in enumerate(layers_vis):
            layer_rgn = layer.get_pixel_rgn(0, 0, image_x, image_y, False, False)
            layers_arrays.append(array("B", layer_rgn[:, :]))
            num_channels = len(layer_rgn[0,0]) # Not pretty in this loop but it works
            gimp.progress_update((i+1) / float(len(layers_vis)))
        # create the merge layer and the destination pixel region
        merged_layer = pdb.gimp_layer_new(img, image_x, image_y, RGB_IMAGE, "merged", 100, NORMAL_MODE)
        pdb.gimp_layer_add_alpha(merged_layer)
        pdb.gimp_image_insert_layer(img, merged_layer, None, 0)
        # shadow region (dirty=True, shadow=True): written via merge_shadow below
        dest_rgn = merged_layer.get_pixel_rgn(0, 0, image_x, image_y, True, True)
        dest_array = array("B", "\x00" * (image_x * image_y * num_channels))
        pdb.gimp_message("Doing the hard work")
        t = time.time()
        # process the arrays in this manner
        # its faster than actual write out the for loops
        averaged_values = [int(avrg_fnc([arr[i] for arr in layers_arrays])) for i in range(len(layers_arrays[0]))]
        dest_array = array('B',averaged_values)
        pdb.gimp_message(str(time.time() - t))
        pdb.gimp_message("Hard work done!")
        # add dest_array to the dest_rgn
        dest_rgn[:,:] = dest_array.tostring() # deprecated in Python 3
        # Write out changes
        merged_layer.flush()
        merged_layer.merge_shadow(1)
        merged_layer.update(0, 0, image_x, image_y)
        pdb.gimp_image_merge_visible_layers(img, CLIP_TO_IMAGE)
        pdb.gimp_message("Calced average")
    except:
        # Print the exception details in gimp (GIMP swallows stderr)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        pdb.gimp_message("Type: " +str(exc_type)+"\nLine: " +str(exc_tb.tb_lineno))
def average_layers(img, average):
    """Plug-in entry point: dispatch to the chosen averaging strategy.

    `average` is one of the PF_RADIO values registered below ("mean",
    "median", "mode"; "gmode" and "range" are unimplemented stubs).
    Wrapped in an undo group so the whole merge is one undo step.
    """
    try:
        pdb.gimp_image_undo_group_start(img)
        if(average == "mean"):
            calc_mean(img) # faster than calc_avrg(img, get_mean)
        elif(average == "median"):
            pdb.gimp_message("median")
            calc_avrg(img, get_median)
        elif(average == "mode"):
            pdb.gimp_message("mode")
            calc_avrg(img, get_mode)
        elif(average == "gmode"):
            pdb.gimp_message("gmode")
            pdb.gimp_message("Not implemented yet!")
            #calc_avrg(img, get_gmode)
        elif(average == "range"):
            pdb.gimp_message("range")
            pdb.gimp_message("Not implemented yet!")
            #calc_avrg(img, get_range)
        pdb.gimp_message("finished")
        pdb.gimp_image_undo_group_end(img)
    except:
        # Surface errors inside GIMP instead of failing silently.
        exc_type, exc_obj, exc_tb = sys.exc_info()
        pdb.gimp_message("Type: " +str(exc_type)+"\nLine: " +str(exc_tb.tb_lineno))
# Register the plug-in in GIMP's procedure database; it appears under
# Filters -> Combine -> "Average layers ...".
register(
    'python_fu_average_layers',
    'Merge all layers together using an average value for each pixel',
    'Merge all layers together using an average value for each pixel',
    'Simon Filter',
    'Simon Filter',
    '2019',
    'Average layers ...',
    '*',
    [
        (PF_IMAGE, "image", "takes current image", None),
        (PF_RADIO, "average", "Set kind of average", "mean",(("Mean (fast)", "mean"), ("Median (slow)", "median"), ("Mode (slow!)", "mode"))),
    ],
    [],
    average_layers, menu="<Image>/Filters/Combine"
)
# Hand control to GIMP's Python-Fu main loop.
main()
| Python | 200 | 31.66 | 142 | /average-layers.py | 0.611451 | 0.603644 |
gausszh/sae_site | refs/heads/master | # coding=utf8
"""
jinja2的过滤器
"""
import markdown
def md2html(md):
    """Render a Markdown string to HTML.

    @param {unicode} md
    @return {unicode html}
    """
    # 'extra', 'codehilite', 'toc' and 'nl2br' enable tables/fenced code,
    # syntax highlighting, a table of contents and newline-to-<br>.
    # safe_mode="escape" HTML-escapes raw HTML embedded in the source.
    # NOTE(review): positional extension lists and safe_mode are deprecated
    # in newer python-markdown releases — confirm the pinned version.
    return markdown.markdown(md, ['extra', 'codehilite', 'toc', 'nl2br'], safe_mode="escape")

# Filter table registered on the Jinja2 environment (see flask_app).
JINJA2_FILTERS = {
    'md2html': md2html,
}
| Python | 18 | 14.333333 | 93 | /utils/filters.py | 0.597826 | 0.572464 |
gausszh/sae_site | refs/heads/master | #coding=utf8
"""
基础类--用户信息
"""
from sqlalchemy import (
MetaData, Table, Column, Integer, BigInteger, Float, String, Text, DateTime,
ForeignKey, Date, UniqueConstraint)
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from models import sae_engine
from models import create_session
Base = declarative_base()
metadata = MetaData()
class User(Base):
    """
    Site user account.  (Original docstring said "publish history log",
    apparently copied from another model.)  Linked to a Sina Weibo
    account through open_id; also used for generated guest accounts
    (see utils.AnonymousUserMixin).

    Implements the callables flask-login expects on a user object.
    """
    __tablename__ = 'user'
    __table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
    id = Column(Integer, primary_key=True)
    # Weibo uid, or "anonymousN" for generated guest users
    open_id = Column(String(45), nullable=False, index=True)
    # Weibo OAuth access token (empty string for guests)
    token = Column(String(64), nullable=False, index=True)
    name = Column(String(45))
    email = Column(String(60))
    address = Column(String(150))
    tel = Column(String(15))
    school = Column(String(45))
    create_time = Column(DateTime)
    def is_authenticated(self):
        return True
    def is_active(self):
        return True
    def is_anonymous(self):
        return False
    def get_id(self):
        # flask-login expects a unicode id (Python 2 codebase)
        return unicode(self.id)
    def __repr__(self):
        return '<User %r>' % (self.name)
if __name__ == '__main__':
    # One-off schema creation when run directly.
    Base.metadata.create_all(bind=sae_engine)
| Python | 55 | 21.50909 | 81 | /models/base.py | 0.642973 | 0.629241 |
gausszh/sae_site | refs/heads/master | #coding=utf8
import datetime
from flask import Blueprint, request, jsonify, render_template, redirect
import flask_login
import weibo as sinaweibo
from models.base import create_session, User
from utils import user_cache
from configs import settings
bp_base = Blueprint('base', __name__, url_prefix='/base')
@bp_base.route('/weibo/login/')
def weibo_login():
    """OAuth2 callback for Sina Weibo login.

    Exchanges the ?code= query parameter for an access token, then logs
    the matching local user in — creating a User row (with profile data
    fetched from the Weibo API) on first login.  Always redirects to
    /blog/, including when the code exchange fails.
    """
    api = sinaweibo.Client(settings.API_KEY,settings.API_SECRET,settings.REDIRECT_URI)
    code = request.args.get('code')
    try:
        api.set_code(code)
    except Exception, e:
        # Missing/invalid code: bail out to the blog index.
        return redirect('/blog/')
    sinainfo = api.token
    user = user_cache.get_user(sinainfo.get('uid'), format='object')
    if user:
        flask_login.login_user(user, remember=True)
    else:
        # First login: persist a new user with profile info from Weibo.
        user = User()
        user.open_id = sinainfo.get('uid')
        user.token = sinainfo.get('access_token')
        userinfo = api.get('users/show', uid=sinainfo.get('uid'))
        user.name = userinfo.get('name')
        user.address = userinfo.get('location')
        user.create_time = datetime.datetime.now()
        session = create_session()
        session.add(user)
        session.commit()
        flask_login.login_user(user, remember=True)
        session.close()
    return redirect('/blog/')
@bp_base.route('/logout/')
def logout():
    """Log the current user out and return to the blog index."""
    flask_login.logout_user()
    return redirect('/blog/')
gausszh/sae_site | refs/heads/master | #!/usr/bin/python
# coding=utf8
from flask import Flask, render_template, g
import flask_login
from configs import settings
from utils.filters import JINJA2_FILTERS
from utils import user_cache
from views import blog, base, security
def create_app(debug=settings.DEBUG):
    """Build and configure the Flask application.

    Registers the blog/base/security blueprints, installs the custom
    Jinja2 filters, and wires up flask-login (users resolved through
    the cache in utils.user_cache).
    """
    app = Flask(__name__)
    app.register_blueprint(blog.bp_blog)
    app.register_blueprint(base.bp_base)
    app.register_blueprint(security.bp_security)
    app.jinja_env.filters.update(JINJA2_FILTERS)
    app.debug = debug
    # NOTE(review): hard-coded session secret — should come from config/env.
    app.secret_key = "gausszh"
    @app.route('/')
    def index():
        return render_template('index.html')
    @app.before_request
    def check_user():
        # Expose the logged-in user to handlers/templates via flask.g.
        g.user = flask_login.current_user
    login_manager = flask_login.LoginManager()
    login_manager.setup_app(app)
    @login_manager.user_loader
    def load_user(userid):
        user = user_cache.get_user(userid, format='object')
        return user
    # Unauthenticated access falls through to the public blog list
    # instead of flask-login's default unauthorized handler.
    login_manager.unauthorized = blog.list
    # login_manager.anonymous_user = AnonymousUserMixin
    return app
# Module-level application object (imported by the WSGI entry point).
app = create_app(settings.DEBUG)
if __name__ == '__main__':
    # Run the development server when executed directly.
    host = settings.APP_HOST
    port = settings.APP_PORT
    app.run(host=host, port=port)
| Python | 50 | 22.959999 | 59 | /flask_app.py | 0.65625 | 0.653846 |
gausszh/sae_site | refs/heads/master | #coding=utf-8
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from configs import settings
# Shared SQLAlchemy engine for the SAE MySQL database (utf8, with
# pool_recycle so pooled connections are refreshed before MySQL's timeout).
sae_engine = create_engine(settings.DB_SAE_URI+'?charset=utf8', encoding='utf-8',
        convert_unicode=True, pool_recycle=settings.DB_POOL_RECYCLE_TIMEOUT,
        echo=settings.DB_ECHO)
# Session factory: callers open a session per unit of work and close it.
create_session = sessionmaker(autocommit=False, autoflush=False,
        bind=sae_engine)
Base = declarative_base()
gausszh/sae_site | refs/heads/master | #coding=utf8
import datetime
import redis
import flask_login
from models.base import User, create_session
from utils import user_cache
from configs import settings
def AnonymousUserMixin():
    '''
    This is the default object for representing an anonymous user.

    NOTE(review): despite the class-like name, this is a *factory
    function*: it persists a brand-new guest User row (open_id/name
    derived from a counter), bumps the guest counter in the cache, and
    logs the new user in via flask-login before returning it.
    '''
    session = create_session()
    user = User()
    count = user_cache.get_anonymous_count()
    anonymouser_id = 1000 + count
    user.open_id = 'anonymous%s' % anonymouser_id
    user.name = u'游客%s' % anonymouser_id
    user.token = ''
    user.create_time = datetime.datetime.now()
    session.add(user)
    session.commit()
    # NOTE(review): read-then-increment is not atomic — concurrent guests
    # could collide on the same id.
    user_cache.incr_anonymous_count()
    flask_login.login_user(user, remember=True)
    session.close()
    return user
# Process-wide redis connection pool shared by all clients below.
# NOTE(review): settings_dev defines REDIS_HOST, not REDIS_IP — confirm
# the production settings module actually provides REDIS_IP.
redis_pool = redis.ConnectionPool(host=settings.REDIS_IP,
                                  port=settings.REDIS_PORT,
                                  db=settings.REDIS_DB)
def redis_connection():
    """Return a redis client backed by the shared connection pool."""
    return redis.Redis(connection_pool=redis_pool)
gausszh/sae_site | refs/heads/master | # coding=utf8
from configs import settings
from utils import redis_connection
APP = "blog"
def set_draft_blog(uid, markdown):
    """Cache a user's unsaved blog draft (markdown text) in redis.

    The draft expires after settings.DRAFT_BLOG_TIMEOUT.  NOTE(review):
    relies on the redis client's set() taking the expiry as its third
    positional argument — verify against the installed redis-py version.
    """
    _cache = redis_connection()
    key = str("%s:draft:blog:%s" % (APP, uid))
    _cache.set(key, markdown, settings.DRAFT_BLOG_TIMEOUT)
| Python | 12 | 21.333334 | 58 | /utils/blog_cache.py | 0.686567 | 0.682836 |
gausszh/sae_site | refs/heads/master | #coding=utf8
import os
# system setting
DEBUG = True
APP_HOST = '127.0.0.1'
APP_PORT = 7020
# SAE storage bucket name used for uploaded images (see views/blog.py)
STORAGE_BUCKET_DOMAIN_NAME = 'blogimg'
# database
if os.environ.get('SERVER_SOFTWARE'):  # set when running on SAE (production)
    import sae
    DB_SAE_URI = 'mysql://%s:%s@%s:%s/database_name' % (sae.const.MYSQL_USER,
        sae.const.MYSQL_PASS, sae.const.MYSQL_HOST, sae.const.MYSQL_PORT)
    DB_POOL_RECYCLE_TIMEOUT = 10
    DB_ECHO = True
else:
    # local development credentials (placeholders)
    DB_SAE_URI = 'mysql://user:pass@127.0.0.1:3306/database_name'
    # DB_SAE_URI = 'sqlite:////database.db'
    DB_POOL_RECYCLE_TIMEOUT = 10
    DB_ECHO = True
# cache
# NOTE(review): utils/__init__.py reads settings.REDIS_IP — confirm the
# name mismatch with REDIS_HOST defined here.
REDIS_HOST = "127.0.0.1"
REDIS_PORT = 6379
REDIS_DB = 1
# cache entry lifetime in seconds
CACHE_TIMEOUT = 3
# app: Sina Weibo OAuth credentials (placeholders)
API_KEY = '***'
API_SECRET = '****'
REDIRECT_URI = 'http://****'
gausszh/sae_site | refs/heads/master | # coding=utf8
try:
import simplejson as json
except Exception:
import json
import datetime
from sqlalchemy.sql import or_
from models.base import create_session, User
from models.blog import BlogArticle
from configs import settings
from utils import redis_connection
# import sae.kvdb
APP = "base"
def get_user(uid, format="json"):
    """Fetch a user by primary key or weibo open_id, via a redis cache.

    The DB row is serialized with orm2json and cached as a JSON string
    for settings.CACHE_TIMEOUT seconds.  With format='object' the dict
    is copied onto a detached (session-less) User instance.  Returns
    None when no user matches.
    """
    _cache = redis_connection()
    key = str("%s:user:%s" % (APP, uid))
    userinfo = _cache.get(key)
    new = False
    if not userinfo:
        # Cache miss: load from the DB and repopulate the cache.
        session = create_session()
        userinfo = session.query(User).filter(or_(User.id == uid,
                                                  User.open_id == uid)).first()
        userinfo = orm2json(userinfo)
        _cache.set(key, json.dumps(userinfo), settings.CACHE_TIMEOUT)
        new = True
        session.close()
    if not new:
        # Cache hit: stored value is a JSON string.
        userinfo = json.loads(userinfo)
    if format == 'object' and userinfo:
        user = User()
        for k in userinfo:
            setattr(user, k, userinfo.get(k))
        userinfo = user
    return userinfo or None
def delete_user(uid):
    """Evict a user's cached entry from redis."""
    _cache = redis_connection()
    key = str("%s:user:%s" % (APP, uid))
    _cache.delete(key)
def get_anonymous_count():
    """Number of guest accounts (users whose open_id starts 'anonymous').

    Served from redis when cached; otherwise counted in the DB and
    cached for settings.CACHE_TIMEOUT seconds.
    """
    _cache = redis_connection()
    key = "%s:anonymous:count" % APP
    count = _cache.get(key)
    if not count:
        session = create_session()
        count = session.query(User).filter(
            User.open_id.startswith("anonymous")).count()
        _cache.set(key, count, settings.CACHE_TIMEOUT)
        session.close()
    return int(count)
def incr_anonymous_count():
    """Bump the cached guest counter by one.

    NOTE(review): read-then-set is not atomic under concurrency; redis
    INCR would be, though it handles expiry differently.
    """
    _cache = redis_connection()
    key = "%s:anonymous:count" % APP
    count = get_anonymous_count()
    _cache.set(key, count + 1, settings.CACHE_TIMEOUT)
def get_blog(blog_id):
    """
    Fetch one blog post by id as a JSON-serializable dict, via redis.

    Cache entries (orm2json output as a JSON string) live for
    settings.CACHE_TIMEOUT seconds.
    """
    _cache = redis_connection()
    key = str("%s:blog:%s" % (APP, blog_id))
    bloginfo = _cache.get(key)
    new = False
    if not bloginfo:
        # Cache miss: load from the DB and repopulate the cache.
        session = create_session()
        bloginfo = session.query(BlogArticle).filter_by(id=blog_id).first()
        bloginfo = orm2json(bloginfo)
        _cache.set(key, json.dumps(bloginfo), settings.CACHE_TIMEOUT)
        new = True
        session.close()
    if not new:
        bloginfo = json.loads(bloginfo)
    return bloginfo
def delete_blog(blog_id):
    """Evict a blog post's cached entry from redis."""
    _cache = redis_connection()
    key = str("%s:blog:%s" % (APP, blog_id))
    _cache.delete(key)
def orm2json(orm):
    """Convert a SQLAlchemy result (instance or list of instances) into
    plain JSON-serializable dict(s).

    Underscore-prefixed attributes (including SQLAlchemy's internal
    state) are dropped and datetimes are rendered as
    'YYYY-MM-DD HH:MM:SS' strings.  Falsy input yields an empty dict.
    """
    def _to_dict(instance):
        result = {}
        if not instance:
            return result
        for name in instance.__dict__.keys():
            if name.startswith('_'):
                continue
            value = getattr(instance, name)
            if isinstance(value, datetime.datetime):
                result[name] = value.strftime('%Y-%m-%d %H:%M:%S')
            else:
                result[name] = value
        return result

    if isinstance(orm, list):
        return [_to_dict(item) for item in orm]
    return _to_dict(orm)
| Python | 112 | 25.758928 | 79 | /utils/user_cache.py | 0.58325 | 0.580581 |
gausszh/sae_site | refs/heads/master | # coding=utf8
"""
学web安全用到的一些页面
"""
from flask import Blueprint, render_template
from sae.storage import Bucket
from configs import settings
bp_security = Blueprint('security', __name__, url_prefix='/security')
# SAE storage bucket; put() runs at import time (presumably to ensure
# the bucket exists — confirm against the SAE storage API).
bucket = Bucket(settings.STORAGE_BUCKET_DOMAIN_NAME)
bucket.put()
@bp_security.route('/wanbo/video/')
def wanbo_video():
    """Render the video page used for the web-security exercises."""
    return render_template('security/wanbo_video.html')
gausszh/sae_site | refs/heads/master | # coding=utf8
import datetime
import urllib
from flask import Blueprint, request, jsonify, render_template, g
import flask_login
from sae.storage import Bucket
from models.blog import create_session, BlogArticle
from utils.blog_cache import set_draft_blog
from configs import settings
bp_blog = Blueprint('blog', __name__, url_prefix='/blog')
# SAE storage bucket for uploaded images; put() runs at import time
# (presumably to ensure the bucket exists — confirm against the SAE API).
bucket = Bucket(settings.STORAGE_BUCKET_DOMAIN_NAME)
bucket.put()
@bp_blog.route('/')
@bp_blog.route('/list/')
def list():
    """Render every blog post, newest first.

    NOTE(review): shadows the builtin `list`; it is also installed as
    the flask-login unauthorized handler in flask_app.create_app.  No
    is_active filter here, so soft-deleted posts still appear.
    """
    session = create_session()
    blogs = session.query(BlogArticle).order_by(BlogArticle.update_time.desc())\
        .all()
    session.close()
    return render_template('blog/blog_list.html', blogs=blogs)
@bp_blog.route('/delete/<int:blog_id>/', methods=['POST'])
@flask_login.login_required
def delete(blog_id):
    """Soft-delete a post (is_active=0); only allowed for its author."""
    session = create_session()
    blog = session.query(BlogArticle).filter_by(id=blog_id).first()
    if blog.create_by == g.user.id:
        blog.is_active = 0
        session.commit()
        session.close()
        return jsonify(ok=True, data={'blog_id': blog_id})
    session.close()
    # u'数据错误' = "data error"
    return jsonify(ok=False, reason=u'数据错误')
@bp_blog.route('/draft/', methods=['POST'])
@flask_login.login_required
def draft():
    """
    Save a not-yet-published article as a draft (cached with expiry).
    """
    form = request.form
    markdown = form.get('markdown', '')
    set_draft_blog(flask_login.current_user.id, markdown)
    return jsonify(ok=True)
@bp_blog.route('/edit/<int:blog_id>/', methods=['GET', 'POST'])
@bp_blog.route('/edit/', methods=['GET', 'POST'])
@flask_login.login_required
def edit(blog_id=0):
    """Show the editor (GET) or create/update a post (POST).

    POST expects 'markdown', 'title' and optionally 'blog_id' in the
    form; a missing/unknown blog_id creates a new post owned by the
    current user.  Returns JSON {ok, data: {blog_id}} on success.
    """
    if request.method == 'GET':
        if blog_id == 0:
            blog = None
        else:
            session = create_session()
            blog = session.query(BlogArticle).filter_by(id=blog_id).first()
            session.close()
        return render_template('blog/blog_edit.html', blog=blog)
    if request.method == 'POST':
        form = request.form
        markdown = form.get('markdown')
        title = form.get('title')
        blog_id = form.get('blog_id')
        # Both fields must be present and non-blank (the product of the
        # stripped lengths is zero when either one is empty).
        if markdown and title and (len(markdown.strip()) *
                len(title.strip()) > 0):
            session = create_session()
            now = datetime.datetime.now()
            # blog_id belong to this user
            # NOTE(review): ownership is not actually verified here —
            # any logged-in user can overwrite an existing post id.
            if blog_id:
                blog = session.query(BlogArticle).filter_by(id=blog_id).first()
            if not blog_id or not blog:
                blog = BlogArticle()
                blog.create_by = flask_login.current_user.id
                blog.create_time = now
                blog.is_active = 1
            blog.update_time = now
            blog.title = title
            blog.markdown = markdown
            session.add(blog)
            session.commit()
            blog_id = blog.id
            session.close()
            return jsonify(ok=True, data={'blog_id': blog_id})
        # u'数据错误' = "data error"
        return jsonify(ok=False, reason=u'数据错误')
@bp_blog.route('/view/<int:blog_id>/')
def view_blog(blog_id):
    """Render a single post; hide soft-deleted posts from inactive users.

    NOTE(review): is_active() is called as a method, matching the custom
    User model; flask-login's anonymous user may expose is_active as a
    property instead — confirm against the installed flask-login version.
    """
    session = create_session()
    query = session.query(BlogArticle).filter_by(id=blog_id)
    if not flask_login.current_user.is_active():
        query = query.filter_by(is_active=1)
    blog = query.first()
    session.close()
    return render_template('blog/blog_view.html', blog=blog)
@bp_blog.route('/files/', methods=['POST'])
@flask_login.login_required
def save_file():
    """
    Store uploaded images in the SAE bucket and return their URLs.

    Accepts both multipart file uploads (request.files) and remote
    image URLs passed as form fields; responds with a JSON list of
    {name, link} entries.
    """
    files_name = request.files.keys()
    ret = []
    for fn in files_name:
        # No security validation yet (e.g. verifying with PIL that the
        # payload really is an image).
        img_file = request.files.get(fn)
        bucket.put_object(fn, img_file)
        link = bucket.generate_url(fn)
        ret.append({'name': fn, 'link': link})
    http_files_link = request.form.keys()
    for fn in http_files_link:
        # NOTE(review): fetching arbitrary user-supplied URLs server-side
        # is an SSRF risk — consider validating/allow-listing targets.
        http_link = request.form.get(fn)
        img_file = urllib.urlopen(http_link)
        bucket.put_object(fn, img_file)
        link = bucket.generate_url(fn)
        ret.append({'name': fn, 'link': link})
    return jsonify(ok=True, data=ret)
| Python | 129 | 29.937984 | 80 | /views/blog.py | 0.580504 | 0.578807 |
gausszh/sae_site | refs/heads/master | #!/usr/bin/python
#coding=utf8
import datetime
from sqlalchemy import (
MetaData, Table, Column, Integer, BigInteger, Float, String, Text, DateTime,
ForeignKey, Date, UniqueConstraint)
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from models import sae_engine
from models import create_session
Base = declarative_base()
metadata = MetaData()
class BlogArticle(Base):
    """
    A blog post.  (Original docstring said "publish history log", which
    looks copied from another model.)  Stores the markdown source and an
    optional pre-rendered HTML column.
    """
    __tablename__ = 'blog_article'
    __table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
    id = Column(Integer, primary_key=True)
    title = Column(String(50))
    # markdown source; `html` is not populated by views/blog.py —
    # rendering appears to go through the md2html Jinja2 filter instead
    markdown = Column(Text)
    html = Column(Text)
    # User.id of the author
    create_by = Column(Integer, index=True, nullable=False)
    create_time = Column(DateTime, nullable=False)
    update_time = Column(DateTime, index=True, nullable=False,)
    # 1 = visible, 0 = soft-deleted (see views/blog.delete)
    is_active = Column(Integer, nullable=False, default=1)
if __name__ == '__main__':
    # One-off schema creation when run directly.
    Base.metadata.create_all(bind=sae_engine)
| Python | 37 | 26.135136 | 81 | /models/blog.py | 0.695219 | 0.690239 |
cgddrd/maartech-test | refs/heads/main | # MAARTech technical test submission.
# Author: Connor Goddard
# First Published: 2021-05-20
# Submission notes:
# - For this task, I've made two key assumptions: 1) we only need to support CSV file types; and 2) that it's a requirement to have the ORIGINAL/RAW data AS CONTAINED IN THE DATA FILES imported into the database tables.
#
# - I've made the decision NOT to transform the data and build new feature columns (e.g. combine the 'lat' and 'long' columns into a single GIS 'POINT' column) because in my experience,
# you would typically want to make sure the RAW data is imported 'as-is', and then apply such transformations across the 'raw' tables
# to curate new 'analytics' tables once the data is available in the database. This same reasoning led me to choose to NOT convert
# the hexadecimal representation of OSM tag values into plaintext. Again, this could be done as part of a downstream process, with the original data preserved.
#
# - I recognise that the data contained in the input files appears to be OpenStreetMap data, so it is possible that instead of connecting to and querying the database directly from Python,
# we could potentially make use of the official 'osm2pgsql' tool (https://osm2pgsql.org/) which could automate much of the table schema creation and unpacking. (This could even be called dynamically via a Python script.)
#
# - In terms of database credentials, in a production environment, we'd want to load the credentials in from a secure location at runtime (i.e. ideally from a secrets manager,
# but at the very least from a 'secure' configuration file - excluded from version control).
#
# - I could have used SQLAlchemy to provide the connection to the database (SQLAlchemy is a popular and well-established library for working with RDBMS databases in Python), however,
# because I wanted to take particular advantage of the 'COPY FROM' syntax supported by PostgresSQL, using SQL Alchemy would have been in some ways redundant, because I would have needed to
# access the underlying engine (psycopg2) in order to use the 'copy_expert()' function (i.e. it was more efficient just to import and use the psycopg2 library directly in this case).
#
# - I felt that building Python classes/objects in this situation was a little bit overkill, so kept everything contained inside a single script file with core functionality split out to dedicated functions.
# Obviously if the scope of the application was to grow (e.g. to parse and import different data file types), then abstracting certain logic (e.g. to load/parse these different file types) to dedicated
# class files would be a reasonable option.
#
# - In terms of evolving this application, I would like to add the ability to define the table schema directly from CSV header structure.
import click
from psycopg2 import connect, sql
from pathlib import Path
import yaml
import logging
# I'm a fan of using decorators for readability.
@click.command()
@click.option('--config', default='./config.yml', help='The path to the config file.')
def run(config):
    """Imports data from CSV files into a series of PostgreSQL tables (one table per file)."""
    logging.info('Application started.')
    # YAML is used for configuration in preference to Python's stock 'INI' support.
    settings = read_yaml_config(config)
    csv_files = list_files(settings['target_path'])
    connection = None
    try:
        # '**' flattens the mapping into keyword arguments for psycopg2.connect().
        connection = connect(**settings['db'])
        for csv_file in csv_files:
            import_file_to_database(csv_file, connection)
        logging.info('Import complete.')
    except Exception:
        logging.error('An error occurred whilst importing data files into the database', exc_info=1)
    finally:
        # Always release the connection, even after a failed import.
        if connection is not None:
            connection.close()
def read_yaml_config(config_path):
    """Load and return the YAML configuration at *config_path*.

    Raises:
        Exception: re-raised (after logging) when the file cannot be read or
        parsed. The original version swallowed the error and implicitly
        returned None, which made the caller fail later with a confusing
        TypeError when subscripting the config.
    """
    try:
        with open(config_path) as file:
            # We use safe_load() here to help prevent execution of any arbitrary code embedded in the YAML file.
            return yaml.safe_load(file)
    except Exception:
        logging.error('Failed to load YAML config file.', exc_info=1)
        raise
def list_files(search_folder: str):
    """Return the paths of all '*.csv' files in *search_folder*.

    The result is sorted by path so the import order is deterministic
    (Path.glob() yields entries in an arbitrary, OS-dependent order).
    """
    return sorted(Path(search_folder).glob("*.csv"))
def import_file_to_database(file_path: str, conn):
    """Create a table named after *file_path*'s stem (if needed) and bulk-load the CSV into it.

    Errors are logged rather than raised so that one bad file does not abort
    the remaining imports; the transaction is rolled back in that case.
    """
    file_name = Path(file_path).stem
    try:
        logging.info('Importing file {} into database table {}.'.format(file_path, file_name))
        with conn.cursor() as cur:
            # First, attempt to create the table if it doesn't already exist.
            # sql.Identifier() safely quotes the dynamic table name - passing a
            # plain str into sql.SQL(...).format() is not a Composable and raises.
            query = sql.SQL("""
                CREATE TABLE IF NOT EXISTS {table_name} (
                    osm_id INTEGER PRIMARY KEY,
                    area NUMERIC NOT NULL,
                    lon NUMERIC NOT NULL,
                    lat NUMERIC NOT NULL,
                    tags JSONB,
                    osm_type VARCHAR(25) NOT NULL,
                    p_tag_value TEXT,
                    city TEXT,
                    postcode TEXT,
                    address TEXT,
                    street TEXT,
                    has_way BOOLEAN NOT NULL,
                    shop_type TEXT,
                    derived_shared_area NUMERIC,
                    derived_way_area NUMERIC,
                    parent_way INTEGER,
                    shared_divisor INTEGER,
                    area_sq_foot NUMERIC NOT NULL
                )
                """).format(table_name=sql.Identifier(file_name))
            cur.execute(query)
            with open(file_path, 'r') as f:
                # Second, use the PgSQL 'COPY' feature to efficiently copy the contents of the CSV file into the table. (This can scale to millions of rows.) - https://www.postgresql.org/docs/current/sql-copy.html
                query = sql.SQL("""
                    COPY {table_name} FROM stdin WITH CSV HEADER
                    DELIMITER as ','
                    """).format(table_name=sql.Identifier(file_name))
                cur.copy_expert(sql=query, file=f)
        # commit() lives on the connection, not the cursor; without it the COPY
        # would be rolled back when the connection closes. The 'with' block above
        # already closes the cursor, so no manual cleanup is needed.
        conn.commit()
    except Exception:
        logging.error('Failed to import file {} into database table {}'.format(file_path, file_name), exc_info=1)
        # Leave the connection in a usable state for the next file.
        conn.rollback()
if __name__ == '__main__':
run() | Python | 144 | 45.541668 | 223 | /run.py | 0.64856 | 0.645426 |
ahawker/krustofsky | refs/heads/master | """
import.py
~~~~~~~~~
Run this script to convert social security popular baby names dataset to SQLite.
"""
import glob
import io
import os
import sqlite3
import sys
SCHEMA = """
CREATE TABLE IF NOT EXISTS names (
year integer,
name text,
sex text,
occurrences integer
);
CREATE INDEX IF NOT EXISTS names_year_idx ON names (year);
CREATE INDEX IF NOT EXISTS names_name_idx ON names (name);
CREATE INDEX IF NOT EXISTS names_sex_idx ON names (sex);
CREATE INDEX IF NOT EXISTS names_occurrences_idx ON names (occurrences);
"""
INSERT = """
INSERT OR IGNORE INTO names (
year,
name,
sex,
occurrences
) VALUES (
:year,
:name,
:sex,
:occurrences
);
"""
def data_generator():
    """Generator function that yields dicts for each line in each data file"""
    for path in glob.glob('data/*.txt'):
        # File names look like 'yobNNNN.txt'; the year is the stem minus 'yob'.
        stem = os.path.splitext(os.path.basename(path))[0]
        year = int(stem.strip('yob').lower())
        with io.open(path, 'r') as handle:
            print('Processing file {}'.format(path))
            for raw_line in handle:
                name, sex, occurrences = raw_line.strip().split(',')
                yield {
                    'year': year,
                    'name': name.lower(),
                    'sex': sex.lower(),
                    'occurrences': int(occurrences),
                }
def create_db(name):
    """Create Sqlite DB using SCHEMA"""
    # check_same_thread=False lets the handle be shared across threads;
    # executescript applies the whole CREATE TABLE / CREATE INDEX batch.
    connection = sqlite3.connect(name, check_same_thread=False, detect_types=sqlite3.PARSE_COLNAMES)
    connection.executescript(SCHEMA)
    return connection
def main(argv):
    """Convert directory of text files to SQLite database"""
    # argv[0] is the target database file path.
    database = create_db(argv[0])
    database.executemany(INSERT, data_generator())
    database.commit()
    database.close()
if __name__ == '__main__':
    # First CLI argument (after the script name) is the SQLite file to create.
    sys.exit(main(sys.argv[1:]))
| Python | 77 | 22.532467 | 92 | /import.py | 0.587748 | 0.584437 |
techgnosis/volca_beats_remap | refs/heads/master | import mido
# Volca Beats has ridiculous note mappings
# 36 - C2 - Kick
# 38 - D2 - Snare
# 43 - G2 - Lo Tom
# 50 - D3 - Hi Tom
# 42 - F#2 - Closed Hat
# 46 - A#2 - Open Hat
# 39 - D#2 - Clap
# 75 - D#5 - Claves
# 67 - G4 - Agogo
# 49 - C#3 - Crash
# Incoming note (sent by the controller) -> Volca Beats factory note, per the
# table above.
note_mapping = {
    48 : 36,
    49 : 38,
    50 : 43,
    51 : 50,
    52 : 42,
    53 : 46,
    54 : 39,
    55 : 75,
    56 : 67,
    57 : 49
}
mido.set_backend('mido.backends.rtmidi')
# Expose a virtual input port other software can send to, and forward the
# remapped messages to the hardware UM-ONE MIDI interface.
inport = mido.open_input('RemapInput', virtual=True)
outputs = mido.get_output_names()
um_one = next(x for x in outputs if 'UM-ONE' in x)
outport = mido.open_output(um_one, virtual=False)
# Blocks forever, remapping note events on channel 2 (0-indexed: 1) only.
for msg in inport:
    if msg.type in ['note_on','note_off']:
        # mido starts MIDI channels at 0
        if msg.channel == 1:
            if msg.note in note_mapping:
                new_note = note_mapping[msg.note]
                msg.note = new_note
                outport.send(msg)
| Python | 46 | 19.173914 | 52 | /remapper.py | 0.549569 | 0.471983 |
liuchao012/myPythonWeb | refs/heads/master | from django.apps import AppConfig
class ListsssConfig(AppConfig):
    # Django app configuration for the "listsss" app (referenced from INSTALLED_APPS).
    name = 'listsss'
| Python | 5 | 16.799999 | 33 | /listsss/apps.py | 0.752809 | 0.752809 |
liuchao012/myPythonWeb | refs/heads/master | from django.test import TestCase
from django.urls import resolve
from django.http import HttpRequest
from django.template.loader import render_to_string
from django.utils.html import escape
from listsss.models import Item, List
from listsss.views import home_page
import unittest
# Create your tests here.
class HomePageTest(TestCase):
    """Unit tests for the home page URL routing and template rendering."""
    def test_root_url_resolves_to_home_page_view(self):
        print("第x个测试通过了")
        found = resolve('/')
        self.assertEqual(found.func, home_page)
    def test_home_page_return_correct_html(self):
        request = HttpRequest()
        resp = home_page(request)
        # Use render_to_string (a built-in Django helper) to build the expected
        # HTML string and compare it with the rendered response.
        #### NOTE: the exact-equality assertion below cannot pass as-is - the
        #### CSRF token differs between the two renderings - so it is commented out.
        expected_html = render_to_string('listsss/home.html', request=request)
        # .decode() converts the response bytes to a unicode string
        # self.assertEqual(resp.content.decode(), expected_html)
        # self.assertTrue(resp.content.startswith(b'<html>'))
        self.assertIn(b"<title>To-Do lists</title>", resp.content)
        self.assertTrue(resp.content.endswith(b'</html>'))
    # def test_home_page_only_saves_items_when_necessary(self):
    #     request = HttpRequest()
    #     home_page(request)
    #     self.assertEqual(Item.objects.count(), 0)
    # This test case was dropped along the way:
    # def test_home_page_displays_all_list_items(self):
    #     Item.objects.create(text='itemey 1')
    #     Item.objects.create(text='itemey 2')
    #
    #     req = HttpRequest()
    #     rep = home_page(req)
    #
    #     self.assertIn('itemey 1', rep.content.decode())
    #     self.assertIn('itemey 2', rep.content.decode())
class ListViewTest(TestCase):
    """Tests for the per-list page: item display, template use and POST handling."""
    # def test_home_page_displays_all_list_items(self):
    def test_home_page_displays_only_items_for_that_list(self):
        # list_ = List.objects.create()
        # Item.objects.create(text='itemey 1', list=list_)
        # Item.objects.create(text='itemey 2', list=list_)
        correct_list = List.objects.create()
        Item.objects.create(text='itemey 1', list=correct_list)
        Item.objects.create(text='itemey 2', list=correct_list)
        other_list = List.objects.create()
        Item.objects.create(text='other itemey 1', list=other_list)
        Item.objects.create(text='other itemey 2', list=other_list)
        # resp = self.client.get('/list/the-only-list-in-the-world/')
        resp = self.client.get('/list/%d/' % (correct_list.id,))
        # Only the requested list's items should be rendered.
        self.assertContains(resp, 'itemey 1')
        self.assertContains(resp, 'itemey 2')
        self.assertNotContains(resp, 'other itemey 1')
        self.assertNotContains(resp, 'other itemey 2')
    def test_uses_list_template(self):
        # resp = self.client.get('/list/the-only-list-in-the-world/')
        list_ = List.objects.create()
        resp = self.client.get('/list/%d/' % (list_.id,))
        self.assertTemplateUsed(resp, 'listsss/list.html')
    def test_passes_correct_list_to_template(self):
        other_list = List.objects.create()
        correct_list = List.objects.create()
        resp = self.client.get('/list/%d/' % (correct_list.id,))
        self.assertEqual(resp.context['list'], correct_list)
    def test_can_save_a_POST_to_an_existing_list(self):
        other_list = List.objects.create()
        correct_list = List.objects.create()
        self.client.post('/list/%d/' % (correct_list.id,),
                         data={'item_text': 'A new item for an existiong list'})
        self.assertEqual(Item.objects.count(), 1)
        new_item = Item.objects.first()
        self.assertEqual(new_item.text, 'A new item for an existiong list')
        self.assertEqual(new_item.list, correct_list)
    def test_POST_redirects_to_list_view(self):
        other_list = List.objects.create()
        correct_list = List.objects.create()
        resp = self.client.post('/list/%d/' % (correct_list.id,),
                                data={'item_text': 'A new item for an existiong list'})
        self.assertRedirects(resp, '/list/%d/' % (correct_list.id,))
    def test_validation_errors_end_up_on_lists_page(self):
        # An empty item re-renders the list page with the (escaped) error text.
        list_ = List.objects.create()
        resp = self.client.post('/list/%d/'%(list_.id,), data={"item_text":''})
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'listsss/list.html')
        ex_error=escape('You cant have an empty list item')
        self.assertContains(resp, ex_error)
class NewListTest(TestCase):
    """Tests for creating a brand-new list via POST to /list/new."""
    def test_saving_a_POST_request(self):
        self.client.post('/list/new', data={'item_text': 'A new list item'})
        self.assertEqual(Item.objects.count(), 1)
        new_item = Item.objects.first()
        self.assertEqual(new_item.text, 'A new list item')
        # requ = HttpRequest()
        # requ.method = 'POST'
        # requ.POST['item_text'] = 'A new list item'
        #
        # rep = home_page(requ)
        #
        # self.assertEqual(Item.objects.count(), 1)
        # new_item = Item.objects.first()
        # self.assertEqual(new_item.text, 'A new list item')
        #
        # # The part below was split out into a separate unit test
        # # self.assertIn('A new list item', rep.content.decode())
        # # the page redirects after the POST request
        # # self.assertEqual(rep.status_code, 302)
        # # self.assertEqual(rep['location'], '/')
    def test_redirects_after_POST(self):
        rep = self.client.post('/list/new', data={'item_text': 'A new list item'})
        # self.assertEqual(rep.status_code, 302)
        new_list = List.objects.first()
        self.assertRedirects(rep, '/list/%d/' % (new_list.id,))
        # assertRedirects is Django's built-in redirect check
        # self.assertRedirects(rep, '/list/the-only-list-in-the-world/')
        # This section was reworked:
        # requ = HttpRequest()
        # requ.method = 'POST'
        # requ.POST['item_text'] = 'A new list item'
        #
        # rep = home_page(requ)
        # self.assertEqual(rep.status_code, 302)
        # self.assertEqual(rep['location'], '/list/the-only-list-in-the-world/')
    def test_validation_error_are_sent_back_to_home_page_template(self):
        resp = self.client.post('/list/new', data={'item_text': ''})
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'listsss/home.html')
        ex_error = escape("You cant have an empty list item")
        print(resp.content.decode())
        self.assertContains(resp, ex_error)
    def test_invalid_list_items_arent_saved(self):
        # An empty submission must create neither a List nor an Item row.
        self.client.post('/list/new', data={"item_text": ''})
        self.assertEqual(List.objects.count(), 0)
        self.assertEqual(Item.objects.count(), 0)
| Python | 161 | 39.857143 | 87 | /test/listsss/tests_views.py | 0.620155 | 0.614835 |
liuchao012/myPythonWeb | refs/heads/master | # -*- coding: utf-8 -*-
# @Time : 2018/6/25 20:15
# @Author : Mat
# @Email : mat_wu@163.com
# @File : functional_tests1.py
# @Software: PyCharm
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from django.test import LiveServerTestCase
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
import unittest
from unittest import skip
class FunctionalTest(StaticLiveServerTestCase):
    """Shared base class for the Selenium functional tests."""
    # Not sure why, but adding the two classmethods below caused errors,
    # so they remain commented out.
    # @classmethod
    # def setUpClass(cls):
    #     pass
    #
    # @classmethod
    # def tearDownClass(cls):
    #     pass
    def setUp(self):
        # A fresh browser per test keeps sessions/cookies isolated.
        self.driver = webdriver.Firefox()
        self.driver.implicitly_wait(3)
    def tearDown(self):
        self.driver.quit()
    def check_for_row_in_list_table(self, row_text):
        # Helper: assert that *row_text* appears as a row of the to-do table.
        table = self.driver.find_element_by_id('id_list_table')
        rows = table.find_elements_by_tag_name('tr')
        self.assertIn(row_text, [row.text for row in rows])
| Python | 37 | 25.405405 | 71 | /functional_tests/base.py | 0.669734 | 0.655419 |
liuchao012/myPythonWeb | refs/heads/master | from django.test import TestCase
from django.urls import resolve
from django.http import HttpRequest
from django.template.loader import render_to_string
from listsss.models import Item, List
from listsss.views import home_page
import unittest
from django.core.exceptions import ValidationError
class ListAndItemModelsTest(TestCase):
    """Model-layer tests for List/Item persistence, validation and URLs."""
    def test_saving_and_retrieving_items(self):
        list_ = List()
        list_.save()
        first_item = Item()
        first_item.text = 'The first (ever) list item'
        first_item.list = list_
        first_item.save()
        second_item = Item()
        second_item.text = 'Item the second'
        second_item.list = list_
        second_item.save()
        saved_liat = List.objects.first()
        self.assertEqual(saved_liat, list_)
        saved_items = Item.objects.all()
        self.assertEqual(saved_items.count(), 2)
        first_save_item = saved_items[0]
        second_save_item = saved_items[1]
        self.assertEqual(first_save_item.text, 'The first (ever) list item')
        self.assertEqual(first_save_item.list, list_)
        self.assertEqual(second_save_item.text, 'Item the second')
        self.assertEqual(second_save_item.list, list_)
    def test_cannot_save_empty_list_items(self):
        list_=List.objects.create()
        item = Item(list= list_, text='')
        with self.assertRaises(ValidationError):
            item.save()
            # full_clean() runs field validation, which rejects blank text.
            item.full_clean()
    def test_get_absolute_url(self):
        list_ = List.objects.create()
        self.assertEqual(list_.get_absolute_url(), '/list/%d/'%(list_.id,))
| Python | 47 | 33.063831 | 76 | /test/listsss/tests_models.py | 0.645846 | 0.643973 |
liuchao012/myPythonWeb | refs/heads/master | from django.shortcuts import render, redirect # redirect是python的重定向方法
from django.http import HttpResponse
from listsss.models import Item, List
from django.core.exceptions import ValidationError
# Create your views here.
def home_page(request):
    """Render the home page with an empty new-item form."""
    # return HttpResponse("<html><title>To-Do lists</title></html>")
    # if (request.method=='POST'):
    #     return HttpResponse(request.POST['item_text'])
    # Now that a dedicated list page exists, the POST handling below is obsolete:
    # if (request.method == 'POST'):
    #     new_item_text = request.POST['item_text']
    #     Item.objects.create(text=new_item_text)
    #     return redirect('/list/the-only-list-in-the-world/')
    ## Second approach
    # else:
    #     new_item_text = ''
    # return render(request, 'listsss/home.html', {'new_item_text':new_item_text})
    ## First approach
    # item = Item()
    # item.text = request.POST.get('item_text', '')
    # item.save()
    # return render(request, 'listsss/home.html', {'new_item_text':request.POST.get('item_text','')})
    # The home page no longer needs to display any items:
    # items_list = Item.objects.all()
    # return render(request, 'listsss/home.html', {'items_list': items_list})
    return render(request, 'listsss/home.html')
def view_list(request, list_id):
    """Display a single list; on POST, validate and append a new item to it.

    Renders listsss/list.html with 'error' set when validation fails so the
    template can surface the message.
    """
    error = None
    list_ = List.objects.get(id=list_id)
    if request.method == 'POST':
        # Build the item in memory and validate BEFORE saving. The previous
        # Item.objects.create() + delete()-on-failure approach persisted the
        # invalid row to the database before full_clean() could reject it.
        item = Item(text=request.POST['item_text'], list=list_)
        try:
            item.full_clean()
            item.save()
            # redirect(model_instance) uses List.get_absolute_url().
            return redirect(list_)
        except ValidationError:
            error = 'You cant have an empty list item'
    return render(request, 'listsss/list.html', {'list': list_, 'error': error})
def new_list(request):
    """Create a new list from the first POSTed item and redirect to it.

    If the item fails validation, the half-created list is removed and the
    home page is re-rendered with an error message.
    """
    list_ = List.objects.create()
    # Validate BEFORE saving (see view_list): Item.objects.create() would
    # persist the invalid row before full_clean() could reject it, which is
    # why the original needed a delete() workaround.
    item = Item(text=request.POST['item_text'], list=list_)
    try:
        item.full_clean()
        item.save()
    except ValidationError:
        # Roll back the empty list so invalid submissions leave no trace.
        list_.delete()
        error = 'You cant have an empty list item'
        return render(request, 'listsss/home.html', {"error": error})
    # Named URL avoids hard-coding the path.
    return redirect('view_list', list_.id)
def add_item(request, list_id):
    # Append a POSTed item to an existing list, then redirect back to that
    # list's page. NOTE(review): appears superseded by the POST branch in
    # view_list - confirm whether any URLconf still routes here.
    list_ = List.objects.get(id=list_id)
    Item.objects.create(text=request.POST['item_text'], list=list_)
    return redirect('/list/%d/' % (list_.id,))
class home_page_class():
    # Unused placeholder class - no references visible here; consider removing.
    pass
| Python | 78 | 32.756409 | 101 | /listsss/views.py | 0.623529 | 0.623529 |
liuchao012/myPythonWeb | refs/heads/master | # -*- coding: utf-8 -*-
# @Time : 2018/6/25 20:15
# @Author : Mat
# @Email : mat_wu@163.com
# @File : functional_tests1.py
# @Software: PyCharm
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from django.test import LiveServerTestCase
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
import unittest
from unittest import skip
from .base import FunctionalTest
class NewVisitorTest(FunctionalTest):
    """End-to-end story: a visitor creates a list and it stays private."""
    def test_can_start_a_list_and_retrieve_it_later(self):
        # Once the class inherits LiveServerTestCase we no longer hit the real
        # deployed localhost; Django provides self.live_server_url instead.
        # self.driver.get("http://localhost:8000")
        self.driver.get(self.live_server_url)
        # The page title and header mention 'To-Do'
        self.assertIn('To-Do', self.driver.title)
        header_text = self.driver.find_element_by_tag_name('h1').text
        self.assertIn('To-Do', header_text)
        # The app invites the user to enter a to-do item
        inputbox = self.driver.find_element_by_id('id_new_item')
        self.assertEqual(inputbox.get_attribute('placeholder'), 'Enter a to-do item')
        # Type 'Buy peacock feathers' into the input box
        inputbox.send_keys('Buy peacock feathers')
        # After pressing Enter the page updates and the list shows
        # '1:Buy peacock feathers'
        inputbox.send_keys(Keys.ENTER)
        edith_list_url = self.driver.current_url
        self.assertRegex(edith_list_url, '/list/.+?')
        table = self.driver.find_element_by_id('id_list_table')
        rows = table.find_elements_by_tag_name('tr')
        # self.assertTrue(any(row.text == '1:Buy peacock feathers' for row in rows), 'New to-do item did not appear in table - - its text was:\n%s' % (table.text))
        # The page still shows a text box for further items;
        # enter 'Use peacock feathers to make a fly'
        inputbox = self.driver.find_element_by_id('id_new_item')
        inputbox.send_keys('Use peacock feathers to make a fly')
        inputbox.send_keys(Keys.ENTER)
        table = self.driver.find_element_by_id('id_list_table')
        rows = table.find_elements_by_tag_name('tr')
        self.assertIn('1:Buy peacock feathers', [row.text for row in rows])
        self.assertIn('2:Use peacock feathers to make a fly', [row.text for row in rows])
        ## Start a brand-new browser so cookies cannot leak between users
        # and the first user's list stays private.
        self.driver.quit()
        # A second user visiting the site must not see the first user's items
        self.driver = webdriver.Firefox()
        self.driver.get(self.live_server_url)
        page_text = self.driver.find_element_by_tag_name('body').text
        self.assertNotIn('Buy peacock feathers', page_text)
        self.assertNotIn('make a fly', page_text)
        # The second user enters a new item, creating a new list
        inputbox = self.driver.find_element_by_id('id_new_item')
        inputbox.send_keys('Buy milk')
        inputbox.send_keys(Keys.ENTER)
        # They get their own unique URL
        francis_list_url = self.driver.current_url
        self.assertRegex(edith_list_url, '/list/.+?')
        self.assertNotEquals(francis_list_url, edith_list_url)
        # Still no trace of the first user's list,
        # but the second user's own item is present
        page_text = self.driver.find_element_by_tag_name('body').text
        self.assertNotIn('Buy peacock feathers', page_text)
        self.assertIn('Buy milk', page_text)
        # self.fail('Finisth the test')
| Python | 83 | 37.518074 | 163 | /functional_tests/test_simple_list_creation.py | 0.654676 | 0.647795 |
liuchao012/myPythonWeb | refs/heads/master | # -*- coding: utf-8 -*-
# @Time : 2018/6/25 20:15
# @Author : Mat
# @Email : mat_wu@163.com
# @File : functional_tests1.py
# @Software: PyCharm
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from django.test import LiveServerTestCase
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
import unittest
from unittest import skip
from .base import FunctionalTest
class LayoutAndStylingTest(FunctionalTest):
    """Smoke-tests that the CSS is loaded by checking element centring."""
    def test_layout_and_styling(self):
        self.driver.get(self.live_server_url)
        self.driver.set_window_size(1024, 768)
        # The new-item input box is horizontally centred on the home page
        # (512 is half the 1024px window width; delta allows rendering slack).
        inputbox = self.driver.find_element_by_id('id_new_item')
        self.assertAlmostEqual(inputbox.location['x'] + inputbox.size['width'] / 2, 512, delta=10)
        # After saving an item, the input box on the list page is centred too
        inputbox.send_keys('testing')
        inputbox.send_keys(Keys.ENTER)
        inputbox = self.driver.find_element_by_id('id_new_item')
        self.assertAlmostEqual(inputbox.location['x'] + inputbox.size['width'] / 2, 512, delta=10)
| Python | 30 | 33.933334 | 98 | /functional_tests/tests_layout_and_styling.py | 0.696565 | 0.666031 |
liuchao012/myPythonWeb | refs/heads/master | # -*- coding: utf-8 -*-
# @Time : 2018/6/25 20:15
# @Author : Mat
# @Email : mat_wu@163.com
# @File : functional_tests1.py
# @Software: PyCharm
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from django.test import LiveServerTestCase
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
import unittest
from unittest import skip
from .base import FunctionalTest
class ItemValidationTest(FunctionalTest):
    """Functional test: submitting empty items shows a validation error."""
    def test_cannot_add_empty_list_items(self):
        # Submitting an empty item on the home page shows the error message.
        self.driver.get(self.live_server_url)
        self.driver.find_element_by_id('id_new_item').send_keys('\n')
        error = self.driver.find_element_by_css_selector('.has-error')
        self.assertEqual(error.text, "You cant have an empty list item")
        # A real item still goes through afterwards.
        self.driver.find_element_by_id('id_new_item').send_keys('Buy milk\n')
        self.check_for_row_in_list_table('1:Buy milk')
        # An empty submit on the list page errors too, keeping the saved item.
        self.driver.find_element_by_id('id_new_item').send_keys('\n')
        self.check_for_row_in_list_table('1:Buy milk')
        error = self.driver.find_element_by_css_selector('.has-error')
        self.assertEqual(error.text, "You cant have an empty list item")
        self.driver.find_element_by_id('id_new_item').send_keys('Buy tea\n')
        self.check_for_row_in_list_table('1:Buy milk')
        self.check_for_row_in_list_table('2:Buy tea')
        # Deliberate TDD marker: the scenario is intentionally unfinished.
        self.fail("write me!")
| Python | 35 | 38.714287 | 77 | /functional_tests/tests_list_item_validation.py | 0.680576 | 0.668345 |
liuchao012/myPythonWeb | refs/heads/master | # -*- coding: utf-8 -*-
# @Time : 2018/6/28 17:06
# @Author : Mat
# @Email : mat_wu@163.com
# @File : __init__.py.py
# @Software: PyCharm
'''
functional_tests,中的文件需要已tests开头系统命令才能读取到测试用例并执行测试
测试执行命令python manage.py test functional_tests,来完成功能测试
如果执行 python manage.py test 那么django 将会执行 功能测试和单元测试
如果想只运行单元测试则需要执行固定的app ,python manage.py test listsss
''' | Python | 19 | 18.052631 | 52 | /functional_tests/__init__.py | 0.700831 | 0.66759 |
FernandoBontorin/spark-optimization-features | refs/heads/master | from airflow import DAG
from airflow.operators.dummy import DummyOperator
from airflow.providers.apache.spark.operators.spark_submit import SparkSubmitOperator
from airflow.utils.dates import days_ago
fraud_features_jar = "/tmp/applications/spark-optimization-features-assembly-0.1.0-SNAPSHOT.jar"
sparklens_jar = "https://repos.spark-packages.org/qubole/sparklens/0.3.2-s_2.11/sparklens-0.3.2-s_2.11.jar"
# All four Spark jobs share the same cluster sizing, Sparklens instrumentation
# and input file list; only the task identity, main class, output path and the
# default parallelism differ, so a single factory function builds them instead
# of four near-identical ~30-line operator literals.
INPUT_DATAFRAMES = (
    "file:///tmp/data/fraudTest.csv,file:///tmp/data/fraudTrain.csv,file:///tmp/data/fraudTrain.csv,"
    "file:///tmp/data/fraudTrain.csv,file:///tmp/data/fraudTrain.csv,file:///tmp/data/fraudTrain.csv"
)


def _fraud_spark_task(task_id, name, java_class, output, parallelism):
    """Build a SparkSubmitOperator for one fraud-features job.

    Args:
        task_id: Airflow task id.
        name: Spark application name.
        java_class: fully-qualified main class inside the assembly jar.
        output: destination path passed as the job's --output argument.
        parallelism: value for spark.default.parallelism.
    """
    return SparkSubmitOperator(
        task_id=task_id,
        conn_id="spark.default",
        name=name,
        application=fraud_features_jar,
        conf={
            "spark.default.parallelism": parallelism,
            "spark.dynamicAllocation.enabled": "false",
            "spark.network.timeout": 360000,
            "spark.shuffle.service.enabled": "false",
            "spark.sql.autoBroadcastJoinThreshold": -1,
            "spark.port.maxRetries": 10,
            "spark.yarn.maxAppAttempts": 1,
            "spark.executor.extraJavaOptions": "-XX:+UseG1GC",
            # Sparklens listener records profiling data for later analysis.
            "spark.extraListeners": "com.qubole.sparklens.QuboleJobListener",
            "spark.sparklens.data.dir": "/tmp/data/history/sparklens",
        },
        jars=sparklens_jar,
        num_executors=1,
        executor_cores=1,
        executor_memory="512m",
        driver_memory="1G",
        java_class=java_class,
        application_args=[
            "--dataframes",
            INPUT_DATAFRAMES,
            "--output",
            output,
        ],
    )


with DAG(dag_id='spark_optimization_features', default_args={'owner': 'Airflow'}, schedule_interval=None,
         start_date=days_ago(1), tags=['fraud_features_set'], catchup=False, concurrency=1, max_active_runs=1) as dag:
    start = DummyOperator(task_id="start")
    book_fraud = _fraud_spark_task(
        task_id="book_fraud",
        name="Book Fraud",
        java_class="com.github.fernandobontorin.jobs.FraudBookProcessor",
        output="file:///tmp/data/fraud_book_features",
        parallelism=200,
    )
    # Same job as book_fraud, with spark.default.parallelism tuned down to 1.
    book_fraud_optimized = _fraud_spark_task(
        task_id="book_fraud_optimized",
        name="Book Fraud Optimized",
        java_class="com.github.fernandobontorin.jobs.FraudBookProcessor",
        output="file:///tmp/data/fraud_book_features",
        parallelism=1,
    )
    aggregation_fraud = _fraud_spark_task(
        task_id="aggregation_fraud",
        name="Agg Fraud Set",
        java_class="com.github.fernandobontorin.jobs.AggregationProcessor",
        output="file:///tmp/data/aggregation_fraud",
        parallelism=200,
    )
    aggregation_fraud_par = _fraud_spark_task(
        task_id="aggregation_fraud_par",
        name="Agg Fraud Set Par",
        java_class="com.github.fernandobontorin.jobs.ParAggregationProcessor",
        output="file:///tmp/data/aggregation_fraud_par",
        parallelism=200,
    )
    end = DummyOperator(task_id="end")
    # Sequential book jobs, then the two aggregation jobs fan out before 'end'.
    start >> book_fraud >> book_fraud_optimized >> (aggregation_fraud, aggregation_fraud_par) >> end
| Python | 144 | 42.138889 | 118 | /airflow/dags/spark_optimization_features.py | 0.606534 | 0.591085 |
wuljchange/interesting_python | refs/heads/master | import heapq
class PriorityQueue:
    """Max-priority queue built on heapq (which provides a min-heap).

    Priorities are stored negated so the largest priority pops first; a
    monotonically increasing counter breaks ties in insertion (FIFO) order.
    """

    def __init__(self):
        self._heap = []
        self._counter = 0

    def push(self, priority, item):
        entry = (-priority, self._counter, item)
        heapq.heappush(self._heap, entry)
        self._counter += 1

    def pop(self):
        _, _, item = heapq.heappop(self._heap)
        return item
class Item:
    """Trivial wrapper whose repr is simply the wrapped name."""

    def __init__(self, name):
        # The stored name doubles as the object's repr.
        self.name = name

    def __repr__(self):
        return self.name
if __name__ == "__main__":
nums = [1, 5, 2, 4, 3]
a = heapq.nlargest(3, nums)
print(a)
b = heapq.nsmallest(3, nums)
print(b)
print(type(b))
# 对集合进行堆排序放入列表中,返回值为 None
c = heapq.heapify(nums)
print(c)
print(nums)
nums2 = [1, 5, 2, 4, 3]
# heappop,heappush
d = heapq.heappop(nums2)
print(d)
e = heapq.heappop(nums2)
print(e)
print(nums2)
heapq.heappush(nums2, 1)
print(nums2)
f = heapq.heappop(nums2)
print(f)
# deque 保留最后插入的 N 个元素,返回值是可迭代对象
from collections import deque
q = deque(maxlen=3)
q.append(1)
q.appendleft(2)
print(q)
q.appendleft(3)
q.appendleft(4)
print(q)
q.append(5)
print(type(q))
a, *b, c = q
print(a, b, c)
# sorted 排序可迭代对象,通过切片,左闭右开,切记!
nums3 = [1, 5, 3, 2]
print(sorted(nums3)[1:])
# re 模块
t = 'asdf fjdk; afed, fjek,asdf, foo'
import re
# 多个分隔符,不保留分隔符分组
f1 = re.split(r'[;,\s]\s*', t)
# 多个分隔符,保留分隔符分组
f2 = re.split(r'(;|,|\s)\s*', t)
# 多个分隔符,不保留分隔符分组
f3 = re.split(r'(?:;|,|\s)\s*', t)
print(f1)
print(f2)
print(f3)
| Python | 74 | 20.027027 | 67 | /part-struct/test-heapq.py | 0.537275 | 0.510283 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2020-03-07 18:46
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test17.py
# ----------------------------------------------
def test():
    # 'global' makes the assignment below rebind the module-level name A
    # instead of creating a function-local variable.
    global A
    A = 1
    print(A)
if __name__ == "__main__":
# input 函数与用户交互使用
# 解释一下 python 中 pass 的作用。
# 是空语句,是为了保持程序结构的完整性。不做任何事情,一般用做占位语句。
# is == 的区别,== 是比较两个对象的 value 值是否相等,is 判断两个对象的 id 是否相等
# python 对象包含3个基本要素,value id type
# python 中的作用域,global,nonlocal 语句,全局作用域,在函数内部对函数外部的非全局变量的使用
# 三元运算符的用法
b = "1"
a = "0" if b == "0" else "1"
print(a)
# enumerate 模块,遍历会带上索引
for i, v in enumerate(range(1, 11)):
print(i, v)
# python 中的标准模块,functools collections logging
test()
A = 2
print(A)
# 断言成功继续往下执行,失败就报异常信息
assert 1 == 1
print("end")
# dir 用于查看对象的 属性和方法
a = [1, 2, [1, 2]]
b = a
import copy
c = copy.copy(a)
d = copy.deepcopy(a)
a[2].append(3)
print(a, b , c, d)
data = [1, 2, 3, 4]
print(data[::-1])
d = [1, 2, 3, 4]
e = d
print(id(d), id(e)) | Python | 54 | 20.24074 | 63 | /part-interview/test17.py | 0.486911 | 0.450262 |
wuljchange/interesting_python | refs/heads/master | from ruamel.yaml import YAML
if __name__ == "__main__":
# yaml文件解析
with open('deployments.yaml') as fp:
content = fp.read()
yaml = YAML()
print(content)
content = yaml.load_all(content)
print(type(content))
data = []
for c in content:
data.append(c)
print(data[0])
c = data[0]
tmp = c['spec']['template']['spec']['containers'][0]['args'][2]
c['spec']['template']['spec']['containers'][0]['args'][2] = tmp.format('http')
data[0] = c
content = (d for d in data)
print(content)
with open('new.yaml', 'w') as f:
yaml.dump_all(content, f)
| Python | 23 | 26.130434 | 82 | /part-yaml/test-yaml.py | 0.549679 | 0.538462 |
wuljchange/interesting_python | refs/heads/master | import io
if __name__ == "__main__":
s = io.StringIO()
s_byte = io.BytesIO()
print('test', file=s, end="\t")
s_byte.write(b'bytes')
print("new")
print(s.getvalue())
print(s_byte.getvalue())
| Python | 11 | 19 | 35 | /part-text/test-iofile.py | 0.531818 | 0.531818 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2020-03-01 13:05
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test06.py
# ----------------------------------------------
import json
from datetime import datetime
from json import JSONEncoder
from functools import wraps
class Man:
    """Simple value object holding a name and an age."""

    def __init__(self, name, age):
        self.name = name
        self.age = age
class ComplexEncoder(JSONEncoder):
    """JSONEncoder that serialises datetime values as 'YYYY-MM-DD HH:MM:SS'."""

    def default(self, o):
        # Anything that is not a datetime falls back to the base encoder
        # (which raises TypeError for unsupported types).
        if not isinstance(o, datetime):
            return super(ComplexEncoder, self).default(o)
        return o.strftime("%Y-%m-%d %H:%M:%S")
def bb(n: int):
    """Return a closure that multiplies its argument by *n*."""
    def multi(args):
        # n is captured from the enclosing scope.
        return args * n
    return multi
# When defining a decorator, add functools.wraps so the wrapped function keeps
# its original __name__ and docstring.
def dt(func):
    """Decorator that forwards all arguments to *func* unchanged.

    Fixes the original implementation, which called func(args) - passing the
    positional-argument tuple as a single parameter and silently dropping
    keyword arguments.
    """
    @wraps(func)
    def new(*args, **kwargs):
        return func(*args, **kwargs)
    return new
if __name__ == "__main__":
# 交换两个变量的值,可以直接赋值
a, b = 1, 2
print(a, b)
print(id(a), id(b))
a, b = b, a
print(a, b)
print(id(a), id(b))
# read,readline,readlines
# read 是直接读取整个文件内容
# readline 是读取文件的一行内容,从最开始读起
# readlines 是读取文件所有内容,按行分隔成 list,会把换行符也带上
with open('test.txt', 'r') as f:
# r1 = f.read()
# print("r1 "+r1)
# r2 = f.readline()
# print("r2 "+r2)
r3 = f.readlines()
print(r3)
# json 序列化支持的数据类型有哪些
# 基本上 python3 的基本数据类型都支持
d1 = {"d1": 1}
d2 = {"d2": "2"}
d3 = {"d3": dict()}
d4 = {"d4": list()}
d5 = {"d5": tuple()}
d6 = {"d6": True}
print(json.dumps(d1))
# json 序列化对象支持 datetime 对象,定义一个函数或者类,把 datetime 对象转换成字符串即可
# 一般自己定义的类是有 self.__dict__ 方法的
m = Man("test", 24)
d7 = {"d7": m}
print(json.dumps(d7, default=lambda obj: obj.__dict__))
d8 = {"d8": datetime.now()}
print(json.dumps(d8, cls=ComplexEncoder))
# json 序列化的时候,遇到中文会默认转换成 unicode,要求让他保留中文格式
d9 = {"hello": "你好"}
print(json.dumps(d9))
print(json.dumps(d9, ensure_ascii=False))
# 合并文件信息,按顺序排列
with open('test1.txt', 'r') as f1:
t1 = f1.read()
with open('test2.txt', 'r') as f2:
t2 = f2.read()
print("t1 ", t1)
print("t2 ", t2)
# 字符串属于可迭代对象,sorted 过后返回一个 list
t = sorted(t1 + t2)
print("t ", "".join(t))
# 当前日期计算函数
dt1 = "20190530"
import datetime
dt1 = datetime.datetime.strptime(dt1, "%Y%m%d")
print(dt1)
dt2 = dt1 + datetime.timedelta(days=5)
print(dt2.strftime("%Y%m%d"))
import arrow
dt1 = "2019-05-30"
dt1 = arrow.get(dt1)
print(dt1)
dt2 = dt1.shift(days=+5)
print(dt2.isoformat())
# 1 行代码实现 1-100 之间的偶数
# range 方法是左闭右开
t = [i for i in range(1, 100) if i % 2 == 0]
print(t)
# with 语句的作用,用作上下文管理器,一般用于文件读写,方式没有及时关闭文件
# 如果一个对象有 self.__enter__(self) 和 self.__exit__(self) 方法的话,可以用 with 做上下文管理器
# python 计算一个文件中大写字母的个数
with open('test1.txt', 'r') as f:
t = f.read()
print(t)
l = [i for i in t if "A" <= i <= "Z"]
print(l)
print(len(l)) | Python | 117 | 24.820513 | 78 | /part-interview/test06.py | 0.537748 | 0.501324 |
wuljchange/interesting_python | refs/heads/master | from urllib.request import urlopen
def urltemplate(template):
def opener(**kwargs):
return template.format_map(kwargs)
# return urlopen(template.format_map(kwargs))
return opener
if __name__ == "__main__":
url = urltemplate('http://www.baidu.com?name={name}&age={age}')
print(url)
test = 'http://www.kingsoft.com?name={name}&age={age}'
s1 = test.format_map({'name': 'mac', 'age': 23})
print(s1)
s = url(name='Alex', age=23)
print(s) | Python | 18 | 26.166666 | 67 | /part-data/test-closepackage.py | 0.610656 | 0.598361 |
wuljchange/interesting_python | refs/heads/master | import sqlite3
if __name__ == "__main__":
data = [
(1, 2, 3),
(2, 3, 4),
]
s = sqlite3.connect('database.db')
# 给数据库建立游标,就可以执行sql查询语句了
db = s.cursor()
db.execute('create table wulj (name, number, rate)')
print(db)
s.commit()
db.executemany('insert into wulj (?,?,?)', data)
s.commit()
for row in db.execute('select * from wulj'):
print(row)
number = 10
# 用户输入参数用于交互查询,?代表占位符
for row in db.execute('select * from wulj where num > ?', (number,)):
print(row) | Python | 22 | 23.818182 | 73 | /part-data/test-sqlite.py | 0.541284 | 0.522936 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2020-03-07 18:11
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test16.py
# ----------------------------------------------
import re
if __name__ == "__main__":
# 使用正则表达式匹配地址
s = "www.baidu.com.jkjh"
if re.match(r'(.*).(.*).(.*)', s):
print("pass")
r = re.findall(r'(.*)\.(.*)\.(.*)', s)
print(r)
s = " 98 100 102 "
s = re.sub(r' (\d+) (\d+) (\d+) ', r'\3/\2/\1', s)
print(s)
# 正则匹配中 (.*) 和 (.*?) 的区别是一个是最长匹配,一个是最短匹配
text = 'Computer says "no." Phone says "yes."'
t1 = re.findall(r'"(.*)"', text)
t2 = re.findall(r'"(.*?)"', text)
print(t1)
print(t2)
# 匹配邮箱的正则表达式
text1 = "test@wulj.com, test2@wulinjiang.com,"
t3 = re.findall(r'\s*(.*?)@(.*?).com,\s*', text1)
print(t3) | Python | 30 | 26.433332 | 54 | /part-interview/test16.py | 0.394161 | 0.352798 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2020-03-08 11:05
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test18.py
# ----------------------------------------------
def search_2(data, l):
""" 二分查找法 """
length = len(l)
# 递归一定要写出退出条件
if length <= 1:
if length <= 0:
return False
elif data == l[0]:
return 0, l[0]
else:
return False
mid_index = int(length/2)
mid = l[mid_index]
if data > mid:
f_index = mid_index + 1
return search_2(data, l[f_index:])
elif data < mid:
return search_2(data, l[:mid_index])
else:
return mid_index, mid
if __name__ == "__main__":
data = 0
l = [i for i in range(10)]
if search_2(data, l):
index, value = search_2(data, l)
print(index, value)
else:
print(False)
| Python | 39 | 22.282051 | 48 | /part-interview/test18.py | 0.431718 | 0.398678 |
wuljchange/interesting_python | refs/heads/master | from functools import total_ordering
import re
class Room:
def __init__(self, name, length, width):
self.name = name
self.length = length
self.width = width
self.squre_foot = self.length*self.width
@total_ordering
class House:
def __init__(self, name, style):
self.name = name
self.style = style
self.rooms = list()
@property
def living_space_footage(self):
return sum(r.squre_foot for r in self.rooms)
def append_room(self, room):
self.rooms.append(room)
def __str__(self):
return '{} area is {}, style is {}'.format(self.name, self.living_space_footage, self.style)
def __eq__(self, other):
return self.living_space_footage == other.living_space_footage
def __lt__(self, other):
return self.living_space_footage < other.living_space_footage
if __name__ == "__main__":
# a = Room('bed_room', 20, 30)
# # b = Room('living_room', 30, 40)
# # c = Room('kitchen_room', 10, 20)
# # h = House('home', 'Asia')
# # h1 = House('new-home', 'Europe')
# # h.append_room(a)
# # h.append_room(b)
# # h1.append_room(c)
# # if h1 > h:
# # print('{} area > {}'.format(h1.living_space_footage, h.living_space_footage))
# # else:
# # print('{} area is {} and < {} area is {}'.format(h1.name, h1.living_space_footage, h.name, h.living_space_footage))
# #
# # data = [1, 3, 3, 2, 5, 7, 5, 4, 5]
# # a = list({k:'' for k in data})
# # print(a)
s = re.compile(r'[0-9]+')
if s.match('1'):
print('yes')
data = [1,2,3,5,7,8]
new = [23, 45, 1]
new.reverse()
print(new)
print(data+new)
print(round(7/3, 2)) | Python | 63 | 27.444445 | 131 | /part-class/test-compare.py | 0.526522 | 0.501954 |
wuljchange/interesting_python | refs/heads/master | import array
if __name__ == "__main__":
# xt模式测试写入文件不能直接覆盖,只能写入到不存在的文件里面
with open('test.file', 'xt') as f:
f.write('test not exist')
print("end", end='#') | Python | 8 | 21.25 | 38 | /part-text/test-newfile.py | 0.564972 | 0.564972 |
wuljchange/interesting_python | refs/heads/master | # 希尔排序 时间复杂度是O(NlogN)
# 又称缩小增量排序 首先设置一个基础增量d,对每间隔d的元素分组,然后对每个分组的元素进行直接插入排序
# 然后缩小增量,用同样的方法,直到增量小于0时,排序完成
def shell_sort(data: list):
n = len(data)
gap = int(n / 2) # 设置基础增量
# 当增量小于0时,排序完成
while gap > 0:
for i in range(gap, n):
j = i
while j >= gap and data[j-gap] > data[j]:
data[j-gap], data[j] = data[j], data[j-gap]
j -= gap
gap = int(gap / 2)
return data
if __name__ == "__main__":
t_data = [3, 2, 5, 4, 1]
print(shell_sort(t_data)) | Python | 22 | 23.636364 | 59 | /part-sort-alogrithm/test-shell.py | 0.51756 | 0.499076 |
wuljchange/interesting_python | refs/heads/master | from contextlib import contextmanager
from collections import defaultdict
class Exchange:
def __init__(self):
self._subscribers = set()
def attach(self, task):
self._subscribers.add(task)
def detach(self, task):
self._subscribers.remove(task)
@contextmanager
def subscribe(self, *tasks):
for task in tasks:
self.attach(task)
try:
yield
finally:
for task in tasks:
self.detach(task)
def send(self, msg):
for subscriber in self._subscribers:
subscriber.send(msg)
class Task:
def send(self, msg):
print(msg)
_changes = defaultdict(Exchange)
def get_change(name):
return _changes[name]
if __name__ == "__main__":
data = {'new1': 1, 'new3': 2, 'new2': 3}
# new = sorted(data.items())
print(dict(sorted(data.items())))
# exc = get_change('name')
# task_a = Task()
# task_b = Task()
# with exc.subscribe(task_a, task_b):
# exc.send('msg1')
# exc.send('msg2') | Python | 51 | 19.980392 | 44 | /part-thread/test_exchange.py | 0.560337 | 0.552853 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2020-01-13 14:30
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : kafka-consumer.py
# ----------------------------------------------
from kafka import KafkaConsumer
import time
def start_consumer():
consumer = KafkaConsumer('my_test_topic1',
bootstrap_servers='kafka-0-0.kafka-0-inside-svc.kafka.svc.cluster.local:32010,'
'kafka-1-0.kafka-1-inside-svc.kafka.svc.cluster.local:32011,'
'kafka-2-0.kafka-2-inside-svc.kafka.svc.cluster.local:32012,'
'kafka-3-0.kafka-3-inside-svc.kafka.svc.cluster.local:32013,'
'kafka-4-0.kafka-4-inside-svc.kafka.svc.cluster.local:32014,'
'kafka-5-0.kafka-5-inside-svc.kafka.svc.cluster.local:32015')
for msg in consumer:
print(msg)
print("topic = %s" % msg.topic) # topic default is string
print("partition = %d" % msg.offset)
print("value = %s" % msg.value.decode()) # bytes to string
print("timestamp = %d" % msg.timestamp)
print("time = ", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(msg.timestamp/1000)))
if __name__ == '__main__':
start_consumer()
| Python | 30 | 42.966667 | 108 | /part-kafka/kafka-consumer.py | 0.515542 | 0.465504 |
wuljchange/interesting_python | refs/heads/master | import pandas as pd
import numpy as np
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename='test.log',
filemode='w')
if __name__ == "__main__":
datas = pd.read_csv('test.csv')
print(datas)
# 输出每一列的数据类型
print(datas.dtypes)
# 输出前几行,会自动把header输出,不算行
print(datas.head(2))
# 每一列都有什么特征
print(datas.columns)
# 输出csv文件有多少行和列,不算header
print(datas.shape)
# pandas.Series传递一个list为参数
s = pd.Series([1, 2, 3, np.nan, 5, 6])
print(s)
dates = pd.date_range('20181201', periods=12)
print(dates)
da = np.random.randn(3, 4)
print(da)
# 传递一个np数组
df = pd.DataFrame(data=np.random.randn(12, 6), index=dates, columns=list('ABCDEF'))
print(df)
# 传递一个dict对象
df2 = pd.DataFrame({"a": [i for i in range(4)],
"b": "test"})
print(df2)
# view head or tail 元素,head default n=5
print(df.head())
print(df.tail(2))
# view index, columns, values
print(df.index)
print(df.columns)
print(df.values)
# describe 快速显示DataFrame的各项指标
print(df.describe())
# df.loc[] useful
print(df.loc[dates[0]])
print(df.loc[dates[0], ['A', 'B']])
print(df.loc[dates[0]:dates[2], ['A', 'B', 'C']])
print(df.iloc[0:2])
print(df.iloc[0:2, 3:4])
logging.info('new')
df3 = df.copy()
print(df3)
print(df3.mean())
print(df3.mean(1))
| Python | 56 | 27.107143 | 97 | /part-data/test-pandas.py | 0.560356 | 0.534307 |
wuljchange/interesting_python | refs/heads/master | from functools import partial
import math
def distance(p1, p2):
x1, y1 = p1
x2, y2 = p2
return math.hypot(x2-x1, y2-y1)
if __name__ == "__main__":
points = [(1, 2), (3, 4), (7, 8), (5, 6)]
pt = (5, 6)
points.sort(key=partial(distance, pt))
print(points)
| Python | 15 | 18.066668 | 45 | /part-data/test-partial.py | 0.541958 | 0.465035 |
wuljchange/interesting_python | refs/heads/master | from itertools import dropwhile, islice
from itertools import permutations, combinations
from itertools import combinations_with_replacement
def parser(filename):
with open(filename, 'rt') as f:
for lineno, line in enumerate(f, 1):
print(lineno, line)
fields = line.split()
try:
count = float(fields[0])
except ValueError as e:
print('Lineno {} parser {}'.format(lineno, e))
if __name__ == "__main__":
l1 = [1, 2, 3, 4]
l2 = [2, 3, 4, 5]
a = [(x, y) for x, y in zip(l1, l2)]
print(a)
for index, (x, y) in enumerate(a):
print(index, x, y)
line_text = 'test new world'
print(line_text.split())
items = [1, 2, 3, 4]
for i in enumerate(items):
print(i)
# 指定行号
for i in enumerate(items, 2):
print(i)
# 允许同一个元素被选取,在一个元祖中
cp = [i for i in combinations_with_replacement(items, 2)]
p_test = [i for i in permutations(items, 2)]
c_test = [i for i in combinations(items, 2)]
print(p_test)
print(c_test)
print(cp)
with open('../data-struct-algorithm/tmp/test') as f:
r = f.readlines()
print(r)
st = ['#new', '#test', 'test']
s = islice(st, 1, None)
for s1 in s:
print(s1)
print(s)
ret = list(filter(lambda x: x.startswith('#'), r))
print(ret)
for line in dropwhile(lambda x: x.startswith("#"), f):
print(line, end=" ") | Python | 50 | 28.959999 | 62 | /part-text/test-iter.py | 0.537742 | 0.521042 |
wuljchange/interesting_python | refs/heads/master | from operator import itemgetter
from itertools import groupby
data = [
{"date": 2019},
{"date": 2018},
{"date": 2020}
]
data.sort(key=itemgetter('date'))
print(data)
for date, item in groupby(data, key=itemgetter('date')):
print(date)
print(item)
for i in item:
print(type(i), i) | Python | 16 | 18.625 | 56 | /part-struct/test-groupby.py | 0.626198 | 0.587859 |
wuljchange/interesting_python | refs/heads/master | import gzip
import bz2
if __name__ == "__main__":
# gzip作用于一个已经打开的二进制文件 new character
f = open('file.gz', 'rb')
with gzip.open(f, 'rb') as f:
print(f.read())
# with语句结束自动会关闭文件
with gzip.open('file', 'wt') as f:
f.read("test")
print("new line")
with bz2.open('file', 'wt') as f:
f.read("test")
print("end") | Python | 16 | 21.75 | 39 | /part-text/test-gzip.py | 0.53719 | 0.53168 |
wuljchange/interesting_python | refs/heads/master | # 冒泡排序 该算法的事件复杂度未O(N^2)
# 具体过程如下 首先遍历数组中的n个元素,对数组中的相邻元素进行比较,如果左边的元素大于右边的元素,则交换两个元素所在的
# 位置,至此,数组的最右端的元素变成最大的元素,接着对剩下的n-1个元素执行相同的操作。
def bubble_sort(data: list):
# 外面的循环控制内部循环排序的次数,例如5个数,只需要4次排序就行了
for i in range(len(data)-1):
change = False
# 内部循环比较相邻元素,找到剩下元素的最大值放在数组的右边
for j in range(len(data)-i-1):
if data[j] > data[j+1]:
data[j], data[j+1] = data[j+1], data[j]
change = True
# 当change=False时,说明没有交换的情况发生,说明该数组已经排序完成
# 减少了循环的次数
if not change:
break
return data
if __name__ == "__main__":
t_data = [5, 4, 3, 2, 1]
print(bubble_sort(t_data)) | Python | 24 | 27.041666 | 61 | /part-sort-alogrithm/test-bubble.py | 0.584821 | 0.563988 |
wuljchange/interesting_python | refs/heads/master | from plumbum import local, FG, BG, cli, SshMachine, colors
from plumbum.cmd import grep, awk, wc, head, cat, ls, tail, sudo, ifconfig
if __name__ == "__main__":
ls = local["ls"]
print(ls())
# 环境在linux
# 管道符 pipe
command = ls["-a"] | awk['{if($2="100") print $2}'] | wc["-l"]
print(command())
# 重定向
command = cat['test.file'] | head["-n", 5]
print(command())
# 后台运行和当前终端运行
command = (cat['test.file'] | grep["-v", "test"] | (tail["-n", 5] > "out.file")) & FG
print(command())
command = (awk['-F', '\t', '{print $1, $2}', 'test.file'] | (head['-n', 5] >> 'out.file')) & BG
print(command())
# 嵌套命令
command = sudo[ifconfig['-a']]
command1 = (sudo[ifconfig["-a"]] | grep["-i", "loop"]) & FG
print(command())
print(command1())
| Python | 24 | 32.291668 | 99 | /part-plumbum/test01.py | 0.524405 | 0.509387 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2019-11-08 11:42
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test-requests.py
# ----------------------------------------------
import requests
if __name__ == "__main__":
url = "https://cn.bing.com/"
resp = requests.get("https://"+"cn.bing.com", verify=True)
print(resp.status_code)
print(resp.url) | Python | 15 | 26.266666 | 62 | /part-requests/test-requests.py | 0.426471 | 0.394608 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2020-03-04 23:48
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test11.py
# ----------------------------------------------
class Demo:
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def __call__(self, *args, **kwargs):
""" 改变实例的状态 """
self.x, self.y = args
class Counter:
def __init__(self, func):
self.func = func
self.count = 0
def __call__(self, *args, **kwargs):
self.count += 1
return self.func(*args, **kwargs)
@Counter
def foo(name):
print(name)
class Test:
# 静态属性
x = 1
y = 2
def __init__(self, x=7, y=8):
# 成员属性
self.x = x
self.y = y
def normal_func(self):
print("normal", self.x, self.y)
@staticmethod
def static_func():
print("static", Test().x, Test().y)
print(Test(3, 4).normal_func())
@classmethod
def class_func(cls):
print("class", Test.x, Test.y)
print(cls(5, 6).normal_func())
if __name__ == "__main__":
data = "sastfftsasdsh"
print(5/2)
q = [True if x % 3 == 0 else -x for x in range(1, 101)]
d = [True if data[i] == data[len(data)-i-1] else False for i in range(int(len(data)/2))]
print(d)
# 装饰器有什么作用
# 用于给给现有函数增加额外功能,接收一个函数作为参数
# 定义的装饰器函数可以带参数,函数本身也可以带参数
# python 垃圾回收机制
# 1. 引用计数器回收,引用计数器为0时,就会被解释器的 gc 回收
# 2. 分代垃圾回收机制,对于对象的相互引用和循环引用,第一种回收方式时无法实现的,具体分为第一代,第二代,第三代
# 第一代主要用于去除同一代中的相互索引和循环索引,存活的对象放入第二代,以此类推。
# __call__ 使用
# 可调用对象,对于类,函数,但凡是可以把()应用到一个对象上的情况都是可调用对象
# 如果一个类中实现了 __call__ 函数,就可以将一个实例对象变成一个可调用对象
demo = Demo(1, 2, 3)
print(demo.x, demo.y)
# 将实例对象当成函数调用,直接调用类中定义的 __call__ 函数,用于改变对象状态最直接优雅的方法
demo(5, 6)
print(demo.x, demo.y)
for i in range(10):
foo(i)
print(foo.count)
# 判断一个对象是函数还是方法
# 与类和实例无绑定关系的function都是函数
# 与类和实例有绑定关系的function都是方法
# @staticmethod 和 @classmethod
# @staticmethod 静态方法,与类和实例无关
# @classmethod 类方法
Test.static_func()
Test.class_func()
Test.x = 10
t = Test(1, 2)
print(t.x)
print(Test.x)
| Python | 102 | 20.59804 | 92 | /part-interview/test11.py | 0.529278 | 0.507036 |
wuljchange/interesting_python | refs/heads/master | # 选择排序 时间复杂度时O(N^2)
# 具体过程如下 首先在n个元素的数组中找到最小值放在数组的最左端,然后在剩下的n-1个元素中找到最小值放在左边第二个位置
# 以此类推,直到所有元素的顺序都已经确定
def select_sort(data: list):
# 外部循环只需遍历n-1次
for i in range(len(data)-1):
for j in range(i+1, len(data)):
if data[i] > data[j]:
data[i], data[j] = data[j], data[i]
return data
if __name__ == "__main__":
t_data = [5, 4, 3, 2, 1]
print(select_sort(t_data)) | Python | 17 | 23.588236 | 61 | /part-sort-alogrithm/test-select.py | 0.57554 | 0.551559 |
wuljchange/interesting_python | refs/heads/master | # 插入排序 时间复杂度O(N^2)
# 具体过程如下 每次循环往已经排好序的数组从后往前插入一个元素,第一趟比较两个元素的大小,第二趟插入元素
# 与前两个元素进行比较,放到合适的位置,以此类推。
def insert_sort(data: list):
for i in range(1, len(data)):
key = data[i]
# 相当于相邻元素进行比较,但是逻辑更清楚一点
for j in range(i-1, -1, -1):
if data[j] > key:
data[j+1] = data[j]
data[j] = key
return data
if __name__ == "__main__":
t_data = [2, 4, 1, 5, 3, 5, 9, 10, 8, 7]
print(insert_sort(t_data)) | Python | 19 | 23.894737 | 53 | /part-sort-alogrithm/test-insert.py | 0.527542 | 0.491525 |
wuljchange/interesting_python | refs/heads/master | import jsane
if __name__ == "__main__":
# jsane是一个json解析器
# loads 解析一个json字符串
j = jsane.loads('{"name": "wulj", "value": "pass"}')
print(j.name.r())
# from_dict 解析字典
j2 = jsane.from_dict({'key': ['v1', 'v2', ['v3', 'v4', {'inner': 'value'}]]})
print(j2.key[2][2].inner.r())
# 当解析找不到key时,设置默认值
print(j2.key.new.r(default="test"))
| Python | 13 | 27.23077 | 81 | /part-jsane/test01.py | 0.536785 | 0.512262 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2020-03-05 20:00
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test12.py
# ----------------------------------------------
from abc import abstractmethod, ABCMeta
class Interface(object):
__metaclass__ = ABCMeta
@abstractmethod
def test(self):
pass
def new(self):
pass
class NewTest(Interface):
def __init__(self):
print("interface")
def test(self):
print("test")
def new(self):
print("new")
if __name__ == "__main__":
print("test")
# nt = NewTest()
# nt.test()
# nt.new() | Python | 37 | 16.756756 | 48 | /part-interview/test12.py | 0.446646 | 0.423781 |
wuljchange/interesting_python | refs/heads/master | if __name__ == "__main__":
x = 1234
# 函数形式
print(bin(x))
print(oct(x))
print(hex(x))
# format形式,没有前缀 0b,0o,0x
print(format(x, 'b'))
print(format(x, 'o'))
print(format(x, 'x'))
#将进制的数据转换成整数字符串
a = format(x, 'b')
b = format(x, 'x')
print(int(a, 2))
print(int(b, 16)) | Python | 15 | 20.4 | 28 | /part-data/test-scale.py | 0.4875 | 0.45625 |
wuljchange/interesting_python | refs/heads/master | import glob
import fnmatch
import os.path
if __name__ == "__main__":
dir_path = '/root/tmp/test'
path = '/root/tmp/test/*.py'
pyfiles = glob.glob(path)
pyfiles2 = [name for name in os.listdir(dir_path) if fnmatch(name, '*.py')] | Python | 10 | 23.6 | 79 | /part-text/test-glob-fnmatch.py | 0.616327 | 0.612245 |
wuljchange/interesting_python | refs/heads/master | import arrow
import re
import pdb
import tempfile
if __name__ == "__main__":
# print(arrow.now().shift(days=-1).format('YYYY-MM-DD'))
# data = ['merge', '1', 'commit', 'merge']
# data.remove('1')
# print(data)
# d = [{'code': 12}, {'code': 11}, {'code': 13}]
# d.sort(key=lambda x: x['code'])
# print(d)
# s = ' --hello -world+ '
# print(re.sub("[' ', '-', '+']", '', s))
with tempfile.NamedTemporaryFile('w+t') as f:
print(f.name)
f.write('hello world!')
f.seek(0)
print(f.read()) | Python | 21 | 25.476191 | 60 | /part-text/test-list.py | 0.493694 | 0.475676 |
wuljchange/interesting_python | refs/heads/master | import re
text = '/* http new s */'
r = re.compile(r'/\*(.*?)\*/')
print(r.findall(text))
| Python | 6 | 14.333333 | 30 | /part-text/test-re.py | 0.521739 | 0.521739 |
wuljchange/interesting_python | refs/heads/master | # import smtplib
# from email.mime.text import MIMEText
# from email.header import Header
#
# # 第三方 SMTP 服务
# mail_host = "smtp.qq.com" # 设置服务器
# mail_user = "" # 用户名
# mail_pass = "XXXXXX" # 口令
#
# sender = 'from@runoob.com'
# receivers = ['429240967@qq.com'] # 接收邮件,可设置为你的QQ邮箱或者其他邮箱
#
# message = MIMEText('Python 邮件发送测试...', 'plain', 'utf-8')
# message['From'] = Header("菜鸟教程", 'utf-8')
# message['To'] = Header("测试", 'utf-8')
#
# subject = 'Python SMTP 邮件测试'
# message['Subject'] = Header(subject, 'utf-8')
#
# try:
# smtpObj = smtplib.SMTP()
# smtpObj.connect(mail_host, 25) # 25 为 SMTP 端口号
# smtpObj.login(mail_user, mail_pass)
# smtpObj.sendmail(sender, receivers, message.as_string())
# print("邮件发送成功")
# except smtplib.SMTPException:
# print("Error: 无法发送邮件")
import arrow
import json
import os
from pathlib import Path
if __name__ == "__main__":
print(1)
print(Path(__file__).resolve().parent)
# with open('test.json', 'r') as config:
# print(config)
# print(type(json.load(config)))
# print(arrow.now())
# data = [1,2,3,4,5,6]
# print(data[3:]) | Python | 43 | 24.906977 | 62 | /part-text/test-smtp.py | 0.606469 | 0.592093 |
wuljchange/interesting_python | refs/heads/master | from decimal import Decimal, localcontext
def main(a, b):
a = Decimal(a)
b = Decimal(b)
return a+b
if __name__ == "__main__":
sum = main('3.2', '4.3')
# 使用上下文管理器更改输出的配置信息
with localcontext() as ctx:
ctx.prec = 3
print(Decimal('3.2')/Decimal('2.3'))
print(sum == 7.5)
| Python | 16 | 18.6875 | 44 | /part-data/test-decimal.py | 0.542857 | 0.507937 |
wuljchange/interesting_python | refs/heads/master | # 快速排序 时间复杂度时O(NlogN)
# 具体过程如下 采用一种分治递归的算法 从数组中任意选择一个数作为基准值,然后将数组中比基准值小的放在左边
# 比基准值大的放在右边,然后对左右两边的数使用递归的方法排序
def partition(data, start, end):
i = start - 1
for j in range(start, end):
# 刚开始以data[end]的值作为基准值
if data[j] < data[end]:
i += 1
# 如果j所在的位置的值小于end,则i往前进一步,并与j的值交换,即将一个新的值加入到小于end的区域
data[i], data[j] = data[j], data[i]
i += 1
data[i], data[end] = data[end], data[i]
return i
def quick_sort(data: list, start, end):
if start < end:
mid = partition(data, start, end)
quick_sort(data, start, mid-1)
quick_sort(data, mid+1, end)
return data
if __name__ == "__main__":
t_data = [5, 4, 3, 2, 1]
print(quick_sort(t_data, 0, 4)) | Python | 29 | 24.758621 | 64 | /part-sort-alogrithm/test-quick.py | 0.579088 | 0.563003 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2020-03-07 12:18
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test15.py
# ----------------------------------------------
if __name__ == "__main__":
# filter 方法,func + iterator
data = [i for i in range(1, 11)]
print(list(filter(lambda x: x % 2 == 0, data)))
# 什么是猴子补丁
# 运行是动态替换模块的方法
# python 是如何管理内存的,引用计数和分代垃圾回收的机制
# 当退出 python3 时,是否会释放所有内存分配,答案时否定的,对于循环引用和相互引用的内存还不会释放
| Python | 17 | 27.941177 | 58 | /part-interview/test15.py | 0.465447 | 0.422764 |
wuljchange/interesting_python | refs/heads/master | if __name__ == "__main__":
# list,dict,set是不可hash的
# int,float,str,tuple是可以hash的
data = [1, 2, '232', (2, 3)]
data1 = [2, 3, '213', (2, 3)]
# 两个list取补集,元素在data中,不在data1中
diff_list = list(set(data).difference(set(data1)))
print(diff_list)
# 取交集
inter_list = list(set(data).intersection(set(data1)))
print(inter_list)
# 取并集
union_list = list(set(data).union(set(data1)))
print(union_list) | Python | 17 | 25 | 57 | /part-text/test-set.py | 0.585034 | 0.54195 |
wuljchange/interesting_python | refs/heads/master | import arrow
bracket_dct = {'(': ')', '{': '}', '[': ']', '<': '>'}
def bracket(arg: str):
match_stack = []
for char in arg:
if char in bracket_dct.keys():
match_stack.append(char)
elif char in bracket_dct.values():
if len(match_stack) > 0 and bracket_dct[match_stack.pop()] == char:
continue
else:
return False
else:
continue
return True
if __name__ == "__main__":
test = '(12121){}dasda[oio{dad}232<asfsd>232]'
print(arrow.now().format('YYYY-MM-DD HH:MM:SS'))
print(bracket(test))
| Python | 25 | 23.68 | 79 | /part-text/bracket_expression.py | 0.49919 | 0.479741 |
wuljchange/interesting_python | refs/heads/master | import threading
from socket import socket, AF_INET, SOCK_STREAM
from functools import partial
from contextlib import contextmanager
# State to stored info on locks already acquired
_local = threading.local()
@contextmanager
def acquire(*locks):
locks = sorted(locks, key=lambda x: id(x))
acquired = getattr(_local, 'acquire', [])
if acquired and max(id(lock) for lock in acquired) >= id(locks[0]):
raise RuntimeError('Lock order violation')
acquired.extends(locks)
_local.acquired = acquired
try:
for lock in locks:
lock.acquire()
yield
finally:
for lock in reversed(locks):
lock.release()
del acquired[-len(locks):]
class LazyConnection:
def __init__(self, address, family=AF_INET, socket_type=SOCK_STREAM):
self.address = address
self.family = family
self.socket_type = socket_type
self.local = threading.local()
def __enter__(self):
if hasattr(self.local, 'sock'):
raise RuntimeError('connection existed')
self.local.sock = socket(self.family, self.socket_type)
self.local.sock.connect(self.address)
return self.local.sock
def __exit__(self, exc_type, exc_val, exc_tb):
self.local.sock.close()
del self.local.sock
def test(conn):
with conn as c:
c.send(b'test\n')
resp = b''.join(iter(partial(c.recv, 8192), b''))
print(len(resp))
if __name__ == "__main__":
conn = LazyConnection(("www.test.com", 8081))
t1 = threading.Thread(target=test, args=(conn,))
t2 = threading.Thread(target=test, args=(conn,))
t1.start()
t2.start()
t1.join()
t2.join()
| Python | 65 | 25.384615 | 73 | /part-thread/thread_lock.py | 0.61691 | 0.608163 |
wuljchange/interesting_python | refs/heads/master | import os.path
import time
import glob
import fnmatch
if __name__ == "__main__":
dir_path = '/data/proc/log'
file_name = [name for name in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, name))]
dir_name = [name for name in os.listdir(dir_path) if os.path.isdir(os.path.join(dir_path, name))]
pyfile = [name for name in os.listdir(dir_path) if name.endswith('.py')]
path = '/data/prolog/log/test.log'
print(os.path.basename(path))
print(os.path.dirname(path))
print(os.path.split(path))
print(os.path.join('root', 'tmp', os.path.basename(path)))
# 测试文件或者目录是否存在 指定类型判断
if os.path.exists(path):
print(True)
os.path.isfile(path)
os.path.isdir(path)
# 测试是否是软连接
os.path.islink(path)
# 得到软连接的完整路径
os.path.realpath(path)
os.path.getsize(path)
# 得到文件的创建时间
os.path.getmtime(path)
# 修改文件的创建时间
time.ctime(os.path.getmtime(path)) | Python | 30 | 30 | 103 | /part-text/test-path.py | 0.649085 | 0.649085 |
wuljchange/interesting_python | refs/heads/master | from marshmallow import Schema, fields, post_load, pprint
from hashlib import md5
sort_key = ['name', 'role']
class Actor(object):
"""
创建actor基础类
"""
def __init__(self, name, role, grade):
self.name = name
self.role = role
self.grade = grade
def __str__(self):
return '<Actor_str(name={self.name!r})>'.format(self=self)
def __repr__(self):
return '<Actor_repr(name={self.name!r})>'.format(self=self)
def __eq__(self, other):
bools = []
for key in sort_key:
bools.append(getattr(self, key) == getattr(other, key))
return all(bools)
@staticmethod
def get_hash(self):
source = ''.join([getattr(self, key) for key in sort_key])
m = md5(source.encode('utf-8'))
return m.hexdigest()
class Movie(object):
"""
创建movie基础类
"""
def __init__(self, name, actors):
self.name = name
self.actors = actors
# 重构内置的str函数
def __str__(self):
return '<Movie_str(name={self.name!r})>'.format(self=self)
# 重构内置的repr函数
def __repr__(self):
return '<Movie_repr(name={self.name!r})>'.format(self=self)
# 重构内置的 == 函数
def __eq__(self, other):
bools = []
act1 = {actor.get_hash(): actor for actor in self.actors}
act2 = {actor.get_hash(): actor for actor in other.actors}
common_key = set(act1) & set(act2)
for key in common_key:
bools.append(act1.pop(key) == act2.pop(key))
unique_count = len(act1.values()) + len(act2.values())
bl = (self.name == other.name)
return bl and all(bools) and (unique_count == 0)
class ActorScm(Schema):
"""
创建actor schema基础类
"""
name = fields.Str()
role = fields.Str()
grade = fields.Int()
@post_load
def make_data(self, data):
return Actor(**data)
class MovieScm(Schema):
"""
创建movie schema基础类
"""
name = fields.Str()
actors = fields.Nested(ActorScm, many=True)
@post_load
def make_data(self, data):
return Movie(**data)
if __name__ == "__main__":
# 将字典反序列化为movie基础类
actor1 = {'name': 'lucy', 'role': 'hero', 'grade': 9}
actor2 = {'name': 'mike', 'role': 'boy', 'grade': 10}
movie = {'name': 'green', 'actors': [actor1, actor2]}
schema = MovieScm()
ret = schema.load(movie)
# print 输出类时,调用的是__str__函数
print(ret)
# pprint 输出类时,调用的是__repr__函数
pprint(ret.data)
# 将movie基础类序列化为字典
schema = MovieScm()
ret_dct = schema.dump(ret.data)
pprint(ret_dct.data)
| Python | 104 | 23.875 | 67 | /part-marshmallow/test-load&dump.py | 0.558903 | 0.551564 |
wuljchange/interesting_python | refs/heads/master | def async_apply(func, args, *, callback):
result = func(*args)
callback(result)
def make_handle():
sequence = 0
while True:
result = yield
sequence += 1
print('[{}] result is {}'.format(sequence, result))
if __name__ == "__main__":
# 协程处理
handle = make_handle()
next(handle)
add = lambda x, y: x+y
async_apply(add, (2, 3), callback=handle.send)
async_apply(add, (3, 4), callback=handle.send) | Python | 20 | 22 | 59 | /part-data/test-callback.py | 0.566449 | 0.553377 |
wuljchange/interesting_python | refs/heads/master | from collections import OrderedDict
def dedupe(items):
"""
删除一个迭代器中重复的元素,并保持顺序
:param items: 迭代器
:return:
"""
a = set()
for item in items:
if item not in a:
yield item
a.add(item)
# 找出一个字符串中最长的没有重复字符的字段
def cutout(test: str):
max_data = []
for s in test:
if s not in max_data:
max_data.append(s)
else:
yield max_data
max_data = []
max_data.append(s)
yield max_data
if __name__ == "__main__":
data = [1, 2, 2, 1, 4, 5, 4]
print(list(dedupe(data)))
# 简单方法
order_dct = OrderedDict()
for item in data:
order_dct[item] = item
print(list(order_dct.keys()))
data = 'anmninminuc'
for item in cutout(data):
print(''.join(item))
output = ''.join(max(cutout(data), key=lambda s: len(s)))
print(output) | Python | 44 | 19.15909 | 61 | /part-text/data/test-copy-text.py | 0.529345 | 0.521445 |
wuljchange/interesting_python | refs/heads/master | from itertools import compress
import re
import arrow
addresses = [
'5412 N CLARK',
'5148 N CLARK',
'5800 E 58TH',
'2122 N CLARK',
'5645 N RAVENSWOOD',
'1060 W ADDISON',
'4801 N BROADWAY',
'1039 W GRANVILLE',
]
counts = [0, 3, 10, 4, 1, 7, 6, 1]
new = [n > 5 for n in counts]
l = list(compress(addresses, new))
print(l)
test = '12 23, 34; 1213'
print(re.split(r'\s*[,;\s]\s*', test))
print(arrow.now().isoformat())
t = arrow.get('2018-12-01 10:23')
print(t.isoformat().split('.')[0])
| Python | 27 | 18.370371 | 38 | /part-struct/test-compress.py | 0.590822 | 0.462715 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2020-03-04 15:31
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test09.py
# ----------------------------------------------
import pymysql
# 打开数据库连接
db = pymysql.connect("host", "username", "pw", "db")
# 创建一个游标对象
cursor = db.cursor()
# 执行查询
cursor.execute("select * from db.tb")
# 获取数据
data = cursor.fetchone()
print(data)
# 关闭连接
db.close()
# 数据库的三范式
# 1. 确保每列保持原子性,每一列的数据都是不可分解的原子值,根据需求而定哈
# 2. 确保表中的每列都和主键相关,不能只和一部分主键相关(主要针对联合主键而言)
# 3. 确保每列都和主键直接相关,而不能间接相关
| Python | 29 | 18.137932 | 52 | /part-interview/test09.py | 0.538739 | 0.506306 |
wuljchange/interesting_python | refs/heads/master | import random
if __name__ == "__main__":
values = [1, 2, 3, 4, 5]
# 随机选取一个元素
print(random.choice(values))
# 随机选取几个元素且不重复
print(random.sample(values, 3))
# 打乱原序列中的顺序
print(random.shuffle(values))
# 生成随机整数,包括边界值
print(random.randint(0, 10))
# 生成0-1的小数
print(random.random())
# 获取N位随机数的整数
print(random.getrandbits(10)) | Python | 17 | 20.764706 | 35 | /part-data/test-random.py | 0.604336 | 0.569106 |
wuljchange/interesting_python | refs/heads/master | import base64
import binascii
if __name__ == "__main__":
s = b'hello world!'
# 2进制转换成16进制
h = binascii.b2a_hex(s)
print(h)
# 16进制转换成2进制
print(binascii.a2b_hex(h))
h1 = base64.b16encode(s)
print(h1)
print(base64.b16decode(h1)) | Python | 14 | 17.857143 | 31 | /part-data/test-hex.py | 0.596958 | 0.51711 |
wuljchange/interesting_python | refs/heads/master | from struct import Struct
def record_data(records, format, file):
    """Pack each tuple in *records* with struct layout *format* and
    write the resulting fixed-size binary records to *file*.

    :param records: iterable of tuples matching the struct format
    :param format: struct format string, e.g. '<idd'
    :param file: binary file-like object opened for writing
    """
    pack = Struct(format).pack
    for record in records:
        file.write(pack(*record))
def read_data(format, f):
    """Iterate over the binary file *f* as fixed-size *format* records.

    Reads one record-sized chunk at a time (incremental — the whole file
    is never loaded) and yields each chunk unpacked as a tuple.  Stops
    at end of file; a truncated trailing record raises ``struct.error``
    just as a direct ``unpack`` would.

    :param format: struct format string
    :param f: binary file-like object opened for reading
    :return: lazy generator of unpacked tuples
    """
    decoder = Struct(format)

    def _records():
        while True:
            chunk = f.read(decoder.size)
            if chunk == b'':
                break
            yield decoder.unpack(chunk)

    return _records()
def unpack_data(format, f):
    """Unpack the whole in-memory buffer *f* as consecutive *format*
    records, yielding one tuple per record.

    Unlike read_data this walks an already-loaded buffer by offset
    (full iteration over bytes/bytearray), not a file object.

    :param format: struct format string
    :param f: bytes-like buffer whose length is a multiple of the record size
    :return: lazy generator of unpacked tuples
    """
    layout = Struct(format)  # renamed local; it previously shadowed the function name
    offsets = range(0, len(f), layout.size)
    return (layout.unpack_from(f, pos) for pos in offsets)
if __name__ == "__main__":
records = [
(1, 2, 3),
(2, 3, 4),
(3, 4, 5),
]
with open('test.file', 'wb') as f:
record_data(records, '<idd', f) | Python | 40 | 19.950001 | 96 | /part-data/test-b2-struct.py | 0.561529 | 0.549582 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2019-11-25 17:49
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test-elasticsearch.py
# ----------------------------------------------
from elasticsearch import Elasticsearch
from ssl import create_default_context
if __name__ == "__main__":
context = create_default_context(cafile="./ca.crt")
es = Elasticsearch(
['10.100.51.164'],
http_auth=('elastic', 'K6fgGGmOu359V4GY3TOw'),
scheme="https",
port=9200,
ssl_context=context
)
print(es.info())
| Python | 21 | 26.761906 | 55 | /part-elasticsearch/test-elasticsearch.py | 0.480274 | 0.433962 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2020-03-01 12:33
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test04.py
# ----------------------------------------------
if __name__ == "__main__":
# 字典操作
dct = {"a": 1, "b": 2}
a = dct.pop("a")
print(a)
print(dct)
del dct["b"]
print(dct)
# 合并两个字典
a = {"a": 1, "b": 2}
b = {"c": 3, "d": 4}
a.update(b)
print(a)
# 生成器的方式生成一个字典,dict 直接初始化 必须是元组的 list 形式才可以
values = [1, 2, 3]
keys = ["a", "b", "c"]
dct = {k: v for k, v in zip(keys, values)}
print(dct)
dct2 = dict(zip(keys, values))
dct3 = dict([("a", 1), ("b", 2)])
print(dct2)
print(dct3) | Python | 29 | 23.689655 | 48 | /part-interview/test04.py | 0.393007 | 0.351049 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2020-03-01 12:45
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test05.py
# ----------------------------------------------
# 定义一个生成器的函数,需要用到 yield
def my_generate(nums):
for i in nums:
yield i
if __name__ == "__main__":
# 一行代码搞定交换字典的 key value 值
dct1 = {"A": 1, "B": 2}
dct2 = {str(v): k for k, v in dct1.items()}
print(dct2)
# 实现 tuple 和 list 的转换
a = (1, 2, 3)
print(a)
b = list(a)
print(b)
# 把 列表转换成生成器
c = [i for i in range(3)]
g = my_generate(c)
print(g)
# 遍历生成器
for i in g:
print(i, end=" ")
print("")
# 编码
a = "hello"
b = "你好"
print(a.encode("utf-8"))
print(b.encode("utf-8")) | Python | 38 | 19.552631 | 48 | /part-interview/test05.py | 0.420513 | 0.385897 |
wuljchange/interesting_python | refs/heads/master | from functools import partial
from socket import socket, AF_INET, SOCK_STREAM
class LazyConnection:
    """Context manager that opens a fresh socket on every ``with`` entry.

    Each ``__enter__`` pushes a new socket onto an internal stack, so
    the same LazyConnection instance may be nested: every ``with`` level
    gets its own connection and ``__exit__`` closes the most recent one.
    """

    def __init__(self, address, family=AF_INET, type=SOCK_STREAM):
        self.address = address
        self.family = family
        self.type = type
        # stack of currently open sockets, one per active `with` level
        self.connections = []

    def __enter__(self):
        conn = socket(self.family, self.type)
        conn.connect(self.address)
        self.connections.append(conn)
        return conn

    def __exit__(self, exc_type, exc_val, exc_tb):
        # close the innermost (most recently opened) connection
        self.connections.pop().close()
if __name__ == "__main__":
conn = LazyConnection(('http://www.baidu.com', 80))
# 嵌套使用conn
with conn as s1:
pass
with conn as s2:
pass | Python | 28 | 24.571428 | 66 | /part-class/test-with.py | 0.59021 | 0.584615 |
wuljchange/interesting_python | refs/heads/master | data = ['test', 90, 80, (1995, 8, 30)]
if __name__ == "__main__":
_, start, end, (_, _, day) = data
print(start)
print(end)
print(day) | Python | 8 | 18.125 | 38 | /part-struct/upack-value.py | 0.460526 | 0.388158 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2020-03-06 10:58
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test14.py
# ----------------------------------------------
class Demo(object):
    """Demo class showing common Python magic methods and when they fire."""
    # class attribute, shared by all instances
    count = 0
    def __init__(self, x, y):
        # instance initializer; runs after __new__ has returned an instance
        self.x = x
        self.y = y
        print("__init__ 方法被执行")
    def __new__(cls, *args, **kwargs):
        # constructor: __init__ only runs if this returns an instance of cls
        print("__new__ 方法被执行")
        return object.__new__(cls)
    def __str__(self):
        # used by print() / str()
        return "str test"
    def __repr__(self):
        # used by repr() and the interactive prompt
        return "repr test"
    def __del__(self):
        print("del")
    def __getattribute__(self, item):
        # attribute-access interceptor: every lookup goes through here,
        # so reads of `x` always yield the literal "redirect x"
        if item == "x":
            return "redirect x"
        else:
            return object.__getattribute__(self, item)
    def __call__(self, *args, **kwargs):
        # makes instances callable: d(a, b) rebinds x and y
        self.x, self.y = args
        print("__call__")
def test(a):
    # demo helper: show that the argument is the very object the caller
    # passed (same id), illustrating pass-by-object-reference
    print(a)
    print(id(a))
if __name__ == "__main__":
# 列举你所知道的 python 的魔法方法及用途
# python 有一些内置定义好的方法,这些方法在特定的时期会被自动调用
# __init__ 函数,创建实例化对象为其赋值使用,是在 __new__ 之后使用,没有返回值
# __new__ 是实例的构造函数,返回一个实例对象,__init__ 负责实例初始化操作,必须有返回值,返回一个实例对象
d = Demo(1, 2)
print(d)
print(d.x)
print(d.y)
# 获取指定类的所有父类
print(Demo.__bases__)
d(3, 4)
print(d.x)
print(d.y)
# print(type(d))
# # 获取已知对象的类
# print(d.__class__)
# type 用于查看 python 对象类型
print(type(d))
# 对于可变数据类型和不可变数据类型有差异,可变数据类型用引用传参,不可变数据类型用传值
# 不可变数据类型包括,数字,字符串,元组
# 可变数据类型包括,列表,字典,集合
a = 1
print(a)
print(id(a))
test(a)
a = [1, 2]
print(a)
print(id(a))
test(a)
a = {1, 2}
print(a)
print(id(a))
test(a)
# 简述 any(),all() 方法
# any 数组中的所有元素只要有一个为 True 就返回 True
if any([True, False]):
print("any")
# all 数组中的所有元素只要有一个为 False 就返回 False
if all([True, False]):
print("all")
else:
print("not all")
| Python | 95 | 19.589474 | 66 | /part-interview/test14.py | 0.478305 | 0.465544 |
wuljchange/interesting_python | refs/heads/master | import collections
import bisect
class ItemSequence(collections.abc.Sequence):
    """A sequence that keeps its items sorted at all times.

    ``collections.abc.Sequence`` supplies __contains__, __iter__,
    index(), count() etc. on top of __getitem__/__len__.  (The ABC
    aliases were removed from the ``collections`` top level in Python
    3.10, so the base class must be spelled ``collections.abc.Sequence``.)
    """
    def __init__(self, initial=None):
        # sort a copy of the initial items so the invariant holds from the start
        self._items = sorted(initial) if initial is not None else []
    def __getitem__(self, item):
        return self._items[item]
    def __len__(self):
        return len(self._items)
    # bisect.insort inserts item while keeping self._items sorted
    def add(self, item):
        bisect.insort(self._items, item)
if __name__ == "__main__":
test = ItemSequence([1, 2, 3]) | Python | 21 | 21.476191 | 68 | /part-class/test-iter-inial.py | 0.613588 | 0.607219 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2019-11-07 18:50
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test-sanic.py
# ----------------------------------------------
from sanic import Sanic
from sanic.response import json
from pprint import pprint
app = Sanic()
@app.route('/', methods=['POST'])
async def bili_flv(request):
pprint(request.raw_args)
return json(True)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8821, debug=True)
| Python | 23 | 21.217392 | 50 | /part-sanic/test-sanic.py | 0.495108 | 0.454012 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2020-03-04 16:38
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test10.py
# ----------------------------------------------
import redis
import uuid
import time
from threading import Thread
redis_client = redis.Redis(host="127.0.0.1", port=6379, username="test", password="test", db=0)
def acquire_lock(lock_name, acquire_time=10, time_out=10):
    """Try to take a Redis-based distributed lock.

    :param lock_name: name of the lock (stored under "string:lock:<name>")
    :param acquire_time: how many seconds the client keeps retrying
    :param time_out: expiry (seconds) placed on the lock key
    :return: the unique identifier string on success, False on timeout
    """
    identifier = str(uuid.uuid4())
    end = time.time() + acquire_time
    lock = "string:lock:" + lock_name
    while time.time() < end:
        # SETNX only inserts when the key is absent: returns 1 on success,
        # 0 if another client already holds the lock.
        # NOTE(review): SETNX followed by a separate EXPIRE is not atomic —
        # if this client dies in between, the key has no TTL until another
        # caller repairs it below; SET(nx=True, ex=...) would close that gap.
        if redis_client.setnx(lock, identifier):
            # give the key a TTL so a crashed holder cannot block forever
            redis_client.expire(lock, time_out)
            # lock acquired: return the identifier needed to release it later
            return identifier
        # key exists but has no TTL (e.g. the race above): (re)arm the expiry
        elif not redis_client.ttl(lock):
            redis_client.expire(lock, time_out)
        time.sleep(0.001)
    return False
def release_lock(lock_name, identifier):
    """Release the lock, but only if *identifier* still owns it.

    :param lock_name: name of the lock (same namespace as acquire_lock)
    :param identifier: uuid string previously returned by acquire_lock
    :return: True if released (or already gone), False if owned by someone else
    """
    lock = "string:lock:" + lock_name
    pip = redis_client.pipeline(True)
    while True:
        try:
            # WATCH makes the MULTI/EXEC below abort if the key changes
            # between here and execute()
            pip.watch(lock)
            # NOTE(review): the GET goes through redis_client, not the watched
            # pipeline — confirm this still participates in the WATCH guard.
            lock_value = redis_client.get(lock)
            if not lock_value:
                # lock already expired / released — nothing to do
                return True
            if lock_value.decode() == identifier:
                # still ours: delete it inside the transaction
                pip.multi()
                pip.delete(lock)
                pip.execute()
                return True
            # held by a different owner: stop watching and give up
            pip.unwatch()
            break
        except redis.exceptions.WatchError:
            # key changed between WATCH and EXEC — retry the whole check
            pass
    return False
def sec_kill():
    """Worker body: grab the "resource" lock, report, then release it."""
    # bugfix: Thread.getName is an instance method; calling it on the
    # Thread class (no instance bound) raised TypeError.  Use the
    # current thread's name instead.
    from threading import current_thread
    identifier = acquire_lock("resource")
    print(current_thread().name, "acquire resource")
    release_lock("resource", identifier)
if __name__ == "__main__":
for i in range(50):
t = Thread(target=sec_kill)
t.start()
| Python | 77 | 25.350649 | 101 | /part-interview/test10.py | 0.53721 | 0.517989 |
wuljchange/interesting_python | refs/heads/master | from collections import Iterable
import random
import heapq
# 处理嵌套列表
def flatten(items, ignore_types=(str, bytes)):
    """Recursively flatten arbitrarily nested iterables.

    Types listed in *ignore_types* (strings/bytes by default) are
    yielded whole instead of being iterated character by character.

    :param items: possibly-nested iterable
    :param ignore_types: types treated as atoms, not containers
    :return: generator of leaf elements in depth-first order
    """
    # Local import: the module-level `from collections import Iterable`
    # alias was removed in Python 3.10; collections.abc is its supported home.
    from collections.abc import Iterable
    for item in items:
        if isinstance(item, Iterable) and not isinstance(item, ignore_types):
            yield from flatten(item)
        else:
            yield item
if __name__ == "__main__":
l1 = [1, 2, 3, 4, 5, 10, 9]
l2 = [2, 3, 4, 5, 8, 6, 11]
for i in heapq.merge(l1, l2):
print(i)
print("end")
items = [1, 2, 3, [2, 3, 4], [5, 6, 7]]
for i in flatten(items):
print(i)
# 改变输出的分割符和行尾符
print(1, 2, sep=' ', end='#\n')
# str.join()只能连接字符串,非字符串的需要用sep方式隔开
d = ["wulj", 1, 2]
print(*d, sep=',')
data = {'name1': ['vau1', 'vau2'], 'name2': ['vau1', 'vau2'], 'name3': ['vau1', 'vau2']}
print(list(data.items()))
k, v = random.choice(list(data.items()))
data = {
k: random.choice(v)
}
random.choice()
print(data) | Python | 36 | 25 | 92 | /part-text/test-yield.py | 0.528342 | 0.483422 |
wuljchange/interesting_python | refs/heads/master | from marshmallow import Schema, fields, pprint, post_load, post_dump, ValidationError
from datetime import datetime
class VideoLog(object):
    """Plain video-log record: stores every keyword argument as an attribute."""

    def __init__(self, **data):
        # bulk-assign all keyword args as instance attributes
        self.__dict__.update(data)

    def __str__(self):
        return '<VideoLog_str(name={})>'.format(self.name)

    def __repr__(self):
        return '<VideoLog_repr(name={})>'.format(self.name)
class User(object):
    """Plain user record with an optional list of attached video logs."""

    def __init__(self, name, age, email, videos=None):
        self.name = name
        self.age = age
        self.email = email
        # reuse the caller's list when one is given, otherwise start empty
        self.videos = [] if not videos else videos

    def __str__(self):
        return '<User_str(name={})>'.format(self.name)

    def __repr__(self):
        return '<User_repr(name={})>'.format(self.name)
class VideoLogSchema(Schema):
    """marshmallow schema that (de)serializes VideoLog objects."""
    title = fields.Str(required=True)
    content = fields.Str(required=True)
    created_time = fields.DateTime()
    @post_load
    def make_data(self, data):
        # after a successful load, wrap the raw dict in a VideoLog instance
        return VideoLog(**data)
class UserSchema(Schema):
    """Schema for User, with nested VideoLogSchema entries under `videos`."""
    name = fields.Str()
    age = fields.Int()
    email = fields.Email()
    videos = fields.Nested(VideoLogSchema, many=True)
    @post_load
    def make_data(self, data):
        # turn the loaded dict into a User instance
        return User(**data)
# 继承前面定义好的schema类
class ProVideoSchema(VideoLogSchema):
    """VideoLogSchema extended with a nested list of fan Users."""
    fans = fields.Nested(UserSchema, many=True)
    @post_load
    def make_data(self, data):
        # overrides the parent hook; still builds a VideoLog
        return VideoLog(**data)
class TestAttributeSchema(Schema):
    """Schema whose field names differ from the object attribute names.

    ``attribute=`` points a schema field at the underlying User attribute
    (e.g. dumped key ``new_name`` reads ``obj.name``).
    """
    new_name = fields.Str(attribute='name')
    age = fields.Int()
    email_addr = fields.Email(attribute='email')
    new_videos = fields.Nested(VideoLogSchema, many=True)
    @post_load
    def make_data(self, data):
        return User(**data)
# 重构,隐式字段创建
class NewUserSchema(Schema):
    """Implicit-field schema: Meta.fields declares which names to serialize."""
    # computed field: the user's name upper-cased at dump time
    uppername = fields.Function(lambda obj: obj.name.upper())
    class Meta:
        fields = ("name", "age", "email", "videos", "uppername")
if __name__ == "__main__":
# 序列化为字典 example
video = VideoLog(title='example', content='test', created_time=datetime.now())
video_schema = VideoLogSchema()
video_ret = video_schema.dump(video)
pprint(video_ret.data)
# 反序列化为类 example
user_dct = {'name': 'wulj', 'age': 24, 'email': 'wuljfly@icloud.com', 'videos': [video_ret.data]}
user_schema = UserSchema()
user_ret = user_schema.load(user_dct)
pprint(user_ret.data)
# 测试validate error
test_video = {'title': 'test_validate'}
try:
print('test')
schema = VideoLogSchema()
ret = schema.load(test_video)
pprint(ret.data)
except ValidationError as err:
print('error')
pprint(err.valid_data)
# 测试partial,处理required=True的
partial_video = {'title': 'partial', 'created_time': datetime.now()}
ret = VideoLogSchema().load(partial_video, partial=('content', ))
print(ret)
new_ret = VideoLogSchema(partial=('content', )).load(partial_video)
new1_ret = VideoLogSchema(partial=True).load(partial_video)
new2_ret = VideoLogSchema().load(partial_video, partial=True)
# 测试attribute,指定属性名称
test_user_attribute = User(name='attribute', age=23, email='new@test.com', videos=[])
attribute_ret = TestAttributeSchema().dump(test_user_attribute)
pprint(attribute_ret.data)
| Python | 121 | 26.272728 | 90 | /part-marshmallow/test-load&dump2.py | 0.625492 | 0.623675 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2020-03-03 20:58
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test07.py
# ----------------------------------------------
from pymongo import MongoClient
class PyMongoDemo:
    """Tiny pymongo demo: connect, insert two sample users, read them back."""

    def __init__(self):
        # credentials/host here are placeholders for the demo
        self.client = MongoClient("mongodb://{username}:{password}@{host}:{port}"
                                  .format(username="test", password="test", host="test", port=27137))
        self.db = self.client.my_db  # database handle
        self.tb = self.db.tb  # collection handle

    def insert_data(self):
        """Insert two fixed sample documents into the collection."""
        users = [{"name": "test", "age": 10}, {"name": "nb", "age": 18}]
        # NOTE(review): Collection.insert was removed in pymongo 4;
        # insert_many is the modern call — confirm the pinned pymongo version.
        self.tb.insert(users)

    def get_data(self):
        """Insert the sample data, then print every document found."""
        # bugfix: the method is named insert_data — the original called
        # self.insertData(), which raised AttributeError at runtime.
        self.insert_data()
        for data in self.tb.find():
            print(data)
if __name__ == "__main__":
m = PyMongoDemo()
m.get_data()
col = MongoClient("the_client").get_database("the_db").get_collection("the_col")
col.create_index([("field", 1)], unique=False) | Python | 32 | 30.40625 | 107 | /part-interview/test07.py | 0.49004 | 0.465139 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2020-03-08 12:13
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test20.py
# ----------------------------------------------
from collections import defaultdict
if __name__ == "__main__":
# 找出列表中重复的元素
data = [1, 2, 3, 4, 3, 5, 5, 1]
dct = defaultdict(list)
for d in data:
dct[str(d)].append(d)
print(dct)
for k, v in dct.items():
if len(v) > 1:
print(k)
s = "+++--++--"
print("".join(sorted(s))) | Python | 22 | 23.818182 | 48 | /part-interview/test20.py | 0.39633 | 0.352294 |
wuljchange/interesting_python | refs/heads/master | # 使用lambda对list排序,正数在前,从小到大,负数在后,从大到小
# lambda设置2个条件,先将小于0的排在后面,再对每一部分绝对值排序
data = [-5, 8, 0, 4, 9, -4, -20, -2, 8, 2, -4]
a = sorted(data, key=lambda x: (x < 0, abs(x)))
print(a) | Python | 5 | 35 | 47 | /part-interview/test01.py | 0.631285 | 0.547486 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2020-03-04 10:32
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test08.py
# ----------------------------------------------
import redis
if __name__ == "__main__":
# redis 现有的数据类型
# 1. String 二进制安全,可以包含任何数据,一个 key 对应一个 value
# SET key value,GET key,DEL key
# 2. Hash 数据类型,键值对集合,适合存储对象的属性
# HMSET key field1 value1 field2 value2,HGET key field1
# 3. List 数据类型,双向链表,消息队列
# lpush key value,lrange key 0 10
# 4. Set 数据类型,hash 表实现,元素不重复
# sadd key value,smembers key
# 5. zset 数据类型,有序集合
# zadd key score member,排行榜,带权重的消息队列
# python 连接 redis
# 普通的连接方式
redis_conn = redis.Redis(host="127.0.0.1", port=6379, username="test", password="test", db=0)
# 连接池的方式
redis_pool = redis.ConnectionPool(host="127.0.0.1", port=6379, username="test", password="test", db=0)
redis_conn1 = redis.Redis(connection_pool=redis_pool)
# String 字符串
# set 操作,ex 过期时间(秒),px 过期时间(毫秒),nx (name 不存在时,当前操作才执行),xx (name 存在时,当前操作才执行)
redis_conn.set(name="test", value="test", ex="300", px=None, nx=True, xx=False)
# get 操作
v = redis_conn.get("test")
print(v)
# mset 设置多个值
redis_conn.mset({"1": 1, "2": 2})
# mget 获取多个值
m = redis_conn.mget(["1", "2"])
# getset 给已有的键设置新值
v = redis_conn.getset("1", 2)
# setrange 根据索引修改某个键的value值,返回的是值的长度
lg = redis_conn.setrange("1", 0, "232")
# getrange 根据索引获取键的部分value值,当所给键不存在时,返回 b''
v1 = redis_conn.getrange("key", 1, 2)
# strlen 获取value长度,如果没有key 返回 0
lg1 = redis_conn.strlen("key")
# incr/decr,int 类型的值或者字符串的数值,默认为1
v2 = redis_conn.incr("key", amount=1)
v3 = redis_conn.decr("key", amount=1)
# incrbyfloat,浮点数自增
v4 = redis_conn.incrbyfloat("key", amount=1.0)
# append,追加字符串,如果不存在 key 就设置新值,返回value的长度
lg2 = redis_conn.append("key", "666")
# List,在redis中,1个key对应一个列表
# lpush/rpush,返回列表的大小,当键不存在时,创建新的列表
lg3 = redis_conn.lpush("key", 1, 2, "test")
# lpushx/rpushx,当键不存在时,不添加也不创建新的列表
lg4 = redis_conn.lpushx("key", "value")
# llen,获取所给key列表的大小
lg5 = redis_conn.llen("key")
# linsert,在指定位置插入新值,ref_key 不存在就返回 0 ,否则就返回插入后list的长度
lg6 = redis_conn.linsert("key", "AFTER", "ref_key", "value")
# lset 通过索引赋值,返回 boolean 值
bl = redis_conn.lset("key", 0, "value")
# lindex 通过索引获取列表中的值
v6 = redis_conn.lindex("key", 0)
# lrange,获取列表中的一段数据
v7 = redis_conn.lrange("key", 0, 5)
# lpop/rpop 删除左边或者右边第一个值,返回被删除元素的值
v8 = redis_conn.lpop("key")
# lrem 删除列表中N个相同的值,返回被删除元素的个数
v9 = redis_conn.lrem("key", "value", -2)
# ltrim 删除列表范围外的所有元素
v10 = redis_conn.ltrim("key", 5, 6)
# blpop 删除并返回列表最左边的值,返回一个元组 (key, value)
v11 = redis_conn.blpop("key")
# rpoplpush 一个列表最右边的元素取出后添加到列表的最左边,返回取出的元素值
v12 = redis_conn.rpoplpush("key1", "key2")
# Hash,value 值一个 map
# hset,返回添加成功的个数
v13 = redis_conn.hset("key", "key1", "value")
# hmset 添加多个键值对
v14 = redis_conn.hmset("key", {"1": 1, "2": 2})
# hmget 获取多个键值对
v15 = redis_conn.hmget("key", ["1", "2"])
# hget
v16 = redis_conn.hget("key", "1")
# hgetall,获取所有的键值对
v17 = redis_conn.hgetall("name")
# hlen 获取键值对的个数
v18 = redis_conn.hlen("name")
# hkeys 获取所有的键
v19 = redis_conn.hkeys("name")
# hvals 获取所有的value
v20 = redis_conn.hvals("name")
# hexists 检查 hash 中是否存在某个 key
v21 = redis_conn.hexists("name", "key")
# hdel 删除 hash 中的键值对
v22 = redis_conn.hdel("name", "key1", "key2")
# hincrby 自增 hash 中的 value 值
v23 = redis_conn.hincrby("name", "key", -1)
# hincrbyfloat
v24 = redis_conn.hincrbyfloat("name", "key", 1.0)
# expire 设置某个键的过期时间
v25 = redis_conn.expire("name", "key")
# Set
# sadd 插入元素到集合中
s = redis_conn.sadd("name", "1", 3, 4)
# scard 返回集合中元素的个数
s1 = redis_conn.scard("name")
# smembers 获取集合中所有的元素
s2 = redis_conn.smembers("name")
# srandmember 随机获取一个或者N个元素
s3 = redis_conn.srandmember("name", number=2)
# sismember 判断一个值是否在集合中
s4 = redis_conn.sismember("name", "value")
# spop 随机删除集合中的元素
s5 = redis_conn.spop("name")
# srem 删除集合中的一个或者多个元素,返回删除元素的个数
s6 = redis_conn.srem("name", "a", "b")
# smove 将集合中的一个元素移动到另一个集合中去
s7 = redis_conn.smove("name1", "name2", "a")
# sdiff 两个集合求差集
s8 = redis_conn.sdiff("name1", "name2")
# sinter 两个集合求交集
s9 = redis_conn.sinter("name1", "name2")
# sunion 并集
s10 = redis_conn.sunion("name1", "name2")
# Zset
# redis 的事务
# MULTI 开始事务,命令入队,EXEC 执行事务,DISCARD 放弃事务。
# 与 mysql 事务的概念有所区别,不是原子性的,如果事务中途有命令失败,不会回滚,并继续往下执行。
# redis 对于单个命令的执行是原子性的
# 分布式锁是什么
# 分布式锁主要用于分布式集群服务互斥共享累或者方法中的变量,对于单机应用而言,可以采用并行处理互斥
# 分布式锁具备那些条件
# 1. 在分布式系统环境下,一个方法在同一时间只能被一个机器的一个线程执行
# 2. 高可用的获取锁与释放锁
# 3. 高性能的获取锁与释放锁
# 4. 具备可重入特性
# 5. 具备锁失效机制,防止死锁
# 6. 具备非阻塞锁特性,即没有获取到锁将直接返回获取锁失败 | Python | 150 | 31.9 | 112 | /part-interview/test08.py | 0.608026 | 0.571747 |
wuljchange/interesting_python | refs/heads/master | from selenium import webdriver
from selenium.webdriver.common.by import By
if __name__ == "__main__":
# 加载浏览器
browser = webdriver.Chrome()
# 获取页面
browser.get('https://www.baidu.com')
print(browser.page_source)
# 查找单个元素
input_first = browser.find_element_by_id('q')
input_second = browser.find_element_by_css_selector('#q')
input_third = browser.find_element(By.ID, 'q')
# 查找多个元素
input_elements = browser.find_elements(By.ID, 'q')
# 元素交互操作,搜索框查询
| Python | 17 | 28.117647 | 61 | /part-selenium/test01.py | 0.650505 | 0.650505 |
wuljchange/interesting_python | refs/heads/master | from operator import itemgetter, attrgetter
class User:
    """Minimal user record used to demo attrgetter-based sorting."""

    def __init__(self, uid, name):
        self.uid, self.name = uid, name

    def get_name(self):
        """Return the user's display name."""
        return self.name
if __name__ == "__main__":
datas = [
{'fname': 'Brian', 'lname': 'Jones', 'uid': 1003},
{'fname': 'David', 'lname': 'Beazley', 'uid': 1002},
{'fname': 'John', 'lname': 'Cleese', 'uid': 1001},
{'fname': 'Big', 'lname': 'Jones', 'uid': 1004}
]
row1 = sorted(datas, key=itemgetter('fname', 'lname'))
print(row1)
row2 = sorted(datas, key=lambda x: x['uid'])
print(row2)
users = [User(1, 'first'), User(3, 'second'), User(2, 'third')]
row3 = sorted(users, key=attrgetter('uid', 'name'))
min_user = min(users, key=attrgetter('uid'))
max_user = max(users, key=lambda u: u.name)
print(min_user.uid, min_user.name)
print(max_user.uid, max_user.name)
print(row3) | Python | 30 | 30 | 67 | /part-struct/sort-dict.py | 0.555436 | 0.528525 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2020-03-05 20:43
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test13.py
# ----------------------------------------------
import test10
if __name__ == "__main__":
# python3 高级特性,反射
# 字符串返回映射到代码的一种机制,python3 提供了四个内置函数 getattr setattr hasattr delattr
obj = getattr(test10, "acquire_lock")
if hasattr(test10, "acquire_lock"):
print("test")
else:
print("new")
# metaclass 作用以及应用场景
# 元类是一个创建类的类
| Python | 21 | 24.142857 | 71 | /part-interview/test13.py | 0.477273 | 0.433712 |
wuljchange/interesting_python | refs/heads/master | from collections import deque
def search(lines, pattern, history):
    """Scan *lines* and return, as a bounded deque, the last *history*
    lines that contain the substring *pattern*.

    :param lines: iterable of strings (e.g. an open text file)
    :param pattern: substring to match
    :param history: maximum number of matches to retain
    :return: deque (maxlen=history) of the most recent matching lines
    """
    matches = (line for line in lines if pattern in line)
    # a deque built with maxlen keeps only the trailing `history` items
    return deque(matches, maxlen=history)
if __name__ == "__main__":
with open('tmp/test', 'r') as f:
s = search(f, 'python', 5)
print(s)
s.append('python9')
s.appendleft('python')
s.pop()
s.popleft()
for line in s:
print(line)
print("end") | Python | 22 | 21.5 | 37 | /part-struct/test-deque.py | 0.52834 | 0.524292 |
wuljchange/interesting_python | refs/heads/master | from collections import Counter, defaultdict
import requests
import arrow
class Data:
    # trivial wrapper that just stores whatever payload it is given
    def __init__(self, data):
        self.data = data
if __name__ == "__main__":
url = 'http://10.100.51.45/rate/repair?start={}&end={}'
start = '2019-04-01 23:10'
end = '2019-04-07 23:10'
ret = requests.get(url.format(start, end))
print(ret.status_code)
# dct = {}
# d = Data('1')
# dct['re'] = d
# print(dct)
# test = defaultdict()
# data = [{'key1': 1}, {'key2': 2}]
# for d in data:
# test.update(d)
# print(test)
# dct = {'data': test}
# for k, v in dct.items():
# print(k)
# for k1, v1 in v.items():
# print(k1)
# print(v1)
# data = [1, 2, 3, 5, 5]
# data2 = [2, 3, 4, 6, 2]
# a = set(d for d in data)
# print(a)
# b = set(d for d in data2)
# print(b)
# print(a & b)
# print(a - (a & b))
# t = tuple(d for d in data)
# for i in t:
# print(i)
# print(tuple(d for d in data))
# link = 'http://jira.op.ksyun.com/browse/BIGDATA-614/test'
# print(link.split('/')[-2])
# print('http'.upper())
# for dct in data:
# for key in ('key1', 'key2'):
# if key not in dct.keys():
# dct[key] = 0
# print(data)
# ret = defaultdict(Counter)
# data1 = {'name': {'key1': [1, 2, 3]}}
# data2 = {'name': {'key1': [2, 3, 4]}}
# for d in (data1, data2):
# for name, data in d.items():
# ret[name] += Counter(data)
# print(ret) | Python | 58 | 25.827587 | 63 | /part-text/test-tt.py | 0.47717 | 0.430225 |
wuljchange/interesting_python | refs/heads/master | from functools import partial
# 从指定文件按固定大小迭代
with open('file', 'rb') as f:
re_size = 32
records = iter(partial(f.read, re_size), b'')
for r in records:
print(r) | Python | 9 | 19.333334 | 49 | /part-text/test-fixed-record.py | 0.615385 | 0.604396 |
wuljchange/interesting_python | refs/heads/master | records = [('foo', 1, 2),
('bar', 'hello'),
('foo', 3, 4),
]
def drop_first_last(grades):
    """Return *grades* as a list without its first and last elements.

    Raises ValueError (from star-unpacking) when fewer than two
    elements are supplied, matching the original contract.
    """
    head, *rest = grades
    *middle, tail = rest
    return middle
def do_foo(x, y):
    # handler for 'foo' records: echo both payload values
    print('foo', x, y)
def do_bar(s):
    # handler for 'bar' records: echo the single payload value
    print('bar', s)
if __name__ == "__main__":
for tag, *args in records:
print(args)
print(*args)
if tag == 'foo':
do_foo(*args)
elif tag == 'bar':
do_bar(*args)
print("done") | Python | 28 | 15.892858 | 30 | /part-struct/unpack-value2.py | 0.425847 | 0.417373 |
wuljchange/interesting_python | refs/heads/master | import re
import os
if __name__ == "__main__":
s = " hello new world \n"
# strip用于取出首尾指定字符
print(s.strip())
print(s.lstrip())
print(s.rstrip())
s = "test ?"
s1 = s.replace('?', 'new')
print(s1)
s2 = re.sub('new', 'fresh', s1, flags=re.IGNORECASE)
print(s2) | Python | 15 | 18.933332 | 56 | /part-text/test-strip.py | 0.526846 | 0.510067 |
wuljchange/interesting_python | refs/heads/master | class Structure1:
_fields = []
def __init__(self, *args, **kwargs):
if len(args) > len(self._fields):
raise TypeError('Excepted {} arguments'.format(len(self._fields)))
for name, value in zip(self._fields, args):
setattr(self, name, value)
for name in self._fields[len(args):]:
setattr(self, name, kwargs.pop(name))
if kwargs:
raise TypeError('Invalid arguments {}'.format(','.join(kwargs)))
class Stock(Structure1):
    # concrete Structure1: instances take name, age and career
    _fields = ["name", "age", "career"]
class Structure2:
    """Variant of Structure1: all fields are positional and extra keyword
    attributes are allowed on top of them.

    Exactly ``len(_fields)`` positional args are required; any keyword
    whose name is not a declared field becomes an extra attribute; a
    keyword that duplicates a field name is rejected.
    """
    _fields = ["name", "age", "career"]

    def __init__(self, *args, **kwargs):
        if len(args) != len(self._fields):
            raise TypeError('Excepted {} arguments'.format(len(self._fields)))
        for name, value in zip(self._fields, args):
            setattr(self, name, value)
        # bugfix: `kwargs - self._fields` raised TypeError on every call
        # (dict minus list is unsupported); the set difference must be
        # taken on the keys view.
        extra_args = kwargs.keys() - self._fields
        for name in extra_args:
            setattr(self, name, kwargs.pop(name))
        # whatever remains duplicates a declared field — invalid
        if kwargs:
            raise TypeError('Invalid arguments {}'.format(','.join(kwargs)))
if __name__ == "__main__":
data = ["test1", "test2", "name"]
kwargs = {"name": "wulj", "age": 23}
print(kwargs.keys()-data)
test_dict = {"name": "value", "test": "new"}
print(','.join(test_dict))
s1 = Stock("Alex", 23, "programmer")
print(s1.name, s1.age, s1.career)
s2 = Stock("lucy", age=22, career="teacher")
print(s2)
s3 = Stock("Mary", 23, "player", "like")
print(s3)
| Python | 51 | 28.588236 | 78 | /part-class/test-class.py | 0.553347 | 0.53943 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.