blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d799d742c80e6da6a9e0aa57eb969af57adb516f | 8229164ee1e5167deac7d92d45c03c41e41c2c03 | /misc/image_segmentation_helper_functions.py | ffdead7b62f5cbe80667d536a2666dbd175ae418 | [] | no_license | maxgrossenbacher/Reuse_me | d29d89390eca806a137979c416e3947fa3bb6081 | f9ca8605e6f46f844f1e743307cc28b486253464 | refs/heads/master | 2020-05-26T01:30:43.950991 | 2019-05-28T14:58:27 | 2019-05-28T14:58:27 | 188,061,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66,978 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
import os
import warnings
import sys
import glob
import datetime
import pickle
import time
from rsgislib.segmentation import segutils
import rsgislib
from rsgislib import imagefilter
from rsgislib import imageutils
from rsgislib import rastergis
from rsgislib.rastergis import ratutils
import gdal
import rios
from skimage import graph, data, io, segmentation, color
from skimage.segmentation import slic
from skimage.exposure import rescale_intensity
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float
from skimage import io
from skimage.future import graph
from skimage.measure import regionprops
from skimage.color import rgb2lab, rgb2xyz, xyz2lab, rgb2hsv
from skimage import exposure
from skimage import draw
import traceback
import cv2
import heapq
import imutils
# from imutils import build_montages
from imutils import paths
import multiprocessing as mp
from collections import defaultdict
from libtiff import TIFF
# module_path = os.path.abspath(os.path.join('helper_functions'))
# if module_path not in sys.path:
# sys.path.append(module_path)
import validation
import functions as fct
import extraction_helper as eh
warnings.filterwarnings('ignore')
plt.style.use('classic')
###############################################################################
# Get Image from Azure and output np.array()
def visualize(path, title=None, plot=False):
    '''
    DESC: Load a TIFF image from `path`, normalize it to uint8 and display
          its first three (RGB) channels.
    INPUT: path=str, optional(title=str), plot(bool) - call plt.show() when True
    -----
    OUTPUT: np.array with dtype uint8
    '''
    tif = TIFF.open(path, mode="r")
    arr = tif.read_image()
    # Scale 16-bit values by 1/65535, min-max stretch, then standardize to RGB.
    arr = eh.rgb_standardization(eh.minmax_scaling(eh.normalization(arr, 65535.)))
    arr = arr.astype(np.uint8)
    plt.imshow(arr[:, :, :3])
    plt.title(title)
    if plot:
        plt.show()
    return arr
###############################################################################
# Griffin Features - processing bands and generating image features
def process_bands(original_bands, source='PlanetScope'):
    '''
    DESC: Processes DOVE/PlaentScope and RapidEye images by band
    INPUT: original_bands = np.array of image with shape (x,y,num_bands)
           band order assumed (blue, green, red, nir) -- TODO confirm with loader
    -----
    OUTPUT: 18-tuple: adaptively-equalized RGB stack, reference image (0 -> NaN),
            red/green/blue/nir reflectance arrays scaled by 1/65535, and the
            HSV, LAB, XYZ and XYZ->LAB channel arrays derived from the RGB stack.
            NOTE(review): for source='RapidEye' an image_rededge array is computed
            below but never returned -- looks like an oversight; verify callers.
    '''
    # Red band copied as float; zero pixels become NaN and act as the
    # no-data mask consumed by getMean/getImageMean downstream.
    reference_image = original_bands[:,:,2].copy() / 1.0
    reference_image[reference_image == 0.0] = np.nan
    # Scale each band to [0, 1] (16-bit max). The isnan fill-ins below are
    # no-ops for integer source data -- presumably defensive; confirm intent.
    image_red = original_bands[:,:,2].copy() / 65535.0
    image_red[np.isnan(image_red)] = 0
    image_green = original_bands[:,:,1].copy() / 65535.0
    image_green[np.isnan(image_green)] = 0
    image_blue = original_bands[:,:,0].copy() / 65535.0
    image_blue[np.isnan(image_blue)] = 0
    image_nir = original_bands[:,:,3].copy() / 65535.0
    image_nir[np.isnan(image_nir)] = 0
    if source == 'RapidEye':
        # NOTE(review): string indexing only works if original_bands is a
        # structured/record array -- this raises on a plain ndarray; confirm.
        image_rededge = original_bands['rededge'].copy() / 65535.0
        image_rededge[np.isnan(image_rededge)] = 0
    # NOTE(review): despite the name, this stacks (red, green, blue) -- i.e.
    # RGB order -- which matches the rgb2* conversions applied below.
    image_bgr = np.dstack((image_red, image_green, image_blue))
    ret_bgr = image_bgr.copy()
    # ret_bgr = exposure.adjust_log(ret_bgr)
    # Contrast-limited adaptive histogram equalization for the display copy.
    ret_bgr = exposure.equalize_adapthist(ret_bgr, clip_limit=0.04)
    # Derived colour spaces used as per-segment features.
    hsv = rgb2hsv(image_bgr)
    image_h = hsv[:, :, 0]
    image_s = hsv[:, :, 1]
    image_v = hsv[:, :, 2]
    lab = rgb2lab(image_bgr)
    image_l = lab[:, :, 0]
    image_a = lab[:, :, 1]
    image_b = lab[:, :, 2]
    xyz = rgb2xyz(image_bgr)
    image_x = xyz[:, :, 0]
    image_y = xyz[:, :, 1]
    image_z = xyz[:, :, 2]
    clab = xyz2lab(xyz)
    image_cl = clab[:, :, 0]
    image_ca = clab[:, :, 1]
    image_cb = clab[:, :, 2]
    return ret_bgr, reference_image, image_red, image_green, image_blue, image_nir, \
        image_h, image_s, image_v, image_l, image_a, image_b, image_x, image_y, image_z, \
        image_cl, image_ca, image_cb
################################################################################
# Helper functions for creating Griffin Features
def getMean(indx, image, reference_image):
    '''
    DESC: Interquartile mean of `image` over the pixel indices `indx`,
          restricted to pixels that are valid (non-NaN) in `reference_image`.
    INPUT: indx=tuple of row/col index arrays (np.where result),
           image=np.array, reference_image=np.array (NaN marks invalid pixels)
    -----
    OUTPUT: mean of values strictly between the 25th and 75th percentiles,
            or np.nan when no valid pixel remains
    '''
    ref_vals = reference_image[indx[0], indx[1]]
    vals = image[indx[0], indx[1]][~np.isnan(ref_vals)]
    if vals.size == 0:
        return np.nan
    lower = np.percentile(vals, 25)
    upper = np.percentile(vals, 75)
    trimmed = vals[(vals > lower) & (vals < upper)]
    return np.mean(trimmed)
def getImageMean(image, reference_image):
    """Mean of `image` over pixels that are valid (non-NaN) in the reference."""
    valid = ~np.isnan(reference_image)
    return np.mean(image[valid])
def get_SR(image_nir, image_red, indx, reference_image):
    """Mean simple ratio NIR / (red + 1) over the valid pixels of a segment.

    The +1 denominator guards against division by zero (presumably intentional).
    Returns (mean ratio, valid NIR samples, valid red samples) so the samples
    can be reused by the other index functions.
    """
    valid = ~np.isnan(reference_image[indx[0], indx[1]])
    data_nir = image_nir[indx[0], indx[1]][valid]
    data_red = image_red[indx[0], indx[1]][valid]
    sr = np.mean(data_nir / (data_red + 1))
    return sr, data_nir, data_red
def get_EVI(data_nir, data_red, image_blue, indx, reference_image):
    """Mean Enhanced Vegetation Index over a segment's valid pixels.

    EVI = 2.5 * (NIR - red) / ((1 + NIR + 6*red - 7.5*blue) + 1); the extra
    +1 guards the denominator. Also returns the valid blue samples.
    """
    valid = ~np.isnan(reference_image[indx[0], indx[1]])
    data_blue = image_blue[indx[0], indx[1]][valid]
    denom = (1.0 + data_nir + (6.0 * data_red) - (7.5 * data_blue)) + 1.0
    evi = np.mean(2.5 * np.divide(data_nir - data_red, denom))
    return evi, data_blue
def get_CL_green(data_nir, image_green, indx, reference_image):
    """Mean green chlorophyll index, NIR / (green + 1) - 1, over valid pixels.

    Also returns the valid green samples for reuse by the caller.
    """
    valid = ~np.isnan(reference_image[indx[0], indx[1]])
    data_green = image_green[indx[0], indx[1]][valid]
    cl = np.mean(np.divide(data_nir, data_green + 1.0) - 1.0)
    return cl, data_green
def get_MTCI(data_nir, data_rededge, data_red):
    """Mean MTCI: (NIR - rededge) / ((rededge - red) + 1); +1 guards the denominator."""
    ratio = np.divide(data_nir - data_rededge, (data_rededge - data_red) + 1.0)
    return np.mean(ratio)
def get_data_blue(image_blue, indx, reference_image):
    """Blue-band samples at `indx`, keeping only pixels valid in the reference."""
    valid = ~np.isnan(reference_image[indx[0], indx[1]])
    return image_blue[indx[0], indx[1]][valid]
################################################################################
# Griffin Features
def extractFeatures(reference_image, cluster_segments, cluster_list, image_red, image_green, image_blue, image_nir, image_h, image_s, image_v,
                    image_l, image_a, image_b, image_x, image_y, image_z, image_cl, image_ca, image_cb, image_rededge, img_date, source):
    '''
    DESC: Griffin Feature generation - create a df of features by band per segement for image
    INPUT: reference_image = np.array (NaN marks invalid pixels), cluster_segments=np.array segment mask, cluster_list=list of unique segments,
        image_red - image_rededge = output from process_bands fxn (image_rededge may be None for PlanetScope),
        img_date=datetime object [year,month,day] (tuple), source=str()- PlaentScope or RapidEye
    -----
    OUTPUT: pd.DataFrame with one row per segment in cluster_list; RapidEye adds
            the rededge-derived columns (CL_rededge, MTCI, rededge means).
    '''
    # NOTE(review): both branches compute the same value -- the if/else is redundant.
    if source == 'RapidEye':
        day_of_year = float(img_date.timetuple().tm_yday) / 365.0
    else:
        day_of_year = float(img_date.timetuple().tm_yday) / 365.0
    # Whole-image means, computed once and repeated on every segment row below
    # so models can compare a segment against its image context.
    image_mean_red = getImageMean(image_red, reference_image)
    image_mean_green = getImageMean(image_green, reference_image)
    image_mean_blue = getImageMean(image_blue, reference_image)
    image_mean_rededge = 0
    if source == 'RapidEye':
        image_mean_rededge = getImageMean(image_rededge, reference_image)
    image_mean_nir = getImageMean(image_nir, reference_image)
    image_mean_h = getImageMean(image_h, reference_image)
    image_mean_s = getImageMean(image_s, reference_image)
    image_mean_v = getImageMean(image_v, reference_image)
    image_mean_l = getImageMean(image_l, reference_image)
    image_mean_a = getImageMean(image_a, reference_image)
    image_mean_b = getImageMean(image_b, reference_image)
    image_mean_x = getImageMean(image_x, reference_image)
    image_mean_y = getImageMean(image_y, reference_image)
    image_mean_z = getImageMean(image_z, reference_image)
    image_mean_cl = getImageMean(image_cl, reference_image)
    image_mean_ca = getImageMean(image_ca, reference_image)
    image_mean_cb = getImageMean(image_cb, reference_image)
    # One list per output column; each loop iteration appends exactly one
    # value to every list so the dict converts cleanly to a DataFrame.
    features = dict()
    features['day_of_year'] = []
    features['SR'] = []
    features['CL_green'] = []
    if source == 'RapidEye':
        features['CL_rededge'] = []
        features['MTCI'] = []
    features['red_mean'] = []
    features['green_mean'] = []
    features['blue_mean'] = []
    if source == 'RapidEye':
        features['rededge_mean'] = []
    features['nir_mean'] = []
    features['segment']=[]
    features['h_mean'] = []
    features['s_mean'] = []
    features['v_mean'] = []
    features['l_mean'] = []
    features['a_mean'] = []
    features['b_mean'] = []
    features['x_mean'] = []
    features['y_mean'] = []
    features['z_mean'] = []
    features['cl_mean'] = []
    features['ca_mean'] = []
    features['cb_mean'] = []
    features['image_mean_red'] = []
    features['image_mean_green'] = []
    features['image_mean_blue'] = []
    if source == 'RapidEye':
        features['image_mean_rededge'] = []
    features['image_mean_nir'] = []
    features['image_mean_h'] = []
    features['image_mean_s'] = []
    features['image_mean_v'] = []
    features['image_mean_l'] = []
    features['image_mean_a'] = []
    features['image_mean_b'] = []
    features['image_mean_x'] = []
    features['image_mean_y'] = []
    features['image_mean_z'] = []
    features['image_mean_cl'] = []
    features['image_mean_ca'] = []
    features['image_mean_cb'] = []
    features['normalized_R'] = []
    features['normalized_G'] = []
    features['normalized_B'] = []
    features['mean_R_by_B'] = []
    features['mean_R_by_B_plus_R'] = []
    features['mean_chroma'] = []
    features['R-G'] = []
    features['R-B'] = []
    features['G-R'] = []
    features['G-B'] = []
    features['B-R'] = []
    features['B-G'] = []
    # Per-segment feature extraction.
    for cluster_num in cluster_list:
        cluster_indx = np.where(cluster_segments == cluster_num)
        features['day_of_year'].append(day_of_year)
        # get_SR also returns the valid NIR/red samples, reused below.
        sr, data_nir, data_red = get_SR(image_nir, image_red, cluster_indx, reference_image)
        features['SR'].append(sr)
        data_blue = get_data_blue(image_blue, cluster_indx, reference_image)
        cl_green, data_green = get_CL_green(data_nir, image_green, cluster_indx, reference_image)
        features['CL_green'].append(cl_green)
        if source == 'RapidEye':
            # get_CL_green is reused with the rededge band in place of green.
            cl_rededge, data_rededge = get_CL_green(data_nir, image_rededge, cluster_indx, reference_image)
            features['CL_rededge'].append(cl_rededge)
            features['MTCI'].append(get_MTCI(data_nir, data_rededge, data_red))
        # Interquartile means per band/colour channel for this segment.
        features['red_mean'].append(getMean(cluster_indx, image_red, reference_image))
        features['green_mean'].append(getMean(cluster_indx, image_green, reference_image))
        features['blue_mean'].append(getMean(cluster_indx, image_blue, reference_image))
        if source == 'RapidEye':
            features['rededge_mean'].append(getMean(cluster_indx, image_rededge, reference_image))
        features['nir_mean'].append(getMean(cluster_indx, image_nir, reference_image))
        features['segment'].append(cluster_num)
        features['h_mean'].append(getMean(cluster_indx, image_h, reference_image))
        features['s_mean'].append(getMean(cluster_indx, image_s, reference_image))
        features['v_mean'].append(getMean(cluster_indx, image_v, reference_image))
        features['l_mean'].append(getMean(cluster_indx, image_l, reference_image))
        features['a_mean'].append(getMean(cluster_indx, image_a, reference_image))
        features['b_mean'].append(getMean(cluster_indx, image_b, reference_image))
        features['x_mean'].append(getMean(cluster_indx, image_x, reference_image))
        features['y_mean'].append(getMean(cluster_indx, image_y, reference_image))
        features['z_mean'].append(getMean(cluster_indx, image_z, reference_image))
        features['cl_mean'].append(getMean(cluster_indx, image_cl, reference_image))
        features['ca_mean'].append(getMean(cluster_indx, image_ca, reference_image))
        features['cb_mean'].append(getMean(cluster_indx, image_cb, reference_image))
        # Image-level context, identical on every row.
        features['image_mean_red'].append(image_mean_red)
        features['image_mean_green'].append(image_mean_green)
        features['image_mean_blue'].append(image_mean_blue)
        if source == 'RapidEye':
            features['image_mean_rededge'].append(image_mean_rededge)
        features['image_mean_nir'].append(image_mean_nir)
        features['image_mean_h'].append(image_mean_h)
        features['image_mean_s'].append(image_mean_s)
        features['image_mean_v'].append(image_mean_v)
        features['image_mean_l'].append(image_mean_l)
        features['image_mean_a'].append(image_mean_a)
        features['image_mean_b'].append(image_mean_b)
        features['image_mean_x'].append(image_mean_x)
        features['image_mean_y'].append(image_mean_y)
        features['image_mean_z'].append(image_mean_z)
        features['image_mean_cl'].append(image_mean_cl)
        features['image_mean_ca'].append(image_mean_ca)
        features['image_mean_cb'].append(image_mean_cb)
        # Chromaticity ratios; +1.0 in denominators guards division by zero.
        features['normalized_R'].append(np.mean(np.divide(data_red, (data_red + data_green + data_blue + 1.0))))
        features['normalized_G'].append(np.mean(np.divide(data_green, (data_red + data_green + data_blue + 1.0))))
        features['normalized_B'].append(np.mean(np.divide(data_blue, (data_red + data_green + data_blue + 1.0))))
        features['mean_R_by_B'].append(np.mean(np.divide(data_red, data_blue + 1.0)))
        features['mean_R_by_B_plus_R'].append(np.mean(np.divide(data_red, data_blue + data_red + 1.0)))
        try:
            # np.nanmax/np.nanmin raise ValueError on an empty sample array
            # (segment entirely invalid); record NaN in that case.
            features['mean_chroma'].append(max(np.nanmax(data_red), np.nanmax(data_green), np.nanmax(data_blue)) - \
                min(np.nanmin(data_red), np.nanmin(data_green), np.nanmin(data_blue)))
        except ValueError:
            features['mean_chroma'].append(np.nan)
        # Mean pairwise band differences.
        features['R-G'].append(np.mean(data_red - data_green))
        features['R-B'].append(np.mean(data_red - data_blue))
        features['G-R'].append(np.mean(data_green - data_red))
        features['G-B'].append(np.mean(data_green - data_blue))
        features['B-R'].append(np.mean(data_blue - data_red))
        features['B-G'].append(np.mean(data_blue - data_green))
    df = pd.DataFrame.from_dict(features)
    return df
###############################################################################
# Utlitiy Functions
def load_obj(filepath):
    '''
    DESC: Load a dill-pickled object from filepath
    INPUT: filepath = str()
    -----
    OUTPUT: the unpickled object
    '''
    import dill
    with open(filepath, 'rb') as fh:
        obj = dill.load(fh)
    return obj
def save_obj(obj, filepath):
    '''
    DESC: Save object as a dill pickle (protocol 2) to filepath
    INPUT: obj=(list, dict, etc.), filepath = str()
    -----
    OUTPUT: saves pickled object to filepath (returns dill.dump's result, i.e. None)
    '''
    import dill
    with open(filepath, 'wb') as fh:
        return dill.dump(obj, fh, protocol=2)
def sp_idx(s):
    '''
    DESC: creates a flattened array/list of segments with pixel values
    INPUT: segment np.array
    -----
    OUTPUT: list of np.where index tuples, one per unique segment label,
            in sorted label order
    '''
    indices = []
    for label in np.unique(s):
        indices.append(np.where(s == label))
    return indices
def rgb_metric(s, metric='sd'):
    '''
    DESC: calcuates (R-G)/B per pixel
    INPUT: s=np.array of segment pixels with shape (n, >=3) in B,G,R channel
           order, metric=str() ['sd', 'mean']
    -----
    OUTPUT: np.nanstd (metric='sd') or np.nanmean (metric='mean') of (R-G)/B
            per pixel; None for an unrecognized metric (unchanged behaviour)
    '''
    # Bug fix: the original reassigned the `metric` parameter to the pixel
    # values and then tested the literal `if 'sd':` (always truthy), so the
    # 'mean' branch was unreachable and std was always returned.
    blue = s[:, 0]
    green = s[:, 1]
    red = s[:, 2]
    values = (red - green) / blue
    if metric == 'sd':
        return np.nanstd(values)
    if metric == 'mean':
        return np.nanmean(values)
def grayscale_metric(s, metric='sd'):
    '''
    DESC: calcuates mean/sd of grayscale (R+G+B)/3 per segment/superpixel
    INPUT: s=np.array of segment pixels with shape (n, >=3) in B,G,R channel
           order, metric=str() ['sd', 'mean']
    -----
    OUTPUT: np.nanstd (metric='sd') or np.nanmean (metric='mean') of the
            per-pixel grayscale values; None for an unrecognized metric
    '''
    # Bug fix: the original reassigned the `metric` parameter to the pixel
    # values and then tested the literal `if 'sd':` (always truthy), so the
    # 'mean' branch was unreachable and std was always returned.
    blue = s[:, 0]
    green = s[:, 1]
    red = s[:, 2]
    values = (red + green + blue) / 3
    if metric == 'sd':
        return np.nanstd(values)
    if metric == 'mean':
        return np.nanmean(values)
################################################################################
# Merging segments by mean color - http://scikit-image.org/docs/dev/api/skimage.future.graph.html#skimage.future.graph.merge_hierarchical
def _weight_mean_color(graph, src, dst, n):
"""Callback to handle merging nodes by recomputing mean color.
The method expects that the mean color of `dst` is already computed.
Parameters
----------
graph : RAG
The graph under consideration.
src, dst : int
The vertices in `graph` to be merged.
n : int
A neighbor of `src` or `dst` or both.
Returns
-------
data : dict
A dictionary with the `"weight"` attribute set as the absolute
difference of the mean color between node `dst` and `n`.
"""
diff = graph.node[dst]['mean color'] - graph.node[n]['mean color']
diff = np.linalg.norm(diff)
return {'weight': diff}
def _revalidate_node_edges(rag, node, heap_list):
    """Handles validation and invalidation of edges incident to a node.
    This function invalidates all existing edges incident on `node` and inserts
    new items in `heap_list` updated with the valid weights.
    Parameters
    ----------
    rag : RAG
        The Region Adjacency Graph.
    node : int
        The id of the node whose incident edges are to be validated/invalidated
        .
    heap_list : list
        The list containing the existing heap of edges.
    """
    # networkx updates data dictionary if edge exists
    # this would mean we have to reposition these edges in
    # heap if their weight is updated.
    # instead we invalidate them
    for nbr in rag.neighbors(node):
        data = rag[node][nbr]
        try:
            # invalidate edges incident on `dst`, they have new weights
            data['heap item'][3] = False
            _invalidate_edge(rag, node, nbr)
        except KeyError:
            # will handle the case where the edge did not exist in the existing
            # graph
            pass
        # push a fresh, valid heap entry carrying the edge's current weight;
        # the stale entries above remain in the heap but are flagged invalid
        wt = data['weight']
        heap_item = [wt, node, nbr, True]
        data['heap item'] = heap_item
        heapq.heappush(heap_list, heap_item)
def _rename_node(graph, node_id, copy_id):
    """ Rename `node_id` in `graph` to `copy_id`.

    Copies the node's attribute dict and all incident edge weights onto the
    new id, then removes the old node.
    NOTE(review): relies on the networkx 1.x API (`graph.node`,
    `_add_node_silent`, dict passed positionally to `add_edge`) -- this will
    break on networkx >= 2.0; confirm the pinned dependency version.
    """
    graph._add_node_silent(copy_id)
    graph.node[copy_id].update(graph.node[node_id])
    for nbr in graph.neighbors(node_id):
        wt = graph[node_id][nbr]['weight']
        graph.add_edge(nbr, copy_id, {'weight': wt})
    graph.remove_node(node_id)
def _invalidate_edge(graph, n1, n2):
""" Invalidates the edge (n1, n2) in the heap. """
graph[n1][n2]['heap item'][3] = False
def merge_mean_color(graph, src, dst):
    """Callback called before merging two nodes of a mean color distance graph.
    This method computes the mean color of `dst`.
    Parameters
    ----------
    graph : RAG
        The graph under consideration.
    src, dst : int
        The vertices in `graph` to be merged.
    """
    # Fold src's running totals into dst, then refresh dst's mean colour.
    dst_attrs = graph.node[dst]
    src_attrs = graph.node[src]
    dst_attrs['total color'] += src_attrs['total color']
    dst_attrs['pixel count'] += src_attrs['pixel count']
    dst_attrs['mean color'] = dst_attrs['total color'] / dst_attrs['pixel count']
def merge_hierarchical_segments(labels, rag, segments, rag_copy, in_place_merge,
                                merge_func, weight_func):
    """Perform hierarchical merging of a RAG.
    Greedily merges the most similar pair of nodes (lowest edge weight) until
    the RAG has been reduced to `segments` nodes or the edge heap is empty.
    Parameters
    ----------
    labels : ndarray
        The array of labels.
    rag : RAG
        The Region Adjacency Graph.
    segments : int
        Target number of segments; merging stops once `rag` has at most this
        many nodes. (Unlike skimage's merge_hierarchical, there is no weight
        threshold -- the stop criterion is the node count.)
    rag_copy : bool
        If set, the RAG copied before modifying.
    in_place_merge : bool
        If set, the nodes are merged in place. Otherwise, a new node is
        created for each merge..
    merge_func : callable
        This function is called before merging two nodes. For the RAG `graph`
        while merging `src` and `dst`, it is called as follows
        ``merge_func(graph, src, dst)``.
    weight_func : callable
        The function to compute the new weights of the nodes adjacent to the
        merged node. This is directly supplied as the argument `weight_func`
        to `merge_nodes`.
    Returns
    -------
    out : ndarray
        The new labeled array.
    """
    if rag_copy:
        rag = rag.copy()
    edge_heap = []
    for n1, n2, data in rag.edges(data=True):
        # Push a valid edge in the heap
        wt = data['weight']
        heap_item = [wt, n1, n2, True]
        heapq.heappush(edge_heap, heap_item)
        # Reference to the heap item in the graph
        data['heap item'] = heap_item
    # Pop the lowest-weight edge repeatedly until the node count target is met.
    while len(edge_heap) > 0 and len(rag.nodes()) > segments:
        _, n1, n2, valid = heapq.heappop(edge_heap)
        # Ensure popped edge is valid, if not, the edge is discarded
        if valid:
            # Invalidate all neighbors of `src` before its deleted
            for nbr in rag.neighbors(n1):
                _invalidate_edge(rag, n1, nbr)
            for nbr in rag.neighbors(n2):
                _invalidate_edge(rag, n2, nbr)
            if not in_place_merge:
                next_id = rag.next_id()
                _rename_node(rag, n2, next_id)
                src, dst = n1, next_id
            else:
                src, dst = n1, n2
            merge_func(rag, src, dst)
            new_id = rag.merge_nodes(src, dst, weight_func)
            _revalidate_node_edges(rag, new_id, edge_heap)
    # Map every original label to the index of the merged node containing it.
    label_map = np.arange(labels.max() + 1)
    for ix, (n, d) in enumerate(rag.nodes(data=True)):
        for label in d['labels']:
            label_map[label] = ix
    return label_map[labels]
###############################################################################
# Not really necessary function
def try_different_num_segments(image, num_segments=(5, 10, 15, 20)):
    """Run SLIC-zero at several target segment counts and plot each result.

    Returns a list of label arrays (shifted to start at 1), one per entry in
    `num_segments`.
    """
    results = []
    for target in num_segments:
        # SLIC-zero with the same fixed parameters for every target count.
        labels = slic(img_as_float(image),
                      n_segments=target,
                      sigma=5,
                      max_iter=100,
                      compactness=10,
                      enforce_connectivity=True,
                      slic_zero=True)
        # Display the superpixel boundaries over the RGB-converted image.
        fig = plt.figure("Superpixels")
        ax = fig.add_subplot(1, 1, 1)
        ax.imshow(mark_boundaries(img_as_float(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)), labels))
        plt.show()
        # Shift labels so segment ids start at 1 instead of 0.
        results.append(labels + 1)
    return results
def segment_image(image, n_segments, standardize=None):
    '''
    DESC: Segment image with SLIC-zero and optionally merge the result down
          to a fixed number of segments via hierarchical RAG merging.
    INPUT: image=np.array(), n_segments=int() target for SLIC,
           standardize=int() or None - when an int, merge until at most this
           many segments remain; when None, keep the raw SLIC result
    -----
    OUTPUT: (segment labels starting at 1 as np.array, number of original
            segments SLIC produced)
    '''
    # SLIC-zero: adaptive compactness, so no compactness parameter needed.
    seg = slic(img_as_float(image),
               n_segments=n_segments,
               sigma=5,
               max_iter=200,
               slic_zero=True)
    original_segs = len(np.unique(seg))
    # Bug fix: the default standardize=None previously reached
    # `len(...) > None`, which raises TypeError on Python 3. Only merge
    # when a target count was actually supplied.
    if standardize is not None and original_segs > standardize:
        g = graph.rag_mean_color(image, seg)
        seg = merge_hierarchical_segments(seg, g, segments=standardize, rag_copy=False,
                                          in_place_merge=True,
                                          merge_func=merge_mean_color,
                                          weight_func=_weight_mean_color)
    # Shift labels so segment ids start at 1 instead of 0.
    seg = seg + 1
    return seg, original_segs
def image_preparation(df, img_path):
    """Look up an image's metadata row in `df` and load the image itself.

    Returns (uint8 image array, acquisition datetime, filename).
    """
    row = np.where(df['img_path'] == img_path)
    # to_string + split extracts the bare value from the pandas rendering.
    fname = df['filename'].iloc[row].to_string(index=False, header=True)
    fname = fname.split(' ')[-1]
    # Date column is rendered as YYYYMMDD at the start of the string.
    date_str = df['date'].iloc[row].to_string(index=False, header=True)
    img_date = datetime.datetime(int(date_str[:4]), int(date_str[4:6]), int(date_str[6:]))
    # Load and normalize the raw image.
    image_raw = visualize(img_path, plot=False)
    return image_raw, img_date, fname
def get_griffin_features(image_raw, segment, img_date, filename):
    """Compute the per-segment Griffin feature table for a PlanetScope image.

    Runs process_bands on the raw image, extracts features for every unique
    segment id and tags each row with `filename`.
    """
    bands = process_bands(image_raw)
    (ret_bgr, reference_image, image_red, image_green, image_blue, image_nir,
     image_h, image_s, image_v, image_l, image_a, image_b,
     image_x, image_y, image_z, image_cl, image_ca, image_cb) = bands
    griffin_df = extractFeatures(
        reference_image=reference_image,
        cluster_segments=segment,
        cluster_list=np.unique(segment),
        image_red=image_red,
        image_green=image_green,
        image_blue=image_blue,
        image_nir=image_nir,
        image_h=image_h,
        image_s=image_s,
        image_v=image_v,
        image_l=image_l,
        image_a=image_a,
        image_b=image_b,
        image_x=image_x,
        image_y=image_y,
        image_z=image_z,
        image_cl=image_cl,
        image_ca=image_ca,
        image_cb=image_cb,
        image_rededge=None,
        img_date=img_date,
        source='PlanetScope')
    griffin_df['filename'] = filename
    return griffin_df
def SLIC_segmentation(image_raw, n_segments=10, standardize=8):
    """Run SLIC superpixel segmentation on the RGB channels of `image_raw`.

    Returns (segment label array, number of segments before standardization).
    """
    rgb = img_as_float(image_raw[:, :, :3]).astype(np.float32)
    return segment_image(rgb, n_segments=n_segments, standardize=standardize)
def get_SLIC_segmentation(df, fieldIDs, files_list=[], n_segments=10, standardize=8):
    """Segment every image of the selected fields with SLIC and extract features.

    INPUT: df = metadata DataFrame with 'fieldID', 'filename' and 'img_path'
           columns; fieldIDs = int or list of ints; files_list = optional
           filename whitelist; n_segments/standardize forwarded to SLIC.
    OUTPUT: (images, segments, original_segs) dicts keyed by filename, plus the
            concatenated per-segment feature DataFrame.
    NOTE(review): files_list=[] is a mutable default -- it is only read here,
    so it is harmless, but worth cleaning up.
    """
    images, segments, original_segs = {}, {}, {}
    # rgbs, grays = {},{}
    dfs = []
    # Accept a single field id as well as a list of ids.
    if isinstance(fieldIDs, int):
        fieldIDs = [fieldIDs]
    selected_field = df[df['fieldID'].isin(fieldIDs)]
    if len(files_list) > 0:
        selected_field = selected_field[selected_field['filename'].isin(files_list)]
    for x in selected_field['img_path']:
        # process image
        image_raw, img_date, filename = image_preparation(selected_field, x)
        # segmentation using SLIC
        segment, original_seg = SLIC_segmentation(image_raw[:,:,:3], n_segments=n_segments, standardize=standardize)
        # superpixel_list = sp_idx(segment)
        # superpixel = [image_raw[:,:,:3][idx] for idx in superpixel_list]
        # rgb_std_segment = [rgb_metric(s, metric='sd') for s in superpixel]
        # gray_std_segment = [grayscale_metric(s, metric='sd') for s in superpixel]
        # Get Griffin Features
        griffin_df = get_griffin_features(image_raw, segment, img_date, filename)
        # Collect data
        dfs.append(griffin_df)
        images[filename] = image_raw
        segments[filename] = segment
        original_segs[filename] = original_seg
        # rgbs[filename] = rgb_std_segment
        # grays[filename] = gray_std_segment
    dfs_field = pd.concat(dfs, axis=0)
    return images, segments, original_segs, dfs_field
# def get_image_segmentation(df, fieldIDs, files_list=[],n_segments=10, standardize=8, save_dir=None, number_fields=np.inf):
# months = {0:'jan',1:'feb',2:'mar',3:'apr',4:'may',5:'june', 6:'july', 7:'aug', 8:'sept', 9:'oct', 10:'nov', 11:'dec'}
# images, segments, original_segs ={},{},{}
# dfs = []
# if isinstance(fieldIDs,int):
# fieldIDs = [fieldIDs]
# selected_field = df[df['fieldID'].isin(fieldIDs)]
# if len(files_list) > 0:
# selected_field = selected_field[selected_field['filename'].isin(files_list)]
# count = 0
# for x in selected_field['img_path']:
# if count < number_fields:
# count += 1
# # get img_path and date and fieldID and filename
# ind = np.where(selected_field['img_path']==x)
# f = selected_field['filename'].iloc[ind].to_string(index=False, header=True)
# i = selected_field['fieldID'].iloc[ind].to_string(index=False, header=True)
# f = f.split(' ')[-1]
#
# date = selected_field['date'].iloc[ind].to_string(index=False, header=True)
# year = int(date[:4])
# month = int(date[4:6])
# day =int(date[6:])
# img_date = datetime.datetime(year,month,day)
#
# # original image
# image_raw = visualize(x)
#
# # Process image
# ret_bgr, reference_image, image_red, image_green, image_blue, image_nir, \
# image_h, image_s, image_v, image_l, image_a, image_b, image_x, image_y, image_z, \
# image_cl, image_ca, image_cb = process_bands(image_raw)
#
# image = image_raw[:,:,:3]
# image = img_as_float(image)
# image = image.astype(np.float32)
#
# # Super Pixel Segmentaiton
# segment, original_seg = segment_image(image, n_segments=n_segments, standardize=standardize)
#
# # superpixel_list = sp_idx(segment)
# # superpixel = [image_raw[:,:,:3][idx] for idx in superpixel_list]
# # rgb_std_segment = [rgb_metric(s, metric='sd') for s in superpixel]
# # gray_std_segment = [grayscale_metric(s, metric='sd') for s in superpixel]
#
# griffin_df= extractFeatures(
# reference_image = reference_image,
# cluster_segments=segment,
# cluster_list=np.unique(segment),
# image_red=image_red,
# image_green=image_green,
# image_blue=image_blue,
# image_nir=image_nir,
# image_h=image_h,
# image_s=image_s,
# image_v=image_v,
# image_l=image_l,
# image_a=image_a,
# image_b=image_b,
# image_x=image_x,
# image_y=image_y,
# image_z=image_z,
# image_cl=image_cl,
# image_ca=image_ca,
# image_cb=image_cb,
# image_rededge=None,
# img_date=img_date,
# source='PlanetScope')
# griffin_df['filename'] = f
# dfs.append(griffin_df)
#
# original_segs[f] = original_seg
#
# images[f] = image_raw
# segments[f] = segment
# dfs_field = pd.concat(dfs, axis=0)
# return images,segments,original_segs, dfs_field
################################################################################
# RSGISLib segmentation
def RSGISLib_segmentation(img_path, save_name, save_dir, numClusters=10, minPxls=5000, distThres=500):
    """Segment one image with RSGISLib's Shepherd algorithm.

    Writes all intermediate products (clump raster, mean image, stats files,
    kmeans centres) under `save_dir/<image basename>/`, exports the clump
    raster through GDAL and derives a single-channel segment-id image.
    INPUT: img_path=str, save_name=str output-file prefix, save_dir=str,
           numClusters/minPxls/distThres = Shepherd segmentation parameters.
    OUTPUT: (segment, new, myarray) -- segment = (R+G+B)/3 of the exported
            clump colour image, new = HxWx3 uint8 colour image,
            myarray = raw exported band stack.
    """
    # One sub-directory per input image, named after the image file.
    save_dir = save_dir+'/'+img_path.split("/")[-1]+"/"
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    output_clump = save_dir+'{}_clumps.kea'.format(save_name)
    mean = save_dir+'{}_mean.kea'.format(save_name)
    # NOTE(review): this local shadows any stdlib `json` import in this scope.
    json = save_dir+'{}_json'.format(save_name)
    imgstretchstats = save_dir+'{}_imgstretchstats.txt'.format(save_name)
    kmeans = save_dir+'{}_kmeans'.format(save_name)
    # Run the Shepherd segmentation, persisting processing stats alongside.
    segutils.runShepherdSegmentation(inputImg=img_path,
                                     outputClumps=output_clump,
                                     outputMeanImg=mean,
                                     minPxls=minPxls,
                                     numClusters=numClusters,
                                     saveProcessStats=True,
                                     distThres=distThres,
                                     imgStretchStats = imgstretchstats,
                                     kMeansCentres = kmeans,
                                     imgStatsJSONFile=json)
    outascii = save_dir+'{}_imgstats.txt'.format(save_name)
    # Attach per-clump statistics to the clump raster attribute table.
    ratutils.populateImageStats(img_path, output_clump, outascii=outascii,
                                threshold=0.0, calcMin=True, calcMax=True,
                                calcSum=True, calcMean=True, calcStDev=True,
                                calcMedian=False, calcCount=False, calcArea=False,
                                calcLength=False, calcWidth=False, calcLengthWidth=False)
    imageutils.popImageStats(output_clump, True, 0, True)
    outimage = save_dir+"{}_test_gdal.kea".format(save_name)
    gdalformat = 'KEA'
    datatype = rsgislib.TYPE_32FLOAT
    fields = ['Histogram', 'Red', 'Green', 'Blue', 'Alpha']
    # Export the attribute-table columns as a GDAL image and as ASCII.
    rastergis.exportCols2GDALImage(output_clump, outimage, gdalformat, datatype, fields)
    output = save_dir+'{}_array.txt'.format(save_name)
    rastergis.export2Ascii(output_clump, outfile=output, fields=fields)
    ds = gdal.Open(outimage)
    myarray = np.array(ds.ReadAsArray())
    # Bands 1-3 of the export are the clump colour channels.
    new = np.dstack((myarray[1,:,:], myarray[2,:,:], myarray[3,:,:]))
    new = new.astype(np.uint8)
    # Collapse colour to a single grayscale segment-id channel.
    # NOTE(review): distinct clump colours can collide after averaging,
    # merging unrelated clumps into one id -- verify this is acceptable.
    segment = (new[:,:,0]+new[:,:,1]+new[:,:,2])/3
    return segment, new, myarray
def get_RSGISLib_segmentation(df, fieldIDs, save_dir, files_list=[], standardize=8, numClusters=10, minPxls=5000, distThres=500):
    """Segment every image of the selected fields with RSGISLib and extract features.

    For each image: run the Shepherd segmentation, relabel clumps to small
    consecutive ids, optionally merge down to `standardize` segments via RAG
    merging, relabel by sorted id, and compute the Griffin feature table.
    INPUT: df = metadata DataFrame ('fieldID', 'filename', 'img_path' columns);
           fieldIDs = int or list of ints; save_dir = output directory for
           RSGISLib products; files_list = optional filename whitelist;
           standardize = target segment count; numClusters/minPxls/distThres
           forwarded to RSGISLib_segmentation.
    OUTPUT: (images, segments, original_segs, dfs_field, seg_dicts) --
            per-filename dicts plus the concatenated feature DataFrame
            (empty DataFrame when every image failed).
    """
    images, segments, seg_dicts, original_segs = {}, {}, {}, {}
    dfs = []
    # Accept a single field id as well as a list of ids.
    if isinstance(fieldIDs, int):
        fieldIDs = [fieldIDs]
    selected_field = df[df['fieldID'].isin(fieldIDs)]
    if len(files_list) > 0:
        selected_field = selected_field[selected_field['filename'].isin(files_list)]
    for img_path in selected_field['img_path']:
        seg_dict = {}
        try:
            image_raw, img_date, filename = image_preparation(selected_field, img_path)
            # Segmentation using RSGISLib.
            segment, new, myarray = RSGISLib_segmentation(img_path,
                                                          save_name=filename,
                                                          save_dir=save_dir,
                                                          numClusters=numClusters,
                                                          minPxls=minPxls,
                                                          distThres=distThres)
            # Relabel raw clump ids to consecutive integers starting at 1.
            for ind, s in enumerate(np.unique(segment)):
                segment[segment == s] = int(ind + 1)
            segment = segment.astype(np.uint8)
            original_seg = np.unique(segment)
            # Merge down to at most `standardize` segments.
            if len(np.unique(segment)) > standardize:
                g = graph.rag_mean_color(image_raw[:, :, :3], segment)
                segment = merge_hierarchical_segments(segment, g, segments=standardize, rag_copy=False,
                                                      in_place_merge=True,
                                                      merge_func=merge_mean_color,
                                                      weight_func=_weight_mean_color)
            # Relabel segments in sorted-id order starting at 1.
            new_segment = segment.copy()
            for ind, s in enumerate(sorted(np.unique(segment))):
                seg_dict[s] = ind + 1
            for k, v in seg_dict.items():
                new_segment[segment == k] = v
            new_segment = new_segment.astype(np.uint8)
            # Get Griffin Features.
            griffin_df = get_griffin_features(image_raw, new_segment, img_date, filename)
            # Collect data.
            dfs.append(griffin_df)
            seg_dicts[filename] = seg_dict
            segments[filename] = new_segment
            original_segs[filename] = original_seg
            images[filename] = image_raw
        except Exception:
            # Previously a bare `except: pass`, which silently hid every
            # failure (including KeyboardInterrupt/SystemExit). Keep the
            # best-effort per-image behaviour but surface the error.
            traceback.print_exc()
    # pd.concat raises ValueError on an empty list; return an empty frame
    # instead so callers always get the documented 5-tuple.
    dfs_field = pd.concat(dfs, axis=0) if dfs else pd.DataFrame()
    return images, segments, original_segs, dfs_field, seg_dicts
###############################################################################
# Image Colorfulness equations - https://www.pyimagesearch.com/2017/06/05/computing-image-colorfulness-with-opencv-and-python/
def image_colorfulness(image):
    '''
    DESC: Compute the Hasler & Suesstrunk colorfulness metric of an image
    INPUT: image=np.array() with channel order (B, G, R) in the last axis
    -----
    OUTPUT: scalar colorfulness value (larger = more colorful)
    '''
    # Split the image into its B, G, R components as floats.
    pixels = image.astype("float")
    B, G, R = pixels[:, :, 0], pixels[:, :, 1], pixels[:, :, 2]
    # Opponent color axes: rg = R - G and yb = 0.5 * (R + G) - B
    rg = np.absolute(R - G)
    yb = np.absolute(0.5 * (R + G) - B)
    # Combined spread of both opponent axes ...
    std_root = np.sqrt(np.std(rg) ** 2 + np.std(yb) ** 2)
    # ... plus combined magnitude of their means
    mean_root = np.sqrt(np.mean(rg) ** 2 + np.mean(yb) ** 2)
    # Colorfulness = spread + 0.3 * magnitude
    return std_root + (0.3 * mean_root)
def segment_colorfulness(image, mask):
    '''
    DESC: Compute the Hasler & Suesstrunk colorfulness metric for one image
          segment. The mask follows the numpy masked-array convention:
          entries equal to 0 are KEPT (the segment), nonzero entries are
          excluded from the statistics.
    INPUT: image=np.array() with channel order (B, G, R) in the last axis,
           mask=np.array() of the same height/width, 0 on the segment's pixels
    -----
    OUTPUT: scalar colorfulness value of the masked region
    '''
    # Split the image into its B, G, R components, then mask each channel so
    # the statistics are computed only over the unmasked (segment) region.
    pixels = image.astype("float")
    B = np.ma.masked_array(pixels[:, :, 0], mask=mask)
    # BUGFIX: the green channel was previously built from the blue channel
    # (G = masked_array(B, ...)), so green never contributed to the metric.
    G = np.ma.masked_array(pixels[:, :, 1], mask=mask)
    R = np.ma.masked_array(pixels[:, :, 2], mask=mask)
    # Opponent color axes: rg = R - G and yb = 0.5 * (R + G) - B
    rg = np.absolute(R - G)
    yb = np.absolute(0.5 * (R + G) - B)
    # Combine the spreads and the mean magnitudes of both opponent axes.
    stdRoot = np.sqrt((rg.std() ** 2) + (yb.std() ** 2))
    meanRoot = np.sqrt((rg.mean() ** 2) + (yb.mean() ** 2))
    # derive the "colorfulness" metric and return it
    return stdRoot + (0.3 * meanRoot)
################################################################################
# Image Segmentation features
def NDVI_r(img):
    '''Normalized difference vegetation index vs. red: (NIR - R) / (NIR + R).
    Band axis order is (B, G, R, NIR); supports 2D (pixels, bands), 3D
    (h, w, bands) and 4D (h, w, bands, t) arrays.'''
    ndim = len(img.shape)
    if ndim == 3:
        nir, red = img[:, :, 3], img[:, :, 2]
    elif ndim == 4:
        nir, red = img[:, :, 3, :], img[:, :, 2, :]
    elif ndim == 2:
        nir, red = img[:, 3], img[:, 2]
    else:
        return None
    return 1.0 * ((nir - red) / (nir + red))
def NDVI_g(img):
    '''Normalized difference vegetation index vs. green: (NIR - G) / (NIR + G).
    Band axis order is (B, G, R, NIR); supports 2D, 3D and 4D arrays.'''
    ndim = len(img.shape)
    if ndim == 3:
        nir, green = img[:, :, 3], img[:, :, 1]
    elif ndim == 4:
        nir, green = img[:, :, 3, :], img[:, :, 1, :]
    elif ndim == 2:
        nir, green = img[:, 3], img[:, 1]
    else:
        return None
    return 1.0 * ((nir - green) / (nir + green))
def NDVI_b(img):
    '''Normalized difference vegetation index vs. blue: (NIR - B) / (NIR + B).
    Band axis order is (B, G, R, NIR); supports 2D, 3D and 4D arrays.'''
    ndim = len(img.shape)
    if ndim == 3:
        nir, blue = img[:, :, 3], img[:, :, 0]
    elif ndim == 4:
        nir, blue = img[:, :, 3, :], img[:, :, 0, :]
    elif ndim == 2:
        nir, blue = img[:, 3], img[:, 0]
    else:
        return None
    return 1.0 * ((nir - blue) / (nir + blue))
def NDWI(img):
    '''Normalized difference water index: (G - NIR) / (G + NIR).
    Band axis order is (B, G, R, NIR); supports 2D, 3D and 4D arrays.'''
    ndim = len(img.shape)
    if ndim == 3:
        green, nir = img[:, :, 1], img[:, :, 3]
    elif ndim == 4:
        green, nir = img[:, :, 1, :], img[:, :, 3, :]
    elif ndim == 2:
        green, nir = img[:, 1], img[:, 3]
    else:
        return None
    return 1.0 * ((green - nir) / (green + nir))
def EVI(img):
    '''Enhanced vegetation index: 2.5 * (NIR - R) / (NIR + 6R - 7.5B + 1).
    Band axis order is (B, G, R, NIR); supports 2D, 3D and 4D arrays.'''
    ndim = len(img.shape)
    if ndim == 3:
        blue, red, nir = img[:, :, 0], img[:, :, 2], img[:, :, 3]
    elif ndim == 4:
        blue, red, nir = img[:, :, 0, :], img[:, :, 2, :], img[:, :, 3, :]
    elif ndim == 2:
        blue, red, nir = img[:, 0], img[:, 2], img[:, 3]
    else:
        return None
    return 2.5 * ((nir - red) / (nir + 6 * red - 7.5 * blue + 1))
def SAVI(img):
    '''Soil-adjusted vegetation index: 1.5 * (NIR - R) / (NIR + R + 0.5).
    Band axis order is (B, G, R, NIR); supports 2D, 3D and 4D arrays.'''
    ndim = len(img.shape)
    if ndim == 3:
        red, nir = img[:, :, 2], img[:, :, 3]
    elif ndim == 4:
        red, nir = img[:, :, 2, :], img[:, :, 3, :]
    elif ndim == 2:
        red, nir = img[:, 2], img[:, 3]
    else:
        return None
    return ((nir - red) / (nir + red + 0.5)) * 1.5
def MSAVI(img):
    '''Modified soil-adjusted vegetation index:
    (2*NIR + 1 - sqrt((2*NIR + 1)^2 - 8*(NIR - R))) / 2.
    Band axis order is (B, G, R, NIR); supports 2D, 3D and 4D arrays.'''
    ndim = len(img.shape)
    if ndim == 3:
        red, nir = img[:, :, 2], img[:, :, 3]
    elif ndim == 4:
        red, nir = img[:, :, 2, :], img[:, :, 3, :]
    elif ndim == 2:
        red, nir = img[:, 2], img[:, 3]
    else:
        return None
    return (2 * nir + 1 - np.sqrt((2 * nir + 1) ** 2 - 8 * (nir - red))) / 2.0
################################################################################
# Create dictionary of image features per segment
def get_superpixel_image_features(superpixel):
    '''
    DESC: Compute per-band and vegetation/water-index statistics for one
          segment's pixel array (shape: pixels x bands, band order B,G,R,NIR)
    INPUT: superpixel=np.array()
    -----
    OUTPUT: zone_dict mapping feature name -> (min, max, std, mean, median)
    '''
    zone_dict = {}
    # Per-band statistics across the pixel axis, in (min, max, std, mean, median) order.
    per_band_stats = [np.nanmin(superpixel, axis=0),
                      np.nanmax(superpixel, axis=0),
                      np.nanstd(superpixel, axis=0),
                      np.nanmean(superpixel, axis=0),
                      np.nanmedian(superpixel, axis=0)]
    for band_idx, band_name in enumerate(("blue", "green", "red", "nir")):
        zone_dict[band_name] = tuple(stat[band_idx] for stat in per_band_stats)
    # Vegetation / water indices, summarized with the same five statistics.
    index_images = (("NDVI_r", NDVI_r(superpixel)),
                    ("NDVI_g", NDVI_g(superpixel)),
                    ("NDVI_b", NDVI_b(superpixel)),
                    ("EVI", EVI(superpixel)),
                    ("SAVI", SAVI(superpixel)),
                    ("MSAVI", MSAVI(superpixel)),
                    ("NDWI", NDWI(superpixel)))
    for index_name, index_img in index_images:
        zone_dict[index_name] = (np.nanmin(index_img), np.nanmax(index_img),
                                 np.nanstd(index_img), np.nanmean(index_img),
                                 np.nanmedian(index_img))
    return zone_dict
################################################################################
# Get region properties per segments
def get_region_props(image, segments, plot=False):
    '''
    DESC: Get segment/region properties per segment
    INPUT: image=np.array() (first three channels used as RGB),
           segments=np.array() of integer segment labels, plot=bool
    -----
    OUTPUT: seg_stats=dictionary keyed by channel name ('gray'/'red'/'green'/'blue')
            whose values are skimage regionprops lists;
            g=Networkx RAG graph (returned only when plot=True)
    '''
    # Intensity statistics are computed once against the grayscale image and
    # once per individual color channel.
    grayscaledimg = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY )
    regions_gray = regionprops(segments, grayscaledimg)
    regions_blue = regionprops(segments, image[:,:,0])
    regions_green = regionprops(segments, image[:,:,1])
    regions_red = regionprops(segments, image[:,:,2])
    seg_stats = {'gray':regions_gray,
                'red': regions_red,
                'green':regions_green,
                'blue':regions_blue}
    # Calculate similarity of segments and build/plot the region adjacency graph
    if plot:
        mean_label_rgb = get_mean_pixel_value(image, segments, plot=plot)
        g = graph.rag_mean_color(image, segments, mode='similarity')
        fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True, figsize=(6, 8))
        ax[0].set_title('RAG drawn with default settings')
        lc = graph.show_rag(segments, g, image, edge_cmap='viridis',ax=ax[0])
        # specify the fraction of the plot area that will be used to draw the colorbar
        fig.colorbar(lc, fraction=0.03, ax=ax[0])
        ax[1].set_title('RAG drawn with grayscale image and viridis colormap')
        lc = graph.show_rag(segments, g, image,
                            img_cmap='gray', edge_cmap='viridis', ax=ax[1])
        fig.colorbar(lc, fraction=0.03, ax=ax[1])
        for a in ax:
            a.axis('off')
        plt.tight_layout()
        plt.show()
        # Attach each segment's centroid to its graph node so edges can be drawn.
        # NOTE(review): Graph.node was removed in networkx 2.4 (use g.nodes) —
        # confirm the pinned networkx version.
        for region in regions_gray:
            g.node[region['label']]['centroid'] = region['centroid']
        edges_drawn_all, weights = display_edges(mean_label_rgb, rag=g, threshold=np.inf )
        plt.imshow(edges_drawn_all)
        return seg_stats, g
    else:
        return seg_stats
################################################################################
# Generating feature dictionary functions
def get_superpixels(image, segments):
    '''
    DESC: Gather the pixel values belonging to each segment
    INPUT: image=np.array(), segments=np.array() of integer labels
    -----
    OUTPUT: list with one pixel array per segment label
    '''
    return [image[pixel_idx] for pixel_idx in sp_idx(segments)]
def get_image_colorfulness(image):
    '''Colorfulness of the whole image, computed on the first three (BGR) channels.'''
    return image_colorfulness(image[:, :, :3])
def get_segment_masks(image, segments):
    '''
    DESC: Build a 0/255 uint8 mask for every segment label
    INPUT: image=np.array(), segments=np.array() of integer labels
    -----
    OUTPUT: the mask of the LAST segment label.
            NOTE(review): the `masks` list is accumulated but never returned —
            confirm whether callers should receive the full list instead.
    '''
    masks = []
    for seg_label in np.unique(segments):
        # 255 on the segment's pixels, 0 elsewhere.
        seg_mask = np.zeros(image.shape[:2], dtype="uint8")
        seg_mask[segments == seg_label] = 255
        masks.append(seg_mask)
    # (interactive cv2.imshow display intentionally omitted: it crashes notebooks)
    return seg_mask
def get_mean_pixel_value(image, segments, plot=False):
    '''Return the image with every segment painted in its average color.'''
    averaged = color.label2rgb(segments, image, kind='avg')
    if plot:
        plt.imshow(averaged)
    return averaged
def replace_inf_by_nan(img):
    '''Replace +inf and -inf entries with NaN, in place, and return the array.'''
    img[np.isinf(img)] = np.nan
    return img
def get_segment_colorfulness(image, segments, plot=False):
    '''
    DESC: Colorfulness value of every segment
    INPUT: image=np.array(), segments=np.array() of integer labels, plot=bool
    -----
    OUTPUT: dict mapping segment label -> colorfulness value
    '''
    seg_color = {}
    vis = np.zeros(image.shape[:2], dtype=np.float32)
    for label in np.unique(segments):
        # Masked-array convention: 0 marks the segment's pixels as valid,
        # 1 masks everything else out.
        seg_mask = np.ones(image.shape[:2])
        seg_mask[segments == label] = 0
        # Colorfulness of just this segment, painted into the visualization.
        C_seg = segment_colorfulness(image[:, :, :3], seg_mask)
        vis[segments == label] = C_seg
        seg_color[label] = C_seg
    if plot:
        vis = rescale_intensity(vis, out_range=(0, 255)).astype('uint8')
        # Overlay the per-segment colorfulness heat map on the original image.
        alpha = 0.3
        overlay = np.dstack([vis] * 3)
        output = image.copy().astype('uint8')
        cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)
        # (interactive cv2.imshow display intentionally omitted: it crashes notebooks)
    return seg_color
def get_threshold_image(image, segments, rag, weights, plot=False):
    '''Merge RAG regions whose edge weight is below the 15th percentile of
    `weights` and paint every merged region with its average color.'''
    cut = np.percentile(np.array(weights), 15)
    thres_labels = graph.cut_threshold(segments, rag, cut)
    thres_label_rgb = color.label2rgb(thres_labels, image, kind='avg')
    if plot:
        plt.imshow(thres_label_rgb)
        plt.show()
    return thres_label_rgb
###############################################################################
# Generate features
def get_segment_features(images, segments):
    '''
    DESC: Compute feature dictionaries for every image
    INPUT: images=dict filename -> np.array(),
           segments=dict filename -> np.array() of segment labels
    -----
    OUTPUT: features, segment_properties, image_color, seg_color dicts,
            each keyed by image filename
    '''
    # BUGFIX/cleanup: the previous version also created `rags` and a
    # per-filename `superpixels` dict that were never populated/returned.
    features, segment_properties, image_color, seg_color = {}, {}, {}, {}
    for k in images.keys():
        # One pixel array per segment; only needed within this iteration.
        superpixels = get_superpixels(images[k], segments[k])
        segment_properties[k] = get_region_props(images[k][:, :, :3], segments[k])
        image_color[k] = get_image_colorfulness(images[k])
        seg_color[k] = get_segment_colorfulness(images[k], segments[k])
        # Segment labels are 1-based, matching the relabeling done upstream.
        features[k] = {seg: get_superpixel_image_features(spxl)
                       for seg, spxl in enumerate(superpixels, start=1)}
    return features, segment_properties, image_color, seg_color
###############################################################################
# Generate df Helpher functions
def get_image_features_df(features):
    '''
    DESC: create dataframe of image features per segment; every
          (min, max, std, mean, median) tuple is expanded into five
          <feature>_<stat> columns
    INPUT: features=dict key is image filename, values are per-segment
           feature dicts (from get_superpixel_image_features)
    -----
    OUTPUT: df with one row per (filename, segment)
    '''
    files, frames = [], []
    for fname, seg_features in features.items():
        files.append(fname)
        frames.append(pd.DataFrame.from_dict(seg_features, orient='index'))
    f_df = pd.concat(frames, keys=files)
    orig_cols = f_df.columns
    f_df.reset_index(inplace=True)
    f_df.rename(columns={'level_0': 'filename', 'level_1': 'segment'}, inplace=True)
    # Expand each stats tuple into its five suffixed columns.
    for feat in ('red', 'blue', 'green', 'nir', 'SAVI', 'NDVI_b', 'NDVI_g',
                 'NDVI_r', 'EVI', 'MSAVI', 'NDWI'):
        stat_cols = ['{}_{}'.format(feat, s) for s in ('min', 'max', 'std', 'mean', 'median')]
        f_df[stat_cols] = f_df[feat].apply(pd.Series)
    # Drop the original tuple-valued columns.
    f_df.drop(orig_cols, axis=1, inplace=True)
    return f_df
def get_seg_color_df(seg_color):
    '''
    DESC: Build a df of per-segment colorfulness values
    INPUT: seg_color=dict key is image filename, value is a dict
           segment label -> colorfulness
    -----
    OUTPUT: df with columns filename, segment, seg_colorfulness
    '''
    keys, frames = [], []
    for fname, per_segment in seg_color.items():
        keys.append(fname)
        frames.append(pd.DataFrame.from_dict(per_segment, orient='index'))
    c_df = pd.concat(frames, keys=keys).reset_index()
    c_df.rename(columns={'level_0': 'filename', 'level_1': 'segment', 0: 'seg_colorfulness'},
                inplace=True)
    return c_df
def get_rag_properties_df(rags):
    '''
    DESC: Get df of region adjacency graph node attributes per image
    INPUT: rags=dict key is image filename, value is a networkx RAG whose nodes
           carry 'total color', 'mean color', 'pixel count', 'labels', 'centroid'
    -----
    OUTPUT: df with one row per segment and the color tuples unpacked into
            per-channel columns
    '''
    rag_dfs = []
    for k in rags.keys():
        # NOTE(review): Graph.node was removed in networkx 2.4 — newer versions
        # need rags[k].nodes(data=True); confirm the pinned networkx version.
        l=list(rags[k].node(data=True))
        q ={}
        for b in l:
            q[b[0]] = b[1]  # node id -> attribute dict
        rag_df = pd.DataFrame.from_dict(q, orient='index')
        # Unpack the per-node color tuples into separate channel columns.
        rag_df[['blue_total_color','green_total_color','red_total_color']] = rag_df['total color'].apply(pd.Series)
        # Each RAG node holds a list of merged labels; the first one identifies the segment.
        rag_df['segment'] = rag_df['labels'].apply(lambda x: int(x[0]))
        rag_df[['blue_mean_color','green_mean_color','red_mean_color']] = rag_df['mean color'].apply(pd.Series)
        rag_df.drop(['centroid', 'total color', 'mean color', 'labels'], axis=1, inplace=True)
        rag_df.rename(columns={'pixel count':'pixel_count'}, inplace=True)
        rag_df['filename'] = k
        rag_dfs.append(rag_df)
    return pd.concat(rag_dfs)
def get_segment_properties_df(segment_properties):
    '''
    DESC: Get df of segment region properties
    INPUT: segment_properties=dict key is image filename, value is the
           per-channel regionprops dict produced by get_region_props
           http://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.regionprops
    -----
    OUTPUT: df with one row per (filename, segment)
    '''
    image_dfs = []
    for image in segment_properties.keys():
        # One row per segment: geometry/shape properties come from the grayscale
        # regionprops; intensity min/mean/max from each individual channel.
        for segment in range(len(segment_properties[image]['gray'])):
            image_df = pd.DataFrame({
            'area':segment_properties[image]['gray'][segment]['area'],
            'bbox':[segment_properties[image]['gray'][segment]['bbox']],
            'bbox_area':segment_properties[image]['gray'][segment]['bbox_area'],
            'centroid_x':segment_properties[image]['gray'][segment]['centroid'][1],
            'centroid_y':segment_properties[image]['gray'][segment]['centroid'][0],
            'convex_area':segment_properties[image]['gray'][segment]['convex_area'],
            # 'convex_image':seg_stats_15[image]['gray'][segment]['convex_image'],
            'coord':[segment_properties[image]['gray'][segment]['coords']],
            # 'coord_x':seg_stats_15[image]['gray'][segment]['coords'][0],
            # 'coord_y':seg_stats_15[image]['gray'][segment]['coords'][1],
            'eccentricity':segment_properties[image]['gray'][segment]['eccentricity'],
            'equivalent_diameter':segment_properties[image]['gray'][segment]['equivalent_diameter'],
            'euler_number':segment_properties[image]['gray'][segment]['euler_number'],
            'extent':segment_properties[image]['gray'][segment]['extent'],
            'filled_area':segment_properties[image]['gray'][segment]['filled_area'],
            # 'filled_image':seg_stats_15[image]['gray'][segment]['filled_image'],
            # 'image':seg_stats_15[image]['gray'][segment]['image'],
            'inertia_tensor':[segment_properties[image]['gray'][segment]['inertia_tensor']],
            'inertia_tensor_eigvals':[[segment_properties[image]['gray'][segment]['inertia_tensor_eigvals']]],
            # 'intensity_image':seg_stats_15[image]['gray'][segment]['intensity_image'],
            'label':segment_properties[image]['gray'][segment]['label'],
            # 'local_centroid_':[segment_properties[image]['gray'][segment]['local_centroid']],
            'local_centroid_x':segment_properties[image]['gray'][segment]['local_centroid'][1],
            'local_centroid_y':segment_properties[image]['gray'][segment]['local_centroid'][0],
            'major_axis_length':segment_properties[image]['gray'][segment]['major_axis_length'],
            'gray_max_intensity':segment_properties[image]['gray'][segment]['max_intensity'],
            'gray_mean_intensity':segment_properties[image]['gray'][segment]['mean_intensity'],
            'gray_min_intensity':segment_properties[image]['gray'][segment]['min_intensity'],
            'red_max_intensity':segment_properties[image]['red'][segment]['max_intensity'],
            'red_mean_intensity':segment_properties[image]['red'][segment]['mean_intensity'],
            'red_min_intensity':segment_properties[image]['red'][segment]['min_intensity'],
            'blue_max_intensity':segment_properties[image]['blue'][segment]['max_intensity'],
            'blue_mean_intensity':segment_properties[image]['blue'][segment]['mean_intensity'],
            'blue_min_intensity':segment_properties[image]['blue'][segment]['min_intensity'],
            'green_max_intensity':segment_properties[image]['green'][segment]['max_intensity'],
            'green_mean_intensity':segment_properties[image]['green'][segment]['mean_intensity'],
            'green_min_intensity':segment_properties[image]['green'][segment]['min_intensity'],
            'minor_axis_length':segment_properties[image]['gray'][segment]['minor_axis_length'],
            'moments':[segment_properties[image]['gray'][segment]['moments']],
            # 'moments_y':[seg_stats_15[image]['gray'][segment]['moments'][:,1]],
            # 'moments_z':[seg_stats_15[image]['gray'][segment]['moments'][:,2]],
            'moments_central':[segment_properties[image]['gray'][segment]['moments_central']],
            # 'moments_central_y':[seg_stats_15[image]['gray'][segment]['moments_central'][:,1]],
            # 'moments_central_z':[seg_stats_15[image]['gray'][segment]['moments_central'][:,2]],
            'moments_hu':[segment_properties[image]['gray'][segment]['moments_hu']],
            'moments_normalized':[segment_properties[image]['gray'][segment]['moments_normalized']],
            'orientation':segment_properties[image]['gray'][segment]['orientation'],
            'perimeter':segment_properties[image]['gray'][segment]['perimeter'],
            'solidity':segment_properties[image]['gray'][segment]['solidity'],
            'weighted_centroid':[segment_properties[image]['gray'][segment]['weighted_centroid']],
            'weighted_local_centroid':[segment_properties[image]['gray'][segment]['weighted_local_centroid']],
            'weighted_moments':[segment_properties[image]['gray'][segment]['weighted_moments']],
            'weighted_moments_central':[segment_properties[image]['gray'][segment]['weighted_moments_central']],
            'weighted_moments_hu':[segment_properties[image]['gray'][segment]['weighted_moments_hu']]
            }, index=[0])
            # image_df[['moments_1', 'moments_2','moments_3']] = image_df['moments'].apply(pd.Series)
            # image_df[['moments_central_1', 'moments_central_2','moments_central_3']] = image_df['moments_central'].apply(pd.Series)
            # image_df[['moments_hu_1', 'moments_hu_2','moments_hu_3']] = image_df['moments_central'].apply(pd.Series)
            # regionprops labels are 1-based while range() is 0-based, so shift.
            segment = segment + 1
            image_df['segment'] = segment
            image_df['filename'] = image.strip('\n')
            image_dfs.append(image_df)
    return pd.concat(image_dfs)
################################################################################
# Relabeling segments by gray_mean_intensity
def relabel(df):
    '''
    DESC: Relabel segments from brightest to darkest by gray_mean_intensity
          (mutates and returns the given group df; drops the filename column)
    INPUT: df=df (one group per filename, e.g. from groupby().apply)
    -----
    OUTPUT: df with relabeled_segs col (1 = brightest segment)
    '''
    df.sort_values(by=['gray_mean_intensity'], ascending=False, inplace=True)
    df.reset_index(drop=True, inplace=True)
    df['relabeled_segs'] = df.index + 1
    df.drop(columns=['filename'], inplace=True)
    return df
################################################################################
# Create df from feature dictionaries with key as image filename
def create_df(images_df, griffin_features, features, segment_properties, image_color, seg_color, original_segs):
    '''
    DESC: Merge all per-segment feature tables into one wide row per image and
          append the result to the images df
    INPUT: images_df=df, griffin_features=df (from extractFeatures),
           features/segment_properties/image_color/seg_color=dicts
           (outputs from get_segment_features), original_segs=dict
    -----
    OUTPUT: df with one <feature>_seg_<n> column per relabeled segment
    '''
    # Per-segment feature tables, joined on (filename, segment).
    feature_df = get_image_features_df(features)
    color_df = get_seg_color_df(seg_color)
    feature_df = pd.merge(feature_df, color_df, on=['filename', 'segment'], how='left')
    props_df = get_segment_properties_df(segment_properties)
    merged = pd.merge(feature_df, props_df, on=['filename', 'segment'])
    merged = pd.merge(merged, griffin_features, on=['filename', 'segment'])
    # Relabel segments per image by brightness, then pivot to one row per image.
    relabeled = merged.groupby(['filename']).apply(relabel)
    relabeled.reset_index(inplace=True)
    del relabeled['level_1']
    pivots = []
    for col_name in relabeled.columns:
        if col_name in ('filename', 'relabeled_segs'):
            continue
        pivot_df = relabeled.pivot(values=col_name, index='filename', columns='relabeled_segs')
        pivots.append(pivot_df.add_prefix(str(col_name) + '_seg' + '_'))
    wide_df = pd.concat(pivots, axis=1)
    wide_df.reset_index(inplace=True)
    # Image-level extras: original segment count and whole-image colorfulness.
    orig_segs = pd.DataFrame.from_dict(original_segs, orient='index').reset_index()
    orig_segs.rename(columns={'index': 'filename', 0: 'num_orig_segments'}, inplace=True)
    img_color_df = pd.DataFrame.from_dict(image_color, orient='index').reset_index()
    img_color_df.rename(columns={'index': 'filename', 0: 'img_colorfulness'}, inplace=True)
    img_color_df = pd.merge(img_color_df, orig_segs, on=['filename'])
    final_df = pd.merge(wide_df, img_color_df, on=['filename'], how='left')
    return pd.merge(images_df, final_df, on=['filename'], how='inner')
################################################################################
# multiprocessing
def get_batch(df, col, batchsize=50, save=True):
    '''
    DESC: split the unique values of `col` into batches
    INPUT: df=df, col=str() [col for unique identifier (ex fieldID)],
           batchsize=int(), save=bool [pickle each batch via save_obj]
    -----
    OUTPUT: list of batches (lists) of unique col values
    '''
    # BUGFIX: batch over the actual number of unique values instead of a
    # hard-coded total of 2200, which silently dropped the tail whenever
    # batchsize did not divide 2200 and produced empty batches for smaller
    # data. Also hoist the unique() computation out of the loop.
    unique_vals = df[col].unique().tolist()
    ls = []
    for n in range(0, len(unique_vals), batchsize):
        k = n + batchsize
        batch = unique_vals[n:k]
        if save:
            save_obj(batch, 'fields{}_{}.p'.format(n, k))
        ls.append(batch)
    return ls
def batch_iterator(n_items, batch_size):
    '''Yield (start, end) slice bounds that partition range(n_items) into
    batches of batch_size; the last batch may be shorter.'''
    import math
    # The +1e-9 forces float division (a Python 2 idiom, presumably); it does
    # not change the result for positive integer inputs.
    n_batches = int(math.ceil(n_items / (batch_size + 1e-9)))
    for batch_num in range(n_batches):
        start = batch_num * batch_size
        end = min((batch_num + 1) * batch_size, n_items)
        yield (start, end)
################################################################################
# Plotting functions
def display_edges(image, rag, threshold):
    """Draw edges of a RAG on its image
    Returns a modified image with the edges drawn. Edges are drawn in green
    and nodes are drawn in yellow.
    Parameters
    ----------
    image : ndarray
        The image to be drawn on (not mutated; a copy is modified).
    rag : RAG
        The Region Adjacency Graph; each node must carry a 'centroid' attribute.
    threshold : float
        Only edges in `rag` below `threshold` are drawn.
    Returns:
    out: ndarray
        Image with the edges drawn.
    weights : list of float
        Weights of the drawn (below-threshold) edges.
    """
    image = image.copy()
    rag2 = rag.copy()
    weights = []
    for edge in rag2.edges():
        n1, n2 = edge
        # NOTE(review): Graph.node was removed in networkx 2.4 (use .nodes)
        # and draw.circle was removed in scikit-image 0.19 (use draw.disk) —
        # confirm the pinned library versions.
        r1, c1 = map(int, rag2.node[n1]['centroid'])
        r2, c2 = map(int, rag2.node[n2]['centroid'])
        line = draw.line(r1, c1, r2, c2)
        circle = draw.circle(r1,c1,2)
        if rag2[n1][n2]['weight'] < threshold:
            image[line] = 0,1,0  # green edge
            weights.append(rag2[n1][n2]['weight'])
        image[circle] = 1,1,0  # yellow node marker
    return image, weights
def plot_segments(image, seg):
    '''
    DESC: Plot segments boundaries, centroids on image and mean color per segment image
    INPUT: image=np.array() (BGR channel order, as read by OpenCV — TODO confirm),
           seg=np.array() of integer segment labels
    -----
    OUTPUT: None (displays two figures: mean color per segment with black
            boundaries, and the image with boundaries plus labeled centroids)
    '''
    # Mean color per segment, with black segment boundaries drawn on top.
    out = color.label2rgb(seg, image, kind='avg')
    out = segmentation.mark_boundaries(out, seg, (0, 0, 0))
    io.imshow(out)
    io.show()
    # Centroid of each segment, computed against the grayscale image.
    grayscaledimg = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    regions = regionprops(seg, grayscaledimg)
    relabeled_centroids = [(region['label'], region['centroid']) for region in regions]
    fig = plt.figure("Superpixels")
    ax = fig.add_subplot(1, 1, 1)
    # BGR -> RGB conversion for matplotlib display before overlaying boundaries.
    ax.imshow(mark_boundaries(img_as_float(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)), seg))
    c_dict = {}
    for label, center in relabeled_centroids:
        # centroid is (row, col); plt.text expects (x=col, y=row)
        x,y =center
        c_dict[label] = center
        plt.text(y, x, label, color='yellow')
    plt.show()
    return
| [
"grossenbacher.max@gmail.com"
] | grossenbacher.max@gmail.com |
484a3832fe1220b753cbd655f80c945e40facef4 | 74df9552654d77a89869f9723f833f074014bb55 | /organization/application.py | c8db2358ba16d55120cc29ab0414576f2f4f6279 | [] | no_license | dipayandutta/flask-codes | c7d11e875b4f06ec4924f4b692d65e4f38472df6 | 2ca9ff7e31ca33cb0307c9f58d2911d4f2da735c | refs/heads/master | 2021-07-24T12:08:01.813535 | 2020-04-06T14:20:47 | 2020-04-06T14:20:47 | 131,844,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
# Create the Flask application instance
app = Flask(__name__)
# Load the main configuration file.
# NOTE(review): the absolute path ties the app to one machine — consider a
# relative path or an environment variable.
app.config.from_pyfile('/work/python/flask/organization/configuration/config.py')
# make the database instance
db = SQLAlchemy(app)
# Import all views after `app` and `db` exist — presumably to avoid a
# circular import; the wildcard import registers the routes as a side effect.
from view.views import *
if __name__ == '__main__':
    app.run()
| [
"dipayan@capitalnumbers.com"
] | dipayan@capitalnumbers.com |
0a88cd981f6448079b985065446b946dc4bdefd9 | fcc41b2d76a53bc0d674627f04808110b727d018 | /setup.py | 7e4111abefb8d8358514ac967ff205c058f4ea1e | [
"BSD-3-Clause"
] | permissive | underworlds-robot/uwds | 595208191cb5eb72f2a1868b9a39a88389fcbf14 | 018544b59f32a7d23a57bf7dfb5b500f028e5fe2 | refs/heads/master | 2020-04-06T23:16:33.930700 | 2019-12-03T16:19:31 | 2019-12-03T16:19:31 | 157,863,497 | 0 | 3 | BSD-3-Clause | 2019-03-29T12:42:53 | 2018-11-16T12:17:55 | C++ | UTF-8 | Python | false | false | 243 | py | from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
    packages=['pyuwds'],       # Python packages to install
    package_dir={'': 'src'},   # package sources live under src/
)
setup(**setup_args)
| [
"ysallami@laas.fr"
] | ysallami@laas.fr |
50d8f72d24ab0c5274f092c52952957c8a218c6d | b272be7919fc7d31f8f8bceee016cc7889aab5fc | /food/settings.py | f233c4ba57b305ff12444cd77cb8f2ab382e0431 | [] | no_license | olzhobaeva13/food_project | 2b6931f36febe51bc977ea8088babe09f0057484 | fdbabffe1c9d01b2c1e5fc5ef5a54ac3d62ae8a4 | refs/heads/main | 2023-07-10T13:48:54.966700 | 2021-08-22T11:02:31 | 2021-08-22T11:02:31 | 397,274,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,321 | py | """
Django settings for food project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from an environment variable before deploying.
SECRET_KEY = 'django-insecure-i@w48p$-7a*d@#4fts=p6-$6(b-wff8o7$*c#bahwy%e%7!ac$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): empty ALLOWED_HOSTS is acceptable for local development only.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'foodapp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'food.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'food.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"avatolzhobaeva@gmail.com"
] | avatolzhobaeva@gmail.com |
31290be7c65e51f040ca55c421c9fbc6289a8300 | 07a883a45103830506b0c1e47d2f425af0b8f89a | /app.py | a9a45c69e3aa0e75126b4660c5b6bcb062a5eec5 | [] | no_license | surajs004/Car-Price-Prediction-end-to--end- | b11562081beea0c316770370ebf804c3acca9ccd | 9489e2ef23c9629adafdf194f7d50a725d877920 | refs/heads/main | 2023-05-12T06:39:30.182464 | 2021-06-04T11:01:22 | 2021-06-04T11:01:22 | 373,812,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,996 | py |
from flask import Flask, render_template, request
import jsonify
import requests
import pickle
import numpy as np
import sklearn
from sklearn.preprocessing import StandardScaler
app = Flask(__name__)
# Load the trained regressor once at startup. Using a context manager closes
# the file handle — pickle.load(open(...)) leaked it.
with open('random_forest_regression_model.pkl', 'rb') as model_file:
    model = pickle.load(model_file)
@app.route('/',methods=['GET'])
def Home():
    # Render the landing page with the car-details form.
    return render_template('index.html')
standard_to = StandardScaler()
@app.route("/predict", methods=['POST'])
def predict():
    """Predict a used-car resale price from the submitted form and re-render the page."""
    # Default: fuel type neither Diesel nor Petrol (CNG) maps to 0/0.
    Fuel_Type_Diesel=0
    if request.method == 'POST':
        Year = int(request.form['Year'])
        Present_Price=float(request.form['Present_Price'])
        Kms_Driven=int(request.form['Kms_Driven'])
        # Mileage is log-transformed — presumably matching the training
        # pipeline; confirm against the notebook that built the model.
        Kms_Driven2=np.log(Kms_Driven)
        Owner=int(request.form['Owner'])
        # One-hot encode fuel type.
        Fuel_Type_Petrol=request.form['Fuel_Type_Petrol']
        if(Fuel_Type_Petrol=='Petrol'):
            Fuel_Type_Petrol=1
            Fuel_Type_Diesel=0
        else:
            Fuel_Type_Petrol=0
            Fuel_Type_Diesel=1
        # NOTE(review): car age is computed against a hard-coded reference
        # year (2020) — confirm or replace with the current year.
        Year=2020-Year
        Seller_Type_Individual=request.form['Seller_Type_Individual']
        if(Seller_Type_Individual=='Individual'):
            Seller_Type_Individual=1
        else:
            Seller_Type_Individual=0
        Transmission_Mannual=request.form['Transmission_Mannual']
        if(Transmission_Mannual=='Mannual'):
            Transmission_Mannual=1
        else:
            Transmission_Mannual=0
        # Feature order must match the order the model was trained with.
        prediction=model.predict([[Present_Price,Kms_Driven2,Owner,Year,Fuel_Type_Diesel,Fuel_Type_Petrol,Seller_Type_Individual,Transmission_Mannual]])
        output=round(prediction[0],2)
        if output<0:
            # NOTE(review): this branch passes 'prediction_texts' (with an s)
            # while the other passes 'prediction_text' — confirm which name
            # the template actually reads.
            return render_template('index.html',prediction_texts="Sorry you cannot sell this car")
        else:
            return render_template('index.html',prediction_text="You Can Sell The Car at {}".format(output))
    else:
        return render_template('index.html')
if __name__=="__main__":
app.run(debug=True) | [
"noreply@github.com"
] | surajs004.noreply@github.com |
7fc60fb3e6e5e97749994890220137591cb4ec56 | 51f6443116ef09aa91cca0ac91387c1ce9cb445a | /Curso_Python_3_UDEMY/banco_dados/incluir_contato.py | 79c073a92c51debf70d449c7b8897597efd60f36 | [
"MIT"
] | permissive | DanilooSilva/Cursos_de_Python | f449f75bc586f7cb5a7e43000583a83fff942e53 | 8f167a4c6e16f01601e23b6f107578aa1454472d | refs/heads/main | 2023-07-30T02:11:27.002831 | 2021-10-01T21:52:15 | 2021-10-01T21:52:15 | 331,683,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | from mysql.connector.errors import ProgrammingError
from db import nova_conexao
sql = 'INSERT INTO contatos (nome, tel) VALUES (%s, %s)'
args = ('Danilo', '94955-2951')
with nova_conexao() as conexao:
try:
cursor = conexao.cursor()
cursor.execute(sql, args)
conexao.commit()
except ProgrammingError as e:
print(f'Erro: {e.msg}')
else:
print('1 registro incluído, ID:', cursor.lastrowid)
| [
"dno.gomesps@gmail.com"
] | dno.gomesps@gmail.com |
4ccedddc2b57b838b1ce8d371d22755df9c213c3 | bc72897a3c8141de62446162cc254fa1993bf646 | /app/webapp/views.py | 3907327d2334c865e243fc8f3f73b7c72003ef33 | [] | no_license | Le-Steph/Django | 625c7abc2a3f91e0632aaf6a6ea95bde9bca8f7d | d8f30fcb7b840e20a2123f631536a5c75b4738c8 | refs/heads/master | 2020-12-02T17:14:15.152794 | 2019-12-26T13:57:33 | 2019-12-26T13:57:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,120 | py | from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from webapp.models import *
from webapp.serializers import *
class farmerList(APIView):
def get(self, request):
farmer1 = farmer.objects.all()
serializer = farmerSerializer(farmer1, many=True)
return Response(serializer.data)
def post(self):
pass
class productList(APIView):
def get(self, request):
product1 = product.objects.all()
serializer = productSerializer(product1, many=True)
return Response(serializer.data)
def post(self):
pass
class certificateList(APIView):
def get(self, request):
certificate1 = certificate.objects.all()
#certificate1 = certificate1.objects.filter(types="biologique")
serializer = certificateSerializer(certificate1, many=True)
return Response(serializer.data)
def post(self):
pass | [
"noreply@github.com"
] | Le-Steph.noreply@github.com |
6cf4fbb0fccf9b261da5a1544208c26dbea281eb | d5f7891e3e9779f61089f66a0b7caf7088100834 | /supply_transaction/models/transaction.py | 60706c51be729837a03da279c3331ec9359a32bf | [] | no_license | sandeepgit32/flask_inventory_microservices | bf8dd2faa3b346c2cc8f78237f2abff3b85479c5 | 4fc8e2c436480a1fa7c9d16a3d301453241e1ba3 | refs/heads/main | 2023-05-31T07:08:37.594542 | 2021-06-27T06:31:23 | 2021-06-27T06:31:23 | 374,137,084 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,844 | py | from typing import List
from db import db
from sqlalchemy import and_
class TransactionModel(db.Model):
__tablename__ = "transactions"
id = db.Column(db.Integer, primary_key=True)
timestamp = db.Column(db.Date)
supplier_name = db.Column(db.String(100), nullable=False)
city = db.Column(db.String(50))
zipcode = db.Column(db.Integer)
contact_person = db.Column(db.String(80))
phone = db.Column(db.String(20))
email = db.Column(db.String(80))
product_code = db.Column(db.String(80), nullable=False)
product_name = db.Column(db.String(80), nullable=False)
product_category = db.Column(db.String(50))
unit_price = db.Column(db.Float(precision=2), nullable=False)
quantity = db.Column(db.Integer)
total_cost = db.Column(db.Float(precision=2), nullable=False)
measure_unit = db.Column(db.String(10))
@classmethod
def find_by_id(cls, id: int) -> "TransactionModel":
return cls.query.filter_by(id=id).first()
@classmethod
def find_all(cls) -> List["TransactionModel"]:
return cls.query.all()
@classmethod
def filter_by_supplier(cls, supplier_name: str) -> List["TransactionModel"]:
return cls.query.filter_by(supplier_name=supplier_name)
@classmethod
def filter_by_product(cls, product_code: str) -> List["TransactionModel"]:
return cls.query.filter_by(product_code=product_code)
@classmethod
def filter_by_product_and_supplier(cls, product_code: str, supplier_name: str) -> List["TransactionModel"]:
return cls.query.filter(and_(cls.supplier_name==supplier_name, cls.product_code==product_code)).all()
def save_to_db(self) -> None:
db.session.add(self)
db.session.commit()
def delete_from_db(self) -> None:
db.session.delete(self)
db.session.commit()
| [
"sandip.karar@augmentedscm.com"
] | sandip.karar@augmentedscm.com |
9a897640ec04549bcc8a09a4e2f8a660fe844975 | 61d3e8e75a0733ac707490059ae9306b57afd1cd | /ppcdef.py | bf16c2516168f3b08316214ea87f74b30a89df1e | [] | no_license | Swind/PPCGOV | fd9870766c0fdbceaee5b3cabcbd7bf61b2cc328 | d9fce2c0ec9242006b2d1f2475965bb0f9f70c94 | refs/heads/master | 2016-09-06T16:57:49.975095 | 2014-05-12T01:35:00 | 2014-05-12T01:35:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,916 | py | ORG_IDS = { u'\u4ea4\u901a\u90e8': u'3.15',
u'\u5167\u653f\u90e8': u'3.1',
u'\u5357\u6295\u7e23': u'3.76.48',
u'\u53f0\u7063\u4e2d\u6cb9\u80a1\u4efd\u6709\u9650\u516c\u53f8': u'3.13.50',
u'\u53f0\u7063\u96fb\u529b\u80a1\u4efd\u6709\u9650\u516c\u53f8': u'3.13.31',
u'\u53f8\u6cd5\u9662': u'5',
u'\u5609\u7fa9\u5e02': u'3.76.60',
u'\u5609\u7fa9\u7e23': u'3.76.50',
u'\u570b\u5bb6\u5b89\u5168\u6703\u8b70': u'8',
u'\u570b\u9632\u90e8': u'3.5',
u'\u57fa\u9686\u5e02': u'3.76.57',
u'\u5916\u4ea4\u90e8': u'3.3',
u'\u5b9c\u862d\u7e23': u'3.76.42',
u'\u5c4f\u6771\u7e23': u'3.76.53',
u'\u5f70\u5316\u7e23': u'3.76.47',
u'\u65b0\u5317\u5e02': u'3.82',
u'\u65b0\u7af9\u5e02': u'3.76.58',
u'\u65b0\u7af9\u7e23': u'3.76.44',
u'\u6843\u5712\u7e23': u'3.76.43',
u'\u6cd5\u52d9\u90e8': u'3.11',
u'\u6f8e\u6e56\u7e23': u'3.76.56',
u'\u76e3\u5bdf\u9662': u'7',
u'\u7acb\u6cd5\u9662': u'4',
u'\u7d93\u6fdf\u90e8': u'3.13',
u'\u7e3d\u7d71\u5e9c': u'2',
u'\u8003\u8a66\u9662': u'6',
u'\u81fa\u4e2d\u5e02': u'3.76.59',
u'\u81fa\u4e2d\u7e23': u'3.76.46',
u'\u81fa\u5317\u5e02': u'3.79',
u'\u81fa\u5317\u7e23': u'3.76.41',
u'\u81fa\u5357\u5e02': u'3.76.61',
u'\u81fa\u5357\u7e23': u'3.76.51',
u'\u81fa\u6771\u7e23': u'3.76.54',
u'\u82b1\u84ee\u7e23': u'3.76.55',
u'\u82d7\u6817\u7e23': u'3.76.45',
u'\u884c\u653f\u9662': u'3',
u'\u8ca1\u653f\u90e8': u'3.7',
u'\u9023\u6c5f\u7e23': u'3.71.3',
u'\u91d1\u9580\u7e23': u'3.71.2',
u'\u96f2\u6797\u7e23': u'3.76.49',
u'\u9ad8\u96c4\u5e02': u'3.83',
u'\u9ad8\u96c4\u7e23': u'3.76.52'}
PAYLOAD = {'awardAnnounceEndDate': '103/04/29',
'awardAnnounceStartDate': '103/04/29',
'btnQuery': '\xe6\x9f\xa5\xe8\xa9\xa2',
'gottenVendorId': '',
'gottenVendorName': '',
'hid_1': '1',
'hid_2': '1',
'hid_3': '1',
'isReConstruct': '',
'item': '',
'location': '',
'maxBudget': '',
'method': 'search',
'minBudget': '',
'orgId': '',
'orgName': '',
'priorityCate': '',
'proctrgCate': '',
'radProctrgCate': '',
'searchMethod': 'true',
'searchTarget': 'ATM',
'submitVendorId': '',
'submitVendorName': '',
'tenderId': '',
'tenderName': '',
'tenderRange': '',
'tenderStatus': '4,5,21,29',
'tenderWay': ''}
NO_DATA = "NO_DATA"
| [
"swind@code-life.info"
] | swind@code-life.info |
acb65fbacc27a8ad5009c305ffa87265cef993a0 | be6d5ac1b415335cc7a27cf44e3afa041ef299e3 | /1_3.py | 764d33752a0c10e1a5835a028ea67466c05963df | [
"MIT"
] | permissive | JeffreyAsuncion/PCEP_training_2020_12 | 4746a28f399c499e1bc2c3bf848ce0b05ad903bd | 7477fb57a526ca0efdd156811aa72fae6129b062 | refs/heads/main | 2023-02-05T07:52:13.374651 | 2020-12-20T16:50:24 | 2020-12-20T16:50:24 | 319,857,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | print(2**3)
print(2**3.)
print(2.**3)
print(2.**3.)
print(5//2)
print(2**2**3)
print(2*4)
print(2**4)
print(2.*4)
print(2**4.)
print(2/4)
print(2//4)
print(-2/4)
print(-2//4)
print(2%4)
print(2%-4) | [
"jeffrey.l.asuncion@gmail.com"
] | jeffrey.l.asuncion@gmail.com |
39dedc3e1806828edc897ee4ef1594e2c65d6363 | b492b2ef35419268d5385ef2ee1f3e3948e33b67 | /src/main.py | b8ad513368e1f7f06e3b421e7fd37082c290a3e7 | [] | no_license | team5115/frc_tabletop_2020_infinite_recharge | d09ba5cab6dd86f22e9aff9907b012b87acefc0f | 34e66525c05d195ce6a024d89608f080ec1dd985 | refs/heads/master | 2023-01-22T22:20:51.309971 | 2023-01-09T22:13:13 | 2023-01-09T22:13:13 | 232,722,432 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 17,170 | py | #!/usr/bin/python
#
#
"""
FRC robot sim
Team 5115 - Knight Riders
Author: Joe Adams
Email: joseph.s.adams@gmail.com
URL: https://github.com/jsadams/frc_tabletop.git
version: 3
20/01/08 - updated for 2020
19/01/11 - multiple keymaps now working
"""
import pygame, sys
from pygame.locals import *
from colors import *
import rotation_utils
import pygame
from robot import Robot
#from cargo_ship import Cargo_ship
from shield_generator import Shield_generator
from wall import Wall
from truss import Truss
from trench_run import Trench_run
import control_panel
import loading_bay
import power_port
import field
#from hab_platform_level_0 import Hab_platform_level_0
#from hab_platform_level_1 import Hab_platform_level_1
#from hab_platform_level_2 import Hab_platform_level_2
#from hab_platform_level_3 import Hab_platform_level_3
#from depot import Depot
#from loading_station import LoadingStation
from colors import *
from units import *
from pygame.math import Vector2
import keymaps
class Game:
def __init__(self):
##############################################
#field_width=230*in_*3
#field_height=133*in_*3
left_margin=50
right_margin=50
bottom_margin=50
top_margin=50
self.field_width=52*ft_+5.25*in_;
self.field_height=26*ft_+11.25*in_;
self.hab_line_x=94.3*in_;
# Call this function so the Pygame library can initialize itself
pygame.init()
# Create an 800x600 sized screen
#screen_size=[800,600]
screen_size=[int(field.screen_width*1.10),int(field.screen_height*1.10)]
self.screen = pygame.display.set_mode(screen_size,pygame.RESIZABLE)
# Set the title of the window
pygame.display.set_caption('Infinite Recharge')
self.font = pygame.font.SysFont('Arial', 24)
wall_thickness=1*in_
wall_1=Wall(field.min_x,field.min_y,width=self.field_width,height=wall_thickness,color=BLACK)
wall_2=Wall(field.min_x,field.max_y,width=self.field_width,height=wall_thickness,color=BLACK)
wall_3=Wall(field.min_x,field.min_y,width=wall_thickness,height=self.field_height,color=BLACK)
wall_4=Wall(field.max_x,field.min_y,width=wall_thickness,height=self.field_height,color=BLACK)
#####################################################################3
#
#
# Shield Generator
#
#
#####################################################################
angle=22.5
#angle=0
shield_generator_xo=field.mid_x
shield_generator_yo=field.mid_y
shield_generator_1=Shield_generator(shield_generator_xo,shield_generator_yo,angle)
#####################################################################3
#
#
# Trusses
#
#
#####################################################################
sg_width=14*ft_+0.75*in_
sg_height=13*ft_+1.5*in_
truss_width=12*in_;
xo=shield_generator_xo
yo=shield_generator_yo
dx=sg_width/2.0-truss_width/2.0
dy1=sg_height/2.0-truss_width
dy2=sg_height/2.0
truss_origin=Vector2(xo,yo)
truss_1_xo=shield_generator_xo-dx
truss_1_yo=shield_generator_yo+dy1
truss_1_r=Vector2(truss_1_xo,truss_1_yo)
truss_1_r=rotation_utils.rotate_vector(truss_1_r,truss_origin,-angle)
truss_2_xo=shield_generator_xo+dx
truss_2_yo=shield_generator_yo+dy1
truss_2_r=Vector2(truss_2_xo,truss_2_yo)
truss_2_r=rotation_utils.rotate_vector(truss_2_r,truss_origin,-angle)
truss_3_xo=shield_generator_xo+dx
truss_3_yo=shield_generator_yo-dy2
truss_3_r=Vector2(truss_3_xo,truss_3_yo)
truss_3_r=rotation_utils.rotate_vector(truss_3_r,truss_origin,-angle)
truss_4_xo=shield_generator_xo-dx
truss_4_yo=shield_generator_yo-dy2
truss_4_r=Vector2(truss_4_xo,truss_4_yo)
truss_4_r=rotation_utils.rotate_vector(truss_4_r,truss_origin,-angle)
truss_1=Truss(truss_1_r,angle,GREEN)
truss_2=Truss(truss_2_r,angle,GREEN)
truss_3=Truss(truss_3_r,angle,GREEN)
truss_4=Truss(truss_4_r,angle,GREEN)
#####################################################################3
#
#
# Trench runs
#
#
#####################################################################
# trench_run_red_xo=field.mid_x
# trench_run_red_yo=field.min_y
# trench_run_blue_xo=trench_run_red_xo
# trench_run_blue_yo=field.max_y
# trench_run_red=Trench_run(trench_run_red_xo,trench_run_red_yo,BLUE)
# trench_run_blue=Trench_run(trench_run_blue_xo,trench_run_blue_yo,RED)
############################################################
#
#
# control_panel
#
#
#############################################################
control_panel_red_xo=field.mid_x+field.trench_width/2.0-control_panel.width*2
control_panel_red_yo=field.min_y
control_panel_blue_xo=field.mid_x-field.trench_width/2.0+control_panel.width*2
control_panel_blue_yo=field.max_y-field.trench_height
control_panel_red=control_panel.Control_panel(control_panel_red_xo,control_panel_red_yo,BLUE)
control_panel_blue=control_panel.Control_panel(control_panel_blue_xo,control_panel_blue_yo,RED)
############################################################
#
#
# loading bays
#
#
#############################################################
loading_bay_offset=5*ft_
loading_bay_red_xo=field.max_x
loading_bay_red_yo=field.min_y+loading_bay_offset
loading_bay_origin_red=Vector2(loading_bay_red_xo,loading_bay_red_yo)
self.loading_bay_red=loading_bay.Loading_bay(loading_bay_origin_red,RED,True)
loading_bay_blue_xo=field.min_x-loading_bay.WIDTH
loading_bay_blue_yo=field.max_y-loading_bay_offset-loading_bay.HEIGHT
loading_bay_origin_blue=Vector2(loading_bay_blue_xo,loading_bay_blue_yo)
self.loading_bay_blue=loading_bay.Loading_bay(loading_bay_origin_blue,BLUE)
############################################################
#
#
# power port
#
#
#############################################################
power_port_offset=7*ft_
power_port_red_xo=field.min_x-power_port.WIDTH
power_port_red_yo=field.min_y+power_port_offset-power_port.HEIGHT
power_port_origin_red=Vector2(power_port_red_xo,power_port_red_yo)
self.power_port_red=power_port.Power_port(power_port_origin_red,RED)
power_port_blue_xo=field.max_x
power_port_blue_yo=field.max_y-power_port_offset
power_port_origin_blue=Vector2(power_port_blue_xo,power_port_blue_yo)
self.power_port_blue=power_port.Power_port(power_port_origin_blue,BLUE,True)
############################################
# Robot starts
#
#x=field.min_x
#y=field.mid_y
#field.min_x=0
dy=field.max_y-field.min_y
# field.mid_x=field.max_x/2.0
# field.mid_y=field.max_y/2.0
blue_x=field.initiation_line_blue_x
blue_y1=field.mid_y-dy/3
blue_y2=field.mid_y
blue_y3=field.mid_y+dy/3
red_x=field.initiation_line_red_x
red_y1=blue_y1
red_y2=blue_y2
red_y3=blue_y3
# Create the robot object
self.robot1 = Robot(blue_x, blue_y1,BLUE1,angle=270,keymap=keymaps.key_map_1, is_mecanum=True,team_name=5115,width=36*in_,length=45*in_)
self.robot2 = Robot(blue_x, blue_y2,BLUE2,angle=270,keymap=keymaps.key_map_2, is_mecanum=False,width=3*ft_,team_name=493)
self.robot3 = Robot(blue_x, blue_y3,BLUE3,angle=270,keymap=keymaps.key_map_3, is_mecanum=False,team_name=503)
self.robot4 = Robot(red_x, red_y1,RED1,angle=90,keymap=keymaps.key_map_4,is_mecanum=True,team_name=3361,width=3*ft_)
self.robot5 = Robot(red_x, red_y2,RED2,angle=90,keymap=keymaps.key_map_5,is_mecanum=False,team_name=3258)
self.robot6 = Robot(red_x, red_y3,RED3,angle=90,keymap=keymaps.key_map_6,is_mecanum=False,team_name=2106)
# self.all_sprites_list = pygame.sprite.Group()
self.all_sprites_list = pygame.sprite.OrderedUpdates()
self.all_sprites_list.add(wall_1)
self.all_sprites_list.add(wall_2)
self.all_sprites_list.add(wall_3)
self.all_sprites_list.add(wall_4)
# self.all_sprites_list.add(cargo_ship_1)
self.all_sprites_list.add(shield_generator_1)
# self.all_sprites_list.add(rocket_1)
# self.all_sprites_list.add(rocket_2)
# self.all_sprites_list.add(rocket_3)
# self.all_sprites_list.add(rocket_4)
# self.all_sprites_list.add(trench_run_red)
# self.all_sprites_list.add(trench_run_blue)
self.all_sprites_list.add(control_panel_blue)
self.all_sprites_list.add(control_panel_red)
self.all_sprites_list.add(self.loading_bay_blue)
self.all_sprites_list.add(self.loading_bay_red)
self.all_sprites_list.add(self.power_port_blue)
self.all_sprites_list.add(self.power_port_red)
self.all_sprites_list.add(truss_1)
self.all_sprites_list.add(truss_2)
self.all_sprites_list.add(truss_3)
self.all_sprites_list.add(truss_4)
self.all_sprites_list.add(self.robot1)
self.all_sprites_list.add(self.robot2)
self.all_sprites_list.add(self.robot3)
self.all_sprites_list.add(self.robot4)
self.all_sprites_list.add(self.robot5)
self.all_sprites_list.add(self.robot6)
self.solid_sprites_list = pygame.sprite.Group()
self.solid_sprites_list.add(wall_1)
self.solid_sprites_list.add(wall_2)
self.solid_sprites_list.add(wall_3)
self.solid_sprites_list.add(wall_4)
self.solid_sprites_list.add(truss_1)
self.solid_sprites_list.add(truss_2)
self.solid_sprites_list.add(truss_3)
self.solid_sprites_list.add(truss_4)
self.solid_sprites_list.add(self.robot1)
self.solid_sprites_list.add(self.robot2)
self.solid_sprites_list.add(self.robot3)
self.solid_sprites_list.add(self.robot4)
self.solid_sprites_list.add(self.robot5)
self.solid_sprites_list.add(self.robot6)
self.robots_list = pygame.sprite.Group()
self.robots_list.add(self.robot1)
self.robots_list.add(self.robot2)
self.robots_list.add(self.robot3)
self.robots_list.add(self.robot4)
self.robots_list.add(self.robot5)
self.robots_list.add(self.robot6)
self.clock = pygame.time.Clock()
# def draw_vertical_line(self,x,color):
# line_width=2*in_
# pygame.draw.line(self.screen, color, (x, field.min_y), (x, field.max_y), line_width)
# def draw_horizontal_line(self,y,color):
# line_width=2*in_
# pygame.draw.line(self.screen, color, (field.min_x, y), (field.max_x, y), line_width)
# def draw_rectangle(self,x1,y1,x2,y2,color):
# line_width=2*in_
# pygame.draw.line(self.screen, color, (field.min_x, y), (field.max_x, y), line_width)
# def draw_trench_runs(self):
# thickness=5
# width=self.trench_width
# height=self.trench_height
# trench_run_red_xo=field.mid_x-self.trench_width/2
# trench_run_red_yo=field.min_y
# trench_run_blue_xo=trench_run_red_xo
# trench_run_blue_yo=field.max_y-self.trench_height
# x=trench_run_blue_xo
# y=trench_run_blue_yo
# pygame.draw.rect(self.screen, BLUE, (trench_run_blue_xo,trench_run_blue_yo,width,height), thickness)
# pygame.draw.rect(self.screen, RED, (trench_run_red_xo,trench_run_red_yo,width,height), thickness)
def redraw_screen(self):
line_width=2*in_
# draw on the surface object
self.screen.fill(WHITE)
pygame.draw.polygon(self.screen, GREY, ((field.min_x,field.min_y), (field.max_x, field.min_y), (field.max_x,field.max_y), (field.min_x,field.max_y), (field.min_x, field.min_y)))
field.draw_horizontal_line(self.screen,y=field.mid_y,color=YELLOW)
#self.draw_vertical_line(x=field.initiation_line_blue_x,color=BLUE)
#self.draw_vertical_line(x=field.initiation_line_red_x,color=RED)
field.draw_vertical_line(self.screen,field.initiation_line_blue_x,BLUE)
field.draw_vertical_line(self.screen,field.initiation_line_red_x,RED)
field.draw_trench_runs(self.screen)
self.loading_bay_blue.draw_triangle(self.screen)
self.loading_bay_red.draw_triangle(self.screen)
self.power_port_red.draw_triangle(self.screen)
self.power_port_blue.draw_triangle(self.screen)
def run(self):
d_angle=3
d_speed=3
done=False
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
elif event.type == pygame.VIDEORESIZE:
old_surface_saved = surface
surface = pygame.display.set_mode((event.w, event.h),
pygame.RESIZABLE)
# On the next line, if only part of the window
# needs to be copied, there's some other options.
surface.blit(old_surface_saved, (0,0))
del old_surface_saved
# # Set the speed based on the key pressed
# elif event.type == pygame.KEYDOWN:
# if event.key == pygame.K_a:
# self.robot1.changespeed(-d_speed, 0)
# elif event.key == pygame.K_d:
# self.robot1.changespeed(d_speed, 0)
# elif event.key == pygame.K_w:
# self.robot1.changespeed(0, -d_speed)
# elif event.key == pygame.K_s:
# self.robot1.changespeed(0, d_speed)
# elif event.key == pygame.K_q:
# self.robot1.rotate(d_angle)
# elif event.key == pygame.K_e:
# self.robot1.rotate(-d_angle)
# # Reset speed when key goes up
# elif event.type == pygame.KEYUP:
# if event.key == pygame.K_a:
# self.robot1.changespeed(d_speed, 0)
# elif event.key == pygame.K_d:
# self.robot1.changespeed(-d_speed, 0)
# elif event.key == pygame.K_w:
# self.robot1.changespeed(0, d_speed)
# elif event.key == pygame.K_s:
# self.robot1.changespeed(0, -d_speed)
# elif event.key == pygame.K_q:
# self.robot1.rotate(-d_angle)
# elif event.key == pygame.K_e:
# self.robot1.rotate(d_angle)
elif event.type == pygame.KEYDOWN:
for robot in self.robots_list:
robot.process_event(event)
elif event.type == pygame.KEYUP:
for robot in self.robots_list:
robot.process_event(event)
for robot in self.robots_list:
robot.update(self.solid_sprites_list)
# This actually moves the robot block based on the current speed
#self.robot1.update(self.solid_sprites_list)
#self.robot2.update()
#self.robot3.update()
#self.robot4.update()
#self.robot5.update()
#self.robot6.update()
# -- Draw everything
# Clear self.screen
#self.screen.fill(WHITE)
self.redraw_screen()
# Draw sprites
self.all_sprites_list.draw(self.screen)
# Flip screen
pygame.display.flip()
# Frame Rate
self.clock.tick(60)
# pygame.display.set_caption(f'FPS: {round(self.clock.get_fps(), 2)}')
#if self.robot1.is_collided_with(self.robot2):
# print "COLLISION"
pygame.quit()
#######################################################
#
#
# main
#
#
#######################################################
if __name__ == '__main__':
try:
g = Game()
g.run()
except:
traceback.print_exc()
pygame.quit()
| [
"jadams@jasmine"
] | jadams@jasmine |
3a020a27d85122f1bb0d985fb9f33c93ca341988 | 1a3916e5ff14ad1689b5f92ebc1041d1d4ab3884 | /ui/flask_page.py | fd119a67238d38f3ff06fca69b2f795939db1792 | [] | no_license | pooniavaibhav/twitter_analysis_flask | c60dcc147332fa26d2def2c13f2952a31466e36c | f8aec1e35ebe23b55c5006c06e2ec9edc7d65d28 | refs/heads/master | 2022-11-28T17:02:26.981135 | 2020-08-09T08:55:47 | 2020-08-09T08:55:47 | 286,198,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,952 | py | from flask import Flask, render_template,url_for, redirect, request
from ui.forms import SearchForm
from src.data_extract import data_extract
#__name__ is the special variable in python that has tha name of the module.
#This is done so that you flask know where to look for you templates and static files.
app = Flask(__name__)
app.config['SECRET_KEY'] = '8acf8cf5d43f5d3edf93f2f9a433f640'
#route-A route is what we write into the browser to go to different pages. so here we do through route decorators.
# so this forward slash is the root/home page of our website.
@app.route("/", methods = ['GET','POST'])
def register():
form = SearchForm()
if form.submit():
twitter = form.twitter_handle.data
count = form.count.data
if twitter:
extraxt_obj = data_extract(twitter, count)
tweet_df = extraxt_obj.authentication()
tweet_df = extraxt_obj.clean(tweet_df)
tweet_df = extraxt_obj.sentiment_analysis(tweet_df)
tweet_df = extraxt_obj.key_phrases(tweet_df)
tweet_df = extraxt_obj.get_entities(tweet_df)
print(tweet_df)
tweet_df.to_csv("/home/webhav/Documents/sentiment_analysis/analysis/sentiments_alaysis.csv")
return redirect(url_for('about'))
return render_template('search.html', title = 'search', form = form)
@app.route("/about")
def about():
return render_template('about.html')
if __name__ == "__main__":
app.run(host="localhost", port=8000, debug=True)
def analysis(twitter, count):
extraxt_obj = data_extract(twitter, count)
tweet_df = extraxt_obj.authentication()
tweet_df = extraxt_obj.clean(tweet_df)
tweet_df = extraxt_obj.sentiment_analysis(tweet_df)
tweet_df = extraxt_obj.key_phrases(tweet_df)
tweet_df = extraxt_obj.get_entities(tweet_df)
print(tweet_df)
tweet_df.to_csv("/home/webhav/Documents/sentiment_analysis/analysis/sentiments_alaysis.csv") | [
"vaibhav.poonia@crmnext.in"
] | vaibhav.poonia@crmnext.in |
6f65e7602b147ee2a2f2b5e4762c1b4200a9435b | ea6730fe206d29758fef6b8bd24d3e8ec88adb81 | /CS253/HW-07/src/db_model/user.py | f169129d44b10f3499f488f8dee5d59f75cbaa54 | [] | no_license | JoRoPi/Udacity | 7608d1dcdfa05ce494489d8e7a9b8fdf91449417 | 13cef904d4e7f561b545481e74936f8b867763a2 | refs/heads/master | 2020-04-19T22:29:20.812564 | 2012-06-10T20:34:34 | 2012-06-10T20:34:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,573 | py | # -*- coding: utf-8 -*-
import re
import logging
from google.appengine.ext import db
from google.appengine.api import memcache
import utils.hashing as hashing
class User(db.Model):
username = db.StringProperty(required=True)
password = db.StringProperty(required=True)
email = db.StringProperty(required=False)
created = db.DateTimeProperty(auto_now_add=True)
last_modified = db.DateTimeProperty(auto_now=True)
@classmethod
def by_id(cls, uid):
mclient = memcache.Client()
memkey = cls._memkey_by_id(uid)
user = mclient.get(memkey)
if not user:
logging.info('memcache fail: User.get_by_id')
user = User.get_by_id(uid, parent = cls._users_key())
if user:
mclient.set(memkey, user)
return user
VAL_ERROR_INVALID_USERNAME = 1
VAL_ERROR_INVALID_PASSWORD = 2
VAL_ERROR_PASSWORDS_MISMATCH = 3
VAL_ERROR_INVALID_EMAIL = 4
VAL_ERROR_USER_EXIST = 5
@classmethod
def create_user(cls, username, password, verify_password, email):
val_errors = set()
cls.valid_username(username, val_errors)
cls.valid_password(password, val_errors)
cls.valid_verify(password, verify_password, val_errors)
cls.valid_email(email, val_errors)
if val_errors:
return None, val_errors
else:
mclient = memcache.Client()
user = User(username=username, password='%s' % (hashing.make_pw_hash(username, password)), email=email)
user.put()
memkey = cls._memkey_by_id(user.key().id())
mclient.set(memkey, user)
return user, None
# Validations
@classmethod
def valid_username(cls, username, val_errors):
user_validation_re = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
if not(user_validation_re.match(username)):
val_errors.add(User.VAL_ERROR_INVALID_USERNAME)
return False
return cls.valid_user_exist(username, val_errors)
@classmethod
def valid_password(cls, password, val_errors):
password_validation_re = re.compile(r"^.{3,20}$")
if not (password_validation_re.match(password)):
val_errors.add(User.VAL_ERROR_INVALID_PASSWORD)
return False
return True
@classmethod
def valid_verify(cls, password, verify, val_errors):
if password != verify:
val_errors.add(User.VAL_ERROR_PASSWORDS_MISMATCH)
return False
return True
@classmethod
def valid_email(cls, email, val_errors):
email_validation_re = re.compile(r"^[\S]+@[\S]+\.[\S]+$")
if email and not(email_validation_re.match(email)):
val_errors.add(User.VAL_ERROR_INVALID_EMAIL)
return False
return True
@classmethod
def valid_user_exist(cls, username, val_errors):
(user, memkey, mclient) = cls._get_by_username(username)
if user:
val_errors.add(User.VAL_ERROR_USER_EXIST)
if memkey:
mclient.set(memkey, user)
return False
return True
@classmethod
def get_verified_user(cls, username, password):
(user, memkey, mclient) = cls._get_by_username(username)
if user:
if not hashing.valid_pw(username, password, user.password):
user = None
elif memkey:
mclient.set(memkey, user)
return user
@classmethod
def _users_key(cls, group = 'cs252-wiki'):
# Temporary disabled. return db.Key.from_path('users', group)
return None
@classmethod
def _memkey_by_username(cls, username):
return 'user_by_username: %s' % username
@classmethod
def _memkey_by_id(cls, uid):
return 'user_by_id: %s' % uid
@classmethod
def _get_by_username(cls, username):
""" ret (user, memkey, mclient)
user - Can be None if not found in memcache nor DB
memkey - If not None, user was get from DB and is a good idea set in memcache
mclient - memcache.Client()
"""
mclient = memcache.Client()
memkey = cls._memkey_by_username(username)
user = mclient.get(memkey)
if user:
memkey = None
else:
logging.info('memcache fail: User.gql')
query = User.gql('WHERE username=:username', username=username)
user = query.get()
return (user, memkey, mclient)
| [
"joropi01@gmail.com"
] | joropi01@gmail.com |
4cc163174dd2cd27ea349f42f6823c5afed30126 | fd41984178ffba0846fa7ab1f67c1a0843a5e3ff | /py2与py3的区别和测试/1.作业-文件的封装/dealFile.py | 43f453b28ac890199b9c17686a9fc1aff0e8e72b | [] | no_license | LasterSmithKim/Python-Base | 23f17472ee80f7224e96a4185775c9cd05ac7a98 | 27756126d999ddabf53b6bdc7114903a297464a0 | refs/heads/master | 2020-03-28T08:00:11.156911 | 2018-11-28T09:54:51 | 2018-11-28T09:54:51 | 147,939,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,170 | py | import csv
import sys
import importlib
importlib.reload(sys)
from pdfminer.pdfparser import PDFParser,PDFDocument
from pdfminer.pdfinterp import PDFResourceManager,PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTTextBoxHorizontal,LAParams
from pdfminer.pdfinterp import PDFTextExtractionNotAllowed
class DealFile(object):
#读csv
def readCsv(self,path):
InfoList = []
with open(path, "r") as f:
allFileInfo = csv.reader(f)
print(allFileInfo)
for row in allFileInfo:
InfoList.append(row)
return InfoList
#写csv
#数据格式:[[1,2,3],[4,5,6],[7,8,9]]
def writeCsv(self,path, data):
with open(path, "w") as f:
writer = csv.writer(f)
for rowData in data:
writer.writerow(rowData)
#读取PDF
def readPDF(self,path, callback=None,toPath=""):
f = open(path, "rb")
parser = PDFParser(f)
pdfFile = PDFDocument()
parser.set_document(pdfFile)
pdfFile.set_parser(parser)
pdfFile.initialize()
if not pdfFile.is_extractable:
raise PDFTextExtractionNotAllowed
else:
manager = PDFResourceManager()
laparams = LAParams()
device = PDFPageAggregator(manager, laparams=laparams)
interpreter = PDFPageInterpreter(manager, device)
for page in pdfFile.get_pages():
interpreter.process_page(page)
layout = device.get_result()
for x in layout:
if (isinstance(x, LTTextBoxHorizontal)):
#处理每行数据
if toPath == "":
#处理每一行数据
str = x.get_text()
if callback != None:
callback(str)
else:
print(str)
else:
#写文件
print("将PDF文件写入文件:")
| [
"kingone@yeah.net"
] | kingone@yeah.net |
c49e4ab23c93852d9e44bd943682f0882b723090 | 774013c23d017b801e1fff05ed435e65fbdc58a8 | /Actividades/AC02/AC02.py | 631d6b0755bf6b15df5608135f18aa2c88439254 | [] | no_license | isidoravs/iic2233-2016-2 | fdd858334f112ca26b30b92011cbecd8898edee8 | 26f746fd339de67795fb55d36f35b41cc4e42756 | refs/heads/master | 2020-11-30T04:33:15.279222 | 2016-11-22T14:16:56 | 2016-11-22T14:16:56 | 230,302,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,434 | py |
class Animal:
def __init__(self, nombre, color, sexo):
self.nombre = nombre
self.color = color
self.sexo = sexo
self.horas_sueno = 0
self.horas_juego_ind = 0
self.horas_juego_grup = 0
self.comidas = 0
self.horas_regaloneo = 0
def set_parametros(self, animal):
if animal.personalidad == 'juguetona':
self.horas_sueno = 8 * animal.expresion
self.horas_juego_ind = 1 * animal.expresion
self.horas_juego_grup = 7 * animal.expresion
self.comidas = 4 * animal.expresion
self.horas_regaloneo = 4 * animal.expresion
else:
self.horas_sueno = 12 * animal.expresion
self.horas_juego_ind = 5 * animal.expresion
self.horas_juego_grup = 1 * animal.expresion
self.comidas = 4 * animal.expresion
self.horas_regaloneo = 2 * animal.expresion
def jugar(self):
pass
def comer(self):
pass
def __str__(self):
return "Me llamo {}, soy {} y tengo el pelo {}.".format(self.nombre, self.sexo, self.color)
class Gato(Animal):
def __init__(self, nombre, color, sexo):
super().__init__(nombre, color, sexo)
def maullar(self):
print("Miauuu!! Miauuu!")
return
def jugar(self):
print("Humano, ahora, juguemos.")
return
def comer(self):
print("El pellet es horrible. Dame comida en lata.")
return
class Perro(Animal):
def __init__(self, nombre, color, sexo):
super().__init__(nombre, color, sexo)
def ladrar(self):
print('Guau!! Guau!!')
return
def jugar(self):
print('Tirame la pelota :)')
return
def comer(self):
print('Mami :) Quiero comeeeerr!!')
return
class SiamePUC(Gato):
    """PUC Siamese cat: selfish personality; females express 1.5x more."""

    def __init__(self, expresion, nombre, color, sexo):
        super().__init__(nombre, color, sexo)
        self.personalidad = 'egoista'
        # Female cats get a 1.5x expression boost.
        self.expresion = expresion * 1.5 if sexo == "Hembra" else expresion
        self.set_parametros(self)

    def comer(self):
        print("Quiero comida.")
        super().comer()
        super().maullar()
class GoldenPUC(Perro):
    """PUC Golden dog: playful; females express 0.9x, males 1.1x."""

    def __init__(self, expresion, nombre, color, sexo):
        super().__init__(nombre, color, sexo)
        self.personalidad = 'juguetona'
        factor = 0.9 if sexo == "Hembra" else 1.1
        self.expresion = expresion * factor
        self.set_parametros(self)

    def jugar(self):
        print("Quiero jugar.")
        super().jugar()
        self.ladrar()
class PUCTerrier(Perro):
    """PUC Terrier dog: selfish; males get a 1.2x expression boost."""

    def __init__(self, expresion, nombre, color, sexo):
        super().__init__(nombre, color, sexo)
        self.personalidad = 'egoista'
        factor = 1 if sexo == "Hembra" else 1.2
        self.expresion = expresion * factor
        self.set_parametros(self)

    def comer(self):
        print("Quiero comer.")
        super().comer()
        self.ladrar()
def estadisticas(animales):
    """Print aggregate routine statistics for a collection of animals.

    Reports the minimum sleep and solo-play times, the maximum group-play
    time, and the totals for meals and cuddling hours.
    """
    sueno = 1000         # running minimum (1000 acts as an "infinity" sentinel)
    juego_ind = 1000     # running minimum
    juego_grup = 0       # running maximum
    comidas = 0          # running sum
    horas_regaloneo = 0  # running sum
    for animal in animales:
        sueno = min(sueno, animal.horas_sueno)
        juego_ind = min(juego_ind, animal.horas_juego_ind)
        juego_grup = max(juego_grup, animal.horas_juego_grup)
        comidas += animal.comidas
        horas_regaloneo += animal.horas_regaloneo
    print('''Tiempo de sueno: {}\nTiempo de juego individual: {}
Tiempo de juego grupal: {}\nCantidad de comidas: {}
Tiempo de regaloneo: {}
'''.format(sueno, juego_ind, juego_grup, comidas, horas_regaloneo))
    return
if __name__ == '__main__':
    # Build a small menagerie covering each breed and both personalities.
    animals = list()
    animals.append(GoldenPUC(expresion=0.5, nombre="Mara", color="Blanco", sexo="Hembra"))
    animals.append(GoldenPUC(expresion=0.9, nombre="Eddie", color="Rubio", sexo="Macho"))
    animals.append(SiamePUC(expresion=0.9, nombre="Felix", color="Naranjo", sexo="Hembra"))
    animals.append(PUCTerrier(expresion=0.8, nombre="Betty", color="Café", sexo="Hembra"))
    # Introduce each animal and trigger its play/eat behaviour...
    for a in animals:
        print(a)
        a.jugar()
        a.comer()
    # ...then print the aggregate statistics for the whole group.
    estadisticas(animals)
| [
"isvizcaya@uc.cl"
] | isvizcaya@uc.cl |
c35aeabe101d4d3e4ad6194c2cc8c932429890dd | 2275cbc9589476217b60ce83bdf697349bcb8e26 | /s0047-permutations-ii.py | 2795041e406b46a19878eedc9c7713682dd12ebc | [] | no_license | Vincent0700/leetcode-solution | 0ed772fa6cf9bdd786b63be3a3309b12d92d9bdb | 0f98f8e9fbef3c6478e6f2c27014323ba70909de | refs/heads/master | 2020-03-08T01:09:44.951264 | 2019-07-10T06:28:31 | 2019-07-10T06:28:31 | 127,823,166 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | from itertools import permutations
class Solution(object):
    def permuteUnique(self, nums):
        """Return all unique permutations of *nums*.

        :type nums: List[int]
        :rtype: List[List[int]]

        Deduplicates by collecting permutations as (hashable) tuples in a
        set, instead of the original round-trip through str()/eval(),
        which was slow and unsafe.
        """
        return [list(perm) for perm in set(permutations(nums))]
print Solution().permuteUnique([1,1,2]) | [
"wang.yuanqiu007@gmail.com"
] | wang.yuanqiu007@gmail.com |
0e2f406e8b95900c7ff8aa1281e0b2a770a758d4 | 8f0c757e0a1142a8cac44ac6cea8b50f5e0d6366 | /libs/utils/report.py | a0600d9927631396d740aa7c86830394b85bd78c | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Leo-Yan/lisa | a29492ad59a61606e5eff365db106c3585f83165 | 9149d565e84b3566b0973e40fe0c39f7b21288fe | refs/heads/master | 2022-05-28T04:40:49.889816 | 2016-03-29T10:20:06 | 2016-03-29T10:20:06 | 55,029,658 | 0 | 2 | null | 2016-03-30T03:30:25 | 2016-03-30T03:30:25 | null | UTF-8 | Python | false | false | 14,403 | py | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import fnmatch as fnm
import json
import math
import numpy as np
import os
import re
import sys
from collections import defaultdict
from colors import TestColors
from results import Results
# Configure logging
import logging
reload(logging)
logging.basicConfig(
format='%(asctime)-9s %(levelname)-8s: %(message)s',
# level=logging.DEBUG,
level=logging.INFO,
datefmt='%I:%M:%S')
# By default compare all the possible combinations
DEFAULT_COMPARE = [(r'base_', r'test_')]
class Report(object):
def __init__(self, results_dir, compare=None, formats=['relative']):
self.results_json = results_dir + '/results.json'
self.results = {}
self.compare = []
# Parse results (if required)
if not os.path.isfile(self.results_json):
Results(results_dir)
# Load results from file (if already parsed)
logging.info('%14s - Load results from [%s]...',
'Results', self.results_json)
with open(self.results_json) as infile:
self.results = json.load(infile)
# Setup configuration comparisons
if compare is None:
compare = DEFAULT_COMPARE
logging.warning('%14s - Comparing all the possible combination',
'Results')
for (base_rexp, test_rexp) in compare:
logging.info('Configured regexps for comparisions (bases , tests): (%s, %s)',
base_rexp, test_rexp)
base_rexp = re.compile(base_rexp, re.DOTALL)
test_rexp = re.compile(test_rexp, re.DOTALL)
self.compare.append((base_rexp, test_rexp))
# Report all supported workload classes
self.__rtapp_report(formats)
self.__default_report(formats)
############################### REPORT RTAPP ###############################
def __rtapp_report(self, formats):
if 'rtapp' not in self.results.keys():
logging.debug('%14s - No RTApp workloads to report', 'ReportRTApp')
return
logging.debug('%14s - Reporting RTApp workloads', 'ReportRTApp')
# Setup lables depending on requested report
if 'absolute' in formats:
nrg_lable = 'Energy Indexes (Absolute)'
prf_lable = 'Performance Indexes (Absolute)'
logging.info('')
logging.info('%14s - Absolute comparisions:', 'Report')
print ''
else:
nrg_lable = 'Energy Indexes (Relative)'
prf_lable = 'Performance Indexes (Relative)'
logging.info('')
logging.info('%14s - Relative comparisions:', 'Report')
print ''
# Dump headers
print '{:13s} {:20s} |'\
' {:33s} | {:54s} |'\
.format('Test Id', 'Comparision',
nrg_lable, prf_lable)
print '{:13s} {:20s} |'\
' {:>10s} {:>10s} {:>10s} |'\
' {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} |'\
.format('', '',
'LITTLE', 'big', 'Total',
'PerfIndex', 'NegSlacks', 'EDP1', 'EDP2', 'EDP3')
# For each test
_results = self.results['rtapp']
for tid in sorted(_results.keys()):
new_test = True
# For each configuration...
for base_idx in sorted(_results[tid].keys()):
# Which matches at least on base regexp
for (base_rexp, test_rexp) in self.compare:
if not base_rexp.match(base_idx):
continue
# Look for a configuration which matches the test regexp
for test_idx in sorted(_results[tid].keys()):
if test_idx == base_idx:
continue
if new_test:
print '{:-<37s}+{:-<35s}+{:-<56s}+'\
.format('','', '')
self.__rtapp_reference(tid, base_idx)
new_test = False
if test_rexp.match(test_idx) == None:
continue
self.__rtapp_compare(tid, base_idx, test_idx, formats)
print ''
def __rtapp_reference(self, tid, base_idx):
_results = self.results['rtapp']
logging.debug('Test %s: compare against [%s] base',
tid, base_idx)
res_line = '{0:12s}: {1:22s} | '.format(tid, base_idx)
# Dump all energy metrics
for cpus in ['LITTLE', 'big', 'Total']:
res_base = _results[tid][base_idx]['energy'][cpus]['avg']
# Dump absolute values
res_line += ' {0:10.3f}'.format(res_base)
res_line += ' |'
# If available, dump also performance results
if 'performance' not in _results[tid][base_idx].keys():
print res_line
return
for pidx in ['perf_avg', 'slack_pct', 'edp1', 'edp2', 'edp3']:
res_base = _results[tid][base_idx]['performance'][pidx]['avg']
logging.debug('idx: %s, base: %s', pidx, res_base)
if pidx in ['perf_avg']:
res_line += ' {0:s}'.format(TestColors.rate(res_base))
continue
if pidx in ['slack_pct']:
res_line += ' {0:s}'.format(
TestColors.rate(res_base, positive_is_good = False))
continue
if 'edp' in pidx:
res_line += ' {0:10.2e}'.format(res_base)
continue
res_line += ' |'
print res_line
def __rtapp_compare(self, tid, base_idx, test_idx, formats):
_results = self.results['rtapp']
logging.debug('Test %s: compare %s with %s',
tid, base_idx, test_idx)
res_line = '{0:12s}: {1:20s} | '.format(tid, test_idx)
# Dump all energy metrics
for cpus in ['LITTLE', 'big', 'Total']:
res_base = _results[tid][base_idx]['energy'][cpus]['avg']
res_test = _results[tid][test_idx]['energy'][cpus]['avg']
speedup_cnt = res_test - res_base
if 'absolute' in formats:
res_line += ' {0:10.2f}'.format(speedup_cnt)
else:
speedup_pct = 0
if res_base != 0:
speedup_pct = 100.0 * speedup_cnt / res_base
res_line += ' {0:s}'\
.format(TestColors.rate(
speedup_pct,
positive_is_good = False))
res_line += ' |'
# If available, dump also performance results
if 'performance' not in _results[tid][base_idx].keys():
print res_line
return
for pidx in ['perf_avg', 'slack_pct', 'edp1', 'edp2', 'edp3']:
res_base = _results[tid][base_idx]['performance'][pidx]['avg']
res_test = _results[tid][test_idx]['performance'][pidx]['avg']
logging.debug('idx: %s, base: %s, test: %s',
pidx, res_base, res_test)
if pidx in ['perf_avg']:
res_line += ' {0:s}'.format(TestColors.rate(res_test))
continue
if pidx in ['slack_pct']:
res_line += ' {0:s}'.format(
TestColors.rate(res_test, positive_is_good = False))
continue
# Compute difference base-vs-test
if 'edp' in pidx:
speedup_cnt = res_base - res_test
if 'absolute':
res_line += ' {0:10.2e}'.format(speedup_cnt)
else:
res_line += ' {0:s}'.format(TestColors.rate(speedup_pct))
res_line += ' |'
print res_line
############################### REPORT DEFAULT #############################
def __default_report(self, formats):
# Build list of workload types which can be rendered using the default parser
wtypes = []
for supported_wtype in DEFAULT_WTYPES:
if supported_wtype in self.results.keys():
wtypes.append(supported_wtype)
if len(wtypes) == 0:
logging.debug('%14s - No Default workloads to report', 'ReportDefault')
return
logging.debug('%14s - Reporting Default workloads', 'ReportDefault')
# Setup lables depending on requested report
if 'absolute' in formats:
nrg_lable = 'Energy Indexes (Absolute)'
prf_lable = 'Performance Indexes (Absolute)'
logging.info('')
logging.info('%14s - Absolute comparisions:', 'Report')
print ''
else:
nrg_lable = 'Energy Indexes (Relative)'
prf_lable = 'Performance Indexes (Relative)'
logging.info('')
logging.info('%14s - Relative comparisions:', 'Report')
print ''
# Dump headers
print '{:9s} {:20s} |'\
' {:33s} | {:54s} |'\
.format('Test Id', 'Comparision',
nrg_lable, prf_lable)
print '{:9s} {:20s} |'\
' {:>10s} {:>10s} {:>10s} |'\
' {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} |'\
.format('', '',
'LITTLE', 'big', 'Total',
'Perf', 'CTime', 'EDP1', 'EDP2', 'EDP3')
# For each default test
for wtype in wtypes:
_results = self.results[wtype]
for tid in sorted(_results.keys()):
new_test = True
# For each configuration...
for base_idx in sorted(_results[tid].keys()):
# Which matches at least on base regexp
for (base_rexp, test_rexp) in self.compare:
if not base_rexp.match(base_idx):
continue
# Look for a configuration which matches the test regexp
for test_idx in sorted(_results[tid].keys()):
if test_idx == base_idx:
continue
if new_test:
print '{:-<37s}+{:-<35s}+{:-<56s}+'\
.format('','', '')
new_test = False
if not test_rexp.match(test_idx):
continue
self.__default_compare(wtype, tid, base_idx, test_idx, formats)
print ''
def __default_compare(self, wtype, tid, base_idx, test_idx, formats):
_results = self.results[wtype]
logging.debug('Test %s: compare %s with %s',
tid, base_idx, test_idx)
res_comp = '{0:s} vs {1:s}'.format(test_idx, base_idx)
res_line = '{0:8s}: {1:22s} | '.format(tid, res_comp)
# Dump all energy metrics
for cpus in ['LITTLE', 'big', 'Total']:
# If either base of test have a 0 MAX energy, this measn that
# energy has not been collected
base_max = _results[tid][base_idx]['energy'][cpus]['max']
test_max = _results[tid][test_idx]['energy'][cpus]['max']
if base_max == 0 or test_max == 0:
res_line += ' {0:10s}'.format('NA')
continue
# Otherwise, report energy values
res_base = _results[tid][base_idx]['energy'][cpus]['avg']
res_test = _results[tid][test_idx]['energy'][cpus]['avg']
speedup_cnt = res_test - res_base
if 'absolute' in formats:
res_line += ' {0:10.2f}'.format(speedup_cnt)
else:
speedup_pct = 100.0 * speedup_cnt / res_base
res_line += ' {0:s}'\
.format(TestColors.rate(
speedup_pct,
positive_is_good = False))
res_line += ' |'
# If available, dump also performance results
if 'performance' not in _results[tid][base_idx].keys():
print res_line
return
for pidx in ['perf_avg', 'ctime_avg', 'edp1', 'edp2', 'edp3']:
res_base = _results[tid][base_idx]['performance'][pidx]['avg']
res_test = _results[tid][test_idx]['performance'][pidx]['avg']
logging.debug('idx: %s, base: %s, test: %s',
pidx, res_base, res_test)
# Compute difference base-vs-test
speedup_cnt = 0
if res_base != 0:
if pidx in ['perf_avg']:
speedup_cnt = res_test - res_base
else:
speedup_cnt = res_base - res_test
# Compute speedup if required
speedup_pct = 0
if 'absolute' in formats:
if 'edp' in pidx:
res_line += ' {0:10.2e}'.format(speedup_cnt)
else:
res_line += ' {0:10.2f}'.format(speedup_cnt)
else:
if res_base != 0:
if pidx in ['perf_avg']:
# speedup_pct = 100.0 * speedup_cnt / res_base
speedup_pct = speedup_cnt
else:
speedup_pct = 100.0 * speedup_cnt / res_base
res_line += ' {0:s}'.format(TestColors.rate(speedup_pct))
res_line += ' |'
print res_line
# List of workload types which can be parsed using the default test parser
DEFAULT_WTYPES = ['perf_bench_messaging', 'perf_bench_pipe']
#vim :set tabstop=4 shiftwidth=4 expandtab
| [
"patrick.bellasi@arm.com"
] | patrick.bellasi@arm.com |
617916a8ff8fe3b5064f4fe017637bafe427fc75 | 9216e5cb1434ba08dfb43561ea0650e63c945804 | /fid_score.py | 0dd466bdd365a1d90559aa05911255aec93e1b78 | [] | no_license | EliasDimitriou14/Text2Image_Thesis | 50dfe0dd842ef0038c8a9fb1c0584788eb0b5b5e | f0faca7709b75926c599bb015ad43fb6bdf291c2 | refs/heads/main | 2023-06-10T20:03:57.046086 | 2021-07-05T16:37:36 | 2021-07-05T16:37:36 | 383,198,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,081 | py | '''
From https://github.com/tsc2017/Frechet-Inception-Distance
Code derived from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/gan/python/eval/python/classifier_metrics_impl.py
Usage:
Call get_fid(images1, images2)
Args:
images1, images2: Numpy arrays with values ranging from 0 to 255 and shape in the form [N, 3, HEIGHT, WIDTH] where N, HEIGHT and WIDTH can be arbitrary.
dtype of the images is recommended to be np.uint8 to save CPU memory.
Returns:
Frechet Inception Distance between the two image distributions.
'''
import tensorflow as tf
import os
import functools
import numpy as np
import time
from tensorflow.python.ops import array_ops
if float('.'.join(tf.__version__.split('.')[:2])) < 1.15:
tfgan = tf.contrib.gan
else:
import tensorflow_gan as tfgan
session = tf.compat.v1.InteractiveSession()
# A smaller BATCH_SIZE reduces GPU memory usage, but at the cost of a slight slowdown
BATCH_SIZE = 64
# Run images through Inception.
inception_images = tf.compat.v1.placeholder(tf.float32, [None, 3, None, None])
activations1 = tf.compat.v1.placeholder(tf.float32, [None, None], name='activations1')
activations2 = tf.compat.v1.placeholder(tf.float32, [None, None], name='activations2')
fcd = tfgan.eval.frechet_classifier_distance_from_activations(activations1, activations2)
def inception_activations(images=inception_images, num_splits=1):
    """Build the graph mapping input images to Inception pool_3 activations.

    Images arrive as NCHW floats; they are transposed to NHWC and resized
    to 299x299 (Inception's expected input size) before classification.
    """
    images = tf.transpose(images, [0, 2, 3, 1])
    size = 299
    images = tf.compat.v1.image.resize_bilinear(images, [size, size])
    generated_images_list = array_ops.split(images, num_or_size_splits=num_splits)
    # Run the (optionally split) batches through Inception and collect the
    # pool_3 feature vector for each image.
    activations = tf.map_fn(
        fn=functools.partial(tfgan.eval.run_inception, output_tensor='pool_3:0'),
        elems=array_ops.stack(generated_images_list),
        parallel_iterations=8,
        back_prop=False,
        swap_memory=True,
        name='RunClassifier')
    # Re-join the per-split results into one [N, 2048] activation tensor.
    activations = array_ops.concat(array_ops.unstack(activations), 0)
    return activations
activations = inception_activations()
def get_inception_activations(inps):
    """Compute Inception pool_3 activations for an [N, 3, H, W] array.

    Images are processed in BATCH_SIZE chunks; pixel values are rescaled
    from [0, 255] to the [-1, 1] range Inception expects.
    """
    n_batches = int(np.ceil(float(inps.shape[0]) / BATCH_SIZE))
    act = np.zeros([inps.shape[0], 2048], dtype=np.float32)
    for i in range(n_batches):
        # Rescale this batch from [0, 255] to [-1, 1].
        inp = inps[i * BATCH_SIZE: (i + 1) * BATCH_SIZE] / 255. * 2 - 1
        # min(...) handles the (possibly shorter) final batch.
        act[i * BATCH_SIZE: i * BATCH_SIZE + min(BATCH_SIZE, inp.shape[0])] = session.run(activations, feed_dict={
            inception_images: inp})
    return act
def activations2distance(act1, act2):
    """Evaluate the Frechet distance between two sets of activations."""
    return session.run(fcd, feed_dict={activations1: act1, activations2: act2})
def get_fid(images1, images2):
    """Return the Frechet Inception Distance between two image sets.

    Both arguments must be numpy arrays of shape [N, 3, H, W] with pixel
    values in [0, 255], and both must have identical shapes.
    """
    assert (type(images1) == np.ndarray)
    assert (len(images1.shape) == 4)
    assert (images1.shape[1] == 3)
    assert (np.min(images1[0]) >= 0 and np.max(images1[0]) > 10), 'Image values should be in the range [0, 255]'
    assert (type(images2) == np.ndarray)
    assert (len(images2.shape) == 4)
    assert (images2.shape[1] == 3)
    assert (np.min(images2[0]) >= 0 and np.max(images2[0]) > 10), 'Image values should be in the range [0, 255]'
    assert (images1.shape == images2.shape), 'The two numpy arrays must have the same shape'
    print('Calculating FID with %i images from each distribution' % (images1.shape[0]))
    start_time = time.time()
    # Activations are computed per distribution, then compared.
    act1 = get_inception_activations(images1)
    act2 = get_inception_activations(images2)
    fid = activations2distance(act1, act2)
    print('FID calculation time: %f s' % (time.time() - start_time))
    return fid
# my code to format the images to get fid score
import os
from PIL import Image
import numpy as np
# get the folder where the images from the GAN are and get the images after that
folder = "train_samples_sliced/"
images = sorted(os.listdir(folder)) #["train_00_01_01", "train_00_01_02", "train_00_01_03", ...]
# create an array to store them as numpy array. That is the input for the fid score code
images_array = []
for image in images:
im = Image.open(folder + image)
images_array.append(np.asarray(im))
# show the shape of the array created in order to know how to transpose it
images_array = np.asarray(images_array)
print(images_array.shape)
# once transposed check if everything is right
transposed_images = np.transpose(images_array, [0, 3, 1, 2])
print(transposed_images.shape)
# Format must be (N, 3, HEIGHT, WIDTH)
# load the training images to calculate the fid score
train_images = np.load('train_images.npy', encoding='latin1', allow_pickle=True)
# take only as many images as you have from the train_sliced_samples in order for the shapes to match eachother
sub_images = train_images[0:7370]
print(sub_images.shape)
# once transposed check if everything is right
transposed_sub_images = np.transpose(sub_images, [0, 3, 1, 2])
print(transposed_sub_images.shape)
# Format must be (N, 3, HEIGHT, WIDTH)
# Calculate the FID score of the images and show it.
fid = get_fid(transposed_images, transposed_sub_images)
print("FID:", fid) | [
"noreply@github.com"
] | EliasDimitriou14.noreply@github.com |
8a54ee96beb2f99b09d6f7bb03d4b055d8f42f05 | 26c374f2ac3da240f19f77d748a916950121946f | /kNN.py | c7601edfa484882663112c6e4f54e8d2712880a4 | [] | no_license | yjallan/Supervised-Machine-Learning | f3215406504b3f1d5b3a077290a724036b38e5f1 | 926cdfc2b7c0506b3e37e79614a57380f5e4bedb | refs/heads/master | 2020-04-12T20:48:07.558697 | 2018-12-21T18:46:02 | 2018-12-21T18:46:02 | 162,747,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,779 | py | #Import Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import model_selection
#from sklearn import grid_search
from sklearn import preprocessing
from sklearn import neighbors
import time
#Column names
# NOTE(review): exploratory script — the repeated assignments to
# `features` and `df` below override each other; only the LAST
# uncommented assignment of each takes effect (wine-quality dataset).
features = ["X1","X2","X3","X4","X5","X6","X7","X8","Y"]
#features = ["X1","X2","X3","X4","X5","X6","X7","X8","X9","X10","X11","X12","X13","X14","X15","X16","Y"]
features=["age","job","marital","education","default","housing","loan","contact","month","day_of_week","duration","campaign","pdays","previous","poutcome","emp.var.rate","cons.price.idx","cons.conf.idx","euribor3m","nr.employed","Y"]
features=["fixed acidity","volatile acidity","citric acid","residual sugar","chlorides","free sulfur dioxide","total sulfur dioxide","density","pH","sulphates","alcohol","Y"]
#Read the file
df=pd.read_csv("diabetes.csv",header=None,names=features)
#df=pd.read_csv("bank.csv",header=None,names=features)
df=pd.read_csv("bank_full.csv",header=None,names=features)
df=pd.read_csv("winequality-red.csv",header=None,names=features)
# Drop the rare quality classes 3 and 8 from the wine dataset.
df=df.drop(df[(df.Y==3)].index)
df=df.drop(df[(df.Y==8)].index)
#df=pd.read_csv("letter.csv",header=None,names=features)
#Label Encoding required for Bank Dataset only
#for i in range(len(features)):
#    if (type(df[features[i]][0])==str):
#        #print(i)
#        le = preprocessing.LabelEncoder()
#        le.fit(df[features[i]])
#        df[features[i]]=le.transform(df[features[i]])
no_of_features=df.shape[1]-1
no_of_rows=df.shape[0]
# Split attributes (all columns but the last) from the label column Y,
# with a 70/30 train/test split at a fixed seed for reproducibility.
X_df = df[features[:-1]]
Y_df = df['Y']
X_train, X_test, y_train, y_test = model_selection.train_test_split(X_df,Y_df,\
    test_size=0.3,random_state=0)
"""
k NEAREST NEIGHBOR
"""
# NOTE(review): time.clock() was removed in Python 3.8 — this section
# only runs on older interpreters (use time.perf_counter() on 3.8+).
start_time=time.clock()
clf = neighbors.KNeighborsClassifier()
clf.fit(X=X_train, y=y_train)
accuracy_train=clf.score(X_train,y_train)
accuracy_test=clf.score(X_test,y_test)
print("Training accuracy is: ",accuracy_train)
print("Test accuracy is: ",accuracy_test)
end_time=time.clock()
print("Total Time taken: ",end_time-start_time)
"""PLOTS"""
# Learning curve: train a fixed k=10 classifier on growing fractions of
# the training data and record train/test accuracy for each fraction.
x_axis_vals=[]
accuracy_train=[]
accuracy_test=[]
for i in range(9,-1,-1):
#for i in range(1,35):
    print(i)
    #i=0
    X_train_sub, X_unused, y_train_sub, y_unused = model_selection.train_test_split(\
        X_train,y_train,test_size=i*0.1,random_state=0)
    #clf = neighbors.KNeighborsClassifier(n_neighbors=i)
    clf = neighbors.KNeighborsClassifier(n_neighbors=10)
    clf=clf.fit(X=X_train_sub, y=y_train_sub)
    #clf=clf.fit(X=X_train, y=y_train)
    x_axis_vals.append(100-10*i)
    #x_axis_vals.append(i)
    accuracy_train.append(clf.score(X_train_sub,y_train_sub))
    #accuracy_train.append(clf.score(X_train,y_train))
    accuracy_test.append(clf.score(X_test,y_test))
plt.plot(x_axis_vals,accuracy_test)
plt.plot(x_axis_vals,accuracy_train)
plt.title("Learning Curve")
plt.xlabel("Percentage of Training Data Used")
#plt.xlabel("'k' values")
plt.ylabel("Accuracy")
plt.legend(['Test Accuracy','Training Accuracy'])
"""FINDING BEST HYPER PARAMETERS"""
# Grid-search over k with 10-fold cross-validation, then evaluate the
# best estimator on the held-out test set.
parameters = {\
    'n_neighbors': list(range(1,35)),\
    #'algorithm': ['auto', 'ball_tree', 'kd_tree', 'brute'],\
    #'leaf_size': list(range(1,50)),\
    }
algo=neighbors.KNeighborsClassifier()
clf = model_selection.GridSearchCV(algo,parameters,cv=10)
clf.fit(X=X_train, y=y_train)
print (clf.best_score_, clf.best_params_)
clf = clf.best_estimator_
accuracy_train=clf.score(X_train,y_train)
accuracy_test=clf.score(X_test,y_test)
print("Training accuracy is: ",accuracy_train)
print("Test accuracy is: ",accuracy_test)
| [
"noreply@github.com"
] | yjallan.noreply@github.com |
1548d7e9e2ea08a2e8e1481b25bd82fe9fbe11f9 | cc0fcb7d233a5f8c2aca7827d884c2f8f27ce554 | /PCA.py | 8fe7f75aabb12729e4e891c6f531f23a1fc29b64 | [] | no_license | wangchangchun/FLD-PCA | 41871679aec37c812070712e1d76a0b41945d797 | f2af3df8b609bfc9fb73f48fb1c2001f51fa8b4e | refs/heads/master | 2020-05-24T11:15:29.933637 | 2019-05-17T15:51:14 | 2019-05-17T15:51:14 | 187,243,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,218 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import loadData
def meanVector(dataset):
    """Return the per-attribute (column-wise) mean of *dataset* as a list."""
    means = []
    for column in zip(*dataset):
        means.append(sum(column) / float(len(column)))
    return means
def stdMat(dataset, attrNum):
    """Population covariance matrix of *dataset* (rows are samples).

    A tiny eps * I term is added so the matrix stays non-singular.
    """
    # Column-wise mean (inlined so this helper is self-contained).
    miu = np.array([sum(col) / float(len(col)) for col in zip(*dataset)])
    dim = len(miu)
    std = np.zeros((dim, dim))
    # Accumulate the outer product of each centred sample.
    for row in dataset:
        diff = (row - miu)[:, None]
        std += diff.dot(diff.T)
    std /= len(dataset)
    std += np.eye(attrNum) * 1e-15
    return std
def splitXandY(dataset, attrNum, len):
    """Split *dataset* into an X matrix (first attrNum columns) and a Y
    column vector (column attrNum), using the first *len* rows.

    NOTE: the parameter name ``len`` shadows the builtin; it is kept
    unchanged for interface compatibility with existing callers.
    """
    splitX = np.zeros((len, attrNum))
    splitY = np.zeros((len, 1))
    for row_idx in range(len):
        row = dataset[row_idx]
        splitX[row_idx, :] = [row[c] for c in range(attrNum)]
        splitY[row_idx, 0] = row[attrNum]
    return [splitX, splitY]
def pca(XMat, k):
    """Project *XMat* onto its top-*k* principal components.

    Returns (finalData, reconData): the k-dimensional projection and the
    reconstruction of the data back in the original space. Returns None
    (implicitly) if k exceeds the number of features.
    """
    # print(XMat)
    average = meanVector(XMat)
    # print(average)
    m, n = np.shape(XMat)
    data_adjust = []
    # Centre the data by subtracting the column means from every row.
    avgs = np.tile(average, (m, 1))
    data_adjust = XMat - avgs
    covX = stdMat(data_adjust,len(XMat[0])) # compute the covariance matrix
    # Eigen-decompose the covariance matrix.
    # NOTE(review): np.linalg.eig may return complex values from rounding
    # noise even for symmetric input; np.linalg.eigh would be the
    # symmetric-specific alternative — confirm before changing.
    featValue, featVec= np.linalg.eig(covX) # eigenvalues and eigenvectors of the covariance matrix
    index = np.argsort(-featValue) # sort by featValue in descending order
    finalData = []
    if k > n:
        print ("k is bigger than feature num!!")
        return
    else:
        # eig returns eigenvectors as columns, hence the transpose here.
        selectVec = np.array(featVec.T[index[:k]])
        finalData = data_adjust.dot(selectVec.T)
        reconData = (finalData.dot(selectVec)) + average
    return finalData, reconData
def plotBestFit(data1, data2, y):
    """Scatter-plot the first two columns of *data1*, coloured by label.

    Labels 1/2/3 in *y* map to red/green/blue; any other label keeps the
    empty colour string. NOTE(review): *data2* is converted but never
    plotted — possibly intended for the reconstruction overlay; confirm.
    """
    dataArr1 = np.array(data1)
    dataArr2 = np.array(data2)
    m = np.shape(dataArr1)[0]
    for i in range(m):
        color = ''
        if y[i]==1: color = 'r'
        if y[i] == 2: color = 'g'
        if y[i] == 3: color = 'b'
        plt.scatter(dataArr1[i, 0], dataArr1[i, 1], s=50, c=color)
    plt.show()
'''
dataset = loadData.loadIris()
# print(dataset)
X,y = splitXandY(dataset,4,len(dataset))
# print(X)
finalData, reconMat = pca(X, 2)
print(finalData.shape)
print(reconMat.shape)
plotBestFit(finalData, reconMat,y)
'''
| [
"j432985029@gmail.com"
] | j432985029@gmail.com |
cc2d30db8d019a537c6aee28c48f743a032c4eb2 | aa5ee659db13e03e32adfd9ebe9a10a21f52e041 | /ql516/assignment10.py | 9f365250a83c73a9fdba25f5c8872c31e1258926 | [] | no_license | ky822/assignment10 | 6eaab428cd033a7c742d576631b789212e832cda | 893a2889e9b8e92082dbdb7ec16deab506de3eb3 | refs/heads/master | 2021-01-23T15:58:38.201881 | 2014-12-02T03:29:18 | 2014-12-02T03:29:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,325 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 18 21:59:35 2014
@author: LaiQX
"""
import pandas as pd
import sys
from functions import *
def main():
    """Prompt for a CSV dataset path, then clean, count and plot grades.

    Python 2 script: uses raw_input and print statements. Loops until a
    readable CSV path is given; Ctrl-C / Ctrl-D exit the program.
    """
    while 1:
        file_path = raw_input("Please input the relative path of the csv dataset file: ")
        try:
            raw_data = pd.read_csv(file_path)
            break
        except (KeyboardInterrupt,EOFError):
            sys.exit()
        except IOError:
            print "Not a valid path name, please try again, or you can use <C-C> or <C-D> to interrupt this program"
    #or you can just put the data in the same directory of this .py script and uncomment the next line
    #and the whole while loop above
    #raw_data = pd.read_csv('DOHMH_New_York_City_Restaurant_Inspection_Results.csv')
    #Clean the data
    data = data_clean(raw_data)
    #count the grade of NYC and Each Boroughs
    grade_test_count_all(data)
    grade_test_count(data)
    # Plot the improvement
    print "Ploting... it will take 5 ~ 7 minutes, you can press <C-C> or <C-D> to interrupt"
    # One plot for the whole city, then one per borough.
    grade_plot(data, 'NYC')
    group_boroughs = data.groupby('BORO')
    for name, groups in group_boroughs:
        grade_plot(groups, name)


if __name__ == '__main__':
    # Swallow Ctrl-C / Ctrl-D so the script exits quietly.
    try:
        main()
    except (KeyboardInterrupt,EOFError):
        pass
"ql516@nyu.edu"
] | ql516@nyu.edu |
5c79a11d22604dbd28c784dbc573c55c4746410d | 0b789b2e62b1d4c46132c279174ab32c073c2df1 | /test/test_combining.py | e11380ca2e3ba59f9ab060737b88601e0bc37575 | [
"Beerware",
"MIT"
] | permissive | jbjjbjjbj/eittek651 | 4be62a9ec99fc5147b726b729d8dff6c9d7e0aef | 3f735eb1836fc6e144b885654f71d3fe2b4e2c03 | refs/heads/main | 2023-05-06T18:54:21.565120 | 2021-05-25T13:46:05 | 2021-05-25T13:46:05 | 338,322,683 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 941 | py | import unittest
from antenna_diversity.diversity_technique import combining
import numpy as np
class TestCombining(unittest.TestCase):
    """Unit tests for the diversity-combining helpers (EGC and MRC)."""

    def setUp(self):
        # Three branch signals (rows) of three samples each.
        self.signals = np.array([[1, 2, 3], [4, 5, 6], [9, 8, 7]])
        # Per-sample sum across branches — what equal-gain combining yields.
        self.expected = [
            1 + 4 + 9,
            2 + 5 + 8,
            3 + 6 + 7
        ]

    def test_egc(self):
        comb = combining.egc(self.signals)
        np.testing.assert_array_equal(self.expected, comb)

    def test_mrc_simple(self):
        # With unit channel gains, MRC must reduce to EGC.
        comb = combining.mrc(self.signals, np.ones(len(self.signals)))
        np.testing.assert_array_equal(self.expected, comb)

    def test_mrc(self):
        # Non-uniform gains: each branch is weighted by its h before summing.
        h = [2, 0.5, 1]
        comb = combining.mrc(self.signals, h)
        exp = [
            2 * 1 + 0.5 * 4 + 1 * 9,
            2 * 2 + 0.5 * 5 + 1 * 8,
            2 * 3 + 0.5 * 6 + 1 * 7
        ]
        np.testing.assert_array_equal(exp, comb)
| [
"julian@jtle.dk"
] | julian@jtle.dk |
36471427d8a2ea215674ddf2d49c1ae55803c568 | aeb90678ea7da6d4239140ce911582ac59479376 | /Alphabets/Small_Alphabets/x.py | 31f5e432cd10333f3f3cea25b1f670e01d43f1ed | [
"MIT"
] | permissive | chandhukogila/Pattern-Package | 6971bb7afe29ee2dfeff30ec9ce6983babd1404c | 9c0c2924d1c356747f2b351dba01299b71490036 | refs/heads/main | 2023-03-24T06:00:26.818502 | 2021-03-20T12:52:39 | 2021-03-20T12:52:39 | 342,488,976 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | def for_x():
for row in range(5):
for col in range(4):
if row-col==1 or row+col==4:
print("*",end=" ")
else:
print(" ",end=" ")
print()
def while_x():
    """Print a 5-row, 4-column asterisk pattern using while loops.

    A star lands where row - col == 1 or row + col == 4; every cell is
    two characters wide (symbol plus a trailing space).
    """
    row = 0
    while row < 5:
        cells = []
        col = 0
        while col < 4:
            cells.append("*" if row - col == 1 or row + col == 4 else " ")
            col += 1
        # Join with single spaces and keep the trailing separator, matching
        # the cell-by-cell print(..., end=" ") output format.
        print(" ".join(cells) + " ")
        row += 1
| [
"noreply@github.com"
] | chandhukogila.noreply@github.com |
39605e2d34194fa84b99d370e31e678f2bba6463 | 67929a76934c8c6bdacd573e2bc5ad6c0254d69c | /pyfusion/pyfusion/conf/utils.py | 73bb7d253a04345383db0865cd7f7937bf7ccef3 | [] | no_license | SyntaxVoid/PyFusionDIIID | bc284b8480a8c4fc7881585c4fdd76ecc61162e4 | 4d19abed536f7b4d0322636828254ed3dd7a9b4c | refs/heads/master | 2020-05-29T08:41:16.970539 | 2017-06-19T21:26:16 | 2017-06-19T21:26:16 | 69,825,057 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,471 | py | """ Useful functions for manipulating config files."""
from ConfigParser import NoSectionError
import pyfusion
class CannotImportFromConfigError(Exception):
    """Failed to import a module, class or method from config setting."""
    # FIX: this was declared with ``def`` instead of ``class``, which made
    # it a plain function — raising it would fail with "exceptions must
    # derive from BaseException".
def import_from_str(string_value):
    """Import and return the object named by a dotted-path string.

    E.g. 'os.path.join' imports ``os.path`` and returns its ``join``
    attribute.
    """
    # TODO: make shortcuts for loading from within pyfusion
    module_path, _, attr_name = string_value.rpartition('.')
    module = __import__(module_path,
                        globals(), locals(),
                        [attr_name])
    return module.__dict__[attr_name]
def import_setting(component, component_name, setting):
    """Attempt to import and return a config setting.

    Looks up *setting* for (component, component_name) in pyfusion.config
    and imports the object the resulting dotted-path string names.
    """
    value_str = pyfusion.config.pf_get(component, component_name, setting)
    return import_from_str(value_str)
def kwarg_config_handler(component_type, component_name, **kwargs):
    """Fill *kwargs* with config defaults for any option not supplied.

    Explicitly passed keyword arguments take precedence over the values
    stored in pyfusion.config; the merged dict is returned.
    """
    for option in pyfusion.config.pf_options(component_type, component_name):
        if option not in kwargs:
            kwargs[option] = pyfusion.config.pf_get(component_type,
                                                    component_name, option)
    return kwargs
def get_config_as_dict(component_type, component_name):
    """Return every config option for a component as an {option: value} dict."""
    return dict(
        (option, pyfusion.config.pf_get(component_type, component_name, option))
        for option in pyfusion.config.pf_options(component_type, component_name)
    )
def read_config(config_files):
    """Read config files.

    Argument is either a single file object, or a list of filenames.
    Reloads the ORM if the configured database changed as a result.
    """
    # Remember the currently configured database so we can detect changes.
    try:
        existing_database = pyfusion.config.get('global', 'database')
    except NoSectionError:
        existing_database = 'None'
    # File object -> readfp; list of filenames -> read.
    # NOTE(review): the bare except also hides unrelated parse errors.
    try:
        files_read = pyfusion.config.readfp(config_files)
    except:
        files_read = pyfusion.config.read(config_files)
    if files_read != None: # readfp returns None
        if len(files_read) == 0:
            raise LookupError, str('failed to read config files from [%s]' %
                                   (config_files))
    # If the database setting changed, restart the ORM layer accordingly.
    config_database = pyfusion.config.get('global', 'database')
    if config_database.lower() != existing_database.lower():
        pyfusion.orm_manager.shutdown_orm()
        if config_database.lower() != 'none':
            pyfusion.orm_manager.load_orm()
pyfusion.orm_manager.load_orm()
def clear_config():
    """Clear pyfusion.config by replacing it with a fresh parser."""
    # Local import — presumably to avoid a circular import at module load
    # time; confirm before hoisting to the top of the file.
    import pyfusion
    pyfusion.config = pyfusion.conf.PyfusionConfigParser()
| [
"j.gresl12@gmail.com"
] | j.gresl12@gmail.com |
c1d060716c73d50f3d89af4de8863197e83ede4f | 7d2a2ab315ed29c56187b31d19ff250d224a830f | /gui/system/alert.py | 279928aaa6b7a5e0340316293c2b9dd1f0bfdf44 | [] | no_license | yvesc/freegui | f89c6b5a3ee15a98ae9cdc3c1a21db655f3e18df | 434f856f2ba57323a86b1305f231d1b8d5fb7b03 | refs/heads/master | 2020-03-26T16:02:31.159977 | 2014-10-14T18:31:13 | 2014-10-14T18:31:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,848 | py | import cPickle
import hashlib
import imp
import logging
import os
import time
from django.utils.translation import ugettext_lazy as _
from freenasUI.common.system import send_mail
from freenasUI.system.models import Alert as mAlert
log = logging.getLogger('system.alert')
class BaseAlertMetaclass(type):
def __new__(cls, name, *args, **kwargs):
klass = type.__new__(cls, name, *args, **kwargs)
if name.endswith('Alert'):
klass.name = name[:-5]
return klass
class BaseAlert(object):
__metaclass__ = BaseAlertMetaclass
alert = None
name = None
def __init__(self, alert):
self.alert = alert
def run(self):
"""
Returns a list of Alert objects
"""
raise NotImplementedError
class Alert(object):
OK = 'OK'
CRIT = 'CRIT'
WARN = 'WARN'
def __init__(self, level, message, id=None):
self._level = level
self._message = message
if id is None:
self._id = hashlib.md5(message.encode('utf8')).hexdigest()
else:
self._id = id
def __repr__(self):
return '<Alert: %s>' % self._id
def __str__(self):
return str(self._message)
def __unicode__(self):
return self._message.decode('utf8')
def __eq__(self, other):
return self.getId() == other.getId()
def getId(self):
return self._id
def getLevel(self):
return self._level
def getMessage(self):
return self._message
class AlertPlugins(object):
ALERT_FILE = '/var/tmp/alert'
def __init__(self):
self.basepath = os.path.abspath(
os.path.dirname(__file__)
)
self.modspath = os.path.join(self.basepath, 'alertmods/')
self.mods = []
def rescan(self):
self.mods = []
for f in sorted(os.listdir(self.modspath)):
if f.startswith('__') or not f.endswith('.py'):
continue
f = f.replace('.py', '')
fp, pathname, description = imp.find_module(f, [self.modspath])
try:
imp.load_module(f, fp, pathname, description)
finally:
if fp:
fp.close()
def register(self, klass):
instance = klass(self)
self.mods.append(instance)
def email(self, alerts):
dismisseds = [a.message_id
for a in mAlert.objects.filter(dismiss=True)]
msgs = []
for alert in alerts:
if alert.getId() not in dismisseds:
msgs.append(unicode(alert))
if len(msgs) == 0:
return
send_mail(subject=_("Critical Alerts"),
text='\n'.join(msgs))
def run(self):
obj = None
if os.path.exists(self.ALERT_FILE):
with open(self.ALERT_FILE, 'r') as f:
try:
obj = cPickle.load(f)
except:
pass
rvs = []
for instance in self.mods:
try:
rv = instance.run()
if rv:
rvs.extend(filter(None, rv))
except Exception, e:
log.error("Alert module '%s' failed: %s", instance, e)
crits = sorted([a for a in rvs if a and a.getLevel() == Alert.CRIT])
if obj and crits:
lastcrits = sorted([
a for a in obj['alerts'] if a and a.getLevel() == Alert.CRIT
])
if crits == lastcrits:
crits = []
if crits:
self.email(crits)
with open(self.ALERT_FILE, 'w') as f:
cPickle.dump({
'last': time.time(),
'alerts': rvs,
}, f)
return rvs
alertPlugins = AlertPlugins()
alertPlugins.rescan()
| [
"yvche@CJPINF65.LAN.CHC"
] | yvche@CJPINF65.LAN.CHC |
904629fa290cf70755d1b0feb377e25900090388 | 6f8f785cd767894fcc7b6133b0c2aead471d2387 | /test_project/testapp/app/loaders/__init__.py | 4e1040b5264c19bb76a7c60ee68cdcc26591512f | [] | no_license | vladz/py_ma | a9a19c246c871124bc63bf78dd18dfae961aedae | 07144dc1c40c8fb9b51bba40f19b6c84c58c7ffb | refs/heads/master | 2022-12-24T15:48:34.066226 | 2018-07-15T23:13:20 | 2018-07-15T23:13:20 | 140,962,765 | 0 | 0 | null | 2022-12-08T02:18:35 | 2018-07-14T16:53:38 | Python | UTF-8 | Python | false | false | 844 | py | import logging
from typing import Any, Dict, Iterator, Union
import requests
from .habra_config import HabraSchemaRSS
from .reddit_config import RedditSchemaRSS
logger = logging.getLogger(__name__)
_habra_schema = HabraSchemaRSS()
_reddit_schema = RedditSchemaRSS()
CONFIGS = {
'habra': {'url': 'https://habr.com/rss/hubs/all/',
'schema': _habra_schema},
'reddit': {'url': 'https://www.reddit.com/r/news/.rss',
'schema': _reddit_schema},
}
def load_rss(type: str) -> Union[Iterator[Dict[str, Any]], str]:
rss_config = CONFIGS[type]
response = requests.get(rss_config['url'])
if response.status_code != requests.codes.ok:
err = f'CODE: {response.status_code}\nMSG: {response.text}'
logger.warning(err)
return err
return rss_config['schema'].loads(response.text)
| [
"vlad.zverev@gmail.com"
] | vlad.zverev@gmail.com |
ee66a6bd15526f6ff00f62a9ee1641bd9236a49f | 66e06eec0d72dd0f1fbbf2985bbbda858591bffc | /2016/007-Mathsjam/CircleInTriangle.py | 5c15fba7fb64885b75eff5dac15c497aec504ad1 | [] | no_license | kobylin/Lab | b35cd5eba8087946d475202e4d36ef7329bb74a5 | 35a33d84e0de6c891c34aa2806052b5f695f527d | refs/heads/master | 2021-08-30T07:12:52.955872 | 2017-12-16T16:14:27 | 2017-12-16T16:14:27 | 114,474,224 | 0 | 0 | null | 2017-12-16T16:21:33 | 2017-12-16T16:21:33 | null | UTF-8 | Python | false | false | 1,004 | py | from sympy import Point,Line,Circle,intersection,Triangle,N
from svg import Svg
C = Point(0,8)
D = Point(0,2)
xaxis = Line(Point(0,0),Point(1,0))
CircleD = Circle(D,2)
tangentE = CircleD.tangent_lines(C)[0]
E = intersection(tangentE,CircleD)[0]
A = intersection(tangentE, xaxis)[0]
CircleD = Circle(D,2)
svg = Svg()
svg.append(C,"C")
#svg.append(D)
svg.append(CircleD,"CircleD")
svg.append(tangentE,"tangE")
svg.append(E,"E")
svg.append(A,"A")
def find_circle(circle,A,C,D,i):
AD = Line(A,D)
svg.append(AD,"AD",i)
K = intersection(circle, AD)[0]
svg.append(K,"K",i)
tangentK = Line(A,D).perpendicular_line(K)
svg.append(tangentK,"tangK",i)
P1 = intersection(tangentK, Line(A,C))[0]
svg.append(P1,"P1",i)
P2 = intersection(tangentK, xaxis)[0]
svg.append(P2,"P2",i)
T = Triangle(P1,A,P2)
svg.append(T,"T",i)
return T.incircle
circle = CircleD
for i in range(1):
circle = find_circle(circle,A,C,D,i)
svg.append(circle,"circle",i)
svg.close() | [
"janchrister.nilsson@gmail.com"
] | janchrister.nilsson@gmail.com |
c357b46820e4f59a76a58fc098402f26efcd25ef | 45450c9f44b6d0411efe378ab82c451b5d9008f5 | /apps/usuarios/models.py | 4d510ed152079f180e20c576478e26afdc0cb688 | [] | no_license | michel110299/Controle_visitantes | 2bb8a9b2d4103fbd121d9eaaed2a50bedc0c6a67 | 09cff24099d1578e4c1cca8094044e52a2e774ab | refs/heads/main | 2023-03-28T09:34:50.438664 | 2021-03-22T21:19:46 | 2021-03-22T21:19:46 | 349,213,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,710 | py | from django.db import models
from django.contrib.auth.models import (
BaseUserManager,
AbstractBaseUser,
PermissionsMixin,
)
class UsuarioManager(BaseUserManager):
def create_user(self,email,password=None):
usuario = self.model(
email = self.normalize_email(email)
)
usuario.is_active = True
usuario.is_staff = False
usuario.is_superuser = False
if password:
usuario.set_password(password)
usuario.save()
return usuario
def create_superuser(self,email,password):
usuario = self.create_user(
email = self.normalize_email(email),
password = password,
)
usuario.is_active = True
usuario.is_staff = True
usuario.is_superuser = True
usuario.set_password(password)
usuario.save()
return usuario
class Usuario(AbstractBaseUser,PermissionsMixin):
email = models.EmailField(
verbose_name="E-mail do usuário",
max_length = 194,
unique = True,
)
is_active = models.BooleanField(
verbose_name="Usuário está ativo",
default=True,
)
is_staff = models.BooleanField(
verbose_name="Usuário é da equipe de desenvolvimento",
default= False,
)
is_superuser = models.BooleanField(
verbose_name= "Usuário é um superusuario",
default=False,
)
USERNAME_FIELD = "email"
objects = UsuarioManager()
class Meta:
verbose_name = "Usuário"
verbose_name_plural = "Usuários"
db_table = "usuario"
def __str__(self):
return self.email | [
"michelbong1@hotmail.com"
] | michelbong1@hotmail.com |
e656498f0ca49848d8a42856e755b6d4cd71d9a7 | 3847e61db8a91dec38f8c98c5b403b9d1f7e43f2 | /getfile.py | c36559c48c64681de00a79d59b813193701dcf35 | [
"Unlicense"
] | permissive | blackkucai/carina | 4ac8753fd237b59f3ab16103d402d45d41531d9c | 4f02958ff081d5db35cc8cc93c3fbc427bdfa3b6 | refs/heads/main | 2023-03-03T08:08:35.843024 | 2021-02-11T15:11:21 | 2021-02-11T15:11:21 | 332,027,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | # getfile script
# created by blackkucai 2021
#############################
import os
def mixin(infile, infile1, outfile):
print('merging.....')
os.system(f"ffmpeg -y -loglevel repeat+info -i {infile} -i {infile1} -c copy -map 0:v:0 -map 1:a:0 {outfile}")
def audio2mp3(file, out):
print('converting...')
os.system(f'ffmpeg -y -loglevel repeat+info -i {file} {out}')
def cdir():
dire = 'False'
print('checking download directory')
sc = os.system('find . -name download >> _isok')
with open('_isok', 'r') as f:
a = f.read()
if a == '':
dire = 'True'
print('creating directory..')
os.mkdir('download')
os.mkdir('download/mayang')
os.mkdir('download/audio')
os.mkdir('download/video')
os.mkdir('download/dist')
f.close()
os.system('rm _isok')
print('creating dirs done')
else:
print(dire)
os.system('rm _isok')
| [
"noreply@github.com"
] | blackkucai.noreply@github.com |
411ef79890d14c02e742337d2991215440e6dba8 | e3acab02bf017411877a39b648770c4900917381 | /ORDSR.py | 62888a760ce346fd7ccc09e3c205d521532df0bc | [] | no_license | amwons/ORDSR | 9615c7181cd0656203427d520eebb1968b610868 | e7626cb2d85561921b4e4e82a1e8164d24c20907 | refs/heads/master | 2020-05-18T21:40:33.490414 | 2019-02-21T21:08:35 | 2019-02-21T21:08:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 949 | py | import tensorflow as tf
import cv2
import numpy as np
def load_graph(frozen_graph_filename):
with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def, name="ORDSR")
return graph
if __name__ == '__main__':
image = cv2.imread('dem.bmp', 0)
image = image.astype(np.float32) / 255
testInput = image[np.newaxis, ..., np.newaxis]
scale = 3
graph = load_graph('./model/x{}.pb'.format(scale))
input_op = graph.get_tensor_by_name('ORDSR/input_op:0')
output_op = graph.get_tensor_by_name('ORDSR/output_op:0')
print('{}'.format(testInput.dtype))
with tf.Session(graph=graph) as sess:
SR = sess.run([output_op], feed_dict={input_op: testInput.astype(np.float)})
cv2.imwrite('./dem_SR.bmp', SR[0][0, ...] * 255)
print('ORDSR finished!')
| [
"tong.renly@gmail.com"
] | tong.renly@gmail.com |
079fd51ca5e45cffa0f9425d1c67621163603fb6 | 4b533bb2bebcc5ba238aac63d22b5d67d240ce5f | /ibanity/api/SandboxAccount.py | d7961455f3ad47e16b4fc740e5a41aaf8222757b | [
"MIT"
] | permissive | amrfaissal/ibanity-python | 2b623dfcab9aaa0ca3f130b26f787a4aac238f59 | be3ceb667e9d8f6e51074ca47c473f3f81060399 | refs/heads/master | 2020-03-10T08:59:40.122604 | 2018-03-23T14:53:34 | 2018-03-23T14:53:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,046 | py | from collections import namedtuple
from ibanity import Ibanity
def get_list_for_financial_institution(financial_institution_id, sandbox_user_id, params={}):
uri = Ibanity.client.api_schema["sandbox"]["accounts"] \
.replace("{financialInstitutionId}", financial_institution_id) \
.replace("{sandboxUserId}", sandbox_user_id) \
.replace("{sandboxAccountId}", "")
response = Ibanity.client.get(uri, params, None)
return list(
map(
lambda account:
__create_account_named_tuple__(account), response["data"]
)
)
def create(financial_institution_id, sandbox_user_id, attributes):
uri = Ibanity.client.api_schema["sandbox"]["accounts"] \
.replace("{financialInstitutionId}", financial_institution_id) \
.replace("{sandboxUserId}", sandbox_user_id) \
.replace("{sandboxAccountId}", "")
body = {
"data": {
"type": "sandboxAccount",
"attributes": attributes
}
}
response = Ibanity.client.post(uri, body, {}, None)
return __create_account_named_tuple__(response["data"])
def delete(financial_institution_id, sandbox_user_id, id):
uri = Ibanity.client.api_schema["sandbox"]["accounts"] \
.replace("{financialInstitutionId}", financial_institution_id) \
.replace("{sandboxUserId}", sandbox_user_id) \
.replace("{sandboxAccountId}", id)
response = Ibanity.client.delete(uri, {}, None)
return __create_account_named_tuple__(response["data"])
def find(financial_institution_id, sandbox_user_id, id):
uri = Ibanity.client.api_schema["sandbox"]["accounts"] \
.replace("{financialInstitutionId}", financial_institution_id) \
.replace("{sandboxUserId}", sandbox_user_id) \
.replace("{sandboxAccountId}", id)
response = Ibanity.client.get(uri, {}, None)
return __create_account_named_tuple__(response["data"])
def __create_account_named_tuple__(account):
return namedtuple("SandboxAccount", account.keys())(**account)
| [
"marc.lainez@gmail.com"
] | marc.lainez@gmail.com |
cadcc980a4f91068138a6cb9c357e3e803ec43f5 | c5bdc6feced5756320024972cd4ac05bc484bed6 | /_papers/fmc_1/mpp_params.py | e2b9f07ea054ba5bc2f3d4a27e8ce5985fa545a2 | [] | no_license | atimokhin/tdc_vis | 88e3e4f535bc4f0d1e48904ac7ab7ce76983a1a5 | 775dc841b1d8538584c8c68a5f75ae997191e685 | refs/heads/master | 2020-06-04T01:58:28.211240 | 2014-07-08T15:09:21 | 2014-07-08T15:09:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | mpp_params = dict( fig_width_abs = 3.5,
aspect_ratio = 1.618,
dx_pad_abs = 0.1,
dy_pad_abs = 0.1,
left_margin_abs = 0.45,
right_margin_abs = 0.05,
top_margin_abs = 0.2,
bottom_margin_abs = 0.35,
xlabel_bottom_y_abs = 0.01,
xlabel_top_y_abs = 0.01,
ylabel_left_x_abs = 0.01,
ylabel_right_x_abs = 0.10 )
| [
"atimokhin@gmail.com"
] | atimokhin@gmail.com |
2823a48cbeebcac8a5b49aeb6306ea0ebabe21e0 | 01f535557c2275a0c0cd91687d52c644e8176d00 | /src/vtra/analysis/flow_assignment/industry_flows.py | f08d77411f7d216c3c28a8190c7613a014fea9c4 | [] | no_license | mmc00/oia-transport-archive | a8eaf72751a2c11b2cc2dc475e6eed2421d75381 | f89cb686704fe76c1665697b35d14caccf37f3a1 | refs/heads/master | 2022-03-28T17:44:41.915217 | 2020-01-09T16:22:10 | 2020-01-09T16:22:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,444 | py | """Summarise hazard data
Get OD data and process it
Author: Raghav Pant
Date: April 20, 2018
"""
import configparser
import csv
import glob
import os
import fiona
import fiona.crs
import rasterio
from sqlalchemy import create_engine
import subprocess as sp
import psycopg2
import osgeo.ogr as ogr
import pandas as pd
import copy
import ast
from osgeo import gdal
import geopandas as gpd
from shapely.geometry import Point
from geoalchemy2 import Geometry, WKTElement
import numpy as np
from vtra.utils import load_config
from vtra.dbutils import *
import vtra.transport_network_creation as tnc
def main():
'''
Create the database connection
'''
conf = load_config()
try:
conn = psycopg2.connect(**conf['database'])
except:
print ("I am unable to connect to the database")
curs = conn.cursor()
engine = create_engine('postgresql://{user}:{password}@{host}:{port}/{database}'.format({
**conf['database']
}))
od_data_file = os.path.join(conf['paths']['data'], 'od_data', 'OD_transport_data_2008_v2.xlsx')
'''
Step 2: Create the OD proprotions for the differnet modes
'''
'''
First get the modal shares
'''
modes = ['road','rail','air','water']
mode_cols = ['road','rail','air','inland','coastal']
new_mode_cols = ['o','d','road','rail','air','water']
mode_table = ['airport_nodes','waternodes','railnetworknodes','road2009nodes']
mode_edge_tables = ['airport_edges','wateredges','railnetworkedges','road2009edges']
mode_flow_tables = []
for mo in mode_edge_tables:
fl_table = mo + '_flows'
mode_flow_tables.append(fl_table)
'''
Get the modal shares
'''
od_data_modes = pd.read_excel(od_data_file,sheet_name = 'mode').fillna(0)
# od_data_modes.columns = map(str.lower, od_data_modes.columns)
o_id_col = 'o'
d_id_col = 'd'
od_data_modes['total'] = od_data_modes[mode_cols].sum(axis=1)
for m in mode_cols:
od_data_modes[m] = od_data_modes[m]/od_data_modes['total'].replace(np.inf, 0)
od_data_modes['water'] = od_data_modes['inland'] + od_data_modes['coastal']
od_data_modes = od_data_modes.fillna(0)
# od_data_modes.to_csv('mode_frac.csv',index = False)
od_fracs = od_data_modes[new_mode_cols]
od_data_com = pd.read_excel(od_data_file,sheet_name = 'goods').fillna(0)
ind_cols = ['sugar','wood','steel','constructi','cement','fertilizer','coal','petroluem','manufactur','fishery','meat']
od_fracs = pd.merge(od_fracs,od_data_com,how='left', on=['o','d'])
del od_data_com,od_data_modes
od_fracs = od_fracs.fillna(0)
# od_fracs.to_csv('od_fracs.csv')
for ind in ind_cols:
'''
Step 2 assign the crop to the closest transport mode node
'''
# mode_table = ['road2009nodes','railwaynetworknodes','airport_nodes','waternodes']
# mode_edge_tables = ['road2009edges','railwaynetworkedges','airport_edges','wateredges']
# modes = ['road','rail','air','water']
modes = ['air','water','rail','road']
mode_id = 'node_id'
od_id = 'od_id'
pop_id = 'population'
o_id_col = 'o'
d_id_col = 'd'
'''
Get the network
'''
eid = 'edge_id'
nfid = 'node_f_id'
ntid = 'node_t_id'
spid = 'speed'
gmid = 'geom'
o_id_col = 'o'
d_id_col = 'd'
'''
Get the node edge flows
'''
excel_writer = pd.ExcelWriter('vietnam_flow_stats_' + ind + '.xlsx')
for m in range(len(mode_table)):
od_nodes_regions = []
sql_query = '''select {0}, {1}, 100*{2}/(sum({3}) over (Partition by {4})) from {5}
'''.format(mode_id,od_id,pop_id,pop_id,od_id,mode_table[m])
curs.execute(sql_query)
read_layer = curs.fetchall()
if read_layer:
for row in read_layer:
n = row[0]
r = row[1]
p = float(row[2])
if p > 0:
od_nodes_regions.append((n,r,p))
all_net_dict = {'edge':[],'from_node':[],'to_node':[],'distance':[],'speed':[],'travel_cost':[]}
all_net_dict = tnc.create_network_dictionary(all_net_dict,mode_edge_tables[m],eid,nfid,ntid,spid,'geom',curs,conn)
od_net = tnc.create_igraph_topology(all_net_dict)
'''
Get the OD flows
'''
net_dict = {'Origin_id':[],'Destination_id':[],'Origin_region':[],'Destination_region':[],'Tonnage':[],'edge_path':[],'node_path':[]}
ofile = 'network_od_flows_' + ind + modes[m] + '.csv'
output_file = open(ofile,'w')
wr = csv.writer(output_file, delimiter=',', quoting=csv.QUOTE_MINIMAL)
wr.writerow(net_dict.keys())
ind_mode = modes[m]+ '_' + ind
od_fracs[ind_mode] = od_fracs[modes[m]]*od_fracs[ind]
od_flows = list(zip(od_fracs[o_id_col].values.tolist(),od_fracs[d_id_col].values.tolist(),od_fracs[ind_mode].values.tolist()))
origins = list(set(od_fracs[o_id_col].values.tolist()))
destinations = list(set(od_fracs[d_id_col].values.tolist()))
dflows = []
# print (od_flows)
for o in origins:
for d in destinations:
fval = [fl for (org,des,fl) in od_flows if org == o and des == d]
if len(fval) == 1 and fval[0] > 0:
o_matches = [(item[0],item[2]) for item in od_nodes_regions if item[1] == o]
if len(o_matches) > 0:
for o_vals in o_matches:
o_val = 1.0*fval[0]*(1.0*o_vals[1]/100)
o_node = o_vals[0]
d_matches = [(item[0],item[2]) for item in od_nodes_regions if item[1] == d]
if len(d_matches) > 0:
for d_vals in d_matches:
od_val = 1.0*o_val*(1.0*d_vals[1]/100)
d_node = d_vals[0]
if od_val > 0 and o_node != d_node:
# od_net = tnc.add_igraph_costs(od_net,t_val,0)
orgn_node = od_net.vs['node'].index(o_node)
dest_node = od_net.vs['node'].index(d_node)
# n_pth = od_net.get_shortest_paths(orgn_node,to = dest_node, weights = 'travel_cost', mode = 'OUT', output='vpath')[0]
e_pth = od_net.get_shortest_paths(orgn_node,to = dest_node, weights = 'travel_cost', mode = 'OUT', output='epath')[0]
# n_list = [od_net.vs[n]['node'] for n in n_pth]
e_list = [od_net.es[n]['edge'] for n in e_pth]
# cst = sum([od_net.es[n]['cost'] for n in e_pth])
net_dict = {'Origin_id':o_node,'Destination_id':d_node,'Origin_region':o,'Destination_region':d,
'Tonnage':od_val,'edge_path':e_list,'node_path':[o_node,d_node]}
wr.writerow(net_dict.values())
dflows.append((str([o_node,d_node]),str(e_list),od_val))
print (o,d,fval,modes[m],ind)
node_table = modes[m] + '_node_flows'
edge_table = modes[m] + '_edge_flows'
# dom_flows = pd.read_csv(ofile).fillna(0)
dom_flows = pd.DataFrame(dflows,columns = ['node_path', 'edge_path','Tonnage'])
flow_node_edge = dom_flows.groupby(['node_path', 'edge_path'])['Tonnage'].sum().reset_index()
n_dict = {}
e_dict = {}
n_dict,e_dict = get_node_edge_flows(flow_node_edge,n_dict,e_dict)
node_list = get_id_flows(n_dict)
df = pd.DataFrame(node_list, columns = ['node_id',ind])
df.to_excel(excel_writer,node_table,index = False)
excel_writer.save()
edge_list = get_id_flows(e_dict)
df = pd.DataFrame(edge_list, columns = ['edge_id',ind])
df.to_excel(excel_writer,edge_table,index = False)
excel_writer.save()
if df.empty:
add_zeros_columns_to_table_psycopg2(mode_flow_tables[m], [ind],['double precision'],conn)
else:
df.to_sql('dummy_flows', engine, if_exists = 'replace', schema = 'public', index = False)
add_columns_to_table_psycopg2(mode_flow_tables[m], 'dummy_flows', [ind],['double precision'], 'edge_id',conn)
curs.close()
conn.close()
if __name__ == '__main__':
main()
| [
"tomalrussell@gmail.com"
] | tomalrussell@gmail.com |
71c247fe2f36569f7826c57d4720399f571dc35c | 7dd4cbde628943bf9940d252531a3aa5e16185e3 | /user/migrations/0003_auto_20200418_1910.py | 6bcbe9898355ec8db71d379e72ba1d196c121fec | [] | no_license | rahulsa123/GannaWorld | 3bdcea27842b5270b7afd1ef29ed43723c394262 | d22345514e0a1c97d28f3af4af8136e2e7cca4e2 | refs/heads/master | 2022-11-29T23:30:34.708026 | 2020-08-07T05:13:07 | 2020-08-07T05:13:07 | 285,736,253 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | # Generated by Django 3.0.5 on 2020-04-18 19:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('user', '0002_auto_20200418_1836'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL),
),
]
| [
"Rahul.et2016@gmail.com"
] | Rahul.et2016@gmail.com |
389b53f6852ec0e871e2dccfb26ff6b7882d196d | d15e3e60b5a5e5343d70bd054acd4d911aa26e0a | /app/core/migrations/0001_initial.py | b3e18b0c371f057411ffb35af3315140bb76bd37 | [
"MIT"
] | permissive | mgunn0103/recipe-app-api | d9f827060c42dd787cafd38d9bc91ed1f599a05c | b9b616293745bff485f6b43b0f7b263679f5214e | refs/heads/master | 2020-04-28T14:00:43.778564 | 2019-03-30T17:52:47 | 2019-03-30T17:52:47 | 175,324,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,708 | py | # Generated by Django 2.1.7 on 2019-03-30 15:37
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| [
"marcus.gunn@yahoo.com"
] | marcus.gunn@yahoo.com |
4ca5452f0df11cd0388491948693a1c50cf6a03e | 6be1990abf99c85ef886b49dcea1824aabb648d3 | /weixinofneolocal/weixinofneolocal/libs/PIL/GbrImagePlugin.py | ff0f60f5d130760331d401418d04076713c432fc | [] | no_license | neoguojing/cloudServer | b53ae205efe52cf0aea28dbb9e6c16c20caf991f | 7c19101789b0c46474269e4c8fe00e92203e9cd7 | refs/heads/master | 2020-12-04T23:02:23.551479 | 2017-09-22T03:08:35 | 2017-09-22T03:08:35 | 67,382,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,635 | py | #
# The Python Imaging Library
# $Id$
#
# load a GIMP brush file
#
# History:
# 96-03-14 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
import Image, ImageFile
def i32(c):
return ord(c[3]) + (ord(c[2]) << 8) + (ord(c[1]) << 16) + (ord(c[0]) << 24L)
def _accept(prefix):
return i32(prefix) >= 20 and i32(prefix[4:8]) == 1
# #
# Image plugin for the GIMP brush format.
class GbrImageFile(ImageFile.ImageFile):
format = "GBR"
format_description = "GIMP brush file"
def _open(self):
header_size = i32(self.fp.read(4))
version = i32(self.fp.read(4))
if header_size < 20 or version != 1:
raise SyntaxError, "not a GIMP brush"
width = i32(self.fp.read(4))
height = i32(self.fp.read(4))
bytes = i32(self.fp.read(4))
if width <= 0 or height <= 0 or bytes != 1:
raise SyntaxError, "not a GIMP brush"
comment = self.fp.read(header_size - 20)[:-1]
self.mode = "L"
self.size = width, height
self.info["comment"] = comment
# Since the brush is so small, we read the data immediately
self.data = self.fp.read(width * height)
def load(self):
if not self.data:
return
# create an image out of the brush data block
self.im = Image.core.new(self.mode, self.size)
self.im.fromstring(self.data)
self.data = ""
#
# registry
Image.register_open("GBR", GbrImageFile, _accept)
Image.register_extension("GBR", ".gbr")
| [
"guojing_neo@163.com"
] | guojing_neo@163.com |
dbfd25de7fd3544d488836fbfcd5b034e9420674 | af1446a9d8a2954d87d31ae6ce0ce0445f14cb21 | /04_testing/tests/cases.py | a5ff38c7f639ea9be3c696a9bd4b1ae4895e41ea | [
"Apache-2.0"
] | permissive | fiercemark/Otus | 2116911bba5823599b865bed66f254c2c355cf29 | a7dfbe480d7c5f9345cf32778c63562a62103f71 | refs/heads/master | 2022-12-18T03:49:10.774968 | 2020-03-09T20:20:02 | 2020-03-09T20:20:02 | 221,866,276 | 0 | 0 | null | 2022-12-08T06:18:22 | 2019-11-15T07:15:35 | Python | UTF-8 | Python | false | false | 469 | py | from functools import wraps
def cases(cases):
def decorator(f):
@wraps(f)
def wrapper(*args):
for case in cases:
new_args = args + (case if isinstance(case, tuple) else (case,))
try:
f(*new_args)
except Exception as e:
print('\n')
print('FAIL case:', case)
raise e
return wrapper
return decorator | [
"vlavrenchenko@yandex-team.ru"
] | vlavrenchenko@yandex-team.ru |
f0ca024c368b545cabb624e52bed43b175847023 | e729ef07bf22538effdd330b29aed184435195da | /tests/unit/validators/test_length_validator.py | c2951beb340434b6df17341237cc4bcbfc4e44d8 | [] | no_license | Storytelling-Software/backend-boilerplate | 6876f1c52194d20b59823de1a60290eef24f12d7 | ff1083aa1b94ca1a3bb075976ad58dd40c529b47 | refs/heads/master | 2023-05-06T01:32:01.210363 | 2021-05-31T09:23:50 | 2021-05-31T09:23:50 | 370,971,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,202 | py | from validators import LengthValidator
from validators.base_validator import BaseValidator
class TestLengthValidator:
def setup(self):
self.validator = LengthValidator('password', 8, 32)
assert self.validator.key == 'password'
assert isinstance(self.validator, BaseValidator) is True
def test_is_valid_invalid_len_0(self):
args = {'password': ''}
assert not self.validator.is_valid(args)
def test_is_valid_invalid_len_7(self):
args = {'password': '1234567'}
assert not self.validator.is_valid(args)
def test_is_valid_valid_len_8(self):
args = {'password': '12345678'}
assert self.validator.is_valid(args)
def test_is_valid_valid_len_32(self):
args = {'password': '12345678901234567890123456789012'}
assert self.validator.is_valid(args)
def test_is_valid_invalid_len_33(self):
args = {'password': '123456789012345678901234567890123'}
assert not self.validator.is_valid(args)
def test_error(self):
result = self.validator.error()
assert result['message'] == 'Length must be between 8 and 32'
assert result['key'] == 'error_invalid_length'
| [
"dmitrij.u.gusev@gmail.com"
] | dmitrij.u.gusev@gmail.com |
cf09bc4e967350c5ec01e4a5d85be9ff5ef90de7 | 1a5f6233dafa17c763b0e96f3712ca225d6ff6e6 | /code/labels_range.py | 9c74816f1410b15deea263519e0b63fb31d3e65e | [
"Apache-2.0"
] | permissive | jaypriyadarshi/Expert-recommendation-system | 722fe8fe491ee0297cb59c7f2062d9f6c00c8535 | 544c0ad29b2faf2d767d0f28aea82a6cdb43563f | refs/heads/master | 2020-04-23T04:31:54.528955 | 2016-12-13T11:12:17 | 2016-12-13T11:12:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | import json
import numpy as np
import cPickle as pickle
with open('../validation/v_xgboost_word_tfidf.csv') as train_file:
content = train_file.readlines()
testData = []
scores = []
element = content[1].strip("\r\n").split(",")
for i in range(1, len(content)):
element = content[i].strip("\r\n").split(",")
testData.append([element[0],element[1]])
scores.append(float(element[2]))
predictions = []
maxscore = max(scores)
minscore = min(scores)
for score in scores:
predictions.append((score-minscore)/float(maxscore-minscore))
ypred = predictions
with open('../validation/v_xgboost_word_tfidf_0-1.csv', 'w') as f1:
f1.write('qid,uid,label\n')
for i in range(0, len(ypred)):
f1.write(testData[i][0]+','+testData[i][1]+','+str(ypred[i])+'\n') | [
"jswt001@gmail.com"
] | jswt001@gmail.com |
c2afa2f4ed3d27b5eb256f45fbb043bb45179a34 | e167dfb535b72f56ea3c30c498f2a74324e9e04c | /app/common/model_utils.py | 7b0f98496899cb726bdd5a7ea11ccb8adc155300 | [
"MIT"
] | permissive | wlmsoft/Alpha-Gobang-Zero | ebde341af3ac6ecd9b6a71fdb0decedce078d2e8 | f836aee7147aa2aeb47dd8b370f94950b833718d | refs/heads/master | 2023-07-23T20:40:51.448213 | 2021-09-02T14:42:25 | 2021-09-02T14:42:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | # coding:utf-8
import torch
from alphazero import PolicyValueNet
def testModel(model: str):
""" 测试模型是否可用
Parameters
----------
model: str
模型路径
"""
try:
model = torch.load(model)
return isinstance(model, PolicyValueNet)
except:
return False
| [
"1319158137@qq.com"
] | 1319158137@qq.com |
9f2dbf764451a2c4122d7b974bb9bc7f7a433a1c | 63901febdaea0453abdd1d7e4b704ac072152df3 | /storage_delete_file.py | 206c15cefa11d354509ffbd9e1d03b67ffc6b026 | [] | no_license | TekSubedi/comp4312_assignment2_0869016 | b21032cd80269b5c26c6bb9ce1746cf763ddfa30 | db4159d513bf608651820abc010555cf38109a56 | refs/heads/main | 2023-01-03T05:25:47.098525 | 2020-10-20T23:15:18 | 2020-10-20T23:15:18 | 305,853,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py | #!/usr/bin/env python
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# [START storage_delete_file]
from google.cloud import storage
def delete_blob(bucket_name, blob_name):
"""Deletes a blob from the bucket."""
# bucket_name = "your-bucket-name"
# blob_name = "your-object-name"
storage_client = storage.Client("assignment2-tek")
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(blob_name)
blob.delete()
print("Blob {} deleted.".format(blob_name))
# [END storage_delete_file]
if __name__ == "__main__":
delete_blob(bucket_name=sys.argv[1], blob_name=sys.argv[2])
| [
"noreply@github.com"
] | TekSubedi.noreply@github.com |
d806ee3980c7ae0398650889ab57d8dc28f35f55 | cae429939b5e2a36a84ec5007e050e0ad6d37ebb | /advanced_filters/tests/test_admin.py | 6da2ced6c33585da9997ab65b273ec2a08dd4360 | [
"MIT"
] | permissive | smartfactory-gmbh/django-advanced-filters | 6a6afda8fa09a9b4c6dc8d8dfd7f78bf40dcae51 | 88c5faf0a6e40c23b7bfa3fcec8c02c6da57574e | refs/heads/master | 2022-12-31T01:32:20.917439 | 2017-03-24T09:48:52 | 2017-03-24T09:52:28 | 256,427,481 | 0 | 1 | MIT | 2020-04-17T07:01:19 | 2020-04-17T07:01:18 | null | UTF-8 | Python | false | false | 7,616 | py | from django.core.urlresolvers import reverse
from django.contrib.auth.models import Permission
from django.db.models import Q
from django.test import TestCase
from ..models import AdvancedFilter
from tests import factories
class ChageFormAdminTest(TestCase):
""" Test the AdvancedFilter admin change page """
def setUp(self):
self.user = factories.SalesRep()
assert self.client.login(username='user', password='test')
self.a = AdvancedFilter(title='test', url='test', created_by=self.user,
model='customers.Client')
self.a.query = Q(email__iexact='a@a.com')
self.a.save()
def test_change_page_requires_perms(self):
url = reverse('admin:advanced_filters_advancedfilter_change',
args=(self.a.pk,))
res = self.client.get(url)
assert res.status_code == 403
def test_change_page_renders(self):
self.user.user_permissions.add(Permission.objects.get(
codename='change_advancedfilter'))
url = reverse('admin:advanced_filters_advancedfilter_change',
args=(self.a.pk,))
res = self.client.get(url)
assert res.status_code == 200
def test_change_and_goto(self):
self.user.user_permissions.add(Permission.objects.get(
codename='change_advancedfilter'))
url = reverse('admin:advanced_filters_advancedfilter_change',
args=(self.a.pk,))
form_data = {'form-TOTAL_FORMS': 1, 'form-INITIAL_FORMS': 0,
'_save_goto': 1}
res = self.client.post(url, data=form_data)
assert res.status_code == 302
# django == 1.5 support
if hasattr(res, 'url'):
assert res.url.endswith('admin/customers/client/?_afilter=1')
else:
url = res['location']
assert url.endswith('admin/customers/client/?_afilter=1')
def test_create_page_disabled(self):
self.user.user_permissions.add(Permission.objects.get(
codename='add_advancedfilter'))
url = reverse('admin:advanced_filters_advancedfilter_add')
res = self.client.get(url)
assert res.status_code == 403
class AdvancedFilterCreationTest(TestCase):
""" Test creation of AdvancedFilter in target model changelist """
form_data = {'form-TOTAL_FORMS': 1, 'form-INITIAL_FORMS': 0,
'action': 'advanced_filters'}
good_data = {'title': 'Test title', 'form-0-field': 'language',
'form-0-operator': 'iexact', 'form-0-value': 'ru', }
query = ['language__iexact', 'ru']
def setUp(self):
self.user = factories.SalesRep()
assert self.client.login(username='user', password='test')
def test_changelist_includes_form(self):
self.user.user_permissions.add(Permission.objects.get(
codename='change_client'))
url = reverse('admin:customers_client_changelist')
res = self.client.get(url)
assert res.status_code == 200
title = ['Create advanced filter']
fields = ['First name', 'Language', 'Sales Rep']
# python >= 3.3 support
response_content = res.content.decode('utf-8')
for part in title + fields:
assert part in response_content
def test_create_form_validation(self):
self.user.user_permissions.add(Permission.objects.get(
codename='change_client'))
url = reverse('admin:customers_client_changelist')
form_data = self.form_data.copy()
res = self.client.post(url, data=form_data)
assert res.status_code == 200
form = res.context_data['advanced_filters']
assert 'title' in form.errors
assert '__all__' in form.errors
assert form.errors['title'] == ['This field is required.']
assert form.errors['__all__'] == ['Error validating filter forms']
def test_create_form_valid(self):
self.user.user_permissions.add(Permission.objects.get(
codename='change_client'))
url = reverse('admin:customers_client_changelist')
form_data = self.form_data.copy()
form_data.update(self.good_data)
res = self.client.post(url, data=form_data)
assert res.status_code == 200
form = res.context_data['advanced_filters']
assert form.is_valid()
assert AdvancedFilter.objects.count() == 1
# django == 1.5 support
created_filter = AdvancedFilter.objects.order_by('-pk')[0]
assert created_filter.title == self.good_data['title']
assert list(created_filter.query.children[0]) == self.query
# save with redirect to filter
form_data['_save_goto'] = 1
res = self.client.post(url, data=form_data)
assert res.status_code == 302
assert AdvancedFilter.objects.count() == 2
# django == 1.5 support
created_filter = AdvancedFilter.objects.order_by('-pk')[0]
if hasattr(res, 'url'):
assert res.url.endswith('admin/customers/client/?_afilter=%s' %
created_filter.pk)
else:
url = res['location']
assert url.endswith('admin/customers/client/?_afilter=%s' %
created_filter.pk)
assert list(created_filter.query.children[0]) == self.query
class AdvancedFilterUsageTest(TestCase):
""" Test filter visibility and actual filtering of a changelist """
def setUp(self):
self.user = factories.SalesRep()
assert self.client.login(username='user', password='test')
factories.Client.create_batch(8, assigned_to=self.user, language='en')
factories.Client.create_batch(2, assigned_to=self.user, language='ru')
self.user.user_permissions.add(Permission.objects.get(
codename='change_client'))
self.a = AdvancedFilter(title='Russian speakers', url='foo',
created_by=self.user, model='customers.Client')
self.a.query = Q(language='ru')
self.a.save()
def test_filters_not_available(self):
url = reverse('admin:customers_client_changelist')
res = self.client.get(url, data={'_afilter': self.a.pk})
assert res.status_code == 200
cl = res.context_data['cl']
assert not cl.filter_specs
# filter not applied due to user not being in list
if hasattr(cl, 'queryset'):
assert cl.queryset.count() == 10
else:
# django == 1.5 support
assert cl.query_set.count() == 10
def test_filters_available_to_users(self):
self.a.users.add(self.user)
url = reverse('admin:customers_client_changelist')
res = self.client.get(url, data={'_afilter': self.a.pk})
assert res.status_code == 200
cl = res.context_data['cl']
assert cl.filter_specs
if hasattr(cl, 'queryset'):
assert cl.queryset.count() == 2
else:
# django == 1.5 support
assert cl.query_set.count() == 2
def test_filters_available_to_groups(self):
group = self.user.groups.create()
self.a.groups.add(group)
url = reverse('admin:customers_client_changelist')
res = self.client.get(url, data={'_afilter': self.a.pk})
assert res.status_code == 200
cl = res.context_data['cl']
assert cl.filter_specs
if hasattr(cl, 'queryset'):
assert cl.queryset.count() == 2
else:
# django == 1.5 support
assert cl.query_set.count() == 2
| [
"asfaltboy@gmail.com"
] | asfaltboy@gmail.com |
bd15811b1f2fa433f9fbce560c2bb146a9882c43 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/nlp/Bert-text-classification_for_PyTorch/transformers/src/transformers/models/convnext/feature_extraction_convnext.py | 860bda96b6d2ca7b488d2f710a55318ee5e5e41c | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 7,348 | py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature extractor class for ConvNeXT."""
from typing import Optional, Union
import numpy as np
from PIL import Image
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...file_utils import TensorType
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ImageFeatureExtractionMixin,
ImageInput,
is_torch_tensor,
)
from ...utils import logging
logger = logging.get_logger(__name__)
class ConvNextFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
r"""
Constructs a ConvNeXT feature extractor.
This feature extractor inherits from [`FeatureExtractionMixin`] which contains most of the main methods. Users
should refer to this superclass for more information regarding those methods.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize (and optionally center crop) the input to a certain `size`.
size (`int`, *optional*, defaults to 224):
Resize the input to the given size. If 384 or larger, the image is resized to (`size`, `size`). Else, the
smaller edge of the image will be matched to int(`size`/ `crop_pct`), after which the image is cropped to
`size`. Only has an effect if `do_resize` is set to `True`.
resample (`int`, *optional*, defaults to `PIL.Image.BICUBIC`):
An optional resampling filter. This can be one of `PIL.Image.NEAREST`, `PIL.Image.BOX`,
`PIL.Image.BILINEAR`, `PIL.Image.HAMMING`, `PIL.Image.BICUBIC` or `PIL.Image.LANCZOS`. Only has an effect
if `do_resize` is set to `True`.
crop_pct (`float`, *optional*):
The percentage of the image to crop. If `None`, then a cropping percentage of 224 / 256 is used. Only has
an effect if `do_resize` is set to `True` and `size` < 384.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the input with mean and standard deviation.
image_mean (`List[int]`, defaults to `[0.485, 0.456, 0.406]`):
The sequence of means for each channel, to be used when normalizing images.
image_std (`List[int]`, defaults to `[0.229, 0.224, 0.225]`):
The sequence of standard deviations for each channel, to be used when normalizing images.
"""
model_input_names = ["pixel_values"]
def __init__(
self,
do_resize=True,
size=224,
resample=Image.BICUBIC,
crop_pct=None,
do_normalize=True,
image_mean=None,
image_std=None,
**kwargs
):
super().__init__(**kwargs)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.crop_pct = crop_pct
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __call__(
self, images: ImageInput, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs
) -> BatchFeature:
"""
Main method to prepare for the model one or several image(s).
<Tip warning={true}>
NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass
PIL images.
</Tip>
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
number of channels, H and W are image height and width.
return_tensors (`str` or [`~file_utils.TensorType`], *optional*, defaults to `'np'`):
If set, will return tensors of a particular framework. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
- `'jax'`: Return JAX `jnp.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **pixel_values** -- Pixel values to be fed to a model, of shape (batch_size, num_channels, height,
width).
"""
# Input type checking for clearer error
valid_images = False
# Check that images has a valid type
if isinstance(images, (Image.Image, np.ndarray)) or is_torch_tensor(images):
valid_images = True
elif isinstance(images, (list, tuple)):
if len(images) == 0 or isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]):
valid_images = True
if not valid_images:
raise ValueError(
"Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), "
"`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)."
)
is_batched = bool(
isinstance(images, (list, tuple))
and (isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]))
)
if not is_batched:
images = [images]
# transformations (resizing and optional center cropping + normalization)
if self.do_resize and self.size is not None:
if self.size >= 384:
# warping (no cropping) when evaluated at 384 or larger
images = [self.resize(image=image, size=self.size, resample=self.resample) for image in images]
else:
if self.crop_pct is None:
self.crop_pct = 224 / 256
size = int(self.size / self.crop_pct)
# to maintain same ratio w.r.t. 224 images
images = [
self.resize(image=image, size=size, default_to_square=False, resample=self.resample)
for image in images
]
images = [self.center_crop(image=image, size=self.size) for image in images]
if self.do_normalize:
images = [self.normalize(image=image, mean=self.image_mean, std=self.image_std) for image in images]
# return as BatchFeature
data = {"pixel_values": images}
encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
return encoded_inputs
| [
"dongwenbo6@huawei.com"
] | dongwenbo6@huawei.com |
0d9f0095db0121632110f71437b771c3d05d915e | 1fe6c1f00c84477db85651dfa4cb7ce5b1bd9eb0 | /AI.py | 1b97c96438995e024f28a6ee175adf7dac3b23af | [] | no_license | FarzamAmjad/AI | d5795cbb20784e0b324d98aa36d7ac384c818404 | 663efe0e81e346193a4ee57814f3bfb1a5eed120 | refs/heads/master | 2021-05-18T01:54:42.153293 | 2020-03-29T14:44:27 | 2020-03-29T14:44:27 | 251,054,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,509 | py | from collections import deque
class Node:
def __init__(self, state, action=-1, cost=0, parent=None):
self.State = state
self.Action = action
self.Cost = cost
self.Parent = parent
def __repr__(self):
return f"<Node {self.State}>"
def goal_test(node, goal, states):
for index in range(len(states)):
if goal == states[index]:
if node.State == index:
return True
else:
return False
MNT = str(input("enter header"))
MNT = MNT.split()
M = int(MNT[0])
N = int(MNT[1])
T = int(MNT[2])
states = []
Actions = []
transition_model = []
print("Enter States")
for i in range(M):
states.append(input())
print("Actions")
for i in range(N):
Actions.append(input())
print("Transition table")
for i in range(M):
row = []
row = input().split()
row = [int(z) for z in row]
transition_model.append(row)
def search_problem(problem):
for index in range(len(states)):
if problem[0] == states[index]:
FirstNode = Node(index)
frontier = deque([FirstNode])
explored = set()
exploredNode = set()
sol = []
while True:
if frontier is not None:
node = frontier.popleft()
explored.add(node.State)
if goal_test(node, problem[1], states):
print("ahtesham")
return sol
else:
state_of_current_node = node.State
children = transition_model[state_of_current_node]
for child in range(len(children)):
new_child_node = Node(int(children[child]), child, node.Cost + 1, node)
if new_child_node.State not in explored and new_child_node not in frontier:
sol.append(Actions[child])
if goal_test(new_child_node, problem[1], states):
return sol
frontier.append(new_child_node)
break
else:
return None
def main():
print("Enter Test case")
for x in range(T):
start_goal = input().split("\t")
solution = search_problem(start_goal)
for i in range(len(solution)):
print(solution[i], end="")
if i is not len(solution) - 1:
print("->", end="")
print("")
main() | [
"noreply@github.com"
] | FarzamAmjad.noreply@github.com |
facd26e72083591f20fd9947a7f6409b8c10f32a | 1a5fe9a728b786bf2c01fda26333c55c411be07b | /encryption.py | 0f864ac8fc8fce0b62570cc1fe81f99d6cd2fce4 | [] | no_license | atakanozguryildiz/HackerRank | 4fe19a9747a424f2a7a01519e0a7df950296da36 | 5cfad94d1578fe69d668d5fd51c989d8ef6b29fd | refs/heads/master | 2021-06-17T08:03:27.087746 | 2021-02-18T16:24:41 | 2021-02-18T16:24:41 | 163,420,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | import math
s = input()
s = s.replace(" ", "")
len_s = len(s)
sqrt_s = math.sqrt(len_s)
row_count = math.floor(sqrt_s)
col_count = math.ceil(sqrt_s)
while (row_count * col_count) < len_s:
if row_count <= col_count:
row_count += 1
else:
col_count += 1
matrix = []
for i in range(0, row_count):
start_index = i * col_count
end_index = start_index + col_count
row = s[start_index:end_index]
matrix.append(row)
result = ""
for i in range(0, col_count):
col_text = ""
for row in matrix:
if i < len(row):
result += row[i]
result += col_text + " "
result = result.strip()
print(result)
| [
"atakanozguryildiz@gmail.com"
] | atakanozguryildiz@gmail.com |
b194351bd7dd9b944f9c8ed6005429bb754a7c5e | a29332b283161e1b2cfbdaa0a7fe94e5c28491ec | /helpers/utils.py | fde6ba475d8cc15acbd1b7858d9e0a04e68f8d19 | [] | no_license | navee-hans/pytest-automation-framework | aa016b2c2a58a66f02eccba0dff6683876bc26d5 | e8a0736de00469e047bfc072e86f21535339df32 | refs/heads/main | 2023-08-24T05:26:11.321910 | 2021-10-18T23:58:33 | 2021-10-18T23:58:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | import csv
import os
import requests
import json
base_folder = os.path.abspath('.')
input_file = os.path.join(base_folder +'/datas', 'products.csv')
def readdatas():
datas = []
with open(input_file, newline='') as csvfile:
data = csv.reader(csvfile, delimiter='|')
next(data)
for row in data:
datas.append(row)
return datas
def getresponsetext(api_url):
response = requests.get(api_url)
response_data = json.loads(response.text)
return response_data
def getresponse(api_url):
response = requests.get(api_url)
return response | [
"noreply@github.com"
] | navee-hans.noreply@github.com |
980bda2b3c196114fb6af4ae73ed9164d4565ebd | 501aa2e8013205317273d6bb76c88b3d2eb572c6 | /tool/file_logger/__init__.py | c434415aaaa1cd1e5f22a5f5ffef1d47a7925736 | [] | no_license | leonW7/karonte | 6f9f6ecaa29adad69cf1c170984930f36f969204 | a47f6aa3d805e4afd1a0188a0a6273a66477e1f6 | refs/heads/master | 2020-09-16T17:55:05.742092 | 2020-02-16T04:34:07 | 2020-02-16T04:34:07 | 223,845,933 | 4 | 2 | null | 2020-02-14T10:25:40 | 2019-11-25T02:27:03 | Python | UTF-8 | Python | false | false | 26 | py | from file_logger import * | [
"nredini@cs.ucsb.edu"
] | nredini@cs.ucsb.edu |
89ebf639b716a0dc97632bb811ff597daf3970ee | a54fe8fa0b9d6eddd4b70d32ed74167092070c96 | /Introduction_to_Algorithm/Minimum_Spanning_Tree.py | d30ead72670925cfc1071775f7461fa9dfddb0f1 | [] | no_license | shiung0123/portfolio | 962d5db8674566d336890f22befc6d304bcf3cae | ea78344915bf4e7ea4ab8b33c0e889b1ac1d1414 | refs/heads/main | 2022-12-24T06:37:54.548593 | 2020-10-05T17:36:39 | 2020-10-05T17:36:39 | 301,310,136 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,014 | py | # 演算法分析機測
# 學號:10627116/10627123
# 姓名:許逸翔/黃致堯
# 中原大學資訊工程學系
import numpy as np
import heapq as hq
def main() :
case = 1
while True :
n, line = list(map(int, input().split()))
if not n : break
# 建立n*n的二維陣列,作為圖的資料結構
# 初始化維-1,若相連則設為w
dataset = np.full((n,n), -1)
for i in range(line) :
a, b, w = list(map(int, input().split()))
dataset[a-1][b-1] = dataset[b-1][a-1] = w
# 使用方法Kruskal's
# h 作為 priority queue
# ans 即為 MST 之和
# vertex 紀錄此點是否走訪過,用來取代 Disjoinset
h = []
ans = 0
vertex = np.zeros(n, dtype=int)
# 從第0的點開始
vertex[0] = 1
for i in range(n) :
if dataset[0][i] != -1 :
# 把此點所有可行Edge加入priority queue
hq.heappush( h, (dataset[0][i], i) )
# 持續直到所有點都走訪過
while ( not np.all(vertex) ) :
# 取出當前最小的Edge
cost, cur = hq.heappop(h)
# 判斷有無走過,確保不會情成cycle
if not vertex[cur] :
# 記錄此點以造訪
vertex[cur] = 1
# 加入MST cost
ans += cost
for i in range(n) :
if not vertex[i] and dataset[cur][i] != -1 :
# 把此點所有可行Edge加入priority queue
hq.heappush( h, (dataset[cur][i], i) )
# 印出答案
print("Case {i}\nMinimum Cost = {ans}".format(i = case, ans = ans))
print()
case += 1
main()
"""
4 4
1 2 10
1 3 8
2 4 5
3 4 2
5 7
1 2 2
1 4 10
1 5 6
2 3 5
2 5 9
3 5 8
4 5 12
9 14
1 2 4
2 3 8
3 4 7
4 5 9
5 6 10
6 7 2
7 8 1
8 1 8
2 8 11
3 9 2
9 8 7
9 7 6
3 6 4
4 6 14
0 0
ANS 15 23 37
"""
| [
"shiung0123@gmail.com"
] | shiung0123@gmail.com |
5148dd6adf2392e432f3676d1bcfe990e2e64cf1 | bfb54c196c0910b6c372828e18c9470122bf9354 | /DjangoFun/wsgi.py | f8647a9faec75ba06daa64d68a83ccbb475e4004 | [] | no_license | JustinBeckwith/DjangoFun | 4819050960172c9ae8777c298d0643372a81ec2e | ccdb6184fde364c094ed12436ec52f3630527d92 | refs/heads/master | 2016-09-06T17:52:53.133987 | 2012-05-27T06:20:24 | 2012-05-27T06:20:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,140 | py | """
WSGI config for DjangoFun project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DjangoFun.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| [
"justin.beckwith@gmail.com"
] | justin.beckwith@gmail.com |
de0d0583b9385cff0c2cff37b202e65b13ed1fa2 | 12a7c19b7db354e6027c63973b4980c9abdcb2fc | /projects/urls.py | 3327f8e6bdbd32d17dae5aa400acabe399a42dfa | [] | no_license | mkbaker/portfolio2021 | 30bc5b79a25f50efd20b91b414f64437802cdee7 | a797c2fc8710c8e29a94f4b72d8250d66ab830d8 | refs/heads/main | 2023-09-01T15:11:39.374779 | 2021-10-22T20:43:02 | 2021-10-22T20:43:02 | 419,454,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | from django.urls import path
from . import views
urlpatterns = [
path("", views.home, name="home"),
path("projects/", views.projects, name="projects_list"),
path("project/<slug>", views.project, name="project"),
path("contact/", views.contact, name="contact"),
]
| [
"milton.baker@bsci.com"
] | milton.baker@bsci.com |
16b93229b03936799fb366deb70beeb32959ddde | 16caebb320bb10499d3712bf0bdc07539a4d0007 | /objc/_AVFCore.py | 8eff0d83bfa6c2ce26f78a9b763e51d9f784ce49 | [] | no_license | swosnick/Apple-Frameworks-Python | 876d30f308a7ac1471b98a9da2fabd22f30c0fa5 | 751510137e9fa35cc806543db4e4415861d4f252 | refs/heads/master | 2022-12-08T07:08:40.154553 | 2020-09-04T17:36:24 | 2020-09-04T17:36:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,715 | py | '''
Classes from the 'AVFCore' framework.
'''
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
AVStreamDataParser = _Class('AVStreamDataParser')
AVStreamDataParserInternal = _Class('AVStreamDataParserInternal')
AVRouteDetector = _Class('AVRouteDetector')
AVRouteDetectorInternal = _Class('AVRouteDetectorInternal')
AVFigEndpointUIAgentOutputDeviceAuthorizationRequestImpl = _Class('AVFigEndpointUIAgentOutputDeviceAuthorizationRequestImpl')
AVFigEndpointUIAgentOutputDeviceAuthorizationSessionImpl = _Class('AVFigEndpointUIAgentOutputDeviceAuthorizationSessionImpl')
AVContentKeyReportGroup = _Class('AVContentKeyReportGroup')
AVContentKeySession = _Class('AVContentKeySession')
AVContentKeySessionInternal = _Class('AVContentKeySessionInternal')
AVContentKeyResponseInternal = _Class('AVContentKeyResponseInternal')
AVContentKeyResponse = _Class('AVContentKeyResponse')
AVContentKeyResponseAuthorizationToken = _Class('AVContentKeyResponseAuthorizationToken')
AVContentKeyResponseClearKey = _Class('AVContentKeyResponseClearKey')
AVContentKeyResponseFairPlayStreaming = _Class('AVContentKeyResponseFairPlayStreaming')
AVContentKeyRequest = _Class('AVContentKeyRequest')
AVPersistableContentKeyRequest = _Class('AVPersistableContentKeyRequest')
AVContentKeyRequestInternal = _Class('AVContentKeyRequestInternal')
AVHUDStringGenerator = _Class('AVHUDStringGenerator')
AVMutableMovieInternal = _Class('AVMutableMovieInternal')
AVMovieInternal = _Class('AVMovieInternal')
AVMediaDataStorage = _Class('AVMediaDataStorage')
AVMediaDataStorageInternal = _Class('AVMediaDataStorageInternal')
AVFigEndpointUIAgentOutputContextManagerImpl = _Class('AVFigEndpointUIAgentOutputContextManagerImpl')
AVFigCommChannelUUIDOutputContextCommunicationChannelImpl = _Class('AVFigCommChannelUUIDOutputContextCommunicationChannelImpl')
AVFigRouteDescriptorFigRoutingContextOutputDeviceTranslator = _Class('AVFigRouteDescriptorFigRoutingContextOutputDeviceTranslator')
AVFigEndpointFigRoutingContextOutputDeviceTranslator = _Class('AVFigEndpointFigRoutingContextOutputDeviceTranslator')
AVFigCommChannelUUIDCommunicationChannelManager = _Class('AVFigCommChannelUUIDCommunicationChannelManager')
AVFigRoutingContextOutputContextImpl = _Class('AVFigRoutingContextOutputContextImpl')
AVVideoCompositionRenderContext = _Class('AVVideoCompositionRenderContext')
AVVideoCompositionRenderContextInternal = _Class('AVVideoCompositionRenderContextInternal')
AVKeyPathFlattenerKVOIntrospectionShim = _Class('AVKeyPathFlattenerKVOIntrospectionShim')
AVKeyPathFlattener = _Class('AVKeyPathFlattener')
AVTwoPartKeyPath = _Class('AVTwoPartKeyPath')
AVKeyPathDependency = _Class('AVKeyPathDependency')
AVKeyPathDependencyManager = _Class('AVKeyPathDependencyManager')
AVWeakObservableCallbackCancellationHelper = _Class('AVWeakObservableCallbackCancellationHelper')
AVWeaklyObservedObjectClientBlockKVONotifier = _Class('AVWeaklyObservedObjectClientBlockKVONotifier')
AVClientBlockKVONotifier = _Class('AVClientBlockKVONotifier')
AVWeakObservationBlockFactory = _Class('AVWeakObservationBlockFactory')
AVObservationBlockFactory = _Class('AVObservationBlockFactory')
AVKVODispatcher = _Class('AVKVODispatcher')
AVAsynchronousVideoCompositionRequest = _Class('AVAsynchronousVideoCompositionRequest')
AVAsynchronousVideoCompositionRequestInternal = _Class('AVAsynchronousVideoCompositionRequestInternal')
AVFigEndpointOutputDeviceDiscoverySessionAvailableOutputDevicesImpl = _Class('AVFigEndpointOutputDeviceDiscoverySessionAvailableOutputDevicesImpl')
AVCustomVideoCompositorSession = _Class('AVCustomVideoCompositorSession')
AVExternalDevice = _Class('AVExternalDevice')
AVExternalDeviceTurnByTurnToken = _Class('AVExternalDeviceTurnByTurnToken')
AVExternalDeviceScreenBorrowToken = _Class('AVExternalDeviceScreenBorrowToken')
AVExternalDeviceInternal = _Class('AVExternalDeviceInternal')
AVExternalDeviceIcon = _Class('AVExternalDeviceIcon')
AVExternalDeviceIconInternal = _Class('AVExternalDeviceIconInternal')
AVExternalDeviceHID = _Class('AVExternalDeviceHID')
AVExternalDeviceHIDInternal = _Class('AVExternalDeviceHIDInternal')
AVMediaSelection = _Class('AVMediaSelection')
AVMutableMediaSelection = _Class('AVMutableMediaSelection')
AVMediaSelectionInternal = _Class('AVMediaSelectionInternal')
AVIOKitOutputSettingsAssistantVideoEncoderCapabilities = _Class('AVIOKitOutputSettingsAssistantVideoEncoderCapabilities')
AVExportSettingsOutputSettingsAssistantVideoSettingsAdjuster = _Class('AVExportSettingsOutputSettingsAssistantVideoSettingsAdjuster')
AVExportSettingsOutputSettingsAssistantBaseSettings = _Class('AVExportSettingsOutputSettingsAssistantBaseSettings')
AVOutputSettingsAssistant = _Class('AVOutputSettingsAssistant')
AVOutputSettingsAssistantInternal = _Class('AVOutputSettingsAssistantInternal')
AVCoreImageFilterCustomVideoCompositor = _Class('AVCoreImageFilterCustomVideoCompositor')
AVCoreImageFilterVideoCompositionInstruction = _Class('AVCoreImageFilterVideoCompositionInstruction')
AVAsynchronousCIImageFilteringRequest = _Class('AVAsynchronousCIImageFilteringRequest')
AVAsynchronousCIImageFilteringRequestInternal = _Class('AVAsynchronousCIImageFilteringRequestInternal')
AVFigRouteDescriptorOutputDeviceDiscoverySessionAvailableOutputDevicesImpl = _Class('AVFigRouteDescriptorOutputDeviceDiscoverySessionAvailableOutputDevicesImpl')
AVFigRouteDiscovererOutputDeviceDiscoverySessionImpl = _Class('AVFigRouteDiscovererOutputDeviceDiscoverySessionImpl')
AVFigRouteDiscovererOutputDeviceDiscoverySessionFactory = _Class('AVFigRouteDiscovererOutputDeviceDiscoverySessionFactory')
AVPlayerItemLegibleOutputInternal = _Class('AVPlayerItemLegibleOutputInternal')
AVPlayerItemLegibleOutputRealDependencyFactory = _Class('AVPlayerItemLegibleOutputRealDependencyFactory')
AVPlayerMediaSelectionCriteria = _Class('AVPlayerMediaSelectionCriteria')
AVTextStyleRule = _Class('AVTextStyleRule')
AVTextStyleRuleInternal = _Class('AVTextStyleRuleInternal')
AVRemoteFigSampleBufferRenderSynchronizerFactory = _Class('AVRemoteFigSampleBufferRenderSynchronizerFactory')
AVSampleBufferRenderSynchronizer = _Class('AVSampleBufferRenderSynchronizer')
AVSampleBufferRenderSynchronizerInternal = _Class('AVSampleBufferRenderSynchronizerInternal')
AVAssetResourceLoadingRequestor = _Class('AVAssetResourceLoadingRequestor')
AVAssetResourceLoadingRequestorInternal = _Class('AVAssetResourceLoadingRequestorInternal')
AVAssetResourceLoadingRequest = _Class('AVAssetResourceLoadingRequest')
AVAssetResourceRenewalRequest = _Class('AVAssetResourceRenewalRequest')
AVAssetResourceLoadingRequestInternal = _Class('AVAssetResourceLoadingRequestInternal')
AVAssetResourceLoadingDataRequest = _Class('AVAssetResourceLoadingDataRequest')
AVAssetResourceLoadingDataRequestInternal = _Class('AVAssetResourceLoadingDataRequestInternal')
AVAssetResourceLoadingContentInformationRequest = _Class('AVAssetResourceLoadingContentInformationRequest')
AVAssetResourceLoadingContentInformationRequestInternal = _Class('AVAssetResourceLoadingContentInformationRequestInternal')
AVAssetResourceLoader = _Class('AVAssetResourceLoader')
AVAssetResourceLoaderInternal = _Class('AVAssetResourceLoaderInternal')
AVAssetResourceLoaderRemoteHandlerContext = _Class('AVAssetResourceLoaderRemoteHandlerContext')
AVPixelBufferAttributeMediator = _Class('AVPixelBufferAttributeMediator')
AVSampleBufferDisplayLayerInternal = _Class('AVSampleBufferDisplayLayerInternal')
AVAPSyncControllerOutputDeviceImpl = _Class('AVAPSyncControllerOutputDeviceImpl')
AVPlayerItemVideoOutputInternal = _Class('AVPlayerItemVideoOutputInternal')
AVPlayerItemOutputInternal = _Class('AVPlayerItemOutputInternal')
AVAssetDownloadSession = _Class('AVAssetDownloadSession')
AVAssetDownloadSessionInternal = _Class('AVAssetDownloadSessionInternal')
AVFloat64Range = _Class('AVFloat64Range')
AVAudioSettingsValueConstrainer = _Class('AVAudioSettingsValueConstrainer')
AVAssetSegmentReport = _Class('AVAssetSegmentReport')
AVAssetSegmentTrackReport = _Class('AVAssetSegmentTrackReport')
AVAssetSegmentReportSampleInformation = _Class('AVAssetSegmentReportSampleInformation')
AVMediaFileOutputSettingsValidator = _Class('AVMediaFileOutputSettingsValidator')
AVGenericMediaFileOutputSettingsValidator = _Class('AVGenericMediaFileOutputSettingsValidator')
AVISOOutputSettingsValidator = _Class('AVISOOutputSettingsValidator')
AVAIFCOutputSettingsValidator = _Class('AVAIFCOutputSettingsValidator')
AVAIFFOutputSettingsValidator = _Class('AVAIFFOutputSettingsValidator')
AVWAVEOutputSettingsValidator = _Class('AVWAVEOutputSettingsValidator')
AVMediaFileType = _Class('AVMediaFileType')
AVDisplayCriteria = _Class('AVDisplayCriteria')
AVDisplayCriteriaInternal = _Class('AVDisplayCriteriaInternal')
AVFormatSpecification = _Class('AVFormatSpecification')
AVOutputSettings = _Class('AVOutputSettings')
AVVideoOutputSettings = _Class('AVVideoOutputSettings')
AVAVVideoSettingsVideoOutputSettings = _Class('AVAVVideoSettingsVideoOutputSettings')
AVPixelBufferAttributesVideoOutputSettings = _Class('AVPixelBufferAttributesVideoOutputSettings')
AVAudioOutputSettings = _Class('AVAudioOutputSettings')
AVAVAudioSettingsAudioOutputSettings = _Class('AVAVAudioSettingsAudioOutputSettings')
AVMediaSelectionOptionInternal = _Class('AVMediaSelectionOptionInternal')
AVMediaSelectionGroupInternal = _Class('AVMediaSelectionGroupInternal')
AVAudioSessionMediaPlayerOnly = _Class('AVAudioSessionMediaPlayerOnly')
AVAudioSessionMediaPlayerOnlyInternal = _Class('AVAudioSessionMediaPlayerOnlyInternal')
AVPlayerItemErrorLogEvent = _Class('AVPlayerItemErrorLogEvent')
AVPlayerItemErrorLogEventInternal = _Class('AVPlayerItemErrorLogEventInternal')
AVPlayerItemErrorLog = _Class('AVPlayerItemErrorLog')
AVPlayerItemErrorLogInternal = _Class('AVPlayerItemErrorLogInternal')
AVPlayerItemAccessLogEvent = _Class('AVPlayerItemAccessLogEvent')
AVPlayerItemAccessLogEventInternal = _Class('AVPlayerItemAccessLogEventInternal')
AVPlayerItemAccessLog = _Class('AVPlayerItemAccessLog')
AVPlayerItemAccessLogInternal = _Class('AVPlayerItemAccessLogInternal')
AVAssetDownloadCacheInternal = _Class('AVAssetDownloadCacheInternal')
AVManagedAssetCacheInternal = _Class('AVManagedAssetCacheInternal')
AVAssetCache = _Class('AVAssetCache')
AVAssetDownloadCache = _Class('AVAssetDownloadCache')
AVManagedAssetCache = _Class('AVManagedAssetCache')
AVDateRangeMetadataGroupInternal = _Class('AVDateRangeMetadataGroupInternal')
AVTimedMetadataGroupInternal = _Class('AVTimedMetadataGroupInternal')
AVMetadataGroup = _Class('AVMetadataGroup')
AVDateRangeMetadataGroup = _Class('AVDateRangeMetadataGroup')
AVMutableDateRangeMetadataGroup = _Class('AVMutableDateRangeMetadataGroup')
AVTimedMetadataGroup = _Class('AVTimedMetadataGroup')
AVMutableTimedMetadataGroup = _Class('AVMutableTimedMetadataGroup')
AVDispatchOnce = _Class('AVDispatchOnce')
AVEventWaiter = _Class('AVEventWaiter')
AVAPSyncOutputDeviceCommunicationChannelImpl = _Class('AVAPSyncOutputDeviceCommunicationChannelImpl')
AVAPSyncOutputDeviceCommunicationChannelManager = _Class('AVAPSyncOutputDeviceCommunicationChannelManager')
AVAssetTrackGroup = _Class('AVAssetTrackGroup')
AVAssetTrackGroupInternal = _Class('AVAssetTrackGroupInternal')
AVPlayerItemMediaDataCollectorInternal = _Class('AVPlayerItemMediaDataCollectorInternal')
AVCMNotificationDispatcherListenerKey = _Class('AVCMNotificationDispatcherListenerKey')
AVCMNotificationDispatcher = _Class('AVCMNotificationDispatcher')
AVAPSyncControllerRemoteOutputDeviceGroupImpl = _Class('AVAPSyncControllerRemoteOutputDeviceGroupImpl')
AVCallbackContextRegistry = _Class('AVCallbackContextRegistry')
AVFigRoutingContextCommandOutputDeviceConfiguration = _Class('AVFigRoutingContextCommandOutputDeviceConfiguration')
AVFigRoutingContextCommandOutputDeviceConfigurationModification = _Class('AVFigRoutingContextCommandOutputDeviceConfigurationModification')
AVWeakReference = _Class('AVWeakReference')
AVRetainReleaseWeakReference = _Class('AVRetainReleaseWeakReference')
AVResult = _Class('AVResult')
AVAssetInspectorLoader = _Class('AVAssetInspectorLoader')
AVUnreachableAssetInspectorLoader = _Class('AVUnreachableAssetInspectorLoader')
AVFigAssetInspectorLoader = _Class('AVFigAssetInspectorLoader')
AVAssetMakeReadyForInspectionLoader = _Class('AVAssetMakeReadyForInspectionLoader')
AVPlaybackItemInspectorLoader = _Class('AVPlaybackItemInspectorLoader')
AVAssetSynchronousInspectorLoader = _Class('AVAssetSynchronousInspectorLoader')
AVDepartureAnnouncingObjectMonitor = _Class('AVDepartureAnnouncingObjectMonitor')
AVGlobalOperationQueue = _Class('AVGlobalOperationQueue')
AVWeakReferencingDelegateStorage = _Class('AVWeakReferencingDelegateStorage')
AVScheduledAudioParameters = _Class('AVScheduledAudioParameters')
AVMutableScheduledAudioParameters = _Class('AVMutableScheduledAudioParameters')
AVScheduledAudioParametersInternal = _Class('AVScheduledAudioParametersInternal')
AVVideoPerformanceMetrics = _Class('AVVideoPerformanceMetrics')
AVVideoPerformanceMetricsInternal = _Class('AVVideoPerformanceMetricsInternal')
AVMutableMovieTrackInternal = _Class('AVMutableMovieTrackInternal')
AVMovieTrackInternal = _Class('AVMovieTrackInternal')
AVSystemRemotePoolOutputDeviceCommunicationChannelImpl = _Class('AVSystemRemotePoolOutputDeviceCommunicationChannelImpl')
AVSystemRemotePoolOutputDeviceCommunicationChannelManager = _Class('AVSystemRemotePoolOutputDeviceCommunicationChannelManager')
AVOutputContextManager = _Class('AVOutputContextManager')
AVOutputContextManagerInternal = _Class('AVOutputContextManagerInternal')
AVOutputContextDestinationChange = _Class('AVOutputContextDestinationChange')
AVOutputContextDestinationChangeInternal = _Class('AVOutputContextDestinationChangeInternal')
AVOutputContextCommunicationChannel = _Class('AVOutputContextCommunicationChannel')
AVOutputContextCommunicationChannelInternal = _Class('AVOutputContextCommunicationChannelInternal')
AVOutputContext = _Class('AVOutputContext')
AVOutputContextInternal = _Class('AVOutputContextInternal')
AVRunLoopConditionRunLoopState = _Class('AVRunLoopConditionRunLoopState')
AVAudioMixInputParametersInternal = _Class('AVAudioMixInputParametersInternal')
AVAudioMixInputParameters = _Class('AVAudioMixInputParameters')
AVMutableAudioMixInputParameters = _Class('AVMutableAudioMixInputParameters')
AVAudioMixInternal = _Class('AVAudioMixInternal')
AVAudioMix = _Class('AVAudioMix')
AVMutableAudioMix = _Class('AVMutableAudioMix')
AVAssetCustomURLAuthentication = _Class('AVAssetCustomURLAuthentication')
AVAssetCustomURLBridgeForNSURLProtocol = _Class('AVAssetCustomURLBridgeForNSURLProtocol')
AVAssetCustomURLBridgeForNSURLSession = _Class('AVAssetCustomURLBridgeForNSURLSession')
AVAssetCustomURLRequest = _Class('AVAssetCustomURLRequest')
AVNSURLProtocolRequest = _Class('AVNSURLProtocolRequest')
AVFigEndpointSecondDisplayModeToken = _Class('AVFigEndpointSecondDisplayModeToken')
AVFigEndpointOutputDeviceImpl = _Class('AVFigEndpointOutputDeviceImpl')
AVFigRouteDescriptorOutputDeviceImpl = _Class('AVFigRouteDescriptorOutputDeviceImpl')
AVClusterComponentOutputDeviceDescription = _Class('AVClusterComponentOutputDeviceDescription')
AVOutputDeviceCommunicationChannel = _Class('AVOutputDeviceCommunicationChannel')
AVLocalOutputDeviceImpl = _Class('AVLocalOutputDeviceImpl')
AVPairedDevice = _Class('AVPairedDevice')
AVPairedDeviceInternal = _Class('AVPairedDeviceInternal')
AVOutputDeviceAuthorizedPeer = _Class('AVOutputDeviceAuthorizedPeer')
AVOutputDeviceAuthorizedPeerInternal = _Class('AVOutputDeviceAuthorizedPeerInternal')
AVOutputDeviceLegacyFrecentsWriter = _Class('AVOutputDeviceLegacyFrecentsWriter')
AVOutputDeviceLegacyFrecentsReader = _Class('AVOutputDeviceLegacyFrecentsReader')
AVOutputDeviceFrecentsWriter = _Class('AVOutputDeviceFrecentsWriter')
AVOutputDeviceFrecentsReader = _Class('AVOutputDeviceFrecentsReader')
AVOutputDeviceFrecencyManager = _Class('AVOutputDeviceFrecencyManager')
AVOutputDevice = _Class('AVOutputDevice')
AVOutputDeviceInternal = _Class('AVOutputDeviceInternal')
AVMediaDataRequester = _Class('AVMediaDataRequester')
AVSerializedMostlySynchronousReentrantBlockScheduler = _Class('AVSerializedMostlySynchronousReentrantBlockScheduler')
AVSynchronousBlockScheduler = _Class('AVSynchronousBlockScheduler')
AVFragmentedMovieTrackInternal = _Class('AVFragmentedMovieTrackInternal')
AVExecutionEnvironment = _Class('AVExecutionEnvironment')
AVSampleBufferVideoOutput = _Class('AVSampleBufferVideoOutput')
AVSampleBufferVideoOutputInternal = _Class('AVSampleBufferVideoOutputInternal')
AVExternalPlaybackMonitor = _Class('AVExternalPlaybackMonitor')
AVExternalPlaybackMonitorInternal = _Class('AVExternalPlaybackMonitorInternal')
AVTimeFormatterInternal = _Class('AVTimeFormatterInternal')
AVOutputDeviceAuthorizationRequest = _Class('AVOutputDeviceAuthorizationRequest')
AVOutputDeviceAuthorizationRequestInternal = _Class('AVOutputDeviceAuthorizationRequestInternal')
AVOutputDeviceAuthorizationSession = _Class('AVOutputDeviceAuthorizationSession')
AVOutputDeviceAuthorizationSessionInternal = _Class('AVOutputDeviceAuthorizationSessionInternal')
AVVideoCompositionRenderHint = _Class('AVVideoCompositionRenderHint')
AVVideoCompositionRenderHintInternal = _Class('AVVideoCompositionRenderHintInternal')
AVPlayerItemOutput = _Class('AVPlayerItemOutput')
AVPlayerItemLegibleOutput = _Class('AVPlayerItemLegibleOutput')
AVPlayerItemVideoOutput = _Class('AVPlayerItemVideoOutput')
AVPlayerItemMetadataOutput = _Class('AVPlayerItemMetadataOutput')
AVPlayerItemMetadataOutputInternal = _Class('AVPlayerItemMetadataOutputInternal')
AVOutputDeviceGroupMembershipChangeResult = _Class('AVOutputDeviceGroupMembershipChangeResult')
AVOutputDeviceGroup = _Class('AVOutputDeviceGroup')
AVExternalProtectionMonitor = _Class('AVExternalProtectionMonitor')
AVExternalProtectionMonitorInternal = _Class('AVExternalProtectionMonitorInternal')
AVFragmentedAssetTrackInternal = _Class('AVFragmentedAssetTrackInternal')
AVFragmentedAssetMinder = _Class('AVFragmentedAssetMinder')
AVFragmentedMovieMinder = _Class('AVFragmentedMovieMinder')
AVFragmentedAssetMinderInternal = _Class('AVFragmentedAssetMinderInternal')
AVFragmentedAssetInternal = _Class('AVFragmentedAssetInternal')
AVSampleBufferAudioRenderer = _Class('AVSampleBufferAudioRenderer')
AVSampleBufferAudioRendererInternal = _Class('AVSampleBufferAudioRendererInternal')
AVAssetWriterInputMetadataAdaptor = _Class('AVAssetWriterInputMetadataAdaptor')
AVAssetWriterInputMetadataAdaptorInternal = _Class('AVAssetWriterInputMetadataAdaptorInternal')
AVSynchronizedLayerInternal = _Class('AVSynchronizedLayerInternal')
AVAudioMixSweepFilterEffectParametersInternal = _Class('AVAudioMixSweepFilterEffectParametersInternal')
AVAudioMixEffectParameters = _Class('AVAudioMixEffectParameters')
AVAudioMixSweepFilterEffectParameters = _Class('AVAudioMixSweepFilterEffectParameters')
AVAssetExportSession = _Class('AVAssetExportSession')
AVAssetExportSessionInternal = _Class('AVAssetExportSessionInternal')
AVAssetProxyInternal = _Class('AVAssetProxyInternal')
AVVideoCompositionCoreAnimationToolInternal = _Class('AVVideoCompositionCoreAnimationToolInternal')
AVVideoCompositionCoreAnimationTool = _Class('AVVideoCompositionCoreAnimationTool')
AVVideoComposition = _Class('AVVideoComposition')
AVMutableVideoComposition = _Class('AVMutableVideoComposition')
AVVideoCompositionInternal = _Class('AVVideoCompositionInternal')
AVVideoCompositionLayerInstruction = _Class('AVVideoCompositionLayerInstruction')
AVMutableVideoCompositionLayerInstruction = _Class('AVMutableVideoCompositionLayerInstruction')
AVVideoCompositionLayerInstructionInternal = _Class('AVVideoCompositionLayerInstructionInternal')
AVVideoCompositionInstruction = _Class('AVVideoCompositionInstruction')
AVMutableVideoCompositionInstruction = _Class('AVMutableVideoCompositionInstruction')
AVVideoCompositionInstructionInternal = _Class('AVVideoCompositionInstructionInternal')
AVAssetWriterInputPassDescription = _Class('AVAssetWriterInputPassDescription')
AVAssetWriterInputPassDescriptionInternal = _Class('AVAssetWriterInputPassDescriptionInternal')
AVAssetWriterInputPassDescriptionResponder = _Class('AVAssetWriterInputPassDescriptionResponder')
AVAssetWriterInputMediaDataRequester = _Class('AVAssetWriterInputMediaDataRequester')
AVFigAssetWriterTrack = _Class('AVFigAssetWriterTrack')
AVFigAssetWriterGenericTrack = _Class('AVFigAssetWriterGenericTrack')
AVFigAssetWriterVideoTrack = _Class('AVFigAssetWriterVideoTrack')
AVFigAssetWriterAudioTrack = _Class('AVFigAssetWriterAudioTrack')
AVAssetWriterInputPixelBufferAdaptor = _Class('AVAssetWriterInputPixelBufferAdaptor')
AVAssetWriterInputPixelBufferAdaptorInternal = _Class('AVAssetWriterInputPixelBufferAdaptorInternal')
AVAssetWriterInputHelper = _Class('AVAssetWriterInputHelper')
AVAssetWriterInputTerminalHelper = _Class('AVAssetWriterInputTerminalHelper')
AVAssetWriterInputNoMorePassesHelper = _Class('AVAssetWriterInputNoMorePassesHelper')
AVAssetWriterInputInterPassAnalysisHelper = _Class('AVAssetWriterInputInterPassAnalysisHelper')
AVAssetWriterInputWritingHelper = _Class('AVAssetWriterInputWritingHelper')
AVAssetWriterInputUnknownHelper = _Class('AVAssetWriterInputUnknownHelper')
AVAssetWriterInput = _Class('AVAssetWriterInput')
AVAssetWriterInputInternal = _Class('AVAssetWriterInputInternal')
AVAssetWriterInputConfigurationState = _Class('AVAssetWriterInputConfigurationState')
AVRoutingSessionDestination = _Class('AVRoutingSessionDestination')
AVRoutingSessionDestinationInternal = _Class('AVRoutingSessionDestinationInternal')
AVRoutingSession = _Class('AVRoutingSession')
AVRoutingSessionInternal = _Class('AVRoutingSessionInternal')
AVRoutingSessionManager = _Class('AVRoutingSessionManager')
AVRoutingSessionManagerInternal = _Class('AVRoutingSessionManagerInternal')
AVPlayerItemMediaDataCollector = _Class('AVPlayerItemMediaDataCollector')
AVPlayerItemMetadataCollector = _Class('AVPlayerItemMetadataCollector')
AVPlayerItemMetadataCollectorInternal = _Class('AVPlayerItemMetadataCollectorInternal')
AVTimebaseObserver = _Class('AVTimebaseObserver')
AVOnceTimebaseObserver = _Class('AVOnceTimebaseObserver')
AVOccasionalTimebaseObserver = _Class('AVOccasionalTimebaseObserver')
AVPeriodicTimebaseObserver = _Class('AVPeriodicTimebaseObserver')
AVMediaSelectionOption = _Class('AVMediaSelectionOption')
AVMediaSelectionNilOption = _Class('AVMediaSelectionNilOption')
AVMediaSelectionKeyValueOption = _Class('AVMediaSelectionKeyValueOption')
AVMediaSelectionTrackOption = _Class('AVMediaSelectionTrackOption')
AVAssetWriterInputSelectionOption = _Class('AVAssetWriterInputSelectionOption')
AVMediaSelectionGroup = _Class('AVMediaSelectionGroup')
AVAssetMediaSelectionGroup = _Class('AVAssetMediaSelectionGroup')
AVAssetWriterInputGroup = _Class('AVAssetWriterInputGroup')
AVAssetWriterInputGroupInternal = _Class('AVAssetWriterInputGroupInternal')
AVFragmentedMediaDataReport = _Class('AVFragmentedMediaDataReport')
AVFragmentedMediaDataReportInternal = _Class('AVFragmentedMediaDataReportInternal')
AVAssetWriterFigAssetWriterNotificationHandler = _Class('AVAssetWriterFigAssetWriterNotificationHandler')
AVAssetWriterHelper = _Class('AVAssetWriterHelper')
AVAssetWriterTerminalHelper = _Class('AVAssetWriterTerminalHelper')
AVAssetWriterClientInitiatedTerminalHelper = _Class('AVAssetWriterClientInitiatedTerminalHelper')
AVAssetWriterFailedTerminalHelper = _Class('AVAssetWriterFailedTerminalHelper')
AVAssetWriterFinishWritingHelper = _Class('AVAssetWriterFinishWritingHelper')
AVAssetWriterWritingHelper = _Class('AVAssetWriterWritingHelper')
AVAssetWriterUnknownHelper = _Class('AVAssetWriterUnknownHelper')
AVAssetWriter = _Class('AVAssetWriter')
AVAssetWriterInternal = _Class('AVAssetWriterInternal')
AVAssetWriterConfigurationState = _Class('AVAssetWriterConfigurationState')
AVAssetReaderSampleReferenceOutputInternal = _Class('AVAssetReaderSampleReferenceOutputInternal')
AVAssetReaderVideoCompositionOutputInternal = _Class('AVAssetReaderVideoCompositionOutputInternal')
AVAssetReaderAudioMixOutputInternal = _Class('AVAssetReaderAudioMixOutputInternal')
AVAssetReaderTrackOutputInternal = _Class('AVAssetReaderTrackOutputInternal')
AVAssetReaderOutput = _Class('AVAssetReaderOutput')
AVAssetReaderSampleReferenceOutput = _Class('AVAssetReaderSampleReferenceOutput')
AVAssetReaderVideoCompositionOutput = _Class('AVAssetReaderVideoCompositionOutput')
AVAssetReaderAudioMixOutput = _Class('AVAssetReaderAudioMixOutput')
AVAssetReaderTrackOutput = _Class('AVAssetReaderTrackOutput')
AVAssetReaderOutputInternal = _Class('AVAssetReaderOutputInternal')
AVAssetReader = _Class('AVAssetReader')
AVAssetReaderInternal = _Class('AVAssetReaderInternal')
AVAssetTrackSegment = _Class('AVAssetTrackSegment')
AVCompositionTrackSegment = _Class('AVCompositionTrackSegment')
AVCompositionTrackSegmentInternal = _Class('AVCompositionTrackSegmentInternal')
AVMutableCompositionTrackInternal = _Class('AVMutableCompositionTrackInternal')
AVCompositionTrackInternal = _Class('AVCompositionTrackInternal')
AVCompositionTrackFormatDescriptionReplacement = _Class('AVCompositionTrackFormatDescriptionReplacement')
AVFigObjectInspector = _Class('AVFigObjectInspector')
AVAssetTrackInspector = _Class('AVAssetTrackInspector')
AVStreamDataAssetTrackInspector = _Class('AVStreamDataAssetTrackInspector')
AVPlaybackItemTrackInspector = _Class('AVPlaybackItemTrackInspector')
AVFigAssetTrackInspector = _Class('AVFigAssetTrackInspector')
AVTrackReaderInspector = _Class('AVTrackReaderInspector')
AVCompositionTrackReaderInspector = _Class('AVCompositionTrackReaderInspector')
AVAssetInspector = _Class('AVAssetInspector')
AVStreamDataAssetInspector = _Class('AVStreamDataAssetInspector')
AVFigAssetInspector = _Class('AVFigAssetInspector')
AVStreamingResourceInspector = _Class('AVStreamingResourceInspector')
AVPlaybackItemInspector = _Class('AVPlaybackItemInspector')
AVFormatReaderInspector = _Class('AVFormatReaderInspector')
AVCompositionFormatReaderInspector = _Class('AVCompositionFormatReaderInspector')
AVMutableCompositionInternal = _Class('AVMutableCompositionInternal')
AVCompositionInternal = _Class('AVCompositionInternal')
AVOutputDeviceDiscoverySessionAvailableOutputDevices = _Class('AVOutputDeviceDiscoverySessionAvailableOutputDevices')
AVEmptyOutputDeviceDiscoverySessionAvailableOutputDevices = _Class('AVEmptyOutputDeviceDiscoverySessionAvailableOutputDevices')
AVOutputDeviceDiscoverySession = _Class('AVOutputDeviceDiscoverySession')
AVOutputDeviceDiscoverySessionAvailableOutputDevicesInternal = _Class('AVOutputDeviceDiscoverySessionAvailableOutputDevicesInternal')
AVOutputDeviceDiscoverySessionInternal = _Class('AVOutputDeviceDiscoverySessionInternal')
AVQueuePlayerInternal = _Class('AVQueuePlayerInternal')
AVAssetDownloadStorageManagementPolicyInternal = _Class('AVAssetDownloadStorageManagementPolicyInternal')
AVAssetDownloadStorageManagementPolicy = _Class('AVAssetDownloadStorageManagementPolicy')
AVMutableAssetDownloadStorageManagementPolicy = _Class('AVMutableAssetDownloadStorageManagementPolicy')
AVAssetDownloadStorageManager = _Class('AVAssetDownloadStorageManager')
AVPlayerItemTrack = _Class('AVPlayerItemTrack')
AVPlayerItemTrackInternal = _Class('AVPlayerItemTrackInternal')
AVPlayerLoggingIdentifier = _Class('AVPlayerLoggingIdentifier')
AVPlayerLoggingIdentifierInternal = _Class('AVPlayerLoggingIdentifierInternal')
AVAssetLoggingIdentifier = _Class('AVAssetLoggingIdentifier')
AVAssetLoggingIdentifierInternal = _Class('AVAssetLoggingIdentifierInternal')
AVSpecifiedLoggingIdentifier = _Class('AVSpecifiedLoggingIdentifier')
AVSpecifiedLoggingIdentifierInternal = _Class('AVSpecifiedLoggingIdentifierInternal')
AVPlayerConnection = _Class('AVPlayerConnection')
AVPlayerItem = _Class('AVPlayerItem')
AVPlayerItemInternal = _Class('AVPlayerItemInternal')
AVOutputContextLocalOutputDeviceGroupImpl = _Class('AVOutputContextLocalOutputDeviceGroupImpl')
AVPlayerQueueModificationDescription = _Class('AVPlayerQueueModificationDescription')
AVPlayer = _Class('AVPlayer')
AVQueuePlayer = _Class('AVQueuePlayer')
AVPlayerInternal = _Class('AVPlayerInternal')
AVAssetTrack = _Class('AVAssetTrack')
AVMovieTrack = _Class('AVMovieTrack')
AVMutableMovieTrack = _Class('AVMutableMovieTrack')
AVFragmentedMovieTrack = _Class('AVFragmentedMovieTrack')
AVFragmentedAssetTrack = _Class('AVFragmentedAssetTrack')
AVCompositionTrack = _Class('AVCompositionTrack')
AVMutableCompositionTrack = _Class('AVMutableCompositionTrack')
AVAssetTrackInternal = _Class('AVAssetTrackInternal')
AVAssetReaderOutputMetadataAdaptor = _Class('AVAssetReaderOutputMetadataAdaptor')
AVAssetReaderOutputMetadataAdaptorInternal = _Class('AVAssetReaderOutputMetadataAdaptorInternal')
AVAssetImageGenerator = _Class('AVAssetImageGenerator')
AVAssetImageGeneratorInternal = _Class('AVAssetImageGeneratorInternal')
AVURLAssetItemProviderData = _Class('AVURLAssetItemProviderData')
AVAssetClientURLRequestHelper = _Class('AVAssetClientURLRequestHelper')
AVURLAssetInternal = _Class('AVURLAssetInternal')
AVAssetFragment = _Class('AVAssetFragment')
AVAssetFragmentInternal = _Class('AVAssetFragmentInternal')
AVAsset = _Class('AVAsset')
AVStreamDataAsset = _Class('AVStreamDataAsset')
AVMovie = _Class('AVMovie')
AVMutableMovie = _Class('AVMutableMovie')
AVFragmentedMovie = _Class('AVFragmentedMovie')
AVAssetProxy = _Class('AVAssetProxy')
AVComposition = _Class('AVComposition')
AVMutableComposition = _Class('AVMutableComposition')
AVDataAsset = _Class('AVDataAsset')
AVURLAsset = _Class('AVURLAsset')
AVStreamDataInspectionOnlyAsset = _Class('AVStreamDataInspectionOnlyAsset')
AVFragmentedAsset = _Class('AVFragmentedAsset')
AVAssetInternal = _Class('AVAssetInternal')
AVMetadataItemFilterInternal = _Class('AVMetadataItemFilterInternal')
AVMetadataItemFilter = _Class('AVMetadataItemFilter')
AVMetadataItemFilterForSharing = _Class('AVMetadataItemFilterForSharing')
AVChapterMetadataItemInternal = _Class('AVChapterMetadataItemInternal')
AVMetadataItemValueRequest = _Class('AVMetadataItemValueRequest')
AVMetadataItemValueRequestInternal = _Class('AVMetadataItemValueRequestInternal')
AVLazyValueLoadingMetadataItemInternal = _Class('AVLazyValueLoadingMetadataItemInternal')
AVMetadataItem = _Class('AVMetadataItem')
AVChapterMetadataItem = _Class('AVChapterMetadataItem')
AVLazyValueLoadingMetadataItem = _Class('AVLazyValueLoadingMetadataItem')
AVMutableMetadataItem = _Class('AVMutableMetadataItem')
AVMetadataItemInternal = _Class('AVMetadataItemInternal')
AVPlayerLooper = _Class('AVPlayerLooper')
AVPlayerLooperInternal = _Class('AVPlayerLooperInternal')
AVPlayerLayerInternal = _Class('AVPlayerLayerInternal')
AVFigRemoteRouteDiscovererFactory = _Class('AVFigRemoteRouteDiscovererFactory')
AVRunLoopCondition = _Class('AVRunLoopCondition')
AVURLAuthenticationChallenge = _Class('AVURLAuthenticationChallenge')
AVAggregateAssetDownloadTask = _Class('AVAggregateAssetDownloadTask')
AVOperationQueueWithFundamentalDependency = _Class('AVOperationQueueWithFundamentalDependency')
AVNetworkPlaybackPerfHUDLayer = _Class('AVNetworkPlaybackPerfHUDLayer')
AVSampleBufferDisplayLayer = _Class('AVSampleBufferDisplayLayer')
AVSampleBufferDisplayLayerContentLayer = _Class('AVSampleBufferDisplayLayerContentLayer')
AVSynchronizedLayer = _Class('AVSynchronizedLayer')
AVPlayerLayer = _Class('AVPlayerLayer')
AVPlayerLayerIntermediateLayer = _Class('AVPlayerLayerIntermediateLayer')
AVWaitForNotificationOrDeallocationOperation = _Class('AVWaitForNotificationOrDeallocationOperation')
AVOperation = _Class('AVOperation')
AVRouteConfigUpdatedFigRoutingContextRouteChangeOperation = _Class('AVRouteConfigUpdatedFigRoutingContextRouteChangeOperation')
AVFigRoutingContextRouteChangeOperation = _Class('AVFigRoutingContextRouteChangeOperation')
AVFigRoutingContextSendConfigureDeviceCommandOperation = _Class('AVFigRoutingContextSendConfigureDeviceCommandOperation')
AVBlockOperation = _Class('AVBlockOperation')
AVAssetWriterInputFigAssetWriterEndPassOperation = _Class('AVAssetWriterInputFigAssetWriterEndPassOperation')
AVFigAssetWriterFinishWritingAsyncOperation = _Class('AVFigAssetWriterFinishWritingAsyncOperation')
AVWorkaroundNSBlockOperation = _Class('AVWorkaroundNSBlockOperation')
AVMetadataEnumerator = _Class('AVMetadataEnumerator')
AVAssetTrackEnumerator = _Class('AVAssetTrackEnumerator')
AVTimeFormatter = _Class('AVTimeFormatter')
CMTimeMappingAsValue = _Class('CMTimeMappingAsValue')
CMTimeRangeAsValue = _Class('CMTimeRangeAsValue')
CMTimeAsValue = _Class('CMTimeAsValue')
AVFragmentedAssetsArray = _Class('AVFragmentedAssetsArray')
| [
"adrilabbelol@gmail.com"
] | adrilabbelol@gmail.com |
3dee366948aad08413249beb3e3ed9215f54b420 | 59226a0a54e831b12ad6cbf16584892304c8bddc | /basic/theater_module.py | 419ef28926b0844bee7582494a8ee581705d7a48 | [] | no_license | lgduke/python-work | c23023ea73f382cb81df8ad57223d8e9b27dc28c | 85ea909c191113f9475b73c2aa2434f1c0658f66 | refs/heads/master | 2023-06-07T11:08:06.845006 | 2021-06-30T13:34:10 | 2021-06-30T13:34:10 | 376,301,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | # 일반 가격
def price(people):
    """Print the regular admission price (10,000 won per person)."""
    total = people * 10000
    print("{0}명 가격은 {1}원입니다.".format(people, total))
# Early-bird (morning show) discount
def price_morning(people):
    """Print the early-bird discounted price (6,000 won per person)."""
    total = people * 6000
    print("{0}명 조조 할인 가격은 {1}원입니다.".format(people, total))
# Military discount
def price_soldier(people):
    """Print the military discounted price (4,000 won per person)."""
    total = people * 4000
    print("{0}명 군인 할인 가격은 {1}원입니다.".format(people, total))
| [
"lgduke.us@gmail.com"
] | lgduke.us@gmail.com |
ae70c4936c59151f6424edc94310294c257854b9 | 48ec3e51d2e5243a1d3c174837e5361a47e9253f | /src/solaredge_interface/__init__.py | 8bcb31f0bb106e927c846538b4a62b886445539b | [
"BSD-2-Clause"
] | permissive | gjdv/solaredge-interface | 77a55ef5950e28284aa932201025056c3f2e4716 | f209a133292d804a3a00a24c8b21f8b99be680cf | refs/heads/master | 2023-03-30T12:48:33.194393 | 2021-04-04T10:53:26 | 2021-04-04T10:53:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | # -*- coding: utf8 -*-
# Copyright (c) 2020 Nicholas de Jong
# Package metadata.
__title__ = "solaredge-interface"
__author__ = "Nicholas de Jong <contact@nicholasdejong.com>"
__version__ = '0.3.2'
__license__ = "MIT"
# Environment variable names the CLI/config layer reads for credentials and
# output formatting.
__env_api_key__ = 'SOLAREDGE_API_KEY'
__env_site_id__ = 'SOLAREDGE_SITE_ID'
__env_output_format__ = 'SOLAREDGE_OUTPUT_FORMAT'
__output_format_default__ = 'json'
# Per-user and system-wide configuration file locations and the INI section
# name looked up inside them.
__config_file_user__ = '~/.solaredge-interface'
__config_file_system__ = '/etc/solaredge-interface'
__config_section_name__ = 'solaredge-interface'
# HTTP settings for talking to the SolarEdge monitoring API.
__solaredge_api_baseurl__ = 'https://monitoringapi.solaredge.com'
__http_request_timeout__ = 10
__http_request_user_agent__ = '{}/{}'.format(__title__, __version__)
| [
"me@nicholasdejong.com"
] | me@nicholasdejong.com |
34fbcbb5b07243310281ddcea4e59205032d636b | 153da69b35f032f5b83a06f17008ba41a1b336b4 | /src/demo/__init__.py | da984a42f90721752b48b31d39530ff3bf6f8ff9 | [
"MIT"
] | permissive | TrendingTechnology/hspylib | 6400cadf9dfe6ab5733712dcfeccf8022d61c589 | c79a2c17e89fe21d00ccd9c1646a03407cd61839 | refs/heads/master | 2023-06-20T15:47:35.962661 | 2021-07-19T22:12:18 | 2021-07-19T23:45:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | # _*_ coding: utf-8 _*_
#
# HSPyLib v0.11.1
#
# Package: demo
"""Package initialization."""
# Demo sub-modules exported via `from demo import *`.
__all__ = [
    'calculator',
    'cli',
    'phonebook'
]
| [
"yorevs@gmail.com"
] | yorevs@gmail.com |
6a8a96f9d45b4d8ecadd47e84b9980ad06b03f6a | 856e275db46f8f4ef2429d79aa832d685895afca | /ImageScrapper/scraper.py | e062da9184c459a081862415121b29fa91831d78 | [] | no_license | Raghavmht09/ImageScrapper-DeepLearning | 3fdacb07c8b0cdbeb1a701ed6a0aec80672971f3 | 1fb9b29ff495f77fa9de48c00da505e2c21acd2e | refs/heads/main | 2023-01-09T17:03:02.665447 | 2020-11-07T14:01:37 | 2020-11-07T14:01:37 | 310,856,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,130 | py | import os
import time
import requests
from selenium import webdriver
def fetch_image_urls(query: str, max_links_to_fetch: int, wd: webdriver, sleep_between_interactions: int = 1):
    """Collect up to `max_links_to_fetch` full-size image URLs from a Google
    Images search for `query`, driving the supplied Selenium WebDriver `wd`.

    Returns a set of URL strings.  `sleep_between_interactions` throttles the
    scrolling/clicking so the page has time to load.
    """

    def scroll_to_end(wd):
        # Scroll to the bottom so Google lazily loads another batch of thumbnails.
        wd.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(sleep_between_interactions)

    # build the google query
    search_url = "https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&q={q}&oq={q}&gs_l=img"
    # https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&q=dog&oq=dog&gs_l=img
    # load the page
    wd.get(search_url.format(q=query))
    image_urls = set()
    image_count = 0
    results_start = 0
    while image_count < max_links_to_fetch:
        scroll_to_end(wd)
        # get all image thumbnail results
        thumbnail_results = wd.find_elements_by_css_selector("img.Q4LuWd")
        number_results = len(thumbnail_results)
        print(f"Found: {number_results} search results. Extracting links from {results_start}:{number_results}")
        for img in thumbnail_results[results_start:number_results]:
            # try to click every thumbnail such that we can get the real image behind it
            try:
                img.click()
                time.sleep(sleep_between_interactions)
            except Exception:
                continue
            # extract image urls
            actual_images = wd.find_elements_by_css_selector('img.n3VNCb')
            for actual_image in actual_images:
                if actual_image.get_attribute('src') and 'http' in actual_image.get_attribute('src'):
                    image_urls.add(actual_image.get_attribute('src'))
            image_count = len(image_urls)
            if image_count >= max_links_to_fetch:
                print(f"Found: {len(image_urls)} image links, done!")
                break
        # BUG FIX: the original code ran this progress branch once per
        # thumbnail and ended it with a bare `return`, so the function
        # usually returned None (crashing the caller's `for elem in res`)
        # and the "load more" handling below was unreachable.  The check now
        # happens once per scroll pass and the URL set is always returned.
        if image_count < max_links_to_fetch:
            print("Found:", len(image_urls), "image links, looking for more ...")
            try:
                load_more_button = wd.find_element_by_css_selector(".mye4qd")
                if load_more_button:
                    wd.execute_script("document.querySelector('.mye4qd').click();")
            except Exception:
                # No "load more" button on the page; keep scrolling instead.
                pass
        # move the result startpoint further down
        results_start = len(thumbnail_results)
    return image_urls
def persist_image(folder_path: str, url: str, counter):
    """Download `url` and save it as ``jpg_<counter>.jpg`` inside `folder_path`.

    Failures are reported on stdout instead of raised, preserving the
    original best-effort behaviour.
    """
    try:
        image_content = requests.get(url).content
    except Exception as e:
        print(f"ERROR - Could not download {url} - {e}")
        # BUG FIX: previously execution fell through to the save block with
        # `image_content` undefined, producing a misleading NameError that
        # was reported as "Could not save".
        return
    try:
        # Context manager guarantees the file handle is closed even on error.
        with open(os.path.join(folder_path, 'jpg' + "_" + str(counter) + ".jpg"), 'wb') as f:
            f.write(image_content)
        print(f"SUCCESS - saved {url} - as {folder_path}")
    except Exception as e:
        print(f"ERROR - Could not save {url} - {e}")
def search_and_download(search_term: str, driver_path: str, target_path='./images', number_images=10):
    """Search Google Images for `search_term` and save up to `number_images`
    results under ``<target_path>/<search term with underscores>/``.

    `driver_path` must point at a chromedriver binary matching the locally
    installed Chrome version.
    """
    # One sub-folder per search term, e.g. "./images/spiti_valley".
    target_folder = os.path.join(target_path, '_'.join(search_term.lower().split(' ')))
    if not os.path.exists(target_folder):
        os.makedirs(target_folder)
    # Using the driver as a context manager closes the browser even on error.
    with webdriver.Chrome(executable_path=driver_path) as wd:
        res = fetch_image_urls(search_term, number_images, wd=wd, sleep_between_interactions=0.5)
        counter = 0
        for elem in res:
            persist_image(target_folder, elem, counter)
            counter += 1
# pip install -r requirements.txt
# My chrome Version 85.0.4183.102
# My Firefox Version 80.0.1 (64-bit)
# How to execute this code
# Step 1 : pip install selenium, pillow, requests
# Step 2 : make sure you have chrome/Mozilla installed on your machine
# Step 3 : Check your chrome version ( go to three dot then help then about google chrome )
# Step 4 : Download the same chrome driver from here " https://chromedriver.storage.googleapis.com/index.html "
# Step 5 : put it inside the same folder of this code
# NOTE(review): the search_and_download() call below runs at import time;
# consider guarding it with `if __name__ == "__main__":` so importing this
# module for its functions does not launch a browser.
DRIVER_PATH = './chromedriver'  # chromedriver binary, expected next to this script
search_term = 'Spiti Valley'  # query sent to Google Images
# num of images you can pass it from here by default it's 10 if you are not passing
number_images = 5
search_and_download(search_term=search_term, driver_path=DRIVER_PATH, number_images = number_images) | [
"noreply@github.com"
] | Raghavmht09.noreply@github.com |
18670726191b94436e247b590364c77a2e3f4b02 | 693c80b92a7b346b745ff067bdea3fca995fad24 | /setup.py | 5b9a6a8e3f918ec59c635b6b4ceb36f25e45f274 | [
"BSD-2-Clause"
] | permissive | stigmergic/django-publish | 2f3bf3c3c60abd7d25d3331d962554cebbea9705 | ae27f14371e2393efab2a05130b6e3b9d0ea8497 | refs/heads/master | 2021-01-20T04:00:19.485026 | 2017-06-15T20:46:11 | 2017-06-15T20:46:11 | 89,622,072 | 3 | 0 | null | 2017-04-27T17:13:36 | 2017-04-27T17:13:36 | null | UTF-8 | Python | false | false | 1,079 | py | from setuptools import setup, find_packages
# Pull the version straight from the package so it is defined in one place.
version=__import__('publish').__version__
setup(
    name='django-publish',
    version=version,
    description='Handy mixin/abstract class for providing a "publisher workflow" to arbitrary Django models.',
    # README.rst doubles as the PyPI long description.
    long_description=open('README.rst').read(),
    author='John Montgomery',
    author_email='john@sensibledevelopment.com',
    url='http://github.com/johnsensible/django-publish',
    # Release archive on GitHub, tagged v<version>.
    download_url='https://github.com/johnsensible/django-publish/archive/v%s.zip#egg=django-publish-%s' % (version, version),
    license='BSD',
    packages=find_packages(exclude=['ez_setup']),
    include_package_data=True,
    zip_safe=True,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| [
"john@sensibledevelopment.com"
] | john@sensibledevelopment.com |
0b26c47657ed335b9508cf3bee948cafe37f46e5 | 787b88a994ed1257555aa766f682c90c7db2099e | /cookieofzhihu/login.py | 3da3d955bc8bcdd7b4b68f542017b71fced85a51 | [] | no_license | maoyuchuan/myspider | 9fe7e96324f382eccdc87d6c2ac0f3fe2eede1d9 | 49051412759d3dcffa9703c80a393983a098b5ec | refs/heads/master | 2020-05-21T10:10:05.495115 | 2016-09-25T03:13:56 | 2016-09-25T03:13:56 | 68,897,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,331 | py | # -*- coding: utf-8 -*-
import requests
try:
import cookielib
except:
import http.cookiejar as cookielib
import re
import time
# Browser-like request headers; Zhihu rejects requests without a User-Agent.
headers = {
    "Host": "www.zhihu.com",
    "Referer": "https://www.zhihu.com/",
    'User-Agent':"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:48.0) Gecko/20100101 Firefox/48.0",
}
# Shared session so cookies persist across all requests in this module.
session = requests.session()
url = 'http://www.zhihu.com'
# LWP-format cookie jar persisted to the local file "cookies".
session.cookies = cookielib.LWPCookieJar(filename='cookies')
def login_cookie():
    """Try to log in using cookies previously saved on disk.

    Returns True when the saved cookies are still valid, False otherwise.
    Callers compare the result with ``== False`` and fall back to a
    username/password login, so the explicit True is backward-compatible
    (the original implicitly returned None on success).
    """
    try:
        session.cookies.load(ignore_discard=True)
        page = session.get(url, headers=headers).text
        # A question link only appears on the logged-in feed page, so its
        # presence confirms the cookie session is valid.
        pattern = re.compile(u'<a class="question_link"', re.S)
        result = re.search(pattern, page)
        if result:
            print(u'cookie登录成功')
            return True  # BUG FIX: previously fell off the end and returned None
        else:
            print(u'cookie登录失败,使用账号密码登录')
            return False
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt is not swallowed.
        print(u"Cookie 未能加载")
        return False
def login(account, secret):
    """Log in with a phone number or an email address plus password.

    Saves the session cookies to disk and returns the module-level session
    on success; returns None when `account` is neither a Chinese mobile
    number nor an email address.
    """
    # Decide the endpoint from the account format: an 11-digit number
    # starting with 1 is treated as a Chinese mobile phone number.
    if re.match(r"^1\d{10}$", account):
        print(u"手机号登录" + u"\n")
        post_url = 'http://www.zhihu.com/login/phone_num'
        postdata = {
            '_xsrf': get_xsrf(),
            'password': secret,
            'remember_me': 'true',
            'phone_num': account,
        }
    else:
        if "@" in account:
            print(u"邮箱登录" + u"\n")
        else:
            # Neither a phone number nor an email: reject the input.
            print(u"你的账号输入有问题,请重新登录")
            return None
        post_url = 'http://www.zhihu.com/login/email'
        postdata = {
            '_xsrf': get_xsrf(),
            'password': secret,
            'remember_me': 'true',
            'email': account,
        }
    try:
        # First attempt: log in directly, without a captcha.
        login_page = session.post(post_url, data=postdata, headers=headers)
    except:
        # Retry with a captcha code when the plain login attempt fails.
        # NOTE(review): `login_page` is assigned but never read.
        postdata["authcode"] = get_authcode()
        login_page = session.post(post_url, data=postdata, headers=headers)
    session.cookies.save()
    return session
def login_code(session):
    """Report whether `session` is authenticated.

    The settings/profile page answers 200 only for a logged-in session
    (anonymous requests get a redirect, which is not followed here).
    """
    profile_url = "https://www.zhihu.com/settings/profile"
    # NOTE(review): the local name shadows this function's own name.
    login_code = session.get(profile_url, headers=headers, allow_redirects=False).status_code
    if login_code == 200:
        print(u'登录成功')
    else:
        print(u'登录失败,请检查你的输入')
def get_xsrf():
    '''Fetch the dynamic _xsrf token required by the login form.'''
    # Load the landing page and scrape the hidden _xsrf input value.
    index_page = session.get(url, headers=headers)
    html = index_page.text
    pattern = r'name="_xsrf" value="(.*?)"'
    # re.findall returns a list; the first match is the token.
    # NOTE(review): raises IndexError if the page contains no _xsrf field.
    _xsrf = re.findall(pattern, html)
    return _xsrf[0]
def get_authcode():
    """Download the login captcha to authcode.jpg and ask the user to type it.

    NOTE(review): `raw_input` is Python-2-only; this raises NameError on
    Python 3 even though the cookielib import above has a py3 fallback.
    """
    # Millisecond timestamp acts as a cache-buster for the captcha image.
    t = str(int(time.time() * 1000))
    auth_url = 'http://www.zhihu.com/captcha.gif?r=' + t + "&type=login"
    r = session.get(auth_url, headers=headers)
    with open('authcode.jpg', 'wb') as f:
        f.write(r.content)
    authcode = raw_input("plz enter authcode:")
    return authcode
if __name__ == '__main__':
    # Prefer the saved-cookie login; prompt for credentials only if it fails.
    if login_cookie() == False:
        username = raw_input("plz enter username:")
        password = raw_input("plz enter password:")
        session = login(username, password)
        if session != None:
            # Double-check the fresh session is actually authenticated.
            login_code(session)
| [
"maoyuchuan27@163.com"
] | maoyuchuan27@163.com |
24c2b33dc9f0e4f5d04a6c43a9c52b0cc36e10c0 | b3e096d4135ee4d070ecf09ce042a79b2a54b8ef | /5_Capstone/gmane/gmanesummary.py | b947b1037a5277efca8e7fec3f93d9d551b1b1c7 | [] | no_license | joinalahmed/University_of_Michigan_Python_for_Informatics_Certificate | d077ad7f452f04f958ef7225af20b5a70c15ca88 | 44d34fa4cf58f47280c184b4ce6deb2b242457c8 | refs/heads/master | 2021-06-12T00:00:02.314511 | 2016-05-29T05:55:33 | 2016-05-29T05:55:33 | 100,685,739 | 0 | 1 | null | 2017-08-18T07:30:29 | 2017-08-18T07:30:29 | null | UTF-8 | Python | false | false | 1,432 | py | #Gmane
#Mailing List Data - Part I
#In this assignment you will download some of the mailing list data from http://mbox.dr-chuck.net/ and run the data cleaning / modeling process and take
#some screen shots.
#Don't take off points for little mistakes. If they seem to have done the assignment give them full credit. Feel free to make suggestions if there are small
#mistakes. Please keep your comments positive and useful.
#Sample solution: http://www.dr-chuck.net/pythonlearn/code/gmane.zip
#Steps:
#Run gmane.py from http://mbox.dr-chuck.net/, data is large so give time for the sql to be created.
#Run gmodel.py which compresses/cleanups the content.sqlite file.
#Run gbasic.py to dump the top 15 people and organizations for finding anomalies.
#For visualization:
#Run gword.py to determine the top words (without any punctuation, numbers, or words less than 4); range of lowest and highest words outputted. Then writes to gword.js and open gword.htm in browser to see visualization. Code was taken from D3 website.
#Run gyear.py by counting senders, determining 10 organizations who are senders, get keys for highest senders, then create a histogram for top organization for each year (year, domain name is a tuple and is used as a key in the dictionary). Creates gline.js and open gline.htm.
#Or run gline.py (which is almost identical as gyear.py but asks for the month vs the year). Creates a new gline.js and new gline.htm. | [
"orysyastus2012@gmail.com"
] | orysyastus2012@gmail.com |
61c0692953d8005431c61b4a41c758a8b893a8e4 | 751df989418820fb10abdab13cd5e216e5c631f5 | /main.py | ef70851eab09e59122058fbd12c30fa93b679fdd | [] | no_license | yashbbelkhedkar1/String-seperator | 5546f0e6bc2b9f4e816d3b412622d7dd3150f5f2 | f3faa8026f82e0035e3349e3e9ffef84e8eb47c4 | refs/heads/main | 2023-03-29T10:50:11.111070 | 2021-04-03T04:30:45 | 2021-04-03T04:30:45 | 354,010,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | import nltk
from nltk.corpus import stopwords
def func() :
    """Prompt for a self-introduction and extract [name, address] from it.

    Reads one line like "I am XYZ. I live in Srinagar", removes English
    stopwords, and returns a two-element list: the proper nouns of the
    first sentence (the name) and of the second sentence (the address).
    NOTE(review): assumes the input contains at least two sentences;
    fewer sentences raise IndexError.
    """
    res = []
    string = input("What is your name ? Where do you live?\t(example : I am XYZ. I live in Srinagar) \n").lower()
    stop = stopwords.words('english')
    # Keep only the non-stopword tokens, re-joined into one document.
    document = []
    for i in string.split() :
        if i not in stop:
            document.append(i)
    document = " ".join(document)
    sentences = nltk.sent_tokenize(document)
    # Title-case the first sentence so the POS tagger marks the name
    # tokens as proper nouns (NNP).
    name_word = nltk.word_tokenize(sentences[0].title())
    name = []
    for i in nltk.pos_tag(name_word) :
        if(i[1] == 'NNP') :
            name.append(i[0])
    name = " ".join(name)
    res.append(name)
    # Same treatment for the second sentence to pull out the address.
    addr_word = nltk.word_tokenize(sentences[1].title())
    addr = []
    for i in nltk.pos_tag(addr_word) :
        if(i[1] == 'NNP') :
            addr.append(i[0])
    addr = " ".join(addr)
    res.append(addr)
    return res
| [
"noreply@github.com"
] | yashbbelkhedkar1.noreply@github.com |
fa717c170de05eac692368753eb304b47a460736 | 2e6a83e130a2f54561e42e68afbbde28c4c43160 | /pythonProject/main.py | 2d29dcc12b4e600aea74d5cec9e2b2695a8114dc | [] | no_license | andrykhinadaria/Andrykhinadaria | c3c8fd00614ff7d89ccb2c3927c9a53e4bb46638 | 53cd1dba59c8cdfaa55b0bab5c4002b17fdcdd8f | refs/heads/main | 2023-08-25T00:20:07.345075 | 2021-10-05T06:29:17 | 2021-10-05T06:29:17 | 413,698,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,680 | py | from tkinter import *
# Simple one-window survey form ("Опрос" = "Survey").
root = Tk()
root.title("Опрос")
root.geometry("720x800")
# "View results" button.  NOTE(review): no command= callback is attached,
# so clicking it does nothing.
btn = Button(text="Посмотреть результаты", # button text
             background="#555", # button background colour
             foreground="#ccc", # text colour
             padx="20", # horizontal padding between border and content
             pady="8", # vertical padding between border and content
             font="16") # font size
btn.pack(side=BOTTOM, padx=0, pady=50)
# Question 1: "Which programming language do you like most?"
label1 = Label(text="Какой язык программирования вам больше всего нравится ?")
label1.pack(side=TOP,padx=0,pady=10)
Possible_answer = IntVar()
python_checkbutton = Radiobutton(text="Java",variable=Possible_answer, value=1, padx=15, pady=10)
python_checkbutton.pack(side=TOP,padx=0,pady=10)
python_checkbutton = Radiobutton(text="Python",variable=Possible_answer, value=2, padx=15, pady=10)
python_checkbutton.pack(side=TOP,padx=0,pady=10)
# Question 2: same wording, bound to a separate IntVar (`answer`).
label1 = Label(text="Какой язык программирования вам больше всего нравится ?")
label1.pack(side=TOP,padx=0,pady=10)
answer = IntVar()
python_checkbutton = Radiobutton(text="Java",variable=answer, value=3, padx=15, pady=10)
python_checkbutton.pack(side=TOP,padx=0,pady=10)
python_checkbutton = Radiobutton(text="Python",variable=answer, value=4, padx=15, pady=10)
python_checkbutton.pack(side=TOP,padx=0,pady=10)
root.mainloop() | [
"noreply@github.com"
] | andrykhinadaria.noreply@github.com |
d9b117d8852421c5c48b4e35a7c43c4261036062 | 2e6f33932cc9b6d2e8e55690e5e46693db78fd28 | /backup.py | e402a567d3836c00757ec8e9aff8b7be0eb48b10 | [] | no_license | l631768226/cal | c5bf16a95034c7158b84e036803b5b5e8ddb03df | a058136481c47ce0329a52f1625b4cff7af65e1e | refs/heads/master | 2023-07-12T10:57:09.501778 | 2021-08-31T03:27:17 | 2021-08-31T03:27:17 | 350,166,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,387 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
author: sophay
date: 2021/1/6
email: 1427853491@qq.com
"""
import os
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from astropy.timeseries import LombScargle
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from multiprocessing import Process, Lock, Queue
import joblib
# Configure matplotlib so Chinese characters render in the plots.
plt.rcParams['font.sans-serif'] = ['FangSong']  # default CJK-capable font
plt.rcParams['axes.unicode_minus'] = False
# Silence pandas SettingWithCopy warnings.
pd.set_option('mode.chained_assignment', None)
class DatasetPreprocessing:
    """Stage 1: clean the raw check-in CSV and write ``part1_result.csv``.

    Drops rarely-visited places, renumbers place/user ids, converts UTC
    timestamps to local time, then saves the cleaned frame and plots
    check-in statistics into `dist_path`.
    """

    def __init__(self, rsc_path: str, dist_path: str):
        print("数据处理中...")
        # Read only the feature columns the pipeline actually uses.
        self.df = pd.read_csv(
            rsc_path,
            low_memory=False,
            encoding='gbk',
            usecols=['user_id', 'placeID', 'lat', 'lng', 'time_offset', 'time', 'label']
        )
        # Filled by plot_user_checkins(): the 10 most active user ids.
        self.users = None
        self.dist_path = dist_path
        self.remove_infrequently_data()
        self.numerical_place_id()
        self.transform_utc_time()
        # Write out the processed frame with columns in a fixed order.
        self.df = self.df[['user_id', 'placeID', 'lat', 'lng', 'localtime', 'label']]
        self.df.to_csv(os.path.join(self.dist_path, 'part1_result.csv'), index=None)
        # Plot per-user check-in counts.
        self.plot_user_checkins()
    def numerical_place_id(self):
        """Renumber placeID and user_id to dense 0-based integer codes."""
        print("将placeID数值化")
        unique_place_id = self.df['placeID'].unique()
        map_dict = dict(zip(unique_place_id, range(len(unique_place_id))))
        self.df['placeID'] = self.df['placeID'].map(map_dict)
        unique_user_id = self.df['user_id'].unique()
        map_dict = dict(zip(unique_user_id, range(len(unique_user_id))))
        self.df['user_id'] = self.df['user_id'].map(map_dict)
    def transform_utc_time(self):
        """Add a `localtime` column = UTC `time` plus `time_offset` hours."""
        print("将UTC时间加上时间偏移量得到localtime")
        self.df['time'] = pd.to_datetime(self.df['time'])  # parse up-front; much faster than per-row parsing later
        self.df['time'] = self.df['time'].dt.tz_localize(None)  # strip the timezone
        # Vectorised numpy datetime arithmetic for speed.
        self.df['localtime'] = (self.df.time.values.astype('M8[s]') +
                                self.df.time_offset.values * np.timedelta64(1, 'h')).astype('M8[s]')
    def remove_infrequently_data(self):
        """Keep only (user, place) pairs visited more than 5 times."""
        print("移除八个半月访问次数少于5次的数据")
        features = pd.DataFrame()
        users = self.df['user_id'].unique()
        # Filter each user independently and collect the surviving rows.
        for user in users:
            # Select this user's rows and drop places with <= 5 check-ins.
            user_df = self.df[self.df['user_id'] == user]
            satisfied_data_counts = user_df['placeID'].value_counts()
            satisfied_data_index = satisfied_data_counts[satisfied_data_counts > 5].index
            satisfied_data = user_df[user_df['placeID'].isin(satisfied_data_index)]
            # NOTE(review): boolean-mask selection never returns None, so
            # this guard appears to be dead code (an empty frame is truthy
            # for `is None`); an empty user simply contributes no rows.
            if satisfied_data is None:
                continue
            features = pd.concat([features, satisfied_data], ignore_index=True)
        self.df = features
    def plot_user_checkins(self):
        """Plot overall check-in counts and, for the 10 most active users,
        a per-place bar chart each; saves PNGs into `dist_path`."""
        users = self.df['user_id'].values
        users_dict = {}
        for item in users:
            users_dict[item] = users_dict.get(item, 0) + 1
        # Sort by check-in count to find the 10 most active users.
        users_dict_temp = dict(sorted(users_dict.items(), key=lambda x: x[1], reverse=True))
        self.users = list(users_dict_temp.keys())[:10]
        user = np.array(list(users_dict.keys()))
        times = np.array(list(users_dict.values()))
        # Dark style sheet for the overview line chart.
        with plt.style.context('dark_background'):
            plt.figure(facecolor='#084874', figsize=(10, 8), dpi=150)
            ax = plt.gca()
            ax.set_facecolor('#084874')
            ax.plot(user, times, 'w')
            ax.set_title("用户签到次数折线图")
            ax.set_xlabel("用户ID")
            ax.set_ylabel("签到次数")
            plt.savefig(os.path.join(self.dist_path, 'all_users_checkin.png'))
        # Local helper; `this` is the enclosing instance passed explicitly.
        def plot_single_user(this, user_id):
            df = this.df[this.df['user_id'] == user_id]
            data = df['placeID'].values
            data_dict = {}
            for each in data:
                data_dict[each] = data_dict.get(each, 0) + 1
            place_id = np.array(list(data_dict.keys()))
            freq = np.array(list(data_dict.values()))
            plt.figure(facecolor='#084874', figsize=(10, 8), dpi=150)
            ax2 = plt.gca()
            ax2.set_facecolor('#084874')
            bar_space = 0.1
            bar_width = 0.3
            index = np.array([i * (bar_space + bar_width) for i in range(len(place_id))])
            ax2.bar(index, freq, bar_width, color='white')
            # Annotate each bar with its count.
            for a, b in zip(index, freq):
                ax2.text(a, b, str(b), ha='center', va='bottom', color='white')
            ax2.set_xticks(ticks=index)
            ax2.set_xticklabels(place_id, rotation=45)
            ax2.set_title("用户%s签到地点柱状图" % user_id)
            ax2.set_xlabel("签到地点ID")
            ax2.set_ylabel("签到次数")
            plt.savefig(os.path.join(this.dist_path, '%s.png' % user_id))
        for user in self.users:
            plot_single_user(self, user)
class PeriodMining:
    """Stage 2: mine visiting periods per (user, place) with Lomb-Scargle and
    write the annotated rows to ``part2_result.csv``.

    NOTE(review): worker processes append results straight to the CSV, so
    `self.periods` stays empty in the parent process — confirm callers do
    not rely on it.
    """

    def __init__(self, rsc_path, dist_path):
        print("周期模式挖掘中...")
        self.rsc_path = rsc_path
        self.dist_path = os.path.join(dist_path, 'part2_result.csv')
        # Remove any result file from a previous run (output is appended).
        if os.path.exists(self.dist_path):
            print("检测到有历史文件,正在删除...")
            os.remove(self.dist_path)
            print("删除文件%s完成!" % self.dist_path)
        self.periods = {}
        self.multiprocessing_mining()
    def appended_write_csv(self, user_df):
        """Append `user_df` to the result CSV; write the header only once."""
        if not os.path.exists(self.dist_path):
            user_df.to_csv(self.dist_path, index=None)
        else:
            user_df.to_csv(self.dist_path, index=None, mode='a', header=False)
    @staticmethod
    def period_mining(user_df: pd.DataFrame):
        """Rebase one user's check-in times to hours since their first visit
        and estimate a visiting period for every place they frequent.

        Returns ``{placeID(str): [period_hours, false_alarm_probability]}``,
        with ``[-1, 1]`` meaning "no periodicity detected".
        """
        def get_time_intervals(t, base_t):
            """Return the (rounded) number of hours between t and base_t."""
            diff = pd.to_datetime(t) - pd.to_datetime(base_t)
            return round(diff.days * 24 + diff.seconds / 3600)
        checkin_time = np.array(user_df['localtime'].apply(lambda t: get_time_intervals(t, user_df['localtime'].min())))
        checkin_id = user_df['placeID'].to_numpy()
        periods = {}
        # Extract a period independently for each place this user visits.
        for cur_id in np.unique(checkin_id):
            # print("打卡地点:", cur_id)
            # Check-in hours for the current place only.
            cur_checkin_time = checkin_time[checkin_id == cur_id]
            # x axis: every hour between the first and last check-in.
            x = np.arange(cur_checkin_time.min(), cur_checkin_time.max())
            y = []
            # y axis: cur_id at hours with a check-in, 0 elsewhere.
            for i in range(cur_checkin_time.min(), cur_checkin_time.max()):
                y.append(cur_id if i in list(cur_checkin_time) else 0)
            y = np.array(y)
            ls = LombScargle(x, y)
            # Frequency band: periods no shorter than 3 h and no longer than
            # (8/5) months (>= 5 visits over 8 months), hence these bounds.
            frequency = None
            try:
                frequency, power = ls.autopower(minimum_frequency=1 / ((8 / 5) * 30 * 24), maximum_frequency=3 / 24)
                # An empty frequency array means no candidate periodicity.
                if frequency.size:
                    # Take the period at the peak power, plus its
                    # false-alarm probability (rounded to 3 decimals).
                    periods[str(cur_id)] = [round(1 / frequency[np.where(power == power.max())][0]),
                                            round(ls.false_alarm_probability(power.max()), 3)]
                else:
                    # -1 marks "no periodicity" for this place.
                    periods[str(cur_id)] = [-1, 1]
            except Exception as e:
                print(e, frequency)
                periods[str(cur_id)] = [-1, 1]
                continue
        return periods
    def multiprocessing_mining(self):
        """Fan the per-user mining out over 8 worker processes."""
        processes = []
        queue = Queue()
        lock = Lock()
        df = pd.read_csv(self.rsc_path, low_memory=False)
        users = df['user_id'].unique()
        for user in users:
            queue.put(user)
        start_t = time.time()
        for _ in range(8):
            p = Process(target=self.multiprocessing_task, args=(df, queue, lock))
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
        print("耗时%.2f分钟" % ((time.time() - start_t) / 60))
    def multiprocessing_task(self, df, queue, lock):
        """Worker loop: pop user ids, mine their periods, append to the CSV.

        NOTE(review): Queue.empty() is documented as unreliable across
        processes; a sentinel-based shutdown would be more robust.
        """
        while not queue.empty():
            cur_user = queue.get()
            user_df = df[df['user_id'] == cur_user].copy()
            user_periods = self.period_mining(user_df)
            for name, group in user_df.groupby(['placeID']):
                group['period'], group['period_er'] = user_periods[str(name)]
                group = group[['user_id', 'placeID', 'lat', 'lng', 'localtime', 'period', 'period_er', 'label']]
                lock.acquire()  # serialise CSV appends across workers
                self.appended_write_csv(group)
                lock.release()
            print("剩余%s" % queue.qsize())
class FeatureExtraction:
    """Stage 3a: turn the period-mining output into model-ready features.

    Loading the CSV, the constructor derives calendar features from
    `localtime`, encodes the textual activity label as a numeric string,
    and reorders the columns so the label comes last.
    """

    def __init__(self, rsc_path):
        print("特征提取中...")
        self.rsc_path = rsc_path
        self.df = pd.read_csv(self.rsc_path, low_memory=False)
        for step in (self.time_extraction, self.label_dealing, self.save):
            step()

    def time_extraction(self):
        """Split `localtime` into day / weekday / month / year columns."""
        parsed = pd.to_datetime(self.df['localtime'])
        for column, attr in (('day_time', 'day'), ('week_time', 'dayofweek'),
                             ('month_time', 'month'), ('year_time', 'year')):
            self.df[column] = getattr(parsed.dt, attr)

    def label_dealing(self):
        """Encode the activity label as a numeric string in `activity`."""
        codes = {'Shopping': '0', 'Work': '1', 'Entertainment': '2', 'Sports': '3', 'Rest': '4',
                 'Service': '5', 'Restaurant': '6', 'Travel': '7', 'Medical': '8', 'Art': '9',
                 'Meeting': '10', 'Education': '11'}
        self.df['activity'] = self.df['label'].map(codes)

    def save(self):
        """Reset the index and fix the column order (label last)."""
        ordered = ['user_id', 'lng', 'lat', 'placeID', 'day_time', 'week_time', 'month_time', 'year_time',
                   'period', 'period_er', 'activity']
        self.df = self.df.reset_index(drop=True)[ordered]
class Model:
    """Stage 3b: random-forest activity classifier.

    Trains (or loads) the model, predicts activities for one user and
    writes the comparison to ``part3_result.csv``.
    """

    def __init__(self, df, path, user_id):
        self.df = df
        self.user_id = user_id
        self.dir = path
        self.dist_path = os.path.join(self.dir, 'part3_result.csv')
        # Rows belonging to the user whose activities we want to predict.
        self.pred_user = df[df['user_id'] == user_id]
        self.rf_model()
    def rf_model(self):
        """Classify with a Random Forest; cache the fitted model on disk."""
        feature_name_list = ['user_id', 'lng', 'lat', 'day_time', 'week_time', 'month_time', 'year_time',
                             'period', 'period_er']
        # Train only when no cached model exists yet.
        model_path = os.path.join(self.dir, 'rf.model')
        if not os.path.exists(model_path):
            self.df.dropna(inplace=True)
            features = self.df[feature_name_list].values
            labels = self.df['activity'].values
            # 70/30 train/test split.
            x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=0.3)
            rf = RandomForestClassifier(oob_score=True, random_state=10, n_estimators=84, n_jobs=-1)
            rf.fit(x_train, y_train)
            joblib.dump(rf, model_path)
            print("测试集上的精度为: %.2f%%" % (rf.oob_score_ * 100))
        # Otherwise reuse the previously trained model.
        else:
            rf = joblib.load(model_path)
        # Decode numeric label strings back to activity names.
        activities_dict = {'0': 'Shopping', '1': 'Work', '2': 'Entertainment', '3': 'Sports', '4': 'Rest',
                           '5': 'Service', '6': 'Restaurant', '7': 'Travel', '8': 'Medical', '9': 'Art',
                           '10': 'Meeting', '11': 'Education'}
        single_user = self.pred_user[feature_name_list]
        # Predict this user's activity semantics.
        self.pred_user['activity_pred'] = rf.predict(single_user.to_numpy())
        self.pred_user['activity_pred'] = self.pred_user['activity_pred'].map(activities_dict)
        self.pred_user['activity_real'] = self.pred_user['activity'].map(activities_dict)
        # Persist predicted vs real activities.
        self.pred_user.to_csv(self.dist_path, index=None)
def data_analysis(rsc_path1, dist_path1):
    """Run the stage-1 cleaning; writes ``dist_path1/part1_result.csv``.

    :param rsc_path1: path of the raw check-in CSV
    :param dist_path1: directory the cleaned file and plots are written to
    :return: the user ids plotted in the check-in-count charts
    """
    return DatasetPreprocessing(rsc_path1, dist_path1).users
def period_mining(dist_path1, dist_path2):
    """Run the stage-2 period mining; writes ``dist_path2/part2_result.csv``.

    :param dist_path1: path of the cleaned check-in file (stage-1 output)
    :param dist_path2: directory the period-annotated file is written to
    :return: dict of mined periods per reference point
    """
    return PeriodMining(dist_path1, dist_path2).periods
def activity_semantic_recognition(dist_path2, dist_path3, user_id):
    """Run the stage-3 recognition; writes ``dist_path3/part3_result.csv``.

    :param dist_path2: path of the period-annotated file (stage-2 output)
    :param dist_path3: directory the prediction file is written to
    :param user_id: id of the user whose activities are recognised
    :return: None
    """
    extractor = FeatureExtraction(dist_path2)
    Model(extractor.df, dist_path3, user_id)
    return None
# Earlier local configuration kept for reference:
# if __name__ == '__main__':
#     data_analysis(r'D:\科研er\时空轨迹挖掘的数据集\已标记签到数据\NYC.csv', 'rsc')
#     period_mining('rsc/part1_result.csv', 'rsc')
#     activity_semantic_recognition('rsc/part2_result.csv', 'rsc', 15)
if __name__ == '__main__':
    # Full pipeline: clean -> mine periods -> recognise activities (user 217).
    data_analysis(r'E:\building\NYC.csv', r'E:\building\sf\result')
    period_mining(r'E:\building\sf\result\part1_result.csv', r'E:\building\sf\result')
    activity_semantic_recognition(r'E:\building\sf\result\part2_result.csv', r'E:\building\sf\result', 217)
| [
"631768226@qq.com"
] | 631768226@qq.com |
30b2b633485473169ebe3f7392c7b57e23c0e4d2 | da7a165522daea7c346693c5f32850017c482967 | /leetcode/60questions/347_top_k_frequent_elements.py | b13499eff1c603b9085c6ed2ac07a357fad804ac | [] | no_license | SShayashi/ABC | 19f8750919208c5ff8935638dbaab941c255f914 | 3cbfee0c5251c1bb0df6306166d8d4b33bf7bb2c | refs/heads/master | 2021-05-04T21:06:10.720367 | 2020-07-11T13:59:16 | 2020-07-11T13:59:29 | 119,886,572 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | from typing import List
class Solution:
def topKFrequent(self, nums: List[int], k: int) -> List[int]:
d = {}
for num in nums:
d[num] = d[num] + 1 if d.get(num, 0) else 1
tmp = list(d.items())
tmp.sort(key=lambda x: x[1], reverse=True)
ans = []
for i in range(k):
ans.append(tmp[i][0])
return ans
def maxheaplify(nums: List[int], i):
left = nums[i * 2 + 1]
right = nums[i * 2 + 2] if (i * 2 + 2) < len(nums) else -9999999
large_child_i = i * 2 + 1 if left > right else i * 2 + 2
if nums[i] < nums[large_child_i]:
nums[i], nums[large_child_i] = nums[large_child_i], nums[i]
maxheaplify(nums, i // 2)
def heaplify(nums: List[int]):
length = len(nums)
for i in reversed(range(length // 2)):
maxheaplify(nums, i)
return nums
y = [3, 5, 6, 8, 2, 3, 4, 5, 21, 1, 4, 5, 7, 9, 2, 22]
print(heaplify(y))
| [
"sshayashi0208@gmail.com"
] | sshayashi0208@gmail.com |
ed5333d3e2b4a592cf3ba18dedf61abc2eb6921a | 6104576463bde085993ca6aa2d614164b66bd4ba | /payment/urls.py | 7ea1f4b41ae8cc47263632fbc5dfa0b579852a14 | [] | no_license | Himanshu-goel86121/pk-accounts | 173a7882519c28ac8b65ca3c625317dd7d121792 | d31c5446df553fb45778d71045de644c3dfec91f | refs/heads/master | 2023-04-23T13:27:23.751607 | 2021-05-06T14:52:18 | 2021-05-06T14:52:18 | 285,996,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | # -*- coding: utf-8 -*-
from django.urls import path
from . import views
app_name = 'payment_app'
urlpatterns = [
path(r'add/', views.payment_add, name='add_page'),
path(r'get_challans/', views.get_challans, name='get_challans'),
path(r'add_payment/', views.add_payment, name='add_payment'),
path(r'delete/', views.payment_delete, name='delete_page'),
path(r'delete_payment/', views.delete_payment, name='delete_payment'),
path(r'add_bill/', views.payment_add_bill, name='add_page_bill'),
path(r'get_challans_bill/', views.get_challans_bill, name='get_challans_bill'),
path(r'add_payment_bill/', views.add_payment_bill, name='add_payment_bill'),
path(r'payment_print/', views.payment_print, name='payment_print'),
path(r'print_payment/', views.print_payment, name='print_payment'),
path(r'filter_date/', views.filter_date, name='filter_date'),
path(r'filter_client/', views.filter_client, name='filter_client'),
path(r'display/', views.payment_display, name='display_page'),
]
| [
"ashu.goel1993@hotmal.com"
] | ashu.goel1993@hotmal.com |
7bcae7c22efaf4c3e059873294b558207c8be9a3 | 43d8a8efde76712b939b87f355ebeca03646a31a | /features/steps/out_of_stock.py | f8c228ef6adb00810f4d6204851ee8e40405812e | [] | no_license | andreyafanasev/gettop | ef1d52000b88c96c9e2ec5893d96304f81fd9537 | 952ea4903ca06ea702b670267f566d60ea96e922 | refs/heads/master | 2022-12-15T04:12:25.456296 | 2020-09-11T22:01:44 | 2020-09-11T22:01:44 | 290,305,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | from behave import given, when, then
from selenium.webdriver.common.by import By
@then('Verify "Out of Stock" sign is shown')
def verify_out_of_stock(context):
context.app.out.verify_out_of_stock()
@then('Verify "Add to Cart" button is not shown')
def verify_add_cart_not_shown(context):
context.app.out.verify_add_cart_not_shown()
@then('Verify "Checkout" button is not shown')
def verify_checkout_not_shown(context):
context.app.out.verify_checkout_not_shown() | [
"afanasyev6@gmail.com"
] | afanasyev6@gmail.com |
f686b514cf81fb6daa6b08a1b1bbad3b099f06ce | e2c9c8d5176ecb75df24bad668d769db01a3ce55 | /patterns/creational/factory/no_factory_method.py | 5d95597bc9f6409956657fa606b91b9d6b11f4ad | [
"Apache-2.0"
] | permissive | Vyshnavmt94/Python_Design_Patterns | e172d0cdb77534861bca835684999ff8ad099db3 | 3703b3ee7b16e77de2bad68037e2c8542852900b | refs/heads/main | 2023-08-01T06:18:33.655393 | 2021-09-26T09:48:19 | 2021-09-26T09:48:19 | 404,251,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | # Python Code for Object Oriented Concepts without using Factory method
class FrenchLocalizer:
""" it simply returns the french version """
def __init__(self):
self.translations = {"car": "voiture", "bike": "bicyclette", "cycle": "cyclette"}
def localize(self, msg):
"""change the message using translations"""
return self.translations.get(msg, msg)
class SpanishLocalizer:
"""it simply returns the spanish version"""
def __init__(self):
self.translations = {"car": "coche", "bike": "bicicleta", "cycle": "ciclo"}
def localize(self, msg):
"""change the message using translations"""
return self.translations.get(msg, msg)
class EnglishLocalizer:
"""Simply return the same message"""
def localize(self, msg):
return msg
if __name__ == "__main__":
# main method to call others
f = FrenchLocalizer()
e = EnglishLocalizer()
s = SpanishLocalizer()
# list of strings
message = ["car", "bike", "cycle"]
for msg in message:
print(f.localize(msg))
print(e.localize(msg))
print(s.localize(msg))
print("\n")
| [
"vyshnav94.mec@gmail.com"
] | vyshnav94.mec@gmail.com |
ff99da7f9a431c6ffe09cca96a217b4f38518c7a | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/merge_20200722101228.py | fc5063ed0117fd9fbf7a41674a7bab7060ccc3e0 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | items = []
def mergeSort(data):
if len(data) > 1:
mid = len(data) // 2
leftArr = data[:mid]
rightArr= data[mid:]
# now to perform the merge
i = 0
j = 0
k = 0
while i < len(leftArr) and j < len(rightArr):
if leftArr[i] < rightArr[j]:
data[k] =leftArr[i]
i +=1
else:
data[k] = rightArr[j]
j +=1
| [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
236f09462e944cd91b96e07d6c3f2e30ce53400a | bb7c9be32325dfdf0b3fe9c49a0e2b0c19ee92ed | /DataSummarization.py | 6abbcb3e6fcc2fa813b5dfaf50251c549118442c | [] | no_license | Abhi141188/BusinessAnalyticsWithPython | 27b8282bca95749a831861c6d8f982f48717b33d | 5da3758e6fdf8dffdc760f550c1725caa18b1ca8 | refs/heads/master | 2023-04-30T08:35:51.912110 | 2021-05-16T16:06:40 | 2021-05-16T16:06:40 | 268,271,725 | 9 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,675 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 1 15:38:28 2020
@author: Abhinav
"""
#-------------------------------Data Summary-------------------------------
#Describe()- Used to get summary statistics in python.
#Describe Function gives the mean, std and IQR values.
#It analyzes both numeric and object series and also the DataFrame column sets of mixed data types.
# creation of DataFrame
import pandas as pd
import numpy as np
#Example 1:
a1 = pd.Series([1, 2, 3,4])
a1
a1.describe()
a2 = pd.Series(['q', 'r', 'r', 'r','q','s','p'])
a2
a2.describe()
info = pd.DataFrame({'numeric': [1, 2, 3, 4],
'object': ['p', 'q', 'r','e']
})
info
info.describe(include=[np.number])
info.describe(include=[np.object])
info.describe()
#Example 2:
#Create a Dictionary of series
d = {'Name':pd.Series(['Cathrine','Alisa','Bobby','Madonna','Rocky','Sebastian','Jaqluine',
'Rahul','David','Andrew','Ajay','Teresa']),
'Age':pd.Series([26,27,25,24,31,27,25,33,42,32,51,47]),
'Score':pd.Series([89,87,67,55,47,72,76,79,44,92,99,69])}
#Create a DataFrame
df = pd.DataFrame(d)
print (df)
#Descriptive or Summary Statistic of the numeric columns:
#Summary statistics
print(df.describe())
#Descriptive or Summary Statistic of the character columns:
#Summary statistics of character column
print(df.describe(include='object'))
#Descriptive or Summary Statistic of all the columns
#Summary statistics of both - character & numerical columns
print(df.describe(include='all'))
#--------------------------------------------------------------------------------------------------------------- | [
"noreply@github.com"
] | Abhi141188.noreply@github.com |
2999dad27794ebfaf5a6fdc0cba4e9a6ec9d4f4c | 4faf2963214ad24f1dcc759ea3dec6dd65ade856 | /mobile/tools/python/imagetools/imagetools.py | 2d0864d729bdc7c1eef4534938f1dfd41cc82b7c | [
"Apache-2.0",
"DOC"
] | permissive | xiebaiyuan/paddle-mobile | a8b6cd22a9ea08a3d84a7f6ed263fa5134f58f26 | 4a43612b35cecca9a17a848477ede3b578470e63 | refs/heads/master | 2021-06-02T14:01:42.368276 | 2021-04-27T12:29:23 | 2021-04-27T12:29:23 | 297,587,504 | 3 | 5 | Apache-2.0 | 2021-03-08T14:32:10 | 2020-09-22T08:39:45 | C++ | UTF-8 | Python | false | false | 1,976 | py | # coding=utf-8
import cv2
from array import array
def resize_take_rgbs(path, shape_h_w, SHOW_IMG=False):
print("[INFO] ---- resize_take_rgbs ---- start")
image = cv2.imread(path)
print("[INFO] image.shape:{}".format(image.shape))
print("[INFO] shape_h_w:{}".format(shape_h_w))
if SHOW_IMG:
cv2.imshow("before", image)
print_rgb(image[0, 0])
# image len may be for .just check it
# image.resize(shape_h_w)
image = cv2.resize(image, (shape_h_w[0], shape_h_w[1]))
if SHOW_IMG:
cv2.imshow("after", image)
print("[INFO] resized image.shape:{}".format(image.shape))
height = shape_h_w[0]
width = shape_h_w[1]
rs_ = []
gs_ = []
bs_ = []
for h in range(0, height):
for w in range(0, width):
'''
bs_.append(image[h, w, 0])
gs_.append(image[h, w, 1])
rs_.append(image[h, w, 2])
'''
bs_.append(image[w, h, 0])
gs_.append(image[w, h, 1])
rs_.append(image[w, h, 2])
# print image[2, 2, 0]/255.
print len(bs_)
print len(gs_)
print len(rs_)
print("[INFO] ---- resize_take_rgbs ---- end")
return bs_, gs_, rs_
def print_rgb((b, g, r)):
print "像素 - R:%d,G:%d,B:%d" % (r, g, b) # 显示像素值
#
# image[0, 0] = (100, 150, 200) # 更改位置(0,0)处的像素
#
# (b, g, r) = image[0, 0] # 再次读取(0,0)像素
# print "位置(0,0)处的像素 - 红:%d,绿:%d,蓝:%d" % (r, g, b) # 显示更改后的像素值
#
# corner = image[0:100, 0:100] # 读取像素块
# cv2.imshow("Corner", corner) # 显示读取的像素块
#
# image[0:100, 0:100] = (0, 255, 0); # 更改读取的像素块
#
# cv2.imshow("Updated", image) # 显示图像
#
# cv2.waitKey(0) # 程序暂停
def save_to_file(to_file_name, array):
with open(to_file_name, "wb") as file_handle:
array.tofile(file_handle)
| [
"xiebaiyuan@139.com"
] | xiebaiyuan@139.com |
767f0bce123733e1485ff5e88be6f2174139ff65 | c33307d2d2b25106c543c77c145200681dfa774c | /test/unit/test_portfolio.py | b77f3b47f2b1431567a32ac1cba826266ded6d94 | [] | no_license | jkparkspers/morningstar_stylebox | 99bfd6301828975b79127b81aa00c221717022b3 | bbd45ad0572424326e701942d14e18b24f2f0fd7 | refs/heads/main | 2023-01-24T04:23:18.516730 | 2020-12-06T17:09:39 | 2020-12-06T17:09:39 | 311,387,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,542 | py | import pytest
from security_coll.holding import Holding
from security_coll.portfolio import Portfolio
from test.unit.csv_holdings import hold0, hold1, hold2
def test_total0():
port = Portfolio()
h0 = Holding(hold0)
port.append(h0)
assert h0.total() == port.total()
def test_total1():
port = Portfolio()
h0 = Holding(hold0)
h1 = Holding(hold1)
port.append(h0)
port.append(h1)
assert h0.total() + h1.total() == port.total()
def test_total2():
port = Portfolio()
h0 = Holding(hold0)
h1 = Holding(hold1)
h2 = Holding(hold2)
port.append(h0)
port.append(h1)
port.append(h2)
subp = port.sub_port(lambda h: h['ticker'] == 'DFEOX' or h['ticker'] == 'DFEVX')
assert h0.total() + h1.total() == subp.total()
def test_total3():
port = Portfolio()
h0 = Holding(hold0)
h1 = Holding(hold1)
h2 = Holding(hold2)
port.append(h0)
port.append(h1)
port.append(h2)
assert 18099 == port.total(lambda k: k[1] == 'g')
def test_total4():
port = Portfolio()
h0 = Holding(hold0)
h1 = Holding(hold1)
h2 = Holding(hold2)
port.append(h0)
port.append(h1)
port.append(h2)
# 0.110908286 0.111018918 0.111240181 for mv mb mg
expect = {'mv': .110908286, 'mb': .111018918, 'mg': .111240181}
ratio = port.ratio(lambda k: k[0] == 'm')
assert len(ratio) == 3
assert pytest.approx(expect['mv']) == ratio['mv']
assert pytest.approx(expect['mb']) == ratio['mb']
assert pytest.approx(expect['mg']) == ratio['mg']
| [
"jkparksbus@outlook.com"
] | jkparksbus@outlook.com |
63dc8305e1993eda9e740b36caa14ef5f524cfa7 | 497421f9cbe8764f05ad4b62ba80a1d3bf4f50f0 | /Episodes/Episode6/reportcreator.py | f8da9d16d261e444891af89eace0a8172e33955a | [
"Apache-2.0"
] | permissive | bromount/making-apis-work-for-you | dae5973c1e5bb51d12b8d06e4787f063fbf4fe51 | da5215b1d118e7e20eec3e4c10ea8a99f3abf7ed | refs/heads/master | 2020-06-19T02:06:09.236935 | 2018-06-28T16:11:57 | 2018-06-28T16:11:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,193 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Author: vivek Mistry @[Vivek M.]
Date: 26-04-2018 07:01
Disclaimer:
All information, documentation, and code is provided to you AS-IS and should
only be used in an internal, non-production laboratory environment.
License:
Copyright 2017 BlueCat Networks, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import pandas as pd
import numpy as np
from jinja2 import Environment, FileSystemLoader
import datetime as dt
from weasyprint import HTML
reportName = "Printer List for Head Office"
reportfilename = reportName.replace(" ","_")+"_"+str(dt.datetime.now().strftime("%Y%m%d-%H%M%S"))
df = pd.read_csv("PrinterList.csv")
#print(df.head())
#print(df)
printer_report = pd.pivot_table(df,
index=["SubNet","PrinterModel"],
values=["PrinterName"],
aggfunc=[np.count_nonzero],
margins=True)
#print(printer_report)
# Import HTML Template using jinja2
env = Environment(loader=FileSystemLoader('.'))
template = env.get_template("report_template.html")
# Assign values to variables in report
template_vars = {"title" : reportName,
"data_table": printer_report.to_html(),
"currentdate":dt.datetime.now().strftime('%Y-%m-%d %H:%M')}
# get the html output from the template with the data
html_out = template.render(template_vars)
#print(html_out)
# create html report
with open(reportfilename+".html","w") as f:
f.write(html_out)
# Generate pdf using weasyprint
HTML(string=html_out).write_pdf(reportfilename+".pdf")
#HTML(string=html_out).write_pdf(args.outfile.name, stylesheets=["style.css"])
| [
"vivekm.mistry@gmail.com"
] | vivekm.mistry@gmail.com |
73007fc679f839da30894e1fbe6889499e25cafb | bdb15492f9a5b7e73bc55acf1557df1fc2f2f7c4 | /emptyFolderRemover.py | 48443e8fa3a673ae0a24071dafa1317dcc220419 | [] | no_license | mmouhib/empty-folders-remover-gui | 88027cd1bce9d63467695974369fc20d99b8752a | aeefa787e868fb087bacd90e74f0f5825597882d | refs/heads/master | 2023-06-17T12:40:30.797286 | 2021-05-27T19:11:10 | 2021-05-27T19:11:10 | 386,936,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,174 | py | from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QFileDialog
import remover
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(650, 349)
MainWindow.setMinimumSize(QtCore.QSize(650, 349))
MainWindow.setMaximumSize(QtCore.QSize(650, 349))
MainWindow.setAcceptDrops(False)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("img/window_icon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
MainWindow.setStyleSheet("background-color:#24272b;")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.path_value = QtWidgets.QLineEdit(self.centralwidget)
self.path_value.setEnabled(False)
self.path_value.setGeometry(QtCore.QRect(130, 190, 260, 31))
self.path_value.setStyleSheet("color:white;")
self.path_value.setObjectName("path_value")
self.browse_btn = QtWidgets.QPushButton(self.centralwidget)
self.browse_btn.setGeometry(QtCore.QRect(400, 190, 100, 31))
font = QtGui.QFont()
font.setFamily("JetBrains Mono ExtraBold")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.browse_btn.setFont(font)
self.browse_btn.setCursor(QtGui.QCursor(QtCore.Qt.OpenHandCursor))
self.browse_btn.setMouseTracking(False)
self.browse_btn.setAutoFillBackground(False)
self.browse_btn.setStyleSheet(" color: #333;\n"
" border: 2px solid #555;\n"
" border-radius: 20px;\n"
" border-style: outset;\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #888\n"
" );\n"
" padding: 5px;\n"
"")
self.browse_btn.setIconSize(QtCore.QSize(16, 16))
self.browse_btn.setAutoRepeat(False)
self.browse_btn.setAutoExclusive(False)
self.browse_btn.setAutoDefault(False)
self.browse_btn.setDefault(False)
self.browse_btn.setFlat(False)
self.browse_btn.setObjectName("browse_btn")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(20, 70, 611, 51))
font = QtGui.QFont()
font.setFamily("OCR A Extended")
font.setPointSize(36)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.label.setFont(font)
self.label.setStyleSheet("color:#429bf5;\n"
"font:bold;")
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(130, 180, 21, 21))
self.label_2.setStyleSheet("color:white;")
self.label_2.setObjectName("label_2")
self.start_btn = QtWidgets.QPushButton(self.centralwidget)
self.start_btn.setGeometry(QtCore.QRect(260, 250, 131, 41))
font = QtGui.QFont()
font.setFamily("JetBrains Mono ExtraBold")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.start_btn.setFont(font)
self.start_btn.setCursor(QtGui.QCursor(QtCore.Qt.OpenHandCursor))
self.start_btn.setMouseTracking(False)
self.start_btn.setAutoFillBackground(False)
self.start_btn.setStyleSheet(" color: #333;\n"
" border: 2px solid #555;\n"
" border-radius: 20px;\n"
" border-style: outset;\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #888\n"
" );\n"
" padding: 5px;\n"
"")
self.start_btn.setIconSize(QtCore.QSize(16, 16))
self.start_btn.setAutoRepeat(False)
self.start_btn.setAutoExclusive(False)
self.start_btn.setAutoDefault(False)
self.start_btn.setDefault(False)
self.start_btn.setFlat(False)
self.start_btn.setObjectName("start_btn")
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Empty Folder Remover"))
self.browse_btn.setText(_translate("MainWindow", "Browse"))
self.label.setText(_translate("MainWindow", "Empty Folder Remover"))
self.label_2.setText(_translate("MainWindow", "Path"))
self.start_btn.setText(_translate("MainWindow", "Start"))
def browser(self):
self.file_path = QFileDialog.getExistingDirectory(None, "Select Directory")
self.path_value.setText(self.file_path)
def on_browse_click(self):
self.browse_btn.clicked.connect(self.browser)
def deleter(self):
remover.remove(self.file_path)
def on_run_click(self):
self.start_btn.clicked.connect(self.deleter)
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
ui.on_browse_click()
ui.on_run_click()
MainWindow.show()
sys.exit(app.exec_())
| [
"mouhibouni321@gmail.com"
] | mouhibouni321@gmail.com |
bde3cdffacb74c379934c3b976da5ac53db144a3 | 51e93332b5b0221bb1b34d4b53f761d9d53e1b9c | /app/core/migrations/0001_initial.py | a88ea9f86995282d556a7ffaa56cd09c1bfd0e23 | [
"MIT"
] | permissive | MaistrenkoAnton/TDD | 286d0cb0d24c796f045eeac4d03f29ac3bf0ab5a | 20049d08f22aeeb626a7975bbee3dc5c95c76449 | refs/heads/master | 2020-06-02T03:32:20.396472 | 2019-08-05T12:24:57 | 2019-08-05T12:24:57 | 191,021,446 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,707 | py | # Generated by Django 2.1.9 on 2019-06-09 19:47
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=True)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| [
"it2015maistrenko@gmail.com"
] | it2015maistrenko@gmail.com |
86c20f3ee8cc15f60771ddb28d4a51a15ac54331 | 629d3eaaaf4c83cb9d4cb863460e17d59a0bcb1c | /sallos_. Recall . Gate.py | b78c00138d907fc7bbc8b4cda725def0aa6ceda8 | [] | no_license | Maupishon/Razor-Enhanced | a8a514ddae5e03f8ad8b795e880750afc157c2f1 | f686c8bd456728311757db22a13e25551bbfc9e1 | refs/heads/master | 2023-03-07T22:27:58.484194 | 2021-02-26T03:09:33 | 2021-02-26T03:09:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,765 | py | # Sallos . recall and . gate commands
# By MatsaMilla, Version 1.2 9/21/20
runebookDelay = 600
if Player.GetRealSkillValue('Magery') > 35:
mageRecall = True
else:
mageRecall = False
Misc.SendMessage('Recalling by Charges')
def makeRunebookList( ):
sortedRuneList = []
for i in Player.Backpack.Contains:
if i.ItemID == 0x22C5:
# opens runebook
Items.UseItem( i )
Misc.Pause(120)
if Journal.Search('You must wait'):
Misc.SendMessage('trying runebook again')
Items.UseItem( i )
Gumps.WaitForGump( 1431013363, 5000 )
bookSerial = i.Serial
runeNames = []
lineList = Gumps.LastGumpGetLineList()
# Remove the default 3 lines from the top of the list
lineList = lineList[ 3 : ]
# Remove the items before the names of the runes
endIndexOfDropAndDefault = 0
for i in range( 0, len( lineList ) ):
if lineList[ i ] == 'Set default' or lineList[ i ] == 'Drop rune':
endIndexOfDropAndDefault += 1
else:
break
# Add two for the charge count and max charge numbers
endIndexOfDropAndDefault += 2
runeNames = lineList[ endIndexOfDropAndDefault : ( endIndexOfDropAndDefault + 16 ) ]
runeNames = [ name for name in runeNames if name != 'Empty' ]
mageRecall = 5
chargeRecall = 2
gate = 6
for x in runeNames:
sortedRuneList.append( (bookSerial, x.lower(), mageRecall , chargeRecall , gate) )
mageRecall = mageRecall + 6
chargeRecall = chargeRecall + 6
gate = gate + 6
Gumps.CloseGump(1431013363)
Misc.Pause(runebookDelay)
Misc.SendMessage('Runebooks Updated', 66)
return sortedRuneList
def recall( str ):
for f in runeNames:
if str == f[1]:
Items.UseItem(f[0])
Gumps.WaitForGump(1431013363, 1000)
Gumps.SendAction(1431013363, f[2])
Misc.SendMessage('Recalling to ' + str,11)
def chargeRecall( str ):
for f in runeNames:
if str == f[1]:
Items.UseItem(f[0])
Gumps.WaitForGump(1431013363, 1000)
Gumps.SendAction(1431013363, f[3])
Misc.SendMessage('Recalling to ' + str,11)
def gate( str ):
for f in runeNames:
if str == f[1]:
Items.UseItem(f[0])
Gumps.WaitForGump(1431013363, 1000)
Gumps.SendAction(1431013363, f[4])
Misc.SendMessage('Gating ' + str,11)
def FindItem( itemID, container, color = -1, ignoreContainer = [] ):
'''
Searches through the container for the item IDs specified and returns the first one found
Also searches through any subcontainers, which Misc.FindByID() does not
'''
ignoreColor = False
if color == -1:
ignoreColor = True
if isinstance( itemID, int ):
foundItem = next( ( item for item in container.Contains if ( item.ItemID == itemID and ( ignoreColor or item.Hue == color ) ) ), None )
elif isinstance( itemID, list ):
foundItem = next( ( item for item in container.Contains if ( item.ItemID in itemID and ( ignoreColor or item.Hue == color ) ) ), None )
else:
raise ValueError( 'Unknown argument type for itemID passed to FindItem().', itemID, container )
if foundItem != None:
return foundItem
subcontainers = [ item for item in container.Contains if ( item.IsContainer and not item.Serial in ignoreContainer ) ]
for subcontainer in subcontainers:
foundItem = FindItem( itemID, subcontainer, color, ignoreContainer )
if foundItem != None:
return foundItem
def checkRegs():
if (FindItem(0x0F7A , Player.Backpack) and FindItem(0x0F86 , Player.Backpack) and FindItem(0x0F7B , Player.Backpack) ):
return True
else:
return False
def parseJournal (str):
# Fetch the Journal entries (oldest to newest)
regularText = Journal.GetTextBySerial(Player.Serial)
# Reverse the Journal entries so that we read from newest to oldest
regularText.Reverse()
# Read back until the item ID was started to see if it succeeded
for line in regularText[ 0 : len( regularText ) ]:
#if line == str:
if str in line:
line = line.split(str + ' ', 1)[1]
Journal.Clear()
return line
playerSerialCheck = Misc.ReadSharedValue('playerSerial')
runeNames = Misc.ReadSharedValue('runeNames'+str(Player.Serial))
if runeNames == 0:
Misc.SendMessage('Reading Runebooks, please wait', 33)
runeNames = makeRunebookList()
Misc.Pause(500)
Misc.SetSharedValue('runeNames'+str(Player.Serial), runeNames)
else:
Misc.SendMessage('Runes Still In Memory', 66)
Journal.Clear()
while True:
if Journal.SearchByName(". recall", Player.Name):
if mageRecall and checkRegs():
recallLocation = parseJournal('. recall')
recall(recallLocation.lower())
Misc.NoOperation()
else:
recallLocation = parseJournal('. recall')
chargeRecall(recallLocation.lower())
Misc.NoOperation()
Journal.Clear()
elif Journal.SearchByName(". gate", Player.Name):
gateLocation = parseJournal('. gate')
gate(gateLocation.lower())
Journal.Clear()
Misc.Pause(50)
| [
"noreply@github.com"
] | Maupishon.noreply@github.com |
e7722f456b7ae1a30993a3e08de9a4519408e1f3 | 4c0622c1b8a18d2c259083f0ae2bad7f8edc113a | /subgap-cli.py | 2925a769ced216f69213289b8a4ea29d75cc8b67 | [
"MIT"
] | permissive | xCuri0/subgap-cli | 135f5b33848d8ad6f053f95dfe76a345c88d61d6 | b8283cc1679acdaa6624224718c73f942cd5d25b | refs/heads/master | 2020-04-24T18:23:55.765031 | 2019-02-24T03:40:55 | 2019-02-24T03:40:55 | 172,178,401 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | #!/usr/bin/python
import sys
import os
from sty import fg
from googleapiclient.discovery import build
key = open(os.path.join(sys.path[0], './key.txt')).read().strip()
service = build('youtube', 'v3', developerKey=key)
pewdiepiesubs = service.channels().list(
part='statistics',
id='UC-lHJZR3Gqxm24_Vd_AJ5Yw'
).execute()['items'][0]['statistics']['subscriberCount']
tseriessubs = service.channels().list(
part='statistics',
id='UCq-Fj5jknLsUf-MWSy4_brA'
).execute()['items'][0]['statistics']['subscriberCount']
print(fg.magenta + "PewDiePie is at " + str(pewdiepiesubs) + " subs")
print(fg.red + "T-Series is at " + str(tseriessubs) + " subs")
print(fg.white + "Sub gap is " + str(int(pewdiepiesubs) - int(tseriessubs)) + " subs")
| [
"zkqri0@gmail.com"
] | zkqri0@gmail.com |
6cb5204699f5892e240fbae90c57b1a07a9e4450 | aee07d7918af529f2d1a7a47e9dae517c0b9bf0c | /main.py | 74621317b2b11fd74716ad2949524a09579dfb37 | [] | no_license | Kmax607/TicTacToe- | aa2890a534acd6dade307226550d4ff86d0fa7a3 | d776f17f7177223c18bc1209c660397db1a11493 | refs/heads/master | 2022-05-23T20:56:22.227142 | 2020-04-26T22:44:38 | 2020-04-26T22:44:38 | 258,606,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,882 | py | from colors import red, blue, green, yellow
import random
board = [blue("1"), blue("2"), blue("3"), blue("4"), blue("5"), blue("6"), blue("7"), blue("8"), blue("9")]
def print_board():
global x_wins
global o_wins
global x_name
global o_name
if x_num_wins > 4 and o_name == "COM":
print(board[0] + yellow("|") + board[1] + yellow("|") + board[2])
print(board[3] + yellow("|") + board[4] + yellow("|") + board[5])
print(board[6] + yellow("|") + board[7] + yellow("|") + board[8])
print()
elif o_num_wins > 4 and x_name == "COM":
print(board[0] + yellow("|") + board[1] + yellow("|") + board[2])
print(board[3] + yellow("|") + board[4] + yellow("|") + board[5])
print(board[6] + yellow("|") + board[7] + yellow("|") + board[8])
print()
else:
print(board[0] + "|" + board[1] + "|" + board[2])
print(board[3] + "|" + board[4] + "|" + board[5])
print(board[6] + "|" + board[7] + "|" + board[8])
print()
def change_to_X(pos):
    """Claim board cell *pos* (0-based index) for player X."""
    board[pos] = red("X")
def change_to_O(pos):
    """Claim board cell *pos* (0-based index) for player O."""
    board[pos] = green("O")
# ---- Match state --------------------------------------------------------
x_won = o_won = False          # set by the win checkers when a side completes a line
turns = 0                      # moves played this game (a win pushes it past 9)
play_again = False             # set by new_game() once a match finishes
x_turn = True                  # X always opens
# ---- Player identities and running score --------------------------------
x_name, o_name = "X", "O"      # replaced with user input by play_game()
x_num_wins = o_num_wins = 0
#Determines if X won
def x_wins():
    """Check every line for three X marks; record and announce an X win.

    On a win: prints the final board and a victory message, flags the
    game as over (``x_won``), pushes ``turns`` past the 9-move limit so
    the turn loop exits, bumps X's score, and returns the new score.
    Returns None when X has not won (callers only read the globals, not
    the return value).
    """
    global x_won
    global turns
    global x_num_wins
    # The eight winning lines: three rows, three columns, two diagonals
    # (checked in the same order as the original branch chain).
    lines = ((0, 1, 2), (3, 4, 5), (6, 7, 8),
             (0, 3, 6), (1, 4, 7), (2, 5, 8),
             (0, 4, 8), (2, 4, 6))
    for a, b, c in lines:
        if board[a] == board[b] == board[c] == red("X"):
            print_board()
            print(red(x_name + " wins!"))
            x_won = True
            turns += 7  # force the turn loop past its 9-move limit
            x_num_wins += 1
            return x_num_wins
#Determines if O won
def o_wins():
    """Check every line for three O marks; record and announce an O win.

    On a win: prints the final board and a victory message, flags the
    game as over (``o_won``), pushes ``turns`` past the 9-move limit so
    the turn loop exits, bumps O's score, and returns the new score.
    Returns None when O has not won (callers only read the globals, not
    the return value).
    """
    global o_won
    global turns
    global o_num_wins
    # The eight winning lines: three rows, three columns, two diagonals
    # (checked in the same order as the original branch chain).
    lines = ((0, 1, 2), (3, 4, 5), (6, 7, 8),
             (0, 3, 6), (1, 4, 7), (2, 5, 8),
             (0, 4, 8), (2, 4, 6))
    for a, b, c in lines:
        if board[a] == board[b] == board[c] == green("O"):
            print_board()
            print(green(o_name + " wins!"))
            o_won = True
            turns += 7  # force the turn loop past its 9-move limit
            o_num_wins += 1
            return o_num_wins
#After a game ends, this function displays the scoreboard and decides whether another game should be played
def new_game():
    """After a finished game, show the scoreboard and prompt for a rematch.

    Does nothing unless the game is actually over (a win, or 9 turns
    played).  The prompt loops until an affirmative answer is given, so
    whenever this returns a value it is always True.
    """
    global play_again
    # Guard clause: mid-game calls fall straight through.
    if not (x_won or o_won or turns >= 9):
        return None
    # Scoreboard banter keyed off the current win margin.
    margin = x_num_wins - o_num_wins
    if margin == 0:
        print("It's all tied up!")
    elif margin == 1:
        print(red(x_name) + " is in the lead!")
    elif margin == -1:
        print(green(o_name) + " is in the lead!")
    elif margin == 2:
        print(red(x_name) + " is pulling away!")
    elif margin == -2:
        print(green(o_name) + " is pulling away!")
    elif margin > 2:
        print(red(x_name) + " has a commanding lead!")
    else:
        print(green(o_name) + " has a commanding lead!")
    print(red(x_name + ": " + str(x_num_wins)))
    print(green(o_name + ": " + str(o_num_wins)))
    # Keep asking until some form of "yes" is entered.
    answer = input("Play again? ")
    while answer not in ("Yes", "yes", "Y", "y"):
        print("Enter 'Yes' or 'Y' to play again")
        answer = input("Play again? ")
    play_again = True
    return play_again
#Convenience wrapper that evaluates both players' win conditions after each move
def check_wins():
    """Evaluate the board for a winner on either side (thin wrapper)."""
    for check in (x_wins, o_wins):
        check()
#First game only
def play_game():
    """Run the first match: collect player names, then alternate turns.

    A player named "COM" is driven by ai_logic().  After every move the
    win/tie state is evaluated and new_game() (which no-ops mid-game)
    handles the scoreboard and rematch prompt once a game ends.
    """
    global turns
    global x_won
    global o_won
    global x_name
    global o_name
    global play_again
    global x_turn
    global position
    print("Welcome to Tic Tac Toe! Enter your name or enter COM for a computer player. Beat the computer 5 times to win a prize!")
    print()
    x_name = input("Player 1: ")
    o_name = input("Player 2: ")
    while x_won == False and o_won == False and turns < 9:
        while turns < 9:
            print_board()
            if x_turn == True:
                if x_name == "COM":
                    # ai_logic() updates x_turn itself via its global declaration.
                    ai_logic()
                else:
                    position = int(input("Choose a position: ")) - 1
                    # An occupied cell costs no turn: undo the turn counter
                    # (the loop re-adds 1 below) and keep it X's move.
                    # BUGFIX: the O-occupied check was a separate `if` rather
                    # than `elif`, so picking an X-occupied cell fell through
                    # to the `else` and handed the move to O.
                    if board[position] == red("X"):
                        print("Invalid position")
                        x_turn = True
                        turns -= 1
                    elif board[position] == green("O"):
                        print("Invalid position")
                        x_turn = True
                        turns -= 1
                    else:
                        x_turn = False
                        change_to_X(position)
            elif x_turn == False:
                if o_name == "COM":
                    ai_logic()
                else:
                    position = int(input("Choose a position: ")) - 1
                    # Mirror of X's branch: invalid picks keep it O's move.
                    if board[position] == red("X"):
                        print("Invalid position")
                        x_turn = False
                        turns -= 1
                    elif board[position] == green("O"):
                        print("Invalid position")
                        x_turn = False
                        turns -= 1
                    else:
                        x_turn = True
                        change_to_O(position)
            turns += 1
            check_wins()
            if turns >= 9 and x_won == False and o_won == False:
                print("TIE!")
            new_game()
            if play_again == True:
                # Reset all per-game state and relabel the board cells.
                x_won = False
                o_won = False
                turns = 0
                for cell in range(9):
                    board[cell] = blue(str(cell + 1))
                play_new_game()
    return x_turn
#Logic for computer players
#Logic for computer players
def ai_logic():
    """Pick the computer's move and store it in the global ``position``.

    Strategy (in priority order):
      1. offense  -- complete a line that already holds two O's,
      2. defense  -- block a line that already holds two X's,
      3. fallback -- a random free cell (random cell if somehow none free).

    Fixes over the previous hand-unrolled version: several blank-cell checks
    tested the wrong cell (e.g. comparing board[4] against blue("2")), the
    vertical offensive lines were missing entirely, one row was duplicated,
    and the random fallback could pick an occupied cell.
    """
    global position
    # All eight winning lines: rows, columns, diagonals.
    win_lines = ((0, 1, 2), (3, 4, 5), (6, 7, 8),
                 (0, 3, 6), (1, 4, 7), (2, 5, 8),
                 (0, 4, 8), (2, 4, 6))

    def _is_free(i):
        # A free cell still shows its blue position number.
        return board[i] == blue(str(i + 1))

    def _winning_move(mark):
        # Return the single free cell of a line already holding two `mark`s.
        for line in win_lines:
            marked = [i for i in line if board[i] == mark]
            free = [i for i in line if _is_free(i)]
            if len(marked) == 2 and len(free) == 1:
                return free[0]
        return None

    com_position = _winning_move(green("O"))       # offense first
    if com_position is None:
        com_position = _winning_move(red("X"))     # then defense
    if com_position is None:
        free_cells = [i for i in range(9) if _is_free(i)]
        com_position = random.choice(free_cells) if free_cells else random.randint(0, 8)
    position = com_position
    return position
#Plays all games excluding the first one
def play_new_game():
global turns
global x_won
global o_won
global x_name
global o_name
global play_again
global x_turn
global position
while x_won == False and o_won == False and turns < 9:
while turns < 9:
print_board()
if x_turn == True:
if x_name == "COM":
ai_logic()
else:
position = input("Choose a position: ")
position = int(position) - 1
if board[position] == red("X"):
print("Invalid position")
x_turn = True
turns -= 1
board[position] = red("X")
if board[position] == green("O"):
print("Invalid position")
x_turn = True
turns -= 1
board[position] = green("O")
else:
x_turn = False
change_to_X(position)
elif x_turn == False:
if o_name == "COM":
ai_logic()
else:
position = input("Choose a position: ")
position = int(position) - 1
x_turn = True
if board[position] == red("X"):
print("Invalid position")
x_turn = False
turns -= 1
board[position] = red("X")
elif board[position] == green("O"):
print("Invalid position")
x_turn = False
turns -= 1
board[position] = green("O")
else:
x_turn = True
change_to_O(position)
turns += 1
check_wins()
if turns >= 9 and x_won == False and o_won == False:
print("TIE!")
new_game()
if play_again == True:
x_won = False
o_won = False
turns = 0
board[0] = blue("1")
board[1] = blue("2")
board[2] = blue("3")
board[3] = blue("4")
board[4] = blue("5")
board[5] = blue("6")
board[6] = blue("7")
board[7] = blue("8")
board[8] = blue("9")
play_new_game()
return x_turn
play_game() | [
"max.kraus607@gmail.com"
] | max.kraus607@gmail.com |
710826787f7469b5c8d8e68f530a894b8067623e | f24f8a5cf1580470cf616705a220027feac9b177 | /awesome/api/awesome/celery.py | ecb47c15ebd34979cbb44196e89352deda7f603a | [] | no_license | tvtrong/restapi | 4f5eb4ad545ed9dd7847f63994957fdc76fc3eba | c3da498108df1e7950ea2cc003dd75f0fe5a1b60 | refs/heads/master | 2022-12-25T19:39:45.627411 | 2020-10-10T12:39:33 | 2020-10-10T12:39:33 | 302,898,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | import os
from celery import Celery
# Ensure Django settings are importable before Celery configures itself.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "awesome.settings")
# Celery application; settings keys are read from Django's CELERY_* namespace.
celery_app = Celery("awesome")
celery_app.config_from_object("django.conf:settings", namespace="CELERY")
# Discover tasks.py modules in all installed Django apps.
celery_app.autodiscover_tasks()
| [
"phongtaitieu@gmail.com"
] | phongtaitieu@gmail.com |
0014d8ae9b0a477d911966eee25170baa3d1c4e9 | c134fb83591fb4c1aa048e3c67e3e8d303dce09e | /physics.py | 0f76b82c2767008e5687f1eb579e7ad25b5808f7 | [] | no_license | JordiBusquets/Planet-dynamics | 84c5c87f94937a31a04ea0a80831f04c73df1862 | b57dd4a3e352cb59cda0eea26abc63a9899bbe1e | refs/heads/main | 2023-02-18T09:43:53.020208 | 2021-01-15T16:41:33 | 2021-01-15T16:41:33 | 329,704,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,516 | py | from planet import planet, distance_between_planets, static_earth
from typing import List
from constants import G
def gravitational_force(p1, p2):
    """Magnitude of the Newtonian gravitational force between two planets."""
    separation = distance_between_planets(p1, p2)
    return G * p1.mass * p2.mass / separation ** 2
def gravitational_acceleration(f, p):
    """Magnitude of the acceleration of body ``p`` under force ``f``.

    Newton's second law gives a = F / m.  The previous version returned
    (F / m) ** 0.5, which is dimensionally wrong and also broke the
    circular-orbit speed v = sqrt(a * r) computed in
    stable_circular_orbit_earth().
    """
    return f / p.mass
def compute_accelerations(planets: List[planet]):
    """Accumulate pairwise gravitational accelerations onto every planet.

    Clears each planet's accumulated acceleration, then for every unordered
    pair adds equal-and-opposite acceleration components along the line
    joining them.  Returns 0 (status code kept for callers).
    """
    for body in planets:
        body.clear_acceleration()
    for idx, first in enumerate(planets):
        for second in planets[idx + 1:]:
            force = gravitational_force(first, second)            # absolute force
            acc_first = gravitational_acceleration(force, first)  # on `first`
            acc_second = gravitational_acceleration(force, second)  # on `second`
            dist = distance_between_planets(first, second)
            # Unit vector from `first` towards `second`.
            ux = (second.x - first.x) / dist
            uy = (second.y - first.y) / dist
            uz = (second.z - first.z) / dist
            first.append_acceleration(acc_first * ux, acc_first * uy, acc_first * uz)
            second.append_acceleration(-acc_second * ux, -acc_second * uy, -acc_second * uz)
    return 0
def stable_circular_orbit_earth(p, x):
    """Create an earth at distance ``x`` from ``p`` moving at circular-orbit
    speed v = sqrt(a * x) in the +y direction."""
    orbiter = static_earth("earth")
    orbiter.x = x
    force = gravitational_force(p, orbiter)
    accel = gravitational_acceleration(force, orbiter)
    orbiter.y_v = (accel * x) ** 0.5
    return orbiter
| [
"77457232+JordiBusquets@users.noreply.github.com"
] | 77457232+JordiBusquets@users.noreply.github.com |
5eb02b5c4f92fe814ca18e492ea5a382e792fd1c | 9dcae8f4687f119f4eec20b6e14bae54710d9506 | /MNI2CTAffine.py | 2d225dfa46db61a65d444115c23b44205162c4c9 | [
"BSD-2-Clause"
] | permissive | msharrock/NPH_Prediction | fc2e50a7142d3370e245ebdc05d6b117da847205 | 13547c4f441b14ec7bf899f21e66ae73523ef79f | refs/heads/master | 2020-04-24T11:35:18.251470 | 2019-01-23T21:35:33 | 2019-01-23T21:35:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py |
import numpy as np
import os
import CTtools
import nibabel as nib
from subprocess import call
import sys
import ipdb
# NOTE: this is a Python 2 script (print statement below).
# Usage: python MNI2CTAffine.py <ct_scan.nii.gz>
ct_scan_path = str(sys.argv[1])
# Ashu-prefixed scans live under a separate directory tree.
if 'Ashu' in ct_scan_path:
	BASE = 'Ashu_Files'
else:
	BASE = ''
MNI_152 = os.path.join(os.getcwd(),'MNI152_T1_1mm.nii.gz')
# Affine (scan -> MNI) and its inverse, named after the scan file.
nameOfAffineMatrix = ct_scan_path[:ct_scan_path.find('.nii.gz')]+'_affine.mat'
nameOfInvMatrix = ct_scan_path[:ct_scan_path.find('.nii.gz')]+'_inverse.mat'
ct_scan_wodevice = ct_scan_path
subject_name = os.path.split(ct_scan_path)[-1]
subject_name = subject_name[:subject_name.find('.nii.gz')]
# Segmentation in MNI space (input) and in the scan's original space (output).
segmentedMNI = os.path.join(BASE,'Final_Predictions', subject_name+'_MNI152.segmented1.nii.gz')
segmentedORIG = os.path.join(BASE,'Transformed_Predictions', subject_name+'.segmented.nii.gz')
orig_name = os.path.join(BASE,'Scans', subject_name+'.nii.gz')
try:
    # FSL tools: invert the affine, then warp the MNI-space segmentation back
    # to the original scan with nearest-neighbour (label-preserving) sampling.
    call(['convert_xfm', '-omat', nameOfInvMatrix, '-inverse', nameOfAffineMatrix])
    call(['flirt','-in', segmentedMNI, '-ref', orig_name, '-applyxfm', '-init', nameOfInvMatrix, '-out', segmentedORIG, '-interp', 'nearestneighbour'])
except:
    # NOTE(review): bare except hides the actual failure -- consider catching
    # OSError/CalledProcessError and reporting it.
    print 'something did not work'
| [
"azhang@ece.ucsb.edu"
] | azhang@ece.ucsb.edu |
7288af0e631367a1e5713665c9384a348c96d313 | 216960ee4ae323c2050d51dac413f49fd7fac5a9 | /1.py | ef1aab01f4ec3ea01ee06b89abb67bcfa1c0eab5 | [] | no_license | sergiypotapov/randomEngDay | 06ae10c091a01b4f8a34a9e2cf08fc6632684d40 | e68d95c9ad93bcc78c8d91aa9f4b9bff6e9bd73c | refs/heads/master | 2021-01-10T04:14:32.378871 | 2016-03-07T22:25:30 | 2016-03-07T22:25:30 | 53,363,666 | 0 | 0 | null | null | null | null | WINDOWS-1251 | Python | false | false | 1,006 | py | __author__ = 'Serg'
import kivy
import cycle
import todays_day
kivy.require('1.9.1') # replace with your current kivy version !
from kivy.app import App
from kivy.uix.label import Label
#TODO on button press the app shows a result; pressing again shows another result, and so on
#TODO if the result is not OK - show one of several sad pictures
#TODO if the result is OK - show one of several happy pictures
#TODO build for Android
class MyApp(App):
    """Kivy app showing today's random pick; rendered huge when it matches
    today's value."""

    def build(self):
        pick = str(cycle.cycle())
        today = str(todays_day.today_day())
        print("random is", pick, "today is", today)
        if today == pick:
            return Label(text=pick, font_size='900sp')
        return Label(text=pick)
return Label(text=random)
# Launch the Kivy application when run as a script.
if __name__ == '__main__':
    MyApp().run()
| [
"sergiy.potapov@gmail.com"
] | sergiy.potapov@gmail.com |
746774617ed9e37b03bbc24665b63b4a592bf514 | baf3996414315ffb60470c40c7ad797bf4e6897f | /02_ai/1_ml/4_ml_mastery/code/chapter_09/shuffle_split.py | 300dc2268fc15665661c5450849e0a375e9836d3 | [
"MIT"
] | permissive | thiago-allue/portfolio | 8fbbecca7ce232567aebe97c19944f444508b7f4 | 0acd8253dc7c5150fef9b2d46eead3db83ca42de | refs/heads/main | 2023-03-15T22:10:21.109707 | 2022-09-14T17:04:35 | 2022-09-14T17:04:35 | 207,919,073 | 0 | 0 | null | 2019-11-13T18:18:23 | 2019-09-11T22:40:46 | Python | UTF-8 | Python | false | false | 733 | py | # Evaluate using Shuffle Split Cross Validation
from pandas import read_csv
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
filename = 'pima-indians-diabetes.data.csv'
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
dataframe = read_csv(filename, names=names)
array = dataframe.values
X = array[:,0:8]
Y = array[:,8]
n_splits = 10
test_size = 0.33
seed = 7
kfold = ShuffleSplit(n_splits=n_splits, test_size=test_size, random_state=seed)
model = LogisticRegression()
results = cross_val_score(model, X, Y, cv=kfold)
print("Accuracy: %.3f%% (%.3f%%)" % (results.mean()*100.0, results.std()*100.0)) | [
"thiago.allue@yahoo.com"
] | thiago.allue@yahoo.com |
c35f4aa6ee2233583ca42b981127e5ccf42f4a33 | 5bd405802f4c84141faf51c2fafea4146cbf36e2 | /app/forms.py | cea94769d1c76f0cbc86a441b31f412980e2efae | [] | no_license | konigd/miazazi.md | dee4734bca26ced4f50dd8be8e77214b494f7bbf | f79d19ccec658c515b41c6b7b111e15fd171ec37 | refs/heads/master | 2021-01-21T13:53:35.137203 | 2015-08-29T12:45:01 | 2015-08-29T12:45:01 | 41,569,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,897 | py | from flask.ext.wtf import Form
from wtforms import TextField, IntegerField, PasswordField, SubmitField, StringField
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.fields.html5 import EmailField
from wtforms.validators import Required, EqualTo
from models import Place, Category, User
from wtforms.widgets import TextArea
def get_all_locations():
    """Query factory for location select fields: every Place row."""
    locations = Place.query.all()
    return locations
def category_list():
    """Query factory for category select fields: every Category row."""
    categories = Category.query.all()
    return categories
class SignupForm(Form):
    """User registration form; field labels are user-facing Romanian strings."""
    first_name = TextField("Nume", [Required()])
    last_name = TextField("Prenume", [Required()])
    # Drop-down populated from the Place table.
    place = QuerySelectField("Localitatea ",[Required()], query_factory=get_all_locations, get_label="name")
    email = EmailField("Adresa email",[Required()])
    password = PasswordField("Parola",[Required()])
    # Must match `password`.
    password_confirmation = PasswordField("Confirma parola",[Required(), EqualTo('password')])
    submit = SubmitField("Trimite")
class LoginForm(Form):
    """Login form with custom credential validation."""
    email = EmailField('Adresa email',[Required()])
    password = PasswordField('Parola',[Required()])
    submit = SubmitField('Login')
    def validate(self):
        """Run the standard WTForms validation, then check credentials.

        NOTE(review): passwords are compared in plain text and the error
        message reveals whether an email is registered (user enumeration) --
        consider hashed passwords and a generic failure message.
        """
        if not Form.validate(self):
            return False
        user = User.query.filter_by(email=self.email.data).first()
        if user is None:
            self.email.errors.append('Nu exista utilizator cu asa email %s' % self.email.data)
            return False
        if user.password != self.password.data:
            self.password.errors.append('Parola este incorecta')
            return False
        return True
class EventForm(Form):
    """Event creation form; field labels are user-facing Romanian strings."""
    title = TextField("Denumirea evenimentului")
    description = StringField("Detalii despre eveniment !", widget=TextArea())
    image_url = TextField(" Adresa imaginei ")
    # Drop-downs populated from the Category and Place tables.
    category = QuerySelectField("Categoria evenimentului", query_factory=category_list, get_label="name")
    place = QuerySelectField("Regiunea/localitatea desfasurarii evenimentului", query_factory=get_all_locations, get_label="name")
    submit= SubmitField ('Creeaza eveniment')
| [
"soleil.mdsv@gmail.com"
] | soleil.mdsv@gmail.com |
a33f5339f1c55af5c1db0609b9f50259580b5e63 | 80b2b1fad6ab060f2b6d65a5adf4ebc9a726bb96 | /web_app/modules/account/views/confirm.py | d4cb6cecef082c749cd221a52b26d01106e59d48 | [
"MIT"
] | permissive | ai404/esafe-platform | 71744551a10bfecea521374c868117c78bdb4e68 | 2e29ab2d3deb81fd999b74a2f6844c54a836c6d8 | refs/heads/master | 2022-12-07T10:39:06.180043 | 2020-08-30T18:11:08 | 2020-08-30T18:11:08 | 291,329,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | import datetime
from .. import bp_account
from database.models import Token, User
from web_app.helpers import confirm_token
from flask import flash, url_for, redirect, g
@bp_account.route('/confirm/<token>')
def confirm_email(token):
"""Confirm a user's new account"""
email = confirm_token(token)
if not email:
flash('Invalid or Expired Token!', 'error')
return redirect(url_for('account.login'))
user = User.query.filter_by(email=email).first()
if user.confirmed:
flash('Account already confirmed. Please login.', 'info')
else:
tk = Token.query.filter_by(token_value=token).first()
tk.used = True
user.confirmed = True
user.confirmed_on = datetime.datetime.now()
g.session.add(tk)
g.session.add(user)
g.session.commit()
flash('You have confirmed your account. Thanks!', 'info')
return redirect(url_for('main.dashboard'))
| [
"khalifa.abde@outlook.com"
] | khalifa.abde@outlook.com |
ef38ff29016b720bd6ec32ef7af2217c4f3c351f | e9038a8c25b3642707cb4c698396212de2ba7ba1 | /main.py | a3d53ba2a83dcc3e4bb05d679cae308dbeac12e3 | [] | no_license | eloygeenjaar/pytorch-vae | f928fa02c4e1b733a90c830a98d327238d2a0f4e | 9dc44aae64f0e2896427ce955a48733d6315bb2d | refs/heads/master | 2022-02-11T10:06:28.251810 | 2019-08-19T01:42:02 | 2019-08-19T01:42:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,807 | py | import os
import sys
import numpy as np
import torch
from tensorboardX import SummaryWriter
from torch import optim
from data_loader.data_loader import data_loaders
from model.bernoulli_vae import BernoulliVAE
from model.conv_vae import ConvVAE
from utils.config import get_args
from utils.draw_figs import draw_figs
# --- Experiment setup: args, device, data, model, optimizer, LR schedule ---
args = get_args()
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
args.cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if args.cuda else "cpu")
train_loader, test_loader = data_loaders(args)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed_all(args.seed)
writer = SummaryWriter(args.out_dir)
# Choose the decoder family from the CLI; default to the convolutional VAE.
model_class = BernoulliVAE if args.arch == "bernoulli" else ConvVAE
mean_img = train_loader.dataset.get_mean_img()
model = model_class(
    device=device,
    img_shape=args.img_shape,
    h_dim=args.h_dim,
    z_dim=args.z_dim,
    analytic_kl=args.analytic_kl,
    mean_img=mean_img,
).to(device)
optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, eps=1e-4)
if args.no_iwae_lr:
    # Plateau-based decay; factor 10**(-1/7) matches the IWAE paper schedule.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode="min", patience=100, factor=10 ** (-1 / 7)
    )
else:
    # IWAE schedule: decay at epochs 1, 4, 13, 40, ... (cumulative powers of 3).
    milestones = np.cumsum([3 ** i for i in range(8)])
    scheduler = optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=milestones, gamma=10 ** (-1 / 7)
    )
def train(epoch):
    """Run one training epoch, logging every `args.log_interval` steps."""
    for batch_idx, (batch, _) in enumerate(train_loader):
        optimizer.zero_grad()
        outputs = model(batch, mean_n=args.mean_num, imp_n=args.importance_num)
        loss_1 = -outputs["elbo"].cpu().data.numpy().mean()
        loss = outputs["loss"].mean()
        loss.backward()
        optimizer.step()
        model.train_step += 1
        if model.train_step % args.log_interval != 0:
            continue
        progress = 100.0 * batch_idx / len(train_loader)
        print(
            "Train Epoch: {} ({:.0f}%)\tLoss: {:.6f}".format(
                epoch, progress, loss.item()
            )
        )
        writer.add_scalar("train/loss", loss.item(), model.train_step)
        writer.add_scalar("train/loss_1", loss_1, model.train_step)
def test(epoch):
    """Evaluate the model on the test set.

    Computes per-sample ELBO terms once with `log_likelihood_k` importance
    samples, then re-uses the first k of them for several values of k.
    Returns a *lazy* map of negative mean log-likelihood bounds for
    k = importance_num, 1, 64 and log_likelihood_k (callers unpack it).
    """
    elbos = [
        model(data, mean_n=1, imp_n=args.log_likelihood_k)["elbo"].squeeze(0)
        for data, _ in test_loader
    ]
    def get_loss_k(k):
        # Truncate to the first k importance samples and log-mean-exp them.
        losses = [
            model.logmeanexp(elbo[:k], 0).cpu().numpy().flatten() for elbo in elbos
        ]
        return -np.concatenate(losses).mean()
    return map(get_loss_k, [args.importance_num, 1, 64, args.log_likelihood_k])
# --- Evaluation-only mode: load the best checkpoint, report, and exit ---
if args.eval:
    model.load_state_dict(torch.load(args.best_model_file))
    with torch.no_grad():
        print(list(test(0)))
        if args.figs:
            draw_figs(model, args, test_loader, 0)
    sys.exit()
# --- Main training loop ---
for epoch in range(1, args.epochs + 1):
    writer.add_scalar("learning_rate", optimizer.param_groups[0]["lr"], epoch)
    train(epoch)
    with torch.no_grad():
        if args.figs and epoch % 100 == 1:
            draw_figs(model, args, test_loader, epoch)
        test_loss, test_1, test_64, test_ll = test(epoch)
        # Checkpoint whenever the monitored test loss improves.
        if test_loss < model.best_loss:
            model.best_loss = test_loss
            torch.save(model.state_dict(), args.best_model_file)
        # ReduceLROnPlateau needs the metric; MultiStepLR takes no args.
        scheduler_args = {"metrics": test_loss} if args.no_iwae_lr else {}
        scheduler.step(**scheduler_args)
        writer.add_scalar("test/loss", test_loss, epoch)
        writer.add_scalar("test/loss_1", test_1, epoch)
        writer.add_scalar("test/loss_64", test_64, epoch)
        writer.add_scalar("test/LL", test_ll, epoch)
        print("==== Testing. LL: {:.4f} ====\n".format(test_ll))
# Optionally push the final metrics to a Google Sheet.
if args.to_gsheets:
    from utils.to_sheets import upload_to_google_sheets
    row_data = [args.exp_name, str(test_ll), str(test_64), str(test_64 - test_ll)]
    upload_to_google_sheets(row_data=row_data)
"yooonholee@gmail.com"
] | yooonholee@gmail.com |
c62c24e115cdf1835d84b2b7bb4b7def2fbadcf6 | 5da5473ff3026165a47f98744bac82903cf008e0 | /packages/google-cloud-dms/samples/generated_samples/datamigration_v1_generated_data_migration_service_stop_migration_job_async.py | a4bc8f0f5878cfe73e659344426766b46ce49d17 | [
"Apache-2.0"
] | permissive | googleapis/google-cloud-python | ed61a5f03a476ab6053870f4da7bc5534e25558b | 93c4e63408c65129422f65217325f4e7d41f7edf | refs/heads/main | 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 | Apache-2.0 | 2023-09-14T21:45:18 | 2014-01-28T15:51:47 | Python | UTF-8 | Python | false | false | 1,959 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for StopMigrationJob
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dms
# [START datamigration_v1_generated_DataMigrationService_StopMigrationJob_async]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import clouddms_v1
async def sample_stop_migration_job():
    """Generated sample: stop a Database Migration Service migration job."""
    # Create a client
    client = clouddms_v1.DataMigrationServiceAsyncClient()

    # Initialize request argument(s)
    request = clouddms_v1.StopMigrationJobRequest(
    )

    # Make the request (returns a long-running operation)
    operation = client.stop_migration_job(request=request)

    print("Waiting for operation to complete...")

    response = (await operation).result()

    # Handle the response
    print(response)
# [END datamigration_v1_generated_DataMigrationService_StopMigrationJob_async]
| [
"noreply@github.com"
] | googleapis.noreply@github.com |
94a9437b315443eac7abdc259ca9578071f0a649 | 14c41d8e912f1f9a34d3cc0a6f279dc2d1e4f672 | /gt_events/queries.py | 96c3f70fe572f33b953802d0b32cdb475c429c35 | [] | no_license | Dan-Theriault/7140-Prototype | 1190cafe9926fa20c515043aa43b2f045bf3820d | 53674543328395042d1e34193a1a899a48bb3fb4 | refs/heads/master | 2021-01-19T03:34:33.109313 | 2017-04-24T03:02:37 | 2017-04-24T03:02:37 | 87,324,003 | 0 | 2 | null | 2017-04-19T20:02:50 | 2017-04-05T15:10:40 | CSS | UTF-8 | Python | false | false | 997 | py | """Python wrapper around SQLite Queries."""
import sqlite3
def add_event(name, datetime, org, description, location):
    """Insert one event row into the events table of events.db."""
    connection = sqlite3.connect('events.db')
    connection.execute(
        'INSERT INTO events VALUES (?,?,?,?,?)',
        (name, datetime, org, description, location),
    )
    connection.commit()
    connection.close()
def list_events():
    """Return all events as (rowid, name, datetime, org, description, location) tuples."""
    connection = sqlite3.connect('events.db')
    rows = connection.execute(
        'SELECT rowid ,name, datetime, org, description, location FROM events'
    ).fetchall()
    connection.commit()
    connection.close()
    return rows
def get_event(rowid):
    """Fetch one event (rowid first) by its rowid; None if absent."""
    connection = sqlite3.connect('events.db')
    row = connection.execute(
        'SELECT rowid, * FROM events WHERE rowid=?', [rowid]
    ).fetchone()
    connection.commit()
    connection.close()
    return row
| [
"dannymt97@gmail.com"
] | dannymt97@gmail.com |
79b8ecb0a73fd79fead4c0b345b5f54ea86815e6 | 58fcf486c437c4f30f44195503df6e9632f1ae4b | /time/old/time_01.py | a0995d770ae0711adfa92338fd31dd17687b17ec | [] | no_license | gratten/timesheets | 620c02d508cf2063c9f2e89922c10f1331589509 | 20501ea05a0f06672c1a0c2703ffeeb9622aef80 | refs/heads/main | 2023-07-08T09:14:50.773681 | 2021-08-04T13:03:27 | 2021-08-04T13:03:27 | 392,686,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,937 | py | import shutil
import datetime
import os
# from openpyxl import load_workbook
import xlwings as xw
# Determine this week's timesheet filename from last Monday's date.
today = datetime.date.today()
last_monday = today - datetime.timedelta(days=today.weekday())
file_name = f'Ward, Gratten 2021_Timesheet_{last_monday}.xls'
# Snapshot of the working directory contents, taken once at import time.
path = os.getcwd()
files = os.listdir(path)
def new_week(file_name):
    """Create this week's timesheet from the template if it doesn't exist yet.

    Returns a feedback message for the caller to print.

    Fix: checks the filesystem directly instead of the module-level `files`
    listing, which is captured once at import time and goes stale -- the old
    code would report "New file created" and re-copy over an existing sheet
    created after import.
    """
    if not os.path.exists(file_name):
        shutil.copy("Ward, Gratten 2021_Timesheet.xls", file_name)
        feedback = '\nNew file created.\n'
    else:
        feedback = "\nFile already exists.\n"
    return feedback
def add_task(file_name):
    """Prompt for one task and append it to the first free row (A21:A36).

    Returns a feedback message for the caller to print.

    Fixes: the old for/else left `empty` unbound when the sheet was full
    (printing 'Document full.' then crashing with NameError), and Excel was
    never quit if anything raised; a try/finally now guarantees cleanup.
    """
    # initiate workbook
    excel_app = xw.App(visible=False)
    try:
        wb = excel_app.books.open(file_name)
        ws = wb.sheets[0]
        # find next row to populate
        empty = None
        for cell in ws.range('A21', 'A36'):
            if cell[0].value is None:
                empty = cell[0].row
                break
        if empty is None:
            wb.close()
            return '\nDocument full.\n'
        # collect user input
        project = input("Enter project: ")
        description = input("Enter description: ")
        seq = input("Enter sequence: ")
        act_code = input("Enter activity code: ")
        hours = input("Enter hours: ")
        # populate data
        ws.range(f'A{empty}').value = project
        ws.range(f'B{empty}').value = description
        ws.range(f'C{empty}').value = seq
        ws.range(f'D{empty}').value = act_code
        ws.range(f'E{empty}').value = hours
        # save and close
        wb.save()
        wb.close()
    finally:
        # Always quit Excel, even if something above raised.
        excel_app.quit()
    # TODO: determine which day column should receive the hours
    # notify user
    feedback = '\nTask recorded.\n'
    return feedback
# Simple console menu: loop until the user enters 'E'.
selection = ''
while selection != 'E':
    selection = input('W - new week \n'
                      'T - add task\n'
                      'E - exit\n\n'
                      'Make a selection...')
    if selection == 'W':
        print(new_week(file_name))
    elif selection == 'T':
        print(add_task(file_name))
"Gratten.Ward@graphicpkg.com"
] | Gratten.Ward@graphicpkg.com |
fab55d204978d837e4212c08440b33872d36947d | 2405752a692d003f83fa2f24272d7afa47254fc6 | /dynamics/probabilistic_ensemble.py | 21cd4706ea25f86a424352dc12e3a9c085389b78 | [
"MIT"
] | permissive | ZhihanLee/sac-plus | 1edee7724939484cf72181e0789c3e03a2542451 | 829c8652bc07a420e855ace696ae44de5feb5379 | refs/heads/main | 2023-03-09T01:27:30.775603 | 2021-02-21T22:46:47 | 2021-02-21T22:46:47 | 476,717,412 | 2 | 0 | MIT | 2022-04-01T12:46:44 | 2022-04-01T12:46:43 | null | UTF-8 | Python | false | false | 8,717 | py | import os
import torch as th
from torch import nn as nn
from torch.nn import functional as F
import numpy as np
import pickle
#TODO:
# - Better to predict logvar or logstd?
# - Learn logvar or keep it constant?
# - Holdout loss: best ratio? save best checkpoint in epoch? individual improvement?
class EnsembleLayer(nn.Module):
    """One fully-connected layer evaluated independently by every ensemble
    member: a stacked weight tensor (ensemble_size, input_dim, output_dim)
    plus a per-member bias row, applied with a single batched matmul."""

    def __init__(self, ensemble_size, input_dim, output_dim):
        super().__init__()
        weights = th.empty((ensemble_size, input_dim, output_dim))
        self.W = nn.Parameter(weights, requires_grad=True).float()
        nn.init.xavier_uniform_(self.W, gain=nn.init.calculate_gain('relu'))
        biases = th.zeros((ensemble_size, 1, output_dim))
        self.b = nn.Parameter(biases, requires_grad=True).float()

    def forward(self, x):
        # x: (ensemble_size, batch_size, input_dim)
        product = x @ self.W
        return product + self.b
class ProbabilisticEnsemble(nn.Module):
    """Ensemble of probabilistic MLPs (PETS-style dynamics model).

    Each member predicts a diagonal Gaussian over the output: the final layer
    emits 2 * output_dim values, split into mean and log-variance. Inputs are
    normalized with statistics fit on the training data; log-variance is
    soft-clamped between learnable min/max bounds.
    """

    def __init__(self, input_dim, output_dim, ensemble_size=5, arch=(200,200,200,200), activation=F.relu, learning_rate=0.001, num_elites=2, device='auto'):
        super().__init__()
        self.ensemble_size = ensemble_size
        self.input_dim = input_dim
        self.output_dim = output_dim * 2 # final layer emits mean and logvar
        self.activation = activation
        self.arch = arch
        self.num_elites = num_elites
        # Indices of the best members by holdout loss (all, until trained).
        self.elites = [i for i in range(self.ensemble_size)]

        self.layers = nn.ModuleList()
        in_size = input_dim
        for hidden_size in self.arch:
            self.layers.append(EnsembleLayer(ensemble_size, in_size, hidden_size))
            in_size = hidden_size
        self.layers.append(EnsembleLayer(ensemble_size, self.arch[-1], self.output_dim))

        # Input normalization statistics; filled in by fit_input_stats().
        self.inputs_mu = nn.Parameter(th.zeros(input_dim), requires_grad=False).float()
        self.inputs_sigma = nn.Parameter(th.zeros(input_dim), requires_grad=False).float()
        # Learnable soft bounds on the predicted log-variance.
        self.max_logvar = nn.Parameter(th.ones(1, output_dim, dtype=th.float32) / 2.0).float()
        self.min_logvar = nn.Parameter(-th.ones(1, output_dim, dtype=th.float32) * 10.0).float()

        # Per-layer weight decay (grows with depth).
        # NOTE(review): 5 entries only matches len(arch) == 4 hidden layers;
        # a different `arch` length would index out of range -- confirm.
        self.decays = [0.000025, 0.00005, 0.000075, 0.000075, 0.0001]
        self.optim = th.optim.Adam([{'params': self.layers[i].parameters(), 'weight_decay': self.decays[i]} for i in range(len(self.layers))] +
                                   [{'params': self.max_logvar}, {'params': self.min_logvar}], lr=learning_rate)

        if device == 'auto':
            self.device = th.device('cuda') if th.cuda.is_available() else th.device('cpu')
        else:
            self.device = device
        self.to(self.device)

    def forward(self, input, deterministic=False, return_dist=False):
        """Predict for `input`; returns per-member means, samples and/or
        log-variances depending on the flags. Input may be 1D (single point),
        2D (batch) or 3D (already tiled per member)."""
        dim = len(input.shape)
        # input normalization
        h = (input - self.inputs_mu) / self.inputs_sigma
        # repeat h to make amenable to parallelization
        # if dim = 3, then we probably already did this somewhere else (e.g. bootstrapping in training optimization)
        if dim < 3:
            h = h.unsqueeze(0)
            if dim == 1:
                h = h.unsqueeze(0)
            h = h.repeat(self.ensemble_size, 1, 1)
        for layer in self.layers[:-1]:
            h = layer(h)
            h = self.activation(h)
        output = self.layers[-1](h)
        # if original dim was 1D, squeeze the extra created layer
        if dim == 1:
            output = output.squeeze(1) # output is (ensemble_size, output_size)
        mean, logvar = th.chunk(output, 2, dim=-1)
        # Variance clamping to prevent poor numerical predictions
        logvar = self.max_logvar - F.softplus(self.max_logvar - logvar)
        logvar = self.min_logvar + F.softplus(logvar - self.min_logvar)
        if deterministic:
            if return_dist:
                return mean, logvar
            else:
                return mean
        else:
            # Reparameterized sample from the predicted Gaussian.
            std = th.sqrt(th.exp(logvar))
            samples = mean + std * th.randn(std.shape, device=std.device)
            if return_dist:
                return samples, mean, logvar
            else:
                return samples

    def compute_loss(self, x, y):
        """Gaussian negative log-likelihood summed over members, plus a small
        penalty keeping the logvar bounds tight."""
        mean, logvar = self.forward(x, deterministic=True, return_dist=True)
        inv_var = th.exp(-logvar)
        if len(y.shape) < 3:
            y = y.unsqueeze(0).repeat(self.ensemble_size, 1, 1)
        mse_losses = (th.square(mean - y) * inv_var).mean(-1).mean(-1)
        var_losses = logvar.mean(-1).mean(-1)
        total_losses = (mse_losses + var_losses).sum()
        total_losses += 0.01*self.max_logvar.sum() - 0.01*self.min_logvar.sum()
        return total_losses

    def compute_mse_losses(self, x, y):
        """Per-member mean squared error (used for holdout evaluation)."""
        mean = self.forward(x, deterministic=True, return_dist=False)
        if len(y.shape) < 3:
            y = y.unsqueeze(0).repeat(self.ensemble_size, 1, 1)
        mse_losses = (mean - y)**2
        return mse_losses.mean(-1).mean(-1)

    def save(self, path):
        """Save model and optimizer state to `path + '.tar'`.

        NOTE(review): creates a 'weights/' directory but saves to the caller's
        `path`, which may lie elsewhere -- confirm intended."""
        save_dir = 'weights/'
        if not os.path.isdir(save_dir):
            os.makedirs(save_dir)
        th.save({'ensemble_state_dict': self.state_dict(),
                 'ensemble_optimizer_state_dict': self.optim.state_dict()}, path + '.tar')

    def load(self, path):
        """Restore model and optimizer state saved by save().

        NOTE(review): th.load without map_location will fail loading
        GPU-saved checkpoints on a CPU-only machine."""
        params = th.load(path)
        self.load_state_dict(params['ensemble_state_dict'])
        self.optim.load_state_dict(params['ensemble_optimizer_state_dict'])

    def fit_input_stats(self, data):
        """Fit per-feature normalization statistics from a numpy array."""
        mu = np.mean(data, axis=0, keepdims=True)
        sigma = np.std(data, axis=0, keepdims=True)
        # Guard against zero variance features (avoid division by ~0).
        sigma[sigma < 1e-12] = 1.0
        self.inputs_mu.data = th.from_numpy(mu).to(self.device).float()
        self.inputs_sigma.data = th.from_numpy(sigma).to(self.device).float()

    def train_ensemble(self, X, Y, batch_size=256, holdout_ratio=0.1, max_holdout_size=5000, max_epochs_no_improvement=5, max_epochs=200):
        """Train with per-member bootstrap resampling and holdout-based early
        stopping; returns the mean holdout MSE at termination."""
        self.fit_input_stats(X)
        # Split off a holdout set for early stopping and elite selection.
        num_holdout = min(int(X.shape[0] * holdout_ratio), max_holdout_size)
        permutation = np.random.permutation(X.shape[0])
        inputs, holdout_inputs = X[permutation[num_holdout:]], X[permutation[:num_holdout]]
        targets, holdout_targets = Y[permutation[num_holdout:]], Y[permutation[:num_holdout]]
        holdout_inputs = th.from_numpy(holdout_inputs).to(self.device).float()
        holdout_targets = th.from_numpy(holdout_targets).to(self.device).float()
        # Bootstrap: each member gets its own resampled index row.
        idxs = np.random.randint(inputs.shape[0], size=[self.ensemble_size, inputs.shape[0]])
        num_batches = int(np.ceil(idxs.shape[-1] / batch_size))
        def shuffle_rows(arr):
            # Independently shuffle each member's index row.
            idxs = np.argsort(np.random.uniform(size=arr.shape), axis=-1)
            return arr[np.arange(arr.shape[0])[:, None], idxs]
        num_epochs_no_improvement = 0
        epoch = 0
        best_holdout_losses = [float('inf') for _ in range(self.ensemble_size)]
        while num_epochs_no_improvement < max_epochs_no_improvement and epoch < max_epochs:
            self.train()
            for batch_num in range(num_batches):
                batch_idxs = idxs[:, batch_num * batch_size : (batch_num + 1) * batch_size]
                batch_x, batch_y = inputs[batch_idxs], targets[batch_idxs]
                batch_x, batch_y = th.from_numpy(batch_x).to(self.device).float(), th.from_numpy(batch_y).to(self.device).float()
                loss = self.compute_loss(batch_x, batch_y)
                self.optim.zero_grad()
                loss.backward()
                self.optim.step()
            idxs = shuffle_rows(idxs)

            # Evaluate on the holdout set and update the elite members.
            self.eval()
            with th.no_grad():
                holdout_losses = self.compute_mse_losses(holdout_inputs, holdout_targets)
            holdout_losses = [l.item() for l in holdout_losses]
            #print('Epoch:', epoch, 'Holdout losses:', [l.item() for l in holdout_losses])

            self.elites = np.argsort(holdout_losses)[:self.num_elites]

            # Early stopping: an epoch "improves" if ANY member improves its
            # best holdout loss by more than 1% (relative).
            improved = False
            for i in range(self.ensemble_size):
                if epoch == 0 or (best_holdout_losses[i] - holdout_losses[i]) / (best_holdout_losses[i]) > 0.01:
                    best_holdout_losses[i] = holdout_losses[i]
                    num_epochs_no_improvement = 0
                    improved = True
            if not improved:
                num_epochs_no_improvement += 1

            epoch += 1
            print('Epoch:', epoch, 'Holdout losses:', ', '.join(["%.4f"%hl for hl in holdout_losses]))
        return np.mean(holdout_losses)
if __name__ == '__main__':
    # Smoke test: train the ensemble on a pickled replay buffer.
    # NOTE(review): hard-coded absolute path -- this only runs on the
    # author's machine; `memory` is a project type with to_train_batch().
    with open('/home/lucas/Desktop/drl-cd/weights/drlcd-cheetah-ns-paper1data0', 'rb') as f:
        memory = pickle.load(f)
    X, Y = memory.to_train_batch()
    model = ProbabilisticEnsemble(X.shape[1], Y.shape[1])
    model.train_ensemble(X, Y, max_epochs=200)
"lucasnale@gmail.com"
] | lucasnale@gmail.com |
ee356748b473d4085c481203fcfedf43fbc1ffc8 | fb1aebfe953bc38663a11058180f22f35142eda5 | /WEB/concesionario/vehiculos/migrations/0005_perfil.py | 0d3aa21c508a9b930490c9466b445ffe093bf87a | [] | no_license | Monghomery1499/proyecto | 801bf7a3db26c27441b1c8a9cb1ffa71e2215dfc | a8fb669eade1d10b8b9a356f2dc92f29957e0dd5 | refs/heads/main | 2023-04-05T23:33:20.338486 | 2021-04-28T16:37:48 | 2021-04-28T16:37:48 | 362,537,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,115 | py | # Generated by Django 3.0 on 2021-04-22 07:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import vehiculos.models
# Auto-generated Django migration: adds the `Perfil` model, a one-to-one
# profile extension of the auth User (phone, address, id card, photo).
# Do not edit by hand; schema changes belong in a new migration.
class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('vehiculos', '0004_auto_20210421_2019'),
    ]

    operations = [
        migrations.CreateModel(
            name='Perfil',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('telefono', models.IntegerField()),
                ('direccion', models.TextField()),
                ('cedula', models.CharField(max_length=10)),
                ('foto', models.ImageField(upload_to=vehiculos.models.url_perfil)),
                ('usuario', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Perfil',
                'verbose_name_plural': 'Perfiles',
            },
        ),
    ]
| [
"bguaman99.bg@gmail.com"
] | bguaman99.bg@gmail.com |
fbb3446493f4237d3215cd5b17a12f2716787801 | ed02484cf8db6ca95f30b204e4beb3e33f76da7a | /26_oop_special_methods/task_26_3a.py | 4813afd3905d67d63eb10a9b96cf5e48d01b708c | [] | no_license | tifling85/Pyneng | 3d6024e7c8dd33afedc8986644118e12139b271e | 23846630d24f773f5c0a35dcd195ffd4484695a4 | refs/heads/master | 2020-08-07T11:38:21.644124 | 2019-11-28T14:37:02 | 2019-11-28T14:37:02 | 213,435,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,731 | py | # -*- coding: utf-8 -*-
'''
Задание 26.3a
Изменить класс IPAddress из задания 26.3.
Добавить два строковых представления для экземпляров класса IPAddress.
Как дожны выглядеть строковые представления, надо определить из вывода ниже:
Создание экземпляра
In [5]: ip1 = IPAddress('10.1.1.1/24')
In [6]: str(ip1)
Out[6]: 'IP address 10.1.1.1/24'
In [7]: print(ip1)
IP address 10.1.1.1/24
In [8]: ip1
Out[8]: IPAddress('10.1.1.1/24')
In [9]: ip_list = []
In [10]: ip_list.append(ip1)
In [11]: ip_list
Out[11]: [IPAddress('10.1.1.1/24')]
In [12]: print(ip_list)
[IPAddress('10.1.1.1/24')]
Для этого задания нет теста!
'''
class IPAddress:
    """IPv4 address with mask, e.g. ``IPAddress('10.1.1.1/24')``.

    Raises ValueError('Incorrect IP address') for a malformed address and
    ValueError('Incorrect mask') for a malformed mask.

    Bug fixes vs. the original:
    * octet check used ``range(0, 255)`` which rejected 255;
    * mask check used ``range(8, 32)`` which rejected /32;
    * input without '/' raised IndexError instead of ValueError;
    * ``__str__`` said 'IP Address ...' while the task spec in the module
      docstring requires 'IP address ...'.
    """

    def __init__(self, ip):
        parts = ip.split('/')
        if len(parts) != 2:
            raise ValueError('Incorrect IP address')
        octets = parts[0].split('.')
        if len(octets) != 4:
            raise ValueError('Incorrect IP address')
        for octet in octets:
            # isdigit() also rejects signs, so negatives cannot slip through.
            if not octet.isdigit() or not 0 <= int(octet) <= 255:
                raise ValueError('Incorrect IP address')
        if not parts[1].isdigit() or not 8 <= int(parts[1]) <= 32:
            raise ValueError('Incorrect mask')
        self.ip, self.mask = parts

    def __str__(self):
        return 'IP address {}/{}'.format(self.ip, self.mask)

    def __repr__(self):
        return "IPAddress('{}/{}')".format(self.ip, self.mask)
if __name__ == '__main__':
    # Quick manual check of both string representations.
    sample = IPAddress('1.1.1.1/24')
    print(sample)
    addresses = [sample]
    print(addresses)
| [
"tifling85@mail.ru"
] | tifling85@mail.ru |
3806e6c65494530724c23490d61499296513254b | 224afc0584213f70959c8a7da30146da19aed98d | /array/26.py | 313ea952b780745479e9e7b2195f5015e7c6e4c4 | [] | no_license | zxmeng/LeetCode | 26c2eb458912c2137bf0af4cdd31868260d9ba59 | 131aae52be6a62b284aee686dcb17ff85809a416 | refs/heads/master | 2020-03-15T03:41:36.019361 | 2018-06-20T03:53:50 | 2018-06-20T03:53:50 | 131,948,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
count = 1
i = 1
while i < len(nums):
if nums[i] == nums[i-1]:
i += 1
else:
nums[count] = nums[i]
count += 1
i += 1
del nums[count:]
return len(nums) | [
"z9meng@uwaterloo.ca"
] | z9meng@uwaterloo.ca |
40ceba7ad043be024ef819ddb6947c3a4ad5721c | 94bcf113636b617137ec0feb1e2d7d12717d8cb2 | /pytorch-practice-vision/chapter7/train.py | 52bcab47f51ab99b7d6f7943252f0e1bb3637fc3 | [] | no_license | cheewing/pytorch-practice | 4d4d6a25a05f46e5efea5a78f957752698ff7245 | 20092e8536a3f30865a55adb5be243f1dc4bce30 | refs/heads/master | 2020-03-27T01:18:48.585935 | 2018-08-27T05:10:49 | 2018-08-27T05:10:49 | 145,699,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,067 | py | # coding: utf-8
import torch
import torchvision
from torchvision import datasets, models, transforms
import os
from torch.autograd import Variable
import matplotlib.pyplot as pyplot
import time
%matplotlib inline
data_dir = 'DogsVSCats'

# Fixed two syntax errors in the original dict comprehension:
#   "x.transforms.Compose"  ->  "x: transforms.Compose"
#   missing comma after transforms.Scale([224, 224])
data_transform = {
    x: transforms.Compose([
        transforms.Scale([224, 224]),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])
    for x in ['train', 'valid']
}

image_datasets = {
    x: datasets.ImageFolder(
        root=os.path.join(data_dir, x),
        transform=data_transform[x]
    )
    for x in ['train', 'valid']
}

dataloader = {
    x: torch.utils.data.DataLoader(
        dataset=image_datasets[x],
        batch_size=16,
        shuffle=True
    )
    for x in ['train', 'valid']
}

x_example, y_example = next(iter(dataloader['train']))
# Fixed: ImageFolder exposes the class names as ".classes";
# ".example_classes" does not exist.
example_classes = image_datasets['train'].classes
index_classes = image_datasets['train'].class_to_idx
# Transfer learning: pretrained VGG16 with a new 2-class head (cat vs dog).
model = models.vgg16(pretrained = True)

# Freeze the pretrained convolutional features.
for param in model.parameters():
    param.requires_grad = False

model.classifier = torch.nn.Sequential(
    torch.nn.Linear(25088, 4096),
    torch.nn.ReLU(),
    torch.nn.Dropout(p=0.5),
    torch.nn.Linear(4096, 4096),
    torch.nn.ReLU(),
    torch.nn.Dropout(p=0.5),
    torch.nn.Linear(4096, 2)
)

use_gpu = torch.cuda.is_available()
if use_gpu:
    model = model.cuda()

# Fixed: the original first bound `cost`/`optimizer` and then immediately
# rebound them as `loss_f`/`optimizer`; the first pair was dead code.
loss_f = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.classifier.parameters(), lr = 0.00001)
# Train/validate loop over `epoch_n` epochs.
# NOTE(review): written for pre-0.4 PyTorch (`Variable`, `loss.data[0]`);
# on modern versions use `loss.item()` and drop Variable.
epoch_n = 5
time_open = time.time()

for epoch in range(epoch_n):
    print('Epoch {}/{}'.format(epoch, epoch_n-1))
    print('-'*10)
    for phase in ['train', 'valid']:
        if phase == 'train':
            print('Training...')
            model.train(True)
        else:
            print('Validing...')
            model.train(False)
        running_loss = 0.0
        running_corrects = 0
        # enumerate(..., 1): batch counter starts at 1 for the running averages.
        for batch, data in enumerate(dataloader[phase], 1):
            x, y = data
            if use_gpu:
                x, y = Variable(x.cuda()), Variable(y.cuda())
            else:
                x, y = Variable(x), Variable(y)
            y_pred = model(x)
            _, pred = torch.max(y_pred.data, 1)
            optimizer.zero_grad()
            loss = loss_f(y_pred, y)
            # Backprop only in the training phase.
            if phase == 'train':
                loss.backward()
                optimizer.step()
            running_loss += loss.data[0]
            running_corrects += torch.sum(pred == y.data)
            # 16 below is the DataLoader batch size.
            if batch % 500 ==0 and phase == 'train':
                print('Batch {}, Train Loss: {:.4f}, Train ACC:{:.4f}'.\
                    format(batch, running_loss/batch, 100*running_corrects/(16*batch)))
        epoch_loss = running_loss*16 / len(image_datasets[phase])
        epoch_acc = 100*running_corrects/len(image_datasets[phase])
        print('{} Loss:{:.4f} Acc:{:.4f}%'.format(phase, epoch_loss, epoch_acc))

time_end = time.time() - time_open
print(time_end)
"chengwei19890@163.com"
] | chengwei19890@163.com |
cdf4cfc812b67c47f380666606834eb7c4b3a6b8 | b18d1e62a3a5641393f5a55b3e1c54b7aae33f6c | /EjerciciosClase4-1Bim/Ejemplo01/Ejemplo01.py | 0b7dcc81fae46a90793b5105117ff047874e97b4 | [] | no_license | IntroProgramacion-P-Oct20-Feb21/trabajofinal-1bim-FabianMontoya9975 | 2a8b087f2481b37a8a216f9e0561abd388d1f038 | 3b3c4c8499c0526a0c28d7c2f7d1a6ce6ec8f1b7 | refs/heads/main | 2023-01-28T15:07:16.897371 | 2020-12-06T07:26:33 | 2020-12-06T07:26:33 | 317,355,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | """
Se imprime en pantalla el valor cadena que se le asigno a la variable
"""
nombreEstudiante = "José Fabián"
print(nombreEstudiante) | [
"jfmontoya1@utpl.edu.ec"
] | jfmontoya1@utpl.edu.ec |
6da4e06c6a149e1802bee00d900bb33d0fe86876 | ce355d6359265a6534155f89ed724dfe6bb0c88a | /naccsweb/powerpugs/models.py | 55dfc487653612e8872472037c5d6f2921201bf1 | [] | no_license | IsaiasCuevas/naccs-django | d4a8722c3b37329645e2d920df097f83dac91fc0 | 98f226091303a7f471660f64d51e050d9f068fd7 | refs/heads/master | 2021-07-25T01:45:02.827443 | 2020-08-25T16:15:33 | 2020-08-25T16:15:33 | 204,828,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,323 | py | from django.db import models
from django.contrib.auth.models import User
from naccsweb.storage_backends import PrivateMediaStorage
# Django choice tuples are (stored value, human-readable label).
# NOTE(review): IGL_OPTIONS is never referenced below (`igl` is a plain
# BooleanField) and its value/label pairing looks inverted -- confirm
# before wiring it up.
IGL_OPTIONS= [
    ('Yes', 'True'),
    ('No', 'False'),
]

# Workflow states shared by both application models below.
APP_STATUS= [
    ('Accepted', 'Accepted'),
    ('Pending', 'Pending'),
    ('Denied','Denied')
]
class PowerPugsPlayerApplication(models.Model):
    """A player's application to the Power Pugs tournament.

    NOTE(review): acceptance is tracked twice (`status` text and the
    `accepted` bool) -- the two can drift apart; confirm which one the
    views treat as canonical.
    """
    class Meta:
        verbose_name = "Power Pug Player Application"
        verbose_name_plural = "Power Pug Player Applications"

    def __str__(self):
        # NOTE(review): raises AttributeError when `user` is NULL
        # (the FK below allows null=True) -- confirm admin never hits that.
        return self.user.profile.nickname

    user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    name = models.CharField(max_length=80, default="")
    email = models.CharField(max_length=80)
    college = models.CharField(max_length=80)
    igl = models.BooleanField(default=False)
    faceit_link = models.CharField(max_length=80)
    esea_link = models.CharField(max_length=80)
    curr_team = models.CharField(max_length=80, blank=True)
    lan = models.TextField(max_length=1000, blank=True)
    other = models.TextField(max_length=1000, blank=True)
    # Applications are stored on private (non-public) media storage.
    application = models.FileField(upload_to="powerpugs/general/", storage=PrivateMediaStorage())
    paid = models.BooleanField(default=False)
    status = models.TextField(choices=APP_STATUS, default="Pending")
    accepted = models.BooleanField(default=False)
class PowerPugsIGLApplication(models.Model):
    """An in-game-leader (IGL) application; mirrors the player form minus
    college/igl/paid fields."""
    class Meta:
        verbose_name = "Power Pug IGL Application"
        verbose_name_plural = "Power Pug IGL Applications"

    def __str__(self):
        # NOTE(review): raises AttributeError when `user` is NULL (null=True).
        return self.user.profile.nickname

    user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    name = models.CharField(max_length=80)
    email = models.CharField(max_length=80)
    faceit_link = models.CharField(max_length=80)
    esea_link = models.CharField(max_length=80)
    curr_team = models.CharField(max_length=80, blank=True)
    lan = models.TextField(max_length=1000, blank=True)
    other = models.TextField(max_length=1000, blank=True)
    # Stored on private (non-public) media storage.
    application = models.FileField(upload_to="powerpugs/igl/", storage=PrivateMediaStorage())
    status = models.TextField(choices=APP_STATUS, default="Pending")
    accepted = models.BooleanField(default=False)
| [
"isaiascuevas19@gmail.com"
] | isaiascuevas19@gmail.com |
d7f2a4cd830bf10320cf73ec3ef9d87c630d4c08 | 7a4e35881553049e636d904ffa2238a0dc087e80 | /control/OPcontrol.py | 55802af6467b301cc774b5cb0ff53eaed2d6a616 | [] | no_license | Mustenaka/back-end-do | 395bd5bd2f6949e9bdab4dc7f2024b6a2fe4e238 | ea9d8b790bb3a27137957361d7e6a7e36bbcd5d4 | refs/heads/main | 2023-04-24T08:48:03.255471 | 2021-05-06T01:40:06 | 2021-05-06T01:40:06 | 343,109,918 | 12 | 3 | null | null | null | null | UTF-8 | Python | false | false | 20,873 | py | import random
import models.DBconnect as DBconnect
import datetime
import os
import sys
projectPath = os.path.abspath(os.path.join(os.getcwd()))
sys.path.append(projectPath)
class OPcontrol:
    """
    Control layer between the DB layer and the API layer: it calls the
    low-level database code, applies the business logic, and returns the
    results to the API layer.
    """

    def __init__(self):
        # No persistent DB connection is kept: connections are opened per
        # call so an idle client does not hold bandwidth/resources.
        pass
def check_login(self, user_name, user_pwd):
    """Validate a login attempt against the user table.

    Args:
        user_name: login name
        user_pwd: password
    Returns:
        dict with returnCode 'a0' plus the user row fields on success,
        or just {'returnCode': 'r0'} when no user matches.
    """
    print("-----------")  # leftover debug output
    db = DBconnect.DBconnect()
    info = db.dbQuery_userLogin(user_name, user_pwd)
    if info == None:
        dic = {"returnCode": "r0"}
    else:
        dic = {
            "returnCode": "a0",
            "user_id": info[0],
            "user_name": info[1],
            "user_pwd": info[2],
            "user_wx_id": info[3],
            "user_rightAnswer": info[4],
            "user_wrongAnswer": info[5],
            "isAdministrator": info[6],
        }
    print(dic)  # leftover debug output (leaks credentials to stdout)
    return dic
def __is_already(self, db, user_id):
    """Return True when *user_id* already exists in user_info.

    Used while generating a fresh random 8-digit user id to detect the
    (rare) collision.

    Bug fix: the original ignored the caller-supplied *db* connection and
    always opened a new one; the passed connection is now reused, and a
    new one is only opened when None is given.
    """
    if db is None:
        db = DBconnect.DBconnect()
    info = db.dbQuery_user_is_already(user_id)
    return info is not None
def register(self, user_name, user_pwd):
    """Create a new account.

    Generates a random, unique 8-digit user id (retrying on collision)
    which is the global unique identifier of the user.  The WeChat
    OpenID cannot be obtained by the front end, so the legacy
    user_wx_id field is currently just set to user_id (pending rework).

    Args:
        user_name: login name
        user_pwd: password
    Returns:
        dict with returnCode 'a0' plus the stored fields on success,
        or {'returnCode': 'r0'} on failure.
    """
    db = DBconnect.DBconnect()
    new_user_id = str(random.randint(0, 99999999)).zfill(8)
    bool_is_already = self.__is_already(db, new_user_id)
    while bool_is_already:
        new_user_id = str(random.randint(0, 99999999)).zfill(8)
        bool_is_already = self.__is_already(db, new_user_id)
    # Insert the new user row (right/wrong answer counters start at 0,
    # not an administrator).
    is_successful = db.dbInsert(
        "user_info",
        new_user_id,
        user_name,
        user_pwd,
        new_user_id,
        0,
        0,
        0
    )
    if is_successful:
        dic = {
            "returnCode": "a0",
            "user_id": new_user_id,
            "user_name": user_name,
            "user_pwd": user_pwd,
            "user_wx_id": new_user_id,
            "user_rightAnswer": 0,
            "user_wrongAnswer": 0,
            "isAdministrator": 0
        }
    else:
        dic = {
            "returnCode": "r0"
        }
    return dic
def get_chapter_all(self):
    """Return every chapter row in one call for the admin table.

    Hierarchy: subject -> chapter -> question; fetching everything at
    once lets the admin page build its edit table directly.
    """
    dbTable = "chapters_info"
    db = DBconnect.DBconnect()
    info = db.dbQuery(dbTable)
    dic = {}
    li = []
    for i in range(0, len(info)):
        # Row numbering is deliberately 1-based.
        pageNumber = "c" + str(i+1)
        dic_tmp = {
            "group": pageNumber,
            "chapters_id": info[i][0],
            "subject_id": info[i][1],
            "chapters_name": info[i][2]
        }
        li.append(dic_tmp)
    dic.setdefault("chapters", li)
    #dic.setdefault(pageNumber, dic_tmp)
    return dic
# IMPORTANT - the admin front end depends on this method.
def get_title_all(self):
    """Return every question, each annotated with its chapter id and
    subject id (resolved via two reverse lookups per row), for the admin
    bulk-edit table.
    """
    dbTable = "title_info"
    db = DBconnect.DBconnect()
    info = db.dbQuery(dbTable)
    dic = {}
    li = []
    for i in range(0, len(info)):
        # Row numbering is deliberately 1-based.
        pageNumber = "t" + str(i+1)
        title_id = info[i][0]
        # Reverse lookup: question id -> chapter id.
        chapters = db.dbQuery_chapter_by_title(title_id)
        chapter_id = chapters[0][0]
        print(chapter_id)  # leftover debug output
        # Reverse lookup: chapter id -> subject id.
        subjects = db.dbQuery_subject_by_chapter(chapter_id)
        subject_id = subjects[0][0]
        print(subject_id)  # leftover debug output
        dic_tmp = {
            "group": pageNumber,
            "title_id": title_id,
            "chapters_id": chapter_id,
            "subject_id": subject_id,
            "titleHead": info[i][1],
            "titleCont": info[i][2],
            "titleAnswer": info[i][3],
            "titleAnalysis": info[i][4],
            "titleAveracc": info[i][5],
            "titlespaper": info[i][6],
            "specialNote": info[i][7],
        }
        li.append(dic_tmp)
    #dic.setdefault(pageNumber, dic_tmp)
    print(li)  # leftover debug output
    dic.setdefault("titles", li)
    return dic
def get_subject(self):
    """Return all subjects (currently a fixed set of four).

    First step of the quiz flow:
    subject id -> chapter id -> question id -> question detail -> submit.
    """
    dbTable = "subject_info"
    db = DBconnect.DBconnect()
    info = db.dbQuery(dbTable)
    dic = {}
    li = []
    for i in range(0, len(info)):
        pageNumber = "s" + str(i+1)
        dic_tmp = {
            "group": pageNumber,
            "subject_id": info[i][0],
            "subject_name": info[i][1],
            "subject_brief": info[i][2]
        }
        li.append(dic_tmp)
    dic.setdefault("subjects", li)
    return dic
def get_chapter(self, sub_id):
    """Return the chapter list for one subject.

    Cleanup: removed the unused local ``dbTable`` (the query helper
    already knows its table).

    Args:
        sub_id: subject id
    Returns:
        {"chapters": [{"group", "chapters_id", "subject_id",
        "chapters_name"}, ...]} (group numbering is 1-based)
    """
    db = DBconnect.DBconnect()
    info = db.dbQuery_chapter_according_to_subject(str(sub_id))
    chapters = []
    for i, row in enumerate(info):
        chapters.append({
            "group": "c" + str(i + 1),
            "chapters_id": row[0],
            "subject_id": row[1],
            "chapters_name": row[2]
        })
    return {"chapters": chapters}
def get_title(self, chp_id):
    """Return the question-id list for one chapter.

    Cleanup: removed the unused local ``dbTable``.

    Args:
        chp_id: chapter id
    Returns:
        {"titles": [{"group", "title_id", "chapters_id"}, ...]}
        (group numbering is 1-based)
    """
    db = DBconnect.DBconnect()
    info = db.dbQuery_title_according_to_chapter(str(chp_id))
    titles = []
    for i, row in enumerate(info):
        titles.append({
            "group": "t" + str(i + 1),
            "title_id": row[0],
            "chapters_id": row[1],
        })
    return {"titles": titles}
# Fetch the full record of a single question.
# Fields: title_id, titleHead (caption), titleCont (body), titleAnswer
# (mixed choice/fill-in answer), titleAnalysis (explanation),
# titleAveracc (average accuracy), titlespaper (source exam paper),
# specialNote (usually None).
def get_title_info(self, tit_id):
    """Return the detail record -- including the correct answer -- for one
    question id.

    Cleanup: removed the unused local ``dbTable`` and a large
    commented-out dead-code block (an abandoned multi-row variant).
    """
    db = DBconnect.DBconnect()
    info = db.dbQuery_title_according_to_title(str(tit_id))
    dic = {}
    dic.setdefault("title_id", info[0][0])
    dic.setdefault("titleHead", info[0][1])
    dic.setdefault("titleCont", info[0][2])
    dic.setdefault("titleAnswer", info[0][3])
    dic.setdefault("titleAnalysis", info[0][4])
    dic.setdefault("titleAveracc", info[0][5])
    dic.setdefault("titlespaper", info[0][6])
    dic.setdefault("specialNote", info[0][7])
    return dic
def get_title_len(self):
    """Return the total number of questions; used to bound the random
    question-picking range."""
    db = DBconnect.DBconnect()
    return db.dbQuery_title_len("title_info")
# Answer a question.
def answerCorrectJudgment(self, user_id, tit_id, answer, user_note):
    """Grade a submitted answer and record it.

    Steps: look up the question by id, compare the submitted answer to
    the stored one, update the user's aggregate counters (user_info),
    recompute and store the question's average accuracy (title_info),
    then append a detailed record (titlenote_info).

    Returns:
        True when the answer was correct, False otherwise.
    """
    dbTable = "titlenote_info"
    db = DBconnect.DBconnect()
    # Look up the stored correct answer and counters.
    info = db.dbQuery_title_according_to_title(str(tit_id))
    rightAnswer = info[0][3]
    titleAveracc = info[0][5]
    titleRight = info[0][8]
    titleWrong = info[0][9]
    # Compare with the submitted answer (string comparison).
    print(answer, rightAnswer)  # leftover debug output
    isRight = False
    inpRight = "0"
    if str(answer) == str(rightAnswer):
        isRight = True
        inpRight = "1"
        titleRight += 1
    else:
        titleWrong += 1
    # Update the user's aggregate right/wrong counters.
    db.dbUpdate_user_answer(isRight, user_id)
    # Recompute the average accuracy and persist it on the question.
    # (Safe from division by zero: one of the counters was incremented.)
    titleAveracc = (titleRight) / (titleRight + titleWrong)
    db.dbUpdate_title_info(str(tit_id), str(titleAveracc),
        str(titleRight), str(titleWrong))
    # Append the detailed per-answer record with a timestamp.
    inputDataTime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    db.dbInsert(dbTable, user_id, tit_id,
        inpRight, inputDataTime, user_note)
    return isRight
# Verify administrator status.
def check_administrator(self, user_id):
    """Return True when the user exists and their administrator flag is
    non-zero.

    Cleanup: removed the unused local ``dbTable``, a leftover debug
    ``print(info)``, and collapsed the three-branch boolean ladder.
    """
    db = DBconnect.DBconnect()
    info = db.dbQuery_is_administrator(str(user_id))
    return bool(info) and info[0][0] != 0
# Insert a brand-new question.
def insert_new_title(self, li):
    """Insert a new question and its chapter mapping.

    Args:
        li: [user_id, title_id, chapters_id, titleHead, titleCont,
             titleAnswer, titleAnalysis, titlespaper, specialNote]
            (user_id is carried by the API but not stored here)
    Returns:
        True only when *both* inserts succeed.  Bug fix: the original
        discarded the result of the titlenumber_info insert and reported
        success based on the title_info insert alone.
    """
    (user_id, title_id, chapters_id, titleHead, titleCont,
     titleAnswer, titleAnalysis, titlespaper, specialNote) = li
    db = DBconnect.DBconnect()
    # Map the question to its chapter first.
    ok_mapping = db.dbInsert("titlenumber_info", title_id, chapters_id)
    if not ok_mapping:
        return False
    # Counters (average accuracy, right, wrong) start at 0.
    ok_title = db.dbInsert("title_info", title_id, titleHead, titleCont,
                           titleAnswer, titleAnalysis, 0, titlespaper,
                           specialNote, 0, 0)
    return bool(ok_title)
def insert_new_chapter(self, chapters_id, subject_id, chapters_name):
    """Insert -- or overwrite -- one chapter row.

    The DB layer uses REPLACE for this table, so insert and update are
    combined in one call.

    Cleanup: removed a leftover debug ``print(is_OK)`` and the verbose
    True/False ladder.

    Returns:
        True on success, False otherwise.
    """
    db = DBconnect.DBconnect()
    is_OK = db.dbInsert("chapters_info", chapters_id,
                        subject_id, chapters_name)
    return bool(is_OK)
def update_title(self, li):
    """Update an existing question and its chapter mapping.

    Args:
        li: [user_id, title_id, chapters_id, titleHead, titleCont,
             titleAnswer, titleAnalysis, titlespaper, specialNote]
    Returns:
        True on success, False otherwise.
    """
    user_id = li[0]  # NOTE(review): unpacked but never used here
    title_id = li[1]
    chapters_id = li[2]
    titleHead = li[3]
    titleCont = li[4]
    titleAnswer = li[5]
    titleAnalysis = li[6]
    titlespaper = li[7]
    specialNote = li[8]
    db = DBconnect.DBconnect()
    # Re-point the question at its (possibly changed) chapter.
    dbTable = "titlenumber_info"
    is_OK = db.dbUpdate_signled(
        dbTable, "chaptersId", chapters_id, "titleId", title_id)
    dbTable = "title_info"
    # NOTE(review): the result of the first update is overwritten --
    # only the second update decides the return value.
    is_OK = db.update_title_all(
        dbTable,
        title_id,
        titleHead,
        titleCont,
        titleAnswer,
        titleAnalysis,
        titlespaper,
        specialNote)
    if is_OK:
        return True
    else:
        return False
def remove_title(self, title_id):
    """Delete one question: its chapter mapping (titlenumber_info) AND
    its detail row (title_info).

    Bug fix: the original docstring promised both deletions but the code
    only removed the titlenumber_info mapping, leaving an orphaned row
    in title_info.

    Returns:
        True when both deletes succeed.
    """
    db = DBconnect.DBconnect()
    ok_mapping = db.dbDelete("titlenumber_info", "titleId", title_id)
    ok_detail = db.dbDelete("title_info", "titleId", title_id)
    return bool(ok_mapping and ok_detail)
def remove_chapter(self, chapter_id):
    """Delete one chapter row from chapters_info.

    NOTE(review): the original (Chinese) docstring claims the matching
    rows in titlenumber_info and title_info are deleted as well, but the
    code below only deletes from chapters_info -- confirm whether the
    cascade is handled elsewhere (e.g. by the DB schema), otherwise this
    leaves orphaned questions.
    """
    dbTable = "chapters_info"
    needName = "chaptersId"
    db = DBconnect.DBconnect()
    is_OK = db.dbDelete(
        dbTable, needName, chapter_id)
    return is_OK
def get_answerRecord_all(self):
    """Return the full, read-only answer log for the admin page.

    This table is an audit trail of every submitted answer and is never
    modified through the admin UI.
    """
    dbTable = "titlenote_info"
    db = DBconnect.DBconnect()
    info = db.dbQuery(dbTable)
    dic = {}
    li = []
    for i in range(0, len(info)):
        # Row numbering is deliberately 1-based.
        pageNumber = "r" + str(i+1)
        dic_tmp = {
            "group": pageNumber,
            "user_id": info[i][0],
            "title_id": info[i][1],
            "is_right": info[i][2],
            "respontime": info[i][3],
            "personNote": info[i][4]
        }
        li.append(dic_tmp)
    dic.setdefault("answer_record", li)
    #dic.setdefault(pageNumber, dic_tmp)
    return dic
def update_user_info(self, user_id, user_name, user_pwd, isAdministrator):
    """Update a user's name/password and, optionally, the admin flag.

    The admin flag is only written when the string "0" or "1" is passed;
    afterwards the current flag is re-read from the DB and echoed back.

    Returns:
        dict with returnCode 'a0' plus the new values on success, or
        {'returnCode': 'r0'} on failure.
    """
    db = DBconnect.DBconnect()
    # Only persist the admin flag for an explicit "0"/"1" input.
    if isAdministrator == "0" or isAdministrator == "1":
        is_successful = db.dbUpdate_user_infomation(
            user_id, user_name, user_pwd, isAdministrator
        )
    else:
        is_successful = db.dbUpdate_user_infomation(
            user_id, user_name, user_pwd
        )
    print(is_successful)  # leftover debug output
    is_admin = db.dbQuery_is_administrator(user_id)
    # NOTE(review): if is_admin is empty, the indexing below raises
    # IndexError despite the guard above -- confirm the query always
    # returns a row for an existing user.
    if not is_admin:
        isAdministrator = 0
    if is_admin[0][0] != 0:
        isAdministrator = 1
    elif is_admin[0][0] == 0:
        isAdministrator = 0
    if is_successful:
        dic = {
            "returnCode": "a0",
            "user_id": user_id,
            "user_name": user_name,
            "user_pwd": user_pwd,
            "isAdministrator": isAdministrator
        }
    else:
        dic = {
            "returnCode": "r0"
        }
    return dic
if __name__ == '__main__':
    # Ad-hoc manual test: insert one sample question and print the result.
    op = OPcontrol()
    #k = op.answerCorrectJudgment("1001","2","硬时系统","这一道题记录点信息")
    li = [
        "10000002",
        "5",
        "2",
        "填空题",
        "请问1+1=?",
        "2",
        "1+1=2",
        "1991",
        "智商检测"
    ]
    k = op.insert_new_title(li)
    print(k)
| [
"mumten120@outlook.com"
] | mumten120@outlook.com |
3457b96701489bc8d5925ff1ad5b764f2984a595 | a126a8b3be572c08e40883a64b63a3cb39317a94 | /tests/test_mop.py | 53969a42019407a87d056d9e5fe52ad4a405dccd | [
"MIT"
] | permissive | 0LL13/person | 8fbd8aa3062c4ff5b8b0037a1c78b2c7bb92fbd2 | 3b9d43c7b5c1452fd6d28ad50f1831dfb4c4fa62 | refs/heads/master | 2023-01-06T20:31:43.340124 | 2020-11-06T09:41:24 | 2020-11-06T09:41:24 | 288,961,722 | 0 | 2 | MIT | 2020-10-25T17:51:04 | 2020-08-20T09:22:00 | Python | UTF-8 | Python | false | false | 2,826 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# test_mop.py
"""Tests for `mop_role` module."""
import pytest
from context import helpers # noqa
from context import mop_role
# pylint: disable=redefined-outer-name
def test_mop_role():
    """End-to-end checks of MoP construction, party changes and ward data."""
    # pylint: disable=W0612, W0613
    mop_1 = mop_role.MoP(
        "14",
        "NRW",
        "Grüne",
        "Alfons-Reimund",
        "Hubbeldubbel",
        peer_title="auf der",
        electoral_ward="Rhein-Sieg-Kreis IV",
        minister="JM",
    )
    assert mop_1.legislature == "14"  # nosec
    assert mop_1.first_name == "Alfons-Reimund"  # nosec
    assert mop_1.last_name == "Hubbeldubbel"  # nosec
    assert mop_1.gender == "male"  # nosec
    assert mop_1.peer_preposition == "auf der"  # nosec
    assert mop_1.party_name == "Grüne"  # nosec
    assert mop_1.parties == [  # nosec
        helpers.Party(
            party_name="Grüne", party_entry="unknown", party_exit="unknown"
        )  # noqa # nosec
    ]  # noqa # nosec
    assert mop_1.ward_no == 28  # nosec
    assert mop_1.voter_count == 110389  # nosec
    assert mop_1.minister == "JM"  # nosec

    # Switching party updates party_name and appends to the history.
    mop_1.add_Party("fraktionslos")
    assert mop_1.party_name == "fraktionslos"  # nosec
    assert mop_1.parties == [  # nosec
        helpers.Party(
            party_name="Grüne", party_entry="unknown", party_exit="unknown"
        ),  # noqa # nosec
        helpers.Party(
            party_name="fraktionslos",
            party_entry="unknown",
            party_exit="unknown",  # noqa # nosec
        ),
    ]

    # "Landesliste" (no direct ward) falls back to the default ward "ew".
    mop_2 = mop_role.MoP(
        "14",
        "NRW",
        "CDU",
        "Regina",
        "Dinther",
        electoral_ward="Landesliste",
    )  # noqa
    assert mop_2.electoral_ward == "ew"  # nosec

    mop_3 = mop_role.MoP(
        "16",
        "NRW",
        "Piraten",
        "Heiner",
        "Wiekeiner",
        electoral_ward="Kreis Aachen I",
    )  # noqa
    assert mop_3.voter_count == 116389  # nosec

    mop_4 = mop_role.MoP(
        "16",
        "NRW",
        "Linke",
        "Heiner",
        "Wiekeiner",
        electoral_ward="Köln I"
    )  # noqa
    assert mop_4.ward_no == 13  # nosec
    assert mop_4.voter_count == 121721  # nosec

    # Without a ward the defaults apply until change_ward() is called.
    mop_5 = mop_role.MoP("14", "NRW", "Grüne", "Heiner", "Wiekeiner")
    assert mop_5.electoral_ward == "ew"  # nosec
    assert mop_5.ward_no is None  # nosec
    assert mop_5.voter_count is None  # nosec
    mop_5.change_ward("Essen III")
    assert mop_5.electoral_ward == "Essen III"  # nosec
    assert mop_5.ward_no == 67  # nosec
    assert mop_5.voter_count == 104181  # nosec
def test_person_NotInRangeError():
    """A legislature number outside the supported range must raise."""
    # pylint: disable=W0612, W0613
    with pytest.raises(helpers.NotInRange):
        mop_role.MoP("100", "NRW", "SPD", "Alfons-Reimund", "Hubbeldubbel")
| [
"hardy.ecc95@gmail.com"
] | hardy.ecc95@gmail.com |
32899d3e754390786ab649a1de26f959c3d28b8e | ebd6f68d47e192da7f81c528312358cfe8052c8d | /swig/Examples/test-suite/python/overload_template_runme.py | 014ec71cbb0db5035821e801e8ec2cb7a7342c9d | [
"LicenseRef-scancode-swig",
"GPL-3.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-only",
"Apache-2.0"
] | permissive | inishchith/DeepSpeech | 965ad34d69eb4d150ddf996d30d02a1b29c97d25 | dcb7c716bc794d7690d96ed40179ed1996968a41 | refs/heads/master | 2021-01-16T16:16:05.282278 | 2020-05-19T08:00:33 | 2020-05-19T08:00:33 | 243,180,319 | 1 | 0 | Apache-2.0 | 2020-02-26T05:54:51 | 2020-02-26T05:54:50 | null | UTF-8 | Python | false | false | 3,596 | py | from overload_template import *
f = foo()
a = maximum(3, 4)
b = maximum(3.4, 5.2)
# mix 1
if (mix1("hi") != 101):
raise RuntimeError, ("mix1(const char*)")
if (mix1(1.0, 1.0) != 102):
raise RuntimeError, ("mix1(double, const double &)")
if (mix1(1.0) != 103):
raise RuntimeError, ("mix1(double)")
# mix 2
if (mix2("hi") != 101):
raise RuntimeError, ("mix2(const char*)")
if (mix2(1.0, 1.0) != 102):
raise RuntimeError, ("mix2(double, const double &)")
if (mix2(1.0) != 103):
raise RuntimeError, ("mix2(double)")
# mix 3
if (mix3("hi") != 101):
raise RuntimeError, ("mix3(const char*)")
if (mix3(1.0, 1.0) != 102):
raise RuntimeError, ("mix3(double, const double &)")
if (mix3(1.0) != 103):
raise RuntimeError, ("mix3(double)")
# Combination 1
if (overtparams1(100) != 10):
raise RuntimeError, ("overtparams1(int)")
if (overtparams1(100.0, 100) != 20):
raise RuntimeError, ("overtparams1(double, int)")
# Combination 2
if (overtparams2(100.0, 100) != 40):
raise RuntimeError, ("overtparams2(double, int)")
# Combination 3
if (overloaded() != 60):
raise RuntimeError, ("overloaded()")
if (overloaded(100.0, 100) != 70):
raise RuntimeError, ("overloaded(double, int)")
# Combination 4
if (overloadedagain("hello") != 80):
raise RuntimeError, ("overloadedagain(const char *)")
if (overloadedagain() != 90):
raise RuntimeError, ("overloadedagain(double)")
# specializations
if (specialization(10) != 202):
raise RuntimeError, ("specialization(int)")
if (specialization(10.0) != 203):
raise RuntimeError, ("specialization(double)")
if (specialization(10, 10) != 204):
raise RuntimeError, ("specialization(int, int)")
if (specialization(10.0, 10.0) != 205):
raise RuntimeError, ("specialization(double, double)")
if (specialization("hi", "hi") != 201):
raise RuntimeError, ("specialization(const char *, const char *)")
# simple specialization
xyz()
xyz_int()
xyz_double()
# a bit of everything
if (overload("hi") != 0):
raise RuntimeError, ("overload()")
if (overload(1) != 10):
raise RuntimeError, ("overload(int t)")
if (overload(1, 1) != 20):
raise RuntimeError, ("overload(int t, const int &)")
if (overload(1, "hello") != 30):
raise RuntimeError, ("overload(int t, const char *)")
k = Klass()
if (overload(k) != 10):
raise RuntimeError, ("overload(Klass t)")
if (overload(k, k) != 20):
raise RuntimeError, ("overload(Klass t, const Klass &)")
if (overload(k, "hello") != 30):
raise RuntimeError, ("overload(Klass t, const char *)")
if (overload(10.0, "hi") != 40):
raise RuntimeError, ("overload(double t, const char *)")
if (overload() != 50):
raise RuntimeError, ("overload(const char *)")
# everything put in a namespace
if (nsoverload("hi") != 1000):
raise RuntimeError, ("nsoverload()")
if (nsoverload(1) != 1010):
raise RuntimeError, ("nsoverload(int t)")
if (nsoverload(1, 1) != 1020):
raise RuntimeError, ("nsoverload(int t, const int &)")
if (nsoverload(1, "hello") != 1030):
raise RuntimeError, ("nsoverload(int t, const char *)")
if (nsoverload(k) != 1010):
raise RuntimeError, ("nsoverload(Klass t)")
if (nsoverload(k, k) != 1020):
raise RuntimeError, ("nsoverload(Klass t, const Klass &)")
if (nsoverload(k, "hello") != 1030):
raise RuntimeError, ("nsoverload(Klass t, const char *)")
if (nsoverload(10.0, "hi") != 1040):
raise RuntimeError, ("nsoverload(double t, const char *)")
if (nsoverload() != 1050):
raise RuntimeError, ("nsoverload(const char *)")
A_foo(1)
b = B()
b.foo(1)
| [
"inishchith@gmail.com"
] | inishchith@gmail.com |
bfdc37371096de0b170b034eeb65f0b6ca5f5a26 | 741f698098f5566302706c856af7960aa3a81d14 | /datahouse/text_segment.py | 107eb0f32ff56f25b346d9c93a4cc87f37db9594 | [] | no_license | 0xyd/DaPanJiue | 4d751d13308810b963293be5198268c783c17a37 | a2e43082e2ea23e29bd0b379ce4edf6549b74783 | refs/heads/master | 2021-06-14T14:15:55.196633 | 2017-05-02T01:09:08 | 2017-05-02T01:09:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | import re
import lxml
import jieba
# 20170212: A regex used by the filter to delete the following characters:
# '(│|├|┼|┤|└|┴|┘|┌|┬|┐|【|】|?|,|:|。|、|;|「|」|○|\(|\))'
# 20170212:
| [
"davisfreeman1015@gmail.com"
] | davisfreeman1015@gmail.com |
f04fca0aedf6e3b04ef05ba76937338d2691420d | 91ff9046470501702884b9ddf51f154ea606218b | /scaf/data/embedding.py | 9eb36ece6fcc06dce2de8b37c6905d4acf4ddf16 | [
"CC-BY-4.0",
"MIT"
] | permissive | englhardt/scaf | fd4731d0d0c2dc1c24180e5c006b98fe8f92f49e | 7ed7b424766ccb8910d28b55014604b5d98c276c | refs/heads/master | 2020-04-30T09:07:26.858005 | 2019-03-20T14:18:25 | 2019-03-22T15:06:52 | 176,738,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,478 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 Adrian Englhardt <adrian.englhardt@gmail.com>
# Licensed under the MIT License - https://opensource.org/licenses/MIT
import logging
import os
from builtins import map
import numpy as np
from gensim.models import KeyedVectors
class Embedding(object):
    """Thin wrapper around a gensim word2vec ``KeyedVectors`` model.

    Vectors are L2-normalized in place on load (``init_sims(replace=True)``),
    so dot products of ``represent()`` outputs are cosine similarities.
    """

    def __init__(self, path, binary=False):
        """Load a word2vec-format model from *path*.

        :param path: path to the model file (text or binary word2vec format)
        :param binary: True if the file is in binary word2vec format
        """
        if os.path.isfile(path):
            self.model = KeyedVectors.load_word2vec_format(path, binary=binary)
        else:
            # Best-effort: log and bail out. NOTE(review): ``self.model``
            # stays unset on this path, so later method calls raise
            # AttributeError -- original behavior, preserved.
            logging.error('Model \'{}\' can not be loaded.'.format(path))
            return
        # Replace the raw vectors with their L2-normalized form.
        self.model.init_sims(replace=True)

    def represent(self, word):
        """Return the (normalized) vector for *word*, or zeros if OOV."""
        if word in self.model.vocab:
            # Bug fix: look the row index up via the vocab entry instead of
            # ``index2word.index(word)``, which was an O(|vocab|) linear scan
            # per call. Both resolve to the same row of ``syn0``.
            return self.model.syn0[self.model.vocab[word].index]
        else:
            return np.zeros(self.model.vector_size)

    def similarity(self, word1, word2):
        """Cosine similarity of two words.

        Vectors are supposed to be normalized (done in ``__init__``), so a
        plain dot product suffices. OOV words contribute a zero vector.
        """
        return self.represent(word1).dot(self.represent(word2))

    def most_similar(self, positive=(), negative=(), n=10):
        """Top-*n* analogy results for positive/negative word lists.

        Vectors are supposed to be normalized.
        """
        return self.model.most_similar(positive=positive, negative=negative, topn=n)

    def most_similar_to_word(self, word, n=10):
        """Top-*n* nearest neighbours of a single *word*.

        Vectors are supposed to be normalized.
        """
        return self.model.most_similar(positive=[word], topn=n)

    def oov(self, word):
        """Return True if *word* is out of the model's vocabulary."""
        return word not in self.model.vocab

    def eval_analogy(self, eval_file):
        """Run gensim's analogy accuracy evaluation on *eval_file*."""
        return self.model.accuracy(eval_file, case_insensitive=True)

    def vocab(self):
        """Return the underlying gensim vocabulary mapping."""
        return self.model.vocab

    def model(self):
        # NOTE(review): unreachable on instances -- the ``self.model``
        # attribute assigned in __init__ shadows this method. Kept unchanged
        # for interface compatibility; access the attribute directly instead.
        return self.model
class BasicEmbedding(object):
    """Minimal word-embedding store backed by a plain ``dict``.

    Loads a text word2vec file (header line ``<count> <dim>`` followed by
    ``word v1 v2 ...`` rows) and keeps each vector L2-normalized.
    """

    def __init__(self, path):
        """Load and normalize vectors from the text word2vec file at *path*.

        :param path: path to a text-format word2vec file
        """
        # Bug fix: initialize state before the existence check so a failed
        # load leaves a usable empty embedding instead of a half-constructed
        # object whose later method calls raise AttributeError.
        self.model = dict()
        self.vector_size = 0
        if not os.path.isfile(path):
            logging.error('Model \'{}\' can not be loaded.'.format(path))
            return
        with open(path) as f:
            # Header line: "<vocab size> <vector dimension>"
            self.vector_size = int(f.readline().split()[1])
            for line in f:
                parts = line.split()
                word = parts[0]
                # Parse the vector explicitly as a float ndarray. The
                # original relied on numpy's reflected division turning a
                # plain list into an ndarray as a side effect of ``/=``.
                vec = np.array(parts[1:], dtype=float)
                # L2-normalize. Assumes no all-zero rows in the input file
                # (a zero norm would produce NaN/inf, as before) -- TODO confirm.
                vec /= np.linalg.norm(vec)
                self.model[word] = vec

    def update_model(self, vocab, vectors, normalize=False):
        """Replace the stored vectors with *vectors* keyed by *vocab*.

        :param vocab: iterable of words
        :param vectors: sequence of vectors, index-aligned with *vocab*
        :param normalize: if True, L2-normalize each vector in place
        """
        self.model = dict()
        for i, w in enumerate(vocab):
            self.model[w] = vectors[i]
            if normalize:
                self.model[w] /= np.linalg.norm(self.model[w])

    def vocab(self):
        """Return the stored words as a list."""
        return list(self.model.keys())

    def represent(self, word):
        """Return the vector for *word*, or a zero vector for OOV words."""
        return self.model.get(word, np.zeros(self.vector_size))

    def vector_size(self):
        # NOTE(review): unreachable on instances -- the ``self.vector_size``
        # attribute assigned in __init__ shadows this method. Kept unchanged
        # for interface compatibility; read the attribute directly instead.
        return self.vector_size

    @staticmethod
    def common_vocab(embeddings):
        """Intersection of the vocabularies of *embeddings*.

        Returns ``[]`` for an empty/None input, the raw vocab list for a
        single embedding, and a set for two or more (original contract,
        preserved as-is despite the mixed return types).
        """
        if not embeddings or len(embeddings) == 0:
            return []
        if len(embeddings) == 1:
            return embeddings[0].vocab()
        intersected_vocab = set(embeddings[0].vocab())
        for e in embeddings[1:]:
            intersected_vocab &= set(e.vocab())
        return intersected_vocab

    @staticmethod
    def merged_vocab(embeddings):
        """Union of the vocabularies of *embeddings* (same contract shape
        as :meth:`common_vocab`)."""
        if not embeddings or len(embeddings) == 0:
            return []
        if len(embeddings) == 1:
            return embeddings[0].vocab()
        merged_vocab = set(embeddings[0].vocab())
        for e in embeddings[1:]:
            merged_vocab |= set(e.vocab())
        return merged_vocab
| [
"adrian.englhardt@kit.edu"
] | adrian.englhardt@kit.edu |
06b28b5965035b5b5634f69bf347939ad40b055b | 4dd9fa47cb3e2b9f2a89c4f286d4a83ef2582e63 | /trunk/parallelUQ/old_20140405/restart.py | 1bedf202463b2de0628f7a08c79ba18d48044da7 | [] | no_license | taoyiliang/unc-quant | 79807b8891c8814a68f864a64d920768533d0bf1 | 251af15fca1a3d7e72236468208486cf8eb350e3 | refs/heads/master | 2022-05-31T13:40:58.605789 | 2022-05-23T17:08:52 | 2022-05-23T17:08:52 | 10,384,159 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | import parExecutor as ex
import cPickle as pk
#load executor
print 'Loading executor for restart...'
rstDict=pk.load('executor.backup.pk','rb')
for key in rstDict.keys():
print key,rstDict[key]
#newexec = Executor(restart=True,rstDict=rstDict)
| [
"taoyiliang@gmail.com"
] | taoyiliang@gmail.com |
a45f8d01ea3d38c88df410debafde8bacda6c399 | fa8d47841322bec699cc7d507e94327b63ea4990 | /phonecall/apps.py | b8f64bbad6ab58535833538758addf946deb5d88 | [] | no_license | vitorh45/work-at-olist | f9fd988bd746ecab93ca94dbca70f5eb5ed5c24a | 9cc68f5faa29e8ac1ad061d83b6aae745e9404c7 | refs/heads/master | 2020-06-14T20:12:23.604161 | 2018-11-12T02:18:17 | 2018-11-12T02:18:17 | 75,351,575 | 0 | 0 | null | 2016-12-02T02:13:14 | 2016-12-02T02:13:14 | null | UTF-8 | Python | false | false | 93 | py | from django.apps import AppConfig
class PhonecallConfig(AppConfig):
    """Django AppConfig registering the ``phonecall`` application."""

    # Dotted-path label Django uses to locate this app in INSTALLED_APPS.
    name = 'phonecall'
| [
"vitorh45@gmail.com"
] | vitorh45@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.