seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
28924497501 |
"""
프로그래머스 Lv2 -뉴스 클러스터링
"""
# 아이디어가 맘에 들었음!@
"""
20 Minute :: flood fill
"""
from collections import Counter
import math
def make_window_size2(string):
    """Collect every two-character window of *string* whose characters are
    both alphabetic, lowercased, preserving order."""
    pairs = []
    for left, right in zip(string, string[1:]):
        if left.isalpha() and right.isalpha():
            pairs.append((left + right).lower())
    return pairs
def solution(str1, str2):
    """Jaccard similarity of the two strings' letter-bigram multisets,
    scaled by 65536 and floored (프로그래머스 Lv2 '뉴스 클러스터링')."""
    window1 = make_window_size2(str1)
    window2 = make_window_size2(str2)
    total_window = window1 + window2
    #Processing
    counter1 = Counter(window1)
    counter2 = Counter(window2)
    union_counter = {}
    intersection_counter = {}
    for phrase in total_window:
        cnt1 = counter1.get(phrase, 0)
        cnt2 = counter2.get(phrase, 0)
        # multiset union takes the max count, intersection the min count
        union_counter[phrase] = max(cnt1, cnt2)
        if min(cnt1, cnt2) > 0:
            intersection_counter[phrase] = min(cnt1, cnt2)
    # NOTE(review): leftover debug prints — consider removing before reuse
    print("union_cnt:", union_counter)
    print("intersection_cnt:", intersection_counter)
    if union_counter:
        answer = math.floor((sum(intersection_counter.values()) * 65536 / sum(union_counter.values())))
    else:
        # both multisets empty: similarity is defined as 1 -> 65536
        answer = 65536
return answer | GuSangmo/BOJ_practice | programmers/level2/뉴스클러스터링.py | 뉴스클러스터링.py | py | 1,199 | python | en | code | 0 | github-code | 36 |
39479742836 | # -*- coding: utf-8 -*-
from django.urls import path, re_path, include
from decks import views
from decks.views import TournamentListView
urlpatterns = [
    # deck index and deck detail pages
    re_path(r'^$', views.index, name='index'),
    re_path(r'^(?P<deck_id>[0-9]+)/$', views.deck, name='deck'),
    # cluster browsing and curation actions
    re_path(r'^cluster/(?P<cluster_id>[0-9]+)/$', views.cluster, name='cluster'),
    re_path(r'^cluster/(?P<cluster_id>[0-9]+)/cards/$', views.cluster_cards, name='cluster_cards'),
    re_path(r'^cluster/(?P<cluster_id>[0-9]+)/close/$', views.cluster_close, name='cluster_close'),
    re_path(r'^cluster/(?P<cluster_id>[0-9]+)/far/$', views.cluster_far, name='cluster_far'),
    re_path(r'^cluster/$', views.clusters, name='clusters'),
    # tournament list (class-based view) and detail
    re_path(r'^tournament/$', TournamentListView.as_view(), name='tournaments'),
    re_path(r'^tournament/(?P<tournament_id>[0-9]+)/$', views.tournament, name='tournament'),
    # analysis tools
    re_path(r'^crafter/$', views.recommendations, name='recommendations'),
    re_path(r'^manabase/$', views.manabaseanalysis, name='manabaseanalysis'),
]
| jcrickmer/mtgdbpy | decks/urls.py | urls.py | py | 1,036 | python | en | code | 0 | github-code | 36 |
14102761586 | import platform
import yaml
import pkg_resources
import re
import logging
log = logging.getLogger(__name__)
def convert_conda_yaml_to_requirement(conda_array) :
    '''
    Convert the conda.yaml syntax to requirements.txt syntax :
    for now :
        - select "dependencies" key
        - transform = into ==
        - add pip packages dependencies to the list of other dependencies
    Additionally remove python requirement (not supported by pkg_resources.require)
    Also need to remove pip -e "install"
    '''
    # conda dependencies are either strings ("pkg=1.2") or a {"pip": [...]} dict
    dep_array = [v for v in conda_array["dependencies"] if type(v) == str]
    # Bug fix: tolerate a conda.yaml without any pip section (the original
    # indexed [0] into an empty list and raised IndexError).
    pip_sections = [v for v in conda_array["dependencies"]
                    if type(v) == dict and "pip" in v.keys()]
    pip_require = pip_sections[0]["pip"] if pip_sections else []
    # remove " -e " (editable) installs, which pkg_resources cannot check
    pip_require = [v for v in pip_require if re.match(r"^ *-e ", v) is None]
    # conda uses single '=' where pip uses '==': double it, then undo the
    # damage done to '>=', '<=' and already-doubled '=='
    dep_array_conv = [x.replace('=', '==') for x in dep_array]
    dep_array_conv = [x.replace('>==', '>=').replace('<==', '<=').replace('===', '==')
                      for x in dep_array_conv]
    # append pip requirements after the conda ones
    dep_array_conv = dep_array_conv + pip_require
    # drop the python pin (handled by conda_python_version_requirement instead)
    dep_array_conv = [x for x in dep_array_conv if re.match('^python[<.>,=]=', x) is None]
    return dep_array_conv
def conda_python_version_requirement(conda_array):
    '''
    Return the python version pin from the conda.yaml (e.g. "=3.8"),
    or None when no python requirement is present.
    Only the first occurrence is considered.
    '''
    string_deps = (d for d in conda_array["dependencies"] if type(d) == str)
    matches = [d for d in string_deps if re.match('^python[<.>,=]', d) is not None]
    if not matches:
        return None
    # strip the package name, keep only the operator + version part
    return matches[0].replace('python', '')
def check_python(requirement, value) :
    '''
    Check if a Python version abide by a Python version requirement
    WARNING :
       this can only check 1 condition, can not check multiple conditions
       separated by ,
    '''
    # extract the comparison operator and normalize conda's '=' to '=='
    condition = re.findall('[<,>,=]=*', requirement)[0]
    condition = condition.replace('=', '==')
    condition = condition.replace('<==', '<=').replace('>==', '>=').replace('===', '==')
    version_req = re.findall('[0-9.]+', requirement)[0]
    # compare only as many version components as the requirement specifies
    len_version = len(version_req.split('.'))
    value = ".".join(value.split('.')[0:len_version])
    value = pkg_resources.parse_version(value)
    version_req = pkg_resources.parse_version(version_req)
    # Explicit comparator table instead of eval(): identical comparisons,
    # but no dynamic code execution on a string built from file contents.
    comparators = {'<': lambda a, b: a < b,
                   '<=': lambda a, b: a <= b,
                   '==': lambda a, b: a == b,
                   '>=': lambda a, b: a >= b,
                   '>': lambda a, b: a > b}
    return comparators[condition](value, version_req)
def check_environment(filename = 'conda.yaml') :
    '''
    Check that the current conda environment abide by the filename (conda.yaml)
    and raise an error if not.
    A good place to put the function is in the file ./src/{project_name}/pipeline.py
    at the beginning of the create_pipelines function
    '''
    with open(filename) as stream :
        values = yaml.safe_load(stream)
    # package requirements: pkg_resources.require raises if any is unmet
    pkg_req = convert_conda_yaml_to_requirement(values)
    pkg_resources.require(pkg_req)
    # the python interpreter version is checked separately, if one is pinned
    python_req = conda_python_version_requirement(values)
    if (python_req != None) :
        python_ver = platform.python_version()
        if not(check_python(python_req, python_ver)) :
            raise(Exception(f"python version {python_ver} is not compatible "
                            f"with conda.yaml python requirement {python_req}"))
    log.info(f"Conda environment matches the requirements of (unknown)")
| nasa/ML-airport-data-services | data_services/conda_environment_test.py | conda_environment_test.py | py | 3,612 | python | en | code | 3 | github-code | 36 |
29158315057 |
# Imports
from __future__ import print_function, division
import tensorflow as tf
import warnings
from tensorflow.python.ops import control_flow_ops
import numpy as np
import pdb
# DATA AUGMENTATION ****************************************************************************************************
def random_rotation_image_with_annotation(image_tensor, annotation_tensor, max_angle):
    """With probability depending on the drawn direction/flag, rotate image
    and annotation by the same random angle and direction (nearest-neighbour
    interpolation); otherwise both tensors pass through unchanged.
    NOTE(review): the angle is drawn as an integer in [0, max_angle) and cast
    to float, so max_angle is presumably whole radians — confirm with callers.
    """
    # Random variable: two possible outcomes (0 or 1)
    # with 0.5 chance
    random_var = tf.cast(tf.random_uniform(maxval=2, dtype=tf.int32, shape=[]), dtype=tf.float32)
    # Random selection of angle and direction of rotation
    random_angle = tf.cast(tf.random_uniform(maxval=max_angle, dtype=tf.int32, shape=[]), dtype=tf.float32)
    random_direction = tf.cast(tf.random_uniform(minval=-1, maxval=1, dtype=tf.int32, shape=[]), dtype=tf.float32)
    # rotation applies when |direction| * random_var == 0; the identical
    # angle/direction is used for both tensors so they stay aligned
    randomly_rotated_img = control_flow_ops.cond(pred=tf.equal(tf.multiply(tf.abs(random_direction), random_var), 0),
                                                 true_fn=lambda: tf.contrib.image.rotate(image_tensor,
                                                                                         random_direction * random_angle,
                                                                                         interpolation='NEAREST'),
                                                 false_fn=lambda: image_tensor)
    randomly_rotated_annotation = control_flow_ops.cond(pred=tf.equal(tf.multiply(tf.abs(random_direction), random_var), 0),
                                                        true_fn=lambda: tf.contrib.image.rotate(annotation_tensor,
                                                                                                random_direction * random_angle,
                                                                                                interpolation='NEAREST'),
                                                        false_fn=lambda: annotation_tensor)
    return randomly_rotated_img, randomly_rotated_annotation
def flip_randomly_left_right_image_with_annotation(image_tensor, annotation_tensor):
    """Accepts image tensor and annotation tensor and returns randomly flipped tensors of both.
    The function performs random flip of image and annotation tensors with probability of 1/2
    The flip is performed or not performed for image and annotation consistently, so that
    annotation matches the image.
    Parameters
    ----------
    image_tensor : Tensor of size (width, height, 3)
        Tensor with image
    annotation_tensor : Tensor of size (width, height, 1)
        Tensor with annotation
    Returns
    -------
    randomly_flipped_img : Tensor of size (width, height, 3) of type tf.float.
        Randomly flipped image tensor
    randomly_flipped_annotation : Tensor of size (width, height, 1)
        Randomly flipped annotation tensor
    """
    # Random variable: two possible outcomes (0 or 1)
    # with 0.5 chance
    random_var = tf.random_uniform(maxval=2, dtype=tf.int32, shape=[])
    # the same random_var drives both cond ops, keeping image and annotation in sync
    randomly_flipped_img = control_flow_ops.cond(pred=tf.equal(random_var, 0),
                                                 true_fn=lambda: tf.image.flip_left_right(image_tensor),
                                                 false_fn=lambda: image_tensor)
    randomly_flipped_annotation = control_flow_ops.cond(pred=tf.equal(random_var, 0),
                                                        true_fn=lambda: tf.image.flip_left_right(annotation_tensor),
                                                        false_fn=lambda: annotation_tensor)
    return randomly_flipped_img, randomly_flipped_annotation
def flip_randomly_up_down_image_with_annotation(image_tensor, annotation_tensor):
    """Accepts image tensor and annotation tensor and returns randomly flipped tensors of both.
    The function performs random flip of image and annotation tensors with probability of 1/2
    The flip is performed or not performed for image and annotation consistently, so that
    annotation matches the image.
    Parameters
    ----------
    image_tensor : Tensor of size (width, height, 3)
        Tensor with image
    annotation_tensor : Tensor of size (width, height, 1)
        Tensor with annotation
    Returns
    -------
    randomly_flipped_img : Tensor of size (width, height, 3) of type tf.float.
        Randomly flipped image tensor
    randomly_flipped_annotation : Tensor of size (width, height, 1)
        Randomly flipped annotation tensor
    """
    # Random variable: two possible outcomes (0 or 1)
    # with 0.5 chance
    random_var = tf.random_uniform(maxval=2, dtype=tf.int32, shape=[])
    # the same random_var drives both cond ops, keeping image and annotation in sync
    randomly_flipped_img = control_flow_ops.cond(pred=tf.equal(random_var, 0),
                                                 true_fn=lambda: tf.image.flip_up_down(image_tensor),
                                                 false_fn=lambda: image_tensor)
    randomly_flipped_annotation = control_flow_ops.cond(pred=tf.equal(random_var, 0),
                                                        true_fn=lambda: tf.image.flip_up_down(annotation_tensor),
                                                        false_fn=lambda: annotation_tensor)
    return randomly_flipped_img, randomly_flipped_annotation
def random_color_distortion(image_tensor, annotation_tensor):
    """Independently apply random brightness, saturation, hue and contrast
    distortions (each with probability 1/2) to the image; the annotation is
    returned unchanged. The result is clipped to [0.0, 1.0], so the image is
    presumably float-valued in that range — confirm with callers."""
    random_var_brightness = tf.random_uniform(maxval=2, dtype=tf.int32, shape=[])
    distorted_image = control_flow_ops.cond(pred=tf.equal(random_var_brightness, 0),
                                            true_fn=lambda: tf.image.random_brightness(image_tensor, max_delta=32. / 255.),
                                            false_fn=lambda: image_tensor)
    random_var_saturation = tf.random_uniform(maxval=2, dtype=tf.int32, shape=[])
    distorted_image = control_flow_ops.cond(pred=tf.equal(random_var_saturation, 0),
                                            true_fn=lambda: tf.image.random_saturation(distorted_image, lower=0.5, upper=1.5),
                                            false_fn=lambda: distorted_image)
    random_var_hue = tf.random_uniform(maxval=2, dtype=tf.int32, shape=[])
    distorted_image = control_flow_ops.cond(pred=tf.equal(random_var_hue, 0),
                                            true_fn=lambda: tf.image.random_hue(distorted_image, max_delta=0.2),
                                            false_fn=lambda: distorted_image)
    random_var_contrast = tf.random_uniform(maxval=2, dtype=tf.int32, shape=[])
    distorted_image = control_flow_ops.cond(pred=tf.equal(random_var_contrast, 0),
                                            true_fn=lambda: tf.image.random_contrast(distorted_image, lower=0.5, upper=1.5),
                                            false_fn=lambda: distorted_image)
    return tf.clip_by_value(distorted_image, 0.0, 1.0), annotation_tensor
# ACCURACY FUNCTIONS ***************************************************************************************************
def compute_accuracy(valid_preds, valid_labels, classes , name = 'accuracy'):
    """Bundle pixel accuracy, mean IoU and mean per-class accuracy ops.
    NOTE(review): tf.metrics.* return (value, update_op) pairs and create
    local variables — the caller presumably initializes local variables and
    runs the update ops; confirm at the call site.
    """
    with tf.name_scope(name):
        #pixel_acc = tf.divide(tf.reduce_sum(tf.cast(tf.equal(valid_labels, valid_preds), dtype=tf.int32)),
        #                      tf.cast(tf.shape(valid_labels)[0], dtype=tf.int32))
        _, pixel_acc = tf.metrics.accuracy(valid_labels, valid_preds)
        #cm = tf.confusion_matrix(valid_labels, valid_preds, num_classes=CLASSES)
        # the second return of mean_iou is used as a confusion matrix here;
        # the scalar mean IoU is derived from it by compute_mean_iou below
        _, cm = tf.metrics.mean_iou(valid_labels, valid_preds, classes)
        mean_iou = compute_mean_iou(cm)
        _, mean_per_class_acc = tf.metrics.mean_per_class_accuracy(valid_labels, valid_preds, classes)
    return pixel_acc, mean_iou, mean_per_class_acc
def compute_mean_iou(total_cm, name='mean_iou'):
    """Compute the mean intersection-over-union via the confusion matrix."""
    # per-class totals along each axis; IoU uses row + col - diagonal
    sum_over_row = tf.to_float(tf.reduce_sum(total_cm, 0))
    sum_over_col = tf.to_float(tf.reduce_sum(total_cm, 1))
    cm_diag = tf.to_float(tf.diag_part(total_cm))  # true positives per class
    denominator = sum_over_row + sum_over_col - cm_diag
    # The mean is only computed over classes that appear in the
    # label or prediction tensor. If the denominator is 0, we need to
    # ignore the class.
    num_valid_entries = tf.reduce_sum(tf.cast(
        tf.not_equal(denominator, 0), dtype=tf.float32))
    # If the value of the denominator is 0, set it to 1 to avoid
    # zero division.
    denominator = tf.where(
        tf.greater(denominator, 0),
        denominator,
        tf.ones_like(denominator))
    iou = tf.div(cm_diag, denominator)
    # If the number of valid entries is 0 (no classes) we return 0.
    result = tf.where(
        tf.greater(num_valid_entries, 0),
        tf.reduce_sum(iou, name=name) / num_valid_entries,
        0)
    return result
# DECAY FUNCTIONS ******************************************************************************************************
def lr_decay(learning_rate):
    """Halve the given learning rate."""
    return learning_rate * 0.5
# NORMALIZATIONS *******************************************************************************************************
def mybn(x, is_train, name='bn'):
    """Manual batch normalization with exponential moving averages.
    Batch statistics are used when is_train is True, the moving averages
    (mu, sigma) otherwise. The moving-average update ops are added to
    tf.GraphKeys.UPDATE_OPS, so they must be run alongside the train op.
    """
    moving_average_decay = 0.9
    with tf.variable_scope(name):
        decay = moving_average_decay
        # Get batch mean and var, which will be used during training
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2])
        # Define variables, mu and sigma are not trainable since depend on the batch (train) or the population (test)
        with tf.device('/CPU:0'):
            mu = tf.get_variable('mu', batch_mean.get_shape(), tf.float32,
                                 initializer=tf.zeros_initializer(), trainable=False)
            sigma = tf.get_variable('sigma', batch_var.get_shape(), tf.float32,
                                    initializer=tf.ones_initializer(), trainable=False)
            beta = tf.get_variable('beta', batch_mean.get_shape(), tf.float32,
                                   initializer=tf.zeros_initializer())
            gamma = tf.get_variable('gamma', batch_var.get_shape(), tf.float32,
                                    initializer=tf.ones_initializer())
        # exponential moving average: v -= (1 - decay) * (v - batch_stat)
        update = 1.0 - decay
        update_mu = mu.assign_sub(update * (mu - batch_mean))
        update_sigma = sigma.assign_sub(update * (sigma - batch_var))
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mu)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_sigma)
        mean, var = tf.cond(is_train, lambda: (batch_mean, batch_var),
                            lambda: (mu, sigma))
        bn = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-5)
    return bn
def mygn(x, G=32, eps=1e-5, name='gn'):
    """Group normalization over G channel groups.
    NOTE(review): the commented-out transposes and the [N, C, H, W] reshapes
    imply the input is expected in NCHW layout, with C divisible by G —
    confirm with callers.
    """
    with tf.variable_scope(name):
        # NHWC to NCHW
        #x = tf.transpose(x, [0, 3, 1, 2])
        _, channels, _, _ = x.get_shape().as_list()
        shape = tf.shape(x)
        N = shape[0]
        C = shape[1]
        H = shape[2]
        W = shape[3]
        x = tf.reshape(x, [N, G, C//G, H, W])
        # normalize within each group over (channels-in-group, H, W)
        group_mean, group_var = tf.nn.moments(x, [2, 3, 4], keep_dims=True)
        x = (x - group_mean) / tf.sqrt(group_var + eps)
        with tf.device('/CPU:0'):
            beta = tf.get_variable('beta', [channels], initializer=tf.constant_initializer(0.0))
            gamma = tf.get_variable('gamma', [channels], initializer=tf.constant_initializer(1.0))
        # per-channel affine parameters broadcast over N, H, W
        gamma = tf.reshape(gamma, [1, C, 1, 1])
        beta = tf.reshape(beta, [1, C, 1, 1])
        x = tf.reshape(x, [N, C, H, W]) * gamma + beta
        # NCHW to NHWC
        #x = tf.transpose(x, [0, 2, 3, 1])
    return x
# RELU *****************************************************************************************************************
def myrelu(x, leakness=0.0, name=None):
    """ReLU, or leaky ReLU when ``leakness`` > 0.

    Parameters:
        x: input tensor.
        leakness: slope used for negative inputs (0.0 gives a plain ReLU).
        name: optional op name; defaults to 'lrelu' / 'relu'.
    """
    # Bug fix: the resolved `name` was computed but never passed to the op
    # (the original hard-coded name='lrelu' / name='relu').
    if leakness > 0.0:
        name = 'lrelu' if name is None else name
        return tf.maximum(x, x * leakness, name=name)
    else:
        name = 'relu' if name is None else name
        return tf.nn.relu(x, name=name)
# UNET OUTPUT SIZE *****************************************************************************************************
def compute_unet_output_size(in_size, num_layers):
    """Output spatial size of a U-Net given its input size and depth.

    Every level loses pixels to two unpadded convolutions, visited twice
    (contracting and expanding path); the bottom level is traversed once,
    so its reduction is added back at the end.
    """
    shrink = 2  # pixels removed per convolution at the current level
    size = in_size
    for _ in range(num_layers):
        size -= 4 * shrink  # 2 convolutions, each applied on the way down and up
        shrink *= 2
    return size + shrink  # bottom layer only visited once
def is_valid_input_unet(in_size, num_layers):
    """Return 1 if *in_size* stays even before every pooling step of the
    contracting path, else 0.

    Each of the num_layers-1 pooled levels shrinks the size by 4 (two
    unpadded 3x3 convolutions) before halving; an odd size at any pooling
    makes the input invalid.
    """
    valid = 1
    size = in_size
    for _ in range(num_layers - 1):
        size -= 4  # two convolutions remove 2 pixels each
        if size % 2 != 0:
            valid = 0
        size = size / 2
    return valid
# INITIALIZER FUNCTIONS ************************************************************************************************
def identity_initializer(filter_shape):
    """returns the values of a filter that simply passes forward the input feature map

    filter_shape is (kH, kW, in_channels, out_channels); a 1.0 is placed at
    the spatial center for each matching in/out channel pair.
    """
    # Bug fix: np.float was removed in NumPy 1.24 (deprecated since 1.20);
    # a plain Python float is equivalent.
    filter = np.zeros(filter_shape)
    center = int(filter_shape[1] / 2)
    for i in range(filter_shape[2]):
        filter[center, center, i, i] = 1.0
    return filter
# SPECTRAL NORMED WEIGHTS
NO_OPS = 'NO_OPS'
def _l2normalize(v, eps=1e-12):
    # scale v to unit L2 norm; eps guards against division by zero for v == 0
    return v / (tf.reduce_sum(v ** 2) ** 0.5 + eps)
def spectral_normed_weight(W, u=None, num_iters=1, update_collection=None, with_sigma=False):
    """Divide W by an estimate of its largest singular value (spectral norm),
    obtained by power iteration on the [-1, out_channels] reshape of W.
    The left vector `u` persists between calls; its update op is either run
    inline (update_collection=None), added to the given collection, or
    skipped entirely (update_collection=NO_OPS).
    """
    # Usually num_iters = 1 will be enough
    W_shape = W.shape.as_list()
    W_reshaped = tf.reshape(W, [-1, W_shape[-1]])
    if u is None:
        # persistent left singular vector estimate, reused across calls
        u = tf.get_variable("u", [1, W_shape[-1]], initializer=tf.truncated_normal_initializer(), trainable=False)
    def power_iteration(i, u_i, v_i):
        # one power-iteration step: alternate multiplications by W^T and W
        v_ip1 = _l2normalize(tf.matmul(u_i, tf.transpose(W_reshaped)))
        u_ip1 = _l2normalize(tf.matmul(v_ip1, W_reshaped))
        return i + 1, u_ip1, v_ip1
    _, u_final, v_final = tf.while_loop(
        cond=lambda i, _1, _2: i < num_iters,
        body=power_iteration,
        loop_vars=(tf.constant(0, dtype=tf.int32),
                   u, tf.zeros(dtype=tf.float32, shape=[1, W_reshaped.shape.as_list()[0]]))
    )
    if update_collection is None:
        warnings.warn('Setting update_collection to None will make u being updated every W execution. This maybe undesirable'
                      '. Please consider using a update collection instead.')
        sigma = tf.matmul(tf.matmul(v_final, W_reshaped), tf.transpose(u_final))[0, 0]
        # sigma = tf.reduce_sum(tf.matmul(u_final, tf.transpose(W_reshaped)) * v_final)
        W_bar = W_reshaped / sigma
        # force the u update to run whenever W_bar is used
        with tf.control_dependencies([u.assign(u_final)]):
            W_bar = tf.reshape(W_bar, W_shape)
    else:
        sigma = tf.matmul(tf.matmul(v_final, W_reshaped), tf.transpose(u_final))[0, 0]
        # sigma = tf.reduce_sum(tf.matmul(u_final, tf.transpose(W_reshaped)) * v_final)
        W_bar = W_reshaped / sigma
        W_bar = tf.reshape(W_bar, W_shape)
        # Put NO_OPS to not update any collection. This is useful for the second call of discriminator if the update_op
        # has already been collected on the first call.
        if update_collection != NO_OPS:
            tf.add_to_collection(update_collection, u.assign(u_final))
    if with_sigma:
        return W_bar, sigma
else:
return W_bar | ChangqingHui/Semantic-Segmentation-with-Adversarial-Networks | utils.py | utils.py | py | 15,349 | python | en | code | 1 | github-code | 36 |
72052572903 | # Created by: Younes Elfeitori
# Created on: Nov 2017
# Created for: ICS3U
# This program selects 10 numbers from 1 to 10 and selects the biigest value
from numpy import random
def find_max_value(array):
    """Return the largest element of *array*."""
    return max(array)
# input
counter = 0
random_numbers = []
while counter < 10:
single_number = random.randint(1, 10 + 1)
print(single_number)
random_numbers.append(single_number)
counter = counter + 1
# process
largest_value = find_max_value(random_numbers)
# output
print("\nThe largest number is: " + str(largest_value))
| Youneselfeitori/Unit5-02 | array_max.py | array_max.py | py | 610 | python | en | code | 0 | github-code | 36 |
25417748938 | #!/usr/bin/env python
import soundfile as sf
import math
class LoopableSample():
    """Builds a seamlessly loopable audio sample: the second half of the
    collected data is laid down first and the first half is cross-faded
    over its tail. Written at 44100 Hz."""
    def __init__(self):
        # flat list of float32 samples accumulated via addBuffer
        self.data = []
    def addBuffer(self, buffer):
        # append the samples of one buffer to the running data
        for d in buffer:
            self.data.append(d)
    def fromFile(self, file):
        """Load samples from an audio file, skipping the first 2 * 4410
        frames (presumably to drop an attack transient — confirm)."""
        print("loading %s" % file)
        (data, ignore) = sf.read(file, dtype="float32")
        self.addBuffer(data[2 * 4410:])
        return self
    def length(self):
        # usable length: everything except the final 5 * 4410 frames
        return len(self.data) - (5 * 4410)
    def create(self, outFile):
        """Write a loopable version of the sample to outFile."""
        l = self.length()
        halfWay = math.floor(l / 2)
        xFade = math.floor(0.75 * halfWay)  # cross-fade window: 75% of a half
        out = []
        for s in range(l - xFade):
            p = s + xFade - halfWay
            if s < (halfWay - xFade):
                # pure second half before the cross-fade region
                out.append(self.data[s + halfWay])
            elif s >= halfWay:
                # pure first half after the cross-fade region
                out.append(self.data[p])
            else:
                # linear cross-fade between the two halves
                f = 1.0 * p / xFade
                out.append((f * self.data[p]) + ((1.0 - f) * self.data[s + halfWay]))
        sf.write(outFile, out, 44100)
| andrewbooker/samplescaper | capture/LoopableSample.py | LoopableSample.py | py | 1,055 | python | en | code | 2 | github-code | 36 |
74352481703 | # -*- coding: utf-8 -*-
"""
-------------------------------------------------------------------------------
GUFY - Copyright (c) 2019, Fabian Balzer
Distributed under the terms of the GNU General Public License v3.0.
The full license is in the file LICENSE.txt, distributed with this software.
-------------------------------------------------------------------------------
@author: Fabian Balzer (fabian.balzer@studium.uni-hamburg.de)
A module for the progress bar updates and threading, including the heads of the
evaluation functions.
The module is structured as follows:
- The ProgressDialog class for creating a progress window when plotting
- The Worker classes for carrying out the plotting process
- Function for evaluating single file data
- Function for evaluating time series data
"""
import numpy as np
import traceback
import PyQt5.QtWidgets as QW
import PyQt5.QtCore as QC
import PyQt5.QtGui as QG
import simgui_modules.plots as splot # used in eval-commands
import simgui_modules.utils as sut
from simgui_modules.additionalWidgets import GUILogger
# %% The ProgressDialog class for creating a progress window when plotting
class ProgressDialog(QW.QDialog):
    """A dialog that pops up when a plot is being made. Shows a progressbar
    and a status concerning plotting, and contains a button to stop the plot
    process. Automatically closes upon finishing.
    Parameters:
        Param_Dict: For the plot parameters.
        parent: QObject: preferably the main window.
        all_: bool: unimplemented function to plot everything
    """
    finished = QC.pyqtSignal(bool)  # signal to indicate success
    def __init__(self, Param_Dict, parent, mode, Request_Dict=None, slider=None):
        super().__init__(parent=parent)
        self.setModal(True)
        self.setWindowFlags(  # This will stop the close button from appearing
            QC.Qt.Window |
            QC.Qt.CustomizeWindowHint |
            QC.Qt.WindowTitleHint |
            QC.Qt.WindowMinimizeButtonHint |
            QC.Qt.WindowStaysOnTopHint
        )
        self.Request_Dict = Request_Dict
        self.mode = mode
        self.initUi()
        self.determineProgressLength(Param_Dict)
        self.setWindowTitle("Plotting in progress...")
        # choose the worker implementation matching the requested mode
        if mode == "Test":
            self.parent().progressWorker = TestPlotWorker()
        elif mode == "PlotSingle":
            self.parent().progressWorker = PlotSingleWorker(Param_Dict)  # Create worker for plotting
        elif mode == "PlotAll":
            self.parent().progressWorker = PlotMultipleWorker(Param_Dict,
                                                              Request_Dict,
                                                              slider)
        else:
            self.parent().progressWorker = TestPlotWorker()
        self.parent().thread = QC.QThread()  # Create thread for worker. For safety reasons leave a reference on the main window.
        self.signalsConnection()
        self.parent().progressWorker.moveToThread(self.parent().thread)
        self.parent().thread.start()  # Start the thread
        self.resize(400, self.height())
        self.setWindowIcon(QG.QIcon('simgui_registry/CoverIcon.png'))
        self.show()
    def initUi(self):
        """Initiates the visual elements, including a progress bar and a cancel
        button"""
        self.progressBar = QW.QProgressBar()
        self.infoLabel = QW.QLabel()
        self.cancelButton = QW.QPushButton("Cancel")
        buttonBox = QW.QWidget()
        buttonBoxLayout = QW.QHBoxLayout(buttonBox)
        buttonBoxLayout.addStretch(1)
        buttonBoxLayout.addWidget(self.cancelButton)
        buttonBoxLayout.setContentsMargins(0, 0, 0, 0)
        layout = QW.QVBoxLayout(self)
        layout.addWidget(self.progressBar)
        layout.addWidget(self.infoLabel)
        if self.mode == "PlotAll":
            # second bar tracking how many of the requested plots are done
            self.multipleProgressBar = QW.QProgressBar()
            self.multipleProgressBar.setRange(0, self.Request_Dict["PlotNumber"])
            self.multipleProgressBar.setValue(0)
            layout.addWidget(self.multipleProgressBar)
            self.multiInfoLabel = QW.QLabel(f"Currently working on plot 1/{self.Request_Dict['PlotNumber']}...")
            layout.addWidget(self.multiInfoLabel)
        layout.addStretch(1)
        layout.addWidget(buttonBox)
    def determineProgressLength(self, Param_Dict):
        """Calculate the number of checkpoint steps for the current plot
        settings and format the slider accordingly."""
        self.progressBar.setRange(0, 0)
        # startplot, modifications, annotations, startplot,
        length = 6  # _setupPlots, finish
        if Param_Dict["PlotMode"] == "Profile":
            length += sut.calculateProfileAdditions(Param_Dict)
        self.progressBar.setRange(0, length)
        self.progressBar.setValue(0)
    def updateProgress(self, message):
        """Updates the progressbar by one step and sets the text of the
        infoLabel to message."""
        value = self.progressBar.value()
        self.progressBar.setValue(value + 1)
        self.infoLabel.setText(f"{message}...")
    def updateMultiProgress(self):
        """Advance the per-plot bar by one and reset the per-step bar."""
        oldValue = self.multipleProgressBar.value()
        self.multipleProgressBar.setValue(oldValue + 1)
        self.progressBar.setValue(0)
        text = f"{oldValue+2}/{self.Request_Dict['PlotNumber']}"
        self.multiInfoLabel.setText(f"Currently working on plot {text}...")
        GUILogger.debug(text)
    def signalsConnection(self):
        """Connect the cancelButton"""
        self.parent().progressWorker.progressUpdate.connect(self.updateProgress)
        self.parent().progressWorker.finished.connect(lambda: self.close())
        if self.mode == "PlotAll":
            self.parent().progressWorker.multiProgress.connect(self.updateMultiProgress)
        self.cancelButton.clicked.connect(self.stopProcess)
        self.parent().thread.started.connect(self.parent().progressWorker.plot)
    def keyPressEvent(self, event):
        # Escape cancels the plot instead of closing the dialog outright
        if event.key() == QC.Qt.Key_Escape:
            self.stopProcess()
    def closeEvent(self, event):
        # make sure the worker thread is shut down along with the dialog
        self.parent().thread.quit()
        super().closeEvent(event)
    def stopProcess(self):
        """Ask the worker to stop and lock the UI until the current step ends."""
        self.infoLabel.setText(f"Plotting interrupted. Please wait until the current step is finished...")
        self.cancelButton.setDisabled(True)
        plotWindow = self.parent().Param_Dict["CurrentPlotWindow"]
        plotWindow.restoreSettings.setDisabled(True)
        plotWindow.writeFileButton.setDisabled(True)
        plotWindow.externalWindowButton.setDisabled(True)
        # the worker polls this flag between checkpoints
        self.parent().progressWorker._isRunning = False
# %% The Worker classes for carrying out the plotting process
class WorkerBase(QC.QObject):
    """A base class for objects to be used during threading"""
    finished = QC.pyqtSignal(bool)       # emitted when the worker is done
    progressUpdate = QC.pyqtSignal(str)  # carries one status message per step
    def __init__(self):
        super().__init__()
        # cleared by ProgressDialog.stopProcess to request cancellation
        self._isRunning = True
        self.oldMessage = "Starting up"
class PlotSingleWorker(WorkerBase):
    """A worker to carry out a single plot"""
    def __init__(self, Param_Dict):
        super().__init__()
        self.Param_Dict = Param_Dict
    @QC.pyqtSlot()  # This is necessary to make the threading work.
    def plot(self):
        """Run the plot and emit finished() no matter how it ends."""
        try:
            evaluateSingle(self.Param_Dict, self)
        except sut.WorkingException as e:
            # expected, user-facing error: show only the message
            GUILogger.error(str(e.args[0]))
        except Exception as e:
            traceback.print_exc()  # This will print the complete traceback including links to the lines
            GUILogger.exception("A yt-internal exception occured:<br><b><font color"
                                f'="DarkRed">{type(e).__name__}:</font><br>'
                                f"{e}</b>")
            GUILogger.log(29, "I've printed the traceback for you.")
        self._isRunning = False
        self.finished.emit(self._isRunning)
class PlotMultipleWorker(WorkerBase):
    """A worker to carry out multiple consecutive plots"""
    # re-declared with an extra str payload (the output directory)
    finished = QC.pyqtSignal(bool, str)
    multiProgress = QC.pyqtSignal()  # one tick per completed plot
    def __init__(self, Param_Dict, Request_Dict, slider):
        super().__init__()
        self.Param_Dict = Param_Dict
        self.Request_Dict = Request_Dict
        self.slider = slider
    @QC.pyqtSlot()  # This is necessary to make the threading work.
    def plot(self):
        """Run all requested plots and emit finished() no matter how it ends."""
        try:
            evaluateMultiple(self.Param_Dict, self.Request_Dict, self.slider, self)
        except sut.WorkingException as e:
            # expected, user-facing error: show only the message
            GUILogger.error(str(e.args[0]))
        except Exception as e:
            traceback.print_exc()  # This will print the complete traceback including links to the lines
            GUILogger.exception("A yt-internal exception occured:<br><b><font color"
                                f'="DarkRed">{type(e).__name__}:</font><br>'
                                f"{e}</b>")
            GUILogger.log(29, "I've printed the traceback for you.")
        self._isRunning = False
        self.finished.emit(self._isRunning, self.Request_Dict["Directory"])
class TestPlotWorker(WorkerBase):
    """A dummy worker that simulates a plot by ticking 100 progress steps."""
    @QC.pyqtSlot()  # Override this
    def plot(self):
        import time
        for i in range(100):
            if self._isRunning:
                time.sleep(0.02)
                self.progressUpdate.emit(str(i))
        if self._isRunning:
            self.success = True
            # Bug fix: WorkerBase.finished is pyqtSignal(bool); emitting it
            # without an argument raises a TypeError at runtime. Pass the
            # running flag, consistent with the other workers.
            self.finished.emit(self._isRunning)
# %% Function for evaluating single file data
def evaluateSingle(Param_Dict, worker):
    """Handles the different cases needed for evaluation of a Data or
    DataSetSeries object.
    Parameters:
        Param_Dict: For the information to be plotted
        worker: Worker object the evaluation is initiated from
    """
    mode = Param_Dict["PlotMode"]
    sut.emitStatus(worker, f"Creating the initial {mode.lower()} plot")
    GUILogger.log(29, f"Producing the requested {mode.lower()} plot...")
    # For lineplotting we need to remember the grid unit
    Param_Dict["oldGridUnit"] = Param_Dict["GridUnit"]
    # Convenient way to choose the right function:
    # NOTE(review): eval() dispatches on Param_Dict["PlotMode"]; this is only
    # safe while PlotMode is restricted to GUI-controlled values, never free text.
    eval(f"splot.{mode}Plot(Param_Dict, worker)")
    sut.emitStatus(worker, "Finishing")
# %% Function for evaluating time series data
def evaluateMultiple(Param_Dict, Request_Dict, slider, worker):
    """Evaluate the series according to the settings given from the
    plotDialog. If the makeMovie-attribute from the dialog is True, ask for a
    directory, create a folder and save the figures there."""
    mode = Param_Dict["PlotMode"]
    directory = Request_Dict["Directory"]
    onlyEvery = Request_Dict["OnlyEvery"]  # plot only every n-th dataset
    plotnum = Request_Dict["PlotNumber"]
    GUILogger.log(29, f"Producing the requested {mode.lower()} plots...")
    sut.emitStatus(worker, f"Creating the initial {mode.lower()} plot")
    # For lineplotting we need to remember the grid unit
    Param_Dict["oldGridUnit"] = Param_Dict["GridUnit"]
    i = 0
    for j in range(Request_Dict["Length"]):
        if i % onlyEvery == 0:
            # The following will set the plotWindow and dataset to the one we want
            Param_Dict["SignalHandler"].getSliderInput(value=j, seriesEval=True)
            # Convenient way to choose the right plot function
            # NOTE(review): eval() dispatch — safe only for GUI-controlled PlotMode
            eval(f"splot.{mode}Plot(Param_Dict, worker)")
            GUILogger.info(f"Progress: {int(i/onlyEvery+1)}/{plotnum} {mode.lower()} plots done.")
            if Request_Dict["MakeMovie"]:
                # save every produced figure so a movie can be stitched later
                saveName = f"{directory}/{mode}plot_{i+1}"
                Param_Dict["CurrentPlotWindow"].saveFigure(saveName)
            sut.emitMultiStatus(worker, i, plotnum)
        i += 1
        slider.setValue(j)
| Fabian-Balzer/GUFY | GUFY/simgui_modules/threading.py | threading.py | py | 12,049 | python | en | code | 0 | github-code | 36 |
class Solution:
    def sumOfUnique(self, nums: List[int]) -> int:
        # li: values seen at least once; re: values seen more than once
        li = []
        re = []
        for i in nums:
            if i not in li:
                li.append(i)
            else:
                if i not in re:
                    re.append(i)
        # keep only the values that never repeated
        # NOTE(review): the list membership checks make this O(n^2);
        # collections.Counter would do the same in O(n).
        nums = [j for j in nums if j not in re]
return sum(nums) | coincidence-one/algorithm-test-prep | week6/1748번_문제/1748_김현진.py | 1748_김현진.py | py | 335 | python | en | code | null | github-code | 36 |
15521857644 | '''
85. Maximal Rectangle
Given a 2D binary matrix filled with 0's and 1's, find the largest rectangle containing only 1's and return its area.
Example:
Input:
[
["1","0","1","0","0"],
["1","0","1","1","1"],
["1","1","1","1","1"],
["1","0","0","1","0"]
]
Output: 6
'''
class Solution:
    """Reduce the 2-D maximal-rectangle problem to one
    largest-rectangle-in-histogram query per matrix row."""

    # based on problem 84
    def largestRectangleArea(self, heights):
        """Area of the largest rectangle fitting under a histogram.

        :type heights: List[int]
        :rtype: int
        """
        if not heights:
            return 0
        best = 0
        stack = []  # indices of bars with non-decreasing heights
        padded = list(heights) + [0]  # zero sentinel flushes the stack at the end
        for right, h in enumerate(padded):
            # pop every bar taller than the current one; each popped bar's
            # rectangle extends from just after the new stack top up to here
            while stack and padded[stack[-1]] > h:
                height = padded[stack.pop()]
                left = stack[-1] + 1 if stack else 0
                best = max(best, height * (right - left))
            stack.append(right)
        return best

    def maximalRectangle(self, matrix):
        """Area of the largest all-ones rectangle in a binary matrix.

        :type matrix: List[List[str]]
        :rtype: int
        """
        if not matrix:
            return 0
        best = 0
        histogram = [0] * len(matrix[0])
        for row in matrix:
            # histogram[c]: number of consecutive '1's ending at this row
            for c, cell in enumerate(row):
                histogram[c] = histogram[c] + 1 if int(cell) else 0
            best = max(best, self.largestRectangleArea(histogram))
        return best
class Solution2:
    # Dynamic programming: per column, track the height of the run of '1's
    # ending at the current row plus the left/right boundaries of the widest
    # window that stays at least that tall.

    def init(self, col):
        """Return a list of `col` zeros (kept for API compatibility)."""
        return [0] * col

    def maximalRectangle(self, matrix):
        """Return the area of the largest all-'1' rectangle in a binary matrix.

        :type matrix: List[List[str]]
        :rtype: int
        """
        if not matrix:
            return 0
        rows, cols = len(matrix), len(matrix[0])
        heights = [0] * cols        # height of the '1' column ending at row i
        left = [0] * cols           # leftmost index where that height holds
        right = [cols - 1] * cols   # rightmost index where that height holds
        best = 0
        for i in range(rows):
            cur_left = 0
            # Heights and left boundaries in one left-to-right sweep; the
            # original duplicated this update with an i == 0 special case.
            for j in range(cols):
                if int(matrix[i][j]):
                    heights[j] += 1
                    left[j] = max(left[j], cur_left)
                else:
                    heights[j] = 0
                    left[j] = 0
                    cur_left = j + 1
            # Right boundaries need a right-to-left sweep.
            cur_right = cols - 1
            for j in range(cols - 1, -1, -1):
                if int(matrix[i][j]):
                    right[j] = min(right[j], cur_right)
                else:
                    right[j] = cols - 1
                    cur_right = j - 1
            for j in range(cols):
                best = max(best, heights[j] * (right[j] - left[j] + 1))
        return best
16274525184 | # Authors: Clyde Sumagang and Roy Morla
# Date: 9/22/ 2019
# Course: CST 205
# Abstract: This program will count the rgb values in a matrix and store them into a dictionary
# with 4 bins based on color and intensity
import pickle

# Load the pre-pickled image matrix (list of rows of (r, g, b) tuples).
# A context manager closes the handle, which the original never did.
# NOTE: pickle.load executes arbitrary code — only safe for trusted local files.
with open('image_matrix', 'rb') as file:
    data = pickle.load(file)
def task1(data):
    """Bin every RGB component of a pixel matrix into a 4-bucket histogram.

    `data` is a list of rows, each row a list of (r, g, b) tuples.  Buckets
    per channel are 0-63, 64-127, 128-191 and 192-255.

    Bug fix: the original blue-channel test used ``i > 64`` instead of
    ``i >= 64``, so a blue value of exactly 64 was never counted.
    """
    histo = {
        'red': [0, 0, 0, 0],
        'green': [0, 0, 0, 0],
        'blue': [0, 0, 0, 0]
    }
    channels = ('red', 'green', 'blue')
    for row in data:
        for pixel in row:
            # zip pairs each channel name with the matching tuple component.
            for channel, value in zip(channels, pixel):
                bucket = value // 64  # 64-wide intensity buckets
                if 0 <= bucket <= 3:  # ignore out-of-range values, like the original
                    histo[channel][bucket] += 1
    return histo
# Print the 4-bin RGB intensity histogram computed from the unpickled matrix.
print (task1(data))
| rjmorla/helloworld | my_Workspace/cst205/hw/hw1/hw1_1.py | hw1_1.py | py | 2,104 | python | en | code | 0 | github-code | 36 |
def linear_search(data, item):
    """Scan `data` left to right for `item`.

    Returns (found, index); index is len(data) when the item is absent,
    matching the original's not-found behaviour.

    Bug fix: the original only incremented `index` in the not-equal branch,
    so a successful match left the while-loop spinning forever.
    """
    for index, value in enumerate(data):
        if value == item:
            return True, index
    return False, len(data)
def binary_search(data, item):
    """Return True if `item` occurs in the sorted sequence `data`.

    Classic halving search over [lo, hi]; O(log n) comparisons.
    """
    lo, hi = 0, len(data) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if data[mid] == item:
            return True
        if item < data[mid]:
            hi = mid - 1
        else:
            lo = mid + 1
    return False
def interpolation_serach(list, x):
    """Interpolation search over a sorted list; return True iff x is present.

    (Parameter name `list` kept for call compatibility even though it shadows
    the builtin; the function name typo is likewise part of the public API.)

    Bug fixes over the original:
    * the ``return found`` sat inside the loop body, so only a single probe
      was ever made before giving up;
    * the upper bound was never narrowed when the probe overshot;
    * a run of equal values made the interpolation step divide by zero.
    """
    idx0 = 0
    idxn = len(list) - 1
    while idx0 <= idxn and list[idx0] <= x <= list[idxn]:
        # All remaining values are equal; interpolation would divide by zero.
        if list[idxn] == list[idx0]:
            return list[idx0] == x
        # Estimate x's position assuming uniformly distributed values.
        mid = idx0 + int(((idxn - idx0) / float(list[idxn] - list[idx0])) * (x - list[idx0]))
        if list[mid] == x:
            return True
        if list[mid] < x:
            idx0 = mid + 1
        else:
            idxn = mid - 1
    return False
# Quick demo: binary_search requires sorted input, hence sorted(data).
data = [12,13, 11, 99, 22, 55, 90]
print(binary_search(sorted(data), 99))
41120685968 | import os
from default.liststc import splitter_to_array, add_list, length, convert_arr_to_type, el_in_array
delimiter = ';'
def write(header, arr, folder_name, file_name, url_file=None):
    """Write `header` and the rows of `arr` as a ';'-separated file.

    header: list of column names; arr: list of rows (lists of values);
    folder_name/file_name: destination; url_file: pre-resolved folder path
    (skips the directory search when given).

    Returns the resolved path the first time (when url_file was not given)
    so callers can reuse it for subsequent files; otherwise None.

    Bug fix: the original unconditionally did ``url = ''`` right after
    reading `url_file`, which made the ``if url is None`` search branch dead
    code and silently ignored a caller-supplied path.  The structure now
    mirrors read() below.
    """
    url = url_file
    url_none = True if not url else False
    folder_found = False
    if url is None:
        url = ''
        # Search the project tree (anywhere under the parent dir) for an
        # existing folder with this name — same strategy as read().
        for (root, dirs, files) in os.walk('..', topdown=True):
            if el_in_array(folder_name, dirs):
                url = os.path.join(root, folder_name)
                folder_found = True
                # Per spec, an existing file of the same name must be removed
                # first.  NOTE(review): `files` lists the walk root's files,
                # not the target folder's — confirm the intended check; the
                # remove now at least targets the resolved directory instead
                # of the CWD.
                if el_in_array(file_name, files):
                    os.remove(os.path.join(url, file_name))
    else:
        folder_found = True
    # Create the folder when it does not exist anywhere yet.
    if not folder_found:
        os.mkdir(path=folder_name)
        url = folder_name
    with open(os.path.join(url, file_name), 'w') as file:
        # Write the CSV header.
        for i in range(length(header)):
            file.write(header[i] + delimiter)
        file.write('\n')
        # Write the data rows.
        for i in range(0, length(arr)):
            for j in range(length(arr[i])):
                file.write(str(arr[i][j]) + delimiter)
            file.write('\n')
    # Resolve the url only once across several files: return it when the
    # caller did not already have one, so it can be passed back in.
    return url if url_none else None
def read(folder_name, file_name, type_arr=None, function_validator=None, function_search=None,
         validator_param=None, search_param=None, url_file=None):
    # Parameter notes (translated from the original Indonesian):
    #
    # folder_name: name of the folder
    #
    # file_name: name of the file
    #
    # function_validator: if not every line of the file should be kept in the
    #                     array, this function validates each line of data
    #
    # function_search: to use each line's data in a search, this function is
    #                  run; since this happens at load time there is no user
    #                  input besides folder_name, so it is assumed to always
    #                  return a non-empty array
    #
    # validator_param: parameter passed to function_validator
    #
    # search_param: parameter passed to function_search
    url = url_file
    url_none = True if not url_file else False
    folder_found = False
    # Locate the file: the search covers any folder that is still a child of
    # the program's parent directory.
    if url is None:
        for (root, dirs, files) in os.walk('..', topdown=True):
            if el_in_array(folder_name, dirs):
                url = os.path.join(root, folder_name)
                folder_found = True
    else:
        folder_found = True
    if not folder_found:
        raise FileNotFoundError
    with open(os.path.join(url, file_name)) as file:
        raw = file.readlines()
    data_arr = []
    for i in range(length(raw)):
        # Line 0 of the file is the header, so i == 0 is skipped.
        if i == 0:
            pass
        else:
            # Split each line into an array on the delimiter.
            data = splitter_to_array(raw[i], delimiter)
            # Store the row, applying the optional converter, validator and
            # search hooks described above.
            if type_arr is not None:
                data = convert_arr_to_type(data, type_arr)
            if function_validator is None:
                if function_search is None:
                    data_arr = add_list(data_arr, data)
                else:
                    search = function_search(search_param, data)
                    data_arr = add_list(data_arr, search)
            else:
                if function_validator(data, validator_param):
                    if function_search is None:
                        data_arr = add_list(data_arr, data)
                    else:
                        search = function_search(search_param, data)
                        data_arr = add_list(data_arr, search)
    # For efficiency the url lookup happens only once: when this call had to
    # resolve it, hand it back so later calls can pass it in via url_file.
    if url_none:
        return data_arr, url
    else:
        return data_arr
| zidane-itb/tubes-daspro | file/csv.py | csv.py | py | 4,733 | python | id | code | 0 | github-code | 36 |
9816456408 | __title__ = 'pyfcm'
__summary__ = 'Python client for FCM - Firebase Cloud Messaging (Android, iOS and Web)'
__url__ = 'https://github.com/olucurious/pyfcm'
__version__ = '1.5.2'
__author__ = 'Emmanuel Adegbite'
__email__ = 'olucurious@gmail.com'
__license__ = 'MIT License'
| olucurious/PyFCM | pyfcm/__meta__.py | __meta__.py | py | 277 | python | en | code | 790 | github-code | 36 |
17498728037 | import os.path
import pandas
import numpy as np
def opt_report(reportPath, snrTh=0.9, debug=False, plotError=True):
    """Summarise a co-registration report CSV per optimisation iteration.

    The CSV has columns nbLoop, dxPix, dyPix, SNR.  For every loop number the
    mean displacement error and RMSE are computed; optionally per-loop
    diagnostics are printed and an error curve is saved next to the report.

    Returns (loop_with_min_mean_error, total_nb_loops, min_mean_error).

    Rework: the original rebuilt Python lists by scanning the whole table for
    every loop (O(loops x rows)) and collected SNR statistics even when
    `debug` was off; boolean-mask selection does both only when needed.
    """
    df = pandas.read_csv(reportPath)
    totalNbLoop = int(df["nbLoop"].iloc[-1])
    loopList = []
    rmseList = []
    avgErrorList = []
    for loop_ in range(totalNbLoop + 1):
        mask = df["nbLoop"] == loop_
        dxPix = df.loc[mask, "dxPix"].to_numpy(dtype=float)
        dyPix = df.loc[mask, "dyPix"].to_numpy(dtype=float)
        dxPixAvg = np.nanmean(dxPix)
        dyPixAvg = np.nanmean(dyPix)
        dxPixRMSE = np.nanstd(dxPix)
        dyPixRMSE = np.nanstd(dyPix)
        xyErrorAvg = np.sqrt(dxPixAvg ** 2 + dyPixAvg ** 2)
        xyRMSE = np.sqrt(dxPixRMSE ** 2 + dyPixRMSE ** 2)
        if debug:
            # SNR == 0 marks failed matches; snrTh filters low-quality ones.
            snr = df.loc[mask, "SNR"].to_numpy(dtype=float)
            nbNan = int(np.count_nonzero(snr == 0))
            nbAboveTh = int(np.count_nonzero(snr > snrTh))
            print("------ Loop:{} -------".format(loop_))
            print("#GCPs:{} --> #NaNs:{} ; #snrTh >{}:{}".format(dxPix.size, nbNan, snrTh, nbAboveTh))
            print("dxPixAvg:{} , xRMSE:{}".format("{0:.4f}".format(dxPixAvg),
                                                  "{0:.2f}".format(dxPixRMSE)))
            print("dyPixAvg:{} , yRMSE:{}".format("{0:.4f}".format(dyPixAvg),
                                                  "{0:.2f}".format(dyPixRMSE)))
            print("xyErrorAvg:{} , xyRMSE:{}".format("{0:.4f}".format(xyErrorAvg),
                                                     "{0:.2f}".format(xyRMSE)))
        loopList.append(loop_)
        rmseList.append(xyRMSE)
        avgErrorList.append(xyErrorAvg)
    indexMin = np.argmin(avgErrorList)
    print("Loop of Min Error:{} --> RMSE:{:.3f} , avgErr:{:.3f}".format(loopList[indexMin], np.min(rmseList),
                                                                        np.min(avgErrorList)))
    if plotError:
        # matplotlib stays a lazy import so headless use keeps working.
        import matplotlib.pyplot as plt
        from matplotlib.ticker import (AutoMinorLocator)
        fig, ax = plt.subplots()
        ax.plot(loopList, rmseList, c="r", linestyle="--", marker="o", label="RMSE [pix]")
        ax.plot(loopList, avgErrorList, c="g", linestyle="-", marker="o", label="meanErr [pix]")
        ax.grid()
        ax.legend()
        ax.xaxis.set_minor_locator(AutoMinorLocator())
        ax.yaxis.set_minor_locator(AutoMinorLocator())
        ax.tick_params(which='both', width=2, direction="in")
        ax.set_xlabel('#iterations')
        ax.set_ylabel("Error [pix]")
        fig.savefig(os.path.join(os.path.dirname(reportPath), "CoregistrationError.png"), dpi=400)
    return loopList[indexMin], totalNbLoop, np.min(avgErrorList)
def parse_opt_report(opt_report_path):
    """Parse a co-registration report CSV into per-iteration error statistics.

    Returns (rmse, avg_error, loop_min_err): one RMSE and one mean
    displacement error per loop number, plus the loop index with the
    smallest mean error.

    Rework: the original rescanned every row of the table once per loop
    (O(loops x rows)) and built an SNR list it never used; a boolean mask
    per loop does the selection in vectorised pandas.
    """
    df = pandas.read_csv(opt_report_path)
    nb_loops = int(df["nbLoop"].iloc[-1])
    rmse = []
    avg_error = []
    for loop_ in range(nb_loops + 1):
        mask = df["nbLoop"] == loop_
        dx = df.loc[mask, "dxPix"].to_numpy(dtype=float)
        dy = df.loc[mask, "dyPix"].to_numpy(dtype=float)
        dx_avg = np.nanmean(dx)
        dy_avg = np.nanmean(dy)
        dx_rmse = np.nanstd(dx)
        dy_rmse = np.nanstd(dy)
        avg_error.append(np.sqrt(dx_avg ** 2 + dy_avg ** 2))
        rmse.append(np.sqrt(dx_rmse ** 2 + dy_rmse ** 2))
    loop_min_err = int(np.argmin(avg_error))
    return rmse, avg_error, loop_min_err
| SaifAati/Geospatial-COSICorr3D | geoCosiCorr3D/geoTiePoints/misc.py | misc.py | py | 4,538 | python | en | code | 37 | github-code | 36 |
4374357755 | """You are climbing a staircase. It takes n steps to reach the top.
Each time you can either climb 1 or 2 steps.
In how many distinct ways can you climb to the top?"""
"""Example 1:
Input: n = 2
Output: 2
Explanation: There are two ways to climb to the top.
1. 1 step + 1 step
2. 2 steps"""
# goal is two climb to two
# can take one step = 1
# two steps at once = 2
# can either take one step twice or one two step to get to 2
"""Example 2:
Input: n = 3
Output: 3
Explanation: There are three ways to climb to the top.
1. 1 step + 1 step + 1 step
2. 1 step + 2 steps
3. 2 steps + 1 step"""
# time complexity = O(n)
# Solution
class Solution:
    def climbStairs(self, n):
        """Return the number of distinct ways to climb n steps, 1 or 2 at a time.

        Iterative Fibonacci: ways(n) = ways(n-1) + ways(n-2).  A tuple swap
        rolls the two running values forward without a temp variable.
        O(n) time, O(1) space.
        """
        prev, curr = 1, 1
        for _ in range(n - 1):
            prev, curr = curr, prev + curr
        return curr
if __name__ == "__main__":
test = Solution()
input = test.climbStairs(3)
print(input)
"""The time complexity of this function is O(n) since it iterates through the
loop n-1 times, and each iteration takes constant time. The time complexity is linear in terms of the input size n.""" | sharmaineb/tech-interview | climbingstairs.py | climbingstairs.py | py | 1,722 | python | en | code | 0 | github-code | 36 |
from sklearn.linear_model import LogisticRegression
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix, classification_report
import seaborn as sn
import matplotlib.pyplot as plt


def _report_metrics(y_true, y_pred, label):
    """Print the confusion matrix and derived metrics for one data split.

    Factored out because the original duplicated the train and test
    evaluation line-for-line.  Returns the confusion matrix so it can be
    plotted afterwards.
    """
    C = confusion_matrix(y_true, y_pred)
    TN, FP, FN, TP = C.ravel()
    accuracy = accuracy_score(y_true, y_pred)
    precision = float(TP / (TP + FP))
    recall = float(TP / (TP + FN))
    specificity = float(TN / (TN + FP))
    f1measure = float(2 * precision * recall / (precision + recall))
    gmean = float(np.sqrt(precision * recall))
    print("This solution is computed using %s data" % label)
    print(C)
    print("Accuracy using %s data is: %.3f" % (label, accuracy))
    print("Precision : %.3f, Recall : %.3f, Specificity : %.3f, F1measure : %.3f, G-mean : %.3f" % (
        precision, recall, specificity, f1measure, gmean))
    print("Type 1 error : %.3f, Type 2 error : %.3f\n" % (1 - specificity, 1 - recall))
    return C


# Load and shuffle the wine data with a fixed seed for reproducibility.
data = pd.read_csv('./Data/wine.csv')
data = data.sample(frac=1, random_state=42).reset_index(drop=True)

# 70/30 train/test split by row position; last column is the label.
ndata = data.shape[0]
train_rate = 0.7
ntrain = int(ndata * train_rate)
train, test = data.iloc[:ntrain, ], data.iloc[ntrain:, ]
train_x, train_y = train.iloc[:, :-1], train.iloc[:, -1]
test_x, test_y = test.iloc[:, :-1], test.iloc[:, -1]

log = LogisticRegression()
log.fit(train_x, train_y)

C = _report_metrics(train_y, log.predict(train_x), 'train')
C2 = _report_metrics(test_y, log.predict(test_x), 'test')

# Heatmaps of both confusion matrices, stacked vertically.
df_cm = pd.DataFrame(C, ['Actual N', 'Actual P'], ['Predicted N', 'Predicted P'])
df_cm2 = pd.DataFrame(C2, ['Actual N', 'Actual P'], ['Predicted N', 'Predicted P'])
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax1.set(title='Confusion Matrix of Train Data')
ax2 = fig.add_subplot(212)
ax2.set(title='Confusion Matrix of Test Data')
sn.heatmap(df_cm, annot=True, fmt='d', ax=ax1, annot_kws={"size": 16})
sn.heatmap(df_cm2, annot=True, fmt='d', ax=ax2, annot_kws={"size": 16})
plt.tight_layout()
plt.show()
26175621630 | from distutils.core import setup, Extension
from Cython.Build import cythonize
# Header search paths for libmpl and its vendored third-party dependencies.
include_dirs_list = [
    "../include",
    "../Thirdparty/libccd/src", #libccd
    "../Thirdparty/libccd/build/src", #libccd
    "../Thirdparty/yaml-cpp/include", # yaml-cpp
    "../Thirdparty/boost_1_7_0", # boost
    "../Thirdparty/eigen", # eigen
    "../Thirdparty/googletest/googletest/include", # gtest
    "../Thirdparty/octomap/octomap/include", # octomap ?
    "../Thirdparty/fcl/build/include", # fcl
    "../Thirdparty/fcl/include", # fcl
]
# Build the "pympl" Cython extension against the prebuilt libmpl in ../build.
setup(ext_modules = cythonize(Extension(
    "pympl", # the extension name
    sources=["pympl.pyx"], # the Cython source and additional C++ source files
    language="c++", # generate and compile C++ code
    include_dirs=include_dirs_list,
    library_dirs=["../build"],
    libraries=["mpl"],
    extra_compile_args=["-std=c++11"]
)))
23296850783 | from pathlib import Path
import pandas as pd
from . import integration
from model import advanced_controls as ac
from model import aez
from model import dd
from model import vma
from model import world_land
from solution import factory
# Allocation categories: the AEZ allocation zones plus the add-on bucket.
standard_land_allocation_types = list(world_land.AEZ_ALLOCATION_MAP.keys()) + ["Add-On Solutions"]

# Per-category solution priority lists (highest priority first); solutions
# not listed are assumed lower priority than these.
standard_land_solution_priorities = {
    'Non-Degraded Forest': 
        ['peatland', 'mangroverestoration', 'indigenouspeoplesland', 'forestprotection', 'multistrataagroforestry'],
    'Degraded Forest': 
        ['tropicalforests', 'temporateforests', 'BOREAL FOREST', 'peatland', 'mangroverestoration', 'bamboo', 'afforestation'],
    'Non-Degraded Grassland': 
        ['peatland', 'grasslandprotection', 'multistrataagroforestry', 'tropicaltreestaples', 'silvopasture', 'managedgrazing'],
    'Degraded Grassland':
        ['afforestation', 'farmlandrestoration', 'perennialbioenergy'],
    'Non-Degraded Cropland': 
        ['tropicalforests', 'peatland', 'riceintensification', 'improvedrice', 'conservationagriculture', 'treeintercropping'],
    'Degraded Cropland': 
        ['treeintercropping'],
    'Add-On Solutions':
        ['improvedcattlefeed', 'regenerativeagriculture', 'irregationefficiency', 'nutrientmanagement', 'SUSTAINABLE INTENSIFICATION']
}
"""The prioritization amongst Land solutions for access to land in each land allocation type.
Any land solution not on this list will be assumed to be lower priority than these."""
# UPPER CASE are items in the integration workbook that don't correspond to any solution known to me:
# BOREAL FOREST, SUSTAINABLE INTENSIFICATION
# VMAS={
# 'Current Adoption': vma.VMA(
# filename=THISDIR.joinpath("vma_data", "Current_Adoption.csv"),
# use_weight=False)
# }
class AEZ_Land_Integration:
    """The AEZ / Land Integration looks at competition between LAND solutions for land of different types,
    and adjusts land availability accordingly.
    """

    def assemble_current_status(self, scenario_list=None):
        """Perform the first step of the integration, which is to collate the current adoptions of all
        the scenarios across all allocation regions and TMRs.  By default, the drawdown PDS2 scenario is
        used for all Land solutions.  An alternative list (with differing solutions and/or scenario choices)
        may be provided instead.
        """
        if scenario_list:
            # Caller supplied concrete scenario objects; derive module names from them.
            self.scenario_list = scenario_list
            self.solution_list = [ _map_scenario_to_module(scenario) for scenario in self.scenario_list ]
        else:
            # Default: every LAND solution at its PDS2 scenario.
            self.solution_list = factory.all_solutions_category(ac.SOLUTION_CATEGORY.LAND)
            self.scenario_list = [ factory.solution_pds_type(x, "PDS2") for x in self.solution_list ]

        self.world_land_availability = world_land.World_TMR_AEZ_Map(series_name="2020")
        #).reduce_columns(world_land.AEZ_ALLOCATION_MAP)

        # Stack each scenario's per-TMR allocation tables into one frame keyed
        # by (scenario name, TMR).  NOTE(review): assumes scenario.ae exposes
        # world_land_alloc_dict as {tmr: DataFrame} — confirm against model.aez.
        per_solution_allocations = {}
        for scenario in self.scenario_list:
            sc_dict = scenario.ae.world_land_alloc_dict
            per_solution_allocations[scenario.name] = pd.concat( sc_dict.values(), keys=sc_dict.keys() )
        self.all_solution_allocations = pd.concat( per_solution_allocations.values(), keys=per_solution_allocations.keys() )
        # What we want is triple-index (allocation_zone, solution, tmr) and these columns
        # "Total Area", "Area Available for Solution", "Solution Current Adoption", "Solution Current Adoption %",
        # ... more stuff.  Let's start there.
        # Then we have to sort the data by priority within the different Landtypes
def _map_scenario_to_module(scenario):
"""Given a scenario, return the common module name (e.g. 'afforestation') of the solution"""
fullmodule = scenario.__module__
period = fullmodule.rfind('.')
return fullmodule[period+1:]
| ProjectDrawdown/solutions | integrations/aez_land_integration.py | aez_land_integration.py | py | 3,975 | python | en | code | 203 | github-code | 36 |
class Node:
    """A single element of a doubly linked list."""

    def __init__(self, data=None):
        self.data = data
        self.next = None      # following node, or None at the tail
        self.previous = None  # preceding node, or None at the head


class DoublyLinkedList:
    """Doubly linked list with head and tail pointers.

    Fixes over the original add_to_middle:
    * inserting after the tail no longer crashes on ``new_node.next.previous``
      and now updates self.tail;
    * a fresh node is created per matching position instead of relinking one
      shared node;
    * the cursor skips the freshly inserted node, so data == prev_data can no
      longer loop forever.
    """

    def __init__(self):
        self.head = None
        self.tail = None

    def prepend(self, data):
        """Insert data at the head of the list."""
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
            self.tail = new_node
            return
        new_node.next = self.head
        self.head.previous = new_node
        self.head = new_node

    def append(self, data):
        """Insert data at the tail of the list."""
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
            self.tail = new_node
            return
        self.tail.next = new_node
        new_node.previous = self.tail
        self.tail = new_node

    def add_to_middle(self, data, prev_data):
        """Insert a node holding data after every node whose value == prev_data."""
        current_node = self.head
        while current_node:
            if current_node.data == prev_data:
                new_node = Node(data)
                new_node.next = current_node.next
                new_node.previous = current_node
                current_node.next = new_node
                if new_node.next is None:
                    self.tail = new_node  # inserted after the old tail
                else:
                    new_node.next.previous = new_node
                # Skip the freshly inserted node to avoid re-matching it.
                current_node = new_node.next
            else:
                current_node = current_node.next

    def pop(self):
        """Remove and return the tail value; report when the list is empty."""
        if self.tail is None:
            return 'List is empty'
        data = self.tail.data
        if self.tail.previous:
            self.tail = self.tail.previous
            self.tail.next = None
        else:
            self.tail = None
            self.head = None
        return data

    def print_first_to_last(self):
        """Print all values from head to tail as a list."""
        current_node = self.head
        full_list = []
        while current_node:
            full_list.append(current_node.data)
            current_node = current_node.next
        print(full_list)

    def print_last_to_first(self):
        """Print all values from tail to head as a list."""
        current_node = self.tail
        full_list = []
        while current_node:
            full_list.append(current_node.data)
            current_node = current_node.previous
        print(full_list)
if __name__ == '__main__':
    # Build First <-> Second <-> Third via prepend, then Fourth at the tail.
    doubly_linked_list = DoublyLinkedList()
    doubly_linked_list.prepend('Third')
    doubly_linked_list.prepend('Second')
    doubly_linked_list.prepend('First')
    doubly_linked_list.append('Fourth')
    # NOTE(review): 'Two' is not in the list, so this insert is a no-op —
    # presumably 'Second' was intended; confirm.
    doubly_linked_list.add_to_middle('Test', 'Two')
    doubly_linked_list.print_first_to_last()
    doubly_linked_list.print_last_to_first()
    print('-------Print head and tail --------')
    print(doubly_linked_list.head.data)
    print(doubly_linked_list.tail.data)
    print('-------Pop data--------')
    # Pop all four values; the fifth pop reports the empty list.
    print(doubly_linked_list.pop())
    doubly_linked_list.print_first_to_last()
    print(doubly_linked_list.pop())
    print(doubly_linked_list.pop())
    print(doubly_linked_list.pop())
    print(doubly_linked_list.pop())
| almamuncsit/Data-Structures-and-Algorithms | Data-Structure/13-doubly-linked-list.py | 13-doubly-linked-list.py | py | 2,766 | python | en | code | 5 | github-code | 36 |
36332628382 | import csv
import pandas as pd
import numpy as np
data = pd.read_csv("insurance.csv")
# Parallel per-column lists to be filled from insurance.csv (one entry per patient).
ages = []
sexes = []
bmis = []
num_children = []
smoker_statuses = []
regions = []
insurance_charges = []
def load_list_to_data(lst, csv_file, column_name):
    """Append every value of column_name found in csv_file onto lst.

    Returns the same list for convenience; values stay strings as the CSV
    reader delivers them.
    """
    with open(csv_file) as handle:
        for record in csv.DictReader(handle):
            lst.append(record[column_name])
    return lst
# Populate one list per CSV column.
load_list_to_data(ages, "insurance.csv", 'age')
load_list_to_data(sexes , "insurance.csv", 'sex')
load_list_to_data(bmis , "insurance.csv", 'bmi')
load_list_to_data(num_children , "insurance.csv", 'children')
load_list_to_data(smoker_statuses , "insurance.csv", 'smoker')
load_list_to_data(regions , "insurance.csv", 'region')
load_list_to_data(insurance_charges , "insurance.csv", 'charges')

# list_of_patients = [ages,sexes,bmis, num_children, smoker_statuses, regions, insurance_charges]
# patient_dict = {z[0]: list(z[1:]) for z in zip(*list_of_patients)}
# print(patient_dict)
class PatientInfo:
    """Aggregate view over parallel per-patient attribute lists from insurance.csv.

    All values arrive as strings from the CSV reader; methods convert to
    int/float locally where arithmetic is needed.

    Fixes over the original:
    * update_age converted to int only *after* storing and printed the whole
      age list instead of the new value;
    * update_smoke crashed with a TypeError when called without an argument
      (``None >= 1``);
    * gender_vs_charge permanently converted self.patient_charge to floats as
      a side effect; it now converts locally.
    """

    def __init__(self, patient_age, patient_sex, patient_bmi,
                 patient_children, patient_smoke, patient_region, patient_charge):
        self.patient_age = patient_age
        self.patient_sex = patient_sex
        self.patient_bmi = patient_bmi
        self.patient_children = patient_children
        self.patient_smoke = patient_smoke
        self.patient_region = patient_region
        self.patient_charge = patient_charge

    def average_smoker(self):
        """Print the fraction of smokers vs non-smokers and which group is larger."""
        total_smokers = self.patient_smoke.count('yes')
        total_na_smoker = self.patient_smoke.count('no')
        total_smoker_mean = round(total_smokers / len(self.patient_smoke), 3)
        total_na_smoker_mean = round(total_na_smoker / len(self.patient_smoke), 3)
        print("Average patients that smokes:", total_smoker_mean)
        print("Average patients that do not smoke:", total_na_smoker_mean)
        if total_smoker_mean > total_na_smoker_mean:
            print("According currenlty our database indicates average person that smokes greater than people does not smoke.")
        elif total_na_smoker_mean > total_smoker_mean:
            print('According currently our database indicates average person does not smoke greater than people who actually smoke.')

    def update_age(self, new_age):
        """Updates Patient's age and prints the new value."""
        new_age = int(new_age)
        # Kept on the separate `patient_ages` attribute (as the original did)
        # so the per-patient `patient_age` list stays intact for the other
        # statistics methods.
        self.patient_ages = new_age
        print("Patient's new age is {}".format(new_age))

    def update_children(self, num_of_child):
        """Record the new number of children and print a summary sentence."""
        self.patient_children = num_of_child
        if num_of_child <= 0:
            print('You have no child.')
        elif num_of_child == 1:
            print("You have got a single child.")
        else:
            print(f"You have {self.patient_children} children's.")

    def update_smoke(self, update_smoke=None):
        """Print smoking advice based on consumption; None means no update."""
        if update_smoke is None:
            return
        if update_smoke == 0:
            print("Well done, smoking it is not good for you plus makes your insurance cheaper.")
        elif update_smoke >= 1:
            print("Consider quit smoking to have a healty life and make your insurance cheaper.")

    def change_of_sex(self, gender=None):
        """Print whether the supplied gender value is acceptable (non-numeric)."""
        if type(gender) is int:
            print('Number is invalid entry. Enter a your gender')
        else:
            print(f"Succed. Information has been changed to {gender}.")

    def analyze_ages(self):
        """Return a string with the mean patient age."""
        total_age = sum(int(age) for age in self.patient_age)
        return ("Average Patient Age: " + str(round(total_age / len(self.patient_age), 2)) + " years")

    def create_dictionary(self):
        """Build (and cache on self) a dict view over all attribute lists."""
        self.patients_dictionary = {
            "Age": [int(age) for age in self.patient_age],
            "Sex": self.patient_sex,
            "BMI": self.patient_bmi,
            "Num Of Child": self.patient_children,
            "Smoke Status": self.patient_smoke,
            "Regions": self.patient_region,
            "charges": self.patient_charge,
        }
        return self.patients_dictionary

    def gender_count(self):
        """Print how many male and female patients are in the data."""
        male_count = self.patient_sex.count('male')
        female_count = self.patient_sex.count('female')
        print("Male Count:", str(male_count))
        print("Female Count:", str(female_count))

    def unique_region(self):
        """Return the distinct regions in first-seen order."""
        return list(dict.fromkeys(self.patient_region))

    def average_charges(self):
        """Return a string with the mean yearly insurance charge."""
        total_charges = sum(float(charge) for charge in self.patient_charge)
        return ("Average Yearly Medical Insurance Charges: " +
                str(round(total_charges / len(self.patient_charge), 2)) + " dollars.")

    def average_gender(self):
        """Print counts and proportions of male and female patients."""
        total_male = self.patient_sex.count('male')
        total_female = self.patient_sex.count('female')
        print("According in our data set we have got total of males", total_male,
              "average Male", round(total_male / len(self.patient_sex), 2))
        print("According in our data set we have got total of Females", total_female,
              "average Female", round(total_female / len(self.patient_sex), 2))

    def gender_vs_charge(self):
        """Return a string with the mean insurance charge per gender."""
        male_cost = female_cost = 0.0
        total_male = total_female = 0
        for gender, charge in zip(self.patient_sex, self.patient_charge):
            charge = float(charge)  # local conversion — self.patient_charge is untouched
            if gender == 'male':
                male_cost += charge
                total_male += 1
            elif gender == 'female':
                female_cost += charge
                total_female += 1
        average_male_insurance_chargers = round(male_cost / total_male, 2)
        average_female_insurance_chargers = round(female_cost / total_female, 2)
        return "Average Male Insurance cost is: " + str(average_male_insurance_chargers) + "\nAverage Female Insurance cost is: " + str(average_female_insurance_chargers)

    def average_bmi_gender(self):
        """Return a string with the mean BMI per gender (non-male counts as female)."""
        male_bmi = female_bmi = 0.0
        male_total = female_total = 0
        for gender, bmi in zip(self.patient_sex, self.patient_bmi):
            bmi = float(bmi)
            if gender == 'male':
                male_bmi += bmi
                male_total += 1
            else:
                female_bmi += bmi
                female_total += 1
        male_bmi = round(male_bmi / male_total, 2)
        female_bmi = round(female_bmi / female_total, 2)
        return "Average Male Bmi is: " + str(male_bmi) + "\nAverage Female Bmi is: " + str(female_bmi)
# Build a PatientInfo over the full CSV columns and exercise its methods.
patients = PatientInfo(ages, sexes, bmis, num_children, smoker_statuses, regions, insurance_charges)
patients.update_children(1)
patients.update_smoke(0)
patients.change_of_sex('Male')
patients.analyze_ages()  # NOTE(review): return value discarded — wrap in print?
patients.gender_count()
print(patients.unique_region())
print(patients.average_charges())
patients.average_gender()
patients.average_smoker()
patients.create_dictionary()  # NOTE(review): return value discarded
print(patients.gender_vs_charge())
print(patients.average_bmi_gender())
| Sorunlu00/Project-US-Insurance-Cost | Medical_Insurance_cost.py | Medical_Insurance_cost.py | py | 8,296 | python | en | code | 0 | github-code | 36 |
72027268583 | cookbook = {'sandwich' : {'ingredients' : ['ham', 'bread', 'cheese', \
'tomatoes'] , 'meal' : 'lunch', 'prep_time' : 10}, \
'cake' : {'ingredients' : ['flour', 'sugar', 'eggs'] , 'meal' : 'dessert', \
'prep_time' : 60}, \
'salad' : {'ingredients' : ['avocado', 'arugula', 'tomatoes', 'spinach'] , \
'meal' : 'lunch', 'prep_time' : 15}}
def print_my_cookbook(cookbook):
    """Print every recipe in the given cookbook, one blank line apart."""
    print("Let's see what we have here...\n")
    for name in cookbook.keys():
        print_recipe(name)
        print('\n')
def print_recipe(recipe=''):
    """Pretty-print one recipe from the global cookbook.

    An empty name or an unknown recipe only prints a warning.
    """
    if recipe == '':
        print("\nNo recipe, grandma can't guess\n")
        return
    if recipe not in cookbook:
        print("\nGrandma never wrote this recipe\n")
        return
    print("Recipe for grandma's {}".format(recipe))
    for field in cookbook[recipe]:
        if field == 'ingredients':
            print('Ingredients :')
            for item in cookbook[recipe][field]:
                print(' -', item)
        elif field == 'meal':
            print('For {} time !'.format(cookbook[recipe][field]))
        elif field == 'prep_time':
            print('Take only {} minutes !'.format(cookbook[recipe][field]))
def delete_recipe(recipe):
    """Remove a recipe from the global cookbook; print a warning otherwise."""
    if recipe == '':
        print("\nNo recipe, grandma can't guess\n")
        return
    if recipe not in cookbook:
        print("\nGrandma never wrote this recipe\n")
        return
    del cookbook[recipe]
def new_recipe(name='', ingr='', meal_t='', time=''):
    """Add a recipe to the global cookbook.

    ingr is expected to be a non-empty list of ingredient strings; the other
    parameters keep their '' sentinel defaults meaning "not provided".
    Prints a complaint and adds nothing when a field is missing or the
    recipe already exists.
    """
    # `not ingr` also rejects an empty ingredient list; the old `ingr == ''`
    # comparison silently accepted [] because a list never equals a str.
    if name == '' or not ingr or meal_t == '' or time == '':
        print("\nSomething is missing, grandma can't guess\n")
        return
    if name in cookbook:
        print("\nGrandma already wrote it\n")
        return
    cookbook[name] = dict(ingredients=ingr, meal=meal_t, prep_time=time)
# Interactive menu loop: repeatedly prompt until the user picks option 5.
# NOTE(review): int(input(...)) raises ValueError on non-numeric input — the
# menu has no guard for that; confirm whether crashing is acceptable here.
print('You have found the old grandma\'s cookbook\n what are you going to do ?\
')
choice = 0
ing_list = []
while choice != 5:
    choice = int(input('1.Add a recipe\n2.Delete a recipe\n3.Print a recipe\
\n4.Print the cookbook\
\n5.Leave the book the alone\n\n'))
    # Option 1: gather a name, an ingredient list (blank line ends it),
    # a meal category and a prep time, then store the recipe.
    if choice == 1:
        print('\nBe careful with the pages my dear...\n\
They are older than you\n\n')
        print('\nFirst choose a name for the recipe:\n')
        name = input()
        print('\nWrite the ingredient if you\'re done tap enter with nothing')
        ingre = input()
        while ingre != '':
            ing_list.append(ingre)
            ingre = input()
        print('\nWhat kind of meal is it?')
        meal_t = input()
        print('\nHow many time it takes for a mortal to cook it?')
        prep_time = int(input())
        new_recipe(name, ing_list, meal_t, prep_time)
        # Reset the accumulators so the next "add" starts clean.
        ingre = ''
        ing_list = []
        name = ''
        meal_t = ''
        prep_time = 0
    # Option 2: list recipe names, then delete the chosen one.
    if choice == 2:
        print('\nYou can burn one of this page:\n')
        for recipe in cookbook:
            print('{}'.format(recipe))
        print('choose:')
        name = input()
        delete_recipe(name)
        name = ''
    # Option 3: list recipe names, then print the chosen one.
    if choice == 3:
        print('\nYou can look at one of this page:\n')
        for recipe in cookbook:
            print('{}'.format(recipe))
        print('choose:')
        name = input()
        print_recipe(name)
        name = ''
    # Option 4: dump the whole cookbook.
    if choice == 4:
        print_my_cookbook(cookbook)
print('The book is closed')
| GabPillow/python_bootcamp | day00/ex06/recipe.py | recipe.py | py | 3,347 | python | en | code | 0 | github-code | 36 |
# AtCoder KEYENCE 2019 B: decide whether deleting one contiguous substring
# from s can leave exactly the string "keyence".
s = input()
n = len(s)
k ="keyence"
flg = 1
# On the first character that differs from "keyence", switch to comparing the
# remaining characters from the back using negative indices (clever...):
# a valid answer must match "keyence" as a prefix up to i and as a suffix after.
for i in range(7):
    if s[i] != k[i]:
        if s[-7+i] != k[-7+i]:
            flg = 0
            break
print('YES' if flg else 'NO')
# -- code that did not work --
# The OK patterns are three:
# 1.keyencexxx
# 2.xxxkeyence
# 3.keyxxxence etc. — split between the head and the tail
# For the split pattern, the idea was: let i be the first differing position,
# then try to verify the tail part with a slice.
# if k in s:
#     print('YES')
#     exit()
# x = 0
# for i in range(n):
#     if s[i] != k[i]:
#         x = i
#         break
# Could not get this condition right — e.g. when only the last character is
# left. How could it be done cleanly?
# if k[x:] == s[n-1-x:n]:
#     print('YES')
# else:
#     print('NO')
| burioden/atcoder | submissions/keyence2019/b.py | b.py | py | 937 | python | ja | code | 4 | github-code | 36 |
26740758351 | #!/usr/bin/env python
import glob, os, sys, subprocess, shutil, string, argparse
# Command-line interface: the option string forwarded verbatim to the plotting
# script, the full list of variables (one is selected per PBS array job), and
# the directory the results are rsync'ed to afterwards.
parser = argparse.ArgumentParser(description="Wrapper script for MakePlots_HTopMultilep.py. This gets called on the PBS worker node via the PBS script generated by submit-PBS-ARRAY-MakePlots_HTopMultilep.py. The variable to be plotted gets retrieved via the PBS_ARRAYID index.")
parser.add_argument("--optstr", dest="optstr", action="store", type=str)
parser.add_argument("--varlist", dest="varlist", action="store", type=str, nargs="+")
parser.add_argument("--outputpath", dest="outputpath", action="store", type=str)
args = parser.parse_args()
if __name__ == '__main__':
    # Read varlist from argparse.
    # It will automagically re-create a python list from the multiple arguments of the input --varlist option.
    varlist = args.varlist
    # Get the var from the PBS_ARRAYID
    # NOTE(review): os.getenv returns None when PBS_ARRAYID is unset, so int(...)
    # would raise TypeError when run outside a PBS array job — confirm that is intended.
    pbs_array_idx = int(os.getenv('PBS_ARRAYID'))
    var = varlist[pbs_array_idx]
    print("Current job index PBS_ARRAYID={0}, var={1}".format(pbs_array_idx,var))
    # OK, execute plotting script for this var!
    # NB: it's crucial to make this call when running on the worker node, otherwise
    # python will not be able to find modules in Plotter/
    os.chdir(os.path.abspath(os.path.curdir)+"/HTopMultilepAnalysis/PlotUtils")
    plotscript = os.path.abspath(os.path.curdir) + "/Plotter/MakePlots_HTopMultilep.py"
    # Rebuild the plotting command from the forwarded option string plus the
    # per-job variable selection.
    optlist = args.optstr.split(' ')
    cmdlist = ['python',plotscript] + optlist + ['--submitPBSVar',var]
    cmd = " ".join( "{0}".format(c) for c in cmdlist )
    print("Executng command:\n{0}".format(cmd))
    subprocess.call( cmd, shell = True )
    # Now move the output to the target directory
    outputpath = args.outputpath
    if not outputpath[-1] == '/':
        outputpath += '/'
    # Get all subdirs in current location whose name starts with "OutputPlots_", rsync them to output directory, and remove the local copy
    job_outdirs = [ dir for dir in os.listdir(".") if "OutputPlots_" in dir and os.path.isdir(dir) ]
    for dir in job_outdirs:
        thisdir = dir
        # Strip a trailing slash so rsync copies the directory itself, not its contents.
        if thisdir[-1] == '/':
            thisdir = thisdir[:-1]
        subprocess.call( ['rsync','-azP',thisdir,outputpath] )
        shutil.rmtree(thisdir)
| mmilesi/HTopMultilepAnalysis | PlotUtils/Scripts/wrapper-MakePlots_HTopMultilep-PBS.py | wrapper-MakePlots_HTopMultilep-PBS.py | py | 2,258 | python | en | code | 0 | github-code | 36 |
18316576813 | import functools
import inspect
import types
from typing import Dict, List, Optional, Type, Union
import pytest
import servo.utilities.inspect
class OneClass:
    """Base fixture with three no-op instance methods, used to test method discovery."""
    def one(self) -> None:
        ...
    def two(self) -> None:
        ...
    def three(self) -> None:
        ...
class TwoClass(OneClass):
    """Fixture subclass adding two methods on top of OneClass."""
    def four(self) -> None:
        ...
    def five(self) -> None:
        ...
class ThreeClass(TwoClass):
    """Fixture grandchild adding one more method, giving a three-level hierarchy."""
    def six(self) -> None:
        ...
@pytest.mark.parametrize(
    "cls, stop_at_parent, method_names",
    [
        (OneClass, None, ["one", "two", "three"]),
        (TwoClass, None, ["four", "five"]),
        (TwoClass, OneClass, ["one", "two", "three", "four", "five"]),
        (ThreeClass, OneClass, ["one", "two", "three", "four", "five", "six"]),
        (ThreeClass, TwoClass, ["four", "five", "six"]),
    ],
)
def test_get_instance_methods(cls, stop_at_parent, method_names) -> None:
    """Methods are discovered in definition order, honoring stop_at_parent."""
    discovered = servo.utilities.inspect.get_instance_methods(
        cls, stop_at_parent=stop_at_parent
    )
    assert list(discovered) == method_names
def test_get_instance_methods_invalid_parent() -> None:
    """A stop_at_parent outside the inheritance chain raises a descriptive TypeError."""
    with pytest.raises(TypeError) as excinfo:
        servo.utilities.inspect.get_instance_methods(OneClass, stop_at_parent=int)
    expected = (
        "invalid parent type \"<class 'int'>\": not found in inheritance hierarchy"
    )
    assert str(excinfo.value) == expected
def test_get_instance_methods_returns_bound_methods_if_possible() -> None:
    """When given an instance (not a class), every returned method is bound."""
    discovered = servo.utilities.inspect.get_instance_methods(
        ThreeClass(), stop_at_parent=OneClass
    )
    assert list(discovered) == ["one", "two", "three", "four", "five", "six"]
    assert all(inspect.ismethod(m) for m in discovered.values())
def test_get_instance_methods_returns_finds_dynamic_instance_methods() -> None:
    """Methods attached to an instance at runtime are discovered too, last."""
    def seven() -> None:
        ...

    instance = ThreeClass()
    instance.seven = types.MethodType(seven, instance)
    discovered = servo.utilities.inspect.get_instance_methods(
        instance, stop_at_parent=OneClass
    )
    expected = ["one", "two", "three", "four", "five", "six", "seven"]
    assert list(discovered) == expected
    assert all(inspect.ismethod(m) for m in discovered.values())
def test_get_instance_methods_returns_ignores_attributes() -> None:
    """Plain (non-callable) class attributes are not reported as methods."""
    class FourClass(ThreeClass):
        ignore_me: str = "ignore_me"

    discovered = servo.utilities.inspect.get_instance_methods(
        FourClass(), stop_at_parent=OneClass
    )
    assert list(discovered) == ["one", "two", "three", "four", "five", "six"]
    assert all(inspect.ismethod(m) for m in discovered.values())
def test_resolution_none() -> None:
    """A literal None annotation and the forward-reference string "None" resolve equally.

    NOTE(review): this function is re-defined verbatim immediately below, so this
    first copy is shadowed at import time and never collected by pytest.
    """
    def test_type() -> None:
        ...
    def test_str() -> "None":
        ...
    res_type, res_str = servo.utilities.inspect.resolve_type_annotations(
        inspect.Signature.from_callable(test_type).return_annotation,
        inspect.Signature.from_callable(test_str).return_annotation,
    )
    assert res_type == res_str
def test_resolution_none() -> None:
    """A literal None annotation and the forward-reference string "None" resolve equally.

    NOTE(review): exact byte-for-byte duplicate of the definition directly above;
    this copy shadows the first. One of the two should be deleted or renamed.
    """
    def test_type() -> None:
        ...
    def test_str() -> "None":
        ...
    res_type, res_str = servo.utilities.inspect.resolve_type_annotations(
        inspect.Signature.from_callable(test_type).return_annotation,
        inspect.Signature.from_callable(test_str).return_annotation,
    )
    assert res_type == res_str
def test_aliased_types() -> None:
    """Every spelling of Duration — fully qualified, module-aliased, bare, and
    the forward-reference string form of each — must resolve to the same class."""
    import servo
    import servo.types
    from servo import types
    from servo.types import Duration
    def test_type_path() -> servo.types.Duration:
        ...
    def test_type_abbr() -> types.Duration:
        ...
    def test_type() -> Duration:
        ...
    def test_str_path() -> "servo.types.Duration":
        ...
    def test_str_abbr() -> "types.Duration":
        ...
    def test_str() -> "Duration":
        ...
    # globalns/localns give the resolver access to the aliases imported above.
    resolved = servo.utilities.inspect.resolve_type_annotations(
        inspect.Signature.from_callable(test_type_path).return_annotation,
        inspect.Signature.from_callable(test_type_abbr).return_annotation,
        inspect.Signature.from_callable(test_type).return_annotation,
        inspect.Signature.from_callable(test_str_path).return_annotation,
        inspect.Signature.from_callable(test_str_abbr).return_annotation,
        inspect.Signature.from_callable(test_str).return_annotation,
        globalns=globals(),
        localns=locals(),
    )
    assert set(resolved) == {Duration}
# TODO: Compare compound return types, generic, skipping arguments...
# None, None.__class__, 'None'
# Optional[str], Dict[str, int], Dict[str, List[float]]
# omit argument, extra argument, argument with wrong type
# @pytest.mark.parametrize(
# "reference_callable"
# )
import typing
from typing import Any
def test_equal_callable_descriptors() -> None:
    """Bare typing.Dict is rejected against Dict[str, Any], while Dict[str, Any]
    accepts a concretization such as Dict[str, int].

    NOTE(review): test_four is defined but never used by the assertions below.
    """
    import servo
    import servo.types
    def test_one() -> typing.Dict:
        ...
    def test_two() -> typing.Dict[str, Any]:
        ...
    def test_three() -> typing.Dict[str, int]:
        ...
    def test_four() -> typing.Dict[float, str]:
        ...
    sig1 = inspect.Signature.from_callable(test_one)
    sig2 = inspect.Signature.from_callable(test_two)
    # Mismatched parameterization: bare Dict vs Dict[str, Any] must raise.
    with pytest.raises(TypeError) as e:
        servo.utilities.inspect.assert_equal_callable_descriptors(
            servo.utilities.inspect.CallableDescriptor(
                signature=sig1, globalns=globals(), localns=locals()
            ),
            servo.utilities.inspect.CallableDescriptor(
                signature=sig2, globalns=globals(), localns=locals()
            ),
        )
    assert (
        str(e.value)
        == 'invalid callable "() -> Dict": incompatible return type annotation "typing.Dict[str, typing.Any]" in callable signature "() -> Dict[str, Any]", expected "typing.Dict"'
    )
    # Dict[str, Any] vs Dict[str, int] is accepted (Any matches any value type).
    servo.utilities.inspect.assert_equal_callable_descriptors(
        servo.utilities.inspect.CallableDescriptor(
            signature=inspect.Signature.from_callable(test_two),
            globalns=globals(),
            localns=locals(),
        ),
        servo.utilities.inspect.CallableDescriptor(
            signature=inspect.Signature.from_callable(test_three),
            globalns=globals(),
            localns=locals(),
        ),
    )
# before_handler_signature = inspect.Signature.from_callable(__before_handler)
# servo.utilities.inspect.assert_equal_callable_descriptors(
# servo.utilities.inspect.CallableDescriptor(signature=before_handler_signature, module=event.module, globalns=event_globalns, localns=None),
# servo.utilities.inspect.CallableDescriptor(signature=handler_signature, module=handler_module, globalns=handler_globalns, localns=handler_localns),
# name=name,
# )
# servo.utilities.inspect.assert_equal_callable_descriptors()
# ...
# Shared special-form alias used by several parametrize cases below.
MaybeNumeric = Optional[Union[float, int]]
@pytest.mark.parametrize(
    "types_, error_message",
    [
        # Success cases
        ([dict, dict], None),
        ([str, str], None),
        ([None, None], None),
        ([List[str], List[str]], None),
        ([Dict[str, int], Dict[str, int]], None),
        ([dict[str, int], Dict[str, int]], None),
        ([Any, str], None),
        ([Any, List[str]], None),
        ([List[Any], List[str]], None),
        ([Dict[str, Any], Dict[str, int]], None),
        # Subclassing
        ([OneClass, TwoClass], None),
        ([List[OneClass], List[TwoClass]], None),
        ([Dict[str, OneClass], Dict[str, TwoClass]], None),
        # Special forms
        ([MaybeNumeric, MaybeNumeric], None),
        ([MaybeNumeric, Optional[Union[int, float]]], None),
        # ---
        # Failure cases
        (
            [dict, int],
            "Incompatible type annotations: expected <class 'dict'>, but found <class 'int'>",
        ),
        (
            [Dict[str, int], dict],
            "Incompatible type annotations: expected typing.Dict[str, int], but found <class 'dict'>",
        ),
        (
            [List[str], List[int]],
            "Incompatible type annotations: expected typing.List[str], but found <class 'str'>",
        ),
        (
            [MaybeNumeric, float],
            "Incompatible type annotations: expected typing.Union[float, int, NoneType], but found <class 'float'>",
        ),
        (
            [dict, Dict[str, Any]],
            "Incompatible type annotations: expected <class 'dict'>, but found typing.Dict[str, typing.Any]",
        ),
        (
            [TwoClass, MaybeNumeric],
            "Incompatible type annotations: expected <class 'inspect_test.TwoClass'>, but found typing.Union[float, int, NoneType]",
        ),
        (
            [TwoClass, OneClass],
            "Incompatible type annotations: expected <class 'inspect_test.TwoClass'>, but found <class 'inspect_test.OneClass'>",
        ),
    ],
)
def test_assert_equal_types(types_: List[Type], error_message: Optional[str]) -> None:
    """assert_equal_types passes silently on compatible pairs and raises a
    TypeError with the exact message given above on incompatible ones."""
    if error_message:
        with pytest.raises(TypeError) as e:
            servo.utilities.inspect.assert_equal_types(*types_)
        assert str(e.value) == error_message
    else:
        servo.utilities.inspect.assert_equal_types(*types_)
| opsani/servox | tests/utilities/inspect_test.py | inspect_test.py | py | 9,377 | python | en | code | 6 | github-code | 36 |
class Solution:
    def findMin(self, nums: list[int]) -> int:
        """Return the minimum of a rotated sorted array of distinct values.

        Canonical binary search: compare the midpoint with the right end to
        decide which half contains the rotation point. O(log n) time, O(1)
        space. (The previous version tracked a running minimum with
        float("inf") only because its `end = mid - 1` step could discard the
        answer; keeping `hi = mid` makes that bookkeeping unnecessary. The
        annotation now uses builtin list[int] so no typing import is needed.)
        """
        lo, hi = 0, len(nums) - 1
        while lo < hi:
            mid = (lo + hi) // 2
            if nums[mid] > nums[hi]:
                # Minimum lies strictly to the right of mid.
                lo = mid + 1
            else:
                # nums[mid] could itself be the minimum, so keep it in range.
                hi = mid
        return nums[lo]
10495557667 | from dateutil import rrule
import datetime
def months_calculte(begin, end):
    """Count calendar months between two 'YYYY-MM' strings, inclusive of both ends."""
    start = datetime.datetime.strptime(begin + '-01', '%Y-%m-%d')
    stop = datetime.datetime.strptime(end + '-01', '%Y-%m-%d')
    # rrule enumerates one occurrence per month from start through stop.
    return rrule.rrule(rrule.MONTHLY, dtstart=start, until=stop).count()
def days_calculte(begin, end):
    """Return the number of days from begin to end ('YYYY-MM-DD'), inclusive.

    Uses strptime instead of the previous manual split/int parsing — same
    result for valid inputs, with proper validation of malformed dates.
    """
    start = datetime.datetime.strptime(begin, '%Y-%m-%d').date()
    stop = datetime.datetime.strptime(end, '%Y-%m-%d').date()
    # +1 makes the count inclusive of both endpoints, matching the original.
    return (stop - start).days + 1
def years_calculte(begin, end):
    """Return the number of years from begin to end (inclusive), given 'YYYY' strings."""
    return int(end) - int(begin) + 1
def dateRange(begin, end):
    """Generate the sorted list of consecutive period strings from begin to end.

    The granularity is inferred from the string length: 4 chars = years,
    7 chars = 'YYYY-MM' months, otherwise 'YYYY-MM-DD' days.
    """
    fmt = "%Y-%m-%d"
    if len(begin) == 7:
        fmt = "%Y-%m"
    if len(begin) == 4:
        span = int(end) - int(begin) + 1
        years = [str(int(begin) + offset) for offset in range(span)]
        return sorted(years)
    collected = []
    current = datetime.datetime.strptime(begin, fmt)
    stamp = begin[:]
    # Step one day at a time; for month granularity duplicates are produced
    # and collapsed by the set() below.
    while stamp <= end:
        collected.append(stamp)
        current = current + datetime.timedelta(1)
        stamp = current.strftime(fmt)
    return sorted(set(collected))
def date_parmas_check(params):
    """Validate a stats-query dict: time_kind plus start_time/end_time whose
    lengths must match the declared granularity. Returns (ok, message)."""
    kind = params.get('time_kind')
    start = params.get('start_time')
    end = params.get('end_time')
    if not kind:
        return False, '请表明要查寻的时间格式!'
    if not start or not end:
        return False, '缺少时间范围!'
    # Expected string length and error message per granularity.
    rules = {
        'month': (7, '按月统计时间范围有误!'),
        'year': (4, '按年统计时间范围有误!'),
        'day': (10, '按日统计时间范围有误!'),
    }
    if kind in rules:
        size, message = rules[kind]
        if len(start) != size or len(end) != size:
            return False, message
    return True, 'success'
def date_up(begin, several):
    """Advance a date string by `several` periods (days/months/years by length).

    `several` counts the starting period itself, so advancing by 1 returns
    `begin` unchanged (the internal offset is several - 1), and 0 returns
    `begin` as-is. Year inputs ('YYYY') return an int, matching the original
    API; month and day inputs return strings.

    Bug fixes vs the previous version: the month branch used `m % 12`, so any
    result landing exactly on December produced month 0 with the year off by
    one (e.g. '2020-12' + 13 gave '2022-0' instead of '2021-12'); months are
    now also zero-padded ('2021-01', not '2021-1') so results satisfy the
    len == 7 format the rest of this module expects.
    """
    if several == 0:
        return begin
    several = several - 1
    if len(begin) == 4:
        return int(begin) + several
    elif len(begin) == 7:
        y, m = (int(part) for part in begin.split('-'))
        # Work in total months since year 0; divmod handles any overflow.
        total = y * 12 + (m - 1) + several
        y, m = divmod(total, 12)
        return '%d-%02d' % (y, m + 1)
    else:
        parts = begin.split('-')
        start = datetime.date(int(parts[0]), int(parts[1]), int(parts[2]))
        return (start + datetime.timedelta(days=several)).strftime('%Y-%m-%d')
def mix_min_check(start, end):
    """Return True when start is not later than end (dash-separated date strings)."""
    # Stripping the dashes leaves digits whose integer order matches date order.
    return int(start.replace('-', '')) <= int(end.replace('-', ''))
| rantengfei/python-utility | compute_time.py | compute_time.py | py | 3,098 | python | en | code | 0 | github-code | 36 |
4109086257 | import sys
n, m = [int(x) for x in input().split()]
one, two = 0, 0
for i in range(1, m + 1):
a, b, c, d = [int(x) for x in input().split()]
one += a * b
two += c * d
if one >= n and two >= n:
print(f"It's a tie at round {i}!")
sys.exit()
if one >= n:
print(f"Team 1 wins at round {i}!")
sys.exit()
if two >= n:
print(f"Team 2 wins at round {i}!")
sys.exit()
print("Oh no!")
| AAZZAZRON/DMOJ-Solutions | ucrpc21c.py | ucrpc21c.py | py | 447 | python | en | code | 1 | github-code | 36 |
19573707796 | import os
def getVariantspath(modelPath,reportfname):
"""list dirs in modelPath in order to get variant names and its folder path"""
vnames = [name for name in os.listdir(modelPath) if os.path.isdir(os.path.join(modelPath,name))]
vpath = [os.path.join(modelPath,name) for name in os.listdir(modelPath) if os.path.isdir(os.path.join(modelPath,name))]
return vnames,vpath
def getoutputID(line,searchsignal):
"""get index/location of searchsignal in line"""
line0 = line.split()
SID = None
for sid,item in enumerate(line0):
if searchsignal in item:
SID = sid
break
return SID #this is not a very safe way of doing it
def getSUMresults(filepath,searchsignal):
"""return sum/total value of searchsignal"""
OutputFile = open(filepath,"r")
OutputLines = OutputFile.readlines()
OutputFile.close()
SID = getoutputID(OutputLines[1],searchsignal)
results = []
for line in OutputLines:
line = line.split()
try:
hour = float(line[0]) #should be between 1-8760
results.append(float(line[SID]))
except:
pass
return sum(results)
def illCAL(illline, thres):
    """Percentage (rounded to 0 decimals) of sensor-point values in `illline`
    that are >= `thres`; the first token is a label and is skipped."""
    values = illline.split()[1:]
    hits = sum(1 for value in values if float(value) >= thres)
    return round(hits / len(values) * 100, 0)
def getsDA(illfile):
    """Parse a daylight .ill results file and return (sDA, sDF).

    sDA: spatial Daylight Autonomy — % of points whose DA_300 value >= 50.
    sDF: % of points whose Daylight Factor >= 3.
    """
    illf = open(illfile,"r")
    illlines = illf.readlines()
    illf.close()
    # Locate the DA_300 row (excluding the continuous "CDA" variant) and the DF row.
    # NOTE(review): if either marker line is absent, DAline/DFline stay unbound
    # and the calls below raise UnboundLocalError — confirm the file format
    # always contains both rows.
    for line in illlines:
        if "DA_300" in line and "CDA" not in line:
            DAline = line
        elif "DF" in line:
            DFline = line
    #sDA calculation
    sDA = illCAL(DAline,50)
    sDF = illCAL(DFline,3)
    return sDA,sDF
def ReadAndWriteReport(modelPath,reportfname):
    """Collect per-variant simulation results under modelPath and write a
    tab-separated summary report to modelPath/reportfname.

    NOTE(review): the result paths use backslashes ("Results\\...") and are
    therefore Windows-specific — confirm this tool only runs on Windows.
    """
    #extract all file paths
    vnames, vpath = getVariantspath(modelPath,reportfname)
    addOutput = [os.path.join(fpath,"Results\\AddOutput_1h.prn") for fpath in vpath]
    illfile = [os.path.join(fpath,"Daylight\\001_Z1.ill") for fpath in vpath]
    #read files and write reports
    reportf = open(os.path.join(modelPath,reportfname),"w")
    #first line
    reportf.write("Varname\tOrientation\tWWR\tSHDActive\tTotalRadOnWindow\tTotalRadThroughWindow\tTotalInternalGain\tTotalHeating\tTotalCooling\tTotalEnergy\tDaylightFactor\tSpatialDaylightAutonomy\n")
    for vid,va in enumerate(vnames):
        # Variant name is expected as "<orientation>_<wwr>_..." — the first two
        # underscore-separated fields become their own columns.
        reportf.write("%s\t"%(va))
        reportf.write("%s\t"%(va.split("_")[0]))
        reportf.write("%s\t"%(va.split("_")[1]))
        # Energy sums; the /3600 and /1000 factors convert the raw column
        # units to the reported kW figures (per the original comments).
        reportf.write("%s\t"%(round(getSUMresults(addOutput[vid],"SHD_active"),0))) #hours
        reportf.write("%s\t"%(round(getSUMresults(addOutput[vid],"IT_")/3600,0))) #kW
        reportf.write("%s\t"%(round(getSUMresults(addOutput[vid],"QSOLTR_")/3600,0))) #kW
        reportf.write("%s\t"%(round(getSUMresults(addOutput[vid],"Q_intgain_")/1000,0))) #kW
        reportf.write("%s\t"%(round(getSUMresults(addOutput[vid],"Q_tot_ht_")/1000,0))) #kW
        reportf.write("%s\t"%(round(getSUMresults(addOutput[vid],"Q_tot_cl_")/1000,0))) #kW
        reportf.write("%s\t"%(round(getSUMresults(addOutput[vid],"Q_tot_ht_")/1000 + getSUMresults(addOutput[vid],"Q_tot_cl_")/1000,0))) #kW
        # Daylight metrics computed from the variant's .ill file.
        sda,sdf = getsDA(illfile[vid])
        reportf.write("%s\t"%(sdf))
        reportf.write("%s\n"%(sda))
    reportf.close()
| vhoangTS/LizardParallelPlot | reportWriter.py | reportWriter.py | py | 3,626 | python | en | code | 0 | github-code | 36 |
38817077412 | import requests
import random
from dotenv import load_dotenv
from PIL import ImageTk, Image
from io import BytesIO
import tkinter as tk
import os
class FetchAPI():
query: str
quantity: int
img_width: int
img_height: int
load_dotenv()
api_key = os.getenv('PEXELS_API_KEY')
def __init__(self, query: str, quantity: int) -> None:
self.img_width = 1280
self.img_height = 720
self.query = query
self.quantity = quantity
# Getter Method
def get_query(self):
return self.query
def get_quantity(self):
return self.quantity
# Setter Method
def set_query(self, newQuery):
self.query = newQuery
def set_quantity(self, newQuantity):
self.query = newQuantity
@staticmethod
def randomNumber() -> int:
return random.randint(1, 100)
def fetchAPI(self):
url = f'https://api.pexels.com/v1/search?query={self.get_query()}&per_page={self.get_quantity()}&page={self.randomNumber()}&orientation=landscape'
headers = {'Authorization': self.api_key}
response = requests.get(url, headers=headers)
return response.json()
def DisplayPhotos(self)-> None:
data = self.fetchAPI()
for photo in data['photos']:
photo_link = photo['src']['medium']
response = requests.get(photo_link)
image = Image.open(BytesIO(response.content))
root = tk.Tk()
root.wm_attributes("-topmost", 1)
tk_image = ImageTk.PhotoImage(image)
label = tk.Label(root, image=tk_image, text= self.get_query())
label.pack()
root.mainloop()
return None
| yethuhlaing/Car-Rental | src/fetchAPI.py | fetchAPI.py | py | 2,071 | python | en | code | 0 | github-code | 36 |
8385928022 | from django.db.models import Model, Q, OuterRef, Max, Count
from django.conf import settings
from django.core import mail
from django.http import HttpResponse
from django.template import Context, Template, loader
from django.utils.translation import gettext_lazy as _
from django.contrib import admin
import os, glob
from pathlib import Path
import pyexcel
import markdown
from markdown_link_attr_modifier import LinkAttrModifierExtension
from urllib.parse import quote
def get_current_site(request):
from .models import Site
try: return Site.objects.get_current(request)
except Site.DoesNotExist: pass
return None
def user_programs(queryset, path, request, or_cond=None):
if request.user.is_superuser:
if not request.user.site: return queryset
cond = Q(**{path+'sites': request.user.site})
return queryset.filter(cond | or_cond if or_cond else cond)
cond = Q(**{path+'user': request.user})
return queryset.filter(cond | or_cond if or_cond else cond)
def create_model(name, fields, app_label='formative', module='',
program=None, meta=None, base_class=Model):
class Meta:
pass
setattr(Meta, 'app_label', app_label)
if meta is not None:
for key, value in meta.__dict__.items():
if key[:2] == '__' or key == 'abstract': continue
setattr(Meta, key, value)
setattr(Meta, 'db_table', name)
if not module: module = app_label
attrs = {'__module__': module, 'Meta': Meta}
attrs.update(dict(fields))
# Create the class, which automatically triggers ModelBase processing
model = type(name, (base_class,), attrs)
if program: model._meta.program_slug = program
return model
def remove_p(text):
s = text.strip()
if s[-3-1:] == '</p>':
i = s.rindex('<p>')
return s[i+3:-3-1]
return text
def send_email(template, to, subject, context={}, connection=None):
new_context = { 'settings': settings }
new_context.update(context)
context = Context(new_context)
if type(template) != Template: context = new_context # wtf, Django
sub = ' '.join(subject.render(context).splitlines()).rstrip()
message = template.render(context)
email = mail.EmailMessage(sub, message, settings.CONTACT_EMAIL, [to],
connection=connection)
return email.send()
class TabularExport:
def __init__(self, form, queryset, **kwargs):
self.args, self.fields, self.collections = kwargs, [], {}
names = []
for name in self.args:
if not self.args[name]: continue
if name.startswith('block_'): names.append(name[len('block_'):])
elif name.startswith('collection_') and self.args[name] != 'no':
cname = name[len('collection_'):]
self.collections[cname] = [0, []]
if self.args[name] == 'combine':
self.collections[cname][0] = -1
blocks = { 'block_'+b.name: b
for b in form.submission_blocks().filter(name__in=names) }
self.items = {}
if self.collections:
item_model = form.item_model
# item_model's _submission rel doesn't recognize original queryset
qs = form.model.objects.filter(pk__in=queryset) # but this works
sub_items = item_model.objects.filter(_submission__in=qs)
items_qs = sub_items.filter(_collection__in=self.collections)
# TODO order should be by block_rank, cf Submission._collections()
for item in items_qs.order_by('_collection', '_block', '_rank'):
app = self.items.setdefault(item._submission_id, {})
app_col = app.setdefault(item._collection, [])
app_col.append(item)
for c in self.collections:
if self.collections[c][0] < 0: continue
lengths = [ len(app[c])
for app in self.items.values() if c in app ]
self.collections[c][0] = lengths and max(lengths) or 0
for name in self.args:
if name.startswith('block_'):
if blocks[name].block_type() == 'stock':
for n in blocks[name].stock.widget_names():
self.fields.append(blocks[name].stock.field_name(n))
else: self.fields.append(blocks[name].name)
elif name.startswith('cfield_'):
cname, field = name[len('cfield_'):].split('.')
if cname not in self.collections: continue
self.collections[cname][1].append(field)
def header_row(self):
ret = ['email']
for name in self.fields:
if name.startswith('_'): ret.append(name[1:])
else: ret.append(name)
for collection, (n, fields) in self.collections.items():
if not n: continue
cfields = []
for field in fields:
if field == '_file': cfields.append(collection + '_file')
else: cfields.append(collection + '_' + field)
if n < 0: ret += cfields
else: ret += cfields * n
return ret
def data_row(self, submission, sub_items):
row = [submission._email]
for name in self.fields:
val = getattr(submission, name)
if val is None: out = ''
else: out = str(val)
row.append(out)
def item_val(item, field):
if field == '_file' and item._file:
return 'https://' + settings.DJANGO_SERVER + item._file.url
val = getattr(item, field)
if val is None: return ''
return str(val)
for collection, (n, fields) in self.collections.items():
col_items = sub_items.setdefault(collection, [])
if n < 0:
for field in fields:
vals = [ item_val(item, field) for item in col_items ]
sep = ' ' if field == '_file' else ', '
out = sep.join(vals)
row.append(out)
else:
for item in col_items:
for field in fields: row.append(item_val(item, field))
row.extend([''] * (n-len(col_items)) * len(fields))
return row
def data_rows(self, queryset):
ret = []
for submission in queryset:
sub_items = self.items.setdefault(submission._id, {})
row = self.data_row(submission, sub_items)
ret.append(row)
return ret
def data(self, queryset):
ret = [self.header_row()]
ret += self.data_rows(queryset)
return ret
def csv_response(self, filename, queryset):
data = self.data(queryset)
stream = pyexcel.save_as(array=data, dest_file_type='csv')
response = HttpResponse(stream, content_type='text/csv')
disp = f"attachment; filename*=UTF-8''" + quote(filename)
response['Content-Disposition'] = disp
return response
def submission_link(s, form, rest=''):
server = settings.DJANGO_SERVER
if ':' in server or server.endswith('.local'): proto = 'http'
else: proto = 'https'
if s._valid > 1 and not rest:
if s._valid == form.num_pages(): rest = f'page-{form.num_pages()}'
else: rest = f'page-{s._valid + 1}'
return f'{proto}://{server}/{form.program.slug}/{form.slug}/{s._id}/{rest}'
def get_file_extension(name):
return Path(name).suffix[1:].lower()
def thumbnail_path(path, ext=None):
idx = path.rindex('.')
return path[:idx] + '_tn' + (ext and '.'+ext or path[idx:])
def subtitle_path(path, lang):
idx = path.rindex('.')
return path[:idx] + '_s_' + lang + '.vtt'
def delete_submission_files(files_recs):
for rec in files_recs:
submission_dir = os.path.join(settings.MEDIA_ROOT, str(rec.submission))
if not os.path.isdir(submission_dir): continue
for filename in os.listdir(submission_dir):
os.remove(os.path.join(submission_dir, filename))
os.rmdir(submission_dir)
def delete_file(file):
if os.path.isfile(file.path): os.remove(file.path)
thumb = thumbnail_path(file.path)
if os.path.isfile(thumb): os.remove(thumb)
for path in glob.glob(subtitle_path(file.path, '*')):
os.remove(path)
def human_readable_filesize(size, decimal_places=2):
for unit in ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB']:
if size < 1024 or unit == 'PB': break
size /= 1024
return f"{size:.{decimal_places}f} {unit}"
def any_name_field(**kwargs):
Qs = [ Q(**{ namen + (k != '_' and k or ''): v for k, v in kwargs.items() })
for namen in ('name1', 'name2', 'name3') ]
return Qs[0] | Qs[1] | Qs[2]
def get_tooltips():
return {
'previoustip': _('Previous Page'),
# 'sortabletip': _('Drag to reorder'),
# 'uploadtip': _('Replace File'),
}
class MarkdownFormatter(markdown.Markdown):
def __init__(self):
super().__init__(extensions=[
LinkAttrModifierExtension(new_tab='external_only')
])
def convert(self, text):
self.reset() # in our context this seems to be always needed
return super().convert(text)
| johncronan/formative | formative/utils.py | utils.py | py | 9,508 | python | en | code | 4 | github-code | 36 |
70068023143 | import random
def randomquote(quotes):
last = len(quotes) -1
rnd = random.randint(0,last)
print("Random Quote: ",quotes[rnd])
f = open("quotes.txt")
quotes = f.readlines()
f.close()
selection = "A"
while True:
selection = input("(D)isplay a quote\n(A)dd a quote\nChoose your selection by typing in the letter: ")
if selection.upper() == "D":
randomquote(quotes)
elif selection.upper() == "A":
quotetoadd = input("What saying would you like to add to the file?: ")
filewritingto = open("quotes.txt",'a')
filewritingto.write(quotetoadd + "\n")
filewritingto.close()
print("'",quotetoadd,"' has been written to the file!")
else:
print("Exiting...")
break
| EricJB77/python-random-quote | get-quote.py | get-quote.py | py | 705 | python | en | code | 0 | github-code | 36 |
9766193824 | import sys
import librosa
import numpy as np
#import soundfile as sf
import functools
import torch
#from torch.nn.functional import cosine_similarity
#import essentia.standard as es
def logme(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
print('\n-----------------\n')
print(' MODEL: {}'.format(f.__name__.upper()))
print('\n-----------------\n')
return f(*args, **kwargs)
return wrapped
class ProgressBar:
"""Progress bar
"""
def __init__ (self, valmax, maxbar, title):
if valmax == 0: valmax = 1
if maxbar > 200: maxbar = 200
self.valmax = valmax
self.maxbar = maxbar
self.title = title
print ('')
def update(self, val, avg_loss=0):
# format
if val > self.valmax: val = self.valmax
# process
perc = round((float(val) / float(self.valmax)) * 100)
scale = 100.0 / float(self.maxbar)
bar = int(perc / scale)
# render
if avg_loss:
# out = '\r %20s [%s%s] %3d / %3d cost: %.2f r_loss: %.0f l_loss: %.4f clf_loss: %.4f' % (
out = '\r %20s [%s%s] %3d / %3d loss: %.5f' % (
self.title,
'=' * bar, ' ' * (self.maxbar - bar),
val,
self.valmax,
avg_loss,
)
else:
out = '\r %20s [%s%s] %3d / %3d ' % (self.title, '=' * bar, ' ' * (self.maxbar - bar), val, self.valmax)
sys.stdout.write(out)
sys.stdout.flush()
def pad(l, sr):
# 0-Pad 10 sec at fs hz and add little noise
z = np.zeros(10*sr, dtype='float32')
z[:l.size] = l
z = z + 5*1e-4*np.random.rand(z.size).astype('float32')
return z
def compute_spectrogram(filename, sr=22000, n_mels=96):
# zero pad and compute log mel spec
try:
audio, sr = librosa.load(filename, sr=sr, res_type='kaiser_fast')
except:
audio, o_sr = sf.read(filename)
audio = librosa.core.resample(audio, o_sr, sr)
try:
x = pad(audio, sr)
except ValueError:
x = audio
audio_rep = librosa.feature.melspectrogram(y=x, sr=sr, hop_length=512, n_fft=1024, n_mels=n_mels, power=1.)
audio_rep = np.log(audio_rep + np.finfo(np.float32).eps)
return audio_rep
def return_spectrogram_max_nrg_frame(spectrogram):
frames = librosa.util.frame(np.asfortranarray(spectrogram), frame_length=96, hop_length=12)
idx_max_nrg = np.argmax(np.sum(np.sum(frames, axis=0), axis=0))
return frames[:,:,idx_max_nrg]
def return_spectrogram_3_max_nrg_frames(spectrogram):
frames = librosa.util.frame(np.asfortranarray(spectrogram), frame_length=96, hop_length=12)
idxes_max_nrg = (-np.sum(np.sum(frames, axis=0), axis=0)).argsort()[:3]
return frames[:,:,idxes_max_nrg]
def spectrogram_to_audio(filename, y, sr=22000):
y = np.exp(y)
x = librosa.feature.inverse.mel_to_audio(y, sr=sr, n_fft=1024, hop_length=512, power=1.)
librosa.output.write_wav(filename, x, sr)
def extract_spectrogram(filename, sr=16000, n_mels=48):
    """Return the 48-band dB mel spectrogram of a centered 29.1 s excerpt of *filename*.

    Uses the essentia-based cut_audio/melspectrogram helpers below; the
    result is transposed so rows are mel bands and columns are frames
    (assumes the pool stacks one frame per row — TODO confirm).
    """
    audio = cut_audio(filename, sampleRate=sr, segment_duration=29.1)
    frames = melspectrogram(audio, sampleRate=sr, frameSize=512, hopSize=256, numberBands=[48],
                            warpingFormula='slaneyMel', window='hann', normalize='unit_tri')
    return frames['mel_48_db'].T
def melspectrogram(audio, sampleRate=44100, frameSize=2048, hopSize=1024,
                   window='blackmanharris62', zeroPadding=0, center=True,
                   numberBands=(128, 96, 48, 32, 24, 16, 8),
                   lowFrequencyBound=0, highFrequencyBound=None,
                   weighting='linear', warpingFormula='slaneyMel', normalize='unit_tri'):
    """Compute mel-band energies of *audio* for several band counts at once.

    For every entry ``n`` of *numberBands* the returned essentia Pool holds
    three keys per frame: ``mel_<n>`` (power), ``mel_<n>_db`` and
    ``mel_<n>_log1+10kx`` (log10(1 + 10000*x) compression).

    Fix: the default for *numberBands* was a mutable list; a tuple avoids the
    shared-mutable-default pitfall and behaves identically for iteration.
    """
    if highFrequencyBound is None:
        highFrequencyBound = sampleRate/2
    windowing = es.Windowing(type=window, normalized=False, zeroPadding=zeroPadding)
    spectrum = es.Spectrum()
    # one MelBands extractor per requested resolution
    melbands = {}
    for nBands in numberBands:
        melbands[nBands] = es.MelBands(numberBands=nBands,
                                       sampleRate=sampleRate,
                                       lowFrequencyBound=lowFrequencyBound,
                                       highFrequencyBound=highFrequencyBound,
                                       inputSize=(frameSize+zeroPadding)//2+1,
                                       weighting=weighting,
                                       normalize=normalize,
                                       warpingFormula=warpingFormula,
                                       type='power')
    # post-processing operators: shift/scale for log compression, and dB
    norm10k = es.UnaryOperator(type='identity', shift=1, scale=10000)
    log10 = es.UnaryOperator(type='log10')
    amp2db = es.UnaryOperator(type='lin2db', scale=2)
    results = essentia.Pool()
    for frame in es.FrameGenerator(audio, frameSize=frameSize, hopSize=hopSize,
                                   startFromZero=not center):
        spectrumFrame = spectrum(windowing(frame))
        for nBands in numberBands:
            melFrame = melbands[nBands](spectrumFrame)
            results.add('mel_' + str(nBands)+'_db', amp2db(melFrame))
            results.add('mel_' + str(nBands)+'_log1+10kx', log10(norm10k(melFrame)))
            results.add('mel_' + str(nBands), melFrame)
    return results
def cut_audio(filename, sampleRate=44100, segment_duration=None):
    """Load *filename* as mono audio and return a centered excerpt of it.

    With *segment_duration* (seconds) set, a window of that length centered
    in the signal is returned; otherwise the whole signal is returned.
    Raises ValueError when the requested segment exceeds the audio length.
    """
    audio = es.MonoLoader(filename=filename, sampleRate=sampleRate)()
    if segment_duration:
        n_samples = round(segment_duration * sampleRate)
        start = (len(audio) - n_samples) // 2
        end = start + n_samples
    else:
        start, end = 0, len(audio)
    if start < 0 or end > len(audio):
        raise ValueError('Segment duration is larger than the input audio duration')
    return audio[start:end]
def kullback_leibler(y_hat, y):
    """Generalized Kullback-Leibler divergence.

    :param y_hat: The predicted distribution.
    :type y_hat: torch.Tensor
    :param y: The true distribution.
    :type y: torch.Tensor
    :return: The generalized Kullback Leibler divergence between predicted
        and true distributions, summed over the last axis and averaged over
        the rest.
    :rtype: torch.Tensor
    """
    eps = 1e-5  # keeps log() finite on zero entries
    log_ratio = (y + eps).log() - (y_hat + eps).log()
    pointwise = y * log_ratio + (y_hat - y)
    return pointwise.sum(dim=-1).mean()
def embeddings_to_cosine_similarity_matrix(z):
    """Convert a tensor of n row embeddings to an (n, n) cosine-similarity matrix."""
    dots = torch.matmul(z, z.t())
    norms = torch.norm(z, p=2, dim=1)
    # outer product of the norms normalizes every dot product
    return dots / (norms.unsqueeze(0) * norms.unsqueeze(1))
def contrastive_loss(z_audio, z_tag, t=1):
    """Computes contrastive (NT-Xent) loss following the paper:
    A Simple Framework for Contrastive Learning of Visual Representations
    https://arxiv.org/pdf/2002.05709v1.pdf

    :param z_audio: batch of audio embeddings, shape (N, d).
    :param z_tag: batch of matching tag embeddings, shape (N, d).
    :param t: softmax temperature (low values can produce NaNs, see paper).
    :return: scalar loss tensor.
    """
    z = torch.cat((z_audio, z_tag), dim=0)
    s = embeddings_to_cosine_similarity_matrix(z)
    N = int(s.shape[0]/2)
    s = torch.exp(s/t)
    # Zero the diagonal so a sample never counts as its own pair.
    # Bug fix: the original called .cuda() unconditionally and relied on
    # catching AssertionError on CPU-only builds; CUDA builds without a GPU
    # raise RuntimeError instead and would crash. Creating the mask on
    # s.device works everywhere.
    s = s * (1 - torch.eye(len(s), len(s), device=s.device))
    denom = s.sum(dim=-1)
    # positives live on the diagonals of the off-diagonal (audio, tag) blocks
    num = torch.cat((s[:N,N:].diag(), s[N:,:N].diag()), dim=0)
    return torch.log((num / denom) + 1e-5).neg().mean()
| andrebola/contrastive-mir-learning | utils.py | utils.py | py | 7,635 | python | en | code | 13 | github-code | 36 |
def read():
    """Parse input/02.txt into a list of [direction, amount] token pairs."""
    with open("input/02.txt") as handle:
        # drop the trailing empty element produced by the final newline
        return [line.split() for line in handle.read().split('\n')[:-1]]
def part1(m):
    """Product of final horizontal position and depth (AoC 2021 day 2, part 1)."""
    pos = depth = 0
    for cmd, amount in m:
        step = int(amount)
        if cmd == 'forward':
            pos += step
        elif cmd == 'up':
            depth -= step
        elif cmd == 'down':
            depth += step
    return pos * depth
def part2(m):
    """Product of final position and depth with 'aim' rules (AoC 2021 day 2, part 2)."""
    pos = depth = aim = 0
    for cmd, amount in m:
        step = int(amount)
        if cmd == 'forward':
            pos += step
            depth += aim * step
        elif cmd == 'up':
            aim -= step
        elif cmd == 'down':
            aim += step
    return pos * depth
# Solve both parts against the puzzle input file.
print(part1(read()))
print(part2(read()))
| MergunFrimen/advent-of-code | 2021/02/02.py | 02.py | py | 653 | python | en | code | 0 | github-code | 36 |
# -*- coding: utf-8 -*-
# @Time : 2020/8/19 11:17
# @Author : WuatAnt
# @File : ext_gcd.py
# @Project : Python数据结构与算法分析
def ext_gcd(x, y):
    """Extended Euclidean algorithm.

    Returns a tuple (g, a, b) such that g = gcd(x, y) = a*x + b*y.
    """
    if y == 0:
        return (x, 1, 0)
    g, a, b = ext_gcd(y, x % y)
    # back-substitute the coefficients from the recursive call
    return (g, b, a - (x // y) * b)
print(ext_gcd(25,9)) | WustAnt/Python-Algorithm | Chapter8/8.3/8.3.3/ext_gcd.py | ext_gcd.py | py | 297 | python | en | code | 9 | github-code | 36 |
import argparse
from algorithms.utils import timedcall
@timedcall
def count_inversions(array):
    """Count inversions in *array* in O(n log n) via merge sort (timed)."""
    _, inversions = _count_inversions(array)
    return inversions
def _count_inversions(array):
    """Merge-sort *array*, returning (sorted_array, inversion_count)."""
    if len(array) < 2:
        return array, 0
    half = len(array) // 2
    left_sorted, inv_left = _count_inversions(array[:half])
    right_sorted, inv_right = _count_inversions(array[half:])
    merged, inv_split = merge(left_sorted, right_sorted)
    return merged, inv_left + inv_right + inv_split
def merge(left, right):
    """Merge two sorted lists and count split inversions (pairs l > r)."""
    merged, inversions = [], 0
    i = j = 0
    n_left, n_right = len(left), len(right)
    while i < n_left and j < n_right:
        if left[i] > right[j]:
            # every remaining element of `left` exceeds right[j]
            inversions += n_left - i
            merged.append(right[j])
            j += 1
        else:
            merged.append(left[i])
            i += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged, inversions
@timedcall
def count_inversions_naive(array):
    """O(n^2) reference implementation used to cross-check count_inversions."""
    inversions = 0
    for j in range(len(array)):
        for i in range(j):
            # booleans add as 0/1
            inversions += array[i] > array[j]
    return inversions
def read_data(filepath):
    """Read one integer per line from *filepath* into a list."""
    with open(filepath, 'r') as fp:
        # read() materializes the contents, so closing the file before
        # list() consumes the lazy map is safe
        data = map(int, fp.read().splitlines())
    return list(data)
def parse_args():
    """Parse command-line arguments (--in_file: path of the input data)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--in_file', default='data/data.txt')
    return parser.parse_args()
def main():
    """Entry point: count and print inversions of the input file's numbers."""
    args = parse_args()
    data = read_data(args.in_file)
    inversions = count_inversions(data)
    print(inversions)
if __name__ == '__main__':
main()
| dfridman1/algorithms-coursera | algorithms/divide_and_conquer/week2/inversions.py | inversions.py | py | 1,642 | python | en | code | 0 | github-code | 36 |
from inpladesys.datatypes import Segment, Segmentation
from typing import List
import numpy as np
from inpladesys.datatypes.dataset import Dataset
from collections import Counter
from sklearn.model_selection import train_test_split
import time
import scipy.stats as st
def generate_segmentation(preprocessed_documents: List[List[tuple]], documents_features: List[np.ndarray],
                          document_label_lists, documents, task=None) -> List[Segmentation]:
    """Build one Segmentation per document from per-token author labels.

    Each preprocessed token is indexed as (..., start, end) — offsets into
    the raw document string (assumed from the prep_token[1]/[2] usage below;
    TODO confirm the tuple layout at the preprocessing site). For task 'a'
    (plagiarism detection) labels are afterwards normalized so that the
    majority author gets label 0.
    """
    assert len(documents_features) == len(preprocessed_documents)
    segmentations = []
    for i in range(len(documents_features)):
        preprocessed_doc_tokens = preprocessed_documents[i]
        doc_features = documents_features[i]
        # one feature row per token
        assert doc_features.shape[0] == len(preprocessed_doc_tokens)
        labels = document_label_lists[i]
        segments = []
        for k in range(doc_features.shape[0]):
            prep_token = preprocessed_doc_tokens[k]
            segments.append(Segment(offset=prep_token[1],
                                    length=prep_token[2] - prep_token[1],
                                    author=labels[k]))
        segmentations.append(Segmentation(author_count=max(labels) + 1,
                                          segments=segments,
                                          max_repairable_error=60,
                                          document_length=len(documents[i])))
    if task == 'a':
        for segmentation in segmentations:
            fix_segmentation_labels_for_plagiarism_detection(segmentation)
    return segmentations
def fix_segmentation_labels_for_plagiarism_detection(segmentation, plagiarism_majority=False):
    """Relabel a two-author segmentation in place so that label 0 covers the
    larger share of text (the presumed original author).

    :param plagiarism_majority: when True, invert the convention so label 0
        covers the *smaller* share instead.
    """
    # the majority label should be 0 (original author)
    assert segmentation.author_count == 2
    author_segments = segmentation.by_author[0]
    plagiarism_segments = segmentation.by_author[1]
    author_len = sum(s.length for s in author_segments)
    plagiarism_len = sum(s.length for s in plagiarism_segments)
    swap = author_len < plagiarism_len
    if plagiarism_majority:
        swap = not swap
    if swap:
        # flip every segment label and swap the by_author buckets to match
        for s in segmentation:
            s.author = 1 - s.author
        segmentation.by_author[0] = plagiarism_segments
        segmentation.by_author[1] = author_segments
def custom_train_test_split(preprocessed_documents: List[List[tuple]], documents_features: List[np.ndarray],
                            dataset: Dataset, train_size, random_state):
    """Split parallel per-document structures into train/test partitions.

    Splits *indices* (not the data itself) with sklearn's train_test_split so
    that tokens, features, author counts and Dataset entries stay aligned.
    Returns an 8-tuple: (prep_docs_train, prep_docs_test, features_train,
    features_test, author_counts_train, author_counts_test, dataset_train,
    dataset_test).
    """
    # indices of every document
    indices_of_docs = [i for i in range(len(preprocessed_documents))]
    i_train, i_test = train_test_split(indices_of_docs, train_size=train_size, random_state=random_state)
    prep_docs_train = [preprocessed_documents[i] for i in i_train]
    prep_docs_test = [preprocessed_documents[i] for i in i_test]
    doc_features_train = [documents_features[i] for i in i_train]
    doc_features_test = [documents_features[i] for i in i_test]
    author_counts_train = [dataset.segmentations[i].author_count for i in i_train]
    author_counts_test = [dataset.segmentations[i].author_count for i in i_test]
    dataset_train = Dataset([dataset.documents[i] for i in i_train],
                            [dataset.segmentations[i] for i in i_train])
    dataset_test = Dataset([dataset.documents[i] for i in i_test],
                           [dataset.segmentations[i] for i in i_test])
    return prep_docs_train, prep_docs_test, \
           doc_features_train, doc_features_test, \
           author_counts_train, author_counts_test, \
           dataset_train, dataset_test
def find_cluster_for_noisy_samples(predicted_labels, context_size=10):
    """Reassign DBSCAN-style noise labels (-1) in place using local context.

    Each noisy sample takes the most common non-noise label within a window
    of +/- *context_size* positions. Mutates *predicted_labels* in place and
    returns the number of noisy samples found.

    NOTE(review): if a window contains only -1 and the global label set has
    more than one label, the sample can keep its -1 label — confirm whether
    that is intended.
    """
    start = time.time()  # only used by the commented-out timing print below
    len_ = len(predicted_labels)
    counter = Counter(predicted_labels)
    noisy = counter[-1]
    unclustered_label = 0
    if -1 in counter.keys():
        if len(counter.most_common()) == 1:
            # everything is noise: collapse to a single cluster 0
            predicted_labels[:] = unclustered_label
        else:
            for i in range(len_):
                if predicted_labels[i] == -1:
                    # clamp the context window to the sequence bounds
                    left_diff = i - context_size
                    left = left_diff if left_diff >= 0 else 0
                    right_diff = i + context_size
                    right = right_diff if right_diff < len_ else len_
                    counter = Counter(predicted_labels[left:right])
                    if -1 in counter.keys():
                        if len(counter.most_common()) == 1:
                            # the whole window is noise: zero it out
                            predicted_labels[left:right] = unclustered_label
                        else:
                            # take the most frequent non-noise label in the window
                            found, curr = 0, 0
                            while found == 0:
                                if counter.most_common()[curr][0] != -1:
                                    predicted_labels[i] = counter.most_common()[curr][0]
                                    found = 1
                                curr += 1
    # print('Noisy labels reclustered in {}'.format(time.time()-start))
    return noisy
def perform_confidence_interval_test(samples: List, c_interval=0.95, p_normal_threshold=0.05):
    """Print a Student-t confidence interval for the mean of *samples*.

    First checks normality with scipy's normaltest; if the p-value falls
    below *p_normal_threshold* the interval is not computed. Only prints —
    nothing is returned.
    """
    n = len(samples)
    # https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/scipy.stats.normaltest.html
    # https://stackoverflow.com/questions/12838993/scipy-normaltest-how-is-it-used
    z, p_val = st.normaltest(samples, nan_policy='raise')
    if p_val < p_normal_threshold:
        print('A given sample is not from normal distribution: '
              'p_val = {} < threshold = {}'.format(p_val, p_normal_threshold))
        print('The confidence intervals cannot be calculated.')
    else:
        sem = st.sem(samples)
        mean = np.mean(samples)
        interval = st.t.interval(c_interval, n-1, loc=mean, scale=sem)  # https://stackoverflow.com/questions/15033511/compute-a-confidence-interval-from-sample-data/34474255#34474255
        print('Mean:', mean)
        print('Standard error:', sem)
        print('{}% confidence interval: {}\n'.format(c_interval * 100, interval))
| Coolcumber/inpladesys | software/inpladesys/models/misc/misc.py | misc.py | py | 5,944 | python | en | code | 3 | github-code | 36 |
"""Methods for playing the game from the value iteration agent."""
from DeepQLearningAgent import *
from QLearningAgent import *
from DoubleQLearningAgent import *
episodes = 100
def play_q(env: JoypadSpace, args, actions):
    """Play the game using the Q-learning agent."""
    agent: QLearningAgent = QLearningAgent(env)
    for _ in range(episodes):
        environment = None
        if actions is None:
            actions = env.action_space.n
        else:
            environment = SkipFrame(JoypadSpace(gym.make(args.env)), skip=5)
        # NOTE(review): when `actions` is None, `environment` stays None and
        # the calls below will raise — confirm callers always pass actions.
        state = environment.reset()
        done = False
        # three no-op steps to obtain an initial `info` dict for the state
        _, _, _, info, = environment.step(0)
        _, _, _, info, = environment.step(0)
        _, _, _, info, = environment.step(0)
        state = agent.make_state(info)
        while not done:
            action = agent.get_action(state)
            _, _, done, info = environment.step(action)
            state = agent.make_state(info)
            environment.render()
    # close the environment
    env.close()
def play_double_q(env: JoypadSpace, args, actions):
    """Play the game using the double Q-learning agent.

    Runs `episodes` episodes, recreating a fresh JoypadSpace environment per
    episode when an action set is supplied.
    """
    agent: DoubleQLearningAgent = DoubleQLearningAgent(env, actions)
    for _ in range(episodes):
        environment = None
        if actions is None:
            actions = env.action_space.n
        else:
            environment = JoypadSpace(gym.make(args.env), actions)
        # NOTE(review): when `actions` is None, `environment` stays None and
        # the calls below will raise — confirm callers always pass actions.
        environment.reset()
        done = False
        _, _, _, info, = environment.step(0)
        state = agent.make_state(info)
        while not done:
            if done:
                _ = environment.reset()
            action = agent.get_action(state)
            _, _, done, info = environment.step(action)
            state = agent.make_state(info)
            environment.render()
    # close the environment; best-effort — some wrappers raise on double close.
    # Bug fix: the bare `except:` also swallowed KeyboardInterrupt/SystemExit.
    try:
        env.close()
    except Exception:
        pass
| astelmach01/Mario-Q_Learning | play.py | play.py | py | 1,955 | python | en | code | 1 | github-code | 36 |
#!/bin/python3
import os
import sys
#
# Complete the getMoneySpent function below.
#
def getMoneySpent(keyboards, drives, b):
    """Return the maximum total cost of one keyboard plus one drive within
    budget *b*, or -1 if no affordable pair exists.

    :param keyboards: iterable of keyboard prices (positive ints).
    :param drives: iterable of USB drive prices (positive ints).
    :param b: budget.
    """
    # Simplification: the original sorted, filtered and collected sums into a
    # set before taking the max; a single max() over all affordable pairs is
    # equivalent for positive prices, and default=-1 covers the no-pair case.
    return max((keyboard + drive
                for keyboard in keyboards
                for drive in drives
                if keyboard + drive <= b),
               default=-1)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
bnm = input().split()
b = int(bnm[0])
n = int(bnm[1])
m = int(bnm[2])
keyboards = list(map(int, input().rstrip().split()))
drives = list(map(int, input().rstrip().split()))
#
# The maximum amount of money she can spend on a keyboard and USB drive, or -1 if she can't purchase both items
#
moneySpent = getMoneySpent(keyboards, drives, b)
fptr.write(str(moneySpent) + '\n')
fptr.close()
| CodingProgrammer/HackerRank_Python | (Implementation)Electronics_Shop.py | (Implementation)Electronics_Shop.py | py | 1,134 | python | en | code | 0 | github-code | 36 |
from pymongo import MongoClient
import time

# Tag every tweet whose hashtags mention one of three stock tickers as
# government-related, and report how long the bulk update takes.
client = MongoClient('localhost', 27017)
db = client['sahamyab']
series_collection = db['tweets']

start_time = time.time()
series_collection.update_many(
    {'hashtags': {'$in': ['فولاد', 'شستا', 'شبندر']}},
    {'$set': {'gov': True}})
end_time = time.time()

delta_time = end_time - start_time
print(delta_time)
| masoudrahimi39/Big-Data-Hands-On-Projects | NoSQL Databases (Cassandra, MongoDB, Neo4j, Elasticsearch)/MongoDB/1000 twiits/game3_2.py | game3_2.py | py | 397 | python | en | code | 0 | github-code | 36 |
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import ojfdb
#import ojfresult
#import ojfpostproc
import towercal
import yawcal
import bladecal
import ojf_freeyaw
def rebuild_symlink_database():
    """Rebuild the symlink index, its DataFrame conversion and the stats DB."""
    # first, rebuild the symlink list: all files in one folder
    # this includes the sweep cases as well
    path_db = 'database/'
    data_source_root = 'data/raw/'
    ojfdb.make_symlinks_all(path_db, data_source_root)
    # convert the database index to csv/DataFrame_hdf5/xls. Based on the file
    # name other usefull selection columns are created
    ojfdb.convert_pkl_index_df(path_db, db_id='symlinks_all')
    # create a stastistics database
    # create a dashboard plot for all the cases
    ojfdb.build_stats_db(path_db, 'symlinks_all', calibrate=True, dashplot=True,
                         dataframe=True, resample=True, continue_build=True,
                         save_df=True, save_df_csv=True)
    # only rebuild the statistics database
    ojfdb.build_stats_db(path_db, 'symlinks_all', calibrate=True, dashplot=False,
                         dataframe=True, resample=True, continue_build=False,
                         save_df=False, save_df_csv=False)
    # add the freeyaw control stair cases to the stats collection
    ojf_freeyaw.add_yawcontrol_stair_steps()
def rebuild_calibration_data():
    """
    Rebuild the calibration data based on the raw calibration measurements.
    """
    # tower, yaw-laser and blade calibrations, each from its own module
    towercal.all_tower_calibrations()
    yawcal.all_yawlaser_calibrations()
    # bladecall fails currently for the stair detection in April
    bladecal.all_blade_calibrations()
if __name__ == '__main__':
dummy=None
path_db = 'database/'
data_source_root = 'data/raw/'
| davidovitch/freeyaw-ojf-wt-tests | rebuild.py | rebuild.py | py | 1,755 | python | en | code | 2 | github-code | 36 |
from collections import deque
#올바른 괄호열인지 판단 하는 함수
# Return True if the string is a correctly matched parenthesis string.
def isCorrect(p):
    """Check whether *p* has every ')' matched by a preceding '('."""
    depth = 0
    for ch in p:
        if ch == '(':
            depth += 1
        elif ch == ')':
            if depth == 0:
                # a closing bracket with nothing open: not correct
                return False
            depth -= 1
    # any unmatched '(' left over also makes the string incorrect
    return depth == 0
def solution(p):  # p is a "balanced" string: equal counts of '(' and ')'
    # Base case: the empty string — and any already-correct string — is
    # returned unchanged.
    if p == "" or isCorrect(p):
        return p

    length = len(p)  # total length (NOTE(review): unused below)
    u = ""
    v = ""
    q = deque([p[0]])  # seed the matching queue with the first bracket
    idx = 0  # index of the character last consumed into q
    # Advance idx until the first non-splittable balanced prefix ends.
    while q:
        if q[-1] == p[idx + 1]:  # same bracket kind as the queue tail
            q.append(p[idx + 1])  # keep accumulating
            idx += 1  # move to the next character
        else:  # opposite kind: it cancels the queue tail
            q.pop()  # drop one pending bracket
            idx += 1  # move on
    # Split p at idx into u and v.
    u = p[:idx + 1]  # u: smallest non-empty balanced prefix
    v = p[idx + 1:]  # v: the remainder
    # If u is already *correct* (a balanced string starting with '(' is
    # correct here), recurse on v and prepend u unchanged.
    if u[0] == '(':
        return u + solution(v)
    else:  # u is balanced but not correct
        answer = ""
        p = solution(v)
        # strip u's first and last characters, then flip each bracket
        u = list(u)
        u[0] = ""
        u[-1] = ""
        u = ''.join(u)  # back to a string
        if u != "":
            for i in range(len(u)):
                if u[i] == '(':
                    u = list(u)
                    u[i] = ')'
                    u = ''.join(u)
                else:
                    u = list(u)
                    u[i] = '('
                    u = ''.join(u)
        # result: '(' + transformed v + ')' + flipped interior of u
        answer += "(" + p + ")" + u
        return answer
import tkinter as tk
import random
import time
class Ball:
    """A moving circle on the global canvas `c`, with velocity and acceleration."""

    def __init__(self, _x, _y, _r, vx, vy, a_x=0, a_y=0, color='black'):
        self.x_acceleration = a_x
        self.y_acceleration = a_y
        self.v_x = vx
        self.v_y = vy
        self.x = _x
        self.y = _y
        self.r = _r
        # the canvas oval item representing this ball
        self.ball = c.create_oval(_x - _r, _y - _r, _x + _r, _y + _r, fill=color, width=0)

    def ball_move(self):
        """Advance one tick: integrate velocity/acceleration and move the oval."""
        last_x = self.x
        last_y = self.y
        self.x += self.v_x
        self.y += self.v_y
        self.v_x += self.x_acceleration
        self.v_y += self.y_acceleration
        c.move(self.ball, self.x - last_x, self.y - last_y)

    def collision_with_ball(self, ball, enother_ball):
        """Placeholder: always reports a collision (condition not implemented)."""
        if True:  # ball-to-ball collision condition (to be implemented)
            return True
        else:
            return False

    def collision_with_wall(self, ball, wall):
        """Return True if *ball* touches the named wall ('right'/'left'/'up'/other=down).

        Walls are at x,y = 100 (left/top) and WW, WH (right/bottom) on the
        global canvas; d_x/d_y are unused margin placeholders.
        """
        d_x = 0
        d_y = 0
        if wall == 'right':
            if ball.x + ball.r - d_x > WW:  # wall-collision condition
                return True
            else:
                return False
        elif wall == 'left':
            if ball.x + ball.r + d_x < 100:  # wall-collision condition
                return True
            else:
                return False
        elif wall == 'up':
            if ball.y + ball.r + d_y < 100:  # wall-collision condition
                return True
            else:
                return False
        else:
            if ball.y + ball.r - d_y > WH:  # wall-collision condition
                return True
            else:
                return False
class Field(Ball):
    """The playing field: owns the list of balls and handles mouse clicks.

    NOTE(review): inheriting Ball creates a dummy zero-sized ball for the
    field itself — confirm that is intended rather than plain composition.
    """

    def __init__(self):
        Ball.__init__(self, 0, 0, 0, 0, 0)
        self.balls = []
        # left mouse button pops balls
        c.bind('<Button-1>', self.click)

    def click(self, event):
        """Delete any ball under the click, bump the score and redraw it."""
        global score, score_text
        # deleting from self.balls while indexing it can overrun; the
        # IndexError guard below papers over that
        try:
            for i in range(len(self.balls)):
                if (self.balls[i].x - event.x) ** 2 + (self.balls[i].y - event.y) ** 2 < self.balls[i].r ** 2:
                    c.delete(self.balls[i].ball)
                    del self.balls[i]
                    score += 1
                    c.delete(score_text)
                    score_text = c.create_text(1450, 10, text=str(score), font='Verdana 14')
        except IndexError:
            pass

    def generation_of_ball(self):
        """Spawn one ball with random position, radius, velocity and color."""
        x = random.randrange(100, 700)
        y = random.randrange(100, 500)
        r = random.randrange(30, 50)
        d_x = random.randrange(-10, 10)
        d_y = random.randrange(-10, 10)
        a_x = random.randrange(-2, 2)
        a_y = random.randrange(-2, 2)
        self.balls.append(Ball(x, y, r, d_x, d_y, a_x, a_y, random.choice(colors)))

    def collision_handling(self):
        """Bounce balls off walls (ball-to-ball response not implemented)."""
        for i in range(len(self.balls) - 1):
            for j in range(i, len(self.balls)):
                if self.collision_with_ball(self.balls[i], self.balls[j]):
                    pass
                    """ ball-to-ball collision response goes here """
            if self.collision_with_wall(self.balls[i], 'right'):
                self.balls[i].v_x *= -1
            if self.collision_with_wall(self.balls[i], 'left'):
                self.balls[i].v_x *= -1
            if self.collision_with_wall(self.balls[i], 'up'):
                self.balls[i].v_y *= -1
            if self.collision_with_wall(self.balls[i], 'down'):
                self.balls[i].v_y *= -1
        # the loop above skips the last ball; handle it separately
        try:
            if self.collision_with_wall(self.balls[-1], 'right'):
                self.balls[-1].v_x *= -1
            if self.collision_with_wall(self.balls[-1], 'left'):
                self.balls[-1].v_x *= -1
            if self.collision_with_wall(self.balls[-1], 'up'):
                self.balls[-1].v_y *= -1
            if self.collision_with_wall(self.balls[-1], 'down'):
                self.balls[-1].v_y *= -1
        except IndexError:
            pass

    def movement(self):
        """Advance every ball one tick."""
        for i in self.balls:
            i.ball_move()
def update_table(name, score):
    """Insert or update (name, score) in Best_Players.txt and rewrite the
    ranking, sorted by score descending.

    File format: "<rank> | <name> | <score>" per line.
    """
    flag = True  # True while *name* has not been found in the table
    q = 1  # rank counter for the rewrite
    a = []  # list of (score, name) pairs
    table = open("Best_Players.txt")
    for line in table:
        a.append((int(line.split(' | ')[2]), line.split(' | ')[1]))
    table.close()
    # replace the score of an existing player...
    for i in range(len(a)):
        if a[i][1] == name:
            a[i] = (score, name)
            flag = False
    # ...or append a new one
    if flag:
        a.append((score, name))
    a.sort(key=lambda x: x[0], reverse=True)
    table = open("Best_Players.txt", 'w')
    for i in a:
        table.write(str(q) + ' | ' + i[1] + ' | ' + str(i[0]) + '\n')
        q += 1
    table.close()
# --- window / game setup -------------------------------------------------
WW = 1500  # canvas width
WH = 800   # canvas height
root = tk.Tk()
root.geometry('1500x800+0+0')
c = tk.Canvas(root, width=WW, height=WH, bg='white')
c.pack()
colors = ['black', 'yellow', 'green', 'blue']
f = Field()
# start with ten random balls
for i in range(10):
    f.generation_of_ball()
score = 0
speed = 0       # spawn-rate level, raised as the score grows
iteration = 0   # frame counter used for timed spawning
score_text = c.create_text(1450, 10, text=str(score), font='Verdana 14')
name = input('Enter your name: ')
def upd():
    """Single game-loop tick: physics, difficulty ramp, spawning, reschedule."""
    # make sure the high-score file exists before the game can end
    try:
        table = open("Best_Players.txt")
    except FileNotFoundError:
        table = open("Best_Players.txt", 'w')
        table.write('1' + ' | ' + ' Vasya' + ' | ' + '666' + '\n' + '2' + ' | ' + ' Gosha' + ' | ' + '103' + '\n')
    table.close()
    global speed, iteration, name, score
    f.collision_handling()
    f.movement()
    # game over: too many balls on screen, or the count dropped to 7
    if len(f.balls) > 40 or len(f.balls) == 7:
        update_table(name, score)
        exit()
    # raise the spawn-rate level as the score grows
    if score > 5 and speed == 0:
        speed = 2
    elif score > 10 and speed == 2:
        speed = 3
    elif score > 20 and speed == 3:
        speed = 5
    elif score > 40 and speed == 5:
        speed = 8
    # NOTE(review): `iteration % 50 * speed` parses as (iteration % 50) * speed;
    # iteration % (50 * speed) may have been intended — confirm.
    if speed and iteration % 50 * speed == 0:
        iteration = 0
        f.generation_of_ball()
    # schedule the next tick in 40 ms (~25 fps)
    root.after(40, upd)
    iteration += 1
upd()
tk.mainloop()
| MitiaKorotkov/infa_2019_korotkov | laba4_2.py | laba4_2.py | py | 6,091 | python | en | code | 0 | github-code | 36 |
from heapq import heappop, heappush, heapify
def solution(scoville, K):
    """Return the minimum number of mixes needed so that every scoville value
    reaches at least K, or -1 if it is impossible.

    Each mix combines the two mildest foods as: mildest + 2 * second_mildest.
    """
    heapify(scoville)
    mixes = 0
    while scoville[0] < K and len(scoville) >= 2:
        mildest = heappop(scoville)
        second_mildest = heappop(scoville)
        heappush(scoville, mildest + second_mildest * 2)
        mixes += 1
    # if the mildest food is still below K, no further mix can help
    return -1 if scoville[0] < K else mixes
| back1ash/solving_problem | coding_test/programmers/더 맵게.py | 더 맵게.py | py | 367 | python | en | code | 0 | github-code | 36 |
pkgname = "rxvt-unicode"
# cports build template for rxvt-unicode. `self`, `subpackage`, and the
# hook functions below are part of the cports template DSL, not plain Python.
pkgver = "9.31"
pkgrel = 1
build_style = "gnu_configure"
configure_args = [
    "--with-terminfo=/usr/share/terminfo",
    "--with-term=rxvt-unicode-256color",
    "--enable-256-color",
    "--enable-font-styles",
    "--enable-keepscrolling",
    "--enable-startup-notification",
    "--enable-selectionscrolling",
    "--enable-smart-resize",
    "--enable-transparency",
    "--enable-combining",
    "--enable-unicode3",
    "--enable-pixbuf",
    "--enable-frills",
    "--enable-xim",
    "--disable-perl",
]
hostmakedepends = ["pkgconf"]
makedepends = [
    "xorgproto",
    "libxrender-devel",
    "libxft-devel",
    "libxt-devel",
    "libsm-devel",
    "libptytty-devel",
    "fontconfig-devel",
    "gdk-pixbuf-devel",
    "startup-notification-devel",
]
depends = [f"rxvt-unicode-terminfo={pkgver}-r{pkgrel}"]
pkgdesc = "Terminal emulator supporting Xft fonts and Unicode"
maintainer = "q66 <q66@chimera-linux.org>"
license = "GPL-3.0-or-later"
url = "http://software.schmorp.de/pkg/rxvt-unicode.html"
source = f"http://dist.schmorp.de/{pkgname}/{pkgname}-{pkgver}.tar.bz2"
sha256 = "aaa13fcbc149fe0f3f391f933279580f74a96fd312d6ed06b8ff03c2d46672e8"
hardening = ["vis", "!cfi"]


def init_configure(self):
    # point the build at the staged terminfo directory
    self.make_install_env[
        "TERMINFO"
    ] = f"{self.chroot_destdir}/usr/share/terminfo"


def pre_install(self):
    self.make_install_env[
        "TERMINFO"
    ] = f"{self.chroot_destdir}/usr/share/terminfo"
    self.install_dir("usr/share/terminfo")


def post_install(self):
    # ship the terminfo entry plus desktop integration files
    self.install_file("doc/etc/rxvt-unicode.terminfo", "usr/share/terminfo/r")
    self.install_file(self.files_path / f"{pkgname}.png", "usr/share/pixmaps")
    self.install_file(
        self.files_path / f"{pkgname}.desktop", "usr/share/applications"
    )


@subpackage("rxvt-unicode-terminfo")
def _tinfo(self):
    # split terminfo data into its own subpackage
    self.pkgdesc = f"{pkgdesc} (terminfo data)"
    return ["usr/share/terminfo"]


configure_gen = []
def main():
    """Prompt for weight and height, compute BMI, and report the category."""
    print("This program will calculate your BMI and tell whether it's above, below, or within the healthy range.")
    weight = int(input("What is your weight in pounds?"))
    height = int(input("What is your height?"))
    # Imperial BMI formula: 720 * weight / height^2
    bmi = (weight * 720) / (height ** 2)
    # Fix: the original assigned finalbmi = str(bmi) only to overwrite it,
    # and used three independent ifs; an elif chain covers 19-25 inclusive.
    if bmi < 19:
        finalbmi = "below"
    elif bmi <= 25:
        finalbmi = "within"
    else:
        finalbmi = "over"
    print("Your bmi is", finalbmi, "the healthy range.")
main()
| Eric-Wonbin-Sang/CS110Manager | 2020F_hw6_submissions/mehtaom/OmCH7P1.py | OmCH7P1.py | py | 539 | python | en | code | 0 | github-code | 36 |
from flask import (g, abort, get_flashed_messages, request, flash, redirect,
url_for)
from sqlalchemy.sql import functions
from buddyup.app import app
from buddyup.database import (Course, Visit, User, BuddyInvitation,
Location, Major, Event, Language, db)
from buddyup.templating import render_template
from buddyup.util import form_get, check_empty
from functools import wraps
def admin_required(f):
    """Decorator: allow the wrapped view only for the configured ADMIN_USER,
    aborting with 403 otherwise."""
    @wraps(f)
    def func(*args, **kwargs):
        if g.user and g.user.user_name == app.config.get("ADMIN_USER", u""):
            return f(*args, **kwargs)
        else:
            abort(403)
    return func
@app.route("/admin")
@admin_required
def admin_dashboard():
    """Render the admin dashboard with usage totals and all editable entities."""
    variables = {}
    variables['group_count'] = Event.query.count()
    variables['unique_visits'] = Visit.query.count()
    # total page requests, summed in SQL
    query = db.session.query(functions.sum(Visit.requests))
    variables['total_visits'] = query.scalar()
    variables['total_groups'] = Event.query.count()
    variables['total_invites'] = BuddyInvitation.query.count()
    # Maybe only count users who have logged in?
    variables['total_users'] = User.query.count()
    variables['courses'] = Course.query.order_by(Course.name).all()
    variables['majors'] = Major.query.order_by(Major.name).all()
    variables['locations'] = Location.query.order_by(Location.name).all()
    variables['languages'] = Language.query.order_by(Language.name).all()
    return render_template('admin/dashboard.html', **variables)
@app.route("/admin/course/add", methods=['POST'])
@admin_required
def admin_add_course():
    """Create a Course from the posted name/instructor fields.

    Validation errors are flashed; the record is only added when no
    messages were flashed.
    """
    name = form_get('name')
    check_empty(name, "Course Name")
    instructor = form_get('instructor')
    check_empty(instructor, "Professor Name")
    if not get_flashed_messages():
        course = Course(name=name, instructor=instructor)
        db.session.add(course)
        db.session.commit()
        flash("Added Course " + name)
    return redirect(url_for('admin_dashboard'))
    #return render_template('admin/dashboard.html', **get_stats())
@app.route("/admin/course/delete", methods=['POST'])
@admin_required
def admin_delete_course():
    """Delete every Course whose id was checked in the submitted form."""
    course_ids = map(int, request.form.getlist('courses'))
    for course_id in course_ids:
        Course.query.filter_by(id=course_id).delete()
    db.session.commit()
    flash('Course deleted')
    return redirect(url_for('admin_dashboard'))
@app.route("/admin/location/add", methods=['POST'])
@admin_required
def admin_add_location():
    """Create a Location from the posted form field; flash the result."""
    name = form_get('location')
    check_empty(name, "Location Name")
    if not get_flashed_messages():
        loc = Location(name=name)
        db.session.add(loc)
        db.session.commit()
        # Bug fix: the flash message previously said "Added Course".
        flash("Added Location " + name)
    return redirect(url_for('admin_dashboard'))
@app.route("/admin/location/delete", methods=['POST'])
@admin_required
def admin_delete_location():
    """Delete every Location whose id was checked in the submitted form."""
    location_ids = map(int, request.form.getlist('location'))
    for location_id in location_ids:
        Location.query.filter_by(id=location_id).delete()
    db.session.commit()
    flash('Location deleted')
    return redirect(url_for('admin_dashboard'))
@app.route("/admin/major/add", methods=['POST'])
@admin_required
def admin_add_major():
    """Create a Major from the posted form field; flash the result."""
    name = form_get('major')
    check_empty(name, "Major Name")
    if not get_flashed_messages():
        major = Major(name=name)
        db.session.add(major)
        db.session.commit()
        # Bug fix: the flash message previously said "Added Course".
        flash("Added Major " + name)
    return redirect(url_for('admin_dashboard'))
@app.route("/admin/major/delete", methods=['POST'])
@admin_required
def admin_delete_major():
    """Delete every Major whose id was checked in the submitted form."""
    major_ids = map(int, request.form.getlist('majors'))
    for major_id in major_ids:
        Major.query.filter_by(id=major_id).delete()
    db.session.commit()
    flash('Majors deleted')
    return redirect(url_for('admin_dashboard'))
@app.route("/admin/language/add", methods=['POST'])
@admin_required
def admin_add_language():
    """Create a Language from the posted form field; flash the result."""
    name = form_get('language')
    check_empty(name, "Language Name")
    if not get_flashed_messages():
        language = Language(name=name)
        db.session.add(language)
        db.session.commit()
        flash("Added Language " + name)
    return redirect(url_for('admin_dashboard'))
@app.route("/admin/language/delete", methods=['POST'])
@admin_required
def admin_delete_language():
    """Delete every Language whose id was checked in the submitted form."""
    language_ids = map(int, request.form.getlist('languages'))
    for language_id in language_ids:
        Language.query.filter_by(id=language_id).delete()
    db.session.commit()
    flash('Languages deleted')
    return redirect(url_for('admin_dashboard'))
@app.route("/admin/users")
@admin_required
def admin_user_management():
    """Render the user-management page with all users."""
    users = User.query.all()
    return render_template('admin/userManagement.html', users=users)
@app.route("/admin/forums")
@admin_required
def admin_forum_management():
    """Forum management page — not implemented yet (returns None / 200 empty)."""
    pass
@app.route("/admin/stats")
@admin_required
def admin_stats():
    """Render site-wide usage statistics."""
    variables = {}
    variables['group_count'] = Event.query.count()
    variables['unique_visits'] = Visit.query.count()
    # Bug fix: Query has no .sum() method — aggregate with SQL SUM the same
    # way admin_dashboard does.
    query = db.session.query(functions.sum(Visit.requests))
    variables['total_visits'] = query.scalar()
    variables['total_groups'] = Event.query.count()
    variables['total_invites'] = BuddyInvitation.query.count()
    # Maybe only count users who have logged in?
    variables['total_users'] = User.query.filter(User.activated == True).count()
    # Bug fix: the rendered response was built but never returned.
    return render_template('admin_stats.html', **variables)
| thangatran/Buddy-Up | buddyup/pages/admin.py | admin.py | py | 5,477 | python | en | code | 0 | github-code | 36 |
# -*- coding: utf-8 -*-
"""
Created on Mo 12 Sept 2 13:15:51 2022
@author: FKAM
"""
import pandas as pd
import streamlit as st
import plotly.express as px
import plotly.graph_objs as go
#import altair as alt
#from bokeh.plotting import figure
def list_ext(uploads, radio3):
    """Parse a list of uploaded drain-log files into DataFrames.

    Each file starts with free text, then a '#date...' header line, then CSV
    rows. Columns are renamed from that header; extra per-sample force
    columns are renamed Force_2, Force_3, ... Depending on *radio3* the
    frames are either coerced to a fixed default column set or kept as read.
    Returns (list_of_dataframes, headerlist_of_last_file); the frame with the
    most columns is moved to the front of the list.
    """
    list_ = []
    header_default = ["date [YYYYMMDD]",
                      "time [HHMMSS]",
                      "X [m]",
                      "Y [m]",
                      "Z [m]",
                      "Drain nr. [-]",
                      "Job nr. [-]",
                      "Base unit [-]",
                      "Operator [-]",
                      "Stitcher type [-]",
                      "Stitcher length [m]",
                      "Stitcher ballast [ton]",
                      "Drain type [-]",
                      "Anchoring [-]",
                      "Pattern type [0=square/1=triang.]",
                      "Pattern distance [m]",
                      "Pattern heading [deg]",
                      "Pattern X-position [m]",
                      "Pattern Y-position [m]",
                      "Prescribed depth [m]",
                      "Max. depth [m]",
                      "Pull back [m]",
                      "Cum. drain length [m]",
                      "Duration [s]",
                      "Max. force [kN]",
                      "Stitcher angle [deg]",
                      "ok",
                      "new roll",
                      "canceled",
                      "Log interval [m]",
                      "Data nr. [-]",
                      "Force [kN]"]
    df_default = pd.DataFrame(columns=header_default)
    for file_ in uploads:
        # consume lines up to and including the '#date' header line; the
        # subsequent read_csv continues from the current file position
        for headerline in file_:
            headerline = str(headerline)
            if '#date' in headerline:
                break
        headerline = headerline[:-3]  # strip the trailing newline/quote bytes
        headerlist = headerline.replace("b'#", "").split(',')
        if 'Remarks' in headerlist:
            headerlist.remove('Remarks')
            headerlist.remove('')
        # normalize the combined "[ok ... canceled]" columns to plain names
        for index, item in enumerate(headerlist):
            if ' [ok' in item:
                headerlist[index] = 'ok'
            if 'canceled]' in item:
                headerlist[index] = 'canceled'
        df = pd.read_csv(file_, index_col=False, header=None)
        nums = list(range(len(headerlist)))
        headerdict = dict(zip(nums, headerlist))
        df = df.rename(columns=headerdict)
        # some files carry a stray leading space in this column name
        df = df.rename(columns={' Drain nr. [-]' : 'Drain nr. [-]'})
        # columns after the first force column are the per-sample force trace
        force_1_loc = df.columns.get_loc('Force [kN]')
        df_force = df.iloc[:, force_1_loc+1:-1]
        for col in range(len(df_force.columns)):
            df_force = df_force.rename(columns={df_force.columns[col] : f'Force_{col+2}'})
        if radio3 == 'Default columns (recommended)':
            # align to the default schema and drop anything outside it
            if not header_default == headerlist:
                df = pd.concat([df_default, df])
            for col in df.columns:
                if col not in header_default:
                    df = df.drop([col], axis=1)
        elif radio3 == 'Columns from file':
            # drop unnamed (integer) columns, keep the renamed force trace
            for col in df.columns:
                if type(col) == int:
                    df = df.drop([col], axis=1)
            df = pd.concat([df, df_force], axis=1)
        #####
        list_.append(df)
    ### Sort list_ on df with most columns ##
    a = max([x.shape[1] for x in list_])
    indexa = [x.shape[1] for x in list_].index(a)
    longest = list_[indexa]
    del list_[indexa]
    list_.insert(0, longest)
    return list_, headerlist
def convert(list_, headerlist, wp_calc_method, fixed_nr):
    """Merge the per-file DataFrames into one frame, normalize date/time
    columns and (optionally) compute a working-platform thickness per drain.

    :param list_: list of DataFrames as produced by the upload reader.
    :param headerlist: column names parsed from the file header.
    :param wp_calc_method: 'No' to skip the thickness calculation, otherwise
        one of 'Lowest force plus fixed number' / 'Manual choice' / other.
    :param fixed_nr: force offset or manual cutoff [kN], depending on method.
    :returns: (frame, time_text) -- the merged frame and the raw HHMMSS
        strings before conversion to datetime.time.
    NOTE(review): wp_frame is computed but never returned -- confirm whether
    it should be part of the return value.
    """
    frame = pd.concat(list_, axis=0, ignore_index=True)
    ## Rename columns ##
    nums = list(range(len(headerlist)))
    headerdict = dict(zip(nums, headerlist))
    frame = frame.rename(columns=headerdict)
    frame = frame.sort_values(['Base unit [-]', 'date [YYYYMMDD]', 'time [HHMMSS]'])
    ## Add date and time columns ##
    #date_text = frame['date [YYYYMMDD]']
    frame['date [YYYYMMDD]'] = pd.to_datetime(frame['date [YYYYMMDD]'], format='%Y%m%d').dt.date
    frame['time [HHMMSS]'] = frame['time [HHMMSS]'].astype(int)
    # Left-pad times to six digits so '93015' parses as 09:30:15.
    for pvd in frame.index:
        if len(str(frame.loc[pvd, 'time [HHMMSS]'])) < 6:
            frame.loc[pvd, 'time [HHMMSS]'] = (6 - len(str(frame.loc[pvd, 'time [HHMMSS]']))) * '0' + str(frame.loc[pvd, 'time [HHMMSS]'])
    time_text = frame['time [HHMMSS]'].copy()
    frame['time [HHMMSS]'] = pd.to_datetime(frame['time [HHMMSS]'], format='%H%M%S').dt.time
    ## Cable tension + wp thickness ##
    if wp_calc_method == 'No':
        wp_frame = 0
    else:
        # Default thickness of 100 flags drains with no usable force trace.
        wp_thickness = [100]*len(frame)
        for pvd in range(len(frame)):
            keys = list(frame)
            force1 = keys.index('Force [kN]')
            force_df = frame.iloc[:, force1:]
            force_pvd = force_df.loc[pvd,:].values.tolist()
            force_pvd = [i for i in force_pvd if i != 0]  # remove zeros
            force_pvd = force_pvd[2:-3]  # remove first 2 and last 2 values
            if len(force_pvd) > 0:
                # The minimum force is taken as the cable tension.
                cable_tension = min(force_pvd)
                if wp_calc_method == 'Lowest force plus fixed number':
                    cutoff = cable_tension + fixed_nr
                elif wp_calc_method == 'Manual choice':
                    cutoff = fixed_nr
                else:
                    cutoff = 0
                # Only samples before the tension minimum are considered.
                cable_tension_index = force_pvd.index(cable_tension)
                force_pvd = force_pvd[:cable_tension_index]
                # Thickness = (#samples above cutoff + 2) * log interval.
                wp = (sum(i > cutoff for i in force_pvd) + 2) * frame['Log interval [m]'][pvd]
                wp_thickness[pvd] = wp
        wp_frame = frame[['X [m]', 'Y [m]']]
        wp_frame['wp [m]'] = wp_thickness
        # Hard-coded cross-section coordinates -- TODO confirm these are
        # project-specific constants.
        wp_frame['csx'] = [528374]*len(frame)
        wp_frame['csy'] = [507360]*len(frame)
    tofloat = ['Z [m]',
               'Drain nr. [-]',
               'Max. depth [m]',
               'Max. force [kN]',
               'Prescribed depth [m]',
               'Stitcher angle [deg]']
    for col in tofloat:
        if col in frame.columns:
            frame[col] = frame[col].astype(float)
        else:
            continue
    return frame, time_text
def show_delay(frame_filtered, delta, start_time, end_time, date, base_unit):
    """Render a Streamlit operational/delay timeline for one day and base unit.

    Gaps between consecutive drain timestamps longer than *delta* seconds are
    shown as delay (red), shorter gaps as operational time (green); totals and
    an efficiency percentage are printed below the chart, plus a PNG download.
    """
    time_text = frame_filtered['time_text']
    # Bracket the drain times with the shift start and end times.
    time_text = pd.concat([start_time, time_text, end_time])
    time_text = list(pd.to_datetime(time_text, format='%H%M%S'))
    start = time_text[:-1].copy()
    end = time_text[1:].copy()
    fig, ax = plt.subplots(figsize=[18,3], facecolor='white')
    # Each period is (start timestamp, duration) as expected by broken_barh.
    periods = []
    for pvd in range(len(start)):
        periods.append((start[pvd], end[pvd] - start[pvd]))
    periods_op = [tup for tup in periods if tup[1] <= np.timedelta64(int(delta), 's')]
    periods_delay = [tup for tup in periods if tup[1] > np.timedelta64(int(delta), 's')]
    ax.broken_barh(
        periods_delay,
        (0.1, 0.2),
        color='#FF6861',
        #edgecolor="black"
    )
    ax.broken_barh(
        periods_op,
        (-0.1, 0.2),
        color='green',
        # edgecolor="black"
    )
    ax.set_yticks([0, 0.2])
    ax.set_yticklabels(['Operational', 'Delay'])
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
    fig.suptitle(f'{date} - {base_unit}', fontsize=20)
    ax.grid(linestyle="--")
    fig.autofmt_xdate()
    st.write(fig)
    # Sum the durations of each category.
    total_op = total_delay = datetime.timedelta()
    for pvd in periods_op:
        total_op += pvd[1]
    for pvd in periods_delay:
        total_delay += pvd[1]
    # datetime.min + timedelta renders the total as an HH:MM:SS clock time.
    st.write('Operational time: ', str((datetime.datetime.min + total_op).time()))
    st.write('Delay time: ', str((datetime.datetime.min + total_delay).time()))
    st.write('Efficiency: ', str(round(100 * total_op.total_seconds() / (total_op.total_seconds() + total_delay.total_seconds()))), '%')
    fn = f'{date} - {base_unit}.png'
    img = io.BytesIO()
    plt.savefig(img, format='png')
    st.download_button(
        label='Download as image',
        data=img,
        file_name=fn,
        mime='image/png'
    )
def show_preview(frame):
    """Show an interactive X/Y scatter of the PVD data in Streamlit, colored
    by a user-selected column (defaults to 'Max. depth [m]', index 8)."""
    scale = ["date [YYYYMMDD]",
             "time [HHMMSS]",
             "Z [m]",
             "Drain nr. [-]",
             "Base unit [-]",
             "Operator [-]",
             "Stitcher type [-]",
             "Prescribed depth [m]",
             "Max. depth [m]",
             "Max. force [kN]",
             "Stitcher angle [deg]"]
    choose_scale = st.selectbox(
        'Choose plot parameter:',
        scale,
        help='Choose from the list what you want to plot in the figure below',
        index=8)
    # (A leftover no-op comparison "frame.columns[10] == choose_scale" was
    # removed here -- its result was discarded.)
    if choose_scale in frame.columns:
        fig = px.scatter(data_frame=frame,
                         x=frame['X [m]'],
                         y=frame['Y [m]'],
                         color=choose_scale,
                         color_continuous_scale='turbo')
        # Equal axis scaling so the field geometry is not distorted.
        fig.update_yaxes(scaleanchor='x', scaleratio=1)
        st.write(fig)
    else:
        st.write(f'{choose_scale} not found')
def show_wp(wp_frame, cs):
    """Plot the working-platform thickness points with a horizontal
    cross-section line at y = *cs* in a Streamlit plotly figure.

    NOTE(review): the cross-section x-range [507360, 507460] is hard-coded --
    confirm it matches the project coordinate system.
    """
    # st.write('**Working platform thickness:**')
    # #fig1 = go.Figure()
    # fig1 = px.scatter(data_frame = wp_frame,
    #                   x=wp_frame['X [m]'],
    #                   y=wp_frame['Y [m]'],
    #                   color='wp [m]',
    #                   color_continuous_scale='turbo',
    #                   range_color=[0,5])
    # fig1.update_yaxes(scaleanchor='x', scaleratio=1)
    # st.write(fig1)
    st.write('**Working platform thickness:**')
    fig1 = go.Figure()
    # PVD points colored by platform thickness.
    fig1.add_trace(go.Scatter(x=wp_frame['X [m]'],
                              y=wp_frame['Y [m]'],
                              mode='markers',
                              name='PVD points',
                              marker_color=wp_frame['wp [m]']))
    # Horizontal cross-section line at the requested y coordinate.
    x = [507360, 507460]
    y = [cs, cs]
    fig1.add_trace(go.Scatter(x=x, y=y,
                              mode='lines',
                              name='Cross section'))
    fig1.update_yaxes(scaleanchor='x', scaleratio=1)
    st.write(fig1)
    #st.write(fig1)
    # fig3 = go.Figure(data=fig1.data + fig2.data)
    # st.write(fig3)
| KempiG/Master | PVD_funcs.py | PVD_funcs.py | py | 11,030 | python | en | code | 0 | github-code | 36 |
20510556949 | from __future__ import print_function
import numpy as np
import ad3.factor_graph as fg
import time
def test_random_instance(n):
    """Build a random LP-knapsack instance of size *n*, solve it with AD3 and,
    when lpsolve is available, check both solvers agree.

    :param n: number of knapsack items.
    """
    costs = np.random.rand(n)
    budget = np.sum(costs) * np.random.rand()
    scores = np.random.randn(n)

    # time.clock() was removed in Python 3.8; perf_counter() is the
    # recommended replacement for interval timing.
    tic = time.perf_counter()
    x = solve_lp_knapsack_ad3(scores, costs, budget)
    toc = time.perf_counter()
    print('ad3: {:.2f}'.format(toc - tic))

    try:
        tic = time.perf_counter()
        x_gold = solve_lp_knapsack_lpsolve(scores, costs, budget)
        toc = time.perf_counter()
        print('lpsolve: {:.2f}'.format(toc - tic))
        res = x - x_gold
        assert np.linalg.norm(res) < 1e-6
    except ImportError:
        print('lpsolve not available')
def solve_lp_knapsack_ad3(scores, costs, budget):
    """Solve the LP relaxation of a knapsack problem with AD3.

    Builds one binary variable per item (log-potential = score), adds a single
    knapsack factor with the given costs/budget, and returns the posteriors.
    """
    factor_graph = fg.PFactorGraph()
    variables = []
    for score in scores:
        var = factor_graph.create_binary_variable()
        var.set_log_potential(score)
        variables.append(var)
    factor_graph.create_factor_knapsack(variables, costs=costs, budget=budget)
    # Run AD3 and keep only the variable posteriors.
    _, posteriors, _, _ = factor_graph.solve()
    return posteriors
def solve_lp_knapsack_gurobi(scores, costs, budget):
    """Solve the LP knapsack relaxation with Gurobi and return the optimum.

    Maximizes sum(scores * x) subject to sum(costs * x) <= budget with
    0 <= x <= 1, returning the solution as a numpy array.
    """
    from gurobipy import Model, LinExpr, GRB

    n = len(scores)
    model = Model("lp_knapsack")

    # One continuous variable per item, bounded to [0, 1].
    for _ in range(n):
        model.addVar(lb=0.0, ub=1.0)
    model.update()
    lp_vars = model.getVars()

    # Objective: maximize the total score.
    objective = LinExpr()
    for score, var in zip(scores, lp_vars):
        objective += score * var
    model.setObjective(objective, GRB.MAXIMIZE)

    # Budget constraint on the total cost.
    total_cost = LinExpr()
    for cost, var in zip(costs, lp_vars):
        total_cost += cost * var
    model.addConstr(total_cost, GRB.LESS_EQUAL, budget)

    model.optimize()
    assert model.status == GRB.OPTIMAL
    return np.array([var.x for var in lp_vars])
def solve_lp_knapsack_lpsolve(scores, costs, budget):
    """Solve the LP knapsack relaxation with lpsolve55 and return the optimum.

    lpsolve minimizes, so the objective is -scores (equivalent to maximizing
    the scores).  Raises ImportError when lpsolve55 is not installed.
    """
    import lpsolve55 as lps
    relax = True
    n = len(scores)
    lp = lps.lpsolve('make_lp', 0, n)
    # Set verbosity level. 3 = only warnings and errors.
    lps.lpsolve('set_verbose', lp, 3)
    lps.lpsolve('set_obj_fn', lp, -scores)
    lps.lpsolve('add_constraint', lp, costs, lps.LE, budget)
    # Box constraints 0 <= x <= 1.
    lps.lpsolve('set_lowbo', lp, np.zeros(n))
    lps.lpsolve('set_upbo', lp, np.ones(n))
    if not relax:
        lps.lpsolve('set_int', lp, [True] * n)
    else:
        lps.lpsolve('set_int', lp, [False] * n)
    # Solve the ILP, and call the debugger if something went wrong.
    ret = lps.lpsolve('solve', lp)
    assert ret == 0
    # Retrieve solution and return
    x, _ = lps.lpsolve('get_variables', lp)
    x = np.array(x)
    return x
if __name__ == "__main__":
n = 100
test_random_instance(n)
| andre-martins/AD3 | examples/python/example_knapsack.py | example_knapsack.py | py | 2,835 | python | en | code | 68 | github-code | 36 |
34222159351 | from django.shortcuts import render, redirect
from .models import Aricle
from .forms import ArticleForm
def new(request):
    """Create a new article.

    POST: validate the submitted form; on success save and redirect to the
    detail page, otherwise re-render the bound form with its errors.
    Any other method: render an empty form.
    """
    if request.method == 'POST':
        article_form = ArticleForm(request.POST)
        if article_form.is_valid():
            article = article_form.save()
            return redirect('blog:detail', article.id)
    else:
        # Fix: the original `elif method == 'GET'` left article_form unbound
        # for any other HTTP method, causing an UnboundLocalError below.
        article_form = ArticleForm()
    context = {
        'article_form': article_form,
    }
    return render(request, 'blog/form_new.html', context)
| kimhyunso/exampleCode | django/MTV/blog/new_views.py | new_views.py | py | 542 | python | en | code | 0 | github-code | 36 |
31553967278 | from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import json
import string
import re
ps = PorterStemmer()
punctuation = list(string.punctuation)
stop = stopwords.words('english') + punctuation + ['rt', '#rt', '#follow', 'via', 'donald', 'trump', '…', "trump's",
'new']
emoticons_str = r"""
(?:
[:=;] # Eyes
[oO\-]? # Nose (optional)
[D\)\]\(\]/\\OpP] # Mouth
)"""
regex_str = [
emoticons_str,
r'<[^>]+>', # HTML tags
r'(?:@[\w_]+)', # @-mentions
r"(?:\#+[\w_]+[\w\'_\-]*[\w_]+)", # hash-tags
r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+', # URLs
r'(?:(?:\d+,?)+(?:\.?\d+)?)', # numbers
r"(?:[a-z][a-z'\-_]+[a-z])", # words with - and '
r'(?:[\w_]+)', # other words
r'(?:\S)' # anything else
]
tokens_re = re.compile(r'('+'|'.join(regex_str)+')', re.VERBOSE | re.IGNORECASE)
emoticon_re = re.compile(r'^'+emoticons_str+'$', re.VERBOSE | re.IGNORECASE)
def tokenize(s):
    """Split raw tweet text into tokens (emoticons, mentions, hashtags, URLs,
    numbers, words, ...) using the module-level ``tokens_re`` pattern."""
    return tokens_re.findall(s)
def preprocess(s, lowercase=True):
    """Tokenize *s* and normalize the tokens.

    When ``lowercase`` is True (the default), every non-emoticon token is
    lower-cased and Porter-stemmed; emoticons are kept verbatim.  When False,
    the raw tokens are returned unchanged.
    """
    tokens = tokenize(s)
    if lowercase:
        tokens = [token if emoticon_re.search(token) else ps.stem(token.lower()) for token in tokens]
    return tokens
def normalize_text():
    """Stream Tweets.json line by line and print each tweet's normalized,
    stopword-free token list.

    Malformed JSON lines and tweets without a 'text' field are skipped.
    """
    with open('Tweets.json', 'r') as f:
        for line in f:
            try:
                tweet = json.loads(line)  # load it as Python dict
                tokens = preprocess(tweet['text'])
                print([w for w in tokens if w not in stop])
            except (json.JSONDecodeError, KeyError):
                # Fix: the original bare `except BaseException` also swallowed
                # KeyboardInterrupt/SystemExit; only skip the expected errors.
                continue


normalize_text()
| henrydambanemuya/socialsensing | ConflictSensingApp/TextNormalizer.py | TextNormalizer.py | py | 1,705 | python | en | code | 0 | github-code | 36 |
# Qus: https://leetcode.com/problems/maximal-network-rank/
# time complexity O(N**2)
class Solution(object):
    """Maximal network rank: the largest combined degree of any two distinct
    cities, counting a road that directly connects them only once."""

    def maximalNetworkRank(self, n, roads):
        """
        :type n: int
        :type roads: List[List[int]]
        :rtype: int
        """
        # Build an adjacency set for every city (including isolated ones).
        adjacency = {city: set() for city in range(n)}
        for a, b in roads:
            adjacency[a].add(b)
            adjacency[b].add(a)

        best = 0
        for a in adjacency:
            for b in adjacency:
                if a == b:
                    continue
                rank = len(adjacency[a]) + len(adjacency[b])
                # A road directly between a and b is shared: count it once.
                if b in adjacency[a]:
                    rank -= 1
                best = max(best, rank)
        return best
20436716291 | # pxy7896@foxmail.com
# 2020/8/1
__doc__ = """
获取中公教育每日一练内容;获取国务院政府工作报告。
"""
import requests
from bs4 import BeautifulSoup
import os
# 服务器反爬虫机制会判断客户端请求头中的User-Agent是否来源于真实浏览器,所以,我们使用Requests经常会指定UA伪装成浏览器发起请求
headers = {'user-agent': 'Mozilla/5.0'}
# 写文件
def writedoc(raw_ss, i, ii):
    """Cut the practice-question section out of *raw_ss* and append it to the
    per-page file ``result\\第<ii>页.txt`` (UTF-8).

    :param raw_ss: full page text returned by :func:`geturl`.
    :param i: question counter (only used by the commented-out log line).
    :param ii: listing-page number, used in the output file name.
    """
    # Slice between the "模拟试题" (mock questions) marker and the
    # "免责声明" (disclaimer) marker, trimming 5 chars off each end.
    # NOTE(review): if either marker is missing, find() returns -1 and the
    # slice silently grabs the wrong text -- confirm markers always exist.
    start = raw_ss.find("模拟试题")
    end = raw_ss.find("免责声明")
    ss = raw_ss[start+5: end-5]
    # Append to the page file, creating it on first write.
    with open("result\\第" + str(ii) + "页.txt", 'a', encoding='utf-8') as f:
        f.write(ss + "\n\n")
    #print("问题" + str(i) + "文件写入完成" + "\n")
# 根据详细页面url获取目标字符串
def geturl(url):
    """Fetch one detail page and return the concatenated text of its <p> tags.

    The page is decoded as GB2312 before parsing; tags whose ``.string`` is
    None contribute the literal text 'None' (matching the original behavior).
    """
    response = requests.get(url, headers=headers)
    # Without this override the page decodes as mojibake.
    response.encoding = "GB2312"
    soup = BeautifulSoup(response.text, "html.parser")
    paragraphs = soup.find_all(["p", ".offcn_shocont"])
    return "".join(str(tag.string) for tag in paragraphs)
# 获取目标网址第几页
def getalldoc(ii):
    """Scrape listing page *ii* of the daily-practice index and write every
    linked question page to disk via :func:`writedoc`."""
    #string_ans_li = []
    if ii == 1:
        testurl = "http://www.offcn.com/mianshi/mryl/"
    else:
        # Pages after the first are named "<n>.html".
        testurl = "http://www.offcn.com/mianshi/mryl/" + str(ii) + ".html"
    # Fetch the listing page.
    res = requests.get(testurl, headers=headers)
    # Without this override the page decodes as mojibake.
    res.encoding = "GB2312"
    # Parse the page.
    soup = BeautifulSoup(res.text, "html.parser")
    # Collect every anchor tag on the listing page.
    ans = soup.find_all("a")
    # Question counter, forwarded to writedoc for logging only.
    cnt = 1
    # (a previous version created one directory per page here)
    #mkdir("result\\第" + str(ii) + "页\\")
    for tag in ans:
        # Link target of the anchor.
        #string_ans = str(tag.a.get("href"))
        string_ans = str(tag.get("href"))
        # Keep only links into the 2019/2020 question archives.
        # NOTE(review): "/mianshi/2020/" is tested twice -- one of the three
        # checks was probably meant for a different year; confirm.
        if string_ans.find("/mianshi/2020/") == -1 and string_ans.find("/mianshi/2019/") == -1 and string_ans.find("/mianshi/2020/") == -1:
            continue
        #string_ans_li.append(string_ans)
        # Fetch the detail page and extract its text.
        string_write = geturl(string_ans)
        # Append the text to the per-page result file.
        writedoc(string_write, cnt, ii)
        cnt = cnt + 1
    #print("第", ii, "页写入完成")
    #return string_ans_li
"""
def mkdir(path):
# 去除首位空格
path = path.strip()
# 去除尾部 \ 符号
path = path.rstrip("\\")
# 判断路径是否存在
# 存在 True
# 不存在 False
isExists = os.path.exists(path)
# 判断结果
if not isExists:
# 如果不存在则创建目录
# 创建目录操作函数
os.makedirs(path)
return True
else:
# 如果目录存在则不创建,并提示目录已存在
return False
"""
def getall():
    """Scrape listing pages 1 through 9 and write each page's questions to
    disk, logging progress per page."""
    for page in range(1, 10):
        getalldoc(page)
        print(f"{page} end!")
def get_gov(testurl, file):
    """Download one State-Council work-report page and append its paragraph
    text to the output file.

    :param testurl: page URL to fetch.
    :param file: path of the text file to append to (written as UTF-8).
    """
    res = requests.get(testurl, headers=headers)
    # Without this override the page decodes as mojibake.
    res.encoding = "utf-8"
    soup = BeautifulSoup(res.text, "html.parser")
    ans = soup.find_all([["p", "h5"], "conlun2_box_text"])
    # Concatenate the text of every matched tag, one per line.
    mlist = ""
    for tag in ans:
        s = str(tag.string)
        if s == 'None':
            # Tags with nested markup have .string == None; skip them.
            continue
        mlist = mlist + s + "\n"
    # Fixes: do not shadow the `file` parameter with the handle, use plain
    # append mode, and write UTF-8 explicitly so the output does not depend
    # on the platform's default encoding.
    with open(file, "a", encoding="utf-8") as fh:
        fh.write(mlist)
if __name__ == "__main__":
#getall()
get_gov("http://www.gov.cn/guowuyuan/zfgzbg.htm","gov-2020.txt")
get_gov("http://www.gov.cn/guowuyuan/2019zfgzbg.htm","gov-2019.txt")
| pxy7896/PlayWithPython3 | 获取某网站每日一练.py | 获取某网站每日一练.py | py | 4,447 | python | zh | code | 0 | github-code | 36 |
23856814731 | from collections import deque
# Item encoding: each object is the int 2*element_id + kind, where the low
# bit distinguishes generators (0) from microchips (1).
GENERATOR = 0
MICROCHIP = 1
floors = [[] for _ in range(4)]  # items currently on each of the four floors
elev = 0  # elevator starts at the bottom floor
elems = dict()  # element name -> numeric id (filled lazily by append())
def is_safe(arrangement):
    """Return True iff no floor fries a microchip.

    A floor is unsafe when it holds at least one microchip without its own
    generator while any generator is present on that floor.
    """
    floors, _ = arrangement
    for floor in floors:
        # Item encoding: item >> 1 is the element id; the low bit is 1 for a
        # microchip and 0 for a generator.
        generators = {item >> 1 for item in floor if item & 1 == 0}
        chips = {item >> 1 for item in floor if item & 1 == 1}
        unshielded = chips - generators
        if unshielded and generators:
            return False
    return True
def moves(arrangement):
    """Generate every safe arrangement reachable in one elevator move.

    The elevator carries one item (j == i) or an unordered pair (j > i) from
    the current floor to an adjacent floor.  Each candidate is normalized --
    floors sorted and frozen as tuples -- so arrangements are hashable.
    """
    res = []
    floors, elev = arrangement
    # Floors adjacent to the elevator's current position.
    nelevs = []
    if elev > 0:
        nelevs.append(elev-1)
    if elev < 3:
        nelevs.append(elev+1)
    for nelev in nelevs:
        ne = len(floors[elev])
        for i in range(ne):
            for j in range(i, ne):
                cand = [list(x) for x in floors]
                cand[nelev].append(floors[elev][i])
                cand[elev][i] = None  # mark the moved item for removal
                if j != i:
                    cand[nelev].append(floors[elev][j])
                    cand[elev][j] = None
                    cand[elev].remove(None)
                cand[elev].remove(None)
                # Normalize: sort each floor and freeze it as a tuple.
                for k, _ in enumerate(cand):
                    cand[k].sort()
                    cand[k] = tuple(cand[k])
                narr = (tuple(cand), nelev)
                if is_safe(narr):
                    res.append(narr)
    return res
def append(lst, e):
    """Encode the (element_name, kind) pair *e* as an int and append it to *lst*.

    Element names are interned into the global ``elems`` map on first use, so
    each element gets a stable numeric id; the low bit of the encoded value
    carries the GENERATOR/MICROCHIP kind.
    """
    e0, e1 = e
    if not e0 in elems:
        elems[e0] = len(elems)
    e0 = elems[e0]
    if e1 == 'generator':
        e1 = GENERATOR
    else:
        e1 = MICROCHIP
    lst.append(2*e0+e1)
# Parse the puzzle input: each line describes one floor; the word before
# "generator"/"microchip" names the element.
with open('day11/input.txt') as h:
    for i, line in enumerate(h):
        line = line.strip('.\n')
        words = line.split()
        for j, word in enumerate(words):
            word = word.strip(',')
            if word == 'generator':
                append(floors[i], (words[j-1], word))
            elif word == 'microchip':
                # Drop the trailing "-compatible" (11 chars) from the element
                # name -- assumes AoC's "<element>-compatible microchip" form.
                append(floors[i], (words[j-1][:-11], word))
        if i == 0:
            # Part 2: four extra items start on the first floor.
            append(floors[i], ('elerium', 'generator'))
            append(floors[i], ('elerium', 'microchip'))
            append(floors[i], ('dilithium', 'generator'))
            append(floors[i], ('dilithium', 'microchip'))
        floors[i].sort()
        floors[i] = tuple(floors[i])
floors = tuple(floors)
initial = (floors, elev)

# Goal state: every item moved to the top floor, elevator at the top.
final = [[list(x) for x in floors], 3]
for i in range(3):
    final[0][3].extend(final[0][i])
    final[0][i] = []
for i in range(4):
    final[0][i].sort()
    final[0][i] = tuple(final[0][i])
final[0] = tuple(final[0])
final = tuple(final)

# Bidirectional BFS: expand one depth layer from the start and one from the
# goal alternately; when the frontiers touch, the minimum number of moves is
# the sum of both depths plus the connecting move.
qfront = deque([(initial, 0)])
qback = deque([(final, 0)])
sfront, sback = {initial: 0}, {final: 0}
dfront, dback = 0, 0
cont = True
while cont:
    # Expand every forward-frontier node at depth dfront.
    while len(qfront) > 0 and qfront[0][1] == dfront:
        arr, _ = qfront.popleft()
        for narr in moves(arr):
            if narr in sback:
                # Frontiers met: print the total move count and stop.
                print(dfront + sback[narr] + 1)
                cont = False
                break
            if narr in sfront:
                continue
            sfront[narr] = dfront + 1
            qfront.append((narr, dfront + 1))
        if not cont:
            break
    if not cont:
        break
    dfront += 1
    # Expand every backward-frontier node at depth dback.
    while len(qback) > 0 and qback[0][1] == dback:
        arr, _ = qback.popleft()
        for narr in moves(arr):
            if narr in sfront:
                print(sfront[narr] + dback + 1)
                cont = False
                break
            if narr in sback:
                continue
            sback[narr] = dback + 1
            qback.append((narr, dback + 1))
        if not cont:
            break
    dback += 1
| mahiuchun/adventofcode-2016 | day11/part2.py | part2.py | py | 3,739 | python | en | code | 0 | github-code | 36 |
19041324588 | # Author: Trevor Sherrard
# Since: Feb. 21, 2022
# Purpose: This file contains functionallity needed to run inference on a single image
import cv2
import numpy as np
import tensorflow as tf
import keras
# declare file paths
model_file_loc = "../../models/saved_unet_model.h5"
test_image_loc = "../../dataset/semantic_drone_dataset/original_images/000.jpg"
# declare goal image sizes
img_height = 800
img_width = 1200
def preprocess_image(img_file):
    """Load the image at *img_file* (a byte-string path tensor), resize it to
    (img_width, img_height), scale pixels to [0, 1] and return a float32
    tensor of static shape (img_height, img_width, 3)."""
    def func(img_file):
        # Runs eagerly via tf.numpy_function, so plain OpenCV/numpy is fine.
        img_file = img_file.decode()
        img = cv2.imread(img_file, cv2.IMREAD_COLOR)
        # cv2.resize takes (width, height), not (height, width).
        img = cv2.resize(img, (img_width, img_height))
        img = img / 255.0
        img = img.astype(np.float32)
        return img
    image = tf.convert_to_tensor(tf.numpy_function(func, [img_file], [tf.float32]))
    # Restore a static shape -- tf.numpy_function loses shape information.
    image = tf.reshape(image, (img_height, img_width, 3))
    return image
def load_image_as_dataset(img_file):
    """Wrap the given image path(s) in a preprocessed tf.data pipeline with
    batch size 1, ready for model.predict()."""
    return (tf.data.Dataset.from_tensor_slices(img_file)
            .map(preprocess_image)
            .batch(1))
def run_inference(image_loc):
    """Run the saved U-Net on one image and display the per-pixel class mask.

    :param image_loc: path of the image to segment.
    """
    # load image
    dataset = load_image_as_dataset([image_loc])
    # load model
    model = keras.models.load_model(model_file_loc)
    # run inference
    pred = model.predict(dataset)
    # Collapse the per-class probabilities (axis 3) into a single-channel
    # class-id mask for the one image in the batch.
    predictions = np.argmax(pred, axis=3)
    single_channel_pred = predictions[0]
    single_channel_pred = single_channel_pred.astype("uint8")
    # show mono mask image (blocks until a key is pressed)
    cv2.imshow("test", single_channel_pred)
    cv2.waitKey(0)
if(__name__ == "__main__"):
run_inference(test_image_loc)
| Post-Obstruction-Assessment-Capstone/Drone-Road-Segmentation | utils/deep_learning/single_image_inference.py | single_image_inference.py | py | 1,599 | python | en | code | 0 | github-code | 36 |
8444405928 | import cupy
import cupyx.scipy.fft
from cupy import _core
from cupy._core import _routines_math as _math
from cupy._core import fusion
from cupy.lib import stride_tricks
import numpy
# Reduction kernel used by _dot_convolve: multiplies paired elements and sums
# the products along the reduction axis (a sliding dot product).
_dot_kernel = _core.ReductionKernel(
    'T x1, T x2',
    'T y',
    'x1 * x2',
    'a + b',
    'y = a',
    '0',
    'dot_product'
)
def _choose_conv_method(in1, in2, mode):
if in1.ndim != 1 or in2.ndim != 1:
raise NotImplementedError('Only 1d inputs are supported currently')
if in1.dtype.kind in 'bui' or in2.dtype.kind in 'bui':
return 'direct'
if _fftconv_faster(in1, in2, mode):
return 'fft'
return 'direct'
def _fftconv_faster(x, h, mode):
    """
    .. seealso:: :func: `scipy.signal._signaltools._fftconv_faster`
    """
    # TODO(Dahlia-Chehata): replace with GPU-based constants.
    # Until real GPU cost constants exist, always prefer the FFT path for
    # floating/complex inputs.
    return True
def convolve(a, v, mode='full'):
    """Returns the discrete, linear convolution of two one-dimensional sequences.

    Args:
        a (cupy.ndarray): first 1-dimensional input.
        v (cupy.ndarray): second 1-dimensional input.
        mode (str, optional): `valid`, `same`, `full`

    Returns:
        cupy.ndarray: Discrete, linear convolution of a and v.

    .. seealso:: :func:`numpy.convolve`

    """  # NOQA
    if a.size == 0:
        raise ValueError('a cannot be empty')
    if v.size == 0:
        raise ValueError('v cannot be empty')
    if v.ndim > 1:
        raise ValueError('v cannot be multidimensional array')
    # Convolution is commutative; keep the longer operand in `a`.
    if v.size > a.size:
        a, v = v, a
    a = a.ravel()
    v = v.ravel()
    # Dispatch to the direct or FFT backend depending on dtype/size.
    method = _choose_conv_method(a, v, mode)
    if method == 'direct':
        out = _dot_convolve(a, v, mode)
    elif method == 'fft':
        out = _fft_convolve(a, v, mode)
    else:
        raise ValueError('Unsupported method')
    return out
def _fft_convolve(a1, a2, mode):
    """FFT-based 1-d convolution backend for :func:`convolve`.

    Expects float or complex inputs; integer dtypes are rounded back at the
    end after the floating-point FFT round trip.
    """
    offset = 0
    # Keep the longer operand in a1; the parity offset centers 'same' output.
    if a1.shape[-1] < a2.shape[-1]:
        a1, a2 = a2, a1
        offset = 1 - a2.shape[-1] % 2

    # if either of them is complex, the dtype after multiplication will also be
    if a1.dtype.kind == 'c' or a2.dtype.kind == 'c':
        fft, ifft = cupy.fft.fft, cupy.fft.ifft
    else:
        # Real inputs can use the cheaper real-valued transforms.
        fft, ifft = cupy.fft.rfft, cupy.fft.irfft

    dtype = cupy.result_type(a1, a2)
    n1, n2 = a1.shape[-1], a2.shape[-1]
    # Pad to a 5-smooth length so the FFT runs fast.
    out_size = cupyx.scipy.fft.next_fast_len(n1 + n2 - 1)
    fa1 = fft(a1, out_size)
    fa2 = fft(a2, out_size)
    out = ifft(fa1 * fa2, out_size)

    # Slice the full linear convolution down to the requested mode.
    if mode == 'full':
        start, end = 0, n1 + n2 - 1
    elif mode == 'same':
        start = (n2 - 1) // 2 + offset
        end = start + n1
    elif mode == 'valid':
        start, end = n2 - 1, n1
    else:
        raise ValueError(
            'acceptable mode flags are `valid`, `same`, or `full`.')
    out = out[..., start:end]

    if dtype.kind in 'iu':
        out = cupy.around(out)

    return out.astype(dtype, copy=False)
def _dot_convolve(a1, a2, mode):
    """Direct (exact) 1-d convolution backend for :func:`convolve`.

    Pads a1 according to *mode*, builds a strided (out_size, n2) view of
    overlapping windows, and reduces each window against the reversed a2
    with the module-level dot-product kernel.
    """
    offset = 0
    # Keep the longer operand in a1; offset centers the 'same' padding.
    if a1.size < a2.size:
        a1, a2 = a2, a1
        offset = 1 - a2.size % 2
    dtype = cupy.result_type(a1, a2)
    n1, n2 = a1.size, a2.size
    a1 = a1.astype(dtype, copy=False)
    a2 = a2.astype(dtype, copy=False)
    if mode == 'full':
        out_size = n1 + n2 - 1
        a1 = cupy.pad(a1, n2 - 1)
    elif mode == 'same':
        out_size = n1
        pad_size = (n2 - 1) // 2 + offset
        a1 = cupy.pad(a1, (n2 - 1 - pad_size, pad_size))
    elif mode == 'valid':
        out_size = n1 - n2 + 1
    # Zero-copy sliding-window view: row k is a1[k:k+n2].
    stride = a1.strides[0]
    a1 = stride_tricks.as_strided(a1, (out_size, n2), (stride, stride))
    output = _dot_kernel(a1, a2[::-1], axis=1)
    return output
def clip(a, a_min, a_max, out=None):
    """Clips the values of an array to a given interval.

    This is equivalent to ``maximum(minimum(a, a_max), a_min)``, while this
    function is more efficient.

    Args:
        a (cupy.ndarray): The source array.
        a_min (scalar, cupy.ndarray or None): The left side of the interval.
            When it is ``None``, it is ignored.
        a_max (scalar, cupy.ndarray or None): The right side of the interval.
            When it is ``None``, it is ignored.
        out (cupy.ndarray): Output array.

    Returns:
        cupy.ndarray: Clipped array.

    .. seealso:: :func:`numpy.clip`

    Notes
    -----
    When `a_min` is greater than `a_max`, `clip` returns an
    array in which all values are equal to `a_max`.

    """
    # Inside a fusion region, delegate to the fused ufunc implementation.
    if fusion._is_fusing():
        return fusion._call_ufunc(_math.clip,
                                  a, a_min, a_max, out=out)

    # TODO(okuta): check type
    return a.clip(a_min, a_max, out=out)
# sqrt_fixed is deprecated.
# numpy.sqrt is fixed in numpy 1.11.2.
sqrt = sqrt_fixed = _core.sqrt
cbrt = _core.create_ufunc(
'cupy_cbrt',
('e->e', 'f->f', 'd->d'),
'out0 = cbrt(in0)',
doc='''Elementwise cube root function.
.. seealso:: :data:`numpy.cbrt`
''')
square = _core.create_ufunc(
'cupy_square',
('b->b', 'B->B', 'h->h', 'H->H', 'i->i', 'I->I', 'l->l', 'L->L', 'q->q',
'Q->Q', 'e->e', 'f->f', 'd->d', 'F->F', 'D->D'),
'out0 = in0 * in0',
doc='''Elementwise square function.
.. seealso:: :data:`numpy.square`
''')
absolute = _core.absolute
fabs = _core.create_ufunc(
'cupy_fabs',
('e->e', 'f->f', 'd->d'),
'out0 = abs(in0)',
doc='''Calculates absolute values element-wise.
Only real values are handled.
.. seealso:: :data:`numpy.fabs`
''')
_unsigned_sign = 'out0 = in0 > 0'
_complex_sign = '''
if (in0.real() == 0) {
out0 = (in0.imag() > 0) - (in0.imag() < 0);
} else {
out0 = (in0.real() > 0) - (in0.real() < 0);
}
'''
sign = _core.create_ufunc(
'cupy_sign',
('b->b', ('B->B', _unsigned_sign), 'h->h', ('H->H', _unsigned_sign),
'i->i', ('I->I', _unsigned_sign), 'l->l', ('L->L', _unsigned_sign),
'q->q', ('Q->Q', _unsigned_sign), 'e->e', 'f->f', 'd->d',
('F->F', _complex_sign), ('D->D', _complex_sign)),
'out0 = (in0 > 0) - (in0 < 0)',
doc='''Elementwise sign function.
It returns -1, 0, or 1 depending on the sign of the input.
.. seealso:: :data:`numpy.sign`
''')
heaviside = _core.create_ufunc(
'cupy_heaviside',
('ee->e', 'ff->f', 'dd->d'),
'''
if (isnan(in0)) {
out0 = in0;
} else if (in0 == 0) {
out0 = in1;
} else {
out0 = (in0 > 0);
}
''',
doc='''Compute the Heaviside step function.
.. seealso:: :data:`numpy.heaviside`
'''
)
_float_preamble = '''
#ifndef NAN
#define NAN __int_as_float(0x7fffffff)
#endif
'''
_float_maximum = ('out0 = (isnan(in0) | isnan(in1)) ? out0_type(NAN) : '
'out0_type(max(in0, in1))')
maximum = _core.create_ufunc(
'cupy_maximum',
('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q',
('ee->e', _float_maximum),
('ff->f', _float_maximum),
('dd->d', _float_maximum),
('FF->F', _float_maximum),
('DD->D', _float_maximum)),
'out0 = max(in0, in1)',
preamble=_float_preamble,
doc='''Takes the maximum of two arrays elementwise.
If NaN appears, it returns the NaN.
.. seealso:: :data:`numpy.maximum`
''',
cutensor_op=('OP_MAX', 1, 1), scatter_op='max')
_float_minimum = ('out0 = (isnan(in0) | isnan(in1)) ? out0_type(NAN) : '
'out0_type(min(in0, in1))')
minimum = _core.create_ufunc(
'cupy_minimum',
('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q',
('ee->e', _float_minimum),
('ff->f', _float_minimum),
('dd->d', _float_minimum),
('FF->F', _float_minimum),
('DD->D', _float_minimum)),
'out0 = min(in0, in1)',
preamble=_float_preamble,
doc='''Takes the minimum of two arrays elementwise.
If NaN appears, it returns the NaN.
.. seealso:: :data:`numpy.minimum`
''',
cutensor_op=('OP_MIN', 1, 1), scatter_op='min')
fmax = _core.create_ufunc(
'cupy_fmax',
('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q',
('ee->e', 'out0 = fmax(in0, in1)'),
('ff->f', 'out0 = fmax(in0, in1)'),
('dd->d', 'out0 = fmax(in0, in1)'),
'FF->F', 'DD->D'),
'out0 = max(in0, in1)',
doc='''Takes the maximum of two arrays elementwise.
If NaN appears, it returns the other operand.
.. seealso:: :data:`numpy.fmax`
''')
fmin = _core.create_ufunc(
'cupy_fmin',
('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q',
('ee->e', 'out0 = fmin(in0, in1)'),
('ff->f', 'out0 = fmin(in0, in1)'),
('dd->d', 'out0 = fmin(in0, in1)'),
'FF->F', 'DD->D'),
'out0 = min(in0, in1)',
doc='''Takes the minimum of two arrays elementwise.
If NaN appears, it returns the other operand.
.. seealso:: :data:`numpy.fmin`
''')
_nan_to_num_preamble = '''
template <class T>
__device__ T nan_to_num(T x, T nan, T posinf, T neginf) {
if (isnan(x))
return nan;
if (isinf(x))
return x > 0 ? posinf : neginf;
return x;
}
template <class T>
__device__ complex<T> nan_to_num(complex<T> x, T nan, T posinf, T neginf) {
T re = nan_to_num(x.real(), nan, posinf, neginf);
T im = nan_to_num(x.imag(), nan, posinf, neginf);
return complex<T>(re, im);
}
'''
_nan_to_num = _core.create_ufunc(
'cupy_nan_to_num_',
('????->?', 'bbbb->b', 'BBBB->B', 'hhhh->h', 'HHHH->H',
'iiii->i', 'IIII->I', 'llll->l', 'LLLL->L', 'qqqq->q', 'QQQQ->Q',
('eeee->e',
'out0 = nan_to_num(in0, in1, in2, in3)'),
('ffff->f',
'out0 = nan_to_num(in0, in1, in2, in3)'),
('dddd->d',
'out0 = nan_to_num(in0, in1, in2, in3)'),
('Ffff->F',
'out0 = nan_to_num(in0, in1, in2, in3)'),
('Dddd->D',
'out0 = nan_to_num(in0, in1, in2, in3)')),
'out0 = in0',
preamble=_nan_to_num_preamble,
doc='''Elementwise nan_to_num function.
.. seealso:: :func:`numpy.nan_to_num`
''')
def _check_nan_inf(x, dtype, neg=None):
    """Coerce a user-supplied replacement value for nan_to_num to *dtype*.

    Complex dtypes are checked against their component (real) dtype.  For
    non-float dtypes the replacement collapses to 0; ``x is None`` selects
    the dtype's finfo max/min (sign chosen by *neg*); nan/inf inputs are
    normalized to canonical nan / signed inf.
    """
    if dtype.char in 'FD':
        # Use the matching real dtype for complex outputs.
        dtype = cupy.dtype(dtype.char.lower())
    if dtype.char not in 'efd':
        # Integer/boolean outputs cannot represent nan/inf.
        x = 0
    elif x is None and neg is not None:
        x = cupy.finfo(dtype).min if neg else cupy.finfo(dtype).max
    elif cupy.isnan(x):
        x = cupy.nan
    elif cupy.isinf(x):
        x = cupy.inf * (-1)**(x < 0)
    return cupy.asanyarray(x, dtype)
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
    """Replace NaN with zero and infinity with large finite numbers (default
    behaviour) or with the numbers defined by the user using the `nan`,
    `posinf` and/or `neginf` keywords.

    .. seealso:: :func:`numpy.nan_to_num`
    """
    if not isinstance(x, cupy.ndarray):
        # Scalar input: wrap in a 0-d array so the ufunc can run on it.
        out = cupy.full((), x)
    else:
        out = cupy.empty_like(x) if copy else x
    dtype = out.dtype
    # Normalize the three replacement values to the output dtype.
    nan = _check_nan_inf(nan, dtype)
    posinf = _check_nan_inf(posinf, dtype, False)
    neginf = _check_nan_inf(neginf, dtype, True)
    return _nan_to_num(x, nan, posinf, neginf, out=out)
def real_if_close(a, tol=100):
    """If input is complex with all imaginary parts close to zero, return real
    parts.

    "Close to zero" is defined as `tol` * (machine epsilon of the type for
    `a`).

    .. warning::

        This function may synchronize the device.

    .. seealso:: :func:`numpy.real_if_close`
    """
    if not issubclass(a.dtype.type, cupy.complexfloating):
        return a
    if tol > 1:
        # tol is interpreted as a multiple of machine epsilon.
        f = numpy.finfo(a.dtype.type)
        tol = f.eps * tol
    # cupy.all forces a device synchronization here.
    if cupy.all(cupy.absolute(a.imag) < tol):
        a = a.real
    return a
@cupy._util.memoize(for_each_device=True)
def _get_interp_kernel(is_complex):
    """Build (and memoize per device) the elementwise CUDA kernel behind
    :func:`interp`.

    ``idx`` holds searchsorted(right) results, so ``idx[i] - 1`` is the index
    of the sample at or below ``x[i]``.  ``real_t`` is double for complex
    outputs so slope arithmetic stays in real math.
    """
    in_params = 'raw V x, raw U idx, '
    in_params += 'raw W fx, raw Y fy, U len, raw Y left, raw Y right'
    out_params = 'Z y'  # output dtype follows NumPy's

    if is_complex:
        preamble = 'typedef double real_t;\n'
    else:
        preamble = 'typedef Z real_t;\n'
    preamble += 'typedef Z value_t;\n'
    preamble += cupy._sorting.search._preamble  # for _isnan

    code = r'''
        U x_idx = idx[i] - 1;

        if ( _isnan<V>(x[i]) ) { y = x[i]; }
        else if (x_idx < 0) { y = left[0]; }
        else if (x[i] == fx[len - 1]) {
            // searchsorted cannot handle both of the boundary points,
            // so we must detect and correct ourselves...
            y = fy[len - 1];
        }
        else if (x_idx >= len - 1) { y = right[0]; }
        else {
            const Z slope = (value_t)(fy[x_idx+1] - fy[x_idx]) / \
                            ((real_t)fx[x_idx+1] - (real_t)fx[x_idx]);
            Z out = slope * ((real_t)x[i] - (real_t)fx[x_idx]) \
                    + (value_t)fy[x_idx];
            if (_isnan<Z>(out)) {
                out = slope * ((real_t)x[i] - (real_t)fx[x_idx+1]) \
                      + (value_t)fy[x_idx+1];
                if (_isnan<Z>(out) && (fy[x_idx] == fy[x_idx+1])) {
                    out = fy[x_idx];
                }
            }
            y = out;
        }
    '''
    return cupy.ElementwiseKernel(
        in_params, out_params, code, 'cupy_interp', preamble=preamble)
def interp(x, xp, fp, left=None, right=None, period=None):
    """ One-dimensional linear interpolation.

    Args:
        x (cupy.ndarray): a 1D array of points on which the interpolation
            is performed.
        xp (cupy.ndarray): a 1D array of points on which the function values
            (``fp``) are known.
        fp (cupy.ndarray): a 1D array containing the function values at the
            the points ``xp``.
        left (float or complex): value to return if ``x < xp[0]``. Default is
            ``fp[0]``.
        right (float or complex): value to return if ``x > xp[-1]``. Default is
            ``fp[-1]``.
        period (None or float): a period for the x-coordinates. Parameters
            ``left`` and ``right`` are ignored if ``period`` is specified.
            Default is ``None``.

    Returns:
        cupy.ndarray: The interpolated values, same shape as ``x``.

    .. note::
        This function may synchronize if ``left`` or ``right`` is not already
        on the device.

    .. seealso:: :func:`numpy.interp`

    """

    if xp.ndim != 1 or fp.ndim != 1:
        raise ValueError('xp and fp must be 1D arrays')
    if xp.size != fp.size:
        raise ValueError('fp and xp are not of the same length')
    if xp.size == 0:
        raise ValueError('array of sample points is empty')
    if not x.flags.c_contiguous:
        raise NotImplementedError('Non-C-contiguous x is currently not '
                                  'supported')
    x_dtype = cupy.common_type(x, xp)
    if not cupy.can_cast(x_dtype, cupy.float64):
        raise TypeError('Cannot cast array data from'
                        ' {} to {} according to the rule \'safe\''
                        .format(x_dtype, cupy.float64))

    if period is not None:
        # The handling of "period" below is modified from NumPy's

        if period == 0:
            raise ValueError("period must be a non-zero value")
        period = abs(period)
        left = None
        right = None

        x = x.astype(cupy.float64)
        xp = xp.astype(cupy.float64)

        # normalizing periodic boundaries
        x %= period
        xp %= period
        asort_xp = cupy.argsort(xp)
        xp = xp[asort_xp]
        fp = fp[asort_xp]
        # Wrap one sample at each end so interpolation crosses the seam.
        xp = cupy.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
        fp = cupy.concatenate((fp[-1:], fp, fp[0:1]))
        assert xp.flags.c_contiguous
        assert fp.flags.c_contiguous

    # NumPy always returns float64 or complex128, so we upcast all values
    # on the fly in the kernel
    out_dtype = 'D' if fp.dtype.kind == 'c' else 'd'
    output = cupy.empty(x.shape, dtype=out_dtype)
    # Interval index for each query point; the kernel subtracts one.
    idx = cupy.searchsorted(xp, x, side='right')
    left = fp[0] if left is None else cupy.array(left, fp.dtype)
    right = fp[-1] if right is None else cupy.array(right, fp.dtype)
    kern = _get_interp_kernel(out_dtype == 'D')
    kern(x, idx, xp, fp, xp.size, left, right, output)
    return output
| cupy/cupy | cupy/_math/misc.py | misc.py | py | 16,182 | python | en | code | 7,341 | github-code | 36 |
import cryptoFunc

# Simple interactive driver: ask for an operation and a file, then delegate
# to the crypto helpers.
choice = input('Please type 1 for encrypt or 2 for decrypt: ')
file = input('Please give me a file name: ')

if choice == '1':
    cryptoFunc.encrypt_file(file)
    print('Successful')  # typo fix: was "Successfull"
elif choice == '2':
    cryptoFunc.decrypt_file(file)
    print('Successful')
else:
    # Previously any input fell through and still printed success.
    print('Invalid choice: expected 1 or 2.')
12814302696 | # 곱하기 혹은 더하기 / p312
input = input()
result = 0
for i in input:
if i == '0':
continue
if i == '1':
result += 1
continue
if result == 0:
result += int(i)
else:
result *= int(i)
print(result)
| Girin7716/PythonCoding | pythonBook/Problem Solving/Q2.py | Q2.py | py | 268 | python | ko | code | 1 | github-code | 36 |
37459633466 | #Clases y funciones
#classes = []
#for i in range(10):
# class Dummy:
# def init(self, _name):
# self._name = 'Dummy {}'.format(i)
#
# classes.append(Dummy)
#for item in classes:
# dummy = item()
# print(dummy.name)
#print("Hello World")
class Student:
    """Demo class showing class attributes, nested classes and dunders."""

    # class-level attribute shared by every student
    university = 'Espe'

    class Meta:
        # inner metadata holder, reachable as Student.Meta.name
        name = 'MetaClass'

    def __init__(self, _id, _name, _age, _carrer, _cell_number):
        self.id = _id
        self.name = _name
        self.age = _age
        self.carrer = _carrer

        class Phone:
            """Tiny phone-number wrapper (re-declared on every __init__ call)."""

            def __init__(self, _number):
                self.number = _number

            def __repr__(self):
                return 'Phone({})'.format(self.number)

        self.phone = Phone(_cell_number)

    def __repr__(self):
        return 'Student ({}, {}, {}, {}, {})'.format(
            self.id, self.name, self.age, self.carrer, self.phone)

    def say_name(self):
        print('My name is {}'.format(self.name))
# Build two sample students, have each introduce itself, then show reprs.
student = Student(1, 'Diego', 18, 'Ing Software', '0994651465')
student1 = Student(2, 'Edison', 19, 'Ing Software', '0990316348')
for enrolled in (student, student1):
    enrolled.say_name()
print(student)
print(student1)
print(Student.Meta.name) | DiegoPaez2/POO-2963 | Workshop/First partial/Workshop05/classes and functions.py | classes and functions.py | py | 1,255 | python | en | code | 0 | github-code | 36 |
32933105911 | import numpy as np
from PIL import Image
# Left-to-right gradient: red fades into green over columns 0-255, then
# green fades into blue over columns 256-511.
rainbow = np.zeros((521, 512, 3), 'uint8')
for i in range(0, 256):
    rainbow[:, i, 0] = 255 - i
    rainbow[:, i, 1] = 0 + i
for i in range(256, 512):
    # keep the assigned values inside 0-255: the original wrote 255-i
    # (negative) and 0+i (> 255), which relied on uint8 wraparound and
    # raises OverflowError on modern NumPy
    rainbow[:, i, 1] = 511 - i
    rainbow[:, i, 2] = i - 256
image = Image.fromarray(rainbow)
image.save('rainbow.jpg') | hieumewmew/MultimediaCommunicationExam | bai5/rainbow.py | rainbow.py | py | 299 | python | en | code | 0 | github-code | 36 |
28482960968 | import pandas as pd
import numpy as np
import os, sys
import warnings
import matplotlib.pyplot as plt
import gmplot
from sklearn.cluster import DBSCAN
import random
import json
def remove_invalid_coord(df):
    """Drop rows whose latitude or longitude is exactly zero.

    (A stricter [-90, 90] range check was left commented out in the
    original version.)
    """
    valid = (df['lat'] != 0) & (df['lon'] != 0)
    return df[valid]
def read_data(day='monday', city='chicago', types='crimes'):
    """Read one day's CSV of events into a cleaned DataFrame.

    Rows look like 'datetime,lat,lon,type'.  The result is indexed by the
    parsed datetime, has month/hour helper columns plus an 'export' counter
    column, and rows with zero lat/lon are dropped.
    """
    records = []
    # use a context manager so the file handle is closed (the original
    # opened the file and never closed it)
    with open('data/{0}/{1}_2018_{2}.csv'.format(day, types, city), 'r') as data_file:
        for raw in data_file:
            fields = raw.strip().split(',')
            # parse the timestamp once instead of three times per row
            moment = pd.to_datetime(str(fields[0]), format='%Y/%m/%d %H:%M')
            records.append({
                'datetime': moment,
                'month': moment.month,
                'hour': moment.hour,
                'lat': float(fields[1]),
                'lon': float(fields[2]),
                'type': fields[3].strip(),
                'export': 0,
            })
    df = pd.DataFrame(records)
    df.set_index('datetime', inplace=True)
    return remove_invalid_coord(df)
def read_all_data(city='chicago', types='crimes'):
    """Concatenate the per-day frames for every day of the week."""
    days = ['sunday', 'monday', 'tuesday', 'wednesday',
            'thursday', 'friday', 'saturday']
    # collect all frames and concatenate once: calling pd.concat inside the
    # loop (as the original did) recopies the accumulated frame each time
    frames = [read_data(day, city=city, types=types) for day in days]
    return pd.concat(frames)
def see_density():
    """Aggregate event counts per (month, type) and export them to CSV.

    NOTE(review): the output filename is hard-coded to 'density_austin.csv'
    although read_all_data defaults to chicago — confirm which city this run
    was meant for.
    """
    # Read the data
    df = read_all_data()
    #print(df.head())
    df_month_type = df.groupby(['month', 'type']).count()
    #print(min(df_month_type['export']))
    #print(max(df_month_type['export']))
    crimes = df.groupby('type').all().index
    # for c in crimes:
    #     df_crime = df.query("type == '%s'" % c)
    #     filtered = df_crime.groupby(['month']).count()
    #     plt.figure()
    #     months = ['', 'Jan.', 'Feb.', 'Mar.', 'Apr.', 'May', 'Jun.',
    #             'Jul.', 'Aug.', 'Sep.', 'Oct.', 'Nov.', 'Dec.']
    #     filtered['export'].plot(legend=None, title=c, style='.:')
    #     plt.xlabel('Months')
    #     plt.ylabel('Quantity of Crimes')
    #     plt.xticks(range(13), months, rotation=50)
    #     plt.yticks(range(0, 7000, 500), [x for x in range(0, 7000, 500)])
    #     if not os.path.exists('density'):
    #         os.makedirs('density')
    #     plt.savefig('density/'+ c + '.pdf', bbox_inches="tight", format='pdf')
    #     plt.clf()
    # Export
    df.groupby(['month', 'type']).count()['export'].to_csv('density_austin.csv')
###############################################################################################################
###############################################################################################################
###############################################################################################################
def colors(n):
    """Return n random '#RRGGBB' hex colour strings (one per cluster)."""
    palette = []
    for _ in range(n):
        # each channel is already in 0..255; draw three in r, g, b order
        red = int(random.random() * 256)
        green = int(random.random() * 256)
        blue = int(random.random() * 256)
        palette.append('#{:02X}{:02X}{:02X}'.format(red, green, blue))
    return palette
def plot_heat(clusters, day, city, types):
    """Render every (lat, lon) row of `clusters` as a gmplot heatmap and
    write it to plottest/<city>_<types>_<day>.html.

    NOTE(review): assumes `clusters` is a non-empty DataFrame — the first
    row is used to centre the map.
    """
    plt.clf()
    gmap = gmplot.GoogleMapPlotter(clusters.iloc[0]['lat'], clusters.iloc[0]['lon'], 11)
    lats, longs = [], []
    for indx, cluster in clusters.iterrows():
        lats.append(float(cluster['lat']))
        longs.append(float(cluster['lon']))
    gmap.heatmap(lats, longs)
    if not os.path.exists('plottest'):
        os.makedirs('plottest')
    gmap.draw('plottest/{0}_{1}_{2}.html'.format(city, types, day))
def see_distribution():
    """Cluster all chicago crime coordinates with DBSCAN and heat-map the
    non-noise points (cluster label != -1)."""
    city='chicago'
    types='crimes'
    # for day in ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']:
    #     df = read_data(day, city, types)
    #     df = df.drop(['type', 'hour', 'month', 'export'], axis=1)
    #     clustering = DBSCAN(eps=0.001, min_samples=3).fit_predict(df)
    #     df['cluster'] = clustering
    #     plot_heat(df.query('cluster != -1'), day, city, types)
    df = read_all_data(city, types)
    # keep only the lat/lon columns for the spatial clustering
    df = df.drop(['type', 'hour', 'month', 'export'], axis=1)
    clustering = DBSCAN(eps=0.001, min_samples=3).fit_predict(df)
    df['cluster'] = clustering
    plot_heat(df.query('cluster != -1'), 'all', city, types)
###############################################################################################################
###############################################################################################################
###############################################################################################################
def format_clusters(data):
    """Group labelled points into one list of (lat, lon) tuples per cluster.

    Noise rows (cluster == -1) are dropped.  The original only appended a
    new bucket when a label exceeded the previous one, which misgroups rows
    whenever cluster labels arrive unsorted or with gaps; grouping by label
    and emitting buckets in ascending label order fixes that.
    """
    grouped = {}
    for _, row in data.query('cluster > -1').iterrows():
        grouped.setdefault(row['cluster'], []).append((row['lat'], row['lon']))
    return [grouped[label] for label in sorted(grouped)]
def get_coords(cluster):
    """Split a list of (lat, lon) pairs into parallel lat/lon lists."""
    lats = [point[0] for point in cluster]
    lons = [point[1] for point in cluster]
    return lats, lons
def plot_dots(clusters, day, city, types, each):
    """Scatter every cluster in its own random colour on a Google map and
    write it to plottest/<city>_<types>_<day>_<each>_dots.html.

    NOTE(review): `gmap` is only created when the first cluster is
    non-empty, so an empty first cluster would raise NameError below —
    confirm callers always pass populated clusters.
    """
    plt.clf()
    if len(clusters) > 0 and len(clusters[0]) > 0:
        gmap = gmplot.GoogleMapPlotter(float(clusters[0][0][0]), float(clusters[0][0][1]), 11)
    color_list = colors(len(clusters))
    indx = 0
    for cluster in clusters:
        lat, lon = get_coords(cluster)
        gmap.scatter(lat, lon, color_list[indx], edge_width=5, marker=False)
        indx += 1
        #break
    if not os.path.exists('plottest'):
        os.makedirs('plottest')
    gmap.draw('plottest/{0}_{1}_{2}_{3}_dots.html'.format(city, types, day, each))
def load_clusters(day):
    """Load the pre-computed cluster JSON stored next to this module
    (clusters/<day>.json)."""
    # build the path with os.path.join instead of manual '/'-concatenation
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                        'clusters', '{0}.json'.format(day))
    with open(path, 'r') as cluster_file:
        return json.load(cluster_file)
def see_maps():
    """Plot the stored monday austin crash clusters as coloured dot maps.

    NOTE(review): the 'unkown' key spelling presumably matches the key used
    when the JSON was generated — verify before renaming.
    """
    city='austin'
    types='crashes'
    day='monday'
    clusters = load_clusters(day)['{0}_2018_{1}'.format(types, city)]['January']['unkown']
    for each in clusters:
        plot_dots(clusters[each], day, city, types, each)
# Run the analysis only when executed as a script; the original also ran it
# as a side effect of importing the module.
if __name__ == '__main__':
    see_distribution()
    # see_maps()
| lucaslzl/ponche | timewindow/lookdata.py | lookdata.py | py | 5,789 | python | en | code | 0 | github-code | 36 |
23702793306 | # This is just a sample program to show you how to do
# basic image operations using python and the Pillow library.
#
# By Eriya Terada, based on earlier code by Stefan Lee,
# lightly modified by David Crandall, 2020
# Import the Image and ImageFilter classes from PIL (Pillow)
from PIL import Image, ImageFilter, ImageDraw, ImageFont
import random
import numpy as np
import sys
# Step 3 Convert image to gray scale
def grayscale_pad(image, padding_size):
    """Open `image`, convert it to 8-bit grayscale, and return it centred in
    a white canvas with `padding_size` pixels of border on every side.

    The padded result is also written to gray.png.
    """
    im = Image.open(image).convert("L")
    new_width = (2 * padding_size) + im.width
    new_height = (2 * padding_size) + im.height
    # white canvas; only the interior window is overwritten below
    gray_im = Image.new("L", (new_width, new_height), color=255)
    for x in range(new_width):
        for y in range(new_height):
            # copy source pixels into the interior.  The original used
            # strict '>' comparisons, which skipped the first row and
            # column of the source image (they stayed white).
            if padding_size <= x < new_width - padding_size and \
                    padding_size <= y < new_height - padding_size:
                l_value = im.getpixel((x - padding_size, y - padding_size))
                gray_im.putpixel((x, y), l_value)
    # Save the image
    gray_im.save("gray.png")
    return gray_im
# Step 4 Convolution with separable kernel
def convolve(image, hx, hy):
    """Convolve a PIL image with a separable kernel: `hy` applied down the
    columns first, then `hx` along the rows.  Returns a new PIL image.

    NOTE(review): the kernels are applied un-normalised and the result is
    multiplied by 255 before the uint8 cast, so values can wrap — confirm
    callers expect that scaling.
    """
    im_width = image.width
    im_height = image.height
    hx_len = len(hx)
    hy_len = len(hy)
    image=np.array(image).astype(np.uint8)
    new_image = np.zeros(image.shape)
    vertimage = np.zeros(image.shape)
    # convolve vertically
    for x in range(im_height-hy_len+1):
        for y in range(im_width):
            row_sum=0
            col_sum=0
            for v in range(hy_len):
                row_sum+=image[x+v][y]*hy[v]
            vertimage[x][y]=row_sum
    # convolve horizontally
    # NOTE(review): this first `img` is immediately overwritten below and
    # looks like leftover debug code
    img = Image.fromarray(np.uint8(vertimage * 255))
    for x in range(im_height):
        for y in range(im_width-hx_len+1):
            row_sum=0
            col_sum=0
            for h in range(hx_len):
                col_sum+=vertimage[x][y+h]*hx[h]
            new_image[x][y]=col_sum
    img = Image.fromarray(np.uint8(new_image * 255))
    # img.show()
    return img
# Canny edge detection
def sobel_edge_detection(gray_img):
    """Compute Sobel gradients over a grayscale PIL image/array, display the
    magnitude image, and return a binary (0/1) edge map as a float array.

    Side effects: prints both shapes and pops up the magnitude image via
    img.show().
    """
    gray_img=np.array(gray_img).astype(np.uint8)
    # Sobels filter
    v = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
    h = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
    print(gray_img.shape)
    im_height, im_width = gray_img.shape
    new_image_h = np.zeros(gray_img.shape)
    new_image_v = np.zeros(gray_img.shape)
    new_image = np.zeros(gray_img.shape)
    for i in range(0, im_height-3+1):
        for j in range(0, im_width-3+1):
            horizontalGrad=0
            verticalGrad=0
            for x in range(h.shape[0]):
                for y in range(h.shape[1]):
                    horizontalGrad+=h[x][y]*gray_img[i+x,j+y]
            new_image_h[i, j] = abs(horizontalGrad)
            for x in range(v.shape[0]):
                for y in range(v.shape[1]):
                    verticalGrad+=v[x][y]*gray_img[i+x,j+y]
            new_image_v[i, j] = abs(verticalGrad)
            # Edge Magnitude
            edge_mag = np.sqrt(pow(horizontalGrad, 2.0) + pow(verticalGrad, 2.0))
            new_image[i, j] = edge_mag
    # NOTE(review): magnitudes are scaled by 255 then cast to uint8, so large
    # gradients wrap before display
    img = Image.fromarray(np.uint8(new_image * 255))
    img.show()
    # Create binary edge map
    new_image[new_image!= 0.0]=1
    new_image[new_image== 0.0]=0
    print(new_image.shape)
    return new_image
def get_region_colors(im, t_height, t_width, coordinate):
    """Return a t_height x t_width grid of pixel values read from `im`.

    `coordinate` is the (row, col) of the region's top-left corner; pixels
    are fetched with im.getpixel((col, row)).
    """
    row0, col0 = coordinate
    return [
        [im.getpixel((col, row)) for col in range(col0, col0 + t_width)]
        for row in range(row0, row0 + t_height)
    ]
def compareImages(region, template):
    """Similarity score between two equally-sized grayscale pixel grids.

    Bright pixels matching bright and dark matching dark both contribute;
    the score uses the 0-255 grayscale range directly.
    """
    rows = len(template)
    cols = len(template[0])
    total = 0
    for r in range(rows):
        for c in range(cols):
            a = region[r][c]
            b = template[r][c]
            total += a * b + (255 - a) * (255 - b)
    return total
'''
Function to calculate hamming distance i.e. step 5 in the assignment
'''
def hammingDist(im, t_im, combine, color, text_file_list, symbol_type, p, dist):
    """Scan grayscale image `im` for regions similar to template `t_im`.

    Regions scoring >= 87% of the template's self-similarity are kept after
    a 3x3 neighbourhood de-duplication, outlined in `color` on a copy of
    `combine`, labelled with a pitch letter (filled notes only, looked up in
    the `p` mapping within dist/2 pixels) and appended to text_file_list as
    [x, y, height, width, symbol_type, pitch, confidence%].

    Returns (annotated_image, text_file_list).
    """
    im_width = im.width
    im_height = im.height
    t_width = t_im.width
    t_height = t_im.height
    # get the template and it's score to compare with image regions later on
    t_region = get_region_colors(t_im, t_height, t_width, (0,0))
    perfect_score = compareImages(t_region, t_region)
    #t_found = Image.new("L", (im_width, im_height), color=255)
    combine = combine.copy().convert("RGB")
    d = {}   # (row, col) -> best score, after near-duplicate suppression
    # loop through the image
    for i in range(im_height-t_height):
        for j in range(im_width-t_width):
            # get image region
            im_region = get_region_colors(im, t_height, t_width, (i, j))
            # score the region
            region_score = compareImages(im_region, t_region)
            # compare the image region score to the template score
            if region_score >= (0.87 * perfect_score):
                max_val = region_score
                it_val = (i,j)
                # keep only the best hit in a 3x3 neighbourhood
                for y in range(3):
                    for z in range(3):
                        if (i-y,j-z) in d:
                            if d[(i-y,j-z)] >= region_score:
                                max_val = region_score
                                it_val = (i-y,j-z)
                            else:
                                del d[(i-y,j-z)]
                        elif (i-y,j+z) in d:
                            if d[(i-y,j+z)] >= region_score:
                                max_val = region_score
                                it_val = (i-y,j+z)
                            else:
                                del d[(i-y,j+z)]
                d[it_val] = max_val
    for k,v in d.items():
        i,j = k
        region_score = v
        draw = ImageDraw.Draw(combine)
        top_left = (j,i)
        bottom_right = (j + t_width, i + t_height)
        #draw.rectangle(((100, 100), (200, 200)), (0, 255, 0))
        draw.rectangle((top_left, bottom_right), fill=None, outline = color,width=2)
        # NOTE(review): this local shadows the module-level pitch() function
        pitch = '_'
        if symbol_type == 'filled_note':
            for q in range(int(dist/2)):
                if q+i in p:
                    pitch = p[q+i]
                elif i-q in p:
                    pitch = p[i-q]
        font = ImageFont.truetype("/usr/share/fonts/dejavu/DejaVuLGCSansMono.ttf")
        # font = ImageFont.truetype("/usr/share/fonts/msttcorefonts/arial.ttf") load_default()
        draw.text((j-10, i-2),pitch,(255,0,0),font=font)
        text_file_list.append([j, i, t_height, t_width, symbol_type, pitch, float(round((region_score/perfect_score*100), 2))])
    # combine.save("step5.png")
    return combine, text_file_list
# Step 6: Template matching using convolution
def template_matching(image, template):
    """Match a binary edge template against a binary edge map.

    Builds D, the per-pixel distance to the nearest edge pixel, scores every
    template placement by how close image edges lie to template edges, draws
    a red box for placements scoring >= 95% of the template's self-score,
    and writes the annotated image to output-6.png.

    NOTE(review): reads the module-level global `im_name` and shadows the
    builtin `sum` with a local accumulator.
    """
    m=template.shape[0]
    n=template.shape[1]
    F=np.zeros((image.shape))
    D=np.zeros((image.shape))
    # X=np.array(image)
    #
    # X[X==0]=np.inf
    # X[X==1]=0
    # Find the coordinates of edges
    v,w=np.where(image!=0)
    loc=np.stack((v,w),axis=1)
    # Find coordinates of whole image
    v1,w1=np.where(image==0)
    loc1=np.stack((v1,w1),axis=1)
    loc2=np.vstack((loc,loc1))
    # Calculate D matrix which stores the distance of each pixel from its nearest edge pixel
    temp=np.zeros(loc.shape[0])
    for i in range(loc2.shape[0]):
        temp=np.sqrt((loc2[i][0]-loc[:,0])**2+(loc2[i][1]-loc[:,1])**2)
        D[loc2[i][0],loc2[i][1]]=np.min(temp)
    img = Image.open(im_name)
    draw = ImageDraw.Draw(img)
    sum=0
    for k in range(0,m):
        for l in range(0,n):
            sum+=(template[k][l])*(template[k][l])
    score=sum
    max_D=np.max(D)
    # Calculate template scoring
    for i in range(0,image.shape[0]-m+1):
        for j in range(0,image.shape[1]-n+1):
            sum=0
            for k in range(0,m):
                for l in range(0,n):
                    sum+=((template[k][l])*((max_D-D[i+k][j+l])/max_D))
            F[i][j]=sum
            if sum>=0.95*score:
                draw.rectangle(((j,i), (j+n,i+m)), fill=None,outline="red")
    img.save("output-6.png")
def hough_line(edge):
    """Accumulate Hough-transform votes for straight lines in a binary edge
    map (pixels equal to 1 are edges).

    Returns an accumulator of shape (2*diag+1, 180): one column per integer
    theta in [0, 180) degrees, rows offset by `diag` so negative rho values
    index valid rows.

    The original version did not run at all: it referenced the undefined
    names `theta` and `t`, used the never-imported `math` module, and
    computed the diagonal with `shape*2` instead of `shape**2`; its uint8
    accumulator would also overflow past 255 votes.
    """
    thetas = np.arange(0, 180, 1)
    cos = np.cos(np.deg2rad(thetas))
    sin = np.sin(np.deg2rad(thetas))
    # maximum possible |rho| is the image diagonal
    rho_max = int(round(np.hypot(edge.shape[0], edge.shape[1])))
    accumulator = np.zeros((2 * rho_max + 1, len(thetas)), dtype=np.uint64)
    edge_pixels = np.where(edge == 1)
    coordinates = list(zip(edge_pixels[0], edge_pixels[1]))
    for y, x in coordinates:
        for t in range(len(thetas)):
            rho = int(round(x * cos[t] + y * sin[t]))
            accumulator[rho + rho_max, t] += 1
    #print(np.max(accumulator))
    return accumulator
def hough(image):
    """Estimate staff-line spacing and staff positions from a sheet-music
    image using a row-darkness histogram (not a true Hough transform — the
    classic accumulator version is left commented out below).

    Returns (dist, lines): the spacing between staff lines in pixels and
    the y coordinate of the top line of each staff.
    """
    # im = image.load()
    # im_h, im_w = image.size
    # th_val, r_val = 500, 1200
    # hough_im = Image.new("L", (th_val, r_val), 255)
    # him = hough_im.load()
    # rho = {}
    # rmax = hypot(im_h, im_w)
    # dr = rmax / int(r_val/2)
    # dth = pi / th_val
    # for x in range(im_h):
    #     for y in range(im_w):
    #         if im[x, y] != 255:
    #             for m in range(th_val):
    #                 th = dth * m
    #                 r = x*cos(th) + y*sin(th)
    #                 n = int(r_val/2) + int(r/dr+0.5)
    #                 him[m, n] -= 1
    dist = 0
    img = image.convert('L') #conversion to gray scale
    bw = img.point(lambda x: 0 if x<128 else 255, '1')
    img_bin = np.array(bw).astype(np.uint8)
    x, y = img_bin.shape
    # count black pixels per row
    d = {}
    for i in range(0,x):
        d[i] = 0
        for j in range(y):
            if img_bin[i][j]==0:
                d[i] +=1
    # rows that are more than half black are staff-line rows
    l = [k for k,v in d.items() if v > y/2]
    # spacing = first repeated gap between consecutive line rows
    for i in range(0,len(l)-1):
        if l[i]+1 != l[i+1]:
            if dist == 0:
                dist = l[i+1]-l[i]
            elif dist == l[i+1]-l[i]:
                break
    # a new staff starts whenever the gap exceeds two line spacings
    lines = [l[0]]
    p = l[0]
    for i in range(1,len(l)):
        if l[i] - p > dist*2:
            lines.append(l[i])
            p = l[i]
    return dist, lines
def rescale(template, dist):
    """Load `template` as grayscale and scale it so its height equals dist,
    keeping the aspect ratio."""
    img = Image.open(template).convert("L")
    ratio = dist / img.height
    return img.resize((int(img.width * ratio), int(img.height * ratio)))
def pitch(lines, dist):
    """Map vertical pixel positions to note letters for each staff.

    `lines` holds the y coordinate of the top line of each staff and `dist`
    is the staff-line spacing; every half-`dist` step downwards is one
    scale letter lower.  Staves alternate treble (1st, 3rd, ...) and bass
    (2nd, 4th, ...).

    Fixes two letter-sequence typos in the original table: the bass staff
    jumped B -> G (skipping A) at +3.5*dist, and the treble staff labelled
    +5*dist as B instead of C — both broke the strict one-letter-per-half-
    space descent the rest of the table follows.
    """
    letters = 'ABCDEFG'

    def descend(start_letter, top, first_offset, steps):
        # walk down from top+first_offset in half-dist steps, one scale
        # letter lower per step
        idx = letters.index(start_letter)
        mapping = {}
        for step in range(steps):
            mapping[top + first_offset + step * dist * 0.5] = letters[idx]
            idx = (idx - 1) % 7
        return mapping

    p = {}
    for staff_index, top in enumerate(lines):
        if staff_index % 2 == 0:
            # treble staff: G in the space above the top line, top line = F
            p.update(descend('G', top, -dist * 0.5, 12))
        else:
            # bass staff: D a line-and-a-half above the top line (= A)
            p.update(descend('D', top, -dist * 1.5, 13))
    return p
if __name__ == '__main__':
    # usage: python omr.py <image-name>; images/templates live in ../test-images
    music_file = sys.argv[1]
    im_name = "../test-images/" + music_file
    template1 = "../test-images/template1.png"
    template2 = "../test-images/template2.png"
    template3 = "../test-images/template3.png"
    template4 = "../test-images/template4.png"
    template5 = "../test-images/template5.png"
    image = Image.open(im_name)
    # finding the scale of the template
    dist, lines = hough(image)
    # rescale each template relative to the detected staff-line spacing
    temp1 = rescale(template1,dist)
    temp2 = rescale(template2,dist*3)
    temp3 = rescale(template3,dist*2.5)
    temp4 = rescale(template4,dist*3)
    temp5 = rescale(template5,dist*8)
    gray_im = image.convert("L")
    # temp1 = Image.open(template1).convert("L")
    # temp2 = Image.open(template2).convert("L")
    # temp3 = Image.open(template3).convert("L")
    # hx=[1,2,1]
    # hy=[1,2,1]
    # image=convolve(gray_im, hx, hy)
    # edge1=sobel_edge_detection(gray_im)
    # edge2=sobel_edge_detection(temp1)
    # template_matching(edge1,edge2)
    result_list = []
    l =[]
    p = pitch(lines,dist)
    # each pass annotates the previous result with one template's matches.
    # NOTE(review): templates 4 and 5 reuse the 'quarter_rest' label and an
    # unused results list — looks like copy-paste; confirm intended labels.
    result1, result_list = hammingDist(gray_im, temp1, gray_im, "red", result_list, "filled_note", p, dist)
    result2, result_list = hammingDist(gray_im, temp2, result1, "green", result_list, "eighth_rest", p, dist)
    result3, result_list = hammingDist(gray_im, temp3, result2, "blue", result_list, "quarter_rest", p, dist)
    result4, l = hammingDist(gray_im, temp4, result3, "yellow", l, "quarter_rest", p, dist)
    result5, l = hammingDist(gray_im, temp5, result4, "pink", l, "quarter_rest", p, dist)
    text_list = result_list
    np.savetxt("detected.txt", text_list, fmt="%s") # Saving the results in a txt file
    result5.save("detected.png")
| dhruvabhavsar/Optical-Music-Recognition | python-sample/omr.py | omr.py | py | 13,907 | python | en | code | 0 | github-code | 36 |
23930369932 | import sys
import threading
lastId = 0 # module-wide counter; aObject.__init__ increments it to hand out unique object ids
class aObject:
    """Base runtime object: stores a name, value, type tag, an attribute
    dict and a process-unique id drawn from the module counter."""
    def __init__(self, name, value, type):
        global lastId
        lastId += 1
        self.name = name
        self.value = value
        self.aType = type
        self.id = lastId
        self.attributes = {}
class aString(aObject):
    """String-valued object: coerces the supplied value to str."""
    def __init__(self, name, value):
        super().__init__(name, value, "string")
        # super() stored the raw value; replace it with the coerced form
        # (the original also redundantly re-assigned name and aType)
        self.value = str(value)
class aNum(aObject):
    """Number-valued object; super() already stores name and value, so the
    original's re-assignments were redundant and are dropped."""
    def __init__(self, name, value):
        super().__init__(name, value, "number")
class aBool(aObject):
    """Boolean-valued object."""
    # shared aNum singletons, created once at class-definition time (they
    # consume ids from the global counter before any aBool is instantiated)
    false = aNum("false", 0)
    true = aNum("true", 1)
    def __init__(self, name, value):
        super().__init__(name, value, "bool")
        # re-assignments below repeat what super() already did
        self.name = name
        self.value = value
class aArray(aObject):
    """Array-valued object; every instance starts with its own empty list.

    The fresh [] literal is evaluated per call, so instances never share
    storage.  (The original then redundantly re-assigned name, value and
    aType — all already set by super().)
    """
    def __init__(self, name):
        super().__init__(name, [], "array")
class aError(aObject):
    """Error object; aRaise reports the error and terminates the process."""
    def __init__(self, name):
        super().__init__(name, name, "error_obj")
        self.name = name
        # overrides the "error_obj" tag set by super() just above
        self.aType = "error"
    def aRaise(self, text, ln):
        # print the error, wait for the user, then exit the interpreter
        print("EXCEPTION: " + self.name + ": " + text + " on line " + str(ln) + "\n")
        input("Press enter to exit...\n")
        sys.exit()
class aStream(aObject):
    """Stream object: writing to .value invokes the stream's action, and
    reading .value yields the object's id."""
    def act(self, val):
        # class-level default action (no-op); replaced per-instance in
        # __init__.  During super().__init__ the property setter below fires
        # before the instance attribute exists, so this no-op is what runs.
        pass
    def getVal(self):
        return self.id
    def delVal(self):
        pass
    def setVal(self, val):
        self.act(val)
    def __init__(self, name, act):
        super().__init__(name, 0, "stream")
        self.act = act
        self.attributes["act"] = self.act
    # make .value route through the accessors above
    value = property(getVal, setVal, delVal, )
# Shared error object used to report failed conversions.
TypeErr = aError("TypeError")

def convertObject(obj, newType, ln, attr=None):
    """Convert `obj` to 'string', 'num' or 'stream' (stream needs
    attr['act']); reports via TypeErr.aRaise on a failed value cast."""
    try:
        if newType == "string":
            return aString(obj.name, str(obj.value))
        elif newType == "num":
            return aNum(obj.name, int(obj.value))
        elif newType == "stream":
            return aStream(obj.name, attr["act"])
    except TypeError:
        # was obj.type: aObject stores its tag in aType, so the original
        # raised AttributeError while trying to report the conversion error
        TypeErr.aRaise("Invalid Value For Type " + obj.aType, ln)
| Krobix/Ametscript | ametscript/classes.py | classes.py | py | 1,956 | python | en | code | 1 | github-code | 36 |
def dfs(graph, node, visited, stack):
    """Post-order DFS: append `node` to `stack` after all its unvisited
    successors are finished, and return the shared stack."""
    visited.add(node)
    for successor in graph[node]:
        if successor not in visited:
            dfs(graph, successor, visited, stack)
    stack.append(node)
    return stack
def topological_order(edges, n):
    """Return a topological ordering of nodes 1..n (reverse of the DFS
    post-order finish stack)."""
    adjacency = {node: set() for node in range(1, n + 1)}
    for src, dst in edges:
        adjacency[src].add(dst)
    seen = set()
    finish_stack = []
    for node in adjacency:
        if node not in seen:
            order = dfs(adjacency, node, seen, finish_stack)
    return order[::-1]
# Sample input; the expected topological order is [1, 2, 3, 5, 4]
n = 5
Edges = [[1,2],[1,3],[2,3],[3,4],[4,2],[3,5]]
# [1, 2, 3, 5, 4]
# n = 5
# Edges = [[1,2],[1,3],[2,3],[4,2],[3,4],[3,5]]
# # [6, 1, 2, 3, 5, 4]
# n=9
# Edges = [[3,2],[3,7],[2,1],[1,7],[1,6],[6,5],[7,6],[7,5],[5,4],[8,9]]
print(topological_order(Edges, n)) | archanakalburgi/Algorithms | summer_prep/graphs/topological_dfs.py | topological_dfs.py | py | 822 | python | en | code | 1 | github-code | 36 |
41703057518 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the Blog model's table.
    # Schema changes belong in models.py and should be re-generated with
    # makemigrations rather than edited here by hand.

    dependencies = [
        ('portfolio', '0006_auto_20160109_0000'),
    ]

    operations = [
        migrations.CreateModel(
            name='Blog',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('content', models.TextField()),
                ('date_ts', models.DateField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| zachswift615/zachswift | portfolio/migrations/0007_blog.py | 0007_blog.py | py | 701 | python | en | code | 0 | github-code | 36 |
def is_prime(num):
    """Return True when num is prime.

    The original flag-based script classified every even number — including
    2 — as not prime, and looped over all candidate divisors even after a
    factor was found.
    """
    if num < 2:
        return False
    if num % 2 == 0:
        return num == 2  # 2 is the only even prime
    for divisor in range(3, int(num ** 0.5) + 1, 2):
        if num % divisor == 0:
            return False
    return True

if __name__ == '__main__':
    n = int(input('Digite um número: '))
    if is_prime(n):
        print('Esse número é primo!')
    else:
        print('Esse número não é primo!')
| github-felipe/ExerciciosEmPython-cursoemvideo | PythonExercicios/ex052.py | ex052.py | py | 335 | python | pt | code | 0 | github-code | 36 |
16411706948 | import os
import shutil
import numpy as np
import cv2
import random
import copy
from keras.models import Sequential
from keras.layers.core import Dense, Flatten, Dropout
import tensorflow as tf
def qpixmap_to_array(qtpixmap):
    """Convert a Qt QPixmap into an HxWx3 uint8 numpy array.

    NOTE(review): the width is derived from bytesPerLine to account for Qt
    row padding, and the trailing channel is dropped — presumably BGRA with
    alpha discarded; confirm the channel order against the Qt image format.
    """
    # convert the QPixmap to a QImage and view its raw buffer as an array
    img = qtpixmap.toImage()
    temp_shape = (img.height(), img.bytesPerLine() * 8 // img.depth())
    temp_shape += (4,)
    ptr = img.bits()
    ptr.setsize(img.byteCount())
    result = np.array(ptr, dtype=np.uint8).reshape(temp_shape)
    # keep only the first three channels
    result = result[..., :3]
    return result
def img_to_candy(img):
    """Convert a BGR image to grayscale, then return its Canny edge map
    (thresholds 100/200)."""
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return cv2.Canny(grayscale, 100, 200)
def create_model(input_shape, output_dim, hidden_layer: dict):
    """Build a Keras Sequential classifier from a layer-layout dict.

    hidden_layer may contain 'convolutional_layer' (not implemented yet)
    and 'fully_connected_layer' (a list of Dense unit counts, each followed
    by Dropout 0.2); the output is a softmax of size output_dim.
    """
    convolutional_layer = hidden_layer.get("convolutional_layer")
    fully_connected_layer = hidden_layer.get("fully_connected_layer")
    # build the model
    model = Sequential()
    model.add(Flatten(input_shape=input_shape))
    if convolutional_layer is not None:
        # to be implemented
        # handle the convolutional layers
        pass
    if fully_connected_layer is not None:
        # handle the fully-connected layers
        for index, item in enumerate(fully_connected_layer):
            model.add(Dense(item, activation='relu'))
            model.add(Dropout(0.2))
    model.add(Dense(output_dim, activation='softmax'))
    return model
def model_mutation():
    """Placeholder: planned model-mutation strategies (not implemented)."""
    # Model mutation — degree of change from low to high:
    # 1. full inherit/copy — retrain with a lower learning rate
    # 2. full inherit/copy — retrain
    # 3. same layer count, structure re-ordered — retrain
    # 4. shrink the structure by 5% — retrain
    # 5. grow the structure by 5% — retrain
    pass
def arr_mutation_rearrange(arr_old: list):
    """Return a randomly re-ordered copy of arr_old (e.g. [1,2,3] -> [2,1,3]);
    the input list is left untouched."""
    shuffled = copy.deepcopy(arr_old)
    random.shuffle(shuffled)
    return shuffled
def arr_mutation_merge(arr_old: list):
    """Merge two randomly chosen elements into one, shrinking the list by
    one entry (e.g. [1,2,3] -> [3,3] or [1,5]); lists of length <= 1 are
    returned as an unchanged copy."""
    merged = copy.deepcopy(arr_old)
    if len(merged) <= 1:
        return merged
    keep, absorb = random.sample(range(0, len(merged)), 2)
    merged[keep] += merged[absorb]
    del merged[absorb]
    return merged
def arr_mutation_split(arr_old: list):
    """Split one randomly chosen element greater than 1 into two positive
    parts, growing the list by one (e.g. [3,4] -> [1,3,3] or [2,2,3]);
    returns an unchanged copy when nothing is splittable."""
    result = copy.deepcopy(arr_old)
    splittable = [i for i, v in enumerate(result) if v > 1]
    if not splittable:
        # nothing in the list can be split
        return result
    pos = random.sample(splittable, 1)[0]
    total = result[pos]
    first_part = random.randint(1, total - 1)
    second_part = total - first_part
    result[pos:pos + 1] = [first_part, second_part]
    return result
def arr_mutation_increase(arr_old: list):
    """Grow one randomly picked element by ~5% (minimum step of 1)."""
    mutated = copy.deepcopy(arr_old)
    pos = random.randint(0, len(mutated) - 1)
    step = int(mutated[pos] * 0.05) or 1
    mutated[pos] += step
    return mutated
def arr_mutation_decrease(arr_old: list):
    """Shrink one randomly picked element by ~5% (minimum step of 1),
    flooring the result at 1."""
    mutated = copy.deepcopy(arr_old)
    pos = random.randint(0, len(mutated) - 1)
    step = int(mutated[pos] * 0.05) or 1
    mutated[pos] = max(mutated[pos] - step, 1)
    return mutated
def hidden_layer_mutation(hidden_layer: dict):
    """Produce the original layout plus five mutated variants of the
    fully-connected layout; the convolutional layout passes through
    untouched in every variant."""
    conv = hidden_layer.get("convolutional_layer")
    fc = hidden_layer.get("fully_connected_layer")
    mutators = [
        ("origin", copy.deepcopy),
        ("mutations_rearrange", arr_mutation_rearrange),
        ("mutations_merge", arr_mutation_merge),
        ("mutations_split", arr_mutation_split),
        ("mutations_increase", arr_mutation_increase),
        ("mutations_decrease", arr_mutation_decrease),
    ]
    return [
        {
            "mutation_type": tag,
            "convolutional_layer": conv,
            "fully_connected_layer": mutate(fc),
        }
        for tag, mutate in mutators
    ]
def model_save(model, model_path):
    """Save a TensorFlow model to model_path (SavedModel format)."""
    # save the model; create the directory first if it is missing
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    tf.saved_model.save(model, model_path)
def model_load(model_path):
    """Load a TensorFlow SavedModel from model_path.

    NOTE(review): a missing path terminates the whole process with
    exit(-1) instead of raising — callers cannot recover from this.
    """
    # load the model
    if not os.path.exists(model_path):
        print(f"[{model_path}] is not exists ,exit")
        exit(-1)
    model = tf.saved_model.load(model_path)
    return model
def create_folder(folder_path):
    """Create folder_path (including parents) if it does not exist yet.

    (The original comment claimed the function deletes a model folder —
    it creates one.)  exist_ok makes the exists-then-create sequence
    race-free.
    """
    os.makedirs(folder_path, exist_ok=True)
def remove_folder(folder_path):
    """Recursively delete folder_path if it exists; silently no-op when it
    is already gone."""
    if not os.path.exists(folder_path):
        return
    shutil.rmtree(folder_path)
if __name__ == '__main__':
    # quick manual demo of the mutation helpers
    # NOTE(review): this first assignment is dead — it is overwritten on the
    # next line
    a = [100, 101, 102, 103, 104, 105, 106, 107, 108]
    a = [1, 2, 3]
    print(a)
    b = arr_mutation_merge(a)
    c = arr_mutation_split(a)
    print(b)
    print(c)
| zhangxinzhou/game_explorer | game01_dino/new_test/game_utils.py | game_utils.py | py | 5,865 | python | en | code | 0 | github-code | 36 |
10401144210 | #!/usr/bin/env python
"""
Testtool om een lokale HTTP server te starten die verbinding maakt
met dvs-daemon. Niet geschikt voor productie! Gebruik daar WSGI voor.
"""
import bottle
import argparse
import dvs_http_interface
import logging
# Initialize argparse
parser = argparse.ArgumentParser(description='DVS HTTP interface test tool')
parser.add_argument('-s', '--server', action='store', default='127.0.0.1', help='DVS server (standaard 127.0.0.1)')
parser.add_argument('-p', '--port', action='store', default='8120', help='DVS poort (standaard 8120)')
args = parser.parse_args()
# point the HTTP interface module at the requested DVS daemon
dvs_http_interface.dvs_client_server = "tcp://%s:%s" % (args.server, args.port)
# Set up the logger:
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.info("Server: %s", dvs_http_interface.dvs_client_server)
# development mode only — the module docstring says to use WSGI in production
bottle.debug(True)
bottle.run(host='localhost', port=8080, reloader=True) | PaulWagener/rdt-infoplus-dvs | dvs-http.py | dvs-http.py | py | 906 | python | nl | code | null | github-code | 36 |
def fatorial(num=1, show=False):
    """Compute num! iteratively.

    :param num: number whose factorial is computed
    :param show: when True, print the expansion (e.g. '5 x 4 x 3 x 2 x 1 = ')
    :return: the factorial of num
    """
    produto = 1
    for fator in range(num, 0, -1):
        if show:
            print(fator, end=' x ' if fator > 1 else ' = ')
        produto *= fator
    return produto
# Read a number from the user and print its factorial with the expansion.
n = int(input('Digite um número: '))
print(f'{fatorial(n, show=True)}')
| Kaue-Romero/Python_Repository | Exercícios/exerc_102.py | exerc_102.py | py | 484 | python | pt | code | 0 | github-code | 36 |
7111352063 | import urllib
import urllib2
from django import template
from django.conf import settings
from django.template.defaultfilters import truncatewords
from django.utils.html import strip_tags
from django.utils.safestring import mark_safe
from utils.acm_auth import get_ip
register = template.Library()  # library Django uses to register the filters/tags below
def fix_trunc(text):
    """Drop the ' ...' suffix that truncatewords leaves behind and append a
    tight ellipsis with no leading space."""
    return text[:-4] + "..."
@register.filter
def get_meta(obj):
    """ Returns the verbose name from the model's Meta options. """
    return obj._meta.verbose_name
@register.filter
def get_title(article, chars):
    """ Return the article title truncated to the first `chars` characters
    (passing None returns the full title). """
    return article.get_title()[:chars]
@register.simple_tag(takes_context=True)
def get_video_url(context, video):
    """ This filter takes an article object, and an IP address to return an
    embedable video URL for videos from the DL.

    Python 2 code: relies on urllib.urlencode and urllib2.  The upstream
    service receives the ACM session ids plus the caller's IP and responds
    with the embeddable URL in the body.
    """
    request = context['request']
    session = request.session
    # append the auth query string with the right joiner for the base URL
    video_url = "%(video)s%(joiner)s%(query)s" % {
        'video': video,
        'joiner': '&' if '?' in video else '?',
        'query': urllib.urlencode({
            'CFID': session[settings.ACM_SESSION_VARS['CFID']],
            'CFTOKEN': session[settings.ACM_SESSION_VARS['CFTOKEN']],
            'ip': get_ip(request),
            'websvc': 1,
        }),
    }
    opener = urllib2.build_opener()
    opener.addheaders = [('User-agent', settings.ACM_USER_AGENT)]
    return opener.open(video_url).read().strip()
@register.simple_tag(takes_context=True)
def get_article_body(context, article):
    """ Gets the body of the DL article using the user's IP address.

    NOTE(review): the remote body is returned through mark_safe, i.e.
    rendered without escaping — confirm the source is trusted HTML.
    """
    request = context['request']
    ip = get_ip(request)
    body = article.get_body(ip=ip)
    return mark_safe(body)
@register.simple_tag(takes_context=True)
def get_article_abstract(context, article, words):
    """ Gets the abstract of the article using the user's IP address.

    Falls back to the full body (fetched with the caller's IP) when the
    stored abstract is empty or the site's blank-article placeholder.
    """
    abstract = article.get_abstract()
    if abstract in ["", settings.BLANK_ARTICLE_TEXT]:
        ip = get_ip(context['request'])
        abstract = article.get_body(ip=ip)
| mnadifi/cie | source/apps/articles/templatetags.py | templatetags.py | py | 2,209 | python | en | code | 0 | github-code | 36 |
17013425141 | import datetime
from lambda_function import handler
from components import line_bot_api
from utils import utils_database
from linebot.models import (
JoinEvent,
MemberJoinedEvent,
MemberLeftEvent,
TextSendMessage
)
@handler.add(JoinEvent)
def handle_join(event):
    """Record the group the bot just joined; if the group is not yet
    allowed to collect messages, reply with an instruction message."""
    group_id = event.source.group_id
    group_summary = line_bot_api.get_group_summary(group_id)
    event_info = {
        "group_id": group_summary.group_id,
        "group_name": group_summary.group_name,
        "datetime": datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
    }
    utils_database.insert_joined_group_info(event_info)
    if not utils_database.check_is_allowed_collect_event_event_info_group(event.source.group_id):
        msg = "該群組尚未開通收納訊息功能,請向管理員申請權限,以便收納通報訊息"
        message = TextSendMessage(text=msg)
        line_bot_api.reply_message(event.reply_token, message)
        return
@handler.add(MemberJoinedEvent)
def handle_member_joined(event):
    """Store the joining member's profile and greet them in the group.

    NOTE(review): only the first entry of event.joined.members is handled;
    and the broad except below merely prints DB errors before replying.
    """
    current_dt = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    group_id = event.source.group_id
    summary = line_bot_api.get_group_summary(group_id)
    group_name = summary.group_name
    user_id = event.joined.members[0].user_id
    profile = line_bot_api.get_group_member_profile(group_id, user_id)
    display_name = profile.display_name
    picture_url = profile.picture_url
    event_info = {
        "datetime": current_dt,
        "group_id": group_id,
        "group_name": group_name,
        "user_id": user_id,
        "display_name": display_name,
        "picture_url": picture_url
    }
    try:
        utils_database.insert_user_info_when_join_group(event_info)
    except Exception as e:
        print(e)
    msg = f'嗨,{ display_name }\n歡迎加入【防汛護水志工第六大隊颱風豪雨事件通報】,麻煩您輸入您的志工編號,方便老六紀錄您的通報結果哦!本群組會收納所有您提供的通報訊息與照片,敬請避免在本群組聊天、傳送問候圖,感謝您的配合與諒解,也謝謝您熱心協助!'
    line_bot_api.reply_message(event.reply_token, TextSendMessage(text=msg))
@handler.add(MemberLeftEvent)
def handle_member_left(event):
    """Record in the database that a member has left a group."""
    # NOTE(review): reads the private `_members` attribute of the left event;
    # newer linebot versions may expose this through a public accessor.
    left_info = {
        "datetime": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        "group_id": event.source.group_id,
        "user_id": event.left._members[0]["userId"],
    }
    return utils_database.update_user_info_when_left_group(left_info)
| jialiang8931/WRA06-Volunteer-LineBot | src/components/handler_event_group.py | handler_event_group.py | py | 2,658 | python | en | code | 0 | github-code | 36 |
70562644584 | import sys
from bisect import bisect_left
# Fast-input idiom for competitive programming: deliberately shadow the
# builtin input() with sys.stdin.readline (much faster for large inputs).
input = sys.stdin.readline
# N = sequence length, nums = the sequence (BOJ 12015: longest increasing
# subsequence in O(N log N)).
N = int(input().rstrip())
nums = list(map(int, input().rstrip().split()))
# dp holds one smallest-possible tail value per LIS length; len(dp) == LIS length.
dp = []
def change(ary, num):
    """Replace the leftmost element of ascending list *ary* that is >= *num*
    with *num* (equivalent to ``ary[bisect_left(ary, num)] = num``).

    If *num* is greater than every element, it is appended instead.
    Runs a binary search in O(log n); mutates *ary* in place.

    :param ary: ascending list (the LIS dp array)
    :param num: value to place
    :return: None
    """
    # BUG FIX: the upper bound was len(ary), which let mid (and low) index
    # one past the end and raise IndexError (e.g. change([1], 2)).
    low, high = 0, len(ary) - 1
    while low <= high:
        mid = (low + high) // 2
        if ary[mid] >= num:
            high = mid - 1
        else:
            low = mid + 1
    # low is now the leftmost index whose value is >= num,
    # or len(ary) when num exceeds every element.
    if low == len(ary):
        ary.append(num)
    else:
        ary[low] = num
# Classic O(N log N) LIS: extend dp when nums[i] beats the largest tail,
# otherwise overwrite the leftmost tail that is >= nums[i].
for i in range(N):
    if not dp or dp[-1] < nums[i]:
        dp.append(nums[i])
    else:
        # change(dp, nums[i]) # hand-rolled binary search (see change() above)
        dp[bisect_left(dp, nums[i])] = nums[i] # using the bisect module
print(len(dp)) | zsmalla/algorithm-jistudy-season1 | src/chapter5/다이나믹프로그래밍(1)/임지수/12015_python_임지수.py | 12015_python_임지수.py | py | 791 | python | ko | code | 0 | github-code | 36 |
17236751533 | # Дано натуральное число n (n ≥ 10). Напишите программу, которая определяет его максимальную и минимальную цифры.
# Given a natural number n (n >= 10), print its maximum and minimum digits.
n = int(input())
# Renamed from `max` / `min`: do not shadow the builtins of the same name.
max_digit = 0
min_digit = n % 10
# Peel off the last digit each iteration until the number is exhausted.
while n != 0:
    last_digit = n % 10
    if last_digit > max_digit:
        max_digit = last_digit
    if last_digit < min_digit:
        min_digit = last_digit
    n = n // 10
print('Максимальная цифра равна', max_digit)
print('Минимальная цифра равна', min_digit)
| i-kasparova/gloacademy_python | Lesson_8/task_4.py | task_4.py | py | 517 | python | ru | code | 0 | github-code | 36 |
32538066028 | # -*- coding: utf-8 -*-
"""
Created on Sat May 5 10:53:26 2018
@author: lenovo
"""
import numpy as np
from scipy.optimize import leastsq
def fun(p, x):
    """Linear model to fit: y = k*x + b, with parameters p = (k, b)."""
    slope, intercept = p
    return slope * x + intercept

def err(p, x, y):
    """Residual for leastsq: model prediction at x minus observed y."""
    return fun(p, x) - y
# Sample data points to fit the line to.
x = [1,2,3,4]
y = [6,5,7,10]
# Initial guess for (k, b).
p0 = [1,1]
x1 = np.array(x)
y1 = np.array(y)
# Least-squares fit: xishu[0] holds the optimized (k, b).
xishu = leastsq(err,p0,args=(x1,y1))
print(xishu[0]) | wilsonzyp/probability_statistics | Try_leastsq_with_scipy.py | Try_leastsq_with_scipy.py | py | 446 | python | en | code | 1 | github-code | 36 |
22354796740 | from django.shortcuts import render
from remarcable_app.models import SearchHistory
from remarcable_app.query_functions import (
delete_old_searches,
pull_all_products,
pull_all_tagged_products,
pull_all_categories,
pull_all_tags,
products_to_array,
search_products,
tags_to_dictionary,
filter_by_tag,
filter_by_category,
strip_search_results
)
# this view defines the home landing page
def home(request):
    """Render the landing page: every product, optionally narrowed by the
    category/tag radio buttons submitted via POST.

    NOTE: all tables are pulled up front and filtered in memory. That keeps
    the query count low for a small catalog, but may not scale to a large
    database, where joining/filtering server-side per request could win.
    """
    product_table = pull_all_products()
    tag_product_table = pull_all_tagged_products()
    categories = pull_all_categories()
    just_tags = pull_all_tags()

    # Defaults mean "no filter"; a POST carries the selected radio values.
    category_filter = 'None'
    tag_filter = 'None'
    if request.method == "POST":
        category_filter = request.POST.get('category')
        tag_filter = request.POST.get('tag')
        # Two independent filter passes narrow the product table in turn.
        product_table = filter_by_category(product_table, category_filter, categories)
        product_table = filter_by_tag(product_table, tag_filter, just_tags)

    # Parse the final sorted/filtered tables into front-end-ready data.
    context = {
        'product_data': products_to_array(product_table),
        'tag_data': tags_to_dictionary(tag_product_table),
        'categories': categories,
        'tags': just_tags,
        'category_filter': category_filter,
        'tag_filter': tag_filter,
    }
    return render(request, 'home.html', context)
# this view defines the search results page
def search_results(request):
    """Render the search-results page.

    Handles two POST shapes: a new search from the search bar (persisted to
    SearchHistory), or a filter submission applied on top of the most recent
    search. A plain refresh re-runs the last stored search.
    NOTE(review): the search/strip/filter sequence is duplicated in both
    branches below — a candidate for extraction into a helper.
    """
    product_table = pull_all_products()
    tag_product_table = pull_all_tagged_products()
    categories = pull_all_categories()
    just_tags = pull_all_tags()
    search_list = []
    final_products = []
    category_filter = 'None'
    tag_filter = 'None'
    """
    pull the last search term so that if search_results page is refreshed without submitting a new search,
    the search results are still shown and filters can be applied.
    """
    raw_search = str(SearchHistory.objects.last())
    # check if the POST method is from search bar, otherwise it must be from the filters
    if request.method == "POST" and request.POST.get('text_input') is not None:
        # pull the raw text from tax string from the search bar
        raw_search = request.POST.get('text_input')
        # create a new search_name object and send it to the database
        latest_search = SearchHistory.objects.create(search_name=raw_search)
        """
        in order to keep the SearchHistory database from getting too large, we will check to see if it is larger
        than 15 entries. If so, call the delete_old_searches function and delete the 10 oldest searches.
        """
        if len(SearchHistory.objects.all().values_list()) > 15:
            delete_old_searches()
        # strip the raw seach string of all white space and store remaining words in an array of strings
        search_list = strip_search_results(raw_search)
        # check to make sure the array is not empty
        if len(search_list) > 0:
            # utilize the search_products function to search entire database and return a list of matching product_ids
            final_products = search_products(search_list,product_table,tag_product_table)
            # filter the displayed product_table based on the matching product_ids found above
            product_table = product_table.filter(id__in = final_products)
    else:
        #if no new search is posted.. it must mean filters have been applied
        # strip the raw seach (last search result) string of all white space and store remaining words in an array of strings
        search_list = strip_search_results(raw_search)
        # check to make sure the array is not empty
        if len(search_list) > 0:
            # utilize the search_products function to search entire database and return a list of matching product_ids
            final_products = search_products(search_list,product_table,tag_product_table)
            # filter the displayed product_table based on the matching product_ids found above
            product_table = product_table.filter(id__in = final_products)
        # pull the currently selected category and tag values from the html radio button
        category_filter = request.POST.get('category')
        tag_filter = request.POST.get('tag')
        # since we have two different filter functions, we must call each one and update the product_table
        product_table = filter_by_category(product_table, category_filter, categories)
        product_table = filter_by_tag(product_table, tag_filter,just_tags)
    # utilize helper functions to parse our final sorted/filtered tables into usuable data for the front end
    product_data = products_to_array(product_table)
    tag_data = tags_to_dictionary(tag_product_table)
    return render(request, 'search.html',
                  {
                      'product_data': product_data,
                      'tag_data':tag_data,
                      'raw_search':raw_search,
                      'categories':categories,
                      'tags':just_tags,
                      'category_filter':category_filter,
                      'tag_filter': tag_filter
}) | stephenv13/remarcableproject | remarcable_app/views.py | views.py | py | 5,899 | python | en | code | 0 | github-code | 36 |
28841930639 | import pygame, sys, time, random
from pygame.locals import *
# ---- Pygame / window initialization -------------------------------------
pygame.init()
mainClock = pygame.time.Clock()
# Per-player life counters (player 1 top, player 2 bottom).
lives = 3
lives2 = 3
width = 800
height = 600
windowSurface = pygame.display.set_mode((width, height), 0, 32)
pygame.display.set_caption('Star Wars!')
# ---- Speed constants (pixels per frame) ---------------------------------
movementSpeed = 10
projectileSpeed = 30
scrollSpeed = 6
# speed of the instant-death "white" powerup block
iambecomespeed = 200
shotFrameCounter = 0
targetFrameCounter = 0
collisionFrameCounter = 0
# ---- Live game-object lists ---------------------------------------------
shots = []
shots2 = []
targets = []
lifeblocks = []
nopain = []
death = []
maxLives = 3
score = 0
# Spawn caps for each powerup/obstacle type.
maxTargets = 5
lifes = 4
maxShots = 3
Finvincible = 1
iambecome = 1
# ---- Input state flags --------------------------------------------------
moveLeft = False
moveLeft2 = False
moveRight = False
moveRight2 = False
# ---- Colors (RGB) -------------------------------------------------------
black = (0, 0, 0)
white = (255, 255, 255)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
yellow = (255, 255, 0)
# Ship sprite dimensions.
x = 48
y = 48
t = 40
player = pygame.Rect(273, 20, x, t)
player2 = pygame.Rect(273, 530, x, t)
# bg scrolls left to fake a moving starfield.
bg = pygame.Rect(0, -100, 10, 10)
shoot = False
shoot2 = False
# ---- Asset loading ------------------------------------------------------
background = pygame.image.load('Resources/Images/StarsPattern.png')
Da_Ship = pygame.image.load('Resources/Images/marrio.jpeg')
SS_Falcon = pygame.image.load('Resources/Images/SS Falcon.png').convert()
Rover = pygame.image.load('Resources/Images/World.png').convert()
The_World = pygame.image.load('Resources/Images/tuskc.png').convert()
pew = pygame.mixer.Sound('Resources/Audio/Gun+1.wav')
pew2 = pygame.mixer.Sound('Resources/Audio/Gun+Shot2.wav')
boom = pygame.mixer.Sound('Resources/Audio/Explosion+1.wav')
boom7 = pygame.mixer.Sound('Resources/Audio/boom7.wav')
space = pygame.mixer.music.load('Resources/Audio/Space Fighter Loop.mp3')
# Scaled ship sprites used by the ship-selection screen.
DASHIP = pygame.transform.scale(Da_Ship, (x, y))
FALCON = pygame.transform.scale(SS_Falcon, (x ,y))
ROVER = pygame.transform.scale(Rover, (x,y))
THE_WORLD = pygame.transform.scale(The_World, (x,y))
mcounter = 1
mouset = True
# ---- Initial UI widgets -------------------------------------------------
yellowrect = pygame.draw.rect(windowSurface, yellow, (400, 550, 30, 30))
greenRect = pygame.draw.rect(windowSurface, green, (250, 10, 500, 300))
titleFont = pygame.font.SysFont("none", 60)
myText = "Welcome to Space War! Here are the rules:"
text = titleFont.render(myText, True, black)
def end(lives,lives2):
    """Game-over screen: announce the winner and offer End/New game buttons.

    Whichever player has <= 0 lives loses; the other branch draws the same
    UI with the other winner's name.
    NOTE(review): "New Game?" calls startgame()/chooseship() recursively
    rather than returning, so the call stack grows with each replay.
    """
    while True:
        # Keep scrolling the starfield behind the menu.
        windowSurface.fill(black)
        windowSurface.blit(background, bg)
        bg.left -= scrollSpeed
        if bg.left < -800:
            bg.left = 0
        pygame.display.update()
        if lives <= 0:
            # Player 1 is out of lives: player 2 wins.
            font = pygame.font.SysFont("none", 24)
            scoreText = ("Player 2 WINS!")
            text2 = font.render(scoreText, True, white)
            windowSurface.blit(text2, (10, 10))
            thatRect = pygame.draw.rect(windowSurface, green, (50, 300, 390, 100))
            myText = "End Game?"
            thisRect = pygame.draw.rect(windowSurface, green, (50, 450, 390, 100))
            myText2 = "New Game?"
            text = titleFont.render(myText, True, black)
            textRect = text.get_rect()
            textRect.centerx = thatRect.centerx
            textRect.centery = thatRect.centery
            windowSurface.blit(text, textRect)
            text2 = titleFont.render(myText2, True, black)
            textRect2 = text.get_rect()
            textRect2.centerx = thisRect.centerx
            textRect2.centery = thisRect.centery
            windowSurface.blit(text2, textRect2)
            pygame.display.update()
            for event in pygame.event.get():
                # Hit-test the two buttons against the click position.
                if event.type == MOUSEBUTTONDOWN:
                    if event.pos[0] >= thatRect.left and event.pos[0] <= thatRect.right and event.pos[
                        1] >= thatRect.top and \
                            event.pos[1] <= thatRect.bottom:
                        print("endgame selected!")
                        pygame.quit()
                        sys.exit()
                if event.type == MOUSEBUTTONDOWN:
                    if event.pos[0] >= thisRect.left and event.pos[0] <= thisRect.right and event.pos[
                        1] >= thisRect.top and \
                            event.pos[1] <= thisRect.bottom:
                        pygame.mixer.music.unpause()
                        print("newgame selected")
                        startgame()
                        chooseship()
                if event.type == QUIT:
                    print("quit selected!")
                    pygame.quit()
                    sys.exit()
        if lives2 <= 0:
            # Player 2 is out of lives: player 1 wins (mirror of the above).
            font = pygame.font.SysFont("none", 24)
            scoreText = ("Player 1 WINS!")
            text2 = font.render(scoreText, True, white)
            windowSurface.blit(text2, (10, 10))
            thatRect = pygame.draw.rect(windowSurface, green, (50, 300, 390, 100))
            myText = "End Game?"
            thisRect = pygame.draw.rect(windowSurface, green, (50, 450, 390, 100))
            myText2 = "New Game?"
            text = titleFont.render(myText, True, black)
            textRect = text.get_rect()
            textRect.centerx = thatRect.centerx
            textRect.centery = thatRect.centery
            windowSurface.blit(text, textRect)
            text2 = titleFont.render(myText2, True, black)
            textRect2 = text.get_rect()
            textRect2.centerx = thisRect.centerx
            textRect2.centery = thisRect.centery
            windowSurface.blit(text2, textRect2)
            pygame.display.update()
            for event in pygame.event.get():
                if event.type == MOUSEBUTTONDOWN:
                    if event.pos[0] >= thatRect.left and event.pos[0] <= thatRect.right and event.pos[
                        1] >= thatRect.top and \
                            event.pos[1] <= thatRect.bottom:
                        print("endgame selected!")
                        pygame.quit()
                        sys.exit()
                if event.type == MOUSEBUTTONDOWN:
                    if event.pos[0] >= thisRect.left and event.pos[0] <= thisRect.right and event.pos[
                        1] >= thisRect.top and \
                            event.pos[1] <= thisRect.bottom:
                        pygame.mixer.music.unpause()
                        print("newgame selected")
                        startgame()
                        chooseship()
                if event.type == QUIT:
                    print("quit selected!")
                    pygame.quit()
                    sys.exit()
        pygame.display.update()
def chooseship():
    """Ship-selection screen followed by the main two-player game loop.

    Phase 1 (while mouset): draw four ship choices; the first click picks
    player 1's ship (mcounter 1 -> 2), the second picks player 2's.
    Phase 2 (while great): run the game — keyboard movement/shooting,
    projectile collisions, powerup spawning, life tracking — until one
    player's lives reach 0, then hand off to end().
    """
    # Reset all per-match state (shadowing the module-level globals).
    shotFrameCounter = 0
    targetFrameCounter = 0
    collisionFrameCounter = 0
    shots = []
    shots2 = []
    targets = []
    lifeblocks = []
    nopain = []
    death = []
    maxLives = 3
    score = 0
    maxTargets = 5
    lifes = 4
    maxShots = 3
    Finvincible = 1
    iambecome = 1
    moveLeft = False
    moveLeft2 = False
    moveRight = False
    moveRight2 = False
    x = 48
    y = 54
    player = pygame.Rect(273, 20, x, y)
    player2 = pygame.Rect(273, 530, x, y)
    bg = pygame.Rect(0, -100, 10, 10)
    shoot = False
    shoot2 = False
    lives = 3
    lives2 = 3
    mcounter = 1
    # safe/safe2 are the remaining invincibility frames for each player.
    safe = 0
    safe2 = 0
    mouset = True
    # ---- Phase 1: ship selection ----------------------------------------
    while mouset:
        windowSurface.fill(black)
        windowSurface.blit(background, bg)
        bg.left -= scrollSpeed
        if bg.left < -800:
            bg.left = 0
        blueRect = pygame.draw.rect(windowSurface, blue, (200, 100, 60, 60))
        redRect = pygame.draw.rect(windowSurface, red, (200, 300, 60, 60))
        greenRect = pygame.draw.rect(windowSurface, green, (400, 100, 60, 60))
        whiteRect = pygame.draw.rect(windowSurface, white, (400, 300, 60, 60))
        firstship = "The World"
        secondship = "Rover"
        thirdship = "Inevitability"
        fourthship = "Falcon"
        daFont = pygame.font.SysFont("none", 20)
        hrship = daFont.render(firstship, True, blue)
        windowSurface.blit(hrship, (200, 170))
        rhship = daFont.render(secondship, True, red)
        windowSurface.blit(rhship, (200, 370))
        ssship = daFont.render(thirdship, True, green)
        windowSurface.blit(ssship, (400, 170))
        tship = daFont.render(fourthship, True, white)
        windowSurface.blit(tship, (400, 370))
        windowSurface.blit(THE_WORLD, blueRect)
        windowSurface.blit(ROVER, redRect)
        windowSurface.blit(DASHIP, greenRect)
        windowSurface.blit(FALCON, whiteRect)
        pygame.display.update()
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            # Second click (mcounter == 2) assigns player 2's ship and
            # ends the selection phase.
            if event.type == MOUSEBUTTONDOWN:
                if event.pos[0] >= greenRect.left and event.pos[0] <= greenRect.right and event.pos[
                    1] >= greenRect.top and event.pos[1] <= greenRect.bottom:
                    if mcounter == 2:
                        ship2 = DASHIP
                        shipname = ("daship")
                        mouset = False
                if event.pos[0] >= blueRect.left and event.pos[0] <= blueRect.right and event.pos[1] >= blueRect.top and \
                        event.pos[1] <= blueRect.bottom:
                    if mcounter == 2:
                        ship2 = THE_WORLD
                        shipname = ("world")
                        mouset = False
                if event.pos[0] >= redRect.left and event.pos[0] <= redRect.right and event.pos[1] >= redRect.top and \
                        event.pos[1] <= redRect.bottom:
                    if mcounter == 2:
                        ship2 = ROVER
                        shipname = ("Rover")
                        mouset = False
                if event.pos[0] >= whiteRect.left and event.pos[0] <= whiteRect.right and event.pos[
                    1] >= whiteRect.top and \
                        event.pos[1] <= whiteRect.bottom:
                    if mcounter == 2:
                        ship2 = FALCON
                        shipname = ("Falcon")
                        mouset = False
            # First click (mcounter == 1) assigns player 1's ship.
            if event.type == MOUSEBUTTONDOWN:
                if event.pos[0] >= greenRect.left and event.pos[0] <= greenRect.right and event.pos[
                    1] >= greenRect.top and event.pos[1] <= greenRect.bottom:
                    if mcounter == 1:
                        ship1 = DASHIP
                        shipname = ("daship")
                        mcounter = 2
                if event.pos[0] >= blueRect.left and event.pos[0] <= blueRect.right and event.pos[1] >= blueRect.top and \
                        event.pos[1] <= blueRect.bottom:
                    if mcounter == 1:
                        ship1 = THE_WORLD
                        shipname = ("mworld")
                        mcounter = 2
                if event.pos[0] >= redRect.left and event.pos[0] <= redRect.right and event.pos[1] >= redRect.top and \
                        event.pos[1] <= redRect.bottom:
                    if mcounter == 1:
                        ship1 = ROVER
                        shipname = ("Rover")
                        mcounter = 2
                if event.pos[0] >= whiteRect.left and event.pos[0] <= whiteRect.right and event.pos[
                    1] >= whiteRect.top and \
                        event.pos[1] <= whiteRect.bottom:
                    if mcounter == 1:
                        ship1 = FALCON
                        shipname = ("Falcon")
                        mcounter = 2
    # Player 1 is at the top of the screen, so flip their sprite.
    ship1 = pygame.transform.rotate(ship1, 180)
    great = True
    # ---- Phase 2: main game loop ----------------------------------------
    while great:
        # Keyboard input: arrows + P for player 1, A/D + SPACE for player 2.
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            if event.type == KEYDOWN:
                if event.key == K_LEFT:
                    moveLeft = True
                if event.key == K_RIGHT:
                    moveRight = True
                if event.key == K_p:
                    shoot = True
                    pew2.play()
                if event.key == K_a:
                    moveLeft2 = True
                if event.key == K_d:
                    moveRight2 = True
                if event.key == K_SPACE:
                    shoot2 = True
                    pew.play()
            if event.type == KEYUP:
                if event.key == K_LEFT:
                    moveLeft = False
                if event.key == K_RIGHT:
                    moveRight = False
                if event.key == K_p:
                    shoot = False
                if event.key == K_a:
                    moveLeft2 = False
                if event.key == K_d:
                    moveRight2 = False
                if event.key == K_SPACE:
                    shoot2 = False
                if event.key == K_ESCAPE:
                    pygame.quit()
                    sys.exit()
        # Move the ships, clamped to the window edges.
        if moveLeft2 == True:
            if player2.left > 0:
                player2.left -= movementSpeed
        if moveRight2 == True:
            if player2.right < width:
                player2.right += movementSpeed
        if moveLeft == True:
            if player.left > 0:
                player.left -= movementSpeed
        if moveRight == True:
            if player.right < width:
                player.right += movementSpeed
        windowSurface.fill(black)
        windowSurface.blit(background, bg)
        bg.left -= scrollSpeed
        if bg.left < -800:
            bg.left = 0
        windowSurface.blit(ship1, player)
        windowSurface.blit(ship2, player2)
        # Cull blocks that scrolled off the left edge (iterate over copies).
        for target in targets[:]:
            if target.left < - 20:
                targets.remove(target)
        for life in lifeblocks[:]:
            if life.left < - 20:
                lifeblocks.remove(life)
        for invincible in nopain[:]:
            if invincible.left < - 20:
                nopain.remove(invincible)
        for dead in death[:]:
            if dead.left < -20:
                death.remove(dead)
        # Player 1 shots travel downward; resolve their collisions.
        if shoot == True and (len(shots) < maxShots):
            shots.append(pygame.Rect(player.centerx - 3, player.centery - 3, 6, 6))
        for i in range(len(shots)):
            pygame.draw.rect(windowSurface, green, shots[i])
            shots[i].bottom += projectileSpeed
            if shots[i].colliderect(player2):
                lives2 -= 1
                shots[i].top = 600
                boom7.play()
            for target in targets[:]:
                if shots[i].colliderect(target):
                    targets.remove(target)
                    lives -= 1
                    shots[i].top = 600
                    boom.play()
            for life in lifeblocks[:]:
                if shots[i].colliderect(life):
                    lifeblocks.remove(life)
                    lives += 1
                    shots[i].top = 600
                    boom.play()
            for invincible in nopain[:]:
                if shots[i].colliderect(invincible):
                    nopain.remove(invincible)
                    if safe == 0:
                        safe = 30
                        maxLives -= 1
            for dead in death[:]:
                if shots[i].colliderect(dead):
                    lives2 = 1
                    shots[i].top = 600
                    boom.play()
        # Flicker player 1's sprite while invincibility frames remain.
        if safe > 0:
            if safe % 3 == 0:
                boom.play()
                ship1.set_alpha(255)
                safe -= 1
            else:
                ship1.set_alpha(0)
        else:
            ship1.set_alpha(255)
        # Player 2 shots travel upward; resolve their collisions.
        if shoot2 == True and (len(shots2) < maxShots):
            shots2.append(pygame.Rect(player2.centerx - 3, player2.centery - 3, 6, 6))
        for i in range(len(shots2)):
            pygame.draw.rect(windowSurface, red, shots2[i])
            shots2[i].bottom -= projectileSpeed
            if shots2[i].colliderect(player):
                lives -= 1
                boom7.play()
            for target in targets[:]:
                if shots2[i].colliderect(target):
                    targets.remove(target)
                    lives2 -= 1
                    shots2[i].bottom = 0
            for life in lifeblocks[:]:
                if shots2[i].colliderect(life):
                    lifeblocks.remove(life)
                    lives2 += 1
                    shots2[i].bottom = 0
            for invincible in nopain[:]:
                if shots2[i].colliderect(invincible):
                    invincible.left = -10
                    if safe2 == 0:
                        safe2 = 30
            for dead in death[:]:
                if shots2[i].colliderect(dead):
                    lives = 1
                    shots2[i].bottom = 0
        if safe2 > 0:
            if safe2 % 3 == 0:
                boom.play()
                ship2.set_alpha(255)
                safe2 -= 1
            else:
                ship2.set_alpha(0)
        else:
            ship2.set_alpha(255)
        # Remove shots that left the screen or already hit a player.
        for shot in shots[:]:
            if shot.top > 620:
                shots.remove(shot)
        for shot in shots[:]:
            if shot.colliderect(player2):
                shot.top = 600
        for shot2 in shots2[:]:
            if shot2.bottom < 0:
                shots2.remove(shot2)
        for shot2 in shots2[:]:
            if shot2.colliderect(player):
                shot2.bottom = 0
        # Random spawns: specific z values map to specific block types.
        z = random.randint(0, 23)
        if z == 4:
            if len(targets) < maxTargets:
                targets.append(pygame.Rect(width + 20, random.randint(100, height - 100), 40, 20))
        if z == 13:
            if len(lifeblocks) < lifes:
                lifeblocks.append(pygame.Rect(width + 20, random.randint(100, height - 100), 40, 20))
        if z == 5:
            if len(nopain) < Finvincible:
                nopain.append(pygame.Rect(width + 20, random.randint(100, height - 100), 40, 20))
        if z == 1:
            if len(death) < iambecome:
                death.append(pygame.Rect(width + 20, random.randint(100, height - 100), 40, 20))
        # Draw and scroll all blocks leftward.
        for i in range(len(targets)):
            pygame.draw.rect(windowSurface, red, targets[i])
            targets[i].left -= movementSpeed
        for i in range(len(lifeblocks)):
            pygame.draw.rect(windowSurface, blue, lifeblocks[i])
            lifeblocks[i].left -= movementSpeed
        for i in range(len(nopain)):
            pygame.draw.rect(windowSurface, black, nopain[i])
            nopain[i].left -= movementSpeed
        for i in range(len(death)):
            pygame.draw.rect(windowSurface, white, death[i])
            death[i].left -= iambecomespeed
        # HUD: per-player life counters.
        font = pygame.font.SysFont("none", 20)
        scoreText = "Lives: " + str(lives)
        text2 = font.render(scoreText, True, green)
        windowSurface.blit(text2, (10, 10))
        font = pygame.font.SysFont("none", 20)
        scoreText = "Lives: " + str(lives2)
        text3 = font.render(scoreText, True, red)
        windowSurface.blit(text3, (750, 560))
        pygame.display.update()
        mainClock.tick(60)
        if safe > 0:
            safe -= 1
        if safe2 > 0:
            safe2 -= 1
        if lives <= 0 or lives2 <= 0:
            end(lives,lives2)
def playmusic():
    """Start the background music, looping forever at low volume."""
    pygame.mixer.music.load('Resources/Audio/Space Fighter Loop.mp3')
    pygame.mixer.music.play(-1, 0)  # -1 = loop indefinitely, start at 0 s
    pygame.mixer.music.set_volume(.1)
def startgame():
    """Title screen: show "Start game?" and, once clicked, the rules screen.

    Clicking the green button starts the music and enters the rules loop;
    clicking the yellow button there proceeds to chooseship().
    NOTE(review): `yellowrect` is assigned later inside the rules loop but
    referenced in its event check first — a click arriving before the first
    redraw would hit an UnboundLocalError (it works only because the global
    of the same name would otherwise shadow it at module level).
    """
    game = True
    realgame = False
    while game:
        windowSurface.fill(black)
        windowSurface.blit(background, bg)
        bg.left -= scrollSpeed
        if bg.left < -800:
            bg.left = 0
        greenRect = pygame.draw.rect(windowSurface, green, (200, 250, 390, 100))
        titleFont = pygame.font.SysFont("none", 90)
        myText = "Start game?"
        text = titleFont.render(myText, True, black)
        textRect = text.get_rect()
        textRect.centerx = windowSurface.get_rect().centerx
        textRect.centery = windowSurface.get_rect().centery
        windowSurface.blit(text, textRect)
        bigFont = pygame.font.SysFont("none", 100)
        Spacewar = "SPACE WAR"
        text3 = bigFont.render(Spacewar, True, red)
        windowSurface.blit(text3, (200, 100))
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            if event.type == MOUSEBUTTONDOWN:
                if event.pos[0] >= greenRect.left and event.pos[0] <= greenRect.right and event.pos[1] >= greenRect.top and event.pos[1] <= greenRect.bottom:
                    playmusic()
                    # Rules screen loop: wait for the yellow "ready" button.
                    tules = True
                    while tules:
                        for event in pygame.event.get():
                            if event.type == QUIT:
                                pygame.quit()
                                sys.exit()
                            if event.type == MOUSEBUTTONDOWN:
                                if event.pos[0] >= yellowrect.left and event.pos[0] <= yellowrect.right and event.pos[
                                    1] >= yellowrect.top and \
                                        event.pos[1] <= yellowrect.bottom:
                                    chooseship()
                        pygame.display.update()
                        windowSurface.fill(black)
                        windowSurface.blit(background, bg)
                        bg.left -= scrollSpeed
                        if bg.left < -800:
                            bg.left = 0
                        yellowrect = pygame.draw.rect(windowSurface, yellow, (400, 550, 30, 30))
                        greenRect = pygame.draw.rect(windowSurface, green, (30, 10, 720, 40))
                        # Colored swatches illustrating each rule below.
                        redrect = pygame.draw.rect(windowSurface, red, (60, 150, 70, 40))
                        bluerect = pygame.draw.rect(windowSurface, blue, (60, 220, 70, 40))
                        blackrect = pygame.draw.rect(windowSurface, black, (60, 290, 70, 40))
                        whiterect = pygame.draw.rect(windowSurface, white, (60, 360, 70, 40))
                        rulered = "if you hit the red rectangle, you lose a life"
                        ruleblue = "if you hit the blue rectangle, you get a life"
                        ruleblack = "if you hit the black rectangle, you are invisible (but it itself is basically invisible) until you shoot"
                        rulewhite = " if you hit the white rectangle, the other character gets their lives reduced to one life(you can try to hit it, anyway.)"
                        titleFont = pygame.font.SysFont("none", 50)
                        myText = "Welcome to Space War! Here are the rules:"
                        text = titleFont.render(myText, True, black)
                        Start = "READY? PRESS THE YELLOW BUTTON!"
                        text3 = titleFont.render(Start, True, blue)
                        windowSurface.blit(text3, (75, 500))
                        windowSurface.blit(text, greenRect)
                        littleFont = pygame.font.SysFont("none", 20)
                        tusk = pygame.font.SysFont("none", 18)
                        text4 = littleFont.render(rulered, True, red)
                        windowSurface.blit(text4, (160, 150))
                        text5 = littleFont.render(ruleblue, True, blue)
                        windowSurface.blit(text5, (160, 220))
                        text6 = littleFont.render(ruleblack, True, white)
                        windowSurface.blit(text6, (160, 290))
                        text7 = tusk.render(rulewhite, True, white)
                        windowSurface.blit(text7, (160, 360))
                        pygame.display.update()
        pygame.display.update()
# Entry point: show the title screen (which chains into the rest of the game).
startgame()
| Noah04322/Assignments | End of Year.py | End of Year.py | py | 23,466 | python | en | code | 0 | github-code | 36 |
26703340293 | import speech_recognition as sr
import wave
import sys
import os
import uuid
# Usage: python transcribePCM.py <file.pcm>
# Converts raw stereo 48 kHz 16-bit PCM to a temporary WAV, transcribes it
# with Google Speech Recognition, prints the transcript, then cleans up.
pcmfn = sys.argv[1]
# Random temp WAV name; os.path.join() with a single argument was a no-op.
wavefn = uuid.uuid4().hex
with open(pcmfn, 'rb') as pcm:
    pcmdata = pcm.read()
with wave.open(wavefn, 'wb') as wavfile:  # convert pcm to wav
    # (nchannels, sampwidth, framerate, nframes, comptype, compname)
    wavfile.setparams((2, 2, 48000, 0, 'NONE', 'NONE'))
    wavfile.writeframes(pcmdata)
try:
    try:
        r = sr.Recognizer()
        with sr.AudioFile(wavefn) as source:
            audio = r.record(source)
    except Exception:
        # Was a bare `except:` that fell through and later crashed on the
        # undefined `r`/`audio`; report the failure and exit instead.
        print('SR failed')
        sys.exit(1)
finally:
    # Always remove the temp WAV, whether recognition loaded or not.
    os.remove(wavefn)
try:
    print(r.recognize_google(audio))
except Exception:
    # Narrowed from a bare except; keeps the original best-effort message.
    print('!Unrecognizable')
| nfsmith/DiscordStenographer | transcribePCM.py | transcribePCM.py | py | 586 | python | en | code | 0 | github-code | 36 |
23063800044 | from src.pipeline.predict_pipeline import camera
from src.utils import emotion_average
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
from src.utils import normalize
from src.utils import string
from src.exception import CustomException
import sys
import pandas as pd
def recommender(emotion,preference):
    """Pick one Spotify track id from a chart playlist to match *emotion*.

    emotion: 1 selects a track classified "positive", anything else "negative".
    preference: "1" -> first hard-coded chart playlist, "2" -> second one.
    Returns a Spotify track id string; raises CustomException on any failure.
    """
    try:
        # SECURITY(review): Spotify client credentials are hard-coded in
        # source — move them to environment variables / a secrets store
        # and rotate the exposed secret.
        sp = spotipy.Spotify(auth_manager=SpotifyClientCredentials(client_id="12496220faa84eb39d6fdd22d53f3599",
                                                           client_secret="bc1f341b8551410c98f12d749c49fd33"))
        if preference=="1":
            playlist_link ="https://open.spotify.com/playlist/37i9dQZEVXbLZ52XmnySJg"
        elif preference=="2":
            playlist_link = "https://open.spotify.com/playlist/37i9dQZEVXbMDoHDwVN2tF"
        # Playlist URI is the last path segment, minus any query string.
        playlist_URI = playlist_link.split("/")[-1].split("?")[0]
        results = sp.playlist(playlist_URI, fields='tracks,next')
        tracks=results['tracks']
        audio_features_list = []
        # Page through the playlist collecting audio features per track.
        while tracks:
            for item in tracks['items']:
                track = item['track']
                # Get the audio features for the track
                audio_features = sp.audio_features(track['id'])[0]
                # Add the audio features to the list
                audio_features_list.append(audio_features)
            # Get the next page of tracks (if there is one)
            tracks = sp.next(tracks)
        # Convert the list of audio features to a Pandas DataFrame
        b = pd.DataFrame(audio_features_list)
        # Normalize the mood-related features, then classify each track:
        # positive emotional_state (mean of tempo + valence) -> emotion 1.
        b['valence']=normalize(b['valence'])
        b['energy']=normalize(b['energy'])
        b['tempo']=normalize(b['tempo'])
        b['emotional_state']=(b['tempo']+b['valence'])/2
        emotions=[]
        for val in b['emotional_state']:
            if val>0:
                emotions.append(1)
            else:
                emotions.append(0)
        b['emotion']=emotions
        # Draw one random candidate from each emotion bucket.
        extract1=b[b['emotion']==1]
        extract2=b[b['emotion']==0]
        random_row1 = extract1.sample(n=1)
        random_row2 = extract2.sample(n=1)
        track1 = sp.track(string(random_row1))
        track2 = sp.track(string(random_row2))
        if emotion==1:
            return(track1['id'])
        else :
            return(track2['id'])
    except Exception as e:
        raise CustomException(e,sys)
| AnshulDubey1/Music-Recommendation | src/pipeline/song_predictor.py | song_predictor.py | py | 2,474 | python | en | code | 5 | github-code | 36 |
33830156345 | import pandas as pd
class Lista:
    """Credential store backed by the Excel sheet ``senhas.xlsx``.

    Columns: ``usuario`` (login name) and ``senha`` (password).
    NOTE(review): passwords are stored in plain text in the spreadsheet.
    """
    def __init__(self):
        # Load the spreadsheet once; reload() refreshes it after writes.
        self.planilha_original = pd.read_excel("senhas.xlsx")
        self.df_original = pd.DataFrame(self.planilha_original) # build the original DataFrame
    def busca_login(self, nome):
        """Return 1 if user *nome* already exists, 0 otherwise (prints outcome)."""
        self.reload()
        if nome in [i for i in self.planilha_original["usuario"]]:
            print("Ja existe este usuario.")
            return 1
        else:
            print("Usuario não localizado")
            return 0
    def listar_todos(self):
        """Print every login/password pair currently in the spreadsheet."""
        self.reload()
        print("LISTA DE LOGINS.")
        for i, x in zip(self.planilha_original["usuario"], self.planilha_original["senha"]):
            print(f"LOGIN -> {i} # SENHA -> {x}")
    def inserir_login(self, usuario, senha):
        """Append a new user/password row and write the file back to disk."""
        df_novo = pd.DataFrame({'usuario': [usuario], 'senha': [senha]}) # DataFrame values must be lists
        df_lista_nova = pd.concat([self.df_original, df_novo]) # concatenated DataFrame: old rows + new row
        try:
            df_lista_nova.to_excel("senhas.xlsx", index=False)
            print("Dados inseridos com sucesso.")
            self.reload()
        # NOTE(review): bare except hides the real error; narrow it when possible.
        except:
            print("Ocorreu erro ao inserir dados.")
    def logar_sistema(self, usuario, senha):
        """Validate a login attempt, printing the outcome (returns None)."""
        if usuario in [y for y in self.planilha_original["usuario"]]:
            for i, x in zip(self.planilha_original["usuario"], self.planilha_original["senha"]):
                try:
                    # Coerce both cells to str so numeric spreadsheet cells compare.
                    i = str(i)
                    x = str(x)
                    if i == usuario and x == senha:
                        print("LOGADO COM SUCESSO.")
                    if i == usuario and x != senha:
                        print("SENHA NÃO CONFERE.")
                except:
                    print("Erro desconhecido de formato de campos.")
        else:
            print("USUARIO E SENHAS INCORRETOS.")
    def reload(self):
        """Re-read the spreadsheet from disk into planilha_original."""
        self.planilha_original = pd.read_excel("senhas.xlsx")
| riatoso/sistemaDeLoginExcel | login.py | login.py | py | 1,934 | python | pt | code | 0 | github-code | 36 |
18082073278 | #!/usr/bin/env python
"""
Neato control program to make a robot follow a line (like a roadway) and react
to signs in its path.
"""
import rospy
from geometry_msgs.msg import Twist, PoseWithCovariance, Pose, Point, Vector3
from sensor_msgs.msg import LaserScan, Image
import math
import numpy as np
import cv2
from cv_bridge import CvBridge
import helper_functions as hp
import signal
import sys
##### GLOBAL SPEED CONSTANT #####
rotate_speed_limit = 0.3
##### GLOBAl STATE CONSTANTS #####
DRIVE = 0
STOP = 1
LOOK_BOTH_WAYS = 2
class Controller:
    def __init__(self):
        """Set up ROS pub/sub, OpenCV windows, SIFT matching state,
        color thresholds loaded from settings.txt, and calibration sliders.
        Starts the robot stopped."""
        ##### ROS INITIALIZATION #####
        rospy.init_node('caribou')
        self.pub = rospy.Publisher('cmd_vel', Twist, queue_size=10)
        self.command = Twist()
        self.threshold = 0 # TODO: CHANGE THIS NUMBER
        self.bridge = CvBridge()
        rospy.Subscriber('/camera/image_raw', Image, self.react_to_image)
        ##### IMAGE SIZE #####
        self.win_size = (640,480)
        # Only the top 90% of the frame is searched for the line.
        self.win_height_cropped = 480*0.9
        ##### SET STATE #####
        self.state = DRIVE
        ##### INITIALIZE WINDOWS #####
        cv2.namedWindow('set_bounds')
        cv2.namedWindow('bw_window_cropped')
        cv2.namedWindow('Output')
        ##### INITIALIZE SIFT #####
        self.sift = cv2.SIFT()
        self.bf = cv2.BFMatcher()
        # Rolling buffer of recent SIFT descriptors (scene-stability check).
        self.past_descriptors = []
        ##### SIGN REACTION BEHAVIOR #####
        self.pause_duration = rospy.Duration(3)
        self.ignore_stop_sign_threshold = self.pause_duration + rospy.Duration(3)
        self.last_stop_sign = rospy.Time.now() - self.ignore_stop_sign_threshold
        ##### COLOR PARAMETERS (hand-tweaked) #####
        # SECURITY(review): eval() on file contents — safe only if
        # settings.txt is fully trusted; consider ast.literal_eval.
        settings_file = open('settings.txt', 'r')
        self.grey_lb = int(settings_file.readline())
        self.grey_ub = int(settings_file.readline())
        self.red_lb = eval(settings_file.readline())
        self.red_ub = eval(settings_file.readline())
        settings_file.close()
        ##### CALIBRATION SLIDERS #####
        cv2.createTrackbar('grey l', 'set_bounds', self.grey_lb , 255,
            self.set_grey_lower)
        cv2.createTrackbar('grey u', 'set_bounds', self.grey_ub , 255,
            self.set_grey_upper)
        cv2.createTrackbar('B l', 'set_bounds', self.red_lb[0], 255,
            self.set_b_l)
        cv2.createTrackbar('B u', 'set_bounds', self.red_ub[0], 255,
            self.set_b_u)
        cv2.createTrackbar('G l', 'set_bounds', self.red_lb[1] ,255,
            self.set_g_l)
        cv2.createTrackbar('G u', 'set_bounds', self.red_ub[1], 255,
            self.set_g_u)
        cv2.createTrackbar('R l', 'set_bounds', self.red_lb[2], 255,
            self.set_r_l)
        cv2.createTrackbar('R u', 'set_bounds', self.red_ub[2], 255,
            self.set_r_u)
        ##### START OFF STOPPED #####
        self.stop()
        self.send()
    def set_grey_lower(self, val: int) -> None:
        """Trackbar callback: set the GREY lower bound (line detection)."""
        self.grey_lb = val
    def set_grey_upper(self, val: int) -> None:
        """Trackbar callback: set the GREY upper bound (line detection)."""
        self.grey_ub = val
    def set_b_l(self, val: int) -> None:
        """Trackbar callback: BLUE-channel lower bound of the stop-sign threshold."""
        self.red_lb[0] = val
    def set_b_u(self, val: int) -> None:
        """Trackbar callback: BLUE-channel upper bound of the stop-sign threshold."""
        self.red_ub[0] = val
    def set_g_l(self, val: int) -> None:
        """Trackbar callback: GREEN-channel lower bound of the stop-sign threshold.

        (Docstring previously said BLUE — copy-paste error; this sets index 1,
        the green channel of the BGR bounds.)
        """
        self.red_lb[1] = val
    def set_g_u(self, val: int) -> None:
        """Trackbar callback: GREEN-channel upper bound of the stop-sign threshold."""
        self.red_ub[1] = val
    def set_r_l(self, val: int) -> None:
        """Trackbar callback: RED-channel lower bound of the stop-sign threshold."""
        self.red_lb[2] = val
    def set_r_u(self, val: int) -> None:
        """Trackbar callback: RED-channel upper bound of the stop-sign threshold."""
        self.red_ub[2] = val
def react_to_image(self, msg):
    """
    ROS image callback: stash the frame in self.cv_image and advance the
    state machine.

    DRIVE          -> follow the grey line, watch for a red stop sign.
    STOP           -> hold position (a one-shot timer moves us on).
    LOOK_BOTH_WAYS -> compare SIFT descriptors against recent frames and
                      resume driving once the view has stabilised.
    """
    self.cv_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
    cv2.waitKey(5)
    if self.state == DRIVE:
        # Locate the grey line inside the cropped search window.
        direction = hp.find_line(self.cv_image,
                                 (0, self.win_height_cropped), self.win_size,
                                 (self.grey_lb, self.grey_lb, self.grey_lb),
                                 (self.grey_ub, self.grey_ub, self.grey_ub),
                                 self.threshold)
        self.drive(direction)
        sign_test = hp.find_stop_sign(self.cv_image,
                                      tuple(self.red_lb), tuple(self.red_ub))
        # Ignore a detected sign unless enough time (pause + cool-down)
        # has elapsed since the last one, so the same sign is not
        # re-triggered while pulling away from it.
        if (sign_test and
                (rospy.Time.now() - self.ignore_stop_sign_threshold) >
                self.last_stop_sign):
            # After pause_duration, look_both_ways flips the state.
            rospy.Timer(self.pause_duration,
                        self.look_both_ways, oneshot=True)
            self.state = STOP
    elif self.state == STOP:
        self.stop()
    elif self.state == LOOK_BOTH_WAYS:
        gray = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2GRAY)
        kp, des = self.sift.detectAndCompute(gray, None)
        if len(self.past_descriptors) > 10:
            previous_des = self.past_descriptors.pop(0)
            matches = self.bf.knnMatch(des, previous_des, k=2)
            # Apply ratio test
            good_count = 0
            for m,n in matches:
                if m.distance < 0.75*n.distance:
                    good_count += 1
            # Presumably: many surviving matches means the scene is
            # static (nothing crossing), so it is safe to resume.
            if good_count > 0.6*len(previous_des):
                self.state = DRIVE
        self.past_descriptors.append(des)
    cv2.imshow("Output", self.cv_image)
    cv2.waitKey(5)
def look_both_ways(self, event):
    """ Timer callback: record the stop time and enter LOOK_BOTH_WAYS. """
    self.last_stop_sign = rospy.Time.now()
    self.state = LOOK_BOTH_WAYS
def drive(self, direction):
    """Update self.command from a (pixel_offset, line_found) pair.

    A zero offset means drive straight; otherwise steer proportionally
    to the offset (clamped to rotate_speed_limit) and slow the forward
    speed as the turn sharpens. If no line was found, stop.
    """
    offset = direction[0]
    line_found = direction[1]
    if not line_found:
        self.stop()
        return
    if offset == 0:
        self.command.angular.z = 0
        self.command.linear.x = .1
        return
    # Normalise the pixel offset by half the 640px frame width.
    proportion = float(offset) / (640 / 2)
    if proportion > 0:
        self.command.angular.z = min(proportion, rotate_speed_limit)
    else:
        self.command.angular.z = max(proportion, -rotate_speed_limit)
    self.command.linear.x = .1 * (1 - abs(proportion))
def stop(self):
    """ Sets self.command to stop all bot motion (takes effect on next send). """
    self.command.linear.x = 0
    self.command.angular.z = 0
def send(self):
    """ Publishes the current self.command message to ROS. """
    self.pub.publish(self.command)
def signal_handler(self, signal, frame):
    """Save calibration settings to settings.txt, then exit the process.

    Installed as the SIGINT handler so Ctrl-C persists the slider state
    (grey bounds, then red BGR lower/upper bounds, one per line).
    """
    # Use a context manager so the file is flushed and closed even if a
    # write fails (the original opened/closed the handle manually).
    with open('settings.txt', 'w') as settings_file:
        settings_file.write(str(self.grey_lb) + '\n')
        settings_file.write(str(self.grey_ub) + '\n')
        settings_file.write(str(self.red_lb) + '\n')
        settings_file.write(str(self.red_ub) + '\n')
    print('Exiting gracefully...')
    sys.exit(0)
# Wire everything up: build the controller and persist calibration
# settings when the user hits Ctrl-C.
controller = Controller()
signal.signal(signal.SIGINT, controller.signal_handler)
while not rospy.is_shutdown():
controller.send() | lianilychee/project_caribou | scripts/caribou.py | caribou.py | py | 6,666 | python | en | code | 1 | github-code | 36 |
40017524881 | from scipy.stats import zscore
from datetime import datetime as dt
import numpy as np
import pandas as pd
RAW_DIR = "raw/"
RAW_TRAIN_PATH = RAW_DIR + "raw_train_data.csv"
RAW_PREDICT_PATH = RAW_DIR + "raw_predict_data.csv"
CYCLE_AMOUNT_PATH = RAW_DIR + "cycle_amount.csv"
INPUT_DIR = "input/"
TRAIN_DATA_PATH = INPUT_DIR + "train_data.csv"
TEST_DATA_PATH = INPUT_DIR + "test_data.csv"
PREDICT_DATA_PATH = INPUT_DIR + "predict_data.csv"
TEST_PERCENTAGE = 0.1
AMOUNT_LOW_LIMIT = 80
AMOUNT_HIGH_LIMIT = 180
class WeatherDataGenerator:
    """Turns raw hourly weather rows into one feature row per date.

    Pipeline (see generate_data): split datetime into date/hour, keep the
    real-valued weather columns, drop closed hours, pivot to date x hour,
    append month/weekday, then attach the cycle-amount label if provided.
    """

    #CLOSED_HOURS = [ "22:00", "23:00", "0:00", "1:00", "2:00", "3:00", "4:00", "5:00" ]
    CLOSED_HOURS = [ "22:00", "23:00", "0:00", "1:00", "2:00", "3:00", "4:00", "5:00", "13:00", "14:00", "15:00", "19:00", "20:00", "21:00" ]

    def __init__(self, raw_data=None, amount_data=None):
        # amount_data is optional: omitted for prediction-only input.
        self.weather_data = pd.DataFrame()
        self.raw_data = raw_data
        self.amount_data = amount_data

    def generate_data(self):
        """Run the full transformation pipeline, in order."""
        self.__store_split_datetime()
        self.__store_real_values()
        self.__drop_closed_hours()
        self.__pivot_date_x_hour()
        self.__store_categolized_values()
        self.__store_label_values()
        self.__drop_invalid_label_values()

    def get_data(self):
        """Return the processed DataFrame."""
        return self.weather_data

    def __store_split_datetime(self):
        print("Splitting datetime to date and hour...")
        # index 1, 2, 3 is used later
        self.weather_data = self.raw_data[0].apply(lambda datehour: pd.Series(datehour.split(" "), index=[0,4]))

    def __drop_closed_hours(self):
        print("Dropping closed hours columns...")
        drop_rows = self.weather_data.loc[self.weather_data[4].isin(self.CLOSED_HOURS)]
        self.weather_data.drop(drop_rows.index, inplace=True)

    def __store_real_values(self):
        print("Storing temprature and precipiation and wind speed...")
        for j in [ 1, 2, 3 ]:
        #for j in [ 1, 3 ]: # Passing wind speed
            self.weather_data[j] = self.raw_data[j]

    def __normalize_real_values(self):
        # NOTE(review): currently unused -- generate_data never calls it.
        print("Normalizing real values...")
        # Normalize real_value columns
        for j in [ 1, 2, 3 ]:
        #for j in [ 1, 3 ]: # Passing wind speed
            # Regression problems doesn't need to be normalized?
            self.weather_data[j] = zscore(self.weather_data[j], axis=0)

    def __pivot_date_x_hour(self):
        print("Pivoting columns date x hour...")
        # Pivot data to date x hour
        self.weather_data = self.weather_data.pivot(index=0, columns=4)

    def __store_categolized_values(self):
        print("Appending categolized values...")
        # Append oter weathers and labels after pivot
        for l in self.weather_data.index:
            date = dt.strptime(l, "%Y/%m/%d")
            self.weather_data.loc[l, 5] = date.month
            self.weather_data.loc[l, 6] = date.weekday()

    def __store_label_values(self):
        # Reset indexes of self.weather_data as default interger, to match index of two dataframes
        self.weather_data.reset_index(drop=True, inplace=True)
        if self.amount_data is None:
            print("Skipping appending label values...")
        else:
            print("Appending label values...")
            self.weather_data[7] = self.amount_data[0]

    def __drop_invalid_label_values(self):
        print("Dropping invalid label values...")
        #if self.weather_data[7] is None:
        if self.amount_data is None:
            print("Skipping dropping invalid label values...")
        else:
            # NOTE(review): this drops rows whose amount lies INSIDE
            # [AMOUNT_LOW_LIMIT, AMOUNT_HIGH_LIMIT]. Confirm the range is
            # not meant to be inverted (keep inside, drop outside).
            drop_rows = self.weather_data[(AMOUNT_LOW_LIMIT <= self.weather_data[7]) & (self.weather_data[7] <= AMOUNT_HIGH_LIMIT)]
            self.weather_data.drop(drop_rows.index, inplace=True)
def read_raw_data():
    """Load the raw train/predict weather CSVs and the cycle-amount labels."""
    print("Reading weather and cycle amount data...")
    # Headerless files: name the four weather columns 0..3.
    column_names = np.arange(4)
    raw_train_data_df = pd.read_csv(RAW_TRAIN_PATH, header=None, names=column_names)
    raw_predict_data_df = pd.read_csv(RAW_PREDICT_PATH, header=None, names=column_names)
    amount_data_df = pd.read_csv(CYCLE_AMOUNT_PATH, header=None)
    return raw_train_data_df, raw_predict_data_df, amount_data_df
def make_train_test_data(weather_df):
    """Randomly split weather_df into (train, test) by TEST_PERCENTAGE."""
    print("Make train and test data by TEST_PERCENTAGE...")
    # Hold out a random sample; everything else becomes training data.
    held_out = weather_df.sample(frac=TEST_PERCENTAGE)
    remaining = weather_df.drop(index=held_out.index)
    return remaining, held_out
def raw_to_weather():
    """End-to-end conversion: raw CSVs -> train/test/predict feature CSVs."""
    print('*********************************')
    print('Generating train and test data...')
    print('*********************************')
    raw_train_data_df, raw_predict_data_df, amount_data_df = read_raw_data()
    # Training data carries the cycle-amount labels.
    train_data_generator = WeatherDataGenerator(raw_train_data_df, amount_data_df)
    train_data_generator.generate_data()
    train_df, test_df = make_train_test_data(train_data_generator.get_data())
    print('*********************************')
    print('Saving train and test data...')
    print('*********************************')
    train_df.to_csv(TRAIN_DATA_PATH, header=None)
    test_df.to_csv(TEST_DATA_PATH, header=None)
    print('*********************************')
    print('Generating predict data...')
    print('*********************************')
    # Prediction data has no labels (amount_data omitted).
    predict_data_generator = WeatherDataGenerator(raw_predict_data_df)
    predict_data_generator.generate_data()
    predict_df = predict_data_generator.get_data()
    print('*********************************')
    print('Saving predict data...')
    print('*********************************')
    predict_df.to_csv(PREDICT_DATA_PATH, header=None)
def run():
    """Entry point: regenerate all processed CSVs from the raw data."""
    raw_to_weather()


if __name__ == "__main__":
    run()
| ytorii/park-amount | wdnn/raw_to_input_csv.py | raw_to_input_csv.py | py | 5,411 | python | en | code | 0 | github-code | 36 |
16810461794 | import json
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.core import serializers
from django.core.files.uploadhandler import FileUploadHandler
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, render_to_response, redirect
from django.template import RequestContext
from django.views.decorators.csrf import csrf_exempt
from outfit.forms import UserForm, ClothesForm
from outfit.models import Clothes, User
def register(request):
    """Handle user sign-up; authenticate and log the new user in on success.

    GET renders an empty form; POST validates, saves the user, and
    re-renders the registration template.
    """
    if request.method == 'POST':
        form = UserForm(request.POST)
        if form.is_valid():
            user = form.save()
            # allows users to be redirected to home page after register
            messages.info(request, "Thanks for registering.")
            new_user = authenticate(username=request.POST['username'],
                                    password=request.POST['password1'])
            # BUG FIX: the authenticated user was previously discarded;
            # establish the session so registration actually signs them in.
            if new_user is not None:
                login(request, new_user)
    else:
        form = UserForm()
    return render(request, 'registration/register.html', {
        'form': form,
    })
def login_redirect(request):
    """Send the logged-in user to their gender-specific landing page."""
    target = 'profile' if request.user.gender == 'M' else 'girly'
    return redirect(target)
def profile(request):
    """Render the profile page: handle clothing uploads and list the wardrobe.

    POST with a valid ClothesForm saves the item for the current user and
    redirects; otherwise the page is rendered with items grouped by type.
    """
    big = Clothes.objects.all()
    if request.method == 'POST':
        form = ClothesForm(request.POST, request.FILES)
        if form.is_valid():
            clothes = form.save(commit=False)
            clothes.client = request.user
            clothes.save()
            # FileUploadHandler(request.FILES['image'])
            return HttpResponseRedirect('/profile')
    else:
        form = ClothesForm()
    context = {
        'form': form,
        'big': big,
        'clothes_tops': Clothes.objects.filter(type='T'),
        'bottoms': Clothes.objects.filter(type='B'),
        'accessories': Clothes.objects.filter(type='A'),
        'shoes': Clothes.objects.filter(type='S'),
        'headwear': Clothes.objects.filter(type='H'),
    }
    return render_to_response('profile.html', RequestContext(request, context))
def girly(request):
    """Render the 'girly' landing page (mirror of profile() with girly.html).

    POST with a valid ClothesForm saves the item for the current user and
    redirects; otherwise the page is rendered with items grouped by type.
    """
    big = Clothes.objects.all()
    # BUG FIX: `useall = User.all()` raised AttributeError -- the manager
    # lives at User.objects. The result was never used, so the broken
    # call is removed entirely.
    if request.method == 'POST':
        form = ClothesForm(request.POST, request.FILES)
        if form.is_valid():
            clothes = form.save(commit=False)
            clothes.client = request.user
            clothes.save()
            # FileUploadHandler(request.FILES['image'])
            return HttpResponseRedirect('/profile')
    else:
        form = ClothesForm()
    clothes_tops = Clothes.objects.filter(type='T')
    clothes_bottoms = Clothes.objects.filter(type='B')
    clothes_accessories = Clothes.objects.filter(type='A')
    clothes_shoes = Clothes.objects.filter(type='S')
    clothes_headwear = Clothes.objects.filter(type='H')
    return render_to_response('girly.html', RequestContext(request,
        {'form': form, 'big': big, 'clothes_tops': clothes_tops, 'bottoms': clothes_bottoms,
         'accessories': clothes_accessories, 'shoes': clothes_shoes, 'headwear': clothes_headwear,}))
| SeanKapus/Fashion | outfit/views.py | views.py | py | 3,292 | python | en | code | 0 | github-code | 36 |
16968857057 | #-*- coding: utf-8 -*-
from __future__ import unicode_literals
from operator import __or__ as OR
from functools import reduce
import six
from django.conf import settings
try:
from django.utils.encoding import force_unicode as force_text
except ImportError:
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from django.template.response import TemplateResponse
from django.contrib.admin import helpers
from django.contrib.admin.utils import model_ngettext
from celery import chain
from edw.admin.entity.forms import EntitiesUpdateTermsAdminForm
def update_terms(modeladmin, request, queryset, task, template=None):
    """
    Admin action: add/remove terms on many entities via celery tasks.

    First request renders a confirmation page with the terms form;
    the confirmed POST (with 'post' set) chunks the queryset and
    dispatches one celery task per chunk.
    """
    CHUNK_SIZE = getattr(settings, 'EDW_UPDATE_TERMS_ACTION_CHUNK_SIZE', 100)
    opts = modeladmin.model._meta
    app_label = opts.app_label
    if request.POST.get('post'):
        form = EntitiesUpdateTermsAdminForm(request.POST)
        if form.is_valid():
            to_set = [x.id for x in form.cleaned_data['to_set']]
            to_unset = [x.id for x in form.cleaned_data['to_unset']]
            n = queryset.count()
            if n and (to_set or to_unset):
                i = 0
                tasks = []
                while i < n:
                    chunk = queryset[i:i + CHUNK_SIZE]
                    # Log each change against the admin history.
                    for obj in chunk:
                        obj_display = force_text(obj)
                        modeladmin.log_change(request, obj, obj_display)
                    tasks.append(task.si([x.id for x in chunk], to_set, to_unset))
                    i += CHUNK_SIZE
                chain(reduce(OR, tasks)).apply_async()
                modeladmin.message_user(request, _("Successfully proceed %(count)d %(items)s.") % {
                    "count": n, "items": model_ngettext(modeladmin.opts, n)
                })
                # Return None to display the change list page again.
                return None
    else:
        form = EntitiesUpdateTermsAdminForm()
    if len(queryset) == 1:
        objects_name = force_text(opts.verbose_name)
    else:
        objects_name = force_text(opts.verbose_name_plural)
    title = _("Update terms for multiple entities")
    context = {
        "title": title,
        'form': form,
        "objects_name": objects_name,
        'queryset': queryset,
        "opts": opts,
        "app_label": app_label,
        'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
        'media': modeladmin.media,
        'action': 'update_terms',
    }
    # Display the confirmation page
    kwargs = {} if six.PY3 else {'current_app': modeladmin.admin_site.name}
    return TemplateResponse(request, template if template is not None else "edw/admin/base_actions/update_terms.html",
                            context, **kwargs)
# Label shown for this action in the Django admin actions dropdown.
update_terms.short_description = _("Modify terms for selected %(verbose_name_plural)s")
| infolabs/django-edw | backend/edw/admin/base_actions/update_terms.py | update_terms.py | py | 3,011 | python | en | code | 6 | github-code | 36 |
28523093007 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.logger import logger
from urbansim.estimation.estimation_runner import EstimationRunner as UrbansimEstimationRunner
from washtenaw.configs.baseline import Baseline
from urbansim.configs.config_changes_for_estimation import ConfigChangesForEstimation
# Maps a short CLI model key to
# [opus model name, specification module, model group (or None)].
models = {
    'hlcm': ['household_location_choice_model', 'washtenaw.estimation.HLCM_specification', None],
    'elcm-industrial': ['employment_location_choice_model', 'washtenaw.estimation.ELCM_specification', 'industrial'],
    'elcm-commercial': ['employment_location_choice_model', 'washtenaw.estimation.ELCM_specification', 'commercial'],
    'elcm-home_based': ['employment_location_choice_model', 'washtenaw.estimation.ELCM_specification', 'home_based'],
    'lpm': ['land_price_model', 'washtenaw.estimation.LPM_specification', None],
    'dplcm-industrial': ['development_project_location_choice_model', 'washtenaw.estimation.DPLCM_specification', 'industrial'],
    'dplcm-commercial': ['development_project_location_choice_model', 'washtenaw.estimation.DPLCM_specification', 'commercial'],
    'dplcm-residential': ['development_project_location_choice_model', 'washtenaw.estimation.DPLCM_specification', 'residential'],
    'rlsm': ['residential_land_share_model', 'washtenaw.estimation.RLSM_specification', None],
    }
class EstimationRunner(object):
    """Configures and runs UrbanSim estimation for one model key.

    Looks the model up in the module-level `models` table and delegates
    to urbansim's EstimationRunner with the Washtenaw baseline config.
    """

    def run_estimation(self, estimation_config, model_name, save_estimation_results=True):
        """Estimate `model_name` using the merged baseline + given config."""
        config = Baseline()
        config.merge(estimation_config)
        config['config_changes_for_estimation'] = ConfigChangesForEstimation()
        logger.start_block('Estimating %s' % model_name)
        try:
            estimator = UrbansimEstimationRunner(
                models[model_name][0],
                specification_module=models[model_name][1], model_group=models[model_name][2],
                configuration=config,
                save_estimation_results=save_estimation_results
            )
            estimator.estimate()
        finally:
            # Always close the logger block, even if estimation raises.
            logger.end_block()
if __name__ == '__main__':
    # Exactly one model key should be left uncommented below.
    #model_name = 'lpm'
    #model_name = 'hlcm'
    #model_name = 'elcm-industrial'
    #model_name = 'elcm-commercial'
    ###model_name = 'elcm-home_based'
    #model_name = 'dplcm-industrial'
    #model_name = 'dplcm-commercial'
    model_name = 'dplcm-residential'
    #model_name = 'rlsm'
    from washtenaw.estimation.my_estimation_config import my_configuration
EstimationRunner().run_estimation(my_configuration, model_name) | psrc/urbansim | washtenaw/estimation/run_estimation.py | run_estimation.py | py | 2,802 | python | en | code | 4 | github-code | 36 |
19608346992 | # PROBLEM:
# Given an array A of non-negative integers, return an array
# consisting of all the even elements of A, followed by all
# the odd elements of A.
# You may return any answer array that satisfies this condition.
# EXAMPLE:
# Input: [3,1,2,4]
# Output: [2,4,3,1]
# The outputs [4,2,3,1], [2,4,1,3], and [4,2,1,3] would also be accepted.
from typing import List
class Solution:
    """LeetCode 905 (Sort Array By Parity)."""

    # APPROACH: TWO LISTS
    # Collect evens and odds in one pass, preserving their relative
    # order, then concatenate. O(n) time, O(n) extra space.
    def approach(self, A: List[int]) -> List[int]:
        """Return A's even elements followed by its odd elements.

        Any ordering within each parity group is accepted by the
        problem; this implementation keeps the original relative order.
        """
        evens = []
        odds = []
        # Iterate values directly instead of `range(len(A))` indexing.
        for value in A:
            if value % 2 == 0:
                evens.append(value)
            else:
                odds.append(value)
        return evens + odds
if __name__ == '__main__':
    # Manual smoke test of the approach on a mixed-parity list.
    solution = Solution()
    A = [3,1,2,4,7,8,9,15]
print(solution.approach(A)) | angiereyes99/coding-interview-practice | easy-problems/SortArrayByParity.py | SortArrayByParity.py | py | 1,364 | python | en | code | 0 | github-code | 36 |
27139316721 | # References for fixed parameters:
# https://therideshareguy.com/uber-statistics/
# wikipedia
uber_drivers_worldwide = 3500000
uber_riders_worldwide = 93000000
initial_riders_ratio = uber_riders_worldwide / uber_drivers_worldwide
toledo_population = 270000
saturation_riders = 0.2 * toledo_population
saturation_drivers = saturation_riders / initial_riders_ratio
| lorenzobonomi/platformpricesmodel | parameters.py | parameters.py | py | 368 | python | en | code | 0 | github-code | 36 |
31280891768 | import boto3
import os
import botocore
import logging
from agief_experiment import utils
class Cloud:
    """AWS helpers for running AGI experiments.

    Wraps boto3 EC2/ECS/S3 calls plus the shell scripts under ../remote/
    used to sync experiments and results to remote hosts.
    """

    # EC2 instances will be launched into this subnet (in a vpc)
    subnet_id = 'subnet-0b1a206e'
    # For ECS, which cluster to use
    cluster = 'default'
    # When creating EC2 instances, the root ssh key to use
    mainkeyname = 'nextpair'
    # For compute hosts, which the security group to use
    ec2_compute_securitygroup_id = 'sg-98d574fc'
    # AZ for all EC2 instances
    availability_zone = 'ap-southeast-2a'
    # Placement group for EC2 instances
    placement_group = 'MNIST-PGroup'
    # Unique, case-sensitive identifier you provide to ensure
    # client_token = 'this_is_the_client_token_la_la_34'
    # The idempotency of the request.
    network_interface_id = 'eni - b2acd4d4'

    def __init__(self):
        pass
def sync_experiment(self, remote):
    """
    Sync experiment from this machine to remote machine
    """
    print("\n....... Use remote-sync-experiment.sh to "
          "rsync relevant folders.")
    cmd = ("../remote/remote-sync-experiment.sh " +
           remote.host_key_user_variables())
    # Presumably (interval, attempts) -- see utils.run_bashscript_repeat.
    utils.run_bashscript_repeat(cmd, 15, 6)
def remote_download_output(self, prefix, host_node):
    """ Download /output/prefix folder from remote storage (s3) to remote machine.
    :param host_node:
    :param prefix:
    :type host_node: RemoteNode
    """
    print("\n....... Use remote-download-output.sh to copy /output files "
          "from s3 (typically input and data files) with "
          "prefix = " + prefix + ", to remote machine.")
    cmd = ("../remote/remote-download-output.sh " + " " + prefix +
           " " + host_node.host_key_user_variables())
    # Presumably (interval, attempts) -- see utils.run_bashscript_repeat.
    utils.run_bashscript_repeat(cmd, 15, 6)
def remote_docker_launch_compute(self, host_node):
    """
    Launch the compute node in a docker container on the remote host.

    Assumes there exists a private key for the given
    ec2 instance, at keypath
    """
    print("\n....... Launch compute node in a docker container "
          "on a remote host.")
    # Executed over ssh on the remote: source the experiment variables
    # file, then start the compute container detached (-d).
    commands = '''
        export VARIABLES_FILE={0}
        source {0}
        cd $AGI_HOME/bin/node_coordinator
        ./run-in-docker.sh -d
    '''.format(host_node.remote_variables_file)
    return utils.remote_run(host_node, commands)
def ecs_run_task(self, task_name):
    """ Run task 'task_name' on the ECS cluster and return the Task ARN.

    Exits the process (exit code 1) if AWS reports a failure or no
    task is returned.
    """
    print("\n....... Running task on ecs ")
    client = boto3.client('ecs')
    response = client.run_task(
        cluster=self.cluster,
        taskDefinition=task_name,
        count=1,
        startedBy='pyScript'
    )
    # BUG FIX: was `logging.debug("LOG: " + response)` -- concatenating a
    # dict to str raises TypeError; use lazy %-style logging instead.
    logging.debug("LOG: %s", response)
    if len(response['failures']) > 0:
        logging.error("Could not initiate task on AWS.")
        logging.error("reason = " + response['failures'][0]['reason'])
        logging.error("arn = " + response['failures'][0]['arn'])
        logging.error(" ----- exiting -------")
        exit(1)
    if len(response['tasks']) <= 0:
        logging.error("could not retrieve task arn when initiating task "
                      "on AWS - something has gone wrong.")
        exit(1)
    task_arn = response['tasks'][0]['taskArn']
    return task_arn
def ecs_stop_task(self, task_arn):
    """Stop the ECS task identified by `task_arn`."""
    print("\n....... Stopping task on ecs ")
    client = boto3.client('ecs')
    response = client.stop_task(
        cluster=self.cluster,
        task=task_arn,
        reason='pyScript said so!'
    )
    # BUG FIX: was str + dict concatenation, which raises TypeError.
    logging.debug("LOG: %s", response)
def ec2_start_from_instanceid(self, instance_id):
    """
    Run the chosen instance specified by instance_id
    :return: the instance AWS public and private ip addresses
    """
    print("\n....... Starting ec2 (instance id " + instance_id + ")")
    ec2 = boto3.resource('ec2')
    instance = ec2.Instance(instance_id)
    response = instance.start()
    # BUG FIX: was `"LOG: Start response: " + response` -- concatenating
    # the start() response dict to str raises TypeError.
    print("LOG: Start response:", response)
    instance_id = instance.instance_id
    ips = self.ec2_wait_till_running(instance_id)
    return ips
def ec2_start_from_ami(self, name, ami_id, min_ram):
    """
    Launch a new EC2 instance from an AMI and wait until it is running.

    :param name: value for the instance's Name tag
    :param ami_id: ami id
    :param min_ram: (integer), minimum ram to allocate to ec2 instance
    :return: ip addresses: public and private, and instance id
    """
    print("\n....... Launching ec2 from AMI (AMI id " + ami_id +
          ", with minimum " + str(min_ram) + "GB RAM)")
    # Pick the smallest instance type whose RAM covers min_ram.
    # minimum size, 15GB on machine, leaves 13GB for compute
    instance_type = None
    ram_allocated = 8
    if min_ram < 6:
        instance_type = 'm4.large'  # 8
        ram_allocated = 8
    elif min_ram < 13:
        instance_type = 'r3.large'  # 15.25
        ram_allocated = 15.25
    elif min_ram < 28:
        instance_type = 'r3.xlarge'  # 30.5
        ram_allocated = 30.5
    else:
        logging.error("cannot create an ec2 instance with that much RAM")
        exit(1)
    print("\n............. RAM to be allocated: " + str(ram_allocated) +
          " GB RAM")
    ec2 = boto3.resource('ec2')
    subnet = ec2.Subnet(self.subnet_id)
    # Set the correct Logz.io token in EC2 (injected via cloud-init).
    logzio_token = os.getenv("AGI_LOGZIO_TOKEN")
    user_data = '''
    #!/bin/sh
    echo export AGI_LOGZIO_TOKEN=%s >> /etc/environment
    ''' % (logzio_token)
    instance = subnet.create_instances(
        DryRun=False,
        ImageId=ami_id,
        MinCount=1,
        MaxCount=1,
        KeyName=self.mainkeyname,
        SecurityGroupIds=[
            self.ec2_compute_securitygroup_id,
        ],
        InstanceType=instance_type,
        Placement={
            'AvailabilityZone': self.availability_zone,
            # 'GroupName': self.placement_group,
            'Tenancy': 'default'  # | 'dedicated' | 'host',
        },
        Monitoring={
            'Enabled': False
        },
        DisableApiTermination=False,
        InstanceInitiatedShutdownBehavior='terminate',  # | 'stop'
        # ClientToken=self.client_token,
        AdditionalInfo='started by run-framework.py',
        # IamInstanceProfile={
        #     'Arn': 'string',
        #     'Name': 'string'
        # },
        EbsOptimized=False,
        UserData=user_data
    )
    instance_id = instance[0].instance_id
    logging.debug("Instance launched %s", instance_id)
    # set name
    response = ec2.create_tags(
        DryRun=False,
        Resources=[
            instance_id,
        ],
        Tags=[
            {
                'Key': 'Name',
                'Value': name
            },
        ]
    )
    logging.debug("Set Name tag on instanceid: %s", instance_id)
    logging.debug("Response is: %s", response)
    ips = self.ec2_wait_till_running(instance_id)
    return ips, instance_id
def ec2_wait_till_running(self, instance_id):
    """
    Block until the instance reports 'running'.
    :return: the instance AWS public and private ip addresses
    """
    ec2 = boto3.resource('ec2')
    instance = ec2.Instance(instance_id)
    print("wait_till_running for instance: ", instance)
    instance.wait_until_running()
    ip_public = instance.public_ip_address
    ip_private = instance.private_ip_address
    print("Instance is up and running ...")
    self.print_ec2_info(instance)
    return {'ip_public': ip_public, 'ip_private': ip_private}
def ec2_stop(self, instance_id):
    """Stop (not terminate) the given EC2 instance."""
    print("\n...... Closing ec2 instance (instance id " +
          str(instance_id) + ")")
    ec2 = boto3.resource('ec2')
    instance = ec2.Instance(instance_id)
    self.print_ec2_info(instance)
    response = instance.stop()
    print("stop ec2: ", response)
def remote_upload_runfilename_s3(self, host_node, prefix, dest_name):
    """Upload the run filename to S3 via remote-upload-runfilename.sh.

    Failures are logged and swallowed (best-effort upload).
    """
    # BUG FIX (likely): a " " separator was missing between dest_name and
    # the host/key/user arguments, fusing two shell arguments together --
    # every sibling method inserts a space before those arguments.
    cmd = ("../remote/remote-upload-runfilename.sh " + " " + prefix +
           " " + dest_name + " " +
           host_node.host_key_user_variables())
    try:
        utils.run_bashscript_repeat(cmd, 3, 3)
    except Exception as e:
        logging.error("Remote Upload Failed for this file")
        logging.error("Exception: %s", e)
def remote_upload_output_s3(self, host_node, prefix, no_compress,
                            csv_output):
    """Upload the remote machine's /output/prefix files to S3 via script."""
    cmd = "../remote/remote-upload-output.sh " + prefix + " "
    cmd += host_node.host_key_user_variables() + " "
    cmd += str(no_compress) + " " + str(csv_output)
    utils.run_bashscript_repeat(cmd, 3, 3)
def upload_folder_s3(self, bucket_name, key, source_folderpath):
    """Recursively upload every file under source_folderpath to S3.

    Keys mirror the folder's relative layout beneath `key`. Missing or
    non-directory paths are logged and skipped.
    """
    if not os.path.exists(source_folderpath):
        logging.warning("folder does not exist, cannot upload: " +
                        source_folderpath)
        return
    if not os.path.isdir(source_folderpath):
        logging.warning("path is not a folder, cannot upload: " +
                        source_folderpath)
        return
    for root, _dirs, files in os.walk(source_folderpath):
        for f in files:
            # BUG FIX: paths were joined against source_folderpath instead
            # of the walked `root`, so files inside subdirectories resolved
            # to non-existent paths and were silently skipped.
            filepath = os.path.join(root, f)
            relative = os.path.relpath(filepath, source_folderpath)
            filekey = os.path.join(key, relative)
            self.upload_file_s3(bucket_name, filekey, filepath)
@staticmethod
def upload_file_s3(bucket_name, key, source_filepath):
    """Upload one local file to S3, creating the bucket if needed.

    Empty or missing files are logged and skipped.
    """
    try:
        if os.stat(source_filepath).st_size == 0:
            logging.warning("file is empty, cannot upload: " +
                            source_filepath)
            return
    except OSError:
        logging.warning("file does not exist, cannot upload: " +
                        source_filepath)
        return
    s3 = boto3.resource('s3')
    exists = True
    try:
        s3.meta.client.head_bucket(Bucket=bucket_name)
    except botocore.exceptions.ClientError as e:
        # If a client error is thrown, then check that it was a 404 error.
        # If it was a 404 error, then the bucket does not exist.
        error_code = int(e.response['Error']['Code'])
        if error_code == 404:
            exists = False
    if not exists:
        logging.warning("s3 bucket " + bucket_name +
                        " does not exist, creating it now.")
        s3.create_bucket(Bucket=bucket_name)
    print(" ... file = " + source_filepath + ", to bucket = " +
          bucket_name + ", key = " + key)
    # BUG FIX: the file handle was opened inline and never closed; use a
    # context manager so it is released promptly.
    with open(source_filepath, 'rb') as body:
        response = s3.Object(bucket_name=bucket_name, key=key).put(Body=body)
    # BUG FIX: logging.debug("Response = : ", response) passed the response
    # as an unused extra positional argument; log it via %s instead.
    logging.debug("Response = %s", response)
@staticmethod
def print_ec2_info(instance):
    """Print the instance's public/private IPs and id for diagnostics."""
    print("Instance details.")
    print(" -- Public IP address is: ", instance.public_ip_address)
    print(" -- Private IP address is: ", instance.private_ip_address)
    print(" -- id is: ", str(instance.instance_id))
| Cerenaut/run-framework | scripts/run-framework/agief_experiment/cloud.py | cloud.py | py | 11,421 | python | en | code | 2 | github-code | 36 |
4005677116 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 5 16:06:36 2018
@author: jose.molina
"""
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 5 15:39:40 2018
@author: jose.molina
"""
from bs4 import BeautifulSoup
from selenium import webdriver
import requests
from xml.etree import ElementTree
from time import sleep
import pandas as pd
from dateutil.relativedelta import relativedelta
import re
def getvalueofnode(node):
    """Return the node unchanged (None stays None).

    NOTE: the original `node if node is not None else None` is the
    identity function; simplified accordingly. Kept as a hook in case
    per-field cleaning is added later.
    """
    return node
# Output schema: one row per restaurant (Spanish column names kept to
# match the exported spreadsheet).
dfcols = ['nombre', 'link', 'overall_rating','ranking','rango_precio','num_opiniones','ops_exc','ops_muybueno','ops_normal','ops_malo','ops_pesimo','punt_servicio','punt_comida','punt_calprecio','direccion','ubicacion','telefono']
df_xml = pd.DataFrame(columns=dfcols)
url = 'https://www.tripadvisor.es/Restaurants-g187514-Madrid.html#EATERY_OVERVIEW_BOX'
# NOTE(review): hard-coded local chromedriver path -- machine-specific.
browser = webdriver.Chrome(r'C:\Users\Jose.Molina\Downloads\WinPython\projects\tripadvisor\chromedriver.exe')
#'/home/josemolina/programs_python/geckodriver'
browser.implicitly_wait(10)
browser.get(url)
# Switch the listing to alphabetical order (li id="alphabetical").
alpha = browser.find_element_by_id('alphabetical')
alpha.click()
browser.implicitly_wait(10)
contador = 0
# NOTE(review): `next` shadows the builtin and is never set to False, so
# the loop only ends when find_element_by_css_selector raises.
next = True
# Each pass of this loop scrapes one full listing page.
while next == True:
    html = BeautifulSoup(browser.page_source, 'html.parser')
    table = html.find_all('div',{'data-index': re.compile(r".*")})
    for row in table:
        item = row.find('div', class_='title')
        link = item.find('a')
        link ="https://www.tripadvisor.es"+link['href']
        browser.get(link)
        #print(link['href'])
        #elemento = browser.find_element_by_xpath('//a[@href="'+link['href']+'"]')
        #elemento.click()
        browser.get(browser.current_url)
        bar_html = BeautifulSoup(browser.page_source,'html.parser')
        # --- fields scraped from the restaurant detail page ---
        name = bar_html.find('h1',{'class':'heading_title'})
        rating = bar_html.find('span',{'class':'overallRating'})
        ranking = (bar_html.find('span',{'class':'header_popularity'})).find('span')
        print(ranking.text)
        precio = (bar_html.find('span',{'class':['ui_column',"is-6","price"]})).find('span')
        print(precio.text)
        # --- end of scraped fields ---
        # Remaining columns are placeholders (literal column names).
        df_xml = df_xml.append(
            pd.Series([getvalueofnode(name.text), getvalueofnode(link), getvalueofnode(rating.text),getvalueofnode(ranking.text),getvalueofnode(precio.text),'num_opiniones','ops_exc','ops_muybueno','ops_normal','ops_malo','ops_pesimo','punt_servicio','punt_comida','punt_calprecio','direccion','ubicacion','telefono'], index=dfcols),
            ignore_index=True)
        contador += 1
        print(f'Contrato numero: {contador}')
        # Go back to the listing page for the next restaurant.
        browser.execute_script('window.history.go(-1)')
        #if (times == 0):
        browser.get(browser.current_url)
    # Advance to the next listing page.
    nextpage = browser.find_element_by_css_selector('a.nav').click()
    # if class = disabled :
    # next = False
    # else:
    #
    #
    # try:
    #     nextpage = browser.find_element_by_css_selector('a.nav').click()
    ##     nextpage = browser.execute_script(" ta.restaurant_filter.paginate(this.getAttribute('data-offset'));; ta.trackEventOnPage('STANDARD_PAGINATION', 'next', '2', 0); return false;")
    #     if (nextpage):
    #         nextpage.click()
    #     else:
    #         next = False
    # except:
    #     next = False
#browser.close()
#    expediente = browser.get(link.get_attribute('href'))
#expediente.click()
df_xml.to_excel("tripadvisor_restaurantes_madrid.xlsx", index = False)
# grab the id of the current window
#main_window = browser.cur | josemolinag/scraping | cosas.py | cosas.py | py | 3,820 | python | en | code | 0 | github-code | 36 |
4419617010 | """
Simple BBS
簡単な掲示板
要件:
1. ページ上部に大きくSimple BBSと書かれている
2. Username と Messageを入力するフォームがある
3. 送信と書かれたスイッチがある
4. 入力された文字が掲示板に表示されていく(下段に追加されていく)
5. Username に何も入力されていない状態で送信された場合は名無しさんにする
6. Message に何も入力されていない状態で送信された場合は空欄にする
"""
import os
from flask import Flask, render_template, request
app = Flask(__name__)
def _read_documents():
    """Return all [username, message] rows stored in document.txt."""
    documents = []
    if not os.path.isfile("document.txt"):
        return documents
    # Files are closed deterministically via the context manager.
    with open('document.txt', 'r') as file:
        line = file.readline()[:-1]
        while line:
            documents.append(line.split(','))
            line = file.readline()[:-1]
    return documents


@app.route('/', methods=['GET', 'POST'])
def index():
    """Show the board (GET) or append a comment and re-render (POST)."""
    if request.method == 'GET':
        return render_template('BBS.html', documents=_read_documents())
    # POST: store the submitted comment, then show the updated board.
    username = request.form['username']
    if username == '':
        username = '名無しさん'
    message = request.form['message']
    # NOTE(review): a comma inside username/message will split into extra
    # columns on read-back -- confirm whether escaping is needed.
    with open('document.txt', mode='a') as file:
        file.write(f'{username},{message}\n')
    return render_template('BBS.html', documents=_read_documents())
if __name__ == '__main__':
    # debug=True: development server only -- do not use in production.
    app.run(debug=True)
| tetsuya-yamamoto-ai-learn/practice01-F | WebAP.py | WebAP.py | py | 2,101 | python | ja | code | 0 | github-code | 36 |
25462981448 | import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
def system_of_odes(t, y):
    """Right-hand side of a damped linear oscillator as a first-order system.

    The state vector ``y`` stacks the positions in its first half and the
    velocities in its second half; the returned array stacks the matching
    time derivatives in the same layout, so solve_ivp can integrate it.
    """
    half = len(y) // 2
    positions, velocities = y[:half], y[half:]
    # d(position)/dt = velocity; d(velocity)/dt = damping term + spring term.
    acceleration = -0.001 * velocities - 3 * positions
    return np.concatenate([velocities, acceleration])
# Define the initial conditions
initial_conditions = [1, 0]    # Initial values for y1 and y2
initial_derivatives = [0, 1]   # Initial values for the derivatives dy1/dt and dy2/dt
initial_state = np.concatenate([initial_conditions, initial_derivatives])

# Define the time span for the solution
time_span = (0, 5)  # Solve from t=0 to t=5 (comment fixed: it said t=1)

# Solve the system of ODEs
solution = solve_ivp(system_of_odes, time_span, initial_state)

# Access the solution.  Bug fix: the stray breakpoint() left over from
# debugging has been removed so the script runs unattended.
t_values = solution.t               # Array of time values
n = len(initial_conditions)         # Number of second-order equations (was hardcoded to 2)
y1_values = solution.y[:n]          # Positions
y2_values = solution.y[n:]          # Velocities

# Plot the solution
plt.plot(t_values, y1_values[0], label='y1')
plt.plot(t_values, y2_values[0], label='y2')
plt.xlabel('Time')
plt.ylabel('Solution')
plt.title('Solution of the System of ODEs')
plt.legend()
plt.grid(True)
plt.show()
74226664423 | import pytest
from functions import basic_functions
def test_count_animal(spark):
    """Row-count check for count_animal, using a plain assert.

    count_animal capitalises the first letter of animal_group before
    counting, so the fixture covers lowercase, capitalised and uppercase
    spellings of "cat", plus one non-cat row that must be excluded.
    Structure follows the Arrange / Act / Assert pattern.
    """
    # Arrange: three spellings of "cat" and one dog that must not count.
    input_df = spark.createDataFrame(
        [
            [1, "cat"],    # lowercase
            [2, "Cat"],    # first letter capitalised
            [3, "CAT"],    # uppercase
            [4, "dog"],    # excluded from the count
        ],
        ["id", "animal_group"],
    )

    # Act
    result = basic_functions.count_animal(input_df, "Cat")

    # Assert
    assert result == 3
def test_format_columns(spark):
    """Check the renamed column names, including their order.

    Only the schema matters here, so a single-row DataFrame is enough;
    the .columns property is compared against a list, which asserts the
    order too.  See test_format_columns_unordered for the variant that
    ignores column order.
    """
    # Arrange
    input_df = spark.createDataFrame(
        [[1, "Cat", 1, "CAT STUCK IN TREE"]],
        ["IncidentNumber", "AnimalGroupParent", "PumpCount", "FinalDescription"],
    )

    # Act
    renamed = basic_functions.format_columns(input_df)

    # Assert: exact names in the exact order.
    assert renamed.columns == ["incident_number", "animal_group", "engine_count", "description"]
def test_format_columns_unordered(spark):
    """Order-insensitive variant of test_format_columns.

    Comparing Python sets ignores ordering, so the input columns are
    deliberately arranged differently from the ordered test above.
    """
    # Arrange: note the shuffled column order.
    input_df = spark.createDataFrame(
        [[1, "Cat", 1, "CAT STUCK IN TREE"]],
        ["AnimalGroupParent", "PumpCount", "IncidentNumber", "FinalDescription"],
    )

    # Act
    renamed = basic_functions.format_columns(input_df)

    # Assert: sets are unordered, so only membership is checked.
    assert set(renamed.columns) == {"incident_number", "animal_group", "engine_count", "description"}
39303528940 | #!usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: admin
@file: main.py
@time: 2021/09/02
@desc:
"""
import time
import torch
from model import config
from model.data_process import PrepareData
from model.Transformer import make_model
from model.LabelSmoothing import LabelSmoothing
from model.opt import NoamOpt
from train_evaluate import train
from predict import predict
def main():
    """Run the full pipeline: preprocess data, build the Transformer,
    train it, then reload the saved weights and run prediction."""
    # Data preprocessing
    data = PrepareData(config.TRAIN_FILE, config.DEV_FILE)
    src_vocab = len(data.en_word_dict)
    tgt_vocab = len(data.cn_word_dict)
    # Observed vocabulary sizes for the bundled corpus:
    # src_vocab 5493
    # tgt_vocab 2537
    print("src_vocab %d" % src_vocab)
    print("tgt_vocab %d" % tgt_vocab)

    # Initialise the model
    model = make_model(
                        src_vocab,
                        tgt_vocab,
                        config.LAYERS,
                        config.D_MODEL,
                        config.D_FF,
                        config.H_NUM,
                        config.DROPOUT
                    )

    # Training
    print(">>>>>>> start train")
    train_start = time.time()
    # Label smoothing is configured but effectively disabled (smoothing=0.0).
    criterion = LabelSmoothing(tgt_vocab, padding_idx=0, smoothing=0.0)
    # Optimiser wrapper with warm-up scheduling; args appear to be
    # (model_size, factor, warmup_steps, optimizer) - confirm in model/opt.py.
    optimizer = NoamOpt(config.D_MODEL, 1, 2000,
                        torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))

    train(data, model, criterion, optimizer)
    print(f"<<<<<<< finished train, cost {time.time() - train_start:.4f} seconds")

    # Prediction
    # Load the weights saved during training
    model.load_state_dict(torch.load(config.SAVE_FILE))
    # Start predicting
    print(">>>>>>> start predict")
    evaluate_start  = time.time()
    predict(data, model)
    print(f"<<<<<<< finished evaluate, cost {time.time() - evaluate_start:.4f} seconds")
if __name__ == '__main__':
main()
| coinyue/Transformer | main.py | main.py | py | 1,629 | python | en | code | 0 | github-code | 36 |
6797262441 | from utils.faker_factory import faker
from ..mails import BaseMailView
class OpportunityReminderCloseMailView(BaseMailView):
    """
    Mail view for the "opportunity is about to close" reminder e-mail.

    Declares the template, the template arguments callers must supply,
    the section the mail belongs to, and the subject line (interpolated
    with the same arguments).
    """
    template_name = 'mails/opportunity/opportunity_reminder_close.html'
    # Template arguments that every caller must provide.
    mandatory_mail_args = [
        'title',
        'created_by_name',
        'duedate_timedelta',
        'duedate',
        'public_url',
    ]
    section = 'opportunities'
    subject = '%(duedate_timedelta)s until opportunity closure'

    def get_mock_data(self, optional=True):
        """Return example template arguments used for mail previews/tests."""
        mock_data = {
            'title': '[Role Name] for [Project Name]',
            'created_by_name': '[SDM Name]',
            'duedate_timedelta': '3 days',
            'duedate': '[May 29, 12AM]',
            'disable_notification_url': None,
            'public_url': '/{}'.format(faker.uri_path()),
        }
        return mock_data
| tomasgarzon/exo-services | service-exo-mail/mail/mailviews/opportunity_reminder_close.py | opportunity_reminder_close.py | py | 850 | python | en | code | 0 | github-code | 36 |
36891664339 | # functions for handling ABI checking of libraries
import Options, Utils, os, Logs, samba_utils, sys, Task, fnmatch, re, Build
from TaskGen import feature, before, after
# these type maps cope with platform specific names for common types
# please add new type mappings into the list below
abi_type_maps = {
    '_Bool' : 'bool',
    'struct __va_list_tag *' : 'va_list'
    }

# sort key that orders dotted version strings numerically (so "1.10" > "1.9")
version_key = lambda x: map(int, x.split("."))
def normalise_signature(sig):
    '''normalise a signature from gdb

    Strips gdb's value-print decorations ("$N = {...}"), replaces concrete
    addresses with a 0xXXXX placeholder, and maps platform specific type
    names to a portable spelling via abi_type_maps, so signatures can be
    compared across platforms and gdb versions.
    '''
    sig = sig.strip()
    # drop the leading "$<n> = {" that gdb's print command emits
    sig = re.sub('^\$[0-9]+\s=\s\{*', '', sig)
    # drop the trailing "}" and any "0x... <symbol>" suffix
    sig = re.sub('\}(\s0x[0-9a-f]+\s<\w+>)?$', '', sig)
    # addresses differ between runs; normalise them all to a placeholder
    sig = re.sub('0x[0-9a-f]+', '0xXXXX', sig)
    for t in abi_type_maps:
        # we need to cope with non-word characters in mapped types
        m = t
        m = m.replace('*', '\*')
        # anchor with \b only where the pattern edge is a word character,
        # otherwise the regex could never match
        if m[-1].isalnum() or m[-1] == '_':
            m += '\\b'
        if m[0].isalnum() or m[0] == '_':
            m = '\\b' + m
        sig = re.sub(m, abi_type_maps[t], sig)
    return sig
def normalise_varargs(sig):
    '''cope with older versions of gdb

    Some gdb versions print varargs functions with a trailing ", ..." in
    the argument list; strip it so signatures compare equal either way.
    '''
    return re.sub(',\s\.\.\.', '', sig)
def parse_sigs(sigs, abi_match):
    '''parse ABI signatures file

    Each line has the form "symbol: signature".  Returns a dict mapping
    symbol name -> normalised signature, keeping only symbols accepted by
    the abi_match patterns: patterns are tried in order, a '!'-prefixed
    pattern excludes the symbol, and the first match wins.  An empty
    abi_match accepts everything.
    '''
    abi_match = samba_utils.TO_LIST(abi_match)
    ret = {}
    a = sigs.split('\n')
    for s in a:
        # skip lines that are not "symbol: signature" records
        if s.find(':') == -1:
            continue
        sa = s.split(':')
        if abi_match:
            matched = False
            for p in abi_match:
                # exclusion pattern: stop scanning, symbol is rejected
                if p[0] == '!' and fnmatch.fnmatch(sa[0], p[1:]):
                    break
                elif fnmatch.fnmatch(sa[0], p):
                    matched = True
                    break
            if not matched:
                continue
        ret[sa[0]] = normalise_signature(sa[1])
    return ret
def save_sigs(sig_file, parsed_sigs):
    '''save ABI signatures to a file

    Writes one "symbol: signature" line per symbol, sorted by symbol name,
    and returns whatever samba_utils.save_file returns.
    '''
    lines = ['%s: %s\n' % (name, parsed_sigs[name])
             for name in sorted(parsed_sigs.keys())]
    return samba_utils.save_file(sig_file, ''.join(lines), create_dir=True)
def abi_check_task(self):
    '''check if the ABI has changed

    Extracts the current signatures from the built library (via the
    abi_gen script), compares them with the saved .sigs file, and fails
    the build on any removed, changed, or newly added public symbol.
    When no .sigs file exists yet, or --abi-update was given, the file is
    (re)generated instead of checked.
    '''
    abi_gen = self.ABI_GEN
    libpath = self.inputs[0].abspath(self.env)
    libname = os.path.basename(libpath)
    # run the external extraction script against the built library
    sigs = Utils.cmd_output([abi_gen, libpath])
    parsed_sigs = parse_sigs(sigs, self.ABI_MATCH)

    sig_file = self.ABI_FILE
    old_sigs = samba_utils.load_file(sig_file)
    if old_sigs is None or Options.options.ABI_UPDATE:
        # first run (or explicit update): record the current ABI and stop
        if not save_sigs(sig_file, parsed_sigs):
            raise Utils.WafError('Failed to save ABI file "%s"' % sig_file)
        Logs.warn('Generated ABI signatures %s' % sig_file)
        return

    parsed_old_sigs = parse_sigs(old_sigs, self.ABI_MATCH)

    # check all old sigs
    got_error = False
    for s in parsed_old_sigs:
        if not s in parsed_sigs:
            Logs.error('%s: symbol %s has been removed - please update major version\n\tsignature: %s' % (
                libname, s, parsed_old_sigs[s]))
            got_error = True
        elif normalise_varargs(parsed_old_sigs[s]) != normalise_varargs(parsed_sigs[s]):
            Logs.error('%s: symbol %s has changed - please update major version\n\told_signature: %s\n\tnew_signature: %s' % (
                libname, s, parsed_old_sigs[s], parsed_sigs[s]))
            got_error = True

    # any symbol present now but not before is an addition
    for s in parsed_sigs:
        if not s in parsed_old_sigs:
            Logs.error('%s: symbol %s has been added - please mark it _PRIVATE_ or update minor version\n\tsignature: %s' % (
                libname, s, parsed_sigs[s]))
            got_error = True

    if got_error:
        raise Utils.WafError('ABI for %s has changed - please fix library version then build with --abi-update\nSee http://wiki.samba.org/index.php/Waf#ABI_Checking for more information' % libname)
# register abi_check_task as a waf task type; quiet suppresses per-task output
t = Task.task_type_from_func('abi_check', abi_check_task, color='BLUE', ext_in='.bin')
t.quiet = True

# allow "waf --abi-check" to force re-checking the ABI
if '--abi-check' in sys.argv:
    Task.always_run(t)
@after('apply_link')
@feature('abi_check')
def abi_check(self):
    '''check that ABI matches saved signatures

    Task-generator method: schedules an abi_check task on the linked
    library output, configured with the .sigs file location, the symbol
    match patterns, and the extraction script.
    '''
    env = self.bld.env
    if not env.ABI_CHECK or self.abi_directory is None:
        return

    # if the platform doesn't support -fvisibility=hidden then the ABI
    # checks become fairly meaningless
    if not env.HAVE_VISIBILITY_ATTR:
        return

    topsrc = self.bld.srcnode.abspath()
    abi_gen = os.path.join(topsrc, 'buildtools/scripts/abi_gen.sh')

    # .sigs files are named <name>-<version>.sigs inside abi_directory
    abi_file = "%s/%s-%s.sigs" % (self.abi_directory, self.name, self.vnum)

    tsk = self.create_task('abi_check', self.link_task.outputs[0])
    tsk.ABI_FILE = abi_file
    tsk.ABI_MATCH = self.abi_match
    tsk.ABI_GEN = abi_gen
def abi_process_file(fname, version, symmap):
    '''process one ABI file, adding new symbols to the symmap

    Each line of the file is "symbol: signature"; a symbol is recorded with
    the version of the first file it was seen in, so callers must pass the
    files in ascending version order.  The handle is now closed even if
    reading raises (the original leaked it on error).
    '''
    f = open(fname, mode='r')
    try:
        for line in f:
            symname = line.split(":")[0]
            if not symname in symmap:
                symmap[symname] = version
    finally:
        f.close()
def abi_write_vscript(vscript, libname, current_version, versions, symmap, abi_match):
    '''write a vscript file for a library in --version-script format

    :param vscript: Path to the vscript file
    :param libname: Name of the library, uppercased
    :param current_version: Current version
    :param versions: Versions to consider
    :param symmap: Dictionary mapping symbols -> version
    :param abi_match: List of symbols considered to be public in the current version

    Fixes over the original: the output handle is closed even when a write
    raises, and a stray trailing semicolon was removed.
    '''
    # invert symbol -> version into version -> [symbols]
    invmap = {}
    for s in symmap:
        invmap.setdefault(symmap[s], []).append(s)

    f = open(vscript, mode='w')
    try:
        last_key = ""
        versions = sorted(versions, key=version_key)
        for k in versions:
            symver = "%s_%s" % (libname, k)
            if symver == current_version:
                break
            f.write("%s {\n" % symver)
            if k in invmap:
                f.write("\tglobal: \n")
            for s in invmap.get(k, []):
                f.write("\t\t%s;\n" % s)
            # each node inherits from the previous version's node
            f.write("}%s;\n\n" % last_key)
            last_key = " %s" % symver
        # the current version exports the abi_match patterns; everything
        # else is local unless the match list is the catch-all "*"
        f.write("%s {\n" % current_version)
        f.write("\tglobal:\n")
        for x in abi_match:
            f.write("\t\t%s;\n" % x)
        if abi_match != ["*"]:
            f.write("\tlocal: *;\n")
        f.write("};\n")
    finally:
        f.close()
def abi_build_vscript(task):
    '''generate a vscript file for our public libraries

    Task rule: collects symbol -> first-version mappings from every input
    .sigs file (named <LIBNAME>-<version>.sigs) and writes the combined
    linker version script to the task's output.
    '''
    tgt = task.outputs[0].bldpath(task.env)

    symmap = {}
    versions = []
    for f in task.inputs:
        fname = f.abspath(task.env)
        basename = os.path.basename(fname)
        # extract "<version>" from "<LIBNAME>-<version>.sigs"
        version = basename[len(task.env.LIBNAME)+1:-len(".sigs")]
        versions.append(version)
        abi_process_file(fname, version, symmap)
    abi_write_vscript(tgt, task.env.LIBNAME, task.env.VERSION, versions, symmap,
                      task.env.ABI_MATCH)
def ABI_VSCRIPT(bld, libname, abi_directory, version, vscript, abi_match=None):
    '''generate a vscript file for our public libraries

    Build-context helper: globs the library's .sigs files (sorted by
    version), normalises the library and version names into symbol-safe
    uppercase identifiers, and registers a generator task that runs
    abi_build_vscript.  abi_match defaults to exporting everything ("*").
    '''
    if abi_directory:
        source = bld.path.ant_glob('%s/%s-[0-9]*.sigs' % (abi_directory, libname))
        def abi_file_key(path):
            # sort "<libname>-<version>.sigs" files numerically by version
            return version_key(path[:-len(".sigs")].rsplit("-")[-1])
        source = sorted(source.split(), key=abi_file_key)
    else:
        source = ''

    libname = os.path.basename(libname)
    version = os.path.basename(version)
    # symbol version nodes may only contain word characters
    libname = libname.replace("-", "_").replace("+","_").upper()
    version = version.replace("-", "_").replace("+","_").upper()

    t = bld.SAMBA_GENERATOR(vscript,
                            rule=abi_build_vscript,
                            source=source,
                            group='vscripts',
                            target=vscript)
    if abi_match is None:
        abi_match = ["*"]
    else:
        abi_match = samba_utils.TO_LIST(abi_match)
    t.env.ABI_MATCH = abi_match
    t.env.VERSION = version
    t.env.LIBNAME = libname
    t.vars = ['LIBNAME', 'VERSION', 'ABI_MATCH']
Build.BuildContext.ABI_VSCRIPT = ABI_VSCRIPT
9587927527 | import os
import shutil
from plugin import plugin
@plugin("file manage")
class file_manage:
""""
Can manipulate files and folders by deleting, moving, or renaming.
"""
def __call__(self, jarvis, s):
self.get_file_directory(jarvis)
self.get_cmd(jarvis)
if self.cmd == "delete":
self.delete(jarvis, self.file)
elif self.cmd == "move":
self.move(jarvis, self.file)
elif self.cmd == "rename":
self.rename(jarvis, self.file)
# determine if directory entered is a file or folder
if os.path.isfile(self.file):
self.folder = False
else:
self.folder = True
def get_file_directory(self, jarvis):
self.file = jarvis.input("Enter the directory of the file you would like to edit: ")
def get_cmd(self, jarvis):
# function to find command to be performed to file
self.possibleCmds = ["delete", "move", "rename"]
cmdValid = False
while not cmdValid:
# iterate through possible commands and say each
jarvis.say("Commands Avaliable")
i = 1
for cmd in self.possibleCmds:
jarvis.say(str(i) + ". " + cmd)
i = i + 1
self.cmd = jarvis.input("Enter command to be performed: ")
# check if command is valid. If not, end cycle
if self.cmd not in self.possibleCmds:
jarvis.say("Invalid command")
else:
cmdValid = True
def delete(self, jarvis, file):
# function to delete files
if self.folder is False:
# first, check if file exists
if os.path.exists(file):
yes = True
while yes:
# confirm that file should be deleted
confirmation = jarvis.input("Are you sure you want to delete this file? This cannot be undone. (y/n)").lower()
if confirmation == "y":
try:
# delete file
if not self.folder:
os.remove(file)
else:
os.rmdir(file)
except:
jarvis.say("Invalid file path")
# break loop after removing file
yes = False
elif confirmation == "n":
# break loop if no confirmation
yes = False
else:
jarvis.say("Invalid input")
else:
jarvis.say("file does not exist")
def move(self, jarvis, file):
# function to move files
path_invalid = True
while path_invalid:
# get destination
dest = jarvis.input("Where would you like to move this file to? :")
try:
# move from old location
shutil.move(file, dest)
path_invalid = False
except:
jarvis.say("Invalid path")
def rename(self, jarvis, file):
# function to rename files
path_invalid = True
while path_invalid:
# get new name
new_name = jarvis.input("What would you like to rename this file to? :")
# get root directory
root = os.path.split(file)[0]
new_dir = os.path.join(root, new_name)
try:
os.rename(file, new_dir)
path_invalid = False
except:
jarvis.say("Invalid Path")
| sukeesh/Jarvis | jarviscli/plugins/file_manager.py | file_manager.py | py | 3,709 | python | en | code | 2,765 | github-code | 36 |
8424152758 | #!/usr/bin/env python3
import pandas as pd
def top_bands():
    """Join the bands table with the UK top-40 chart on band/artist name.

    Both name columns are capitalised first so the (inner) merge tolerates
    differing capitalisation of the first letter.  The leftover debug
    prints of the intermediate frames have been removed.

    Returns the merged pandas DataFrame.
    """
    bands = pd.read_csv("src/bands.tsv", sep='\t')
    top40 = pd.read_csv("src/UK-top40-1964-1-2.tsv", sep='\t')
    bands['Band'] = bands['Band'].str.capitalize()
    top40['Artist'] = top40['Artist'].str.capitalize()
    return pd.merge(bands, top40, right_on="Artist", left_on="Band")
def main():
    """Print the merged band/chart table."""
    merged = top_bands()
    print(merged)
if __name__ == "__main__":
main()
| Manmohit10/data-analysis-with-python-summer-2021 | part05-e03_top_bands/src/top_bands.py | top_bands.py | py | 459 | python | en | code | 0 | github-code | 36 |
38810777586 | ''' CAS schema's for the roads '''
__name__ = "CASSchema.py"
__author__ = "COUTAND Bastien"
__date__ = "07.12.22"
from datetime import datetime
from pydantic import BaseModel, Field
class CASBase(BaseModel):
    '''
    CAS Schema

    Shared fields of a CAS endpoint: its IP address and port.
    Other CAS schemas inherit from this.
    '''
    cas_ip: str = Field(
        description='ip for the CAS'
    )
    cas_port: int = Field(
        description='port for the CAS'
    )
class CASCreate(CASBase):
    '''
    CAS schema for the creation of an CAS in the database.

    Currently identical to CASBase; kept separate so the create payload
    can diverge later without breaking callers.
    '''
    pass
class CASInDB(CASBase):
    '''
    CAS schema for the db

    Adds the database-managed fields (primary key and creation timestamp)
    on top of CASBase, and enables parsing from ORM objects.
    '''
    id: int = Field(
        description='ID in the database of the CAS'
    )
    created_at: datetime = Field(
        # Bug fix: the original used `default=datetime.utcnow`, which makes
        # the *function object* the default value instead of calling it.
        # default_factory invokes it at instantiation time.
        default_factory=datetime.utcnow,
        description='Date of the creation for an CAS'
    )

    class Config:
        orm_mode = True
39694098177 | from app.issue_detector import IssueDetector
from app.support_detector import SupportDetector
import pandas as pd
from pathlib import Path
import sys
from pydantic import BaseModel, Field
class SupportScoreCalculator(BaseModel):
    """Derives per-user "support scores" from a chat message dump.

    Pipeline: the issue detector finds help-request messages, the reply
    chains referencing each issue are collected, the support detector picks
    the replies that helped resolve it, and helpful answers are aggregated
    per answering user.  Results are also written to timestamped CSV files.
    """
    timestamp: str = Field()
    issue_detector: IssueDetector = Field(default=IssueDetector())
    support_detector: SupportDetector = Field(default=SupportDetector())

    def calculate(self, messages: list):
        """Return a DataFrame with answer_user_id / support_score columns.

        Calls sys.exit(0) when no issue messages are found.  As a side
        effect writes the Q/A pairs and the aggregated scores to CSV.
        """
        # From the message set, pick messages related to issues
        # (problems / questions).
        issues = self.issue_detector.evaluate(messages)
        if not issues:
            print("no issue exitst")
            sys.exit(0)
        records = []
        for issue_id in issues:
            # resolve the id to the actual message object
            issue_message = self._get_target_message(messages, issue_id)
            if not issue_message:
                print("targe issue not found")
                continue
            # collect the messages that reference this issue
            refrences = self._get_reference_messages(messages, issue_id)
            if not refrences:
                print("no refrence message")
                continue
            # ask the detector which of them contributed to resolving it
            answer_ids = self.support_detector.evaluate(issue_message, refrences)
            for answer_id in answer_ids:
                # resolve the id to the actual message object
                answer = self._get_target_message(messages, answer_id)
                records.append({"q": issue_message, "a": answer})
        df = self._create_df(records)
        # dump the question/answer pair table to CSV
        self._save_result(df, "qa")
        grouped_df = df.groupby("answer_user_id").agg(support_score=("answer_user_id", "size")).reset_index()
        self._save_result(grouped_df, "support_score")
        return grouped_df

    def _get_target_message(self, message_objects, message_id):
        # Linear scan for the message with the given id; returns None if absent.
        for obj in message_objects:
            obj_id = obj["id"]
            if obj_id == message_id:
                return obj

    def _get_reference_messages(self, message_objects, issue_id):
        # Collect every message whose referenced_message chain leads back
        # to issue_id (direct replies and replies-to-replies).
        messages = []
        reference_ids = []
        for obj in message_objects:
            referenced_message = obj.get("referenced_message")
            if referenced_message:
                obj_id = obj.get("id")
                parent_id = referenced_message["id"]
                if parent_id == issue_id or parent_id in reference_ids:
                    messages.append(obj)
                    # assumes records arrive sorted in creation order, so a
                    # child is always seen after its parent - TODO confirm
                    reference_ids.append(obj_id)
        return messages

    def _create_df(self, records):
        # Flatten the Q/A pairs into rows: issue message id, issue author,
        # answer id, answer author, and both message bodies (newlines
        # escaped so the CSV stays one record per line).
        rows = []
        for record in records:
            row = {
                "issue_id": record["q"]["id"],
                "issue_user_id": record["q"]["author"]["id"],
                "answer_id": record["a"]["id"],
                "answer_user_id": record["a"]["author"]["id"],
                "issue_message": record["q"]["content"].replace("\n", "\\n"),
                "answer_message": record["a"]["content"].replace("\n", "\\n"),
            }
            rows.append(row)
        df = pd.DataFrame(rows)
        return df

    def _save_result(self, df, prefix: str):
        # Write the frame to result/tmp/<prefix>_<timestamp>.csv.
        report_dir = Path("result") / "tmp"
        df.to_csv(report_dir / f"{prefix}_{self.timestamp}.csv", index=False)
| blocks-web3/empower-link | contribution-analyzer/app/support_score_calculator.py | support_score_calculator.py | py | 3,688 | python | en | code | 0 | github-code | 36 |
17359757102 | from typing import Optional, List
import torch
import uuid
from torch import nn
from supertransformerlib import Core
class DefaultParameterLayer(nn.Module):
    """
    A NTM extension layer designed to contain within it the default
    state for some sort of parameter and to be manipulatable to create,
    interpolate, and reset batch elements to as fine a granularity as is provided

    It also contains a unique id which identifies what parameter id
    it is corrolated with.
    """
    def __init__(self,
                 parameter: nn.Parameter
                 ):
        super().__init__()
        # unique identifier correlating this layer with its parameter id
        self.ident = str(uuid.uuid1())
        self.default_parameter = parameter
    @torch.jit.export
    def make_batch(self,
                   batch_shape: Core.StandardShapeType
                   ):
        """
        :param batch_shape: The shape of the batch, in terms of an int, a list of ints, or a 1d tensor
        :return: A batch consisting of a broadcasted defaults
        """
        broadcast_shape: List[int] = Core.standardize_shape(batch_shape, "batch_shape").tolist()
        expansion_length = len(broadcast_shape)
        # -1 keeps the parameter's own trailing dimensions unchanged
        broadcast_shape += [-1] * self.default_parameter.dim()
        defaults = self.default_parameter
        # prepend one unit dimension per batch dimension, then broadcast
        for _ in range(expansion_length):
            defaults = defaults.unsqueeze(0)
        tensor = defaults.expand(broadcast_shape)
        return tensor

    @torch.jit.export
    def reset_to_parameters(self,
                            reset_probability: torch.Tensor,
                            tensor: torch.Tensor) -> torch.Tensor:
        """
        A small helper method, this will accept a fully expanded tensor and
        it's unbroadcasted defaults, then perform linear interpolation between them using the
        reset probabilities. A value of 0 will mean do not reset, while 1
        means completely reset

        :param reset_probability: A float tensor of values between 0..1. The rank of this tensor can
                                  only be greater than or equal to the rank of parameter 'tensor', and
                                  the dimensions here must match the initial dimensions of 'tensor'
        :param tensor: A data tensor which we wish to interpolate with.
        :return: An interpolated tensor between the tensor and the defaults, mediated by the reset probability
        """
        defaults = self.default_parameter
        reset_values = defaults.expand_as(tensor)
        # unsqueeze trailing dims so the probabilities broadcast over the data
        while reset_probability.dim() < reset_values.dim():
            reset_probability = reset_probability.unsqueeze(-1)
        # linear interpolation: 0 -> keep tensor, 1 -> fully reset to defaults
        updated_tensor = tensor * (1 - reset_probability) + reset_values * reset_probability
        return updated_tensor
    @torch.jit.export
    def force_reset_to_defaults(self,
                                reset_mask: torch.Tensor,
                                tensor: torch.Tensor)->torch.Tensor:
        """
        Forces a reset to default where the reset mask is marked as true

        :param reset_mask: A mask which matches tensor's dimensions on the initial dimensions. Elements
                           marked true will be reset to defaults
        :param tensor: The tensor to reset
        :return: A tensor which has had elements replaced with the mask where appropriate
        """
        defaults = self.default_parameter
        reset_values = defaults.expand_as(tensor)
        # unsqueeze trailing dims so the boolean mask broadcasts over the data
        while reset_mask.dim() < reset_values.dim():
            reset_mask = reset_mask.unsqueeze(-1)
        updated_tensor = torch.where(reset_mask, reset_values, tensor)
        return updated_tensor
def make_memory_parameter(
        memory_size: int,
        memory_width: int,
        ensemble_shape: Optional[Core.StandardShapeType] = None,
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None
        )->DefaultParameterLayer:
    """
    Builds a DefaultParameterLayer holding the default memory tensor of
    shape [memory_size, memory_width], optionally prefixed by the ensemble
    dimensions, initialised with kaiming-uniform values.  The layer can
    then broadcast, interpolate, and reset this parameter per batch.
    """
    if ensemble_shape is None:
        prefix: List[int] = []
    else:
        prefix = Core.standardize_shape(ensemble_shape, "ensemble_shape").tolist()
    raw = torch.zeros(prefix + [memory_size, memory_width], dtype=dtype, device=device)
    torch.nn.init.kaiming_uniform_(raw)
    return DefaultParameterLayer(nn.Parameter(raw))
def make_weights_parameter(memory_size: int,
                           num_heads: int,
                           ensemble_shape: Optional[Core.StandardShapeType] = None,
                           dtype: Optional[torch.dtype] = None,
                           device: Optional[torch.device] = None
                           ) -> DefaultParameterLayer:
    """
    Builds the default head weights as a DefaultParameterLayer.

    The tensor has shape [num_heads, memory_size], optionally prefixed by
    the ensemble dimensions, and is kaiming-uniform initialised.

    :param memory_size: The size of the built memory
    :param num_heads: The number of heads the memory will manage
    :param ensemble_shape: The shape of the ensemble, if used
    :param dtype: The dtype
    :param device: The device.
    :return: The constructed DefaultParameterLayer
    """
    if ensemble_shape is None:
        prefix: List[int] = []
    else:
        prefix = Core.standardize_shape(ensemble_shape, "ensemble_shape").tolist()
    raw = torch.zeros(prefix + [num_heads, memory_size], dtype=dtype, device=device)
    torch.nn.init.kaiming_uniform_(raw)
    return DefaultParameterLayer(nn.Parameter(raw))
31415174040 | from pydantic import BaseModel
import json
import requests
import Console
import config
HTTP_PREFIX = "http://"
HOST = config.server_address + "/internal"
class DownloadFileFromAgentInputType(BaseModel):
    """Request body for scheduling a file download from an agent."""
    ip_address: str
    file_path: str
class ListFilesFromAgentInputType(BaseModel):
    """Request body for scheduling a directory listing on an agent."""
    ip_address: str
    dir_path: str
class MonitorClipboardOnAgentInputType(BaseModel):
    """Request body for scheduling clipboard monitoring on an agent."""
    ip_address: str
    # monitoring duration; unit not visible here - presumably seconds, confirm server-side
    duration: int
class GenerateFirstCodeForAgentInputType(BaseModel):
    """Request body for generating an agent's first-connection code."""
    ip_address: str
class DisconnectAgentInputType(BaseModel):
    """Request body for scheduling an agent disconnect."""
    ip_address: str
def _schedule(endpoint: str, payload: dict, failure_msg: str, success_msg: str):
    """POST *payload* to the internal API endpoint and report the outcome.

    On a non-200 response prints *failure_msg* plus the server's 'detail';
    on success prints *success_msg* followed by the scheduled command id.
    Returns the decoded JSON body either way (same as the original
    copy-pasted functions this helper consolidates).
    """
    response = requests.post(url=HTTP_PREFIX + HOST + endpoint, data=json.dumps(payload))
    body = response.json()
    if response.status_code != 200:
        Console.console.print(failure_msg, style="error")
        Console.console.print(body['detail'], style="error")
    else:
        Console.console.print(f"{success_msg} {body['command_id']}", style="success")
    return body


def download_file_from_agent(input: DownloadFileFromAgentInputType):
    """Schedule downloading a file from the given agent."""
    return _schedule(
        "/downloadFile",
        input.__dict__,
        f"Could not schedule downloading file from agent {input.ip_address}",
        "Task for downloading file from an agent scheduled successfully with id:",
    )


def list_files_from_agent(input: ListFilesFromAgentInputType):
    """Schedule listing files in a directory on the given agent."""
    return _schedule(
        "/listFiles",
        input.__dict__,
        f"Could not schedule listing files from agent {input.ip_address}",
        "Task for listing files from an agent scheduled successfully with id:",
    )


def monitor_clipboard_on_agent(input: MonitorClipboardOnAgentInputType):
    """Schedule clipboard monitoring on the given agent."""
    return _schedule(
        "/monitorClipboard",
        input.__dict__,
        f"Could not schedule monitoring clipboard on agent {input.ip_address}",
        "Task for monitoring clipboard on agent scheduled successfully with id:",
    )


def generate_first_code_for_agent(input: GenerateFirstCodeForAgentInputType):
    """Request a first-connection code for the given agent.

    Returns the code string on success, None on failure.  Consistency fix:
    failures are now reported to the console like the other gateway calls
    instead of returning None silently.
    """
    data = json.dumps(input.__dict__)
    response = requests.post(url=HTTP_PREFIX + HOST + "/generateAgentCode", data=data)
    if response.status_code == 200:
        Console.console.print('Code generated successfully', style="success")
        return response.json()['code']
    Console.console.print(f"Could not generate code for agent {input.ip_address}", style="error")
    return None


def disconnect_agent(input: DisconnectAgentInputType):
    """Schedule disconnecting the given agent."""
    return _schedule(
        "/disconnectAgent",
        input.__dict__,
        f"Could not schedule disconnecting agent {input.ip_address}",
        "Task for disconnecting agent scheduled successfully with id:",
    )


def list_agents():
    """Return the list of known agents, or None if the request failed."""
    response = requests.get(url=HTTP_PREFIX + HOST + "/agents")
    if response.status_code == 200:
        return response.json()['agents']
    return None
| Kuba12a/CybClient | Gateways/CybServerGateway.py | CybServerGateway.py | py | 3,708 | python | en | code | 0 | github-code | 36 |
16732411603 | from typing import List
class Solution:
    def findReplaceString(self, s: str, indices: List[int], sources: List[str], targets: List[str]) -> str:
        """Apply every matching (index, source, target) replacement to s.

        A replacement only fires when s actually starts with `source` at
        `index`.  Processing the operations from the highest index down
        keeps the remaining (lower) indices valid in the mutated string.
        """
        result = s
        for start, src, repl in sorted(zip(indices, sources, targets), reverse=True):
            end = start + len(src)
            if result[start:end] == src:
                result = result[:start] + repl + result[end:]
        return result
| wLUOw/Leetcode | 2023.08/833/Solution.py | Solution.py | py | 372 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.