seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
18258641139 | #!/usr/bin/env python3
def main():
    """Find the cheapest price X in [10, 1000] whose 8%-tax floor equals A and
    whose 10%-tax floor equals B (read from stdin as "A B"); print -1 when no
    such price exists."""
    A, B = map(int, input().split())
    for price in range(10, 1001):
        # floor(price * 0.08) and floor(price * 0.10) via integer arithmetic.
        tax8 = price * 8 // 100
        tax10 = price // 10
        if tax8 == A and tax10 == B and tax8 <= 100 and tax10 <= 100:
            print(price)
            return
    print(-1)
if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p02755/s929521956.py | s929521956.py | py | 411 | python | en | code | 0 | github-code | 90 |
27729972425 | import h5py
import numpy as np
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers.normalization import BatchNormalization
from keras.preprocessing.image import img_to_array, array_to_img, load_img
import tkinter as tk
from tkinter import filedialog
from tkinter import messagebox
import cv2
# List of 38 classes of healthy and diseased plants
li = ['Apple___Apple_scab', 'Apple___Black_rot', 'Apple___Cedar_apple_rust', 'Apple___healthy', 'Blueberry___healthy', 'Cherry_(including_sour)___Powdery_mildew', 'Cherry_(including_sour)___healthy',
'Corn_(maize)___Cercospora_leaf_spot Gray_leaf_spot', 'Corn_(maize)___Common_rust_', 'Corn_(maize)___Northern_Leaf_Blight', 'Corn_(maize)___healthy', 'Grape___Black_rot', 'Grape___Esca_(Black_Measles)',
'Grape___Leaf_blight_(Isariopsis_Leaf_Spot)', 'Grape___healthy', 'Orange___Haunglongbing_(Citrus_greening)', 'Peach___Bacterial_spot', 'Peach___healthy', 'Pepper,_bell___Bacterial_spot',
'Pepper,_bell___healthy', 'Potato___Early_blight', 'Potato___Late_blight', 'Potato___healthy', 'Raspberry___healthy', 'Soybean___healthy', 'Squash___Powdery_mildew', 'Strawberry___Leaf_scorch',
'Strawberry___healthy', 'Tomato___Bacterial_spot', 'Tomato___Early_blight', 'Tomato___Late_blight', 'Tomato___Leaf_Mold', 'Tomato___Septoria_leaf_spot', 'Tomato___Spider_mites Two-spotted_spider_mite',
'Tomato___Target_Spot', 'Tomato___Tomato_Yellow_Leaf_Curl_Virus', 'Tomato___Tomato_mosaic_virus', 'Tomato___healthy']
def Load_Training_Model():
    """Build the AlexNet-style CNN (38-class plant-disease classifier),
    print its summary, freeze the first 20 layers, and load the
    pre-trained weights from .\\saved_models\\AlexNetModel.hdf5."""
    model = Sequential()
    # Same layer sequence as the original AlexNet-style architecture:
    # 5 conv blocks, then three dense layers and a 38-way softmax head.
    layer_stack = [
        Convolution2D(96, 11, strides=(4, 4), padding='valid', input_shape=(224, 224, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'),
        BatchNormalization(),
        Convolution2D(256, 11, strides=(1, 1), padding='valid', activation='relu'),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'),
        BatchNormalization(),
        Convolution2D(384, 3, strides=(1, 1), padding='valid', activation='relu'),
        BatchNormalization(),
        Convolution2D(384, 3, strides=(1, 1), padding='valid', activation='relu'),
        BatchNormalization(),
        Convolution2D(256, 3, strides=(1, 1), padding='valid', activation='relu'),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'),
        BatchNormalization(),
        Flatten(),
        Dense(units=4096, activation='relu'),
        Dropout(0.4),
        BatchNormalization(),
        Dense(units=4096, activation='relu'),
        Dropout(0.4),
        BatchNormalization(),
        Dense(units=1000, activation='relu'),
        Dropout(0.2),
        BatchNormalization(),
        Dense(units=38, activation='softmax'),
    ]
    for layer in layer_stack:
        model.add(layer)
    model.summary()
    # Freeze the first 20 layers so only the tail remains trainable.
    for index, layer in enumerate(model.layers[:20]):
        print(index, layer.name)
        layer.trainable = False
    model.load_weights('.\\saved_models\\AlexNetModel.hdf5')
    return model
def Predict_Test_Image_File(model):
    """Ask the user to pick an image file, classify it with *model*, and show
    the predicted class name in a message box.

    Fixes over the original:
    - guards against a cancelled file dialog / unreadable file (cv2.imread
      returns None, which previously crashed cv2.resize);
    - uses np.argmax instead of a manual max-scan loop (original kept the
      LAST index tied for the max; argmax keeps the first — ties are
      effectively impossible with softmax outputs).
    """
    root = tk.Tk()
    root.withdraw()
    imageFileName = filedialog.askopenfilename()
    if not imageFileName:
        # User cancelled the dialog; nothing to predict.
        return
    image = cv2.imread(imageFileName)
    if image is None:
        tk.messagebox.showerror('Test Image Prediction', 'Could not read image file')
        return
    # pre-process the image for classification: resize to the network's
    # 224x224 input, scale to [0, 1], and add a batch dimension.
    image = cv2.resize(image, (224, 224))
    image = image.astype("float") / 255.0
    image = img_to_array(image)
    image = np.expand_dims(image, axis=0)
    predictions = model.predict(image)
    print(predictions)
    class_name = li[int(np.argmax(predictions))]
    print('\n\n')
    print(class_name)
    tk.messagebox.showinfo('Test Image Prediction', class_name)
# ----------------------------------------- MAIN FUNCTION ------------------------------------------------------
if __name__ == "__main__":
    # Build the model once, then repeatedly offer to classify user-chosen
    # images until the user answers "no".
    model_definition = Load_Training_Model()
    root= tk.Tk() # create window
    root.withdraw()
    # askquestion returns the string 'yes' or 'no'.
    MsgBox = tk.messagebox.askquestion ('Tensorflow Predictions','Do you want to test Images for Predictions')
    while MsgBox == 'yes':
        MsgBox = tk.messagebox.askquestion ('Test Image','Do you want to test new Image')
        if MsgBox == 'yes':
            Predict_Test_Image_File(model_definition)
        else:
            tk.messagebox.showinfo('EXIT', "Exiting the Application")
            break
| lchauhan8642/Plant-Disease-classification-System | Alexnet/Alexnet_Testing.py | Alexnet_Testing.py | py | 5,378 | python | en | code | 3 | github-code | 90 |
71749268457 | import numpy as np
import pandas as pd
def compute_cos_similarities(vector : np.ndarray, vectors : np.ndarray, vector_norm : float = None,
                             vectors_norms : np.ndarray = None):
    """Cosine similarity between a row vector and each column of a matrix.

    `vector` has shape (d,), `vectors` has shape (d, n); the result is a flat
    array of n similarities, one per column.

    Parameters
    ----------
    vector : np.ndarray
        Input row vector, shape (d,)
    vectors : np.ndarray
        Input matrix, shape (d, n)
    vector_norm : float, optional
        Precomputed norm of `vector`; computed here when None
    vectors_norms : np.ndarray, optional
        Precomputed norms of the columns of `vectors`; computed here when None

    Returns
    -------
    np.ndarray
        Flat array of the n cosine similarities
    """
    if vector_norm is None:
        vector_norm = np.sqrt(np.sum(vector * vector))
    if vectors_norms is None:
        vectors_norms = np.sqrt(np.sum(vectors * vectors, axis=0))
    # Dot product of `vector` with every column of `vectors`.
    dots = np.sum(vector[:, np.newaxis] * vectors, axis=0)
    norm_products = vector_norm * vectors_norms
    # Zero norms only occur for all-zero vectors, whose dot product is 0,
    # so dividing by 1 instead of 0 yields a similarity of 0.
    safe_denominator = np.where(norm_products != 0, norm_products, 1)
    return dots / safe_denominator
def find_most_interesting_words(vt : np.ndarray, s : np.ndarray, u : np.ndarray, k : int = 100, subset : np.ndarray = None,
                                n : int = 3, normalize : bool = False):
    """Find the `n` most interesting words with respect to the given movies, according to the latent semantic analysis (i.e.
    LSA).
    The given movies are encoded as column vectors in the matrix `vt`; the words are encoded as row vectors in the matrix `u`.
    Optionally, we can consider only some of the movies, and not all of them (we can consider only a subset of the columns
    in `vt`). This can be useful, for example for focusing only to the movies with a certain genre.
    We want to find the `n` words which are the most related to the given movies. Namely, the words which are the most similar
    to the given movies.
    To be more specific, for each word, its average cosine similarity computed w.r.t. all the specified movies is calculated.
    Parameters
    ----------
    vt : np.ndarray
        Bidimensional array, obtained from the SVD.
        It's the SVD matrix related to the movies. It has dimensions (d,n_movies), where d=min{n_words,n_movies}. So, the
        columns correspond to the movies.
    s : np.ndarray
        Monodimensional array, containing the singular values, sorted in descending order.
    u : np.ndarray
        Bidimensional array, obtained from the SVD.
        It's the SVD matrix related to the words. It has dimensions (n_words,d), where d=min{n_words,n_movies}. So, the rows
        correspond to the words.
    k : int, optional
        Level of approximation for the LSA: k-rank approximation, by default 100. Basically, new number of dimensions.
    subset : np.ndarray, optional
        Array of integers, containing the indicies of the movies in which we want to focus on, by default None
    n : int, optional
        Number of words to retrieve, by default 3
    normalize : bool, optional
        Whether to normalize or not the movies vectors and the words vectors, by default False
    Returns
    -------
    selected_words_ids : np.ndarray
        Array containing the ids of the selected words
    mean_cos_similarities : np.ndarray
        Array containing the mean cosine similarities of the selected words w.r.t all the specified movies
    """
    # Truncate to the k largest singular values (assumes k <= len(s)).
    sk = s[:k]
    if subset is None:
        subset = range(vt.shape[1])
    # k-rank movie representation: keep the first k rows and the chosen columns.
    vt_k = vt[:k,subset]
    # Scale each latent dimension by sqrt(singular value).
    vt_ks = np.reshape(np.sqrt(sk), newshape=(sk.shape[0],1)) * vt_k
    if normalize:
        # Normalize each movie column to unit length.
        vt_ks_normalized = vt_ks/np.sqrt(np.sum(np.square(vt_ks), axis=0))
        vt_ks = vt_ks_normalized
    # k-rank word representation: rows are words, scaled the same way.
    u_k = u[:,:k]
    u_ks = u_k * np.reshape(np.sqrt(sk), newshape=(1,sk.shape[0]))
    if normalize:
        # Normalize each word row to unit length.
        u_ks_normalized = u_ks/np.reshape(np.sqrt(np.sum(np.square(u_ks), axis=1)),newshape=(u_ks.shape[0],1))
        u_ks = u_ks_normalized
    # Column norms of the movie matrix (name says "vh" but it is vt_ks).
    vh_ks_norms = np.sqrt(np.sum(np.square(vt_ks),axis=0))
    u_ks_norms = np.sqrt(np.sum(np.square(u_ks),axis=1))
    mean_cos_similarities = np.zeros(shape=(u.shape[0],))
    # Average the cosine similarity of each word against all selected movies.
    for word_id in range(u_ks.shape[0]):
        cos_similarities = compute_cos_similarities(vector=u_ks[word_id,:], vectors=vt_ks, vector_norm=u_ks_norms[word_id],
                                                    vectors_norms=vh_ks_norms)
        # Drop NaNs; np.mean of an empty slice yields NaN, filtered below.
        cos_similarities = cos_similarities[~np.isnan(cos_similarities)]
        mean_cos_similarities[word_id] = np.mean(cos_similarities)
    # Rank words by mean similarity, descending, discarding NaN scores.
    selected_words_ids = np.argsort(mean_cos_similarities)[::-1]
    selected_words_ids = selected_words_ids[~np.isnan(mean_cos_similarities[selected_words_ids])]
    selected_words_ids = selected_words_ids[:n]
    return selected_words_ids, mean_cos_similarities[selected_words_ids]
| michele98/text_mining_project | utils/similarities.py | similarities.py | py | 5,010 | python | en | code | 0 | github-code | 90 |
33659681997 | #
# @lc app=leetcode.cn id=46 lang=python3
#
# [46] 全排列
#
# https://leetcode-cn.com/problems/permutations/description/
#
# algorithms
# Medium (72.14%)
# Likes: 424
# Dislikes: 0
# Total Accepted: 53.7K
# Total Submissions: 74.2K
# Testcase Example: '[1,2,3]'
#
# 给定一个没有重复数字的序列,返回其所有可能的全排列。
#
# 示例:
#
# 输入: [1,2,3]
# 输出:
# [
# [1,2,3],
# [1,3,2],
# [2,1,3],
# [2,3,1],
# [3,1,2],
# [3,2,1]
# ]
#
#
# @lc code=start
class Solution:
    def permute(self, nums: [int]) -> [[int]]:
        """Return every permutation of *nums* (which contains no duplicates)
        via in-place swap backtracking; *nums* is restored before returning."""
        output, size = [], len(nums)

        def backtrack(start=0):
            # All positions fixed: record a snapshot of the current order.
            if start == size:
                output.append(list(nums))
            for idx in range(start, size):
                # Place element idx at position start, recurse, then undo.
                nums[idx], nums[start] = nums[start], nums[idx]
                backtrack(start + 1)
                nums[idx], nums[start] = nums[start], nums[idx]

        backtrack()
        return output
# @lc code=end
| algorithm004-04/algorithm004-04 | Week 02/id_624/LeetCode_46_624.py | LeetCode_46_624.py | py | 955 | python | en | code | 66 | github-code | 90 |
37905265803 | import numpy as np
"""
DO NOT CHANGE THIS VALUE UNLESS THE RADIUS OF THE EARTH CHANGES !!!
"""
"""
Conversion Values
"""
RADIUS_OF_EARTH = 6371.0088 ##In kilometers; Change this to miles if you don't believe in metric system
KMS_TO_METERS = 1000.0
METERS_TO_KILOMETERS = 0.001
MILES_TO_KILOMETERS = 1.60934
KILOMETERS_TO_MILES = 0.621371


def gcd(loc1, loc2, R=RADIUS_OF_EARTH * KMS_TO_METERS):
    """Great-circle distance between two points via the haversine formula.

    Parameters
    ----------
    loc1 : tuple (long, lat in decimal degrees)
    loc2 : tuple (long, lat in decimal degrees)
    R : float, optional
        Sphere radius; the default is the Earth's radius in METERS, so the
        result is in meters unless R is overridden (e.g. 6371.0088 for km,
        3963.19 for statute miles).

    Returns
    -------
    float
        Great-circle distance between loc1 and loc2 in units of R.

    Notes
    -----
    Does not take into account the non-spheroidal shape of the Earth.
    """
    lon_a, lat_a = loc1
    lon_b, lat_b = loc2
    delta_lat = np.radians(lat_b - lat_a)
    delta_lon = np.radians(lon_b - lon_a)
    phi_a = np.radians(lat_a)
    phi_b = np.radians(lat_b)
    # Haversine of the central angle.
    h = np.sin(delta_lat/2)**2 + np.cos(phi_a)*np.cos(phi_b)*np.sin(delta_lon/2)**2
    central_angle = 2*np.arcsin(np.sqrt(h))
    return R * central_angle
def compareFloat(a, b, rTol = .00001, aTol = .00000001):
    """Approximate float equality, using the same formula as numpy.allclose.

    INPUTS:
        a (float): float to be compared
        b (float): float to be compared
        rTol (float): relative tolerance
        aTol (float): absolute tolerance
    OUTPUT:
        return (boolean): True if |a - b| < aTol + (rTol * |b|); False when
        the values are not close or the inputs are not numeric.
    """
    try:
        # Return the comparison directly instead of an if/else ladder.
        return abs(a - b) < aTol + (rTol * abs(b))
    except TypeError:
        # Non-numeric input (e.g. a string) cannot be subtracted/compared.
        # The original used a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; only TypeError is expected here.
        return False
def convert2Radians(degree):
    """Convert an angle from degrees to radians.

    INPUTS:
        degree (float): angle in degrees
    RETURN (float): the same angle in radians
    """
    return degree * (np.pi / 180.0)
def convert2degrees(radians):
    """Convert an angle from radians to degrees.

    INPUTS:
        radians (float): angle in radians
    RETURN (float): the same angle in degrees
    """
    return radians * (180.0 / np.pi)
def top_3_words(text):
    """Return the up-to-three most frequent words in *text*, most frequent first.

    A word is a maximal run of lowercase letters and apostrophes containing at
    least one letter (so a run of apostrophes alone does not count); matching
    is case-insensitive and results are returned lowercase. Fewer than three
    distinct words yields a shorter list.

    Fixes over the original hand-rolled scanner:
    - the `word[-1] == text[-1]` check fired on ANY character matching the
      text's final character, truncating and double-counting words mid-text;
    - a word counted at a space was never reset, so it leaked into the next
      token;
    - the duplicated >=3 / <3 top-extraction branches are collapsed.
    """
    import re
    from collections import Counter

    candidates = re.findall(r"[a-z']+", text.lower())
    # Discard apostrophe-only runs such as "'''".
    words = [w for w in candidates if any(ch.isalpha() for ch in w)]
    return [word for word, _ in Counter(words).most_common(3)]
# Demo: print the three most frequent words of the opening of Don Quixote.
print(top_3_words("""In a village of La Mancha, the name of which I have no desire to call to
mind, there lived not long since one of those gentlemen that keep a lance
in the lance-rack, an old buckler, a lean hack, and a greyhound for
coursing. An olla of rather more beef than mutton, a salad on most
nights, scraps on Saturdays, lentils on Fridays, and a pigeon or so extra
on Sundays, made away with three-quarters of his income."""))
| SalvadorGuido/Python | CodeWars/FrequentWords.py | FrequentWords.py | py | 1,562 | python | en | code | 0 | github-code | 90 |
38617688094 | from datetime import datetime
from sqlalchemy import Column, DateTime, Float, ForeignKey, Integer, String
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
from .base import Base
from .pair import Pair
class ScoutHistory(Base):
    """One scouting observation: the prices and target ratio recorded for a
    coin pair at a single point in time."""
    __tablename__ = "scout_history"
    id = Column(Integer, primary_key=True)
    # Pair being scouted (FK into the pairs table).
    pair_id = Column(String, ForeignKey("pairs.id"))
    pair = relationship("Pair")
    # Ratio at which a jump from from_coin to to_coin would be triggered.
    target_ratio = Column(Float)
    current_coin_price = Column(Float)
    other_coin_price = Column(Float)
    # NOTE: this column attribute shadows the `datetime` class imported at
    # module level only inside the class body; methods still resolve the
    # module-level import.
    datetime = Column(DateTime)
    def __init__(
        self,
        pair: Pair,
        target_ratio: float,
        current_coin_price: float,
        other_coin_price: float,
    ):
        # Timestamp is taken at construction time, in UTC.
        self.pair = pair
        self.target_ratio = target_ratio
        self.current_coin_price = current_coin_price
        self.other_coin_price = other_coin_price
        self.datetime = datetime.utcnow()
    @hybrid_property
    def current_ratio(self):
        # Observed price ratio between the two coins.
        return self.current_coin_price / self.other_coin_price
    def info(self):
        """Return a plain-dict snapshot of this row, suitable for JSON output."""
        return {
            "from_coin": self.pair.from_coin.info(),
            "to_coin": self.pair.to_coin.info(),
            "current_ratio": self.current_ratio,
            "target_ratio": self.target_ratio,
            "current_coin_price": self.current_coin_price,
            "other_coin_price": self.other_coin_price,
            "datetime": self.datetime.isoformat(),
        }
| edeng23/binance-trade-bot | binance_trade_bot/models/scout_history.py | scout_history.py | py | 1,473 | python | en | code | 7,357 | github-code | 90 |
class Solution(object):
    """LeetCode 142 - Linked List Cycle II.

    The original file defined two methods both named ``detectCycle``; the
    first (which never advanced the fast pointer and could therefore loop
    forever on a cycle not passing through the head) was silently shadowed
    by the second. Only a single, correct Floyd cycle-detection
    implementation is kept.
    """

    def detectCycle(self, head):
        """Return the node where the cycle begins, or None if there is no cycle.

        :type head: ListNode
        :rtype: ListNode
        """
        slow = fast = head
        # Phase 1: slow moves 1 step, fast moves 2; they meet inside the
        # cycle, or fast runs off the end of an acyclic list.
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
            if slow is fast:
                # Phase 2: a probe from the head and the meeting point,
                # advanced in lockstep, meet exactly at the cycle entrance.
                probe = head
                while probe is not slow:
                    probe = probe.next
                    slow = slow.next
                return probe
        return None
| Minho16/leetcode | Daily_problems/MAR23/2023-03-09/LinkedListCycleII_Samu.py | LinkedListCycleII_Samu.py | py | 1,579 | python | en | code | 0 | github-code | 90 |
3704019540 | """
Implements a haversine loss function for use in
TensorFlow models
"""
import tensorflow as tf
import numpy as np
import unittest
def haversine_loss(y_true, y_pred, R=3443.92):
    """Mean squared haversine (great-circle) distance between two arrays of
    [lat, long] rows, in units of R squared.

    Args:
        y_true: np.array or tf.constant of shape (m, 2); each row is an
            ordered pair [lat, long] in decimal degrees.
        y_pred: Same form as y_true.
        R: Float radius of the earth. The default is in nautical miles;
           other units: kilometers -> 6378.14, statute miles -> 3963.19,
           smoots -> 3.748e+6.

    Returns:
        tf.tensor of shape () and dtype float64: the mean squared
        great-circle distance error.

    Notes:
        Closely follows the JS implementation at
        https://www.movable-type.co.uk/scripts/latlong.html.
    """
    # Split inputs into latitude / longitude columns.
    lat_true = y_true[:,0]
    lat_pred = y_pred[:,0]
    long_true = y_true[:,1]
    long_pred = y_pred[:,1]
    # Convert to radians.
    phi_true = lat_true * np.pi / 180
    phi_pred = lat_pred * np.pi / 180
    delta_phi = (lat_pred - lat_true) * np.pi / 180
    delta_lambda = (long_pred - long_true) * np.pi / 180
    # Haversine formula for the central angle.
    a = tf.square(tf.sin(delta_phi / 2)) + tf.cos(phi_true) * tf.cos(phi_pred) * tf.square(tf.sin(delta_lambda / 2))
    c = 2 * tf.atan2(tf.sqrt(a), tf.sqrt(1 - a))
    # Distance per row, then the mean squared distance (MSE).
    distances = R * c
    return tf.reduce_mean(tf.square(distances))
class TestHaversine(unittest.TestCase):
    """Sanity checks for haversine_loss against hand-computed distances."""

    def test_isolat(self):
        # Antipodal points on the equator: (pi * R)^2 nm^2.
        loss = haversine_loss(np.array([[0, 0]]), np.array([[0, 180]])).numpy()
        self.assertTrue(np.abs(loss - 117059281.6) < 0.1)

    def test_isolong(self):
        # Equator to pole: (pi * R / 2)^2 nm^2.
        loss = haversine_loss(np.array([[0, 0]]), np.array([[90, 0]])).numpy()
        self.assertTrue(np.abs(loss - 29264820.4) < 0.1)

    def test_vaRota(self):
        # Portsmouth, VA to Rota, Spain, with R in kilometers.
        loss = haversine_loss(tf.constant([[36.8354, -76.2983]]),
                              tf.constant([[36.6237, -6.3601]]),
                              R=6378.14).numpy()
        self.assertTrue(np.abs(loss - 37065212.0) < 0.1)


if __name__ == "__main__":
    unittest.main()
18483702679 | from collections import deque
N = int(input())
A = [int(input()) for _ in range(N)]
A.sort()
# Build arrangement B: start from the largest value and alternately append
# the smallest remaining to the right and to the left, then the largest
# remaining, zig-zagging to maximize adjacent differences.
X = deque(A.copy())
B = deque([X.pop()])
flg = True
while X:
    if flg:
        B.append(X.popleft())
        if X:
            B.appendleft(X.popleft())
    else:
        B.append(X.pop())
        if X:
            B.appendleft(X.pop())
    flg = not flg
# Arrangement C: the mirror strategy, seeded with the smallest value.
X = deque(A.copy())
C = deque([X.popleft()])
flg = True
while X:
    if flg:
        C.append(X.pop())
        if X:
            C.appendleft(X.pop())
    else:
        C.append(X.popleft())
        if X:
            C.appendleft(X.popleft())
    flg = not flg
# Answer: the larger total of absolute adjacent differences of the two candidates.
print(max(sum([abs(B[i]-B[i-1]) for i in range(1,N)]), sum([abs(C[i]-C[i-1]) for i in range(1,N)])))
72462977256 | from flask import Flask
from flask import render_template
import boto
app = Flask(__name__)
app.config.from_pyfile('settings.cfg')
@app.template_filter()
def yesno(value, yes, no):
    """Jinja template filter: return *yes* for truthy values, *no* otherwise."""
    return yes if value else no
@app.route('/')
def elb_status():
    """Render the health of each configured ELB and its instances.

    Bug fix: 'serving' was previously derived from
    ``len([i['up'] for i in instances]) == 0``, which counts ALL instances
    (not just healthy ones), so it was only False for an empty load
    balancer. It now reflects whether at least one instance is in service.
    """
    conn = boto.connect_elb(
        aws_access_key_id=app.config['ACCESS_KEY'],
        aws_secret_access_key=app.config['SECRET_KEY']
    )
    load_balancers = conn.get_all_load_balancers(app.config['LOAD_BALANCERS'])
    lbs = []
    for lb in load_balancers:
        instances = [
            {
                'id': inst.instance_id,
                'up': inst.state == 'InService',
                'name': get_instance_name(inst.instance_id),
            }
            for inst in lb.get_instance_health()
        ]
        up_flags = [i['up'] for i in instances]
        lb_status = {
            'name': lb.name,
            'instances': instances,
            # Redundant only with two or more instances, all healthy.
            'redundant': len(instances) >= 2 and all(up_flags),
            # Serving as long as at least one instance is in service.
            'serving': any(up_flags),
        }
        lbs.append(lb_status)
    return render_template('elb_status.html', lbs=lbs)
def get_instance_name(instance_id):
    """Return the EC2 'Name' tag for *instance_id*, '' if the tag is absent,
    or None when the id does not resolve to exactly one instance."""
    conn = boto.connect_ec2(
        aws_access_key_id=app.config['ACCESS_KEY'],
        aws_secret_access_key=app.config['SECRET_KEY']
    )
    reservations = conn.get_all_instances([instance_id])
    matches = [inst for res in reservations for inst in res.instances]
    if len(matches) == 1:
        return matches[0].tags.get('Name', '')
    return None


if __name__ == '__main__':
    app.run(host='0.0.0.0')
| alexluke/AWS-load-balancer-status | monitor.py | monitor.py | py | 1,462 | python | en | code | 0 | github-code | 90 |
32926554622 | from Rscript.RscriptClass import Rscript
# Load transcript ids from the MS/MS peptide evidence file (tab-separated:
# id <TAB> count).
MSMS = {}
inFile = open('HeLa-Predict-pep.transcript')
for line in inFile:
    line = line.strip()
    fields = line.split('\t')
    MSMS[fields[0]]=int(fields[1])
inFile.close()
# Load transcript ids supported by RNA-Seq evidence (same format).
RNASEQ = {}
inFile = open('ERR0498-04-05.unmapped.unique.total.fasta.blated.filtered.seq1.splicing.not_known-predict.transcript')
for line in inFile:
    line = line.strip()
    fields = line.split('\t')
    RNASEQ[fields[0]]=int(fields[1])
inFile.close()
# Load transcript ids from the Ensembl ab-initio (GENSCAN) predictions.
GENSCAN = {}
inFile = open('Homo_sapiens.GRCh37.70.pep.abinitio.fa.fa.transcript')
for line in inFile:
    line = line.strip()
    fields = line.split('\t')
    GENSCAN[fields[0]]=int(fields[1])
inFile.close()
# Build an R script that draws a three-way Venn diagram of the id sets;
# the %s placeholders are filled with quoted, comma-separated id lists.
R=r'''
library(VennDiagram)
venn.plot <- venn.diagram(
x = list(
MS_MS = c('%s'),
RNA_Seq = c('%s'),
Ensembl_Predict = c('%s')
),
filename = "Predict-rnaseq-msms-venn.pdf",
col = "transparent",
fill = c("red", "blue", "green"),
alpha = 0.5,
label.col = c("darkred", "white", "darkblue", "white", "white", "white", "darkgreen"),
cex = 2.5,
fontfamily = "serif",
fontface = "bold",
cat.default.pos = "text",
cat.col = c("darkred", "darkblue", "darkgreen"),
cat.cex = 2.5,
cat.fontfamily = "serif",
cat.dist = c(0.06, 0.06, 0.03),
cat.pos = 0
);
'''%("','".join(MSMS),"','".join(RNASEQ),"','".join(GENSCAN))
# Execute the generated R script (project-local helper).
Rscript(R)
| wanghuanwei-gd/SIBS | RNAseqMSMS/7-tandem-sv/predict-protein/1-venn.py | 1-venn.py | py | 1,347 | python | en | code | 0 | github-code | 90 |
26358518811 | """AUTOR:Adrián Agudo Bruno
ENUNCIADO:Escribe un programa que pida una frase, y pase la frase como parámetro a una función que debe eliminar los espacios en blanco (compactar la frase). El programa principal imprimirá por pantalla el resultado final.
"""
def rima(a, b):
    """Print whether the words *a* and *b* rhyme.

    They "riman" when their last three letters match, "riman un poco" when
    only the last two match, and "NO riman" otherwise.

    Fixes over the original:
    - when the final letters differed, the original printed NOTHING (missing
      final else branch);
    - for words shorter than 3 letters, Python's negative indexing silently
      wrapped around; slicing handles short words correctly.
    """
    if a[-3:] == b[-3:]:
        print(f'las palabras {a} y {b} riman')
    elif a[-2:] == b[-2:]:
        print(f'las palabras {a} y {b} riman un poco')
    else:
        print(f'las palabras {a} y {b} NO riman')
# Interactive driver: read two words from the user and report whether they rhyme.
p1=input('introducir primera palabra: ')
p2=input('introducir segunda palabra: ')
rima(p1,p2)
| aagudobruno/python | P7/P7E9.py | P7E9.py | py | 717 | python | es | code | 0 | github-code | 90 |
18302071189 | N=int(input())
# NOTE: N is read from stdin on the line above this span (N=int(input())).
a=list(map(int,input().split()))
ans = 0
res =1
# Keep elements equal to the next expected value 1,2,3,...; every other
# element must be removed and is counted in `ans`.
for i in range(N):
    if res != a[i]:
        ans += 1
    elif res == a[i]:
        res += 1
# If every element was removed, the value 1 never appeared: impossible.
if ans ==N:
    print(-1)
else:
    print(ans)
35933076236 | import cv2
import random
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
import os
from matplotlib import pyplot as plt
grayscale_max = 255
dirsave="./Results/"
dirGT0="./GT0"
dirGT1="./GT1"
dirIP0="./IM0"
dirIP1="./IM1"
def load_image_IP0():
    """Load every file in dirIP0 as a grayscale image.

    Returns a list of arrays in os.listdir order (entries are None for
    files OpenCV cannot read).
    """
    names = os.listdir(dirIP0)
    paths = [os.path.join(dirIP0, name) for name in names]
    return [cv2.imread(path, cv2.IMREAD_GRAYSCALE) for path in paths]
def load_image_IP1():
    """Load every file in dirIP1 as a grayscale image.

    Returns a list of arrays in os.listdir order (entries are None for
    files OpenCV cannot read).
    """
    names = os.listdir(dirIP1)
    paths = [os.path.join(dirIP1, name) for name in names]
    return [cv2.imread(path, cv2.IMREAD_GRAYSCALE) for path in paths]
def load_image_GT0():
    """Load every file in dirGT0 (left ground-truth disparities) as grayscale."""
    names = os.listdir(dirGT0)
    paths = [os.path.join(dirGT0, name) for name in names]
    return [cv2.imread(path, cv2.IMREAD_GRAYSCALE) for path in paths]
def load_image_GT1():
    """Load every file in dirGT1 (right ground-truth disparities) as grayscale."""
    names = os.listdir(dirGT1)
    paths = [os.path.join(dirGT1, name) for name in names]
    return [cv2.imread(path, cv2.IMREAD_GRAYSCALE) for path in paths]
def show_image(title, image):
    """Normalize *image* by its maximum and write it to dirsave as
    '<title><random 1-100>.png', rescaled back to the grayscale range."""
    peak = image.max()
    normalized = np.divide(image, peak)
    out_path = dirsave + title + str(random.randint(1, 100)) + '.png'
    cv2.imwrite(out_path, normalized * grayscale_max)
def add_padding(input, padding):
    """Return *input* embedded in a zero border of width *padding* on all sides."""
    rows, columns = input.shape[0], input.shape[1]
    print("Rows = ", rows)
    padded = np.zeros((rows + 2 * padding, columns + 2 * padding), dtype=float)
    padded[padding:rows + padding, padding:columns + padding] = input
    return padded
def search_bounds(column, block_size, width, rshift, disparity_range=75):
    """Compute the horizontal search window for block matching at *column*.

    Generalization: the previously hard-coded maximum disparity (75) is now
    the keyword parameter *disparity_range* with the same default, so
    existing callers are unaffected.

    Returns (left_bound, right_bound, step), suitable for
    range(left_bound, right_bound, step).
    """
    padding = block_size // 2
    right_bound = column
    if rshift:
        # Matching block lies to the LEFT of the current column; clamp so
        # the window start stays inside the padded image.
        left_bound = column - disparity_range
        if left_bound < padding:
            left_bound = padding
        step = 1
    else:
        # Matching block lies to the RIGHT; clamp so the block fits.
        left_bound = column + disparity_range
        if left_bound >= (width - 2 * padding):
            left_bound = width - 2 * padding - 2
        step = -1
    return left_bound, right_bound, step
# Search range is bounded by search_bounds (disparity_range = 75 pixels).
def disparity_map(left, right, block_size, rshift):
    """Block-matching disparity map between two grayscale images.

    For every pixel, slides a block_size x block_size window along the same
    row of the other image (bounds from search_bounds) and records the
    horizontal shift minimizing the sum of squared differences.
    NOTE(review): prints one line per pixel, which dominates runtime.
    """
    padding = block_size // 2
    left_img = add_padding(left, padding)
    right_img = add_padding(right, padding)
    height, width = left_img.shape
    # d_map = np.zeros((height - padding*2, width - padding*2), dtype=float)
    # Output has the UNPADDED shape; the loop ranges below reduce back to it.
    d_map = np.zeros(left.shape , dtype=float)
    for row in range(height - block_size + 1):
        for col in range(width - block_size + 1):
            bestdist = float('inf')
            shift = 0
            left_pixel = left_img[row:row + block_size, col:col + block_size]
            l_bound, r_bound, step = search_bounds(col, block_size, width, rshift)
            # for i in range(l_bound, r_bound - padding*2):
            for i in range(l_bound, r_bound, step):
                right_pixel = right_img[row:row + block_size, i:i + block_size]
                # if euclid_dist(left_pixel, right_pixel) < bestdist :
                # Sum of squared differences between the two blocks.
                ssd = np.sum((left_pixel - right_pixel) ** 2)
                # print('row:',row,' col:',col,' i:',i,' bestdist:',bestdist,' shift:',shift,' ssd:',ssd)
                if ssd < bestdist:
                    bestdist = ssd
                    shift = i
            # Disparity sign depends on which image is the reference.
            if rshift:
                d_map[row, col] = col - shift
            else:
                d_map[row, col] = shift - col
            print('Calculated Disparity at ('+str(row)+','+str(col)+') :', d_map[row,col])
    #print(d_map.shape)
    return d_map
def squared_mean_square_error(disparity_map, ground_truth):
    """Sum of squared differences (SSD) between the two arrays.

    Despite the name, this returns the SUM of squared errors, not the mean;
    the division by pixel count was deliberately left commented out in the
    original.
    """
    residual = disparity_map - ground_truth
    return np.sum(residual ** 2)
def absolute_mean_square_error(disparity_map, ground_truth):
    """Sum of absolute differences (SAD) between the two arrays.

    Bug fix: the original summed the SIGNED differences, so positive and
    negative per-pixel errors cancelled out; the absolute value is now
    taken per element, matching the function's name and intent.
    """
    return np.sum(np.abs(disparity_map - ground_truth))
def correlation_coefficient(disparity_map, ground_truth):
    """Pearson correlation coefficient between two arrays.

    Bug fix: the zero-variance guard referenced an undefined name ``stds``
    (the local variable is ``standard_dev``), so every call raised
    NameError. The guard now uses the correct variable.
    """
    product = np.mean((disparity_map - disparity_map.mean()) * (ground_truth - ground_truth.mean()))
    standard_dev = disparity_map.std() * ground_truth.std()
    if standard_dev == 0:
        # Either input is constant: the correlation is undefined; report 0.
        return 0
    return product / standard_dev
# NOTE(review): `global` at module level is a no-op; these two lines have no effect.
global d_map_lr_5
global d_map_lr_7
def main():
    """Run block-matching stereo on every image pair, save the disparity maps,
    and print error metrics against the ground truths.

    NOTE(review): after the first loop, d_map_lr_5/d_map_lr_7 hold only the
    results of the LAST image pair, so the metrics loop compares every
    ground truth against that last pair. Several print labels are also
    copy-paste artifacts ("window size 5" for size-7 results, "MSE" for
    correlation values), and at the end loss_cc_7_lr is printed after
    assigning loss_cc_5_lr — likely a typo; confirm before relying on the
    logged numbers.
    """
    l= load_image_IP0()
    r= load_image_IP1()
    #cv2.imshow('l', l)
    #print(type(l),type(r))
    #print(len(l))
    #l='im0.png'
    #r='im1.png'
    #d_map_lr_5 = disparity_map(l,r,3, True)
    #show_image('D_Map_lr_block5_', d_map_lr_5)
    ground_truth_1=load_image_GT0()
    ground_truth_2 = load_image_GT1()
    for i,j in zip(l,r):
        # For window size of 5
        d_map_lr_5 = disparity_map(i,j,5, True)
        show_image('D_Map_lr_5_', d_map_lr_5)
        # For window size of 7
        d_map_lr_7 = disparity_map(i,j,7, True)
        show_image('D_Map_lr_7_', d_map_lr_7)
    # Mean Squared Error
    for i,j in zip(ground_truth_1,ground_truth_2):
        # For window size of 5
        print(i.shape)
        print(d_map_lr_5.shape)
        loss_sm_5_lr = squared_mean_square_error(d_map_lr_5, i)
        print("Loss for window size 5 for GT0 is" , loss_sm_5_lr)
        loss_sm_7_lr = squared_mean_square_error(d_map_lr_7, i)
        print("Loss for window size 5 for GT0 is" , loss_sm_7_lr)
        loss_am_5_lr = absolute_mean_square_error(d_map_lr_5, i)
        print("Loss for window size 5 for GT0 is" , loss_am_5_lr)
        loss_am_7_lr = absolute_mean_square_error(d_map_lr_7, i)
        print("Loss for window size 5 for GT0 is" , loss_am_7_lr)
        loss_cc_5_lr = correlation_coefficient(d_map_lr_5, i)
        print("MSE for window size 5 for GT0 is" , loss_cc_5_lr)
        loss_cc_5_lr = correlation_coefficient(d_map_lr_5, j)
        print("MSE for window size 5 for GT1 is" , loss_cc_5_lr)
        loss_cc_7_lr = correlation_coefficient(d_map_lr_7, i)
        print("MSE for window size 5 for GT0 is" , loss_cc_7_lr)
        loss_cc_5_lr = correlation_coefficient(d_map_lr_7, j)
        print("MSE for window size 5 for GT1 is" , loss_cc_7_lr)
    return
main()
| Himani2000/celestini_task | Q3-Image-Process/Image_Process.py | Image_Process.py | py | 6,873 | python | en | code | 0 | github-code | 90 |
38987918173 | import torch
# Element-wise multiplication of two 1-D tensors.
x = torch.Tensor([5, 3])
y = torch.Tensor([2, 1])
print(x * y)
# A 2x5 tensor of zeros.
x = torch.zeros([2, 5])
print(x)
# NOTE(review): the next two bare expressions have no effect outside a REPL;
# they appear to be pasted from an interactive session.
x.shape
torch.Size([2, 5])
# Random 2x5 tensor sampled uniformly from [0, 1).
y = torch.rand([2, 5])
print(y)
#flatten tensor before passing it into neural network
y = y.view([1, 10])
print(y)
72318331498 | import os
import pytest
pytestmark = [pytest.mark.girder, pytest.mark.girder_client]
try:
from pytest_girder.web_client import runWebClientTest
except ImportError:
# Make it easier to test without girder
pass
# Runs the image-viewer web-client spec against a live server with a
# girder_worker available.
@pytest.mark.singular()
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
@pytest.mark.parametrize('spec', [
    'imageViewerSpec.js',
])
def testWebClient(boundServer, fsAssetstore, db, spec, girderWorker):
    """Execute *spec* in the Girder web-client test harness (15s timeout)."""
    spec = os.path.join(os.path.dirname(__file__), 'web_client_specs', spec)
    runWebClientTest(boundServer, spec, 15000)
# Same harness, but for specs that do not require a girder_worker fixture.
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
@pytest.mark.parametrize('spec', [
    'largeImageSpec.js',
    'otherFeatures.js',
])
def testWebClientNoWorker(boundServer, fsAssetstore, db, spec):
    """Execute *spec* in the Girder web-client test harness (15s timeout)."""
    spec = os.path.join(os.path.dirname(__file__), 'web_client_specs', spec)
    runWebClientTest(boundServer, spec, 15000)
# Re-runs the image-viewer spec with the notification stream disabled,
# which forces the client onto its polling fallback (hence the longer timeout).
@pytest.mark.singular()
@pytest.mark.usefixtures('unbindLargeImage')
@pytest.mark.plugin('large_image')
@pytest.mark.parametrize('spec', [
    'imageViewerSpec.js',
])
def testWebClientNoStream(boundServer, fsAssetstore, db, spec, girderWorker):
    """Execute *spec* with ENABLE_NOTIFICATION_STREAM off (60s timeout)."""
    from girder.models.setting import Setting
    from girder.settings import SettingKey
    Setting().set(SettingKey.ENABLE_NOTIFICATION_STREAM, False)
    spec = os.path.join(os.path.dirname(__file__), 'web_client_specs', spec)
    runWebClientTest(boundServer, spec, 60000)
| girder/large_image | girder/test_girder/test_web_client.py | test_web_client.py | py | 1,472 | python | en | code | 162 | github-code | 90 |
7714755583 | import requests
from parsel import Selector
from app.constants import PAGE_TARGET
from app.helpers import (
set_new_prices_list,
set_new_units_list,
set_new_values_list_target_1,
set_new_values_list_target_2,
)
def crawler(number_page_target):
    """Fetch the page registered under *number_page_target* in PAGE_TARGET
    and dispatch it to the matching parser.

    Returns the list of machine specifications produced by the parser.
    """
    url = PAGE_TARGET[number_page_target]
    page = Selector(text=requests.get(url).text)
    if number_page_target == 1:
        return page_target_1_process(page)
    return page_target_2_process(page)
def page_target_1_process(selector):
    """Parse the first target page's pricing cards into rows of
    [name, storage, bandwidth, cpu/ram, extra, price].

    The `index_values_list`/`index_units_list` windows are advanced per card by
    the `set_new_*` helpers because the page exposes all cards' <b>/<li> texts
    in flat lists.  NOTE(review): the hard-coded indices (20, 21, 23, 29, 30)
    for the last card depend on the live page layout — confirm against the site.
    """
    divs_all_machines = selector.xpath('//div[@class="row row--eq-height packages"]')
    machines_list = divs_all_machines.xpath('//div[@class="col-lg-3"]')
    machine_list = []
    # Sliding index windows into the flattened value/unit lists below.
    index_values_list = [0, 1, 2, 3, 4]
    index_units_list = [1, 3, 4]
    for index, machine in enumerate(machines_list):
        key = (machine.xpath('//h3[@class="package__title h6"]/text()')[index].get()).strip()
        values_list = machine.xpath('//li[@class="package__list-item"]//b/text()').getall()
        units = [
            unit.replace("\n", "").replace("\t", "")
            for unit in machine.xpath('//li[@class="package__list-item"]/text()').getall()
        ]
        if index < len(machines_list) - 1:
            machine = [
                key,
                (
                    f"{values_list[index_values_list[1]]}{units[index_units_list[1]]} "
                    f"{values_list[index_values_list[2]]}{units[index_units_list[2]]}"
                ),
                values_list[index_values_list[3]],
                f"{values_list[index_values_list[0]]}{units[index_units_list[0]]}",
                values_list[index_values_list[4]],
                machine.xpath('//span[@class="price__value"]/b/text()')[index].get(),
            ]
            machine_list.append(machine)
        else:  # the last machine has two specifications about SSD/Disk, so it needs extra implementations
            machine = [
                key,
                f"{values_list[20]}{units[29]} {values_list[21]}{units[30]}",
                values_list[index_values_list[4]],
                (
                    f"{values_list[index_values_list[0]]}{units[index_units_list[0]]}"
                    f" / {values_list[index_values_list[1]]}{units[index_units_list[1]]}"
                ),
                values_list[23],
                machine.xpath('//span[@class="price__value"]/b/text()')[index].get(),
            ]
            machine_list.append(machine)
        # Advance both index windows to the next card's slots.
        index_values_list = set_new_values_list_target_1(index_values_list)
        index_units_list = set_new_units_list(index_units_list)
    return machine_list
def page_target_2_process(selector):
    """Parse the second target page's pricing cards into rows of
    [title, value, value, value, value, "price price"].

    As in page_target_1_process, the xpath calls return the values of ALL
    cards in flat lists, so `index_values_list`/`index_prices_list` are
    advanced per card by the `set_new_*` helpers.
    """
    section_all_machines = selector.xpath('//section[@id="pricing-card-container"]')
    machines_list = section_all_machines.xpath('//div[re:test(@class, "pricing-card ")]')
    machine_list = []
    # Sliding index windows into the flattened per-page lists below.
    index_values_list = [0, 2, 4, 6]
    index_prices_list = [0, 1]
    for index, machine in enumerate(machines_list):
        values_list = machine.xpath('//li[@class="pricing-card-list-items"]/text()').getall()
        prices_list = machine.xpath('//p[@class="pricing-card-price"]/text()').getall()
        machine = [
            machine.xpath('//h3[@class="pricing-card-title"]/text()')[index].get(),
            values_list[index_values_list[1]],
            values_list[index_values_list[0]],
            values_list[index_values_list[2]],
            values_list[index_values_list[3]],
            f"{prices_list[index_prices_list[0]]} {prices_list[index_prices_list[1]]}",
        ]
        machine_list.append(machine)
        index_values_list = set_new_values_list_target_2(index_values_list)
        index_prices_list = set_new_prices_list(index_prices_list)
    return machine_list
| rodrigomaria/crawler | app/crawler.py | crawler.py | py | 3,854 | python | en | code | 0 | github-code | 90 |
21403119252 | import json
import random
# Rewrite every pedestrian's freeFlowSpeed in pedestrian.json with a value
# drawn uniformly from the [min, max] range entered by the user.
random.seed(1)  # fixed seed so repeated runs produce the same speeds

number_string = input('input the min speed:')
n = float(number_string)
number_string = input('input the max speed:')
m = float(number_string)

with open('pedestrian.json', encoding='utf-8') as a:
    data = json.load(a)

x = data['dynamicElements']
# Bug fixes vs. original: `range(0, len(x))` and the redundant `int(y)` cast
# (y is already an int index) are simplified.
for y in range(len(x)):
    data['dynamicElements'][y]['freeFlowSpeed'] = random.uniform(n, m)

with open('pedestrian.json', 'w') as file:
    json.dump(data, file, indent=2)

print("Finished!")  # fixed user-facing typo ("Finihsed!")
| lisazhanglii/The-Behavior-aware-Methodology-for-Crowd-Management_Vadere | Change_All_Speed.py | Change_All_Speed.py | py | 502 | python | en | code | 1 | github-code | 90 |
24878149943 | from vehicle import vehicle
from car import car
from supercar import supercar
class mechanicgarage():
    """A garage that performs work on vehicles and tracks the running bill.

    Attributes:
        agcost: average cost passed at construction (name kept as-is for
                backward compatibility, although it looks like a misspelling
                of "avgcost").
        amount: running total billed so far; always starts at 0, matching the
                original behaviour (the `amount` argument is accepted but
                ignored for compatibility).
    """
    def __init__(self, ownername, avgcost, amount):
        self.__ownername = ownername
        self.agcost = avgcost
        self.amount = 0

    # This method changes color of vehicle and adds `cost` to the bill.
    def changeColor(self, vehicle, cost):
        self.color = input('The price for color change is 2500 what color would u like: ')
        self.amount = self.amount + cost
        # Bug fix: the original rebound a local `cost` to a formatted string
        # and discarded it; that dead statement is removed.
        print(f'You changed your color to {self.color}')

    # This method fits new tires and adds to the bill; vehicles priced at
    # 250000 or more pay a premium per-tire rate.
    def newTires(self, vehicle, tires):
        # Bug fix: accumulate into the bill instead of overwriting it,
        # so earlier work (e.g. a color change) is not lost.
        if vehicle.price < 250000:
            self.amount += tires * 250
        else:
            self.amount += tires * 1000
        # Message typo fixed ("remeber" -> "remember").
        print(f'Your total cost is {self.amount} remember to come back after a 100,000 miles!')

    # This method prints the total cost of all the work so far.
    def Totalcost(self, vehicle):
        print(f'Your cost for the total work is {self.amount}')
# Demo: build a car and a garage (the example service calls are left
# commented out by the original author).
tesla = car('Tesla', 'Model x', 80000, 2018, 'white', 220, True)
richmond = mechanicgarage('Bob', 1500, 0)
# richmond.newTires(tesla,3)
# richmond.changeColor(tesla, 2000)
# richmond.Totalcost(tesla)
# McLaren = supercar('McLaren', 'F1', 1000000, 1961, "red", 231, 627,4000, False)
# italymechanic = mechanicgarage('Dan', 40000, 0)
| sanchezjairo/Car-garage-OOP | Garage.py | Garage.py | py | 1,431 | python | en | code | 0 | github-code | 90 |
17747309645 | from django.shortcuts import render ,redirect
from accounts.models import Patient_Details,Doctor
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from .models import History,DoctorSchedule,Appointment
from datetime import date
# Create your views here.
def doctor_home_view(request):
    """Render the doctor's landing page."""
    template_name = 'doctor/doctor_home.html'
    return render(request, template_name)
# def prescription(request):
# data = Patient_Details.objects.all()
# return render(request, 'doctor/prescription.html', {'data': data})
@login_required
def doctor_view_appointment(request):
    """List the appointments (patients) assigned to the logged-in doctor.

    The doctor is identified by ``request.user.username``, which is the value
    stored in ``Patient_Details.doctor``.
    """
    username = request.user.username
    # Bug fix: removed a leftover debug print of the username.
    # Retrieve the patients associated with the specific doctor
    patients = Patient_Details.objects.filter(doctor=username)
    # Pass the patient data to the template
    return render(request, 'doctor/doctor_view_appointment.html', {'patients': patients})
def doctor_view_patient(request):
    """List every patient for the doctor's patient overview page."""
    # Idiom fix: argument-less filter() returns the same queryset as all(),
    # but all() states the intent explicitly.
    data = Patient_Details.objects.all()
    return render(request, 'doctor/doctor_view_patient.html', {'data': data})
def doctor_view_history(request, pid):
    """Show the prescription history of the patient with primary key ``pid``.

    NOTE(review): ``History.objects.filter(p_id_id=new)`` passes a model
    instance to an ``_id`` lookup; presumably Django coerces it to its pk,
    making this equivalent to ``p_id_id=pid`` — confirm.
    """
    new = Patient_Details.objects.get(id=pid)
    data = Patient_Details.objects.values()  # dicts for all patient rows
    items = History.objects.filter(p_id_id=new)
    context = {
        'items': items,
        'new': new,
        'data': data
    }
    return render(request, 'doctor/doctor_view_history.html',context)
# def doctor_view(request):
# # Get the specific doctor's ID from the request parameters or session
# doctor_id = request.GET.get('doctor_id')
# # Retrieve the patients associated with the specific doctor
# patients = Patient_Details.objects.filter(doctor=doctor_id)
# # Pass the patient data to the template
# context = {'data': patients}
# return render(request, 'doctor/doctor_view.html', context)
@login_required
def prescription(request , pid):
    """Render the prescription page for patient ``pid`` of the logged-in doctor.

    Context:
        new     -- the patient row(s) matching this doctor and patient id
        docname -- Doctor rows whose dname equals the logged-in username
        items   -- the single most recent History entry (ordered by -date)
    """
    username = request.user.username
    docname =Doctor.objects.filter(dname=username).distinct()
    new = Patient_Details.objects.filter(doctor=username,id=pid)
    items = History.objects.filter(p_id_id=pid).order_by('-date')[:1]
    context={
        'new':new,
        'docname':docname,
        'items':items,
    }
    return render(request, 'doctor/prescription.html',context)
def search(request):
    """Filter the appointment list by a case-insensitive name query (?q=...).

    Without a ``q`` parameter, every patient is listed.
    """
    query = request.GET.get('q')
    if query is not None:
        data = Patient_Details.objects.filter(name__icontains=query)
    else:
        data = Patient_Details.objects.all()
    return render(request, 'doctor/doctor_view_appointment.html', {'data': data})
def doctor_add_prescription(request, pid):
    """Create a History entry for patient ``pid`` from the posted form,
    then (re-)render the add-prescription page.

    On GET the POST branch is skipped and only the template is rendered.
    NOTE(review): ``item`` is first a History instance (POST branch) and is
    then rebound to the all-patients queryset — confusing but intentional
    per the template context below.
    """
    if request.method == "POST":
        symptoms = request.POST.get('symp')
        tests = request.POST.get('tests')
        advice = request.POST.get('advice')
        medicine = request.POST.get('medicine')
        patient = Patient_Details.objects.get(id=pid)
        item = History(symptoms=symptoms, tests=tests, advice=advice, medicine=medicine, p_id=patient)
        item.save()
        messages.info(request, "ITEM ADDED SUCCESSFULLY")
    items = History.objects.all()
    new = Patient_Details.objects.get(id=pid)
    item = Patient_Details.objects.all()
    context = {
        'items': items,
        'item': item,
        'new': new
    }
    return render(request, 'doctor/doctor_add_prescription.html',context)
# import datetime
# def create_appointment(request):
# if request.method == 'POST':
# doctor_id = request.POST['doctor_id']
# patient_id = request.POST['patient_id']
# appointment_date = request.POST['appointment_date']
# appointment_time = request.POST['appointment_time']
# doctor = Doctor.objects.get(id=doctor_id)
# patient = Patient_Details.objects.get(id=patient_id)
# # Validate appointment date and time
# appointment_datetime = datetime.datetime.combine(appointment_date, appointment_time)
# day_of_week = appointment_date.strftime("%A")
# try:
# doctor_schedule = DoctorSchedule.objects.get(doctor=doctor, day=day_of_week)
# if doctor_schedule.start_time <= appointment_time <= doctor_schedule.end_time:
# # Appointment time is within doctor's schedule
# appointment = Appointment(doctor=doctor, patient=patient, appointment_datetime=appointment_datetime)
# appointment.save()
# # Redirect or render success message
# messages.success(request, "Appointment created successfully!")
# else:
# # Appointment time is not within doctor's schedule
# # Render error message
# messages.error(request, "Selected appointment time is not available for the doctor.")
# except DoctorSchedule.DoesNotExist:
# # Doctor does not have a schedule for the selected day
# # Render error message
# messages.error(request, "Doctor does not have a schedule for the selected day.")
# # Provide necessary context data for rendering the form template
# doctors = Doctor.objects.all()
# patients = Patient_Details.objects.all()
# context = {
# 'doctors': doctors,
# 'patients': patients
# }
# return render(request, 'create_appointment.html', context)
| Zaman-Shah/Mainproject | doctor/views.py | views.py | py | 5,484 | python | en | code | 0 | github-code | 90 |
23930524965 | from setuptools import setup, find_packages
import os
def read(fname):
    """Return the text of *fname*, resolved relative to this file's directory."""
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as fh:
        return fh.read()
# Package metadata for the isimage distribution (whole-mount in-situ
# hybridisation image analysis).  Two console scripts are installed.
setup(name = 'isimage'
    , version = '0.1.2'
    , packages = find_packages()
    , entry_points = {'console_scripts':['analyse_image = isimage.analyse_image:main'
                        , 'select_images = isimage.select_images:main'
                        ]}
    , author = 'Ilya Patrushev'
    , author_email = 'ilya.patrushev@gmail.com'
    , description = 'A module and scripts for analysis of whole-mount in-situ hybridisation images.'
    , long_description = read('README')
    , license = 'GPL v2.0'
    , package_data = {'isimage.analyse_image':['test_images/*'], 'isimage.select_images':['*.dump', '*.txt', 'test_images/*.*', 'test_images/*/*.*']}
    , url = 'https://github.com/ilyapatrushev/isimage'
    , install_requires = ['Pillow>=2.9.0'
                        , 'matplotlib>=1.4.3'
                        , 'numpy>=1.9.2'
                        , 'pandas>=0.16.0'
                        , 'scikit-learn>=0.15.2'
                        , 'scipy>=0.15.1'
                        , 'python-pptx>=0.5.7'
                        , 'pbr>=0.11'
                        , 'six>=1.7'
                        , 'funcsigs>=0.4'
                        ]
    )
| ilyapatrushev/isimage | setup.py | setup.py | py | 1,132 | python | en | code | 0 | github-code | 90 |
12084273302 | import itertools
LIMIT = 10**999  # smallest 1000-digit number

# Project Euler 25: print the 1-based index of the first Fibonacci number
# with 1000 digits.  `fibs` starts at F(1), F(2); after the k-th append the
# list holds k+2 terms, so the printed index is i + 3 == len(fibs).
fibs = [1, 1]
i = 0
while True:
    next_fib = fibs[-1] + fibs[-2]
    fibs.append(next_fib)
    if next_fib >= LIMIT:
        print(i + 3)
        break
    i += 1
| mishajw/projecteuler | src/p025_1000digit_fib.py | p025_1000digit_fib.py | py | 201 | python | en | code | 0 | github-code | 90 |
18177252499 | N = int(input())
A = list(map(int,input().split()))
# Greedy stock trading: starting with 1000 yen, buy as many shares as
# possible right before a price rise and sell right before a fall.
# pos == 1 -> holding cash (looking to buy); pos == 0 -> holding shares
# (looking to sell).  `kabu` is the number of shares currently held.
money=1000
best = money
pos = 1
kabu = 0
# First move: buy only if the price will not fall tomorrow
if A[0]<=A[1]:
    kabu = money//A[0]
    money -= kabu*A[0]
    pos=0
else:
    pos=1
# From day 2 on: buy, sell, or hold
for i in range(1,N):
    if pos==0:# sell when yesterday's price was a local peak
        if A[i-1]>=A[i]:
            money+=kabu*A[i-1]
            kabu=0
            pos=1
    elif pos==1:# buy when the price is about to rise
        if A[i-1]<A[i]:
            kabu = money//A[i-1]
            money -= kabu*A[i-1]
            pos=0
    # Track the best liquidation value seen at today's price.
    best = max(best,money+kabu*A[i])
print(best) | Aasthaengg/IBMdataset | Python_codes/p02603/s050789004.py | s050789004.py | py | 605 | python | en | code | 0 | github-code | 90 |
12124862707 | from __future__ import division, print_function, absolute_import
import CoolProp as CP
from CoolProp.CoolProp import PropsSI
from ACHP.MicroChannelCondenser import MicroCondenserClass
from ACHP.MicroFinCorrelations import MicroFinInputs
from ACHP.convert_units import in2m, mm2m, cfm2cms, F2K, kPa2Pa, C2K
# --- Micro-channel condenser geometry and test conditions ---------------
Fins=MicroFinInputs()
Fins.Tubes.NTubes=30       #Number of tubes (per bank for now!)
Fins.Tubes.Nbank=2         #Number of banks (set to 1 for now!)
Fins.Tubes.Npass=2         #Number of passes (per bank) #averaged if not even
Fins.Tubes.Nports=11       #Number of rectangular ports
Fins.Tubes.Ltube=in2m(18.24)   #length of a single tube
Fins.Tubes.Td=in2m(1)          #Tube outside width (depth)
Fins.Tubes.Ht=in2m(0.072)      #Tube outside height (major diameter)
Fins.Tubes.b=in2m(0.488)       #Tube spacing
Fins.Tubes.tw=in2m(0.015)      #Tube wall thickness
Fins.Tubes.twp=in2m(0.016)     #Port (channel) wall thickness
Fins.Tubes.beta=1.7675         #Port (channel) aspect ratio (=width/height)
Fins.Tubes.kw=237              #Wall thermal conductivity
Fins.Fins.FPI=13               #Fin per inch
Fins.Fins.Lf=in2m(1)           #Fin length = tube outside width in this HX
Fins.Fins.t=in2m(0.0045)       ##measured## #Fin thickness
Fins.Fins.k_fin=117            #Fin thermal conductivity for pure Aluminum
Fins.Air.Vdot_ha=cfm2cms(1500) #Air volume flow rate in m^3/s
Fins.Air.Tdb=F2K(125)          #Air inlet temperature, K
Fins.Air.p=101325              #Air inlet pressure in Pa
Fins.Air.RH=0.199              #Air inlet relative humidity
Fins.Air.FanPower=855          #Fan power, Watts
Fins.Louvers.Lalpha=25         ##estimated## #Louver angle, in degree
Fins.Louvers.lp=mm2m(1.12)     ##measured## #Louver pitch
# --- Refrigerant state via CoolProp ------------------------------------
Ref = 'R407C'
Backend = 'HEOS' #choose between: 'HEOS','TTSE&HEOS','BICUBIC&HEOS','REFPROP','SRK','PR'
AS = CP.AbstractState(Backend, Ref)
# Refrigerant-side inlet conditions for the condenser model.
params={
        'AS': AS,
        'mdot_r': 0.04472,
        'Tin_r': C2K(110),
        'psat_r': kPa2Pa(3108),
        'Fins': Fins,
        'FinsType': 'MultiLouveredMicroFins',
        'Verbosity':0,
}
# --- Run the model and report section-by-section results ----------------
Cond=MicroCondenserClass(**params)
Cond.Calculate()
print ('Heat transfer rate in condenser is', Cond.Q,'W')
print ('Heat transfer rate in condenser (superheat section) is',Cond.Q_superheat,'W')
print ('Heat transfer rate in condenser (twophase section) is',Cond.Q_2phase,'W')
print ('Heat transfer rate in condenser (subcooled section) is',Cond.Q_subcool,'W')
print ('Fraction of circuit length in superheated section is',Cond.w_superheat)
print ('Fraction of circuit length in twophase section is',Cond.w_2phase)
print ('Fraction of circuit length in subcooled section is',Cond.w_subcool)
# for id, unit, value in Cond.OutputList():
# print (str(id) + ' = ' + str(value) + ' ' + str(unit)) | CenterHighPerformanceBuildingsPurdue/ACHP | ComponentTests/MC_Cond_validation.py | MC_Cond_validation.py | py | 2,916 | python | en | code | 48 | github-code | 90 |
42008473513 | __revision__ = "src/engine/SCons/Tool/jar.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Subst
import SCons.Util
def jarSources(target, source, env, for_signature):
    """Only include sources that are not a manifest file.

    When a jar working directory is known (either $JARCHDIR or a source
    node's java_classdir attribute), each source is emitted as
    ``-C <dir> <path-relative-to-dir>`` so the jar tool stores entries
    relative to that directory.
    """
    try:
        env['JARCHDIR']
    except KeyError:
        jarchdir_set = False
    else:
        jarchdir_set = True
        jarchdir = env.subst('$JARCHDIR', target=target, source=source)
        if jarchdir:
            jarchdir = env.fs.Dir(jarchdir)
    result = []
    for src in source:
        contents = src.get_text_contents()
        # Sources beginning with "Manifest-Version" are the manifest; they
        # are handled separately by jarManifest/jarFlags.
        if contents[:16] != "Manifest-Version":
            if jarchdir_set:
                _chdir = jarchdir
            else:
                try:
                    _chdir = src.attributes.java_classdir
                except AttributeError:
                    _chdir = None
            if _chdir:
                # If we are changing the dir with -C, then sources should
                # be relative to that directory.
                src = SCons.Subst.Literal(src.get_path(_chdir))
                result.append('-C')
                result.append(_chdir)
            result.append(src)
    return result
def jarManifest(target, source, env, for_signature):
    """Look in sources for a manifest file, if any.

    Returns the first source node whose text begins with "Manifest-Version",
    or the empty string when no manifest is present.
    """
    for candidate in source:
        if candidate.get_text_contents().startswith("Manifest-Version"):
            return candidate
    return ''
def jarFlags(target, source, env, for_signature):
    """Expand $JARFLAGS, appending 'm' when a manifest source is present
    (the jar tool needs the 'm' flag to pick up a manifest file)."""
    flags = env.subst('$JARFLAGS', target=target, source=source)
    has_manifest = any(
        s.get_text_contents().startswith("Manifest-Version") for s in source
    )
    if has_manifest and 'm' not in flags:
        return flags + 'm'
    return flags
def generate(env):
    """Add Builders and construction variables for jar to an Environment."""
    # NOTE(review): relies on the SCons.Tool attribute being available on the
    # already-imported SCons package (only SCons.Subst/SCons.Util are imported
    # at the top of this file) — true when SCons loads this tool module.
    SCons.Tool.CreateJarBuilder(env)

    env['JAR'] = 'jar'
    env['JARFLAGS'] = SCons.Util.CLVar('cf')
    env['_JARFLAGS'] = jarFlags
    env['_JARMANIFEST'] = jarManifest
    env['_JARSOURCES'] = jarSources
    # TEMPFILE spills over-long command lines to a response file on
    # platforms with short command-line limits.
    env['_JARCOM'] = '$JAR $_JARFLAGS $TARGET $_JARMANIFEST $_JARSOURCES'
    env['JARCOM'] = "${TEMPFILE('$_JARCOM')}"
    env['JARSUFFIX'] = '.jar'
def exists(env):
    """Tool availability hook: truthy when a 'jar' executable can be found."""
    return env.Detect('jar')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| cloudant/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Tool/jar.py | jar.py | py | 2,502 | python | en | code | 570 | github-code | 90 |
17679457158 | from __future__ import annotations
from typing import TYPE_CHECKING, Generic, TypeVar
from sqlalchemy import tuple_
from sqlalchemy.orm import Session
from telegram import error, ChatMember
from .models import GivenInevitableTitle
from .models import GivenShuffledTitle
from .models import InevitableTitle
from .models import ShuffledTitle
from .models.title import TitleFromGroupChat
from ...sample import sample_items_inplace
if TYPE_CHECKING:
from datetime import datetime
from typing import List, Tuple, Optional, Dict, Iterable
from telegram import Chat
from .models import GroupChat
from .models import Participant
SHUFFLED_TITLES_TO_GIVE = 5
def assign_titles(
        session: Session,
        chat_data: GroupChat,
        tg_chat: Chat,
        trigger_time: datetime
) -> Tuple[List[GivenInevitableTitle], List[GivenShuffledTitle]], :=None if False else None  # noqa
TitleT = TypeVar("TitleT", bound=TitleFromGroupChat)
class NewTitles(Generic[TitleT]):
    """Pairs a list of titles with the participants who will receive them.

    The two lists are positionally aligned: titles[i] goes to participants[i].
    """
    def __init__(self, titles: List[TitleT], participants: List['Participant']):
        # The pairing below relies on equal lengths.
        assert len(titles) == len(participants)
        self.titles: List[TitleT] = titles
        self.participants = participants
class NewInevitableTitles(NewTitles[InevitableTitle]):
    """New (participant, inevitable-title) pairs, with streak bookkeeping."""
    def save(
            self,
            session: Session,
            given_on: datetime,
            old_given_on: Optional[datetime]
    ) -> List[GivenInevitableTitle]:
        """Calculates new streak lengths and adds new given inevitable titles to database"""
        previous_streaks = self._load_old_streaks(session, old_given_on)
        records = []
        for participant, title in zip(self.participants, self.titles):
            # A streak continues only if the same participant held the same
            # title on the previous assignment date.
            new_streak_length = 1
            if previous_streaks:
                new_streak_length += previous_streaks.get(participant.id, 0)
            record = GivenInevitableTitle(
                participant=participant,
                title=title,
                given_on=given_on,
                streak_length=new_streak_length,
            )
            records.append(record)
            session.add(record)
        return records

    def _load_old_streaks(
        self, session: Session, old_given_on: Optional[datetime]
    ) -> Optional[Dict[int, int]]:
        """Map participant_id -> streak_length for the previous assignment,
        restricted to the same (participant, title) pairs as this batch.
        Returns None when there was no previous assignment."""
        if old_given_on is None:
            return None
        participant_filter = tuple_(
            GivenInevitableTitle.participant_id,
            GivenInevitableTitle.title_id
        ).in_(
            zip(self._get_participant_ids(), self._get_title_ids())
        )
        query = session.query(
            GivenInevitableTitle.participant_id,
            GivenInevitableTitle.streak_length
        ).filter(
            participant_filter,
            GivenInevitableTitle.given_on == old_given_on,
        )
        return dict(query)

    def _get_title_ids(self) -> Iterable[int]:
        return (i.id for i in self.titles)

    def _get_participant_ids(self) -> Iterable[int]:
        return (p.id for p in self.participants)
class NewShuffledTitles(NewTitles[ShuffledTitle]):
    """New (participant, shuffled-title) pairs; no streak tracking."""
    def save(self, session: Session, given_on: datetime) -> List[GivenShuffledTitle]:
        """Adds new given shuffled titles to database"""
        records = []
        for participant, title in zip(self.participants, self.titles):
            record = GivenShuffledTitle(participant=participant, title=title, given_on=given_on)
            session.add(record)
            records.append(record)
        return records
def _choose_titles(
        chat: GroupChat,
) -> Optional[Tuple[List[InevitableTitle], List[ShuffledTitle], List[Participant]]]:
    """Pick today's titles and the participants who receive them.

    Returns (inevitable_titles, shuffled_titles, participants) where the
    first len(inevitable_titles) participants get the inevitable titles and
    the rest get shuffled ones, or None if the chat has no participants.
    """
    participant_ids = chat.get_participant_ids()
    participant_count = len(participant_ids)
    if not participant_count:
        return None
    inevitable_titles: List[InevitableTitle] = chat.inevitable_titles[:participant_count]
    inevitable_titles_count = len(inevitable_titles)
    # Give at most SHUFFLED_TITLES_TO_GIVE shuffled titles to the remainder.
    shuffled_titles_needed = min(
        participant_count - inevitable_titles_count,
        SHUFFLED_TITLES_TO_GIVE
    )
    shuffled_titles = chat.dequeue_shuffled_titles(shuffled_titles_needed)
    shuffled_titles_count = len(shuffled_titles)
    sample_size = shuffled_titles_count + inevitable_titles_count
    # sample_items_inplace moves the chosen sample to the tail; the reversed
    # tail slice below extracts exactly `sample_size` sampled ids.
    sample_items_inplace(participant_ids, sample_size)
    sampled_ids = participant_ids[:(-sample_size - 1):-1]
    participants = chat.get_participants_ordered(sampled_ids)
    return inevitable_titles, shuffled_titles, participants
def refresh_user_data(participants: List[Participant], tg_chat: Chat):
    """Gets current names and usernames of participants, updates database with this data and participation status"""
    if not participants:
        return
    for participant in participants:
        try:
            member = tg_chat.get_member(participant.user_id)
            user = member.user
            participant.full_name = user.full_name
            participant.username = user.username
            # Users who left or were kicked no longer take part in draws.
            if member.status in (ChatMember.KICKED, ChatMember.LEFT):
                participant.is_active = False
        except error.BadRequest as ex:
            # Telegram reports deleted/unknown accounts as "User not found";
            # any other BadRequest is deliberately ignored (best effort).
            if ex.message == "User not found":
                participant.is_active = False
                participant.is_missing = True
| cl0ne/cryptopotato-bot | devpotato_bot/commands/daily_titles/assign_titles.py | assign_titles.py | py | 6,189 | python | en | code | 2 | github-code | 90 |
13003083228 |
# D4
# 기존 원소들에 새로운 원소를 더한 값들을 추가해주는 방식 + set활용
T = int(input())
for tc in range(1, T+1):
N = int(input())
score = list(map(int, input().split()))
s = len(score)
# 기존 값들에 새로운 값을 각각 더해서 추가하느데 중복을 제거
ans = {0}
for i in range(s):
temp = set()
for j in ans: # ans에 넣으면 길이가 변하니까 일단 temp에
if j + score[i] not in ans:
temp.add(j + score[i])
for k in temp:
ans.add(k)
print('#{} {}'.format(tc, len(ans)))
# 부분집합 -> 시간초과
# T = int(input())
# for tc in range(1, T+1):
# N = int(input())
# score = list(map(int, input().split()))
# s = len(score)
# cnt = []
#
# for i in range(1<<s):
# sum_score = 0
# for j in range(s):
# if i & (1<<j):
# sum_score += score[j]
# if sum_score in cnt:
# continue
# else:
# cnt.append(sum_score)
#
#
# print('#{} {}'.format(tc, len(cnt)))
| hyeinkim1305/Algorithm | SWEA/D4/SWEA_3752_가능한시험점수.py | SWEA_3752_가능한시험점수.py | py | 1,122 | python | ko | code | 0 | github-code | 90 |
35310596867 | import requests
from bs4 import BeautifulSoup
import re
# BeautifulSoup demo: fetch a sample page, then walk parents and tags.
url = 'http://python123.io/ws/demo.html'
r = requests.get(url)
demo = r.text
soup = BeautifulSoup(demo,'html.parser')
print (soup.prettify())
tag = soup.title.parent
# Print the name of every ancestor of the first <a> tag (the document
# root itself has no name and prints as None).
for parent in soup.a.parents:
    if parent is None:
        print(parent)
    else:
        print(parent.name)
# Find all <a> tags
for link in soup.find_all('a'):
    print (link.get('href'))
# find_all(True) would print every tag
# find_all(name, attrs, recursive, string,**kwargs)
# Find tags whose names contain 'b' (regex match)
for tag in soup.find_all(re.compile('b')):
    print (tag.name)
| Ziliang-Luo/crawler | BeautifulSoup.py | BeautifulSoup.py | py | 599 | python | en | code | 0 | github-code | 90 |
73618541738 | from fastapi import APIRouter, Cookie, Depends, Request, Response, status
from fastapi.security import OAuth2PasswordRequestForm
from backend import models
from backend.api.docs import auth as auth_responses
from backend.metrics import auth as auth_metrics
from backend.services.auth import AuthService
from backend.settings import Settings, get_settings
router = APIRouter(
prefix='/auth',
tags=['auth'],
)
REFRESH_TOKEN_COOKIE_KEY = "refreshToken"
@router.post(
    "/registration",
    response_model=models.Tokens,
    status_code=status.HTTP_201_CREATED,
    responses=auth_responses.registration_responses,
    summary="Регистрация",
)
def registration(
    request: Request,
    response: Response,
    user_data: models.UserCreate,
    auth_service: AuthService = Depends(),
    settings: Settings = Depends(get_settings),
) -> models.Tokens:
    """Register a new user and return access/refresh tokens."""
    auth_metrics.REGISTRATION_COUNTER.inc()
    tokens = auth_service.register_new_user(user_data=user_data, user_agent=request.headers.get('user-agent'))
    # httponly keeps the refresh token out of reach of client-side scripts.
    response.set_cookie(
        key=REFRESH_TOKEN_COOKIE_KEY, value=tokens.refresh_token, expires=settings.jwt_refresh_expires_s, httponly=True
    )
    return tokens
@router.post(
    "/login",
    response_model=models.Tokens,
    status_code=status.HTTP_200_OK,
    responses=auth_responses.login_responses,
    summary="Вход в систему",
)
def login(
    request: Request,
    response: Response,
    user_data: OAuth2PasswordRequestForm = Depends(),
    auth_service: AuthService = Depends(),
    settings: Settings = Depends(get_settings),
) -> models.Tokens:
    """Authenticate a user and set the httponly refresh-token cookie."""
    auth_metrics.LOGIN_COUNTER.inc()
    tokens = auth_service.login_user(
        login=user_data.username, password=user_data.password, user_agent=request.headers.get('user-agent')
    )
    response.set_cookie(
        key=REFRESH_TOKEN_COOKIE_KEY, value=tokens.refresh_token, expires=settings.jwt_refresh_expires_s, httponly=True
    )
    return tokens
@router.get(
    "/refresh",
    response_model=models.Tokens,
    status_code=status.HTTP_200_OK,
    responses=auth_responses.refresh_responses,
    summary="Обновление токенов доступа",
)
def refresh_tokens(
    request: Request,
    response: Response,
    auth_service: AuthService = Depends(),
    refresh_token: str = Cookie(None, alias=REFRESH_TOKEN_COOKIE_KEY),
    settings: Settings = Depends(get_settings),
) -> models.Tokens:
    """Exchange the refresh-token cookie for a fresh pair of tokens."""
    auth_metrics.REFRESH_TOKENS_COUNTER.inc()
    tokens = auth_service.refresh_tokens(refresh_token=refresh_token, user_agent=request.headers.get('user-agent'))
    # Rotate the cookie so the new refresh token replaces the old one.
    response.set_cookie(
        key=REFRESH_TOKEN_COOKIE_KEY, value=tokens.refresh_token, expires=settings.jwt_refresh_expires_s, httponly=True
    )
    return tokens
@router.post(
    "/logout", status_code=status.HTTP_200_OK, responses=auth_responses.logout_responses, summary="Выход из системы"
)
def logout(
    request: Request,
    response: Response,
    auth_service: AuthService = Depends(),
    refresh_token: str = Cookie(None, alias=REFRESH_TOKEN_COOKIE_KEY),
):
    """Invalidate the session server-side and clear the refresh cookie."""
    auth_metrics.LOGOUT_COUNTER.inc()
    auth_service.logout(refresh_token=refresh_token, user_agent=request.headers.get('user-agent'))
    response.delete_cookie(key=REFRESH_TOKEN_COOKIE_KEY)
    return {"message": "Выход из системы выполнен"}
| StanislavBeskaev/Chat-FastAPI-React | backend/api/auth.py | auth.py | py | 3,578 | python | en | code | 0 | github-code | 90 |
18211853969 | import sys
input = sys.stdin.readline
from collections import defaultdict, deque
# Read n vertices / m edges, BFS from vertex 1, and record each vertex's
# BFS parent in s (1-based; s[0] is padding, s[1] = 0 marks the root).
(n, m), g = map(int, input().split()), defaultdict(list)
for i in range(m): a, b = map(int, input().split()); g[a].append(b); g[b].append(a)
s, q = [0, 0] + [-1 for i in range(1, n)], deque([1])
# NOTE(review): this pops exactly n-1 times, which assumes every vertex is
# reachable from 1; a disconnected graph would raise on popleft() — confirm
# the input guarantees connectivity.
for i in range(2, n + 1):
    x = q.popleft()
    for a in g[x]:
        if s[a] == -1: q.append(a); s[a] = x
# Any vertex still at -1 was never reached -> no valid assignment.
if not all([a != -1 for a in s]): print('No')
else:
    print('Yes')
for i in s[2:]: print(i) | Aasthaengg/IBMdataset | Python_codes/p02678/s914300599.py | s914300599.py | py | 484 | python | en | code | 0 | github-code | 90 |
27693237784 | from basic.collection.tree import Node
from basic.collection.stack import Stack
def postorder_recursive(head, results):
    """Append node values to `results` in post-order (left, right, root)."""
    if not head:
        return
    for child in (head.left, head.right):
        postorder_recursive(child, results)
    results.append(head.val)
def postorder_stack(head, results):
    """Iterative post-order traversal using an explicit Stack.

    The top of the stack is emitted only after both children; `head` is
    reused to track the most recently emitted node, so we can tell whether
    we are returning from a child subtree.
    """
    if not head:
        return
    # Earlier two-stack implementation kept for reference:
    # temp_stack = Stack()
    # temp_stack_a = Stack()
    # temp_stack.push(head)
    #
    # while not temp_stack.is_empty():
    #     node = temp_stack.pop()
    #     temp_stack_a.push(node)
    #
    #     if node.left is not None:
    #         temp_stack.push(node.left)
    #
    #     if node.right is not None:
    #         temp_stack.push(node.right)
    # while not temp_stack_a.is_empty():
    #     node = temp_stack_a.pop()
    #     results.append(node.val)
    temp_stack = Stack()
    temp_stack.push(head)
    while not temp_stack.is_empty():
        c = temp_stack.peek()
        # Descend into an unvisited left child first, then the right one.
        if c.left and c.left != head and c.right != head:
            temp_stack.push(c.left)
        elif c.right and c.right != head:
            temp_stack.push(c.right)
        else:
            # Both children done (or absent): emit and remember as last visited.
            c = temp_stack.pop()
            results.append(c.val)
            head = c
    return
def func(root):
    """Iterative post-order traversal (left, right, root).

    Returns the list of node values, or None for an empty tree.
    Bug fix: the original computed `results` but never returned it, making
    the function a no-op for every caller.
    """
    if not root:
        return None
    results = []
    stack = [root]
    head = root  # most recently emitted node; marks finished subtrees
    while stack:
        c = stack[-1]
        if c.left and c.left != head and c.right != head:
            stack.append(c.left)
        elif c.right and c.right != head:
            stack.append(c.right)
        else:
            # Both children handled (or absent): emit this node.
            c = stack.pop()
            results.append(c.val)
            head = c
    return results
if __name__ == '__main__':
    """
         0
       1   2
      3  4 5  6
    pre-order: 0 1 3 4 2 5 6
    in-order: 3 1 4 0 5 2 6
    post-order:3 4 1 5 6 2 0
    """
    # Build the complete binary tree shown above and check both traversals
    # against the expected post-order sequence.
    nodes = [Node(i) for i in range(7)]
    head = nodes[0]
    nodes[0].left = nodes[1]
    nodes[0].right = nodes[2]
    nodes[1].left = nodes[3]
    nodes[1].right = nodes[4]
    nodes[2].left = nodes[5]
    nodes[2].right = nodes[6]
    pro = [0, 1, 3, 4, 2, 5, 6]
    ino = [3, 1, 4, 0, 5, 2, 6]
    poo = [3, 4, 1, 5, 6, 2, 0]
    results = []
    print('postorder: ', poo)
    postorder_recursive(nodes[0], results)
    print('postorder recursive traversal: ', results)
    # Reset and repeat with the iterative version.
    results = []
    print('postorder: ', poo)
    postorder_stack(nodes[0], results)
print('postorder stack traversal: ', results) | xyzacademic/LeetCode | basic/BinaryTree/postorder_traversal.py | postorder_traversal.py | py | 2,481 | python | en | code | 0 | github-code | 90 |
16534897125 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.utils.timezone import utc
import django_extensions.db.fields
class Migration(migrations.Migration):
    """Auto-generated Django migration for django_outlook: adds timestamp
    tracking (created/modified) and an is_enabled flag to MailHandler, and
    sets its default ordering.  Avoid editing applied migrations by hand.
    """

    dependencies = [
        ('django_outlook', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='mailhandler',
            options={'ordering': ('-modified', '-created'), 'get_latest_by': 'modified'},
        ),
        migrations.AddField(
            model_name='mailhandler',
            name='created',
            # Fixed historical default required because existing rows need a value.
            field=django_extensions.db.fields.CreationDateTimeField(default=datetime.datetime(2018, 6, 28, 4, 40, 38, 895000, tzinfo=utc), verbose_name='created', auto_now_add=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='mailhandler',
            name='is_enabled',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='mailhandler',
            name='modified',
            field=django_extensions.db.fields.ModificationDateTimeField(default=datetime.datetime(2018, 6, 28, 4, 40, 44, 748000, tzinfo=utc), verbose_name='modified', auto_now=True),
            preserve_default=False,
        ),
    ]
| weijia/django-outlook | django_outlook/migrations/0002_auto_20180628_1240.py | 0002_auto_20180628_1240.py | py | 1,319 | python | en | code | 0 | github-code | 90 |
9116641165 | import logging
import requests
import pandas as pd
import numpy as np
import json
from datetime import datetime, timedelta
from requests.exceptions import RequestException
class BinanceAPI:
    def __init__(self):
        """Set up endpoint, default 24-hour time window, and kline column names."""
        # self.base_url = "https://api.binance.com/api/v3"
        self.base_url = "https://api.binance.us/api/v3"  # US endpoint (global one kept above)
        # Start with past midnight today
        self.end_dt = datetime.today()
        self.start_dt = self.end_dt - timedelta(hours=24) # Get past 24 hours
        # Column orders for DataFrames built from kline responses.
        # NOTE(review): df_columns_2 matches the raw order of Binance kline
        # arrays; df_columns appears to be a derived/readable ordering — confirm.
        self.df_columns = ['open_time', 'close_time', 'open', 'high', 'low', 'close',
                    'volume', 'quote_asset_volume', 'num_trades', 'taker_buy_base_asset_volume',
                    'taker_buy_quote_asset_volume', 'ignore', 'open_timestamp', 'close_timestamp']
        self.df_columns_2 = [
            'open_timestamp', 'open', 'high', 'low', 'close',
            'volume', 'close_timestamp', 'quote_asset_volume',
            'num_trades', 'taker_buy_base_asset_volume',
            'taker_buy_quote_asset_volume', 'ignore'
        ]
def get_historical_price(self, symbol: str, currency: str, interval: str, time_range: list) -> pd.DataFrame:
# Convert start and end time strings to datetime objects
try:
start_time = datetime.fromisoformat(time_range[0].strip())
# print("start_time:",start_time)
end_time = datetime.fromisoformat(time_range[1].strip())
# print("end_time:",end_time)
except (ValueError, TypeError, IndexError):
# Use default values if time_range is invalid or None
start_time = self.start_dt
end_time = self.end_dt
# Determine the start and end timestamps based on start_time and end_time
if (end_time - start_time).total_seconds() < 600.0:
# time range shorter than 10 min
start_timestamp = round(self.start_dt.timestamp() * 1000)
end_timestamp = round(self.end_dt.timestamp() * 1000)
else:
start_timestamp = round(start_time.timestamp() * 1000)
end_timestamp = round(end_time.timestamp() * 1000)
print("start_timestamp:",start_timestamp)
print("end_timestamp:",end_timestamp)
# Log request information
logging.info(f'start_timestamp: {start_timestamp}, end_timestamp: {start_timestamp}, symbol: {symbol}, kline_time_interval: {interval}')
print("K线参数:",start_timestamp,end_timestamp,symbol,interval)
try:
# Make request to API endpoint with specified parameters
r = requests.get(f'{self.base_url}/klines?symbol={symbol}{currency}&interval={interval}&startTime={start_timestamp}&endTime={end_timestamp}&limit=3000')
content = json.loads(r.content)
# print("content:",content)
except RequestException as e:
# Log error message if request fails
logging.error(f"Error occurred while fetching data: {e}")
return None
if (len(content) > 0):
# Convert API response to pandas DataFrame
df = pd.DataFrame.from_records(content, columns=self.df_columns_2)
# Convert timestamps to datetime objects for readability
df['open_time'] = df.open_timestamp.apply(lambda ts: datetime.fromtimestamp(ts/1000))
df['close_time'] = df.close_timestamp.apply(lambda ts: datetime.fromtimestamp(ts/1000))
# Return selected columns of DataFrame, sorted by open_time in descending order
return df[self.df_columns].sort_values('open_time', ascending=False).to_dict(orient='records')
else:
# Log error message if API response is empty
logging.error('NO DATA RETRIEVED')
logging.error(f'RESPONSE: {content}')
return None
binance_api = BinanceAPI() | parity-asia/hackathon-2023-summer | projects/05-chatdatainsight/src/backend/services/binance.py | binance.py | py | 3,960 | python | en | code | 14 | github-code | 90 |
20352749495 | import pickle
from datetime import date
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
import repository.data_repository as data_repository
# Load the pre-fitted one-hot encoders produced at training time.
# NOTE(review): Windows-style relative paths -- these only resolve when the
# process CWD is the project root on Windows; confirm deployment layout.
with open('helpers\\encoders\\category_encoder.pkl', 'rb') as file:
    category_encoder = pickle.load(file)
with open('helpers\\encoders\\store_encoder.pkl', 'rb') as file:
    store_encoder = pickle.load(file)
def train_model_and_make_predictions(number_of_prices=5):
    """End-to-end pipeline: build dataset, train a model, predict the next
    price per product and persist each prediction via the repository.

    number_of_prices: how many recent scraping dates make up one training row
    (the last one is the regression target).
    """
    # Training uses `number_of_prices` dates; prediction uses one fewer
    # (the most recent prices become the features).
    df, csv_content, file_name, dates, rows = create_training_df(number_of_prices)
    encoded_df = _encode_training_df(df, number_of_prices)
    train_data, test_data, validation_data, X_train, y_train, X_test, y_test, X_validation, y_validation, accuracy_test = _split_dataset(
        encoded_df)
    model = _train_model(train_data, test_data, validation_data, X_train, y_train, X_test, y_test, X_validation,
                         y_validation,
                         accuracy_test, number_of_prices)
    new_predictions_df = create_predictions_df(number_of_prices - 1)
    new_predictions_df_encoded = _encode_dataset_for_making_predictions(new_predictions_df, number_of_prices - 1)
    # Last column is the product id, so it is excluded from the features.
    new_predictions = model.predict(new_predictions_df_encoded.iloc[:, :-1]).astype(int)
    new_predictions_df_encoded['predicted_price'] = new_predictions
    new_predictions_df_encoded['predicted_difference'] = new_predictions_df_encoded['predicted_price'] - \
                                                         new_predictions_df_encoded[f'price{number_of_prices - 1}']
    new_predictions_df_encoded['predicted_output'] = new_predictions_df_encoded['predicted_difference'] / \
                                                     new_predictions_df_encoded[f'price{number_of_prices - 1}']
    # +/- 5% relative change is the UP/DOWN threshold, same as in training.
    new_predictions_df_encoded['predicted_output_label'] = new_predictions_df_encoded['predicted_output'].apply(
        lambda x: 'UP' if x >= 0.05 else 'DOWN' if x <= -0.05 else 'SAME')
    new_predictions_df_encoded = new_predictions_df_encoded[
        ['id', f'price{number_of_prices - 1}', 'predicted_price', 'predicted_difference', 'predicted_output',
         'predicted_output_label']]
    today = date.today()
    # Persist one prediction row per product.
    for row in new_predictions_df_encoded.values:
        product_id = row[0]
        previous_price = row[1]
        next_predicted_price = row[2]
        predicted_percentage = row[4]
        prediction_result = row[5]
        data_repository.add_new_prediction(today, next_predicted_price, predicted_percentage, prediction_result,
                                           previous_price, product_id)
def create_training_df(number_of_prices):
    """Build the pivoted training dataset (one row per product, one price
    column per scraping date) and persist it as a CSV snapshot.

    Returns (pivot_df, csv_content, file_name, dates, row_count).
    """
    dates = data_repository.get_latest_scraping_dates(number_of_prices)
    # Not enough scraping runs recorded yet to build the requested dataset.
    if len(dates) < number_of_prices:
        raise Exception
    raw_rows = data_repository.get_dataset(dates)
    history = pd.DataFrame(raw_rows)
    history['date'] = history['date'].astype(str)
    wide = (
        history.pivot(index=['id', 'category', 'store'], columns='date', values='price')
        .reset_index()
        .dropna()
    )
    today = date.today()
    training_dataset_path = f"helpers\\training_datasets\\training_dataset_{today}.csv"
    wide.to_csv(training_dataset_path, index=False)
    csv_content = wide.to_csv(index=False)
    return wide, csv_content, f"training_dataset_{today}.csv", dates, wide.shape[0]
def create_research_training_df():
    """Build the full-history research dataset across all scraping dates and
    persist it as a CSV snapshot.

    Returns (pivot_df, csv_content, file_name, dates, row_count).
    """
    dates = data_repository.get_all_scraping_dates()
    raw_rows = data_repository.get_research_dataset(dates)
    history = pd.DataFrame(raw_rows)
    history['date'] = history['date'].astype(str)
    wide = history.pivot(
        index=['id', 'name', 'link', 'category', 'store', 'product_cluster_id'],
        columns='date',
        values='price',
    ).reset_index()
    today = date.today()
    training_dataset_path = f"helpers\\training_datasets\\research_dataset_{today}.csv"
    wide.to_csv(training_dataset_path, index=False)
    csv_content = wide.to_csv(index=False)
    return wide, csv_content, f"research_dataset_{today}.csv", dates, wide.shape[0]
def create_predictions_df(number_of_prices):
    """Fetch the latest price history and pivot it into one row per product,
    dropping products missing any of the requested dates."""
    dates = data_repository.get_latest_scraping_dates(number_of_prices)
    history = pd.DataFrame(data_repository.get_dataset(dates))
    wide = history.pivot(index=['id', 'category', 'store'], columns='date', values='price').reset_index()
    wide.dropna(inplace=True)
    return wide
def _encode_training_df(df_original, number_of_prices):
    """One-hot encode category/store, rename the price columns to
    price1..priceN-1 plus 'target', and derive difference/output/output_label.

    Column ORDER matters downstream (_split_dataset slices by position), so
    the reordering at the end is deliberate.
    """
    df = df_original.copy()
    df.drop(columns=['id'], inplace=True)
    df = df.reset_index(drop=True)
    # Apply the pre-fitted encoders loaded at module import time.
    category_encoded = category_encoder.transform(df[['category']]).astype(int)
    store_encoded = store_encoder.transform(df[['store']]).astype(int)
    category_encoded_df = pd.DataFrame(category_encoded, columns=category_encoder.get_feature_names_out(['category']))
    store_encoded_df = pd.DataFrame(store_encoded, columns=store_encoder.get_feature_names_out(['store']))
    # Indices must align for the positional concat below.
    category_encoded_df = category_encoded_df.reset_index(drop=True)
    store_encoded_df = store_encoded_df.reset_index(drop=True)
    df_encoded = pd.concat([df, category_encoded_df, store_encoded_df], axis=1)
    df_encoded = df_encoded.drop(['category', 'store'], axis=1)
    # The first `number_of_prices` columns are the per-date price columns.
    price_columns = df_encoded.columns[0:number_of_prices]
    for price in price_columns:
        df_encoded[price] = df_encoded[price].astype(int)
    # Rename date-labelled columns: last one is the regression target.
    new_price_columns = dict()
    for i, price in enumerate(price_columns):
        if i == number_of_prices - 1:
            new_price_columns[price] = f"target"
        else:
            new_price_columns[price] = f"price{i + 1}"
    df_encoded.rename(columns=new_price_columns, inplace=True)
    # Reorder: one-hot features first, then price1..priceN-1, then 'target'.
    columns = df_encoded.columns
    desired_order = columns[number_of_prices:].tolist() + columns[0:number_of_prices - 1].tolist() + [
        columns[number_of_prices - 1]]
    df_encoded = df_encoded[desired_order]
    # Relative change vs the most recent feature price; +/-5% => UP/DOWN.
    df_encoded['difference'] = df_encoded['target'] - df_encoded[f'price{number_of_prices - 1}']
    df_encoded['output'] = df_encoded['difference'] / df_encoded[f'price{number_of_prices - 1}']
    df_encoded['output_label'] = df_encoded['output'].apply(
        lambda x: 'UP' if x >= 0.05 else 'DOWN' if x <= -0.05 else 'SAME')
    return df_encoded
def _encode_dataset_for_making_predictions(df_original, number_of_prices):
    """One-hot encode category/store for inference rows and order columns to
    match the training layout: one-hot features, price1..priceN, then 'id'.

    Unlike the training variant, 'id' is kept (as the last column) so the
    caller can join predictions back to products.
    """
    df = df_original.copy()
    df = df.reset_index(drop=True)
    # Apply the pre-fitted encoders loaded at module import time.
    category_encoded = category_encoder.transform(df[['category']]).astype(int)
    store_encoded = store_encoder.transform(df[['store']]).astype(int)
    category_encoded_df = pd.DataFrame(category_encoded, columns=category_encoder.get_feature_names_out(['category']))
    store_encoded_df = pd.DataFrame(store_encoded, columns=store_encoder.get_feature_names_out(['store']))
    # Indices must align for the positional concat below.
    category_encoded_df = category_encoded_df.reset_index(drop=True)
    store_encoded_df = store_encoded_df.reset_index(drop=True)
    df_encoded = pd.concat([df, category_encoded_df, store_encoded_df], axis=1)
    df_encoded = df_encoded.drop(['category', 'store'], axis=1)
    # Columns 1..number_of_prices are the per-date price columns ('id' is 0).
    price_columns = df_encoded.columns[1:number_of_prices + 1]
    for price in price_columns:
        df_encoded[price] = df_encoded[price].astype(int)
    new_price_columns = dict()
    for i, price in enumerate(price_columns):
        new_price_columns[price] = f"price{i + 1}"
    df_encoded.rename(columns=new_price_columns, inplace=True)
    columns = df_encoded.columns
    # (removed leftover debug print of the column index)
    desired_order = columns[number_of_prices + 1:].tolist() + columns[1:number_of_prices + 1].tolist() + [
        columns[0]]
    df_encoded = df_encoded[desired_order]
    return df_encoded
def _split_dataset(df_encoded):
    """Split the encoded frame 60/20/20 into train/test/validation and slice
    out feature/target frames by column position.

    The last four columns are (target, difference, output, output_label);
    everything before them is a feature.
    """
    frame = df_encoded.copy()
    # 60% train; the remaining 40% is halved into test and validation.
    train_data, holdout = train_test_split(frame, test_size=0.4, shuffle=True)
    test_data, validation_data = train_test_split(holdout, test_size=0.5, shuffle=True)
    feature_cols = df_encoded.columns[0:-4]
    target_cols = [df_encoded.columns[-4]]
    X_train, y_train = train_data[feature_cols], train_data[target_cols]
    X_test, y_test = test_data[feature_cols], test_data[target_cols]
    X_validation, y_validation = validation_data[feature_cols], validation_data[target_cols]
    # Ground-truth labels used later for the accuracy computation.
    accuracy_test = validation_data[['output_label']]
    return (train_data, test_data, validation_data, X_train, y_train,
            X_test, y_test, X_validation, y_validation, accuracy_test)
def _train_model(train_data, test_data, validation_data, X_train, y_train, X_test, y_test, X_validation, y_validation,
                 accuracy_test, number_of_prices):
    """Fit a RandomForestRegressor on train+validation, evaluate the derived
    UP/DOWN/SAME label accuracy on the test split, persist the accuracy via
    the repository, and return the fitted model."""
    # Train on the union of train and validation splits.
    X_train_combined = pd.concat([X_train, X_validation], axis=0)
    y_train_combined = np.concatenate([y_train, y_validation])
    model = RandomForestRegressor()
    model.fit(X_train_combined, y_train_combined)
    predictions = model.predict(X_test)
    predictions_df = _process_prediction_output_pseudo(predictions, y_test, test_data, number_of_prices)
    # Accuracy = share of rows where the predicted label matches the true one.
    all_rows = predictions_df.shape[0]
    correct_rows = predictions_df[predictions_df['output_label'] == predictions_df['predicted_output_label']].shape[0]
    model_accuracy = correct_rows / all_rows * 100
    data_repository.add_new_ml_model("Random Forest Regressor", date.today(), model_accuracy)
    return model
def _process_prediction_output_pseudo(predictions, y_test, test_data, number_of_prices):
predictions_df = pd.DataFrame(predictions, columns=['predicted_target'])
predictions_df['predicted_target'] = predictions_df['predicted_target'].astype(int)
evaluation_validation = test_data[
['target', 'output_label', f'price{number_of_prices - 1}', 'output', 'difference']]
evaluation_validation.reset_index(drop=True, inplace=True)
predictions_df = pd.concat([predictions_df, evaluation_validation], axis=1)
predictions_df['predicted_difference'] = predictions_df['predicted_target'] - predictions_df[
f'price{number_of_prices - 1}']
predictions_df['predicted_output'] = predictions_df['predicted_difference'] / predictions_df[
f'price{number_of_prices - 1}']
predictions_df['predicted_output_label'] = predictions_df['predicted_output'].apply(
lambda x: 'UP' if x >= 0.05 else 'DOWN' if x <= -0.05 else 'SAME')
return predictions_df
| viktor-meglenovski/diplomska_ml | backend/service/training_service.py | training_service.py | py | 10,134 | python | en | code | 0 | github-code | 90 |
27955539533 | from tkinter import *
from tkinter import messagebox
import mysql.connector
from pacotes import criarBanco, telaCompras
from tkinter import ttk
# --------------------------------------------- colors --------------------------------------------
cor0 = '#121010' # black
cor1 = '#feffff' # white
cor2 = '#3fb5a3' # green
cor3 = '#38576b' # blue
cor4 = '#403d3d' # gray
cor5 = '#e9edf5' # light blue
cor6 = '#ef5350' # red
cor7 = '#191970' # dark blue
cor8 = '#ffff40' # light yellow
# NOTE(review): navigation functions assign a LOCAL `voltar`; this module-level
# flag is apparently never read -- confirm before removing.
voltar = False
#------------ função para atualizar tabela ------------------------------
def atualizar():
    """Open an edit window pre-filled with the Treeview-selected product and
    persist the changes to the `produtos` table on confirmation."""
    try:
        tela_atualizar = Tk()
        tela_atualizar.title('Update produto')
        tela_atualizar.geometry('500x120+420+355')
        tela_atualizar.configure(bg=cor5)
        tela_atualizar.resizable(width=FALSE, height=FALSE)
        # ------------- edit-window labels --------------------------------------------
        label_atualizar = Label(tela_atualizar, text='Altere o produto escolhido', font=('Arial 10 bold'), bg=cor3, fg=cor1)
        label_atualizar.grid(row=0, column=2, pady=3)
        label_atualizar_id = Label(tela_atualizar, text='ID', font=('Arial 10 bold'), bg=cor3, fg=cor1)
        label_atualizar_id.place(x=35, y=30)
        label_atualizar_produto = Label(tela_atualizar, text='Produto', font=('Arial 10 bold'), bg=cor3, fg=cor1)
        label_atualizar_produto.place(x=145, y=30)
        label_atualizar_codigo = Label(tela_atualizar, text='Código', font=('Arial 10 bold'), bg=cor3, fg=cor1)
        label_atualizar_codigo.place(x=255, y=30)
        label_atualizar_preco = Label(tela_atualizar, text='Preço', font=('Arial 10 bold'), bg=cor3, fg=cor1)
        label_atualizar_preco.place(x=365, y=30)
        # ------------------ edit-window entries ----------------------------------
        entre_atualizar_id = Entry(tela_atualizar, width=10, relief='solid')
        entre_atualizar_id.place(x=25, y=55)
        entre_atualizar_produto = Entry(tela_atualizar, width=15, relief='solid')
        entre_atualizar_produto.place(x=135, y=55)
        entre_atualizar_codigo = Entry(tela_atualizar, width=13, relief='solid')
        entre_atualizar_codigo.place(x=245, y=55)
        entre_atualizar_preco = Entry(tela_atualizar, width=10, relief='solid')
        entre_atualizar_preco.place(x=355, y=55)
        # Selected Treeview row; indexing raises IndexError when nothing is
        # selected, which is handled below.
        dados_valor = topo.item(topo.focus())['values']
        id_atualizar = dados_valor[0]
        # ---------------- pre-fill the form with the selected row's values ------
        entre_atualizar_id.insert(0, dados_valor[0])
        entre_atualizar_produto.insert(0, dados_valor[1])
        entre_atualizar_codigo.insert(0, dados_valor[2])
        entre_atualizar_preco.insert(0, dados_valor[3])

        def confirmar_atualizar():
            """Write the edited values back to the database and refresh the table."""
            id_atualiza = str(entre_atualizar_id.get())
            produto_atualiza = str(entre_atualizar_produto.get())
            codigo_atualiza = str(entre_atualizar_codigo.get())
            preco_atualiza = str(entre_atualizar_preco.get())
            banco = mysql.connector.connect(
                host='localhost',
                user='root',
                passwd='',
                database='banco_produtos'
            )
            with banco:
                cursor = banco.cursor()
                # SECURITY FIX: parameterized query -- the original interpolated
                # user input straight into the SQL string (injection-prone).
                cursor.execute(
                    'UPDATE produtos SET produto = %s, codigo = %s, preco = %s WHERE id = %s',
                    (produto_atualiza, codigo_atualiza, preco_atualiza, id_atualiza)
                )
                # BUG FIX: the context manager closes the connection but does
                # not commit, so the UPDATE could be lost.
                banco.commit()
            messagebox.showinfo('Sucesso', 'Produto atualizado com sucesso')
            tela_atualizar.destroy()
            quartaTela.destroy()
            nova_tela_4()

        botao_conf_atualizar = Button(tela_atualizar, command=confirmar_atualizar, text='Confirmar', width=10, height=1, font=('Arial 10 bold'), bg=cor6, fg=cor1, relief='raised',
                                      overrelief='ridge')
        botao_conf_atualizar.place(x=150, y=85)
        tela_atualizar.mainloop()
    except IndexError:
        messagebox.showerror('ERRO', 'Selecione um item para editar')
    finally:
        # BUG FIX: on the success path the window is already destroyed by
        # confirmar_atualizar(); destroying it twice raises TclError.
        try:
            tela_atualizar.destroy()
        except TclError:
            pass
# ------- funçãoo deletar item da tabela ------------------------
def deletar():
    """Delete the Treeview-selected product from the database and refresh the table."""
    try:
        # Selected row; indexing raises when nothing is selected (handled below).
        dados_topo = topo.focus()
        dici_topo = topo.item(dados_topo)
        topo_lista = dici_topo['values']
        posicao_id = topo_lista[0]
        banco = mysql.connector.connect(
            host='localhost',
            user='root',
            passwd='',
            database='banco_produtos'
        )
        with banco:
            cursor = banco.cursor()
            # SECURITY FIX: parameterized query instead of string concatenation.
            cursor.execute('DELETE FROM produtos WHERE id = %s', (posicao_id,))
            # BUG FIX: the context manager closes but does not commit.
            banco.commit()
        quartaTela.destroy()
        nova_tela_4()
    except Exception:
        # Narrowed from a bare `except:`; anything here is treated as
        # "no item selected", same user message as before.
        messagebox.showerror('ERRO', 'Nenhum item selecionado')
#------------- botao ir tela 4 ---------------------------------------------------
def abrir_tela_4():
    """Close the product-registration screen and open the product table."""
    terceiraTela.destroy()
    nova_tela_4()
# -------- Segunda tela (Seguimento login) voltando para primeira tela --------------------------
def voltar_tela_1():
    """Close the login screen and return to the initial login/purchase chooser."""
    segundaTela.destroy()
    tela_login()
# -------- Terceira tela Seguimento login voltando para primeira tela --------------------------
def voltar_tela_1_2():
    """Close the product-registration screen and return to the initial chooser."""
    terceiraTela.destroy()
    tela_login()
#-------------- tela 4 (visualizar tabela de produtos) voltando para tela 3 -----------------------------------------------
def voltar_tela_3():
    """Close the product-table screen and return to product registration."""
    quartaTela.destroy()
    nova_tela_3()
#botao abrir tela compras -------------------------------------------
def abrir_tela_compras():
    """Close the initial chooser and open the purchases screen."""
    primeiraTela.destroy()
    telaCompras.first_tela_compras()
def nova_tela_4():
    """Build and show the product-table screen (Treeview of all products
    plus delete/update/back controls)."""
    global quartaTela
    global topo
    quartaTela = Tk()
    quartaTela.title('Banco de produtos')
    quartaTela.geometry('500x500+420+100')
    quartaTela.configure(bg=cor5)
    quartaTela.resizable(width=FALSE, height=FALSE)
    # creating the frames --------------------------------
    tela_4_frame = Frame(quartaTela, width=450, height=450, bg=cor3, relief='flat')
    tela_4_frame.grid(row=0, column=0)
    tela_4_frame_2 = Frame(quartaTela, width=500, height=300, bg=cor2, relief='flat')
    tela_4_frame_2.grid(row=1, column=0)
    # products currently in the database table
    produto = criarBanco.visu_info()
    # column headers for the table
    lista = ['ID', 'PRODUTOS', 'CÓDIGO', 'PREÇO']
    topo = ttk.Treeview(tela_4_frame,selectmode='extended', columns=lista, show='headings')
    # vertical scrollbar -----------------------------
    rolagem_vert = ttk.Scrollbar(tela_4_frame, orient='vertical', command=topo.yview)
    rolagem_vert.grid(column=1, row=0, sticky='ns')
    topo.configure(yscrollcommand=rolagem_vert.set)
    topo.grid(column=0, row=0)
    tela_4_frame.grid_rowconfigure(0, weight=12)
    # per-column anchor and pixel width
    posicao_cabecalho = ['center','center','center','center']
    tamanho_cabecalho = [50, 180, 150, 100]
    c=0
    for coluna in lista:
        topo.heading(coluna, text=coluna.title().upper(), anchor=CENTER)
        topo.column(coluna, width=tamanho_cabecalho[c], anchor=posicao_cabecalho[c])
        c+=1
    # populate the Treeview with one row per product
    for item in produto:
        topo.insert('','end',values=item)
    #------------------------------------------buttons-------------------------------------------------------
    #------- back to the previous screen ---------------------------------------------------
    botao_voltar_tela_3 = Button(tela_4_frame_2, command=voltar_tela_3, text='Voltar', font=('Arial 12 bold'), width=5, height=1, overrelief='ridge', bg=cor0, fg=cor1)
    botao_voltar_tela_3.place(x=390, y=210)
    #------- delete button ------------------------------------------------------------------
    botao_deletar = Button(tela_4_frame_2, command= deletar, text='Deletar', font=('Yvi 15'), width=7, height=1, overrelief='ridge', bg=cor3, fg=cor0)
    botao_deletar.place(x=40, y=30)
    #------- update button (NOTE: reuses the `botao_deletar` name) ---------------------------
    botao_deletar = Button(tela_4_frame_2, command=atualizar, text='Atualizar', font=('Yvi 15'), width=7, height=1, overrelief='ridge', bg=cor8, fg=cor7)
    botao_deletar.place(x=170, y=30)
    quartaTela.mainloop()
def inserir():
    """Read the form fields and insert a new product row into `produtos`.

    Shows a messagebox on success, on empty fields, and on any failure.
    """
    try:
        produto = entre_descricao.get()
        codigo = entre_codigo.get()
        preco = entre_preco.get()
        # Make sure the database and table exist before inserting.
        criarBanco.criar_banco()
        criarBanco.criar_tabela()
        if produto == '' or codigo == '' or preco == '':
            messagebox.showerror('Erro', 'Campos não estão preenchidos')
        else:
            banco = mysql.connector.connect(
                host='localhost',
                user='root',
                passwd='',
                database='banco_produtos'
            )
            try:
                cursor = banco.cursor()
                inserindo = 'INSERT INTO produtos (produto, codigo, preco) VALUES (%s, %s, %s)'
                dados = (str(produto), str(codigo), str(preco))
                cursor.execute(inserindo, dados)
                banco.commit()
            finally:
                # BUG FIX: the original leaked the connection.
                banco.close()
            # Clear the form for the next entry.
            entre_descricao.delete(0, 'end')
            entre_codigo.delete(0, 'end')
            entre_preco.delete(0, 'end')
            messagebox.showinfo('Sucesso', 'Produto adicionado com sucesso')
    except Exception:
        # Narrowed from a bare `except:`; same user-facing message as before.
        messagebox.showerror('ERRO', 'Algum produto digitado de forma errada')
def nova_tela_3():
    """Build and show the product-registration screen (insert form plus
    navigation to the product table and back to the start screen)."""
    global terceiraTela
    global segundaTela
    global entre_descricao
    global entre_codigo
    global entre_preco
    global label_codigo
    terceiraTela = Tk()
    terceiraTela.title('Banco de dados')
    terceiraTela.geometry('500x500+420+100')
    terceiraTela.configure(bg=cor5)
    terceiraTela.resizable(width=FALSE, height=FALSE)
    # creating the frames -------------------------------------
    primeiro_frame = Frame(terceiraTela, width=500, height=65, bg=cor3, relief='flat')
    primeiro_frame.grid(row=0, column=0)
    segundo_frame = Frame(terceiraTela, width=500, height=435, bg=cor1, relief='flat')
    segundo_frame.grid(row=1, column=0)
    # creating labels and entries --------------------------------
    label_inserir = Label(primeiro_frame, text='Inserir Produtos', font=('Arialblack 20 bold'), bg=cor3, fg=cor1)
    label_inserir.place(x=140, y=12)
    label_descricao = Label(segundo_frame, text='Produto: *', font=('Yvi 15 bold'), bg=cor1, fg=cor4)
    label_descricao.place(x=15, y=55)
    entre_descricao = Entry(segundo_frame, width=50, relief='solid')
    entre_descricao.place(x=20, y=85)
    label_codigo = Label(segundo_frame, text='Código: *', font=('Yvi 15 bold'), bg=cor1, fg=cor4)
    label_codigo.place(x=15, y=130)
    entre_codigo = Entry(segundo_frame, width=50, relief='solid')
    entre_codigo.place(x=20, y=160)
    label_preco = Label(segundo_frame, text='Preço *', font=('Yvi 15 bold'), bg=cor1, fg=cor4)
    label_preco.place(x=20, y=195)
    entre_preco = Entry(segundo_frame, width=50, relief='solid')
    entre_preco.place(x=20, y=224)
    # insert button -----------------------------------
    botao_inserir = Button(segundo_frame, text='Inserir', command=inserir, width=10, height=1, font=('Arial 10 bold'), bg=cor6, fg=cor1, relief='raised',
                           overrelief='ridge')
    botao_inserir.place(x=20, y=300)
    # button to view the product table ---------------------------------------------------------
    botao_ir_tabela = Button(segundo_frame, command=abrir_tela_4, text='Visualizar Produtos', width=15, height=1, font=('Arial 10 bold'), bg=cor7, fg=cor1, relief='raised',
                             overrelief='ridge')
    botao_ir_tabela.place(x=200, y=300)
    # button back to the start screen
    botao_inicio_t3 = Button(segundo_frame, text='Início', command=voltar_tela_1_2, width=10, height=1, font=('Arial 10 bold'), bg=cor2, fg=cor1, relief='raised',
                             overrelief='ridge')
    botao_inicio_t3.place(x=20, y=390)
    terceiraTela.mainloop()
def login_tela_3():
    """Validate the login form and, on success, open the registration screen."""
    # NOTE(review): credentials are hard-coded in source ('adm'/'123') --
    # move to a secure store (env vars / hashed DB record) before shipping.
    nome = entre_nome.get()
    senha = entre_senha.get()
    if nome == 'adm' and senha == '123':
        messagebox.showinfo('Login', 'Seja bem vindo')
        segundaTela.destroy()
        nova_tela_3()
    else:
        # Inline error label on the login screen.
        nome_senha_erro['text'] = 'Usuário ou senha incorretos'
def nova_tela_login():
    """Replace the start screen with the employee login screen."""
    global entre_nome
    global entre_senha
    global nome_senha_erro
    global segundaTela
    global primeiraTela
    global botao_voltar
    primeiraTela.destroy()
    segundaTela = Tk()
    segundaTela.title('Login')
    segundaTela.geometry('500x500+420+100')
    segundaTela.configure(bg=cor1)
    segundaTela.resizable(width=FALSE, height=FALSE)
    # username label and entry
    label_nome = Label(segundaTela, text='Usuário *', font=('Arial 12'), bg=cor1, fg=cor0)
    label_nome.place(x=100, y=160)
    entre_nome = Entry(segundaTela, width=25, justify='left', font=('', 15), highlightthickness=1, relief='solid')
    entre_nome.place(x=100, y=180)
    # password label and masked entry
    label_senha = Label(segundaTela, text='Senha *', font=('Arial 12'), bg=cor1, fg=cor0)
    label_senha.place(x=100, y=250)
    entre_senha = Entry(segundaTela, width=25, justify='left', font=('', 15), highlightthickness=1, relief='solid', show='*')
    entre_senha.place(x=100, y=270)
    #--------- back button to the start screen -----------------------------------
    botao_voltar = Button(segundaTela, command= voltar_tela_1, text='Voltar', font=('Arial 12 bold'), width=15, height=1,
                          overrelief='ridge', bg=cor1, fg=cor0)
    botao_voltar.place(x=30, y=30)
    # -------- login button (opens the database screens) ------------------------------
    login_segunda_tela = Button(segundaTela, text='Login', command=login_tela_3, font=('Arial 12 bold'), width=15, height=1,
                                overrelief='ridge', bg=cor3, fg=cor1)
    login_segunda_tela.place(x=100, y=325)
    # inline error label, filled in by login_tela_3 on bad credentials
    nome_senha_erro = Label(segundaTela, text='', font=('Arial 8'), bg=cor1, fg=cor0)
    nome_senha_erro.place(x=100, y=303)
    segundaTela.mainloop()
def tela_login():
    """Build and show the initial screen: employee login on the left,
    customer purchases on the right."""
    global primeiraTela
    global segundaTela
    primeiraTela = Tk()
    primeiraTela.title('Login/Compra')
    primeiraTela.geometry('500x500+420+100')
    primeiraTela.configure(bg=cor1)
    primeiraTela.resizable(width=FALSE, height=FALSE)
    #----------------------------------- splitting the main window ----------------------------------
    frame_esquerda = Frame(primeiraTela, width=250, height=500, bg=cor1, relief='flat')
    frame_esquerda.place(x=0, y=0)
    frame_direita = Frame(primeiraTela, width=250, height=500, bg=cor1, relief='flat')
    frame_direita.place(x=250, y=0)
    #----------------------------------- left frame (employee login) ----------------------------------
    label_nome = Label(frame_esquerda, text='Entrar', font=('Arialblack 20 bold'),bg=cor2, fg=cor0)
    label_nome.place(x=30, y=150)
    label_descricao = Label(frame_esquerda, text='Área de login para funcionários',
                            font=('Ivy 10 italic'),bg=cor2, fg=cor0)
    label_descricao.place(x=30, y=190)
    # vertical divider line between the two halves
    label_linha = Label(frame_esquerda, text= '', height=160, width=0, anchor=NW, font=('Ivy 1'), bg=cor0, fg=cor4)
    label_linha.place(x=248, y=60)
    #------------------------------------ login button --------------------------------------------------------
    botao_login = Button(frame_esquerda, command=nova_tela_login, text='Login', font=('Arial 12 bold'), width=15, height=1,
                         overrelief='ridge', bg=cor3, fg=cor1)
    botao_login.place(x=30, y=230)
    #----------------------------------- right frame (purchases) ----------------------------------
    label_nome2 = Label(frame_direita, text='Comprar', font=('Arialblack 20 bold'),bg=cor3, fg=cor0)
    label_nome2.place(x=30, y=150)
    label_descricao2 = Label(frame_direita, text='Área de pedidos de compras',
                             font=('Ivy 10 italic'),bg=cor3, fg=cor0)
    label_descricao2.place(x=30, y=190)
    #-------------------------------- purchase button (NOTE: reuses `botao_login`) ----------------------------
    botao_login = Button(frame_direita, command=abrir_tela_compras, text='Comprar', font=('Arial 12 bold'), width=15, height=1,
                         overrelief='ridge', bg=cor2, fg=cor1)
    botao_login.place(x=30, y=230)
    primeiraTela.mainloop()
# Program entry point: show the initial chooser screen.
tela_login()
| Eduardo-J-S/Projeto_IP_20221 | main.py | main.py | py | 16,937 | python | pt | code | 1 | github-code | 90 |
13588922190 | from django.urls import path
from . import views
# Namespace used by {% url 'students:...' %} reversals.
app_name = 'students'
urlpatterns = [
    path('', views.index, name='index'), # GET /students/
    path('new/', views.new, name='new'), # GET,POST /students/new/
    # path('create/', views.create, name='create'), # POST /students/create/ (x)
    path('<int:pk>/', views.detail, name='detail'), # GET /students/1/
    path('<int:pk>/edit/', views.edit, name='edit'), # GET,POST /students/1/edit/
    # path('<int:pk>/update/', views.update, name='update'), # POST /students/1/update/ (x)
    path('<int:pk>/delete/', views.delete, name='delete'), # POST /students/1/delete/
    path('<int:student_pk>/comments/new/', views.comments_new, name='comments_new'),
    path('<int:student_pk>/comments/<int:pk>/delete/', views.comments_delete, name='comments_delete'), # POST /students/1/comments/1/delete/
    path('<int:student_pk>/comments/<int:pk>/edit/', views.comments_edit, name='comments_edit'),
]
# URL Name -> one per path
# path('address/', views.func, name='name')
# {% url 'name' %} #=> address
# [Advantages]
# 1. When an address must change, only urls.py needs editing
# 2. Prevents the mistake of forgetting the trailing '/'
# App Name - refers to this app's urls.py as a whole
# {% url 'app_name:path_name' %} #=> address!
# RESTful
# 1. Resource - URI
# 2. Verb - HTTP Method (GET, POST, ...)
# 3. Representations - resource + verb
# Django (HTML forms) cannot send PUT/PATCH/DELETE, therefore...
# GET /students/2/edit/ #=> show the edit page
# POST /students/2/edit/ #=> perform the edit
# ex)
# GET /users/1 #=> fetch user 1
# PUT /users/1 #=> update user 1
# DELETE /users/1 #=> delete user 1
13394981258 | import json
from typing import List, Union
import pandas as pd
from fastapi import FastAPI
from pydantic import BaseModel
from data import get_list_summary, get_summary
app = FastAPI()
@app.get("/trip/{month}")
def get_summary_srv(month: str, expire: int = 15 * 60):
    """Return the trip-count summary for a single month (cache TTL in seconds)."""
    summary = get_summary(month, expr=expire)
    first = summary.iloc[0]
    return dict(month=first["trip_month"], trip_count=int(first["trip_count"]))
@app.get("/trip")
def get_all_summary_srv(expire: int = 15 * 60, order: bool = False):
    """Return trip counts for every month, optionally sorted by trip count."""
    summary: pd.DataFrame = get_list_summary(None, expr=expire)
    if order:
        summary = summary.sort_values(by=["trip_count"])
    return [
        dict(month=row.trip_month, trip_count=int(row.trip_count))
        for row in summary.itertuples()
    ]
@app.post("/trip")
def post_summary_srv(mons: List[str], expire: int = 15 * 60):
    """Return trip counts for the months listed in the request body.

    Renamed from `get_all_summary_srv`: the original duplicated the GET
    handler's name, shadowing it at module level. The route is unchanged.
    """
    df = get_list_summary(mons, expr=expire)
    return [
        dict(month=rec.trip_month, trip_count=int(rec.trip_count))
        for rec in df.itertuples()
    ]
| husnusensoy/python-workshop | week4/session2/data-service.py | data-service.py | py | 1,149 | python | en | code | 2 | github-code | 90 |
20671197229 | # -*- coding:utf-8 -*-
class Solution:
    """Decide whether a sequence is a valid post-order traversal of some BST."""

    def VerifySquenceOfBST(self, sequence):
        # An empty sequence is treated as invalid (problem convention).
        if len(sequence) == 0:
            return False
        return self.check(sequence, 0, len(sequence) - 1)

    def check(self, arr, start, end):
        """Validate arr[start..end] (inclusive) as the post-order of one BST."""
        if start >= end:
            return True
        root = arr[end]
        # Scan right-to-left past the right subtree (values > root), but never
        # cross below `start` into a sibling subtree (the original used
        # `split >= 0`, which could walk out of the current range).
        split = end - 1
        while split >= start and arr[split] > root:
            split -= 1
        # Everything at or before the split must belong to the left subtree.
        for i in range(start, split + 1):
            if arr[i] > root:
                return False
        # BUG FIX: the original recursed as check(mid, end) with `end` already
        # decremented to mid - 1, so the right subtree was NEVER validated
        # (e.g. the invalid sequence [1, 7, 5, 6, 4] was accepted).
        return self.check(arr, start, split) and self.check(arr, split + 1, end - 1)
3393423918 | import networkx as nx
import tweepy
import matplotlib.pyplot as plt
import os
# SECURITY NOTE(review): API credentials are hard-coded and committed to
# source control -- they should be revoked and loaded from environment
# variables or a secrets store instead.
consumer_key = "2mwk5WNYkNcWko6MhmRrivazE"
consumer_secret = "P8NEpxECYgKa5YAWr5O3F6TGFWYeJY78EBd7ZhrEX2PcUkl643"
access_token = "2403140949-iKPTtRJJlsgRT6AV2tWeMBid4lFGV7DxNADI14K"
access_token_secret = "4OZlKin7OekWX7Lx00GDNhYuHTRj9BiNwPf8sPeCNXcuO"
# Shared tweepy client; waits (and logs) when the rate limit is reached.
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
def from_name_to_user(ID):
    """Look up and return the tweepy User object for the given screen name."""
    user_obj = api.get_user(ID)
    return user_obj
def proces(actual, explored, max_followers, max_nodes_to_crawl, cases):
    """Crawl the first layer from `actual`, then add one child per
    first-layer node until the node budget is exhausted."""
    explored = first_level(actual, explored, max_followers, cases)
    layer_users = [from_name_to_user(edge[0]) for edge in explored]
    # Index 0 is the seed itself, so it is skipped.
    for user in layer_users[1:]:
        if len(explored) < max_nodes_to_crawl:
            explored = second_level(user, explored, max_followers)
    return explored
def first_level(actual, queue, max_followers, cases):
    """Append up to cases[0][1] edges (follower -> actual) to `queue`,
    picking followers with the highest counts below `max_followers`.
    Falls back to `atencio` (friends instead of followers) when the
    follower pool runs dry."""
    aux = list()  # aux = candidate (screen_name, followers_count) pairs from actual's followers
    for follower in actual.followers():
        if follower.followers_count < max_followers:
            aux.append((follower.screen_name, follower.followers_count))
    cont = 0
    while cont < cases[0][1]:
        if len(aux) == 0:
            # Out of follower candidates: fill the remaining slots from friends.
            queue = atencio(actual, queue, max_followers, cases, cont)
            return(queue)
        else:
            # Greedily take the candidate with the most followers.
            maxim = max(aux, key = lambda x: x[1])
            queue.append((maxim[0], actual.screen_name))
            aux.remove(maxim)
            cont += 1
    return(queue)
def atencio(actual, queue, max_followers, cases, cont):
    """Fallback for first_level: fill the remaining slots (from `cont` up to
    cases[0][1]) with actual's most-followed friends below `max_followers`."""
    aux = list()  # aux = candidate (screen_name, followers_count) pairs from actual's friends
    for friend in actual.friends():
        if friend.followers_count < max_followers:
            aux.append((friend.followers_count < max_followers) and (friend.screen_name, friend.followers_count) or (friend.screen_name, friend.followers_count))
    while cont < cases[0][1]:
        maxim = max(aux, key = lambda x: x[1])
        queue.append((maxim[0], actual.screen_name))
        aux.remove(maxim)
        cont += 1
    return(queue)
def second_level(actual, explored, max_followers):
    # Second crawl layer: attach exactly one child — the most-followed
    # eligible follower — under the given first-layer account.
    aux = list()  # candidate (screen_name, followers_count) followers of `actual`
    for follower in actual.followers():
        if follower.followers_count < max_followers:
            aux.append((follower.screen_name, follower.followers_count))
    # NOTE(review): max() raises ValueError when no follower passes the
    # threshold — confirm whether that can occur in practice.
    maxim = max(aux, key = lambda x: x[1])
    explored.append((maxim[0], actual.screen_name))
    aux.remove(maxim)
    return(explored)
def adaptations(max_nodes_to_crawl):
    """Derive the per-layer node quotas for a crawl of `max_nodes_to_crawl` nodes.

    Returns a one-element list [(case_label, first_layer_quota, second_layer_quota)]:
    "CASE1" when the budget (minus the seed) splits evenly across two layers,
    "CASE2" when the first layer gets one extra node.
    """
    remaining = max_nodes_to_crawl - 1  # nodes left after the seed itself
    half = remaining / 2
    cases = []
    if half == int(half):
        print("CAS NORMAL")
        print("Cada node de la 1a capa té un fill")
        cases.append(("CASE1", int(half), int(half)))
    else:
        print("CAS ESPECIAL")
        print("Cada node de la 1a capa té un fill excepte l'últim node de la 1a capa que no en té cap")
        cases.append(("CASE2", int(half + 1), int(half)))
    return cases
def save(seed_node, explored, max_nodes_to_crawl):
    """Write the crawled edge list to '<seed>_<n>.txt'.

    :param seed_node: tweepy User (only .screen_name is used)
    :param explored: list of (child_screen_name, parent_screen_name) tuples
    :param max_nodes_to_crawl: crawl budget, embedded in the file name

    Each line is written as 'parent , child'.
    """
    print("...saving...")
    filename = str(seed_node.screen_name) + "_" + str(max_nodes_to_crawl) + ".txt"
    # BUGFIX: use a context manager so the file is flushed and closed even
    # on error (the original left the handle open).
    with open(filename, "w") as f:
        for x in explored:
            f.write(x[1] + " , " + x[0] + "\n")
    print("...saved...")
    return
def crawler(nom_node, max_nodes_to_crawl, max_followers): # (ID, 40, 10000)
    # Entry point: crawl up to max_nodes_to_crawl accounts starting from
    # `nom_node`, skipping accounts with >= max_followers followers, then
    # persist the resulting edge list to disk via `save`.
    explored = list()
    explored.append((nom_node, nom_node))  # seed self-edge keeps index 0 stable
    seed_node = from_name_to_user(nom_node)
    cases = adaptations(max_nodes_to_crawl)
    explored = proces(seed_node, explored, max_followers, max_nodes_to_crawl, cases)
    save(seed_node, explored, max_nodes_to_crawl)
    return
def export_edges_to_graph(file_name):
    '''
    Build a directed graph from an edge-list text file, draw it, and
    pickle it next to the input file.

    :param file_name: name of the txt file that contains the edges of the graf.
        Each line is expected to look like "parent , child" — three
        whitespace-separated tokens, as produced by `save`.
    :return: the function does not return any parameter.
    '''
    path = os.path.dirname(file_name)
    aux_arests = list()  # raw token lists, one per input line
    nodes = list()
    arests = list()  # (parent, child) edge tuples
    with open(os.path.join(path, './' + file_name), 'r', encoding='cp1252') as f1:
        for line in f1:
            aux_arests.append(line.split())
    for line in aux_arests:
        # tokens: [parent, ',', child] — index 1 is the literal comma
        arests.append((line[0], line[2]))
        nodes.append(line[0])
        nodes.append(line[2])
    nodes = list(set(nodes))  # deduplicate node names
    G = nx.DiGraph()
    G.add_nodes_from(nodes)
    G.add_edges_from(arests)
    nx.draw(G, with_labels = True)
    plt.show()  # blocks until the plot window is closed
    nx.write_gpickle(G, file_name[:-4]+".pickle")
def export_graph_to_gexf(g, file_name):
    '''
    Serialize graph `g` to GEXF as '<file_name>.gexf'.

    :param g: A graph with the corresponding networkx format.
    :param file_name: name of the file that will be saved.
    :return: the function does not return any parameter.
    '''
    target = file_name + ".gexf"
    nx.gexf.write_gexf(g, target)
def retrieve_bidirectional_edges(g, file_name):
    '''
    Keep only mutually-connected (bidirectional) edges of `g`, drop the
    nodes that become isolated, draw the result, and save it.

    :param g: A graph with the corresponding networkx format.
    :param file_name: name of the file that will be saved.
    :return: the function does not return any parameter.
    '''
    # reciprocal=True keeps an undirected edge only where BOTH directed
    # edges (u->v and v->u) existed.
    G = nx.DiGraph.to_undirected(g, reciprocal = True)
    G.remove_nodes_from(list(nx.isolates(G)))
    nx.draw(G, with_labels = True)
    plt.show()  # blocks until the plot window is closed
    # NOTE(review): despite the ".pickle" suffix this writes GEXF format —
    # confirm downstream readers expect that.
    nx.gexf.write_gexf(G, file_name+"_undirected.pickle")
def prune_low_degree_nodes(g, min_degree, file_name):
    '''
    Remove low-degree nodes from `g` in place, draw the result, and save it.

    :param g: A graph with the corresponding networkx format.
    :param min_degree: lower bound value for the degree
    :param file_name: name of the file that will be saved.
    :return: the function does not return any parameter.
    '''
    nodes = list(g.nodes)  # snapshot, since g is mutated while iterating
    for node in nodes:
        # NOTE: '<=' removes nodes whose degree EQUALS min_degree as well,
        # and degrees are re-evaluated after earlier removals, so the
        # outcome is order-dependent.
        if g.degree(node) <= min_degree:
            g.remove_node(node)
            # NOTE(review): g.degree(node) is queried AFTER removal — on
            # current networkx this looks like it would raise; confirm.
            print("-----------expulsat",node, g.degree(node))
    nx.draw(g, with_labels = True)
    plt.show()  # blocks until the plot window is closed
    nx.gexf.write_gexf(g, file_name+"_undirected_reduced.pickle")
def find_cliques(g, min_size_clique):
    '''
    :param g: A graph with the corresponding networkx format.
    :param min_size_clique: the minimum size of the clique returned
    :return:
        large_cliques: a list with the large cliques
        nodes_in_large_cliques: all different nodes apprearing on these cliques
    '''
    large_cliques =[]
    nodes_in_large_cliques = []
    # Only mutually-connected edges count for clique membership.
    G = nx.DiGraph.to_undirected(g, reciprocal = True)
    # NOTE: enumerate_all_cliques is exponential in the worst case; fine
    # for the small crawled graphs this project produces.
    aux = list(nx.enumerate_all_cliques(G))
    for clique in aux:
        if len(clique) >= min_size_clique:
            large_cliques.append(clique)
    # Collect the distinct nodes appearing in any large clique,
    # preserving first-seen order.
    for clique in large_cliques:
        for node in clique:
            if node not in nodes_in_large_cliques:
                nodes_in_large_cliques.append(node)
    return [large_cliques, nodes_in_large_cliques]
def find_max_k_core(g):
    '''
    :param g: A graph with the corresponding networkx format.
    :return: The k-core with a maximum k value.
    '''
    # nx.k_core rejects graphs with self-loops, so strip them first.
    # NOTE: this mutates the caller's graph in place.
    g.remove_edges_from(nx.selfloop_edges(g))
    # With k omitted, nx.k_core returns the core with the largest k.
    k_Core_max = nx.k_core(g)
    nx.draw(k_Core_max, with_labels = True)
    plt.show()  # blocks until the plot window is closed
return k_Core_max | arnaucampru/Social-Network-Analysis | pràctica.py | pràctica.py | py | 7,239 | python | en | code | 1 | github-code | 90 |
320125814 | import csv
from net.initialization.header.metrics import metrics_header
from net.utility.msg.msg_metrics_complete import msg_metrics_complete
from net.metrics.utility.my_notation import scientific_notation
def metrics_train_resume_csv(metrics_path: str,
                             metrics: dict):
    """
    Resume metrics-train.csv

    Rewrites the per-epoch training metrics CSV from the in-memory
    `metrics` dictionary: one header row, then one row per tick.

    :param metrics_path: metrics path
    :param metrics: metrics dictionary
    """

    with open(metrics_path, 'w') as file:
        writer = csv.writer(file)

        # header row for the 'train' metrics layout
        writer.writerow(metrics_header(metrics_type='train'))

        # one CSV row per recorded tick, column order matching the header
        for idx in range(len(metrics['ticks'])):
            row = [metrics['ticks'][idx],
                   metrics['loss']['loss'][idx],
                   metrics['loss']['classification'][idx],
                   metrics['loss']['regression'][idx],
                   scientific_notation(number=metrics['learning_rate'][idx]),
                   metrics['AUC'][idx],
                   metrics['sensitivity']['work_point'][idx],
                   metrics['sensitivity']['max'][idx],
                   metrics['AUFROC']['[0, 1]'][idx],
                   metrics['AUFROC']['[0, 10]'][idx],
                   metrics['AUFROC']['[0, 50]'][idx],
                   metrics['AUFROC']['[0, 100]'][idx],
                   metrics['time']['train'][idx],
                   metrics['time']['validation'][idx],
                   metrics['time']['metrics'][idx]]
            writer.writerow(row)

    msg_metrics_complete(metrics_type='resume')
| cirorusso2910/GravityNet | net/resume/metrics_train_resume.py | metrics_train_resume.py | py | 1,704 | python | en | code | 7 | github-code | 90 |
45824483389 | import re
from textwrap import fill
def protein_char_class(alignment=False, nl=True, allow=None, extra_chars=False, stops=False):
    '''Returns the character class string for the sequence described,
    i.e. '[AC-IK-NP-TVWY\\n]'.

    Parameters:
        alignment   (default=False)  Allow '-' character
        nl          (default=True)   Allow '\\n' characters
        allow       (default=None)   String of additional characters allowed in the sequence
        extra_chars (default=False)  Allow BOUXZ
        stops       (default=False)  Allow '*' character
    '''
    # Assemble the class in a fixed order so the returned pattern text is
    # deterministic for any flag combination.
    char_class = r'AC-IK-NP-TVWY'
    if nl:
        char_class += r'\n'
    if allow is not None:
        char_class += allow
    if extra_chars:
        char_class += r'XBOUZ'
    if stops:
        char_class += r'*'
    if alignment:
        char_class += r'\-'
    return '[%s]' % char_class


def protein_fasta_re(alignment=False, allow=None, extra_chars=False, stops=False, ignore_case=False, bad_header_chars=None, max_seqs=None, min_seqs=1):
    '''Returns a compiled regex for the described FASTA format.

    Parameters:
        alignment   (default=False) Allow '-' character
        allow       (default=None)  String of additional characters allowed in the sequence
        extra_chars (default=False) Allow BOUXZ
        stops       (default=False) Allow '*' character
        ignore_case (default=False) Check sequences insensitive to case
        bad_header_chars (default=None) Characters forbidden in header lines
        min_seqs    (default=1)     Minimum number of sequences allowed
        max_seqs    (default=None)  Maximum number of sequences allowed
    '''
    char_class = protein_char_class(nl=False, alignment=alignment, allow=allow,
        extra_chars=extra_chars, stops=stops)
    # Sequence continuation lines may also contain spaces and newlines.
    char_class_and_ws = protein_char_class(nl=True, alignment=alignment,
        allow=(allow or '') + ' ', extra_chars=extra_chars, stops=stops)
    # One record: a '>' header line, then at least one sequence character.
    fasta_part = r'(>%s+)\n\s*(%s%s*)' % (
        # either forbidden characters or dot
        bad_header_chars and (r'[^%s\n]' % bad_header_chars) or '.',
        char_class,
        char_class_and_ws,
    )
    if max_seqs is None:
        mult = r'{%i,}' % min_seqs
    else:
        mult = r'{%i,%i}' % (min_seqs,max_seqs)
    pattern = r'^(%s)%s$' % (fasta_part, mult)
    return re.compile(pattern, ignore_case and re.I)


class FastaError(Exception):
    """FASTA format parsing error

    Attributes:
        message -- explanation of the error
        num -- the number of the offending line in the FASTA file
        line -- the offending line (optional)
    """

    def __init__(self, message, num, line=''):
        self.message = message
        self.line = "line %i: '%s'" % (num, line)

    def __str__(self):
        return self.message


# convenience functions

def format_protein_fasta_and_count(inp, to_upper=False, wrap=False, alignment=False, allow=None, extra_chars=False, stops=False, ignore_case=False, bad_header_chars=None, max_seqs=1, min_seqs=1, max_length=None):
    '''Check FASTA input for format errors and return a tuple of
    (formatted FASTA string, number of sequences).

    Parameters:
        to_upper    (default=False) Converts sequences to upper case letters
        wrap        (default=False) Reset the line wrapping for sequences
        alignment   (default=False) Allow '-' character and enforce equal lengths
        allow       (default=None)  String of additional characters allowed in the sequence
        extra_chars (default=False) Allow BOUXZ
        stops       (default=False) Allow '*' character
        ignore_case (default=False) Check sequences insensitive to case
        bad_header_chars (default=None) Characters forbidden in header lines
        min_seqs    (default=1)     Minimum number of sequences allowed
        max_seqs    (default=1)     Maximum number of sequences allowed
        max_length  (default=None)  Maximum residues permitted per sequence

    Raises FastaError on any format violation.
    '''
    # Remove unwanted whitespace
    inp = inp.strip().replace('\r','')
    # Handle empty string
    if not inp:
        if min_seqs:
            raise FastaError('input is an empty string; at least %i sequence(s) required' % min_seqs, 0, '')
        else:
            # BUGFIX: return a (text, count) tuple as documented; returning
            # a bare '' made format_protein_fasta crash on ''[0].
            return ('', 0)
    # Convert to_upper and ignore_case to re.I for use as a re.compile argument
    to_upper = to_upper and re.I
    ignore_case = (ignore_case or to_upper) and re.I
    # Handle missing initial header
    if not inp.startswith('>'):
        if max_seqs == 1:
            inp = '>Submission\n' + inp
        else:
            raise FastaError('missing first header',
                0, inp.split('\n',1)[0])
    # If no changes need to be made to the sequences, validate via regex
    if not (wrap or alignment or to_upper or max_length) and \
        protein_fasta_re(alignment=alignment, allow=allow,
            extra_chars=extra_chars, stops=stops, ignore_case=ignore_case,
            bad_header_chars=bad_header_chars, max_seqs=max_seqs,
            min_seqs=min_seqs).match(inp):
        return (inp + '\n', inp.count('\n>')+1)
    # Parse FASTA input, line by line; recs collects [header, seq, line_num]
    recs = []
    seq_char_class = protein_char_class(
        alignment=alignment, allow=allow, extra_chars=extra_chars,
        stops=stops, nl=False)
    seq_line_re = re.compile('^%s*$' % seq_char_class, ignore_case)
    for num,line in enumerate(inp.split('\n')):
        line = line.strip()
        if not line:
            continue
        if seq_line_re.match(line.replace(' ', '')): # Catch a sequence line
            recs[-1][1] += line
        elif line.startswith('>'): # Catch a header line
            # Enforce max_length on the record we just finished reading.
            if max_length is not None and recs and len(recs[-1][1]) > max_length:
                raise FastaError("sequence contains %i residues; only %i residue(s) permitted per sequence" % (
                    len(recs[-1][1]), max_length),
                    num, recs[-1][0])
            # Check for empty header
            if not len(line[1:].strip()):
                raise FastaError("illegal empty header", num, line)
            recs.append(['>%s' % line[1:].strip(), '', num])
            # Check for illegal header characters
            # BUGFIX: guard against bad_header_chars=None —
            # set.intersection(None) raises TypeError.
            bad_chars = set(line).intersection(bad_header_chars or '')
            if bad_chars:
                raise FastaError("illegal header character(s) '%s'" % (
                    "', '".join(bad_chars)),
                    num, line)
            # Check for too many sequences
            if max_seqs is not None and len(recs) > max_seqs:
                raise FastaError(
                    'contains over %i sequences; up to %i sequences permitted' \
                    % (len(recs)-1, max_seqs), recs[-1][2], recs[-1][0])
        else: # Error
            raise FastaError("illegal sequence character(s) '%s'" % (
                "', '".join(re.compile(seq_char_class,
                    ignore_case).sub('', ''.join(set(line.replace(' ', '')))))),
                num, line)
    # BUGFIX: the in-loop max_length check only fired when ANOTHER header
    # followed, so the final (or only) sequence was never length-checked.
    if max_length is not None and recs and len(recs[-1][1]) > max_length:
        raise FastaError("sequence contains %i residues; only %i residue(s) permitted per sequence" % (
            len(recs[-1][1]), max_length),
            recs[-1][2], recs[-1][0])
    # Check for too few sequences
    if min_seqs is not None and len(recs) < min_seqs:
        num,line = len(recs) and (recs[-1][2], recs[-1][0]) or (0,'')
        raise FastaError('contains %i sequences; at least %i sequences required' % (len(recs), min_seqs), num, line)
    # Rewrap if necessary
    sizer = wrap and fill or (lambda x: x)
    # BUGFIX: zip(*recs)[1] is Python-2-only (zip objects are not
    # subscriptable in Python 3).
    seqs = [rec[1] for rec in recs]
    # Check alignment lengths, if necessary
    # BUGFIX: replaces a Python-2 reduce/map construct (`reduce` is not a
    # builtin in Python 3); semantics are unchanged.
    if alignment and len(set(len(s) for s in seqs)) > 1:
        len_aln = len(recs[0][1])
        for rec in recs:
            if len(rec[1]) != len_aln:
                raise FastaError("length of sequence '%s' differs from previous sequence(s) in alignment" % rec[0], rec[2], rec[0])
    # Check for empty sequences; apply case conversion and wrapping
    for rec in recs:
        header,seq,num = rec
        if not seq:
            raise FastaError("header '%s' contains no sequence" % header,
                num, header)
        rec[1] = sizer((to_upper and seq.upper()) or seq)
    # BUGFIX: the count was len(rec) — always 3, the field count of the
    # last record — instead of the number of sequences.
    return ('\n'.join('\n'.join(r[0:2]) for r in recs) + '\n', len(recs))


def format_a2m(inp, max_seqs=100, extra_chars=False):
    '''Wrapper function for format_protein_fasta, set to match a2m.
    '''
    return format_protein_fasta(inp, ignore_case=True, max_seqs=max_seqs,
        extra_chars=extra_chars, allow='.', alignment=True)


def format_protein_fasta(*args, **kwargs):
    '''Check FASTA input for format errors and return a string with requested changes.

    Same parameters as format_protein_fasta_and_count; only the formatted
    text (not the sequence count) is returned.
    '''
    return format_protein_fasta_and_count(*args, **kwargs)[0]
| berkeleyphylogenomics/BPG_utilities | pfacts003/utils/format_patterns.py | format_patterns.py | py | 8,698 | python | en | code | 1 | github-code | 90 |
28142618430 | import pygame
import sys
import random
from pygame.math import Vector2
class PADDLE:
    """A movable paddle; player 1 uses W/S, player 2 uses the arrow keys."""

    def __init__(self, x, y, width, height, color, speed):
        self.x = x            # top-left x in pixels
        self.y = y            # top-left y in pixels
        self.width = width
        self.height = height
        self.color = color    # RGB tuple
        self.speed = speed    # pixels moved per frame while a key is held

    def draw_paddle(self):
        # Draw onto whatever surface is currently the active display.
        display = pygame.display.get_surface()
        pygame.draw.rect(display, (self.color), (self.x,self.y, self.width, self.height), 0)

    def move_player_1(self, cell_number, cell_size):
        # W = up, S = down; movement stops at the top edge and at
        # (window_height - paddle_height) at the bottom.
        self.keys = pygame.key.get_pressed()
        if self.keys[pygame.K_w] and self.y>0:
            self.y-=self.speed
        if self.keys[pygame.K_s] and self.y<(cell_number*cell_size)-self.height:
            self.y+=self.speed

    def move_player_2(self, cell_number, cell_size):
        # Same as move_player_1 but bound to the up/down arrow keys.
        self.keys = pygame.key.get_pressed()
        if self.keys[pygame.K_UP] and self.y>0:
            self.y-=self.speed
        if self.keys[pygame.K_DOWN] and self.y<(cell_number*cell_size)-self.height:
            self.y+=self.speed
class BALL:
    """The pong ball: position, velocity, and wall-bounce drawing logic."""

    def __init__(self, ball_speed_x, ball_speed_y, ball_pos_x, ball_pos_y):
        self.ball_speed_x = ball_speed_x  # horizontal velocity, pixels/frame
        self.ball_speed_y = ball_speed_y  # vertical velocity, pixels/frame
        self.ball_pos_x = ball_pos_x
        self.ball_pos_y = ball_pos_y

    def draw_ball(self):
        # create ball
        # NOTE: relies on the module-level `screen` surface.
        ball1 = pygame.draw.circle(screen, (42,95,23), [self.ball_pos_x,self.ball_pos_y], 5, 0)
        # move rect — the shifted rect is used only for the wall tests below
        ball = ball1.move(self.ball_speed_x, self.ball_speed_y)
        # Bounce off the 600x600 window edges by flipping the velocity sign.
        if ball.left <=0 or ball.right >=600:
            self.ball_speed_x = -self.ball_speed_x
        # NOTE(review): both conditions test ball.top; the second was
        # presumably meant to be ball.bottom — confirm against gameplay.
        if ball.top <=0 or ball.top >=600:
            self.ball_speed_y= -self.ball_speed_y
        self.ball_pos_x += self.ball_speed_x
        self.ball_pos_y += self.ball_speed_y
        # draw ball (at the pre-move centre; the position catches up next frame)
        pygame.draw.circle(screen, (42,95,23), ball1.center, 5, 0)
# --- module-level game setup ---
pygame.init()
cell_size = 30
cell_number = 20  # window is cell_number * cell_size = 600 px square
screen = pygame.display.set_mode((cell_number*cell_size,cell_number*cell_size))
pygame.display.set_caption("Pong Game")
clock = pygame.time.Clock()

# objects
ball = BALL(random.randint(1,4), random.randint(1,4), 300, 300)  # random initial velocity, centred
paddle1 = PADDLE(30,150,10,50,(123,26,136),5)   # left paddle (W/S keys)
paddle2 = PADDLE(560,150,10,50,(12,26,116),5)   # right paddle (arrow keys)

# --- main loop: handle events, apply input, redraw ---
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()

    # ball collision
    # color for window background
    screen.fill((34,3,3))

    # draw the paddle 1 & 2
    paddle1.draw_paddle()
    paddle2.draw_paddle()

    # draw the paddle 2
    paddle1.move_player_1(cell_number, cell_size)
    paddle2.move_player_2(cell_number, cell_size)

    # draw ball
    ball.draw_ball()
    print(ball.ball_pos_x)  # debug trace of the ball's x position

    pygame.display.update()
clock.tick(60) | marvinraj/Pong-Game | backup.py | backup.py | py | 2,805 | python | en | code | 0 | github-code | 90 |
13307184959 | #!/usr/bin/env python3
from bs4 import BeautifulSoup
import json
TWITTER_PREFIX = 'https://twitter.com/'
# Even more code duplication.
# I'm not sure whether this still is a good idea.
def get_details_bundestag(old_entry, soup):
    """Build a cleaned member entry from a bundestag.de listing.

    :param old_entry: raw crawl dict with src/page/full_name/name/detect_party
    :param soup: parsed detail page; only used to double-check members whose
        party string ends in ' *' (= no longer in the Bundestag)
    :return: dict with sanitized full_name, 'possible_parties' and 'ejected'
    """
    assert old_entry['src'] == 'bundestag'
    entry = dict()
    entry['src'] = old_entry['src']
    entry['page'] = old_entry['page']
    entry['full_name'] = old_entry['full_name']
    entry['name'] = old_entry['name']
    # No 'img'
    # No 'twitter_handle'
    entry['ejected'] = False
    detect_party = old_entry['detect_party']
    if detect_party.endswith(' *'):
        # Parties like 'SPD *' mean: no longer in the Bundestag.
        entry['possible_parties'] = []
        # soup = BeautifulSoup()
        # Cross-check the biography text: it must end in 'ausgeschieden'
        # ("left") or the listing and the page disagree.
        bio = soup.find('div', 'biografie')
        if bio is None or not bio.get_text().strip().endswith('ausgeschieden'):
            print('WARN: {} apparently left: {}'.format(entry['full_name'], entry['page']))
            print(' … and text says something different?')
            assert False
        detect_party = detect_party[:-2]
        entry['ejected'] = True
    # Sanitize full_name.
    # TODO: Should have probably happened in parse_roots.py, but whatever.
    if entry['full_name'].endswith(')'):
        # Strip a trailing parenthesised qualifier, e.g. "Name (Berlin)".
        parts = entry['full_name'].split('(')[:-1]
        entry['full_name'] = ' '.join(parts).strip()
    # Map the site's party label onto our canonical party names.
    if detect_party == 'Die Linke':
        entry['possible_parties'] = ['DIE LINKE']
    elif detect_party == 'CDU/CSU':
        entry['possible_parties'] = ['CDU', 'CSU']
    elif detect_party == 'SPD':
        entry['possible_parties'] = ['SPD']
    elif detect_party == 'B\u00fcndnis 90/Die Gr\u00fcnen':
        entry['possible_parties'] = ['GRÜNE']
    else:
        assert False, "Unknown party: '{}'".format(old_entry['detect_party'])
    return entry
def get_details_linke(old_entry, soup):
    """Extract Twitter handle and press-photo info from a linksfraktion.de page.

    :param old_entry: raw crawl dict (src/page/full_name)
    :param soup: parsed member detail page
    :return: entry dict with 'possible_parties', optional 'twitter_handle',
        and an 'img' dict (url + license); asserts if no photo is found
    """
    assert old_entry['src'] == 'die linke'
    entry = dict()
    entry['src'] = old_entry['src']
    entry['page'] = old_entry['page']
    entry['full_name'] = old_entry['full_name']
    # No 'ejected'
    entry['possible_parties'] = ['DIE LINKE']
    imgdata = {'license': 'custom-linke'}
    # Specifically, the "license" can be found here:
    # https://www.linksfraktion.de/presse/pressedownload/

    # Twitter-Handle
    # <a href="https://twitter.com/AndrejHunko">Twitter-Profil</a>
    for a in soup.find_all('a'):
        href = a.get('href')
        if href is None or not href.startswith(TWITTER_PREFIX):
            # Not even relevant
            continue
        raw_text = a.get_text()
        if raw_text == '':
            continue
        assert raw_text == 'Twitter-Profil', (a, old_entry)
        new_handle = href[len(TWITTER_PREFIX):]
        # At most one Twitter link may appear per page.
        assert 'twitter_handle' not in entry, (entry['twitter_handle'], new_handle, old_entry)
        entry['twitter_handle'] = new_handle
        # Don't break: check/assert for duplicate links!
    # Don't assert: omission is okay

    # Image:
    # <a href="/fileadmin/user_upload/Pressefotos/pressefoto-jan-van-aken.jpg"
    #    target="_blank">Pressefoto von Jan van Aken</a>
    # Duplicate effort, but whatever. Ease of readability ftw!
    for a in soup.find_all('a'):
        PREFIX = '/fileadmin/user_upload/Pressefotos/pressefoto-'
        href = a.get('href')
        good_text = a.get_text().startswith('Pressefoto von ')
        good_href = href is not None and href.startswith(PREFIX)
        if not good_text and not good_href:
            continue
        # Text and href must agree — a mismatch means the page layout changed.
        assert good_text and good_href, (a, old_entry)
        new_url = 'https://www.linksfraktion.de' + href
        assert 'url' not in imgdata, (imgdata['url'], new_url, old_entry)
        imgdata['url'] = new_url
        # Don't break: check/assert for duplicate links!
    assert 'url' in imgdata
    # No imgdata['copyright']
    entry['img'] = imgdata
    return entry
def get_details_gruene(old_entry, soup):
    """Extract Twitter handle and photo-ZIP info from a gruene-bundestag.de page.

    :param old_entry: raw crawl dict (src/page/full_name)
    :param soup: parsed member detail page
    :return: entry dict with 'possible_parties', optional 'twitter_handle',
        and an 'img' dict (url + license); asserts if no photo is found
    """
    assert old_entry['src'] == 'gruene'
    entry = dict()
    entry['src'] = old_entry['src']
    entry['page'] = old_entry['page']
    entry['full_name'] = old_entry['full_name']
    # No 'ejected'
    entry['possible_parties'] = ['GRÜNE']
    # is_compressed: the photo link points at a ZIP archive, not an image.
    imgdata = {'license': 'custom-gruene', 'is_compressed': True}
    # License source: private communication (Max Schwenger)

    # Twitter-Handle
    # <a href="https://twitter.com/Luise_Amtsberg" target="_blank"
    #    class="share__button share__button--twitter--outline">Twitter</a>
    twitter_a = soup.find('a', 'share__button--twitter--outline')
    if twitter_a is not None:
        href = twitter_a.get('href')
        # Dear Grüne,
        # please get your shit together.
        # https://www.youtube.com/watch?v=jl17CYYSzUw
        # Normalize http:// and www. variants to the canonical prefix.
        href = href.replace('http://', 'https://')
        href = href.replace('//www.twitter.com/', '//twitter.com/')
        assert href.startswith(TWITTER_PREFIX), (href, old_entry)
        new_handle = href[len(TWITTER_PREFIX):]
        entry['twitter_handle'] = new_handle

    # Image:
    # <a href="uploads/tx_wwgruenefraktion/Amtsberg_Luise_01.zip"
    #    class="member-media__download">Download Foto</a>
    for a in soup.find_all('a', 'member-media__download'):
        PREFIX = 'uploads/tx_wwgruenefraktion/'
        href = a.get('href')
        good_text = a.get_text() == 'Download Foto'
        good_href = href is not None and href.startswith(PREFIX)
        if not good_text and not good_href:
            continue
        # Text and href must agree — a mismatch means the page layout changed.
        assert good_text and good_href, (a, old_entry)
        # https://www.gruene-bundestag.de/uploads/tx_wwgruenefraktion/Franziska-Branter.zip
        new_url = 'https://www.gruene-bundestag.de/' + href
        assert 'url' not in imgdata, (imgdata['url'], new_url, old_entry)
        imgdata['url'] = new_url
        # Don't break: check/assert for duplicate links!
    assert 'url' in imgdata
    # No imgdata['copyright']
    entry['img'] = imgdata
    return entry
def get_details_spd(old_entry, soup):
    """Extract Twitter handle, press photo and photographer credit from an
    spdfraktion.de member page.

    :param old_entry: raw crawl dict (src/page/full_name)
    :param soup: parsed member detail page
    :return: entry dict with 'possible_parties', optional 'twitter_handle',
        and an 'img' dict (url + license + copyright)
    """
    assert old_entry['src'] == 'spd'
    entry = dict()
    entry['src'] = old_entry['src']
    entry['page'] = old_entry['page']
    entry['full_name'] = old_entry['full_name']
    # No 'ejected'
    entry['possible_parties'] = ['SPD']
    imgdata = {'license': 'custom-spd'}
    entry['img'] = imgdata

    # Sanitize full_name.
    # TODO: Should have probably happened in parse_roots.py, but whatever.
    # Collapse runs of whitespace, then strip a trailing "(...)" qualifier.
    entry['full_name'] = " ".join(entry['full_name'].split())
    if entry['full_name'].endswith(')'):
        parts = entry['full_name'].split('(')[:-1]
        entry['full_name'] = ' '.join(parts).strip()

    # Twitter-Handle
    # <a href="https://twitter.com/NielsAnnen" target="_blank">twitter</a>
    for a in soup.find_all('a'):
        href = a.get('href')
        if href is None or not href.startswith(TWITTER_PREFIX):
            # Not even relevant
            continue
        raw_text = a.get_text()
        # Skip Twitter share/search widgets that also link to twitter.com.
        if 'http://www.spdfraktion.de' in href or \
            'search?q=' in href or \
            'http%3A%2F%2Fwww.spdfraktion.de%2F' in href or \
            raw_text.startswith('@'):
            # Dumb header-things. What the hell?
            continue
        if raw_text != 'twitter':
            print('ignore {}: {}'.format(a, old_entry))
            continue
        new_handle = href[len(TWITTER_PREFIX):]
        assert 'twitter_handle' not in entry, (entry['twitter_handle'], new_handle, old_entry)
        entry['twitter_handle'] = new_handle
        # Don't break: check/assert for duplicate links!
    # Don't assert: omission is okay

    # Image:
    # <a title="Bild-Download" href="http://www.spdfraktion.de/system/files/images/annen_niels.jpg"
    #    class="ico_download float_right">Pressebild (4249 KB)</a>
    img_a = soup.find('a', 'ico_download')
    assert img_a is not None, old_entry
    img_href = img_a.get('href')
    assert img_href.startswith('http://www.spdfraktion.de/system/files/images/'), (img_href, old_entry)
    assert img_a.get_text().startswith('Pressebild (')
    imgdata['url'] = img_href

    # Photographer:
    # <span class="copyright">(Foto: spdfraktion.de (Susie Knoll / Florian Jänicke))</span>
    copyright = soup.find('span', 'copyright')
    assert copyright is not None
    copy_text = copyright.get_text()
    # Strip the '(Foto: ' prefix and the trailing ')'.
    COPY_START = '(Foto: '
    COPY_END = ')'
    assert copy_text.startswith(COPY_START) and copy_text.endswith(COPY_END), (copy_text, old_entry)
    copy_text = copy_text[len(COPY_START):]
    copy_text = copy_text[:-len(COPY_END)]
    imgdata['copyright'] = copy_text
    return entry
def get_details_cxu(old_entry, soup):
    """Extract party (CDU vs CSU), Twitter handle, photo and credit from a
    cducsu.de member page.

    :param old_entry: raw crawl dict (src/page/full_name)
    :param soup: parsed member detail page
    :return: entry dict; 'img' is only attached when a download link exists
    """
    assert old_entry['src'] == 'cxu'
    entry = dict()
    entry['src'] = old_entry['src']
    entry['page'] = old_entry['page']
    entry['full_name'] = old_entry['full_name']
    # No 'ejected'
    # Don't set 'possible_parties' yet: see below
    imgdata = dict()
    # Don't set 'img' yet: see below

    # Determine party
    # <div class="vocabulary-landesgruppen">
    #   <div class="group-left" />
    #   <div class="group-right">Hessen</div>
    # </div>
    # Only the Bavarian Landesgruppe is CSU; everyone else is CDU.
    district = soup.find('div', 'vocabulary-landesgruppen').get_text()
    if 'CSU' in district:
        entry['possible_parties'] = ['CSU']
    else:
        entry['possible_parties'] = ['CDU']

    # Twitter-Handle
    # <a href="http://twitter.com/dieAlbsteigerin" title="Twitter" target="_blank" />
    a_twitter = soup.find('a', {'title': 'Twitter'})
    if a_twitter is not None:
        href_twitter = a_twitter.get('href')
        # Dear CXU,
        # please get your shit together.
        # https://www.youtube.com/watch?v=jl17CYYSzUw
        # Normalize http:// and www. variants to the canonical prefix.
        href_twitter = href_twitter.replace('http://', 'https://')
        href_twitter = href_twitter.replace('//www.twitter.com/', '//twitter.com/')
        assert href_twitter.startswith(TWITTER_PREFIX), (href_twitter, old_entry)
        new_handle = href_twitter[len(TWITTER_PREFIX):]
        entry['twitter_handle'] = new_handle

    # Image:
    # <div class="btn btn-light download">
    #   <p><a href="https://www.cducsu.de/file/37075/download?token=tNcn6T0h">Download</a></p>
    # </div>
    # (Don't even search for the small, non-ideal image)
    img_div = soup.find('div', 'download')
    if img_div is None:
        # Early exit: no press photo on this page.
        return entry
    img_href = img_div.find('a')['href']
    # Two known URL layouts for the download link.
    if img_href.startswith('https://www.cducsu.de/file/'):
        assert '/download?token=' in img_href, (img_href, old_entry)
        assert img_div.get_text() == 'Download\n', (img_div.get_text(), old_entry)
        imgdata['url'] = img_href
    elif img_href.startswith('/download/file/fid/'):
        assert img_div.get_text() == 'Download', (img_div.get_text(), old_entry)
        imgdata['url'] = 'https://www.cducsu.de' + img_href
    else:
        assert False, (img_href, old_entry)

    # Image license:
    # <div class="cc-image" />
    assert soup.find('div', 'cc-image') is not None, old_entry
    # The class 'cc-image' directly implies displaying the CC-BY-SA badge,
    # and is accompanied by the 3.0 text. Thus, this is binding.
    imgdata['license'] = 'CC-BY-SA-3.0'

    # Photographer:
    # <div class="group-bildquelle"><div class="label-inline">
    #   Bildquelle: </div>Junge Union</div>
    copyright = soup.find('div', 'group-bildquelle')
    assert copyright is not None, old_entry
    copy_text = copyright.get_text().strip()
    COPY_START = 'Bildquelle:\xa0'
    assert copy_text.startswith(COPY_START), (copy_text, old_entry)
    copy_text = copy_text[len(COPY_START):]
    imgdata['copyright'] = copy_text
    entry['img'] = imgdata
    return entry
def get_details_all(entries):
    """Generator: dispatch each crawled entry to its per-source detail
    parser, then sanitize Twitter handles and photo-credit strings.

    :param entries: list of raw crawl dicts, each with 'src', 'full_name'
        and 'page_file' (path to the saved HTML page)
    :yields: cleaned detail dicts; entries with no registered detailer and
        the fictional Mierscheid entry are skipped with a message
    """
    # Setup
    detailers = {
        'bundestag': get_details_bundestag,
        'die linke': get_details_linke,
        'gruene': get_details_gruene,
        'spd': get_details_spd,
        'cxu': get_details_cxu,
    }
    # Actual work
    for e in entries:
        # Jakob Maria Mierscheid is a long-running Bundestag in-joke.
        if e['full_name'] == 'Jakob Maria Mierscheid':
            print('Ignoring fictional character {} on {}'.format(e['full_name'], e['src']))
            continue
        detailer = detailers.get(e['src'])
        if detailer is None:
            print('[WARN] skip for party {}: {}'.format(e['src'], e['full_name']))
            continue
        with open(e['page_file'], 'r') as fp:
            the_soup = BeautifulSoup(fp.read(), 'html.parser')
        detail = detailer(e, the_soup)
        # Normalize handles: strip whitespace/@, query strings, and the
        # legacy '#!/' hash-bang URL fragment.
        if 'twitter_handle' in detail:
            orig = detail['twitter_handle']
            handle = orig
            handle = handle.strip().strip('@')
            handle = handle.split('?')[0]
            if handle.startswith('#!/'):
                handle = handle[len('#!/'):]
            if orig != handle:
                print('[WARN] Sanitized handle {} to {}'.format(orig, handle))
            detail['twitter_handle'] = handle
        # Normalize photographer credits: drop site wrappers and prefixes.
        if 'img' in detail and 'copyright' in detail['img']:
            copy_text = detail['img']['copyright']
            COPYRIGHT_SANATIZE = {
                "spdfraktion.de (Susie Knoll / Florian J\u00e4nicke)": 'Susie Knoll / Florian J\u00e4nicke',
                "spdfraktion.de (Susie Knoll)": 'Susie Knoll',
            }
            DUMB_PREFIXES = ['Fotograf: ', 'Official White House Photo by ']
            if copy_text in COPYRIGHT_SANATIZE:
                copy_text = COPYRIGHT_SANATIZE[copy_text]
            for dumb in DUMB_PREFIXES:
                if copy_text.startswith(dumb):
                    copy_text = copy_text[len(dumb):]
            detail['img']['copyright'] = copy_text
        yield detail
if __name__ == '__main__':
    # Pipeline step: crawl_each.json (raw entries) -> parse_each.json (details).
    # Reading
    with open('crawl_each.json', 'r') as json_fp:
        all_entries = json.load(json_fp)
    # Parsing
    detailed = list(get_details_all(all_entries))
    # Write it out.
    with open('parse_each.json', 'w') as fp:
        json.dump(detailed, fp, sort_keys=True, indent=2)
    print('Done.')
| Schwenger/House-Of-Tweets | tools/PhotoMiner/parse_each.py | parse_each.py | py | 13,967 | python | en | code | 0 | github-code | 90 |
4679114797 | import os
import copy
import time
import math
import datetime
import warnings
import requests
import rasterio
# from rasterio.merge import merge
from rasterio import Affine, windows
def file_exists(path):
    """Return True if `path` names an existing regular file."""
    return os.path.isfile(path)
def get_current_timestamp(format_str=None):
    """Return the current local time as a formatted string.

    :param format_str: strftime format; defaults to '%Y_%m_%d_%H_%M'
    """
    if format_str is None:
        format_str = '%Y_%m_%d_%H_%M'
    now = datetime.datetime.fromtimestamp(int(time.time()))
    return now.strftime(format_str)
def download_file(url, local_filename):
    """Download a file from url to local_filename

    Streams the response body to disk in 1 MiB chunks so arbitrarily
    large files never need to fit in memory.
    """
    chunk_bytes = 1024 * 1024
    with requests.get(url, stream=True) as response:
        response.raise_for_status()
        with open(local_filename, 'wb') as out:
            for chunk in response.iter_content(chunk_size=chunk_bytes):
                out.write(chunk)
# def create_mosaic(tile_list):
# """Basic mosaic function (not memory efficient)
# """
# mosaic_scenes = [rasterio.open(path) for path in tile_list]
# meta = copy.copy(mosaic_scenes[0].meta)
# data, transform = merge(mosaic_scenes)
# for i in mosaic_scenes: i.close()
# if 'affine' in meta:
# meta.pop('affine')
# meta["transform"] = transform
# meta['height'] = data.shape[1]
# meta['width'] = data.shape[2]
# meta['driver'] = 'GTiff'
# return data, meta
def create_mosaic(tile_list, output_path):
    """Memory efficient mosaic function

    Merges the rasters in `tile_list` into one GeoTIFF at `output_path`,
    copying data window-by-window so the full mosaic never has to be held
    in memory. Resolution, nodata, dtype and band count are taken from the
    FIRST tile — all inputs are assumed (not verified) to match it.

    Based on code from:
    - https://gis.stackexchange.com/questions/348925/merging-rasters-with-rasterio-in-blocks-to-avoid-memoryerror
    - https://github.com/mapbox/rasterio/blob/master/rasterio/merge.py
    - https://github.com/mapbox/rio-merge-rgba/blob/master/merge_rgba/__init__.py
    """
    # NOTE(review): these dataset handles are never closed; consider
    # try/finally or contextlib.ExitStack in long-lived processes.
    sources = [rasterio.open(raster) for raster in tile_list]

    res = sources[0].res
    nodata = sources[0].nodata
    dtype = sources[0].dtypes[0]
    output_count = sources[0].count

    # Extent of all inputs
    # scan input files
    xs = []
    ys = []
    for src in sources:
        left, bottom, right, top = src.bounds
        xs.extend([left, right])
        ys.extend([bottom, top])
    dst_w, dst_s, dst_e, dst_n = min(xs), min(ys), max(xs), max(ys)

    out_transform = Affine.translation(dst_w, dst_n)

    # Resolution/pixel size
    out_transform *= Affine.scale(res[0], -res[1])

    # Compute output array shape. We guarantee it will cover the output bounds completely
    output_width = int(math.ceil((dst_e - dst_w) / res[0]))
    output_height = int(math.ceil((dst_n - dst_s) / res[1]))

    # Adjust bounds to fit
    # dst_e, dst_s = out_transform * (output_width, output_height)

    # create destination array
    # destination array shape
    shape = (output_height, output_width)

    dest_profile = {
        "driver": 'GTiff',
        "height": shape[0],
        "width": shape[1],
        "count": output_count,
        "dtype": dtype,
        "crs": '+proj=latlong',
        "transform": out_transform,
        "compress": "LZW",
        "tiled": True,
        "nodata": nodata,
        "bigtiff": True,
    }

    # open output file in write/read mode and fill with destination mosaick array
    with rasterio.open(output_path, 'w+', **dest_profile) as mosaic_raster:
        for src in sources:
            # NOTE(review): only band 1 is read and written, even when
            # output_count > 1 — confirm multi-band inputs are not expected.
            for ji, src_window in src.block_windows(1):
                # convert relative input window location to relative output window location
                # using real world coordinates (bounds)
                src_bounds = windows.bounds(src_window, transform=src.profile["transform"])
                dst_window = windows.from_bounds(*src_bounds, transform=mosaic_raster.profile["transform"])
                # round the values of dest_window as they can be float
                dst_window = windows.Window(round(dst_window.col_off), round(dst_window.row_off), round(dst_window.width), round(dst_window.height))
                # read data from source window
                r = src.read(1, window=src_window)
                # if tiles/windows have overlap:
                # before writing the window, replace source nodata with dest nodata as it can already have been written
                # dest_pre = mosaic_raster.read(1, window=dst_window)
                # mask = (np.isnan(r))
                # r[mask] = dest_pre[mask]
                # write data to output window
                mosaic_raster.write(r, 1, window=dst_window)
def export_raster(data, path, meta, **kwargs):
    """Export raster array to geotiff

    :param data: array of shape (bands, rows, cols) to write
    :param path: output GeoTIFF path
    :param meta: rasterio profile dict; missing keys are filled with defaults
    :param kwargs: pass quiet=True to silence default-value messages
    :raises ValueError: if meta is not a dict
    """
    if not isinstance(meta, dict):
        raise ValueError("meta must be a dictionary")

    # BUGFIX: work on a copy so the caller's dict is not silently mutated
    # by the dtype adjustment and default fill-in below.
    meta = dict(meta)

    if 'dtype' in meta:
        if meta["dtype"] != data.dtype:
            warnings.warn(f"Dtype specified by meta({meta['dtype']}) does not match data dtype ({data.dtype}). Adjusting data dtype to match meta.")
            data = data.astype(meta["dtype"])
    else:
        meta['dtype'] = data.dtype

    default_meta = {
        'count': 1,
        'crs': {'init': 'epsg:4326'},
        'driver': 'GTiff',
        'compress': 'lzw',
        'nodata': -9999,
    }
    for k, v in default_meta.items():
        if k not in meta:
            if 'quiet' not in kwargs or kwargs["quiet"] == False:
                print(f"Value for `{k}` not in meta provided. Using default value ({v})")
            meta[k] = v

    # write geotif file
    with rasterio.open(path, "w", **meta) as dst:
        dst.write(data)
def _task_wrapper(func, args):
try:
func(*args)
return (0, "Success", args)
except Exception as e:
return (1, repr(e), args)
def run_tasks(func, flist, mode, max_workers=None, chunksize=1):
    """Run `func` over every argument tuple in `flist`, serially or via MPI.

    Each task goes through `_task_wrapper`, so the returned list contains
    (status, message, args) tuples instead of raising on task failure.

    :param func: callable applied to each unpacked tuple from `flist`
    :param flist: iterable of argument tuples
    :param mode: "parallel" fans out over mpi4py's MPIPoolExecutor;
        any other value runs serially in-process
    :param max_workers: worker count for parallel mode; falls back to the
        OMPI_UNIVERSE_SIZE environment variable when not given
    :param chunksize: batch size handed to the executor in parallel mode
    """
    wrapper_list = [(func, i) for i in flist]
    if mode == "parallel":
        # see: https://mpi4py.readthedocs.io/en/stable/mpi4py.futures.html
        from mpi4py.futures import MPIPoolExecutor
        if max_workers is None:
            if "OMPI_UNIVERSE_SIZE" not in os.environ:
                raise ValueError("Mode set to parallel but max_workers not specified and OMPI_UNIVERSE_SIZE env var not found")
            # BUG FIX: env vars are strings; the executor needs an int count.
            max_workers = int(os.environ["OMPI_UNIVERSE_SIZE"])
            warnings.warn(f"Mode set to parallel but max_workers not specified. Defaulting to OMPI_UNIVERSE_SIZE env var value ({max_workers})")
        with MPIPoolExecutor(max_workers=max_workers) as executor:
            results_gen = executor.starmap(_task_wrapper, wrapper_list, chunksize=chunksize)
            results = list(results_gen)
    else:
        # serial fallback keeps the same (status, message, args) contract
        results = [_task_wrapper(*i) for i in wrapper_list]
    return results
| aiddata/geo-datasets | global_forest_change/utility.py | utility.py | py | 6,757 | python | en | code | 19 | github-code | 90 |
42598331264 | import torch
import torch.nn as nn
import torch.nn.functional as F
class MultiheadAttentionRelative(nn.MultiheadAttention):
    """
    Multihead attention with relative positional encoding.

    Extends ``nn.MultiheadAttention`` so the attention logits are the sum of
    three terms: content-content (q·k), content-position (q·k_r) and
    position-content (q_r·k), where q_r/k_r are projections of a relative
    positional encoding of length 2L-1.
    """

    def __init__(self, embed_dim, num_heads):
        # Fixed configuration: no dropout, no extra k/v bias, no zero-attn,
        # and key/value dims equal to embed_dim.
        super(MultiheadAttentionRelative, self).__init__(embed_dim, num_heads, dropout=0.0, bias=True,
                                                         add_bias_kv=False, add_zero_attn=False,
                                                         kdim=None, vdim=None)

    def gather_attn(self, indexes, bsz, dim, attn):
        """
        Re-index a relative-position attention map to absolute positions.

        indexes [L x L]: gather indexes that shift attn
        attn for q·k_r [N, L, 2L-1]: gather along dimension -1
            (L: target len, N: batch size)
        attn for q_r·k [N, 2L-1, L]: gather along dimension -2
            (L: target len, N: batch size)
        """
        # broadcast the index grid over the batch dimension
        indexes = indexes.unsqueeze(0).expand([bsz, -1, -1])  # N x L x L
        attn = torch.gather(attn, dim, indexes)
        return attn

    def forward(self, query, key, value, need_weights=True, attn_mask=None, pos_enc=None, pos_indexes=None):
        """
        Attention forward pass with optional relative positional terms.

        :param query, key, value: sequence-first tensors; query is unpacked
            as [tgt_len, bsz, embed_dim] below
        :param need_weights: unused; averaged weights are always returned
        :param attn_mask: optional additive mask, 2D [L_q, L_k] or
            3D [bsz*num_heads, L_q, L_k]
        :param pos_enc: optional relative positional encoding (length 2L-1
            along dim 0); when None the positional terms are skipped
        :param pos_indexes: [L, L] gather indexes used by gather_attn
        :return: (attn_output [L, N, E],
                  attention weights averaged over heads [N, L, L],
                  raw pre-softmax weights summed over heads [N, L, L])
        """
        tgt_len, bsz, embed_dim = query.size()
        head_dim = embed_dim // self.num_heads
        assert head_dim * self.num_heads == embed_dim, "embed_dim must be divisible by num_heads"

        # project to get qkv
        if torch.equal(query, key) and torch.equal(key, value):
            # self-attention: single fused in-projection split into q, k, v
            q, k, v = F.linear(query, self.in_proj_weight, self.in_proj_bias).chunk(3, dim=-1)
        elif torch.equal(key, value):
            # cross-attention: q projected from query, k/v from key
            _b = self.in_proj_bias
            _start = 0
            _end = embed_dim
            _w = self.in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            q = F.linear(query, _w, _b)

            if key is None:
                assert value is None
                k = None
                v = None
            else:
                _b = self.in_proj_bias
                _start = embed_dim
                _end = None
                _w = self.in_proj_weight[_start:, :]
                if _b is not None:
                    _b = _b[_start:]
                k, v = F.linear(key, _w, _b).chunk(2, dim=-1)

        # project the positional encoding to q_r, k_r (reuses the first
        # 2*embed_dim rows of the shared in-projection weights)
        if pos_enc is not None:
            # compute k_r, q_r
            _start = 0
            _end = 2 * embed_dim
            _w = self.in_proj_weight[_start:_end, :]
            _b = self.in_proj_bias[_start:_end]
            q_r, k_r = F.linear(pos_enc, _w, _b).chunk(2, dim=-1)  # 2L-1xNxE
            if bsz == 2 * q_r.size(1):  # this is when left/right features are cat together
                q_r, k_r = torch.cat([q_r, q_r], dim=1), torch.cat([k_r, k_r], dim=1)
        else:
            q_r = None
            k_r = None

        # scale query (and q_r) by 1/sqrt(head_dim)
        scaling = float(head_dim) ** -0.5
        q = q * scaling
        if q_r is not None:
            q_r = q_r * scaling

        # adjust attn mask size
        if attn_mask is not None:
            if attn_mask.dim() == 2:
                attn_mask = attn_mask.unsqueeze(0)
                if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
                    raise RuntimeError('The size of the 2D attn_mask is not correct.')
            elif attn_mask.dim() == 3:
                if list(attn_mask.size()) != [bsz * self.num_heads, query.size(0), key.size(0)]:
                    raise RuntimeError('The size of the 3D attn_mask is not correct.')
            else:
                raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
            # attn_mask's dim is 3 now.

        # reshape to per-head batches
        q = q.contiguous().view(tgt_len, bsz * self.num_heads, head_dim).transpose(0, 1)  # N*n_head x L x E
        if k is not None:
            k = k.contiguous().view(-1, bsz * self.num_heads, head_dim).transpose(0, 1)
        if v is not None:
            v = v.contiguous().view(-1, bsz * self.num_heads, head_dim).transpose(0, 1)

        if q_r is not None:  # N*n_head x 2L-1 x E
            q_r = q_r.contiguous().view(2 * tgt_len - 1, bsz * self.num_heads, head_dim).transpose(0, 1)
        if k_r is not None:
            k_r = k_r.contiguous().view(2 * tgt_len - 1, bsz * self.num_heads, head_dim).transpose(0, 1)

        src_len = k.size(1)

        # content-content attention term
        attn_feat = torch.bmm(q, k.transpose(1, 2))  # N*n_head x L x L

        # add positional terms (re-indexed from relative to absolute offsets)
        if pos_enc is not None:
            # 0.3 s
            attn_feat_pos = torch.einsum('ijk,ilk->ijl', q, k_r)  # N*n_head x L x 2L -1
            attn_feat_pos = self.gather_attn(pos_indexes, bsz * self.num_heads, -1, attn_feat_pos)
            attn_pos_feat = torch.einsum('ijk,ilk->ijl', q_r, k)  # N*n_head x 2L -1 x L
            attn_pos_feat = self.gather_attn(pos_indexes, bsz * self.num_heads, -2, attn_pos_feat)
            # 0.1 s
            attn_output_weights = attn_feat + attn_feat_pos + attn_pos_feat
        else:
            attn_output_weights = attn_feat

        assert list(attn_output_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]

        # apply attn mask (additive)
        if attn_mask is not None:
            attn_output_weights += attn_mask

        # keep the raw (pre-softmax) logits for the third return value
        raw_attn_output_weights = attn_output_weights

        # softmax
        attn_output_weights = F.softmax(attn_output_weights, dim=-1)

        # weighted sum of values, then output projection
        attn_output = torch.bmm(attn_output_weights, v)
        assert list(attn_output.size()) == [bsz * self.num_heads, tgt_len, head_dim]
        attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn_output = F.linear(attn_output, self.out_proj.weight, self.out_proj.bias)

        # average attention weights over heads
        attn_output_weights = attn_output_weights.view(bsz, self.num_heads, tgt_len, src_len)
        attn_output_weights = attn_output_weights.sum(dim=1) / self.num_heads

        # raw attn: summed (not averaged) over heads
        raw_attn_output_weights = raw_attn_output_weights.view(bsz, self.num_heads, tgt_len, src_len)
        raw_attn_output_weights = raw_attn_output_weights.sum(dim=1)

        return attn_output, attn_output_weights, raw_attn_output_weights
| rie1010/stereo-transformer | module/attention.py | attention.py | py | 6,276 | python | en | code | null | github-code | 90 |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 9 16:47:54 2020

@author: liang
"""
N = int(input())
B = list(map(int, input().split()))
# Pay B[0] and B[N-2] once, plus the cheaper of each adjacent pair in between.
answer = B[0] + B[N - 2]
for i in range(1, N - 1):
    answer += min(B[i - 1], B[i])
print(answer)
import cv2
import numpy as np

# Grab one frame from the default camera and highlight "green enough" pixels;
# everything else is shown in greyscale. The original per-pixel Python loops
# are replaced with equivalent numpy vectorised operations (orders of
# magnitude faster on a full frame).
shorten = 0.5
cap = cv2.VideoCapture(0)
ret, a = cap.read()
cap.release()  # the capture is read exactly once; release the device handle
# a = cv2.imread("rgb.jpg",-1)
a = cv2.resize(a, (int(a.shape[1] * shorten), int(a.shape[0] * shorten)))
cv2.imshow('A', a)

# per-channel copies (OpenCV BGR order), kept with the original (h, w, 1) shape
d = a[:, :, 0:1].copy()      # blue channel
f = a[:, :, 1:2].copy()      # green channel (thresholded below)
e = a[:, :, 2:3].copy()      # red channel
blue = d.copy()
green = f.copy()
red = e.copy()

# grey value: per-channel thirds summed then truncated to uint8, matching the
# original per-pixel np.uint8(b/3 + g/3 + r/3) arithmetic exactly
sm = (a[:, :, 0] / 3 + a[:, :, 1] / 3 + a[:, :, 2] / 3).astype(np.uint8)[:, :, None]

# pixels whose green channel exceeds 167 keep their colour; the rest go grey
mask = f > 167                                  # (h, w, 1) boolean mask
f = np.where(mask, 255, 0).astype(np.uint8)     # binary mask image
sample = np.where(mask, a, sm)                  # broadcast grey into non-green pixels

cv2.imshow('sample', sample)
cv2.imshow('f', f)
cv2.waitKey(0)
27497876454 | from torch import nn
from torch.nn import functional as F
from .transformer import Transformer
from .length_predictor import LengthPredictor
class TransformerNonAutoRegressive(Transformer):
    """Transformer variant whose decoder input comes from a LengthPredictor
    applied to the encoder output, rather than from previously generated
    target tokens (non-autoregressive decoding)."""

    def __init__(self, ntoken, d_model, nhead=8, num_encoder_layers=6, num_decoder_layers=6,
                 dim_feedforward=2048, postnorm=True, dropout=0.1, gumbels=False,
                 use_src_mask=False, use_tgt_mask=False, use_memory_mask=False,
                 activation='relu', use_vocab_attn=False, use_pos_attn=False,
                 relative_clip=0, highway=False, device=None, max_sent_length=64,
                 share_input_output_embedding=False, share_encoder_decoder_embedding=False,
                 share_vocab_embedding=False, fix_pos_encoding=True,
                 min_length_change=-20, max_length_change=20, use_src_to_tgt=False):
        # All attention masks are forced off regardless of the caller's
        # arguments: every position may attend everywhere in parallel.
        use_src_mask = use_tgt_mask = use_memory_mask = False
        super(TransformerNonAutoRegressive, self).__init__(
            ntoken, d_model, nhead, num_encoder_layers, num_decoder_layers,
            dim_feedforward, postnorm, dropout, gumbels,
            use_src_mask, use_tgt_mask, use_memory_mask,
            activation, use_vocab_attn, use_pos_attn,
            relative_clip, highway, device, max_sent_length,
            share_input_output_embedding, share_encoder_decoder_embedding,
            share_vocab_embedding, fix_pos_encoding, need_tgt_embed=False)
        # Predicts the target-length change (bounded to
        # [min_length_change, max_length_change]) and produces decoder input.
        self.length_predictor = LengthPredictor(
            d_model, min_value=min_length_change, max_value=max_length_change,
            device=device, diffrentable=False)
        # When True, the length predictor also sees the source embeddings
        # (added to the encoder output) instead of the encoder output alone.
        self.use_src_to_tgt = use_src_to_tgt

    def forward(self, src, src_lengths=None, tgt_lengths=None,
                src_key_padding_mask=None, tgt_key_padding_mask=None):
        '''
        Encode `src`, use the length predictor to derive the decoder input
        (and the predicted delta-length distribution), then decode all target
        positions in parallel.

        Returns (output, delta_length_probs); output still needs a transpose
        for CE loss, e.g. output.permute(0, 2, 1).
        '''
        src_embed_prev = self.src_embedding(src)
        src_embed = self.pos_encoder(src_embed_prev * self.factor)
        # Optional vocab-attention uses the (possibly shared) vocab embedding.
        if self.use_vocab_attn:
            if self.share_vocab_embedding:
                embedding = self.src_embedding.weight
            else:
                embedding = self.vocab_embed
        else:
            embedding = None
        # forward
        encoder_hidden, encoder_output = self.encoder(
            src_embed, embedding=embedding, src_mask=None, src_lengths=src_lengths,
            src_key_padding_mask=src_key_padding_mask)
        if self.use_src_to_tgt:
            # decoder_input, delta_length_probs = self.length_predictor(
            #     src_embed_prev, src_lengths, tgt_lengths)  # B x L x E
            decoder_input, delta_length_probs = self.length_predictor(
                src_embed_prev+encoder_output, src_lengths, tgt_lengths) # B x L x E
        else:
            decoder_input, delta_length_probs = self.length_predictor(
                encoder_output, src_lengths, tgt_lengths) # B x L x E
        tgt_embed = self.pos_decoder(decoder_input)
        decoder_hidden, decoder_output = self.decoder(
            tgt_embed, encoder_output, embedding=embedding, tgt_mask=None,
            memory_mask=None, tgt_lengths=tgt_lengths,
            tgt_key_padding_mask=tgt_key_padding_mask)
        # Output projection: dedicated layer, or tied to the input embedding.
        if not self.share_input_output_embedding:
            output = self.out_projection(decoder_output)
        else:
            output = F.linear(decoder_output, self.src_embedding.weight)
        return output, delta_length_probs # need transpose for CE Loss ! ! ! e.g. output.permute(0, 2, 1)
| liu-hz18/Non-Autoregressive-Neural-Dialogue-Generation | nag/modules/transformer_nonautoregressive.py | transformer_nonautoregressive.py | py | 3,583 | python | en | code | 1 | github-code | 90 |
#haunted house
"""Pygame toy: draws a house whose door toggles open/closed on click.

Clicking the door flips its state, swaps the sun for a picture, and plays
one of two sounds loaded from the working directory.
"""
import pygame, sys
from pygame.locals import *
import random
from getworkingpath import *

#construct the sound filenames (played when the door closes/opens)
fileSound1=getworkingpath()+"/hauntedhouse.wav"
fileSound2=getworkingpath()+"/fakenews1.wav"

#construct the picture filename (shown instead of the sun when door is closed)
filePicture=getworkingpath()+"/trump.png"

#frames per second cap for the main loop
FPS=24

#style=0 => filled, style=1 => thin line, style=4 => thick line
FILLED=0

#window size
WIDTH=700
HEIGHT=400

#initialize the pygame environment
pygame.init()

#sound engine
pygame.mixer.init()

#load the picture
trump=pygame.image.load(filePicture)

# set up the window with size and caption
screen=pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption('Hus')

# creates a clock (used to cap the frame rate)
clock=pygame.time.Clock()

#define the location and size of the door (also the clickable hit box)
door=Rect(325, 220, 40, 80)

#current door state: True = open
dooropenclose=True

while True:
    #limit updates to FPS
    clock.tick(FPS)

    #get events from the event queue
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        #check if something happens with the mouse
        elif event.type == MOUSEBUTTONUP:
            #get mouse position
            posx,posy=event.pos
            #did you click on the door
            if door.collidepoint(posx, posy):
                #toggle door state and pick the matching sound
                dooropenclose=not dooropenclose
                if dooropenclose:
                    #load sound
                    pygame.mixer.music.load(fileSound2)
                else:
                    #load sound
                    pygame.mixer.music.load(fileSound1)
                #play sound
                pygame.mixer.music.play()

    #draw background color to blank the screen
    screen.fill(pygame.Color("gray")) #grey background

    #ground floor
    pygame.draw.rect(screen, pygame.Color("blue"), (200,200,250,100), 0)
    #chimney
    pygame.draw.rect(screen, pygame.Color("brown"), (375, 150, 30, 40), 0)
    #roof
    polygons=[(200,200),(325,150),(450,200)]
    pygame.draw.polygon(screen, pygame.Color("yellow"), polygons, 0)
    #toggle color of door: red = open, black = closed
    doorcolor=pygame.Color("red")
    if not dooropenclose:
        doorcolor=pygame.Color("black")
    #door
    pygame.draw.rect(screen, doorcolor, door, 0)
    if dooropenclose:
        #sun
        pygame.draw.circle(screen, pygame.Color("yellow"), (WIDTH-60, 60), 50)
    else:
        #draw the trump
        screen.blit(trump, (WIDTH-105,15))
    #windows (four white panes)
    pygame.draw.rect(screen, pygame.Color("white"), (255,255,30,30), 0)
    pygame.draw.rect(screen, pygame.Color("white"), (220,255,30,30), 0)
    pygame.draw.rect(screen, pygame.Color("white"), (255,220,30,30), 0)
    pygame.draw.rect(screen, pygame.Color("white"), (220,220,30,30), 0)
    #grass: short vertical lines across the full width
    for x in range(0, WIDTH, 10):
        pygame.draw.line(screen, pygame.Color("green"), (x, 300), (x, 320), 4)
    #tree: brown trunk plus a cluster of green circles
    pygame.draw.rect(screen, pygame.Color("brown"), (550, 250, 20,50), 0)
    pygame.draw.circle(screen, pygame.Color("green"), (550,245), 10)
    pygame.draw.circle(screen, pygame.Color("green"), (540,245), 10)
    pygame.draw.circle(screen, pygame.Color("green"), (550,250), 10)
    pygame.draw.circle(screen, pygame.Color("green"), (560,245), 10)
    pygame.draw.circle(screen, pygame.Color("green"), (555,255), 10)
    pygame.draw.circle(screen, pygame.Color("green"), (565,255), 10)

    #update display
    pygame.display.flip()
16571392437 | """
布赖恩·克尼根算法
通过减一,异或,消除右侧的1
"""
class Solution:
def hammingDistance(self, x, y):
xor = x ^ y
distance = 0
# 直接越过0位,操作最右侧的1!!!!!!简直神一样的操作
while xor:
distance += 1
# remove the rightmost bit of '1'
xor = xor & (xor - 1)
return distance
x = 1
y = 41238
s = Solution()
print(s.hammingDistance(x, y))
| superggn/myleetcode | else/461-hamming-distance-3.py | 461-hamming-distance-3.py | py | 484 | python | zh | code | 0 | github-code | 90 |
# Shop register loop: read products until the user answers N, then report
# totals, the count of items over R$1000, and the cheapest product.
print('-' * 40)
print(f'{"LOJA SUPER BARATÃO":^40}')
print('-' * 40)
total = greater_1000 = cheaper = 0
cheaper_name = ''
while True:
    produto = str(input('Nome do Produto: ')).strip().title()
    price = float(input('Preço: R$'))
    total += price
    if price > 1000:
        greater_1000 += 1
    # cheaper == 0 marks "no product seen yet"
    if cheaper == 0 or price < cheaper:
        cheaper = price
        cheaper_name = produto
    continuar = ' '
    while continuar not in 'SN':
        # BUG FIX: [0] on an empty answer raised IndexError; guard first.
        resposta = str(input('Quer continuar? [S/N] ')).strip().upper()
        if resposta:
            continuar = resposta[0]
    if continuar == 'N':
        break
print(f'{" FIM DO PROGRAMA ":-^40}')
print(f'O total da compra foi R${total:.2f}')
print(f'Temos {greater_1000} produtos custando mais de R$1000.00')
print(f'O produto mais barato foi {cheaper_name} que custa R${cheaper:.2f}')
| thiagokawauchi/curso_em_video_python | ex070.py | ex070.py | py | 792 | python | pt | code | 0 | github-code | 90 |
11306833706 | # Set a cronjob to make it work.
from django.utils import timezone
from django.core.management.base import BaseCommand
from ...models import Planning
from ...tasks import send_campaign
class Command(BaseCommand):
    """Management command meant to run from cron: dispatches every planned
    campaign whose schedule time has passed and marks it as sent."""

    help = 'Checks if a dispatch was planned and in case launches it'

    def handle(self, *args, **options):
        due = Planning.objects.filter(schedule__lte=timezone.now(), sent=False)
        for planning in due:
            list_ids = [subscriber_list.pk for subscriber_list in planning.lists.all()]
            # hand the actual sending off to the async task queue
            send_campaign.delay(list_ids, planning.campaign.pk)
            planning.sent = True
            planning.save()
| otto-torino/tazebao | tazebao/newsletter/management/commands/dispatch_planned_campaigns.py | dispatch_planned_campaigns.py | py | 615 | python | en | code | 5 | github-code | 90 |
33661672597 | # 20191117
class Solution:
def coinChange(self, coins: List[int], amount: int) -> int:
MAX = float('inf')
dp = [0] + [MAX] * amount
for i in range(1, amount + 1):
dp[i] = min([dp[i - c] if i - c >= 0 else MAX for c in coins]) + 1
return [dp[amount], -1][dp[amount] == MAX] | algorithm004-04/algorithm004-04 | Week 05/id_069/LeetCode-322-069.py | LeetCode-322-069.py | py | 323 | python | en | code | 66 | github-code | 90 |
42699222480 | """Django tests for the jugemaj app."""
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse
from .models import Candidate, Election, NamedCandidate, Vote
class JugeMajTests(TestCase):
    """Main test case for the jugemaj app views."""

    def setUp(self):
        """Create users a, b, c (password == username); a is a superuser."""
        a, b, c = (
            User.objects.create_user(guy, email="%s@example.org" % guy, password=guy)
            for guy in "abc"
        )
        a.is_superuser = True
        a.save()

    def test_views(self):
        """Test the django views in the jugemaj app.

        Walks the full flow: create an election (superuser only), add
        candidates, cast one vote, then check the expected winner.
        """
        initial_elections = Election.objects.count()
        initial_candidates = Candidate.objects.count()

        # Check access: list is public, creation redirects anonymous users
        self.assertEqual(self.client.get(reverse("jugemaj:elections")).status_code, 200)
        self.assertEqual(
            self.client.get(reverse("jugemaj:create_election")).status_code, 302
        )
        self.client.login(username="a", password="a")
        self.assertEqual(
            self.client.get(reverse("jugemaj:create_election")).status_code, 200
        )

        # Create an Election (as superuser "a"); expect redirect to its page
        r = self.client.post(
            reverse("jugemaj:create_election"),
            {
                "name": "Élection du roi de Vénus",
                "description": "Vénus n’a plus de roi, que pensez-vous des candidats suivants ?",
                "end_0": "2025-03-09",
                "end_1": "03:19:45",
            },
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(Election.objects.count(), initial_elections + 1)
        slug = Election.objects.last().slug
        self.assertEqual(r.url, f"/election/{slug}")
        self.assertEqual(self.client.get(r.url).status_code, 200)
        self.assertEqual(
            self.client.get(
                reverse("jugemaj:election", kwargs={"slug": slug})
            ).status_code,
            200,
        )
        # The election page must stay readable without being logged in
        self.client.logout()
        self.assertEqual(self.client.get(r.url).status_code, 200)

        # Create some Candidates (requires login; "b" is a regular user)
        self.assertEqual(
            self.client.get(
                reverse("jugemaj:create_candidate", kwargs={"slug": slug})
            ).status_code,
            302,
        )
        self.client.login(username="b", password="b")
        self.assertEqual(
            self.client.get(
                reverse("jugemaj:create_candidate", kwargs={"slug": slug})
            ).status_code,
            200,
        )
        r = self.client.post(
            reverse("jugemaj:create_candidate", kwargs={"slug": slug}),
            {"name": "Capitaine Zorg"},
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r.url, f"/election/{slug}")
        self.assertEqual(Candidate.objects.count(), initial_candidates + 1)
        for name in [
            "Buzz l’Éclair",
            "Timon et Pumba",
            "Sénateur Palpatine",
            "Aragorn",
            "Totoro",
            "Obi-Wan Kenobi",
        ]:
            r = self.client.post(
                reverse("jugemaj:create_candidate", kwargs={"slug": slug}),
                {"name": name},
            )

        # Vote for Totoro as user "b"; voting view requires login
        self.client.logout()
        vote = Vote.objects.create(
            elector=User.objects.get(username="b"),
            candidate=NamedCandidate.objects.get(name="Totoro").candidates.get(
                election__slug=slug
            ),
        )
        url = reverse("jugemaj:vote", kwargs={"pk": vote.pk})
        self.assertEqual(self.client.get(url).status_code, 302)
        self.client.login(username="b", password="b")
        r = self.client.get(url)
        self.assertEqual(r.status_code, 200)
        data = {"csrf_token": r.context["csrf_token"], "choice": "1"}
        self.assertEqual(self.client.post(url, data).status_code, 302)

        # Check that Totoro wins: first in results() and rendered on the page
        self.assertEqual(
            Election.objects.get(slug=slug).results()[0].content_object.name, "Totoro"
        )
        r = self.client.get(reverse("jugemaj:election", kwargs={"slug": slug}))
        self.assertEqual(r.status_code, 200)
        self.assertIn(
            "Totoro",
            next(line for line in r.content.decode().split("\n") if "col-md-5" in line),
        )
| nim65s/django-jugemaj | jugemaj/tests.py | tests.py | py | 4,329 | python | en | code | 0 | github-code | 90 |
# Parse a date like "Lunes, 15/06" and branch per weekday.
fecha = input("Ingrese la fecha en formato 'dia de la semana, numero del dia y numero del mes en formato dia, DD/MM: ")
fecha = fecha.title()
dia_semanas = fecha[0:fecha.find(",")]
dia_numero = int(fecha[fecha.find(" ")+1:fecha.find("/")])
dia_mes = int(fecha[fecha.find("/")+1:])
# BUG FIX: the original used strict "< 31"/"< 12", wrongly rejecting day 31
# and December; valid ranges are 1-31 and 1-12.
fecha_valida = (1 <= dia_numero <= 31) and (1 <= dia_mes <= 12)
# BUG FIX: the original relied on `or`/`and` precedence, so Lunes and Martes
# were accepted even with an invalid date; the grouping below fixes that.
if dia_semanas in ("Lunes", "Martes", "Miercoles") and fecha_valida:
    nivel = input("Ingrese en que nivel se encuentra:").lower()
    if nivel in ("inicial", "intermedio", "avanzado"):
        r = input("¿Se tomaron examanes ese dia? ").lower()
        if r == "si":
            aprobados = int(input("Indique la cantidad de aprobados: "))
            desaprobados = int(input("Indique la cantidad de desaprobados: "))
            total = aprobados + desaprobados
            print("El porcentaje de aprobados fue del: ", round((aprobados * 100 / total), 0), "%")
elif dia_semanas == "Jueves" and fecha_valida:
    asistencia = int(input("Ingrese el porcentaje de personas que asistieron: "))
    if asistencia > 50:
        print("Asistio la mayoria")
    elif asistencia == 50:
        print("Asistieron la mitad")
    else:
        print("Asistio menos de la mayoria")
elif dia_semanas == "Viernes" and dia_numero == 1 and (dia_mes == 1 or dia_mes == 7):
    print("Comienzo de nuevo ciclo")
    alumnos = int(input("Ingrese la cantidad de alumnos nuevos: "))
    arancel = float(input("Ingrese el valor del arancel individual: "))
    arancel_total = alumnos * arancel
    print("El arancel total es de: ", arancel_total, "$")
else:
    print("Ingrese fecha valida")
| Vale-source/Programacion-1 | ejercicios_clase_condicionales.py | ejercicios_clase_condicionales.py | py | 1,793 | python | es | code | 0 | github-code | 90 |
''' Read input from STDIN. Print your output to STDOUT '''
#Use input() to read input from STDIN and use print to write your output to STDOUT


def main():
    """For each test case, count how many opponent beyblades ours can beat.

    Greedy: with both rosters sorted ascending, each of our beyblades beats
    the weakest opponent still standing that it can. The original removed
    items from a list inside a nested loop (O(n^2)); the two-pointer sweep
    below is equivalent and O(n log n).
    """
    T = int(input())
    for _ in range(T):
        N = int(input())  # size line is consumed; lengths come from the lists
        grev = sorted(map(int, input().split()))
        opp = sorted(map(int, input().split()))
        wins = 0
        j = 0  # index of the weakest opponent not yet beaten
        for power in grev:
            if j < len(opp) and power > opp[j]:
                wins += 1
                j += 1
        print(wins)


main()
| pratikroy311/DS-and-Algo-codes | greedy/beybladeChampionship.py | beybladeChampionship.py | py | 631 | python | en | code | 1 | github-code | 90 |
import heapq

# Brute force over every way of taking `left` items from the left end and
# `n - right` items from the right end of the row (each take spends one of
# the k allowed operations), then use any leftover operations to put the
# most-negative taken items back; keep the best total value.
n,k = map(int, input().split())
v = list(map(int, input().split()))
ans = 0
for left in range(n+1):
    for right in range(left, n+1):
        have = []  # min-heap of the values currently held
        heapq.heapify(have)
        i = 0
        while i < left:
            heapq.heappush(have, v[i])
            i += 1
        cost = left + (n - right)  # operations spent on the takes themselves
        if cost > k:
            continue
        i = n-1
        while i >= right:
            heapq.heappush(have, v[i])
            i -= 1
        # while budget remains, discard negatives (most negative first);
        # stop at the first non-negative value and push it back
        while k - cost > 0 and len(have) > 0:
            value = heapq.heappop(have)
            if value >= 0:
                heapq.heappush(have,value)
                break
            cost += 1
        #print("left:{} right:{} ans:{}".format(left, right, sum(have)))
        ans = max(ans, sum(have))
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p03032/s422249671.py | s422249671.py | py | 787 | python | en | code | 0 | github-code | 90 |
32019521748 | import math
EARTH_RADIUS=6371000
class BikeParkNode():
    """One bike-parking installation: location metadata plus coordinates."""

    # Mean Earth radius in metres, used to scale the angular distance.
    # Mirrors the module-level EARTH_RADIUS constant but keeps the class
    # self-contained.
    EARTH_RADIUS = 6371000

    def __init__(self, address, location, street_name, racks, spaces, placement, mo_installed, yr_installed, Lat, Long):
        self.location = location
        self.street_name = street_name
        self.racks = racks
        self.spaces = spaces
        self.yr_installed = yr_installed
        self.mo_installed = mo_installed
        self.Lat = Lat
        self.Long = Long
        self.placement = placement
        self.address = address

    def computeDistance(self, Lat, Long):
        """Approximate ground distance in metres from this node to (Lat, Long).

        Uses the polar-coordinate approximation
        d = r * sqrt(theta1^2 + theta2^2 - 2*theta1*theta2*cos(delta_long)),
        where theta1/theta2 are the two colatitudes. Note this is an
        approximation and can deviate from the great-circle distance at
        large separations. (A dead `dif_Lat` variable from the original
        implementation has been removed; it was never used.)
        """
        dif_Long = math.radians(self.Long) - math.radians(Long)
        theta_1 = (math.pi / 2) - math.radians(self.Lat)
        theta_2 = (math.pi / 2) - math.radians(Lat)
        dist_unfactored_square = (theta_1 * theta_1) + (theta_2 * theta_2) - ((2 * (theta_1 * theta_2)) * math.cos(dif_Long))
        dist_unfactored = math.sqrt(dist_unfactored_square)
        return self.EARTH_RADIUS * dist_unfactored
return dist_factored
"""def main():
#tester main
#should print the distance between lagos and new york
Node1=BikeParkNode("a","b","c","d","e","f","g",6.4531,3.3958)
Node2=BikeParkNode("a","b","c","d","e","f","g",40.7127,-74.0059)
print Node1.computeDistance(Node2.Lat,Node2.Long)/1000
main()"""
| uzonwike/BikeParkingSF | BikeParkNode.py | BikeParkNode.py | py | 1,382 | python | en | code | 0 | github-code | 90 |
73820395177 | class Solution(object):
def reachableNodes(self, edges, M, N):
graph = collections.defaultdict(dict)
for node1, node2, nums in edges:
graph[node1][node2] = nums
graph[node2][node1] = nums
heap = [(0, 0)]
dist = {0: 0}
used = {}
result = 0
while heap:
d, node = heapq.heappop(heap)
if d > dist[node]:
continue
result += 1
for dest, weight in graph[node].items():
v = min(weight, M - d)
used[node, dest] = v
nxtd = d + weight + 1
if nxtd < dist.get(dest, M + 1):
heapq.heappush(heap, (nxtd, dest))
dist[dest] = nxtd
for node1, node2, nums in edges:
result += min(nums, used.get((node1, node2), 0) +
used.get((node2, node1), 0))
return result
| HarrrrryLi/LeetCode | 882. Reachable Nodes In Subdivided Graph/Python 3/solution.py | solution.py | py | 952 | python | en | code | 0 | github-code | 90 |
30255352421 | """
For splitting input into list separated by \n and \t's
"""
class Split_Entry:
    """Split raw pasted text into clean lists on tabs and/or newlines."""

    @staticmethod  # FIX: was defined without self, breaking instance calls
    def split(entry, func=0):
        """Split `entry` on tabs/newlines, strip items, and de-duplicate.

        :param entry: the raw string (or, with func=1, a list to de-dup)
        :param func: 1 means "entry is already a list, just de-duplicate it"
        :return: a list of cleaned items; collapses to the single item when
            only one remains, or the stripped string when nothing splits.
        """
        def remove_dups(values):
            # dict.fromkeys keeps first-occurrence order while dropping dups
            values = list(dict.fromkeys(values))
            if '' in values:
                values.remove('')  # drop a single leftover empty entry
            if len(values) == 1:
                values = values[0]  # single result: return the item itself
            return values

        def strip_list(values, remove_duplicates=False):
            stripped = [value.strip() for value in values]
            if remove_duplicates:
                stripped = remove_dups(stripped)
            return stripped

        if func == 1:
            return remove_dups(entry)
        has_tabs = len(entry.split('\t')) > 1
        has_newlines = len(entry.split('\n')) > 1
        if has_tabs and has_newlines:
            # split on tabs first, then each piece on newlines
            parts = []
            for tab_part in entry.split('\t'):
                parts.extend(strip_list(tab_part.split('\n')))
            return remove_dups(parts)
        if has_tabs:
            return strip_list(entry.split('\t'), True)
        if has_newlines:
            return strip_list(entry.split('\n'), True)
        return entry.strip()
| scionsamurai/Pandas-tkinter-excel | file_pal/_funcs/SplitEntry.py | SplitEntry.py | py | 1,822 | python | en | code | 3 | github-code | 90 |
5828099878 | import json
import logging
import requests
import time
from get_token import get_token
from log_setup import Logging
from program_data import PDApi
# =====================================================================
#
# NetApp / SolidFire
# CPE
# mnode support utility
#
# =====================================================================
# =====================================================================
# Storage healthcheck
# =====================================================================
#============================================================
# set up logging
logmsg = Logging.logmsg()
# Generate a list of clusters
#
class StorageHealthcheck():
    # NOTE(review): the methods below take `repo` instead of `self` and are
    # effectively namespaced functions — call them via the class, not an
    # instance.

    #============================================================
    # Display a list a storage clusters
    # Select cluster for the healthcheck
    def generate_cluster_list(repo):
        """Prompt the operator to pick one cluster from repo.ASSETS and
        return its storage id (clusters are keyed by host name, or IP when
        no host name is set)."""
        logmsg.info("\nAvailable clusters:")
        clusterlist = {}
        for cluster in repo.ASSETS[0]["storage"]:
            if cluster["host_name"]:
                logmsg.info("+ {}".format(cluster["host_name"]))
                clusterlist[(cluster["host_name"])] = cluster["id"]
            else:
                logmsg.info("+ {}".format(cluster["ip"]))
                clusterlist[(cluster["ip"])] = cluster["id"]
        # raises KeyError if the typed name is not in the listing
        userinput = input("Enter the target cluster name: ")
        storage_id = clusterlist[userinput]
        return storage_id

    #============================================================
    # Start the healthcheck
    def run_storage_healthcheck(repo, storage_id):
        """POST a new health-check job for `storage_id` and return the API
        response; exits the process when the API refuses (e.g. a check is
        already running for this target)."""
        get_token(repo)
        url = ('{}/storage/1/health-checks'.format(repo.BASE_URL))
        payload = {"config":{},"storageId":storage_id}
        json_return = PDApi.send_post_return_json(repo, url, payload)
        if json_return:
            if json_return['state'] == "initializing":
                logmsg.info("Healthcheck running...")
                return json_return
            else:
                logmsg.info("Failed return. There may be a Healthcheck already running for this target. See /var/log/mnode-support-util.log for details")
                exit()

    #============================================================
    # Watch the healthcheck progress
    # Write report to file
    def print_healthcheck_status(repo, healthcheck_start):
        """Poll the health-check started by run_storage_healthcheck until it
        completes, logging each new status message, then write the final
        JSON report under repo.SUPPORT_DIR."""
        # prevent the log from filling up with debug messages in the while loop
        logging.getLogger("urllib3").setLevel(logging.WARNING)
        json_return = healthcheck_start
        if json_return:
            msg = "none"
            report_file_name = ('{}StorageHealthcheck-{}.json'.format(repo.SUPPORT_DIR,json_return['storageId']))
            url = ('{}/storage/1/health-checks/{}'.format(repo.BASE_URL,json_return['healthCheckId']))
            while not json_return['dateCompleted']:
                get_token(repo)  # re-auth on every poll in case the token expires
                json_return = PDApi.send_get_return_json(repo, url, 'no')
                if json_return['status']:
                    # only log when the status message actually changes
                    if msg != json_return['status']['message']:
                        msg = json_return['status']['message']
                        logmsg.info(json_return['status']['message'])
            if json_return['dateCompleted']:
                with open(report_file_name, "w") as outfile:
                    print(json.dumps(json_return), file=outfile)
                logmsg.info("Storage Healthcheck completed. Report written to {}".format(report_file_name))
        # Set logging back to debug
        logging.getLogger("urllib3").setLevel(logging.DEBUG)
| aakittel/mnode-support-util | storage_healthcheck.py | storage_healthcheck.py | py | 3,566 | python | en | code | 0 | github-code | 90 |
43812651218 | from enum import Enum
class Variable(Enum):
    """Synthesis variables; repr() yields the raw value string."""

    PROGRAMA = "PROGRAMA"
    TEMPO_EXECUCAO = "TEMPO"
    CUSTOS = "CUSTOS"

    @classmethod
    def factory(cls, val: str) -> "Variable":
        """Map a raw string to its member, defaulting to TEMPO_EXECUCAO."""
        return next(
            (member for member in cls if member.value == val),
            cls.TEMPO_EXECUCAO,
        )

    def __repr__(self) -> str:
        return self.value
| rjmalves/sintetizador-dessem | sintetizador/model/execution/variable.py | variable.py | py | 357 | python | en | code | 2 | github-code | 90 |
73501536937 | from collections import deque
def solution(m, n, puddles):
    """Count right/down lattice paths from (1,1) to (m,n) avoiding puddles.

    m is the grid width, n its height; `puddles` lists [x, y] cells that
    cannot be entered. The count is returned modulo 1000000007.
    """
    blocked = {(px, py) for px, py in puddles}
    # paths[row][col]: number of ways to reach (col, row); 1-indexed grid
    paths = [[0] * (m + 1) for _ in range(n + 1)]
    paths[1][1] = 1
    for row in range(1, n + 1):
        for col in range(1, m + 1):
            if (col, row) in blocked or (col, row) == (1, 1):
                continue
            paths[row][col] = paths[row][col - 1] + paths[row - 1][col]
    return paths[n][m] % 1000000007
def solution_(m, n, puddles):
    """Alternative BFS-based attempt at the same path-counting problem.

    For each cell it tracks [cost, visit count]: the best (lowest) step cost
    seen and how many BFS arrivals matched it. NOTE(review): appears to be
    exploratory code kept for reference; `solution` above is the DP version
    actually exercised below.
    """
    # dp = cost, count
    int(1e9), 0  # NOTE(review): no-op bare expression, leftover scaffolding
    dp = [[[int(1e9), 0] for _ in range(m + 1)] for _ in range(n + 1)]
    board = [[0] * (m + 2) for _ in range(n + 2)]
    # wall off the outer border so neighbour checks never index out of grid
    for i in range(m + 2):
        board[0][i] = -1
        board[n + 1][i] = -1
    for i in range(n + 2):
        board[i][0] = -1
        board[i][m + 1] = -1
    for puddle in puddles:
        x, y = puddle
        board[y][x] = -1
    q = deque()
    dx = [1, 0]  # only right/down moves
    dy = [0, 1]
    # bfs, visited dp
    q.append((1, 1, 0))
    COST = 0
    VISIT_COUNT = 1
    while q :
        x, y, cost = q.popleft()
        if dp[y][x][VISIT_COUNT] > 0 :
            if dp[y][x][COST] < cost :
                continue
            elif dp[y][x][COST] == cost :
                dp[y][x][VISIT_COUNT] += 1
            elif dp[y][x][COST] > cost:
                dp[y][x][COST] = cost
                dp[y][x][VISIT_COUNT] = 1
        else :
            dp[y][x][COST] = cost
            dp[y][x][VISIT_COUNT] += 1
        for i in range(2):
            nx, ny = x + dx[i], y + dy[i]
            if board[ny][nx] != 0:
                continue
            q.append((nx, ny, cost + 1))
    answer = dp[n][m][VISIT_COUNT]
    return answer

print(solution(4, 3, [[2, 2]]))  # ad-hoc smoke test of solution()
18464283569 | #input
import sys
sys.setrecursionlimit(10**7)
N, M = map(int, input().split())
z = [[] for _ in range(N)]  # adjacency list: z[u] = vertices reachable from u
for _ in range(M):
    x, y = map(int, input().split())
    z[x-1].append(y-1)
#output
# All vertex ids are shifted to 0-based indexing on input.
# Let dp[i] be the length of the longest path that starts at vertex i.
# dp[i] = max(dp[v] + 1) over every vertex v directly reachable from i.
visited = [False] * N
memo = [0] * N
def dp(v):
    # Memoized DFS; the graph is assumed acyclic (DAG longest-path problem).
    if visited[v]:
        return memo[v]
    else:
        res = 0
        for u in z[v]:
            res = max(res, dp(u)+1)
        memo[v] = res
        visited[v] = True
        return res
for i in range(N):
    dp(i)
print(max(memo))
12195104069 | from __future__ import absolute_import, division,\
print_function, unicode_literals
import unittest
class CircularQueue(object):
    """Fixed-capacity FIFO queue backed by a ring buffer.

    A queue built for *n* elements allocates n+1 slots: one slot stays
    unused so that the full and empty states remain distinguishable.
    """

    def __init__(self, n):
        self.size = n + 1            # physical slot count (one sentinel slot)
        self.data = [0] * self.size
        self.head = 0                # index of the oldest element
        self.tail = 0                # index one past the newest element

    def empty(self):
        """True when no elements are stored."""
        return self.head == self.tail

    def full(self):
        """True when one more push would overrun the buffer."""
        return (self.tail + 1) % self.size == self.head

    def __len__(self):
        return (self.tail - self.head) % self.size

    def push(self, o):
        """Append *o* at the tail; raise when the queue is full."""
        if self.full():
            raise Exception('queue full')
        self.data[self.tail] = o
        self.tail = (self.tail + 1) % self.size

    def pop(self):
        """Remove and return the oldest element; raise when empty."""
        if self.empty():
            raise Exception('empty queue')
        oldest = self.data[self.head]
        self.head = (self.head + 1) % self.size
        return oldest

    def __iter__(self):
        """Yield the elements oldest-to-newest without consuming them."""
        idx = self.head
        while idx != self.tail:
            yield self.data[idx]
            idx = (idx + 1) % self.size
class CircularQueueTest(unittest.TestCase):
    """Exercises CircularQueue through a full wrap-around life cycle."""

    def test_vcqueue(self):
        queue = CircularQueue(2)
        queue.push(1)
        queue.push(2)
        self.assertEqual(len(queue), 2)
        self.assertEqual(queue.pop(), 1)
        self.assertEqual(len(queue), 1)
        # Pushing after a pop forces the tail index to wrap around.
        queue.push(3)
        self.assertEqual(len(queue), 2)
        self.assertEqual(list(queue), [2, 3])
        self.assertEqual(queue.pop(), 2)
        self.assertEqual(queue.pop(), 3)
        self.assertEqual(len(queue), 0)
        self.assertTrue(queue.empty())
        self.assertFalse(queue.full())
        with self.assertRaises(Exception):
            queue.pop()
        # Refill to capacity and confirm overflow is rejected.
        queue.push(4)
        queue.push(5)
        self.assertFalse(queue.empty())
        self.assertTrue(queue.full())
        with self.assertRaises(Exception):
            queue.push(6)
| shell909090/data_struct_py | cqueue.py | cqueue.py | py | 1,760 | python | en | code | 0 | github-code | 90 |
71726187498 | from Bio import SeqIO
def read_fasta_dict(fasta: str) -> dict:
    """
    Read an input FASTA file and return a dictionary of sequences

    Args:
        fasta (str): The path to the FASTA file

    Returns:
        dict: A dictionary of sequences keyed by the sequence name
    """
    with open(fasta, 'r') as fasta_fh:
        return {record.name: str(record.seq)
                for record in SeqIO.parse(fasta_fh, 'fasta')}
def read_fasta_list(fasta: str) -> list:
    """
    Read an input FASTA file and return a list of sequences

    Args:
        fasta (str): The path to the FASTA file

    Returns:
        list: A list of sequences (sequence names are not included)
    """
    with open(fasta, 'r') as fasta_fh:
        return [str(record.seq) for record in SeqIO.parse(fasta_fh, 'fasta')]
| rpetit3/steamboat-py | steamboat/utils/fasta.py | fasta.py | py | 886 | python | en | code | 1 | github-code | 90 |
69891143978 | from tkinter import *
from quiz_brain import QuizBrain
THEME_COLOR = "#375362"
class QuizInterface:
    """Tkinter front-end for a QuizBrain question source.

    Builds the window, shows one question at a time and tracks the score.
    Constructing an instance blocks in mainloop() until the window closes.
    """
    def __init__(self, quiz_brain: QuizBrain):
        self.quiz_q = quiz_brain
        self.score = 0
        self.window = Tk()
        self.window.title("Quizzler")
        self.window.config(padx=20, pady=20, bg=THEME_COLOR)
        # Canvas holds the question text; background doubles as feedback color.
        self.canvas = Canvas()
        self.canvas.config(height=250, width=300, selectforeground="white")
        self.question_text = self.canvas.create_text(
            150,
            125,
            width=280,
            text="Questions",
            fill=THEME_COLOR,
            font=("Arial", 12, "normal")
        )
        self.canvas.grid(column=0, row=1, columnspan=2, pady=50)
        # NOTE(review): "/10" is hard-coded; assumes a 10-question quiz.
        self.score_label = Label(text=f"Score: {0}/10", bg=THEME_COLOR, foreground="white")
        self.score_label.grid(column=1, row=0)
        image_true = PhotoImage(file="images/true.png")
        image_false = PhotoImage(file="images/false.png")
        self.button_true = Button(image=image_true, highlightthickness=0, command=self.true_pressed)
        self.button_true.grid(column=0, row=2)
        self.button_false = Button(image=image_false, highlightthickness=0, command=self.false_pressed)
        self.button_false.grid(column=1, row=2)
        self.get_next_question()
        self.window.mainloop()  # blocks here until the window is closed
    def get_next_question(self):
        # Show the next question, or the final score once none remain.
        self.canvas.config(bg="white")
        if self.quiz_q.still_has_questions():
            question = self.quiz_q.next_question()
            self.canvas.itemconfig(self.question_text, text=question, fill=THEME_COLOR)
        else:
            self.canvas.itemconfig(self.question_text, text=f"Game over!\nScore:{self.score}/10", fill=THEME_COLOR)
            self.button_true.config(state="disabled")
            self.button_false.config(state="disabled")
    def true_pressed(self):
        # "True" button callback.
        self.give_feedback(self.quiz_q.check_answer("True"))
    def false_pressed(self):
        # "False" button callback.
        self.give_feedback(self.quiz_q.check_answer("False"))
    def give_feedback(self, is_right):
        # Flash green/red, update the score, then advance after 500 ms.
        if is_right:
            self.canvas.config(bg="green")
            self.canvas.itemconfig(self.question_text, text="True", fill="white")
            self.score += 1
            self.score_label.config(text=f"Score: {self.score}/10")
        else:
            self.canvas.config(bg="red")
            self.canvas.itemconfig(self.question_text, text="False", fill="white")
        self.window.after(500, self.get_next_question)
| bdya-s/Trivia-App | ui.py | ui.py | py | 2,508 | python | en | code | 0 | github-code | 90 |
70160976618 | #!/usr/bin/env python3
"""
Viewer class & window management
"""
# Python built-in modules
from itertools import cycle
# External, non built-in modules
import OpenGL.GL as GL # standard Python OpenGL wrapper
import glfw # lean window system wrapper for OpenGL
from nodeModule import *
from trackball import GLFWTrackball
from transform import identity, translate, rotate, vec
class Viewer(Node):
    """ GLFW viewer window, with classic initialization & graphics loop """
    def __init__(self, width=640, height=480):
        super().__init__()
        # version hints: create GL window with >= OpenGL 3.3 and core profile
        glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
        glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
        glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL.GL_TRUE)
        glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
        glfw.window_hint(glfw.RESIZABLE, False)
        self.win = glfw.create_window(width, height, 'Viewer', None, None)
        # Two scene lists: drawables use the identity model matrix, movables
        # use self.model which is driven by the keyboard (see on_key).
        self.drawables = []
        self.movables = []
        # make win's OpenGL context current; no OpenGL calls can happen before
        glfw.make_context_current(self.win)
        # register event handlers
        glfw.set_key_callback(self.win, self.on_key)
        # useful message to check OpenGL renderer characteristics
        print('OpenGL', GL.glGetString(GL.GL_VERSION).decode() + ', GLSL',
              GL.glGetString(GL.GL_SHADING_LANGUAGE_VERSION).decode() +
              ', Renderer', GL.glGetString(GL.GL_RENDERER).decode())
        # initialize GL by setting viewport and default render characteristics
        GL.glClearColor(0.1, 0.1, 0.1, 0.1)
        GL.glEnable(GL.GL_DEPTH_TEST)
        GL.glEnable(GL.GL_CULL_FACE)
        # initialize trackball
        self.trackball = GLFWTrackball(self.win)
        # cyclic iterator to easily toggle polygon rendering modes
        self.fill_modes = cycle([GL.GL_LINE, GL.GL_POINT, GL.GL_FILL])
        self.model = identity()
    def run(self):
        """ Main render loop for this OpenGL window """
        while not glfw.window_should_close(self.win):
            # clear draw buffer and depth buffer (<-TP2)
            GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
            win_size = glfw.get_window_size(self.win)
            view = self.trackball.view_matrix()
            projection = self.trackball.projection_matrix(win_size)
            # draw our scene objects
            # self.draw(projection, view, identity())
            for movable in self.movables:
                movable.draw(projection, view, self.model)
            # draw our scene objects
            for drawable in self.drawables:
                drawable.draw(projection, view, identity())
            # flush render commands, and swap draw buffers
            glfw.swap_buffers(self.win)
            # Poll for and process events
            glfw.poll_events()
    def add(self, *drawables):
        """ add objects to draw in this window """
        self.drawables.extend(drawables)
    def add_movable(self, movable):
        # Movables are rendered with the keyboard-controlled model matrix.
        self.movables.append(movable)
    def on_key(self, _win, key, _scancode, action, _mods):
        """ 'Q' or 'Escape' quits """
        # WASD/EF translate the model matrix; Z/X/C rotate about x/y/z;
        # SPACE resets the GLFW clock.
        if action == glfw.PRESS or action == glfw.REPEAT:
            if key == glfw.KEY_ESCAPE:
                glfw.set_window_should_close(self.win, True)
            if key == glfw.KEY_D:
                self.model = self.model @ translate(0.01, 0, 0)
            if key == glfw.KEY_A:
                self.model = self.model @ translate(-0.01, 0, 0)
            if key == glfw.KEY_W:
                self.model = self.model @ translate(0, 0, 0.01)
            if key == glfw.KEY_S:
                self.model = self.model @ translate(0, 0, -0.01)
            if key == glfw.KEY_E:
                self.model = self.model @ translate(0, 0.01, 0)
            if key == glfw.KEY_F:
                self.model = self.model @ translate(0, -0.01, 0)
            if key == glfw.KEY_Z:
                self.model = self.model @ rotate(vec(1, 0, 0), 10)
            if key == glfw.KEY_X:
                self.model = self.model @ rotate(vec(0, 1, 0), 10)
            if key == glfw.KEY_C:
                self.model = self.model @ rotate(vec(0, 0, 1), 10)
            if key == glfw.KEY_SPACE:
                glfw.set_time(0)
| christophezei/3d-graphics-underwater-scene | src/viewer.py | viewer.py | py | 4,366 | python | en | code | 0 | github-code | 90 |
1602699062 | import os
import logging
# Initialize logging
import re
import sys
import numpy as np
# Process-wide logging configuration: one shared formatter applied to the
# root logger and to any handlers TensorFlow has already installed.
FORMAT = "{levelname:<8s} {asctime} {name:>30.30s}: {message}"
formatter = logging.Formatter(FORMAT, style="{")
# Log level is taken from the TFAIP_LOG_LEVEL environment variable (default INFO).
TFAIP_LOG_LEVEL = getattr(logging, os.environ.get("TFAIP_LOG_LEVEL", "INFO").upper())
this_logger = logging.getLogger(__name__)
logging.basicConfig(level=TFAIP_LOG_LEVEL)
logging.getLogger().handlers[0].setFormatter(formatter)
for handler in logging.getLogger("tensorflow").handlers:
    handler.setFormatter(formatter)
# Define a custom extension handler so that the exceptions are logged to logging
def handle_exception(exc_type, exc_value, exc_traceback):
    # KeyboardInterrupt keeps the default behaviour; everything else is
    # logged as CRITICAL with the full traceback.
    if exc_type is not None and issubclass(exc_type, KeyboardInterrupt):
        sys.__excepthook__(exc_type, exc_value, exc_traceback)
        return
    this_logger.critical("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
sys.excepthook = handle_exception  # Overwrite the excepthook
def logger(name):
    """Convenience alias for :func:`logging.getLogger`."""
    return logging.getLogger(name)
class WriteToLogFile:
    """Context manager that mirrors logging output to a file.

    On entry a FileHandler is attached to the root logger; on exit it is
    removed and the file is flushed and closed.
    """
    def __init__(self, log_dir: str, append: bool, log_name="train.log"):
        assert log_dir is not None
        os.makedirs(log_dir, exist_ok=True)
        self.filename = os.path.join(log_dir, log_name)
        self.file_handler = None
        self.append = append
    def __enter__(self):
        # Attach a file handler using the module-wide formatter and level.
        self.file_handler = logging.FileHandler(self.filename, "a" if self.append else "w", encoding="utf-8")
        self.file_handler.setFormatter(formatter)
        self.file_handler.setLevel(level=TFAIP_LOG_LEVEL)
        logging.getLogger().addHandler(self.file_handler)
        this_logger.info(f"Logging to '{self.filename}'")
    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            handle_exception(exc_type, exc_val, exc_tb)  # Log exception before log gets closed
        logging.getLogger().removeHandler(self.file_handler)
        self.file_handler.flush()
        self.file_handler.close()
class ParseLogFile:
    """Extract the most recent epoch metrics from a training log file."""

    def __init__(self, log_dir: str, log_name="train.log"):
        assert log_dir is not None
        self.filename = os.path.join(log_dir, log_name)

    def get_metrics(self) -> dict:
        """Return a metric-name -> value (str) mapping.

        Parses "name: value" pairs from the last line containing
        "Results of epoch"; returns an empty dict when the log file is absent.
        """
        if not os.path.isfile(self.filename):
            return dict()
        with open(self.filename, "r") as log_fh:
            epoch_lines = [line for line in log_fh if "Results of epoch" in line]
        assert len(epoch_lines) > 0, "Found no result lines in log-file"
        # Pairs look like "loss: 0.123"; only the final epoch line matters.
        pairs = re.findall(r"([\w/]+): ([-0-9.]+)", epoch_lines[-1])
        return {name: value for name, value in pairs}
| Planet-AI-GmbH/tfaip | tfaip/util/logging.py | logging.py | py | 2,969 | python | en | code | 12 | github-code | 90 |
3997175735 | import random
def mergesort(list):
    """Sort the given sequence with a true merge sort and print the result.

    The sorted values are printed on one line, space separated, matching the
    original output format.  The parameter keeps its historical name ``list``
    (which shadows the builtin) so positional/keyword callers keep working.

    The original implementation split at a random index and then ran three
    selection sorts built on ``min``/``remove`` — O(n^2) each; this version
    is a standard O(n log n) top-down merge sort.
    """

    def _merge(left, right):
        # Two-pointer merge of two already-sorted runs (stable: <= keeps
        # equal elements in their original order).
        merged = []
        i = j = 0
        while i < len(left) and j < len(right):
            if left[i] <= right[j]:
                merged.append(left[i])
                i += 1
            else:
                merged.append(right[j])
                j += 1
        merged.extend(left[i:])
        merged.extend(right[j:])
        return merged

    def _sort(values):
        # Recursive midpoint split; lists of length <= 1 are already sorted.
        if len(values) <= 1:
            return values
        mid = len(values) // 2
        return _merge(_sort(values[:mid]), _sort(values[mid:]))

    for value in _sort(list):
        print(value, end=' ')
list = [5, 3, 1, 7, 25, 90, 2]
mergesort(list)
| prince-prakash/practice_session | pp_mergesort.py | pp_mergesort.py | py | 700 | python | en | code | 0 | github-code | 90 |
17126925777 | # -*- coding: utf-8 -*-
# @author: yangyd
# @file: app_keycode.py
# @time: 2019/9/18 19:11
class KeyCode:
    """Key-code constants used to drive the app under test.

    The values match Android KeyEvent key codes (e.g. 3 = HOME, 66 = ENTER).
    """

    # System / navigation keys
    HOME = 3
    BACK = 4
    CALL = 5
    POWER = 26
    ENTER = 66
    # Volume rocker
    VOLUME_UP = 24
    VOLUME_DOWN = 25
| seceast/PyProjects | APP_Auto_Test/study_code/app_keycode.py | app_keycode.py | py | 217 | python | en | code | 0 | github-code | 90 |
8309229598 | from rest_framework.views import APIView
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework import status
from faker import Faker
class PalindromeChecker(APIView):
    """Check whether a submitted string is a palindrome.

    POST body: ``{"palindromeString": "<word>"}``; responds with
    ``isPalindrome`` and a human-readable ``message``.
    """

    # BUG FIX: AllowAny was used as a *base class*, which DRF ignores;
    # permissions must be declared through permission_classes.
    permission_classes = [AllowAny]

    def post(self, request, format=None):
        word = request.data.get('palindromeString')
        # Treat a missing key (None) like an empty string; the original
        # crashed slicing None when the field was absent.
        if not word:
            return Response({
                "message": "palindrome string missing"
            }, status=status.HTTP_400_BAD_REQUEST
            )
        is_palindrome = word == word[::-1]
        if is_palindrome:
            # NOTE(review): "plaindrome" typo kept for response compatibility.
            message = "It's plaindrome"
        else:
            message = "It's not palindrome"
        return Response({"isPalindrome": is_palindrome, "message": message}, status=status.HTTP_200_OK)
class PalindromeComputerMoves(APIView):
    """Guess a palindrome string for the computer player.

    GET responds with ``{"palindromeWord": "<word + mirrored word>"}``.
    """

    # BUG FIX: AllowAny was used as a base class, which DRF ignores;
    # declare it via permission_classes instead.
    permission_classes = [AllowAny]

    def get(self, request, format=None):
        fake = Faker()
        word = fake.word()
        word += word[::-1]  # append the mirror so the result is a palindrome
        return Response({"palindromeWord": word}, status=status.HTTP_200_OK)
| neilravi7/palindrome | palindrome_api/views.py | views.py | py | 1,209 | python | en | code | 1 | github-code | 90 |
2546671091 | import subprocess
# Benchmark: run the Java Main class `times` times and print the mean of the
# floating-point value each run writes to stdout.
path = '../java'
cmd = "java -cp " + path + " Main"
times = 100
s = 0
for i in range(times):
    # BUG FIX: the original referenced the undefined name `cmd_paillier`;
    # run the command assembled above instead.
    s += float(subprocess.check_output(cmd, shell=True).strip())
print(s/times)
| guyu96/encrypted-domain-DST | py/benchmark.py | benchmark.py | py | 214 | python | en | code | 1 | github-code | 90 |
22487633024 | import aiohttp
import bs4
from discord import option
from discord.ext import commands
from util.EmbedBuilder import EmbedBuilder
from util.Logging import log
async def request(word: str) -> bs4.BeautifulSoup:
    """Download the Merriam-Webster dictionary page for *word* and parse it."""
    url = f"https://www.merriam-webster.com/dictionary/{word}"
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            html = await response.text()
    return bs4.BeautifulSoup(html, "html.parser")
async def spellcheck(word: str) -> str:
    """Return Merriam-Webster's first spelling suggestion for *word*.

    Falls back to a fixed message when the page has no suggestion block.
    """
    try:
        page = await request(word.lower())
        suggestion = page.find("p", {"class": "spelling-suggestions"}).text
    except (AttributeError, IndexError):
        # find() returned None (no suggestions on the page).
        return "No spelling suggestions found"
    return suggestion.strip().capitalize()
async def get_word_info(word: str) -> dict:
    """Scrape definition, pronunciation, synonyms/antonyms, first use and
    etymology for *word* from Merriam-Webster.

    Each field falls back to a "No ... found" string when the matching HTML
    element is absent (AttributeError from find() returning None) or an
    index is out of range.
    """
    word_data = {}
    soup = await request(word.lower())
    word_data["word"] = word
    # Definition: text after the first ':' inside the dtText span.
    try:
        word_data["definition"] = soup.find("span", {"class": "dtText"}).text.split(
            ":"
        )[1]
    except (AttributeError, IndexError):
        word_data["definition"] = "No definition found"
    # Phonetic pronunciation, whitespace removed.
    try:
        word_data["phonetic"] = soup.find("span", {"class": "pr"}).text.replace(" ", "")
    except (AttributeError, IndexError):
        word_data["phonetic"] = "No phonetic found"
    # Synonyms: first mw-list; parenthesised entries are skipped.
    try:
        word_data["synonyms"] = soup.find("ul", {"class": "mw-list"}).find_all("li")
        word_data["synonyms"] = ", ".join(
            [
                synonym.text.replace(", ", "").capitalize()
                for synonym in word_data["synonyms"]
                if "(" not in synonym.text
            ]
        )
    except (AttributeError, IndexError):
        word_data["synonyms"] = "No synonyms found"
    # Antonyms: second mw-list on the page.
    try:
        word_data["antonyms"] = soup.find_all("ul", {"class": "mw-list"})[1].find_all(
            "li"
        )
        word_data["antonyms"] = ", ".join(
            [
                antonym.text.replace(", ", "").capitalize()
                for antonym in word_data["antonyms"]
                if "(" not in antonym.text
            ]
        )
    except (AttributeError, IndexError):
        word_data["antonyms"] = "No antonyms found"
    # First known use: text before the first comma of the ety-sl paragraph.
    try:
        word_data["usage"] = soup.find("p", {"class": "ety-sl"}).text
        word_data["usage"] = word_data["usage"].split(",")[0]
    except (AttributeError, IndexError):
        word_data["usage"] = "No use found"
    # Etymology: text before the em-dash of the first et paragraph.
    try:
        word_data["etymology"] = soup.find_all("p", {"class": "et"})[0].text.split("—")[
            0
        ]
    except (AttributeError, IndexError):
        word_data["etymology"] = "No etymology found"
    return word_data
class Dictionary(commands.Cog):
    """Discord cog exposing a /define slash command backed by Merriam-Webster."""
    def __init__(self, bot: commands.Bot) -> None:
        self.bot = bot
    @commands.slash_command(name="define", description="Defines a word.")
    @option("word", str, description="The word to define.", required=True)
    async def define(self, ctx: commands.Context, word: str) -> None:
        """Look up *word*; on a miss, retry once with a spelling suggestion."""
        await ctx.defer()
        # Only single words are supported.
        if " " in word:
            embed = EmbedBuilder(
                title="Error",
                description="Please enter a single word.",
            ).build()
            await ctx.respond(embed=embed)
            return
        try:
            word_data = await get_word_info(word)
            old_word = ""
            # First lookup failed: retry with the site's spelling suggestion.
            if word_data["definition"] == "No definition found":
                old_word = word
                word_data = await get_word_info(await spellcheck(word))
            if word_data["definition"] == "No definition found":
                await ctx.edit(
                    content=f"No results found for **{old_word.capitalize()}**."
                )
                return
            embed = EmbedBuilder(
                title=f"Definition of __{word_data['word'].capitalize()}__",
                description=word_data["definition"],
                fields=[
                    ("Phonetic Pronunciation", word_data["phonetic"], False),
                    ("Synonyms", word_data["synonyms"], True),
                    ("Antonyms", word_data["antonyms"], True),
                    ("First Known Use", word_data["usage"], False),
                    ("Etymology", word_data["etymology"], False),
                ],
            ).build()
            # Mention the correction only when a spellcheck retry happened.
            content = (
                None
                if old_word == ""
                else f"No results found for **{old_word.capitalize()}**. Did you mean **{word_data['word'].capitalize()}**?"
            )
            await ctx.edit(content=content, embed=embed)
            log(f"Define command used by {ctx.author} in {ctx.guild}.")
        except Exception as e:
            # Broad catch so a scraping failure still yields a user-visible embed.
            embed = EmbedBuilder(
                title=f"Definition of __{word.capitalize()}__",
                description=f"An error occurred while trying to define {word.capitalize()}:\n\n{e}",
            ).build()
            await ctx.edit(embed=embed)
def setup(bot) -> None:
    # Pycord extension entry point: register the Dictionary cog on the bot.
    bot.add_cog(Dictionary(bot))
| woseek/pax | cogs/MerriamWebster.py | MerriamWebster.py | py | 5,007 | python | en | code | 0 | github-code | 90 |
18054757969 | N, A, B = map(int, input().split())
# N applicants arrive in order (S read below); A+B is the total pass budget
# and B additionally caps how many "b" applicants may pass.  "c" never passes.
S = input()
count = 0   # total accepted so far
count2 = 0  # "b" applicants accepted so far
for i in range(N):
    if S[i] == "c":
        print("No")
    elif S[i] == "a":
        if count < A+B:
            print("Yes")
            count += 1
        else:
            print("No")
    elif S[i] == "b":
        if count < A+B and count2 < B:
            print("Yes")
            count += 1
            count2 += 1
        else:
            print("No")
74834799977 | import logging
from homeassistant import config_entries, exceptions
from homeassistant.core import callback
from . import InvalidAuth
from .const import *
_LOGGER = logging.getLogger(__name__)
@config_entries.HANDLERS.register(DOMAIN)
class StibMivbConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """STIB-MIVB config flow."""

    VERSION = 1

    def __init__(self):
        """Initialize the STIB config flow."""
        self.api_config = {}

    async def async_step_user(self, user_input=None):
        """Handle the initial user step: validate input and create the entry.

        The unique id combines the stop name and main direction so the same
        stop/direction pair cannot be configured twice.
        """
        errors = {}
        if user_input is not None:
            # NOTE(review): debug trace logged at ERROR level — consider _LOGGER.debug.
            _LOGGER.error("STIBu: " + str(user_input) +"--" + str(self.api_config) + " - " + str(STIB_API_ENTRY) + " -- " + str(errors))
            try:
                uid = f"{user_input[CONF_STOP_NAME]}[{user_input[CONF_MAIN_DIRECTION]}]"
                await self.async_set_unique_id(uid)
                self._abort_if_unique_id_configured()
                return self.async_create_entry(title=uid, data=user_input)
            except InvalidAuth:
                errors["base"] = "faulty_credentials"
            except CannotConnect:
                errors["base"] = "device_unavailable"
        # First call (or validation failure): (re)show the form.
        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema(STIB_API_ENTRY),
            errors=errors,
        )

    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Get the options flow for this handler."""
        return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
    """Handle the options flow (line filter) for a STIB-MIVB config entry."""

    def __init__(self, config_entry: config_entries.ConfigEntry):
        """Initialize options flow."""
        self.config_entry = config_entry

    async def async_step_init(self, user_input=None):
        """Entry point of the options flow; delegates to the line filter step."""
        # NOTE(review): debug trace logged at ERROR level — consider _LOGGER.debug.
        _LOGGER.error("STBI"+str(user_input))
        return await self.async_step_line_filter()

    async def async_step_line_filter(self, user_input=None):
        """Choose line filter."""
        _LOGGER.error("STBLF"+str(user_input))
        errors = {}
        if user_input is not None:
            # Comma-separated line numbers -> sorted list of ints.
            lines_filter = [int(n) for n in user_input.get(CONF_MONITORED_LINES).split(",")]
            lines_filter.sort()
            # NOTE(review): options are mutated in place and the form is shown
            # again — this step never calls async_create_entry, so the flow
            # does not finish; verify against Home Assistant options-flow docs.
            self.config_entry.options[CONF_MONITORED_LINES] = lines_filter
            self.config_entry.options[CONF_MAX_PASSAGES] = user_input.get(CONF_MAX_PASSAGES)
        return self.async_show_form(
            step_id="line_filter",
            data_schema=vol.Schema(LINE_FILTERS_ENTRY),
            errors=errors,
        )
# Raised by the config flow when the STIB API cannot be reached.
class CannotConnect(exceptions.HomeAssistantError):
    """Error to indicate we cannot connect."""
| helldog136/ha-stib-mivb | custom_components/stib_mivb/config_flow.py | config_flow.py | py | 2,762 | python | en | code | 1 | github-code | 90 |
26963193227 | frutas = {"Plátano": 1.35,
"Manzana": 0.80,
"Pera": 0.85,
"Naranja": 0.70}
# Prompt for a fruit and a quantity; .title() normalizes the answer so it
# matches the Title Case keys of the `frutas` price table above.
fruta = input("Que fruta decea comprar? ").title()
kilos = int(input("Cuantos kilos? "))
if fruta in frutas:
    # Unit price (per kilo) times quantity.
    print(kilos, 'kilos de', fruta, 'valen', frutas[fruta]*kilos, "$")
else:
    print("Lo siento, la fruta", fruta, "no está disponible.")
41327381740 | #Basic Calculator Made Using Tkinter, will upgrade it into a Scientic Calculator soon, with a mode to switch.
from tkinter import *
import math
# Main application window: fixed 312x370, non-resizable.
top = Tk()
top.geometry("312x370")
top.resizable(0,0)
top.title("Calculator")
def bt_click(item):
    """Append the pressed button's symbol to the pending expression."""
    global expression
    expression = f"{expression}{item}"
    input_text.set(expression)
def bt_clear():
    """Reset the pending expression and blank the display."""
    global expression
    expression = ""
    input_text.set(expression)
def bt_equal():
    """Evaluate the pending expression and show the result.

    NOTE(review): eval() is acceptable here only because the keypad emits
    nothing but digits and operators; it would be unsafe on free text.
    """
    global expression
    try:
        result = str(eval(expression))
    except (SyntaxError, NameError, ValueError, ZeroDivisionError):
        # BUG FIX: invalid expressions (e.g. "5*", "" or division by zero)
        # used to raise inside the Tk callback; show "Error" instead.
        expression = ""
        input_text.set("Error")
        return
    input_text.set(result)
    # Keep the result as the new expression so calculations can chain.
    expression = result
def bt_square():
    """Replace the displayed value with its square."""
    global expression
    # BUG FIX: int() raised ValueError for decimal input such as "2.5";
    # fall back to float so squaring works for any displayed number while
    # integer inputs keep producing integer results.
    try:
        expression = int(expression) ** 2
    except ValueError:
        expression = float(expression) ** 2
    input_text.set(expression)
def bt_root():
    """Replace the displayed value with its square root."""
    global expression
    # BUG FIX: int() raised ValueError for decimal input such as "2.25";
    # fall back to float for non-integer display values.
    try:
        expression = int(expression) ** 0.5
    except ValueError:
        expression = float(expression) ** 0.5
    input_text.set(expression)
def factorial_():
    """Replace the displayed value with its factorial."""
    global expression
    # BUG FIX: int() raised on decimal strings and math.factorial raises on
    # negative values; both crashed the Tk callback.  Show "Error" instead.
    try:
        expression = math.factorial(int(expression))
    except ValueError:
        expression = "Error"
    input_text.set(expression)
# Shared UI state: the expression string being typed and the StringVar
# bound to the display entry.
expression = ""
input_text = StringVar()
input_frame = Frame(top, width=312, height=100, bd=0, highlightbackground="#3B3A3A", highlightcolor="#3B3A3A", highlightthickness=1)
input_frame.pack(side=TOP)
input_field = Entry(input_frame, font=('arial', 18), textvariable=input_text,width=75, bg="#202121", bd=0, justify=RIGHT,fg="White")
# NOTE(review): grid() followed by pack() on the same widget — only the
# pack() call takes effect here; confirm the grid() line is intentional.
input_field.grid(row=0, column=0)
input_field.pack(ipady=10)
button_frame = Frame(top, width=312, height=372.5, bg="#202121")
button_frame.pack()
#buttons
root=Button(button_frame,text="√",fg="White",width=10,height=3,bd=0,bg = "#333332",command=lambda: bt_root())
root.grid(row=0,column=0, padx=1, pady=1)
squarex=Button(button_frame,text="x^y",fg="White",width=10,height=3,bd=0,bg = "#333332",command=lambda: bt_click("**"))
squarex.grid(row=0,column=1, padx=1, pady=1)
square=Button(button_frame,text="x²",fg="White",width=10,height=3,bd=0,bg = "#333332",command=lambda: bt_square())
square.grid(row=0,column=2, padx=1, pady=1)
fact=Button(button_frame,text="x!",fg="White",width=10,height=3,bd=0,bg = "#333332",command=lambda: factorial_())
fact.grid(row=0,column=3, padx=1, pady=1)
#row1
clear=Button(button_frame,text="C",fg="White",width=33,height=3,bd=0,bg = "#333332",command=lambda: bt_clear())
clear.grid(row=1,column=0,columnspan=3, padx=1, pady=1)
div=Button(button_frame,text="/",fg="White",width=10,height=3,bd=0,bg = "#333332",command=lambda: bt_click("/"))
div.grid(row=1,column=3, padx=1, pady=1)
#row2
# NOTE(review): the variable names below are swapped relative to their
# labels (`nine` shows "7", `seven` shows "9"); the wired digits are correct.
nine=Button(button_frame,text="7",fg="White",width=10,height=3,bd=0,bg = "#3B3A3A",command=lambda: bt_click(7))
nine.grid(row=2,column=0, padx=1, pady=1)
eight=Button(button_frame,text="8",fg="White",width=10,height=3,bd=0,bg = "#3B3A3A",command=lambda: bt_click(8))
eight.grid(row=2,column=1, padx=1, pady=1)
seven=Button(button_frame,text="9",fg="White",width=10,height=3,bd=0,bg = "#3B3A3A",command=lambda: bt_click(9))
seven.grid(row=2,column=2, padx=1, pady=1)
product=Button(button_frame,text="*",fg="White",width=10,height=3,bd=0,bg = "#333332",command=lambda: bt_click("*"))
product.grid(row=2,column=3, padx=1, pady=1)
#row3
four=Button(button_frame,text="4",fg="White",width=10,height=3,bd=0,bg = "#3B3A3A",command=lambda: bt_click(4))
four.grid(row=3,column=0, padx=1, pady=1)
five=Button(button_frame,text="5",fg="White",width=10,height=3,bd=0,bg = "#3B3A3A",command=lambda: bt_click(5))
five.grid(row=3,column=1, padx=1, pady=1)
six=Button(button_frame,text="6",fg="White",width=10,height=3,bd=0,bg = "#3B3A3A",command=lambda: bt_click(6))
six.grid(row=3,column=2, padx=1, pady=1)
subtract=Button(button_frame,text="-",fg="White",width=10,height=3,bd=0,bg = "#333332",command=lambda: bt_click("-"))
subtract.grid(row=3,column=3, padx=1, pady=1)
#row4
one=Button(button_frame,text="1",fg="White",width=10,height=3,bd=0,bg = "#3B3A3A",command=lambda: bt_click(1))
one.grid(row=4,column=0, padx=1, pady=1)
two=Button(button_frame,text="2",fg="White",width=10,height=3,bd=0,bg = "#3B3A3A",command=lambda: bt_click(2))
two.grid(row=4,column=1, padx=1, pady=1)
three=Button(button_frame,text="3",fg="White",width=10,height=3,bd=0,bg = "#3B3A3A",command=lambda: bt_click(3))
three.grid(row=4,column=2, padx=1, pady=1)
add=Button(button_frame,text="+",fg="White",width=10,height=3,bd=0,bg = "#333332",command=lambda: bt_click("+"))
add.grid(row=4,column=3, padx=1, pady=1)
#row5
zero=Button(button_frame,text="0",fg="White",width=22,height=3,bd=0,bg="#3B3A3A",command=lambda:bt_click(0))
zero.grid(row=5,column=0,columnspan=2,padx=1,pady=1)
point=Button(button_frame,text=".",fg="white",width=10,height=3,bd=0,bg="#333332",command=lambda:bt_click("."))
point.grid(row=5,column=2,padx=1,pady=1)
evalu=Button(button_frame,text="=",fg="White",width=10,height=3,bd=0,bg="#333332",command=lambda:bt_equal())
evalu.grid(row=5,column=3,padx=1,pady=1)
# Enter the Tk event loop; blocks until the window is closed.
top.mainloop()
| rishuu42/TkinterRishitt | Calculator/basiccalculator.py | basiccalculator.py | py | 4,745 | python | en | code | 0 | github-code | 90 |
74110647977 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 20 20:08:16 2019
@author: sarfaraz
"""
import numpy as np
import gdal
# Reading Header file
def hdr_read(path):
    """Parse an ENVI-style .hdr file.

    Reads the BANDS/ROWS/COLS/DATATYPE fields and returns them as
    ``(rows, cols, bands, datatype)`` with the dimensions converted to int.
    Unknown lines are ignored.
    """
    row = 0
    col = 0
    bands = 0
    datatype = None
    with open(path, "r") as f:
        for l in f:
            k = l.split()
            if not k:
                # BUG FIX: blank lines used to raise IndexError on k[0].
                continue
            if k[0] == "BANDS:":
                bands = k[1]
            elif k[0] == 'ROWS:':
                row = k[1]
            elif k[0] == 'COLS:':
                col = k[1]
            elif k[0] == 'DATATYPE:':
                datatype = k[1]
    # (The original also computed an unused (multiplier, dtype-name) pair
    # from `datatype`; that dead code has been removed.)
    row = int(row)
    col = int(col)
    bands = int(bands)
    return row, col, bands, datatype
import matplotlib.pyplot as plt
# Reading Image file
def ReadBilFile(bil, bands, pixels):
    """Read a BIL raster with GDAL and stack all bands into one array.

    Returns a (pixels, bands) uint16 array with one column per band.  As a
    side effect, once bands 3, 4 and 9 have been read they are scaled to
    8 bit and shown as an RGB composite with matplotlib (debug prints kept
    from the original).
    """
    extract_band = 1
    image = np.zeros([pixels, bands], dtype=np.uint16)
    gdal.GetDriverByName('EHdr').Register()
    bil = str(bil)
    img = gdal.Open(bil)
    x = 1
    while bands >= extract_band:
        bandx = img.GetRasterBand(extract_band)
        print("\n\n\nBandx data\n:--------------")
        datax = bandx.ReadAsArray()
        # Remember bands 3/4/9 as the R/G/B channels of the composite.
        if x == 3:
            r = datax
        if x == 4:
            g = datax
        if x == 9:
            b = datax
        x = x + 1
        if x == 10:
            plt.figure(figsize=(150, 150))
            r_max, r_min = r.max(), r.min()
            g_max, g_min = g.max(), g.min()
            # BUG FIX: the blue channel was scaled with the *green* band's
            # min/max (b_max, b_min = g.max(), g.min()), distorting the RGB
            # composite.  NOTE(review): a constant band (max == min) still
            # divides by zero here.
            b_max, b_min = b.max(), b.min()
            r_scale = (255 * ((r - r_min) / (r_max - r_min))).astype(np.uint8)
            g_scale = (255 * ((g - g_min) / (g_max - g_min))).astype(np.uint8)
            b_scale = (255 * ((b - b_min) / (b_max - b_min))).astype(np.uint8)
            print('red component: ', r_scale)
            print('green component: \n', g_scale, '\nblue component : ', b_scale)
            print("printing the image")
            rgb = np.dstack((r_scale, g_scale, b_scale))
            print("Red scale :\n", r_scale)
            print("Greed scale : \n", g_scale)
            print("Blue scale :\n", b_scale)
            plt.axis('off')
            plt.imshow(rgb, cmap='hot', interpolation='nearest')
            plt.show()
            plt.imshow(r_scale)
            plt.subplots(3, 1, 2)
            plt.imshow(g_scale)
            plt.subplots(3, 1, 3)
            plt.imshow(b_scale)
            plt.show()
        # Flatten the band and store it as one column of the output array.
        temp = datax
        store = temp.reshape(pixels)
        # Vectorized column assignment (the original copied pixel by pixel).
        image[:, extract_band - 1] = store
        extract_band = extract_band + 1
    return image
from pathlib import Path
# Driver script: parse the June-19 stacked header, then read all bands of
# the matching BIL raster (the image is 499x499 pixels).
data_folder = Path("/home/sarfaraz/Desktop/isro@internship/GAN Cloud Images-20191215T170608Z-001")
# =============================================================================
file_to_open = data_folder / "stacked_june_19.hdr"
print(file_to_open)
a = hdr_read(file_to_open)
print(type(a))
print(a)
print("rows: ",a[0], "\ncolumn : ",a[1], "\nbands : ",a[2], "\ndatatype : ",a[3])
#
file_bil = data_folder / 'stacked_june_19'
pixels = 499*499
op1 = ReadBilFile(file_bil,a[2], a[0]*a[1])
print(type(op1))
print(op1)
#
bands = 10
# =============================================================================
image = np.zeros([pixels, bands], dtype=np.uint16)
# =============================================================================
# print("------------------------------------\nStacked image june 24 analysis\n")
# header_file = data_folder / 'stacked_june_24.hdr'
# b = hdr_read(header_file)
# print(type(b))
# print(b)
# print("rows : ",b[0], "\nColumn : ",b[1], "\nbands : ",b[2], "\ndatatype: ",b[3])
#
#
#
# file_bil = data_folder / 'stacked_june_24'
#
# op2 = ReadBilFile(file_bil, b[2], b[0]*b[1])
# print(type(op2))
# print(op2)
#
# =============================================================================
| coder-tle/GAN_work_internship | task1/read2.py | read2.py | py | 5,190 | python | en | code | 0 | github-code | 90 |
38414135944 | """
2-D array consists of a collection of elements organized into rows and columns
Elements are referenced as (r, c), indexes starting at 0
Array2D(rows, columns)
num_rows(), num_cols()
clear(value) -- Clears the array by setting each element to value
get_item(r,c), set_item(r, c, value)
"""
from Array_ADT import Array
class Array2D:
    """Two-dimensional array of fixed size, stored as an Array of row Arrays.

    Elements are addressed with a (row, col) tuple, both zero-based.
    """

    def __init__(self, rows, columns):
        # One backing Array per row, all of equal length.
        self._rows = Array(rows)
        for r in range(rows):
            self._rows[r] = Array(columns)

    def num_rows(self):
        """Number of rows in the array."""
        return len(self._rows)

    def num_cols(self):
        """Number of columns in the array."""
        return len(self._rows[0])

    def clear(self, value):
        """Set every element of the array to *value*."""
        for r in range(self.num_rows()):
            self._rows[r].clear(value)

    def __getitem__(self, tuple_idx):
        """Return the element at (row, col)."""
        assert len(tuple_idx) == 2, "Invalid number of array subscripts"
        row, col = tuple_idx
        assert 0 <= row < self.num_rows() \
            and 0 <= col < self.num_cols(), \
            "Array subscript out of range"
        return self._rows[row][col]

    def __setitem__(self, tuple_idx, value):
        """Store *value* at (row, col)."""
        assert len(tuple_idx) == 2, "Invalid number of array subscripts"
        row, col = tuple_idx
        assert 0 <= row < self.num_rows() \
            and 0 <= col < self.num_cols(), \
            "Array subscript out of range"
        self._rows[row][col] = value
| Sakchhi/Algo_DS_Python | 2_Arrays/Array2d_ADT.py | Array2d_ADT.py | py | 1,450 | python | en | code | 0 | github-code | 90 |
6818786600 | # from googletrans import Translator
import pandas as pd
# translator = Translator()
# data = pd.read_excel('2semestr/1dz/3lesson/Grades.xlsx')
# a =translator.translate(data, src = 'en', dest ='uk')
# print(a.text)
# print(a.text)
# Price table as a two-column DataFrame.
products = ['Water','Milk', 'Melon', 'Apples']
price = ['15','50','200','60']
# BUG FIX: `columns=` was passed to list() instead of pd.DataFrame(),
# which raised "list() takes no keyword arguments" at runtime.
data = pd.DataFrame(list(zip(products, price)), columns=['Products', 'Price'])
| KostyaGoodAlive/python | 2semestr/1dz/3lesson/main.py | main.py | py | 395 | python | en | code | 0 | github-code | 90 |
43470354014 | from flask_app import app
from flask_app.models.dojo import Dojo
from flask import render_template,redirect,request,session,flash
#read all Route
@app.route("/")
def main_page():
    """Landing page: render the list of every dojo."""
    all_dojos = Dojo.get_all_dojos()
    return render_template("dojos.html", dojos=all_dojos)
@app.route("/dojo/create", methods=['POST'])
def create_dojo():
    """Create a dojo from the submitted form, then return to the index."""
    form_data = {"name": request.form['name']}
    Dojo.create_new_dojo(form_data)
    return redirect("/")
@app.route("/dojo/<int:id>")
def show_dojo(id):
    # Detail page for a single dojo, looked up by its integer id.
    data={
        "id": id
    }
    dojo= Dojo.get_one_dojo(data)
    return render_template("dojo_info.html", dojo=dojo) | AntonioSC1/Dojos_and_NInjas_core | flask_app/controllers/dojos_controller.py | dojos_controller.py | py | 620 | python | en | code | 0 | github-code | 90
3385316255 | # playstore-country-check - testversion - by treysis / https://github.com/treysis
# License: LGPL 2.1
#
# Checks the availability of apps in local variants of Google's PlayStore.
#
# Relies on google-play-scraper (https://github.com/JoMingyu/google-play-scraper),
# install with:
# pip install google-play-scraper
#
# --- DEPRECATED --- for testing or learning ;)
try:
from google_play_scraper import app
except ImportError as error:
print("Error: error while loading 'google-play-scraper'! Install with 'pip install google-play-scraper' and try again.")
exit()
from sys import stdout
from multiprocessing.dummy import Pool
from itertools import product
# Initialize country codes and names
GL_CC = {
"ad": "Andorra",
"at": "Austria",
"gd": "Grenada",
"ge": "Georgia",
"gf": "French Guiana",
"gg": "Guernsey",
"gh": "Ghana",
"gi": "Gibraltar",
"gl": "Greenland",
"gm": "The Gambia",
"gn": "Guinea",
"gp": "Guadeloupe",
"gq": "Equatorial Guinea",
"gr": "Greece",
"gs": "South Georgia and the South Sandwich Islands",
"gt": "Guatemala",
"gu": "Guam",
"gw": "Guinea-Bissau",
"gy": "Guyana",
"hk": "Hong Kong",
"hm": "Heard Island and McDonald Islands",
"hn": "Honduras",
"hr": "Croatia",
"ht": "Haiti",
"hu": "Hungary",
"sk": "Slovakia",
"sl": "Sierra Leone",
"sm": "San Marino",
"sn": "Senegal",
"so": "Somalia",
"sr": "Suriname",
"ss": "South Sudan",
"wf": "Wallis and Futuna",
"ws": "Samoa",
"xk": "Kosovo",
"ye": "Yemen",
"yt": "Mayotte",
"za": "South Africa",
"zm": "Zambia"
}
cwaa = list()
cwana = list()
delete = "\b" * 15
def crawl(a, k):
    """Check whether app `a` is listed in the Play Store of country `k`.

    Appends the country name (GL_CC[k]) to the module-level list `cwaa`
    when the app is available, or to `cwana` when it is not, and returns
    `k`. NOTE(review): called from multiple worker threads and mutates
    shared lists — presumably relies on CPython's GIL making
    list.append atomic; confirm if porting to another interpreter.
    """
    # Progress indicator
    print("{0}{0}{1:{2}}".format(delete, len(cwaa)+len(cwana), 3), end=" of " + str(len(GL_CC)) + "... (current: " + k + ")")
    stdout.flush()
    #print(k, end = ',')
    # Request app data from google-play-scraper with country code. If available, "released" will
    # contain some release date. If not available in the selected country, this value is empty.
    if app(a, country=k)['released'] is not None:
        cwaa.append(GL_CC[k])
        print("{0}{0}{1:{2}}".format(delete, len(cwaa)+len(cwana), 3), end=" of " + str(len(GL_CC)) + "... (current: " + k + ")")
        stdout.flush()
    else:
        cwana.append(GL_CC[k])
        print("{0}{0}{1:{2}}".format(delete, len(cwaa)+len(cwana), 3), end=" of " + str(len(GL_CC)) + "... (current: " + k + ")")
        stdout.flush()
    return(k)
def main():
    """Check every country in GL_CC for the Corona-Warn-App in parallel
    and print the sorted availability / non-availability lists.
    """
    print("--\nplaystore-country-check - testversion:\nChecking the enabled PlayStore countries for Germany's", \
        "Corona-Warn-App\n(package name: de.rki.coronawarnapp).")
    print("Sourcecode @ https://github.com/treysis/playstore-country-check\n\n")
    # Initialize variables.
    #cwaa = list()
    #cwana = list()
    #delete = "\b" * 15
    # Number of parallel threads. 10 seems safe to use for rate limiting.
    n_Psize = 10
    # Start checking
    print("Checking countries (using " + str(n_Psize) + " parallel threads):")
    #i=0
    pool = Pool(n_Psize)
    appname = ['de.rki.coronawarnapp']
    # Cross-product expands to one (app, country_code) pair per country.
    thread_result = pool.starmap(crawl, product(appname, list(GL_CC.keys())))
    print("...done!\n")
    print(thread_result)
    # Prepare and format output
    cwaa.sort()
    cwana.sort()
    print("App available in " + str(len(cwaa)) + " local PlayStores:")
    print(*cwaa, sep=", ", end=".\n")
    print("\nNot available in:")
    print(*cwana, sep=", ", end=".\n")
    print("\n--- Finished, exiting... ---")
    return
if __name__ == '__main__':
main()
| treysis/playstore-country-check | pcc-threading.py | pcc-threading.py | py | 3,658 | python | en | code | 1 | github-code | 90 |
4274961198 | ####################### DO NOT MODIFY THIS CODE ########################
menu = {
"original cupcake": 2,
"signature cupcake": 2.750,
"coffee": 1,
"tea": 0.900,
"bottled water": 0.750
}
original_flavors = ["vanilla", "chocolate", "strawberry", "caramel", "raspberry"]
original_price = 2
signature_price = 2.750
############################# Start Here! ##############################
cupcake_shop_name = "shaafal"
signature_flavors = ["tuna" , "salmon" , "red herring "]
order_list = []
def print_menu():
    """Print every menu item together with its price in KD."""
    for item_name, item_price in menu.items():
        print("- '%s' ( KD %s)" % (item_name, item_price))
def print_originals():
    """List the original-flavor cupcakes (all sold at original_price)."""
    print("Our original flavor cupcakes (KD %s each):" % original_price)
    for flavor in original_flavors:
        print("- '%s' " % flavor)
def print_signatures():
    """List the signature-flavor cupcakes (all sold at signature_price)."""
    print("Our signature flavor cupcake (KD %s each):" % signature_price)
    for flavor in signature_flavors:
        print("- '%s' " % flavor)
def is_valid_order(order):
    """Return True when `order` names anything the shop sells."""
    return (order in menu
            or order in signature_flavors
            or order in original_flavors)
def get_order():
    """
    Repeatedly ask customer for order until they end their order by typing "Exit".

    Returns the list of valid item names entered; invalid entries are
    rejected and the customer is asked to retype them.
    NOTE(review): the prompt strings contain typos ("ENTERE", "MENUE",
    "OREDER") but they are user-facing output and are left unchanged here.
    """
    order_list = []
    # your code goes here!
    print("ENTERE YOUR ORDER FROM THE MENUE OR PRESS Exit")
    while True:
        order_l =input()
        if (order_l)== "Exit":
            print("THANK YOU FOR VISITING")
            break
        elif is_valid_order(order_l)== True:
            order_list.append(order_l)
            print("ENTERE ANOTHER OREDER")
        else :
            print (" PRINT THE ORDER CORRECTLY")
    return order_list
def accept_credit_card(total):
    """Return True when the bill (KD 5 or less) may be paid by credit card."""
    return total <= 5
def get_total_price(order_list):
    """
    Calculate and return the total price of the order.

    Menu items are priced individually; anything not on the menu is a
    cupcake flavor billed at the original or signature flat price.
    """
    total = 0.0
    for order in order_list:
        if order in menu:
            total += menu[order]
        elif order in original_flavors:
            total += original_price
        else:
            # BUG FIX: this branch used to OVERWRITE the running total
            # (total = signature_price) instead of adding to it, so any
            # signature cupcake reset the whole bill to one cupcake's price.
            total += signature_price
    return total
def print_order(order_list):
    """Print the customer's receipt: items, total, payment method, thanks."""
    print()
    print("Your order is: ")
    for item in order_list:
        print(item)
    total = get_total_price(order_list)
    print("That\'ll be KD %s " % (total))
    payment = "credit card " if accept_credit_card(total) else "cash only "
    print("this order is eligible for " + payment)
    print(" THANK YOU For SHOPPING AT %s : " % (cupcake_shop_name))
| shaafalab/FoundationsProjectOne | shop.py | shop.py | py | 3,183 | python | en | code | null | github-code | 90 |
17007914801 | from SimEnvironment import SimEnvironment
from Icarous import Icarous
from IcarousRunner import IcarousRunner
from ichelper import GetHomePosition,ReadTrafficInput
import argparse
def checkDAAType(value):
    """argparse type-checker: accept only known DAA module names, uppercased."""
    upper = value.upper()
    if upper not in ('DAIDALUS', 'ACAS'):
        raise argparse.ArgumentTypeError("%s is an invalid DAA option" % value)
    return upper
# --- Command-line interface and simulation driver ---
parser = argparse.ArgumentParser(description=\
    " Run a fast time simulation of Icarous with the provided inputs.\n\
    - See available input flags below.\n\
    - Icarous output are written to log file: SPEEDBIRD.json\n\
    - Use VisualizeLog.py to animate/record simoutput.\n\
    - See VisualizeLog.py --help for more information.",
    formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-d", "--daaType", type=checkDAAType, default='DAIDALUS',
                    help='Specify DAA modules to be used. DAIDALUS,ACAS,...')
parser.add_argument("-f", "--flightplan", type=str, default='/home/josuehfa/System/icarous/Python/pycarous/data/flightplan.txt',
                    help='flightplan file. default: data/flightplan.txt')
parser.add_argument("-t", "--traffic", type=str, default='/home/josuehfa/System/icarous/Python/pycarous/data/traffic.txt',
                    help='File containing traffic initial condition. See data/traffic.txt for example')
parser.add_argument("-l", "--tlimit", type=float, default=1000,
                    help='set max sim time limit (in seconds). default 300 s')
parser.add_argument("-p", "--params", type=str, default='/home/josuehfa/System/icarous/Python/pycarous/data/icarous_default.parm',
                    help='icarous parameter file. default: data/icarous_default.parm')
parser.add_argument("-g", "--geofence", type=str, default='',
                    help='geofence xml input. example: data/geofence2.xml')
parser.add_argument("-c", "--daaConfig", type=str, default='/home/josuehfa/System/icarous/Python/pycarous/data/DaidalusQuadConfig.txt',
                    help='specify configuration file if one is required by the DAA module specified by -d/--daaType')
parser.add_argument("-v", "--verbosity", type=int, choices=[0,1,2], default=1,
                    help='Set print verbosity level')
parser.add_argument("--realtime", dest="fasttime", action="store_false",
                    help='Run sim in real time')
parser.add_argument("--fasttime", dest="fasttime", action="store_true",
                    help='Run sim in fast time (not available for cFS simulations)')
parser.add_argument("--cfs", action="store_true",
                    help='Run Icarous using cFS instead of pycarous')
# NOTE(review): argparse `type=bool` treats ANY non-empty string (including
# "False") as True; consider action="store_true" in a future CLI revision.
parser.add_argument("-u", "--uncertainty", type=bool, default=False,
                    help='Enable uncertainty')
parser.add_argument("-r", "--repair", action="store_true",
                    help='Convert the given flightplan into a EUTL plan')
parser.add_argument("-e", "--eta", action="store_true",
                    help='Enable eta control for waypoint arrivals')
parser.add_argument("--daalog", action="store_true",
                    help='Enable daa logs')
args = parser.parse_args()

# cFS simulations cannot run in fast time.
if args.cfs:
    args.fasttime = False

# Initialize simulation environment
sim = SimEnvironment(fasttime=args.fasttime,verbose=args.verbosity)

# Set the home position for the simulation
HomePos = GetHomePosition(args.flightplan)

# Add traffic inputs
if args.traffic != '':
    tfinputs = ReadTrafficInput(args.traffic)
    for tf in tfinputs:
        sim.AddTraffic(tf[0], HomePos, *tf[1:])

# Initialize Icarous class
if args.cfs:
    ic = IcarousRunner(HomePos, verbose=args.verbosity)
else:
    ic = Icarous(HomePos,simtype="UAM_VTOL",monitor=args.daaType,verbose=args.verbosity,
                 daaConfig=args.daaConfig, fasttime=args.fasttime)

if args.daalog:
    # Dirty hack to silently update the daa logging parameter from commandline
    import os
    os.system("sed -Ein -e \'s/(LOGDAADATA)(\\ *)([0-1])(\\.0*)/\\1\\21\\4/\' "+args.params)

# Read params from file and input params
ic.SetParametersFromFile(args.params)

# Input flightplan
ic.InputFlightplanFromFile(args.flightplan,eta=args.eta,repair=args.repair)

# Input geofences from file
if args.geofence != '':
    # BUG FIX: the user-supplied path was ignored in favor of a hard-coded
    # "data/geofence2.xml"; honor the -g/--geofence argument.
    ic.InputGeofence(args.geofence)

# Add icarous instance to sim environment
sim.AddIcarousInstance(ic,time_limit=args.tlimit)

# Set position uncertainty for vehicles in the simulation
if args.uncertainty:
    sim.SetPosUncertainty(0.1, 0.1, 0, 0, 0, 0)

# Run the Simulation
sim.RunSimulation()

# Save json log outputs
sim.WriteLog() | josuehfa/System | CoreSystem/pycarous/RunPySim.py | RunPySim.py | py | 4,544 | python | en | code | 3 | github-code | 90 |
6727840470 | import numpy as np
import pytest
import math
from sklearn.base import clone
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestRegressor
import doubleml as dml
from ._utils import draw_smpls
from ._utils_irm_manual import fit_irm, boot_irm, tune_nuisance_irm
# ---- module-scoped pytest fixtures parameterizing the test grid ----
# Learner cloned into ml_g in dml_irm_fixture below.
@pytest.fixture(scope='module',
                params=[RandomForestRegressor(random_state=42)])
def learner_g(request):
    return request.param
# Learner cloned into ml_m in dml_irm_fixture below.
@pytest.fixture(scope='module',
                params=[LogisticRegression()])
def learner_m(request):
    return request.param
# DoubleML IRM score to test.
@pytest.fixture(scope='module',
                params=['ATE', 'ATTE'])
def score(request):
    return request.param
# DoubleML aggregation procedure.
@pytest.fixture(scope='module',
                params=['dml2'])
def dml_procedure(request):
    return request.param
# Whether inverse-probability weights are normalized.
@pytest.fixture(scope='module',
                params=[True, False])
def normalize_ipw(request):
    return request.param
# Tune hyperparameters per fold (True) or on the whole sample (False).
@pytest.fixture(scope='module',
                params=[True, False])
def tune_on_folds(request):
    return request.param
def get_par_grid(learner):
    """Return the hyperparameter search grid matching the learner's exact class."""
    if learner.__class__ in [RandomForestRegressor]:
        return {'n_estimators': [5, 10, 20]}
    assert learner.__class__ in [LogisticRegression]
    return {'C': np.logspace(-4, 2, 10)}
@pytest.fixture(scope='module')
def dml_irm_fixture(generate_data_irm, learner_g, learner_m, score, dml_procedure, normalize_ipw, tune_on_folds):
    """Fit a tuned DoubleMLIRM model and a manual reference implementation
    on the same data/seed, returning both sets of estimates plus bootstrap
    results for comparison in the tests below.
    """
    par_grid = {'ml_g': get_par_grid(learner_g),
                'ml_m': get_par_grid(learner_m)}
    n_folds_tune = 4
    boot_methods = ['normal']
    n_folds = 2
    n_rep_boot = 499
    # collect data
    (x, y, d) = generate_data_irm
    # Set machine learning methods for m & g
    ml_g = clone(learner_g)
    ml_m = clone(learner_m)
    # Seeded identically before the package fit and the manual fit so the
    # drawn sample splits agree.
    np.random.seed(3141)
    obj_dml_data = dml.DoubleMLData.from_arrays(x, y, d)
    dml_irm_obj = dml.DoubleMLIRM(obj_dml_data,
                                  ml_g, ml_m,
                                  n_folds,
                                  score=score,
                                  dml_procedure=dml_procedure,
                                  normalize_ipw=normalize_ipw)
    # tune hyperparameters
    tune_res = dml_irm_obj.tune(par_grid, tune_on_folds=tune_on_folds, n_folds_tune=n_folds_tune,
                                return_tune_res=False)
    assert isinstance(tune_res, dml.DoubleMLIRM)
    dml_irm_obj.fit()
    np.random.seed(3141)
    n_obs = len(y)
    all_smpls = draw_smpls(n_obs, n_folds)
    smpls = all_smpls[0]
    if tune_on_folds:
        g0_params, g1_params, m_params = tune_nuisance_irm(y, x, d,
                                                           clone(learner_g), clone(learner_m), smpls, score,
                                                           n_folds_tune,
                                                           par_grid['ml_g'], par_grid['ml_m'])
    else:
        # Whole-sample tuning: one (train, empty-test) split, then reuse
        # the tuned parameters for every fold.
        xx = [(np.arange(len(y)), np.array([]))]
        g0_params, g1_params, m_params = tune_nuisance_irm(y, x, d,
                                                           clone(learner_g), clone(learner_m), xx, score,
                                                           n_folds_tune,
                                                           par_grid['ml_g'], par_grid['ml_m'])
        g0_params = g0_params * n_folds
        m_params = m_params * n_folds
        if score == 'ATE':
            g1_params = g1_params * n_folds
        else:
            assert score == 'ATTE'
            g1_params = None
    res_manual = fit_irm(y, x, d, clone(learner_g), clone(learner_m),
                         all_smpls, dml_procedure, score,
                         normalize_ipw=normalize_ipw,
                         g0_params=g0_params, g1_params=g1_params, m_params=m_params)
    res_dict = {'coef': dml_irm_obj.coef,
                'coef_manual': res_manual['theta'],
                'se': dml_irm_obj.se,
                'se_manual': res_manual['se'],
                'boot_methods': boot_methods}
    for bootstrap in boot_methods:
        np.random.seed(3141)
        boot_theta, boot_t_stat = boot_irm(y, d, res_manual['thetas'], res_manual['ses'],
                                           res_manual['all_g_hat0'], res_manual['all_g_hat1'],
                                           res_manual['all_m_hat'], res_manual['all_p_hat'],
                                           all_smpls, score, bootstrap, n_rep_boot,
                                           normalize_ipw=normalize_ipw,
                                           dml_procedure=dml_procedure)
        np.random.seed(3141)
        dml_irm_obj.bootstrap(method=bootstrap, n_rep_boot=n_rep_boot)
        res_dict['boot_coef' + bootstrap] = dml_irm_obj.boot_coef
        res_dict['boot_t_stat' + bootstrap] = dml_irm_obj.boot_t_stat
        res_dict['boot_coef' + bootstrap + '_manual'] = boot_theta
        res_dict['boot_t_stat' + bootstrap + '_manual'] = boot_t_stat
    return res_dict
@pytest.mark.ci
def test_dml_irm_coef(dml_irm_fixture):
    """Package coefficient must agree with the manual implementation."""
    actual = dml_irm_fixture['coef']
    expected = dml_irm_fixture['coef_manual']
    assert math.isclose(actual, expected, rel_tol=1e-9, abs_tol=1e-4)
@pytest.mark.ci
def test_dml_irm_se(dml_irm_fixture):
    """Package standard error must agree with the manual implementation."""
    actual = dml_irm_fixture['se']
    expected = dml_irm_fixture['se_manual']
    assert math.isclose(actual, expected, rel_tol=1e-9, abs_tol=1e-4)
@pytest.mark.ci
def test_dml_irm_boot(dml_irm_fixture):
    """Bootstrap coefficients and t-statistics must match the manual bootstrap."""
    for bootstrap in dml_irm_fixture['boot_methods']:
        for stat in ('boot_coef', 'boot_t_stat'):
            assert np.allclose(dml_irm_fixture[stat + bootstrap],
                               dml_irm_fixture[stat + bootstrap + '_manual'],
                               rtol=1e-9, atol=1e-4)
7349978136 | import requests
from bs4 import BeautifulSoup
import pandas as pd
from collections import OrderedDict
class UseBeautifulSoup:
    """Thin wrapper around requests + BeautifulSoup for one URL."""

    def __init__(self, url):
        self.url = url

    def get_soup(self) -> BeautifulSoup:
        """Fetch the page and return it parsed with html.parser."""
        page = requests.get(self.url)
        return BeautifulSoup(page.text, 'html.parser')

    def display_html_layers(self, soup: BeautifulSoup) -> None:
        """Pretty-print the nested HTML structure of `soup`."""
        print(soup.prettify())
# Please set URL before running (left blank in the repository).
URL = ''
use_beautiful_soup = UseBeautifulSoup(URL)
# CSS selector for the course-name links on the target page.
tag = 'dd ul li p a.TextLinkWrapper_t1bvfs58'
elements = use_beautiful_soup.get_soup().select(tag)
computer_science_classes = []
for element in elements:
    class_name = element.get_text(strip=True)
    computer_science_classes.append(class_name)
# remove duplicate (OrderedDict.fromkeys keeps first-seen order)
unique_computer_science_classes = list(
    OrderedDict.fromkeys(computer_science_classes)
)
df = pd.DataFrame(unique_computer_science_classes)
# NOTE(review): csv_file_name is empty — set an output path before running.
csv_file_name = ''
df.to_csv(csv_file_name, index=False, header=False)
| Squirrel-TH/tool-box | common_web_scraping.py | common_web_scraping.py | py | 995 | python | en | code | 0 | github-code | 90 |
39268068219 | import cv2
import numpy as np
import time
import PoseModule as pm
from flask import Flask,jsonify # pip install flask # this is for deployment of the given module
app= Flask(__name__)
def main():
    """Generator: run the webcam bicep-curl counter and yield MJPEG frames.

    Counts half a repetition each time the right-arm angle (pose landmarks
    12-14-16) reaches 100% (up) or 0% (down), drawing a progress bar and
    the count onto each frame, and yields each frame as a multipart JPEG
    chunk (apparently intended for an HTTP streaming response — confirm).
    NOTE(review): `fps` is computed but never displayed, and the
    `success`/`ret` return codes from capture/encode are not checked.
    """
    cap = cv2.VideoCapture(0) #for accesing the webcam
    detector = pm.poseDetector()
    count = 0
    dir = 0 #taking the directions i.e up or down # 1 for down and 2 for up
    pTime = 0 # present time required for reducing the frame rate per second for more accuracy
    while True:
        success,img = cap.read() # reading the processed image
        img = cv2.resize(img,(1280,720))
        img = detector.findPose(img,False)
        lmList = detector.FindPosition(img,False) # draw = false
        if len(lmList) != 0:
            angle = detector.findAngle(img, 12, 14, 16)
            # Map the elbow angle to a 0-100% rep completion value.
            per = np.interp(angle, (210, 310), (0, 100))
            bar = np.interp(angle, (220, 310), (650, 100))# making bar on the screen to determine & count a proper rep
            color= (255,0,255) # used for selecting colour in rgb mode
            if per == 100: # if user has done a perfect half rep in upward direction
                if dir == 0: # changing direction
                    count+=0.5 # adding half rep
                    dir = 1
            if per ==0:
                if dir ==1:
                    count+=0.5
                    dir=0
            print(count)
            cv2.rectangle(img, (1100, 100), (1175, 650), color, 3) # creating an empty bar to demonstrate nothing has happened
            cv2.rectangle(img, (1100, int(bar)), (1175, 650), color, cv2.FILLED) # filling when goes up
            cv2.putText(img, f'{int(per)} %', (1100, 75), cv2.FONT_HERSHEY_PLAIN, 4,
                        color, 4) # using formatted string for this
        cv2.rectangle(img, (0, 450), (250, 720), (0, 255, 0), cv2.FILLED)
        cv2.putText(img, str(int(count)), (45, 670), cv2.FONT_HERSHEY_PLAIN, 15, # for displaying the count on screen
                    (255, 0, 0), 25)
        # reducing the frame rate from 120 to 30 using the formula written below
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.imshow("Image", img)
        cv2.waitKey(1)
        ret, buffer = cv2.imencode('.jpg', img)
        img = buffer.tobytes()
        yield (b'--frame\r\n'b'Content-Type: image/jpeg\r\n\r\n' + img + b'\r\n\r\n')
if __name__ == "__main__":
    # BUG FIX: main() is a generator function (its body contains `yield`),
    # so merely calling it created a generator object without running any
    # code. Drain it so the pose-counting loop actually executes.
    for _frame in main():
        pass
| piyushsir/OSDVirtualVelocity | bicepCurl.py | bicepCurl.py | py | 2,457 | python | en | code | 1 | github-code | 90 |
28741538773 | import itertools
def isprime(num):
    """Return 1 if num is prime, 0 otherwise (0 and 1 are not prime).

    BUG FIX (performance): the loop bound was ``int((num**1/2))+1``; by
    operator precedence ``num**1/2`` evaluates to ``num / 2`` rather than
    ``sqrt(num)``, so trial division ran all the way to num/2. The result
    was still correct, but math.isqrt gives the intended exact square-root
    bound.
    """
    from math import isqrt  # local import: the file's top level only imports itertools
    if num == 0 or num == 1:
        return 0
    for i in range(2, isqrt(num) + 1):
        if num % i == 0:
            return 0
    return 1
A = []
def getnum(numbers, gotnum, left, index):
    """Recursively choose `left` more digits from numbers[index:] (keeping
    order of choice in `gotnum`); once a subset is complete, append every
    permutation of it, as an int, to the module-level list A.
    NOTE(review): permuting each chosen combination produces duplicates
    (and A is never reset here); callers must deduplicate.
    """
    if left == 0:
        arr = list(map("".join, list(itertools.permutations(gotnum))))
        for a in arr:
            A.append(int(a))
        return
    for i in range(index, len(numbers)):
        getnum(numbers, gotnum + numbers[i], left-1, i+1)
def solution(numbers):
    """Count the distinct primes formed by permuting any non-empty subset
    of the digit characters in `numbers`.
    """
    global A
    # BUG FIX: A is a module-level accumulator filled by getnum(); without
    # resetting it here, a second call to solution() would still count
    # candidates left over from the previous call.
    A = []
    answer = 0
    for i in range(1, len(numbers) + 1):
        getnum(numbers, "", i, 0)
    # Deduplicate before testing primality (getnum emits duplicates).
    for candidate in set(A):
        answer += isprime(candidate)
    return answer
| MountainNine/ForifAlgorithm | 완전탐색/소수찾기/박병현/main.py | main.py | py | 680 | python | en | code | 0 | github-code | 90 |
33055233468 | def p_valeur(pile):
    """
    Return the top element of the stack `pile`, or None when it is empty.

    Examples:
    >>> p_valeur([2, 3, 5])
    5
    >>> p_valeur([])
    """
    if len(pile) != 0:
        l_pile = len(pile)
        return pile[l_pile-1]
    else:
        return None
assert p_valeur([]) is None
assert p_valeur([2, 3, 5]) == 5
def p_depile(pile):
    """Pop and return the most recently pushed element of stack `pile`;
    return None when the stack is empty.
    """
    if not pile:
        return None
    return pile.pop()
p=[2, 3, 5]
assert p_depile(p) == 5
assert p == [2, 3]
assert p_depile([]) is None
def p_empile(pile, v):
    """Push value `v` onto stack `pile` (in place) and return the stack."""
    pile.append(v)
    return pile
pile = [2, 3]
p_empile(pile, 5)
assert pile == [2, 3, 5]
def f_valeur(file):
    """
    Return the value at the front of queue `file`, or None when empty.

    Values are enfiled at index 0 (the rear), so the front of the queue
    is the last element.

    BUG FIX: the body read the module-level global ``pile`` instead of
    the ``file`` parameter, so it returned data from an unrelated stack
    (the original assert only passed by coincidence).
    """
    if len(file) != 0:
        l_file = len(file)
        return file[l_file - 1]
    else:
        return None
assert f_valeur([]) is None
assert f_valeur([2, 3, 5]) == 5
def f_defile(file):
    """Remove and return the element at the front of queue `file` (its
    last slot); return None when the queue is empty.
    """
    if not file:
        return None
    return file.pop()
file = [2, 3, 5, 8]
assert f_defile(file) == 8
assert file == [2, 3, 5]
def f_enfile(file, v):
    """Insert value `v` at the rear (index 0) of queue `file` in place,
    and return the queue.
    """
    file[:0] = [v]
    return file
file = [2, 3, 5, 8]
f_enfile(file, 1)
assert file == [1, 2, 3, 5, 8]
def echange_FondSommet(p):
    """Swap the bottom and top of stack p (p[0] <-> p[-1]) in place; return p.

    BUG FIX: the previous pop/insert implementation crashed on a
    one-element stack (the second pop hit an already-empty list). A tuple
    swap handles any non-empty stack; an empty stack still raises
    IndexError, as before.
    """
    p[0], p[-1] = p[-1], p[0]
    return p
def pile_copie(p):
    """Return a shallow copy of stack p."""
    return list(p)
def pile_inversee(p):
    """Reverse stack p in place and return it."""
    p[:] = p[::-1]
    return p
### programme principal #####
# Demo script exercising the stack helpers above (output text is French).
print(p_valeur([]))
print(p_valeur([2, 3, 5]) == 5)
p=[2, 3, 5]
print("contenu de la pile : ",p)
print("depiler un element = ",p_depile(p))
print("contenu de la pile : ",p)
print("empiler un element = ",p_empile(p,128))
print("contenu de la pile : ",p)
print("echanger le sommet avec le fond = ",echange_FondSommet(p))
print("contenu de la pile : ",p)
print("copie de pile, pile résultante = ",pile_copie(p))
# NOTE(review): pile_inversee mutates p in place, so the "pile d'origine"
# printed afterwards is the reversed list, not the original contents.
print("pile inversée, pile résultante = ",pile_inversee(p))
print("contenu de la pile d'origine' : ",p) | Samoxxxxx/TNSI | TD_Piles&Files.py | TD_Piles&Files.py | py | 3,749 | python | fr | code | 0 | github-code | 90 |
31384162581 | import re
import string
from pprint import pprint
import numpy as np
import pandas as pd
from hyperopt import fmin, tpe, Trials, space_eval, hp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from nlplay.features.text_cleaner import base_cleaner
from nlplay.utils.parlib import parallelApply
if __name__ == "__main__":
    # IMDB sentiment classification: TF-IDF + SGD linear model, with
    # hyperopt (TPE) searching vectorizer/classifier hyperparameters.
    train_csv = "../nlplay/data_cache/IMDB/IMDB_train.csv"
    test_csv = "../nlplay/data_cache//IMDB/IMDB_test.csv"
    # Data preparation
    df_train = pd.read_csv(train_csv)
    df_train = df_train.sample(frac=1)
    df_test = pd.read_csv(test_csv)
    df_train[df_train.columns[0]] = parallelApply(
        df_train[df_train.columns[0]], base_cleaner, 3
    )
    df_test[df_test.columns[0]] = parallelApply(
        df_test[df_test.columns[0]], base_cleaner, 3
    )
    # Train/test set creation
    X_train = df_train[df_train.columns[0]].tolist()
    y_train = df_train[df_train.columns[1]].tolist()
    X_test = df_test[df_test.columns[0]].tolist()
    y_test = df_test[df_test.columns[1]].tolist()
    # NOTE(review): re_tok/tokenizer are defined but never passed to the
    # TfidfVectorizer below — dead code or a forgotten tokenizer= argument.
    re_tok = re.compile("([%s“”¨«»®´·º½¾¿¡§£₤‘’])" % string.punctuation)
    tokenizer = lambda x: re_tok.sub(r" \1 ", x).split()
    # Pipeline definition
    pipeline = Pipeline(
        [("vect", TfidfVectorizer(sublinear_tf=True)), ("clf", SGDClassifier(loss="modified_huber"))]
    )
    # Parameter search space
    space = {}
    space["vect__ngram_range"] = hp.choice("vect__ngram_range", [(1, 2), (1, 3)])
    space["vect__min_df"] = 1 + hp.randint("vect__min_df", 5)
    space["vect__max_df"] = hp.uniform("vect__max_df", 0.80, 1.0)
    space["clf__alpha"] = hp.loguniform("clf__alpha", -9 * np.log(10), -4 * np.log(10))
    # Define Hyperopt objective function - ie we want to maximize accuracy
    # (hyperopt minimizes, so return 1 - mean CV accuracy).
    def objective(params):
        pipeline.set_params(**params)
        shuffle = KFold(n_splits=5, shuffle=True)
        score = cross_val_score(
            pipeline, X_train, y_train, cv=shuffle, scoring="accuracy", n_jobs=-1
        )
        return 1 - score.mean()
    # The Trials object will store details of each iteration
    trials = Trials()
    # Run hyperparameter search using the tpe algorithm
    best = fmin(objective, space, algo=tpe.suggest, max_evals=15, trials=trials)
    # Get the values of the optimal parameters
    best_params = space_eval(space, best)
    print("Best Parameters:")
    pprint(best_params)
    # Fit the model with the optimal hyperparameters
    pipeline.set_params(**best_params)
    pipeline.fit(X_train, y_train)
    print("Training accuracy : " + str(pipeline.score(X_train, y_train)))
    # Score with the test data
    y_preds = pipeline.predict(X_test)
    print("Test accuracy : " + str(accuracy_score(y_test, y_preds)))
    # 100%|██████████| 15/15 [06:42<00:00, 26.83s/trial, best loss: 0.09144000000000008]
    # Best Parameters:
    # {'clf__alpha': 2.6877296252886694e-05,
    # 'vect__max_df': 0.8482243048758884,
    # 'vect__min_df': 1,
    # 'vect__ngram_range': (1, 2)}
    # Training accuracy : 1.0
# Test accuracy : 0.90696 | jeremypoulain/nlplay | scripts/skl_sgdlinear_train_script.py | skl_sgdlinear_train_script.py | py | 3,321 | python | en | code | 7 | github-code | 90 |
18242014249 | def calc_factors(N):
    """
    Return the divisors of N as a sorted list.

    Because the problem requires K > 1, the divisor 1 is deliberately
    excluded (N itself is always included).
    """
    i=2
    factors={N}
    while i*i<=N:
        if N%i==0:
            factors.add(i)
            factors.add(N//i)
        i+=1
    return list(sorted(factors))
N = int(input())
# Special case: for N == 2 the only valid K is 2 itself.
if N==2:
    print(1)
    exit()
# Case without division: every divisor (>1) of N-1 works.
ans = len(calc_factors(N-1))
# Case with division: divide out fac completely, then the remainder must be 1 mod fac.
for fac in calc_factors(N):
    tmp=N
    while tmp%fac==0:
        tmp//=fac
    if tmp%fac==1:
        ans+=1
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p02722/s278585178.py | s278585178.py | py | 546 | python | ja | code | 0 | github-code | 90 |
71199297577 | import logging
from datetime import date
from typing import Optional
import numpy as np
import pandas as pd
from anndata import AnnData
from cell2location.models.base._pyro_mixin import (
AutoGuideMixinModule,
PltExportMixin,
QuantileMixin,
init_to_value,
)
from pyro import clear_param_store
from pyro.infer.autoguide import AutoNormalMessenger, init_to_feasible, init_to_mean
from scvi import REGISTRY_KEYS
from scvi.data import AnnDataManager
from scvi.data.fields import CategoricalJointObsField, LayerField, NumericalObsField
from scvi.model.base import BaseModelClass, PyroSampleMixin, PyroSviTrainMixin
from scvi.module.base import PyroBaseModuleClass
from scvi.utils import setup_anndata_dsp
from ._logistic_module import HierarchicalLogisticPyroModel
logger = logging.getLogger(__name__)
def infer_tree(labels_df, level_keys):
    """
    Infer the label hierarchy (tree) from an annotation DataFrame.

    Parameters
    ----------
    labels_df
        DataFrame with annotations
    level_keys
        List of column names from top to bottom levels (from less detailed to more detailed)

    Returns
    -------
    For multiple levels: a list of len(level_keys) - 1 dicts, each mapping a
    parent label to the list of its child labels (first-seen order).
    For a single level: a one-element list containing the unique labels.
    """
    # for multiple layers of hierarchy
    if len(level_keys) > 1:
        tree_inferred = [{} for _ in range(len(level_keys) - 1)]
        for i in range(len(level_keys) - 1):
            layer_p = labels_df.loc[:, level_keys[i]]
            layer_ch = labels_df.loc[:, level_keys[i + 1]]
            for j in range(labels_df.shape[0]):
                # BUG FIX: use positional indexing (.iloc) instead of
                # label-based layer_p[j]; the old code raised KeyError (or
                # silently mis-indexed) whenever labels_df did not have a
                # default RangeIndex.
                parent = layer_p.iloc[j]
                child = layer_ch.iloc[j]
                if parent not in tree_inferred[i]:
                    tree_inferred[i][parent] = [child]
                elif child not in tree_inferred[i][parent]:
                    tree_inferred[i][parent].append(child)
    # if only one level
    else:
        tree_inferred = [list(labels_df[level_keys[0]].unique())]
    return tree_inferred
"""def _setup_summary_stats(adata, level_keys):
n_cells = adata.shape[0]
n_vars = adata.shape[1]
n_cells_per_label_per_level = [
adata.obs.groupby(group).size().values.astype(int) for group in level_keys
]
n_levels = len(level_keys)
summary_stats = {
"n_cells": n_cells,
"n_vars": n_vars,
"n_levels": n_levels,
"n_cells_per_label_per_level": n_cells_per_label_per_level,
}
adata.uns["_scvi"]["summary_stats"] = summary_stats
adata.uns["tree"] = infer_tree(adata.obsm["_scvi_extra_categoricals"], level_keys)
logger.info(
"Successfully registered anndata object containing {} cells, {} vars, "
"{} cell annotation levels.".format(n_cells, n_vars, n_levels)
)
return summary_stats"""
class LogisticBaseModule(PyroBaseModuleClass, AutoGuideMixinModule):
    """Pyro module pairing a model class with an automatically built guide."""
    def __init__(
        self,
        model,
        init_loc_fn=init_to_mean(fallback=init_to_feasible),
        guide_class=AutoNormalMessenger,
        guide_kwargs: Optional[dict] = None,
        **kwargs,
    ):
        """
        Module class which defines an AutoGuide given a model. Supports
        multiple model architectures.

        Parameters
        ----------
        model
            Pyro model class; instantiated here with **kwargs.
        init_loc_fn
            Initialization function for the guide's location parameters.
        guide_class
            AutoGuide class used to construct the guide (default
            AutoNormalMessenger).
        guide_kwargs
            Extra keyword arguments forwarded to `guide_class`.
        kwargs
            Arguments for the specific model class - e.g. number of genes,
            values of the prior distribution.
        """
        super().__init__()
        self.hist = []
        self._model = model(**kwargs)
        if guide_kwargs is None:
            guide_kwargs = dict()
        self._guide = guide_class(
            self.model,
            init_loc_fn=init_loc_fn,
            **guide_kwargs
            # create_plates=model.create_plates,
        )
        # Expose the model's batch-unpacking helper on the module itself.
        self._get_fn_args_from_batch = self._model._get_fn_args_from_batch
    @property
    def model(self):
        # The instantiated Pyro model.
        return self._model
    @property
    def guide(self):
        # The auto-constructed guide wrapping `self.model`.
        return self._guide
    @property
    def list_obs_plate_vars(self):
        return self.model.list_obs_plate_vars()
    def init_to_value(self, site):
        # Initialize guide sites from model-provided `init_val_*` attributes
        # when `np_init_vals` is set; otherwise fall back to defaults.
        if getattr(self.model, "np_init_vals", None) is not None:
            init_vals = {
                k: getattr(self.model, f"init_val_{k}")
                for k in self.model.np_init_vals.keys()
            }
        else:
            init_vals = dict()
        return init_to_value(site=site, values=init_vals)
class LogisticModel(
    QuantileMixin, PyroSampleMixin, PyroSviTrainMixin, PltExportMixin, BaseModelClass
):
    """
    Model which estimates per cluster average mRNA count, accounting for batch effects. User-end model class.
    https://github.com/BayraktarLab/cell2location
    Parameters
    ----------
    adata
        single-cell AnnData object that has been registered via :func:`~scvi.data.setup_anndata`.
    level_keys
        List of column names from top to bottom levels (from less detailed to more detailed)
    use_gpu
        Use the GPU?
    **model_kwargs
        Keyword args for :class:`~scvi.external.LocationModelLinearDependentWMultiExperimentModel`
    Examples
    --------
    TODO add example
    >>>
    """
    def __init__(
        self,
        adata: AnnData,
        laplace_learning_mode: str = "fixed-sigma",
        # tree: list,
        model_class=None,
        **model_kwargs,
    ):
        """Build the Pyro module from the registered AnnData.
        ``model_class`` defaults to :class:`HierarchicalLogisticPyroModel`.
        """
        # in case any other model was created before that shares the same parameter names.
        clear_param_store()
        super().__init__(adata)
        # Hierarchy-level column names registered by setup_anndata (top -> bottom).
        level_keys = self.adata_manager.get_state_registry(REGISTRY_KEYS.CAT_COVS_KEY)[
            "field_keys"
        ]
        # Number of cells observed for each label, computed separately per level.
        self.n_cells_per_label_per_level_ = [
            self.adata.obs.groupby(group).size().values.astype(int)
            for group in level_keys
        ]
        self.n_levels_ = len(level_keys)
        self.level_keys_ = level_keys
        # Parent/child structure between consecutive levels, inferred from the
        # registered categorical covariates.
        self.tree_ = infer_tree(
            self.adata_manager.get_from_registry(REGISTRY_KEYS.CAT_COVS_KEY), level_keys
        )
        self.laplace_learning_mode_ = laplace_learning_mode
        if model_class is None:
            model_class = HierarchicalLogisticPyroModel
        self.module = LogisticBaseModule(
            model=model_class,
            n_obs=self.summary_stats["n_cells"],
            n_vars=self.summary_stats["n_vars"],
            n_levels=self.n_levels_,
            n_cells_per_label_per_level=self.n_cells_per_label_per_level_,
            tree=self.tree_,
            laplace_learning_mode=self.laplace_learning_mode_,
            **model_kwargs,
        )
        self.samples = dict()
        self.init_params_ = self._get_init_params(locals())
    @classmethod
    @setup_anndata_dsp.dedent
    def setup_anndata(
        cls,
        adata: AnnData,
        layer: Optional[str] = None,
        level_keys: Optional[list] = None,
        **kwargs,
    ):
        """
        %(summary)s.
        Parameters
        ----------
        %(param_layer)s
        %(param_batch_key)s
        %(param_labels_key)s
        %(param_cat_cov_keys)s
        %(param_cont_cov_keys)s
        """
        setup_method_args = cls._get_setup_method_args(**locals())
        # Integer row index used by minibatch samplers to address observations.
        adata.obs["_indices"] = np.arange(adata.n_obs).astype("int64")
        anndata_fields = [
            LayerField(REGISTRY_KEYS.X_KEY, layer, is_count_data=True),
            CategoricalJointObsField(REGISTRY_KEYS.CAT_COVS_KEY, level_keys),
            NumericalObsField(REGISTRY_KEYS.INDICES_KEY, "_indices"),
        ]
        adata_manager = AnnDataManager(
            fields=anndata_fields, setup_method_args=setup_method_args
        )
        adata_manager.register_fields(adata, **kwargs)
        cls.register_manager(adata_manager)
    def _export2adata(self, samples):
        r"""
        Export key model variables and samples
        Parameters
        ----------
        samples
            dictionary with posterior mean, 5%/95% quantiles, SD, samples, generated by ``.sample_posterior()``
        Returns
        -------
        Updated dictionary with additional details is saved to ``adata.uns['mod']``.
        """
        # add factor filter and samples of all parameters to unstructured data
        results = {
            "model_name": str(self.module.__class__.__name__),
            "date": str(date.today()),
            "var_names": self.adata.var_names.tolist(),
            "obs_names": self.adata.obs_names.tolist(),
            "post_sample_means": samples["post_sample_means"] if "post_sample_means" in samples else None,
            "post_sample_stds": samples["post_sample_stds"] if "post_sample_stds" in samples else None,
        }
        # add posterior quantiles
        for k, v in samples.items():
            if k.startswith("post_sample_"):
                results[k] = v
        return results
    def export_posterior(
        self,
        adata,
        prediction: bool = False,
        use_quantiles: bool = False,
        sample_kwargs: Optional[dict] = None,
        export_slot: str = "mod",
        # NOTE(review): mutable default argument; safe only while no caller mutates it.
        add_to_varm: list = ["means", "stds", "q05", "q95"],
    ):
        """
        Summarise posterior distribution and export results (cell abundance) to anndata object:
        1. adata.obsm: Estimated references expression signatures (average mRNA count in each cell type),
            as pd.DataFrames for each posterior distribution summary `add_to_varm`,
            posterior mean, sd, 5% and 95% quantiles (['means', 'stds', 'q05', 'q95']).
            If export to adata.varm fails with error, results are saved to adata.var instead.
        2. adata.uns: Posterior of all parameters, model name, date,
            cell type names ('factor_names'), obs and var names.
        Parameters
        ----------
        adata
            anndata object where results should be saved
        prediction
            Prediction mode predicts cell labels on new data.
        sample_kwargs
            arguments for self.sample_posterior (generating and summarising posterior samples), namely:
                num_samples - number of samples to use (Default = 1000).
                batch_size - data batch size (keep low enough to fit on GPU, default 2048).
                use_gpu - use gpu for generating samples?
        export_slot
            adata.uns slot where to export results
        add_to_varm
            posterior distribution summary to export in adata.varm (['means', 'stds', 'q05', 'q95']).
        Returns
        -------
        """
        sample_kwargs = sample_kwargs if isinstance(sample_kwargs, dict) else dict()
        label_keys = list(
            self.adata_manager.get_state_registry(REGISTRY_KEYS.CAT_COVS_KEY)[
                "field_keys"
            ]
        )
        # when prediction mode change to evaluation mode and swap adata object
        if prediction:
            self.module.eval()
            self.module.model.prediction = True
            # use version of this function for prediction
            self.module._get_fn_args_from_batch = (
                self.module.model._get_fn_args_from_batch
            )
            # resize plates according to the validation object
            self.module.model.n_obs = adata.n_obs
            # create index column
            adata.obs["_indices"] = np.arange(adata.n_obs).astype("int64")
            # for minibatch learning, selected indices lay in "ind_x"
            # scvi.data.register_tensor_from_anndata(
            #     adata,
            #     registry_key="ind_x",
            #     adata_attr_name="obs",
            #     adata_key_name="_indices",
            # )
            # if all columns with labels don't exist, create them and fill with 0s
            if np.all(~np.isin(label_keys, adata.obs.columns)):
                adata.obs.loc[:, label_keys] = 0
            # substitute adata object
            adata_train = self.adata.copy()
            self.adata = self._validate_anndata(adata)
            # self.adata = adata
            if use_quantiles:
                # keep only quantile summaries, e.g. 'q05' -> quantile 0.05
                add_to_varm = [i for i in add_to_varm if (i not in ["means", "stds"]) and ("q" in i)]
                if len(add_to_varm) == 0:
                    raise ValueError("No quantiles to export - please add add_to_obsm=['q05', 'q50', 'q95'].")
                self.samples = dict()
                for i in add_to_varm:
                    q = float(f"0.{i[1:]}")
                    self.samples[f"post_sample_{i}"] = self.posterior_quantile(q=q, **sample_kwargs)
            else:
                # generate samples from posterior distributions for all parameters
                # and compute mean, 5%/95% quantiles and standard deviation
                self.samples = self.sample_posterior(**sample_kwargs)
            # revert adata object substitution
            self.adata = adata_train
            self.module.eval()
            self.module.model.prediction = False
            # re-set default version of this function
            self.module._get_fn_args_from_batch = (
                self.module.model._get_fn_args_from_batch
            )
            obs_names = adata.obs_names
        else:
            if use_quantiles:
                # keep only quantile summaries, e.g. 'q05' -> quantile 0.05
                add_to_varm = [i for i in add_to_varm if (i not in ["means", "stds"]) and ("q" in i)]
                if len(add_to_varm) == 0:
                    raise ValueError("No quantiles to export - please add add_to_obsm=['q05', 'q50', 'q95'].")
                self.samples = dict()
                for i in add_to_varm:
                    q = float(f"0.{i[1:]}")
                    self.samples[f"post_sample_{i}"] = self.posterior_quantile(q=q, **sample_kwargs)
            else:
                # generate samples from posterior distributions for all parameters
                # and compute mean, 5%/95% quantiles and standard deviation
                self.samples = self.sample_posterior(**sample_kwargs)
            obs_names = self.adata.obs_names
        # export posterior distribution summary for all parameters and
        # annotation (model, date, var, obs and cell type names) to anndata object
        adata.uns[export_slot] = self._export2adata(self.samples)
        # export estimated expression in each cluster
        # first convert np.arrays to pd.DataFrames with cell type and observation names
        # data frames contain mean, 5%/95% quantiles and standard deviation, denoted by a prefix
        for i in range(self.n_levels_):
            # category names for level i, in registry mapping order
            categories = list(
                list(
                    self.adata_manager.get_state_registry(REGISTRY_KEYS.CAT_COVS_KEY)[
                        "mappings"
                    ].values()
                )[i]
            )
            for k in add_to_varm:
                sample_df = pd.DataFrame(
                    self.samples[f"post_sample_{k}"].get(f"weight_level_{i}", None),
                    columns=[f"{k}_weight_{label_keys[i]}_{c}" for c in categories],
                    index=self.adata.var_names,
                )
                try:
                    adata.varm[f"{k}_weight_{label_keys[i]}"] = sample_df.loc[
                        adata.var_names, :
                    ]
                except ValueError:
                    # Catching weird error with obsm: `ValueError: value.index does not match parent’s axis 1 names`
                    adata.var[sample_df.columns] = sample_df.loc[adata.var_names, :]
                sample_df = pd.DataFrame(
                    self.samples[f"post_sample_{k}"].get(f"label_prob_{i}", None),
                    columns=obs_names,
                    index=[f"{k}_label_{label_keys[i]}_{c}" for c in categories],
                ).T
                try:
                    # TODO change to user input name
                    adata.obsm[f"{k}_label_prob_{label_keys[i]}"] = sample_df.loc[
                        adata.obs_names, :
                    ]
                except ValueError:
                    # Catching weird error with obsm: `ValueError: value.index does not match parent’s axis 1 names`
                    adata.obs[sample_df.columns] = sample_df.loc[adata.obs_names, :]
        return adata
# TODO plot QC - prediction accuracy curve
| dissatisfaction-ai/scHierarchy | schierarchy/logistic/_logistic_model.py | _logistic_model.py | py | 16,466 | python | en | code | 18 | github-code | 90 |
if __name__ == "__main__":
    # Advent of Code 2022, day 10 (part 1): simulate a CPU with a single
    # register X and two instructions -- "noop" (1 cycle) and "addx V"
    # (2 cycles; X += V only after the second cycle completes).  Signal
    # strength (cycle * X) is sampled during cycles 20, 60, 100, ...
    with open("10-input.txt") as f:
        lines = f.readlines()
    signal_strengths = []
    # Two-stage pipeline modelling addx's two-cycle latency: the value loaded
    # into pending_add[0] shifts into pending_add[1] after one cycle and is
    # applied to X at the end of the following cycle.
    pending_add = [0, 0]
    register_x = 1
    cycle_count = 0
    for line in lines:
        parts = line.strip().split()
        if parts[0] == "addx":
            cycles = 2
            pending_add[0] = int(parts[1])
        else:
            cycles = 1
        for _ in range(cycles):
            cycle_count += 1
            # Sample "during" the cycle, i.e. before any pending add lands.
            if (cycle_count - 20) % 40 == 0:
                signal_strengths.append(register_x * cycle_count)
                print(
                    f"On cycle {cycle_count} registry X is {register_x} and strength is {register_x * cycle_count}"
                )
            # Bug fix: the original guarded this with `if registry_buffer:`,
            # which is always true for a two-element list -- dead condition removed.
            register_x += pending_add[1]
            pending_add[1] = pending_add[0]
            pending_add[0] = 0
    print(f"The sum of the signal strengths is {sum(signal_strengths)}")
| blacksd/adeventofcode2022 | 10/10-solution-1.py | 10-solution-1.py | py | 1,374 | python | en | code | 0 | github-code | 90 |
14012153188 | import torch
# Output functions come from https://github.com/pytorch/pytorch/blob/master/torch/_meta_registrations.py
def check_cuda_mm(*args):
    """Assert that every positional argument is a ``torch.Tensor`` that lives
    on a CUDA device; raises ``AssertionError`` otherwise."""
    for operand in args:
        is_tensor = isinstance(operand, torch.Tensor)
        assert is_tensor
        assert operand.device.type == 'cuda'
def mm_output(a, b):
    """Infer the result shape of the 2-D matrix product ``a @ b``.

    Returns the tuple ``(N, P)`` for ``a`` of shape (N, M) and ``b`` of
    shape (M, P); asserts that both operands are 2-D and that the inner
    (reduction) dimensions agree.
    """
    assert a.dim() == 2, 'a must be 2D'
    assert b.dim() == 2, 'b must be 2D'
    rows, inner_a = a.shape
    inner_b, cols = b.shape
    assert inner_a == inner_b, 'a and b must have same reduction dim'
    return (rows, cols)
def addmm_output(bias, x, y):
    # Shape of addmm(bias, x, y): bias broadcasts against x @ y, so only the
    # matmul operands determine the output shape.
    return mm_output(x, y)
def common_baddbmm_bmm(batch1, batch2, is_bmm, self_baddbmm=None):
    """Shared shape inference for ``bmm`` and ``baddbmm``.

    Both batches must be 3-D with matching batch and contraction sizes; the
    result shape is (batch, rows(batch1), cols(batch2)).  For baddbmm
    (``is_bmm`` False) the optional ``self`` operand must already have the
    output shape.
    """
    assert batch1.dim() == 3, 'batch1 must be a 3D tensor'
    assert batch2.dim() == 3, 'batch2 must be a 3D tensor'
    n_batch, n_rows, contraction = batch1.size()
    batch2_n_batch, batch2_contraction, n_cols = batch2.size()
    output_size = (n_batch, n_rows, n_cols)
    assert batch2_n_batch == n_batch and batch2_contraction == contraction
    if not is_bmm and self_baddbmm is not None:
        assert self_baddbmm.dim() == 3, 'self must be a 3D tensor'
        assert self_baddbmm.size() == output_size, \
            f'Expected an input tensor shape with shape {output_size} but got shape: {self_baddbmm.size()}'
    return output_size
def bmm_output(mat1, mat2):
    # Batched matmul shape: delegate to the shared checker with is_bmm=True
    # (plain bmm has no `self` operand to validate).
    return common_baddbmm_bmm(mat1, mat2, True)
| hpcaitech/Elixir | elixir/tracer/memory_tracer/output_shape.py | output_shape.py | py | 1,404 | python | en | code | 8 | github-code | 90 |
# Competitive-programming script: print sum over all pairs i<j of A_i*A_j
# modulo p = 1e9+7, using the identity 2*sum_{i<j} A_i*A_j = (sum A)^2 - sum A_i^2.
N = int(input())  # N is read to consume the line but not otherwise used
p = 10**9 + 7
A = [int(i) for i in input().split()]
S = sum(A)%p
ans = S**2 % p
B = [(i**2%p) for i in A]
ans -= sum(B)%p
# bring the difference back into [0, p)
if ans < 0:
    ans += p
# Halve ans modulo p: p is odd, so when ans is odd, (ans+p) is even and
# (ans+p)//2 equals ans * inverse(2) mod p.
if ans % 2 == 0:
    print(ans//2)
else:
    print((ans+p)//2)
18058626189 | from collections import deque
# Three-player card game simulation (looks like AtCoder ABC045B "Card Game
# for Three" -- TODO confirm): each deck is a string of 'a'/'b'/'c'; the
# letter on the current card names the player who draws next.  Counters
# a/b/c track cards remaining in each deck; when a player's counter hits -1
# that player had no card left to draw, the game ends, and their letter is
# printed as the answer.
sa = deque(input())
sb = deque(input())
sc = deque(input())
a = len(sa)
b = len(sb)
c = len(sc)
# player A draws the very first card
s = sa.popleft()
a -= 1
while True:
    if s == "a":
        # A's turn: spend one of A's cards (or end the game if none remain)
        a -= 1
        if a == -1:
            ans = "A"
            break
        else:
            s = sa.popleft()
    elif s == "b":
        b -= 1
        if b == -1:
            ans = "B"
            break
        else:
            s = sb.popleft()
    elif s == "c":
        c -= 1
        if c == -1:
            ans = "C"
            break
        else:
            s = sc.popleft()
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p03998/s201955161.py | s201955161.py | py | 589 | python | en | code | 0 | github-code | 90 |
9024390231 |
import pickle
import pandas
import networkx as nx
from nltk import *
import nltk
def oauth_login():
    """Authenticate against the Twitter API via OAuth and return a client.

    SECURITY NOTE(review): consumer key/secret and access tokens are
    hard-coded below.  Credentials committed to source control should be
    revoked and loaded from the environment or a config file instead.
    """
    CONSUMER_KEY = 'OCa2LGsxL0EUALx6zRUjQWeHl'
    CONSUMER_SECRET = 'JWCtpW8inPkfC6QUJbtfJ9uz02JcO78dC5sJDi4obx5LZcBCc5'
    OAUTH_TOKEN = '1100042597370920961-4kzA5Em8CbPk4q8jE6GwnSXp3gSdyS'
    OAUTH_TOKEN_SECRET = '3fAlogX64SO98ZWyQLAaE4ok7AHljlEHp8r3QcWqNWkM7'
    auth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET,
                               CONSUMER_KEY, CONSUMER_SECRET)
    twitter_api = twitter.Twitter(auth=auth)
    return twitter_api
import sys
import time
from urllib.error import URLError
from http.client import BadStatusLine
import twitter
def make_twitter_request(twitter_api_func, max_errors=10, *args, **kw):
    """Call ``twitter_api_func(*args, **kw)`` with retry/backoff handling.

    Returns the API response, or None for 401/404 errors (caller must handle
    that case).  Sleeps through 429 rate limits, retries 5xx with exponential
    backoff, and re-raises after ``max_errors`` consecutive network errors.
    """
    # A nested helper function that handles common HTTPErrors. Return an updated
    # value for wait_period if the problem is a 500 level error. Block until the
    # rate limit is reset if it's a rate limiting issue (429 error). Returns None
    # for 401 and 404 errors, which requires special handling by the caller.
    def handle_twitter_http_error(e, wait_period=2, sleep_when_rate_limited=True):
        if wait_period > 3600: # Seconds
            print('Too many retries. Quitting.', file=sys.stderr)
            raise e
        # See https://developer.twitter.com/en/docs/basics/response-codes
        # for common codes
        if e.e.code == 401:
            print('Encountered 401 Error (Not Authorized)', file=sys.stderr)
            return None
        elif e.e.code == 404:
            print('Encountered 404 Error (Not Found)', file=sys.stderr)
            return None
        elif e.e.code == 429:
            print('Encountered 429 Error (Rate Limit Exceeded)', file=sys.stderr)
            if sleep_when_rate_limited:
                print("Retrying in 15 minutes...ZzZ...", file=sys.stderr)
                sys.stderr.flush()
                time.sleep(60 * 15 + 5)
                print('...ZzZ...Awake now and trying again.', file=sys.stderr)
                # reset the backoff to its starting value after the sleep
                return 2
            else:
                raise e # Caller must handle the rate limiting issue
        elif e.e.code in (500, 502, 503, 504):
            print('Encountered {0} Error. Retrying in {1} seconds' \
                  .format(e.e.code, wait_period), file=sys.stderr)
            time.sleep(wait_period)
            wait_period *= 1.5
            return wait_period
        else:
            raise e
    # End of nested helper function
    wait_period = 2
    error_count = 0
    while True:
        try:
            return twitter_api_func(*args, **kw)
        except twitter.api.TwitterHTTPError as e:
            # NOTE(review): HTTP errors reset the consecutive-error counter;
            # presumably intentional since the helper handles them itself.
            error_count = 0
            wait_period = handle_twitter_http_error(e, wait_period)
            if wait_period is None:
                return
        except URLError as e:
            error_count += 1
            time.sleep(wait_period)
            wait_period *= 1.5
            print("URLError encountered. Continuing.", file=sys.stderr)
            if error_count > max_errors:
                print("Too many consecutive errors...bailing out.", file=sys.stderr)
                raise
        except BadStatusLine as e:
            error_count += 1
            time.sleep(wait_period)
            wait_period *= 1.5
            print("BadStatusLine encountered. Continuing.", file=sys.stderr)
            if error_count > max_errors:
                print("Too many consecutive errors...bailing out.", file=sys.stderr)
                raise
from functools import partial
from sys import maxsize as maxint
def get_friends_followers_ids(twitter_api, screen_name=None, user_id=None,
                              friends_limit=maxint, followers_limit=maxint):
    """Fetch (friend_ids, follower_ids) for one user via cursored API calls.

    Exactly one of ``screen_name`` / ``user_id`` must be given; each id list
    is truncated to its respective limit.
    """
    # Must have either screen_name or user_id (logical xor)
    assert (screen_name != None) != (user_id != None), \
        "Must have screen_name or user_id, but not both"
    # See http://bit.ly/2GcjKJP and http://bit.ly/2rFz90N for details
    # on API parameters
    get_friends_ids = partial(make_twitter_request, twitter_api.friends.ids,
                              count=100)
    get_followers_ids = partial(make_twitter_request, twitter_api.followers.ids,
                                count=100)
    friends_ids, followers_ids = [], []
    # Walk the cursored pages for friends and then followers; the same loop
    # body is reused by parameterizing (api func, limit, accumulator).
    for twitter_api_func, limit, ids, label in [
        [get_friends_ids, friends_limit, friends_ids, "friends"],
        [get_followers_ids, followers_limit, followers_ids, "followers"]
    ]:
        if limit == 0: continue
        cursor = -1
        while cursor != 0:
            # Use make_twitter_request via the partially bound callable...
            if screen_name:
                response = twitter_api_func(screen_name=screen_name, cursor=cursor)
            else: # user_id
                response = twitter_api_func(user_id=user_id, cursor=cursor)
            if response is not None:
                ids += response['ids']
                cursor = response['next_cursor']
            # XXX: You may want to store data during each iteration to provide an
            # an additional layer of protection from exceptional circumstances
            if len(ids) >= limit or response is None:
                break
    # Do something useful with the IDs, like store them to disk...
    return friends_ids[:friends_limit], followers_ids[:followers_limit]
def crawl_followers(twitter_api, screen_name, first_connection, user_id, limit=5000):
    """Breadth-first crawl of the friend/follower graph around ``user_id``.

    ``first_connection`` seeds the frontier (the root's own connections).
    Returns ``(edge, nodes)`` where ``edge`` is a list of (source, target)
    pairs -- the root's edges use the sentinel source id 0, which downstream
    centrality code deletes -- and ``nodes`` lists every id seen (with
    duplicates, up to roughly ``limit`` entries).

    Fixes over the original:
    * ``nodes.extend(connection)`` ran twice per crawled user, double-counting
      every neighbour; each connection is now recorded once.
    * the ``while`` loop now also stops when the frontier empties, instead of
      spinning forever when fewer than ``limit`` nodes are reachable.
    ``screen_name`` is kept for interface compatibility but unused.
    """
    next_queue = first_connection
    nodes = [user_id]
    nodes.extend(first_connection)
    # Root edges carry source id 0 (sentinel removed later by centrality()).
    edge = list(zip([0] * len(first_connection), first_connection))
    while len(nodes) <= limit and next_queue:
        queue, next_queue = next_queue, []
        for fid in queue:
            friends_ids, follower_ids = get_friends_followers_ids(twitter_api, user_id=fid)
            connection = friends_ids + follower_ids
            nodes.extend(connection)
            edge.extend(zip([fid] * len(connection), connection))
            next_queue.extend(connection)
            if len(nodes) > limit:
                break
    return edge, nodes
def BMP(s):
    """Replace every character outside the Basic Multilingual Plane with U+FFFD.

    Bug fix: the original compared ``ord(i) < 10000`` (decimal), which also
    mangled in-BMP characters such as most CJK; the BMP boundary is 0x10000,
    matching the ``non_bmp_map`` tables (``range(0x10000, ...)``) used
    elsewhere in this script.
    """
    return "".join(ch if ord(ch) < 0x10000 else '\ufffd' for ch in s)
def get_my_object(twitter_api, screen_names=None, user_ids=None):
    """Look up user profiles and return a flat list of per-user fields.

    For each user appends: id, screen name, (latest tweet text, only when a
    'status' field is present), location, followers_count, friends_count.
    NOTE(review): when a user has no 'status' the row is one element shorter,
    which misaligns the flat list against the 6-column DataFrame built in
    ``store_data`` -- confirm against the caller.
    NOTE(review): followers_count is appended before friends_count, but the
    DataFrame columns in ``store_data`` name friend_count before
    follower_count -- the two columns appear swapped; verify.
    """
    # Must have either screen_name or user_id (logical xor)
    assert (screen_names != None) != (user_ids != None), \
        "Must have screen_names or user_ids, but not both"
    items_to_info = []
    if screen_names:
        response = make_twitter_request(twitter_api.users.lookup,
                                        screen_name=screen_names)
    else:
        response = make_twitter_request(twitter_api.users.lookup,
                                        user_id=user_ids)
    #print(response)
    for user_info in response:
        items_to_info.append(str(user_info['id']))
        items_to_info.append(user_info['screen_name'])
        print(user_info['screen_name'])
        if 'status' in user_info:
            content=user_info['status']['text']
            content=BMP(content)
            print(content)
            # round-trip through GBK (dropping unencodable characters) --
            # presumably to keep the text printable on a GBK console; confirm.
            contentgbk=content.encode('gbk','ignore');
            content=contentgbk.decode('gbk','ignore');
            items_to_info.append(content)
        if user_info['location'] == '':
            items_to_info.append('')
        else:
            items_to_info.append(user_info['location'])
        items_to_info.append(str(user_info['followers_count']))
        items_to_info.append(str(user_info['friends_count']))
    return items_to_info
def store_data(screen_name,limit):
    """Crawl the network around ``screen_name`` and persist the results.

    Writes per-user profile rows to ``data.csv`` (read back by ``load_inf``)
    and the crawled edge list to ``edge.pickle`` (read back by ``load_edge``).
    """
    premium_search_args = oauth_login()
    response = make_twitter_request(premium_search_args.users.lookup,
                                    screen_name=screen_name)
    user_id = ''
    for r in response:
        user_id = r['id']
    # seed connections of the root user, capped at 100 friends + 100 followers
    friends_ids, followers_ids = get_friends_followers_ids(premium_search_args, screen_name=screen_name,
                                                           friends_limit=100,
                                                           followers_limit=100)
    connection = []
    connection.extend(friends_ids)
    connection.extend(followers_ids)
    edge,nodes = crawl_followers(premium_search_args, screen_name, connection, user_id,limit=limit)
    print("edge")
    print(edge)
    print(nodes)
    all_user_info = []
    with open('data.csv', 'w',encoding='utf8') as file:
        # one profile lookup per distinct crawled node
        for e in set(nodes):
            user_info = get_my_object(premium_search_args, user_ids=e)
            all_user_info.append(user_info)
        df = pandas.DataFrame(all_user_info,columns=['id','name','text','location','friend_count','follower_count'])
        pandas.DataFrame.to_csv(df,file,index=None,encoding="utf-8")
    with open('edge.pickle','wb') as file:
        pickle.dump(edge, file)
def load_edge():
    """Load the pickled edge list written by ``store_data`` from ``edge.pickle``."""
    with open('edge.pickle', 'rb') as handle:
        return pickle.load(handle)
# load data from the file
# x:id y:screen name z:text content w:number of friends v:number of followers
#
#
def load_inf():
    """Read ``data.csv`` (written by ``store_data``) and return five columns:
    id, screen name, tweet text, friend count, follower count.

    The file is read headerless, so the first row (the real header) is
    sliced off each column; column 3 (location) is intentionally skipped.
    """
    frame = pandas.read_csv('data.csv', header=None, sep=',')
    ids, names, texts, friends, followers = (
        frame[col][1:] for col in (0, 1, 2, 4, 5)
    )
    return ids, names, texts, friends, followers
import math
import matplotlib.pyplot as plt
# calculate the centrality of every node in the graph
def centrality():
    """Build the crawled graph and return (closeness, degree, betweenness).

    Closeness is returned sorted by score, descending, with node 0 removed
    (0 is the sentinel source id used for the root's edges in
    ``crawl_followers``).
    """
    edge = load_edge()
    G=nx.Graph()
    G.add_edges_from(edge)
    # NOTE(review): `G.node` was removed in networkx >= 2.4 (use G.nodes);
    # the value is unused here anyway.
    nodes=G.node
    #print(nodes)
    cloness=nx.closeness_centrality(G)
    cloness=dict(sorted(cloness.items(),key=lambda item:item[1],reverse=True))
    degree=nx.degree_centrality(G)
    betweenness=nx.betweenness_centrality(G)
    # drop the sentinel root node
    del cloness[0]
    #print(cloness)
    return cloness,degree,betweenness
def networkplot():
    """Print basic graph statistics and save a drawing to final_edge.png.

    NOTE(review): nx.diameter / average_shortest_path_length raise on a
    disconnected graph -- confirm the crawled graph is connected.
    """
    edge = load_edge()
    G = nx.Graph()
    G.add_edges_from(edge)
    print("the diameter is ", nx.diameter(G))
    print("the average distance is ", nx.average_shortest_path_length(G))
    print('number of nodes:', G.number_of_nodes())
    print('number of edges:', G.number_of_edges())
    nx.draw(G, with_labels=False, font_weight='bold')
    plt.savefig("final_edge.png")
    plt.show()
#find every hashtag after "#"
def My_Filter(mystr):
    """Extract lowercased hashtags (length >= 2, '#' included) from a tweet.

    Bug fix: the tokenizer was constructed as ``TweetTokenizer(mystr)``;
    its first parameter is ``preserve_case``, not the text, so the tweet was
    silently consumed as a flag.  It is now constructed with defaults and the
    text is passed to ``tokenize`` only.  A large block of dead commented-out
    stemming/stopword code was removed.
    """
    tokenizer = TweetTokenizer()
    tokens = tokenizer.tokenize(mystr)
    # keep only hashtag tokens, normalised to lower case
    hashtags = [token.lower() for token in tokens if token[0] == '#']
    return [tag for tag in hashtags if len(tag) >= 2]
#get the most common hashtags from the community
def getcommonword(x,y,z):
    """Return the three most frequent hashtags across all tweet texts in ``z``.

    ``x``/``y``/``z`` are the id/name/text columns from ``load_inf`` (pandas
    Series indexed from 1).  The choose1/2/3 lists of matching users are
    built but unused beyond debugging.
    NOTE(review): ``range(1, z.shape[0])`` iterates indices 1..len-1 on a
    Series indexed 1..len, apparently skipping the last row -- confirm.
    """
    combine=[];
    allwords=[];
    for i in range(1,z.shape[0]):
        mystr=""
        t=type(0.0)
        # pandas reads empty cells as float NaN; treat those as empty text
        if type(z[i])==t:
            mystr=""
        else:
            mystr=z[i]
        # strip non-BMP characters (emoji etc.) before tokenizing
        non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)
        mystr=mystr.translate(non_bmp_map)
        #print(mystr)
        words=My_Filter(mystr)
        allwords.append(words)
        for j in range(0,len(words)):
            combine.append(words[j])
    #print(allwords)
    fdist = FreqDist(combine)
    #print("fdist")
    #print(fdist.most_common(3))
    popular3=[]
    for k in range(0,3):
        popular3.append(fdist.most_common(100)[k][0])
    #print(popular3)
    choose1=[]
    choose2=[]
    choose3=[]
    # collect the users whose tweets contain any of the top-3 hashtags
    for k in range(0,len(allwords)):
        for n in range(0,len(popular3)):
            if popular3[n] in allwords[k]:
                choose1.append(x[k+1])
                choose2.append(y[k+1])
                choose3.append(allwords[k])
    #print(choose1)
    #print(choose2)
    #print(choose3)
    return popular3
#length_filtered = [w for w in tokens_filtered if len(w)>=3]
# The main function to do recommended based on friends and followers/closeness centrality/hashtag
def jqpart():
    """Main analysis/recommendation report.

    Loads the crawled data, prints the three most popular hashtags, then
    prints the top-3 users by friend count, by follower count, and by
    closeness centrality (with their latest tweet text).
    Assumes ``data.csv`` and ``edge.pickle`` already exist.
    """
    x,y,z,w,v=load_inf()
    # id -> screen name / tweet text lookup tables
    xx=list(map(int, x))
    d=zip(xx,y)
    id_name=dict(d)
    d=zip(xx,z)
    id_text=dict(d)
    # replace NaN counts with '0' so the int() conversion below succeeds
    # NOTE(review): this mutates the pandas Series element-wise via chained
    # indexing, which may warn or no-op on newer pandas -- confirm.
    for i in range(1,len(w)):
        if (math.isnan(float(w[i]))):
            w[i]='0'
    for i in range(1,len(v)):
        if (math.isnan(float(v[i]))):
            v[i]='0'
    ww=list(map(int, w))
    d=zip(xx,ww)
    id_friends=dict(d)
    vv=list(map(int, v))
    d=zip(xx,vv)
    id_followers=dict(d)
    screen_name=[]
    popular3=getcommonword(x,y,z)
    print("The most popular three words=")
    print(popular3)
    #user_id,screen_name=getcommonword(x,y,z)
    #user_id = list(map(int, user_id))
    closeness,degree,betweenness=centrality()
    #print("closeness")
    #print(closeness)
    #print("id_name")
    #print(id_name)
    #p = {key:value for key, value in closeness.items() if key in user_id}
    #p = {key:value for key, value in degree.items() if key in user_id}
    #p = {key:value for key, value in betweenness.items() if key in user_id}
    #print(user_id)
    #print(p)
    # sort the lookup dicts by count, descending, so the first n keys are the top-n
    id_friends=dict(sorted(id_friends.items(),key=lambda item:item[1],reverse=True))
    #print(id_friends)
    id_followers=dict(sorted(id_followers.items(),key=lambda item:item[1],reverse=True))
    #sorted(closeness.items(),key=lambda item:item[1])
    n=3
    result1=[]
    result2=[]
    result3=[]
    print("Most friends")
    for i in range(0,n):
        k = list(id_friends.keys())[i]
        j=i+1
        print("No."+str(j))
        print("User_ID="+str(k))
        print("closeness="+str(closeness[k]))
        print("ScreenName="+id_name[k])
        print("friends="+str(id_friends[k]))
        print("followers="+str(id_followers[k]))
        # strip non-BMP characters so the tweet prints on limited consoles
        non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)
        idstr=id_text[k].translate(non_bmp_map)
        print("His text="+idstr)
        result1.append(k)
    print("Most followers")
    for i in range(0,n):
        k = list(id_followers.keys())[i]
        j=i+1
        print("No."+str(j))
        print("User_ID="+str(k))
        print("closeness="+str(closeness[k]))
        print("ScreenName="+id_name[k])
        print("friends="+str(id_friends[k]))
        print("followers="+str(id_followers[k]))
        non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)
        idstr=id_text[k].translate(non_bmp_map)
        print("His text="+idstr)
        result2.append(k)
    print("Centrality")
    for i in range(0,n):
        k = list(closeness.keys())[i]
        j=i+1
        print("No."+str(j))
        print("User_ID="+str(k))
        print("closeness="+str(closeness[k]))
        print("ScreenName="+id_name[k])
        print("friends="+str(id_friends[k]))
        print("followers="+str(id_followers[k]))
        non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)
        idstr=id_text[k].translate(non_bmp_map)
        print("His text="+idstr)
        result3.append(k)
    """
    screen_name = 'stacynance85'
    node_limit = 1000
    store_data(screen_name,node_limit)
    networkplot()
    """
if __name__=='__main__':
    # Entry point: run the recommendation report.  Assumes data.csv and
    # edge.pickle were produced earlier (see store_data / networkplot).
    #networkplot()
    jqpart()
| helishah29/Recommendation-System-Twitter | Source_Code_Repo/popularity_hashtag.py | popularity_hashtag.py | py | 16,192 | python | en | code | 1 | github-code | 90 |
31139081908 | import igraph
def distinct_edges_traveled(path_list):
    """Return the set of unique edges appearing in any path of ``path_list``."""
    edges = set()
    for path in path_list:
        edges.update(path)
    return edges
def redundancy(path_list):
    """Ratio of total edges traveled to distinct edges traveled (>= 1.0).

    A value of 1 means no edge is reused across the paths; raises
    ZeroDivisionError for an empty path list.
    """
    unique_edges = {edge for path in path_list for edge in path}
    traveled = sum(len(path) for path in path_list)
    return traveled / len(unique_edges)
def normalized_jaccard_coefficient(list1, list2):
    """Jaccard-style overlap normalised by the *list* lengths.

    Returns |set(list1) ∩ set(list2)| / (len(list1) + len(list2) - |∩|);
    note the denominator counts duplicates, unlike the classic set Jaccard.
    Fix: removed the ``union`` set, which was computed but never used.
    """
    intersection = set(list1).intersection(list2)
    return len(intersection) / (len(list1) + len(list2) - len(intersection))
def compute_edge_load(path_list, edge_list):
    """Count, for every edge in ``edge_list``, how many path traversals use it.

    Edges never traveled keep a count of 0; a path edge missing from
    ``edge_list`` raises KeyError.
    """
    load = dict.fromkeys(edge_list, 0)
    for path in path_list:
        for edge in path:
            load[edge] += 1
    return load
def compute_edge_capacity(sumo_edges):
    """Hourly capacity per SUMO edge, keyed by edge id.

    Each edge's speed limit is converted from m/s to mph, then one of three
    band formulas applies: arterial (<= 45 mph), highway (45-60 mph) or
    freeway (>= 60 mph).  Edges must expose getSpeed / getLaneNumber / getID.
    """
    ms_to_mph = 2.2369362912
    arterial_factor = 0.5
    capacities = {}
    for road in sumo_edges:
        limit_mph = road.getSpeed() * ms_to_mph
        lanes = road.getLaneNumber()
        if limit_mph <= 45:
            # arterial road
            cap = 1900 * lanes * arterial_factor
        elif limit_mph < 60:
            # highway
            cap = (1000 + 20 * limit_mph) * lanes
        else:
            # freeway
            cap = (1700 + 10 * limit_mph) * lanes
        capacities[road.getID()] = cap
    return capacities
def compute_voc(edge_load, edge_capacity):
    """Volume-over-capacity ratio per edge.

    Both mappings must share keys; iteration follows ``edge_load``.
    """
    return {edge: edge_load[edge] / edge_capacity[edge] for edge in edge_load}
def dis(G, path0, path1, attribute):
    """Weighted dissimilarity of two edge-id paths in igraph graph ``G``.

    Computes 1 - (sum of ``attribute`` over shared edges / sum over all
    edges used by either path), so identical paths score 0 and disjoint
    paths score 1.
    """
    shared = set(path0) & set(path1)
    combined = set(path0) | set(path1)
    shared_weight = sum(G.es[shared][attribute])
    combined_weight = sum(G.es[combined][attribute])
    return 1 - shared_weight / combined_weight
def div(G, path_list, attribute):
    """Diversity of a path set: the minimum pairwise ``dis`` over all pairs.

    Fix: with exactly one path the original built an empty list and called
    ``min([])``, raising ValueError; fewer than two paths now returns 0
    (there is no pair to compare), consistent with the pre-existing
    empty-list behaviour.
    """
    if len(path_list) < 2:
        return 0
    pairwise = [
        dis(G, path_a, path_b, attribute)
        for i, path_a in enumerate(path_list)
        for path_b in path_list[i + 1:]
    ]
    return min(pairwise)
def compute_driver_sources(list_routes, edge_tile_dict, origin=True):
    """For every edge, count traversing routes grouped by the tile of each
    route's endpoint.

    ``origin=True`` groups by the route's first edge's tile (its source);
    ``origin=False`` by the last edge's tile (its destination).  Returns
    ``{edge: {tile: count}}``.

    Fix: the original rebound the boolean ``origin`` parameter to an edge id
    inside the loop; a separate local name avoids the shadowing.
    """
    endpoint_index = 0 if origin else -1
    ds_dict = {}
    for route in list_routes:
        source_tile = edge_tile_dict[route[endpoint_index]]
        for edge in route:
            tile_counts = ds_dict.setdefault(edge, {})
            tile_counts[source_tile] = tile_counts.get(source_tile, 0) + 1
    return ds_dict
def compute_MDS(ds_dict, traffic_threshold):
    """Major Driver Sources: per edge, keep the highest-flow sources whose
    cumulative flow first exceeds ``traffic_threshold`` of the edge's total.

    Sources are added in decreasing flow order as long as the flow already
    kept is still <= total * threshold (so the cut source is included).

    Fix: the original's ``while`` loop indexed past the end of the ranked
    list and raised IndexError whenever every source was consumed while the
    condition still held (e.g. ``traffic_threshold >= 1`` or all-zero
    flows); the loop now simply stops when the sources are exhausted.
    """
    mds_dict = {}
    for edge, sources in ds_dict.items():
        # driver sources sorted by flow, largest first
        ranked = sorted(sources.items(), key=lambda item: item[1], reverse=True)
        cutoff = sum(flow for _, flow in ranked) * traffic_threshold
        kept = {}
        accumulated = 0
        for tile, flow in ranked:
            if accumulated > cutoff:
                break
            kept[tile] = flow
            accumulated += flow
        mds_dict[edge] = kept
    return mds_dict
def compute_k_road(mds):
    """Number of major driver sources per edge (see ``compute_MDS``)."""
    return {edge: len(sources) for edge, sources in mds.items()}
def split_interval_sliding(a, b, s, slide):
    """Cover [a, b] with windows of width ``s`` advancing by ``slide``.

    Returns a list of ``[start, end]`` pairs; the final window's upper bound
    is bumped by one so half-open filtering (lo <= t < hi) also captures
    values equal to ``b``.

    Fix: the original indexed ``intervals[-1]`` unconditionally and raised
    IndexError whenever the range was shorter than one window; an empty
    list is now returned in that case.
    """
    intervals = []
    start = a
    while start + s <= b:
        intervals.append([start, start + s])
        start += slide
    if intervals:
        intervals[-1][1] += 1
    return intervals
def compute_temp_redundancy_sliding(dict_md, dict_path_tmp_red, window_s, slide):
    """Redundancy score for each sliding time window over the data's time span.

    ``dict_md`` maps keys of the form ``"<prefix>_<vehicle_id>"`` to metadata
    dicts containing a ``"time"`` field; ``dict_path_tmp_red`` maps integer
    vehicle ids to their paths.  Windows are built by
    ``split_interval_sliding`` over [min time, max time].
    """
    times = [meta["time"] for meta in dict_md.values()]
    windows = split_interval_sliding(min(times), max(times), window_s, slide)
    scores = []
    for lo, hi in windows:
        window_paths = [
            dict_path_tmp_red[int(key.split("_")[1])]
            for key, meta in dict_md.items()
            if lo <= meta["time"] < hi
        ]
        scores.append(redundancy(window_paths))
    return scores
| jonpappalord/geospatial_analytics | AlternativeRouting/routing_measures.py | routing_measures.py | py | 4,924 | python | en | code | 18 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.