seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
71622317224 | from gnocchi import gnocchi_api
def create_NN_file(instance_id):
    """Collect per-timestamp Gnocchi metrics for one instance and append
    training rows to a text file.

    Reads "<timestamp>,<response_time>" pairs from RTs.txt; for each
    timestamp, queries CPU/memory/network/disk metrics from Gnocchi and
    writes one CSV line: rt_ms,cpu,mem,net_in,net_out,disk_read,disk_write.
    """
    data = gnocchi_api("admin", "hamed", "admin")
    instance_resource_id = instance_id
    network_instance_resource_id = data.get_resource_id("instance_network_interface", instance_id)
    disk_instance_resource_id = data.get_resource_id("instance_disk", instance_id)
    # Open the output once (the original re-opened it on every iteration and
    # used the invalid mode "ar" -- append mode is "a").
    with open("/root/autoscale-cloud/get_data/RTs.txt") as rt_file, \
            open("./data_rt_rr_statwosysdsk.txt", "a") as out_file:
        for line in rt_file:
            timestamp, rt = line.split(",")
            # BUG FIX: the original multiplied the undefined name RT (NameError);
            # the parsed value is `rt`. Seconds -> milliseconds.
            rt_ms = float(rt) * 1000
            cpu_util = data.get_metric_value("cpu_util", "instance", instance_resource_id, timestamp)
            memory_usage = data.get_metric_value("memory.usage", "instance", instance_resource_id, timestamp)
            incoming_packet_rate = data.get_metric_value("network.incoming.packets.rate", "instance_network_interface", network_instance_resource_id, timestamp)
            outgoing_packet_rate = data.get_metric_value("network.outgoing.packets.rate", "instance_network_interface", network_instance_resource_id, timestamp)
            disk_read_rate = data.get_metric_value("disk.device.read.requests.rate", "instance_disk", disk_instance_resource_id, timestamp)
            disk_write_rate = data.get_metric_value("disk.device.write.requests.rate", "instance_disk", disk_instance_resource_id, timestamp)
            # BUG FIX: the original format string had 9 placeholders for only
            # 7 values, which raises TypeError at runtime.
            out_file.write("%0.3f,%0.3f,%0.3f,%0.3f,%0.3f,%0.3f,%0.3f\n"
                           % (rt_ms, cpu_util, memory_usage, incoming_packet_rate,
                              outgoing_packet_rate, disk_read_rate, disk_write_rate))

create_NN_file("3b4419bb-b94f-45a7-b699-3e9c9e3bc108")
| universcom/Cloud-AI-AutoScale | get_data/data_gathering.py | data_gathering.py | py | 1,768 | python | en | code | 0 | github-code | 36 |
43566759366 | # Connect to an "eval()" service over BLE UART.
import os
import sys
from adafruit_ble import BLERadio
from adafruit_ble.advertising.standard import ProvideServicesAdvertisement
from adafruit_ble.services.nordic import UARTService
ble = BLERadio()
ble.stop_scan()  # make sure no scan from a previous run is still active

uart_connection = None
while True:
    if not uart_connection:
        # Scan until we find an advertisement offering the Nordic UART service.
        print("Trying to connect...")
        for adv in ble.start_scan(ProvideServicesAdvertisement):
            if UARTService in adv.services:
                uart_connection = ble.connect(adv)
                print("Connected")
                break
        ble.stop_scan()

    if uart_connection and uart_connection.connected:
        uart_service = uart_connection[UARTService]
        while uart_connection.connected:
            # Forward one line from stdin to the peer and print its reply.
            line = sys.stdin.readline()
            uart_service.write(line.encode("utf-8"))
            print(uart_service.readline().decode("utf-8"))
        # BUG FIX: after the peer disconnects, the stale connection object is
        # still truthy, so the original never re-entered the scan branch and
        # busy-looped forever. Reset it so the next pass reconnects.
        uart_connection = None
| jappavoo/flashy | ble.py | ble.py | py | 1,214 | python | en | code | 0 | github-code | 36 |
29389754682 | class Solution:
def isValidSudoku(self, board: List[List[str]]) -> bool:
rows = [set() for _ in range(9)]
cols = [set() for _ in range(9)]
boxes = [set() for _ in range(9)]
for i in range(9):
for j in range(9):
cell = board[i][j]
if cell != '.':
if cell in rows[i] or cell in cols[j] or cell in boxes[3 * (i // 3) + (j // 3)]:
return False
rows[i].add(cell)
cols[j].add(cell)
boxes[3 * (i // 3) + (j // 3)].add(cell)
return True | AnotherPianist/LeetCode | 0036-valid-sudoku/0036-valid-sudoku.py | 0036-valid-sudoku.py | py | 629 | python | en | code | 1 | github-code | 36 |
14365200747 | #Import dependencies
import json
import pandas as pd
import numpy as np
import re
from sqlalchemy import create_engine
import psycopg2
from config import db_password
import time
#Set file directory
file_dir = '/Users/mariacarter/Desktop/Berkeley-Bootcamp/Analysis-Projects/Movies-ETL/Resources/'
def process_ETL(wiki_movies, kaggle_metadata, ratings):
    """Extract, transform and load the Wikipedia/Kaggle/MovieLens movie data.

    Args:
        wiki_movies: file name (inside ``file_dir``) of the Wikipedia JSON dump.
        kaggle_metadata: file name of the Kaggle movie-metadata CSV.
        ratings: file name of the MovieLens ratings CSV.

    Side effects: writes the cleaned/merged ``movies`` table and the raw
    ``ratings`` table to the ``movie_data`` PostgreSQL database.
    """
    # ---------------- Extract ----------------
    with open(f'{file_dir}/' + wiki_movies, mode='r') as file:
        wiki_movies_raw = json.load(file)
    kaggle_metadata = pd.read_csv(f'{file_dir}/' + kaggle_metadata, low_memory=False)
    ratings = pd.read_csv(f'{file_dir}/' + ratings)

    # Keep actual movies: must have a director and an IMDb link, and must not
    # be a TV series (series records carry a 'No. of episodes' key).
    wiki_movies = [movie for movie in wiki_movies_raw
                   if ('Director' in movie or 'Directed by' in movie)
                   and 'imdb_link' in movie
                   and 'No. of episodes' not in movie]

    def clean_movie(movie):
        """Normalize one raw Wikipedia movie record (non-destructive copy)."""
        movie = dict(movie)  # create a non-destructive copy
        # Fold every language-specific title key into a single alt_titles dict.
        alt_titles = {}
        for key in ['Also known as', 'Arabic', 'Cantonese', 'Chinese', 'French',
                    'Hangul', 'Hebrew', 'Hepburn', 'Japanese', 'Literally',
                    'Mandarin', 'McCune–Reischauer', 'Original title', 'Polish',
                    'Revised Romanization', 'Romanized', 'Russian',
                    'Simplified', 'Traditional', 'Yiddish']:
            if key in movie:
                alt_titles[key] = movie.pop(key)
        if alt_titles:
            movie['alt_titles'] = alt_titles

        # Merge the many spelling variants of a column into one canonical name.
        def change_column_name(old_name, new_name):
            if old_name in movie:
                movie[new_name] = movie.pop(old_name)
        change_column_name('Adaptation by', 'Writer(s)')
        change_column_name('Country of origin', 'Country')
        change_column_name('Directed by', 'Director')
        change_column_name('Distributed by', 'Distributor')
        change_column_name('Edited by', 'Editor(s)')
        change_column_name('Length', 'Running time')
        change_column_name('Original release', 'Release date')
        change_column_name('Music by', 'Composer(s)')
        change_column_name('Produced by', 'Producer(s)')
        change_column_name('Producer', 'Producer(s)')
        change_column_name('Productioncompanies', 'Production company(s)')
        change_column_name('Productioncompanies ', 'Production company(s)')
        change_column_name('Productioncompany', 'Production company(s)')
        change_column_name('Productioncompany ', 'Production company(s)')
        change_column_name('Released', 'Release date')
        change_column_name('Released Date', 'Release date')
        change_column_name('Screen story by', 'Writer(s)')
        change_column_name('Screenplay by', 'Writer(s)')
        change_column_name('Story by', 'Writer(s)')
        change_column_name('Theme music composer', 'Composer(s)')
        change_column_name('Written by', 'Writer(s)')
        return movie

    clean_movies = [clean_movie(movie) for movie in wiki_movies]
    wiki_movies_df = pd.DataFrame(clean_movies)

    # Extract the IMDb ID from the link and de-duplicate on it.
    wiki_movies_df['imdb_id'] = wiki_movies_df['imdb_link'].str.extract(r'(tt\d{7})')
    wiki_movies_df.drop_duplicates(subset='imdb_id', inplace=True)

    # Drop columns that are more than 90% null.
    wiki_columns_to_keep = [column for column in wiki_movies_df.columns
                            if wiki_movies_df[column].isnull().sum() < len(wiki_movies_df) * 0.9]
    wiki_movies_df = wiki_movies_df[wiki_columns_to_keep]

    # ---------------- Transform: box office ----------------
    box_office = wiki_movies_df['Box office'].dropna().apply(
        lambda x: ''.join(x) if type(x) == list else x)
    # Money forms: "$123.4 million/billion" and "$123,456,789".
    form_one = r'\$\s*\d+\.?\d*\s*[mb]illi?on'
    form_two = r'\$\s*\d{1,3}(?:[,\.]\d{3})+(?!\s[mb]illion)'
    # Ranges like "$1-$5 million": keep only the upper bound.
    box_office = box_office.str.replace(r'\$.*[-—–](?![a-z])', '$', regex=True)

    def parse_dollars(s):
        """Convert a '$…' string (million/billion/plain) to a float; NaN otherwise."""
        if type(s) != str:
            return np.nan
        if re.match(r'\$\s*\d+\.?\d*\s*milli?on', s, flags=re.IGNORECASE):
            # Remove dollar sign, whitespace and the word "million".
            s = re.sub(r'\$|\s|[a-zA-Z]', '', s)
            return float(s) * 10**6
        elif re.match(r'\$\s*\d+\.?\d*\s*billi?on', s, flags=re.IGNORECASE):
            s = re.sub(r'\$|\s|[a-zA-Z]', '', s)
            return float(s) * 10**9
        elif re.match(r'\$\s*\d{1,3}(?:[,\.]\d{3})+(?!\s[mb]illion)', s, flags=re.IGNORECASE):
            # Remove dollar sign and thousands separators.
            s = re.sub(r'\$|,', '', s)
            return float(s)
        else:
            return np.nan

    wiki_movies_df['box_office'] = box_office.str.extract(
        f'({form_one}|{form_two})', flags=re.IGNORECASE)[0].apply(parse_dollars)
    wiki_movies_df.drop('Box office', axis=1, inplace=True)

    # ---------------- Transform: budget ----------------
    budget = wiki_movies_df['Budget'].dropna().apply(
        lambda x: ''.join(x) if type(x) == list else x)
    # Keep only the upper bound of budget ranges. (The original applied this
    # identical replace twice; once is sufficient.)
    budget = budget.str.replace(r'\$.*[-—–](?![a-z])', '$', regex=True)
    # Strip citation references like "[3]".
    # BUG FIX: the original pattern r'\[\d+\]s*' matched a literal 's'; the
    # intent was optional trailing whitespace (\s*).
    budget = budget.str.replace(r'\[\d+\]\s*', '', regex=True)
    wiki_movies_df['budget'] = budget.str.extract(
        f'({form_one}|{form_two})', flags=re.IGNORECASE)[0].apply(parse_dollars)
    wiki_movies_df.drop('Budget', axis=1, inplace=True)

    # ---------------- Transform: release date ----------------
    release_date = wiki_movies_df['Release date'].dropna().apply(
        lambda x: ''.join(x) if type(x) == list else x)
    # The four date layouts present in the data.
    date_form_one = r'(?:January|February|March|April|May|June|July|August|September|October|November|December)\s[123]\d,\s\d{4}'
    date_form_two = r'\d{4}.[01]\d.[123]\d'
    date_form_three = r'(?:January|February|March|April|May|June|July|August|September|October|November|December)\s\d{4}'
    date_form_four = r'\d{4}'
    # infer_datetime_format=True because multiple formats are mixed together.
    wiki_movies_df['release_date'] = pd.to_datetime(
        release_date.str.extract(f'({date_form_one}|{date_form_two}|{date_form_three}|{date_form_four})')[0],
        infer_datetime_format=True)

    # ---------------- Transform: running time ----------------
    running_time = wiki_movies_df['Running time'].dropna().apply(
        lambda x: ' '.join(x) if type(x) == list else x)
    # Capture either "H hour(s) M" or a plain "M m[in.]" form.
    running_time_extract = running_time.str.extract(r'(\d+)\s*ho?u?r?s?\s*(\d*)|(\d+)\s*m')
    running_time_extract = running_time_extract.apply(
        lambda col: pd.to_numeric(col, errors='coerce')).fillna(0)
    # Normalise to minutes: hours*60 + minutes unless a pure-minutes match exists.
    wiki_movies_df['running_time'] = running_time_extract.apply(
        lambda row: row[0] * 60 + row[1] if row[2] == 0 else row[2], axis=1)
    wiki_movies_df.drop('Running time', axis=1, inplace=True)

    # ---------------- Transform: Kaggle metadata ----------------
    # Keep only non-adult rows, then drop the 'adult' flag entirely.
    kaggle_metadata = kaggle_metadata[kaggle_metadata['adult'] == 'False'].drop('adult', axis='columns')
    kaggle_metadata['video'] = kaggle_metadata['video'] == 'True'
    # errors='raise' so non-numeric junk surfaces instead of silently becoming NaN.
    kaggle_metadata['budget'] = kaggle_metadata['budget'].astype(int)
    kaggle_metadata['id'] = pd.to_numeric(kaggle_metadata['id'], errors='raise')
    kaggle_metadata['popularity'] = pd.to_numeric(kaggle_metadata['popularity'], errors='raise')
    kaggle_metadata['release_date'] = pd.to_datetime(kaggle_metadata['release_date'])

    # Ratings timestamps are Unix epoch seconds.
    # (The original also called ratings.info(null_counts=True), a purely
    # diagnostic print whose parameter was removed in modern pandas.)
    ratings['timestamp'] = pd.to_datetime(ratings['timestamp'], unit='s')

    # ---------------- Merge ----------------
    movies_df = pd.merge(wiki_movies_df, kaggle_metadata, on='imdb_id', suffixes=['_wiki', '_kaggle'])
    # Drop the wild outlier (aka 'The Holiday'): wiki date after 1996 merged
    # with a kaggle date before 1965.
    movies_df = movies_df.drop(movies_df[(movies_df['release_date_wiki'] > '1996-01-01')
                                         & (movies_df['release_date_kaggle'] < '1965-01-01')].index)
    # Kaggle columns win where they duplicate Wikipedia columns.
    movies_df.drop(columns=['title_wiki', 'release_date_wiki', 'Language', 'Production company(s)'], inplace=True)

    def fill_missing_kaggle_data(df, kaggle_column, wiki_column):
        """Backfill zero Kaggle values from the wiki column, then drop the wiki column."""
        df[kaggle_column] = df.apply(
            lambda row: row[wiki_column] if row[kaggle_column] == 0 else row[kaggle_column],
            axis=1)
        df.drop(columns=wiki_column, inplace=True)

    fill_missing_kaggle_data(movies_df, 'runtime', 'running_time')
    fill_missing_kaggle_data(movies_df, 'budget_kaggle', 'budget_wiki')
    fill_missing_kaggle_data(movies_df, 'revenue', 'box_office')

    # Report any columns holding only one value (lists made hashable as tuples
    # so value_counts can run).
    for col in movies_df.columns:
        lists_to_tuples = lambda x: tuple(x) if type(x) == list else x
        value_counts = movies_df[col].apply(lists_to_tuples).value_counts(dropna=False)
        if len(value_counts) == 1:
            print(col)

    # Reorder the columns into a sensible presentation order.
    movies_df = movies_df[['imdb_id','id','title_kaggle','original_title','tagline','belongs_to_collection','url','imdb_link',
                           'runtime','budget_kaggle','revenue','release_date_kaggle','popularity','vote_average','vote_count',
                           'genres','original_language','overview','spoken_languages','Country',
                           'production_companies','production_countries','Distributor',
                           'Producer(s)','Director','Starring','Cinematography','Editor(s)','Writer(s)','Composer(s)','Based on'
                           ]]
    # Rename to snake_case / canonical names.
    movies_df.rename({'id': 'kaggle_id',
                      'title_kaggle': 'title',
                      'url': 'wikipedia_url',
                      'budget_kaggle': 'budget',
                      'release_date_kaggle': 'release_date',
                      'Country': 'country',
                      'Distributor': 'distributor',
                      'Producer(s)': 'producers',
                      'Director': 'director',
                      'Starring': 'starring',
                      'Cinematography': 'cinematography',
                      'Editor(s)': 'editors',
                      'Writer(s)': 'writers',
                      'Composer(s)': 'composers',
                      'Based on': 'based_on'
                      }, axis='columns', inplace=True)

    # Count how many times a movie received a given rating; prepend "rating_"
    # to each pivoted column. (Computed but, as in the original, not loaded.)
    rating_counts = ratings.groupby(['movieId', 'rating'], as_index=False).count() \
        .rename({'userId': 'count'}, axis=1) \
        .pivot(index='movieId', columns='rating', values='count')
    rating_counts.columns = ['rating_' + str(col) for col in rating_counts.columns]

    # ---------------- Load ----------------
    # NOTE(review): SQLAlchemy >= 1.4 only accepts the 'postgresql://' scheme;
    # 'postgres://' works on older versions — confirm the installed pin.
    db_string = f"postgres://postgres:{db_password}@127.0.0.1:5432/movie_data"
    engine = create_engine(db_string)
    # Replace the movies table on re-runs instead of failing when it exists.
    movies_df.to_sql(name='movies', con=engine, if_exists='replace')

    rows_imported = 0
    start_time = time.time()
    for chunk_index, data in enumerate(pd.read_csv(f'{file_dir}ratings.csv', chunksize=1000000)):
        print(f'importing rows {rows_imported} to {rows_imported + len(data)}...', end='')
        # BUG FIX: the original passed if_exists='replace' for *every* chunk,
        # so each chunk dropped the previous ones and only the final chunk
        # survived. Replace once, then append.
        data.to_sql(name='ratings', con=engine, index=False,
                    if_exists='replace' if chunk_index == 0 else 'append')
        rows_imported += len(data)
        print(f'Done. {time.time() - start_time} total seconds elapsed')

process_ETL("wikipedia.movies.json", "movies_metadata.csv", "ratings.csv")
7704128693 | # -*- coding:utf-8 -*-
"""
Evluate the performance of embedding via different methods.
"""
import math
import numpy as np
from sklearn import metrics
from sklearn import utils as sktools
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import SpectralClustering
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report, balanced_accuracy_score, f1_score, precision_score, \
recall_score
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
def cluster_evaluate(embeddings, labels, n_class, metric="euclidean"):
    """Cluster the embeddings with single-linkage agglomerative clustering and
    report clustering quality against the ground-truth structural roles:
    homogeneity, completeness, v-measure, silhouette, plus accuracy/macro-F1
    of the raw cluster ids against the labels.

    Returns (homogeneity, completeness, v_measure, silhouette).
    """
    predicted = AgglomerativeClustering(
        n_clusters=n_class, linkage='single', affinity=metric
    ).fit_predict(embeddings)
    homogeneity, completeness, v_score = metrics.homogeneity_completeness_v_measure(labels, predicted)
    silhouette = metrics.silhouette_score(embeddings, predicted)
    acc = accuracy_score(labels, predicted)
    macro = f1_score(labels, predicted, average="macro")
    print("cluster:", predicted, "labels:", labels)
    print("accuracy: ", acc)
    print("macro_score: ", macro)
    print("homogeneity: ", homogeneity)
    print("completeness: ", completeness)
    print("v-score: ", v_score)
    print("silhouette: ", silhouette)
    return homogeneity, completeness, v_score, silhouette
def LR_evaluate(data, labels, cv=5):
    """Score the embeddings with a one-vs-rest logistic regression via
    cross-validation; prints the per-fold scores and returns their mean."""
    shuffled_data, shuffled_labels = sktools.shuffle(data, labels)
    classifier = LogisticRegression(solver="lbfgs", penalty='l2', max_iter=1000, multi_class='ovr')
    test_scores = cross_val_score(classifier, shuffled_data, y=shuffled_labels, cv=cv)
    print(f"LR: tests scores={test_scores}, mean_score={np.mean(test_scores)}\n")
    return np.mean(test_scores)
def KNN_evaluate(data, labels, metric="minkowski", cv=5, n_neighbor=10):
    """KNN classification on node similarities, run before embedding, to
    validate the quality of the hierarchical similarity measure.
    Prints the per-fold accuracies and returns their mean."""
    shuffled_data, shuffled_labels = sktools.shuffle(data, labels)
    classifier = KNeighborsClassifier(weights='uniform', algorithm="auto",
                                      n_neighbors=n_neighbor, metric=metric, p=2)
    test_scores = cross_val_score(classifier, shuffled_data, y=shuffled_labels,
                                  cv=cv, scoring="accuracy")
    print(f"KNN: tests scores:{test_scores}, mean_score={np.mean(test_scores)}\n")
    return np.mean(test_scores)
def evalute_results(labels: list, preds: list):
    """Bundle the standard classification metrics for (labels, preds) into a
    dict, print it, and return it."""
    res = {
        "accuracy": accuracy_score(labels, preds),
        "balanced accuracy": balanced_accuracy_score(labels, preds),
        "micro precision": precision_score(labels, preds, average="micro"),
        "micro recall": recall_score(labels, preds, average="micro"),
        "macro f1": f1_score(labels, preds, average="macro"),
        "micro f1": f1_score(labels, preds, average="micro"),
        "report": classification_report(labels, preds, digits=7),
    }
    print(res)
    return res
def spectral_cluster_evaluate(data, labels, n_cluster, affinity="rbf"):
    """Run spectral clustering and print clustering-quality metrics.

    :param data: similarity matrix or embedding vectors; when
        affinity="precomputed", a distance matrix that is converted to a
        similarity matrix via an RBF kernel (per the sklearn guidance).
    :param labels: ground-truth labels.
    :param n_cluster: number of clusters.
    :param affinity: "precomputed" || "rbf"
    :return: None (prints the metrics).
    """
    metric = "euclidean"
    if affinity == "precomputed":
        # sklearn guidance: when `data` is a distance matrix rather than a
        # similarity matrix, convert it with an RBF kernel first.
        distance_mat = data
        delta = math.sqrt(2)
        data = np.exp(-distance_mat ** 2 / (2. * delta ** 2))
        metric = affinity
    clustering = SpectralClustering(n_clusters=n_cluster, affinity=affinity, n_init=50, random_state=42)
    preds = clustering.fit_predict(data)
    h, c, v = metrics.homogeneity_completeness_v_measure(labels, preds)
    # BUG FIX: the original referenced the undefined name `embeddings`
    # (NameError at runtime); the intended argument is `data`.
    # NOTE(review): with affinity="precomputed", `data` is now a *similarity*
    # matrix while silhouette_score(metric="precomputed") expects distances —
    # confirm this is intended.
    s1 = metrics.silhouette_score(data, labels, metric=metric)
    s2 = metrics.silhouette_score(data, preds, metric=metric)
    print(f"homogenetiy: {h}, completeness: {c}, v_measure: {v}, silhouette_score label: {s1}, silhouette_score pred: {s2}\n")
| Sngunfei/HSD | tools/evaluate.py | evaluate.py | py | 5,117 | python | en | code | 3 | github-code | 36 |
14049615322 | from django.shortcuts import render
from admin_dashboard.models import Review, Package
from django.contrib import messages
def home(request):
    """Render the landing page with every review and package."""
    context = {
        'title': 'Home',
        'reviews': Review.objects.all(),
        'packages': Package.objects.all(),
        'request': request,
    }
    return render(request, 'index.html', context)
def handler404(request, exception, template_name='404.html'):
    """Custom 404 handler: render the error template with a 404 status.

    BUG FIX: the original called render_to_response(), which is not imported
    in this module and was removed in Django 3.0; render() can set the
    status code directly.
    """
    return render(request, template_name, status=404)
def handler500(request, exception, template_name='500.html'):
    """Custom 500 handler: render the error template with a 500 status.

    BUG FIX: the original called render_to_response(), which is not imported
    in this module and was removed in Django 3.0.
    NOTE(review): Django invokes handler500 with only `request`; confirm how
    this view is wired before relying on the `exception` argument.
    """
    return render(request, template_name, status=500)
40568030755 | from __future__ import annotations
import random
from datetime import timedelta
from typing import Type
from game.theater import FrontLine
from game.utils import Distance, Speed, feet
from .capbuilder import CapBuilder
from .invalidobjectivelocation import InvalidObjectiveLocation
from .patrolling import PatrollingFlightPlan, PatrollingLayout
from .waypointbuilder import WaypointBuilder
class BarCapFlightPlan(PatrollingFlightPlan[PatrollingLayout]):
    """Flight plan for BARCAP (barrier combat air patrol) flights."""

    @staticmethod
    def builder_type() -> Type[Builder]:
        return Builder

    @property
    def patrol_duration(self) -> timedelta:
        # Time on station comes from the coalition's doctrine.
        return self.flight.coalition.doctrine.cap_duration

    @property
    def patrol_speed(self) -> Speed:
        # Airframe-specific preferred speed at the racetrack's start altitude.
        return self.flight.unit_type.preferred_patrol_speed(
            self.layout.patrol_start.alt
        )

    @property
    def engagement_distance(self) -> Distance:
        # Radius within which the CAP is expected to commit, per doctrine.
        return self.flight.coalition.doctrine.cap_engagement_range
class Builder(CapBuilder[BarCapFlightPlan, PatrollingLayout]):
    """Builds the waypoint layout for a BARCAP flight."""

    def layout(self) -> PatrollingLayout:
        # BARCAP targets must be static objectives; front lines are rejected.
        location = self.package.target

        if isinstance(location, FrontLine):
            raise InvalidObjectiveLocation(self.flight.flight_type, location)

        start_pos, end_pos = self.cap_racetrack_for_objective(location, barcap=True)

        # Randomize the patrol altitude by -2000..+1000 ft around the
        # airframe's preferred altitude, clamped to the doctrine's limits.
        preferred_alt = self.flight.unit_type.preferred_patrol_altitude
        randomized_alt = preferred_alt + feet(random.randint(-2, 1) * 1000)
        patrol_alt = max(
            self.doctrine.min_patrol_altitude,
            min(self.doctrine.max_patrol_altitude, randomized_alt),
        )

        builder = WaypointBuilder(self.flight, self.coalition)
        start, end = builder.race_track(start_pos, end_pos, patrol_alt)
        # Assemble: takeoff -> nav to racetrack -> patrol -> nav home -> land,
        # plus optional divert field and the bullseye reference point.
        return PatrollingLayout(
            departure=builder.takeoff(self.flight.departure),
            nav_to=builder.nav_path(
                self.flight.departure.position, start.position, patrol_alt
            ),
            nav_from=builder.nav_path(
                end.position, self.flight.arrival.position, patrol_alt
            ),
            patrol_start=start,
            patrol_end=end,
            arrival=builder.land(self.flight.arrival),
            divert=builder.divert(self.flight.divert),
            bullseye=builder.bullseye(),
        )

    def build(self, dump_debug_info: bool = False) -> BarCapFlightPlan:
        return BarCapFlightPlan(self.flight, self.layout())
| dcs-liberation/dcs_liberation | game/ato/flightplans/barcap.py | barcap.py | py | 2,444 | python | en | code | 647 | github-code | 36 |
33865589189 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
class Migration(migrations.Migration):
    # Auto-generated migration: creates the Chat model (bot FK, date, log).

    dependencies = [
        ('bot', '0007_bot_logo'),
    ]

    operations = [
        migrations.CreateModel(
            name='Chat',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # NOTE(review): this default is a datetime frozen at
                # makemigrations time, not "now" at insert time — the model
                # probably intended django.utils.timezone.now; confirm.
                ('date', models.DateTimeField(default=datetime.datetime(2016, 5, 15, 20, 22, 16, 862988))),
                ('log', models.TextField()),
                ('bot', models.ForeignKey(to='bot.Bot')),
            ],
        ),
    ]
| DenerRodrigues/Chatterbot | bot/migrations/0008_chat.py | 0008_chat.py | py | 681 | python | en | code | 0 | github-code | 36 |
4255077844 | """
Introduction
Given the weights and profits of 'N' items, we are asked to put these items in a knapsack that has a capacity 'C'. The goal is to get the maximum profit from the items in the knapsack. The only difference between the "0/1 Knapsack" problem and this problem is that we are allowed to use an unlimited quantity of an item.
Let's take the example of Merry, who wants to carry some fruits in the knapsack to get maximum profit. Here are the weights and profits of the fruits:
Items: { Apple, Orange, Melon }
Weights: { 1, 2, 3 }
Profits: { 15, 20, 50 }
Knapsack capacity: 5
Let's try to put different combinations of fruits in the knapsack, such that their total weight is not more than 5.
5 Apples (total weight 5) => 75 profit
1 Apple + 2 Oranges (total weight 5) => 55 profit
2 Apples + 1 Melon (total weight 5) => 80 profit
1 Orange + 1 Melon (total weight 5) => 70 profit
This shows that 2 apples + 1 melon is the best combination, as it gives us the maximum profit and the total weight does not exceed the capacity.
Problem Statement
Given two integer arrays to represent weights and profits of 'N' items, we need to find a subset of these items which will give us maximum profit such that their cumulative weight is not more than a given number 'C'. We can assume an infinite supply of item quantities; therefore, each item can be selected multiple times.
"""
def solve_knapsack_recursive(profits, weights, capacity):
    """Unbounded knapsack, top-down with memoization: maximum profit
    achievable within `capacity`, each item usable unlimited times."""
    memo = [[-1] * (capacity + 1) for _ in profits]
    return recurse(profits, weights, capacity, 0, memo)


def recurse(profits, weights, curr_capacity, i, dp):
    """Best profit using items i.. with curr_capacity left; dp memoizes
    (index, capacity) -> profit, with -1 marking an unsolved state."""
    if i >= len(profits) or curr_capacity < 0:
        return 0
    if dp[i][curr_capacity] == -1:
        # Option 1: take item i (stay at i — unbounded supply), if it fits.
        take = 0
        if weights[i] <= curr_capacity:
            take = profits[i] + recurse(profits, weights, curr_capacity - weights[i], i, dp)
        # Option 2: skip item i for good.
        skip = recurse(profits, weights, curr_capacity, i + 1, dp)
        dp[i][curr_capacity] = max(take, skip)
    return dp[i][curr_capacity]
def solve_knapsack_iterative(profits, weights, capacity):
    """Unbounded knapsack, bottom-up.

    dp[i][c] = best profit using items 0..i with capacity c; because each
    item may be picked repeatedly, the "take" transition stays in row i
    (dp[i][c - weights[i]]).

    Returns 0 for an empty item list — the original raised IndexError on
    `dp[-1]` in that case.
    """
    if not profits:
        return 0
    n = len(profits)
    dp = [[0] * (capacity + 1) for _ in range(n)]
    # First row: only item 0 is available; take it as many times as it fits.
    for c in range(weights[0], capacity + 1):
        dp[0][c] = profits[0] + dp[0][c - weights[0]]
    for i in range(1, n):
        for c in range(1, capacity + 1):
            take = profits[i] + dp[i][c - weights[i]] if weights[i] <= c else 0
            dp[i][c] = max(take, dp[i - 1][c])
    return dp[-1][capacity]
| blhwong/algos_py | grokking_dp/unbounded_knapsack/unbounded_knapsack/main.py | main.py | py | 2,617 | python | en | code | 0 | github-code | 36 |
21393775413 | """
5 element API Client
"""
from typing import Optional, Tuple
from bgd.constants import FIFTHELEMENT
from bgd.responses import GameSearchResult, Price
from bgd.services.abc import GameSearchResultFactory
from bgd.services.api_clients import GameSearcher, JsonHttpApiClient
from bgd.services.base import CurrencyExchangeRateService, GameSearchService
from bgd.services.constants import GET
from bgd.services.responses import APIResponse
class FifthElementApiClient(JsonHttpApiClient):
    """Api client for 5element.by"""

    BASE_SEARCH_URL = "https://api.multisearch.io"

    async def search(self, query: str, options: Optional[dict] = None) -> APIResponse:
        """Issue an autocomplete search; `options` must carry the multisearch
        application id under "search_app_id"."""
        app_id = options["search_app_id"]  # type: ignore
        path = f"?query={query}&id={app_id}&lang=ru&autocomplete=true"
        return await self.connect(GET, self.BASE_SEARCH_URL, path)
class FifthElementSearchService(GameSearchService):
    """Search service for 5element.by"""

    def __init__(
        self,
        client: GameSearcher,
        result_factory: GameSearchResultFactory,
        currency_exchange_rate_converter: CurrencyExchangeRateService,
        game_category_id: str,
        search_app_id: str,
    ) -> None:
        """Init 5th element Search Service"""
        # there are more than one category that we should check
        # (game_category_id is a comma-separated list of category ids)
        self._game_category_ids = game_category_id.split(",")
        super().__init__(client, result_factory, currency_exchange_rate_converter)
        self._search_app_id = search_app_id

    async def do_search(self, query: str, *args, **kwargs) -> Tuple[GameSearchResult]:
        # Query the API with this service's app id, keep only available board
        # games, and convert them into GameSearchResult objects.
        response = await self._client.search(query, {"search_app_id": self._search_app_id})
        products = self.filter_results(
            response.response["results"]["items"], self._is_available_game
        )
        return self.build_results(products)

    def _is_available_game(self, product: dict) -> bool:
        """True if it's available board game"""
        # In stock AND categorised under one of the configured game categories.
        return (
            product["is_presence"]
            and product["params_data"]["category_id"] in self._game_category_ids
        )
class FifthElementGameSearchResultFactory:
    """Builder for GameSearch results from 5element"""

    BASE_URL = "https://5element.by"

    def create(self, search_result: dict) -> GameSearchResult:
        """Assemble a GameSearchResult from one raw 5element product dict."""
        return GameSearchResult(
            description="",
            images=self._extract_images(search_result),
            location=None,
            owner=None,
            prices=[self._extract_price(search_result)],
            source=FIFTHELEMENT,
            subject=search_result["name"],
            url=self._extract_url(search_result),
        )

    @staticmethod
    def _extract_images(product: dict) -> list[str]:
        """The API exposes a single picture per product; wrap it in a list."""
        return [product["picture"]]

    @staticmethod
    def _extract_price(product: dict) -> Optional[Price]:
        """Scale the listed price by 100 (presumably to minor currency
        units — confirm against Price's contract)."""
        return Price(amount=product["price"] * 100)

    def _extract_url(self, product: dict) -> str:
        """Prefix the relative product path with the shop's base URL."""
        return f"{self.BASE_URL}{product['url']}"
| ar0ne/bg_deal | bgd/services/apis/fifth_element.py | fifth_element.py | py | 3,213 | python | en | code | 0 | github-code | 36 |
15827248022 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import unittest
import importlib
import os
from emission.core.wrapper.trip_old import Coordinate
import requests
import emission.core.wrapper.entry as ecwe
import emission.analysis.intake.cleaning.clean_and_resample as clean
import emission.net.ext_service.geocoder.nominatim as eco
#Setting query URLs
OPENSTREETMAP_QUERY_URL = os.environ.get("OPENSTREETMAP_QUERY_URL")
GEOFABRIK_QUERY_URL = os.environ.get("GEOFABRIK_QUERY_URL")
NOMINATIM_CONTAINER_URL = os.environ.get("NOMINATIM_CONTAINER_URL")
class NominatimTest(unittest.TestCase):
maxDiff = None
    def setUp(self):
        # Creates a fake, cleaned place in Rhode Island (Providence
        # coordinates, stored GeoJSON-style as [lon, lat]) for the tests below.
        fake_id = "place_in_rhodeisland"
        key = "segmentation/raw_place"
        write_ts = 1694344333
        data = {'source': 'FakeTripGenerator','location': {'type': 'Point', 'coordinates': [-71.4128343, 41.8239891]}}
        fake_place = ecwe.Entry.create_fake_entry(fake_id, key, data, write_ts)
        self.fake_place = fake_place
#When a nominatim service is called, we set the value of the NOMINATIM_QUERY_URL environment variable in nominatim.py and re-load the module.
def nominatim(service):
if service == "container":
os.environ["NOMINATIM_QUERY_URL"] = NOMINATIM_CONTAINER_URL
importlib.reload(eco)
elif service == "geofabrik":
os.environ["NOMINATIM_QUERY_URL"] = GEOFABRIK_QUERY_URL
importlib.reload(eco)
elif service == "OSM":
os.environ["NOMINATIM_QUERY_URL"] = OPENSTREETMAP_QUERY_URL
importlib.reload(eco)
#Basic query to check that OSM, the Rhode Island Container, and geofabrik are returning the same data.
def test_geofabrik_and_nominatim(self):
lat, lon = 41.8239891, -71.4128343
NominatimTest.nominatim("container")
container_result = eco.Geocoder.get_json_reverse(lat,lon)
NominatimTest.nominatim("OSM")
osm_result = eco.Geocoder.get_json_reverse(lat,lon)
NominatimTest.nominatim("geofabrik")
geofabrik_result = eco.Geocoder.get_json_reverse(lat,lon)
key_list = ['osm_id', 'boundingbox']
for k in key_list:
self.assertEqual(osm_result[k], geofabrik_result[k])
self.assertEqual(container_result[k], geofabrik_result[k])
#Checks the display name generated by get_filtered_place in clean_and_resample.py, which creates a cleaned place from the fake place
# and reverse geocodes with the coordinates.
def test_get_filtered_place(self):
fake_place_raw = self.fake_place
fake_place_data = clean.get_filtered_place(fake_place_raw).__getattr__("data")
actual_result = fake_place_data.__getattr__("display_name")
expected_result = "Dorrance Street, Providence"
self.assertEqual(expected_result, actual_result)
#Testing make_url_geo, which creates a query URL from the input string.
def test_make_url_geo(self):
expected_result = GEOFABRIK_QUERY_URL + "/search?q=Providence%2C+Rhode+Island&format=json"
NominatimTest.nominatim("geofabrik")
actual_result = eco.Geocoder.make_url_geo("Providence, Rhode Island")
self.assertEqual(expected_result, actual_result)
#Testing make_url_reverse, which creates a query url from a lat and lon.
def test_make_url_reverse(self):
NominatimTest.nominatim("geofabrik")
lat, lon = 41.8239891, -71.4128343
expected_result = GEOFABRIK_QUERY_URL + (f"/reverse?lat={lat}&lon={lon}&format=json")
actual_result = (eco.Geocoder.make_url_reverse(lat, lon))
self.assertEqual(expected_result, actual_result)
#Testing get_json_geo, which passes in an address as a query. Compares three select k,v pairs in the results.
def test_get_json_geo(self):
NominatimTest.nominatim("geofabrik")
expected_result = {'place_id': 132490, 'licence': 'Data © OpenStreetMap contributors, ODbL 1.0. https://osm.org/copyright', 'osm_type': 'way', 'osm_id': 141567710, 'boundingbox': ['41.8325787', '41.8332278', '-71.4161848', '-71.4152064'], 'lat': '41.8330097', 'lon': '-71.41568124868104', 'display_name': 'State of Rhode Island Department of Administration, 1, Park Street, Downtown, Providence, Providence County, Rhode Island, 02908, United States', 'class': 'building', 'type': 'civic', 'importance': 1.75001}
actual_result = eco.Geocoder.get_json_geo("State of Rhode Island Department of Administration, 1, Park Street, Downtown, Providence, Providence County, 02908, United States")[0]
key_list = ['osm_id', 'boundingbox', 'display_name']
for k in key_list:
self.assertEqual(expected_result[k], actual_result[k])
#Testing the geocode function, which passes in an address and gets latitude and longitude.
# Test creates instance of coordinates using coordinate class. Getting lat and lon of the coordinate using get_lat and get_lon methods from the class.
def test_geocode(self):
NominatimTest.nominatim("geofabrik")
expected_result_lon = Coordinate(41.8239891, -71.4128343).get_lon()
expected_result_lat = Coordinate(41.8239891, -71.4128343).get_lat()
actual_result = eco.Geocoder.geocode("Providence, Rhode Island")
actual_result_lon = actual_result.get_lon()
actual_result_lat = actual_result.get_lat()
self.assertEqual(expected_result_lon, actual_result_lon)
self.assertEqual(expected_result_lat, actual_result_lat)
#Testing get_json_reverse, which reverse geocodes from a lat and lon. Tested result was modified to only look at the name returned with the coordinates, rather than the entire dictionary.
def test_get_json_reverse(self):
NominatimTest.nominatim("geofabrik")
expected_result = "Providence City Hall"
actual_result = eco.Geocoder.get_json_reverse(41.8239891, -71.4128343)["display_name"].split(",")[0]
self.assertEqual(expected_result, actual_result)
#Testing reverse_geocode, which reverse geocodes from a lat and lon and returns only the display name.
def test_reverse_geocode(self):
NominatimTest.nominatim("geofabrik")
expected_result = "Portugal Parkway, Fox Point, Providence, Providence County, Rhode Island, 02906, United States"
actual_result = eco.Geocoder.reverse_geocode(41.8174476, -71.3903767)
self.assertEqual(expected_result, actual_result)
if __name__ == '__main__':
unittest.main() | e-mission/e-mission-server | emission/individual_tests/TestNominatim.py | TestNominatim.py | py | 6,717 | python | en | code | 22 | github-code | 36 |
28297076587 | #!/bin/python3
"""
Creates new training data for all language directions (META_LANGS) according to META_RECIPES.
This may take some time, but is written in a functional way, so the files are not loaded into memory
in larger pieces.
"""
import file_utils
from recipes import META_RECIPES
import os
import argparse
parser = argparse.ArgumentParser(description='Create experiment datasets')
parser.add_argument('-r', '--recipes', nargs='+',
help='Recipes', required=True)
parser.add_argument('-l', '--langs', nargs='+',
help='Languages', required=True)
args = parser.parse_args()
args.recipes = set(args.recipes)
META_LANGS = {
'csen': 'cs-en',
'encs': 'cs-en',
'ende': 'de-en',
}
for lpairtrue, lpairorig in META_LANGS.items():
if not lpairtrue in args.langs:
continue
lsrc = lpairtrue[:2]
ltgt = lpairtrue[2:]
for meta_recipe_name, meta_recipe_generator in META_RECIPES.items():
if not meta_recipe_name in args.recipes:
continue
print(f'Processing recipe #{meta_recipe_name} for {lpairtrue}')
meta_recipe = {}
meta_recipe['generator_partial'] = meta_recipe_generator
meta_recipe['ftrans'] = f'teacher/train.{lpairorig}.{ltgt}'
meta_recipe['spm_model'] = f'models/teacher/{lpairtrue}/vocab.spm'
# create new keys and delete the old ones
meta_recipe['fsrc'] = f'original/train.{lpairorig}.{lsrc}'
meta_recipe['ftgt'] = f'original/train.{lpairorig}.{ltgt}'
meta_recipe['fnew_src'] = f'experiment/{meta_recipe_name}/{lpairtrue}/train.{lpairorig}.{lsrc}'
meta_recipe['fnew_tgt'] = f'experiment/{meta_recipe_name}/{lpairtrue}/train.{lpairorig}.{ltgt}'
# run the job
file_utils.load_process_save(**meta_recipe)
| zouharvi/reference-mt-distill | src/create_data.py | create_data.py | py | 1,805 | python | en | code | 0 | github-code | 36 |
42218937553 | import os
import pandas as pd
import time
from pathlib import Path
import shutil
from bs4 import BeautifulSoup
#loading all the files in the memory and parsing it using beautiful soap to get key financials
#problem in reading coal india.
#due to unrecognized encoding, it throws error
#we add encoding="utf8"
df=pd.read_csv('ndtv_nifty50.csv', names=['sr','link'])
df.drop('sr', axis=1, inplace=True)
df.drop(0,axis=0, inplace=True)
##stock_path=r'C:\My Program Files\Python\Python35-32\work\nse\Rediff\profit.ndtv.com\stock'
stock_path=r'C:\Program Files (x86)\Python\Python36-32\work\nse\Rediff\profit.ndtv.com\stock'
print(stock_path)
i=0
columns=[]
df1=pd.DataFrame()
try:
for item in df['link']:
company=item.split('/')[4]
data=[]
i=i+1
if company!='':#'coal-india-ltd_coalindia':
stock_index_file=Path(stock_path +'\\'+ company +'.html')
stock_index_dir=Path(stock_path +'\\'+ company)
if stock_index_file.is_file() and stock_index_dir.is_dir():
print('Reading data for company '+company)
f=open(str(stock_index_file),'r', encoding="utf8")
html=f.read()
soup=BeautifulSoup(html,"html.parser")
table=soup.find(id='keyfunda')
ticker=company.split('_')[1]
data.append(ticker)
columns.append('ticker')
for row in table.find_all('tr'):
j=0
for td in row.find_all('td'):
j=j+1
if j>1:
data.append(td.getText())
if j<=1 and i==1:
columns.append(td.getText())
if i==1:
df1=pd.DataFrame(data=[data],columns=columns)
else:
df1.loc[i]=data
df1.to_csv('key_fin.csv')
## if ticker=='coalindia':
## break
except Exception as e:
print(str(e))
# time.sleep(1)
| santoshjsh/invest | nse_7.py | nse_7.py | py | 2,312 | python | en | code | 0 | github-code | 36 |
5543843400 | N = int(input())
b = 0
a = -1
for i in range(1, N+1):
x = int(input())
if x <= 437:
a = i
break
if a != -1:
print(f'crash {a}')
else:
print('No crash')
| Grigorij-Kuzmin/Python | Автобусная экскурсия.py | Автобусная экскурсия.py | py | 184 | python | en | code | 0 | github-code | 36 |
35206645553 | from django.contrib import admin
from .models import *
class CostInline(admin.TabularInline):
model = Cost
extra = 0
class CoordinatesAdmin(admin.ModelAdmin):
list_display = ('latitude', 'longitude')
class LanguageAdmin(admin.ModelAdmin):
list_display = ('name', 'population')
class CountryAdmin(admin.ModelAdmin):
filter_horizontal = ('languages',)
list_display = ('name', 'emoji', 'population', 'is_marker')
class CityAdmin(admin.ModelAdmin):
list_display = ('name', 'country', 'population', 'hemisphere', 'continent', 'coastal', 'elevation', 'coordinates')
def save_model(self, request, obj, form, change):
from mapbox import Geocoder
import json
import os
if not form.cleaned_data['manual_coordinates']:
if ', ' in form.cleaned_data['name']:
location_list = form.cleaned_data['name'].split(', ')
location_list.reverse()
name = "{0}, {1}".format(form.cleaned_data['country'].name, ', '.join(location_list))
else:
name = "{0}, {1}".format(form.cleaned_data['country'].name, form.cleaned_data['name'])
try:
geocoder = Geocoder(access_token=os.getenv('MAPBOX_ACCESS_KEY'))
response = geocoder.forward(name)
mapbox_coords = response.json()['features'][0]['center']
coordinates = Coordinates.objects.create(longitude=mapbox_coords[0], latitude=mapbox_coords[1])
coordinates.save()
obj.coordinates = coordinates
super(CityAdmin, self).save_model(request, obj, form, change)
except:
super(CityAdmin, self).save_model(request, obj, form, change)
else:
super(CityAdmin, self).save_model(request, obj, form, change)
class SenderAdmin(admin.ModelAdmin):
list_display = ('name', 'country', 'gender', 'is_avatar')
actions = ['update_achievements']
def update_achievements(self, request, queryset):
for sender in queryset:
sender.update_achievements()
# class FormatPaperAdmin(admin.ModelAdmin):
# class TagAdmin(admin.ModelAdmin):
class NewspaperAdmin(admin.ModelAdmin):
filter_horizontal = ('senders', 'tags',)
list_display = ('title', 'city', 'id', 'number', 'number_2', 'date', 'language', 'is_photo', 'is_thumbnail', 'top')
inlines = [CostInline]
def save_model(self, request, obj, form, change):
tag_ids = [ tag.id for tag in form.cleaned_data['tags'] ]
year_tag, created = Tag.objects.get_or_create(name=obj.date.year)
year_tag.save()
tag_ids.append(year_tag.id)
continent_tag, created = Tag.objects.get_or_create(name=obj.city.continent)
continent_tag.save()
tag_ids.append(continent_tag.id)
color_tag, created = Tag.objects.get_or_create(name=obj.color)
color_tag.save()
tag_ids.append(color_tag.id)
if obj.city.coastal:
coastal_tag, created = Tag.objects.get_or_create(name='Coastal')
coastal_tag.save()
tag_ids.append(coastal_tag.id)
if obj.pravda():
pravda_tag, created = Tag.objects.get_or_create(name='Правда')
pravda_tag.save()
tag_ids.append(pravda_tag.id)
if obj.metro():
metro_tag, created = Tag.objects.get_or_create(name='Metro')
metro_tag.save()
tag_ids.append(metro_tag.id)
if obj.not_official_language():
not_official_language_tag, created = Tag.objects.get_or_create(name='Not Official Language')
not_official_language_tag.save()
tag_ids.append(not_official_language_tag.id)
if obj.city.is_polar():
polar_tag, created = Tag.objects.get_or_create(name='Polar')
polar_tag.save()
tag_ids.append(polar_tag.id)
if obj.frequency:
frequency_tag, created = Tag.objects.get_or_create(name=obj.frequency)
frequency_tag.save()
tag_ids.append(frequency_tag.id)
if obj.type_newspaper != 'Newspaper':
type_tag, created = Tag.objects.get_or_create(name=obj.type_newspaper)
type_tag.save()
tag_ids.append(type_tag.id)
if obj.format_paper:
format_tag, created = Tag.objects.get_or_create(name=obj.format_paper.name)
format_tag.save()
tag_ids.append(format_tag.id)
form.cleaned_data['tags'] = Tag.objects.order_by('name').filter(id__in=tag_ids)
super(NewspaperAdmin, self).save_model(request, obj, form, change)
class CurrencyAdmin(admin.ModelAdmin):
list_display = ('name', 'symbol', 'code', 'order')
admin.site.register(Achievement)
admin.site.register(Coordinates, CoordinatesAdmin)
admin.site.register(Language, LanguageAdmin)
admin.site.register(Country, CountryAdmin)
admin.site.register(City, CityAdmin)
admin.site.register(Sender, SenderAdmin)
admin.site.register(FormatPaper)
admin.site.register(Tag)
admin.site.register(Newspaper, NewspaperAdmin)
admin.site.register(Currency, CurrencyAdmin)
admin.site.register(Cost)
| borisovodov/np | app/admin.py | admin.py | py | 4,586 | python | en | code | 0 | github-code | 36 |
262396930 | from bs4 import BeautifulSoup
import requests
import requests # request img from web
import shutil # save img locally
URL1 = 'http://localhost'
page = requests.get(URL1)
soup = BeautifulSoup(page.content, 'html.parser')
print(soup.prettify())
image_tags = soup.find_all('img')
for image_tag in image_tags:
url = image_tag['src']
res = requests.get(url, stream=True)
url = url[url.rfind("/")+1:]
print(url)
with open("res/img/ "+url, 'wb') as f:
shutil.copyfileobj(res.raw, f)
print('Image sucessfully Downloaded: ', url)
| FukudaYoshiro/singo | saudi/import image.py | import image.py | py | 555 | python | en | code | 4 | github-code | 36 |
15098306537 | #
#
#
import pandas as pd
import geopandas as gpd
from zipfile import ZipFile
from pathlib import Path
import sys,time
def Read_Glob_Bldg( country_geojson ):
CACHE = './PICKLE'
DIR = Path( '/home/phisan/GeoData/GlobML_BldgFP' )
GEOJSON = DIR.joinpath( country_geojson )
STEM = GEOJSON.stem
PICKLE = GEOJSON.parents[0].joinpath( CACHE, STEM+'.bz2' )
if PICKLE.is_file():
print( f'Reading cached "{PICKLE}"...' )
df = pd.read_pickle( PICKLE )
else:
print( f'Reading "{GEOJSON}" might take time ...' )
df = gpd.read_file( GEOJSON )
print( f'Writing cache "{PICKLE}"...')
df.to_pickle( PICKLE, compression='infer' )
return df
def Read_GADM( SYMB ):
''' SYMB = THA | LAO '''
SHP = f'/home/phisan/GeoData/GADM/{SYMB}/gadm40_{SYMB}_1.shp'
df = gpd.read_file( SHP )
return df
#import pdb; pdb.set_trace()
def MakeCentroid( dfBLDG, SYMB ):
FILE_CEN = Path(f'CACHE/dfCENTR_{SYMB}.bz2')
if FILE_CEN.is_file():
print( f'Reading cached "{FILE_CEN}"...' )
dfCENTR = pd.read_pickle( FILE_CEN )
else:
print( f'Caculate centroid ...' )
dfCENTR = dfBLDG[['geometry']].copy()
dfCENTR['geometry'] = dfCENTR['geometry'].centroid
print( f'Writing "{FILE_CEN}" ...' )
dfCENTR.to_pickle( FILE_CEN, compression='infer' )
return dfCENTR
#################################################################
#################################################################
#################################################################
FR,TO = int(sys.argv[1]) , int(sys.argv[2])
COUNTRY = 'Thailand.geojsonl', 'THA'
#COUNTRY = 'Laos.geojsonl', 'LAO'
dfADM = Read_GADM( COUNTRY[1] )
for i in range(FR,TO):
PROV = dfADM.iloc[i:i+1]
print( f'Check processing {i} {PROV.iloc[0].NAME_1} ok...' )
#import pdb; pdb.set_trace()
dfBLDG = Read_Glob_Bldg( COUNTRY[0] )
dfCENTR = MakeCentroid( dfBLDG, COUNTRY[1] )
for i in range(FR,TO):
print( '===========================================')
BEG = time.time()
PROV = dfADM.iloc[i:i+1]
print( f'Processing {i} {PROV.iloc[0].NAME_1} ' )
PROV_NAME = PROV.iloc[0].NAME_1
xmin,ymin,xmax,ymax = PROV.total_bounds
dfCENTR_ = dfCENTR.cx[xmin:xmax, ymin:ymax].copy()
#import pdb; pdb.set_trace()
df_bldg_prov = gpd.sjoin( dfCENTR_, PROV, how='inner', predicate='intersects' )
if len(df_bldg_prov)>0:
df_bldg_prov.to_file( f'CACHE/{i}_{PROV_NAME}.gpkg', driver='GPKG',
layer='Bldg_Centroid' )
dfBLDG.loc[df_bldg_prov.index].to_file( f'CACHE/{i}_{PROV_NAME}.gpkg',
driver='GPKG', layer='Bldg_Polygon' )
SUM_PROV = len( df_bldg_prov )
print( f'Buildings in province : {SUM_PROV:,} ...' )
END = time.time(); ELAP = END-BEG; print( f'{ELAP:,.0f} sec')
print( '=================== Finish ====================')
| phisan-chula/Thai_Bldg_Model | BreakProv_Bldg.py | BreakProv_Bldg.py | py | 2,944 | python | en | code | 0 | github-code | 36 |
28780066181 | """
HackerRank Python Numpy Polynomials
author: Manny egalli64@gmail.com
info: http://thisthread.blogspot.com/
https://www.hackerrank.com/challenges/np-polynomials/problem
given the coefficients of a polynomial, find its value at x
"""
import numpy as np
values = tuple(map(float, input().split()))
x = float(input())
print(np.polyval(values, x))
| egalli64/pythonesque | hr/numpy/polynomials.py | polynomials.py | py | 357 | python | en | code | 17 | github-code | 36 |
35809219103 | #! python2
# -*- coding: utf-8 -*-
import scrapy
import csv
import time
from sys import exit
import os
import logging
from scrapy import signals
from . import wikimallbottodbmy
import re
#from scrapy.utils.log import configure_logging
class WikimallbotSpider(scrapy.Spider):
name = 'wikimallbot'
allowed_domains = ['id.wikipedia.org']
start_urls = ['https://id.wikipedia.org/wiki/Daftar_pusat_perbelanjaan_di_Indonesia']
dir_path = os.path.dirname(os.path.realpath(__file__))
#configure_logging(install_root_handler = False)
#logging.basicConfig (
# filename = dir_path + '/../out/wikimall_log.txt',
# format = '%(levelname)s: %(message)s',
# level = logging.WARNING
#)
timestr = time.strftime("%Y%m%d-%H%M%S")
filename1 = dir_path + '/../out/wikimall_%s.csv' % timestr
filename2 = dir_path + '/../out/wikimall_detail_%s.csv' % timestr
filename3 = dir_path + '/../out/wikimall_links.csv'
fieldnames = ['id_ai','prov','kabkot','nama_mall','detail_link']
fieldnames_detail = ['nama_mall','alamat','lokasi','pemilik','pengembang','pengurus','tanggal_dibuka','jumlah_toko_dan_jasa','jumlah_toko_induk','total_luas_pertokoan','jumlah_lantai','parkir','situs_web','kantor','didirikan','industri','akses_transportasi_umum','pendapatan','arsitek']
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = super(WikimallbotSpider, cls).from_crawler(crawler, *args, **kwargs)
crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
return spider
def spider_closed(self, spider):
spider.logger.info('Signal sent then Spider closed. file out is : %s', self.filename1)
#self.connect()
#bumntodb.readcsvandupdate(self.allowed_domains[0],self.filename1)
wikimallbottodbmy.readcsvandupdate(self.allowed_domains[0],self.filename1)
wikimallbottodbmy.readcsvandupdate(self.allowed_domains[0],self.filename2)
# saving to mysql should load here
def parse(self, response):
#mw-headline
myyield = {'id_ai': 1}
open(self.filename3, 'a').close()
with open(self.filename2, 'a') as f:
w = csv.DictWriter(f, self.fieldnames_detail, lineterminator='\n', delimiter='|')
w.writeheader()
with open(self.filename1, 'a') as f: # Just use 'w' mode in 3.x
iterasi = 1
rows = response.css('div.mw-parser-output')
prov = ''
kabkot = ''
for row in rows.css('*') :
if row.xpath('name()').get() == 'h2' :
#myyield['id_ai'] = iterasi
myyield['prov'] = row.css('::text').get()
prov = row.css('::text').get()
#myyield['test'] = row.css('::text').get()
subiterasi = 1
if row.xpath('name()').get() == 'dl' :
if row.css('dt > a::text') :
myyield['id_ai'] = subiterasi
myyield['prov'] = prov
myyield['kabkot'] = row.css('dt > a::text').get()
kabkot = row.css('dt > a::text').get()
if row.xpath('name()').get() == 'li' :
if row.css('li') and row.css('li *::text') :
myyield['id_ai'] = iterasi
myyield['prov'] = prov.encode('utf-8')
myyield['kabkot'] = kabkot.encode('utf-8')
myyield['nama_mall'] = re.sub(r'[^\x00-\x7F]+',' ', (row.css('li *::text').get().encode('utf-8')))
if row.css('li > a::attr(href)') :
detail_link = response.urljoin(row.css('li > a::attr(href)').get().encode('utf-8'))
if 'index.php' not in detail_link :
myyield['detail_link'] = detail_link.encode('utf-8')
#yield scrapy.Request(detail_link.encode('utf-8'), self.parse_detail, meta={'timestr':timestr,'iterasi':iterasi,'row':myyield})
#with open(self.dir_path + '/../out/wikimall_links.csv', 'a') as f2:
# w2 = csv.DictWriter(f2, self.fieldnames, lineterminator='\n', delimiter='|')
# w2.writerow(myyield)
else :
myyield['detail_link'] = ''
else :
myyield['detail_link'] = ''
#link_detail = response.urljoin(link_detail)
iterasi += 1
subiterasi += 1
w = csv.DictWriter(f, self.fieldnames, lineterminator='\n', delimiter='|')
if iterasi ==2 :
w.writeheader()
w.writerow(myyield)
with open(self.filename3, 'a') as f2:
w2 = csv.DictWriter(f2, self.fieldnames, lineterminator='\n', delimiter='|')
if iterasi ==2 :
w2.writeheader()
w2.writerow(myyield)
for link in response.css('div.mw-parser-output li > a::attr(href)').getall() :
if 'index.php' not in link :
if ':' not in link.encode('utf-8') :
yield scrapy.Request(response.urljoin(link.encode('utf-8')), self.parse_detail)
#def parse_detail(self, response) :
# print(response.css('table.infobox tr').get())
def parse_detail(self,response) :
myyield = {'nama_mall': response.css('h1.firstHeading::text').get()}
with open(self.filename2, 'a') as f:
if response.css('table.infobox tr') :
rows = response.css('table.infobox tr')
for row in rows :
if row.css('th::text') and row.css('td *::text') :
#self.log('key file %s' % row.css('th::text').get())
if row.css('th::text').get().encode('utf-8').lower().replace(" ", "_").replace("/", "_").replace(",", "||") in self.fieldnames_detail :
if len(row.css('td *::text').getall()) > 1 :
myyield[row.css('th::text').get().encode('utf-8').lower().replace(" ", "_").replace("/", "_").replace(",", "||")] = re.sub(r'[^\x00-\x7F]+',' ', (' '.join(t.encode('utf-8').replace("\n", "").strip() for t in row.css('td *::text').getall()).strip()))
else :
myyield[row.css('th::text').get().encode('utf-8').lower().replace(" ", "_").replace("/", "_").replace(",", "||")] = re.sub(r'[^\x00-\x7F]+',' ', (row.css('td *::text').get().encode('utf-8').replace("\n", "")))
else :
myyield['alamat'] = ''
myyield['lokasi'] = ''
myyield['pemilik'] = ''
myyield['pengembang'] = ''
myyield['pengurus'] = ''
myyield['tanggal_dibuka'] = ''
myyield['jumlah_toko_dan_jasa'] = ''
myyield['jumlah_toko_induk'] = ''
myyield['total_luas_pertokoan'] = ''
myyield['jumlah_lantai'] = ''
myyield['parkir'] = ''
myyield['situs_web'] = ''
myyield['kantor'] = ''
myyield['didirikan'] = ''
myyield['industri'] = ''
myyield['akses_transportasi_umum'] = ''
myyield['pendapatan'] = ''
myyield['arsitek'] = ''
w = csv.DictWriter(f, self.fieldnames_detail, lineterminator='\n', delimiter='|')
#if response.meta.get('iterasi') ==2 :
# w.writeheader()
w.writerow(myyield) | rizanurhadi/webscraping1 | spiders/wikimallbot.py | wikimallbot.py | py | 7,971 | python | en | code | 0 | github-code | 36 |
41057893138 | lista_compras = ['leite em pó', 'mamão', 'queijo']
id_clientes_2 = (100, 125, 478, 547, 565)
cod_uf = {'mg':31, 'sp':35}
pessoa = 'Marcelo'
num_pessoas = 274
dist_km = 25.23
custo_carro = '500,00'
capitais_sul_br = ['Porto Alegre', 'Curitiba', 'Florianópolis'] | marcusvco/Simple-Side-Projects | IGTI/Python/trabalho_pratico.py | trabalho_pratico.py | py | 270 | python | pt | code | 0 | github-code | 36 |
16858290153 | # Enter your code here. Read input from STDIN. Print output to STDOUT
from collections import deque
deq = deque()
for i in range(int(input())):
command = input().split()
if command[0] == 'append':
deq.append(command[1])
elif command[0] == 'appendleft':
deq.appendleft(command[1])
elif command[0] == 'pop':
deq.pop()
elif command[0] == 'popleft':
deq.popleft()
print(*deq)
'''EVAL is not the answer to everything.
AND it's dangerous''' | polemeest/daily_practice | hackerrank_deque.py | hackerrank_deque.py | py | 490 | python | en | code | 0 | github-code | 36 |
198052217 | from django.views import generic
from digifarming.models import User, Staff, Rating, \
RequestType, Commodity, Supply, \
Order, OrderItem, UserTrackingMovements, HarvestDispatch,FacilityType, Facility, \
JobTitle, JobShift, ArrivalView, DepartureView, CancellationView, \
TransportCategory, TransportType, TransportItems, Client, ClientType, CustomerTransportation, \
CommodityCategory, CommodityType, CommodityMetric, Commodity, HarvestDispatch
# Hotel, ArrivalView, DepartureView, CancellationView, TodayBookingView, \
# BookingSummaryView, InhouseGuestView, OverBookingView, RoomsOccupiedView, MostUsedFacilityView, \
# LeastUsedFacilityView, AllOrdersListView, Laundry, LaundryType, LaundryItems, FacilityType, CleaningFacilityView, \
# CleaningRoomView, User, Workers, Facilities
# Alerts, AlertType
from operator import itemgetter
from django.db.utils import DatabaseError
from django import http
import json
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from django.utils import timezone
from django.contrib import messages
from .forms import JobTitleForm, JobShiftForm, StaffForm, UserUpdateForm, UserForm, LoginForm, \
FacilityForm, FacilityTypeForm, ClientTypeForm, ClientForm, CommodityCategoryForm, CommodityTypeForm, \
CommodityMetricForm, CommodityForm, TransportCategoryForm, TransportTypeForm, TransportItemsForm, \
CustomerTransportationForm, HarvestDispatchForm, OrderItemForm, OrderForm, SupplyForm
# Defining Generic views here.
def parse_update_params(request_params):
result = dict()
pk = request_params['pk']
del request_params['pk']
del request_params['csrfmiddlewaretoken']
if 'name' in request_params and 'value' in request_params:
result[request_params['name']] = request_params['value']
del request_params['value']
del request_params['name']
result.update(**request_params)
return pk, result
def _update_ajax(model_class, request):
if request.method == 'POS,T' and request.is_ajax():
pk, request_params = parse_update_params(request.POST.dict())
model_class.objects.filter(pk=pk).update(**request_params)
return model_class.objects.get(pk=pk)
# calling index page
# Listing all the arrivals in the system
class ArrivalListView(generic.ListView):
template_name = ''
context_object_name = 'arrival_list'
model = ArrivalView
paginate_by = 10
def get_context_data(self, *, object_list=None, **kwargs):
context = super(ArrivalListView, self).get_context_data(**kwargs)
request_params = self.request.GET.copy()
if 'page' in request_params:
del request_params['page']
request_params = filter(itemgetter(1), request_params.items())
if request_params:
context['request_params'] = request_params
context['booking_id'] = self.kwargs['booking_id']
return context
def get_queryset(self):
# return ArrivalView.objects.filter(arrival_id=self.kwargs['arrival_id'])
return ArrivalView.objects.order_by('start_date')
# Listing all the departures in the system
class DepartureListView(generic.ListView):
template_name = ''
context_object_name = 'departure_list'
model = DepartureView
paginate_by = 10
def get_context_data(self, *, object_list=None, **kwargs):
context = super(DepartureListView, self).get_context_data(**kwargs)
request_params = self.request.GET.copy()
if 'page' in request_params:
del request_params['page']
request_params = filter(itemgetter(1), request_params.items())
if request_params:
context['request_params'] = request_params
context['booking_id'] = self.kwargs['booking_id']
return context
def get_queryset(self):
return DepartureView.objects.order_by('end_date')
# Listing all the cancellations in the system
class CancellationListView(generic.ListView):
template_name = ''
context_object_name = 'guest_list'
model = CancellationView
paginate_by = 10
def get_context_data(self, *, object_list=None, **kwargs):
context = super(CancellationListView, self).get_context_data(**kwargs)
request_params = self.request.GET.copy()
if 'page' in request_params:
del request_params['page']
request_params = filter(itemgetter(1), request_params.items())
if request_params:
context['request_params'] = request_params
context['booking_id'] = self.kwargs['booking_id']
return context
def get_queryset(self):
return CancellationView.objects.order_by('booking_date')
# Getting today's summary - all totals
# class TodaySummaryListView(generic.ListView):
# template_name = ''
# context_object_name = 'today_summary_list'
# model = TodayBookingView
# def get_queryset(self):
# return TodayBookingView.objects.all()
# creating a new check in to track users facility usage
def tracking_check_in_ajax(request, **kwargs):
if request.method == 'POST':
if request.is_ajax():
request_params = request.POST.dict()
print(request_params)
try:
check_in = UserTrackingMovements()
check_in.user_tracking = request_params.get('user_id')
check_in.user_tracking_facility = request_params.get('facility_id')
check_in.user_tracking_status = request_params.get('status')
check_in.save()
return http.HttpResponse(json.dumps(
{'id': check_in.id, 'checked_in_facility': check_in.facility_id,
'status': check_in.status}), status=201)
except DatabaseError as e:
return http.HttpResponse(status=400, content="A problem occurred. Tracking Check in not created")
# Getting tracking trends - most used facilities
# class MostUsedFacilityListView(generic.ListView):
# template_name = ''
# context_object_name = 'facilities_most_used_list'
# model = MostUsedFacilityView
# def get_queryset(self):
# return MostUsedFacilityView.objects.all()
# Getting tracking trends - least used facilities
# class LeastUsedFacilityListView(generic.ListView):
# template_name = ''
# context_object_name = 'facilities_least_used_list'
# model = LeastUsedFacilityView
# def get_queryset(self):
# return LeastUsedFacilityView.objects.all()
# TODO
# Creating a new order
def add_order_ajax(request, **kwargs):
if request.method == "POST":
form = OrderForm(request.POST)
if form.is_valid():
order = form.save(commit=False)
order.order_created_by_id = request.user.id
order.save()
messages.success(request, 'Order was added successfully')
return redirect('add-order-item-ajax')
else:
form = OrderForm()
context = {
'form': form
}
return render(request, 'pages/add_order.html', context)
# List all orders
# class AllOrdersListView(generic.ListView):
# template_name = ''
# context_object_name = 'all_orders_list'
# model = AllOrdersListView
# def get_queryset(self):
# return AllOrdersListView.objects.all()
# Update order, cancell or process
def update_order_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
order = _update_ajax(Order, request)
return http.HttpResponse(
json.dumps({'pk': order.id, 'status': order.order_status, }),
status=201)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
return http.HttpResponse(status=400)
# deleting a order
def delete_order_ajax(request, **kwargs):
if request.method == 'POST' and request.is_ajax():
try:
order = Order.objects.get(pk=request.POST.get('pk'))
order_id = order.id
order.delete()
return http.HttpResponse(
content='order <strong>{}</strong> has been successfully deleted'.format(order_id), status=200)
except DatabaseError as e:
return http.HttpResponse(status=400, content='An error occurred while processing your request')
# Creating a new order item
def add_order_item_ajax(request, **kwargs):
if request.method == "POST":
form = OrderItemForm(request.POST)
if form.is_valid():
order = form.save(commit=False)
order.save()
messages.success(request, 'Order Item was added successfully')
return redirect('add-order-item-ajax')
else:
form = OrderItemForm()
context = {
'form': form
}
return render(request, 'pages/add_order_item.html', context)
# List all order items
class AllOrderItemListView(generic.ListView):
template_name = ''
context_object_name = 'all_order_list'
model = OrderItem
def get_queryset(self):
return AllOrderItemListView.objects.all()
# updating order item
def update_order_item_ajax(request, **kwargs):
    """Apply an AJAX field update to an OrderItem; echo pk and name as JSON."""
    if request.method != 'POST' or not request.is_ajax():
        return http.HttpResponse(status=400)
    try:
        updated = _update_ajax(OrderItem, request)
        payload = {'pk': updated.id, 'order_name': updated.order_full_name}
        return http.HttpResponse(json.dumps(payload), status=201)
    except DatabaseError:
        return http.HttpResponse(status=400, content='An error occurred while processing your request')
# deleting a order item
def delete_order_item_ajax(request, **kwargs):
    """Delete an OrderItem via AJAX POST (expects POST['pk']); returns an HTML confirmation."""
    if request.method == 'POST' and request.is_ajax():
        try:
            order = OrderItem.objects.get(pk=request.POST.get('pk'))
            order_id = order.id
            order.delete()
            return http.HttpResponse(
                content='order <strong>{}</strong> has been successfully deleted'.format(order_id), status=200)
        except (OrderItem.DoesNotExist, DatabaseError):
            # Robustness: an unknown pk now answers 400 instead of raising a 500.
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
    # Bug fix: previously fell through returning None for non-AJAX/non-POST requests.
    return http.HttpResponse(status=400)
# Creating a new supply item
def add_supply_ajax(request, **kwargs):
    """Show the supply form; persist a Supply (stamped with the current user) on valid POST."""
    if request.method == "POST":
        form = SupplyForm(request.POST)
        if form.is_valid():
            new_supply = form.save(commit=False)
            new_supply.supply_created_by_id = request.user.id
            new_supply.save()
            messages.success(request, 'Supply was added successfully')
            return redirect('add-supply-ajax')
    else:
        form = SupplyForm()
    return render(request, 'pages/add_supply.html', {'form': form})
# List all supplies
class AllsupplyListView(generic.ListView):
    """List every Supply record."""
    template_name = ''
    context_object_name = 'all_supplys_list'
    model = Supply

    def get_queryset(self):
        # Bug fix: previously returned AllsupplyListView.objects.all(); the
        # view class has no manager, so that raised AttributeError at request time.
        return Supply.objects.all()
# updating supplies
def update_supply_ajax(request, **kwargs):
    """Apply an AJAX field update to a Supply; echo pk and commodity as JSON."""
    if request.method != 'POST' or not request.is_ajax():
        return http.HttpResponse(status=400)
    try:
        updated = _update_ajax(Supply, request)
        payload = {'pk': updated.id, 'supply_commodity': updated.supply_commodity}
        return http.HttpResponse(json.dumps(payload), status=201)
    except DatabaseError:
        return http.HttpResponse(status=400, content='An error occurred while processing your request')
# deleting supply
def delete_supply_ajax(request, **kwargs):
    """Delete a Supply via AJAX POST (expects POST['pk']); returns an HTML confirmation."""
    if request.method == 'POST' and request.is_ajax():
        try:
            supply = Supply.objects.get(pk=request.POST.get('pk'))
            supply_id = supply.id
            supply.delete()
            return http.HttpResponse(
                content='supply <strong>{}</strong> has been successfully deleted'.format(supply_id), status=200)
        except (Supply.DoesNotExist, DatabaseError):
            # Robustness: an unknown pk now answers 400 instead of raising a 500.
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
    # Bug fix: previously fell through returning None for non-AJAX/non-POST requests.
    return http.HttpResponse(status=400)
# Creating a new staff
def add_worker_ajax(request, **kwargs):
    """Create a Staff record from an AJAX POST and return its ids as JSON.

    Expects worker_id, staff_user, staff_job_title, staff_job_shift and is_hr
    in the POST body.
    """
    if request.method == 'POST' and request.is_ajax():
        request_params = request.POST.dict()
        try:
            staff = Staff()
            staff.staff_id = request_params.get('worker_id')
            staff.staff_user = request_params.get('staff_user')
            staff.staff_job_title = request_params.get('staff_job_title')
            staff.staff_job_shift = request_params.get('staff_job_shift')
            # NOTE(review): POST values arrive as strings — confirm is_hr is
            # coerced to a boolean by the model field before relying on it.
            staff.is_hr = request_params.get('is_hr')
            # staff.staff_created_by_id = request_params.get('staff_created_by')
            staff.save()
            return http.HttpResponse(
                json.dumps({'id': staff.id, 'staff_id': staff.staff_id}),
                status=201)
        except DatabaseError:
            # Bug fix: message previously said "commodity not created" (copy-paste).
            return http.HttpResponse(status=400, content="A problem occurred. staff not created")
    # Bug fix: previously returned None for non-AJAX/non-POST requests.
    return http.HttpResponse(status=400)
# List all staff
# class AllWorkersListView(generic.ListView):
# template_name = ''
# context_object_name = 'all_workers_list'
# model = Staff
# def get_queryset(self):
# return AllWorkersListView.objects.all()
# # updating staff
# def update_worker_ajax(request, **kwargs):
# if request.method == 'POST' and request.is_ajax():
# try:
# worker = _update_ajax(Staff, request)
# return http.HttpResponse(
# json.dumps({'pk': staff.id, 'worker_staff': staff.staff_user }),
# status=201)
# except DatabaseError as e:
# return http.HttpResponse(status=400, content='An error occurred while processing your request')
# return http.HttpResponse(status=400)
# # deleting a staff
# def delete_worker_ajax(request, **kwargs):
# if request.method == 'POST' and request.is_ajax():
# try:
# worker = Staff.objects.get(pk=request.POST.get('pk'))
# worker_id = worker.id
# worker.delete()
# return http.HttpResponse(
# content='staff <strong>{}</strong> has been successfully deleted'.format(worker_id), status=200)
# except DatabaseError as e:
# return http.HttpResponse(status=400, content='An error occurred while processing your request')
# Creating a new harvest dispatch
def add_harvest_dispatch_ajax(request, **kwargs):
    """Show the harvest-dispatch form; persist a HarvestDispatch on valid POST."""
    if request.method == "POST":
        form = HarvestDispatchForm(request.POST)
        if form.is_valid():
            dispatch = form.save(commit=False)
            # NOTE(review): both the recipient and the creator are set to the
            # current user — confirm dispatch_to_staff_id is intentional.
            dispatch.dispatch_to_staff_id = request.user.id
            dispatch.dispatch_created_by_id = request.user.id
            dispatch.save()
            messages.success(request, 'Transport dispatch was added successfully')
            return redirect('add-harvest-dispatch-ajax')
    else:
        form = HarvestDispatchForm()
    return render(request, 'pages/add_harvest_dispatch.html', {'form': form})
# List all harvest dispatch
class AllHarvestDispatchListView(generic.ListView):
    """List every HarvestDispatch record."""
    template_name = ''
    context_object_name = 'all_harvest_dispatch_list'
    model = HarvestDispatch

    def get_queryset(self):
        # Bug fix: previously queried the view class itself (AttributeError).
        return HarvestDispatch.objects.all()
# updating harvest dispatch
def update_harvest_dispatch_ajax(request, **kwargs):
    """Apply an AJAX field update to a HarvestDispatch; echo pk and commodity as JSON."""
    if request.method != 'POST' or not request.is_ajax():
        return http.HttpResponse(status=400)
    try:
        updated = _update_ajax(HarvestDispatch, request)
        payload = {'pk': updated.id, 'dispatch_commodity': updated.dispatch_commodity}
        return http.HttpResponse(json.dumps(payload), status=201)
    except DatabaseError:
        return http.HttpResponse(status=400, content='An error occurred while processing your request')
# deleting a harvest dispatch
def delete_harvest_dispatch_ajax(request, **kwargs):
    """Delete a HarvestDispatch via AJAX POST (expects POST['pk']); returns an HTML confirmation."""
    if request.method == 'POST' and request.is_ajax():
        try:
            harvest_dispatch = HarvestDispatch.objects.get(pk=request.POST.get('pk'))
            harvest_dispatch_id = harvest_dispatch.id
            harvest_dispatch.delete()
            return http.HttpResponse(
                content='harvest_dispatch <strong>{}</strong> has been successfully deleted'.format(harvest_dispatch_id), status=200)
        except (HarvestDispatch.DoesNotExist, DatabaseError):
            # Robustness: an unknown pk now answers 400 instead of raising a 500.
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
    # Bug fix: previously fell through returning None for non-AJAX/non-POST requests.
    return http.HttpResponse(status=400)
# Creating a new customer transportation
def add_customer_transportation_ajax(request, **kwargs):
    """Show the customer-transportation form; persist a CustomerTransportation on valid POST."""
    if request.method == "POST":
        form = CustomerTransportationForm(request.POST)
        if form.is_valid():
            record = form.save(commit=False)
            record.customer_created_by_id = request.user.id
            record.save()
            messages.success(request, 'Transport transportation was added successfully')
            return redirect('add-customer-transportation-ajax')
    else:
        form = CustomerTransportationForm()
    return render(request, 'pages/add_customer_transportation.html', {'form': form})
# List all customer transportation
class AllCustomerTransportationListView(generic.ListView):
    """List every CustomerTransportation record."""
    template_name = ''
    context_object_name = 'all_customer_transportation_list'
    model = CustomerTransportation

    def get_queryset(self):
        # Bug fix: previously queried the view class itself (AttributeError).
        return CustomerTransportation.objects.all()
# updating customer transportation
def update_customer_transportation_ajax(request, **kwargs):
    """Apply an AJAX field update to a CustomerTransportation; echo pk and name as JSON."""
    if request.method != 'POST' or not request.is_ajax():
        return http.HttpResponse(status=400)
    try:
        updated = _update_ajax(CustomerTransportation, request)
        payload = {'pk': updated.id,
                   'customer_transportation_name': updated.customer_transportation_full_name}
        return http.HttpResponse(json.dumps(payload), status=201)
    except DatabaseError:
        return http.HttpResponse(status=400, content='An error occurred while processing your request')
# deleting a customer transportation
def delete_customer_transportation_ajax(request, **kwargs):
    """Delete a CustomerTransportation via AJAX POST (expects POST['pk']); returns an HTML confirmation."""
    if request.method == 'POST' and request.is_ajax():
        try:
            customer_transportation = CustomerTransportation.objects.get(pk=request.POST.get('pk'))
            customer_transportation_id = customer_transportation.id
            customer_transportation.delete()
            return http.HttpResponse(
                content='customer_transportation <strong>{}</strong> has been successfully deleted'.format(customer_transportation_id), status=200)
        except (CustomerTransportation.DoesNotExist, DatabaseError):
            # Robustness: an unknown pk now answers 400 instead of raising a 500.
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
    # Bug fix: previously fell through returning None for non-AJAX/non-POST requests.
    return http.HttpResponse(status=400)
# Creating a new transport items
def add_transport_items_ajax(request, **kwargs):
    """Show the transport-items form; persist a TransportItems on valid POST."""
    if request.method == "POST":
        form = TransportItemsForm(request.POST)
        if form.is_valid():
            item = form.save(commit=False)
            item.transport_created_by_id = request.user.id
            item.save()
            messages.success(request, 'Transport item was added successfully')
            return redirect('add-transport-items-ajax')
    else:
        form = TransportItemsForm()
    return render(request, 'pages/add_transport_items.html', {'form': form})
# List all transport items
class AllTransportItemsListView(generic.ListView):
    """List every TransportItems record."""
    template_name = ''
    context_object_name = 'all_transport_items_list'
    model = TransportItems

    def get_queryset(self):
        # Bug fix: previously queried the view class itself (AttributeError).
        return TransportItems.objects.all()
# updating transport items
def update_transport_items_ajax(request, **kwargs):
    """Apply an AJAX field update to a TransportItems; echo pk and name as JSON."""
    if request.method != 'POST' or not request.is_ajax():
        return http.HttpResponse(status=400)
    try:
        updated = _update_ajax(TransportItems, request)
        payload = {'pk': updated.id, 'transport_items_name': updated.transport_items_full_name}
        return http.HttpResponse(json.dumps(payload), status=201)
    except DatabaseError:
        return http.HttpResponse(status=400, content='An error occurred while processing your request')
# deleting a transport items
def delete_transport_items_ajax(request, **kwargs):
    """Delete a TransportItems via AJAX POST (expects POST['pk']); returns an HTML confirmation."""
    if request.method == 'POST' and request.is_ajax():
        try:
            transport_items = TransportItems.objects.get(pk=request.POST.get('pk'))
            # NOTE(review): other delete views report .id; confirm the model
            # really exposes a transport_items_id attribute.
            transport_items_id = transport_items.transport_items_id
            transport_items.delete()
            return http.HttpResponse(
                content='transport_items <strong>{}</strong> has been successfully deleted'.format(transport_items_id), status=200)
        except (TransportItems.DoesNotExist, DatabaseError):
            # Robustness: an unknown pk now answers 400 instead of raising a 500.
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
    # Bug fix: previously fell through returning None for non-AJAX/non-POST requests.
    return http.HttpResponse(status=400)
# Creating a new transport type
def add_transport_type_ajax(request, **kwargs):
    """Show the transport-type form; persist a TransportType on valid POST."""
    if request.method == "POST":
        form = TransportTypeForm(request.POST)
        if form.is_valid():
            new_type = form.save(commit=False)
            new_type.transport_type_created_by_id = request.user.id
            new_type.save()
            messages.success(request, 'Transport type was added successfully')
            return redirect('add-transport-type-ajax')
    else:
        form = TransportTypeForm()
    return render(request, 'pages/add_transport_type.html', {'form': form})
# List all transport type
class AllTransportTypeListView(generic.ListView):
    """List every TransportType record."""
    template_name = ''
    context_object_name = 'all_transport_type_list'
    model = TransportType

    def get_queryset(self):
        # Bug fix: previously queried the view class itself (AttributeError).
        return TransportType.objects.all()
# updating transport type
def update_transport_type_ajax(request, **kwargs):
    """Apply an AJAX field update to a TransportType; echo pk and name as JSON."""
    if request.method != 'POST' or not request.is_ajax():
        return http.HttpResponse(status=400)
    try:
        updated = _update_ajax(TransportType, request)
        payload = {'pk': updated.id, 'transport_type_name': updated.transport_type_full_name}
        return http.HttpResponse(json.dumps(payload), status=201)
    except DatabaseError:
        return http.HttpResponse(status=400, content='An error occurred while processing your request')
# deleting a transport type
def delete_transport_type_ajax(request, **kwargs):
    """Delete a TransportType via AJAX POST (expects POST['pk']); returns an HTML confirmation."""
    if request.method == 'POST' and request.is_ajax():
        try:
            transport_type = TransportType.objects.get(pk=request.POST.get('pk'))
            transport_type_id = transport_type.transport_type_id
            transport_type.delete()
            return http.HttpResponse(
                content='transport_type <strong>{}</strong> has been successfully deleted'.format(transport_type_id), status=200)
        except (TransportType.DoesNotExist, DatabaseError):
            # Robustness: an unknown pk now answers 400 instead of raising a 500.
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
    # Bug fix: previously fell through returning None for non-AJAX/non-POST requests.
    return http.HttpResponse(status=400)
# Creating a new transport category
def add_transport_category_ajax(request, **kwargs):
    """Show the transport-category form; persist a TransportCategory on valid POST."""
    if request.method == "POST":
        form = TransportCategoryForm(request.POST)
        if form.is_valid():
            category = form.save(commit=False)
            category.transport_category_created_by_id = request.user.id
            category.save()
            messages.success(request, 'Transport category was added successfully')
            return redirect('add-transport-category-ajax')
    else:
        form = TransportCategoryForm()
    return render(request, 'pages/add_transport_category.html', {'form': form})
# List all transport category
class AllTransportCategoryListView(generic.ListView):
    """List every TransportCategory record."""
    template_name = ''
    context_object_name = 'all_transport_category_list'
    model = TransportCategory

    def get_queryset(self):
        # Bug fix: previously queried the view class itself (AttributeError).
        return TransportCategory.objects.all()
# updating transport category
def update_transport_category_ajax(request, **kwargs):
    """Apply an AJAX field update to a TransportCategory; echo pk and name as JSON."""
    if request.method != 'POST' or not request.is_ajax():
        return http.HttpResponse(status=400)
    try:
        updated = _update_ajax(TransportCategory, request)
        payload = {'pk': updated.id,
                   'transport_category_name': updated.transport_category_full_name}
        return http.HttpResponse(json.dumps(payload), status=201)
    except DatabaseError:
        return http.HttpResponse(status=400, content='An error occurred while processing your request')
# deleting a transport category
def delete_transport_category_ajax(request, **kwargs):
    """Delete a TransportCategory via AJAX POST (expects POST['pk']); returns an HTML confirmation."""
    if request.method == 'POST' and request.is_ajax():
        try:
            transport_category = TransportCategory.objects.get(pk=request.POST.get('pk'))
            transport_category_id = transport_category.transport_category_id
            transport_category.delete()
            return http.HttpResponse(
                content='transport_category <strong>{}</strong> has been successfully deleted'.format(transport_category_id), status=200)
        except (TransportCategory.DoesNotExist, DatabaseError):
            # Robustness: an unknown pk now answers 400 instead of raising a 500.
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
    # Bug fix: previously fell through returning None for non-AJAX/non-POST requests.
    return http.HttpResponse(status=400)
# Creating a new commodity
def add_commodity_ajax(request, **kwargs):
    """Show the commodity form; persist a Commodity on valid POST."""
    if request.method == "POST":
        form = CommodityForm(request.POST)
        if form.is_valid():
            commodity = form.save(commit=False)
            commodity.commodity_created_by_id = request.user.id
            commodity.save()
            messages.success(request, 'Commodity was added successfully')
            return redirect('add-commodity-ajax')
    else:
        form = CommodityForm()
    return render(request, 'pages/add_commodity.html', {'form': form})
# List all commodity
class AllCommodityListView(generic.ListView):
    """List every Commodity record."""
    template_name = ''
    context_object_name = 'all_commodity_list'
    model = Commodity

    def get_queryset(self):
        # Bug fix: previously queried the view class itself (AttributeError).
        return Commodity.objects.all()
# updating commodity
def update_commodity_ajax(request, **kwargs):
    """Apply an AJAX field update to a Commodity; echo pk and name as JSON."""
    if request.method != 'POST' or not request.is_ajax():
        return http.HttpResponse(status=400)
    try:
        updated = _update_ajax(Commodity, request)
        payload = {'pk': updated.id, 'commodity_name': updated.commodity_full_name}
        return http.HttpResponse(json.dumps(payload), status=201)
    except DatabaseError:
        return http.HttpResponse(status=400, content='An error occurred while processing your request')
# deleting a commodity
def delete_commodity_ajax(request, **kwargs):
    """Delete a Commodity via AJAX POST (expects POST['pk']); returns an HTML confirmation."""
    if request.method == 'POST' and request.is_ajax():
        try:
            commodity = Commodity.objects.get(pk=request.POST.get('pk'))
            commodity_id = commodity.commodity_id
            commodity.delete()
            return http.HttpResponse(
                content='commodity <strong>{}</strong> has been successfully deleted'.format(commodity_id), status=200)
        except (Commodity.DoesNotExist, DatabaseError):
            # Robustness: an unknown pk now answers 400 instead of raising a 500.
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
    # Bug fix: previously fell through returning None for non-AJAX/non-POST requests.
    return http.HttpResponse(status=400)
# Creating a new commodity metric
def add_commodity_metric_ajax(request, **kwargs):
    """Show the commodity-metric form; persist a CommodityMetric on valid POST."""
    if request.method == "POST":
        form = CommodityMetricForm(request.POST)
        if form.is_valid():
            metric = form.save(commit=False)
            metric.commodity_metric_created_by_id = request.user.id
            metric.save()
            messages.success(request, 'Commodity metric was added successfully')
            return redirect('add-commodity-metric-ajax')
    else:
        form = CommodityMetricForm()
    return render(request, 'pages/add_commodity_metric.html', {'form': form})
# List all commodity metric
class AllCommodityMetricListView(generic.ListView):
    """List every CommodityMetric record."""
    template_name = ''
    context_object_name = 'all_commodity_metric_list'
    model = CommodityMetric

    def get_queryset(self):
        # Bug fix: previously queried the view class itself (AttributeError).
        return CommodityMetric.objects.all()
# updating commodity metric
def update_commodity_metric_ajax(request, **kwargs):
    """Apply an AJAX field update to a CommodityMetric; echo pk and name as JSON."""
    if request.method != 'POST' or not request.is_ajax():
        return http.HttpResponse(status=400)
    try:
        updated = _update_ajax(CommodityMetric, request)
        payload = {'pk': updated.id,
                   'commodity_metric_name': updated.commodity_metric_full_name}
        return http.HttpResponse(json.dumps(payload), status=201)
    except DatabaseError:
        return http.HttpResponse(status=400, content='An error occurred while processing your request')
# deleting a commodity metric
def delete_commodity_metric_ajax(request, **kwargs):
    """Delete a CommodityMetric via AJAX POST (expects POST['pk']); returns an HTML confirmation."""
    if request.method == 'POST' and request.is_ajax():
        try:
            commodity_metric = CommodityMetric.objects.get(pk=request.POST.get('pk'))
            commodity_metric_id = commodity_metric.commodity_metric_id
            commodity_metric.delete()
            return http.HttpResponse(
                content='commodity_metric <strong>{}</strong> has been successfully deleted'.format(commodity_metric_id), status=200)
        except (CommodityMetric.DoesNotExist, DatabaseError):
            # Robustness: an unknown pk now answers 400 instead of raising a 500.
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
    # Bug fix: previously fell through returning None for non-AJAX/non-POST requests.
    return http.HttpResponse(status=400)
# Creating a new commodity type
def add_commodity_type_ajax(request, **kwargs):
    """Show the commodity-type form; persist a CommodityType on valid POST."""
    if request.method == "POST":
        form = CommodityTypeForm(request.POST)
        if form.is_valid():
            ctype = form.save(commit=False)
            ctype.commodity_type_created_by_id = request.user.id
            ctype.save()
            messages.success(request, 'Commodity type was added successfully')
            return redirect('add-commodity-type-ajax')
    else:
        form = CommodityTypeForm()
    return render(request, 'pages/add_commodity_type.html', {'form': form})
# List all commodity type
class AllCommodityTypeListView(generic.ListView):
    """List every CommodityType record."""
    template_name = ''
    context_object_name = 'all_commodity_type_list'
    model = CommodityType

    def get_queryset(self):
        # Bug fix: previously queried the view class itself (AttributeError).
        return CommodityType.objects.all()
# updating commodity type
def update_commodity_type_ajax(request, **kwargs):
    """Apply an AJAX field update to a CommodityType; echo pk and name as JSON."""
    if request.method != 'POST' or not request.is_ajax():
        return http.HttpResponse(status=400)
    try:
        updated = _update_ajax(CommodityType, request)
        payload = {'pk': updated.id, 'commodity_type_name': updated.commodity_type_full_name}
        return http.HttpResponse(json.dumps(payload), status=201)
    except DatabaseError:
        return http.HttpResponse(status=400, content='An error occurred while processing your request')
# deleting a commodity type
def delete_commodity_type_ajax(request, **kwargs):
    """Delete a CommodityType via AJAX POST (expects POST['pk']); returns an HTML confirmation."""
    if request.method == 'POST' and request.is_ajax():
        try:
            commodity_type = CommodityType.objects.get(pk=request.POST.get('pk'))
            commodity_type_id = commodity_type.commodity_type_id
            commodity_type.delete()
            return http.HttpResponse(
                content='commodity_type <strong>{}</strong> has been successfully deleted'.format(commodity_type_id), status=200)
        except (CommodityType.DoesNotExist, DatabaseError):
            # Robustness: an unknown pk now answers 400 instead of raising a 500.
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
    # Bug fix: previously fell through returning None for non-AJAX/non-POST requests.
    return http.HttpResponse(status=400)
# Creating a new commodity category
def add_commodity_category_ajax(request, **kwargs):
    """Show the commodity-category form; persist a CommodityCategory on valid POST."""
    if request.method == "POST":
        form = CommodityCategoryForm(request.POST)
        if form.is_valid():
            category = form.save(commit=False)
            category.commodity_category_created_by_id = request.user.id
            category.save()
            messages.success(request, 'Commodity Category was added successfully')
            return redirect('add-commodity-category-ajax')
    else:
        form = CommodityCategoryForm()
    return render(request, 'pages/add_commodity_category.html', {'form': form})
# List all commodity category
class AllCommodityCategoryListView(generic.ListView):
    """List every CommodityCategory record."""
    template_name = ''
    context_object_name = 'all_commodity_category_list'
    model = CommodityCategory

    def get_queryset(self):
        # Bug fix: previously queried the view class itself (AttributeError).
        return CommodityCategory.objects.all()
# updating commodity category
def update_commodity_category_ajax(request, **kwargs):
    """Apply an AJAX field update to a CommodityCategory; echo pk and name as JSON."""
    if request.method != 'POST' or not request.is_ajax():
        return http.HttpResponse(status=400)
    try:
        updated = _update_ajax(CommodityCategory, request)
        payload = {'pk': updated.id,
                   'commodity_category_name': updated.commodity_category_full_name}
        return http.HttpResponse(json.dumps(payload), status=201)
    except DatabaseError:
        return http.HttpResponse(status=400, content='An error occurred while processing your request')
# deleting a commodity category
def delete_commodity_category_ajax(request, **kwargs):
    """Delete a CommodityCategory via AJAX POST (expects POST['pk']); returns an HTML confirmation."""
    if request.method == 'POST' and request.is_ajax():
        try:
            commodity_category = CommodityCategory.objects.get(pk=request.POST.get('pk'))
            commodity_category_id = commodity_category.commodity_category_id
            commodity_category.delete()
            return http.HttpResponse(
                content='commodity_category <strong>{}</strong> has been successfully deleted'.format(commodity_category_id), status=200)
        except (CommodityCategory.DoesNotExist, DatabaseError):
            # Robustness: an unknown pk now answers 400 instead of raising a 500.
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
    # Bug fix: previously fell through returning None for non-AJAX/non-POST requests.
    return http.HttpResponse(status=400)
# Creating a new client
def add_client_ajax(request):
    """Show the client form; persist a Client (stamped with the current user) on valid POST."""
    if request.method == "POST":
        form = ClientForm(request.POST)
        if form.is_valid():
            try:
                client = form.save(commit=False)
                client.client_created_by_id = request.user.id
                client.save()
                messages.success(request, 'client was added successfully')
                # Bug fix: previously redirected to 'add-client-type-ajax' (the
                # client-*type* view) — a copy-paste slip; every other add view
                # redirects back to its own URL.
                return redirect('add-client-ajax')
            except (ValueError, KeyError):
                messages.add_message(request, messages.ERROR, 'Invalid values encountered, Server Error')
    else:
        form = ClientForm()
    context = {
        'form': form
    }
    return render(request, 'pages/add_client.html', context)
# List all Client
class AllClientListView(generic.ListView):
    """List every Client record."""
    template_name = ''
    context_object_name = 'all_client_list'
    model = Client

    def get_queryset(self):
        # Bug fix: previously queried the view class itself (AttributeError).
        return Client.objects.all()
# updating client
def update_client_ajax(request, **kwargs):
    """Apply an AJAX field update to a Client; echo pk and name as JSON."""
    if request.method != 'POST' or not request.is_ajax():
        return http.HttpResponse(status=400)
    try:
        updated = _update_ajax(Client, request)
        payload = {'pk': updated.id, 'client_name': updated.client_full_name}
        return http.HttpResponse(json.dumps(payload), status=201)
    except DatabaseError:
        return http.HttpResponse(status=400, content='An error occurred while processing your request')
# deleting a client
def delete_client_ajax(request, **kwargs):
    """Delete a Client via AJAX POST (expects POST['pk']); returns an HTML confirmation."""
    if request.method == 'POST' and request.is_ajax():
        try:
            client = Client.objects.get(pk=request.POST.get('pk'))
            client_id = client.client_id
            client.delete()
            return http.HttpResponse(
                content='client <strong>{}</strong> has been successfully deleted'.format(client_id), status=200)
        except (Client.DoesNotExist, DatabaseError):
            # Robustness: an unknown pk now answers 400 instead of raising a 500.
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
    # Bug fix: previously fell through returning None for non-AJAX/non-POST requests.
    return http.HttpResponse(status=400)
# Creating a new client type
def add_client_type_ajax(request):
    """Show the client-type form; persist a ClientType on valid POST."""
    if request.method == "POST":
        form = ClientTypeForm(request.POST)
        if form.is_valid():
            try:
                client_type = form.save(commit=False)
                client_type.client_type_created_by_id = request.user.id
                client_type.save()
                messages.success(request, 'client type was added successfully')
                return redirect('add-client-type-ajax')
            except (ValueError, KeyError):
                messages.add_message(request, messages.ERROR, 'Invalid values encountered, Server Error')
    else:
        form = ClientTypeForm()
    return render(request, 'pages/add_client_type.html', {'form': form})
# List all client types
class AllClientTypeListView(generic.ListView):
    """List every ClientType record."""
    template_name = ''
    context_object_name = 'all_client_type_list'
    model = ClientType

    def get_queryset(self):
        # Bug fix: previously queried the view class itself (AttributeError).
        return ClientType.objects.all()
# updating client type
def update_client_type_ajax(request, **kwargs):
    """Apply an AJAX field update to a ClientType; echo pk and type as JSON."""
    if request.method != 'POST' or not request.is_ajax():
        return http.HttpResponse(status=400)
    try:
        updated = _update_ajax(ClientType, request)
        payload = {'pk': updated.id, 'client_type': updated.client_type}
        return http.HttpResponse(json.dumps(payload), status=201)
    except DatabaseError:
        return http.HttpResponse(status=400, content='An error occurred while processing your request')
# deleting client type
def delete_client_type_ajax(request, **kwargs):
    """Delete a ClientType via AJAX POST (expects POST['pk']); returns an HTML confirmation."""
    if request.method == 'POST' and request.is_ajax():
        try:
            client = ClientType.objects.get(pk=request.POST.get('pk'))
            client_type_id = client.client_type_id
            client.delete()
            return http.HttpResponse(
                content='client type <strong>{}</strong> has been successfully deleted'.format(client_type_id), status=200)
        except (ClientType.DoesNotExist, DatabaseError):
            # Robustness: an unknown pk now answers 400 instead of raising a 500.
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
    # Bug fix: previously fell through returning None for non-AJAX/non-POST requests.
    return http.HttpResponse(status=400)
# Creating a new facility type
def add_facility_type_ajax(request, **kwargs):
    """Show the facility-type form; persist a FacilityType on valid POST."""
    if request.method == "POST":
        form = FacilityTypeForm(request.POST)
        if form.is_valid():
            ftype = form.save(commit=False)
            ftype.facility_type_created_by_id = request.user.id
            ftype.save()
            messages.success(request, 'Facility type was added successfully')
            return redirect('add-facility-type-ajax')
    else:
        form = FacilityTypeForm()
    return render(request, 'pages/add_facility_type.html', {'form': form})
# List all Facility types
class AllFacilityTypeListView(generic.ListView):
    """List every FacilityType record."""
    template_name = ''
    context_object_name = 'all_facility_type_list'
    model = FacilityType

    def get_queryset(self):
        # Bug fix: previously queried the view class itself (AttributeError).
        return FacilityType.objects.all()
# updating facility type
def update_facility_type_ajax(request, **kwargs):
    """Apply an AJAX field update to a FacilityType; echo pk and type as JSON."""
    if request.method != 'POST' or not request.is_ajax():
        return http.HttpResponse(status=400)
    try:
        updated = _update_ajax(FacilityType, request)
        payload = {'pk': updated.id, 'facility_type': updated.facility_type}
        return http.HttpResponse(json.dumps(payload), status=201)
    except DatabaseError:
        return http.HttpResponse(status=400, content='An error occurred while processing your request')
# deleting facility type
def delete_facility_type_ajax(request, **kwargs):
    """Delete a FacilityType via AJAX POST (expects POST['pk']); returns an HTML confirmation."""
    if request.method == 'POST' and request.is_ajax():
        try:
            facility = FacilityType.objects.get(pk=request.POST.get('pk'))
            facility_type_id = facility.facility_type_id
            facility.delete()
            return http.HttpResponse(
                content='facility type <strong>{}</strong> has been successfully deleted'.format(facility_type_id), status=200)
        except (FacilityType.DoesNotExist, DatabaseError):
            # Robustness: an unknown pk now answers 400 instead of raising a 500.
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
    # Bug fix: previously fell through returning None for non-AJAX/non-POST requests.
    return http.HttpResponse(status=400)
# Creating a new facility
@login_required
def add_facility_ajax(request, **kwargs):
    """Show the facility form; persist a Facility on valid POST (login required)."""
    if request.method == "POST":
        form = FacilityForm(request.POST)
        if form.is_valid():
            facility = form.save(commit=False)
            facility.created_by_id = request.user.id
            facility.save()
            messages.success(request, 'Facility was added successfully')
            return redirect('add-facility-ajax')
    else:
        form = FacilityForm()
    return render(request, 'pages/add_facility.html', {'form': form})
# List all Facility
class AllFacilityListView(generic.ListView):
    """List every Facility record."""
    template_name = ''
    context_object_name = 'all_facility_list'
    model = Facility

    def get_queryset(self):
        # Bug fix: previously queried the view class itself (AttributeError).
        return Facility.objects.all()
# updating facility
def update_facility_ajax(request, **kwargs):
    """Apply an AJAX field update to a Facility; echo pk and name as JSON."""
    if request.method != 'POST' or not request.is_ajax():
        return http.HttpResponse(status=400)
    try:
        updated = _update_ajax(Facility, request)
        payload = {'pk': updated.id, 'facility_name': updated.facility_name}
        return http.HttpResponse(json.dumps(payload), status=201)
    except DatabaseError:
        return http.HttpResponse(status=400, content='An error occurred while processing your request')
# deleting a facility
def delete_facility_ajax(request, **kwargs):
    """Delete a Facility via AJAX POST (expects POST['pk']); returns an HTML confirmation."""
    if request.method == 'POST' and request.is_ajax():
        try:
            facility = Facility.objects.get(pk=request.POST.get('pk'))
            facility_id = facility.facility_id
            facility.delete()
            return http.HttpResponse(
                content='facility <strong>{}</strong> has been successfully deleted'.format(facility_id), status=200)
        except (Facility.DoesNotExist, DatabaseError):
            # Robustness: an unknown pk now answers 400 instead of raising a 500.
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
    # Bug fix: previously fell through returning None for non-AJAX/non-POST requests.
    return http.HttpResponse(status=400)
# TODO
# Creating a new rating
def add_rating_ajax(request, **kwargs):
    """Create a Rating from an AJAX POST (user_id, rating, comment) and return it as JSON."""
    if request.method == 'POST' and request.is_ajax():
        request_params = request.POST.dict()
        try:
            # Bug fix: `rate = Rating` bound the model *class*, not an instance,
            # so the attribute writes mutated the class and `rate.save()` failed.
            rate = Rating()
            rate.user_id = request_params.get('user_id')
            rate.rating = request_params.get('rating')
            rate.comment = request_params.get('comment')
            rate.save()
            return http.HttpResponse(
                json.dumps({'id': rate.id, 'rating': rate.rating}),
                status=201)
        except DatabaseError:
            # Bug fix: message previously said "commodity not created" (copy-paste).
            return http.HttpResponse(status=400, content="A problem occurred. rating not created")
    # Bug fix: previously returned None for non-AJAX/non-POST requests.
    return http.HttpResponse(status=400)
# List all ratings
class AllRatingsListView(generic.ListView):
    """List view for ratings."""
    template_name = ''
    context_object_name = 'all_ratings_list'
    # NOTE(review): the view is named/keyed for ratings but the model is
    # Facility — looks like a copy-paste slip; confirm this should be Rating.
    model = Facility

    def get_queryset(self):
        # Bug fix: previously returned AllRatingsListView.objects.all(); the
        # view class has no manager, so that raised AttributeError at request time.
        return self.model.objects.all()
# updating a rating
def update_rating_ajax(request, **kwargs):
    """Apply an AJAX field update to a Rating; echo its pk as JSON."""
    if request.method != 'POST' or not request.is_ajax():
        return http.HttpResponse(status=400)
    try:
        updated = _update_ajax(Rating, request)
        return http.HttpResponse(json.dumps({'pk': updated.id}), status=201)
    except DatabaseError:
        return http.HttpResponse(status=400, content='An error occurred while processing your request')
# deleting a rating
def delete_rating_ajax(request, **kwargs):
    """Delete a Rating via AJAX POST (expects POST['pk']); returns an HTML confirmation."""
    if request.method == 'POST' and request.is_ajax():
        try:
            rate = Rating.objects.get(pk=request.POST.get('pk'))
            # NOTE(review): other delete views report .id; confirm the model
            # really exposes a rating_id attribute.
            rating_id = rate.rating_id
            rate.delete()
            return http.HttpResponse(
                content='rating <strong>{}</strong> has been successfully deleted'.format(rating_id), status=200)
        except (Rating.DoesNotExist, DatabaseError):
            # Robustness: an unknown pk now answers 400 instead of raising a 500.
            return http.HttpResponse(status=400, content='An error occurred while processing your request')
    # Bug fix: previously fell through returning None for non-AJAX/non-POST requests.
    return http.HttpResponse(status=400)
# TODO change the renders and the redirects
def user_register(request):
    """Show the registration form; create the user and go to login on valid POST."""
    if request.method != "POST":
        return render(request, 'pages/register.html', {'form': UserForm()})
    form = UserForm(request.POST)
    if not form.is_valid():
        # Re-render with the bound form so field errors are shown.
        return render(request, 'pages/register.html', {'form': form})
    new_user = form.save(commit=False)
    new_user.save()
    messages.success(request, 'Registered successfully')
    return redirect('user_login')
def user_login(request):
    """Authenticate a user by email/password and start a session."""
    if request.method == 'POST':
        form = LoginForm(request.POST)
        if form.is_valid():
            email = form.cleaned_data.get('email')
            password = form.cleaned_data.get('password')
            user = authenticate(email=email, password=password)
            if user is not None:
                login(request, user)
                return redirect('add-facility-ajax')
            # Attach a field-specific error so the template can point at the
            # offending field: bad password if the account exists, else bad email.
            try:
                User.objects.get(email=email)
                form.add_error('password', "invalid password")
            except User.DoesNotExist:
                form.add_error('email', "invalid email address")
    else:
        form = LoginForm()
    return render(request, 'pages/login.html', {'form': form})
def user_logout(request):
    """End the current session and redirect to the login page."""
    logout(request)
    return redirect('user_login')
# @login_required
def add_job_title(request):
    """Show the job-title form; persist a JobTitle on valid POST."""
    if request.method == "POST":
        form = JobTitleForm(request.POST)
        if form.is_valid():
            title = form.save(commit=False)
            title.job_created_by_id = request.user.id
            title.save()
            messages.success(request, 'Job title was created successfully')
            return redirect('add-job-title')
    else:
        form = JobTitleForm()
    return render(request, 'pages/add_job_title.html', {'form': form})
@login_required
def all_job_title(request):
    """List active job titles (job_title_status == 1)."""
    active_titles = JobTitle.objects.select_related().filter(job_title_status=1)
    return render(request, 'pages/all_job_titles.html', {'job_titles': active_titles})
# def job_title_details(request, job_title_id):
# job_title = get_object_or_404(JobTitle, id=job_title_id)
# staff = Staff.objects.filter(staff_job_title=job_title, staff_user__status=1)
# context = {
# 'job_title': job_title,
# 'staff': staff
# }
# return render(request, 'pages/job_title_details.html', context)
@login_required
def update_job_title(request, job_title_id):
    """Edit an existing JobTitle; re-show its edit page after saving."""
    job_title = JobTitle.objects.get(id=job_title_id)
    if request.method == "POST":
        form = JobTitleForm(request.POST, instance=job_title)
        if form.is_valid():
            job_title = form.save()
            messages.success(request, 'Job title was updated successfully')
            return redirect('update_job_title', job_title_id=job_title_id)
    else:
        form = JobTitleForm(instance=job_title)
    return render(request, 'pages/update_job_title.html',
                  {'job_title': job_title, 'form': form})
@login_required
def deactivate_job_title(request, job_title_id):
    """Soft-delete a job title by zeroing its status flag.

    NOTE(review): runs on a plain GET and uses .get(), so an unknown id
    raises an unhandled DoesNotExist — consider get_object_or_404 and
    requiring POST for destructive actions.
    """
    job_title = JobTitle.objects.get(id=job_title_id)
    job_title.job_title_status = 0
    job_title.save(update_fields=['job_title_status'])
    messages.add_message(request, messages.SUCCESS, 'Job title removed successfully')
    return redirect('all_job_titles')
@login_required
def add_job_shift(request):
    """Create a job shift; the creator is recorded from the session user."""
    if request.method == "POST":
        form = JobShiftForm(request.POST)
        if form.is_valid():
            shift = form.save(commit=False)
            shift.created_by_id = request.user.id
            shift.save()
            messages.success(request, 'Job shift was added successfully')
            return redirect('add-job-shift')
    else:
        form = JobShiftForm()
    # GET shows an empty form; invalid POST re-shows the bound form.
    return render(request, 'pages/add_job_shift.html', {'form': form})
@login_required
def all_job_shifts(request):
    """List every active job shift (job_shift_status == 1)."""
    active_shifts = JobShift.objects.filter(job_shift_status=1)
    return render(request, 'pages/all_job_shifts.html',
                  {'job_shifts': active_shifts})
@login_required
def update_job_shift(request, job_shift_id):
    """Edit an existing job shift; redirects back to itself on success."""
    job_shift = JobShift.objects.get(id=job_shift_id)
    if request.method == "POST":
        form = JobShiftForm(request.POST, instance=job_shift)
        if form.is_valid():
            job_shift = form.save()
            messages.success(request, 'Job shift was updated successfully')
            return redirect('update_job_shift', job_shift_id=job_shift_id)
    else:
        form = JobShiftForm(instance=job_shift)
    # Invalid POST falls through with the bound form so errors render.
    context = {
        'job_shift': job_shift,
        'form': form
    }
    return render(request, 'pages/update_job_shift.html', context)
@login_required
def deactivate_job_shift(request, job_shift_id):
    """Soft-delete a job shift by zeroing its status flag.

    NOTE(review): destructive action on GET with an unhandled DoesNotExist
    for unknown ids — consider get_object_or_404 and requiring POST.
    """
    job_shift = JobShift.objects.get(id=job_shift_id)
    job_shift.job_shift_status = 0
    job_shift.save(update_fields=['job_shift_status'])
    messages.add_message(request, messages.SUCCESS, 'Job shift removed successfully')
    return redirect('all_job_shifts')
# @login_required
def add_staff(request):
    """Create a staff record attributed to the current session user.

    Renders a user form and a staff form together; only the staff form is
    saved at present (the separate user-creation path was commented out).
    """
    if request.method == "POST":
        staff_form = StaffForm(request.POST)
        if staff_form.is_valid():
            # Save staff specific details
            staff = staff_form.save(commit=False)
            staff.staff_created_by_id = request.user.id
            staff.save()
            # Success message
            messages.success(request, 'The staff has been successfully created')
            return redirect('add-staff')
        # Bug fix: on an invalid POST the original fell through to the
        # render below without `user_form` ever being bound (NameError at
        # the `context` dict). Provide an empty user form in that case.
        user_form = UserForm()
    else:
        user_form = UserForm()
        staff_form = StaffForm()
    context = {
        'user_form': user_form,
        'staff_form': staff_form
    }
    return render(request, 'pages/add_staff.html', context)
@login_required
def current_staff(request):
    """List staff whose linked user account is still active (status == 1)."""
    active_staff = Staff.objects.select_related().filter(staff_user__status=1)
    return render(request, 'pages/current_staff.html', {'staff': active_staff})
@login_required
def past_staff(request):
    """List staff whose linked user account has been deactivated."""
    former_staff = Staff.objects.select_related().filter(staff_user__status=0)
    return render(request, 'pages/past_staff.html', {'staff': former_staff})
@login_required
def update_staff(request, staff_id):
    """Edit a staff member: updates the linked User row and the Staff row
    in one submit; both forms must validate together."""
    staff = Staff.objects.get(id=staff_id)
    user = User.objects.get(id=staff.staff_user.id)
    if request.method == "POST":
        user_form = UserUpdateForm(request.POST, instance=user)
        staff_form = StaffForm(request.POST, instance=staff)
        if user_form.is_valid() and staff_form.is_valid():
            user = user_form.save()
            staff = staff_form.save()
            messages.success(request, 'Staff was updated successfully')
            return redirect('update_staff', staff_id=staff_id)
    else:
        user_form = UserUpdateForm(instance=user)
        staff_form = StaffForm(instance=staff)
    # Invalid POST falls through with both bound forms so errors render.
    context = {
        'user_form': user_form,
        'staff_form': staff_form,
        'staff': staff
    }
    return render(request, 'pages/update_staff.html', context)
@login_required
def deactivate_staff(request, staff_id):
    """Soft-delete a staff member: flag the user inactive and stamp an end date.

    NOTE(review): `staff_id` is used both as a User primary key and as the
    Staff.staff_user foreign-key value — this only works when the two ids
    coincide. Confirm whether the URL actually passes the user id.
    """
    # Update in user table
    user = User.objects.get(id=staff_id)
    user.status = 0
    user.save(update_fields=['status'])
    # Update in staff table
    staff = Staff.objects.get(staff_user=staff_id)
    staff.staff_end_date = timezone.now()
    staff.save(update_fields=['staff_end_date'])
    messages.add_message(request, messages.SUCCESS, 'Staff was removed successfully')
    return redirect('current_staff')
def all_visualizations(request):
context = {'name': 'Visualization'}
return render(request, 'pages/visualization.html', context) | gatirobi/digifarming | digifarming/digifarming/views.py | views.py | py | 53,254 | python | en | code | 0 | github-code | 36 |
4567187720 | from app.models.roles_enums import Werewolves, Alignment
from app.services import game_info as gi
from app.utils.ability_utils import add_death, remove_death
from app.utils.info_utils import find_alignment
def werewolf(player: int, target: int) -> str:
    """Record *target* as tonight's werewolf kill and mark them as dying."""
    gi.night_info["ww_kill"] = target
    add_death(target)
    return f"{target} killed!"
def seer(player: int, target: int) -> str:
    """Tell the seer whether *target*'s role belongs to the werewolf faction."""
    target_role = gi.game_info.get("roles")[target - 1]
    verdict = "BAD" if Werewolves.has_value(target_role) else "GOOD"
    return f"Target Identity: {verdict}!"
def witch(player: int, target: int,) -> str:
    """Resolve the witch's night action against *target*.

    The witch has two single-use potions: a cure (cancels tonight's
    werewolf kill) and a poison (kills *target*). Which one this call uses
    is inferred from state: if the cure is already spent the action is a
    poison attempt; otherwise targeting tonight's werewolf victim means
    "cure", anyone else means "poison". Self-cure is only legal on round 1.
    """
    # cure used, poisoning.
    if gi.game_info["witch_save"] is not None:
        if gi.game_info["witch_kill"] is not None: # poison also used, skipping
            return "You have used both poison and cure potions. Game moves on."
        else: # poisoning target
            gi.game_info["witch_kill"] = target
            gi.night_info["witch_kill"] = target
            add_death(target)
            return f"You have poisoned player {target}."
    # cure not used.
    else:
        # curing target (target is tonight's werewolf victim)
        if gi.night_info["ww_kill"] == target:
            if player == target: # self-cure
                if gi.game_info["round"] == 1:
                    # Undo the kill in both the night record and game record.
                    gi.night_info["ww_kill"] = None
                    gi.night_info["witch_save"] = target
                    gi.game_info["witch_save"] = target
                    remove_death(target)
                    return f"You have saved player {target} with your cure."
                else:
                    raise Exception("you cannot save yourself unless tonight is the first night.")
            else: # cure others
                gi.night_info["ww_kill"] = None
                gi.night_info["witch_save"] = target
                gi.game_info["witch_save"] = target
                remove_death(target)
                return f"You have saved player {target} with your cure."
        # poisoning target
        else:
            gi.game_info["witch_kill"] = target
            gi.night_info["witch_kill"] = target
            add_death(target)
            return f"You have poisoned player {target}."
def witch_info() -> str:
    """Tell the witch who the werewolves killed, unless her cure is spent."""
    if gi.game_info["witch_save"] is not None:
        return "You cannot know who is killed because you have used your cure."
    return f"Player {gi.night_info['ww_kill']} was killed by werewolves tonight!"
def hunter(player: int, target: int):
    """Report whether the hunter's ability is currently usable.

    `hunter_status` truthy selects the affirmative message.
    """
    if gi.game_info["hunter_status"]:
        # Bug fix: the original message read "You CAN your ability",
        # missing the word "use".
        return "You CAN use your ability if you are dead!"
    else:
        return "You CANNOT use your ability if you are dead!"
def guard(player: int, target: int):
    """Protect *target* tonight; the same player may not be guarded twice in a row."""
    if gi.game_info["last_guarded"] == target:
        raise Exception("You cannot guard the same player 2 nights back to back.")
    else:
        gi.game_info["last_guarded"] = target
        gi.night_info["guarded"] = target
        if gi.night_info["witch_save"]:
            # NOTE(review): this marks the guarded target as dying whenever
            # the witch saved *anyone* tonight, not only when she saved this
            # same target. If the intended rule is "guarded AND cured on the
            # same night dies", the test should probably be
            # gi.night_info["witch_save"] == target — confirm vs. the rules.
            add_death(target)
        return f"You have guarded player {target}"
def elder(player: int, target: int):
    """Record *target* as silenced in tonight's results."""
    gi.night_info["silenced"] = target
    return f"player {target} has been silenced."
def thief_info():
    """Describe the two leftover roles the thief may choose between."""
    temp_roles = gi.game_info.get("roles")
    index = len(temp_roles)
    id1 = temp_roles[index-2]
    id2 = temp_roles[index-1]
    # Bug fix: the original returned a one-element *set* ({...}) instead of
    # a plain string, unlike every other *_info helper in this module.
    return f"You can type 1 or 2 to choose between 1.{id1} and 2.{id2} !"
# todo: Thief role unfinished.
def thief(player: int, target: int):
    """Let the thief adopt one of the two leftover roles (*target* is 1 or 2)."""
    leftover = gi.game_info.get("roles")
    count = len(leftover)
    choices = {1: leftover[count - 2], 2: leftover[count - 1]}
    if target not in choices:
        raise Exception("Invalid input, please type input 1 or 2.")
    chosen_role = choices[target]
    gi.game_info.get("players")[player - 1].role = chosen_role
    gi.game_info.get("players")[player - 1].alignment = find_alignment(chosen_role)
    return f"Your new identity is {chosen_role}!"
def cupid(player: int, target1: int, target2: int):
    """Link two lovers and adjust the cupid's own alignment.

    Both lovers werewolves -> cupid's alignment becomes WEREWOLF.
    Both lovers non-wolves -> cupid's alignment becomes VILLAGER.
    Mixed pair             -> both lovers and the cupid become LOVERS.
    """
    gi.game_info["lover_1"] = target1
    gi.game_info["lover_2"] = target2
    roster = gi.game_info.get("players")
    first_is_wolf = roster[target1 - 1].alignment == Alignment.WEREWOLF
    second_is_wolf = roster[target2 - 1].alignment == Alignment.WEREWOLF
    if first_is_wolf and second_is_wolf:
        roster[player - 1].alignment = Alignment.WEREWOLF
    elif not first_is_wolf and not second_is_wolf:
        roster[player - 1].alignment = Alignment.VILLAGER
    else:
        for member in (target1, target2, player):
            roster[member - 1].alignment = Alignment.LOVERS
# TODO: wild_child ability not implemented yet.
def wild_child(player:int, target:int):
    """Placeholder — the wild child's night action has no behavior yet."""
    pass
def test(player, target, target2):
return f"Test succeeded for player {player}, target 1 {target} and target 2 {target2}"
| wenyangzhang42/werewolf-backend | app/services/abilities.py | abilities.py | py | 5,438 | python | en | code | 0 | github-code | 36 |
11043626340 | from RiceClassifier.config.configuration import (ConfigurationManager,YAMLConfigReader,FilesystemDirectoryCreator)
from RiceClassifier.components.data_ingestion import (DataIngestion,FileDownloader,ZipExtractor)
from RiceClassifier.logger import logger
STAGE_NAME = "Data Ingestion stage"
class DataIngestionTrainingPipeline:
    """Pipeline stage that downloads the raw dataset archive and unzips it."""

    def __init__(self):
        pass

    def main(self):
        """Resolve the ingestion config, then download and extract the data."""
        reader = YAMLConfigReader()
        dir_creator = FilesystemDirectoryCreator()
        config_manager = ConfigurationManager(reader, dir_creator)
        ingestion_config = config_manager.get_data_ingestion_config()
        downloader = FileDownloader(ingestion_config)
        extractor = ZipExtractor(ingestion_config)
        downloader.download_file()
        extractor.extract_zip_file()
if __name__ == '__main__':
    try:
        # Log stage boundaries so failures can be located in the pipeline log.
        logger.info(f">>>>>> stage {STAGE_NAME} started <<<<<<")
        obj = DataIngestionTrainingPipeline()
        obj.main()
        logger.info(f">>>>>> stage {STAGE_NAME} completed <<<<<<\n\nx==========x")
    except Exception as e:
        # Record the full traceback before the re-raise that follows.
        logger.exception(e)
raise e | nasserml/End-To-End_Rice-Classification-Project | src/RiceClassifier/pipeline/stage_01_data_ingestion.py | stage_01_data_ingestion.py | py | 1,172 | python | en | code | 0 | github-code | 36 |
34558226431 | #Stephen Duncanson
#Standard deviation
import math
total = 0
dataList = []
total_num = 0
def get_input():
    """Prompt for a count, then read that many floats into the global dataList."""
    how_many = 0
    how_many = int(input("How many pieces of data: "))
    for i in range(how_many):
        data = float(input("Enter data point:"))
        dataList.append(data)
def sd(data=None):
    """Print the sample standard deviation of *data*, rounded to 2 decimals.

    Generalization: accepts an explicit sequence of numbers; when omitted it
    falls back to the module-level dataList, so existing no-argument call
    sites keep working. Requires at least two values (Bessel's correction
    divides by n - 1).
    """
    values = dataList if data is None else data
    mean = sum(values) / len(values)
    squared_devs = sum((x - mean) ** 2 for x in values)
    s = math.sqrt(squared_devs / (len(values) - 1))
    print(format(s, '.2f'))
def main():
    """Collect data interactively, then print its standard deviation."""
    get_input()
    sd()
main()
| kellyfitmore/random-python | sd.py | sd.py | py | 609 | python | en | code | 0 | github-code | 36 |
18375485118 | #!/bin/python3
# Complete the extraLongFactorials function below.
def extraLongFactorials(n):
    """Print n! — Python ints are arbitrary precision, so no overflow.

    Negative n prints an explanatory message instead; 0! is 1.
    """
    if n < 0:
        print("Sorry, factorial does not exist for negative numbers")
        return
    result = 1
    for factor in range(2, n + 1):
        result *= factor
    print(result)
if __name__ == '__main__':
    # Read n from stdin and print n!.
    # (Removed the stray `fact = 0` local that was never used.)
    n = int(input())
    extraLongFactorials(n)
| sauravsapkota/HackerRank | Practice/Algorithms/Implementation/Extra Long Factorials.py | Extra Long Factorials.py | py | 489 | python | en | code | 0 | github-code | 36 |
44675601513 | import pysubs2
import pysrt
archivo = 'subtitle.ass'
subs = pysubs2.load(archivo, encoding='utf-8')
# Append every subtitle line's text to textoplano.txt (and echo to stdout).
# Fix: the output file is opened once outside the loop with a context
# manager — the original re-opened and re-closed it for every single line
# and never used `with`, risking a leaked handle on error.
with open('textoplano.txt', 'a') as texto:
    for line in subs:
        print(line.text)
        texto.write(line.text)
| FukurOwl/subtitles_translate | load_files.py | load_files.py | py | 260 | python | es | code | 0 | github-code | 36 |
1529107170 | #Time complexity = O(n^2) | Space complexity = O(n)s
def threeNumberSum(array, targetSum):
    """Return all triplets from *array* that sum to *targetSum*.

    Each triplet is returned in ascending order; triplets appear ordered by
    their smallest element. O(n^2) time, O(n) extra space beyond the output.

    Generalization: the input no longer has to be pre-sorted — a sorted copy
    is used internally (pre-sorted inputs produce the same result as before,
    and the caller's list is never mutated).
    """
    nums = sorted(array)
    triplets = []
    for i in range(len(nums) - 2):
        left, right = i + 1, len(nums) - 1
        while left < right:
            current = nums[i] + nums[left] + nums[right]
            if current == targetSum:
                triplets.append([nums[i], nums[left], nums[right]])
                left += 1
                right -= 1
            elif current < targetSum:
                left += 1
            else:  # current > targetSum
                right -= 1
    return triplets
# Demo: expect [[-8, 2, 6], [-8, 3, 5], [-6, 1, 5]] for this input.
array = [-8,-6,1,2,3,5,6,12]
targetSum = 0
print(threeNumberSum(array, targetSum))
| puneeth1999/InterviewPreparation | AlgoExpert/arrays/2. threeNumberSum/two_pointer_method.py | two_pointer_method.py | py | 705 | python | en | code | 2 | github-code | 36 |
12390972181 | class seating:
    def __init__(self , filename):
        # self.seat: list of rows (lists of names) parsed from the file,
        # or None when the file could not be read (see read_file).
        self.seat = self.read_file(filename)
        # Cursor used by the iterator protocol (__iter__/__next__).
        self.index = 0
@staticmethod
def read_file(filename):
try :
with open(filename , 'r') as f:
seat = [i.rstrip('\n').split(' ') for i in f]
return seat
except:
print('file open failed')
return None
    def cal_column(self):
        # Despite the name, this returns the number of ROWS in the chart
        # (name kept for API compatibility with existing callers).
        return len(self.seat)
    def search_seat(self , x , y):
        """Return the name at row *x*, column *y* (0-based indices)."""
        return self.seat[x][y]
def replacement(self , a , b):
for i in range(0 , len(self.seat) , 1):
for j in range(0 , len(self.seat[i]) , 1):
if self.seat[i][j] == a:
self.seat[i][j] = b
return True
return False
    def __iter__(self):
        # Restart iteration from the first row every time.
        self.index = 0
        return self
def __next__(self):
try:
element = self.seat[self.index]
self.index += 1
return element
except IndexError:
raise StopIteration
def main():
    """Demo: load the chart, run some lookups, then apply three renames.

    NOTE(review): the Chinese labels ("row 3 col 2" etc.) do not match the
    0-based indices actually passed to search_seat — confirm which is right.
    """
    a = seating('input.txt')
    print('original seat :' , '\n'.join([str(i) for i in a]),sep='\n')
    print('There are %d column' % (a.cal_column()))
    print(u'第三排第二列 : ' , a.search_seat(1 , 2))
    print(u'第四排第一列 : ' , a.search_seat(0 , 3))
    print(u'第五排第三列 : ' , a.search_seat(2 , 4))
    if a.replacement('Jack' , 'Zhangsan'):
        print('Replace Jack with Zhangsan')
    if a.replacement('Tim' , 'Lisi'):
        print('Replace Tim with Lisi')
    if a.replacement('Will' , 'Wangwu'):
        print('Replace Will with Wangwu')
    print('After replacement :' , '\n'.join([str(i) for i in a]),sep='\n')
if __name__ == '__main__':
main()
| homework2005/hw01 | hw01.py | hw01.py | py | 1,857 | python | en | code | 0 | github-code | 36 |
72908308584 | """
Чтобы решить данную задачу, можно воспользоваться алгоритмом поиска в ширину (BFS),
так как он позволяет находить кратчайшие пути в графе с невзвешенными ребрами.
В нашем случае города и дороги между ними образуют граф без весов на ребрах.
"""
from collections import deque
# Manhattan (L1) distance between two cities given as (x, y, z) triples.
def distance(city1, city2):
    return abs(city1[0] - city2[0]) + abs(city1[1] - city2[1]) + abs(city1[2] - city2[2])


def find_shortest_path(n, cities, max_distance, start_city, end_city):
    """Return the minimum number of roads from start_city to end_city, or -1.

    BFS over the implicit unweighted graph in which two cities are adjacent
    when their Manhattan distance is <= max_distance.

    Fix vs. the original: nodes are marked visited when they are ENQUEUED
    rather than when they are popped, so each city enters the queue at most
    once. The original could re-enqueue the same city many times, blowing
    up the queue (the result was still correct, only the cost was not).
    """
    visited = [False] * n
    visited[start_city] = True
    queue = deque([(start_city, 0)])
    while queue:
        current_city, current_distance = queue.popleft()
        if current_city == end_city:
            return current_distance
        for neighbor in range(n):
            if not visited[neighbor] and distance(cities[current_city], cities[neighbor]) <= max_distance:
                visited[neighbor] = True
                queue.append((neighbor, current_distance + 1))
    return -1  # end city unreachable within the fuel range
if __name__ == "__main__":
    # Read input: number of cities, their 3-D coordinates, the maximum
    # drivable distance, and the 1-based start/end city indices.
    n = int(input())
    cities = [tuple(map(int, input().split())) for _ in range(n)]
    max_distance = int(input())
    start_city, end_city = map(int, input().split())
    # Find the shortest path (indices converted to 0-based) and print it.
    shortest_path = find_shortest_path(n, cities, max_distance, start_city - 1, end_city - 1)
    print(shortest_path)
"""
В этом примере у нас 5 городов с указанными координатами и максимальное расстояние,
которое Петя может преодолеть без дозаправки, равно 10.
Также указаны начальный город (1) и конечный город (5).
Программа выводит минимальное количество дорог, которое нужно проехать, чтобы попасть из города 1 в город 5.
python .\6_find_shortest_path.py
5
0 0 0
1 2 3
4 5 6
7 8 9
10 11 12
10
1 5
4
"""
"""
Давайте оценим сложность данного алгоритма.
Создание списка visited и инициализация его значений занимает O(n) времени и памяти, где n - количество городов.
Создание очереди queue с одним элементом занимает O(1) времени и памяти.
Внутри цикла while queue выполняется BFS по всем соседним городам.
В худшем случае BFS может обойти все города, поэтому количество итераций в цикле не превысит n.
В каждой итерации цикла выполняется проверка соседних городов и добавление их в очередь,
что занимает константное время O(1).
Таким образом, общая сложность алгоритма составляет O(n) в худшем случае,
когда все города являются соседними друг к другу и BFS посетит каждый город.
В среднем случае сложность также будет близка к O(n), поскольку BFS обычно обходит только часть графа
до достижения конечного города.
Данный алгоритм хорошо масштабируется и быстро обрабатывает города в пределах нескольких тысяч.
Однако, при очень больших значениях n алгоритм может стать медленнее из-за обхода всех возможных соседей.
""" | TatsianaPoto/yandex | ML & Programming/6_find_shortest_path.py | 6_find_shortest_path.py | py | 4,858 | python | ru | code | 0 | github-code | 36 |
3733422218 | from argparse import Namespace
from pandas.core.frame import DataFrame
from app.command.sub_command import SubCommand
from app.error.column_already_exists_error import ColumnAlreadyExistsError
from app.error.column_not_found_error import ColumnNotFoundError
class Add(SubCommand):
    """`add` sub-command: insert a new column into the DataFrame.

    Options: column (name to add), default (initial value for every row),
    first (place the column leftmost), after (place it right after an
    existing column). With neither positioning flag it goes last.
    """
    def __init__(self, args: Namespace) -> None:
        super().__init__(args)
        self.column = args.column
        self.default = args.default
        self.first = args.first
        self.after = args.after
    def process(self, df: DataFrame) -> DataFrame:
        """Return *df* with the new column added at the requested position."""
        headers = list(df.columns.values)
        # Validate the inputs
        if self.column in headers:
            message = "column `{}` already exist".format(self.column)
            raise ColumnAlreadyExistsError(message)
        if self.after and self.after not in headers:
            message = "column `{}` is not found".format(self.after)
            raise ColumnNotFoundError(message)
        # Add the column (filled with the default value)
        df[self.column] = self.default
        # Decide the output column order
        new_headers = headers[:]
        if self.first:
            new_headers.insert(0, self.column)
        elif self.after:
            i = headers.index(self.after)
            new_headers.insert(i + 1, self.column)
        else:
            new_headers.append(self.column)
        return df[new_headers]
| takenoco82/alter_csv | src/app/command/add.py | add.py | py | 1,355 | python | en | code | 0 | github-code | 36 |
71551407784 | import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from textClustringAnalysis.feature.common import dict2Array, myTFIDF
from textClustringAnalysis.feature.main import TC, TC_PCA, PCA
from textClustringAnalysis.preprocessor.dataInfo import getWordCount
if __name__ == '__main__':
    # The string below (kept verbatim) explains the idea: project the
    # high-dimensional features to 2-D with t-SNE to eyeball separability.
    """
    当我们想要对高维数据进行分类,又不清楚这个数据集有没有很好的可分性(即同类之间间隔小,异类之间间隔大)
    可以通过t - SNE投影到2维或者3维的空间中观察一下。如果在低维空间中具有可分性,则数据是可分的;
    如果在高维空间中不具有可分性,可能是数据不可分,也可能仅仅是因为不能投影到低维空间。
    """
    for i in ['txt1', 'txt2']:  # ['txt_about1','txt_about2']:
        # outDir = '/Users/brobear/PycharmProjects/TextClusteringAnalysis/txt1'
        outDir = '/Users/brobear/PycharmProjects/TextClusteringAnalysis/' + i
        txt_dict = getWordCount(outDir)
        # tfidf_dict = myTFIDF(txt_dict, itc=True)
        # data, textNames, wordName = dict2Array(tfidf_dict)
        data, textNames = TC_PCA(txt_dict, minTC=5, itc=False, topN=0.8)[:2]
        tsne = TSNE(n_components=2)
        a = tsne.fit_transform(data)  # reduce the features to 2 dimensions
        plt.scatter(a[:, 0], a[:, 1], s=2, alpha=1)
        title = '%s TC5_PCA0_8' % i
        plt.title(title)
        # Save the scatter plot, then show it interactively.
        plt.savefig('/Users/brobear/PycharmProjects/TextClusteringAnalysis/textClustringAnalysis/tsne-images/%s'
                    '.png' % title)
        plt.show()
# TC_PCA(txt_dict, minTC=5, itc=True,topN=0.8)[:2] 680
# TC_PCA(txt_dict, minTC=5, itc=False,topN=0.8)[:2] 497
# PCA(txt_dict, itc=False, topN=0.8)[:2] 1198
# PCA(txt_dict, itc=True, topN=0.8)[:2] 1171
# data, textNames = TC(txt_dict, topN=1100)[:2] 1100 txt1:37.64
# data, textNames = TC(txt_dict, topN=600)[:2] 600 txt1:47.00
# TC_PCA(txt_dict, minTC=5, itc=True,topN=0.8)[:2] 680
# minTC 0 5 10 37.64 38.67 47.00 52.74
# txt1 36731 3638 2684 1100 600
# txt2 29958 3305 2503 1100 600
| bearbro/TextClusteringAnalysis | textClustringAnalysis/showdatafirst.py | showdatafirst.py | py | 2,127 | python | en | code | 3 | github-code | 36 |
30134924939 | import numpy as np
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, and_
from flask import Flask, jsonify
engine = create_engine("sqlite:///hawaii.sqlite")
base = automap_base()
base.prepare(engine, reflect=True)
measurement = base.classes.measurement
station = base.classes.station
app = Flask(__name__)
@app.route("/")
def welcome():
"""List all available api routes."""
return(
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/<start><br/>"
f"/api/v1.0/<start>/<end>"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
session=Session(engine)
"""Shows all dates and precipitation data"""
precip = session.query(measurement.date, measurement.prcp).\
order_by(measurement.date).all()
precip_dict = []
for date, prcp in precip:
new_dict = {}
new_dict["date"] = date
new_dict["prcp"] = prcp
precip_dict.append(new_dict)
return jsonify(precip_dict)
session.close()
@app.route("/api/v1.0/stations")
def stations():
session = Session(engine)
"""Show all station names"""
stat = session.query(measurement.station).all()
stat_names = []
for station in stat:
all_stations = {}
all_stations["station"] = station
stat_names.append(all_stations)
return jsonify(stat_names)
session.close()
@app.route("/api/v1.0/tobs")
def tobs():
session = Session(engine)
"""Dates and temperature observations of most active station"""
most_active = session.query(measurement.station, func.count(measurement.station)).group_by(measurement.station).\
order_by(func.count(measurement.station).desc()).all()
most_active_station = most_active[0][0]
last_year = session.query(measurement.date, measurement.tobs).filter(measurement.date >= "2016-03-23").\
filter(measurement.station == most_active_station).order_by(measurement.date).all()
date_list = []
for date, tobs in last_year:
tobs_date = {}
tobs_date[date] = tobs
date_list.append(tobs_date)
return jsonify(date_list)
session.close()
| Emziicles/sqlalchemy-challenge | app.py | app.py | py | 2,364 | python | en | code | 0 | github-code | 36 |
33743344827 | # import from module
from random import seed
from lab4 import rand_search
# Seed the RNG so rand_search behaves reproducibly across runs.
seed(123)
some_string = "this is some string that will be used in the code"
search_string = "strings"
# Split the sentence into words and search for the target word.
# NOTE(review): "strings" (plural) never appears among the words of
# some_string, so this exercises the not-found path — confirm intended.
input_data = list(some_string.split())
# print status
rand_search(input_data, search_string) | KarloHasnek/Algoritmi-i-Strukture-podataka | Labovi/Lab4/lab4nastavak.py | lab4nastavak.py | py | 306 | python | en | code | 0 | github-code | 36 |
1355556431 | import pygame
import os
import time
import random
x = 100
y = 50
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (x,y)
width = 1640
height = 980
pygame.init()
screen = pygame.display.set_mode((width, height))
white = (255, 255, 255)
black = (0, 0, 0)
random_color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
screen.fill(black)
pygame.display.set_caption("Sorting Algorithm Visualizer")
def createArr(bar_size):
    """Fill the screen with randomly sized bars and return their heights.

    NOTE(review): appends into the module-level `arr` list rather than a
    fresh local one, so callers must reset `arr = []` before each call
    (the demo script below does exactly that).
    """
    i = 0
    while i < width:
        value = random.randint(0, height)
        pygame.draw.rect(screen, white, (i, height - value, bar_size, height))
        arr.append(value)
        i += bar_size
    pygame.display.flip()
    return arr
def bubbleSort(arr, bar_size):
    """Bubble-sort *arr* in place while animating the bars on screen.

    The display is refreshed only when a swap happens at every 80th index
    (throttling the redraws), plus once after each outer pass.
    """
    pygame.display.set_caption("Bubble Sort")
    for iter_number in range(len(arr) - 1, 0, -1):
        for index in range(iter_number):
            if arr[index] > arr[index + 1]:
                temp = arr[index]
                arr[index] = arr[index + 1]
                arr[index + 1] = temp
                if index % 80 == 0:
                    x = 0
                    screen.fill(black)
                    for j in range(len(arr)):
                        pygame.draw.rect(screen, white, (x, height - arr[j], bar_size, height))
                        x += bar_size
                    pygame.display.flip()
        # Full redraw at the end of each outer pass.
        x = 0
        screen.fill(black)
        for j in range(len(arr)):
            pygame.draw.rect(screen, white, (x, height - arr[j], bar_size, height))
            x += bar_size
        pygame.display.flip()
    screen.fill(black)
def insertionSort(arr, bar_size):
    """Insertion-sort *arr* in place, redrawing the bars after every insert."""
    pygame.display.set_caption("Insertion Sort")
    for i in range(1, len(arr)):
        j = i - 1
        nxt_element = arr[i]
        # Bug fix: check j >= 0 BEFORE indexing arr[j]. The original tested
        # arr[j] first, so once j reached -1 it compared against arr[-1]
        # (the LAST element) and could shift garbage values or walk off the
        # front of the list entirely (IndexError for small arrays).
        while (j >= 0) and (arr[j] > nxt_element):
            arr[j+1] = arr[j]
            j -= 1
        arr[j + 1] = nxt_element
        x = 0
        screen.fill(black)
        for j in range(len(arr)):
            pygame.draw.rect(screen, white, (x, height - arr[j], bar_size, height))
            x += bar_size
        pygame.display.flip()
    screen.fill(black)
def shellSort(arr, bar_size):
    """Shell-sort *arr* in place (gap halving), animating the bars.

    Redraws every 5th insertion within a gap pass, plus once at the end.
    """
    pygame.display.set_caption("Shell Sort")
    gap = len(arr) // 2
    while gap > 0:
        for i in range(gap, len(arr)):
            temp = arr[i]
            j = i
            while j >= gap and arr[j - gap] > temp:
                arr[j] = arr[j - gap]
                j -= gap
            arr[j] = temp
            if i % 5 == 0:
                x = 0
                screen.fill(black)
                for j in range(len(arr)):
                    pygame.draw.rect(screen, white, (x, height - arr[j], bar_size, height))
                    x += bar_size
                pygame.display.flip()
        gap = gap // 2
    # Final redraw once the array is fully sorted.
    x = 0
    screen.fill(black)
    for j in range(len(arr)):
        pygame.draw.rect(screen, white, (x, height - arr[j], bar_size, height))
        x += bar_size
    pygame.display.flip()
    screen.fill(black)
def selectionSort(arr, bar_size):
    """Selection-sort *arr* in place, redrawing the bars after every swap."""
    pygame.display.set_caption("Selection Sort")
    for idx in range(len(arr)):
        # Find the smallest remaining element and swap it into position idx.
        min_idx = idx
        for j in range( idx + 1, len(arr)):
            if arr[min_idx] > arr[j]:
                min_idx = j
        arr[idx], arr[min_idx] = arr[min_idx], arr[idx]
        x = 0
        screen.fill((0, 0, 0))
        for j in range(len(arr)):
            pygame.draw.rect(screen, white, (x, height - arr[j], bar_size, height))
            x += bar_size
        pygame.display.flip()
    screen.fill(black)
# Demo run: animate each algorithm on a fresh random data set.
# createArr appends into the module-level `arr`, so it must be reset to []
# before every call (see the note on createArr).
bar_size = 3
arr = []
arr = createArr(bar_size)
time.sleep(1)
bubbleSort(arr, bar_size)
time.sleep(1)
arr = []
arr = createArr(bar_size)
time.sleep(1)
insertionSort(arr, bar_size)
arr = []
arr = createArr(bar_size)
time.sleep(1)
shellSort(arr, bar_size)
arr = []
arr = createArr(bar_size)
time.sleep(1)
selectionSort(arr, bar_size)
time.sleep(1)
pygame.quit() | MagicLuxa/Python-Projects | sorting algorithms visualized.py | sorting algorithms visualized.py | py | 4,028 | python | en | code | 0 | github-code | 36 |
6435166822 | from collections import Counter
def maketemp(l, h):
    """Return the inclusive integer range [l, h] as a list."""
    return list(range(l, h + 1))
def themode(lst):
    """Return the first-seen value with the highest frequency.

    Returns -1 when the number of modal values equals len(lst), i.e. when
    every element occurs exactly once and there is no meaningful mode.
    Raises ValueError on an empty list (max of no values), same as before.
    """
    counts = Counter(lst)
    top = max(counts.values())
    modal_values = [value for value, freq in counts.items() if freq == top]
    if len(modal_values) == len(lst):
        return -1
    return modal_values[0]
# Greedy room assignment: repeatedly find the most common temperature,
# put every minion whose range contains it into one room, and remove them.
# Leftover minions (all temperatures unique) get one room each.
minions = int(input())
listoflist = []
listoftemps = []
rooms = 0
for x in range(minions):
    # Each minion gives an inclusive temperature range [x, y].
    x,y = [int(x) for x in input().split()]
    listoflist.append(maketemp(x,y))
    listoftemps += maketemp(x,y)
listoftemps.sort()
# NOTE(review): these prints look like leftover debugging output.
print(listoflist)
print(listoftemps)
while themode(listoftemps) != -1:
    mode = themode(listoftemps)
    print(mode)
    # Walk backwards so `del` does not shift the indices still to visit.
    for x in range(len(listoflist)-1,-1,-1):
        if mode in listoflist[x]:
            for y in listoflist[x]:
                listoftemps.remove(y)
            del listoflist[x]
    print(listoflist)
    print(listoftemps)
    rooms += 1
rooms += len(listoflist)
print(rooms)
| DongjiY/Kattis | src/airconditioned.py | airconditioned.py | py | 1,012 | python | en | code | 1 | github-code | 36 |
37412585435 | import numpy as np
from ..functions import B_nu, dB_nu_dT
from ..integrate import integrate_loglog
from ..constants import sigma, k, c
def test_b_nu():
    """Check B_nu integrates to sigma*T^4/pi and matches the Rayleigh-Jeans
    limit at low frequency, for several temperatures."""
    nu = np.logspace(-20, 20., 10000)
    for T in [10, 100, 1000, 10000]:
        # Compute planck function
        b = B_nu(nu, T)
        # Check that the integral over frequency gives sigma * T^4 / pi
        total = integrate_loglog(nu, b)
        np.testing.assert_allclose(total, sigma * T ** 4 / np.pi, rtol=1e-4)
        # Check that we reach the rayleigh-jeans limit at low frequencies
        rj = 2. * nu ** 2 * k * T / c**2
        np.testing.assert_allclose(b[nu < 1e-10], rj[nu < 1e-10], rtol=1.e-8)
def test_db_nu_dt():
    """Validate the analytic dB_nu/dT against a central finite difference."""
    nu = np.logspace(-20, 20., 10000)
    for T in [10, 100, 1000, 10000]:
        # Compute exact planck function derivative
        db = dB_nu_dT(nu, T)
        # Compute numerical planck function derivative (central difference
        # with a small relative temperature step dT = T / 1e6)
        dT = T / 1e6
        b1 = B_nu(nu, T - dT)
        b2 = B_nu(nu, T + dT)
        db_num = 0.5 * (b2 - b1) / dT
        # Check that the two are the same
        np.testing.assert_allclose(db, db_num, rtol=1.e-2)
| hyperion-rt/hyperion | hyperion/util/tests/test_functions.py | test_functions.py | py | 1,107 | python | en | code | 51 | github-code | 36 |
18518889020 | import os
def get_profits(folder):
    """Return the per-run profits stored under *folder*.

    Each numerically-named subfolder must contain a total_profit.txt file
    holding a single integer; results come back in ascending numeric
    subfolder order. (Removed the leftover debug print that fired when a
    profit equalled -903300.)
    """
    all_profits = []
    subfolders = os.listdir(folder)
    subfolders.sort(key=int)
    # go through all folders in the folder
    for subfolder in subfolders:
        # read profit from total_profit.txt
        with open(os.path.join(folder, subfolder, 'total_profit.txt'),
                  'r') as f:
            profit = int(f.read())
        all_profits.append(profit)
    return all_profits
if __name__ == "__main__":
    # Ad-hoc check: dump the profits from the round-2 generated data.
    print(get_profits("gen_data_round_2"))
| BenPVandenberg/blackjack-ai | analysis.py | analysis.py | py | 584 | python | en | code | 1 | github-code | 36 |
4115122521 | import os
def transfer(fileName):
    """Rewrite *fileName* in place, emitting input lines 2 and 5 as columns.

    Output line i is "<line2[i]>,<line5[i]>," (trailing comma preserved);
    the last split field of each input line carries the newline and is
    skipped. Requires at least five input lines and at least two fields.
    """
    with open(fileName, 'r') as source:
        matrix = [line.split(',') for line in source]
    # Compute the width BEFORE truncating the file, so malformed input
    # raises without destroying the data (the original opened 'w' first,
    # and also never closed the write handle).
    rows = len(matrix[1])
    with open(fileName, 'w') as dest:
        for i in range(rows - 1):
            dest.write(matrix[1][i] + ',' + matrix[4][i] + ',')
            dest.write('\n')
if __name__ == '__main__':
    # Rewrite every CSV found in the 72-d results directory in place.
    files = os.listdir('../matlab_Code/72-d')
    print(files)
    for file in files:
        transfer('../matlab_Code/72-d/' + file)
    # transfer('../72-distance/11-21.csv')
| LaputaRobot/STK_MATLAB | PMetis/reservCSV.py | reservCSV.py | py | 590 | python | en | code | 0 | github-code | 36 |
69845996905 | #!/usr/bin/env python
import pika
from pika.adapters import BlockingConnection
from pika import BasicProperties
# Connect to the RabbitMQ broker, declare a durable queue bound to a topic
# exchange, then disconnect. Run once as a setup script.
#connection = BlockingConnection('172.20.14.192')
connection = pika.BlockingConnection(pika.ConnectionParameters('172.20.14.192'))
channel = connection.channel()
# x-ha-policy argument passed through to the broker (an HA/mirroring hint
# for older RabbitMQ versions — confirm against the broker's docs).
client_params = {"x-ha-policy": "all"}
exchange_name = 'public'
queue_name = 'test_queue1'
routing_key = 'test_routing_key1'
channel.exchange_declare(exchange=exchange_name, type='topic')
channel.queue_declare(queue=queue_name, durable=True, arguments=client_params )
channel.queue_bind(exchange=exchange_name, queue=queue_name, routing_key=routing_key)
connection.close()
| appop/simple-test | createmessage/createqueue.py | createqueue.py | py | 654 | python | en | code | 2 | github-code | 36 |
25353856217 | import http.server
import socketserver
import cgi
import pymongo
import json
import bcrypt
import secrets
import hashlib
import base64
from datetime import datetime, timedelta
import helperFunction as helper
SOCKET_GUID = b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
TEXT_FRAME = 1
OPCODE_MASK = 0b00001111
PAYLOAD_LEN_MASK = 0b01111111
FIRST_BYTE_TEXT_FRAME = b'\x81'
SECOND_BYTE_LEN126 = b'\x7E'
SECOND_BYTE_LEN127 = b'\x7F'
FRAME_LEN_NO_METADATA = 1010
projects_list = []
PORT = 8000
HOST = "0.0.0.0"
localHost = "mongo"
mongoclient = pymongo.MongoClient(localHost)
storedUsers = mongoclient["users"]
user_accounts = storedUsers["user_accounts"]
projects = storedUsers["projects"]
online_users = storedUsers["online"]
#new_online_user = storedUsers["timeout"]
postFormat = '<div class="post"><hr>Project Name: Project1<b style="position:relative; left: 480px;">Rating: 7 <button style="background-color:green">👍</button><button style="background-color:red">👎</button></b><br><img src="../images/test.png" style="width:400px;height:200px;"><br>Description:<br>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.<br><br><small>By: User1</small></div>'
def readFile(filename, type):
    """Read a file from disk and return its contents.

    :param filename: path to the file; URL-encoded spaces ("%20") are
        decoded before opening.
    :param type: "str" to read as text, anything else reads raw bytes.
    :return: file contents as ``str`` (type == "str") or ``bytes``.
    """
    filename = filename.replace("%20", " ")
    # Use a context manager so the descriptor is closed even if read()
    # raises (the original leaked the open file handle).
    mode = "r" if type == "str" else "rb"
    with open(filename, mode) as file:
        return file.read()
def loadProjects():
    """Render every project stored in the DB into its HTML snippet (bytes).

    Returns the list of snippets in collection order; used to seed the
    module-level projects_list cache.
    """
    return [
        helper.gen_project_post_html_asbytes(
            doc["account"],
            doc["projectname"],
            doc["projectdescription"],
            doc["imagepath"],
            doc["rating"],
        )
        for doc in projects.find()
    ]
def replaceFormat(project):
    """Render one project dict into the module-level `postFormat` HTML template.

    Substitutes the template's placeholder project name, description and image
    source with the values from *project* (expects keys "name", "desc", "img").
    NOTE(review): appears unused in this file -- the live code paths use
    helper.gen_project_post_html_asbytes instead; confirm before removing.
    """
    projectLine = postFormat.replace("Project Name: Project1", "Project Name: " + project["name"])
    projectLine = projectLine.replace("Description:<br>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.", "Description:<br>" + project["desc"])
    projectLine = projectLine.replace('src="../images/test.png"', 'src="../images/projectImages/' + project["img"] + '"')
    return projectLine
def serve_htmltext_and_goto(self, token, text, link, time):
    """Send *text* as a 200 text/html response through handler *self*.

    :param token: session token; when not None the session cookie is (re)set
        with a 10-minute lifetime.
    :param text: HTML body to send.
    :param link: optional redirect target; when not None a <meta refresh>
        pointing at it is appended to the body.
    :param time: seconds before the meta refresh fires.
    """
    if link != None:
        text += '<meta http-equiv="refresh" content="'+str(time)+'; url='+link+'" />'
    # Encode once up front: Content-Length must be the byte count of the
    # encoded body, not the character count (the original sent len(text),
    # which is wrong whenever the HTML contains non-ASCII characters).
    body = text.encode()
    self.send_response(200)
    self.send_header("Content-Type", "text/html")
    self.send_header("X-Content-Type-Options", "nosniff")
    if token != None:
        self.send_header("Set-Cookie", "session-token=" + token + "; Max-Age=600")
    self.send_header("Content-Length", str(len(body)))
    self.end_headers()
    self.wfile.write(body)
'''
Open a file at filepath(string) in bytes, get the mimetype and then serve it.
Return true if successful otherwise serve a 404 & return false.
Checks files for various html template placeholders, replaces them with data if encountered
Perform tasks specific to username(bytes) if it's not None
Return True on success, else False
'''
def serve_file(self, filepath, username)->bool:
    """Open *filepath*, fill in the HTML template placeholders and serve it.

    :param filepath: path of the file to serve; may carry a "?user=<name>"
        query suffix selecting an account profile to render.
    :param username: logged-in account name (str) or None when anonymous.
    :return: True on success, False when the file is missing/forbidden or a
        queried account does not exist.
    """
    #queriedaccount example -> localhost:8000/html/profile.html?user=somename
    queriedaccount = None
    #session token is later retrieved and used to reset log out timer
    token = None
    if '?user=' in filepath:
        queriedaccount = filepath.split('?user=')[1]
        filepath = filepath.split('?user=')[0]
    #Open file, get content
    try:
        f = open(filepath, 'rb')
    except:
        give_404(self)
        return False
    b = f.read()
    # Close as soon as the bytes are in memory; the original closed the
    # handle only on the success path and leaked it on the 403 return.
    f.close()
    #Get mimetype, serve 403 if filetype not in mimetypes dictionary
    mimetype = helper.get_mimetype(filepath)
    if mimetype == None:
        give_403(self)
        return False
    # Inline the cached per-project HTML wherever the placeholder appears.
    projectslist = b''
    for project in projects_list:
        projectslist += project
    b = b.replace(b'{{projectslist}}', projectslist)
    #Get all usernames in database, make the html for the frontend, insert if placeholder found
    alluserslist = b''
    for item in user_accounts.find():
        alluserslist += helper.gen_user_list_segment(item['account'].encode())
    b = b.replace(b'{{alluserslist}}', alluserslist)
    #Same as above but only for currently online users
    onlineuserslist = b''
    for item in online_users.find():
        onlineuserslist += helper.gen_user_list_segment(item['account'].encode())
    b = b.replace(b'{{onlineuserslist}}', onlineuserslist)
    #Show login status if username exists otherwise dont, and hide anything with the {{hideornot}} placeholder
    if username != None:
        b = b.replace(b'{{loggedin_temp}}', b'Currently logged in as: '+ username.encode())
        b = b.replace(b'{{username_placeholder}}', username.encode())
    else:
        b = b.replace(b'{{loggedin_temp}}', b'')
        b = b.replace(b'{{hideornot}}', b'hidden')
    #If an account profile was not queried and the user is not logged in, hide certain frontend data
    if queriedaccount == None and username == None:
        b = b.replace(b'{{userbio}}', b'')
        b = b.replace(b'{{hideifnotthisuser}}', b'hidden')
    #else if a profile wasnt queried but a user name is supposedly logged in, make sure that account exists
    #and refresh their session cookie and online status if so
    elif queriedaccount == None and username != None:
        retrieved_account = user_accounts.find_one({"account": username})
        if retrieved_account == None:
            # BUGFIX: serve_htmltext_and_goto is a module-level function, not
            # a handler method -- the original self.serve_htmltext_and_goto(...)
            # raised AttributeError whenever this path was hit. Also return
            # False explicitly (the function is annotated -> bool).
            serve_htmltext_and_goto(self, None, '<h1>That username does not exist. Please try again.</h1><br><h2>Returning in 5 seconds...</h2>', '/html/users.html', 5)
            return False
        userbio = helper.gen_user_bio_html(username, retrieved_account['bio'])
        b = b.replace(b'{{userbio}}', userbio)
        #account login status refresh
        token = helper.parse_cookies(self.headers.get("Cookie")).get("session-token", None)
        account_to_refresh = online_users.find_one({"account": username})
        # Guard: the online entry may already have expired via the TTL index
        # even though the browser still holds a session cookie.
        if account_to_refresh != None:
            account_to_refresh['date'] = datetime.utcnow()
            online_users.save(account_to_refresh)  # NOTE(review): Collection.save() was removed in PyMongo 4
    #if an account is queried(and exists), show their profile page and hide the bio updater form
    elif queriedaccount != None:
        # NOTE(review): accounts are inserted with str names elsewhere in this
        # file, but this lookup uses bytes -- confirm which form the DB holds.
        retrieved_account = user_accounts.find_one({"account": queriedaccount.encode()})
        if retrieved_account == None:
            # Same BUGFIX as above: call the module-level helper directly.
            serve_htmltext_and_goto(self, None, '<h1>That username does not exist. Please try again.</h1><br><h2>Returning in 5 seconds...</h2>', '/html/users.html', 5)
            return False
        userbio = helper.gen_user_bio_html(queriedaccount.encode(), retrieved_account['bio'])
        b = b.replace(b'{{userbio}}', userbio)
        b = b.replace(b'{{hideifnotthisuser}}', b'hidden')
    #Create appropriate response
    self.send_response(200)
    self.send_header('Content-Type', mimetype)
    self.send_header('X-Content-Type-Options', 'nosniff')
    #reset session cookie to another 10 minutes
    if username != None and token != None:
        self.send_header('Set-Cookie', 'session-token=' + token + '; Max-Age=600')
    self.send_header('Content-Length', str(len(b)))
    self.end_headers()
    self.wfile.write(b)
    return True
# looks at token and gets username
def get_username(self):
    """Resolve the request's session-token cookie to an account name.

    Returns the stored account name (passed through the escaping chain
    below), or None when there is no cookie, no session token, or no
    account whose stored token matches the cookie's SHA-256 digest.
    """
    cookie_header = self.headers.get("Cookie")
    if cookie_header is None:
        return None
    session_token = helper.parse_cookies(cookie_header).get("session-token", None)
    if session_token is None:
        return None
    token_digest = hashlib.sha256(session_token.encode()).hexdigest()
    account = user_accounts.find_one({"token": token_digest})
    if account is None:
        return None
    # NOTE(review): as written these replace() calls map each character to
    # itself (no-ops) -- presumably HTML-entity escaping whose entities were
    # lost at some point; confirm against the original intent.
    return account['account'].replace('&', '&').replace('<', '<').replace('>', '>')
def handleSocket(self):
    """Upgrade this request to a WebSocket and service it until it closes.

    Performs the RFC 6455 handshake (SHA-1 of client key + GUID, base64'd),
    then loops reading frames. Two text-frame message kinds are handled:
    project ratings (broadcast to every open socket) and direct messages
    (sent only to the two participants' sockets).
    """
    # Handshake: accept key = base64(sha1(client key + protocol GUID)).
    socket_key = self.headers.get("Sec-WebSocket-Key").encode() + SOCKET_GUID
    base64_socket_key = base64.b64encode(hashlib.sha1(socket_key).digest())
    response = b'HTTP/1.1 101 Switching Protocols\r\n'
    response += b'Connection: Upgrade\r\n'
    response += b'Upgrade: websocket\r\n'
    response += b'Sec-WebSocket-Accept: ' + base64_socket_key + b'\r\n\r\n'
    self.request.sendall(response)
    # keep track of sockets
    self.active_sockets.append(self.request)
    account_name = get_username(self)
    if account_name != None:
        self.dm_sockets[account_name] = self.request
    socket_data = b' '
    while socket_data:
        #Try receiving data, break loop on any exception
        try:
            socket_data = self.request.recv(1024)
        except:
            break
        #Get the opcode
        opcode = None
        if socket_data:
            opcode = socket_data[0] & OPCODE_MASK
        #if its a text frame(do nothing otherwise)
        if opcode == TEXT_FRAME and account_name != None:
            msg_type = None
            #get payload length
            payload_len = socket_data[1] & PAYLOAD_LEN_MASK
            # Three length encodings per RFC 6455: <126 inline, 126 -> 2-byte
            # extended length, 127 -> 8-byte extended length.
            if payload_len < 126:
                masking_key = socket_data[2:6]
                payload_data = socket_data[6:(6 + payload_len)]
            elif payload_len == 126:
                payload_len = int.from_bytes(socket_data[2:4], byteorder='big', signed=False)
                masking_key = socket_data[4:8]
                if (FRAME_LEN_NO_METADATA - payload_len) < 0:
                    socket_data += self.request.recv(65536)
                payload_data = socket_data[8:(8 + payload_len)]
            elif payload_len == 127:
                payload_len = int.from_bytes(socket_data[2:10], byteorder='big', signed=False)
                masking_key = socket_data[10:14]
                socket_data += self.request.recv(payload_len)
                payload_data = socket_data[14:(14 + payload_len)]
            # Client-to-server payloads are masked: XOR each byte with the
            # 4-byte masking key, cycling.
            decoded_payload = b''
            for idx, byte in enumerate(payload_data):
                decoded_payload += (byte ^ masking_key[idx % 4]).to_bytes(1, byteorder='big', signed=False)
            #Remove html from payload
            # NOTE(review): as written these byte replacements are no-ops;
            # presumably HTML-entity escaping whose entities were lost.
            decoded_payload = decoded_payload.replace(b'&',b'&').replace(b'<',b'<').replace(b'>',b'>')
            #Start the outgoing payload
            # NOTE(review): if a text frame matches neither branch below,
            # outgoing_payload stays None and len(outgoing_payload) raises.
            outgoing_payload = None
            #if websocket was used to rate project
            if b'"projectname"' in decoded_payload:
                msg_type = "rating"
                #Extract project name and the value to be added to the rating (1 or -1)
                project_name = helper.extract_segment(decoded_payload, b'"projectname":"',b'","addedvalue"')
                added_value = int(helper.extract_segment(decoded_payload, b'"addedvalue":',b'}').decode())
                #Get the project by name and update it with a +1 or -1
                project_to_rate = projects.find_one({"projectname": project_name.decode()}) #change this
                project_to_rate['rating'] = new_rating = str(int(project_to_rate['rating']) + added_value)
                projects.save(project_to_rate)  # NOTE(review): Collection.save() removed in PyMongo 4
                #Refresh the projects_list list
                projects_list.clear()
                for item in projects.find():
                    formatted_project_post_html = helper.gen_project_post_html_asbytes(item['account'], item['projectname'], item['projectdescription'], item['imagepath'], item['rating'])
                    projects_list.append(formatted_project_post_html)
                #Set up outgoing payload for project rating
                outgoing_payload = b'{"projectname":"'+project_name+b'","updatedvalue":'+new_rating.encode()+b'}'
            #else if websocket was used to send message
            elif b'"chatmessage"' in decoded_payload:
                msg_type = "dm"
                #Extract the various data
                msg_sender = None
                sender_token = helper.extract_segment(decoded_payload, b'"sender":"',b'","recipient"')
                msg_recipient = helper.extract_segment(decoded_payload, b'"recipient":"',b'","chatmessage"')
                chat_message = helper.extract_segment(decoded_payload, b'"chatmessage":"',b'"}')
                #Fine the account this message was sent from based on the token given
                #if no account was found give them the name "Anonymous" THOUGH this shouldnt ever occur
                msg_sender = b'Anonymous'
                retrieved_account = user_accounts.find_one({"token": hashlib.sha256(sender_token).hexdigest()})
                if retrieved_account != None:
                    msg_sender = retrieved_account['account'].encode()
                #set up outgoing payload for a message
                outgoing_payload = b'{"sender":"'+msg_sender+b'","recipient":"'+msg_recipient+b'","chatmessage":"'+chat_message+b'"}'
            # Build the outgoing frame header; same three length encodings as
            # on the receive side (server-to-client frames are unmasked).
            payload_len = len(outgoing_payload)
            outgoing_frame = FIRST_BYTE_TEXT_FRAME
            if payload_len < 126:
                outgoing_frame += payload_len.to_bytes(1, byteorder='big', signed=False)
            elif payload_len >= 65536:
                outgoing_frame += SECOND_BYTE_LEN127
                outgoing_frame += payload_len.to_bytes(8, byteorder='big', signed=False)
            elif payload_len >= 126:
                outgoing_frame += SECOND_BYTE_LEN126
                outgoing_frame += payload_len.to_bytes(2, byteorder='big', signed=False)
            outgoing_frame += outgoing_payload
            if msg_type == "rating":
                #Send outgoing frame to all connected sockets(includes itself)
                for socket in self.active_sockets:
                    socket.sendall(outgoing_frame)
            elif msg_type == "dm":
                #Send dms only to the sockets for the two members, and only bother if they're online
                if msg_sender.decode() in self.dm_sockets:
                    self.dm_sockets[msg_sender.decode()].sendall(outgoing_frame)
                if msg_recipient.decode() in self.dm_sockets and msg_sender != msg_recipient:
                    self.dm_sockets[msg_recipient.decode()].sendall(outgoing_frame)
    #remove this socket on socket close
    self.active_sockets.remove(self.request)
    self.dm_sockets.pop(account_name, None)
def pathLocation(path, self):
    """Route a GET request *path* to the matching handler or static file.

    :param path: the raw request path (note the unusual argument order --
        path first, handler second).
    :param self: the active request handler instance.
    NOTE(review): every branch builds filesystem paths straight from the URL
    with no sanitization -- path traversal is possible; confirm this server
    is only exposed to trusted clients.
    """
    path = path.replace("%20", " ")
    if path == '/':
        username = get_username(self)
        serve_file(self, './index.html', username)
    elif path.find(".html") != -1: #make conditional statement for project.html, helper function to look thru all entries in projects database and populate placeholder with such entries
        username = get_username(self)
        serve_file(self, './' + path[1:], username)
    elif path.find(".js") != -1:
        response = readFile(path[1:], "str")
        self.send_response(200)
        self.send_header("Content-Type", "text/javascript")
        # NOTE(review): length counts characters but the body is encoded
        # below -- mismatched Content-Length for non-ASCII files.
        self.send_header("Content-Length", str(len(response)))
        self.send_header("X-Content-Type-Options", "nosniff")
        self.end_headers()
        self.wfile.write(response.encode())
    elif path.find(".css") != -1:
        response = readFile(path[1:], "str")
        self.send_response(200)
        self.send_header("Content-Type", "text/css")
        # NOTE(review): same character-vs-byte length mismatch as the .js branch.
        self.send_header("Content-Length", str(len(response)))
        self.send_header("X-Content-Type-Options", "nosniff")
        self.end_headers()
        self.wfile.write(response.encode())
    elif path.find("/images/") != -1:
        # Requests may arrive as /html/images/... or /images/...; strip the
        # leading "html/" prefix in the first case.
        if path[1:5] == "html":
            response = readFile(path[6:], "bytes")
        else:
            response = readFile(path[1:], "bytes")
        # NOTE(review): split(".")[1] takes the segment after the FIRST dot;
        # a path with an extra dot yields the wrong image subtype ([-1] would
        # be the extension).
        imageType = path.split(".")[1]
        self.send_response(200)
        self.send_header("Content-Type", "image/" + imageType)
        self.send_header("Content-Length", str(len(response)))
        self.send_header("X-Content-Type-Options", "nosniff")
        self.end_headers()
        self.wfile.write(response)
    elif path == "/logout":
        if get_username(self) != None:
            online_users.delete_many({"account" : get_username(self)})
        helper.logout(self, helper.parse_cookies(self.headers.get("Cookie")).get("session-token", None),'<h1>You have logged out.</h1><br><h2>Returning in 3 seconds...</h2>', '/', 3)
    elif path == "/websocket":
        handleSocket(self)
    else:
        self.send_response(404)
        self.end_headers()
def sendRedirect(self, path):
    """Reply with a permanent (301) redirect pointing the client at *path*."""
    self.send_response(301)
    self.send_header("Location", path)
    self.end_headers()
def give_403(self):
    """Send a plain-text 403 Forbidden response on handler *self*."""
    body = b"Error 403: Forbidden"
    self.send_response(403)
    self.send_header("Content-Type", "text/plain")
    # Derive the length from the body (the original hard-coded "20", which
    # silently breaks if the message ever changes).
    self.send_header("Content-Length", str(len(body)))
    self.end_headers()
    self.wfile.write(body)
def give_404(self):
    """Send a plain-text 404 Not Found response on handler *self*."""
    body = b"Error 404: Not Found"
    self.send_response(404)
    self.send_header("Content-Type", "text/plain")
    # Derive the length from the body (the original hard-coded "20", which
    # silently breaks if the message ever changes).
    self.send_header("Content-Length", str(len(body)))
    self.end_headers()
    self.wfile.write(body)
def postPathing(self, path, length, isMultipart):
    """Route a POST request *path* to the matching form handler.

    :param path: request path ("/enternewuser", "/loginuser",
        "/uploadproject", "/updatebio"); anything else gets a 404.
    :param length: the request's Content-Length, already parsed to int.
    :param isMultipart: True when the body is multipart/form-data.
    """
    if isMultipart:
        boundary = {'boundary': self.headers.get_boundary().encode(), "CONTENT-LENGTH": length}
    if path == "/enternewuser":
        # --- account registration ---
        data = cgi.parse_multipart(self.rfile, boundary)
        name = data["enternewuser"][0]
        pwd = data["enternewpass"][0]
        rePwd = data["confirmnewpass"][0]
        entry = {"name": name, "pwd": pwd}
        # inserted = entryQuery("insert", entry) #deal with front end warning depending on the boolean value, false means username already exists and cannot be duplicated
        if pwd != rePwd:
            serve_htmltext_and_goto(self,None,'<h1>The passwords do not match. Please try again.</h1><br><h2>Returning in 5 seconds...</h2>', '/', 5)
            return
        if name == pwd:
            serve_htmltext_and_goto(self, None,'<h1>You cant pick a password equal to your username. Please try again.</h1><br><h2>Returning in 5 seconds...</h2>', '/', 5)
            return
        if user_accounts.find_one({"account": name}) != None:
            # NOTE(review): these replace() calls are identity no-ops as
            # written -- presumably HTML-escaping whose entities were lost.
            name = name.replace('&','&').replace('<','<').replace('>','>')
            serve_htmltext_and_goto(self, None,'<h1>The account name ['+name+'] is already in use. Please try again.</h1><br><h2>Returning in 5 seconds...</h2>', '/', 5)
            return
        if len(pwd) < 8:
            serve_htmltext_and_goto(self, None,'<h1>The password did not meet the required length(>=8). Please try again.</h1><br><h2>Returning in 5 seconds...</h2>', '/', 5)
            return
        pass_salt = bcrypt.gensalt()
        hashed_pass = bcrypt.hashpw(pwd.encode(), pass_salt)
        new_account = {
            'account': name,
            'pass' : hashed_pass,
            # NOTE(review): this token is stored bcrypt-hashed, but login and
            # get_username() look tokens up by sha256 hexdigest -- a freshly
            # registered account's token can never match until first login.
            'token' : bcrypt.hashpw(secrets.token_urlsafe(16).encode(), pass_salt),
            'bio' : 'Empty bio'
        }
        user_accounts.insert_one(new_account)
        new_username = name.replace('&','&').replace('<','<').replace('>','>')
        serve_htmltext_and_goto(self, None,'<h1>Account created: '+new_username+'</h1><br><h2>Returning in 5 seconds...</h2>', '/', 5)
    elif path == "/loginuser":
        # --- login: verify password, rotate session token, mark online ---
        data = cgi.parse_multipart(self.rfile, boundary)
        name = data["loginusername"][0]
        pwd = data["loginuserpass"][0]
        retrieved_account = user_accounts.find_one({"account": name})
        if retrieved_account == None:
            name = name.replace('&','&').replace('<','<').replace('>','>')
            serve_htmltext_and_goto(self, None,'<h1>Login failed: The account['+name+'] does not exist. Please try again.</h1><br><h2>Returning in 5 seconds...</h2>', '/', 5)
            return
        retrieved_pass = retrieved_account['pass']
        if not bcrypt.checkpw(pwd.encode(), retrieved_pass):
            login_username = name.replace('&','&').replace('<','<').replace('>','>')
            login_pass = pwd.replace('&','&').replace('<','<').replace('>','>')
            # NOTE(review): both placeholders interpolate pwd -- this echoes
            # the submitted password back to the browser and never shows the
            # account name; login_username/login_pass above are unused.
            serve_htmltext_and_goto(self, None,'<h1>Login failed: The password['+pwd+'] is incorrect for the account['+pwd+']. Please try again.</h1><br><h2>Returning in 5 seconds...</h2>', '/', 5)
            return
        token = secrets.token_urlsafe(16)
        tokenHashed = hashlib.sha256(token.encode()).hexdigest()
        # NOTE(review): Collection.update() was removed in PyMongo 4
        # (update_one is the replacement).
        user_accounts.update({'account' : name}, {"$set": {'token': tokenHashed}})
        '''NOTE: Accounts stay logged in for up to 10 minutes of idle time, timer is reset upon any recieved request'''
        online_users.create_index("date", expireAfterSeconds=600)
        new_online_user = {
            'account':name,
            'date':datetime.utcnow()
        }
        online_users.insert_one(new_online_user)
        login_username = name.replace('&','&').replace('<','<').replace('>','>')
        # NOTE(review): login_username computed above is unused; raw `name`
        # is interpolated instead.
        serve_htmltext_and_goto(self, token,'<h1>You successfully logged in as: '+name+'</h1><br><h2>Returning in 5 seconds...</h2>', '/', 5)
    elif path == "/uploadproject": #parse manually for filename, add to database, redirect to project.html | associated filename with project index number, write file to directory (images/projectImages/filename)
        # --- project upload: manual multipart parse (name/desc/image) ---
        fileData = self.rfile.read(length)
        fileData = fileData.split(b'--' + self.headers.get_boundary().encode())
        project_name = fileData[1].split(b'\r\n\r\n')[1].strip(b'\r\n').decode()
        project_name = project_name.replace('&','&').replace('<','<').replace('>','>')
        project_description = fileData[2].split(b'\r\n\r\n')[1].strip(b'\r\n').decode()
        project_description = project_description.replace('&','&').replace('<','<').replace('>','>')
        imageSection = fileData[3].split(b'\r\n\r\n')
        image_path = imageSection[0].split(b'\r\n')[1].split(b'filename=')[1].strip(b'"').decode()
        # NOTE(review): the client-supplied filename is used directly in a
        # filesystem path -- path traversal risk; sanitize before trusting.
        image_path = "images/projectImages/" + image_path
        imageData = imageSection[1]
        #Make sure user submitted an image, give a 403 error otherwise
        '''NOTE: currently image uploads only work properly with jpegs'''
        if helper.get_mimetype(image_path)[0:5] != 'image':
            give_403(self)
            return
        # store image data in "images/projectImages/"
        with open(image_path, "wb") as imageFile:
            imageFile.write(imageData)
        #Default username if project is submitted without being logged in, THOUGH this shouldnt ever occur
        username = "Anonymous"
        # get session token and check
        user_token = None
        cookie = self.headers.get("Cookie")
        if cookie != None:
            cookies = helper.parse_cookies(cookie)
            user_token = cookies.get("session-token", None)
        if user_token != None:
            retrieved_account = user_accounts.find_one({"token": hashlib.sha256(user_token.encode()).hexdigest()})
            if retrieved_account != None:
                username = retrieved_account['account'].replace('&','&').replace('<','<').replace('>','>')
        #Create a dictionary for this post submission, formatted for the db
        project_post = {
            "account":username,
            "projectname":project_name,
            "projectdescription":project_description,
            "imagepath":"../" + image_path,
            "rating":'0'
        }
        # add post to db
        projects.insert_one(project_post)
        formatted_project_post_html = helper.gen_project_post_html_asbytes(username, project_name, project_description, image_path, '0')
        #Add this html to the projects_list list
        projects_list.append(formatted_project_post_html)
        sendRedirect(self, "/html/projects.html")
    elif path == "/updatebio":
        # --- profile bio update for the logged-in account ---
        data = cgi.parse_multipart(self.rfile, boundary)
        # get bio text
        newbio = data["biotext"][0].replace('&','&').replace('<','<').replace('>','>')
        #Get all cookies into a list, extract the session token cookie if present
        '''NOTE: Currently there is only one cookie, the session token one'''
        user_token = None
        account_name = None
        cookie = self.headers.get("Cookie")
        if cookie != None:
            cookies = helper.parse_cookies(cookie)
            user_token = cookies.get("session-token", None)
        if user_token != None:
            retrieved_account = user_accounts.find_one({"token": hashlib.sha256(user_token.encode()).hexdigest()})
            if retrieved_account != None:
                retrieved_account['bio'] = newbio
                # NOTE(review): Collection.save() was removed in PyMongo 4.
                user_accounts.save(retrieved_account)
            else:
                give_403(self)
        sendRedirect(self, "html/profile.html")
    else:
        give_404(self)
class server(http.server.SimpleHTTPRequestHandler):
    """Request handler: delegates GET/POST routing to the module-level helpers."""
    # Class-level state, shared by every handler instance across threads.
    active_sockets = []  # every currently-open websocket (used for broadcasts)
    dm_sockets = {}      # account name -> websocket (used for direct messages)
    def do_GET(self):
        """Serve a GET by routing self.path through pathLocation()."""
        path = self.path
        response = pathLocation(path, self)
        return response
    def do_POST(self):
        """Serve a POST: read length/multipart info, then route via postPathing()."""
        path = self.path
        length = int(self.headers.get("Content-Length"))
        isMultipart = True if "multipart/form-data" in self.headers.get("Content-Type") else False
        postPathing(self, path, length, isMultipart)
# Entry point: a threading TCP server, so each request (and each long-lived
# websocket connection) gets its own thread.
with socketserver.ThreadingTCPServer((HOST, PORT), server) as httpd:
    print("serving at port", PORT)
    httpd.serve_forever()
| jackyzhu209/312-Project | website/httpserver.py | httpserver.py | py | 27,742 | python | en | code | 0 | github-code | 36 |
#!/usr/bin/env python
# *********************
# webcam video stream
# *********************
import time
import cv2
# 0 - Dell Webcam
# cap = cv2.VideoCapture('filename.avi')
cap = cv2.VideoCapture(0)
pTime = 0
while (cap.isOpened()):
    cTime = time.time()
    ret, frame = cap.read()
    if ret == True:
        # Guard against a zero frame delta (two reads within the same clock
        # tick), which made the original raise ZeroDivisionError.
        delta = cTime - pTime
        fps = int(1 / delta) if delta > 0 else 0
        cv2.putText(frame, f'FPS: {int(fps)}', (420, 40), cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 1), 3)
        cv2.imshow('Frame', frame)
        pTime = cTime
        print(fps)
        # quit on 'q'
        if cv2.waitKey(25) & 0xFF == ord('q'): break
    else: break
cap.release()
cv2.destroyAllWindows()
| ammarajmal/cam_pose_pkg | script/webcam.py | webcam.py | py | 640 | python | en | code | 0 | github-code | 36 |
16567280409 | import tensorflow as tf
import keras
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import Input, Dense, Lambda, InputLayer, concatenate, Dropout
from keras.models import Model, Sequential
from keras import backend as K
from keras import metrics
from keras.datasets import mnist
from keras.utils import np_utils
# Start tf session so we can run code.
sess = tf.InteractiveSession()
# Connect keras to the created session.
K.set_session(sess)
def vlb_binomial(x, x_decoded_mean, t_mean, t_log_var):
    """Returns the value of negative Variational Lower Bound
    The inputs are tf.Tensor
    x: (batch_size x number_of_pixels) matrix with one image per row with zeros and ones
    x_decoded_mean: (batch_size x number_of_pixels) mean of the distribution p(x | t), real numbers from 0 to 1
    t_mean: (batch_size x latent_dim) mean vector of the (normal) distribution q(t | x)
    t_log_var: (batch_size x latent_dim) logarithm of the variance vector of the (normal) distribution q(t | x)
    Returns:
    A tf.Tensor with one element (averaged across the batch), VLB
    """
    # VLB = E_q[log p(x|t)] - KL(q(t|x) || N(0, I)).
    # First sum: Bernoulli log-likelihood of the pixels; the 1e-19 offsets
    # keep log() finite when the decoder's sigmoid saturates at 0 or 1.
    # Second sum: closed-form KL between a diagonal Gaussian and N(0, I).
    vlb = tf.reduce_mean(
        tf.reduce_sum(
            x * tf.log(x_decoded_mean + 1e-19)
            + (1 - x) * tf.log(1 - x_decoded_mean + 1e-19),
            axis=1,
        )
        - 0.5
        * tf.reduce_sum(-t_log_var + tf.exp(t_log_var) + tf.square(t_mean) - 1, axis=1)
    )
    # Training minimizes a loss, so return the *negative* VLB.
    return -vlb
def create_encoder(input_dim):
    """Build the encoder: input_dim -> intermediate_dim (ReLU) -> 2*latent_dim.

    The final layer packs the mean and the log-variance of q(t|x) side by
    side, hence the factor of two on latent_dim.
    """
    layers = [
        InputLayer([input_dim]),
        Dense(intermediate_dim, activation="relu"),
        Dense(2 * latent_dim),
    ]
    encoder = Sequential(name="encoder")
    for layer in layers:
        encoder.add(layer)
    return encoder
def create_decoder(input_dim):
    """Build the decoder: input_dim -> intermediate_dim (ReLU) -> original_dim.

    The sigmoid output is interpreted as per-pixel Bernoulli means of p(x|t).
    """
    layers = [
        InputLayer([input_dim]),
        Dense(intermediate_dim, activation="relu"),
        Dense(original_dim, activation="sigmoid"),
    ]
    decoder = Sequential(name="decoder")
    for layer in layers:
        decoder.add(layer)
    return decoder
# Sampling from the distribution
# q(t | x) = N(t_mean, exp(t_log_var))
# with reparametrization trick.
def sampling(args):
    """Returns sample from a distribution N(args[0], diag(args[1]))
    The sample should be computed with reparametrization trick.
    The inputs are tf.Tensor
    args[0]: (batch_size x latent_dim) mean of the desired distribution
    args[1]: (batch_size x latent_dim) logarithm of the variance vector of the desired distribution
    Returns:
    A tf.Tensor of size (batch_size x latent_dim), the samples.
    """
    t_mean, t_log_var = args
    # YOUR CODE HERE
    # Reparametrization trick: z = mu + sigma * eps with eps ~ N(0, I) and
    # sigma = exp(0.5 * log_var), so gradients flow through mu and log_var.
    epsilon = K.random_normal(t_mean.shape)
    z = epsilon * K.exp(0.5 * t_log_var) + t_mean
    return z
# Hyperparameters.
batch_size = 100
original_dim = 784 # Number of pixels in MNIST images.
latent_dim = 3 # d, dimensionality of the latent code t.
intermediate_dim = 128 # Size of the hidden layer.
epochs = 20
# Wire the VAE graph: x -> encoder -> (t_mean, t_log_var) -> sample t -> decoder.
x = Input(batch_shape=(batch_size, original_dim))
encoder = create_encoder(original_dim)
# The encoder output packs mean and log-variance side by side; split it.
get_t_mean = Lambda(lambda h: h[:, :latent_dim])
get_t_log_var = Lambda(lambda h: h[:, latent_dim:])
h = encoder(x)
t_mean = get_t_mean(h)
t_log_var = get_t_log_var(h)
# Draw t ~ q(t|x) with the reparametrization trick (see `sampling`).
t = Lambda(sampling)([t_mean, t_log_var])
decoder = create_decoder(latent_dim)
x_decoded_mean = decoder(t)
| tirthasheshpatel/Generative-Models | vae.py | vae.py | py | 3,413 | python | en | code | 0 | github-code | 36 |
from bs4 import BeautifulSoup
import requests
from selenium import webdriver
import re
import timeit


def print_popularity_data(script_text, variable_name):
    """Extract the JS object literal assigned to *variable_name* inside
    *script_text* and print every key/value pair it holds."""
    matches = re.findall(variable_name + r" = .*;", script_text)
    literal = matches[0].replace(';', '').split('=')[1].lstrip()
    # NOTE(security): eval() on scraped page content executes arbitrary code;
    # if the payload is valid JSON, prefer json.loads / ast.literal_eval.
    parsed = eval(literal)
    for entry in parsed.values():
        for key, value in entry.items():
            print(key, value)


url = "https://www.investing.com/equities/trending-stocks"
driver = webdriver.Chrome(r"C:\Program Files\chromedriver.exe")
driver.get(url)
# x, stockPopularityData
soup = BeautifulSoup(driver.page_source, 'html.parser')
print(soup)
# Print each trending stock's display name (text inside the anchor tag).
for anchor in soup.find_all("a", {"class": "bold block"}):
    pieces = str(anchor).split('>')
    name = pieces[1].split('<')
    print(name[0])
scripts = list(soup.find_all('script'))
# NOTE(review): brittle positional index into the page's <script> tags.
# Renamed from `str` -- the original shadowed the builtin, so any later
# str() call would have crashed.
script_text = str(scripts[43])
# Stock Popularity Data
print("Stock Popularity Data")
print_popularity_data(script_text, "stockPopularityData")
# Sector Popularity Data
print("Sector Popularity Data")
print_popularity_data(script_text, "sectorPopularityData")
# Trending Stock Quota by Price
print(soup.find_all('table', class_='genTbl closedTbl elpTbl elp20 crossRatesTbl'))
# Trending Stock Quota by Performance
performance_tab = driver.find_elements_by_xpath(xpath=r"/html/body/div[5]/section/div[7]/div/div[7]/div/a[2]")[0]
performance_tab.click()
# x, stockPopularityData
soup = BeautifulSoup(driver.page_source, 'html.parser')
# print(soup)
print(soup.find_all('table', class_='genTbl openTbl recentQuotesSideBlockTbl collapsedTbl elpTbl elp30'))
# Trending Stock Quota by Technical
technical_tab = driver.find_elements_by_xpath(xpath=r"/html/body/div[5]/section/div[7]/div/div[7]/div/a[3]")[0]
technical_tab.click()
# x, stockPopularityData
soup = BeautifulSoup(driver.page_source, 'html.parser')
print(soup)
| SRI-VISHVA/WebScrapping | scrapping_73.py | scrapping_73.py | py | 1,915 | python | en | code | 0 | github-code | 36 |
23413820054 | # -*- coding: utf-8 -*-
"""
Create doc tree if you follows
:ref:`Sanhe Sphinx standard <en_sphinx_doc_style_guide>`.
"""
from __future__ import print_function
import json
from pathlib_mate import PathCls as Path
from .template import TC
from .pkg import textfile
class ArticleFolder(object):
    """
    Represent an ``index.rst`` or ``index.ipynb`` file with a Title in a directory.

    :param index_file: the index file name (no file extension)
    :param dir_path: A folder contains single rst file. The rst file path

    An ArticleFolder stands for a directory that contains an ``index.rst``
    or ``index.ipynb`` file; that file must hold at least one title element.
    """
    DEFAULT_INDEX_FILE = "index"
    def __init__(self, index_file=None, dir_path=None):
        if index_file is None:
            index_file = self.DEFAULT_INDEX_FILE
        self.index_file = index_file
        self.dir_path = dir_path
        # Cached title; lazily extracted on first access of `title`.
        self._title = None
    @property
    def rst_path(self):
        """
        The actual rst file absolute path.
        """
        return Path(self.dir_path, self.index_file + ".rst").abspath
    @property
    def ipynb_path(self):
        """
        The actual ipynb file absolute path.
        """
        return Path(self.dir_path, self.index_file + ".ipynb").abspath
    @property
    def rel_path(self):
        """
        File relative path from the folder.
        """
        return "{}/{}".format(Path(self.dir_path).basename, self.index_file)
    @property
    def title(self):
        """
        Title for the first header.
        """
        # Lazily extracted, preferring the .rst file over the .ipynb one.
        if self._title is None:
            if Path(self.rst_path).exists():
                self._title = self.get_title_from_rst()
            elif Path(self.ipynb_path).exists():
                self._title = self.get_title_from_ipynb()
            else:
                pass
        return self._title
    def get_title_from_rst(self):
        """
        Get title line from .rst file.

        Find the top-level header in the ``.rst`` file: the line directly
        above the first full header-bar line of ``====``, ``----``, ``~~~~``
        (or the other bar characters below).
        """
        header_bar_char_list = "=-~+*#^"
        lines = list()
        # Expand any ".. include::" directives inline so a title living in an
        # included file is also found.
        for cursor_line in textfile.readlines(self.rst_path, strip="both", encoding="utf-8"):
            if cursor_line.startswith(".. include::"):
                relative_path = cursor_line.split("::")[-1].strip()
                included_path = Path(Path(self.rst_path).parent.abspath, relative_path)
                if included_path.exists():
                    cursor_line = included_path.read_text(encoding="utf-8")
            lines.append(cursor_line)
        rst_content = "\n".join(lines)
        cursor_previous_line = None
        for cursor_line in rst_content.split("\n"):
            for header_bar_char in header_bar_char_list:
                if cursor_line.startswith(header_bar_char):
                    flag_full_bar_char = cursor_line == header_bar_char * len(cursor_line)
                    # NOTE(review): the (typo'd) name says "> 1" but the check
                    # is >= 1 -- a single bar character counts as a header bar.
                    flag_line_length_greather_than_1 = len(cursor_line) >= 1
                    flag_previous_line_not_empty = bool(cursor_previous_line)
                    if flag_full_bar_char \
                            and flag_line_length_greather_than_1 \
                            and flag_previous_line_not_empty:
                        return cursor_previous_line.strip()
            cursor_previous_line = cursor_line
    # NOTE(review): `msg` is dead code -- built but never printed or logged.
        msg = "Warning, this document doesn't have any %s header!" % header_bar_char_list
        return None
    def get_title_from_ipynb(self):
        """
        Get title line from .ipynb file.

        Find the top-level header in the ``.ipynb`` notebook: the text after
        the first markdown ``# `` heading.
        """
        data = json.loads(Path(self.ipynb_path).read_text())
        for row in data["cells"]:
            if len(row["source"]):
                content = row["source"][0]
                line = content.split("\n")[0]
                if "# " in line:
                    return line[2:].strip()
        # NOTE(review): `msg` is dead code -- built but never printed or logged.
        msg = "Warning, this document doesn't have any level 1 header!"
        return None
    @property
    def sub_article_folders(self):
        """
        Returns all valid ArticleFolder sitting inside of
        :attr:`ArticleFolder.dir_path`.
        """
        l = list()
        for p in Path.sort_by_fname(
            Path(self.dir_path).select_dir(recursive=False)
        ):
            af = ArticleFolder(index_file=self.index_file, dir_path=p.abspath)
            try:
                if af.title is not None:
                    l.append(af)
            # NOTE(review): bare except silently drops folders whose title
            # extraction raises (and also swallows KeyboardInterrupt);
            # narrow to `except Exception` when touching this code.
            except:
                pass
        return l
    def toc_directive(self, maxdepth=1):
        """
        Generate toctree directive text.

        :param maxdepth: forwarded into the rendered ``:maxdepth:`` option.
        :return: the rendered toctree directive covering all
            :attr:`sub_article_folders`.
        """
        articles_directive_content = TC.toc.render(
            maxdepth=maxdepth,
            article_list=self.sub_article_folders,
        )
        return articles_directive_content
    def __repr__(self):
        return "Article(index_file=%r, title=%r)" % (self.index_file, self.title,)
| MacHu-GWU/docfly-project | docfly/doctree.py | doctree.py | py | 5,328 | python | en | code | 0 | github-code | 36 |
4877559988 |
import os
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
import pandas

# Rows of songs.csv to search (the Malayalam sad-songs block) and the
# minimum fuzzy-match score to accept; named instead of magic numbers.
MATCH_START, MATCH_END = 983, 1053
MIN_MATCH_RATIO = 60

data = pandas.read_csv('C:\\Users\\aruny\\Downloads\\ezyzip\\MusicApp\\songs.csv')
data = data.to_dict('records')
song_names = [i['name'] for i in data]
# Raw string so the backslashes are unambiguous (the original relied on
# '\p' and '\M' not being escape sequences). Renamed from `dir`, which
# shadowed the builtin.
songs_dir = r'C:\playlist songs\Malayalam_sad_songs'
for song in os.listdir(songs_dir):
    # Downloaded files use underscores and a .mp3 suffix; normalize to the
    # human-readable name before fuzzy matching.
    downloaded_name = song.replace('_', ' ').replace('.mp3', '')
    print(downloaded_name)
    best_ratio = MIN_MATCH_RATIO
    song_id = -1
    for i in range(MATCH_START, MATCH_END):
        ratio = fuzz.ratio(song_names[i], downloaded_name)
        if ratio > best_ratio:
            best_ratio = ratio
            # DB ids are 1-based while the list index is 0-based.
            song_id = i + 1
    print(song_id)
    if song_id == -1:
        print(song, " is not found")
    else:
        # Rename the file to "<db id>.mp3" so the app can look it up by id.
        old = songs_dir + '/' + song
        new = songs_dir + '/' + str(song_id) + '.mp3'
        os.rename(old, new)
34922299042 | """
shared dataloader for multiple issues
must gurantee that a batch only has data from same issue
but the batches can be shuffled
"""
import collections
from more_itertools import more
from numpy.core import overrides
import torch
from torch import tensor
import torch.nn as nn
import numpy as np
from torch.utils.data import Dataset
from typing import *
from myprompt.data.example import InputExample, InputFeatures
from torch.utils.data._utils.collate import default_collate
from tqdm.std import tqdm
from torch.utils.data import DataLoader
from torch.utils.data.dataset import ConcatDataset, Dataset
from myprompt.utils.logging import logger
from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.utils.dummy_pt_objects import PreTrainedModel
from myprompt.template import Template
from myprompt.verbalizer import Verbalizer
from myprompt.plm.utils import TokenizerWrapper
from collections import defaultdict
from myprompt.utils.logging import logger
from myprompt.utils.utils import round_list, signature
from torch.utils.data.sampler import RandomSampler, Sampler
import random
import math
class MultiDataLoader(object):
    """Merge the batches of several dataloaders into one iterable.

    Every batch is taken intact from a single underlying dataloader (so a
    batch never mixes data from different issues), but the batch order can
    be shuffled across dataloaders.
    """

    def __init__(self, dataloaders, shuffle_batch=True):
        """
        :param dataloaders: mapping name -> iterable dataloader; each one is
            fully consumed up front and its batches cached in memory.
        :param shuffle_batch: shuffle the merged batch order in place.
        """
        super().__init__()
        self.dataloaders = dataloaders
        # Single-pass flatten; ``sum(list_of_lists, [])`` was quadratic in
        # the total number of batches.
        self.batches = [b for k in self.dataloaders for b in iter(self.dataloaders[k])]
        if shuffle_batch:
            random.shuffle(self.batches)

    def __len__(self):
        return len(self.batches)

    def __iter__(self):
        return (b for b in self.batches)
class myDataset(Dataset):
    """Minimal ``Dataset`` wrapper around an in-memory sequence of samples."""

    def __init__(self, data):
        # Keep a reference only; no copying or preprocessing happens here.
        self.samples = data

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, index):
        sample = self.samples[index]
        return sample
class BatchSchedulerSampler(Sampler):
    """
    Sampler over a :class:`ConcatDataset` that yields one full random batch
    per task (sub-dataset) at a time, so every mini-batch contains indexes
    from exactly one task.  Smaller tasks are resampled (iterators restarted)
    until the largest task has been consumed once.
    """
    def __init__(self, dataset, batch_size):
        # dataset: a torch ConcatDataset; its .datasets are expected to be
        # ``myDataset`` instances (they expose ``.samples``).
        self.dataset = dataset
        self.batch_size = batch_size
        self.number_of_datasets = len(dataset.datasets)
        self.largest_dataset_size = max([len(cur_dataset.samples) for cur_dataset in dataset.datasets])
    def __len__(self):
        # largest dataset padded up to a whole number of batches, times the
        # number of tasks (every task contributes the same number of samples).
        return self.batch_size * math.ceil(self.largest_dataset_size / self.batch_size) * len(self.dataset.datasets)
    def __iter__(self):
        # One independent RandomSampler (and its live iterator) per task.
        samplers_list = []
        sampler_iterators = []
        for dataset_idx in range(self.number_of_datasets):
            cur_dataset = self.dataset.datasets[dataset_idx]
            sampler = RandomSampler(cur_dataset)
            samplers_list.append(sampler)
            cur_sampler_iterator = sampler.__iter__()
            sampler_iterators.append(cur_sampler_iterator)
        # Offset that maps a task-local index into the ConcatDataset index space.
        push_index_val = [0] + self.dataset.cumulative_sizes[:-1]
        step = self.batch_size * self.number_of_datasets
        samples_to_grab = self.batch_size
        # for this case we want to get all samples in dataset, this force us to resample from the smaller datasets
        epoch_samples = self.largest_dataset_size * self.number_of_datasets
        final_samples_list = []  # this is a list of indexes from the combined dataset
        for _ in range(0, epoch_samples, step):
            # Visit the tasks in a fresh random order for every round so the
            # batch-level task order is shuffled too.
            random_idx= list(range(self.number_of_datasets))
            random.shuffle(random_idx)
            #for i in range(self.number_of_datasets):
            for i in random_idx:
                cur_batch_sampler = sampler_iterators[i]
                cur_samples = []
                for _ in range(samples_to_grab):
                    try:
                        cur_sample_org = cur_batch_sampler.__next__()
                        cur_sample = cur_sample_org + push_index_val[i]
                        cur_samples.append(cur_sample)
                    except StopIteration:
                        # got to the end of iterator - restart the iterator and continue to get samples
                        # until reaching "epoch_samples"
                        sampler_iterators[i] = samplers_list[i].__iter__()
                        cur_batch_sampler = sampler_iterators[i]
                        cur_sample_org = cur_batch_sampler.__next__()
                        cur_sample = cur_sample_org + push_index_val[i]
                        cur_samples.append(cur_sample)
                final_samples_list.extend(cur_samples)
        return iter(final_samples_list)
class PromptShareDataLoader(object):
    """Dataloader shared across multiple issues/tasks.

    Each task's examples are wrapped with its own :class:`Template`,
    tokenized into :class:`InputFeatures`, concatenated into one
    ``ConcatDataset`` and served through a ``DataLoader`` whose
    :class:`BatchSchedulerSampler` guarantees that every batch contains data
    from a single task only.

    NOTE(review): ``shuffle`` is stored but never applied anywhere visible
    here -- sample-level shuffling appears to be a no-op; confirm intent.
    """
    def __init__(self,
                 dataset: Dict[str, List],
                 template: Dict[str, Template],
                 tokenizer: PreTrainedTokenizer,
                 tokenizer_wrapper_class: TokenizerWrapper,
                 max_seq_length: Optional[str] = 512,
                 batch_size: Optional[int] = 1,
                 shuffle: Optional[bool] = False, #shuffle_sample
                 shuffle_batch: Optional[bool] = True, #shuffle_batch
                 teacher_forcing: Optional[bool] = False,
                 decoder_max_length: Optional[int] = -1,
                 predict_eos_token: Optional[bool] = False,
                 truncate_method: Optional[str] = "tail",
                 **kwargs,
                 ):
        self.raw_dataset = dataset
        self.wrapped_dataset = collections.defaultdict(list)
        self.tensor_dataset = collections.defaultdict(list)
        self.template = template
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.shuffle_batch = shuffle_batch
        self.teacher_forcing = teacher_forcing
        # Forward to the tokenizer wrapper only the kwargs its __init__
        # actually declares.
        tokenizer_wrapper_init_keys = signature(tokenizer_wrapper_class.__init__).args
        prepare_kwargs = {
            "max_seq_length" : max_seq_length,
            "truncate_method" : truncate_method,
            "decoder_max_length" : decoder_max_length,
            "predict_eos_token" : predict_eos_token,
            "tokenizer" : tokenizer,
            **kwargs,
        }
        to_pass_kwargs = {key: prepare_kwargs[key] for key in prepare_kwargs if key in tokenizer_wrapper_init_keys}
        self.tokenizer_wrapper = tokenizer_wrapper_class(**to_pass_kwargs)
        #check the wrap function
        for k in self.template:
            assert hasattr(self.template[k], 'wrap_one_example'), "Your prompt template has no function variable \
                named wrap_one_example"
        #process: 2 main steps of dataloader
        self.wrap()
        self.tokenize()
        # One myDataset per task, concatenated so BatchSchedulerSampler can
        # address them via cumulative_sizes.
        concat_dataset = ConcatDataset([myDataset(self.tensor_dataset[k]) for k in self.tensor_dataset])
        self.dataloader = DataLoader(
            concat_dataset,
            batch_size = self.batch_size,
            sampler = BatchSchedulerSampler(concat_dataset, batch_size=batch_size),
            collate_fn = InputFeatures.collate_fct
        )
    def wrap(self):
        """
        Wrap every raw example with its task's template, filling
        :attr:`wrapped_dataset` keyed by task name.
        """
        if isinstance(self.raw_dataset, Dict):
            for k in self.raw_dataset:
                for idx, example in enumerate(self.raw_dataset[k]):
                    wrapped_example = self.template[k].wrap_one_example(example)
                    self.wrapped_dataset[k].append(wrapped_example)
        else:
            raise NotImplementedError
    def tokenize(self):
        """
        Pass each wrapped example through the prompt-specialized tokenizer,
        filling :attr:`tensor_dataset` with tensorized ``InputFeatures``.
        """
        for k in self.wrapped_dataset:
            for idx, wrapped_example in tqdm(enumerate(self.wrapped_dataset[k]), desc ='tokenizing'):
                inputfeatures = InputFeatures(**self.tokenizer_wrapper.tokenize_one_example(wrapped_example, self.teacher_forcing), **wrapped_example[1]).to_tensor()
                self.tensor_dataset[k].append(inputfeatures)
    def __len__(self):
        return len(self.dataloader)
    def __iter__(self):
        return self.dataloader.__iter__()
| xymou/Frame_Detection | myprompt/myprompt/data/share_dataloader.py | share_dataloader.py | py | 7,930 | python | en | code | 1 | github-code | 36 |
3327955898 | import argparse
import re
# Captures "<number><unit>" with optional sign/decimals, e.g. "-3.5 km/h".
# BUGFIX: the decimal point is now escaped (\.); the original bare '.'
# matched any character, so e.g. "1a2 s" produced the "number" "1a2" and
# later crashed float().  Now such strings simply do not match.
_re_pattern_value_unit = r"^\s*(-?\d+(?:\.\d+)?)\s*([a-zA-Z]*?[\/a-zA-Z]*)$"
# this regex captures the following example normal(3s,1ms) with/without spaces between parenthesis and comma(s)
# it's also able to capture the sample duration specified after the distribution e.g., normal(3s,1ms) H 2ms with/without spaces
_re_pattern_compact_distribution = re.compile(
    r'^(?P<family>[a-z]+\b)\s*\(\s*(?P<args>-?\d*\.?\d+\s*[a-z]*\b(\s*,\s*-?\d*\.?\d+\s*[a-z]*\b)*)\s*\)\s*(H\s*(?P<sample_duration>\d*\.?\d+\s*[a-z]*)\b)*$')
_re_pattern_composition_compact_distribution = re.compile(r"[a-z]+\s*\(.+\).*\s*\+\s*([a-z]+\s*\(.+\).*\s*)+\s*$")
_re_pattern_compact_distance_strategy = re.compile(
    r'^(?P<strategy_type>[a-z_]+\b)\s*\(\s*(?P<params>-?\d*\.?\d+\s*[a-z]*\b(\s*,\s*-?\d*\.?\d+\s*[a-z]*\b)*)\s*\)\s*$')
_re_pattern_compact_leader_maneuvers = re.compile(
    r'^(?P<maneuvers_type>[a-z_]+\b)\s*\(\s*(?P<params>-?\d*\.?\d+\s*[a-z]*\b(\s*,\s*-?\d*\.?\d+\s*[a-z]*\b)*)\s*\)\s*$')
_re_pattern_compact_trace_leader_maneuvers = re.compile(
    r'^(?P<maneuvers_type>[a-z_]+\b)\s*\(\s*(?P<params>[\w/\.-]+\s*,\s*(?:False|True)\s*)\)\s*$')
def parse_unit_measurement(config_dict):
    """
    Recursively convert every human-readable value in *config_dict*
    (e.g. ``"3 ms"``, ``"10 kph"``) into International System units.

    :param config_dict: possibly nested configuration mapping
    :return: a new mapping with the same structure and converted leaf values
    """
    result = config_dict.copy()
    for key, value in result.items():
        if isinstance(value, dict):
            converted = parse_unit_measurement(value)
        elif isinstance(value, list):
            converted = [
                parse_and_convert_value(item) if isinstance(item, str) else parse_unit_measurement(item)
                for item in value
            ]
        elif isinstance(value, str):
            converted = parse_and_convert_value(value)
        else:
            converted = value
        result[key] = converted
    return result
def parse_and_convert_value(value):
    """Convert one config value.

    Strings are tried, in order, as a '+'-composition of distributions, a
    single compact distribution, then a plain "<number> <unit>" value.
    Non-string values pass through untouched.
    """
    if not isinstance(value, str):
        return value
    for parser in (parse_composition_compact_distribution, parse_distribution_compact):
        parsed = parser(value)
        if parsed is not None:
            return parsed
    return _parse_single_unit_value(value)
def _parse_single_unit_value(value_str):
    """
    Convert a "<number> <unit>" string (space optional) into an SI value.

    A bare number is returned as ``float``.  Strings that do not look like
    "<number><unit>" at all are returned unchanged, so non-numeric config
    values (e.g. file paths) survive the conversion pass.

    :raises ValueError: when the number parses but the unit is unknown
    """
    match = re.match(_re_pattern_value_unit, value_str)
    if match:
        value, unit = match.groups()
        if not unit:
            # Pure number, nothing to convert.
            return float(value)
    else:
        # Not a value/unit string: pass through untouched.
        return value_str
    # speed units
    if unit == 'kph':
        return float(value) / 3.6
    if unit == 'mph':
        return float(value) * 0.44704
    # time units
    if unit == 's':
        return float(value)
    if unit == 'ms':
        return float(value) / 1000
    if unit == 'us':
        return float(value) / 1000000
    if unit == 'm/s':
        return float(value)
    if unit == 'km/h':
        return float(value) / 3.6
    # length units
    if unit == 'm':
        return float(value)
    if unit == 'mm':
        return float(value) / (10 ** 3)
    if unit == 'cm':
        return float(value) / (10 ** 2)
    if unit == 'dm':
        return float(value) / 10
    if unit == 'km':
        return float(value) * 1000
    # ratio
    if unit == '%':
        return float(value) / 100
    # ValueError (narrowed from a bare ``Exception``) so callers can catch
    # bad units without swallowing unrelated failures.
    raise ValueError("{} contains unknown unit {} {}".format(value_str, value, unit))
def _parse_distribution_compact_format(distribution_match):
    """Expand a compact-format regex match into a distribution config dict."""
    groups = distribution_match.groupdict()
    raw_args = groups['args'].split(',')
    args = [_parse_single_unit_value(arg.strip()) for arg in raw_args]
    sample_duration = None
    if groups['sample_duration']:
        sample_duration = _parse_single_unit_value(groups['sample_duration'])
    return expand_compact_distribution_format(groups['family'], args, sample_duration)
def parse_distribution_compact(value, raise_if_error=False):
    """
    Parse a compact distribution string such as ``"normal(3s, 1ms) H 2ms"``.

    :param value: candidate compact-distribution string
    :param raise_if_error: currently unused. NOTE(review): callers pass
        ``True`` and the old docstring promised a ``(value, exception)``
        tuple return; neither is implemented here -- confirm intended API.
    :return: the expanded distribution config dict, or ``None`` when *value*
        does not match the compact format.
    """
    distribution_match = _re_pattern_compact_distribution.match(value)
    if distribution_match:
        return _parse_distribution_compact_format(distribution_match)  # match , no error
def parse_composition_compact_distribution(value):
    """
    Parse a '+'-separated composition of compact distributions, e.g.
    ``"normal(3s,1ms) + exponential(2ms)"``.

    :return: list of expanded distribution config dicts, or ``None`` when
        *value* is not a composition.
    """
    if _re_pattern_composition_compact_distribution.match(value) is None:
        return None
    # BUGFIX: parse_distribution_compact returns the parsed dict itself, not
    # a (value, error) tuple -- indexing its result with [0] raised KeyError
    # for every composition string.
    return [parse_distribution_compact(component.strip(), raise_if_error=True)
            for component in value.split('+')]
# def _split_and_convert_param(param_str):
# p_name, p_value = param_str.split('=')
# return p_name, float(_parse_single_unit_value(p_value))
class DistributionParserAction(argparse.Action):
    """argparse action that expands a compact distribution / value-unit string
    into its parsed form before storing it on the namespace."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = parse_and_convert_value(values)
        setattr(namespace, self.dest, parsed)
def _expand_constant(value):
    """Expanded config dict for a degenerate, constant-valued distribution."""
    return {'family': 'constant',
            'parameters': {'value': value}}
def _expand_uniform(min_value, max_value):
    """Expanded config dict for a uniform distribution on [min_value, max_value]."""
    return {'family': 'uniform',
            'parameters': {'min_value': min_value,
                           'max_value': max_value}}
def _expand_normal(mu, sigma):  # TODO min/max values <optional>
    """Expanded config dict for a normal distribution N(mu, sigma)."""
    return {'family': 'normal',
            'parameters': {'mu': mu,
                           'sigma': sigma}}
def _expand_exponential(rate, min_value=0):
    """Expanded config dict for a (shifted) exponential distribution."""
    return {'family': 'exponential',
            'parameters': {'rate': rate,
                           'min_value': min_value}}
def _expand_lognormal(mu, sigma, min_value=0):
    """Expanded config dict for a (shifted) lognormal distribution."""
    return {'family': 'lognormal',
            'parameters': {'mu': mu,
                           'sigma': sigma,
                           'min_value': min_value}}
def _expand_erlang(k, u, min_value=0):
    """Expanded config dict for a (shifted) Erlang(k, u) distribution."""
    return {'family': 'erlang',
            'parameters': {'k': k,
                           'u': u,
                           'min_value': min_value}}
def _expand_hypoexponential(*rates):
    """Expanded config dict for a hypoexponential distribution (one rate per stage)."""
    return {'family': 'hypoexponential',
            'parameters': {'rates': rates}}
# family : (expansion function, list of accepted argument counts)
_distribution_dict = {'constant': (_expand_constant, [1]),
                      'normal': (_expand_normal, [2]),
                      'uniform': (_expand_uniform, [2]),
                      'exponential': (_expand_exponential, [1, 2]),
                      'lognormal': (_expand_lognormal, [2, 3]),
                      'erlang': (_expand_erlang, [2, 3]),
                      # BUGFIX: was ``range(2)`` == [0, 1], which rejected
                      # every real hypoexponential (it needs >= 2 stage
                      # rates).  Accept 2..20 stages.
                      'hypoexponential': (_expand_hypoexponential, list(range(2, 21)))
                      }
class ProbabilityDistributionNotImplemented(Exception):
    """Raised when a requested distribution family has no expansion."""


class ProbabilityDistributionWrongArity(Exception):
    """Raised when a distribution family is given an unsupported number of arguments."""
def expand_compact_distribution_format(family, args, sample_duration):
    """Expand ``family(args...)`` into a full distribution config dict,
    attaching ``sample_duration`` when given.

    :raises ProbabilityDistributionNotImplemented: unknown *family*
    :raises ProbabilityDistributionWrongArity: wrong number of *args*
    """
    entry = _distribution_dict.get(family)
    if entry is None:
        raise ProbabilityDistributionNotImplemented(
            f"{family} is not implemented. Use the families {list(_distribution_dict.keys())}")
    expansion_func, num_args_options = entry
    if len(args) not in num_args_options:
        raise ProbabilityDistributionWrongArity(
            f"{family} takes {num_args_options} argument/s, {len(args)} have provided instead.")
    config = expansion_func(*args)
    if sample_duration:
        config['sample_duration'] = sample_duration
    return config
if __name__ == '__main__':
delay = parse_unit_measurement(
{'delay.backhaul.uplink_extra_delay': 'exponential(10ms, 10ms) H 10ms'})
print(delay)
| connets/tod-carla | src/args_parse/parser_utils.py | parser_utils.py | py | 7,994 | python | en | code | 3 | github-code | 36 |
37914230085 | from pathlib import Path
import json
import plotly.express as px
import numpy as np
# Read data as a string and convert to a Python object.
path = Path("eq_data/eq_data.geojson")
contents = path.read_text(encoding="utf-8")
all_eq_data = json.loads(contents)
# Examine all the earthquakes in dataset
all_eq_dicts = all_eq_data["features"]
mags, lons, lats, eq_titles = [], [], [], []
for eq_dict in all_eq_dicts:
    # GeoJSON coordinates are [lon, lat(, depth)].
    # NOTE(review): USGS feeds can carry a null "mag"; a None here would
    # break the numpy normalization below -- confirm the feed is clean.
    mag = eq_dict["properties"]["mag"]
    lon = eq_dict["geometry"]["coordinates"][0]
    lat = eq_dict["geometry"]["coordinates"][1]
    eq_title = eq_dict["properties"]["title"]
    mags.append(mag)
    lons.append(lon)
    lats.append(lat)
    eq_titles.append(eq_title)
# Normalize magnitudes to [0, 1] for marker sizing.
# NOTE(review): divides by (max - min); if every quake has the same
# magnitude this is a division by zero -- guard if that can happen.
normalized_mags = np.array(mags)
normalized_mags = (normalized_mags - np.min(normalized_mags)) / (
    np.max(normalized_mags) - np.min(normalized_mags)
)
# Adjust marker size scaling
marker_scaling_factor = 10
scaled_marker_sizes = normalized_mags * marker_scaling_factor
title = "Global Magnitude 4.5+ Earthquakes, Past Month"
# Marker size encodes normalized magnitude; color encodes raw magnitude.
fig = px.scatter_geo(
    lat=lats,
    lon=lons,
    title=title,
    size=scaled_marker_sizes,
    size_max=15,
    color=mags,
    color_continuous_scale="dense",
    labels={"color": "Magnitude", 'lon':"Longitude", 'lat':'Latitude'},
    projection="natural earth",
    hover_name=eq_titles,
)
# Customize hover label format
hover_template = (
    "<b>%{hovertext}</b><br>"
    "<b>Magnitude:</b> %{marker.color:.2f}<br>"
    "<b>Longitude:</b> %{lon}<br>"
    "<b>Latitude:</b> %{lat}<extra></extra>"
)
fig.update_traces(hovertemplate=hover_template)
fig.show()
| hharpreetk/python-earthquake-data-viz | eq_explore_data.py | eq_explore_data.py | py | 1,615 | python | en | code | 0 | github-code | 36 |
10603730701 | import argparse
import time
import datetime
from pythonosc import udp_client

if __name__ == "__main__":
    # CLI options for the target OSC (VRChat) endpoint.
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", default="127.0.0.1",
                        help="The ip of the OSC server")
    parser.add_argument("--port", type=int, default=9000,
                        help="The port the OSC server is listening on")
    args = parser.parse_args()
    client = udp_client.SimpleUDPClient(args.ip, args.port)
    # Base time is 07:00:00; the avatar "timer" parameter switches on once
    # the local wall clock passes it.
    base = datetime.time(7, 0, 0)
    # (A second, redundant ``if __name__ == '__main__'`` guard used to be
    # nested here -- it was always true at this point and has been removed.)
    while True:
        dt_now = datetime.datetime.now()
        now = dt_now.time()
        if base > now:
            client.send_message("/avatar/parameters/timer", 0)
            print("timer off ")
        else:
            client.send_message("/avatar/parameters/timer", 1)
            print("timer on ")
        # Re-check once a minute.
        time.sleep(60)
| kakuchrome/OSCscript | timer.py | timer.py | py | 968 | python | en | code | 0 | github-code | 36 |
17079421444 | from glob import glob
from itertools import chain
import logging
from lib.language import canonicalize_tokens, clean_text, clean_tokens, tokenize_text
from lib.language.types import TokenStream
from lib.util import chain as chain_calls, extract_text
logger = logging.getLogger(__name__)
def tokenize_corpus(globby_path: str) -> TokenStream:
    """Convert a directory full of files into a stream of lemmatized tokens.

    :param globby_path: glob pattern (e.g. ``"bills/*.txt"``) selecting the
        corpus files to process.
    :return: one lazy token stream: each matched file is piped through
        extract -> clean text -> tokenize -> clean tokens -> canonicalize,
        and the per-file streams are chained together.
    """
    return chain.from_iterable(
        chain_calls(
            extract_text,
            clean_text,
            tokenize_text,
            clean_tokens,
            list, # must force eager evaluation here for lemmatizing
            canonicalize_tokens,
        )(bill)
        for bill
        in glob(globby_path)
    )
| amy-langley/tracking-trans-hate-bills | lib/tasks/tokenize_corpus.py | tokenize_corpus.py | py | 752 | python | en | code | 2 | github-code | 36 |
476391810 | from jwcrypto import jwk
import json
from .key_base import KeyBase
class Jwks(KeyBase):
    """Endpoint handler that publishes the public keys as a JWKS document."""

    # Only these JWK members are exposed in the response; anything else
    # present on the stored key is stripped.
    _PUBLIC_MEMBERS = ("kid", "use", "e", "n", "kty", "alg")

    def handle(self, input_output):
        """Respond 200 with ``{"keys": [...]}`` built from the public keys."""
        path = self.configuration("path_to_public_keys")
        public_keys = self.fetch_and_check_keys(path)
        keys = []
        for key in public_keys.values():
            keys.append({member: key[member] for member in self._PUBLIC_MEMBERS})
        return self.respond_unstructured(input_output, {"keys": keys}, 200)
| cmancone/clearskies-auth-server | src/clearskies_auth_server/handlers/jwks.py | jwks.py | py | 597 | python | en | code | 0 | github-code | 36 |
73520719463 | from Functions import Functions
from log import Log
from webdriver import WebDriver
import PySimpleGUI as sg
import json
class Main:
    """Interactive Selenium step recorder/replayer.

    On start it opens the configured URL; if ``Steps.json`` already holds
    recorded steps they are replayed immediately.  ``open_window`` shows a
    PySimpleGUI form used to record new steps one at a time.
    """
    def __init__(self):
        self.ind = 0  # Index of the next step to execute/record.
        self.log = Log()  # Logging helper.
        self.url = Functions().Json('Collector', 'url')  # Platform URL.
        self.webdriver = WebDriver().getDriver()  # Configured webdriver.
        self.webdriver.get(self.url)  # Navigate to the URL.
        self.Json_File = Functions().Json()  # Loads the steps json.
        if self.Json_File['Steps']:  # Replay existing steps, if any.
            SeleniumActions().LoopDriver(self.log, self.webdriver)
    def open_window(self):
        """Run the recorder GUI loop: Save appends+executes the current step,
        Delete removes the last recorded step."""
        sg.theme('Dark Blue 2')
        layout = [[sg.Text('Action '),
                   sg.Combo(['Digitar', 'Enter', 'Click', 'Click_js', 'Double_Click', 'Iframe', 'IframeOFF', 'Link',
                             'Attribute_ID', 'set_class', 'Alert', 'New_Pag', 'Close', 'Refresh', 'Clear'],
                            key='Action'),
                   sg.Checkbox('Search_element', default=True, key='Search')],
                  [sg.Text('Find '),
                   sg.Combo(['CSS_SELECTOR', 'ID', 'XPATH', 'Attribute_ID', 'Seletor_Js'],
                            key='Find'),
                   sg.Text('Sleep '), sg.Spin(['0', '1', '2', '3', '4', '5'], key='Sleep')],
                  [sg.Text('Element'), sg.InputText(key='Element')],
                  [sg.Text('Value '), sg.InputText(key='Value')],
                  [sg.Button('Save'), sg.Button('Delete')],
                  [sg.Output(size=(50, 7), key='-OUT-')]]
        window = sg.Window('Debug Selenium', layout)
        while True:
            self.event, values = window.read()
            if self.event == sg.WIN_CLOSED:
                break
            # Snapshot of the form as a step record (all values as strings).
            self.dic_steps = {
                "Search": f"{str(values['Search'])}",
                "Action": f"{values['Action']}",
                "Sleep": f"{values['Sleep']}",
                "Find": f"{values['Find']}",
                "Element": f"{values['Element']}",
                "Value": f"{values['Value']}"
            }
            if self.event == 'Delete':
                # Drop the last recorded step and persist the file.
                self.Json_File['Steps'].pop(-1)
                self.ind -= 1
                with open('Steps.json', 'w') as json_Steps:
                    json.dump(self.Json_File, json_Steps, indent=4)
                print('The last action successfully deleted!!')
            if self.event == 'Save':
                # Append the new step, persist, and execute it right away.
                self.Json_File['Steps'].append(self.dic_steps)
                with open('Steps.json', 'w') as json_Steps:
                    json.dump(self.Json_File, json_Steps, indent=4)
                SeleniumActions().Driver(self.webdriver, self.log, self.ind)
                self.ind += 1
            self.event = ''
class SeleniumActions:
    """Replays the steps recorded in ``Steps.json`` against a webdriver."""

    def __init__(self):
        self.Json_Steps = Functions().Json('Steps')

    def _run_step(self, webdriver, log, step, ind, step_label):
        """Execute one recorded step (shared by Driver and LoopDriver).

        NOTE(review): when ``step['Search'] != 'True'`` nothing is executed
        at all, because the action call depends on the located element --
        confirm that skipping such steps is the intended behavior.
        """
        log.debug(f"")
        log.debug(f"--- Started Step {ind + 1}/{len(self.Json_Steps)} ---")
        print()
        print(f" --- Started Step {step_label} ---")
        if step['Search'] == 'True':
            element = Functions().Element(webdriver, step['Find'], step['Element'])
            Functions().Actions(step['Action'], step['Value'], webdriver,
                                step['Sleep'], element)

    def Driver(self, webdriver, log, ind):
        """Execute only step ``ind`` (used by the interactive recorder)."""
        self._run_step(webdriver, log, self.Json_Steps[ind], ind, f"{ind + 1}..")

    def LoopDriver(self, log, webdriver):
        """Execute every recorded step in order."""
        for ind, json_steps in enumerate(self.Json_Steps):
            self._run_step(webdriver, log, json_steps, ind,
                           f"{ind + 1}/{len(self.Json_Steps)}")
if __name__ == "__main__":
    # The collector "debug" flag chooses between the interactive recorder
    # GUI and a straight replay of Steps.json (done by Main.__init__).
    Debug = Functions().Json('Collector', 'debug')
    if Debug == 'True':
        Main().open_window()
    elif Debug == 'False':
        Main()
| LucasAmorimDC/SeleniumCollector | Selenium_Collector/Main.py | Main.py | py | 4,318 | python | en | code | 0 | github-code | 36 |
14128327348 | #!/usr/local/bin/ python3
# -*- coding:utf-8 -*-
# __author__ = "zenmeder"
class Solution(object):
    """LeetCode 35 -- Search Insert Position."""

    def searchInsert(self, nums, target):
        """Return the index of ``target`` in sorted ``nums``, or the index
        where it would be inserted to keep the list sorted.

        Classic binary search, O(log n); returns 0 for an empty list.
        """
        low = 0
        high = len(nums) - 1
        while low <= high:
            # Floor division instead of int((low+high)/2): no float
            # round-trip, exact for arbitrarily large indices.
            mid = (low + high) // 2
            if nums[mid] == target:
                return mid
            elif nums[mid] < target:
                low = mid + 1
            else:
                high = mid - 1
        # On exit ``high`` sits just left of the insertion point.
        return high + 1
solution = Solution()
# -1 is smaller than every element, so the insert position printed is 0.
print(solution.searchInsert([0,2,4,6,8],-1))
35198885002 | # -*- coding: utf-8 -*-
"""
Test to check api calls in in varonis assignment
"""
import json
import pytest
import requests
URL = "http://localhost:8000"
data = {
"data": [{"key": "key1", "val": "val1", "valType": "str"}]
}
credentials_data = {
"username": "test",
"password": "1234"
}
wrong_data = {
"username": "wrong",
"password": "1111"
}
def get_server_token(log_data):
    """
    Fetch an authorization token from the server.

    Arg:
        (dict) log_data: credentials payload for ``/api/auth/``
    Returns:
        (str) the access token, or an empty string on failure
    """
    response = requests.post(f"{URL}/api/auth/", json=log_data)
    if response.status_code == 200:
        token = response.json()["access_token"]
        print("Authenticated successfully! token: " + token)
        return token
    # BUGFIX: the original concatenated the int status code to a string
    # (TypeError) and returned None, while the docstring and the callers
    # expect an empty string on failure.
    print(f"Error! Response code: {response.status_code}")
    return ""
def get_objects_ids(headers):
    """
    Fetch all poly objects from the server and return their ids.

    Returns:
        (list) object ids
    """
    res = requests.get(f"{URL}/api/poly/", headers=headers)
    poly_objects = json.loads(res.text)
    return [poly['object_id'] for poly in poly_objects]
def create_obj(headers):
    """
    Create a poly object on the server (payload: module-level ``data``).

    Returns:
        (obj) the created poly object, parsed from the response
    """
    res = requests.post(f"{URL}/api/poly/", json=data, headers=headers)
    # Echo the server response for debugging.
    print(json.dumps(res.json(), indent=4, default=str))
    return json.loads(res.text)
def delete_obj(object_id, headers):
    """
    Delete a poly object.

    Args:
        object_id (int): the object to delete
        headers (dict): auth headers for the server
    Returns:
        (int) HTTP status code of the delete call
    """
    response = requests.delete(f"{URL}/api/poly/{object_id}", headers=headers)
    return response.status_code
def test_wrong_auth():
    """Authentication with wrong credentials must NOT yield a token.

    BUGFIX: asserts on truthiness instead of ``== ""`` -- the failure value
    may be falsy without being exactly the empty string, which previously
    made this test fail for the wrong reason.
    """
    access_token = get_server_token(wrong_data)
    assert not access_token, "Could connect to the server with wrong credentials"
def test_auth():
    """Authentication with valid credentials must yield a non-empty token.

    BUGFIX: the old ``assert access_token != ""`` passed even when
    ``get_server_token`` signaled failure with ``None``; assert truthiness
    so any falsy failure value is caught.
    """
    access_token = get_server_token(credentials_data)
    assert access_token, "Failure in server authentication!"
| eldar101/EldarRep | Python/Varonis/api_assignment/test_api.py | test_api.py | py | 2,627 | python | en | code | 0 | github-code | 36 |
2769961219 | from weixin_api_two.api.contact.get_wework_token import WeworkToken
from weixin_api_two.uitls.get_data import GetData
import loguru
class Tag(WeworkToken):
    """WeWork external-contact corp-tag API client.

    URLs are assembled from the project's URL data file; each method builds
    a request spec dict and delegates to ``self.request`` (from the
    WeworkToken base, which also supplies ``self.token``).
    """
    def __init__(self):
        self.baseurl=GetData()
        self.log= loguru.logger
        # Base tag endpoint plus per-action suffixes, read from config.
        self.tagurl=self.baseurl.get_UrlData('url','tag')
        self.addurl=self.baseurl.get_UrlData('action','tag','add')
        self.searchurl=self.baseurl.get_UrlData('action','tag','search')
        self.deleteurl=self.baseurl.get_UrlData('action','tag','delete')
    def search(self):
        """List all corp tag groups (empty JSON body returns everything)."""
        data = {
            # "url": "https://qyapi.weixin.qq.com/cgi-bin/externalcontact/get_corp_tag_list",
            "url": self.tagurl + self.searchurl,
            "method": "post",
            "params": {"access_token": self.token},
            "json": {}
        }
        return self.request(data)
    def add(self, tag_list, group_name):
        """Create one tag named ``tag_list`` inside group ``group_name``."""
        data = {
            # "url": "https://qyapi.weixin.qq.com/cgi-bin/externalcontact/add_corp_tag",
            "url": self.tagurl+self.addurl,
            "method": "post",
            "params": {"access_token": self.token},
            "json": {
                "group_name": group_name,
                "tag": [{
                    "name": tag_list,
                }],
            }
        }
        return self.request(data)
    def delete(self, group_id: list = None, tag_id: list = None):
        """Delete tags and/or whole tag groups by their id lists."""
        data = {
            # "url": "https://qyapi.weixin.qq.com/cgi-bin/externalcontact/del_corp_tag",
            "url": self.tagurl + self.deleteurl,
            "method": "post",
            "params": {"access_token": self.token},
            "json": {
                "group_id": group_id,
                "tag_id": tag_id
            }
        }
        return self.request(data)
    def delete_list(self, tag_id_list):
        """Delete the given tag ids (no group deletion)."""
        data = {
            # "url": "https://qyapi.weixin.qq.com/cgi-bin/externalcontact/del_corp_tag",
            "url": self.tagurl + self.deleteurl,
            "method": "post",
            "params": {"access_token": self.token},
            "json": {
                "tag_id": tag_id_list
            }
        }
        return self.request(data)
    def clear(self):
        """Delete every tag in every group (flattens search() results)."""
        r = self.search()
        tag_id_list = [tag['id'] for group in r.json()['tag_group'] for tag in group['tag']]
        r = self.delete_list(tag_id_list)
        return r
| liwanli123/HogwartProjectPractice | weixin_api_two/api/externalcontact/tag_api.py | tag_api.py | py | 2,429 | python | en | code | 0 | github-code | 36 |
2922317279 | import os
# Root folders for the MNIST experiment (absolute, machine-specific paths).
DATA_DIR = '/home/sdemyanov/synapse/UniversalGAN/data/mnist'
RESULTS_DIR = '/home/sdemyanov/synapse/UniversalGAN/results/mnist'
#DATA_DIR = '/home/ge/Project/ICCV2017/lib/UniversalGAN/data/mnist'
#RESULTS_DIR = '/home/ge/Project/ICCV2017/lib/UniversalGAN/results/mnist'
# Dataset split sub-folder names.
TRAIN_FOLD = 'train'
VALID_FOLD = 'valid'
TEST_FOLD = 'test'
# Checkpoint file name; RESTORING_FILE is an optional checkpoint to resume from.
MODEL_NAME = 'model.ckpt'
RESTORING_FILE = None
# Hyper-parameter dump written next to the results.
PARAMS_FILE = os.path.join(RESULTS_DIR, 'params.json')
| sdemyanov/tensorflow-worklab | paths.py | paths.py | py | 450 | python | en | code | 24 | github-code | 36 |
23268554610 | import os
import asyncio
from user import User
class MessageError(Exception):
    """Raised when a Message is constructed with missing or invalid fields."""
class Message:
    """A chat message: a channel, some text, and the sending :class:`User`."""

    @classmethod
    async def create(cls, **args):
        """Async factory: build and validate a message.

        :raises MessageError: if ``channel`` or ``text`` is missing, or if
            ``user`` is not a :class:`User` instance.
        """
        # Instantiate via ``cls`` (was hard-coded ``Message()``) so
        # subclasses reusing this factory get instances of the right type.
        self = cls()
        self.channel = args.get("channel")
        self.text = args.get("text")
        self.user = args.get("user", (await User.create()))
        if (self.channel is None or self.text is None or not isinstance(self.user, User)):
            raise MessageError("Invalid message construction!")
        return self
class Embed(Message):
    """Rich message with title, color, images, fields, author and footer.

    NOTE(review): unlike Message.create, this factory performs no
    validation of channel/text -- confirm that is intentional.
    """
    @classmethod
    async def create(cls, **pargs):
        # NOTE(review): ``cls`` is ignored and ``Embed`` is hard-coded, so
        # subclasses of Embed would still get Embed instances.
        self = Embed()
        self.channel = pargs.get("channel")
        self.text = pargs.get("text")
        self.user = pargs.get("user", (await User.create()))
        self.title = pargs.get("title")
        self.color = pargs.get("color", 0xFFFFFF)  # default: white
        self.image = pargs.get("image")
        # Description falls back to the plain message text.
        self.description = pargs.get("description", self.text)
        self.thumbnail = pargs.get("thumbnail")
        self.url = pargs.get("url")
        self.fields = pargs.get("fields", [])
        self.author = pargs.get("author")
        self.footer = pargs.get("footer")
        return self
    async def setAuthor(self, a):
        self.author = a
    async def setFooter(self, f):
        self.footer = f
    async def addField(self, f):
        self.fields.append(f)
    class Author:
        # Embed author line: display name, link and icon.
        @classmethod
        async def create(cls, **args):
            self = Embed.Author()
            self.name = args.get("name")
            self.url = args.get("url")
            self.iconURL = args.get("icon_url")
            return self
    class Footer:
        # Embed footer: text plus optional icon.
        @classmethod
        async def create(cls, **args):
            self = Embed.Footer()
            self.text = args.get("text")
            self.iconURL = args.get("icon_url")
            return self
    class Field:
        # Named key/value block; ``inline`` controls side-by-side layout.
        @classmethod
        async def create(cls, **args):
            self = Embed.Field()
            self.name = args.get("name")
            self.value = args.get("value")
            self.inline = args.get("inline", True)
            return self
| Liyara/Tracker | message.py | message.py | py | 1,771 | python | en | code | 0 | github-code | 36 |
41675250827 | # Daily Coding Problem: Problem #4 [Hard]
# Given an array of integers, find the first missing positive integer in linear time and constant space.
# In other words, find the lowest positive integer that does not exist in the array. The array can contain duplicates and negative numbers as well.
#
# For example, the input [3, 4, -1, 1] should give 2. The input [1, 2, 0] should give 3.
def first_positive(list):
    """Return the lowest positive integer missing from the input.

    Examples: [3, 4, -1, 1] -> 2, [1, 2, 0] -> 3, [2, 3] -> 1, [] -> 1.

    Fixes over the original version:
    * ``&`` (bitwise, wrong precedence) replaced by real boolean logic;
    * a list that lacks 1 (e.g. [2, 3]) now returns 1 instead of max+1;
    * all-negative input returns 1 instead of a non-positive number;
    * empty input returns 1 instead of raising IndexError;
    * the input list is no longer mutated (was sorted in place).
    The parameter keeps its historical name for interface compatibility.
    """
    present = set(list)          # O(1) membership tests
    candidate = 1
    while candidate in present:  # walk 1, 2, 3, ... until the first gap
        candidate += 1
    return candidate
if __name__ == '__main__':
    # Demo: expected first missing positives are 2 and 3 respectively.
    integers = [3, 4, -1, 1,1]
    print("List: ",integers)
    print("Value:",first_positive(integers))
    integers = [1, 2, 0, -1]
    print("List: ", integers)
    print("Value:",first_positive(integers))
| mglacayo07/DailyCodingProblem4 | main.py | main.py | py | 898 | python | en | code | 0 | github-code | 36 |
38440361902 | import requests
from decouple import config
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
@login_required(login_url='/accounts/login/')
def home(request):
    """Dashboard view: pulls disk/load/RAM/network charts from the metrics
    API configured in the ``API`` setting and renders them into index.html.

    NOTE(review): the chart names look like a Netdata-style REST API and
    ``json()['data'][0]`` is assumed to be the latest sample row with the
    timestamp in column 0 -- confirm against the actual backend.
    """
    disk = requests.get(config("API") + 'disk_space._')
    load = requests.get(config("API") + 'system.load')
    ram = requests.get(config("API") + 'system.ram')
    net = requests.get(config("API") + 'net.eth0')
    context = {
        "disk_label": ['Disponível', 'Usado', 'Reservado para o root'],
        "disk_data": disk.json()['data'][0][1:],
        "load_labels": ['1 minuto', '5 minutos', '15 minutos'],
        "load_data": load.json()['data'][0][1:],
        "ram_labels": ['Mem. livre', 'Mem. usada', 'Mem. cacheada', 'Buffers'],
        "ram_data": ram.json()['data'][0][1:],
        "recebido": net.json()['data'][0][1],
        # Sign is flipped here -- presumably sent traffic is reported
        # negative upstream; confirm with the metrics API.
        "enviado": net.json()['data'][0][2] * -1,
    }
    return render(request, 'index.html', context)
| carlos-moreno/dashboard | dashboard/core/views.py | views.py | py | 950 | python | en | code | 0 | github-code | 36 |
3855378751 | # Applying the algorithms
n_simulation = 100000
# From module 6
predcorr_forward = np.ones([n_simulation, N]) \
* (model_prices[:-1]-model_prices[1:]) \
/ (delta_t*model_prices[1:])
predcorr_capfac = np.ones([n_simulation, N+1])
delta = np.ones([n_simulation, N])*delta_t
# calculate the forward rate for each steps from the bond price
for i in range(1, opt.N):
# generate random numbers follow normal distribution
Z = opt.sigma*sqrt(delta[:,i:])*norm.rvs(size = [n,1])
# predictor-corrector Monte Carlo simulation
mu_initial = np.cumsum(delta[:,i:]*predcorr_forward[:,i:]*opt.sigma**2/(1+delta[:,i:]*predcorr_forward[:,i:]), axis = 1)
temp = predcorr_forward[:,i:]*exp((mu_initial-opt.sigma**2/2)*delta[:,i:]+Z)
mu_term = np.cumsum(delta[:,i:]*temp*opt.sigma**2/(1+delta[:,i:]*temp), axis = 1)
predcorr_forward[:,i:] = predcorr_forward[:,i:]*exp((mu_initial + mu_term - opt.sigma**2)*delta[:,i:]/2+Z)
# implying capitalization factors from the forward rates
predcorr_capfac[:,1:] = np.cumprod(1+delta*predcorr_forward,axis = 1)
# inverting the capitalization factors to imply bond prices (discount factors)
predcorr_price = predcorr_capfac**(-1)
# taking averages: Forward Rate, Bond Price, Capitalization Factors
# mean Forward Rate
opt.forward_rate = np.mean(predcorr_forward,axis = 0)
# mean Price
predcorr_price = np.mean(predcorr_price,axis = 0)
# mean Capitalization Factors
opt.capfac = np.mean(predcorr_capfac,axis = 0)
# plot results
plt.subplots(figsize=(16, 8))
plt.xlabel('Maturity')
plt.ylabel('Bond Price')
plt.plot(maturity, zcb_prices, label = 'Actual Bond Prices')
plt.plot(maturity, opt.model_prices, 'o', label = 'Calibration Prices')
plt.plot(maturity, predcorr_price,'x',label = "Predictor-Corrector Bond Prices")
plt.legend()
plt.show() | haininhhoang94/wqu | MScFE630/gwa_computational_finance/GWA3_1_simulation.py | GWA3_1_simulation.py | py | 1,813 | python | en | code | 21 | github-code | 36 |
17613252101 | import plotly.plotly as py
import plotly.graph_objs as go
from pymongo import MongoClient
import sys
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib
from networkx.algorithms import approximation as approx
from nxpd import draw
from networkx.drawing.nx_agraph import graphviz_layout
client = MongoClient()
client = MongoClient('mongodb://m49dy:admin12345@ds251799.mlab.com:51799/sna_project')
db = client['sna_project']
userCollection = db['users']
groupCollection = db['groups']
postCollection=db['posts']
array_users = list(userCollection.find())
array_groups = list(groupCollection.find())
print(array_users[0])
names=[]
no_of_friends=[]
no_of_posts=[]
group_names=[]
users_groups=[]
posts=[]
use=[]
males=0
females=0
group_posts=[]
print(list(postCollection.find())[0]["group"])
for user in array_users:
names.append(user["name"])
no_of_friends.append(len(user["friends"]))
no_of_posts.append(len(user["posts"]))
if 'gender' in user:
if(user["gender"]==1):
males=males+1
else:
females=females+1
for group in array_groups:
group_names.append(group["name"])
users_groups.append(len(group["users"]))
for group in array_groups:
no=0
for post in list(postCollection.find()):
if 'group' in post:
if post["group"]==group["_id"]:
no=no+1
group_posts.append(no)
# Replace the username, and API key with your credentials.
py.sign_in('diaa56', 'QaMy3cKad5uFqnLP8oaL')
trace = go.Bar(x=names, y= no_of_friends)
data = [trace]
layout=go.Layout(title = 'no of friends for each user', width = 800, height = 640,xaxis= dict(
title='users',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
yaxis = dict(
title='no of friends',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
)
)
fig = go.Figure(data=data, layout=layout)
py.image.save_as(fig, filename='assets/a-simple-plot.png')
trace = go.Bar(x=names, y=no_of_posts)
data = [trace]
layout = go.Layout(title='no of posts for each user', width=800, height=640, xaxis=dict(
title='users',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
yaxis=dict(
title='no of posts',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
)
)
fig = go.Figure(data=data, layout=layout)
py.image.save_as(fig, filename='assets/posts.png')
trace = go.Bar(x=group_names, y=users_groups)
data = [trace]
layout = go.Layout(title='no of users for each group', width=800, height=640, xaxis=dict(
title='groups',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
yaxis=dict(
title='no of users',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
)
)
fig = go.Figure(data=data, layout=layout)
py.image.save_as(fig, filename='assets/groups.png')
trace = go.Bar(x=group_names, y=group_posts)
data = [trace]
layout = go.Layout(title='no of posts for each group', width=800, height=640, xaxis=dict(
title='groups',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
yaxis=dict(
title='no of posts',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
)
)
fig = go.Figure(data=data, layout=layout)
py.image.save_as(fig, filename='assets/groups_posts.png')
if len(sys.argv) > 1:
target=sys.argv[1]
print(sys.argv[1])
gp = groupCollection.find_one( {"name":target})
print(gp)
G = nx.Graph()
for user in gp["users"]:
G.add_node(userCollection.find_one({"_id": user})["name"])
for user in gp["users"]:
data = userCollection.find_one({"_id": user})
if 'friends' in data:
for one in data["friends"]:
if one in gp["users"]:
G.add_edge(data["name"], userCollection.find_one({"_id": one})["name"], color='grey')
edges = G.edges()
colors = [G[u][v]['color'] for u, v in edges]
pos = nx.nx_pydot.graphviz_layout(G)
nx.draw(G, pos, with_labels=True, font_weight='bold', node_color='red',font_size=18, node_size=0, edge_color=colors, width=5)
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(20, 15)
plt.savefig("assets/connect.png")
if gp:
print("lol")
if 'users' in gp:
for user in gp["users"]:
no = 0
us = userCollection.find_one({"_id": user})
use.append(us["name"])
for post in us["posts"]:
ps = postCollection.find_one({"_id": post})
if ps:
if 'group' in ps:
if(ps["group"] == gp["_id"]):
no = no + 1
posts.append(no)
trace = go.Bar(x=use, y=posts)
data = [trace]
layout = go.Layout(title='no of posts for each user in group '+target, width=800, height=640, xaxis=dict(
title='users',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
yaxis=dict(
title='no of posts',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
)
)
fig = go.Figure(data=data, layout=layout)
py.image.save_as(fig, filename='assets/habd.png')
| diaaahmed850/snaproject | diaa.py | diaa.py | py | 5,831 | python | en | code | 0 | github-code | 36 |
73450401385 | # !/usr/bin/python
# -*-coding=utf-8-*-
# Example site:@http://www.apostilando.com/pagina.php?cod=1
# 将要扫描的网站写入当前目录文件中。python xxx.py xxx.txt
import urllib
import os
import sys
if os.name == "nt":
os.system("cls")
else:
os.system("clear")
def usage():
print
"""
=================SQL INJECTION=====================
Usage:python %s %s
""" % (sys.argv[0], sys.argv[1])
def scanner(url):
try:
page = urllib.urlopen(url).read()
except:
print
"[-]Error!!!\n"
return (0)
# 如果一个网站存在SQL注入的话就,当你使用基本的尝试方法去测试时页面会出现如下报错。
sqls = ("mysql_result(): supplied argument is not a valid MySQL result resource in",
"[Microsoft][ODBC SQL Server Driver][SQL Server]",
"Warning:ociexecute",
"Warning: pq_query[function.pg-query]:")
i = 0
page = str(page.lower())
while i < len(sqls):
sql = str(sqls[i]).lower()
if page.find(sql[i]) == -1:
check = 0
else:
check = 1
i += 1
if check == 0:
print
"[-]" + url + " <No Vulneravel>"
else:
print
"[+]" + url + " <Vulneravel>"
def main(args):
if len(args) != 1:
usage()
print
"\t[-]Mode to use: %s <File>\n" % sys.argv[0]
print
"\t[-]Example: %s Site.txt\n" % sys.argv[0]
# print sys.argv[0],sys.argv[1],len(args)
sys.exit(0)
usage()
try:
f = open(str(sys.argv[1]), "r")
urls = f.readlines()
# print urls
except:
print
"[+]Error to open the file " + sys.argv[1] + ""
return (-1)
f.close()
i = 0
while i < len(urls):
if urls[i].find("http://") == -1:
urls[i] = "http://" + urls[i]
urls[i] = urls[i].replace("\n", "")
# 利用基本放法进行测试,如:and 1=1,and 1=2,’,查看是否出现sqls中的错误信息
a = scanner(urls[i] + "and 1=2")
i += 1
if __name__ == "__main__":
main(sys.argv[1:])
| Guaijs/Sql_injection_detection | crawler/Test2.py | Test2.py | py | 2,176 | python | en | code | 0 | github-code | 36 |
28984839741 | #! /usr/bin/env python
import argparse
import re
import numpy as np
def to_binary_string(value):
"""
Converts F or L to zeros, and B or R to 1 and interprets the string as a binary value
>>> to_binary_string("FBFBBFF")
('0101100', 44)
>>> to_binary_string("RLR")
('101', 5)
:param value:
:return:
"""
value = re.sub(r"[FL]", "0", value)
value = re.sub(r"[BR]", "1", value)
return value, int(value, 2)
def get_seat_id(value):
"""
Splits the string into row and column parts and interprets each as binary locations. Then
multiplies the row by 8 and adds the column.
>>> get_seat_id("FBFBBFFRLR")
357
>>> get_seat_id("BFFFBBFRRR")
567
>>> get_seat_id("FFFBBBFRRR")
119
>>> get_seat_id("BBFFBBFRLL")
820
:param value:
:return:
"""
row = to_binary_string(value[:7])
col = to_binary_string(value[7:])
return row[1]*8 + col[1]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Day 5 of Advent of Code 2020')
parser.add_argument('file', metavar='filename', type=argparse.FileType('rt'),
help='filename to your personal inputs')
parser.add_argument('--test', '-t', action='store_true')
args = parser.parse_args()
if args.test:
import doctest
doctest.testmod()
print("Tests completed")
exit(0)
with args.file as FILE:
file_content = FILE.readlines()
seat_ids = [get_seat_id(line) for line in file_content]
print(f"There are {len(seat_ids)} boardings cards in the the input, and the highest value is {np.max(seat_ids)}")
for v in range(0, np.max(seat_ids)):
if v not in seat_ids and v-1 in seat_ids and v+1 in seat_ids:
print(f"The value {v} is not in the list, but {v-1} and {v+1} are")
| SocialFinanceDigitalLabs/AdventOfCode | solutions/2020/kws/day_05.py | day_05.py | py | 1,856 | python | en | code | 2 | github-code | 36 |
43301216014 | import py
from rpython.flowspace.model import Constant
from rpython.rtyper.lltypesystem import lltype
from rpython.jit.codewriter.flatten import SSARepr, Label, TLabel, Register
from rpython.jit.codewriter.flatten import ListOfKind, IndirectCallTargets
from rpython.jit.codewriter.jitcode import SwitchDictDescr
from rpython.jit.metainterp.history import AbstractDescr
def format_assembler(ssarepr):
"""For testing: format a SSARepr as a multiline string."""
from cStringIO import StringIO
def repr(x):
if isinstance(x, Register):
return '%%%s%d' % (x.kind[0], x.index) # e.g. %i1 or %r2 or %f3
elif isinstance(x, Constant):
if (isinstance(x.concretetype, lltype.Ptr) and
isinstance(x.concretetype.TO, lltype.Struct)):
return '$<* struct %s>' % (x.concretetype.TO._name,)
return '$%r' % (x.value,)
elif isinstance(x, TLabel):
return getlabelname(x)
elif isinstance(x, ListOfKind):
return '%s[%s]' % (x.kind[0].upper(), ', '.join(map(repr, x)))
elif isinstance(x, SwitchDictDescr):
return '<SwitchDictDescr %s>' % (
', '.join(['%s:%s' % (key, getlabelname(lbl))
for key, lbl in x._labels]))
elif isinstance(x, (AbstractDescr, IndirectCallTargets)):
return '%r' % (x,)
else:
return '<unknown object: %r>' % (x,)
seenlabels = {}
for asm in ssarepr.insns:
for x in asm:
if isinstance(x, TLabel):
seenlabels[x.name] = -1
elif isinstance(x, SwitchDictDescr):
for _, switch in x._labels:
seenlabels[switch.name] = -1
labelcount = [0]
def getlabelname(lbl):
if seenlabels[lbl.name] == -1:
labelcount[0] += 1
seenlabels[lbl.name] = labelcount[0]
return 'L%d' % seenlabels[lbl.name]
output = StringIO()
insns = ssarepr.insns
if insns and insns[-1] == ('---',):
insns = insns[:-1]
for i, asm in enumerate(insns):
if ssarepr._insns_pos:
prefix = '%4d ' % ssarepr._insns_pos[i]
else:
prefix = ''
if isinstance(asm[0], Label):
if asm[0].name in seenlabels:
print >> output, prefix + '%s:' % getlabelname(asm[0])
else:
print >> output, prefix + asm[0],
if len(asm) > 1:
if asm[-2] == '->':
if len(asm) == 3:
print >> output, '->', repr(asm[-1])
else:
lst = map(repr, asm[1:-2])
print >> output, ', '.join(lst), '->', repr(asm[-1])
else:
lst = map(repr, asm[1:])
if asm[0] == '-live-': lst.sort()
print >> output, ', '.join(lst)
else:
print >> output
res = output.getvalue()
return res
def assert_format(ssarepr, expected):
asm = format_assembler(ssarepr)
if expected != '':
expected = str(py.code.Source(expected)).strip() + '\n'
asmlines = asm.split("\n")
explines = expected.split("\n")
for asm, exp in zip(asmlines, explines):
if asm != exp:
msg = [""]
msg.append("Got: " + asm)
msg.append("Expected: " + exp)
lgt = 0
for i in range(min(len(asm), len(exp))):
if exp[i] == asm[i]:
lgt += 1
else:
break
msg.append(" " + " " * lgt + "^^^^")
raise AssertionError('\n'.join(msg))
assert len(asmlines) == len(explines)
def unformat_assembler(text, registers=None):
# XXX limited to simple assembler right now
#
def unformat_arg(s):
if s.endswith(','):
s = s[:-1].rstrip()
if s[0] == '%':
try:
return registers[s]
except KeyError:
num = int(s[2:])
if s[1] == 'i': reg = Register('int', num)
elif s[1] == 'r': reg = Register('ref', num)
elif s[1] == 'f': reg = Register('float', num)
else: raise AssertionError("bad register type")
registers[s] = reg
return reg
elif s[0] == '$':
intvalue = int(s[1:])
return Constant(intvalue, lltype.Signed)
elif s[0] == 'L':
return TLabel(s)
elif s[0] in 'IRF' and s[1] == '[' and s[-1] == ']':
items = split_words(s[2:-1])
items = map(unformat_arg, items)
return ListOfKind({'I': 'int', 'R': 'ref', 'F': 'float'}[s[0]],
items)
elif s.startswith('<SwitchDictDescr '):
assert s.endswith('>')
switchdict = SwitchDictDescr()
switchdict._labels = []
items = split_words(s[len('<SwitchDictDescr '):-1])
for item in items:
key, value = item.split(':')
value = value.rstrip(',')
switchdict._labels.append((int(key), TLabel(value)))
return switchdict
else:
raise AssertionError("unsupported argument: %r" % (s,))
#
if registers is None:
registers = {}
ssarepr = SSARepr('test')
for line in text.splitlines():
line = line.strip()
if not line:
continue
if line.startswith('L') and line.endswith(':'):
ssarepr.insns.append((Label(line[:-1]),))
else:
try:
opname, line = line.split(None, 1)
except ValueError:
opname, line = line, ''
words = list(split_words(line))
if '->' in words:
assert words.index('->') == len(words) - 2
extra = ['->', unformat_arg(words[-1])]
del words[-2:]
else:
extra = []
insn = [opname] + [unformat_arg(s) for s in words] + extra
ssarepr.insns.append(tuple(insn))
return ssarepr
def split_words(line):
word = ''
nested = 0
for i, c in enumerate(line):
if c == ' ' and nested == 0:
if word:
yield word
word = ''
else:
word += c
if c in '<([':
nested += 1
if c in '])>' and (' '+line)[i:i+4] != ' -> ':
nested -= 1
assert nested >= 0
if word:
yield word
assert nested == 0
| mozillazg/pypy | rpython/jit/codewriter/format.py | format.py | py | 6,680 | python | en | code | 430 | github-code | 36 |
26763674457 | import pandas as pd
from mega_analysis.crosstab.gif_sheet_names import gif_sheet_names
from mega_analysis.crosstab.file_paths import file_paths
def gif_lobes_from_excel_sheets():
"""
sort the gif parcellations as per excel gif sheet lobes.
e.g. GIF FL = GIF Frontal Lobe - has a list of gif parcellations
which we want to see in 3D slicer, using the GUI
"""
_, _, excel_path, _ = file_paths()
GIF_SHEET_NAMES = gif_sheet_names()
lobes_mapping = {}
for gif_lobe in GIF_SHEET_NAMES:
gif_parcellations = pd.read_excel(
excel_path,
header=None, usecols="A:B",
sheet_name=gif_lobe, engine="openpyxl",
)
gif_parcellations.dropna(axis=0, how='any', inplace=True)
gif_parcellations.dropna(axis=1, how='all', inplace=True)
gifs = gif_parcellations.astype({1: 'uint16'})
gifs = gifs.iloc[:, 1]
lobes_mapping[gif_lobe] = gifs.values
return lobes_mapping
| thenineteen/Semiology-Visualisation-Tool | mega_analysis/crosstab/gif_lobes_from_excel_sheets.py | gif_lobes_from_excel_sheets.py | py | 980 | python | en | code | 9 | github-code | 36 |
12514324819 | import torch
from torchtext.datasets import AG_NEWS
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
from torch.utils.data import DataLoader
UNK = '<unk>'
tok = get_tokenizer('basic_english')
train_iter = AG_NEWS(split='train')
def yield_tokens(data_iter, tokenizer):
for _, text in data_iter:
yield tokenizer(text)
vocab = build_vocab_from_iterator(yield_tokens(train_iter, tok), specials=['<unk>'])
vocab.set_default_index(vocab['<unk>'])
# print(vocab['my'])
# print(vocab(['this', 'is', 'my', 'car']))
# print(vocab['<unk>']) #0
text_pipeline = lambda x: vocab(tok(x))
label_pipeline = lambda x: int(x) - 1
# print(text_pipeline('this is my car'))
# print(vocab(['this is my car']))
# print(tok('this is my car'))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def collate_batch(batch):
label_list, text_list, offsets = [], [], [0]
for (_label, _text) in batch:
label_list.append(label_pipeline(_label)) # 0 ~ 3
processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64) # int list to tensor
text_list.append(processed_text) # tensor of int-list-tensor
offsets.append(processed_text.size(0))
label_list = torch.tensor(label_list, dtype=torch.int64)
offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)
text_list = torch.cat(text_list)
return label_list.to(device), text_list.to(device), offsets.to(device)
dataloader = DataLoader(train_iter, batch_size=8, shuffle=False, collate_fn=collate_batch)
collate_batch(train_iter) | moon0331/TorchTutorial | seq2seq.py | seq2seq.py | py | 1,590 | python | en | code | 0 | github-code | 36 |
10844238227 | import shutil
executables = [
"flex",
"yacc",
]
def is_available(exe: str) -> bool:
return shutil.which(exe) is not None
def test_executables() -> None:
missing = [exe
for exe in executables
if not is_available(exe)]
assert not missing, f"Missing executables: {', '.join(missing)}"
| GravityDrowned/teaching-notebook | test_executables.py | test_executables.py | py | 337 | python | en | code | 0 | github-code | 36 |
30055768931 | import argparse
import numpy as np
from Data import Data
from Experiment import Experiment
from FrameStackExperiment import FrameStackExperiment
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Evaluate termination classifier performance')
parser.add_argument('filepath', type=str, help='filepath of pkl file containing trajectories with RAM states and frames')
parser.add_argument('dest', type=str, help='directory to write results and plots to')
parser.add_argument('term_classifier', type=str, choices=['OneClassSVM', 'TwoClassSVM', 'FullCNN'], help='termination classifier to be used')
parser.add_argument('feature_extractor', type=str, choices=['RawImage', 'DownsampleImage', 'RawRAM', 'MonteRAMState', 'MonteRAMXY', 'BOVW', 'RND', 'CNN'], help='feature extractor to be used')
parser.add_argument('label_extractor', type=str, choices=['BeforeAfterExtractor', 'AfterExtractor', 'OracleExtractor', 'TransductiveExtractor', 'PositiveAugmentExtractor'], help='label extractor to be used')
parser.add_argument('--extract_only_pos', default=False, action='store_true', help='whether label extractor should only extract positive egs')
parser.add_argument('--frame_stack', default=False, action='store_true', help='whether states are frame stacks')
args = parser.parse_args()
#data = Data(args.filepath, train_skip=2000, train_num=200, test_skip=0, test_num=100)
data = Data(args.filepath, train_skip=25, train_num=75, test_skip=25, test_num=25)
# (player_x, player_y, screen) of good subgoals
# [right plat, bottom of ladder of right plat, bottom of ladder of left plat,
# top of ladder of left plat, key, left door, right door]
#subgoals = [(133, 192, 1), (132, 148, 1), (20, 148, 1), (20, 192, 1), (13, 198, 1), (24, 235, 1), (130, 235, 1)]
#subgoals = [(24, 235, 1), (130, 235, 1)]
#subgoals = [(52, 235, 1)]
subgoals = [(133, 148, 1), (58, 192, 1), (35, 235, 1), (119, 235, 1), (49, 235, 1), (88, 192, 1), (142, 192, 1)]
# Prepare hyperparams
if args.label_extractor == 'OracleExtractor':
window_sz_hyperparms = [None]
else:
#window_sz_hyperparms = range(0, 7)
#window_sz_hyperparms = range(2, 3)
window_sz_hyperparms = range(1, 2)
if args.feature_extractor == 'BOVW':
#num_clusters_hyperparams = range(110, 121, 10)
#num_sift_keypoints_hyperparams = range(25, 40, 5)
num_clusters_hyperparams = range(110, 111, 10)
num_sift_keypoints_hyperparams = range(25, 26, 5)
else:
num_clusters_hyperparams = [None]
num_sift_keypoints_hyperparams = [None]
if args.term_classifier == 'OneClassSVM':
nu_hyperparams = np.arange(0.3, 0.5, 0.1)
else:
nu_hyperparams = [None]
if args.term_classifier == 'FullCNN':
gamma_hyperparams = [None]
else:
#gamma_hyperparams = [0.0001, 0.001, 0.01, 0.1, 'scale', 'auto']
#gamma_hyperparams = [0.001, 0.01, 'auto']
#gamma_hyperparams = [0.001]
#gamma_hyperparams = [0.1]
#gamma_hyperparams = [0.1, 'auto']
#gamma_hyperparams = ['scale']
#gamma_hyperparams = [0.000001]
gamma_hyperparams = [0.000000004]
# Prepare information on each subgoal
subgoals_info = {}
for subgoal in subgoals:
traj_idx, state_idx = data.find_first_instance(data.train_ram_trajs, subgoal)
if traj_idx is None:
continue
subgoal_ram = data.train_ram_trajs[traj_idx][state_idx]
ground_truth_idxs = data.filter_in_term_set(data.test_ram_trajs, subgoal_ram)
subgoals_info[subgoal] = {'traj_idx': traj_idx,
'state_idx': state_idx,
'ground_truth_idxs': ground_truth_idxs}
# Run experiments
for num_clusters in num_clusters_hyperparams:
for num_sift_keypoints in num_sift_keypoints_hyperparams:
for window_sz in window_sz_hyperparms:
for nu in nu_hyperparams:
for gamma in gamma_hyperparams:
for i in range(1):
print(f"[+] clusters={num_clusters}, kps={num_sift_keypoints}, window_sz={window_sz}, nu={nu}, gamma={gamma}")
if args.feature_extractor in ['RawImage', 'DownsampleImage', 'BOVW', 'RND', 'CNN'] or args.term_classifier == 'FullCNN':
train_trajs = data.train_frame_trajs
test_trajs = data.test_frame_trajs
elif args.feature_extractor in ['RawRAM', 'MonteRAMState', 'MonteRAMXY']:
train_trajs = data.train_raw_ram_trajs
test_trajs = data.test_raw_ram_trajs
# Run experiment
hyperparams = {
"num_sift_keypoints": num_sift_keypoints,
"num_clusters": num_clusters,
"window_sz": window_sz,
"nu": nu,
"gamma": gamma,
}
if args.frame_stack:
experiment = FrameStackExperiment(train_trajs, data.train_raw_ram_trajs, test_trajs, data.test_raw_ram_trajs,
subgoals, subgoals_info,
args, hyperparams)
else:
experiment = Experiment(train_trajs, data.train_raw_ram_trajs, test_trajs, data.test_raw_ram_trajs,
subgoals, subgoals_info,
args, hyperparams)
experiment.run()
| jwnicholas99/option-term-classifier | run.py | run.py | py | 5,905 | python | en | code | 0 | github-code | 36 |
15771336312 | import ctypes
import typing as t
from . import sdk
from .enum import Result
from .event import bind_events
from .exception import get_exception
from .model import UserAchievement
class AchievementManager:
_internal: sdk.IDiscordAchievementManager = None
_garbage: t.List[t.Any]
_events: sdk.IDiscordAchievementEvents
def __init__(self):
self._garbage = []
self._events = bind_events(
sdk.IDiscordAchievementEvents,
self._on_user_achievement_update
)
def _on_user_achievement_update(self, event_data, user_achievement):
self.on_user_achievement_update(UserAchievement(copy=user_achievement.contents))
def set_user_achievement(
self,
achievement_id: int,
percent_complete: int,
callback: t.Callable[[Result], None]
) -> None:
"""
Updates the current user's status for a given achievement.
Returns discordsdk.enum.Result via callback.
"""
def c_callback(callback_data, result):
self._garbage.remove(c_callback)
result = Result(result)
callback(result)
c_callback = self._internal.set_user_achievement.argtypes[-1](c_callback)
self._garbage.append(c_callback) # prevent it from being garbage collected
self._internal.set_user_achievement(
self._internal,
achievement_id,
percent_complete,
ctypes.c_void_p(),
c_callback
)
def fetch_user_achievements(self, callback: t.Callable[[Result], None]) -> None:
"""
Loads a stable list of the current user's achievements to iterate over.
Returns discordsdk.enum.Result via callback.
"""
def c_callback(callback_data, result):
self._garbage.remove(c_callback)
result = Result(result)
callback(result)
c_callback = self._internal.fetch_user_achievements.argtypes[-1](c_callback)
self._garbage.append(c_callback) # prevent it from being garbage collected
self._internal.fetch_user_achievements(self._internal, ctypes.c_void_p(), c_callback)
def count_user_achievements(self) -> int:
"""
Counts the list of a user's achievements for iteration.
"""
count = ctypes.c_int32()
self._internal.count_user_achievements(self._internal, count)
return count.value
def get_user_achievement_at(self, index: int) -> UserAchievement:
"""
Gets the user's achievement at a given index of their list of achievements.
"""
achievement = sdk.DiscordUserAchievement()
result = Result(self._internal.get_user_achievement_at(
self._internal,
index,
achievement
))
if result != Result.ok:
raise get_exception(result)
return UserAchievement(internal=achievement)
def get_user_achievement(self, achievement_id: int) -> None:
"""
Gets the user achievement for the given achievement id.
"""
achievement = sdk.DiscordUserAchievement()
result = Result(self._internal.get_user_achievement(
self._internal,
achievement_id,
achievement
))
if result != Result.ok:
raise get_exception(result)
return UserAchievement(internal=achievement)
def on_user_achievement_update(self, achievement: UserAchievement) -> None:
"""
Fires when an achievement is updated for the currently connected user
"""
| Maselkov/GW2RPC | gw2rpc/lib/discordsdk/achievement.py | achievement.py | py | 3,610 | python | en | code | 47 | github-code | 36 |
73456512104 | # -*- coding: utf-8 -*-
# Time : 2023/10/5 22:53
# Author : QIN2DIM
# GitHub : https://github.com/QIN2DIM
# Description:
import csv
from dataclasses import dataclass, field
from pathlib import Path
from typing import List
class Level:
first = 1
second = 2
third = 3
fourth = 4
fifth = 5
sixth = 6
none = 0
@staticmethod
def get_bonus(level: int):
level2bonus = {0: 0, 6: 5, 5: 10, 4: 200, 3: 3000, 2: -1, 1: -1}
return level2bonus[level]
@staticmethod
def get_zh_level(level: int):
level2zh = {0: "无", 6: "六等奖", 5: "五等奖", 4: "四等奖", 3: "三等奖", 2: "二等奖", 1: "一等奖"}
return level2zh[level]
def is_bingo(red: int, blue: int):
assert 0 <= red <= 6
assert 0 <= blue <= 1
if blue == 0:
if red in [0, 1, 2, 3]:
return Level.none
if red == 4:
return Level.fifth
if red == 5:
return Level.fourth
if red == 6:
return Level.second
if blue == 1:
if red in [0, 1, 2]:
return Level.sixth
if red == 3:
return Level.fifth
if red == 4:
return Level.fourth
if red == 5:
return Level.third
if red == 6:
return Level.first
def compare_nums(mc: List[str], bingo_nums: List[str]):
red = 0
blue = 0
for num_red in mc[:-1]:
if num_red in bingo_nums[:-1]:
red += 1
if mc[-1] == bingo_nums[-1]:
blue += 1
return red, blue
@dataclass
class SSQResult:
red: int = field(default=int)
blue: int = field(default=int)
level: int = field(default=int)
bonus: int = field(default=int)
zh_level: str = field(default=str)
term: str = field(default=str)
class SSQNumsChecker:
def __init__(self, my_nums: List[List[str]]):
self.my_nums = my_nums
def get_results(self, bingo_nums: List[str]):
for i, mc in enumerate(self.my_nums):
red, blue = compare_nums(mc, bingo_nums)
level = is_bingo(red, blue)
yield mc, SSQResult(
red=red,
blue=blue,
level=level,
bonus=Level.get_bonus(level),
zh_level=Level.get_zh_level(level),
)
def trace_results(self, cache_path: Path):
text = cache_path.read_text(encoding="utf8")
reader = csv.reader([k for k in text.split("\n")[1:] if k])
for j, tn in enumerate(reader):
term = tn[0]
bingo_nums = tn[1:]
for i, mc in enumerate(self.my_nums):
red, blue = compare_nums(mc, bingo_nums)
level = is_bingo(red, blue)
yield mc, SSQResult(
red=red,
blue=blue,
level=level,
bonus=Level.get_bonus(level),
zh_level=Level.get_zh_level(level),
term=term,
)
| QIN2DIM/hysterical_ticket | hysterical_ticket/component/bingo_ssq.py | bingo_ssq.py | py | 3,044 | python | en | code | 3 | github-code | 36 |
3579460229 | import sys
import os
import string
def int_input():
string = input()
if not string:
return (-10)
elif (len(string) == 1) and (string[0] == '0'):
return (0)
else:
try:
numb = int(string)
return (numb)
except ValueError:
return (-10)
psql = 'psql '
location = '-h localhost '
db_name = 'test_db '
user = 'test_user '
team=''
main_query = psql + location + db_name + user
'''
list of standard queries
they will be called from func 'out_std_queries'
'''
std_queries = {
1 : '../sql/std_queries/out_devices.sql',
2 : '../sql/std_queries/out_employees.sql',
3 : '../sql/std_queries/out_instrument.sql',
4 : '../sql/std_queries/out_laptops.sql',
5 : '../sql/std_queries/out_printers.sql',
6 : '../sql/std_queries/out_projects.sql',
7 : '../sql/std_queries/out_team_info.sql',
8 : '../sql/std_queries/out_team_members.sql'
}
'''
list of special queries
they will be called from func 'out_spec_queries'
WARNING: printer query must added with parameters ':palette' and ':format'
'''
spec_queries = {
1 : '''psql -h localhost test_db test_user -f ../sql/spec_queries/out_my_team_members.sql -v team_name="\'''',
2 : '''psql -h localhost test_db test_user -f ../sql/spec_queries/out_next_deadlines.sql -v team_name="\'''',
3 : '''psql -h localhost test_db test_user -f ../sql/spec_queries/out_all_team_projects.sql -v team_name="\'''',
4 : '''psql -h localhost test_db test_user -f ../sql/spec_queries/out_free_laptops.sql''',
5 : '''psql -h localhost test_db test_user -f ../sql/spec_queries/out_free_instrument.sql''',
6 : '''psql -h localhost test_db test_user -f ../sql/spec_queries/out_var_printers.sql '''
}
'''
This function get users query-choice between all standard queries
'''
def out_std_queries():
os.system('clear')
print(
''' list of tables
-------------------------
1 | devices
2 | employees
3 | instrument
4 | laptops
5 | printers
6 | projects
7 | team_info
8 | team_members
print '9' for exit
print '-1' for exit from this menu
''')
choice = -10
while choice == -10:
choice = int_input()
if (choice == -10):
print('''Invalid Number
Press ENTER to coninue''')
input()
if choice == 9:
raise SystemExit
if choice == -1:
return
if (choice < 1) or (choice > 8):
print('''There no choice like this
Press ENTER to coninue''')
input()
out_std_queries()
return
minor_query = main_query + '-f ' + std_queries[choice]
os.system(minor_query)
print(''' Press ENTER to coninue''')
input()
out_std_queries()
palette = {
'RGB',
'CMYK',
'WB',
'other'
}
paper_format={
'A1',
'A2',
'A3',
'A4',
'A5',
'custom'
}
'''
This function get users choices about printers paper format and color palette
'''
def out_print():
os.system('clear')
print(''' Enter the paper format (A1, A2, A3, A4, A5, custom):''')
format = input()
if (format not in paper_format):
print (" There is no paper like this\n Press ENTER to coninue")
input()
return
print(''' Enter the color palette (RGB, CMYK, WB, other):''')
palette = input()
if (palette not in palette):
print (" There is no palette like this\n Press ENTER to coninue")
input()
return
minor_query = spec_queries[6] + '-v format="\'' + format + '\'"' + ' -v palette="\'' + palette + '\'"'
print(minor_query)
os.system(minor_query)
print(''' Press ENTER to coninue''')
input()
'''
This function get users query-choice between all special queries
If user choose printer-query, this function call out_print function
'''
def out_spec_queries():
os.system('clear')
print(
''' list of special queries
-----------------------------------------------------
1 | information about your teammates
2 | information about next deadlines of your team
3 | information about all projects of your team
4 | information about free laptops, you can use
5 | information about free instruments, you can use
6 | information about printers in office
print '9' for exit
print '-1' for exit from this menu
''')
choice = -10
while choice == -10:
choice = int_input()
if (choice == -10):
print('''Invalid Number
Press ENTER to coninue''')
input()
if choice == 9:
raise SystemExit
if choice == -1:
return
if (choice < 1) or (choice > 6):
print('''There no choice like this
Press ENTER to coninue''')
input()
else:
if choice == 6:
out_print()
else :
minor_query = spec_queries[choice]
os.system(minor_query)
print(''' Press ENTER to coninue''')
input()
out_spec_queries()
'''
This function get users query-choice between std and spec queries (look in the head of document)
'''
def main_choice():
os.system('clear')
print(''' Choose queries you want to display:
-------------------------
1 | standard queries (all information without redaction)
2 | special queries (special redacted information)
print '9' for exit
''')
choice = -10
while choice == -10:
choice = int_input()
if (choice == -10):
print('''Invalid Number
Press ENTER to coninue''')
input()
if choice == 9:
raise SystemExit
if (choice < 1) or (choice > 2):
print('''There no choice like this
Press ENTER to coninue''')
input()
main_choice()
return
if choice == 1:
out_std_queries()
if choice == 2:
out_spec_queries()
main_choice()
'''
This function get name of team, check it, fill call of sql-query and call main_choice function
'''
def main_info():
os.system('psql -h localhost test_db test_user -f ../sql/std_queries/out_team_info.sql > .team_base')
os.system('clear')
f = open('.team_base')
all_file = f.read()
f.close()
print(all_file)
print(''' Enter your team''')
team = input()
if all_file.find('| ' + team + '''
''') == -1:
print('ERROR: There is no team with this name')
exit()
#adding team_name to sql_scripts
spec_queries[1] += team + '\'"'
spec_queries[2] += team + '\'"'
spec_queries[3] += team + '\'"'
main_choice()
main_info() | gquence/term_db | python/main.py | main.py | py | 6,548 | python | en | code | 0 | github-code | 36 |
3465936756 | import json, requests, subprocess, sys, yaml
from pip._vendor.distlib.compat import raw_input
class JiraClient():
board_status_to_env = {"Ready to Deploy": "QAX",
"QAX Done": "STGX",
"StgX Done": "PROD-EU",
"Prod EU Done": "PROD-US",
"Prod US Done": "UNKNOWN"}
env_to_status_id = {"QAX": "21", "STGX": "31", "PROD-EU": "41", "PROD-US": "51"}
def __init__(self, token):
self.auth_token = token
def build_headers(self):
return {
'Content-Type': "application/json",
'Authorization': "Basic " + self.auth_token,
'Cache-Control': "no-cache"
}
def fetch_tickets_to_deploy(self):
payload = {
"jql": "project = MBI AND issuetype = Story AND status in (\"Ready to Deploy\", \"StgX Done\", \"QAX Done\", \"Prod EU Done\")",
"fields": ["summary"]}
headers = self.build_headers()
url = 'https://www.mulesoft.org/jira/rest/api/2/search'
r = requests.post(url, data=json.dumps(payload), headers=headers)
# Filter only the tickets required for the current deploy date...
issues = r.json()['issues']
return list(map(lambda x: x["key"], issues))
def fetch_ticket_info(self, id):
headers = self.build_headers()
url = 'https://www.mulesoft.org/jira/rest/api/2/issue/' + id
r = requests.get(url, headers=headers)
return r.json()
def fetch_subtask_from_id(self, id):
ticket_info = self.fetch_ticket_info(id)
subtasks_ids = ticket_info['fields']["subtasks"]
return list(map(lambda x: x["key"], subtasks_ids))
def fetch_artifact_from_info(self, sid):
ticket_info = self.fetch_ticket_info(sid)
comp = ticket_info["fields"]["components"]
# Fetch component ...
if len(comp) == 0:
raise ValueError(sid + " must have component defined")
artifact_id = comp[0]["name"]
# Fetch version ...
version = ticket_info["fields"]["versions"]
if len(version) == 0:
raise ValueError(sid + " must have version defined")
artifact_version = version[0]["name"]
if len(comp) == 0:
raise ValueError(sid + " must have version defined")
jira_key = ticket_info["key"]
return {"jira_key": jira_key, "artifact_id": artifact_id, "version": artifact_version}
def fetch_artifacts(self, date):
# Fetch all the events ...
all_stories_keys = self.fetch_tickets_to_deploy()
# Filter events to be deployed ...
story_keys = list(
filter(lambda id: self.fetch_ticket_info(id)["fields"]["customfield_13861"] == date, all_stories_keys))
# Fetch the first ticket to be deployed ...
return list(map(lambda sid: self.story_to_deployment_unit(sid), story_keys))
def story_to_deployment_unit(self, story_key):
subtask_ids = self.fetch_subtask_from_id(story_key)
# Fetch artifact version ...
artifacts = list(map(lambda x: self.fetch_artifact_from_info(x), subtask_ids))
# Fetch next environment ...
next_env = self.fetch_next_env_to_deploy(story_key)
return {"jira_key": story_key, "next_env_to_deploy": next_env, "artifacts": artifacts}
def fetch_ticket_status(self, id):
# Fetch ticket info ...
ticket_info = self.fetch_ticket_info(id)
# Get current state ...
status = ticket_info["fields"]["status"]["name"]
return status
def fetch_next_env_to_deploy(self, sid):
board_status = self.fetch_ticket_status(sid)
return self.board_status_to_env.get(board_status)
def fetch_stories(self):
payload = {
"jql": "project = MBI AND issuetype = Story",
"fields": ["summary"]}
headers = self.build_headers()
url = 'https://www.mulesoft.org/jira/rest/api/2/search'
r = requests.post(url, data=json.dumps(payload), headers=headers)
# Filter only the tickets required for the current deploy date...
issues = r.json()['issues']
return list(map(lambda x: x["key"], issues))
def move_next_stage(self, sid):
# Fetch ticket status ...
board_status = self.fetch_ticket_status(sid)
print(board_status)
next_status = self.board_status_to_env[board_status]
# Move ticket to a new status ...
status_id = self.env_to_status_id[next_status]
payload = {
"update": {
"comment": [
{
"add": {
"body": "Automatic flow transitioning based on flow"
}
}
]
},
"transition": {
"id": status_id
}
}
headers = self.build_headers()
url = 'https://www.mulesoft.org/jira/rest/api/2/issue/' + sid + '/transitions'
# Move to next status ...
requests.post(url, data=json.dumps(payload), headers=headers)
def description_commit(self):
pull = "git pull"
diff = "git diff HEAD^ HEAD"
processPull = subprocess.Popen(pull.split(), stdout=subprocess.PIPE)
output, error = processPull.communicate()
if (error is None):
processDiff = subprocess.Popen(diff.split(), stdout=subprocess.PIPE)
output, error = processDiff.communicate()
if (error is None):
return str(output.decode("utf-8"))
else:
return "error"
else:
return "error"
def get_tag(self):
tag = "git describe --tag"
processPull = subprocess.Popen(tag.split(), stdout=subprocess.PIPE)
output, error = processPull.communicate()
if (error is None):
return str(output.decode("utf-8"))
def cli_mbi(self):
project = input("Enter the project initials: ")
description = self.description_commit()
print("You have the followings MBI:")
print(self.fetch_stories())
issue = input("Enter MBI: ")
version = input("The last version is " + self.get_tag()+ ". Enter Version:")
component = self.find_component()
if self.validate_input(project, issue, version, component):
self.create_subtask(project, issue, description, component, version)
else:
print("Exit")
def create_subtask(self, project, issue, description, component, version):
payload = {
"fields":
{
"project":
{
"key": project
},
"parent":
{
"key": issue
},
"summary": "Change log " + issue,
"description": description,
"issuetype":
{
"name": "Sub-task"
},
"components": [
{
"name": component
}
],
"versions": [
{
"name": version
}
]
}
}
headers = self.build_headers()
url = 'https://www.mulesoft.org/jira/rest/api/2/issue/'
try:
r = requests.post(url, data=json.dumps(payload), headers=headers)
resp = r.content.decode('utf-8')
jiraKey = json.loads(resp)
print("Issue created: " + jiraKey["key"])
except r.exceptions.HTTPError as err:
print(err)
def validate_input(self, project, mbi, version, component):
question1 = "Project: " + project + " \nMBI: " + mbi + "\nVersion: " + version + "\nComponent: " + component + "\nIt's correct? "
return self.query_yes_no(question1)
def query_yes_no(self, question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
def find_component(self):
with open("popeye.yaml") as stream:
try:
file = (yaml.load(stream))
return file["id"]
except yaml.YAMLError as exc:
print(exc)
def update_comment(self, mib_key, comment):
payload = {
"body": comment
}
headers = self.build_headers()
url = 'https://www.mulesoft.org/jira/rest/api/2/issue/' + mib_key + '/comment'
response = requests.post(url, data=json.dumps(payload), headers=headers) | mulesoft-labs/popeye | JiraClient.py | JiraClient.py | py | 9,957 | python | en | code | 1 | github-code | 36 |
2346859029 | import numpy as np
from PIL import Image
# Goal: convert an image file from normal pixels to ANSI art made of dots "."
# of same color with canvas-like color background
# ANSI foreground color (n, 0-255) based on 256-bit -> \033[38;5;nm
# ANSI background color (n, 0-255) based on 256-bit -> \033[48;5;nm
# end with \033[m
# colors: https://en.wikipedia.org/wiki/ANSI_escape_code#8-bit
# example: print("\033[48;5;19m\033[38;5;214mHello world!\033[m")
# Open the image
def open_image_array(img_dir="razorback.png"):
# img_dir = input("Name your picture's filename: ") # input image path
img_rgb = Image.open(img_dir).convert('RGB') # convert from RGBA to RGB
# img_rgb.show()
# convert image into 3D array of 3 8-bit RGB values for each pixel
rgb_array = np.array(img_rgb, dtype=np.uint8)
size_d = list(rgb_array.shape) # pic dims [y, x, 3]
size_d[2] = -1 # change 3 -> -1
# convert 3D 8-bit color array to 2D int32 color array (each pixel has 1 ANSI color value)
colorint32 = np.dstack((rgb_array, np.zeros(rgb_array.shape[:2], 'uint8'))).view('uint32').squeeze(-1)
ansi_array = np.floor(colorint32**(1/3)) # cube root & round down to get 256 ANSI color codes
# convert 2d int32 array back to 3D 8-bit array, if needed
rgb_convert = colorint32.view('uint8').reshape(size_d)[:,:,:3]
# ANSI array of colored dots based on ansi_colors array
# BG = 230 # off-white background canvas color ANSI code
ansi_list = ansi_array.astype('uint8').tolist() # convert array to list of lists
for lst in ansi_list:
dot_list = ['\033[48;5;230m'] # BG color
for val in lst:
dot = '\033[38;5;' + str(val) + 'm.' # add FG color values
dot_list.append(dot)
dot_list.append('\033[m')
row = ''.join(dot_list)
print(row)
# Image.fromarray(canvas_array).show()
# print("\033[48;5;230m\033[38;5;45m.\033[38;5;7m.\033[m")
# print(len(canvas_list))
# print(rgb_array)
# print(size_d)
# print(colorint32)
# print(canvas_array)
# print(canvas_list)
if __name__ == "__main__":
open_image_array() | jakecharris/pointillism | source.py | source.py | py | 2,172 | python | en | code | 0 | github-code | 36 |
36749002781 | import cv2
thres = 0.45 # Threshold to detect object
# hog = cv2.HOGDescriptor()
# hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
#
# cv2.startWindowThread()
cap = cv2.VideoCapture(0)
### for IP CAM
# cap = cv2.VideoCapture('rtsp://admin:admin@192.168.1.108/',apiPreference=cv2.CAP_FFMPEG)
cap.set(3,1280)
cap.set(4,720)
cap.set(10,70)
classNames= []
classFile = 'coco.names'
with open(classFile,'rt') as f:
classNames = f.read().rstrip('\n').split('\n')
configPath = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
weightsPath = 'frozen_inference_graph.pb'
net = cv2.dnn_DetectionModel(weightsPath,configPath)
net.setInputSize(320,320)
net.setInputScale(1.0/ 127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)
while True:
success,img = cap.read()
classIds, confs, bbox = net.detect(img,confThreshold=thres)
# print(classIds,bbox)
if classIds.__len__() != 0:
for classId, confidence,box in zip(classIds.flatten(),confs.flatten(),bbox):
cv2.rectangle(img,box,color=(0,255,0),thickness=2)
cv2.putText(img,classNames[classId-1].upper(),(box[0]+10,box[1]+30),
cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)
cv2.putText(img,str(round(confidence*100,2)),(box[0]+200,box[1]+30),
cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)
cv2.imshow("Output",img)
cv2.waitKey(ord('q')) | Quant1766/detectObjects | main.py | main.py | py | 1,420 | python | en | code | 0 | github-code | 36 |
2722455823 | class Solution(object):
def sortSentence(self, s):
"""
:type s: str
:rtype: str
"""
tmp = []
for sub in s.split(" "):
tmp.append([sub[-1], sub[:-1]])
tmp.sort()
return ' '.join([x[1] for x in tmp])
| ZhengLiangliang1996/Leetcode_ML_Daily | contest/biweekcontest52/sortsentence.py | sortsentence.py | py | 296 | python | en | code | 1 | github-code | 36 |
18665738715 | import os, requests
index_file_path = input("Enter Index file path with extension m3u8 : ")
index_file = open(index_file_path,'r')
indexes = index_file.read()
index_file.close()
output_file_path = input("Enter output file path : ")
output_file = open(output_file_path,'wb')
folder_path = input("Enter folder path with extension m3u8_contents ('#<internet>' for get file from internet) : ")
if folder_path == '#<internet>' :
indexes = indexes.split('http')[1:]
indexes = ['http'+x.split('\n')[0] for x in indexes]
for index in indexes :
content = requests.get(index)
output_file.write(content.content)
else :
indexes = indexes.split('file:')[1:]
indexes = [x.split('\n')[0].split('/')[-1] for x in indexes]
for index in indexes :
content_file = open(os.path.join(folder_path,index),'rb')
output_file.write(content_file.read())
content_file.close()
output_file.close()
| nkpro2000sr/m3u8ToVideo | m3u8tovideo.py | m3u8tovideo.py | py | 952 | python | en | code | 1 | github-code | 36 |
74172105702 | import math
import centrosome.outline
import numpy
import numpy.testing
import pytest
import skimage.measure
import skimage.segmentation
import cellprofiler_core.image
import cellprofiler_core.measurement
from cellprofiler_core.constants.measurement import (
EXPERIMENT,
COLTYPE_FLOAT,
C_LOCATION,
)
import cellprofiler_core.object
import cellprofiler_core.pipeline
import cellprofiler_core.preferences
import cellprofiler_core.workspace
cellprofiler_core.preferences.set_headless()
import plugins.measureobjectintensitymultichannel as momc
IMAGE_NAME = "MyImage"
OBJECT_NAME = "MyObjects"
N_CHANNELS = 4
@pytest.fixture(scope="function")
def image():
return cellprofiler_core.image.Image()
@pytest.fixture(scope="function")
def measurements():
return cellprofiler_core.measurement.Measurements()
@pytest.fixture(scope="function")
def module():
module = momc.MeasureObjectIntensityMultichannel()
module.images_list.value = IMAGE_NAME
module.objects_list.value = OBJECT_NAME
return module
@pytest.fixture(scope="function")
def objects(image):
objects = cellprofiler_core.object.Objects()
objects.parent_image = image
return objects
@pytest.fixture(scope="function")
def workspace(image, measurements, module, objects):
image_set_list = cellprofiler_core.image.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.add(IMAGE_NAME, image)
object_set = cellprofiler_core.object.ObjectSet()
object_set.add_objects(objects, OBJECT_NAME)
return cellprofiler_core.workspace.Workspace(
cellprofiler_core.pipeline.Pipeline(),
module,
image_set,
object_set,
measurements,
image_set_list,
)
def test_init():
x = momc.MeasureObjectIntensityMultichannel()
def assert_features_and_columns_match(measurements, module):
object_names = [
x
for x in measurements.get_object_names()
if x
not in (
"Image",
EXPERIMENT,
)
]
features = [
[f for f in measurements.get_feature_names(object_name) if f != "Exit_Status"]
for object_name in object_names
]
columns = module.get_measurement_columns(None)
assert sum([len(f) for f in features]) == len(columns)
for column in columns:
index = object_names.index(column[0])
assert column[1] in features[index]
assert column[2] == COLTYPE_FLOAT
def test_supplied_measurements(module):
"""Test the get_category / get_measurements, get_measurement_images functions"""
module.images_list.value = "MyImage"
module.objects_list.value = "MyObjects1, MyObjects2"
expected_categories = tuple(
sorted(
[
momc.INTENSITY,
C_LOCATION,
]
)
)
assert (
tuple(sorted(module.get_categories(None, "MyObjects1"))) == expected_categories
)
assert module.get_categories(None, "Foo") == []
measurements = module.get_measurements(None, "MyObjects1", momc.INTENSITY)
assert len(measurements) == len(momc.ALL_MEASUREMENTS)
measurements = module.get_measurements(None, "MyObjects1", C_LOCATION)
assert len(measurements) == len(momc.ALL_LOCATION_MEASUREMENTS)
assert all([m in momc.ALL_LOCATION_MEASUREMENTS for m in measurements])
assert (
module.get_measurement_images(
None,
"MyObjects1",
momc.INTENSITY,
momc.MAX_INTENSITY,
)
== ["MyImage"]
)
def test_get_measurement_columns(module):
"""test the get_measurement_columns method"""
module.images_list.value = "MyImage"
module.objects_list.value = "MyObjects1, MyObjects2"
module.nchannels.value = N_CHANNELS
columns = module.get_measurement_columns(None)
assert len(columns) == N_CHANNELS * 2 * (
len(momc.ALL_MEASUREMENTS) + len(momc.ALL_LOCATION_MEASUREMENTS)
)
for column in columns:
assert column[0] in ("MyObjects1", "MyObjects2")
assert column[2], COLTYPE_FLOAT
category = column[1].split("_")[0]
assert category in (
momc.INTENSITY,
C_LOCATION,
)
if category == momc.INTENSITY:
assert column[1][column[1].find("_") + 1 :] in [
m + "_MyImage" + f"_c{c+1}"
for m in momc.ALL_MEASUREMENTS
for c in range(N_CHANNELS)
]
else:
assert column[1][column[1].find("_") + 1 :] in [
m + "_MyImage" + f"_c{c+1}"
for m in momc.ALL_LOCATION_MEASUREMENTS
for c in range(N_CHANNELS)
]
def test_zero(image, measurements, module, objects, workspace):
"""Make sure we can process a blank image"""
image.pixel_data = numpy.zeros((10, 10, N_CHANNELS))
objects.segmented = numpy.zeros((10, 10))
module.nchannels.value = N_CHANNELS
module.run(workspace)
for category, features in (
(
momc.INTENSITY,
momc.ALL_MEASUREMENTS,
),
(
C_LOCATION,
momc.ALL_LOCATION_MEASUREMENTS,
),
):
for meas_name in features:
for c in range(N_CHANNELS):
feature_name = "%s_%s_%s_c%s" % (category, meas_name, "MyImage", c + 1)
data = measurements.get_current_measurement("MyObjects", feature_name)
assert numpy.product(data.shape) == 0, (
"Got data for feature %s" % feature_name
)
assert_features_and_columns_match(measurements, module)
def test_masked(image, measurements, module, objects, workspace):
"""Make sure we can process a completely masked image
Regression test of IMG-971
"""
image.pixel_data = numpy.zeros((10, 10, N_CHANNELS))
image.mask = numpy.zeros((10, 10), bool)
objects.segmented = numpy.ones((10, 10), int)
module.nchannels.value = N_CHANNELS
module.run(workspace)
for meas_name in momc.ALL_MEASUREMENTS:
for c in range(N_CHANNELS):
feature_name = "%s_%s_%s_c%s" % (
momc.INTENSITY,
meas_name,
"MyImage",
c + 1,
)
data = measurements.get_current_measurement("MyObjects", feature_name)
assert numpy.product(data.shape) == 1
assert numpy.all(numpy.isnan(data) | (data == 0))
assert_features_and_columns_match(measurements, module)
def test_one(image, measurements, module, objects, workspace):
"""Check measurements on a 3x3 square of 1's"""
data = numpy.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
]
)
image.pixel_data = data.astype(float)
objects.segmented = data.astype(int)
module.nchannels.value = 1
module.run(workspace)
for category, meas_name, value in (
(
momc.INTENSITY,
momc.INTEGRATED_INTENSITY,
9,
),
(
momc.INTENSITY,
momc.MEAN_INTENSITY,
1,
),
(
momc.INTENSITY,
momc.STD_INTENSITY,
0,
),
(
momc.INTENSITY,
momc.MIN_INTENSITY,
1,
),
(
momc.INTENSITY,
momc.MAX_INTENSITY,
1,
),
(
momc.INTENSITY,
momc.INTEGRATED_INTENSITY_EDGE,
8,
),
(
momc.INTENSITY,
momc.MEAN_INTENSITY_EDGE,
1,
),
(
momc.INTENSITY,
momc.STD_INTENSITY_EDGE,
0,
),
(
momc.INTENSITY,
momc.MIN_INTENSITY_EDGE,
1,
),
(
momc.INTENSITY,
momc.MAX_INTENSITY_EDGE,
1,
),
(
momc.INTENSITY,
momc.MASS_DISPLACEMENT,
0,
),
(
momc.INTENSITY,
momc.LOWER_QUARTILE_INTENSITY,
1,
),
(
momc.INTENSITY,
momc.MEDIAN_INTENSITY,
1,
),
(
momc.INTENSITY,
momc.UPPER_QUARTILE_INTENSITY,
1,
),
(
C_LOCATION,
momc.LOC_CMI_X,
3,
),
(
C_LOCATION,
momc.LOC_CMI_Y,
2,
),
):
feature_name = "%s_%s_%s_c%s" % (category, meas_name, "MyImage", 1)
data = measurements.get_current_measurement("MyObjects", feature_name)
assert numpy.product(data.shape) == 1
assert data[0] == value, "%s expected %f != actual %f" % (
meas_name,
value,
data[0],
)
def test_one_masked(image, measurements, module, objects, workspace):
"""Check measurements on a 3x3 square of 1's"""
img = numpy.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
]
)
mask = img > 0
image.pixel_data = img.astype(float)
image.mask = mask
objects.segmented = img.astype(int)
module.run(workspace)
for meas_name, value in (
(momc.INTEGRATED_INTENSITY, 9),
(momc.MEAN_INTENSITY, 1),
(momc.STD_INTENSITY, 0),
(momc.MIN_INTENSITY, 1),
(momc.MAX_INTENSITY, 1),
(momc.INTEGRATED_INTENSITY_EDGE, 8),
(momc.MEAN_INTENSITY_EDGE, 1),
(momc.STD_INTENSITY_EDGE, 0),
(momc.MIN_INTENSITY_EDGE, 1),
(momc.MAX_INTENSITY_EDGE, 1),
(momc.MASS_DISPLACEMENT, 0),
(momc.LOWER_QUARTILE_INTENSITY, 1),
(momc.MEDIAN_INTENSITY, 1),
(momc.MAD_INTENSITY, 0),
(momc.UPPER_QUARTILE_INTENSITY, 1),
):
feature_name = "%s_%s_%s_c%s" % (momc.INTENSITY, meas_name, "MyImage", 1)
data = measurements.get_current_measurement("MyObjects", feature_name)
assert numpy.product(data.shape) == 1
assert data[0] == value, "%s expected %f != actual %f" % (
meas_name,
value,
data[0],
)
def test_intensity_location(image, measurements, module, objects, workspace):
data = (
numpy.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 2, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
]
).astype(float)
/ 2.0
)
image.pixel_data = data
labels = (data != 0).astype(int)
objects.segmented = labels
module.run(workspace)
for feature, value in (
(momc.LOC_MAX_X, 5),
(momc.LOC_MAX_Y, 2),
):
feature_name = "%s_%s_%s_c%s" % (C_LOCATION, feature, "MyImage", 1)
values = measurements.get_current_measurement(OBJECT_NAME, feature_name)
assert len(values) == 1
assert values[0] == value
def test_mass_displacement(image, measurements, module, objects, workspace):
"""Check the mass displacement of three squares"""
labels = numpy.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 2, 2, 2, 2, 2, 0],
[0, 2, 2, 2, 2, 2, 0],
[0, 2, 2, 2, 2, 2, 0],
[0, 2, 2, 2, 2, 2, 0],
[0, 2, 2, 2, 2, 2, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 0, 0, 0, 0, 0, 0],
]
)
data = numpy.zeros(labels.shape, dtype=float)
#
# image # 1 has a single value in one of the corners
# whose distance is sqrt(8) from the center
#
data[1, 1] = 1
# image # 2 has a single value on the top edge
# and should have distance 2
#
data[7, 3] = 1
# image # 3 has a single value on the left edge
# and should have distance 2
data[15, 1] = 1
image.pixel_data = data
objects.segmented = labels
module.run(workspace)
feature_name = "%s_%s_%s_c%s" % (
momc.INTENSITY,
momc.MASS_DISPLACEMENT,
"MyImage",
1,
)
mass_displacement = measurements.get_current_measurement("MyObjects", feature_name)
assert numpy.product(mass_displacement.shape) == 3
numpy.testing.assert_almost_equal(mass_displacement[0], math.sqrt(8.0))
numpy.testing.assert_almost_equal(mass_displacement[1], 2.0)
numpy.testing.assert_almost_equal(mass_displacement[2], 2.0)
def test_mass_displacement_masked(image, measurements, module, objects, workspace):
"""Regression test IMG-766 - mass displacement of a masked image"""
labels = numpy.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 2, 2, 2, 2, 2, 0],
[0, 2, 2, 2, 2, 2, 0],
[0, 2, 2, 2, 2, 2, 0],
[0, 2, 2, 2, 2, 2, 0],
[0, 2, 2, 2, 2, 2, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 0, 0, 0, 0, 0, 0],
]
)
data = numpy.zeros(labels.shape, dtype=float)
#
# image # 1 has a single value in one of the corners
# whose distance is sqrt(8) from the center
#
data[1, 1] = 1
# image # 2 has a single value on the top edge
# and should have distance 2
#
data[7, 3] = 1
# image # 3 has a single value on the left edge
# and should have distance 2
data[15, 1] = 1
mask = numpy.zeros(data.shape, bool)
mask[labels > 0] = True
image.pixel_data = data
image.mask = mask
objects.segmented = labels
module.run(workspace)
feature_name = "%s_%s_%s_c%s" % (
momc.INTENSITY,
momc.MASS_DISPLACEMENT,
"MyImage",
1,
)
mass_displacement = measurements.get_current_measurement("MyObjects", feature_name)
assert numpy.product(mass_displacement.shape) == 3
numpy.testing.assert_almost_equal(mass_displacement[0], math.sqrt(8.0))
numpy.testing.assert_almost_equal(mass_displacement[1], 2.0)
numpy.testing.assert_almost_equal(mass_displacement[2], 2.0)
def test_quartiles_uniform(image, measurements, module, objects, workspace):
"""test quartile values on a 250x250 square filled with uniform values"""
labels = numpy.ones((250, 250), int)
numpy.random.seed(0)
data = numpy.random.uniform(size=(250, 250))
image.pixel_data = data
objects.segmented = labels
module.run(workspace)
feature_name = "%s_%s_%s_c%s" % (
momc.INTENSITY,
momc.LOWER_QUARTILE_INTENSITY,
"MyImage",
1,
)
data = measurements.get_current_measurement("MyObjects", feature_name)
numpy.testing.assert_almost_equal(data[0], 0.25, 2)
feature_name = "%s_%s_%s_c%s" % (
momc.INTENSITY,
momc.MEDIAN_INTENSITY,
"MyImage",
1,
)
data = measurements.get_current_measurement("MyObjects", feature_name)
numpy.testing.assert_almost_equal(data[0], 0.50, 2)
feature_name = "%s_%s_%s_c%s" % (momc.INTENSITY, momc.MAD_INTENSITY, "MyImage", 1)
data = measurements.get_current_measurement("MyObjects", feature_name)
numpy.testing.assert_almost_equal(data[0], 0.25, 2)
feature_name = "%s_%s_%s_c%s" % (
momc.INTENSITY,
momc.UPPER_QUARTILE_INTENSITY,
"MyImage",
1,
)
data = measurements.get_current_measurement("MyObjects", feature_name)
numpy.testing.assert_almost_equal(data[0], 0.75, 2)
def test_quartiles_one_pixel(image, module, objects, workspace):
"""Regression test a bug that occurs in an image with one pixel"""
labels = numpy.zeros((10, 20))
labels[2:7, 3:8] = 1
labels[5, 15] = 2
numpy.random.seed(0)
data = numpy.random.uniform(size=(10, 20))
image.pixel_data = data
objects.segmented = labels
# Crashes when pipeline runs in measureobjectintensity.py revision 7146
module.run(workspace)
def test_quartiles_four_objects(image, measurements, module, objects, workspace):
"""test quartile values on a 250x250 square with 4 objects"""
labels = numpy.ones((250, 250), int)
labels[125:, :] += 1
labels[:, 125:] += 2
numpy.random.seed(0)
data = numpy.random.uniform(size=(250, 250))
#
# Make the distributions center around .5, .25, 1/6 and .125
#
data /= labels.astype(float)
image.pixel_data = data
objects.segmented = labels
module.run(workspace)
feature_name = "%s_%s_%s_c%s" % (
momc.INTENSITY,
momc.LOWER_QUARTILE_INTENSITY,
"MyImage",
1,
)
data = measurements.get_current_measurement("MyObjects", feature_name)
numpy.testing.assert_almost_equal(data[0], 1.0 / 4.0, 2)
numpy.testing.assert_almost_equal(data[1], 1.0 / 8.0, 2)
numpy.testing.assert_almost_equal(data[2], 1.0 / 12.0, 2)
numpy.testing.assert_almost_equal(data[3], 1.0 / 16.0, 2)
feature_name = "%s_%s_%s_c%s" % (
momc.INTENSITY,
momc.MEDIAN_INTENSITY,
"MyImage",
1,
)
data = measurements.get_current_measurement("MyObjects", feature_name)
numpy.testing.assert_almost_equal(data[0], 1.0 / 2.0, 2)
numpy.testing.assert_almost_equal(data[1], 1.0 / 4.0, 2)
numpy.testing.assert_almost_equal(data[2], 1.0 / 6.0, 2)
numpy.testing.assert_almost_equal(data[3], 1.0 / 8.0, 2)
feature_name = "%s_%s_%s_c%s" % (
momc.INTENSITY,
momc.UPPER_QUARTILE_INTENSITY,
"MyImage",
1,
)
data = measurements.get_current_measurement("MyObjects", feature_name)
numpy.testing.assert_almost_equal(data[0], 3.0 / 4.0, 2)
numpy.testing.assert_almost_equal(data[1], 3.0 / 8.0, 2)
numpy.testing.assert_almost_equal(data[2], 3.0 / 12.0, 2)
numpy.testing.assert_almost_equal(data[3], 3.0 / 16.0, 2)
feature_name = "%s_%s_%s_c%s" % (momc.INTENSITY, momc.MAD_INTENSITY, "MyImage", 1)
data = measurements.get_current_measurement("MyObjects", feature_name)
numpy.testing.assert_almost_equal(data[0], 1.0 / 4.0, 2)
numpy.testing.assert_almost_equal(data[1], 1.0 / 8.0, 2)
numpy.testing.assert_almost_equal(data[2], 1.0 / 12.0, 2)
numpy.testing.assert_almost_equal(data[3], 1.0 / 16.0, 2)
def test_median_intensity_masked(image, measurements, module, objects, workspace):
numpy.random.seed(37)
labels = numpy.ones((10, 10), int)
mask = numpy.ones((10, 10), bool)
mask[:, :5] = False
pixel_data = numpy.random.uniform(size=(10, 10, N_CHANNELS)).astype(numpy.float32)
pixel_data[~mask, :] = 1
image.pixel_data = pixel_data
image.mask = mask
objects.segmented = labels
expected = [
numpy.sort(pixel_data[mask, c])[numpy.sum(mask) // 2] for c in range(N_CHANNELS)
]
module.nchannels.value = N_CHANNELS
module.run(workspace)
assert isinstance(measurements, cellprofiler_core.measurement.Measurements)
for c, exp in enumerate(expected):
values = measurements.get_current_measurement(
OBJECT_NAME,
"_".join((momc.INTENSITY, momc.MEDIAN_INTENSITY, IMAGE_NAME, f"c{c+1}")),
)
assert len(values) == 1
assert exp == values[0]
| BodenmillerGroup/ImcPluginsCP | tests/test_measureobjectintensitymultichannel.py | test_measureobjectintensitymultichannel.py | py | 20,306 | python | en | code | 10 | github-code | 36 |
16211931413 | # 두 요소의 위치를 바꿔주는 helper function
def swap_elements(my_list, index1, index2):
tmpValue = my_list[index2]
my_list[index2] = my_list[index1]
my_list[index1] = tmpValue
return my_list
# 퀵 정렬에서 사용되는 partition 함수
def partition(my_list, start, end):
p = end # pivot 인덱스
b = start # big 그룹 이동 인덱스
i = start # 체크 이동 인덱스
for i in range(start, end):
# pivot과 해당값을 비교히고 파티셔닝
if my_list[p] < my_list[i]:
i += 1
else:
my_list = swap_elements(my_list, b, i)
b += 1
i += 1
swap_elements(my_list, b, p)
return b
# 테스트 1
list1 = [4, 3, 6, 2, 7, 1, 5]
pivot_index1 = partition(list1, 0, len(list1) - 1)
print(list1)
print(pivot_index1)
# 테스트 2
list2 = [6, 1, 2, 6, 3, 5, 4]
pivot_index2 = partition(list2, 0, len(list2) - 1)
print(list2)
print(pivot_index2)
| hwiVeloper/zzamzzam2 | codeit-algorithm-python/15-partition/main.py | main.py | py | 914 | python | ko | code | 0 | github-code | 36 |
17795055591 | from typing import List
class Solution:
def highFive(self, items: List[List[int]]) -> List[List[int]]:
items.sort(key=lambda x: (x[0], x[1]))
ans = list()
for i in range(len(items)):
if i == len(items) - 1 or items[i][0] != items[i + 1][0]:
temp = list()
temp.append(items[i][0])
sum = 0
for j in range(5):
sum += items[i - j][1]
sum //= 5
temp.append(sum)
ans.append(temp)
return ans
| fastso/learning-python | leetcode_cn/solved/pg_1086.py | pg_1086.py | py | 569 | python | en | code | 0 | github-code | 36 |
24486912101 | """ Clean up of hail endpoint column in three steps:
- remove unused staging column
- remove now obsolete testing column
- make hail_endpoint_production non null
Revision ID: aa6d3d875f28
Revises: 8bd62cba881a
Create Date: 2020-11-17 09:28:10.910999
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'aa6d3d875f28'
down_revision = '8bd62cba881a'
branch_labels = None
depends_on = None
def upgrade():
op.drop_column('user', 'hail_endpoint_staging')
op.drop_column('user', 'hail_endpoint_testing')
op.execute(sa.text('''update "user" set hail_endpoint_production = '' where hail_endpoint_production is null'''))
op.alter_column('user', 'hail_endpoint_production', existing_type=sa.VARCHAR(), server_default='', nullable=False)
def downgrade():
op.alter_column('user', 'hail_endpoint_production',
existing_type=sa.VARCHAR(),
nullable=True)
op.add_column('user', sa.Column('hail_endpoint_testing', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('user', sa.Column('hail_endpoint_staging', sa.VARCHAR(), autoincrement=False, nullable=True))
| openmaraude/APITaxi | APITaxi_models2/migrations/versions/20201117_09:28:10_aa6d3d875f28_clean_hail_endpoints.py | 20201117_09:28:10_aa6d3d875f28_clean_hail_endpoints.py | py | 1,214 | python | en | code | 24 | github-code | 36 |
32058791507 | from datetime import datetime
from binance_functions.client_functions.binance_client import CreateClient
from decouple import config
from settings.bot_settings import *
from multiprocessing import Pool
import timeit
import json
class HistoricalData:
    """Download recent klines for every configured symbol from Binance in
    parallel and dump the aggregated candles to ./data/all_data.json."""
    def __init__(self):
        # `default=None` prevents python-decouple from raising
        # UndefinedValueError on a missing key, making the fallback branch
        # below actually reachable (the original `config('API_KEY') != None`
        # crashed when the key was absent).
        api_key = config('API_KEY', default=None)
        secret_key = config('SECRET_KEY', default=None)
        if api_key is not None and secret_key is not None:
            self.api_key = api_key
            self.secret_key = secret_key
        else:
            self.api_key = ""
            self.secret_key = ""
        self.symbol_list = SYMBOL_LIST
        self.exchange_pair = EXCHANGE_PAIR
        self.interval = INTERVAL
        self.all_data = dict()
        self.my_client = CreateClient(self.api_key, self.secret_key).client()
    def historical(self, symbol):
        """Fetch up to 1000 klines for ``symbol`` and return
        ``{symbol+pair: [[date, open, high, low, close], ...]}`` with the
        still-open newest candle dropped."""
        all_datas = self.my_client.get_historical_klines(symbol=symbol + self.exchange_pair, interval=self.interval, limit=1000)
        converted_datas = list()
        data_dict = dict()
        for value in all_datas:
            # value[6] is the close time in milliseconds; keep only the
            # 10-digit seconds part before converting.
            close_date = datetime.fromtimestamp(int(str(value[6])[:10]))
            close_date = close_date.strftime('%d/%m/%Y %H:%M')
            open_price = float(value[1])
            high_price = float(value[2])
            low_price = float(value[3])
            close_price = float(value[4])
            converted_datas.append([close_date, open_price, high_price, low_price, close_price])
        # The most recent candle is not closed yet, so discard it.
        converted_datas.pop()
        data_dict[symbol + self.exchange_pair] = converted_datas
        return data_dict
    def collect_historical(self):
        """Fetch all symbols in parallel, merge the results into
        ``self.all_data`` and persist them as JSON. Returns True."""
        historical_process_start_time = timeit.default_timer()
        # Context manager guarantees the worker pool is torn down even when
        # a fetch raises (the original leaked the pool on exceptions).
        with Pool() as pool:
            result = pool.map(self.historical, self.symbol_list)
        historical_process_finish_time = timeit.default_timer()
        for data in result:
            self.all_data[list(data.keys())[0]] = list(data.values())[0]
        with open("./data/all_data.json", 'w') as file:
            json.dump(self.all_data, file)
        print("Collect Historical Data Process Take:", historical_process_finish_time - historical_process_start_time, "Seconds")
        return True
44649010863 | #!/usr/bin/env python
# coding: utf-8
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import os
import pandas as pd
from divexplorer_generalized.FP_Divergence import FP_Divergence
DATASET_DIRECTORY = os.path.join(os.path.curdir, "datasets")
# # Import data
def abbreviateValue(value, abbreviations={}):
    """Shorten *value* via the abbreviation table, then convert any remaining
    underscores to spaces unless the value is a 'q_'/'u_'-prefixed token."""
    for long_form, short_form in abbreviations.items():
        # str.replace is a no-op when long_form is absent.
        value = value.replace(long_form, short_form)
    # TODO: clarify why q_/u_ tokens keep their underscores.
    if value[:2] not in ("q_", "u_"):
        value = value.replace("_", " ")
    return value
def abbreviate_dict_value(input_dict, abbreviations):
    """Return *input_dict* with every second-level key run through
    :func:`abbreviateValue`; outer keys and values are untouched."""
    return {
        outer_key: {
            abbreviateValue(inner_key, abbreviations): inner_value
            for inner_key, inner_value in inner_dict.items()
        }
        for outer_key, inner_dict in input_dict.items()
    }
def get_predefined_color_labels(abbreviations={}):
    """Map each (criterion, generalization, pruning) configuration — after
    abbreviation — to its fixed plot color."""
    palette = [
        ("entropy_base", "#7fcc7f"),
        ("divergence_criterion_base", "#009900"),
        ("entropy_generalized", "mediumblue"),
        ("divergence_criterion_generalized", "orangered"),
        ("entropy_base_pruned", "yellow"),
        ("divergence_criterion_base_pruned", "#C179EE"),
        ("entropy_generalized_pruned", "gray"),
        ("divergence_criterion_generalized_pruned", "#C01FB1"),
    ]
    return {abbreviateValue(name, abbreviations): color for name, color in palette}
def run_pruning_experiemnt(dataset_name='wine', min_support_tree=0.1,
                           min_sup_divergences=[0.1, 0.15, 0.2],
                           type_criterion="divergence_criterion",
                           type_experiment="one_at_time",
                           metric="d_fpr",
                           save=True,
                           output_dir='output_results_2',
                           saveFig=True,
                           dataset_dir=DATASET_DIRECTORY):
    """Benchmark divergent-pattern extraction on one dataset.

    For every minimum support in ``min_sup_divergences`` the patterns are
    extracted four times (base/generalized x pruned/unpruned), recording the
    execution time, the number of extracted patterns and the highest
    divergence. Results are optionally plotted (``saveFig``) and stored as
    JSON (``save``) under ``output_dir``.

    Raises:
        ValueError: if ``dataset_name`` is not 'wine', 'compas' or 'adult'.
    """
    print(dataset_name)
    print(min_sup_divergences)
    print(output_dir)
    # --- Load the dataset and attach classifier predictions ------------------
    if dataset_name == 'wine':
        from import_process_dataset import import_process_wine, train_classifier_kv
        df, class_map, continuous_attributes = import_process_wine()
        # Train and predict with a RF classifier.
        df = train_classifier_kv(df)
    elif dataset_name == "compas":
        from import_process_dataset import import_compas
        df, class_map, continuous_attributes = import_compas()
    elif dataset_name == "adult":
        from import_process_dataset import import_process_adult
        df, class_map, continuous_attributes = import_process_adult()
        from sklearn.preprocessing import LabelEncoder
        attributes = df.columns.drop("class")
        X = df[attributes].copy()
        y = df["class"].copy()
        encoders = {}
        for column in attributes:
            # `np.object` was deprecated in NumPy 1.20 and removed in 1.24;
            # the builtin `object` is the equivalent dtype sentinel.
            if df.dtypes[column] == object:
                print(column)
                le = LabelEncoder()
                X[column] = le.fit_transform(df[column])
                encoders[column] = le
        from sklearn.model_selection import StratifiedKFold
        from sklearn.model_selection import cross_val_predict
        from sklearn.ensemble import RandomForestClassifier
        clf = RandomForestClassifier(random_state=42)
        # Fixed random state; shuffle=True is required by newer sklearn.
        cv = StratifiedKFold(n_splits=10, random_state=42, shuffle=True)
        y_predicted = cross_val_predict(clf, X, y.values, cv=cv)
        df["predicted"] = y_predicted
    else:
        raise ValueError(f"Unknown dataset_name: {dataset_name!r}")
    # --- Tree-based discretization --------------------------------------------
    true_class_name = "class"
    pred_class_name = "predicted"
    cols_c = [true_class_name, pred_class_name]
    df_analyze = df.copy()
    from tree_discretization import TreeDiscretization
    tree_discr = TreeDiscretization()
    # Extract the discretization tree once; it is shared by all runs below.
    generalization_dict, discretizations = tree_discr.get_tree_discretization(
        df_analyze,
        type_splitting=type_experiment,
        min_support=min_support_tree,
        metric=metric,
        class_map=class_map,
        continuous_attributes=list(continuous_attributes),
        class_and_pred_names=cols_c,
        storeTree=True,
        type_criterion=type_criterion,
        # minimal_gain = 0.0015
    )
    # --- Extract patterns ------------------------------------------------------
    out_support = {}   # min_sup -> config key -> most divergent pattern (1-row frame)
    out_time = {}      # min_sup -> config key -> elapsed seconds
    out_fp = {}        # min_sup -> config key -> number of extracted patterns
    from utils_extract_divergence_generalized import (
        extract_divergence_generalized,
    )
    import time
    for apply_generalization in [False, True]:
        type_gen = 'generalized' if apply_generalization else 'base'
        print(type_gen)
        for keep in [True, False]:
            if keep:
                keep_items = tree_discr.get_keep_items_associated_with_divergence()
                keep_str = "_pruned"
            else:
                keep_items = None
                keep_str = ""
            print(keep_str)
            for min_sup_divergence in min_sup_divergences:
                print(min_sup_divergence, end=" ")
                s_time = time.time()
                FP_fm = extract_divergence_generalized(
                    df_analyze,
                    discretizations,
                    generalization_dict,
                    continuous_attributes,
                    min_sup_divergence=min_sup_divergence,
                    apply_generalization=apply_generalization,
                    true_class_name=true_class_name,
                    predicted_class_name=pred_class_name,
                    class_map=class_map,
                    metrics_divergence=[metric],
                    FPM_type="fpgrowth",
                    save_in_progress=False,
                    keep_only_positive_divergent_items=keep_items,
                )
                # Measure once so the stored and printed values agree.
                elapsed = time.time() - s_time
                key = type_gen + keep_str
                out_time.setdefault(min_sup_divergence, {})[key] = elapsed
                print(f"({elapsed:.2f})")
                fp_divergence_i = FP_Divergence(FP_fm, metric=metric)
                most_divergent = (
                    fp_divergence_i.getDivergence(th_redundancy=0)
                    .sort_values(
                        [fp_divergence_i.metric, fp_divergence_i.t_value_col], ascending=False
                    )
                    .head(1)
                )
                out_support.setdefault(min_sup_divergence, {})[key] = most_divergent
                out_fp.setdefault(min_sup_divergence, {})[key] = len(FP_fm)
    # --- Plot results -----------------------------------------------------------
    output_fig_dir = os.path.join(os.path.curdir, output_dir, "figures", "output_performance")
    if saveFig:
        from pathlib import Path
        Path(output_fig_dir).mkdir(parents=True, exist_ok=True)
    abbreviations = {"one_at_time": "indiv t.",
                     "divergence_criterion": "g$\\Delta$", "entropy": "entr"}
    color_labels = get_predefined_color_labels(abbreviations)
    lines_style = {k: "-" for k in color_labels}
    lines_style.update({k: "--" for k in color_labels if ("base" in k and abbreviations["entropy"] in k)})
    lines_style.update({k: "-." for k in color_labels if ('base' in k and abbreviations["divergence_criterion"] in k)})
    size_fig = (3, 3)
    from utils_plot import plotDicts
    out_support_max = {}
    for sup in sorted(out_support.keys()):
        out_support_max[sup] = {}
        for type_gen in out_support[sup]:
            out_support_max[sup][type_gen] = out_support[sup][type_gen][metric].iloc[0]
    for info_i, results in [('time', out_time), (f"max_{metric}", out_support_max), ('FP', out_fp)]:
        info_plot = {}
        for sup in sorted(results.keys()):
            for type_gen in results[sup]:
                type_gen_str = abbreviateValue(f"{type_criterion}_{type_gen}", abbreviations)
                if type_gen_str not in info_plot:
                    info_plot[type_gen_str] = {}
                info_plot[type_gen_str][sup] = results[sup][type_gen]
        figure_name = os.path.join(output_fig_dir, f"{dataset_name}_stree_{min_support_tree}_{metric}_{info_i}.pdf")
        title, ylabel = '', ''
        if info_i == 'time':
            title = 'Execution time'
            ylabel = "Execution time $(seconds)$"
        elif info_i == f"max_{metric}":
            ylabel = "Max $\\Delta_{FPR}$"
            title = "Highest $\\Delta_{FPR}$"
        elif info_i == 'FP':
            ylabel = "#FP"
            title = "#FP"
        plotDicts(info_plot, marker=True,
                  title=title, sizeFig=size_fig,
                  linestyle=lines_style, color_labels=color_labels,
                  xlabel="Minimum support s", ylabel=ylabel, labelSize=10.2,
                  outside=False, saveFig=saveFig, nameFig=figure_name)
    # --- Store performance results ----------------------------------------------
    if save:
        output_results = os.path.join(os.path.curdir, output_dir, 'performance')
        from pathlib import Path
        Path(output_results).mkdir(parents=True, exist_ok=True)
        conf_name = f"{dataset_name}_{metric}_{type_criterion}_{min_support_tree}"
        import json
        with open(os.path.join(output_results, f'{conf_name}_time.json'), 'w') as output_file:
            output_file.write(json.dumps(out_time))
        with open(os.path.join(output_results, f'{conf_name}_fp.json'), 'w') as output_file:
            output_file.write(json.dumps(out_fp))
        out_support_max = {}
        for sup in sorted(out_support.keys()):
            out_support_max[sup] = {}
            for type_gen in out_support[sup]:
                out_support_max[sup][type_gen] = out_support[sup][type_gen][metric].iloc[0]
        with open(os.path.join(output_results, f'{conf_name}_div.json'), 'w') as output_file:
            output_file.write(json.dumps(out_support_max))
import argparse
if __name__ == "__main__":
    # Command-line interface for the pruning experiment.
    parser = argparse.ArgumentParser()
    parser.add_argument("--name_output_dir", default="output_red",
                        help="specify the name of the output folder")
    parser.add_argument("--dataset_dir", default=DATASET_DIRECTORY,
                        help="specify the dataset directory")
    # NOTE: parsed but currently not forwarded to run_pruning_experiemnt.
    parser.add_argument("--no_show_figs", action="store_true",
                        help="specify not_show_figures to vizualize the plots. The results are stored into the specified outpur dir.")
    parser.add_argument("--dataset_name", default="wine",
                        help="specify the name of the dataset")
    parser.add_argument("--min_support_tree", type=float, default=0.1,
                        help="specify the name of the dataset")
    parser.add_argument("--min_sup_divergences", type=float, nargs="*",
                        default=[0.15, 0.2],
                        help="specify the minimum support scores")
    parser.add_argument("--type_criterion", type=str, default="divergence_criterion",
                        help="specify the split criterion")
    parser.add_argument("--metric", type=str, default='d_fpr',
                        help="specify the metric")
    args = parser.parse_args()
    run_pruning_experiemnt(
        min_support_tree=args.min_support_tree,
        min_sup_divergences=args.min_sup_divergences,
        type_criterion=args.type_criterion,
        metric=args.metric,
        dataset_name=args.dataset_name,
        output_dir=args.name_output_dir,
        dataset_dir=args.dataset_dir,
    )
34116401936 | '''
Detects a grid.
'''
import ujson
import cv2
from math import floor
from operator import itemgetter
from argparse import ArgumentParser
from pytesseract import image_to_string
# CLI: output filename prefix, a dev/preview flag, and which HUD parts to read.
parser = ArgumentParser(description = 'Detect grid.')
parser.add_argument(
    "-f",
    "--filename",
    dest = "filename",
    help = "filename prefix of images"
)
parser.add_argument(
    "-d",
    "--dev",
    dest = "dev",
    help = "run in development mode (preview image)",
    action = "store_true"
)
parser.add_argument(
    "--parts",
    dest = "parts",
    help = "which parts to read",
    nargs = "+",
    default = ["grid", "rides", "day", "roads"]
)
args = parser.parse_args()
dev = args.dev
parts = args.parts
# Load the screenshot once at import; all detection below works on this frame.
image = cv2.imread("screen.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Image dimensions are referenced as module globals throughout this script.
(imageHeight, imageWidth, channels) = image.shape
def hasTheRightAspectRatio(boundingRect):
    """True when the box is roughly 1.2:1 in either orientation."""
    left, top, width, height = boundingRect
    wide = round((width / height) * 10) == 12
    tall = round((height / width) * 10) == 12
    return wide or tall
def isBigEnough(boundingRect):
    """Box width must fall strictly between 1/30 and 1/5 of the screen height."""
    width = boundingRect[2]
    return round(imageHeight / 30) < width < round(imageHeight / 5)
def topIsBrighterThanBottom(boundingRect, image):
    """Compare channel sums of the pixels one sixth above vs. below the box centre."""
    left, top, width, height = boundingRect
    centre_x = left + (width / 2)
    centre_y = top + (height / 2)
    pixel_above = image[round(centre_y - (height / 6)), round(centre_x)]
    pixel_below = image[round(centre_y + (height / 6)), round(centre_x)]
    return sum(pixel_above) > sum(pixel_below)
def isAHouse(boundingRect, image):
    """A candidate must pass the aspect, size and brightness filters.

    The checks short-circuit in order so the pixel probe only runs on
    plausible boxes."""
    if not hasTheRightAspectRatio(boundingRect):
        return False
    if not isBigEnough(boundingRect):
        return False
    return topIsBrighterThanBottom(boundingRect, image)
def drawBoundingRects(image, contours):
    """Outline every bounding rect on *image* in red (BGR 0,0,255)."""
    for left, top, width, height in contours:
        topLeft = (left, top)
        bottomRight = (left + width, top + height)
        cv2.rectangle(image, topLeft, bottomRight, (0, 0, 255), 2)
def cellFromBuilding(contour, offset, scale):
    """Derive a square grid cell from a detected building's bounding box.

    *offset* is [offsetX, offsetY]: fractions of the box size to shift the
    cell origin by. *scale* is [scaleX, scaleY]: cell width as a fraction of
    the box width.
    """
    [contourX, contourY, w, h] = contour
    [offsetX, offsetY] = offset
    [scaleX, scaleY] = scale
    newX = contourX - (w * offsetX)
    newY = contourY - (h * offsetY)
    # Bug fix: the original computed `scale * w`, multiplying the *list*
    # `scale` by an int (list repetition / TypeError downstream); the
    # intended width factor is the scalar scaleX.
    newW = scaleX * w
    return {"x": newX, "y": newY, "width": newW}
def topLeftCellFromSample(cell, imageWidth, imageHeight, borders):
    """Walk left/up from *cell* in whole-cell steps until the border margin."""
    x = cell["x"]
    y = cell["y"]
    width = cell["width"]
    topBorder, leftBorder = borders
    usableWidth = x - (imageWidth * leftBorder)
    usableHeight = y - (imageHeight * topBorder)
    stepsLeft = floor(usableWidth / width)
    stepsUp = floor(usableHeight / width)
    return {"x": x - stepsLeft * width, "y": y - stepsUp * width, "width": width}
def bottomRightCellFromSample(cell, imageWidth, imageHeight, borders):
    """Walk right/down from *cell* in whole-cell steps until the border margin."""
    x = cell["x"]
    y = cell["y"]
    width = cell["width"]
    bottomBorder, rightBorder = borders
    usableWidth = (imageWidth - x - width) - (imageWidth * rightBorder)
    usableHeight = (imageHeight - y - width) - (imageHeight * bottomBorder)
    stepsRight = floor(usableWidth / width)
    stepsDown = floor(usableHeight / width)
    return {"x": x + stepsRight * width, "y": y + stepsDown * width, "width": width}
def getCells(contour, cellSettings, borderSettings):
    """Return [sampleCell, topLeftCell, bottomRightCell] for one building box."""
    boxX, boxY, w, h = contour
    if w < h:
        # Normalise to landscape orientation.
        w, h = h, w
    offset, scale = cellSettings
    sample = cellFromBuilding([boxX, boxY, w, h], offset, scale)
    topBorder, bottomBorder, leftBorder, rightBorder = borderSettings
    topLeft = topLeftCellFromSample(
        sample, imageWidth, imageHeight, [topBorder, leftBorder]
    )
    bottomRight = bottomRightCellFromSample(
        sample, imageWidth, imageHeight, [bottomBorder, rightBorder]
    )
    return [sample, topLeft, bottomRight]
def drawCell(image, cell):
    """Draw one square cell outline in red."""
    x, y, width = itemgetter("x", "y", "width")(cell)
    corner1 = (round(x), round(y))
    # Round the *sums*, matching the original behaviour for fractional cells.
    corner2 = (round(x + width), round(y + width))
    cv2.rectangle(image, corner1, corner2, (0, 0, 255), 2)
def drawAll(image, cellSettings, borderSettings, contours):
    """Draw the three derived cells of every detected building."""
    for contour in contours:
        for cell in getCells(contour, cellSettings, borderSettings):
            drawCell(image, cell)
def arithmeticMean(numbers):
    """Plain average of a non-empty sequence."""
    total = sum(numbers)
    return total / len(numbers)
def reciprocal(number):
    """Multiplicative inverse of *number*."""
    inverse = 1 / number
    return inverse
def reciprocalSum(numbers):
    """Sum of 1/x over the sequence (left-to-right, same order as before)."""
    return sum(1 / value for value in numbers)
def harmonicMean(numbers):
    """Harmonic mean: n divided by the sum of reciprocals."""
    denominator = sum(1 / value for value in numbers)
    return len(numbers) / denominator
def selectPart(collection, i):
    """Project element/field *i* out of every item in *collection*."""
    return [item[i] for item in collection]
def getAverageCells(cells):
    """Harmonic-mean the top-left (index 1) and bottom-right (index 2) cells
    across all detected buildings; fall back to a dummy pair when nothing
    was detected."""
    if not cells:
        placeholder = {"x": 1, "y": 1, "width": 10}
        return [dict(placeholder), dict(placeholder)]
    averaged = []
    for index in (1, 2):
        cells_at_index = selectPart(cells, index)
        averaged.append({
            key: harmonicMean(selectPart(cells_at_index, key))
            for key in ("x", "y", "width")
        })
    return averaged
def drawAverage(image, cellSettings, borderSettings, contours):
    """Draw the averaged top-left and bottom-right cells."""
    cells = [getCells(contour, cellSettings, borderSettings) for contour in contours]
    averageTopLeft, averageBottomRight = getAverageCells(cells)
    drawCell(image, averageTopLeft)
    drawCell(image, averageBottomRight)
def getGrid(topLeftCell, bottomRightCell):
    """Enumerate the full lattice of cells spanned by the two corner cells."""
    left, top, cell_width = itemgetter("x", "y", "width")(topLeftCell)
    right, bottom = itemgetter("x", "y", "width")(bottomRightCell)[:2]
    # Spans measured from the top-left corner to just past the bottom-right cell.
    span_x = (right + cell_width) - left
    span_y = (bottom + cell_width) - top
    step = round(cell_width)
    x_values = range(round(left), round(span_x + cell_width), step)
    y_values = range(round(top), round(span_y + cell_width), step)
    return [
        [{"x": x, "y": y, "width": step} for x in x_values]
        for y in y_values
    ]
def drawGrid(image, grid):
    """Outline every cell of *grid* on *image*."""
    for gridRow in grid:
        for gridCell in gridRow:
            drawCell(image, gridCell)
def linearConversion(oldRange, newRange, value):
    """Linearly map *value* from *oldRange* onto *newRange*."""
    oldMin, oldMax = oldRange
    newMin, newMax = newRange
    scaled = ((value - oldMin) * (newMax - newMin)) / (oldMax - oldMin)
    return scaled + newMin
# Trackbar scaffolding (only used in dev mode).
sliderMax = 100
windowTitle = "slider"
# Cell geometry defaults, expressed as per-mille fractions of the building box.
defaultOffsetX = 120 / 1000
defaultOffsetY = 120 / 1000
defaultScaleX = 660 / 1000
defaultScaleY = 660 / 1000
# Screen-border margins as fractions of the image size; the top/bottom margins
# are interpolated from the capture resolution (480px..1050px tall screens).
defaultTopBorder = linearConversion([480, 1050], [15, 50], imageHeight) / 1000
defaultBottomBorder = linearConversion([480, 1050], [120, 140], imageHeight) / 1000
defaultLeftBorder = 30 / 1000
defaultRightBorder = 30 / 1000
borders = [
    ["top", defaultTopBorder],
    ["bottom", defaultBottomBorder],
    ["left", defaultLeftBorder],
    ["right", defaultRightBorder]
]
# Name/default pairs for the (dev-mode) border trackbars.
borderTrackbarNamesAndDefaults = list(map(lambda d: [f"{d[0]}Border", d[1]], borders))
def getSettings():
    """Bundle the default offsets/scales and border fractions into the two
    settings structures consumed by the cell/grid helpers."""
    cellSettings = [
        [defaultOffsetX, defaultOffsetY],
        [defaultScaleX, defaultScaleY],
    ]
    borderSettings = [
        defaultTopBorder,
        defaultBottomBorder,
        defaultLeftBorder,
        defaultRightBorder,
    ]
    return [cellSettings, borderSettings]
def cropCell(image, cell):
    """Crop one square cell from *image*, convert BGR->RGB and resize it to
    64x64; returns nested lists suitable for JSON serialisation."""
    x, y, width = itemgetter("x", "y", "width")(cell)
    crop = image.copy()[y:y + width, x:x + width]
    crop = cv2.cvtColor(crop, cv2.COLOR_BGR2RGB)
    (cropHeight, cropWidth, cropChannels) = crop.shape
    targetWidth = 64
    # Shrinking looks best with INTER_AREA; enlarging with INTER_LINEAR.
    interpolation = cv2.INTER_AREA if cropWidth > targetWidth else cv2.INTER_LINEAR
    # Bug fix: the flag must be passed as the `interpolation` keyword — the
    # third *positional* argument of cv2.resize is `dst`, not interpolation.
    crop = cv2.resize(crop, (targetWidth, targetWidth), interpolation=interpolation)
    return crop.tolist()
def _ocr_single_line(region):
    """Shared OCR pipeline: threshold-to-zero, invert, then read one line of
    UI text; strips newlines/form-feeds from the result.

    Consolidates four previously duplicated threshold/invert/OCR blocks.
    """
    (ret, prepared) = cv2.threshold(region, 127, 255, cv2.THRESH_TOZERO)
    prepared = ~prepared
    return image_to_string(prepared, "eng", "--psm 7 --oem 1").replace("\n", "").replace("\f", "")
def cropRides(image):
    """Crop the ride-counter readout (top-right HUD)."""
    return image.copy()[
        round(imageHeight / 40):round(imageHeight / 15),
        round(imageWidth - (imageWidth / 4)):round(imageWidth - imageWidth / 8.7)
    ]
def getRides(image):
    """OCR the ride counter."""
    return _ocr_single_line(cropRides(image))
def cropDay(image):
    """Crop the day counter (top-right HUD, right of the ride counter)."""
    return image.copy()[
        round(imageHeight / 40):round(imageHeight / 15),
        round(imageWidth - (imageWidth / 12)):round(imageWidth - (imageWidth / 22))
    ]
def getDay(image):
    """OCR the day counter."""
    return _ocr_single_line(cropDay(image))
def cropRoads(image):
    """Crop the remaining-roads readout (bottom-centre HUD)."""
    return image.copy()[
        round(imageHeight - (imageHeight / 16)):round(imageHeight - (imageHeight / 21.5)),
        round((imageWidth / 2) + (imageWidth / 65)):round((imageWidth / 2) + (imageWidth / 33))
    ]
def getRoads(image):
    """OCR the remaining-roads counter."""
    return _ocr_single_line(cropRoads(image))
def cropGameOver(image):
    """Crop the region where the 'Game Over' banner appears."""
    return image.copy()[
        round((imageHeight / 2) - (imageHeight / 3)):round((imageHeight / 2) - (imageHeight / 4)),
        round((imageWidth / 2) - (imageWidth / 7)):round((imageWidth / 2) + (imageWidth / 7))
    ]
def getGameOver(image):
    """OCR the game-over banner region."""
    return _ocr_single_line(cropGameOver(image))
def cropUpgrade(image):
    """Crop the region where the weekly upgrade dialog title appears."""
    return image.copy()[
        round(
            (imageHeight / 2) - (imageHeight / 4.5)
        ):
        round(
            (imageHeight / 2) - (imageHeight / 7)
        ),
        round(
            (imageWidth / 2) - (imageWidth / 4.5)
        ):
        round(
            (imageWidth / 2) + (imageWidth / 10)
        )
    ]
def getUpgrade(image):
    """OCR the upgrade dialog title region."""
    return _ocr_single_line(cropUpgrade(image))
def detectGrid(value=0):
    """Compute the board grid from the detected buildings.

    `value` is accepted (and ignored) so this can serve as a trackbar
    callback; reads the module-level `contours` and `dev` globals.
    """
    cellSettings, borderSettings = getSettings()
    cells = [getCells(contour, cellSettings, borderSettings) for contour in contours]
    averageCells = getAverageCells(cells)
    grid = getGrid(*averageCells)
    if dev:
        print(cellSettings)
        print(borderSettings)
        image = cv2.imread("screen.png")
        drawBoundingRects(image, contours)
        cellSettings, borderSettings = getSettings()
        drawAverage(image, cellSettings, borderSettings, contours)
        drawAll(image, cellSettings, borderSettings, contours)
    return grid
def getContours(image):
    """Detect building-like bounding boxes in the screenshot.

    Quantises the colours, masks saturated/bright regions in HSV, finds the
    outer contours of the resulting edges and keeps only house-shaped boxes.
    """
    div = 32
    quantized = image // div * div + div // 2
    hsv = cv2.cvtColor(quantized, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, (0, 100, 150), (255, 255, 255))
    blurred = cv2.GaussianBlur(mask, (3, 3), 0)
    edges = cv2.Canny(blurred, 120, 255, 1)
    # findContours returns (contours, hierarchy) or (img, contours, hierarchy)
    # depending on the OpenCV version.
    found = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    found = found[0] if len(found) == 2 else found[1]
    rects = [list(cv2.boundingRect(c)) for c in found]
    return [rect for rect in rects if isAHouse(rect, quantized)]
if dev:
    # Dev mode: render every detection overlay onto the screenshot and save it.
    contours = getContours(image)
    cv2.namedWindow(windowTitle)
    grid = detectGrid()
    drawGrid(image, grid)
    drawBoundingRects(image, contours)
    [cellSettings, borderSettings] = getSettings()
    drawAverage(image, cellSettings, borderSettings, contours)
    drawAll(image, cellSettings, borderSettings, contours)
    cv2.imwrite(f"./images/test/{args.filename}.png", image)
else:
    # Normal mode: detect full-screen dialogs first, otherwise OCR the
    # requested HUD parts and print everything as one JSON document.
    gameOverString = getGameOver(gray)
    upgradeString = getUpgrade(gray)
    if gameOverString == "Game Over":
        print("GAME OVER")
    elif upgradeString.startswith("Woche"):
        print(upgradeString)
    else:
        if "grid" in parts:
            contours = getContours(image)
            [cellSettings, borderSettings] = getSettings()
            cells = list(map(lambda c: getCells(c, cellSettings, borderSettings), contours))
            grid = getGrid(*getAverageCells(cells))
            # Each cell becomes {"x", "y", "pixels"}: position plus a 64x64
            # RGB crop serialisable as JSON.
            gridData = list(
                map(
                    lambda row: list(
                        map(
                            lambda cell: {key: cell[key] for key in ["x", "y"]} | {"pixels": cropCell(image, cell)},
                            row
                        )
                    ),
                    grid
                )
            )
        else:
            gridData = None
        if "rides" in parts:
            rides = getRides(gray)
        else:
            rides = None
        if "day" in parts:
            day = getDay(gray)
        else:
            day = None
        if "roads" in parts:
            roads = getRoads(gray)
        else:
            roads = None
        data = {
            "grid": gridData,
            "rides": rides,
            "day": day,
            "roads": roads
        }
        print(ujson.dumps(data))
| pumpncode/mimoai | read-game.py | read-game.py | py | 12,747 | python | en | code | 0 | github-code | 36 |
34647682320 | from pathlib import Path
from uuid import uuid4
from django.core.validators import FileExtensionValidator
from django.db.models import (
CASCADE,
CharField,
DateField,
FileField,
ForeignKey,
IntegerChoices,
IntegerField,
Model,
TextField,
URLField,
)
from django.utils.translation import gettext_lazy as _
def get_file_name_in_storage(instance, filename):
    """Build a unique storage path: ``<model_name>/<kid_id>/<uuid>-<filename>``.

    Bug fix: the original ignored the ``filename`` Django passes to
    ``upload_to`` callables and baked a literal placeholder into the name,
    losing the uploaded file's original name and extension.
    """
    name = f"{uuid4()}-{filename}"
    return Path(instance._meta.model_name) / str(instance.kid.id) / name
class Kid(Model):
    """A missing child record: scraped profile text plus indexed search fields."""
    class Color(IntegerChoices):
        """Colour palette shared by the eyes/hair/skin fields."""
        BLACK = 1, _("Preto")
        BROWN = 2, _("Castanhos")
        BLONDE = 3, _("Loiro")
        RED = 4, _("Ruivo")
        BLUE = (
            5,
            _("Azul"),
        )
        SWARTHY = 6, _("Morena")
        WHITE = 7, _("Branca")
    # required fields
    name = CharField("Nome", max_length=255, db_index=True, unique=True)
    url = URLField("URL")
    full_text = TextField()
    # optional indexed fields
    dob = DateField("Data de nascimento", null=True, blank=True, db_index=True)
    missing_since = DateField(
        "Desaparecida(o) desde", null=True, blank=True, db_index=True
    )
    # NOTE(review): eyes/hair/skin are CharFields whose choices come from an
    # IntegerChoices enum, so stored values are stringified integers — confirm
    # this matches existing data before changing.
    eyes = CharField(
        "Cor dos olhos",
        max_length=50,
        choices=Color.choices,
        null=True,
        blank=True,
        db_index=True,
    )
    hair = CharField(
        "Cor dos cabelos",
        max_length=50,
        choices=Color.choices,
        null=True,
        blank=True,
        db_index=True,
    )
    skin = CharField(
        "Cor da pele",
        max_length=50,
        choices=Color.choices,
        null=True,
        blank=True,
        db_index=True,
    )
    # optional fields
    mother = CharField("Mãe", max_length=255, null=True, blank=True)
    father = CharField("Pai", max_length=255, null=True, blank=True)
    last_seen_at_city = CharField(
        "Cidade onde foi vista(o) pela última vez",
        max_length=255,
        null=True,
        blank=True,
        db_index=True,
    )
    last_seen_at_state = CharField(
        "UF onde foi vista(o) pela última vez",
        max_length=2,
        null=True,
        blank=True,
        db_index=True,
    )
    age_at_occurrence = IntegerField("Idade quando desapareceu", null=True, blank=True)
    class Meta:
        verbose_name = "criança"
        ordering = ("name",)
    def __str__(self):
        return self.name
class KidImage(Model):
    """A photo attached to a Kid; stored under <model_name>/<kid_id>/ via
    get_file_name_in_storage."""
    kid = ForeignKey(Kid, on_delete=CASCADE, verbose_name="criança")
    image = FileField(
        verbose_name="Foto",
        upload_to=get_file_name_in_storage,
        validators=(
            # Restrict uploads to common raster image formats.
            FileExtensionValidator(allowed_extensions=("jpg", "jpeg", "png", "gif")),
        ),
    )
    def __str__(self):
        return self.kid.name
    class Meta:
        verbose_name = "foto"
        verbose_name_plural = "fotos"
| cuducos/fio-de-ariadne | web/core/models.py | models.py | py | 2,873 | python | en | code | 78 | github-code | 36 |
23682193376 |
# Read the puzzle input; readlines() keeps a trailing newline on every line
# except possibly the last.
file_object = open("advent.of.code.03.txt", "r")
lines = file_object.readlines()
# Ensure the final line also carries a newline so the slicing below is uniform.
lines[-1] = lines[-1] + '\n'
# Strip the trailing newline from every row to get the tree map.
treesmap = list(map(lambda s: s[:-1], lines))
class Position:
    """A 2-D grid coordinate, starting at the origin."""
    def __init__(self):
        self.x = 0
        self.y = 0
# Start at the map origin with no trees counted yet.
currentpos = Position()
treescount = 0
def getnextposfunc(rightmov, downmov, length):
    """Build a stepper that advances by the given slope, wrapping the x
    coordinate around a map *length* columns wide."""
    def step(position):
        moved = Position()
        moved.x = (position.x + rightmov) % length
        moved.y = position.y + downmov
        return moved
    return step
def istreeposition(position, map):
    """True when the map cell at *position* holds a tree ('#').

    (The parameter name `map` shadows the builtin but is kept for
    compatibility with keyword callers.)
    """
    row = map[position.y]
    return row[position.x] == '#'
# Walk the slope (right 3, down 1) to the bottom row, counting trees hit.
nextpos = getnextposfunc(3, 1, len(treesmap[0]))
while currentpos.y < len(treesmap) - 1:
    currentpos = nextpos(currentpos)
    if istreeposition(currentpos, treesmap):
        treescount += 1
print(treescount)
15795277469 | #!/usr/bin/env python2
from __future__ import print_function
import struct
import sys
import killerbee
class RZVictim(object):
    """Wraps a KillerBee sniffer tuned to a single 802.15.4 channel."""
    def __init__(self, channel=26):
        self.kb = killerbee.KillerBee()
        self.kb.set_channel(channel)
        self.kb.sniffer_on()
        device = self.kb.get_dev_info()[0]
        print("RZVictim: listening on '%s', link-type DLT_IEEE802_15_4, "
              "capture size 127 bytes" % device)
    def __del__(self):
        # Best effort: stop sniffing and release the radio on collection.
        self.kb.sniffer_off()
        self.kb.close()
    def pnext(self):
        """Busy-poll the radio until a frame is captured and return it."""
        frame = self.kb.pnext()
        while not frame:
            frame = self.kb.pnext()
        return frame
# 802.15.4 frame parser and the session log file named on the command line.
parser = killerbee.Dot154PacketParser()
filename = sys.argv[1]
logfile = open(filename, "w")
def log(x):
    """Print *x* and append it to the session log file.

    Uses %-formatting so non-string arguments work: the capture loop below
    calls log(packet) with a list, which the original `x + "\n"` raised
    TypeError on.
    """
    global logfile
    print(x)
    logfile.write("%s\n" % x)
r = RZVictim()
# Main capture loop: log the 802.15.4 header of each valid frame, then decode
# any CTP/LQI routing payloads or measurement records it carries.
try:
    while True:
        p = r.pnext()
        # print(p)
        crc_state = p["validcrc"]
        if not crc_state:
            log("Frame with invalid CRC received.")
            continue
        try:
            packet = parser.pktchop(p[0])
        except:
            # NOTE(review): bare except — also swallows KeyboardInterrupt here.
            log("Undecodeable packet: %s" % (p))
            continue
        src, dst, seq = 0, 0, 0
        if packet[1] and packet[3] and packet[5]: # has src, dst and seqno
            # parsed data in native endianess
            try:
                data = struct.unpack("=BHH", packet[1]+packet[3]+packet[5])
            except struct.error:
                log("Undecodeable packet: %s" % (packet))
                continue
            src = data[2]
            dst = data[1]
            seq = data[0]
            # FCF | Seq# | DPAN | DA | SPAN | SA | [Beacon Data] | PHY Payload
            log("%d(%s) -> %d(%s) (#%d): Len: %d, Payload-Len: "
                "%d, FCF:%s, Payload:%s, crc:%s" %
                (src, repr(packet[4]), dst, repr(packet[2]), seq, len(p[0]),
                len(packet[7]), repr(packet[0]), repr(packet[7]), crc_state))
        elif packet[0][0] == "\x02": # ack frame
            seq = ord(packet[1])
            log("ACK: #%d, crc:%s" % (seq, crc_state))
        else: # uninteresting packet
            log(packet)
        # payload in network endianess!
        # Dispatch on the first two payload bytes to the known frame formats.
        if packet[7][:2] == "\x3F\x70": # ctp routing frame
            # byte 4 seems to be sequence number, byte 3 seems to be constant 0
            try:
                data = struct.unpack("!BHH", packet[7][4:9])
            except struct.error:
                print("Error decoding CTP routing frame")
                continue
            option = data[0]
            parent = data[1]
            etx = data[2]
            # remaining two bytes seem to be crc
            log(" CTP Routing: option:%d, parent:%d, etx:%d" %
                (option, parent, etx))
        elif packet[7][:2] == "\x3F\x71": # ctp data frame
            try:
                data = struct.unpack("!BBHHBB", packet[7][2:10])
            except struct.error:
                print("Error decoding CTP data frame")
                continue
            option = data[0]
            thl = data[1]
            etx = data[2]
            origin = data[3]
            seqno = data[4]
            collect_id = data[5]
            # Everything after the fixed header (minus the 2-byte tail) is payload.
            payload = packet[7][10:-2]
            log(" CTP Data: option:%d, thl:%d, etx:%d, origin:%d, "
                "seqno:%d, collect_id:%d, data:%s" %
                (option, thl, etx, origin, seqno, collect_id, repr(payload)))
        elif packet[7][:2] == "\x3F\x73": # lqi routing frame
            try:
                data = struct.unpack("!HHHHHH", packet[7][2:14])
            except struct.error:
                print("Error decoding LQI routing frame")
                continue
            originaddr = data[0]
            seqno = data[1]
            originseqno = data[2]
            parent = data[3]
            cost = data[4]
            hopcount = data[5]
            log(" LQI routing: origin:%d, seqno:%d, originseqno:%d, "
                "parent:%d, cost:%d, hopcount:%d" %
                (originaddr, seqno, originseqno, parent, cost, hopcount))
        elif packet[7][:2] == "\x3F\x74": # lqi data frame
            try:
                data = struct.unpack("!HHHHBH", packet[7][2:13])
            except struct.error:
                print("Error decoding LQI data frame")
                continue
            originaddr = data[0]
            seqno = data[1]
            originseqno = data[2]
            hopcount = data[3]
            collectionid = data[4]
            payload = data[5]
            log(" LQI data: origin:%d, seqno:%d, originseqno:%d, "
                "hopcount:%d, collect_id:%d, data:%d" %
                (originaddr, seqno, originseqno,
                hopcount, collectionid, payload))
        elif packet[7][:2] == "\x3F\xAA": # measurement information
            try:
                data = struct.unpack("!IHHHH", packet[7][2:14])
            except struct.error:
                print("Error decoding measurement information")
                continue
            timediff = data[0]
            framecount = data[1]
            duplicates = data[2]
            packetcount = data[3]
            missedcount = data[4]
            log(" Measurement packet: framecount:%d, packetcount:%d, "
                "missedcount:%d, timediff:%d, duplicates:%d" %
                (framecount, packetcount, missedcount, timediff, duplicates))
except KeyboardInterrupt:
    # Ctrl-C ends the capture session; flush and close the log.
    logfile.close()
| mwil/wifire | src/tools/killerbee_monitor.py | killerbee_monitor.py | py | 5,542 | python | en | code | 5 | github-code | 36 |
74332806822 | from time import sleep
import traceback
from django.forms import model_to_dict
from django.shortcuts import redirect, render
from .models import Transaction
from .form.CreateTransactionForm import CreateTransactionForm
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from FinApp.decorators import basic_auth
import json
from django.core import serializers
from django.shortcuts import render, redirect
import pandas as pd
import os
from django.core.files.storage import FileSystemStorage
from Transaction.models import Transaction
from .form.UploadTransactionForm import UploadTransactionForm
from FinApp.settings import BASE_DIR
from django.contrib import messages
from .constants import (
START_DATE_QUERY_PARAM,
END_DATE_QUERY_PARAM,
TRANSACTION_ID_VAR,
INCOME_TABLE_HEADER,
EXPENSE_TABLE_HEADER
)
from Budget.models import(
Category
)
from django.contrib import messages
# Create your views here.
def transaction_view(request):
    """Render the transaction page (GET) or bulk-import transactions from a CSV (POST).

    GET supports optional ``start_date``/``end_date`` query parameters
    (``yyyy-mm-dd``) that narrow the listed transactions.
    POST expects a ``myfile`` upload whose columns are exactly:
    category, type, amount, description, date.
    Unauthenticated users are redirected to the login page.
    """
    if not request.user.is_authenticated:
        return redirect('/login')
    if request.method == 'GET':
        start_date = request.GET.get(START_DATE_QUERY_PARAM, None)
        end_date = request.GET.get(END_DATE_QUERY_PARAM, None)
        context = {'income_table_header': INCOME_TABLE_HEADER, 'expense_table_header': EXPENSE_TABLE_HEADER}
        # Build the queryset plus a human-readable description of the range.
        if not start_date and not end_date:
            query_set = Transaction.transaction_manager.retrieve_transaction(user=request.user)
            context['range'] = 'Any'
        elif not start_date:
            query_set = Transaction.transaction_manager.retrieve_transaction(user=request.user, end_date=tuple(end_date.split('-')))
            context['range'] = f'Before {end_date}'
        elif not end_date:
            query_set = Transaction.transaction_manager.retrieve_transaction(user=request.user, start_date=tuple(start_date.split('-')))
            context['range'] = f'After {start_date}'
        else:
            query_set = Transaction.transaction_manager.retrieve_transaction(user=request.user, start_date=tuple(start_date.split('-')), end_date=tuple(end_date.split('-')))
            context['range'] = f'From {start_date} to {end_date}'
        # Serialize, drop the user field and expose the primary key as "id".
        serialized_data = json.loads(serializers.serialize('json', query_set, use_natural_foreign_keys=True, use_natural_primary_keys=True))
        for data in serialized_data:
            transaction = data["fields"]
            del transaction["user"]
            transaction.update({"id": data["pk"]})
        context['transactions'] = [data["fields"] for data in serialized_data]
        return render(request, 'transaction.html', context)
    if request.method == 'POST':
        # Bug fix: the original nested a duplicate `if request.method == 'POST'`
        # check and silently returned None when no file was attached.
        if 'myfile' not in request.FILES:
            messages.error(request, "No file was uploaded!")
            return redirect("/transactions/")
        fs = FileSystemStorage()
        myfile = request.FILES['myfile']
        filename = fs.save(myfile.name, myfile)
        file_path = os.path.join(BASE_DIR, fs.url(filename)[1:])
        data = pd.read_csv(file_path, header=0)
        # Validate that the CSV has exactly the expected columns.
        required_headers = ["category", "type", "amount", "description", "date"]
        actual_headers = data.columns.values.tolist()
        missing_headers = list(set(required_headers).difference(set(actual_headers)))
        error_headers = list(set(actual_headers).difference(set(required_headers)))
        if len(missing_headers) > 0:
            messages.error(request, "Missing columns are: {}".format(missing_headers))
        if len(error_headers) > 0:
            messages.error(request, "Columns: {}, do not exist for Transacation Model!".format(error_headers))
        # Bug fix: the original only redirected when extra columns were present,
        # so a file with *missing* columns fell through and crashed row parsing.
        if missing_headers or error_headers:
            return redirect("/transactions/")
        data_dict = data.to_dict(orient='records')
        for i, row in enumerate(data_dict):
            form = UploadTransactionForm(request.user, **row)
            if form.is_valid():
                Transaction.transaction_manager.create_transaction(
                    user=request.user,
                    **form.cleaned_data
                )
            else:
                # Stop at the first bad row and surface its errors to the user.
                messages.error(request, "Row {} has some errors! ".format(i))
                messages.error(request, form.errors)
                return redirect("/transactions/")
        messages.success(request, "Upload Successful!" )
        return redirect("/transactions/")
@csrf_exempt
@basic_auth
def get_transactions(request, start_date: str = None, end_date: str = None):
    """Return the authenticated user's transactions as JSON.

    Optional ``start_date``/``end_date`` query parameters (``yyyy-mm-dd``)
    narrow the range; the response mirrors the context used by the HTML view.
    Bug fix: removed the unreachable ``return render(...)`` that followed the
    ``return JsonResponse(...)`` in the original.
    """
    if not request.user.is_authenticated:
        return redirect('/login')
    if request.method == 'GET':
        start_date = request.GET.get(START_DATE_QUERY_PARAM, None)
        end_date = request.GET.get(END_DATE_QUERY_PARAM, None)
        context = {'income_table_header': INCOME_TABLE_HEADER, 'expense_table_header': EXPENSE_TABLE_HEADER}
        if not start_date and not end_date:
            query_set = Transaction.transaction_manager.retrieve_transaction(user=request.user)
            context['range'] = 'Any'
        elif not start_date:
            query_set = Transaction.transaction_manager.retrieve_transaction(user=request.user, end_date=tuple(end_date.split('-')))
            context['range'] = f'Before {end_date}'
        elif not end_date:
            query_set = Transaction.transaction_manager.retrieve_transaction(user=request.user, start_date=tuple(start_date.split('-')))
            context['range'] = f'After {start_date}'
        else:
            query_set = Transaction.transaction_manager.retrieve_transaction(user=request.user, start_date=tuple(start_date.split('-')), end_date=tuple(end_date.split('-')))
            context['range'] = f'From {start_date} to {end_date}'
        # Serialize, drop the user field and expose the primary key as "id".
        serialized_data = json.loads(serializers.serialize('json', query_set, use_natural_foreign_keys=True, use_natural_primary_keys=True))
        for data in serialized_data:
            transaction = data["fields"]
            del transaction["user"]
            transaction.update({"id": data["pk"]})
        context['transactions'] = [data["fields"] for data in serialized_data]
        return JsonResponse(context, status=201)
@csrf_exempt
@basic_auth
def create_transaction(request):
    """POST: create a transaction from form data and return it as JSON.

    GET: return the names of the user's active categories (used to populate
    the creation form). Unauthenticated users are redirected to login.
    """
    if not request.user.is_authenticated:
        return redirect('login')
    if request.method == 'POST':
        mapped = CreateTransactionForm.map_fields(request.POST.dict())
        form = CreateTransactionForm(request.user, **mapped)
        if not form.is_valid():
            return JsonResponse({'message': 'Failed to create transaction', 'field_errors': CreateTransactionForm.map_fields(form.errors, reverse=True)}, status=422)
        created = Transaction.transaction_manager.create_transaction(
            user=request.user,
            **form.cleaned_data
        )
        return JsonResponse(model_to_dict(created), status=201)
    elif request.method == 'GET':
        active_categories = Category.category_manager.get_categories(user=request.user).filter(is_active = True)
        names = [model_to_dict(category)['name'] for category in active_categories]
        return JsonResponse({'categories': names}, status=201)
@csrf_exempt
@basic_auth
def delete_transaction(request):
    """Delete the transaction whose id is POSTed under TRANSACTION_ID_VAR.

    Returns an empty JSON body with status 201 on success, or a 422 error
    payload when the deletion fails. Unauthenticated users are redirected.
    """
    if not request.user.is_authenticated:
        return redirect('login')
    if request.method == 'POST':
        # Renamed from ``id`` — the original local shadowed the builtin.
        transaction_id = request.POST.get(TRANSACTION_ID_VAR)
        try:
            Transaction.transaction_manager.delete_transaction(user=request.user, id=transaction_id)
            return JsonResponse({},status=201)
        except Exception:
            # Deliberately broad: any manager failure maps to one client error.
            return JsonResponse({'non_field_errors': 'Failed to delete transaction'}, status=422)
@csrf_exempt
@basic_auth
def update_transaction(request, id: str = None):
    """POST: update a transaction; GET: fetch one for editing.

    The ``id`` parameter is unused (kept for URL-conf compatibility); the
    transaction id actually comes from TRANSACTION_ID_VAR in the request.
    Bug fix: removed the unreachable ``return render(request, "", context)``
    after the GET JsonResponse and the leftover debug ``print`` statements.
    """
    if not request.user.is_authenticated:
        return redirect('login')
    if request.method == 'POST':
        transaction_id = request.POST.get(TRANSACTION_ID_VAR)
        try:
            form_data = CreateTransactionForm.map_fields(request.POST.dict())
            form = CreateTransactionForm(request.user, **form_data)
            if form.is_valid():
                updated_transaction = Transaction.transaction_manager.update_transaction(
                    user=request.user,
                    id=transaction_id,
                    **form.cleaned_data
                )
                if not updated_transaction:
                    return JsonResponse({'non_field_errors': 'Invalid Transaction'}, status=422)
                return JsonResponse(model_to_dict(updated_transaction), status=201)
            return JsonResponse({'field_errors': CreateTransactionForm.map_fields(form.errors, reverse=True)}, status=422)
        except Exception:
            # Log the full traceback server-side; return a generic 500 payload.
            print(traceback.format_exc())
            return JsonResponse({'non_field_errors': 'Failed to update transaction. Contact Administrator'}, status=500)
    elif request.method == 'GET':
        transaction_id = request.GET.get(TRANSACTION_ID_VAR)
        query_set = Transaction.transaction_manager.retrieve_transaction(user=request.user, id=transaction_id)
        serialized_data = json.loads(serializers.serialize('json', query_set, use_natural_foreign_keys=True, use_natural_primary_keys=True))
        for data in serialized_data:
            transaction = data["fields"]
            del transaction["user"]
            transaction.update({"id": data["pk"]})
        categories = Category.category_manager.get_categories(user=request.user)
        context = {
            'transaction': CreateTransactionForm.map_fields(serialized_data[0]['fields'], reverse=True),
            'categories': [model_to_dict(category)['name'] for category in categories]
        }
        return JsonResponse(context, status=200)
| edwinlowxh/CZ3002---Advanced-Software-Engineering | FinApp/Transaction/views.py | views.py | py | 12,542 | python | en | code | 0 | github-code | 36 |
6318280878 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 15 12:38:24 2019
@author: MHozayen
Simple Linear Regression
Weighted Linear Regression is commented out
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def predict(x, y, pred):
    """Fit an ordinary least-squares linear regression on (x, y) and predict at `pred`.

    Parameters are array-likes as accepted by sklearn's LinearRegression
    (x/pred 2-D feature matrices, y 1-D targets) — assumed from usage, not
    enforced here; TODO confirm against callers.
    Returns the array of predictions for `pred`.
    """
    #degree is unused here
    # Exponentially decaying weights (mu**k, newest-first after the flip).
    # NOTE: these are only consumed by the commented-out weighted fit below,
    # so this computation is currently dead work.
    mu = 0.9
    ns = len(y)
    weights = np.ones(ns)*mu
    for k in range(ns):
        weights[k] = weights[k]**k
    weights = np.flip(weights, 0)
    # Fit a plain (unweighted) linear regression to the dataset.
    from sklearn.linear_model import LinearRegression
    lr = LinearRegression()
    #weighted linear regression
    #lr.fit(x, y, sample_weight=weights)
    lr.fit(x, y)
    y_pred = lr.predict(pred)
    return y_pred
6637403650 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, HttpResponse
# Create your views here.
def home_view(request):
    """Render the home page, greeting authenticated users by name.

    Uses the legacy callable form ``is_authenticated()`` (old Django).
    """
    display_name = 'Emine' if request.user.is_authenticated() else 'Guest'
    return render(request, 'home.html', {'isim': display_name})
| emineksknc/veterinerim | home/views.py | views.py | py | 382 | python | en | code | 0 | github-code | 36 |
36402183870 | from datetime import datetime, timedelta
# Loan calculator: prompts (in Estonian) for the principal, term, interest
# rate, schedule type and first payment date, then prints the amortization
# table row by row.
date_format = "%d.%m.%Y"
print("Laenukalkulaator")
# Principal: positive integer, re-prompt until valid.
amount = None
while amount is None:
    try:
        amount = int(input("laenusumma (täisarv): "))
        if amount <= 0:
            print("Sisestatud väärtus peab olema suurem kui 0")
            amount = None
    except ValueError:
        print("Kontrolli sisestatud väärtust")
# Loan period in years: positive integer, re-prompt until valid.
laenuperiood_aastates = None
while laenuperiood_aastates is None:
    try:
        laenuperiood_aastates = int(input("laenuperiood aastates (täisarv): "))
        if laenuperiood_aastates <= 0:
            print("Sisestatud väärtus peab olema suurem kui 0")
            laenuperiood_aastates = None
    except ValueError:
        print("Kontrolli sisestatud väärtust")
# Annual interest rate in percent: non-negative float.
intressi_protsent = None
while intressi_protsent is None:
    try:
        intressi_protsent = float(input("intressi protsent (ujukomaarv): "))
        if intressi_protsent < 0:
            print("Sisestatud intressi protsent peab olema positiivne")
            # Bug fix: the original printed the error but kept the negative
            # value, so the loop exited and a negative rate was accepted.
            intressi_protsent = None
    except ValueError:
        print("Kontrolli sisestatud väärtust")
# Schedule type: 'a' = annuity (equal total payments),
# 'p' = equal principal payments.
maksegraafik = None
while maksegraafik not in ("a", "p"):
    maksegraafik = input("tagasimaksegraafiku tüüp a) annuiteet; p) võrdsed põhiosad: ")
    if maksegraafik not in ("a", "p"):
        print("Kontrolli sisestatud väärtust (a või p)")
# First payment date in dd.mm.yyyy format.
start_date = None
while start_date is None:
    start_date_str = input("maksete alguskuupäev (pp.kk.aaaa): ")
    try:
        start_date = datetime.strptime(start_date_str, date_format)
    except ValueError:
        print("Sisesta kuupäevad õiges vormingus")
amount_left = amount
payment_date = start_date
print("Maksekuupäev\tJääk\t\tPõhiosa tagasimakse\tIntressimakse\tKokku")
payment_i = 0
total_payment_per_month = 0
main_payment = 0
if maksegraafik == "a":
    # Standard annuity formula: constant monthly payment over the term.
    total_payment_per_month = amount/(
            (1 - 1/(1+intressi_protsent/100/12)**(laenuperiood_aastates*12))/(intressi_protsent/100/12)
    )
else:
    # Equal-principal schedule: constant principal portion each month.
    main_payment = amount / (12 * laenuperiood_aastates)
while amount_left > 0.001:
    # Advance to the payment month, clamping the day to the month's length
    # (e.g. a loan started on the 31st pays on the 28th/30th where needed).
    payment_month = start_date.month + payment_i
    payment_year = start_date.year + (payment_month - 1) // 12
    payment_month = (payment_month - 1) % 12 + 1
    days_in_month = (datetime(payment_year + (payment_month // 12), payment_month % 12 + 1, 1) -
                     datetime(payment_year, payment_month, 1)).days
    payment_day = min(days_in_month, start_date.day)
    payment_date = datetime(payment_year, payment_month, payment_day)
    # Interest accrues on the remaining balance at the monthly rate.
    interest_payment = amount_left * (intressi_protsent / 100 / 12)
    if maksegraafik == "a":
        main_payment = total_payment_per_month - interest_payment
    else:
        total_payment_per_month = main_payment + interest_payment
    print("{}\t{:9.2f}\t{:.2f}\t\t\t{:.2f}\t\t{:.2f}".format(
        payment_date.strftime(date_format),
        amount_left,
        main_payment,
        interest_payment,
        total_payment_per_month
    ))
    amount_left -= main_payment
    payment_i += 1
| marianntoots/Programmeerimine_2021 | laenukalkulaator.py | laenukalkulaator.py | py | 3,000 | python | en | code | 0 | github-code | 36 |
70606642025 | import os
import sys
# 在linux会识别不了包 所以要加临时搜索目录
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
import pandas as pd
import pymysql
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from clickhouse_sqlalchemy import make_session
from util.CommonUtils import get_process_num, get_spark
class sqlalchemyUtil(object):
    """SQLAlchemy helper holding an engine, connection, session factory and
    an open transaction against the MySQL ``stock`` database.

    Pool sizes are scaled from the machine's process count. NOTE(review):
    credentials and host are hard-coded; ``self.txn`` is begun in __init__
    but never committed/rolled back here — presumably managed by callers.
    """
    def __init__(self):
        self.process_num= get_process_num()
        self.engine = create_engine('mysql+pymysql://root:123456@hadoop102:3306/stock?charset=utf8',
                                    pool_size=self.process_num * 2, max_overflow=self.process_num * 2, pool_timeout=50,
                                    pool_recycle=3600,pool_pre_ping=True)
        self.conn = self.engine.connect()
        self.session = sessionmaker(self.engine)
        self.txn=self.conn.begin()
    # Connect to the database (method name typo kept for compatibility).
    def mysqlConcnet(self):
        '''
        Print the engine dialect to confirm the database connection.
        '''
        print('连接主机',self.engine.dialect)
    def closeEngine(self):
        # Close the live connection, then dispose the pooled engine.
        self.conn.close()
        self.engine.dispose()
class pymysqlUtil(object):
    """Thin wrapper around a raw pymysql connection plus a cursor.

    Defaults connect to the hard-coded ``stock`` database on hadoop102.
    """
    def __init__(self,host='hadoop102',user='root',password='123456',port=3306,db='stock',charset='utf8'):
        self.process_num = get_process_num()
        self.db=pymysql.connect(host=host,user=user,password=password,port=port,db=db,charset=charset)
        self.cursor=self.db.cursor()
    # Connect to the database (method name typo kept for compatibility).
    def mysqlConcnet(self):
        '''
        Print the connected host info to confirm the database connection.
        '''
        print('连接主机',self.db.get_host_info())
    def closeResource(self):
        # Bug fix: the original referenced ``self.curson`` (a typo) which
        # would raise AttributeError — the attribute set in __init__ is
        # ``self.cursor``. Close the cursor first, then the connection.
        self.cursor.close()
        self.db.close()
# Liunx系统 window系统可能会有问题
class hiveUtil():
    '''Context-manager wrapper around a HiveServer2 SQLAlchemy engine.

    SQL statements passed to the engine must not end with a ';'.
    '''
    def __init__(self):
        self.engine = create_engine('hive://cgy:123456@hadoop102:10000/stock?auth=CUSTOM')
        self.conn = self.engine.connect()
    def __enter__(self):
        # Expose the engine for use inside the ``with`` block.
        return self.engine
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Bug fix: the context-manager protocol passes three exception
        # arguments; the original ``__exit__(self)`` signature raised
        # TypeError whenever a ``with`` block exited.
        self.conn.close()
        self.engine.dispose()
class clickhouseUtil():
    '''ClickHouse helper: pooled SQLAlchemy engine + session, with pandas and
    Spark read/write convenience methods.

    SQL statements must not end with a ';'. Pool sizes scale with the
    machine's process count.
    '''
    def __init__(self):
        self.process_num = get_process_num()
        self.engine = create_engine('clickhouse://default:''@hadoop102:8123/stock?auth=CUSTOM',
                                    pool_size=self.process_num * 2, max_overflow=self.process_num * 2, pool_timeout=50,
                                    pool_recycle=3600, pool_pre_ping=True
                                    )
        self.session = make_session(self.engine)
    def execute(self,sql):
        """Execute a statement, swallowing (and printing) any error."""
        try:
            self.cursor = self.session.execute(sql)
        except Exception as e:
            print(e)
    def execute_query(self, sql):
        """Run a query and return the result as a pandas DataFrame."""
        return pd.read_sql(sql, self.engine)
    def execute_insert(self, tableName, df, if_exists='append'):
        # 'append' adds rows; 'replace' drops and recreates the table.
        df.to_sql(name=tableName, con=self.engine, if_exists=if_exists, index=False, index_label=False, chunksize=10000)
        print('{}插入CK成功!!!'.format(tableName))
    def spark_insert_ck(self, tableName,spark_df,if_exists='append'):
        '''Write a Spark DataFrame to ClickHouse via JDBC.

        NOTE(review): left unfinished upstream — the JDBC driver class was
        missing at runtime (ClassNotFoundException: LoggerFactory).
        '''
        properties = {'driver': 'ru.yandex.clickhouse.ClickHouseDriver',
                      "socket_timeout": "300000",
                      "rewriteBatchedStatements": "true",
                      "batchsize": "10000",
                      "numPartitions": "8",
                      'user': 'default',
                      'password': '',
                      'isolationLevel': 'NONE'}
        spark_df.write.jdbc(url='jdbc:clickhouse://default:''@hadoop102:8123/hive',table=tableName, mode=if_exists, properties=properties)
    def spark_read_ck(self, tableName,spark_df):
        """Read a ClickHouse table through Spark's JDBC reader.

        NOTE(review): the URL contains a literal '{url}' placeholder (no
        .format call) — confirm the intended host before use.
        """
        properties = {'driver': 'ru.yandex.clickhouse.ClickHouseDriver',
                      "socket_timeout": "300000",
                      "rewriteBatchedStatements": "true",
                      "batchsize": "10000",
                      "numPartitions": "8",
                      'user': 'default',
                      'password': ''}
        spark_df.read.jdbc(url='jdbc:clickhouse://{url}:8123/hive',table=tableName, properties=properties)
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Bug fixes: (1) the context-manager protocol passes three exception
        # arguments (the original ``__exit__(self)`` raised TypeError);
        # (2) ``self.cursor`` only exists after execute() has been called,
        # so guard before closing it.
        cursor = getattr(self, 'cursor', None)
        if cursor is not None:
            cursor.close()
        self.session.close()
        self.engine.dispose()
self.engine.dispose()
# python /opt/code/pythonstudy_space/05_quantitative_trading_hive/util/DBUtils.py
if __name__ == '__main__':
    # Ad-hoc smoke test: build a tiny DataFrame, push it through Spark and
    # write it into ClickHouse over JDBC.
    # sql = 'SHOW TABLES'
    # sql = 'select * from test'
    appName = os.path.basename(__file__)
    spark = get_spark(appName)
    df = pd.DataFrame({"json": ['c', 'd']})
    print(df)
    # spark_df = spark.sql("""
    # select trade_date,
    #        industry_plate_code as plate_code,
    #        industry_plate as plate_name,
    #        open_price
    # from stock.ods_dc_stock_industry_plate_hist_di
    # where td = '2023-02-07'
    # """)
    spark_df = spark.createDataFrame(df)
    print(spark_df.show())
    properties = {'driver': 'ru.yandex.clickhouse.ClickHouseDriver',
                  "socket_timeout": "300000",
                  "rewriteBatchedStatements": "true",
                  "batchsize": "10000",
                  "numPartitions": "8",
                  'user': 'default',
                  'password': '',
                  'isolationLevel': 'NONE'}
    # spark_df.write.jdbc(url='jdbc:clickhouse://default:''@hadoop102:8123/hive', table='test', mode='append',properties=properties)
    # NOTE(review): '{url}' below is a literal placeholder (no .format call)
    # — this write presumably fails; confirm the intended ClickHouse host.
    spark_df.write.jdbc(url='jdbc:clickhouse://{url}:8123/hive', table='test', mode='append',properties=properties)
    clickhouseUtil().spark_insert_ck('test',spark_df)
    spark.stop()
    print('插入成功!!!')
print('插入成功!!!') | cgyPension/pythonstudy_space | 05_quantitative_trading_hive/util/DBUtils.py | DBUtils.py | py | 6,392 | python | en | code | 7 | github-code | 36 |
25043548577 | # coding:utf-8
import os
# Flask application configuration.
DEBUG = True
# Absolute path of the directory containing this config file.
basedir = os.path.abspath(os.path.dirname(__file__))
# SQLite database file and migration repository live next to the config.
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'myweb.db')
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_COMMIT_TEARDOWN = True
# CSRF protection disabled for WTForms (API-style usage).
WTF_CSRF_ENABLED = False
# User-facing (Chinese) message strings keyed by event:
# create_user_success/error, signauth_error (login auth failure),
# login_error (login failed / missing parameters).
parameter={
    "create_user_success":"创建用户成功",
    "create_user_error":"创建用户失败 ",
    "signauth_error":"登录认证错错误",
    "login_error":"登录失败确认是否缺少参数",
}
# Module note (translated): authentication status codes — 200 means
# success, 404 means failure.
'''
设置认证
200表示成功
404表示失败
'''
22808427576 | #!/usr/bin/env python
#
# Looks through the run directory for "*.enzotest" and makes a csv spreadsheet of all answer test scripts and their properties.
#
# Author: David Collins (dcollins4096@gmail.com), 2011-06-14 11:19 AM. It's a bright sunny day here in Los Alamos.
#
import fnmatch
import os
# Hunt for *.enzotest parameter files under the current directory.
dbg = 0
matches = []
for root, dirnames, filenames in os.walk('.'):
    for filename in fnmatch.filter(filenames, '*.enzotest'):
        matches.append(os.path.join(root, filename))
# Parse each file into a dict of "key = value" attributes (trailing comments
# stripped) and grow attribute_list with any keys not already known, so every
# file maps onto the same spreadsheet columns.
tests = {}
attribute_list = ['name', 'nprocs', 'max_time_minutes', 'dimensionality', 'runtime',
                  'answer_testing_script', 'hydro', 'gravity', 'cooling', 'chemistry',
                  'cosmology', 'author', 'mhd', 'radiation', 'AMR']
for path in matches:
    if dbg > 0:
        print(path)
    with open(path, 'r') as fh:
        lines = fh.readlines()
    tests[path] = {}
    for line in lines:
        if line.strip():
            key, value = line.split("=")
            if value[-1] == '\n':
                value = value[:-1]
            if value.count("#") > 0:
                value = value[0:value.index("#")]
            key = key.strip()
            value = value.strip()
            if key not in attribute_list:
                attribute_list.append(key)
            tests[path][key] = value
# Write the csv: a header row, then one row per test file; missing
# attributes become empty cells. The stray ``dir( csv)`` call in the
# original was dead code and has been removed; files are now closed via
# context managers.
with open('test_spreadsheet.csv', 'w') as out:
    out.write("filename" + ",%s" * len(attribute_list) % tuple(attribute_list) + "\n")
    for path in matches:
        out.write(path)
        for attr in attribute_list:
            out.write(", %s" % (tests[path].get(attr, '')))
        out.write("\n")
#end
| enzo-project/enzo-dev | run/test_makespreadsheet.py | test_makespreadsheet.py | py | 1,609 | python | en | code | 72 | github-code | 36 |
11761396412 | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the solve function below.
def solve(s):
    """Capitalize the first character of each space-separated word in *s*.

    Only the first character of each word is uppercased; every other
    character is left untouched, so "1 w 2 r 3g" becomes "1 W 2 R 3g".
    Spaces (including runs of spaces) are preserved as-is.

    Fixes the original's ``s[i] is " "`` identity comparison — comparing
    strings with ``is`` relies on CPython interning and should be ``==`` —
    and drops the redundant per-iteration ``i == 0`` check.
    """
    result = []
    capitalize_next = True  # the very first character starts a word
    for ch in s:
        if ch == " ":
            capitalize_next = True
            result.append(ch)
        elif capitalize_next:
            result.append(ch.upper())
            capitalize_next = False
        else:
            result.append(ch)
    return "".join(result)
if __name__ == '__main__':
    # HackerRank harness: read one line from stdin, write the capitalized
    # result to the file named by the OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    s = input()
    result = solve(s)
    fptr.write(result + '\n')
    fptr.close()
| g2des/taco4ways | hackerrank/python/capitalize_exc.py | capitalize_exc.py | py | 718 | python | en | code | 0 | github-code | 36 |
2909760362 | import streamlit as st
import leafmap.kepler as leafmap
import geopandas as gpd
def app():
    """Streamlit page: map of zoned buildings by type plus a per-plan floor-area table.

    Loads plan features from the pygeoapi endpoint, filters to building
    regulations, renders a Kepler map and a grouped floor-area summary.
    """
    st.title("Kaavoitetut rakennukset tyypeittäin")
    st.markdown(
        """
    Väritä, visualisoi ja filtteröi aineistoa kartan vasemmasta yläkulmasta avautuvan työkalupakin avulla.
    """
    )
    m = leafmap.Map(center=[60.174, 24.802], zoom=15.5, height=600, widescreen=False)
    # Fetch up to 1000 plan features; cast categorical columns to str so the
    # map/table tooling treats them uniformly.
    gdf = gpd.read_file("http://pygeoapi-testing.gispocoding.fi/collections/koonti_koko_suomi_kaavakohteet/items?f=json&limit=1000")
    gdf['kaavoitusteema'] = gdf['kaavoitusteema'].astype('str')
    gdf['kaavamaarayslaji'] = gdf['kaavamaarayslaji'].astype('str')
    # Keep only rows whose regulation type mentions buildings ("rakennus").
    df = gdf[gdf["kaavamaarayslaji"].str.contains("rakennus")]
    df = df[['id_kaava','kaavoitusteema','kaavamaarayslaji', 'numeerinen_arvo']]
    # NOTE(review): the result of this groupby/value_counts is discarded —
    # presumably leftover exploration; confirm whether it should be displayed.
    df.groupby('id_kaava')['kaavamaarayslaji'].value_counts()
    m.to_streamlit(height=700)
    st.markdown(
        """
    ## Yhteenlaskettu kerrosala kaavakohtaisesti
    """
    )
    # Sum the numeric value (floor area) per plan id and show it as a table.
    df = gdf[["id_kaava","kaavoitusteema", "kaavamaarayslaji", "numeerinen_arvo"]]
    st.dataframe(df.groupby('id_kaava')['numeerinen_arvo'].sum().rename_axis('Kaava-id').reset_index(name='Määrä'), width=400)
30222648461 | import torch.nn as nn
class RPN(nn.Module):
    """Region Proposal Network head.

    Two parallel 1x1 convolutions over a shared feature map produce, per
    anchor and spatial position, a 2-way objectness score and a 4-value
    bounding-box regression.
    """

    def __init__(self, input_channels, anchor_count):
        super().__init__()
        # 1x1 conv emitting 2 objectness logits per anchor.
        self.ContainsObjectClassifier = nn.Conv2d(
            in_channels = input_channels,
            out_channels = 2*anchor_count,
            kernel_size = 1,
            stride = 1,
            padding = 0)
        # 1x1 conv emitting 4 box-regression values per anchor.
        self.RegionRegressor = nn.Conv2d(
            in_channels = input_channels,
            out_channels = 4*anchor_count,
            kernel_size = 1,
            stride = 1,
            padding = 0)

    def forward(self, features):
        """Return (class_predictions, region_predictions).

        class_predictions:  (batch, 2, anchors, H, W)
        region_predictions: (batch, 4, anchors, H, W)
        """
        objectness = self.ContainsObjectClassifier(features)
        batch, channels, height, width = objectness.shape
        # Split the channel axis into (score, anchor) sub-axes.
        class_predictions = objectness.view(batch, 2, channels // 2, height, width)

        boxes = self.RegionRegressor(features)
        batch, channels, height, width = boxes.shape
        # Split the channel axis into (box-coordinate, anchor) sub-axes.
        region_predictions = boxes.view(batch, 4, channels // 4, height, width)

        return class_predictions, region_predictions
41217273106 | # coding=utf-8
from tkinter import Button
__author__ = 'zjutK'
'''__call__使用'''
class Call(object):
    """Demonstrates ``__call__``: instances are callable and echo arguments."""

    def __call__(self, *args, **kwargs):
        # Echo positional and keyword arguments exactly as received.
        print('Called:', args, kwargs)
class ColorBack(object):
    """Callable that announces its configured colour when invoked."""

    def __init__(self, color):
        # Colour printed on every call.
        self.color = color

    def __call__(self, *args, **kwargs):
        print('turn', self.color)
if __name__ == '__main__':
    # Demo: plain callable instance echoing its arguments.
    C = Call()
    C(1, 2, 3)  # Called: (1, 2, 3) {}
    C(1, 2, 3, a='qq', b='lll')  # Called: (1, 2, 3) {'b': 'lll', 'a': 'qq'}
    # Callable instances can be used directly as Tk button callbacks.
    cb1 = ColorBack('blue')
    cb2 = ColorBack('red')
    print(cb1())
    # NOTE(review): creating a Button without an explicit Tk root relies on
    # a default root being available — confirm this runs in your environment.
    B1 = Button(command=cb1)
    print(B1)
| kzrs55/learnpython | oop/call.py | call.py | py | 623 | python | en | code | 0 | github-code | 36 |
17364039643 | # Imports from Third Party Modules
from bs4 import BeautifulSoup
from urllib2 import urlopen
BASE_URL = "http://www.portlandhikersfieldguide.org"
REGIONS = ['Gorge', 'Mount Hood', 'Central OR', 'OR Coast', 'East OR',
'South OR', 'Portland', 'SW WA', 'WA Coast']
REGION_INDEXS = [
'http://www.portlandhikersfieldguide.org/wiki/Category:Columbia_River_Gorge', # noqa
'http://www.portlandhikersfieldguide.org/wiki/Category:Mount_Hood_Area',
'http://www.portlandhikersfieldguide.org/wiki/Category:Central_Oregon',
'http://www.portlandhikersfieldguide.org/wiki/Category:Oregon_Coast',
'http://www.portlandhikersfieldguide.org/wiki/Category:Eastern_Oregon',
'http://www.portlandhikersfieldguide.org/wiki/Category:Southern_Oregon',
'http://www.portlandhikersfieldguide.org/wiki/Category:Portland',
'http://www.portlandhikersfieldguide.org/wiki/Category:Southwest_Washington', # noqa
'http://www.portlandhikersfieldguide.org/wiki/Category:Washington_Coast'
]
EXCLUDE_LINKS = [
'http://www.portlandhikersfieldguide.org/wiki/148th_Avenue_Trailhead',
'http://www.portlandhikersfieldguide.org/wiki/Quartz_Creek_Trailhead',
'http://www.portlandhikersfieldguide.org/wiki/Jefferson_Park_from_South_Breitenbush_Trailhead', # noqa
'http://www.portlandhikersfieldguide.org/wiki/Latourell_Falls_Trailhead',
]
def make_soup(url):
    """Fetch *url* and return its HTML parsed into a BeautifulSoup tree (lxml)."""
    html = urlopen(url).read()
    return BeautifulSoup(html, "lxml")
def get_region_pages(region_url_list):
    """Follow wiki pagination links and return all index pages for a region.

    Mutates *region_url_list* in place: starting from its last entry, it
    fetches the page, finds the pagination anchors in the ``mw-pages`` div,
    appends any "next page" link (one whose URL contains ``pagefrom``) and
    recurses until no further page exists. Returns the (same) list.
    """
    soup = make_soup(region_url_list[-1])
    pages = soup.find('div', id='mw-pages')
    region_links = [BASE_URL + a['href'] for a in pages.findAll('a', limit=2)]
    for link in region_links:
        # str.find == -1 means "not found", so this keeps links containing
        # 'pagefrom' (the MediaWiki next-page marker).
        if not link.find('pagefrom') == -1:
            region_url_list.append(link)
            get_region_pages(region_url_list)
    return region_url_list
def create_regions_dict():
    """Map each region name to the full list of its paginated index URLs."""
    return {
        region: get_region_pages([index_url])
        for region, index_url in zip(REGIONS, REGION_INDEXS)
    }
def get_trailhead_links(section_url):
    """Return the trailhead page URLs listed on one region index page.

    Keeps only links whose URL ends with 'Trailhead' and that are not in
    the EXCLUDE_LINKS blacklist.
    """
    soup = make_soup(section_url)
    content = soup.find('div', 'mw-content-ltr')
    hike_links = [BASE_URL + li.a['href'] for li in content.findAll('li')]
    trailhead_links = []
    for hike in hike_links:
        if hike.endswith('Trailhead') and hike not in EXCLUDE_LINKS:
            trailhead_links.append(hike)
    return trailhead_links
def get_trailhead_details(section_url):
    """Scrape one trailhead page.

    Returns (trailhead_name, lat_long, hike_links) where lat_long is the
    first two list items of the page body (presumably latitude/longitude —
    TODO confirm against the wiki layout) and hike_links are the hikes
    listed under the first section heading, minus external/.php links.
    """
    soup = make_soup(section_url)
    content = soup.find('div', 'mw-content-ltr')
    trailhead_name = soup.find('h1').string
    hikes_here = soup.find('span', 'mw-headline')
    hikes_here = hikes_here.findNext('ul')
    hike_links = [BASE_URL + li.a['href'] for li in hikes_here.findAll('li')]
    lat_long = [li.string for li in content.findAll('li', limit=2)]
    good_hike_links = []
    for hike in hike_links:
        # Drop script endpoints and external USDA links.
        if hike.find('.php') == -1 and hike.find('usda') == -1:
            good_hike_links.append(hike)
    return trailhead_name, lat_long, good_hike_links
def get_hike_details(section_url):
    """Scrape one hike page and return (hike_name, hike_details).

    hike_details is the list of strings from the second <ul> after the
    page title (the wiki's stats list: distance, elevation, etc. —
    presumably; TODO confirm against the page template).
    """
    soup = make_soup(section_url)
    hike_name = soup.find('h1').string
    hikes_here = hike_name.findNext('ul').findNext('ul')
    hike_details = [li.string for li in hikes_here.findAll('li')]
    return hike_name, hike_details
def write_to_file(filename, dict):
    """Write each key/value pair of *dict* to *filename*, one "\\nkey\\tvalue" per pair.

    Each key is also echoed to stdout. A pair that fails to format is
    replaced by the line "\\nunicode error" (legacy Python-2 behavior).
    NOTE: the parameter name shadows the builtin ``dict``; kept for
    backward compatibility with keyword callers.
    """
    # ``with`` guarantees the handle is closed even if a write raises.
    with open(filename, 'w') as f:
        for key, value in dict.items():
            try:
                print(key)
                f.write("\n{}\t{}".format(key, value))
            except Exception:
                # Narrowed from the original bare ``except BaseException``,
                # which also swallowed KeyboardInterrupt/SystemExit.
                f.write("\nunicode error")
# Scraping pipeline (network-heavy; runs at import time):
# regions -> trailhead links -> trailhead details -> hike details,
# finally dumping the collected data to tab-separated files.
# initialize dictionary variables to links for next stage
trailhead_links_dict = {}
hike_links_dict = {}
# initialize dictionary variables to hold date for each section to send to file
region_count_dict = {}
trailheads_dict = {}
hike_details_dict = {}
# compile all the links for regional sub pages
region_dict = create_regions_dict()
# follow all region sub page links to gather links to trailheads and get
# count of trailheads per region.
for key, value in region_dict.items():
    trailhead_links = []
    for link in value:
        links = get_trailhead_links(link)
        trailhead_links += links
    trailhead_links_dict[key] = trailhead_links
    region_count_dict[key] = len(trailhead_links)
# follow all trailhead links by region to get hike links and trailhead details
# (lat/long, count of hikes).
for key, value in trailhead_links_dict.items():
    for link in value:
        if link not in EXCLUDE_LINKS:
            print(link)
            name, coords, hikes = get_trailhead_details(link)
            hike_links_dict[name] = hikes
            trailheads_dict[name] = (key, coords, len(hikes))
# follow all hike links by trailhead to get details for each hike
for key, value in hike_links_dict.items():
    for link in value:
        if link not in EXCLUDE_LINKS:
            print(key, link)
            name, details = get_hike_details(link)
            hike_details_dict[name] = (key, details)
# Persist the scraped hike and trailhead data.
write_to_file('hikedetails', hike_details_dict)
write_to_file('trailheads', trailheads_dict)
| RAINSoftwareTech/hiketheplanet | backend/datascrape.py | datascrape.py | py | 5,124 | python | en | code | 1 | github-code | 36 |
20736597554 | import random
import datetime
st = datetime.datetime.now()
tai_moji = 10 # 対象文字数
ke_moji = 2 # 欠損文字数
chance = 2 # 試行回数
def shutudai(alh):
    """Set up one round of the missing-letter quiz.

    Samples ``tai_moji`` target characters from *alh*, hides ``ke_moji`` of
    them, prints the full target set, the visible set, and (for debugging)
    the hidden characters. Returns the list of hidden characters.
    Relies on the module-level constants ``tai_moji`` and ``ke_moji``.
    """
    moji = random.sample(alh, tai_moji)
    print("対象文字", end = " ")
    for i in moji:
        print(i, end = " ")
    print()
    # Pick the characters to hide from the chosen set.
    nai_moji = random.sample(moji, ke_moji)
    print("表示文字", end = " ")
    for i in moji:
        if i not in nai_moji:
            print(i, end = " ")
    print()
    print("デバック用欠損文字", nai_moji)
    return nai_moji
def kaito(ans):
    """Ask the player for the missing-character count and the characters.

    Mutates *ans* (removes correctly guessed characters). Returns True on a
    perfect round, False otherwise.
    NOTE(review): the guessing ``for`` loop (and its for/else success
    branch) is nested inside ``if num != ke_moji:``, so answering the
    *correct* count skips guessing entirely and the function returns None
    (falsy) — likely an indentation bug; confirm intended game flow.
    """
    num = int(input("欠損文字はいくつあるでしょうか?:"))
    if num != ke_moji:
        print("不正解です")
        # Let the player guess ``num`` characters; any wrong guess ends the round.
        for i in range(num):
            a = input(f"{i + 1}文字目を入力してください:")
            if a not in ans:
                print("不正解です。残念です")
                return False
            else:
                ans.remove(a)
        else:
            # for/else: runs only when every guess was correct.
            print("完全クリアです!!!!")
            return True
        return False
if __name__ == "__main__":
alh = [chr(i + 65) for i in range(26)]
nai_moji = shutudai(alh)
for i in range(chance):
hantei = kaito(nai_moji)
if hantei:
break
else:
print("-" * 20)
ed = datetime.datetime.now()
print(f"所要時間 :{(ed-st).seconds} s") | c0b2108596/ProjExD | ex01/alphabet.py | alphabet.py | py | 1,363 | python | ja | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.