seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
from flask import Flask
import nltk
nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from flask import jsonify

# Single shared VADER analyzer; scoring is stateless so reuse is safe.
sid = SentimentIntensityAnalyzer()
# BUG FIX: was Flask(name) -- `name` is undefined; the Flask app object
# must be constructed with the module's __name__.
app = Flask(__name__)


def analysis(input1):
    """Classify *input1* with VADER sentiment analysis.

    Returns a (label, scores) tuple where label is 'positive',
    'negative' or 'neutral' (sign of the compound score) and scores is
    the full VADER dict with 'neg', 'neu', 'pos' and 'compound' keys.
    """
    output = sid.polarity_scores(input1)
    if output['compound'] > 0:
        return "positive", output
    elif output['compound'] < 0:
        return "negative", output
    else:
        return "neutral", output


@app.route("/")
def homepage():
    """Describe the API on the root endpoint."""
    return "Api returns sentiment,compound,positive,negative,neutral"


@app.route('/<string:sentence>/')
def hello(sentence):
    """Return sentiment label and VADER scores for *sentence* as JSON."""
    analysed_sentence = analysis(sentence)
    return jsonify({"sentiment": analysed_sentence[0],
                    "compound": analysed_sentence[1]['compound'],
                    "positive": analysed_sentence[1]['pos'],
                    "negative": analysed_sentence[1]['neg'],
                    "neutral": analysed_sentence[1]['neu']})


# BUG FIX: was `if name == 'main'` -- standard entry-point guard so the
# dev server only starts when the file is executed directly.
if __name__ == '__main__':
    app.run()
72467371219 |
# Returns True if the given CSP solution dictionary csp_sol satisfies all
# the constraints in the friendship graph, and False otherwise.
# Returns True if the given CSP solution dictionary csp_sol satisfies all
# the constraints in the friendship graph, and False otherwise.
def check_teams(graph, csp_sol):
    """Return True iff no pair of friends was assigned the same team.

    graph maps each person to a list of friends; csp_sol maps each
    person to a team label. A friendship edge whose two endpoints share
    a team violates the constraint, so the solution is rejected.
    """
    return all(
        csp_sol[person] != csp_sol[friend]
        for person, friends in graph.items()
        for friend in friends
    )
| emilychen98/aima-python | a2_q2.py | a2_q2.py | py | 441 | python | en | code | 0 | github-code | 13 |
73212294416 | from imports_nps import *
class ProcessROI:
"""
Calculate NPS and create files with results.
Attributes
----------
workbook_series : XlsxWriter's Workbook object
Workbook containing NPS info for each image in current series.
Each worksheet (except the last one) contains NPS info for current dcm-image.
Last worksheet contains averaged NPS info and is called 'averaged'.
Near description is to find in the manual.
workbook_averaged : XlsxWriter's Workbook object
Workbook containing all averaged worksheets of attributes workbook_series
for current study.
I.e. each worksheet contains averaged NPS info for current series.
workbook_summary : openpyxl's Workbook object
Workbook containing summarized information of averaged NPS xlsx-files.
worksheet_averaged : XlsxWriter's Worksheet object
Worksheet of attribute workbook_averaged.
worksheet_summary : openpyxl'S Worksheet object
Worksheet of attribute workbook_summary.
col_number, col_folder, col_series, col_peak_freq,
col_peak_value, col_left_dev, col_right_dev,
col_area, col_ave_m_HU : strings
Excel letters for each information column in workbook_summary.
metadata_columns : list of strings
List of Excel letetrs for columns containing metadata info in
workbook_summary.
all_roi_dict : dict of dict of lists of tuples
(See attribute all_roi_dict of class GUI)
sorted_all_roi_dict : dict of dicts
Dict with same structure as attribute filedict of class StartClass.
The lists of files in series folder are transformed into dictionaries:
Keys : absolute path to image in current series folder
Values : list of tuples with ROIs' diagonal coordinates:
x0 - x coordinate of left upper corner
y0 - y coordinate of left upper corner
x1 - x coordinate of right lower corner
y1 - y coordinate of right lower corner
image_rect_coord : list
(See attribute image_rect_coord of class GUI)
image_rect_coord_record : list
(See attribute image_rect_coord_record of class GUI)
roi_image_mean_HU : list of floats
List of mean HU for each ROI in current image.
image_sd : list of floats
List of standard deviation for each ROI in current image.
all_mean_HU_dict : dict of lists of floats
Dict of attributes roi_image_mean_HU for current series folder.
Keys : paths to dcm-image file.
Values : respective attribute roi_image_mean_HU
(See description of roi_image_mean_HU above)
all_SD_dict : dict of lists of floats
Dict of attributes image_sd for current series folder.
Keys : paths to dcm-image file.
Values : respective attribute image_sd
(See description of image_sd above)
nps_image : list of dicts
For current image.
List of range_dict attributes for each ROI
on current image.
For each ROI on image, range_dict consists of:
'values': interpolated NPS list of the ROI
'frequencies': respective frequencies
'AUC': area under not interpolated 1d-NPS profile
'integral_of_2d_nps': integral of 2d NPS of teh ROI
(not interpolated!!!)
all_nps_dict : dict_ of lists of dicts
For images in current series folder.
Keys : paths to images in current series folder
Values : nps_image attributes for respective image
all_average_nps : dict of dicts
For images in current series folder.
Keys : absolute paths to images in current series folder.
Values : dict
Key : 'value'
Value : NPS list of averaged ROIs' NPS lists for respective image.
Key : 'frequencies'
Value : list of respective frequencies.
mean_of_averaged_nps_dict : dict
For current series folder.
'values' : NPS list averaged among all images in current series folder.
'frequencies' : respective frequencies.
peak_info_dict_ave : dict
Peak information of NPS list averaged among images in current series folder.
Keys : 'mean_value' (peak NPS value)
'mean_freq' (peak NPS frequency)
'left_dev' (freq distance between peak freq and freq, at which NPS
sinks under 60% of peak NPS value when moving to left)
'right_dev' (freq distance between peak freq and freq, at which NPS
sinks under 60% of peak NPS value when moving to right)
Methods
-------
"""
def __init__(self, *, obj_roi, obj_arr, fit_order,
             crop_perc, useFitting, im_height_in_mm,
             im_width_in_mm, extensions, trunc_percentage,
             useCentralCropping, start_freq_range, end_freq_range, step,
             useTruncation, multipleFiles, pixel_size_in_mm, first_data_set):
    """
    Initialize all bookkeeping attributes and sort all_roi_dict.
    (See all_roi_dict attribute's description in class GUI.)

    :param obj_roi: instance of class GUI
        Used to access its attributes all_roi_dict, master,
        image_rect_coord, image_rect_coord_record and array.
    :param obj_arr: instance of class StartClass
        Used to access its attributes filedict and metadata_subdict
        and its method create_base_array().
    :param fit_order: int (1 or 2)
        Order of 2d fit of the image for background removal.
    :param crop_perc: float
        Cropping percentage.
    :param useFitting: boolean
        True: use 2d polynomial fitting for background removal;
        False: use mean-value subtraction.
    :param im_height_in_mm: float or string 'undefined'
        Height of current dcm-image in mm.
    :param im_width_in_mm: float or string 'undefined'
        Width of current dcm-image in mm.
    :param extensions: list of strings
        Extensions of files to be searched for, in selected order.
    :param trunc_percentage: float
        Percentage of maximum value under which NPS is truncated
        at higher frequencies.
    :param useCentralCropping: boolean
        Whether central cropping should be applied.
    :param start_freq_range: float
        Start frequency of interest (from init_dict).
    :param end_freq_range: float
        End frequency of interest (from init_dict).
    :param step: float
        Interval between frequency samples in the frequency array.
    :param useTruncation: boolean
        Whether to apply the truncation described for trunc_percentage.
    :param multipleFiles: unused here; stored for compatibility.
    :param pixel_size_in_mm: float
        Default pixel size in mm, used for non-DICOM files.
    :param first_data_set: boolean
        Selects the folder structure of the dataset (see manual).
    """
    print('Constructor of class ProcessROI is being executed')
    # ************Initialization of attributes*********************
    # all xlsx-files
    self.all_xlsx_range = []
    self.all_xlsx = []
    # dictionary for averaged nps and freqs
    self.all_average_nps = {}
    # info about all averaged nps
    self.all_nps_peak_info = {}
    self.all_nps_peak_info_ave = {}
    # paths to all cropped images
    self.all_cropped_im = []
    # paths to all one d nps images
    self.all_1_d_nps = []
    # collect all mean HU for each ROI
    self.image_mean_HU = []
    # collect mean HU for each image
    self.all_mean_HU_dict = {}
    # collect all st dev in dictionary
    self.all_SD_dict = {}
    # dictionary for roi sizes in each image
    self.roi_size_dict = {}
    # dictionary for AUC
    self.auc_dict = {}
    # dictionary for integral of 2d NPS
    self.integral_2d_nps_dict = {}
    # whether central cropping should be applied
    self.useCentralCropping = useCentralCropping
    # whether lower nps should be deleted
    self.useTruncation = useTruncation
    # extensions of image files
    self.extensions = extensions
    # image measurements in mm
    self.im_height_in_mm = im_height_in_mm
    self.im_width_in_mm = im_width_in_mm
    # truncate lower nps
    self.trunc_percentage = trunc_percentage
    # maximal size of the image (height or width)
    # NOTE(review): if either measurement is the string 'undefined',
    # max() on mixed str/float raises TypeError in Python 3 -- confirm
    # callers always pass numbers here.
    self.max_size = max(im_height_in_mm, im_width_in_mm)
    # cropping percentage
    self.crop_perc = crop_perc
    # fitting order for 2d-fit
    self.fit_order = fit_order
    # whether fitting should be applied
    # or background removal should be used
    self.useFitting = useFitting
    # remove raw csv-files
    self.files_to_remove = []
    self.pixel_size_in_mm = pixel_size_in_mm
    self.multipleFiles = multipleFiles
    # declaring attributes, that are specified later
    self.nps = []
    self.new_dict = {}
    # type of data set
    self.first_data_set = first_data_set
    self.object_roi = obj_roi
    self.object_arr = obj_arr
    # NOTE: duplicate of the assignment above; kept for byte-compatibility
    self.fit_order = fit_order
    self.all_roi = self.object_roi.all_roi_dict
    self.directories_dict = self.object_arr.filedict
    self.metadata_subdict = self.object_arr.metadata_subdict
    # list for paths to Excel-files
    self.xlsx_paths = []
    # numbers of rows and columns of ROI pixel array
    self.px_width = self.object_roi.array.shape[1]
    self.px_height = self.object_roi.array.shape[0]
    # self.arrays_dict = self.object_roi.arrays_dict
    self.all_roi_dict = self.object_roi.all_roi_dict
    self.image_rect_coord = self.object_roi.image_rect_coord
    self.image_rect_coord_record = self.object_roi.image_rect_coord_record
    # ************End of initialization*********************
    # get metadata and prepare for creating summary_info xlsx
    self.headers_list = ['Number', 'Folder', 'Series', 'peak_freq', 'peak_value',
                         'left_dev', 'right_dev', 'area', 'Integral',
                         'ave_m_HU', 'ave_SD']
    self.metadata_headers = [key for key in self.metadata_subdict]
    self.headers_list += self.metadata_headers
    # initialize start row (row of first series in current folder)
    self.start_row = 3
    # create xlsx-file, that will contain summary information
    self.name_workbook_summary = 'Summary_information.xlsx'
    self.workbook_summary = opxl.Workbook()
    # create single worksheet in this workbook
    self.worksheet_summary = self.workbook_summary.active
    # get column letters and set fixed widths for the summary sheet
    self.col_number = opxl.utils.get_column_letter(1)
    self.worksheet_summary.column_dimensions[self.col_number].width = 6.73
    self.col_folder = opxl.utils.get_column_letter(2)
    self.worksheet_summary.column_dimensions[self.col_folder].width = 30.00
    self.col_series = opxl.utils.get_column_letter(3)
    self.worksheet_summary.column_dimensions[self.col_series].width = 5.18
    self.col_peak_freq = opxl.utils.get_column_letter(4)
    self.worksheet_summary.column_dimensions[self.col_peak_freq].width = 9.09
    self.col_peak_value = opxl.utils.get_column_letter(5)
    self.worksheet_summary.column_dimensions[self.col_peak_value].width = 9.2
    self.col_left_dev = opxl.utils.get_column_letter(6)
    self.col_right_dev = opxl.utils.get_column_letter(7)
    self.col_area = opxl.utils.get_column_letter(8)
    self.col_int_2d_nps = opxl.utils.get_column_letter(9)
    self.col_ave_m_HU = opxl.utils.get_column_letter(10)
    self.worksheet_summary.column_dimensions[self.col_ave_m_HU].width = 10.00
    self.col_ave_SD = opxl.utils.get_column_letter(11)
    # metadata columns follow the eleven fixed columns (offset 12)
    self.metadata_columns = [opxl.utils.get_column_letter(i + 12) for i
                             in range(len(self.metadata_headers))]
    for col in self.metadata_columns:
        self.worksheet_summary.column_dimensions[col].width = 33.00
    # name of xlsx-file
    self.name_xlsx = 'NPS_ranged_GUI.xlsx'
    # letters as column names in excel
    self.letters_for_excel = ['A', 'B', 'C', 'D', 'E',
                              'F', 'G', 'H', 'I', 'J',
                              'K', 'L', 'M', 'N', 'O',
                              'P', 'Q', 'R', 'S', 'T',
                              'U', 'V', 'W', 'X', 'Y',
                              'Z', 'AA', 'AB', 'AC', 'AD',
                              'AE', 'AF', 'AG', 'AH', 'AI',
                              'AJ', 'AK', 'AL']
    # freq range: evenly spaced samples from start to end, step apart
    self.start_freq = start_freq_range
    self.end_freq = end_freq_range
    self.num_of_steps = int((self.end_freq - self.start_freq) // step + 1)
    self.freq_range = np.linspace(start=self.start_freq,
                                  stop=self.end_freq,
                                  num=self.num_of_steps)
    # sort all_roi_dict to mirror the study/series/image folder structure
    self.sorted_all_roi_dict = ProcessROI.sort_all_roi_dict(directories_dict=self.directories_dict,
                                                            all_roi_dict=self.all_roi)
    print('Constructor of class ProcessROI is done')
def execute_calc_nps_sorted(self):
    """
    Main method of the class ProcessROI.
    Calculate NPS by iterating over pixel array of each ROI.
    Create xlsx-files with results.

    BUG FIX: the first_data_set branch of the series loop referenced an
    undefined name `serie`; it now uses the loop variable `series`.

    :return: nothing
    """
    ave_folder = GUI.create_aux_folder(cur_fold=os.getcwd(), folder_name='Only_averaged_sheets')
    # iterate over keys of the passed dict, i.e. folder paths
    for self.num_folder, self.folder in enumerate(self.sorted_all_roi_dict):
        # log the process
        print('\n\nFolder %s is been processed: %d of %d\n\n' % (os.path.basename(self.folder), self.num_folder + 1,
                                                                 len(self.sorted_all_roi_dict)))
        if self.first_data_set:
            self.folder_part = os.path.basename(self.folder)
        else:
            self.folder_part = ProcessROI.drop_part_of_name(
                name=os.path.basename(self.folder),
                pattern_of_dropped_part=r' \- \d+',
                dropped_from_end=True)
        # create workbook for only averaged sheets
        name_averaged_workbook = ave_folder + '/' + self.folder_part + '.xlsx'
        self.workbook_averaged = xlsx.Workbook(name_averaged_workbook)
        # iterate over series
        for self.num_series, series in enumerate(self.sorted_all_roi_dict[self.folder]):
            # shared with other methods that time the series run
            global start_time_series
            start_time_series = time.time()
            if self.first_data_set:
                # BUG FIX: was `serie` (undefined) -- must be `series`
                self.serie_part = series
                self.folder_part = os.path.basename(self.folder)
            else:
                self.serie_part = ProcessROI.drop_part_of_name(
                    name=series,
                    pattern_of_dropped_part=r'\w*\d*_',
                    dropped_from_end=False)[1:]
                self.folder_part = ProcessROI.drop_part_of_name(
                    name=os.path.basename(self.folder),
                    pattern_of_dropped_part=r' \- \d+',
                    dropped_from_end=True)
            # log the process
            print('\nseries %s: %d of %d\nFolder %d of %d\n' % (
                series, self.num_series + 1, len(self.sorted_all_roi_dict[self.folder]),
                self.num_folder + 1,
                len(self.sorted_all_roi_dict)
            ))
            # create per-study results folder
            GUI.create_aux_folder(cur_fold=self.folder, folder_name='Results_%s' % self.folder_part)
            # create worksheet to write averaged data into
            self.worksheet_averaged = self.workbook_averaged.add_worksheet(name=self.serie_part)
            name_for_xlsx = self.folder \
                + '/Results_%s/' % (self.folder_part) \
                + self.folder_part + \
                self.serie_part + '.xlsx'
            # open new workbook in Excel
            self.workbook_series = xlsx.Workbook(name_for_xlsx)
            self.execute_nps_comp(all_roi_dict=self.sorted_all_roi_dict[self.folder][series])
            print('++++++++++\n'
                  'execution time per series: %f seconds\n' % (time.time() - start_time_series))
            # rough ETA: remaining series times the duration of this one
            num_remaining_folders = len(self.sorted_all_roi_dict) - self.num_folder
            num_rem_series_in_folder = len(self.sorted_all_roi_dict[self.folder]) - self.num_series - 1
            time_for_one_series = time.time() - start_time_series
            remaining_time = ((num_remaining_folders - 1) * len(self.sorted_all_roi_dict[self.folder]) +
                              num_rem_series_in_folder) * time_for_one_series
            remaining_hours = remaining_time // 3600
            remaining_minutes = (remaining_time - remaining_hours * 3600) // 60
            remaining_seconds = remaining_time - remaining_minutes * 60
            print(
                '%d hours, %d minutes, %f seconds remain' % (remaining_hours, remaining_minutes, remaining_seconds))
        self.workbook_averaged.close()
        # increment start row for summary workbook
        self.start_row += self.num_series + 2
    self.workbook_summary.save(self.name_workbook_summary)
    if init_dict['destroy_main_window']:
        self.object_roi.master.destroy()
def execute_nps_comp(self, all_roi_dict):
    """
    Calculate NPS and side variables inside the series folder loop.

    Fills per-series dictionaries (all_average_nps, all_nps_dict,
    all_mean_HU_dict, all_SD_dict, auc_dict, integral_2d_nps_dict),
    computes per-image and per-series averaged NPS plus peak info,
    then writes the series workbook and closes it.

    :param all_roi_dict: dict
        (See attribute sorted_all_roi_dict: image path -> ROI tuples)
    :return: nothing
    """
    # flush dict of ave nps for the current series
    self.all_average_nps = {}
    # dictionary to store nps for each image
    self.all_nps_dict = {}
    # flush dictionaries
    self.all_mean_HU_dict = {}
    self.all_SD_dict = {}
    self.integral_2d_nps_dict = {}
    self.auc_dict = {}
    # iterate through all images
    for num_of_image, self.key_image in enumerate(all_roi_dict):
        # initialize list of image ROIs' AUC
        image_auc_list = []
        # initialize list of image ROI's integral of 2d NPS
        image_integral_2d_nps_list = []
        data_from_dicom = self.object_arr.create_base_array(self.key_image)
        metadata_from_dicom = data_from_dicom['whole_dcm']
        try:
            # DICOM tag (0028,0030) = Pixel Spacing [row, col] in mm
            pixel_spacing = [float(i) for i in metadata_from_dicom['0x0028', '0x0030'].value]
        except ValueError:
            # tag present but not numeric -- fall back to default size
            pixel_spacing = self.pixel_size_in_mm
            print('There is no property \'Pixel Spacing\'')
        except TypeError:
            # no metadata object at all -- hard-coded fallback spacing
            pixel_spacing = [0.378, 0.378]
        pixel_array_image = data_from_dicom['base_array']
        # if new series begins, remember its metadata once
        if num_of_image == 0:
            self.metadata = data_from_dicom['metadata_subdict']
        # build dict of mean HU and SD
        self.build_all_mean_HU_SD_dict(array_to_operate=pixel_array_image,
                                       all_roi_dict=all_roi_dict,
                                       key=self.key_image)
        print('ROIs on image %s are being processed: %d of %d; '
              'Folder %d of %d; '
              'series %d of %d ' % (os.path.basename(self.key_image),
                                    num_of_image + 1,
                                    len(all_roi_dict),
                                    self.num_folder + 1,
                                    len(self.sorted_all_roi_dict),
                                    self.num_series + 1,
                                    len(self.sorted_all_roi_dict[self.folder])))
        # counter_roi_inside_image = 0
        # list to store all nps for current image
        self.nps_image = []
        # collect lengths of one d nps of rois in image
        self.lengths = []
        self.image_roi_sizes = []
        # iterate through all rois inside one image
        for num_of_roi, self.item_roi in enumerate(all_roi_dict[self.key_image]):
            # ROI tuple is (x0, y0, x1, y1); numpy indexes rows first
            subarray = pixel_array_image[self.item_roi[1]:self.item_roi[3], self.item_roi[0]:self.item_roi[2]]
            # print progress
            print('ROI is being processed: %d of %d' % (num_of_roi + 1, len(all_roi_dict[self.key_image])))
            # basename of image without extensions
            self.basename = os.path.basename(self.key_image)[:-4]
            # basename of image with extension
            self.basename_w_ext = os.path.basename(self.key_image)
            # array from the image
            array_to_operate = subarray
            # get shape of the ROI
            shape_of_roi = array_to_operate.shape
            # store the shape in list
            self.image_roi_sizes.append(shape_of_roi)
            # # apply fitting of the image
            # self.create_pol_fit(array_to_operate)
            # create dictionary of nps and respective frequencies (unranged)
            # NOTE: local name `dict` shadows the builtin within this scope
            dict = self.compute_nps(array=array_to_operate, pixel_spacing=pixel_spacing)
            AUC = dict['AUC']
            integral_of_2d_NPS = dict['integral_of_2d_NPS']
            # append ROI's AUC und integral of 2d NPS to resp. lists
            image_auc_list.append(dict['AUC'])
            image_integral_2d_nps_list.append(dict['integral_of_2d_NPS'])
            if self.useTruncation:  # setting in init_dict
                # truncate lower nps and respective frequencies
                self.new_dict = self.truncate_nps_freq(dict=dict)
            else:
                # use nps_dict as it is
                self.new_dict = dict
            # create raw csv-file (with empty rows between data)
            # self.all_xlsx.append(self.create_xlsx_file_nps(dict=self.new_dict, prefix='One_D_NPS_'))
            # create nps range
            # get equations of lines connecting each two points of nps array
            eqs_prop = ProcessROI.nps_equation(self.new_dict['values'])
            # initialize empty list for ranged NPS (with specified distance
            # between samples)
            nps_range = []
            # we need this array because freq_range is restricted through
            # size of ROI, frequencies beyond the last available freq
            # should be truncated (dropped)
            new_freq_range = []
            # iterate through frequencies in freq_range
            for item_freq in self.freq_range:
                try:
                    nps_range.append(self.get_current_nps(freq_array=self.new_dict['frequencies'],
                                                          freq_value=item_freq))
                    new_freq_range.append(item_freq)
                except ValueError:
                    # if there are no more frequencies available
                    break
            # nps_range = list(map(fut.partial(self.get_current_nps, freq_array=self.new_dict['frequencies'],
            #                                  eqs=eqs_prop), self.freq_range))
            range_dict = {'values': nps_range,
                          'frequencies': new_freq_range,
                          'AUC': AUC,
                          'integral_of_2d_NPS': integral_of_2d_NPS}
            # store ranged nps and resp. freq in a list
            self.nps_image.append(range_dict)
            assert len(nps_range) == len(new_freq_range)
            self.lengths.append(len(range_dict['values']))
        # update dict for AUC and integral of 2d NPS
        self.auc_dict.update({self.key_image: image_auc_list})
        self.integral_2d_nps_dict.update({self.key_image: image_integral_2d_nps_list})
        # average stored nps over the ROIs of this image
        averaged_dict = self.average_roi_nps(list_of_dict=self.nps_image)
        self.all_average_nps.update({self.key_image: averaged_dict})
        self.roi_size_dict.update({self.key_image: self.image_roi_sizes})
        # recognize all peaks in nps-array
        peaks = ProcessROI.collect_all_max_peaks_nps(averaged_dict)
        # handle peak info
        peak_info_dict = self.handle_peak_info(peak_dict=peaks,
                                               all_val_arr=averaged_dict['values'],
                                               all_freq_arr=averaged_dict['frequencies'])
        self.all_nps_peak_info.update({self.key_image: peak_info_dict})
        self.all_nps_dict.update({self.key_image: self.nps_image})
    # calculate mean of averaged nps over all images of the series
    self.mean_of_averaged_nps_dict = ProcessROI.mean_of_ave_nps(all_average_nps=self.all_average_nps)
    # recognize all peaks in nps-array
    peaks_ave = ProcessROI.collect_all_max_peaks_nps(self.mean_of_averaged_nps_dict)
    # handle peak info
    self.peak_info_dict_ave = self.handle_peak_info(peak_dict=peaks_ave,
                                                    all_val_arr=self.mean_of_averaged_nps_dict['values'],
                                                    all_freq_arr=self.mean_of_averaged_nps_dict['frequencies'])
    # calculate SD of mean HU
    self.sd_of_mean_HU_dict = ProcessROI.sd_of_dictionary(dict=self.all_mean_HU_dict)
    # calculate SD of SD
    self.sd_of_sd_dict = ProcessROI.sd_of_dictionary(dict=self.all_SD_dict)
    # get total mean values for mean_HU and SD
    self.total_mean_HU = ProcessROI.mean_of_mean(all_values_dict=self.all_mean_HU_dict)
    self.total_mean_sd = ProcessROI.mean_of_mean(all_values_dict=self.all_SD_dict)
    # create workbook for displaying results
    self.create_xlsx_file_nps(all_nps_dict=self.all_nps_dict)
    self.workbook_series.close()
    pass
    return
@staticmethod
def sort_all_roi_dict(directories_dict, all_roi_dict):
    """
    Sort passed all_roi_dict to reproduce the directory structure
    of the dcm-images data set.

    :param directories_dict: dict of dicts of lists of strings
        (See description of attribute filedict of class StartClass)
    :param all_roi_dict: dict of lists of tuples
        (See description of attribute all_roi_dict of class GUI)
    :return: sorted all_roi_dict
        Keys : paths to study folder
        Values : dicts
            Keys : paths to series folders
            Values : dict
                Keys : paths to image file
                Values : lists of tuples containing ROI coordinates
                    (x0, y0, x1, y1 -- upper-left / lower-right corners)
    """
    print('sort_all_roi_dict is being executed')
    # empty dict for sorted ROIs
    sorted_all_roi_dict = {}
    total_files = len(all_roi_dict.keys())
    # iterate over keys of all_roi_dict, i.e. file names
    for numf, file_name_prim in enumerate(natsorted(all_roi_dict.keys(), key=lambda f: f.split('_')[-1])):
        print('Progress: file %d of %d' % ((numf + 1), total_files))
        # iterate over the keys of directories_dict (study folders)
        for classdirname in natsorted(directories_dict.keys(), key=lambda f: f.split('_')[-1]):
            subdict = directories_dict[classdirname]
            # iterate over keys of subdict (series folders)
            for serie_name in natsorted(subdict.keys(), key=lambda f: f.split('_')[-1]):
                # iterate over the files in file list
                for file_name_second in subdict[serie_name]:
                    # compare key of all_roi_dict and filename;
                    # substring match: the ROI key may be a basename
                    # of the absolute path stored in directories_dict
                    if file_name_prim in file_name_second:
                        # setdefault replaces the former nested
                        # try/except KeyError bookkeeping
                        series_dict = sorted_all_roi_dict.setdefault(
                            classdirname, {}).setdefault(serie_name, {})
                        series_dict[file_name_prim] = all_roi_dict[file_name_prim]
    print('sort_all_roi_dict is done')
    return sorted_all_roi_dict
def build_all_mean_HU_SD_dict(self, all_roi_dict, array_to_operate, key):
"""
Calculate mean HU and standard deviation for each ROI
on the current image and update respective dictionaries
all_mean_HU_dict and all_SD_dict (See description in class' docs)
:param all_roi_dict: dict
(See description of attribute sorted_all_roi_dict).
:param array_to_operate: ndarray (2d)
Current ROI's pixel array.
:param key: string
Path to current dcm-image file.
:return: nothing
"""
# all mean HU for the current image
roi_image_mean_HU = []
# all mean sd for current image
image_sd = []
# iterate through all rois in image
for coord in all_roi_dict[key]:
# get pixel array of current roi
roi_array = array_to_operate[coord[1]:coord[3], coord[0]:coord[2]]
# calculate mean HU
mean_HU = np.mean(roi_array)
roi_image_mean_HU.append(mean_HU)
# calculate SD
# build homogen mean matrix
mean_matrix = np.ones(shape=np.array(roi_array).shape) * mean_HU
# calculate difference between roi image and mean image
diff_matrix = roi_array - mean_matrix
# flatten diff matrix to access all its elements easier
diff_flattened = diff_matrix.ravel()
# calculate SD for current ROI
sd_roi = np.sqrt(np.mean([i ** 2 for i in diff_flattened]))
image_sd.append(sd_roi)
self.all_mean_HU_dict.update({key: roi_image_mean_HU})
self.all_SD_dict.update({key: image_sd})
pass
@staticmethod
def mean_of_ave_nps(all_average_nps):
"""
Averaging of passed nps values dict
among all images in current series folder.
:param all_average_nps: dict of lists of dicts
Keys : paths to images in current series folder.
Values : list range_dict attribute for each ROI.
(See attribute range_dict)
:return: dict
(Key :) 'values' : (Value :) nps list averaged
"""
averaged_nps_as_list = []
for key in all_average_nps:
averaged_nps_as_list.append(all_average_nps[key]['values'])
frequens = all_average_nps[key]['frequencies']
mean_of_averaged_nps = np.mean(np.array(averaged_nps_as_list), axis=0)
mean_of_averaged_nps_dict = {'values': mean_of_averaged_nps,
'frequencies': frequens}
return mean_of_averaged_nps_dict
@staticmethod
def sd_of_dictionary(dict):
"""
Calculate standard deviation of list values of given dict.
:param dict: dict
Keys : whatever keys;
Values : lists of numeric values;
:return: dict
Keys : the same keys as of dict argument;
Values : sd value of respective list.
"""
# build array from dictionary
sd_dict = {}
for key in dict:
one_d_array = dict[key]
mean_value = np.mean(one_d_array)
squared_diff_list = []
for item in one_d_array:
squared_diff = (item - mean_value) ** 2
squared_diff_list.append(squared_diff)
sd = np.sqrt(np.mean(squared_diff_list))
sd_dict.update({key: sd})
return sd_dict
def average_roi_nps(self, list_of_dict):
"""
Build dictionary of nps lists averaged among ROIs in each image.
:param list_of_dict: list_of_dict
(See attribute nps_image)
:return: dict of dicts
Keys : absolute paths to images in current series folder.
Values : dict
Key : 'value'
Value : NPS list of averaged ROIs' NPS lists for respective image.
Key : 'frequencies'
Value : list of respective frequencies.
"""
# initialize lists for nps and resp freqs
values_to_average = []
resp_freq_to_average = []
# get max length of roi nps and its index
max_length = max(self.lengths)
max_length_idx = np.argmax(self.lengths)
# iterate through all rois nps in image
for roi_item_dict in list_of_dict:
values = roi_item_dict['values']
frequencies = roi_item_dict['frequencies']
# fill lacking items with zeros
# length difference with the longest nps
len_diff = max_length - len(values)
# convert values und freqs to python lists
values = list(values)
frequencies = list(frequencies)
values += [0] * len_diff
frequencies += [0] * len_diff
# store transformed nps values and freqs in lists
values_to_average.append(values)
resp_freq_to_average.append(frequencies)
# get mean array of value arrays
averaged_nps = np.mean(values_to_average, axis=0)
# max_length_idx tells us, which ROI has the largest NPS range
# then the frequencies array of this ROI is retrieved from nps_image
averaged_freqs = list_of_dict[max_length_idx]['frequencies']
# store averaged value and frequencies in one dictionary
averaged_dict = {'values': averaged_nps,
'frequencies': averaged_freqs}
return averaged_dict
# method is used to get total average values from all_mean_HU_dict and all_sd_dict
@staticmethod
def mean_of_mean(all_values_dict):
"""
Build mean value of numerical values of given dictionary.
:param all_values_dict: dict
Keys : Any
Values : single numerical values
:return: mean value of the values
"""
mean_of_mean_list = []
for key in all_values_dict:
mean_of_mean_list.append(np.mean(all_values_dict[key]))
mean_of_mean_value = np.mean(mean_of_mean_list)
return mean_of_mean_value
def compute_nps(self, array, pixel_spacing):
    """
    Compute 2d and 1d NPS of a given pixel array.

    :param array: ndarray (2d)
        Pixel array of current ROI.
    :param pixel_spacing: sequence of two floats
        Pixel spacing of the dcm-image in y and x direction (mm);
        only pixel_spacing[0] is used for the frequency axis.
    :return: dict
        Keys : 'values' - 1d NPS of ROI (not interpolated),
               'frequencies' - respective frequencies (lp/cm),
               'AUC' - area under 1d NPS profile,
               'integral_of_2d_NPS' - sum over the 2d NPS.
    """
    # if image measurements in mm are undefined
    # the default image sizing is applied
    if self.im_width_in_mm == 'undefined':
        self.im_width_in_mm = self.px_width * self.pixel_size_in_mm
    if self.im_height_in_mm == 'undefined':
        self.im_height_in_mm = self.px_height * self.pixel_size_in_mm
    # mean pixel value of whole ROI
    mean_value = np.mean(array)
    # transform list into numpy array
    # to access the array's properties
    np_arr = np.array(array)
    # get ROI size
    # NOTE(review): shape[0] is the row count, so these two names are
    # swapped; harmless here because the NPS formula below is symmetric
    # in width/height -- confirm before reusing them elsewhere
    roi_width = np_arr.shape[0]
    roi_height = np_arr.shape[1]
    # maximal size of the array (height or width)
    max_size = max(np_arr.shape)
    # building mean value array (background)
    mean_arr = mean_value * np.ones(np_arr.shape)
    # if 2d fitting should be used
    if self.useFitting:
        # self.pol_fit is the 2d-fit of the image; assumes it was set
        # beforehand (e.g. by create_pol_fit) -- TODO confirm
        detrended_arr = array - self.pol_fit
    else:
        # mean-value subtraction as background removal
        detrended_arr = array - mean_arr
    # create file of detrended image
    # StartClass.create_image_from_2d_array(arr_2d=detrended_arr,
    #                                       filename='09.Detrended_images/Detrended_image__' +
    #                                       self.basename + '__.png')
    # apply FFT to detrended image (DC component shifted to center)
    DFT_list = np.fft.fftshift(np.fft.fft2(detrended_arr))
    # calculate 2d-NPS
    # nps = 1 / self.px_width / self.px_height * np.abs(DFT_list)**2
    # nps = (self.pixel_size_in_mm ** 2) / self.px_width / self.px_height * np.abs(DFT_list) ** 2
    nps = 1 / roi_height ** 2 / roi_width ** 2 * np.abs(DFT_list) ** 2
    integral_of_2d_NPS = np.sum(nps)
    # create file of 2d-NPS-image
    StartClass.create_image_from_2d_array(arr_2d=nps,
                                          filename='01.2d_NPS_images/NPS_2D__' +
                                                   self.basename + '__.jpg')
    # building 1d-NPS from 2d_NPS using radial average
    nps_1d = ProcessROI.radial_mean(nps)
    AUC = np.sum(nps_1d)
    # calculate respective frequencies (line pairs per cm);
    # pixel_spacing is in mm, hence the division by 10
    freqs = np.fft.fftfreq(max_size, pixel_spacing[0] / 10)[:max_size // 2]
    # dictionary with all NPS- and freq-values, that will be
    # truncated afterwards
    nps_dict = {'values': nps_1d,
                'frequencies': freqs,
                'integral_of_2d_NPS': integral_of_2d_NPS,
                'AUC': AUC}
    return nps_dict
    @staticmethod
    def drop_part_of_name(name, pattern_of_dropped_part, dropped_from_end):
        """
        Recognize part of name and drop it.
        :param name: string
            String to be truncated.
        :param pattern_of_dropped_part: raw string
            RegEx-pattern of part to be dropped.
        :param dropped_from_end: boolean
            True: if dropped part is at the right (trailing) end of string.
            False: if dropped part is at the left (leading) end of string.
        :return: string
            Truncated string.
        """
        # first match of the pattern; raises IndexError when the pattern
        # does not occur in ``name``
        # DEBUG_dropped_part = re.findall(pattern=pattern_of_dropped_part, string=name)
        dropped_part = re.findall(pattern=pattern_of_dropped_part, string=name)[0]
        if dropped_from_end:
            # slice the matched part off the END of the string
            used_part = name[:-(len(dropped_part))]
        else:
            # slice the matched part off the START of the string
            # NOTE(review): the "- 1" keeps the last character of the
            # dropped part in the result -- looks like an off-by-one, but is
            # preserved here because downstream naming may rely on it;
            # confirm against callers before changing.
            used_part = name[(len(dropped_part) - 1):]
        return used_part
    def create_xlsx_file_nps(self, all_nps_dict):
        """
        Create several xlsx-files with results:
        - <foldername_seriesname>.xlsx (for each series folder;
        with info on all ROIs' NPS-values, ended with 'averaged'-worksheet
        saved in folder 'Results' inside each study-folder)
        - <foldername>.xlsx (for each study;
        with all collected averaged-worksheets from previous files
        saved in executive file's dir in dir 'Only_averaged_worksheets')
        - Summary_information.xlsx (with summarized information from foldername.xlsx-files
        saved in executive file's dir)
        :param all_nps_dict: dict
            Dictionary containing NPS info for each ROI in each image. Has following
            structure:
            {'path_to_image_0': [roi_0 = {'values': [],
                                          'frequencies' [],
                                          'AUC': ...,
                                          'integral_of_2d_NPS':...},
                                 roi_1 = {'values': [],
                                          'frequencies' [],
                                          'AUC': ...,
                                          'integral_of_2d_NPS':...},
                                 ...],
             'path_to_image_1': ...}
        :return: nothing
        """
        # NOTE(review): this method relies on many instance attributes that are
        # initialised elsewhere (workbook_series, workbook_averaged,
        # worksheet_averaged, worksheet_summary, roi_size_dict, auc_dict,
        # integral_2d_nps_dict, all_average_nps, all_nps_peak_info,
        # all_mean_HU_dict, all_SD_dict, sd_of_mean_HU_dict, sd_of_sd_dict,
        # image_rect_coord_record, mean_of_averaged_nps_dict,
        # peak_info_dict_ave, letters_for_excel, metadata, ...) -- confirm they
        # are populated before calling.
        # ---------------------------------------------------------------
        # Part 1: one worksheet per image in the series workbook
        # ---------------------------------------------------------------
        # iterate through all images
        for num_of_image, image_key in enumerate(all_nps_dict):
            # print progress
            print('Writing worksheets in xlsx-file: %d of %d' % (num_of_image + 1, len(all_nps_dict)))
            # counter for roi in image multiplied by 2
            counter_roi = 0
            # worksheet name: image basename without its 4-char extension
            worksheet = self.workbook_series.add_worksheet(os.path.basename(image_key)[:-4])
            # each ROI of the image occupies two columns (Lp | NPS)
            for item_nps_dict in all_nps_dict[image_key]:
                val_arr = item_nps_dict['values']
                freq_arr = item_nps_dict['frequencies']
                # initialization of cells in worksheet
                row = 2
                col = counter_roi
                # headers of the table
                worksheet.write(1, counter_roi, 'Lp')
                worksheet.write(1, counter_roi + 1, 'NPS')
                worksheet.write(0, counter_roi, 'ROI_%d' % (counter_roi // 2 + 1))
                worksheet.write(0, counter_roi + 1, '%dx%d px' % (self.roi_size_dict[image_key][counter_roi // 2][0],
                                                                  self.roi_size_dict[image_key][counter_roi // 2][1]))
                # one (frequency, NPS) row per sample
                for frequency, value_nps in zip(freq_arr, val_arr):
                    worksheet.write(row, col, frequency)
                    worksheet.write(row, col + 1, value_nps)
                    row += 1  # next row
                counter_roi += 2
            # retrieve mean of AUC and integral of 2d NPS for the current image
            AUC = np.mean(self.auc_dict[image_key])
            integral_of_2d_NPS = np.mean(self.integral_2d_nps_dict[image_key])
            row_ave = 2
            col_ave = counter_roi + 1
            # averaged nps data
            worksheet.write(row_ave - 2, col_ave, 'averaged')
            worksheet.write(row_ave - 1, col_ave, 'Lp')
            worksheet.write(row_ave - 1, col_ave + 1, 'NPS')
            for frequency, value_nps in zip(self.all_average_nps[image_key]['frequencies'],
                                            self.all_average_nps[image_key]['values']):
                worksheet.write(row_ave, col_ave, frequency)
                worksheet.write(row_ave, col_ave + 1, value_nps)
                row_ave += 1  # next row
            # create a new Chart object
            chart = self.workbook_series.add_chart({'type': 'line'})
            # configure the chart: plot the averaged NPS column against its
            # frequency column (Excel ranges built from column letters)
            chart.add_series({'values': '=%s!$%s$3:$%s$%d' % (os.path.basename(image_key)[:-4],
                                                              self.letters_for_excel[counter_roi + 2],
                                                              self.letters_for_excel[counter_roi + 2],
                                                              len(self.all_average_nps[image_key]['frequencies']) + 2),
                              'categories': '%s!$%s$3:$%s$%d' % (os.path.basename(image_key)[:-4],
                                                                 self.letters_for_excel[counter_roi + 1],
                                                                 self.letters_for_excel[counter_roi + 1],
                                                                 len(self.all_average_nps[image_key][
                                                                         'frequencies']) + 2),
                              'name': os.path.basename(image_key),
                              'legend': False,
                              'trendline': {'type': 'polynomial',
                                            'order': 3,
                                            'line': {
                                                'color': 'red',
                                                'width': 1,
                                                'dash_type': 'long_dash',
                                            },
                                            'display_equation': True,
                                            }})
            chart.set_x_axis({'name': 'Line pairs per cm'})
            chart.set_y_axis({'name': 'NPS_1D_averaged'})
            # Insert the chart into the worksheet.
            worksheet.insert_chart('%s1' % self.letters_for_excel[counter_roi + 3], chart)
            # additional information about size of cropped image
            # and characteristics of nps curve
            worksheet.write(19, counter_roi + 4, 'max_peak_nps')
            worksheet.write(20, counter_roi + 4, 'max_peak_freq')
            worksheet.write(19, counter_roi + 5, self.all_nps_peak_info[image_key]['mean_value'])
            worksheet.write(20, counter_roi + 5, self.all_nps_peak_info[image_key]['mean_freq'])
            worksheet.write(21, counter_roi + 4, 'left_dev')
            worksheet.write(22, counter_roi + 4, 'right_dev')
            worksheet.write(21, counter_roi + 5, self.all_nps_peak_info[image_key]['left_dev'])
            worksheet.write(22, counter_roi + 5, self.all_nps_peak_info[image_key]['right_dev'])
            # area under NPS-curve
            worksheet.write(24, counter_roi + 4, 'area')
            worksheet.write(24, counter_roi + 5, AUC)
            # integral of 2d NPS
            worksheet.write(25, counter_roi + 4, 'Integral_2d_NPS')
            worksheet.write(25, counter_roi + 5, integral_of_2d_NPS)
            # fitting info
            if self.useFitting:
                worksheet.write(17, counter_roi + 4, 'Fitting')
                worksheet.write(17, counter_roi + 5, self.fit_order)
            else:
                worksheet.write(17, counter_roi + 4, 'BG_remove')
            # make column wider
            worksheet.set_column(first_col=counter_roi + 4, last_col=counter_roi + 4, width=20)
            # write info about mean HU and standard deviation
            row_mean_HU_SD_info = 19
            col_mean_HU_SD_info_header = counter_roi + 7
            col_mean_HU = col_mean_HU_SD_info_header + 1
            col_SD = col_mean_HU + 1
            col_x_coord = col_SD + 1
            col_y_coord = col_x_coord + 1
            # one row per ROI: label, mean HU, SD, and the ROI's (x, y) origin
            for mean_HU, SD, coord in zip(self.all_mean_HU_dict[image_key],
                                          self.all_SD_dict[image_key],
                                          self.image_rect_coord_record):
                worksheet.write(row_mean_HU_SD_info, col_mean_HU_SD_info_header,
                                'ROI_%d' % (row_mean_HU_SD_info - 18))
                worksheet.write(row_mean_HU_SD_info, col_mean_HU, mean_HU)
                worksheet.write(row_mean_HU_SD_info, col_SD, SD)
                worksheet.write(row_mean_HU_SD_info, col_x_coord, coord[0])
                worksheet.write(row_mean_HU_SD_info, col_y_coord, coord[1])
                row_mean_HU_SD_info += 1
            worksheet.write(18, counter_roi + 8, 'Mean_HU')
            worksheet.write(18, counter_roi + 9, 'SD')
            worksheet.write(18, counter_roi + 10, 'x_coord')
            worksheet.write(18, counter_roi + 11, 'y_coord')
            worksheet.write(27, counter_roi + 4, 'averaged_Mean_HU')
            worksheet.write(28, counter_roi + 4, 'averaged_SD')
            worksheet.write(29, counter_roi + 4, 'SD_of_Mean_HU')
            worksheet.write(30, counter_roi + 4, 'SD_of_SD')
            worksheet.write(row_mean_HU_SD_info + 1, col_mean_HU_SD_info_header,
                            'averaged')
            worksheet.write(row_mean_HU_SD_info + 2, col_mean_HU_SD_info_header,
                            'SD')
            worksheet.write(row_mean_HU_SD_info + 1, col_mean_HU,
                            np.mean(self.all_mean_HU_dict[image_key]))
            worksheet.write(row_mean_HU_SD_info + 1, col_SD,
                            np.mean(self.all_SD_dict[image_key]))
            worksheet.write(27, counter_roi + 5, np.mean(self.all_mean_HU_dict[image_key]))
            worksheet.write(28, counter_roi + 5, np.mean(self.all_SD_dict[image_key]))
            worksheet.write(29, counter_roi + 5, np.mean(self.sd_of_mean_HU_dict[image_key]))
            worksheet.write(30, counter_roi + 5, np.mean(self.sd_of_sd_dict[image_key]))
            # write sd of mean HU and SD
            worksheet.write(row_mean_HU_SD_info + 2, col_mean_HU,
                            np.mean(self.sd_of_mean_HU_dict[image_key]))
            worksheet.write(row_mean_HU_SD_info + 2, col_SD,
                            np.mean(self.sd_of_sd_dict[image_key]))
        # ---------------------------------------------------------------
        # Part 2: the series-wide 'averaged' worksheet (and a copy of the
        # same data in the study-level workbook's worksheet_averaged)
        # ---------------------------------------------------------------
        worksheet_ave = self.workbook_series.add_worksheet('averaged')
        val_arr = self.mean_of_averaged_nps_dict['values']
        freq_arr = self.mean_of_averaged_nps_dict['frequencies']
        # initialization of cells in worksheet
        row = 2
        col = 0
        # headers of the table
        worksheet_ave.write(0, 0, 'Total average')
        worksheet_ave.write(1, 0, 'Lp')
        worksheet_ave.write(1, 0 + 1, 'NPS')
        self.worksheet_averaged.write(0, 0, 'Total average')
        self.worksheet_averaged.write(1, 0, 'Lp')
        self.worksheet_averaged.write(1, 0 + 1, 'NPS')
        # worksheet.write(0, 1, 'ROI_%d' % (1 // 2 + 1))
        # worksheet.write(0, 1 + 1,
        #                 '%dx%d px' % (self.roi_size_dict[image_key][1 // 2][0],
        #                               self.roi_size_dict[image_key][1 // 2][1]))
        # write the total averaged curve into both workbooks
        for frequency, value_nps in zip(freq_arr, val_arr):
            worksheet_ave.write(row, col, frequency)
            worksheet_ave.write(row, col + 1, value_nps)
            self.worksheet_averaged.write(row, col, frequency)
            self.worksheet_averaged.write(row, col + 1, value_nps)
            row += 1  # next row
        # additional information about size of cropped image
        # and characteristics of nps curve
        worksheet_ave.write(19 - 4, 1 + 3, 'max_peak_nps')
        worksheet_ave.write(20 - 4, 1 + 3, 'max_peak_freq')
        worksheet_ave.write(19 - 4, 1 + 4, self.peak_info_dict_ave['mean_value'])
        worksheet_ave.write(20 - 4, 1 + 4, self.peak_info_dict_ave['mean_freq'])
        worksheet_ave.write(21 - 4, 1 + 3, 'left_dev')
        worksheet_ave.write(22 - 4, 1 + 3, 'right_dev')
        worksheet_ave.write(21 - 4, 1 + 4, self.peak_info_dict_ave['left_dev'])
        worksheet_ave.write(22 - 4, 1 + 4, self.peak_info_dict_ave['right_dev'])
        self.worksheet_averaged.write(19 - 4, 1 + 3, 'max_peak_nps')
        self.worksheet_averaged.write(20 - 4, 1 + 3, 'max_peak_freq')
        self.worksheet_averaged.write(19 - 4, 1 + 4, self.peak_info_dict_ave['mean_value'])
        self.worksheet_averaged.write(20 - 4, 1 + 4, self.peak_info_dict_ave['mean_freq'])
        self.worksheet_averaged.write(21 - 4, 1 + 3, 'left_dev')
        self.worksheet_averaged.write(22 - 4, 1 + 3, 'right_dev')
        self.worksheet_averaged.write(21 - 4, 1 + 4, self.peak_info_dict_ave['left_dev'])
        self.worksheet_averaged.write(22 - 4, 1 + 4, self.peak_info_dict_ave['right_dev'])
        # writing info averaged Mean_HU, averaged SD, and area
        worksheet_ave.write(24 - 4, 1 + 3, 'Int of 2d-NPS')
        worksheet_ave.write(26 - 4, 1 + 3, 'averaged Mean_HU')
        worksheet_ave.write(27 - 4, 1 + 3, 'averaged SD')
        worksheet_ave.write(24 - 4, 1 + 4, np.mean(
            [self.integral_2d_nps_dict[key] for key in self.integral_2d_nps_dict]
        ))
        worksheet_ave.write(26 - 4, 1 + 4, self.total_mean_HU)
        worksheet_ave.write(27 - 4, 1 + 4, self.total_mean_sd)
        self.worksheet_averaged.write(24 - 4, 1 + 3, 'Int of 2d-NPS')
        self.worksheet_averaged.write(26 - 4, 1 + 3, 'averaged Mean_HU')
        self.worksheet_averaged.write(27 - 4, 1 + 3, 'averaged SD')
        self.worksheet_averaged.write(24 - 4, 1 + 4, np.mean(
            [self.integral_2d_nps_dict[key] for key in self.integral_2d_nps_dict]
        ))
        self.worksheet_averaged.write(26 - 4, 1 + 4, self.total_mean_HU)
        self.worksheet_averaged.write(27 - 4, 1 + 4, self.total_mean_sd)
        # make column wider
        worksheet_ave.set_column(first_col=4, last_col=4, width=20)
        self.worksheet_averaged.set_column(first_col=4, last_col=4, width=20)
        # info about averaged mean_HU and SD
        worksheet_ave.write(19 - 4, 1 + 7, 'mean_HU')
        worksheet_ave.write(19 - 4, 1 + 8, 'SD')
        worksheet_ave.write(20 - 4, 1 + 6, 'averaged')
        worksheet_ave.write(20 - 4, 1 + 7, self.total_mean_HU)
        worksheet_ave.write(20 - 4, 1 + 8, self.total_mean_sd)
        self.worksheet_averaged.write(19 - 4, 1 + 7, 'mean_HU')
        self.worksheet_averaged.write(19 - 4, 1 + 8, 'SD')
        self.worksheet_averaged.write(20 - 4, 1 + 6, 'averaged')
        self.worksheet_averaged.write(20 - 4, 1 + 7, self.total_mean_HU)
        self.worksheet_averaged.write(20 - 4, 1 + 8, self.total_mean_sd)
        # create a new Chart object
        chart_ave = self.workbook_series.add_chart({'type': 'line'})
        chart_averaged = self.workbook_averaged.add_chart({'type': 'line'})
        # configure the chart
        chart_ave.add_series({'values': '=%s!$%s$3:$%s$%d' % ('averaged',
                                                              'B',
                                                              'B',
                                                              len(self.mean_of_averaged_nps_dict['frequencies']) + 2),
                              'categories': '%s!$%s$3:$%s$%d' % ('averaged',
                                                                 'A',
                                                                 'A',
                                                                 len(self.mean_of_averaged_nps_dict[
                                                                     'frequencies']) + 2),
                              'name': 'Total Average',
                              'legend': False,
                              'trendline': {'type': 'polynomial',
                                            'order': 3,
                                            'line': {
                                                'color': 'red',
                                                'width': 1,
                                                'dash_type': 'long_dash',
                                            },
                                            'display_equation': False,
                                            }})
        chart_averaged.add_series({'values': '=%s!$%s$3:$%s$%d' % (self.serie_part,
                                                                   'B',
                                                                   'B',
                                                                   len(self.mean_of_averaged_nps_dict['frequencies']) +
                                                                   2),
                                   'categories': '%s!$%s$3:$%s$%d' % (self.serie_part,
                                                                      'A',
                                                                      'A',
                                                                      len(self.mean_of_averaged_nps_dict[
                                                                          'frequencies']) + 2),
                                   'name': 'Total Average',
                                   'legend': False,
                                   })
        chart_ave.set_x_axis({'name': 'Line pairs per cm'})
        chart_ave.set_y_axis({'name': 'NPS_1D_averaged'})
        # Insert the chart into the worksheet.
        worksheet_ave.insert_chart('C1', chart_ave)
        chart_averaged.set_x_axis({'name': 'Line pairs per cm'})
        chart_averaged.set_y_axis({'name': 'NPS_1D_averaged'})
        # Insert the chart into the worksheet.
        self.worksheet_averaged.insert_chart('C1', chart_averaged)
        # ---------------------------------------------------------------
        # Part 3: one row of the summary workbook (openpyxl-style
        # cell addressing, e.g. worksheet['A1'])
        # ---------------------------------------------------------------
        # create summary info xlsx
        # write headers to worksheet
        for num_header, header_name in enumerate(self.headers_list):
            self.worksheet_summary['%s1' % opxl.utils.get_column_letter(num_header + 1)] = header_name
        # row to write
        row_to_write = self.start_row + self.num_series
        # get worksheet's name
        name_of_folder = self.folder_part
        name_of_series = self.serie_part
        # write information from worksheet
        self.worksheet_summary['%s%d' % (self.col_number, row_to_write)] = self.num_folder + 1
        self.worksheet_summary['%s%d' % (self.col_folder, row_to_write)] = name_of_folder
        self.worksheet_summary['%s%d' % (self.col_series, row_to_write)] = name_of_series
        self.worksheet_summary['%s%d' % (self.col_peak_freq, row_to_write)] = self.peak_info_dict_ave['mean_freq']
        self.worksheet_summary['%s%d' % (self.col_peak_value, row_to_write)] = self.peak_info_dict_ave['mean_value']
        self.worksheet_summary['%s%d' % (self.col_left_dev, row_to_write)] = self.peak_info_dict_ave['left_dev']
        self.worksheet_summary['%s%d' % (self.col_right_dev, row_to_write)] = self.peak_info_dict_ave['right_dev']
        self.worksheet_summary['%s%d' % (self.col_area, row_to_write)] = np.mean(
            [self.auc_dict[key] for key in self.auc_dict]
        )
        self.worksheet_summary['%s%d' % (self.col_int_2d_nps, row_to_write)] = np.mean(
            [self.integral_2d_nps_dict[key] for key in self.integral_2d_nps_dict]
        )
        self.worksheet_summary['%s%d' % (self.col_ave_m_HU, row_to_write)] = self.total_mean_HU
        self.worksheet_summary['%s%d' % (self.col_ave_SD, row_to_write)] = self.total_mean_sd
        # copy selected DICOM metadata tags into their dedicated columns
        for num_metadata, (metadata_tag, col_metadata) in enumerate(
                zip(self.metadata_headers, self.metadata_columns)
        ):
            self.worksheet_summary['%s%d' % (col_metadata, row_to_write)] = self.metadata[metadata_tag]
@staticmethod
def radial_mean(array):
"""
Build radial mean of 2d-array. In our case: 2d-NPS.
:param array: ndarray (2d)
Two-dimensional NPS of current ROI.
:return: ndarray (1d)
Radial mean of the 2d-NPS.
"""
image = array
image_height = array.shape[0]
image_width = array.shape[1]
center_x = image_width // 2
center_y = image_height // 2
max_size = max(image_height, image_width)
# create array of radii
x, y = np.meshgrid(np.arange(image.shape[1]), np.arange(image.shape[0]))
R = np.sqrt((x - center_x) ** 2 + (y - center_y) ** 2)
# calculate the mean
f = lambda r: image[(R >= r - .5) & (R < r + .5)].mean()
# r = np.linspace(1, 302, num=302)
r = np.linspace(0, max_size // 2, num=max_size // 2 + 1)
mean = np.vectorize(f)(r)
mean = [array[center_y][center_x]] + mean
return mean
def polyfit2d(self, x, y, z):
"""2d-fitting of 2d-array. Used for background extraction"""
size_x = np.array(x).shape[0]
size_y = np.array(y).shape[0]
if size_x > size_y:
y = np.concatenate((y, [y[-1]] * (size_x - size_y)))
else:
x = np.concatenate((x, [x[-1]] * (size_y - size_x)))
order = self.fit_order
ncols = (order + 1) ** 2
G = np.zeros((x.size, ncols))
ij = itertools.product(range(order + 1), range(order + 1))
for k, (i, j) in enumerate(ij):
G[:, k] = x ** i * y ** j
try:
m, _, _, _ = np.linalg.lstsq(G, z, rcond=None)
except:
print('There is a problem in file (fitting): ', file)
return m
@staticmethod
def polyval2d(self, x, y, m):
"""Auxiliar function for 2d-fitting"""
size_x = np.array(x).shape[0]
size_y = np.array(y).shape[0]
if size_x > size_y:
y = np.concatenate((y, [y[-1]] * (size_x - size_y)))
else:
x = np.concatenate((x, [x[-1]] * (size_y - size_x)))
order = int(np.sqrt(len(m))) - 1
ij = itertools.product(range(order + 1), range(order + 1))
z = np.zeros_like(x)
for a, (i, j) in zip(m, ij):
z += a * x ** i * y ** j
return z
def prepare_f_1(self, xy, a, b, c, d):
"""Auxiliar function for 2d-fitting"""
i = xy // self.image_width_1 # reconstruct y coordinates
j = xy % self.image_width_1 # reconstruct x coordinates
out = i * a + j * b + i * j * c + d
return out
@staticmethod
def nps_equation(nps_array):
"""Input: not ranged nps array (with larger distance between
samples). Return list of tuples, containing slope and bias of
line connecting each two points of nps array"""
line_equations = []
for s in range(len(nps_array) - 1):
line_equations.append(ProcessROI.determine_line_equation(point_index_1=s,
point_index_2=s + 1,
point_val_1=nps_array[s],
point_val_2=nps_array[s + 1]))
return line_equations
def get_current_nps(self, freq_value, freq_array):
"""
Get NPS value respective to frequency in freq range.
:param freq_value: float
Current frequency value of freq_range.
:param freq_array: list of floats
Not interpolated list of NPS frequencies.
:return: float
Interpolated value of NPS respective to given freq_value.
"""
"""
using equations of lines between points of unranged NPS array"""
# all frequencies, that less than freq_value
less_values = [i for i in freq_array if i <= freq_value]
# all frequencies, that greater than freq_value
greater_values = [i for i in freq_array if i >= freq_value]
# lower boundary freq value
min_bound_val = max(less_values)
# upper boundary freq value
max_bound_val = min(greater_values)
# lower boundary index
min_bound_idx = list(freq_array).index(min_bound_val)
max_bound_idx = list(freq_array).index(max_bound_val)
line_prop = ProcessROI.determine_line_equation(point_index_1=min_bound_val,
point_index_2=max_bound_val,
point_val_1=self.new_dict['values'][min_bound_idx],
point_val_2=self.new_dict['values'][max_bound_idx])
if min_bound_val == max_bound_val:
current_nps = self.new_dict['values'][min_bound_idx]
else:
current_nps = line_prop[0] * freq_value + line_prop[1]
# print('min bound value: ', self.new_dict['values'][min_bound_idx],
# ' freq_value: ', self.new_dict['values'][max_bound_idx],
# 'min bound index: ', min_bound_idx, '\n',
# 'max bound value: ', max_bound_val, 'max bound index: ', max_bound_idx)
if current_nps < 0:
current_nps = 0
return current_nps
def prepare_f_2(self, xy, a, b, c, d, e, f, g, h, k):
"""Auxiliar function for 2d-fitting"""
i = xy // self.image_width_1 # reconstruct y coordinates
j = xy % self.image_width_1 # reconstruct x coordinates
out = i * a + j * b + i * j * c + i * j ** 2 * d + \
i ** 2 * j * e + i ** 2 * j ** 2 * f + \
i ** 2 * g + j ** 2 * h + k
return out
def create_pol_fit(self, array):
"""
Create 2d fit of current ROI, either of first or of second order.
:param array: ndarray (2d)
Pixel array of current ROI.
:return: ndarray (2d)
Fitted pixel array.
"""
self.pol_fit = []
self.image_width_1 = array.shape[1]
self.image_height_1 = array.shape[0]
x = np.linspace(0, self.image_width_1 - 1, num=self.image_width_1)
y = np.linspace(0, self.image_height_1 - 1, num=self.image_height_1)
z = array[0: self.image_height_1, 0: self.image_width_1]
xy = np.arange(z.size)
if self.fit_order == 1:
mvfs = np.ravel(z)
res = opt.curve_fit(self.prepare_f_1, xy, np.ravel(z))
z_est = self.prepare_f_1(xy, *res[0])
else:
mvfs = np.ravel(z)
res = opt.curve_fit(self.prepare_f_2, xy, np.ravel(z))
z_est = self.prepare_f_2(xy, *res[0])
self.pol_fit = z_est.reshape(self.image_height_1, self.image_width_1)
# self.fitting = np.array(self.polyfit2d(x, y, z))
# for item_fit_ind in range(self.fitting.shape[1]):
# self.pol_fit_sub = self.polyval2d(x, y, self.fitting[:, item_fit_ind])
# self.pol_fit.append(self.pol_fit_sub)
return self.pol_fit
def truncate_nps_freq(self, *, dict):
"""
Truncate low NPS values at higher frequencies.
:param dict: dict
See return value of method compute_nps.
:return: dict
Dict truncated NPS values and respective frequencies.
Keys : 'values', 'frequencies'.
"""
"""Takes as parameter dict: raw NPS-dict.
Truncate higher frequencies with small respective NPS-values"""
# select element greater than 10**(-4)
truncated_nps = []
for i in dict['values']:
if i > self.trunc_percentage / 100 * np.max(dict['values']):
truncated_nps.append(i)
else:
break
# difference of length between normal and truncated NPS-lists
# = last index of freq-list
tr_idx = len(truncated_nps)
# truncate freq-list
truncated_freqs = dict['frequencies'][: tr_idx]
new_dict = {'values': truncated_nps,
'frequencies': truncated_freqs}
return new_dict
@staticmethod
def determine_line_equation(*,
point_val_1,
point_index_1,
point_val_2,
point_index_2):
"""
Find slope and bias (shift) of the line containing two passed points.
:param point_val_1: float
y value of first point.
:param point_index_1: float
x value of first point.
:param point_val_2: float
y value of second point.
:param point_index_2: float
x value of second point.
:return: tuple of floats
Slope and bias of the aforementioned line.
"""
# determination of line equation
# based on two points of the line
if point_index_1 == point_index_2:
slope = 1
shift = 0
else:
slope = (point_val_2 - point_val_1) / (point_index_2 - point_index_1)
shift = point_val_1 - slope * point_index_1
return slope, shift
@staticmethod
def collect_all_max_peaks_nps(dict):
"""
Find all peak values and their indices and respective frequencies in passed 1d-list.
:param dict: dict
Keys : 'values', 'frequencies', 'AUC', 'Integral_of_2d_NPS'.
Values : resp.: 1d-NPS values of current ROI, respective frequencies,
area under 1d-NPS profile, integral of 2d-NPS.
:return: dict
Dict with all peaks info.
Keys : 'values', 'indices', frequencies'
Values : resp. list of peak values,
list of respective indices,
list of respective frequencies.
"""
val_arr = dict['values']
freq_arr = dict['frequencies']
# counter for nps array items
counter = 0
# initialize max value - first threshold level
max_value = 0
# list to store all peaks values
max_peaks_array = []
# list to store all peaks indices
max_ind_array = []
# list to store respective frequencies
resp_freq_max = []
# auxiliary boolean variable
switcher = True
# initialize max index
max_index = 0
for item in val_arr:
# if the next item has larger value than the previous
# and if its value is more than the threshold level
if item >= max_value:
max_value = item
max_index = counter
# ability to store max value in the list
switcher = True
# if the next item is less than the previous (peak condition)
elif switcher:
max_peaks_array.append(max_value)
max_ind_array.append(max_index)
resp_freq_max.append(freq_arr[max_index])
# prevent storing not peaks values
switcher = False
else:
max_value = item
counter += 1
return {'values': max_peaks_array, # [1:],
'indices': max_ind_array, # [1:],
'frequencies': resp_freq_max} # [1:]}
    def handle_peak_info(self, peak_dict, all_val_arr, all_freq_arr):
        """
        Extract peak information from peak_dict.
        :param peak_dict: dict
            (See return value of static method collect_all_max_peaks_nps)
        :param all_val_arr: list
            List of values from which peak_dict has been built.
        :param all_freq_arr: list
            List of respective frequencies.
        :return: dict
            Dict with peak information:
            Keys : 'mean_value' - peak NPS value,
                   'mean_freq' - peak NPS frequency,
                   'left_dev' - frequency distance between peak freq. and
                   freq. at which NPS value falls under 60% of max value
                   to left side from peak.
                   'right_dev' - frequency distance between peak freq. and
                   freq. at which NPS value falls under 60% of max value
                   to right side from peak.
        """
        """Extract following info from peak_dict:
        - mean_value (i.e. absolute max peak value);
        - mean_freq (i.e. absolute max peak freq);
        - left deviation (freq, at which NPS-value falls underneath
        the 60% of max value to the left side)
        - right deviation (the same, but to the right side)"""
        peak_val_arr = peak_dict['values']
        freq_arr = peak_dict['frequencies']
        # deviations default to the sentinel string when no crossing is found
        left_dev = 'undefined'
        right_dev = 'undefined'
        # indices = peak_dict['indices']
        # if there are peaks
        no_peaks = False
        if len(peak_val_arr) > 0:
            only_right_dev = False
            # define max value and whether there is only right deviation
            try:
                # if every detected peak is tiny (< 10% of the global max),
                # fall back to the global maximum of the whole profile
                if max(peak_val_arr) < max(all_val_arr) * 0.1:
                    mean_distr = max(all_val_arr)
                    index_max = list(all_val_arr).index(mean_distr)
                    only_right_dev = True
                else:
                    mean_distr = max(peak_val_arr)
                    index_max = list(all_val_arr).index(mean_distr)
                    # if the profile never drops below 60% left of the peak,
                    # there is no left deviation to report
                    if all([i > 0.6 * mean_distr for i in all_val_arr[:index_max]]):
                        only_right_dev = True
            except ValueError:
                # NOTE(review): if this fires, ``mean_distr``/``index_max``
                # stay unbound and the code below raises NameError -- the
                # prints below are diagnostics, not recovery; confirm
                # whether this path can actually occur.
                print('peak dict: ', peak_dict)
                print('all values: ', all_val_arr)
                print('file: ', self.basename)
        else:
            # no peaks at all: use the global maximum of the profile
            mean_distr = max(all_val_arr)
            index_max = list(all_val_arr).index(mean_distr)
            only_right_dev = True
            no_peaks = True
        # collect nps information
        if not no_peaks:
            try:
                # index_max_peak = peak_val_arr.index(mean_distr)
                mean_freq = all_freq_arr[list(all_val_arr).index(mean_distr)]
            except ValueError:
                # NOTE(review): same caveat as above -- ``mean_freq`` stays
                # unbound if this fires.
                print('file: ', self.basename)
                print('peak dict: ', peak_dict)
                print('mean: ', mean_distr)
        else:
            # index_max_peak = 0
            # assumes self.start_freq holds the first frequency of the range
            mean_freq = self.start_freq
        # right deviation
        # first sample right of the peak that drops under 60% of the peak
        for item in all_val_arr[index_max:]:
            if item < 0.6 * mean_distr:
                r_dev_value = item
                # NOTE(review): .index(item) finds the FIRST occurrence of
                # this value in the whole array, which may differ from the
                # current position if values repeat -- confirm intended.
                resp_idx = list(all_val_arr).index(item)
                r_dev_freq = all_freq_arr[resp_idx]
                right_dev = r_dev_freq - mean_freq
                break
        # left deviation
        if only_right_dev:
            l_dev_value = 'undefined'
            l_dev_freq = 'undefined'
            left_dev = 'undefined'
        else:
            # first sample from the left that rises ABOVE 60% of the peak
            # (i.e. where the profile last was below the threshold)
            for item in all_val_arr[:index_max]:
                if item > 0.6 * mean_distr:
                    l_dev_value = item
                    resp_idx = list(all_val_arr).index(item)
                    l_dev_freq = all_freq_arr[resp_idx]
                    left_dev = mean_freq - l_dev_freq
                    break
        peak_info_dict = {'mean_value': mean_distr,
                          'mean_freq': mean_freq,
                          'left_dev': left_dev,
                          'right_dev': right_dev
                          }
        return peak_info_dict
33371505383 | import pandas as pd
FILEPATH = "TaipeiMRTStationList.csv"
STATION_ADDED = []
def read_and_slice_station_csv(filepath):
df = pd.read_csv(filepath)
df_slice = df[["stationNameEng", "stationName", "startDate"]]
return df_slice
def generate_station_sql_inject(column_inject, row):
datas = ""
data = row[1]
datas += f"'{data['stationNameEng']}', " \
f"'{data['stationName']}', " \
f"'{data['startDate']}'"
return f"INSERT INTO station ({column_inject}) VALUES ({datas});\n"
with open("SQL/InsertStationData.sql", 'w') as file:
iter_row = read_and_slice_station_csv(FILEPATH).iterrows()
columns = "stationNameEng, stationName, startDate"
station_added = []
for i in iter_row:
if i[1]['stationName'] not in station_added:
file.write(generate_station_sql_inject(columns, i))
station_added.append(i[1]['stationName'])
| ker07/downloadMRTData | generate_inject_sql_for_station.py | generate_inject_sql_for_station.py | py | 920 | python | en | code | 0 | github-code | 13 |
18712032773 | # 写法一
def rob(nums: [int]) -> int:
if not nums:
return 0
n = len(nums)
if n < 3:
return max(nums)
for i in range(2, n):
nums[i] += max(nums[:i - 1])
return max(nums)
def rob2(nums: [int]) -> int:
if not nums:
return 0
size = len(nums)
if size == 1:
return nums[0]
dp = [0] * size
dp[0] = nums[0]
dp[1] = max(nums[0], nums[1])
for i in range(2, size):
dp[i] = max(dp[i - 2] + nums[i], dp[i - 1])
return dp[size - 1]
if __name__ == "__main__":
nums = [3, 5, 2, 1, 6, 7]
s1 = rob2(nums)
s2 = rob(nums)
print(s1 == s2)
| russellgao/algorithm | dailyQuestion/2020/2020-05/05-29/python/solution.py | solution.py | py | 645 | python | en | code | 3 | github-code | 13 |
74564789458 | from builtins import object
import unittest
import WMCore.Database.CouchUtils as CouchUtils
from WMQuality.TestInitCouchApp import TestInitCouchApp
class CouchUtils_t(unittest.TestCase):
def setUp(self):
self.testInit = TestInitCouchApp(__file__)
self.testInit.setupCouch("wmcore-acdc-couchutils", "GroupUser", "ACDC")
def tearDown(self):
self.testInit.tearDownCouch()
def testA(self):
"""object driven connection via initialiseCouch method"""
class Thingy(object):
"""misc object with couch access attrs"""
def __init__(self):
self.couchdb = None
self.database = None
self.url = None
@CouchUtils.connectToCouch
def __call__(self):
return True
couchThingy = Thingy()
# test throws with everything None
self.assertRaises(CouchUtils.CouchConnectionError, CouchUtils.initialiseCouch, couchThingy)
couchThingy.url = self.testInit.couchUrl
self.assertRaises(CouchUtils.CouchConnectionError, CouchUtils.initialiseCouch, couchThingy)
couchThingy.database = self.testInit.couchDbName
try:
CouchUtils.initialiseCouch(couchThingy)
except Exception as ex:
msg = "Error initialising couch client for test object:\n %s " % str(ex)
self.fail(msg)
self.assertIsNotNone(couchThingy.couchdb)
# test decorator on already connected object
try:
couchThingy()
except Exception as ex:
msg = "Error invoking connectToCouch decorator:\n %s" % str(ex)
self.fail(msg)
newCouchThingy = Thingy()
newCouchThingy.database = self.testInit.couchDbName
newCouchThingy.url = self.testInit.couchUrl
# 2nd call will make sure full connection is called
try:
newCouchThingy()
except Exception as ex:
msg = "Error invoking connectToCouch decorator:\n %s" % str(ex)
self.fail(msg)
self.assertIsNotNone(newCouchThingy)
def testB(self):
"""check requirement tests"""
class Thingy(dict):
"""test object with required attrs"""
def __init__(self):
super(Thingy, self).__init__()
self.collection = "NotNone"
self.owner = "NotNone"
self['fileset_id'] = "NotNone"
self['owner_id'] = "NotNone"
@CouchUtils.requireCollection
def call1(self):
return True
@CouchUtils.requireOwner
def call4(self):
return True
thingy = Thingy()
try:
thingy.call1()
except Exception as ex:
msg = "Failure in requireCollection decorator: %s" % str(ex)
self.fail(msg)
try:
thingy.call4()
except Exception as ex:
msg = "Failure in requireOwner decorator: %s" % str(ex)
self.fail(msg)
# now screw it up
thingy.collection = None
thingy.owner = None
self.assertRaises(RuntimeError, thingy.call1)
self.assertRaises(RuntimeError, thingy.call4)
if __name__ == '__main__':
unittest.main()
| dmwm/WMCore | test/python/WMCore_t/Database_t/CouchUtils_t.py | CouchUtils_t.py | py | 3,318 | python | en | code | 44 | github-code | 13 |
1491938597 | import random
def GameResult(prev_coords, new_coords, player, dimensions, width, part_runs):
diff = [] # Displacement
diff2 = [] # Displacement as a multiple or fraction of the first
diff3 = [] # Displacement as a multiple or fraction of the first
diff4 = [] # Displacement as a multiple or fraction of the first
ref = [] # Reference coordinate to check that the coordinates
# can exist in a run.
# First let's check if a run is completed.
if part_runs:
for run in part_runs:
# Check if the new point shares the same symmetric displacement
# with two or more other points with qualifying displacements.
line_ref1 = [abs(c1-c2) for c1, c2 in zip(run[0], new_coords)]
line_ref2 = [abs(c1-c2) for c1, c2 in zip(run[1], new_coords)]
if width == 3:
if ((line_ref1 == run[-1]
or line_ref1 == run[-2])
and (line_ref2 == run[-1]
or line_ref2 == run[-2])):
# If it does, add it to that partial run.
run.insert(-2, new_coords)
elif width == 4:
if ((line_ref1 == run[-1]
or line_ref1 == run[-2]
or line_ref1 == run[-3])
and (line_ref2 == run[-1]
or line_ref2 == run[-2]
or line_ref2 == run[-3])):
run.insert(-3, new_coords)
elif width == 5:
if ((line_ref1 == run[-1]
or line_ref1 == run[-2]
or line_ref1 == run[-3]
or line_ref1 == run[-4])
and (line_ref2 == run[-1]
or line_ref2 == run[-2]
or line_ref2 == run[-3]
or line_ref2 == run[-4])):
run.insert(-4, new_coords)
# If its length is the width of the board plus the
# displacement values
# then it is a win scenario.
if len(run) == (width*2 - 1):
part_runs = []
prev_coords = []
return player
# Get the displacement values between the new point and all other previous points
for coords in prev_coords:
diff = [abs(c1-c2) for c1, c2 in zip(coords, new_coords)]
"""
Ignore a pairing of points that have an uneven
displacement as they can't exist in a run. If any
two or more axis values have not expereienced
equal displacement then it can't be a line with an
angle of 0, 45, or 90 degrees, required to exist in a run.
"""
# """Take the displacement values and create a diff which will be a
# multiple or factor of the first, for referencing
# closer or more distant points in a potential run."""
"""Add the new point to the partial run with the two/three/four displacement values
,for that run, at the end for referencing new points against"""
if (set([1, 2]).issubset(diff)
or set([1, 3]).issubset(diff)
or set([1, 4]).issubset(diff)
or set([2, 3]).issubset(diff)
or set([2, 4]).issubset(diff)
or set([3, 4]).issubset(diff)):
continue
elif 1 in diff:
diff2 = [c*2 for c in diff]
diff3 = [c*3 for c in diff]
diff4 = [c*4 for c in diff]
elif 2 in diff:
diff2 = [c/2 for c in diff]
diff3 = [(c/2)*3 for c in diff]
diff4 = [(c/2)*4 for c in diff]
elif 3 in diff:
diff2 = [c/3 for c in diff]
diff3 = [(c/3)*2 for c in diff]
diff4 = [(c/3)*4 for c in diff]
elif 4 in diff:
diff2 = [c/4 for c in diff]
diff3 = [(c/4)*2 for c in diff]
diff4 = [(c/4)*3 for c in diff]
"""Create a reference list with the coordinate values of the axis,
of the new point, which have experienced displacement. If there
are value pairings which can't exist in a run, ignore that point"""
for i in range(dimensions):
if diff[i] == 0:
pass
else:
ref.append(new_coords[i])
# If There are the following pairs of values in the
# axis experiencing displacement ignore that point.
# If these pairings exist the point is invalid due
# to the potential for a line to wrap around the board,
# as vectors may be the same for different lines.
if (((set([0, 1]).issubset(ref) or set([1, 2]).issubset(ref)) and width == 3)
or ((set([0, 2]).issubset(ref) or set([0, 1]).issubset(ref)
or set([1, 3]).issubset(ref) or set([2, 3]).issubset(ref)) and width == 4)
or ((set([0, 1]).issubset(ref) or set([0, 2]).issubset(ref)
or set([0, 3]).issubset(ref) or set([1, 2]).issubset(ref)
or set([1, 4]).issubset(ref) or set([2, 3]).issubset(ref)
or set([2, 4]).issubset(ref) or set([3, 4]).issubset(ref)) and width == 5)):
pass
elif width == 3:
part_runs.append([coords, new_coords, diff, diff2])
elif width == 4:
part_runs.append([coords, new_coords, diff, diff2, diff3])
elif width == 5:
part_runs.append([coords, new_coords, diff, diff2, diff3, diff4])
ref = []
ref = []
def CompPlay(player_part_runs,
             spent_runs,
             human_coords,
             computer_coords,
             width,
             dimensions):
    """Pick the computer's next move on a width**dimensions board.

    Scans the player's partial runs (most recent first) and, if one is a
    single point away from completion, returns the coordinate that blocks
    it.  Otherwise returns a uniformly random unoccupied point.

    Args:
        player_part_runs: Partial runs of the form
            [coords, ..., diff, diff2, ...] built by the run tracker.
        spent_runs: Runs already judged unblockable; appended to in place.
        human_coords: List of points occupied by the human.
        computer_coords: List of points occupied by the computer.
        width: Board side length (3, 4 or 5).
        dimensions: Number of board axes.

    Returns:
        list of int: The chosen point, one value per axis.
    """
    # The values a completed run must cover on each displaced axis.
    number_list = [0, 1, 2, 3, 4][:width]

    def _random_free_point():
        # Sample uniform points until one is not already occupied.
        while True:
            point = [random.randint(0, width - 1) for _ in range(dimensions)]
            if point not in computer_coords and point not in human_coords:
                return point

    # Walk the runs in reverse order so the latest threat is blocked first.
    for run in reversed(player_part_runs):
        # Only runs that are one point away from completion can be blocked:
        # they hold width-1 coordinates plus width-1 displacement lists.
        if len(run) != (width * 2) - 2 or run in spent_runs:
            continue
        block_point = []
        for i in range(dimensions):
            if run[width - 1][i] == 0:
                # Axis i has no displacement: copy it from any run point.
                block_point.append(run[0][i])
            else:
                # Axis i is displaced: the blocking value is the one board
                # value not yet used by the run's points on this axis.
                used = [run[j][i] for j in range(width - 1)]
                block_point.append(list(set(number_list) - set(used))[0])
        if block_point in computer_coords or block_point in human_coords:
            # The run cannot be blocked (its completion point is taken).
            # Bug fix: this previously returned [] when the computer held
            # more than one point, and returned an *unchecked* random point
            # otherwise; always fall back to a random free point instead.
            spent_runs.append(run)
            return _random_free_point()
        return block_point
    # No blockable run (or no partial runs at all): play a random free point.
    # Bug fix: previously a non-empty but unblockable run list fell through
    # to "return comp_coords[:dimensions]" with a stale/empty list.
    return _random_free_point()
| JonathanDelaney/TryTrickThatThough | tictactoe.py | tictactoe.py | py | 8,892 | python | en | code | 0 | github-code | 13 |
24425057460 | from rez.packages_ import get_latest_package
from rez.vendor.version.version import Version
from rez.vendor.distlib import DistlibException
from rez.vendor.distlib.database import DistributionPath
from rez.vendor.distlib.markers import interpret
from rez.vendor.distlib.util import parse_name_and_version
from rez.vendor.enum.enum import Enum
from rez.resolved_context import ResolvedContext
from rez.utils.system import popen
from rez.utils.logging_ import print_debug, print_info, print_warning
from rez.exceptions import BuildError, PackageFamilyNotFoundError, \
PackageNotFoundError, convert_errors
from rez.package_maker__ import make_package
from rez.config import config
from rez.system import System
from tempfile import mkdtemp
from StringIO import StringIO
from pipes import quote
import subprocess
import os.path
import shutil
import sys
import os
class InstallMode(Enum):
    """Policy controlling how a pip package's dependencies are rez-installed."""
    # don't install dependencies. Build may fail, for example the package may
    # need to compile against a dependency. Will work for pure python though.
    no_deps = 0
    # only install dependencies that we have to. If an existing rez package
    # satisfies a dependency already, it will be used instead. The default.
    min_deps = 1
    # install dependencies even if an existing rez package satisfies the
    # dependency, if the dependency is newer.
    new_deps = 2
    # install dependencies even if a rez package of the same version is already
    # available, if possible. For example, if you are performing a local install,
    # a released (central) package may match a dependency; but with this mode
    # enabled, a new local package of the same version will be installed as well.
    #
    # Typically, if performing a central install with the rez-pip --release flag,
    # max_deps is equivalent to new_deps.
    max_deps = 3
def _get_dependencies(requirement, distributions):
    """Convert a pip requirement into rez-style requirement strings.

    Args:
        requirement (str or dict): Either a single pip requirement string,
            or a distlib metadata dict with a "requires" list of strings.
        distributions (list): Distributions installed by this pip run, used
            to map pip project names back to rez-safe (underscored) names.

    Returns:
        list of str: Rez requirements, e.g. ["foo_bar-1.0", "baz"].
    """
    def get_distrubution_name(pip_name):
        # Map a pip project name to the rez-safe name of the matching
        # installed distribution (dashes become underscores).
        # NOTE(review): implicitly returns None when no distribution
        # matches, which would later produce a "None-<version>" string —
        # confirm callers only pass requirements that were installed.
        pip_to_rez_name = pip_name.lower().replace("-", "_")
        for dist in distributions:
            _name, _ = parse_name_and_version(dist.name_and_version)
            if _name.replace("-", "_") == pip_to_rez_name:
                return dist.name.replace("-", "_")

    result = []
    # `basestring` => this module targets python 2.
    requirements = ([requirement] if isinstance(requirement, basestring)
                    else requirement["requires"])
    for package in requirements:
        if "(" in package:
            try:
                # Simple versioned form, e.g. "name (==1.0)".
                name, version = parse_name_and_version(package)
                version = version.replace("==", "")
                name = get_distrubution_name(name)
            except DistlibException:
                # Multi-specifier form, e.g. "name (>=1.0,<2.0)": parse each
                # specifier separately and concatenate the version strings.
                n, vs = package.split(' (')
                vs = vs[:-1]
                versions = []
                for v in vs.split(','):
                    package = "%s (%s)" % (n, v)
                    name, version = parse_name_and_version(package)
                    version = version.replace("==", "")
                    versions.append(version)
                version = "".join(versions)
                name = get_distrubution_name(name)
            result.append("-".join([name, version]))
        else:
            # Unversioned requirement: the name alone suffices.
            name = get_distrubution_name(package)
            result.append(name)
    return result
return result
def is_exe(fpath):
    """Return True if *fpath* exists and is executable by the current user."""
    if not os.path.exists(fpath):
        return False
    return os.access(fpath, os.X_OK)
def run_pip_command(command_args, pip_version=None, python_version=None):
    """Spawn a pip subprocess with the given arguments.

    Args:
        command_args (list of str): Args to pip.

    Returns:
        `subprocess.Popen`: Pip process.
    """
    pip_exe, context = find_pip(pip_version, python_version)
    full_command = [pip_exe]
    full_command.extend(command_args)
    if context is not None:
        # Run inside the resolved rez context containing pip.
        return context.execute_shell(command=full_command, block=False)
    # Fell back to the system pip; run it directly.
    return popen(full_command)
def find_pip(pip_version=None, python_version=None):
    """Locate a pip executable for the given python version.

    Returns:
        2-tuple:
            str: pip executable;
            `ResolvedContext`: Context containing pip, or None if we fell back
            to system pip.
    """
    try:
        # Preferred path: a rez-resolved context providing pip.
        return "pip", create_context(pip_version, python_version)
    except BuildError as err:
        # fall back on system pip. Not ideal but at least it's something
        from rez.backport.shutilwhich import which
        system_pip = which("pip")
        if not system_pip:
            raise err
        print_warning(
            "pip rez package could not be found; system 'pip' command (%s) "
            "will be used instead." % system_pip)
        return system_pip, None
def create_context(pip_version=None, python_version=None):
    """Create a context containing the specific pip and python.

    Args:
        pip_version (str or `Version`): Version of pip to use, or latest if None.
        python_version (str or `Version`): Python version to use, or latest if
            None.

    Returns:
        `ResolvedContext`: Context containing pip and python.

    Raises:
        BuildError: If the pip or python rez package is not present.
    """
    # determine pip pkg to use for install, and python variants to install on
    if pip_version:
        pip_req = "pip-%s" % str(pip_version)
    else:
        pip_req = "pip"
    if python_version:
        ver = Version(str(python_version))
        major_minor_ver = ver.trim(2)
        py_req = "python-%s" % str(major_minor_ver)
    else:
        # use latest major.minor
        package = get_latest_package("python")
        if package:
            major_minor_ver = package.version.trim(2)
        else:
            # no python package. We're gonna fail, let's just choose current
            # python version (and fail at context creation time)
            major_minor_ver = '.'.join(map(str, sys.version_info[:2]))
        py_req = "python-%s" % str(major_minor_ver)
    # use pip + latest python to perform pip download operations
    request = [pip_req, py_req]
    # Translate rez resolution failures into a BuildError with a clear message.
    with convert_errors(from_=(PackageFamilyNotFoundError, PackageNotFoundError),
                        to=BuildError, msg="Cannot run - pip or python rez "
                        "package is not present"):
        context = ResolvedContext(request)
    # print pip package used to perform the install
    pip_variant = context.get_resolved_package("pip")
    pip_package = pip_variant.parent
    print_info("Using %s (%s)" % (pip_package.qualified_name, pip_variant.uri))
    return context
def pip_install_package(source_name, pip_version=None, python_version=None,
                        mode=InstallMode.min_deps, release=False):
    """Install a pip-compatible python package as a rez package.

    Args:
        source_name (str): Name of package or archive/url containing the pip
            package source. This is the same as the arg you would pass to
            the 'pip install' command.
        pip_version (str or `Version`): Version of pip to use to perform the
            install, uses latest if None.
        python_version (str or `Version`): Python version to use to perform the
            install, and subsequently have the resulting rez package depend on.
        mode (`InstallMode`): Installation mode, determines how dependencies are
            managed.
        release (bool): If True, install as a released package; otherwise, it
            will be installed as a local package.

    Returns:
        2-tuple:
            List of `Variant`: Installed variants;
            List of `Variant`: Skipped variants (already installed).
    """
    installed_variants = []
    skipped_variants = []
    pip_exe, context = find_pip(pip_version, python_version)
    # TODO: should check if packages_path is writable before continuing with pip
    #
    packages_path = (config.release_packages_path if release
                     else config.local_packages_path)
    # Staging layout: pip installs into tmpdir/rez_staging/{python,bin,include}
    # and the files are later copied into the rez package's variant root.
    tmpdir = mkdtemp(suffix="-rez", prefix="pip-")
    stagingdir = os.path.join(tmpdir, "rez_staging")
    # Separator used below to strip the staging prefix off installed file paths.
    stagingsep = "".join([os.path.sep, "rez_staging", os.path.sep])
    destpath = os.path.join(stagingdir, "python")
    binpath = os.path.join(stagingdir, "bin")
    incpath = os.path.join(stagingdir, "include")
    datapath = stagingdir
    if context and config.debug("package_release"):
        buf = StringIO()
        print >> buf, "\n\npackage download environment:"
        context.print_info(buf)
        _log(buf.getvalue())
    # Build pip commandline
    cmd = [pip_exe, "install",
           "--install-option=--install-lib=%s" % destpath,
           "--install-option=--install-scripts=%s" % binpath,
           "--install-option=--install-headers=%s" % incpath,
           "--install-option=--install-data=%s" % datapath]
    if mode == InstallMode.no_deps:
        cmd.append("--no-deps")
    cmd.append(source_name)
    _cmd(context=context, command=cmd)
    _system = System()
    # Collect resulting python packages using distlib
    distribution_path = DistributionPath([destpath], include_egg=True)
    # NOTE(review): get_distributions() is iterated twice (once here, once in
    # the loop below) — presumably cheap, but confirm it re-scans the path.
    distributions = [d for d in distribution_path.get_distributions()]
    for distribution in distribution_path.get_distributions():
        requirements = []
        if distribution.metadata.run_requires:
            # Handle requirements. Currently handles conditional environment based
            # requirements and normal requirements
            # TODO: Handle optional requirements?
            for requirement in distribution.metadata.run_requires:
                if "environment" in requirement:
                    # Conditional requirement: only applies when the PEP 508
                    # environment marker evaluates true for this interpreter.
                    if interpret(requirement["environment"]):
                        requirements.extend(_get_dependencies(requirement, distributions))
                elif "extra" in requirement:
                    # Currently ignoring optional requirements
                    pass
                else:
                    requirements.extend(_get_dependencies(requirement, distributions))
        tools = []
        src_dst_lut = {}
        # Build a source->destination lookup of every installed file, noting
        # which files are executables under bin/ (these become rez "tools").
        for installed_file in distribution.list_installed_files(allow_fail=True):
            source_file = os.path.normpath(os.path.join(destpath, installed_file[0]))
            if os.path.exists(source_file):
                destination_file = installed_file[0].split(stagingsep)[1]
                exe = False
                if is_exe(source_file) and \
                        destination_file.startswith("%s%s" % ("bin", os.path.sep)):
                    _, _file = os.path.split(destination_file)
                    tools.append(_file)
                    exe = True
                data = [destination_file, exe]
                src_dst_lut[source_file] = data
            else:
                _log("Source file does not exist: " + source_file + "!")
        def make_root(variant, path):
            """Using distlib to iterate over all installed files of the current
            distribution to copy files to the target directory of the rez package
            variant
            """
            for source_file, data in src_dst_lut.items():
                destination_file, exe = data
                destination_file = os.path.normpath(os.path.join(path, destination_file))
                if not os.path.exists(os.path.dirname(destination_file)):
                    os.makedirs(os.path.dirname(destination_file))
                shutil.copyfile(source_file, destination_file)
                if exe:
                    # Preserve the executable permission bits.
                    shutil.copystat(source_file, destination_file)
        # determine variant requirements
        # TODO detect if platform/arch/os necessary, no if pure python
        variant_reqs = []
        variant_reqs.append("platform-%s" % _system.platform)
        variant_reqs.append("arch-%s" % _system.arch)
        variant_reqs.append("os-%s" % _system.os)
        if context is None:
            # since we had to use system pip, we have to assume system python version
            py_ver = '.'.join(map(str, sys.version_info[:2]))
        else:
            python_variant = context.get_resolved_package("python")
            py_ver = python_variant.version.trim(2)
        variant_reqs.append("python-%s" % py_ver)
        name, _ = parse_name_and_version(distribution.name_and_version)
        # Keep the distribution's original casing for the parsed name length,
        # and make the name rez-safe by replacing dashes with underscores.
        name = distribution.name[0:len(name)].replace("-", "_")
        with make_package(name, packages_path, make_root=make_root) as pkg:
            pkg.version = distribution.version
            if distribution.metadata.summary:
                pkg.description = distribution.metadata.summary
            pkg.variants = [variant_reqs]
            if requirements:
                pkg.requires = requirements
            commands = []
            commands.append("env.PYTHONPATH.append('{root}/python')")
            if tools:
                pkg.tools = tools
                commands.append("env.PATH.append('{root}/bin')")
            pkg.commands = '\n'.join(commands)
            installed_variants.extend(pkg.installed_variants or [])
            skipped_variants.extend(pkg.skipped_variants or [])
    # cleanup
    shutil.rmtree(tmpdir)
    return installed_variants, skipped_variants
def _cmd(context, command):
    """Run *command* (optionally inside *context*), raising on failure."""
    cmd_str = ' '.join(quote(part) for part in command)
    _log("running: %s" % cmd_str)
    if context is None:
        proc = popen(command)
    else:
        proc = context.execute_shell(command=command, block=False)
    proc.wait()
    if proc.returncode:
        raise BuildError("Failed to download source with pip: %s" % cmd_str)
# Cached at import time: whether "package_release" debug output is enabled.
_verbose = config.debug("package_release")


def _log(msg):
    # Emit *msg* via rez's debug printer when package_release debugging is on.
    if _verbose:
        print_debug(msg)
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
| ColinKennedy/tk-config-default2-respawn | vendors/rez-2.23.1-py2.7/rez/pip.py | pip.py | py | 14,168 | python | en | code | 10 | github-code | 13 |
35784906048 | import os
import pickle
import numpy as np
import imageio
import scipy.signal as sig
from torch.utils.data import Dataset
import rf.organizer as org
from rf.proc import create_fast_slow_matrix, find_range
class RGBData(Dataset):
    """Map-style dataset of (RGB frame stack, ground-truth PPG window) pairs.

    Frames are loaded per-sample from "<id_str>_<frame>.png" files inside
    each trial folder and transposed to channels-first (C, T, H, W).  The
    PPG ground truth is z-normalized with statistics computed over all
    loaded trials.
    """
    def __init__(self, datapath, datapaths, recording_str="rgbd_rgb", ppg_str="rgbd",
                 video_length = 900, frame_length = 64) -> None:
        """Index the trial folders and load + normalize the PPG ground truth.

        Args:
            datapath: Root directory holding one sub-directory per trial.
            datapaths: Trial directory names; entries with no PPG file are
                removed from this list IN PLACE.
            recording_str: Filename prefix of the saved frames.
            ppg_str: Filename prefix of the saved PPG signal.
            video_length: Number of frames in each input video (all trials
                must match).
            frame_length: Number of frames in each returned clip.
        """
        # There is an offset in capturing the signals w.r.t the ground truth.
        self.ppg_offset = 25
        # Number of samples to be created by oversampling one trial.
        self.num_samps = 30
        # Name of the files being read. Name depends on how the file was save. We have saved the file as rgbd_rgb
        self.id_str = recording_str
        self.ppg_str = ppg_str
        # Number of frames in the input video. (Requires all data-samples to have the same number of frames).
        self.video_length = video_length
        # Number of frames in the output tensor sample.
        self.frame_length = frame_length
        # Data structure for videos.
        self.datapath = datapath
        # Load videos and signals.
        self.video_list = datapaths
        # The PPG files for the RGB are stored as rgbd_ppg and not rgbd_rgb_ppg.
        self.signal_list = []
        # Load signals
        remove_folders = []
        for folder in self.video_list:
            file_path = os.path.join(datapath, folder)
            # Make a list of the folder that do not have the PPG signal.
            if(os.path.exists(file_path)):
                if(os.path.exists(os.path.join(file_path, f"{self.ppg_str}_ppg.npy"))):
                    signal = np.load(os.path.join(file_path, f"{self.ppg_str}_ppg.npy"))
                    self.signal_list.append(signal[self.ppg_offset:])
                else:
                    print(folder, "ppg doesn't exist.")
                    remove_folders.append(folder)
            else:
                print(folder, " doesn't exist.")
                remove_folders.append(folder)
        # Remove the PPGs
        for i in remove_folders:
            self.video_list.remove(i)
            print("Removed", i)
        # Extract the stats for the vital signs.
        # NOTE(review): np.array() assumes every trial's PPG has the same
        # length; ragged trials would produce an object array — confirm.
        self.signal_list = np.array(self.signal_list)
        self.vital_mean = np.mean(self.signal_list)
        self.vital_std = np.std(self.signal_list)
        self.signal_list = (self.signal_list - self.vital_mean)/self.vital_std
        # Create a list of video number and valid frame number to extract the data from.
        self.video_nums = np.arange(0, len(self.video_list))
        self.frame_nums = np.arange(0, self.video_length - frame_length - self.ppg_offset)
        # Create all possible sampling combinations.
        self.all_idxs = []
        for num in self.video_nums:
            # Generate the start index.
            cur_frame_nums = np.random.randint(low=0,
                                               high = self.video_length - frame_length - self.ppg_offset,
                                               size = self.num_samps)
            # Append all the start indices.
            for cur_frame_num in cur_frame_nums:
                self.all_idxs.append((num,cur_frame_num))

    def __len__(self):
        """Number of (trial, window-start) samples in the dataset."""
        return int(len(self.all_idxs))

    def __getitem__(self, idx):
        """Load one clip from disk and return it with its PPG segment.

        Returns:
            tuple: (C, frame_length, H, W) frame array and the aligned,
            z-normalized PPG segment of length frame_length.
        """
        # Get the video number and the starting frame index.
        video_number, frame_start = self.all_idxs[idx]
        # Get video frames for the output video tensor.
        # (Expects each sample to be stored in a folder with the sample name. Each frame is stored as a png)
        item = []
        for img_idx in range(self.frame_length):
            image_path = os.path.join(self.datapath,
                                      str(self.video_list[video_number]),
                                      f"{self.id_str}_{frame_start+img_idx}.png")
            item.append(imageio.imread(image_path))
        item = np.array(item)
        # Add channel dim if no channels in image.
        if(len(item.shape) < 4):
            item = np.expand_dims(item, axis=3)
        # (T, H, W, C) -> (C, T, H, W).
        item = np.transpose(item, axes=(3,0,1,2))
        # Get signal.
        item_sig = self.signal_list[int(video_number)][int(frame_start):int(frame_start+self.frame_length)]
        # Patch for the torch constructor. uint16 is a not an acceptable data-type.
        if(item.dtype == np.uint16):
            item = item.astype(np.int32)
        return np.array(item), np.array(item_sig)
# class RFRppgData_RAM(Dataset):
class RFDataRAMVersion(Dataset):
    """Torch dataset of (raw RF range-window, ground-truth PPG) pairs.

    All radar recordings are organized, range-FFT'd and windowed once at
    construction time and kept in RAM, so ``__getitem__`` only slices
    precomputed arrays.
    """
    def __init__(self, datapath, datapaths, ppg_signal_length=900, frame_length_ppg=512, sampling_ratio=4, \
        window_size=5, samples=256, samp_f=5e6, freq_slope=60.012e12, static_dataset_samples = 30) -> None:
        """Load PPG ground truth and precompute RF fast/slow-time windows.

        Args:
            datapath: Root directory holding one sub-directory per trial.
            datapaths: Trial directory names; entries without a vital_dict.npy
                are removed from this list IN PLACE.
            ppg_signal_length: Total PPG samples per trial.
            frame_length_ppg: PPG samples per returned window.
            sampling_ratio: RF frames per PPG sample.
            window_size: Range bins kept around the detected subject range.
            samples: ADC samples per chirp (RF fast-time length).
            samp_f: ADC sampling frequency in Hz.
            freq_slope: Chirp frequency slope in Hz/s.
            static_dataset_samples: Random windows drawn per trial.
        """
        # There is an offset in capturing the signals w.r.t the ground truth.
        self.ppg_offset = 25
        # Number of samples to be created by oversampling one trial.
        self.num_samps = static_dataset_samples
        # Data structure for videos.
        self.datapath = datapath
        # Load videos and signals.
        self.rf_file_list = datapaths
        self.signal_list = []
        # Load signals.
        remove_list_folder = []
        for folder in self.rf_file_list:
            file_path = os.path.join(datapath, folder)
            if(os.path.exists(os.path.join(file_path,"vital_dict.npy"))):
                signal = np.load(f"{file_path}/vital_dict.npy", allow_pickle=True).item()['rgbd']['NOM_PLETHWaveExport']
                self.signal_list.append(signal[self.ppg_offset:])
            else:
                remove_list_folder.append(folder)
        # Remove unwanted folders from the list.
        # NOTE: This is done in-place. So the folders will be removed from the list passed into this class.
        for folder in remove_list_folder:
            self.rf_file_list.remove(folder)
            print(f"Removed {folder} from the RF file list (In-place execution).")
        # Normalize the GT
        self.signal_list = np.array(self.signal_list)
        self.vital_mean = np.mean(self.signal_list)
        self.vital_std = np.std(self.signal_list)
        self.signal_list = (self.signal_list - self.vital_mean)/self.vital_std
        # The ratio of the sampling frequency of the RF signal and the PPG signal.
        self.sampling_ratio = sampling_ratio
        # Save the RF config parameters.
        self.window_size = window_size
        self.samples = samples
        self.samp_f = samp_f
        self.freq_slope = freq_slope
        # Window the PPG and the RF samples.
        self.ppg_signal_length = ppg_signal_length
        self.frame_length_ppg = frame_length_ppg
        self.frame_nums_rf = np.arange(0, sampling_ratio \
                                        * (self.ppg_signal_length - frame_length_ppg \
                                        - self.ppg_offset), step=sampling_ratio)
        self.frame_nums_ppg = np.arange(0, self.ppg_signal_length \
                                        - frame_length_ppg - self.ppg_offset)
        self.frame_nums = [(i,j) for i,j in zip(self.frame_nums_rf, self.frame_nums_ppg) ]
        self.rf_file_nums = np.arange(len(self.rf_file_list))
        self.all_idxs = []
        for num in self.rf_file_nums:
            cur_frame_nums = np.random.randint(
                low=0, high = self.ppg_signal_length - frame_length_ppg - self.ppg_offset, size = self.num_samps)
            # BUG FIX: the RF start index was previously hard-coded as
            # "cur_frame_nums*4"; use the configured sampling ratio so RF and
            # PPG windows stay aligned for non-default ratios.
            rf_cur_frame_nums = cur_frame_nums * sampling_ratio
            for rf_frame_num, cur_frame_num in zip(rf_cur_frame_nums, cur_frame_nums):
                self.all_idxs.append((num,(rf_frame_num, cur_frame_num)))
        # High-ram, compute FFTs before starting training.
        self.rf_data_list = []
        for rf_file in self.rf_file_list:
            # Read the raw RF data (BUG FIX: the file handle was previously
            # never closed; use a context manager).
            with open(os.path.join(self.datapath, rf_file, "rf.pkl"), 'rb') as rf_fptr:
                s = pickle.load(rf_fptr)
            # Organize the raw data from the RF.
            # Number of samples is set to 256 for our experiments.
            rf_organizer = org.Organizer(s, 1, 1, 1, 2*self.samples)
            frames = rf_organizer.organize()
            # The RF read adds zeros alternately to the samples. Remove them.
            frames = frames[:,:,:,0::2]
            # Process the organized RF data.
            data_f = create_fast_slow_matrix(frames)
            range_index = find_range(data_f, self.samp_f, self.freq_slope, self.samples)
            # Get the windowed raw data for the network.
            raw_data = data_f[:, range_index-self.window_size//2:range_index+self.window_size//2 + 1]
            # The data is complex; split real and imaginary parts into 2
            # separate channels for the network.
            raw_data = np.array([np.real(raw_data), np.imag(raw_data)])
            raw_data = np.transpose(raw_data, axes=(0,2,1))
            self.rf_data_list.append(raw_data)

    def __len__(self):
        """Number of (trial, window) samples in the dataset."""
        return int(len(self.all_idxs))

    def __getitem__(self, idx):
        """Return one (RF window, PPG window) pair.

        Returns:
            tuple: RF array of shape
            (2, window_size, sampling_ratio * frame_length_ppg) and the
            aligned PPG segment of length frame_length_ppg.
        """
        # This part is hard-coded for our settings. TX and RX = 1.
        file_num, (rf_start, ppg_start) = self.all_idxs[idx]
        # Get the RF data.
        data_f = self.rf_data_list[file_num]
        data_f = data_f[:,:,rf_start : rf_start + (self.sampling_ratio * self.frame_length_ppg)]
        item = data_f
        # Get the PPG signal.
        item_sig = self.signal_list[file_num][ppg_start:ppg_start+self.frame_length_ppg]
        assert len(item_sig) == self.frame_length_ppg, f"Expected signal of length {self.frame_length_ppg}, but got signal of length {len(item_sig)}"
        return item, np.array(item_sig)
class FusionDatasetObject(Dataset):
    """Training dataset fusing rPPG estimates and RF-derived PPG with ground truth.

    Loads a pickle of per-trial dicts with keys
    ['video_path', 'est_ppgs', 'gt_ppgs', 'rf_ppg'].  Depending on
    ``compute_fft``, __getitem__ yields either band-limited, normalized FFT
    magnitudes or band-pass-filtered time-domain signals.
    """
    def __init__(self, datapath, datafiles, \
        compute_fft=True, fs=30, l_freq_bpm=45, u_freq_bpm=180, \
        desired_ppg_len=None, fft_resolution = 1, num_static_samples=1, window_rf=False, rf_window_size=5) -> None:
        """Load the fusion pickle and pre-sample training windows.

        Args:
            datapath: Path to the pickle of per-trial dicts.
            datafiles: Trial video paths to include.
            compute_fft: If True, __getitem__ returns FFT magnitudes;
                otherwise band-passed time-domain signals.
            fs: Sampling rate of the signals in Hz.
            l_freq_bpm / u_freq_bpm: Heart-rate band limits in BPM.
            desired_ppg_len: Window length in samples; None uses full trials.
            fft_resolution: Zero-padding factor applied before the FFT.
            num_static_samples: Random window starts drawn per trial.
            window_rf: If True, mask the RF FFT around the rPPG peak.
            rf_window_size: Half-width (in bins) of that RF mask.
        """
        # There is an offset in the dataset between the captured video and GT
        self.ppg_offset = 25
        #Data structure for videos
        self.datapath = datapath
        self.datafiles = datafiles
        self.desired_ppg_len = desired_ppg_len
        self.compute_fft = compute_fft
        self.fs = fs
        self.l_freq_bpm = l_freq_bpm
        self.u_freq_bpm = u_freq_bpm
        self.window_rf = window_rf
        self.fft_resolution = fft_resolution
        self.rf_window_size = rf_window_size
        # Load the data from the pickle file
        with open(datapath, 'rb') as f:
            pickle_data = pickle.load(f)
        # Is any of the 4 keys (video path, estimated ppg from rgb, ground truth ppg, ppg from rf) is missing, we drop that point
        self.usable_data = []
        for data_pt in pickle_data:
            if data_pt['video_path'] in self.datafiles:
                if len(data_pt) != 4:
                    # self.usable_data.remove(data_pt)
                    print(f"{data_pt['video_path']} is dropped")
                    continue
                self.usable_data.append(data_pt)
        # If we want to use smaller window of the signals rather than the whole signal itself
        self.all_combs = []
        if self.desired_ppg_len is not None:
            self.num_static_samples = num_static_samples
            for data_pt in self.usable_data:
                static_idxs = np.random.randint(0, len(data_pt['gt_ppgs']) - self.desired_ppg_len - self.ppg_offset, size=self.num_static_samples)
                for idx in static_idxs:
                    self.all_combs.append((data_pt, idx))
            # Length of the (zero-padded) FFT for windowed samples.
            seq_len = self.desired_ppg_len*self.fft_resolution##HERE
        else:
            for data_pt in self.usable_data:
                self.all_combs.append((data_pt, None))
                # NOTE(review): seq_len keeps the value from the *last* trial;
                # this assumes all trials share one length (and that
                # usable_data is non-empty) — confirm.
                seq_len = len(data_pt['gt_ppgs'])*self.fft_resolution
        print(f"Dataset Ready. There are {self.__len__()} samples")
        # Map the BPM band limits onto FFT bin indices.
        freqs_bpm = np.fft.fftfreq(seq_len, d=1/self.fs) * 60
        self.l_freq_idx = np.argmin(np.abs(freqs_bpm - self.l_freq_bpm))
        self.u_freq_idx = np.argmin(np.abs(freqs_bpm - self.u_freq_bpm))
        print(self.l_freq_idx, self.u_freq_idx)
        print(freqs_bpm[self.l_freq_idx], freqs_bpm[self.u_freq_idx])
        assert self.l_freq_idx < self.u_freq_idx

    def __len__(self):
        """Number of pre-sampled windows."""
        return len(self.all_combs)

    def __getitem__(self, idx):
        """Return ({'est_ppgs', 'rf_ppg'}, ground truth) for one window.

        In FFT mode all three are band-limited normalized FFT magnitudes;
        otherwise they are band-pass-filtered, z-normalized time signals.
        """
        dict_item, start_idx = self.all_combs[idx]
        # dict_keys(['video_path', 'est_ppgs', 'gt_ppgs', 'rf_ppg'])
        # Get the ppg data of the rgb, gt and rf
        item = {'est_ppgs':dict_item['est_ppgs'], 'rf_ppg':dict_item['rf_ppg']}
        item_sig = dict_item['gt_ppgs']
        if self.desired_ppg_len is not None:
            assert start_idx is not None
            # GT is shifted by ppg_offset relative to the estimated signals.
            item_sig = item_sig[start_idx+self.ppg_offset:start_idx+self.ppg_offset+self.desired_ppg_len]
            item['est_ppgs'] = item['est_ppgs'][start_idx:start_idx+self.desired_ppg_len]
            item['rf_ppg'] = item['rf_ppg'][start_idx:start_idx+self.desired_ppg_len]
        # Z-normalize each signal (rf_ppg per-column).
        item_sig = (item_sig - np.mean(item_sig)) / np.std(item_sig)
        item['est_ppgs'] = (item['est_ppgs'] - np.mean(item['est_ppgs'])) / np.std(item['est_ppgs'])
        item['rf_ppg'] = (item['rf_ppg'] - np.mean(item['rf_ppg'], axis = 0)) / np.std(item['rf_ppg'], axis = 0)
        if self.compute_fft:
            n_curr = len(item_sig) * self.fft_resolution
            fft_gt = np.abs(np.fft.fft(item_sig, n=int(n_curr), axis=0))
            fft_gt = fft_gt / np.max(fft_gt, axis=0)
            fft_est = np.abs(np.fft.fft(item['est_ppgs'], n=int(n_curr), axis=0))
            fft_est = fft_est / np.max(fft_est, axis=0)
            fft_est = fft_est[self.l_freq_idx : self.u_freq_idx + 1]
            fft_rf = np.abs(np.fft.fft(item['rf_ppg'], n=int(n_curr), axis=0))
            fft_rf = fft_rf[self.l_freq_idx : self.u_freq_idx + 1]
            if(self.window_rf):
                # Zero the RF spectrum outside a window centred on the rPPG
                # peak, clamping the centre so the window stays in range.
                center_idx = np.argmax(fft_est)
                window_size = self.rf_window_size
                if(center_idx - window_size <= 0):
                    center_idx = window_size + 1
                elif(center_idx + window_size + 1 >= len(fft_est)):
                    center_idx = len(fft_est) - window_size - 1
                mask = np.zeros_like(fft_rf)
                mask[center_idx-window_size:center_idx+window_size+1,:] = 1
                fft_rf = np.multiply(fft_rf, mask)
                fft_rf = fft_rf / np.max(fft_rf)
            else:
                fft_rf = fft_rf / np.max(fft_rf, axis=0)
            return {'est_ppgs':fft_est, 'rf_ppg':fft_rf}, fft_gt[self.l_freq_idx : self.u_freq_idx + 1]
        else:
            # Time-domain mode: band-pass every signal, then re-normalize.
            item_sig = self.lowPassFilter(item_sig)
            item['est_ppgs'] = self.lowPassFilter(item['est_ppgs'])
            for i in range(item['rf_ppg'].shape[1]):
                item['rf_ppg'][:,i] = self.lowPassFilter(item['rf_ppg'][:,i])
            item_sig = (item_sig - np.mean(item_sig)) / np.std(item_sig)
            item['est_ppgs'] = (item['est_ppgs'] - np.mean(item['est_ppgs'])) / np.std(item['est_ppgs'])
            item['rf_ppg'] = (item['rf_ppg'] - np.mean(item['rf_ppg'], axis = 0)) / np.std(item['rf_ppg'], axis = 0)
            return item, np.array(item_sig)

    def lowPassFilter(self, BVP, butter_order=4):
        """Zero-phase Butterworth band-pass of BVP to [l_freq_bpm, u_freq_bpm]."""
        [b, a] = sig.butter(butter_order, [self.l_freq_bpm/60, self.u_freq_bpm/60], btype='bandpass', fs = self.fs)
        filtered_BVP = sig.filtfilt(b, a, np.double(BVP))
        return filtered_BVP
class FusionEvalDatasetObject(Dataset):
    """Evaluation variant of the fusion dataset with deterministic windows.

    Same data layout as FusionDatasetObject, but window start indices are a
    fixed set (for reproducible evaluation) and, in FFT mode, the raw
    complex FFTs and time-domain signals are returned as extra diagnostics.
    """
    def __init__(self, datapath, datafiles, \
        compute_fft=True, fs=30, l_freq_bpm=45, u_freq_bpm=180, \
        desired_ppg_len=None, fft_resolution = 1, num_static_samples=7, window_rf=False, rf_window_size=5) -> None:
        """Load the fusion pickle and enumerate fixed evaluation windows.

        Args mirror FusionDatasetObject.__init__, except that window starts
        are deterministic rather than randomly sampled.
        """
        # There is an offset in the dataset between the captured video and GT
        self.ppg_offset = 25
        #Data structure for videos
        self.datapath = datapath
        self.datafiles = datafiles
        self.desired_ppg_len = desired_ppg_len
        self.compute_fft = compute_fft
        self.fs = fs
        self.l_freq_bpm = l_freq_bpm
        self.u_freq_bpm = u_freq_bpm
        self.window_rf = window_rf
        self.fft_resolution = fft_resolution
        self.rf_window_size = rf_window_size
        # Load the data from the pickle file
        with open(datapath, 'rb') as f:
            pickle_data = pickle.load(f)
        # Is any of the 4 keys (video path, estimated ppg from rgb, ground truth ppg, ppg from rf) is missing, we drop that point
        self.usable_data = []
        for data_pt in pickle_data:
            if data_pt['video_path'] in self.datafiles:
                if len(data_pt) != 4:
                    # self.usable_data.remove(data_pt)
                    print(f"{data_pt['video_path']} is dropped")
                    continue
                self.usable_data.append(data_pt)
        # If we want to use smaller window of the signals rather than the whole signal itself
        self.all_combs = []
        if self.desired_ppg_len is not None:
            self.num_static_samples = num_static_samples
            for data_pt in self.usable_data:
                # TODO crosscheck this and pass as a param
                # Fixed, evenly spaced window starts for deterministic eval.
                static_idxs = np.array([0,128,256,384,512])
                for idx in static_idxs:
                    self.all_combs.append((data_pt, idx))
            seq_len = self.desired_ppg_len*self.fft_resolution
        else:
            for data_pt in self.usable_data:
                self.all_combs.append((data_pt, None))
                # NOTE(review): seq_len keeps the value from the *last* trial;
                # assumes all trials share one length — confirm.
                seq_len = len(data_pt['gt_ppgs'])*self.fft_resolution
        print(f"Dataset Ready. There are {self.__len__()} samples")
        # Map the BPM band limits onto FFT bin indices.
        freqs_bpm = np.fft.fftfreq(seq_len, d=1/self.fs) * 60
        self.l_freq_idx = np.argmin(np.abs(freqs_bpm - self.l_freq_bpm))
        self.u_freq_idx = np.argmin(np.abs(freqs_bpm - self.u_freq_bpm))
        print(self.l_freq_idx, self.u_freq_idx)
        print(freqs_bpm[self.l_freq_idx], freqs_bpm[self.u_freq_idx])
        assert self.l_freq_idx < self.u_freq_idx

    def __len__(self):
        """Number of fixed evaluation windows."""
        return len(self.all_combs)

    def __getitem__(self, idx):
        """Return one evaluation window.

        In FFT mode the first element also carries the raw complex FFTs
        ('rppg_fft', 'rf_fft', 'gt_fft'), the normalized time signals
        ('rgb_true', 'rf_true') and the window 'start_idx' for diagnostics.
        """
        dict_item, start_idx = self.all_combs[idx]
        # dict_keys(['video_path', 'est_ppgs', 'gt_ppgs', 'rf_ppg'])
        # Get the ppg data of the rgb, gt and rf
        item = {'est_ppgs':dict_item['est_ppgs'], 'rf_ppg':dict_item['rf_ppg']}
        item_sig = dict_item['gt_ppgs']
        if self.desired_ppg_len is not None:
            assert start_idx is not None
            # GT is shifted by ppg_offset relative to the estimated signals.
            item_sig = item_sig[start_idx+self.ppg_offset:start_idx+self.ppg_offset+self.desired_ppg_len]
            item['est_ppgs'] = item['est_ppgs'][start_idx:start_idx+self.desired_ppg_len]
            item['rf_ppg'] = item['rf_ppg'][start_idx:start_idx+self.desired_ppg_len]
        # Z-normalize each signal (rf_ppg per-column).
        item_sig = (item_sig - np.mean(item_sig)) / np.std(item_sig)
        item['est_ppgs'] = (item['est_ppgs'] - np.mean(item['est_ppgs'])) / np.std(item['est_ppgs'])
        item['rf_ppg'] = (item['rf_ppg'] - np.mean(item['rf_ppg'], axis = 0)) / np.std(item['rf_ppg'], axis = 0)
        if self.compute_fft:
            n_curr = len(item_sig) * self.fft_resolution
            fft_gt = np.abs(np.fft.fft(item_sig, n=int(n_curr), axis=0))
            fft_gt = fft_gt / np.max(fft_gt, axis=0)
            fft_est = np.abs(np.fft.fft(item['est_ppgs'], n=int(n_curr), axis=0))
            fft_est = fft_est / np.max(fft_est, axis=0)
            fft_est = fft_est[self.l_freq_idx : self.u_freq_idx + 1]
            fft_rf = np.abs(np.fft.fft(item['rf_ppg'], n=int(n_curr), axis=0))
            fft_rf = fft_rf[self.l_freq_idx : self.u_freq_idx + 1]
            #Get full ffts
            rppg_fft = np.fft.fft(item['est_ppgs'], n=int(n_curr), axis=0)
            rf_fft = np.fft.fft(item['rf_ppg'], n=int(n_curr), axis=0)
            gt_fft = np.fft.fft(item_sig, n=int(n_curr), axis=0)
            if(self.window_rf):
                # Zero the RF spectrum outside a window centred on the rPPG
                # peak, clamping the centre so the window stays in range.
                center_idx = np.argmax(fft_est)
                window_size = self.rf_window_size
                if(center_idx - window_size <= 0):
                    center_idx = window_size + 1
                elif(center_idx + window_size + 1 >= len(fft_est)):
                    center_idx = len(fft_est) - window_size - 1
                mask = np.zeros_like(fft_rf)
                mask[center_idx-window_size:center_idx+window_size+1,:] = 1
                fft_rf = np.multiply(fft_rf, mask)
                fft_rf = fft_rf / np.max(fft_rf)
            else:
                fft_rf = fft_rf / np.max(fft_rf, axis=0)
            return {'est_ppgs':fft_est, 'rf_ppg':fft_rf, 'rppg_fft':rppg_fft, 'rf_fft':rf_fft, 'gt_fft':gt_fft, 'rgb_true': item['est_ppgs'], 'rf_true': item['rf_ppg'], 'start_idx': start_idx}, fft_gt[self.l_freq_idx : self.u_freq_idx + 1]
        else:
            # Time-domain mode: band-pass every signal, then re-normalize.
            item_sig = self.lowPassFilter(item_sig)
            item['est_ppgs'] = self.lowPassFilter(item['est_ppgs'])
            for i in range(item['rf_ppg'].shape[1]):
                item['rf_ppg'][:,i] = self.lowPassFilter(item['rf_ppg'][:,i])
            item_sig = (item_sig - np.mean(item_sig)) / np.std(item_sig)
            item['est_ppgs'] = (item['est_ppgs'] - np.mean(item['est_ppgs'])) / np.std(item['est_ppgs'])
            item['rf_ppg'] = (item['rf_ppg'] - np.mean(item['rf_ppg'], axis = 0)) / np.std(item['rf_ppg'], axis = 0)
            return item, np.array(item_sig)

    def lowPassFilter(self, BVP, butter_order=4):
        """Zero-phase Butterworth band-pass of BVP to [l_freq_bpm, u_freq_bpm]."""
        [b, a] = sig.butter(butter_order, [self.l_freq_bpm/60, self.u_freq_bpm/60], btype='bandpass', fs = self.fs)
        filtered_BVP = sig.filtfilt(b, a, np.double(BVP))
        return filtered_BVP
| UCLA-VMG/EquiPleth | nndl/data/datasets.py | datasets.py | py | 22,111 | python | en | code | 6 | github-code | 13 |
2856005308 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
from valan.framework import actor
from valan.framework import common
from valan.framework import eval_actor
from valan.framework import hyperparam_flags
from valan.r2r import custom_flags
from valan.r2r import ndh_problem
from valan.r2r import r2r_problem
from valan.r2r.multi_task import mt_problem
FLAGS = flags.FLAGS
def main(_):
  """Builds the configured VLN problem and runs a train or eval actor.

  Problem type comes from FLAGS.problem ('R2R', 'NDH', or the train-only
  multi-task 'R2R+NDH'); FLAGS.mode selects training vs evaluation.
  Raises ValueError for unknown problem types or multi-task eval.
  """
  logging.info('Total shards: %d; Current shard index: %d', FLAGS.num_tasks,
               FLAGS.task)
  runtime_config = common.RuntimeConfig(
      task_id=FLAGS.task, num_tasks=FLAGS.num_tasks)
  data_sources = FLAGS.data_source.split(',')
  # Get problem instance.
  if FLAGS.problem == 'R2R':
    problem = r2r_problem.R2RProblem(
        runtime_config,
        mode=FLAGS.mode,
        data_sources=data_sources,
        curriculum=FLAGS.curriculum,
        agent_type=FLAGS.agent_type)
  elif FLAGS.problem == 'NDH':
    problem = ndh_problem.NDHProblem(
        runtime_config,
        mode=FLAGS.mode,
        data_sources=data_sources,
        agent_type=FLAGS.agent_type)
  elif FLAGS.problem == 'R2R+NDH':
    # Multi-task problem-type during training only. Use task-specific problems
    # during eval.
    if FLAGS.mode != 'train':
      raise ValueError('Multi-tasking is only supported for training. '
                       'Use task-specific problems during eval.')
    problem = mt_problem.MTProblem(runtime_config, mode=FLAGS.mode)
  else:
    raise ValueError('Unsupported problem type encountered: {}'.format(
        FLAGS.problem))
  logging.info('Current mode is %s', FLAGS.mode)
  if FLAGS.mode == 'train':
    logging.info('Running train actor...')
    actor.run(problem)
  else:
    logging.info('Running eval actor...')
    eval_actor.run(
        problem,
        # Evaluate each path in the dataset exactly once.
        num_episodes_per_iter=problem.get_environment().num_paths,
        task_id=runtime_config.task_id)
if __name__ == '__main__':
app.run(main)
| google-research/valan | r2r/actor_main.py | actor_main.py | py | 2,165 | python | en | code | 69 | github-code | 13 |
38924394285 | from perf.kube_watcher.event.logged.logged_event import LoggedEvent
class NodeLoggedEvent(LoggedEvent):
    """A logged Kubernetes *node* event with timing metadata.

    Extends LoggedEvent with the node name, the event payload, and both the
    cluster-reported and local observation timestamps; the raw watcher event
    is kept for debugging.
    """
    def __init__(self, type, node_name, data, cluster_time, local_time, raw_event):
        super(NodeLoggedEvent, self).__init__(type)
        self.node_name = node_name  # name of the k8s node the event refers to
        self.data = data  # event payload/details
        self.cluster_time = cluster_time  # timestamp reported by the cluster
        self.local_time = local_time  # timestamp when observed locally
        self.raw_event = raw_event  # original watcher event (not shown in __str__)
    def __str__(self):
        # One-line human-readable summary; intentionally omits raw_event.
return f'[{self.local_time}][{self.node_name}] {self.type}, {self.data}, {self.cluster_time}' | dirtyValera/svoe | data_feed/perf/kube_watcher/event/logged/node_logged_event.py | node_logged_event.py | py | 540 | python | en | code | 12 | github-code | 13 |
14858699423 | from django.urls import path
from . import views
app_name = 'cart'
urlpatterns = [
path('', views.CartView.as_view(), name='summary'),
path('shop/', views.ProductListView.as_view(), name='product-list'),
path('shop/<slug>/', views.ProductDetailView.as_view(), name='product-detail'),
path("increase-quantity/<pk>", views.IncreaseQuantityView.as_view(), name='increase-quantity'),
path("decrease-quantity/<pk>", views.DecreaseQuantityView.as_view(), name='decrease-quantity'),
path("remove-from-cart/<pk>", views.RemoveFromView.as_view(), name='remove-from-cart'),
path('checkout/', views.CheckoutView.as_view(), name='checkout'),
path("payment/", views.PaymentView.as_view(), name='payment'),
]
| sudarshanmestha/ecom_simple_website | cart/urls.py | urls.py | py | 731 | python | en | code | 1 | github-code | 13 |
28833642079 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 24 11:34:12 2017
@author: GrinevskiyAS
"""
from __future__ import division
import numpy as np
from numpy import pi, sin, cos, tan
import matplotlib.pyplot as plt
from matplotlib import cm
# Load the layered anisotropic synthetic model: one row per depth sample.
# NOTE(review): absolute Windows path — machine-specific input location.
data_input=np.loadtxt(r"E:\Aspir_data\SynModel1\aniso_model_UsedInProject.dat")
# Model geometry: Nmodels anisotropic layers of thickness H_layer separated
# by H_between of background, sampled every dh (presumably metres — confirm).
Nmodels = 100
H_layer = 200
H_between = 400
dh = 2
# Unpack the model columns: depth, Vp, Vs, density, and the Thomsen
# parameters delta (de), epsilon (ep), gamma (ga).
depth = data_input[:,0]
vp = data_input[:,1]
vs = data_input[:,2]
dn = data_input[:,3]
de = data_input[:,4]
ep = data_input[:,5]
ga = data_input[:,6]
# Symmetry-axis azimuth: constant 90 deg per sample, converted to fi0 (rad).
az0 = np.zeros_like(depth, dtype = float)+90
fi0 = pi/2 - pi*az0 / 180
# Derived anisotropy/elastic logs.
eta = (ep-de)/(1+2*de)
mu = dn * vs**2
vsvp = vs/vp
vpvs = vp/vs
deta = np.insert(np.diff(eta),0,0)
Nd = len(depth)
# Acquisition template: azimuths every 22.5 deg, incidence angles every
# 5 deg; fi_list/th_list are the radian equivalents.
az_list = np.arange(0, 180, 22.5)
fi_list = pi/2 - pi*az_list / 180
ang_list = np.arange(0, 50, 5)
th_list = pi*ang_list/180
# Two-way time axis from the interval velocities (2*dh/vp scaled by 1000 —
# presumably ms with vp in m/s; confirm units). dt is the time sample rate.
starttime = 0
dt = 2
time = np.cumsum(2000*dh/vp)
time_fl = dt*np.floor(time/dt)
# Depth-domain and time-domain indices of each layer's top and bottom.
ind_d_top = np.floor(((np.arange(Nmodels))*(H_between + H_layer) + H_between)/dh).astype(int)
ind_d_bot = np.floor(((np.arange(Nmodels)+1)*(H_between + H_layer))/dh).astype(int)
times_top = time[ind_d_top]
times_bot = time[ind_d_bot]
ind_t_top = np.floor((times_top - starttime)/dt).astype(int)
ind_t_bot = np.floor((times_bot - starttime)/dt).astype(int)
ind1 = ind_d_top[1]
def ReflCoef(q):
    """Reflection-coefficient series for a property log.

    For each interface the coefficient is the contrast between adjacent
    samples normalised by their mean:
        rc[i] = 0.5 * (q[i] - q[i-1]) / ((q[i] + q[i-1]) / 2)
    A leading 0 is prepended so the output aligns sample-for-sample with
    the input.

    Parameters
    ----------
    q : 1-D array-like
        Property log (e.g. acoustic impedance vp*dn), length N.

    Returns
    -------
    numpy.ndarray of length N; element 0 is always 0.
    """
    # np.vstack replaces np.row_stack, a deprecated alias with identical
    # behaviour for 1-D inputs.
    rc = 0.5 * np.diff(q) / np.mean(np.vstack((q[1:], q[:-1])), axis=0)
    return np.hstack((0, rc))
r_zp = ReflCoef(vp*dn)
def ComputeRugerReflection(vp, vs, dn, de, ep, ga, fi0, fi_list, th_list):
    """Rüger-style azimuthal AVO reflectivity for a layered anisotropic model.

    Builds a cube of P-wave reflection coefficients using a three-term
    approximation R = r0 + r2*sin^2(theta) + r4*sin^2(theta)*tan^2(theta),
    where r2 and r4 depend on the azimuth *fi* relative to the symmetry-axis
    azimuth *fi0*.

    Parameters (1-D arrays over depth samples unless noted):
      vp, vs, dn -- P velocity, S velocity, density logs
      de, ep, ga -- Thomsen parameters delta, epsilon, gamma
      fi0        -- symmetry-axis azimuth per sample (radians)
      fi_list    -- azimuths to evaluate (radians)
      th_list    -- incidence angles to evaluate (radians)

    Returns an ndarray of shape (n_samples, len(fi_list), len(th_list)).
    """
    res = np.zeros((len(vp), len(fi_list), len(th_list)), dtype = float)
    # Normal-incidence reflectivity from acoustic impedance vp*dn.
    r0 = ReflCoef(vp * dn)
    mu = dn * vs**2
    # Interface-averaged velocities (first sample repeated so lengths match).
    # NOTE(review): np.row_stack is a deprecated alias of np.vstack.
    MnVp = np.hstack( (vp[0], np.mean(np.row_stack((vp[1:], vp[:-1])), axis = 0)) )
    MnVs = np.hstack( (vs[0], np.mean(np.row_stack((vs[1:], vs[:-1])), axis = 0)) )
    # Interface contrasts of the anisotropy parameters (leading 0 to align).
    dde = np.insert(np.diff(de), 0, 0)
    dep = np.insert(np.diff(ep), 0, 0)
    ga_vti = -ga/(1 + 2*ga)
    dga_vti = np.insert(np.diff(ga_vti), 0, 0)
    # terms for r2 (azimuth-independent parts hoisted out of the loop)
    part1 = 2 * ReflCoef(vp)
    part2 = (2*MnVs/MnVp)**2 * (2*ReflCoef(mu))
    part3 = dde + 8*(MnVs/MnVp)**2 * dga_vti
    for ifi, fi in enumerate(fi_list):
        r2 = 0.5 * (part1 - part2 + part3 * cos(fi - fi0)**2)
        r4 = 0.5 * (2 *ReflCoef(vp) + dep * cos(fi-fi0)**4 + dde * sin(fi-fi0)**2 * cos(fi-fi0)**2)
        for ith, th in enumerate(th_list):
            resij = r0 + r2*sin(th)**2 + r4 * sin(th)**2 * tan(th)**2
            res[:, ifi, ith] = resij
    return res
# Rüger azimuthal reflectivity cube: (depth sample, azimuth, incidence angle).
rugeramp = ComputeRugerReflection(vp, vs, dn, de, ep, ga, fi0, fi_list, th_list)
def ComputeMesdagReflection(vp, vs, dn, de, ep, ga, fi0, fi_list, th_list):
    """Azimuthal AVO reflectivity via Mesdag-style azimuth-corrected logs.

    For each azimuth the isotropic logs are rescaled with the interface-mean
    anisotropy ratios (der/epr/gar), then an Aki–Richards-style three-term
    reflectivity is evaluated per incidence angle.

    Same argument conventions as ComputeRugerReflection; returns an ndarray
    of shape (n_samples, len(fi_list), len(th_list)).
    NOTE(review): the output is sized from the module-level global ``r_zp``
    rather than from ``vp`` — works only while the two have equal length.
    """
    res = np.zeros((len(r_zp), len(fi_list), len(th_list)), dtype = float)
    # Interface means of the Thomsen parameters (first sample repeated).
    # NOTE(review): np.row_stack is a deprecated alias of np.vstack.
    mn_de = np.hstack( (de[0], np.mean(np.row_stack((de[1:], de[:-1])), axis = 0)) )
    mn_ep = np.hstack( (ep[0], np.mean(np.row_stack((ep[1:], ep[:-1])), axis = 0)) )
    mn_ga = np.hstack( (ga[0], np.mean(np.row_stack((ga[1:], ga[:-1])), axis = 0)) )
    # Relative anisotropy ratios used to rescale the logs per azimuth.
    der = (de + 1 - mn_de) / (1 - mn_de)
    epr = (ep + 1 - mn_ep) / (1 - mn_ep)
    gar = (ga + 1 - mn_ga) / (1 - mn_ga)
    K = (vs/vp)**2
    Kcoef = (4*K+1)/(8*K)
    for ifi, fi in enumerate(fi_list):
        cos2az = cos(fi - fi0)**2
        # Azimuth-corrected velocity and density logs.
        vp_az = vp * der**cos2az * (epr/der)**(cos2az**2)
        vs_az = vs * (np.sqrt(der)/gar)**cos2az * (epr/der)**(Kcoef*cos2az**2)
        dn_az = dn * der**(-cos2az) * (epr/der)**(-cos2az**2)
        for ith, th in enumerate(th_list):
            # Three-term reflectivity from the corrected logs.
            r0 = ReflCoef(vp_az) + ReflCoef(dn_az)
            r2 = ReflCoef(vp_az) - 2 * (vs_az/vp_az)**2 * (4*ReflCoef(vp_az) + 2*ReflCoef(dn_az))
            r4 = ReflCoef(vp_az)
            resij = r0 + r2 * sin(th)**2 + r4 * (tan(th)**2 - sin(th)**2)
            res[:, ifi, ith] = resij
    return res
# Aki-Richards/Mesdag azimuthal reflectivity cube, same layout as rugeramp.
mesdagamp = ComputeMesdagReflection(vp, vs, dn, de, ep, ga, fi0, fi_list, th_list)
def PlotRugerAmp(ax, amp, ind, ang_list, az_list, cmap = cm.Spectral_r, vid = 'Az'):
    """Plot AVAZ amplitudes from a (depth, azimuth, angle) cube on *ax*.

    Parameters
    ----------
    ax : matplotlib Axes to draw on.
    amp : ndarray, shape (n_depth, n_azimuth, n_angle) reflectivity cube.
    ind : int -- depth-sample index to plot.
    ang_list : incidence angles (deg), third cube axis.
    az_list : azimuths (deg), second cube axis.
    cmap : matplotlib colormap used to colour the curves.
    vid : 'Az' -> amplitude vs azimuth (one curve per angle);
          'An' -> amplitude vs angle (one curve per azimuth).
    """
    N_ang = len(ang_list)
    # One colour per incidence angle, evenly sampled from the colormap
    # (hoisted: both branches computed the identical list).
    # NOTE(review): the 'An' branch indexes these colours by azimuth index,
    # which only works while len(az_list) <= len(ang_list) -- confirm intent.
    colors = [cmap(x) for x in np.linspace(0.0, 1.0, N_ang)]
    if vid == 'Az':
        data_plot = amp[ind, :, :].T
        az_list_plot = az_list
        # Close the curve over a full 180-deg span if the azimuth sampling
        # does not already cover it (azimuths are periodic over 180 deg).
        if not (abs(az_list[-1] - az_list[0]) == 180):
            az_list_plot = np.hstack((az_list, az_list[0] + 180))
            data_plot = np.column_stack((data_plot, data_plot[:,0]))
        for i, ang in enumerate(ang_list):
            ax.plot(az_list_plot, data_plot[i,:], marker = 'o', markerfacecolor=colors[i], markersize = 9, markeredgecolor = 'None',
                    linewidth = 0.5, color = colors[i], label = str(int(ang)))
        handles, labels = ax.get_legend_handles_labels()
        # (removed no-op [::1] slices from the original)
        ax.legend(handles, labels, ncol=int(N_ang/2), loc='best', prop = {'size': 12})
    elif vid == 'An':
        data_plot = amp[ind, :, :]
        for i, azi in enumerate(az_list):
            ax.plot(ang_list, data_plot[i,:], marker = 'o', markerfacecolor=colors[i], markersize = 6, markeredgecolor = 'None',
                    linewidth = 1.5, color = colors[i], label = str(azi))
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(handles, labels, loc='best', prop = {'size': 12}, framealpha=0.3)
fgr = plt.figure(facecolor= 'white', figsize = [18,10])
ax_an = fgr.add_subplot(121)
ax_az = fgr.add_subplot(122)
fgr.canvas.set_window_title('Ruger, computed')
PlotRugerAmp(ax_az, rugeramp, ind_d_top[4], ang_list, az_list, cmap = cm.Accent, vid = 'Az')
PlotRugerAmp(ax_an, rugeramp, ind_d_top[4], ang_list, az_list, cmap = cm.Accent, vid = 'An')
fgr.tight_layout()
fgr_akir = plt.figure(facecolor= 'white', figsize = [18,10])
ax_an_akir = fgr_akir.add_subplot(121)
ax_az_akir = fgr_akir.add_subplot(122)
fgr_akir.canvas.set_window_title('Aki-Richards + Mesdag, computed')
PlotRugerAmp(ax_az_akir, mesdagamp, ind_d_top[4], ang_list, az_list, cmap = cm.Accent, vid = 'Az')
PlotRugerAmp(ax_an_akir, mesdagamp, ind_d_top[4], ang_list, az_list, cmap = cm.Accent, vid = 'An')
| antongrin/AniBox | MesgagRugerAppr.py | MesgagRugerAppr.py | py | 6,408 | python | en | code | 1 | github-code | 13 |
14893222883 | import requests
import pandas as pd
import os
api_key = os.getenv('api_key')
companies = ['AMZN','AAPL','MSFT', 'TSLA']
BS_over_time = ''
BS_companies = ''
def balance_sheet(quarter,company,type_analysis):
    """Fetch quarterly statements for *company* and derive balance-sheet ratios.

    Pulls the balance sheet (BS) and income statement (IS) from the
    financialmodelingprep API (authenticated via the module-level
    ``api_key``) and computes liquidity, leverage and activity ratios for
    the given *quarter* (0 = most recent). Trailing-twelve-month figures
    use the four quarters starting at *quarter*; averages pair the current
    balance with the one a year earlier (quarter + 4), so the API must
    return at least quarter + 5 quarters.

    Parameters
    ----------
    quarter : int -- index of the quarter to analyse (0 = latest).
    company : str -- ticker symbol.
    type_analysis : str -- 'time' keys the result by the statement date
        (time-series analysis); anything else keys it by the ticker
        (cross-company analysis).

    Returns
    -------
    dict -- {key: {metric_name: value}} with a single key.
    """
    BS = requests.get(f'https://financialmodelingprep.com/api/v3/balance-sheet-statement/{company}?period=quarter&limit=20&apikey={api_key}').json()
    IS = requests.get(f'https://financialmodelingprep.com/api/v3/income-statement/{company}?period=quarter&limit=20&apikey={api_key}').json()
    # The two analysis modes differed only in the dictionary key; the ratio
    # formulas are identical, so compute them once (deduplicated).
    key = BS[quarter]['date'] if type_analysis == 'time' else company
    # Trailing-twelve-month aggregates (four consecutive quarters).
    ttm_revenue = (IS[quarter]['revenue'] + IS[quarter+1]['revenue']
                   + IS[quarter+2]['revenue'] + IS[quarter+3]['revenue'])
    ttm_cogs = (IS[quarter]['costOfRevenue'] + IS[quarter+1]['costOfRevenue']
                + IS[quarter+2]['costOfRevenue'] + IS[quarter+3]['costOfRevenue'])
    metrics = {}
    metrics['working_capital'] = BS[quarter]['totalCurrentAssets'] - BS[quarter]['totalCurrentLiabilities']
    metrics['current_ratio'] = BS[quarter]['totalCurrentAssets'] / BS[quarter]['totalCurrentLiabilities']
    metrics['cash_ratio'] = BS[quarter]['cashAndShortTermInvestments'] / BS[quarter]['totalCurrentLiabilities']
    metrics['LTDebttoEquity'] = BS[quarter]['longTermDebt'] / BS[quarter]['totalStockholdersEquity']
    metrics['DebttoEquity'] = BS[quarter]['totalDebt'] / BS[quarter]['totalStockholdersEquity']
    metrics['Financial Leverage'] = BS[quarter]['totalAssets'] / BS[quarter]['totalStockholdersEquity']
    # Turnover ratios use the average of the current and year-ago balances.
    metrics['Receivable Turnover'] = ttm_revenue / ((BS[quarter]['netReceivables'] + BS[quarter+4]['netReceivables']) / 2)
    metrics['Day_Sales_Outstanding'] = 365 / metrics['Receivable Turnover']
    metrics['Inventory_Turnover'] = ttm_cogs / ((BS[quarter]['inventory'] + BS[quarter+4]['inventory']) / 2)
    metrics['DOH'] = 365 / metrics['Inventory_Turnover']
    metrics['Asset_Turnover'] = ttm_revenue / ((BS[quarter]['totalAssets'] + BS[quarter+4]['totalAssets']) / 2)
    return {key: metrics}
# Single ticker: build a time series of ratios over the last five quarters.
if len(companies) == 1:
    #Time Serie Analysis
    print(companies[0])
    recent_BS = balance_sheet(0,companies[0],'time')
    one_quarter_ago = balance_sheet(1,companies[0],'time')
    two_quarter_ago = balance_sheet(2,companies[0],'time')
    three_quarter_ago = balance_sheet(3,companies[0],'time')
    four_quarter_ago = balance_sheet(4,companies[0],'time')
    Balance_sheet_all = []
    Balance_sheet_all.append(recent_BS)
    Balance_sheet_all.append(one_quarter_ago)
    Balance_sheet_all.append(two_quarter_ago)
    Balance_sheet_all.append(three_quarter_ago)
    Balance_sheet_all.append(four_quarter_ago)
    # NOTE(review): bare expression below has no effect (notebook leftover).
    Balance_sheet_all
    #convert nested dictionaries to dataframe
    BS_over_time = pd.concat([pd.DataFrame(l) for l in Balance_sheet_all],axis=1).T
    #Cross Company Analysis
else:
    # Several tickers: compare the most recent quarter across companies.
    # NOTE(review): in this branch BS_over_time keeps its '' initial value,
    # so the final print(BS_over_time) emits an empty string.
    Balance_sheet_all = []
    for company in companies:
        BS_company = balance_sheet(0,company,'')
        Balance_sheet_all.append(BS_company)
    BS_companies = pd.concat([pd.DataFrame(l) for l in Balance_sheet_all],axis=1).T
    print(BS_companies)
print(BS_over_time) | PvrpleBlvck/Escuela | blvckfinance/balance_sheet_ratios/main.py | main.py | py | 4,980 | python | en | code | 2 | github-code | 13 |
17040281354 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayEcoEprintTaskSubmitModel(object):
    """Request model for the alipay.eco.eprint.task.submit API.

    Generated-SDK-style value object: one private field per API parameter,
    exposed through plain properties, plus the (de)serialisation helpers
    ``to_alipay_dict`` and ``from_alipay_dict``.
    """
    def __init__(self):
        self._client_id = None
        self._client_secret = None
        self._content = None
        self._eprint_token = None
        self._machine_code = None
        self._origin_id = None

    @property
    def client_id(self):
        return self._client_id

    @client_id.setter
    def client_id(self, value):
        self._client_id = value
    @property
    def client_secret(self):
        return self._client_secret

    @client_secret.setter
    def client_secret(self, value):
        self._client_secret = value
    @property
    def content(self):
        return self._content

    @content.setter
    def content(self, value):
        self._content = value
    @property
    def eprint_token(self):
        return self._eprint_token

    @eprint_token.setter
    def eprint_token(self, value):
        self._eprint_token = value
    @property
    def machine_code(self):
        return self._machine_code

    @machine_code.setter
    def machine_code(self, value):
        self._machine_code = value
    @property
    def origin_id(self):
        return self._origin_id

    @origin_id.setter
    def origin_id(self, value):
        self._origin_id = value

    def to_alipay_dict(self):
        """Serialise to a plain dict; unset (falsy) fields are omitted and
        nested models serialise themselves via their own to_alipay_dict."""
        params = dict()
        if self.client_id:
            if hasattr(self.client_id, 'to_alipay_dict'):
                params['client_id'] = self.client_id.to_alipay_dict()
            else:
                params['client_id'] = self.client_id
        if self.client_secret:
            if hasattr(self.client_secret, 'to_alipay_dict'):
                params['client_secret'] = self.client_secret.to_alipay_dict()
            else:
                params['client_secret'] = self.client_secret
        if self.content:
            if hasattr(self.content, 'to_alipay_dict'):
                params['content'] = self.content.to_alipay_dict()
            else:
                params['content'] = self.content
        if self.eprint_token:
            if hasattr(self.eprint_token, 'to_alipay_dict'):
                params['eprint_token'] = self.eprint_token.to_alipay_dict()
            else:
                params['eprint_token'] = self.eprint_token
        if self.machine_code:
            if hasattr(self.machine_code, 'to_alipay_dict'):
                params['machine_code'] = self.machine_code.to_alipay_dict()
            else:
                params['machine_code'] = self.machine_code
        if self.origin_id:
            if hasattr(self.origin_id, 'to_alipay_dict'):
                params['origin_id'] = self.origin_id.to_alipay_dict()
            else:
                params['origin_id'] = self.origin_id
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for empty input."""
        if not d:
            return None
        o = AlipayEcoEprintTaskSubmitModel()
        if 'client_id' in d:
            o.client_id = d['client_id']
        if 'client_secret' in d:
            o.client_secret = d['client_secret']
        if 'content' in d:
            o.content = d['content']
        if 'eprint_token' in d:
            o.eprint_token = d['eprint_token']
        if 'machine_code' in d:
            o.machine_code = d['machine_code']
        if 'origin_id' in d:
            o.origin_id = d['origin_id']
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayEcoEprintTaskSubmitModel.py | AlipayEcoEprintTaskSubmitModel.py | py | 3,442 | python | en | code | 241 | github-code | 13 |
26057750464 | # Imports
from PyQt5.QtWidgets import QToolBar, QComboBox, QAction, QLineEdit
from PyQt5.QtGui import QDoubleValidator
# Classes
class ObjectSelector(QToolBar):
    """Toolbar for choosing the tracked object and toggling overlay widgets.

    Bridges an object controller (object creation/selection) and an overlay
    controller (calibration ruler, reference axes, magnifier, auto-increment)
    by exposing their state as toolbar actions.
    """
    def __init__(self, object_controller, overlay_controller):
        super().__init__()
        self._object_controller = object_controller
        self._overlay_controller = overlay_controller
        self._overlay_controller.register_toolbar(self)
        # Guards against feedback loops while the combobox is repopulated.
        self._ignore_change = False

        self._create_actions()

    def _create_actions(self):
        """
        Create and display all of the toolbar actions.
        """
        # Object creation + selection combobox.
        self._new_object_action = QAction('New Object')
        self._new_object_action.triggered.connect(self._create_object)
        self.addAction(self._new_object_action)

        self._object_list = QComboBox()
        self._object_list.currentTextChanged.connect(self._text_changed)
        self._update_object_names()
        self.addWidget(self._object_list)

        self.addSeparator()

        # Checkable overlay toggles; setChecked(True) also fires the toggled
        # signal, so each overlay starts visible/enabled.
        self._ruler_action = QAction('Calibration Ruler')
        self._ruler_action.setCheckable(True)
        self._ruler_action.toggled.connect(
            self._overlay_controller.set_ruler_visibility)
        self._ruler_action.setChecked(True)
        self.addAction(self._ruler_action)

        self._axes_action = QAction('Reference Axes')
        self._axes_action.setCheckable(True)
        self._axes_action.toggled.connect(
            self._overlay_controller.set_axes_visibility)
        self._axes_action.setChecked(True)
        self.addAction(self._axes_action)

        # Text entry for the reference-axes angle (degrees, float-validated).
        self._axes_angle = QLineEdit()
        double_validator = QDoubleValidator()
        self._axes_angle.setValidator(double_validator)
        self._axes_angle.setFixedWidth(75)
        self._axes_angle.editingFinished.connect(self._set_reference_angle)
        self._axes_angle.setText('0.0')
        self.addWidget(self._axes_angle)

        self._zoom_action = QAction('Magnifying Glass')
        self._zoom_action.setCheckable(True)
        self._zoom_action.toggled.connect(
            self._overlay_controller.set_zoom_visibility)
        self._zoom_action.setChecked(True)
        self.addAction(self._zoom_action)

        self._inc_action = QAction('Auto-Increment')
        self._inc_action.setCheckable(True)
        self._inc_action.toggled.connect(
            self._overlay_controller.set_auto_increment)
        self._inc_action.setChecked(True)
        self.addAction(self._inc_action)

    def _set_reference_angle(self):
        """
        Sets the reference angle to the one in the textbox.
        """
        # The controller receives the negated angle; the textbox is rewritten
        # with the rounded value so the display stays normalised.
        angle = round(float(self._axes_angle.text()), 2)
        self._overlay_controller.set_reference_angle(-angle)
        self._axes_angle.setText(str(angle))

    def update_reference_angle(self, angle):
        """
        Changes the textbox to be the given angle.

        :param angle: The angle (deg) to set the angle textbox to.
        """
        self._axes_angle.setText(str(round(angle, 2)))

    def _create_object(self, triggered):
        """
        Creates a new object in the object controller.
        """
        self._object_controller.create_object()

    def _update_object_names(self):
        """
        Updates all of the available objects in the combobox to match the ones
        in the object controller.
        """
        # Suppress _text_changed while the combobox is cleared/repopulated.
        self._ignore_change = True

        current_object = self._object_list.currentText()

        self._object_list.clear()

        object_names = self._object_controller.get_object_names()
        self._object_list.addItems(object_names)

        try:
            self._object_list.setCurrentIndex(object_names.index(current_object))
        except ValueError:
            pass  # The object no longer exists (it was deleted)

        self._ignore_change = False

        # Fall back to the first object when nothing is currently selected.
        if self._object_controller.get_current_object() is None and \
                len(object_names) > 0:
            self._object_controller.set_current_object(object_names[0], False)

    def update(self):
        """
        Updates the toolbar.
        """
        # NOTE(review): shadows QWidget.update() with different semantics —
        # confirm no caller relies on the base-class repaint behaviour.
        self._update_object_names()
        # TODO: Update reference angle and visibility

    def _text_changed(self, new_text):
        """
        Sets the current object to the new text.

        :param new_text: The name of the current object to set.
        """
        if not self._ignore_change:
            self._object_controller.set_current_object(new_text)
| Benjymack/video-tracker | video_tracker/object_model/object_selector.py | object_selector.py | py | 4,454 | python | en | code | 2 | github-code | 13 |
27250609439 | # projet_calcul
# crée une calculatrice en phyton
import tkinter as tk
# Window geometry and button padding constants.
WIDTH, HEIGHT = 300, 50
pad_x = 50
pad_y = 0
root = tk.Tk()
root.title(" Calculatrice ") # set the window title
# NOTE(review): this canvas is created but never packed/gridded — unused.
canvas = tk.Canvas(root, bg="red", height=HEIGHT, width=WIDTH)
# Global calculator state: nb1 = operand being typed, nb2 = stored operand,
# op = pending operator code (1:+, 2:-, 3:*, 4:/), cpt = decimal points typed.
nb1 = ""
nb2 = ""
op = 0
cpt = 0
# les fonction
# --- calculator logic ---------------------------------------------------
# Shared state (defined above): nb1 = digits being typed, nb2 = stored
# operand, op = pending operator code (1:+, 2:-, 3:*, 4:/), cpt = number of
# decimal points typed in the current operand.
def ajoute(nb):
    """Append the character *nb* to the operand currently being typed."""
    global nb1
    nb1 += nb

def _make_digit(digit):
    """Build the callback for one keypad digit (appends it and refreshes)."""
    def press():
        ajoute(digit)
        label.config(text=nb1)
    return press

# Keep the original per-digit names: the buttons and key bindings below
# reference them directly.
num1 = _make_digit("1")
num2 = _make_digit("2")
num3 = _make_digit("3")
num4 = _make_digit("4")
num5 = _make_digit("5")
num6 = _make_digit("6")
num7 = _make_digit("7")
num8 = _make_digit("8")
num9 = _make_digit("9")
num0 = _make_digit("0")

def virgule():
    """Append a decimal point (at most one per operand)."""
    global cpt
    if cpt == 0:
        ajoute(".")
        cpt += 1
    label.config(text=nb1)

def clear():
    """Reset the operand being typed and the decimal-point counter."""
    global nb1, cpt
    nb1 = ""
    cpt = 0
    label.config(text=nb1)

def egale():
    """Apply the pending operator to nb2 and nb1 and display the result."""
    global nb1, nb2, op, cpt
    # Bug fix: pressing '=' with no pending operator or operand used to
    # crash (NameError on resultat / ValueError on float("")); ignore it.
    if op == 0 or nb1 == "":
        return
    nb1 = float(nb1)
    if op == 1:
        resultat = round(nb2 + nb1, 9)
    elif op == 2:
        resultat = round(nb2 - nb1, 9)
    elif op == 3:
        resultat = round(nb2 * nb1, 9)
    else:  # op == 4
        resultat = round(nb2 / nb1, 9)
    label.config(text=resultat)
    nb1 = ""
    nb2 = ""
    op = 0
    cpt = 0

def _make_operator(code, symbol):
    """Build an operator callback: store the operand/op code, show *symbol*."""
    def press():
        global nb1, nb2, op, cpt
        nb2 = float(nb1)
        nb1 = ""
        op = code
        cpt = 0
        label.config(text=symbol)
    return press

plus = _make_operator(1, " + ")
moins = _make_operator(2, " - ")
multi = _make_operator(3, " * ")
div = _make_operator(4, " / ")
# Début du code
# le label (ligne de calcul)
# The display label (calculation line), spanning the first three columns.
label = tk.Label(root, text=" 0 ", font=("helvetica", "30"))
label.grid(row=0, column=0, columnspan=3)
# Digit buttons laid out as a standard keypad (rows 1-4, columns 0-2).
b_1 = tk.Button(root, text=" 1 ", padx=pad_x, pady=pad_y, font=("helvetica", "20"), command=num1)
b_1.grid(row=3, column=0)
b_2 = tk.Button(root, text=" 2 ", padx=pad_x, pady=pad_y, font=("helvetica", "20"), command=num2)
b_2.grid(row=3, column=1)
b_3 = tk.Button(root, text=" 3 ", padx=pad_x, pady=pad_y, font=("helvetica", "20"), command=num3)
b_3.grid(row=3, column=2)
b_4 = tk.Button(root, text=" 4 ", padx=pad_x, pady=pad_y, font=("helvetica", "20"), command=num4)
b_4.grid(row=2, column=0)
b_5 = tk.Button(root, text=" 5 ", padx=pad_x, pady=pad_y, font=("helvetica", "20"), command=num5)
b_5.grid(row=2, column=1)
b_6 = tk.Button(root, text=" 6 ", padx=pad_x, pady=pad_y, font=("helvetica", "20"), command=num6)
b_6.grid(row=2, column=2)
b_7 = tk.Button(root, text=" 7 ", padx=pad_x, pady=pad_y, font=("helvetica", "20"), command=num7)
b_7.grid(row=1, column=0)
b_8 = tk.Button(root, text=" 8 ", padx=pad_x, pady=pad_y, font=("helvetica", "20"), command=num8)
b_8.grid(row=1, column=1)
b_9 = tk.Button(root, text=" 9 ", padx=pad_x, pady=pad_y, font=("helvetica", "20"), command=num9)
b_9.grid(row=1, column=2)
b_0 = tk.Button(root, text=" 0 ", padx=pad_x, pady=pad_y, font=("helvetica", "20"), command=num0)
b_0.grid(row=4, column=0)
# Decimal point, clear, equals and the four operator buttons (column 4).
b_virgule = tk.Button(root, text=" . ", padx=pad_x, pady=pad_y, font=("helvetica", "20"), command=virgule)
b_virgule.grid(row=4, column=1)
b_clear = tk.Button(root, text=" C ", padx=pad_x, pady=pad_y, font=("helvetica", "20"), command=clear)
b_clear.grid(row=0, column=4)
# NOTE(review): "helvitica" below is a typo for "helvetica" — Tk silently
# substitutes a default font; left unchanged here (runtime string).
b_egale = tk.Button(root, text=" = ", bg="black", fg="red", padx=pad_x, pady=pad_y, font=("helvitica", "20"), command=egale)
b_egale.grid(row=4, column=2)
b_plus = tk.Button(root, text=" + ", bg="red", padx=pad_x, pady=pad_y, font=("helvetica", "20"), command=plus)
b_plus.grid(row=4, column=4)
b_moins = tk.Button(root, text=" - ", bg="red", padx=pad_x, pady=pad_y, font=("helvetica", "20"), command=moins)
b_moins.grid(row=3, column=4)
b_multi = tk.Button(root, text=" * ", bg="red", padx=pad_x, pady=pad_y, font=("helvetica", "20"), command=multi)
b_multi.grid(row=2, column=4)
b_div = tk.Button(root, text=" / ", bg="red", padx=pad_x, pady=pad_y, font=("helvetica", "20"), command=div)
b_div.grid(row=1, column=4)
# --- keyboard handlers --------------------------------------------------
def _ignore_event(action):
    """Adapt a zero-argument calculator action to a Tk key-event callback."""
    def on_key(event):
        action()
    return on_key

# Same names as before so the root.bind(...) calls below keep working.
# Bug fix: the original clav_6 evaluated `num6` without calling it, so the
# '6' key silently did nothing.
clav_1 = _ignore_event(num1)
clav_2 = _ignore_event(num2)
clav_3 = _ignore_event(num3)
clav_4 = _ignore_event(num4)
clav_5 = _ignore_event(num5)
clav_6 = _ignore_event(num6)
clav_7 = _ignore_event(num7)
clav_8 = _ignore_event(num8)
clav_9 = _ignore_event(num9)
clav_0 = _ignore_event(num0)
clav_egale = _ignore_event(egale)
clav_point = _ignore_event(virgule)
clav_plus = _ignore_event(plus)
clav_moins = _ignore_event(moins)
clav_multi = _ignore_event(multi)
clav_div = _ignore_event(div)
# Keyboard bindings mirroring the on-screen buttons.
# NOTE(review): Tk event names are case-sensitive — "<return>" below should
# presumably be "<Return>", and the punctuation details ("+", "-", "*", "/",
# ".") may need keysym names (plus, minus, asterisk, slash, period);
# confirm these bind without TclError on the target platform.
root.bind("<KeyPress-1>", clav_1)
root.bind("<KeyPress-2>", clav_2)
root.bind("<KeyPress-3>", clav_3)
root.bind("<KeyPress-4>", clav_4)
root.bind("<KeyPress-5>", clav_5)
root.bind("<KeyPress-6>", clav_6)
root.bind("<KeyPress-7>", clav_7)
root.bind("<KeyPress-8>", clav_8)
root.bind("<KeyPress-9>", clav_9)
root.bind("<KeyPress-0>", clav_0)
root.bind("<return>", clav_egale)
root.bind("<KeyPress-.>", clav_point)
root.bind("<KeyPress-+>", clav_plus)
root.bind("<KeyPress-->", clav_moins)
root.bind("<KeyPress-*>", clav_multi)
root.bind("<KeyPress-/>", clav_div)
# fin
root.mainloop() | uvsq22011110/projet_calcul | projet_calcul.py | projet_calcul.py | py | 5,601 | python | en | code | 1 | github-code | 13 |
11659124463 | # checks that users enter a valid response (e.g. yes / no
# cash/credit) based on the list of options
def string_checker(question, num_letters, valid_responses):
    """Ask *question* until the (lower-cased) reply matches a valid response.

    A reply matches if it equals a valid response or that response's first
    *num_letters* letters. Returns the full matched response.
    Generalised: the error message now lists every option instead of only
    the first two (output is unchanged for two-option lists).
    """
    error = "Please choose " + " or ".join(valid_responses)
    while True:
        response = input(question).lower()
        for item in valid_responses:
            # accept the full word or its first num_letters abbreviation
            if response == item[:num_letters] or response == item:
                return item
        print(error)
# the main routine starts here
# Demo: exercise string_checker five times each with a yes/no question
# (1-letter abbreviations) and a payment question (2-letter abbreviations).
yes_no_list = ["yes", "no"]
payment_list = ["cash", "credit"]
for case in range(0, 5):
    want_instructions = string_checker("Do you want the instructions (y/n): ", 1, yes_no_list)
    print("You chose", want_instructions)
for case in range(0, 5):
    payment_method = string_checker("payment method?: ", 2, payment_list)
    print("You chose", payment_method)
| arthurbykov/programming | 07_string_checker.py | 07_string_checker.py | py | 886 | python | en | code | 0 | github-code | 13 |
39804000080 | #!/usr/bin/env python
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Fill in your name using the given format
your_name = "Patni, Nikhil"
# In[2]:
# For use in colab
from IPython import get_ipython
if 'google.colab' in str(get_ipython()):
get_ipython().system('pip install openml --quiet')
get_ipython().system('pip install category_encoders --quiet')
get_ipython().system('pip install dirty_cat --quiet')
# # Assignment 2
# In this assignment we will focus on handling somewhat 'messy' real-world datasets that require thoughtful preprocessing, rather than just a grid search over all possible models. We will use the [Employee Salary dataset](https://www.openml.org/d/42125), which contains information about the salaries of all people working in a local government in the USA. In the end we will aim to predict salaries, and study whether there are certain biases in the data (or in our models) that we need to be aware of and how to avoid them when training models.
# In[3]:
# imports
# get_ipython().run_line_magic('matplotlib', 'inline')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import openml
import time
start_time = time.time()
# ignore future warnings
from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
# In[4]:
# Pre-flight checklist. Do not change this code.
# Make sure that you have installed recent versions of key packages.
# You could lose points if these checks do not pass.
from packaging import version
import sklearn
import category_encoders
import seaborn
import sys
# install dirty_cat if needed
if 'dirty_cat' not in sys.modules:
get_ipython().system('pip install dirty_cat --quiet')
sklearn_version = sklearn.__version__
catencoder_version = category_encoders.__version__
if version.parse(sklearn_version) < version.parse("1.0.2"):
print("scikit-learn is outdated. Please update now!")
elif version.parse(catencoder_version) < version.parse("2.0.0"):
print("category_encoders is outdated. Please update now!")
else:
print("OK. You may continue :)")
# In[5]:
# Download Employee Salary data. Do not change this code!
# Note that X is a pandas dataframe
salary = openml.datasets.get_dataset(42125)
X, y, _, feat_names = salary.get_data(target=salary.default_target_attribute)
# ### Exploring the data
# A first useful step is to take a closer look at the dataset and how the features are distributed.
# In[6]:
# Peek at the remaining data
X
# The labels (y) contain the salaries for 2017.
# In[7]:
y
# The first thing we notice is that there are missing values in the input data (but not in the labels). Let's see how bad it is, and which features have the most missing values.
# In[8]:
# Check the column data types and missing data
# Some features have significant amounts of missing values
X.info()
# Some categorical columns have a large number of possible values. Especially the job position titles can take many different values, with a highly skewed distribution. This will be tricky to handle.
# In[9]:
# Some values (job positions) are much more frequent than others
X['employee_position_title'].value_counts().plot(kind='barh', figsize=(5,70));
# There are a few numeric features as well, with different distributions.
# In[10]:
# Distributions of numeric data
X.hist(layout=(20,4), figsize=(20,50));
# Let's see how gender, experience (year first hired) and previous salary correlate with each other and the current salary. There are a few outliers visible. Salaries seem to correlate with last year's salary, but not so much with year of first hire. The effect of gender may require more study.
#
# In[11]:
import seaborn as sns
subset = ['gender','year_first_hired','2016_gross_pay_received']
X_sub=X[subset].copy()
# Gender is not numeric, but we can make it numeric (0/1) for this plot
X_sub['gender'] = X_sub['gender'].astype('category').cat.codes
X_sub['salary'] = y
sns.set(style="ticks")
sns.pairplot(X_sub, hue="salary");
# ## Part 1: Data cleaning (12 points)
# ### Question 1.1: A simple pipeline (4 points)
# We will first need to build a machine learning pipeline to minimally preprocess the data and analyse models in a meaningful way, while avoiding data leakage in the evaluation. Implement a function `simple_pipeline` that returns an sklearn pipeline that preprocesses the data in a minimal way before running a given algorithm:
# - Categorical features:
# - Impute missing values by replacing them with the most frequent value for that feature
# - Perform one-hot encoding. Use `sparse=False` to avoid that it returns a sparse datasets. Use `handle_unknown='ignore'` to ignore categorical values that where not seen during training.
# - Numeric features:
# - Replace missing values with '0'. This seems most logical since a missing salary or overtime pay likely means that it was 0.
#
# Note that you only need to build the pipeline, not fit it on the data. You are given the data X, but you cannot use it to train any models.
# In[12]:
# Implement
from sklearn.preprocessing import StandardScaler, MinMaxScaler, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.compose import ColumnTransformer
def simple_pipeline(X, model):
""" Returns a minimal pipeline that imputes missing values and does one-hot-
encoding for categorical features
Keyword arguments:
X -- The input data. Only used to identify features types (eg. numeric/
categorical), not for training the pipeline.
model -- any scikit-learn model (e.g. regressor or classifier)
Returns: an (untrained) scikit-learn pipeline which preprocesses the data
and then runs the classifier
"""
# List of numerical features. You get this for free :)
numerical = X.select_dtypes(exclude=["category","object"]).columns.tolist()
# categorical = X.select_dtypes(include=["category","object"]).columns.tolist()
numerical_pipe = Pipeline([
('imputer', SimpleImputer(strategy="constant",fill_value=0))])
categorical_encoder = Pipeline([
('imputer', SimpleImputer(strategy='most_frequent')),
('onehot', OneHotEncoder(sparse=False, handle_unknown='ignore'))])
preprocessor = ColumnTransformer([
('num', numerical_pipe, numerical)],remainder=categorical_encoder)
return Pipeline([('preprocess', preprocessor), ('model', model)])
# #### Sanity check
# To be correct, this pipeline should be able to fit any model without error. Uncomment and run this code to do a sanity check.
# In[13]:
from sklearn.tree import DecisionTreeRegressor
simple_pipeline(X, DecisionTreeRegressor()).fit(X,y)
# In[14]:
X
# ### Question 1.2: A simple wrapper (4 points)
# Use a simple wrapping approach to find unnecessary features using backward selection:
# * Implement a function 'backward_selection' that selects features using backward selection, each time removing the least useful feature. In the end it should return a list with the removed features.
# * 'Least useful' is decided based on the performance of the given pipeline (the `simple_pipeline` that you implemented) together with a 3-nearest neighbor model.
# * For the evaluation, use 3-fold shuffled crossvalidation and r2 score (coefficient of determination), using the `simple_pipeline` that you implemented. Use `random_state=0` for the shuffling. To speed things up, you can use training sets of 40% of the data and test sets of 10% of the data.
# * Don't actually change the input data X. Make a copy if you want to remove features.
# * As shown below, run your model and return a list with the 5 removed features.
# In[15]:
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import ShuffleSplit, cross_val_score
# Implement
def backward_selection(X, y, pipe, nr_remove=5):
""" Evaluates the importance of the features in input data X, using the
given pipeline and 3-fold cross-validation.
Returns: The list of the least important features.
Keyword arguments:
X -- The input data
y -- The labels
pipe -- A machine learning pipeline to be evaluated
nr_remove -- The number of features to remove
"""
features = list(X.columns)
ss = ShuffleSplit(n_splits=10, train_size=0.4, test_size=0.1, random_state=0)
knn = KNeighborsRegressor(n_neighbors=3, n_jobs=-1)
least_useful_features = []
while(nr_remove):
acc_loss = 1 - np.mean(cross_val_score(pipe(X[features], knn), X[features], y, cv=ss, scoring='r2', n_jobs=-1))
#print(orig_acc,features)
for i in features:
#X_dummy = X_feature.drop([i], axis=1)
features_d = features[:]
features_d.remove(i)
loss = 1 - np.mean(cross_val_score(pipe(X[features_d], knn), X[features_d], y, cv=ss, scoring='r2', n_jobs=-1))
if loss <= acc_loss:
acc_loss = loss
feature = i
least_useful_features.append(feature)
features.remove(feature)
nr_remove-=1
return least_useful_features
# backward_selection(X, y, simple_pipeline, nr_remove=5)
# In[16]:
X.shape
# ### Question 1.3: Interpretation (2 points)
# * Based on these results, and your out inspection of the data, make a decision on what to do with the features 'full_name', 'department', and 'date_first_hired'. Don't worry if these are not the ones you detected in question 1.2. Can you safely remove these features and is it wise to do so? Also consider any potential ethical issues. Which of the following are correct? Fill in your answer in `q_1_2`. Enter your answer as a comma-separated string without spaces-valued, e.g. "A,B,C"
#
# * 'A': The feature 'full_name' should be removed since it is unique, which is bad for modelling.
# * 'B': The feature 'full_name' should be removed since it contains people's names, which is unethical.
# * 'C': The feature 'full_name' should be kept. The one-hot encoder can handle these kind of features efficiently, and it will make the model more flexible.
# * 'D': The feature 'department' should be removed since it is a duplicate feature with 'department_name'.
# * 'E': The feature 'department' provides useful extra information and should be kept.
# * 'F': The feature 'date_first_hired' is redundant since there is a 'year_first_hired' feature as well, so it can be removed.
# * 'G': The feature 'date_first_hired' should be encoded differently or removed.
# * 'H': The feature 'date_first_hired' is more informative as a category feature than a numeric timestamp, so it should be kept.
# * 'I': No answer
# In[17]:
# Fill in the correct answer. Don't change the name of the variable
q_1_3 = "A,B,D,F"
# ### Question 1.4: Dimensionality (2 points)
# From here, and for the rest of the assignment, we'll remove the three columns discussed in question 1.3 as done below (we overwrite 'X' for convenience). We also remove '2016_gross_pay_received' and '2016_overtime_pay' as they are very closely linked to the target column ('current salary'), hence making this task a little too easy.
#
# Next question: How many features are still being constructed by your `simple_pipeline` (i.e. on how many features is the classifier trained)? Fill in this number in `q_1_4`.
# In[18]:
# cols = [c for c in X.columns if c.lower() not in ['full_name','department','date_first_hired','2016_gross_pay_received', '2016_overtime_pay']]
# X = X[cols]
# In[19]:
knn = KNeighborsRegressor(n_neighbors=3, n_jobs=-1)
reg = simple_pipeline(X, knn).fit(X, y)
# In[20]:
numerical = X.select_dtypes(exclude=["category","object"]).columns.tolist()
tf = reg.named_steps["preprocess"].named_transformers_["remainder"].named_steps["onehot"]
reg_feature_names = tf.get_feature_names_out()
reg_feature_names = np.r_[reg_feature_names, numerical]
no_features = len(reg_feature_names)
# In[21]:
# Fill in the correct answer (should be an integer). Don't change the name of the variable
q_1_4 = 1206
# ## Part 2: Encoders (16 points)
# ### Question 2.1: A flexible pipeline (2 points)
# Implement a function `flexible_pipeline` that has two additional options:
# - Allow to choose a feature scaling method for numeric features. The default is standard scaling. 'None' means no scaling
# - Allow to choose a feature encoding method for categorical features. The default is one-hot encoding.
# In[22]:
# Implement
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import make_column_transformer
def flexible_pipeline(X, model, scaler=StandardScaler(), encoder=OneHotEncoder()):
""" Returns a pipeline that imputes all missing values, encodes categorical features and scales numeric ones
Keyword arguments:
X -- The input data. Only used to identify features types (eg. numeric/categorical), not for training the pipeline.
model -- any scikit-learn model (e.g. a classifier or regressor)
scaler -- any scikit-learn feature scaling method (Optional)
encoder -- any scikit-learn category encoding method (Optional)
Returns: a scikit-learn pipeline which preprocesses the data and then runs the trained model
"""
numerical = X.select_dtypes(include=["int64"]).columns.tolist()
# categorical = X.select_dtypes(include=["category","object"]).columns.tolist()
numerical_pipe = Pipeline([
('imputer', SimpleImputer(strategy='constant',fill_value=0)),
('scaler', scaler)])
if(encoder.__class__.__name__ == "TargetEncoder"):
categorical_encoder = Pipeline([
('mf_imputer', SimpleImputer(strategy='most_frequent')),
('target', encoder),('mean_imputer', SimpleImputer(strategy='mean'))])
else:
categorical_encoder = Pipeline([
('mf_imputer', SimpleImputer(strategy='most_frequent')),
('encoder', encoder)])
preprocessor = ColumnTransformer([
('num', numerical_pipe, numerical)],remainder=categorical_encoder)
return Pipeline([('preprocess', preprocessor), ('model', model)])
# #### Sanity check
# To be correct, this pipeline should be able to fit any model and encoder without error. Uncomment and run this code to do a sanity check.
# In[23]:
X.info()
# In[24]:
flexible_pipeline(X, DecisionTreeRegressor(random_state=0), encoder=OneHotEncoder(handle_unknown = 'ignore')).fit(X,y)
# ### Question 2.2: Comparing encoders (4 points)
# Implement a function `plot_2_2` which plots a heatmap comparing several combinations of catergory encoders and models:
# * As model, the following algorithms in their default hyperparameters settings:
# * Ridge regression
# * Lasso regression
# * Random Forests
# * Gradient Boosting
# * As encoders, use the following options in their default settings:
# * One-Hot encoder (with `sparse=False`)
# * Ordinal encoder
# * Target encoder
#
# Always use standard scaling. You should evaluate all pipelines using r2 score (coefficient of determination) with 5-fold cross-validation.
# Compare all methods with the same cross-validation folds, shuffle the data and use `random_state=0`. Where possible, also use `random_state=0` for the models.
# Only report the test scores (not the training scores).
#
# Note 1: When encoding categorical values, it is possible to see a category at test time that you did not see at training time, in which case you don't have an encoding for that category, resulting (by default) in an error. For one-hot encoded categories, you can work around this by ignoring the category (all one-hot encoded features will then be zero). For ordinal encoding, you can assing a new value (e.g. '-1') for all unknown categories. For target encoding, the default is to return the target mean. These strategies can be set when creating the encoding.
#
# Note 2: TargetEncoding is part of the `category_encoders` extension of scikit-learn. [Read more about it.](https://contrib.scikit-learn.org/category_encoders/targetencoder.html)
# We found that the implementation may have a bug that returns NaN values. You can work around it by wrapping it in a small pipeline followed by a SimpleImputer that replaces NaNs with the mean of the encoded values.
# In[25]:
y.shape
# In[26]:
### Helper plotting function. Do not change.
import seaborn as sns
def heatmap(columns, rows, scores):
""" Simple heatmap.
Keyword arguments:
columns -- list of options in the columns
rows -- list of options in the rows
scores -- numpy array of scores
"""
plt.figure()
df = pd.DataFrame(scores, index=rows, columns=columns)
sns.heatmap(df, cmap='RdYlGn_r', linewidths=0.5, annot=True, fmt=".3f")
# In[27]:
from sklearn.model_selection import KFold
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder
from category_encoders import TargetEncoder
from sklearn.linear_model import Ridge, Lasso
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
# Implement
def plot_2_2(X, y):
""" Evaluates a range of models with different categorical encoders and
plots the results in a heat map.
"""
models = [Ridge(random_state=0), Lasso(random_state=0), RandomForestRegressor(random_state=0),
GradientBoostingRegressor(random_state=0)]
encoders = [OneHotEncoder(sparse=False, handle_unknown='ignore'), OrdinalEncoder(handle_unknown='use_encoded_value',
unknown_value=-1), TargetEncoder()]
scores=[]
kf = KFold(shuffle=True, random_state=0)
for model in models:
for encoder in encoders:
score = np.mean(cross_val_score(flexible_pipeline(X, model, encoder=encoder), X, y, cv=kf, scoring='r2', n_jobs=-1))
#rows.append(model.__class__.__name__)
#columns.append(encoder.__class__.__name__)
scores.append(score)
INDEX = [
'Ridge',
'Lasso',
'RandomForestRegressor',
'GradientBoostingRegressor'
]
COLUMNS = [
'OneHotEncoder', 'OrdinalEncoder', 'TargetEncoder'
]
heatmap(COLUMNS,INDEX,np.array(scores).reshape(-1, 3))
#pd.DataFrame(np.array(scores).reshape(-1, 3), index=INDEX, columns=COLUMNS)
#heatmap(encoders, models, scores)
# plot_2_2(X, y)
# In[28]:
y
# ### Question 2.3: Interpretation (2 points)
# Interpret the results as well as you can:
# - Which models work well in combination with certain encoders? Why do you think that is? Consider the following:
# - How do you explain the cases where performance is not good?
# - What is the effect on the encodings on training time?
# - Some encoders produce lots of features while others produce very few. What is the effect of that on different kinds of models?
#
# Indicate which of the following are correct? Fill in your answer in `q_2_3`. Enter your answer as a comma-separated string without spaces-valued, e.g. "A,B,C"
#
# * 'A': The best performance with one-hot-encoders is seen with linear models, since they can learn complex functions in the high-dimensional space created by one-hot-encoders.
# * 'B': The linear models work well, no matter the encoding.
# * 'C': The linear models do not work well with ordinal encoding, since linear models will assume that the ordering implies a meaningful distance between categories.
# * 'D': The best performance is seen with RandomForests and Gradient Boosting
# * 'E': Decision-tree based ensembles work well with ordinal encodings (better than linear models), since they can build sub-trees for each individual encoder value.
# * 'F': Decision-tree based ensembles do not work well with ordinal encodings, because the ordering is meaningless in this case.
# * 'G': Decision-tree based ensembles work well with ordinal encodings, because they can make good use of the ordering of the categories.
# * 'H': Decision-tree based ensembles can handle different kinds of encodings rather well.
# * 'I': Target encoding works generally well because it captures key information in one or a few features without significantly increasing the dimensionality of the data, like one-hot encoders do.
# * 'J': Target encoding doesn't work well for regression problems since it only creates one new feature.
# * 'K': Target encoding does not work well here, likely because the categorical features are quite skewed, with only a few examples of some categories.
# * 'L': No answer.
#
# In[29]:
# Fill in your explanation. Don't change the name of the variable
q_2_3 = "A,C,D,E,H,I"
# ### Question 2.4: Categorical Feature Embeddings (6 points)
# Now, for something special, we will try the 'SuperVectorizer', a new automated feature encoder that tries to build the best encoding based on how many distinct categories there are. It is also robust against features with many possible values and even typos in category names. In these cases, it encodes features by learning an 'embedding' (a numeric vector representation) based on the similarity of category names. [Read more about it here](https://arxiv.org/abs/1907.01860) but be aware that the paper is quite technical :).
#
# * Implement a function `create_embeddings` that uses the 'SuperVectorizer' to generate an embedding for each employee.
# * Note: The SuperVectorizer only works on string features, so create a copy of X and convert all non-numeric features to strings.
#
# In[30]:
from dirty_cat import SuperVectorizer
# Your implementation goes here
def create_embeddings(X, y):
""" Embeds all categorical features using the SuperVectorizer
Returns a dataframe X with the embedded representation
X -- The input data.
y -- The target values
"""
X_str = X.copy()
categorical = X.select_dtypes(exclude=["int64"]).columns.tolist()
X_str[categorical] = X_str[categorical].astype(str)
sup_vec = SuperVectorizer(auto_cast=True)
X_enc = sup_vec.fit_transform(X_str, y)
return X_enc
# X_embed = create_embeddings(X, y)
# In[31]:
# X_embed.shape
# * Visualize the embedding using a dimensionality reduction technique, [tSNE](https://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html). Please read up on it a bit if you have never heard of it before.
# * Use attribute `verbose=0` for your submission
# * This should compress `X_embed` from (num_employees, embedding_size) to an array of size (num_employees, 2)
# * Make sure that you don't overwite `X`
# * Implement a function `plot_tsne` that plots the 2D vector as a scatter plot, color-coded by the salary
# In[32]:
from sklearn.manifold import TSNE
# Your implementation goes here
def compute_tsne(X):
""" Applies tSNE to build a 2D representation of the data
Returns a dataframe X with the 2D representation
X -- The input data
"""
return TSNE(verbose=0).fit_transform(X)
# X_embed_reduced = compute_tsne(X_embed)
# In[33]:
X.shape
# In[34]:
# X_embed_reduced.shape
# In[35]:
# X_embed_reduced[:,1]
# In[36]:
# Your implementation goes here
def plot_tsne(tsne_embeds, scores):
""" Plots the given 2D data points, color-coded by score
tsne_embeds -- The tSNE embeddings of all employees
scores -- The corresponding salaries
"""
marker_size=15
plt.scatter(tsne_embeds[:,0], tsne_embeds[:,1], marker_size, c=scores)
plt.title("tSNE embeddings of all employees")
cbar= plt.colorbar()
cbar.set_label("Salary", labelpad=+1)
plt.show()
# plot_tsne(X_embed_reduced, y)
# Interpret the result. Indicate which of the following are correct? Fill in your answer in `q_2_4`. Enter your answer as a comma-separated string without spaces, e.g. "A,B,C".
#
# * 'A': Some groups of employees clearly clusters together
# * 'B': The result is entirely random because the embedding doesn't manage to preserve the information in the original data.
# * 'C': Some groups of employees with a high salary cluster together
# * 'D': Some groups of employees with a low salary cluster together
# * 'E': The clusters are all clearly delineated
# * 'F': No answer
# In[37]:
# Fill in your answer. Don't change the name of the variable
q_2_4 = "A,D"
# ### Question 2.5: Compare again (2 points)
# * Implement a function `plot_2_5` that evaluates the same algorithms as in question 2.2, and returns a heatmap (just as in question 2.2), but now using the SuperVectorizer.
#
# In[38]:
# Implement
def plot_2_5(X, y):
""" Plots a heatmap for the different encoder options
Keyword arguments:
X -- The input data
y -- The target labels
Returns a heatmap
"""
models = [Ridge(random_state=0), Lasso(random_state=0), RandomForestRegressor(random_state=0),
GradientBoostingRegressor(random_state=0)]
encoders = []
categorical = X.select_dtypes(exclude=["int64"]).columns.tolist()
X_str = X.copy()
X_str[categorical] = X_str[categorical].astype(str)
scores=[]
kf = KFold(shuffle=True, random_state=0)
for model in models:
pipe = flexible_pipeline(X_str, model, encoder=SuperVectorizer(auto_cast=True))
score = np.mean(cross_val_score(pipe, X_str, y, cv=kf, scoring='r2', n_jobs=-1))
#rows.append(model.__class__.__name__)
#columns.append(encoder.__class__.__name__)
scores.append(score)
INDEX = [
'Ridge',
'Lasso',
'RandomForestRegressor',
'GradientBoostingRegressor'
]
COLUMNS = [
'SuperVectorizer'
]
heatmap(COLUMNS,INDEX,np.array(scores).reshape(-1, 1))
# plot_2_5(X, y)
# In[39]:
X.shape
# Interpret the result. Indicate which of the following are correct? Fill in your answer in `q_2_5`. Enter your answer as a comma-separated string without spaces, e.g. "A,B,C".
#
# * 'A': We get the best results so far, especially with the random forests and gradient boosting.
# * 'B': The results are about the same as the one-hot-encoder we saw in question 2.2.
# * 'C': The learned embeddings seems useful to capture information about which categories are similar (e.g. which employee positions are similar).
# * 'D': Embeddings don't work well for decision-tree based models, one-hot encoding works much better.
# * 'E': No answer
#
# In[40]:
# Fill in your answer. Don't change the name of the variable
q_2_5 = "C"
# ## Part 3: Feature importance (6 points)
# In this part, we will continue with your `flexible_pipeline`, and we use a random forest to learn which features
# are most important to predict the salary of a person. This may reveal how salaries are decided. We will do this with both Random Forest's importance estimates and with permutation importance.
#
# ### Question 3.1: Model-based feature importance (4 points)
# Implement a function `plot_3_1` that does the following:
# * Split the data using a standard shuffled train-test split. Use `random_state=0`.
# * Combine your `flexible_pipeline`, without feature scaling but with one-hot-encoding, with a RandomForest regressor. Train that pipeline on the training set.
# * Remember that the categorical features were encoded. Retrieve their encoded names from the one-hot-encoder (with `get_feature_names`). You can get the encoder from the trained pipeline or ColumnTransformer. Carefully check the documentation.
# * Retrieve the feature importances from the trained random forest and match them to the correct names. Depending on how you implemented your `flexible_pipeline` these are likely the first or the last columns in the processed dataset.
# * Compute the permutation importances given the random forest pipeline and the test set. Use `random_state=0` and at least 10 iterations.
# * Pass the tree-based and permutation importances to the plotting function `compare_importances` below.
# In[41]:
# Plotting function. Do not edit.
def compare_importances(rf_importance, perm_importance, rf_feature_names, feature_names):
""" Compares the feature importances from random forest to permutation importance
Keyword arguments:
rf_importance -- The random forest's feature_importances_
perm_importance -- The permutation importances as computed by sklearn.inspection.permutation_importance
rf_feature_names -- The names of the features received by the random forest, in the same order as their importances
feature_names -- The original features names in their original order
"""
topk = 30
# Trees
sorted_idx = rf_importance.argsort()[-topk:]
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
y_ticks = np.arange(0, topk)
ax[0].barh(y_ticks, rf_importance[sorted_idx])
ax[0].set_yticklabels(rf_feature_names[sorted_idx])
ax[0].set_yticks(y_ticks)
ax[0].set_title("Random Forest Feature Importances")
# Permutations
sorted_idx = perm_importance.importances_mean.argsort()[-topk:]
ax[1].boxplot(perm_importance.importances[sorted_idx].T, vert=False, labels=feature_names[sorted_idx])
ax[1].set_title("Permutation Importances (test set)")
fig.tight_layout()
plt.show()
# In[42]:
# Implement
from sklearn.model_selection import train_test_split
from sklearn.inspection import permutation_importance
from sklearn.ensemble import RandomForestRegressor
def plot_3_1(X, y):
""" See detailed description above.
"""
numerical = X.select_dtypes(include=["int64"]).columns.tolist()
categorical = X.select_dtypes(exclude=["int64"]).columns.tolist()
X_train, X_test, y_train, y_test = train_test_split(X, y.values.ravel(), shuffle=True, random_state=0)
rf = flexible_pipeline(X, RandomForestRegressor(random_state=0), scaler=None, encoder=OneHotEncoder(sparse=False, handle_unknown='ignore'))
rf.fit(X_train, y_train)
tf = rf.named_steps["preprocess"].named_transformers_["remainder"].named_steps["encoder"]
rf_feature_names = tf.get_feature_names_out(categorical)
rf_feature_names = np.r_[numerical, rf_feature_names]
tree_feature_importances = rf.named_steps["model"].feature_importances_
#sorted_idx = tree_feature_importances.argsort()
#print(len(rf_feature_names))
permutation_importances = permutation_importance(rf, X_test, y_test, n_repeats=10, random_state=0, n_jobs=-1)
compare_importances(tree_feature_importances,permutation_importances,rf_feature_names,X_test.columns)
# plot_3_1(X, y)
# ### Question 3.2: Interpretation (2 point)
# Interpret the results of Question 3.1. What seems to affect salary the most? Do both methods give the same or similar results? If not, what are the differences? Try to explain these differences based on your understanding of how these methods work.
#
# Indicate which of the following are correct? Fill in your answer in `q_3_2`. Enter your answer as a comma-separated string without spaces, e.g. "A,B,C"
#
# * 'A': The exact ranking differs a bit because, due to the one-hot-encoding, we get an importance per category in the the model-based feature importances.
# * 'B': Overall, the results are quite similar. The year of first hire, assignment category (fulltime/parttime), and the position title have the most influence on salary.
# * 'C': There are some key differences. Some of the most important features in one method are the least important in the other.
# * 'D': Only some job categories (employee positions) affect salary greatly (e.g. manager)
# * 'E': Gender only shows up in the permutation importances, and with a very low importance.
# * 'F': Gender is important according to both methods.
# * 'H': Year of first hire is not important according to both methods.
# * 'G': Year of first hire is very important according to one method, yet not at all important by the other.
# * 'I': Year of first hire is important according to both methods.
# * 'J': No answer
#
#
#
# In[43]:
# Fill in your answer. Don't change the name of the variable
q_3_2 = "A,B,D,E,I"
# ## Part 4: Algoritmic bias (6 points)
# Check whether there is any algorithmic bias in the predictions.
# ### Question 4.1: Gender bias (3 points)
# First, we want to check whether the model gives better predictions for males or for females.
#
# * Use a RandomForest pipeline with 100 trees and `max_features=0.1`
# * Use 3-fold cross validation and return the predictions for every fold.
# * Hint: sklearn has methods that return the predictions for cross-validation splits (instead of only a score).
# * Separate the test set predictions into different groups depending on the feature 'gender', and report the r2 score for each group.
# * Implement a function `plot_4_1` which returns a simple bar chart visualizing both scores.
# In[44]:
#Implement
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import r2_score
def plot_4_1(X, y):
""" Returns a bar chart of the R2 measured, grouped by the value for the 'gender' feature
"""
rf = RandomForestRegressor(n_estimators=100, min_samples_split=10, max_features=0.1, random_state=0)
pipe = flexible_pipeline(X, rf, encoder=TargetEncoder())
pred = cross_val_predict(pipe, X, y, cv=3, n_jobs=-1)
#For missing Gender values, replacing it with most-frequent 'M'
male_indices = list(np.where(X["gender"] != 'F')[0])
female_indices = list(np.where(X["gender"] == 'F')[0])
male_r2 = r2_score(y[male_indices], pred[male_indices])
female_r2 = r2_score(y[female_indices], pred[female_indices])
# function to add value labels
def addlabels(x,y):
for i in range(len(x)):
plt.text(i, y[i]+0.01, y[i], ha = 'center')
# Make a Bar plot
height = [female_r2, male_r2]
bars = ('Female', 'Male')
y_pos = np.arange(len(bars))
# Create bars
plt.figure(figsize=(6,5))
plt.bar(y_pos, height)
# Adding value labels
addlabels(bars, height)
# giving X and Y labels
plt.xlabel("Gender")
plt.ylabel("r2_score")
# Create names on the x-axis
plt.xticks(y_pos, bars)
# Show graphic
plt.show()
# plot_4_1(X, y)
# Interpret the results. Indicate which of the following are correct. Fill in your answer in `q_4_1`. Enter your answer as a comma-separated values without spaces, e.g. "A,B,C"
# * 'A': The model is much more accurate (more than 1% accuracy difference) in predicting the salaries for males than for females.
# * 'B': The model is about equally accurate for both males and females.
# * 'C': Salaries are equal for both genders.
# * 'D': The model doesn't seem to be biased.
# * 'E': The model is clearly biased.
# * 'F': No answer
# In[45]:
# Fill in your answer. Don't change the name of the variable
q_4_1 = "A,E"
# ### Question 4.2: Instance reweighting (3 points)
# Second, since the data contains more males than females, we want to check whether we can get a better model if we balance the data by giving more
# weight to the examples about female employees.
#
# * Implement a function `plot_4_2` where you weight the samples (instances) with a weight inversely proportional to their frequency. For instance, male instances should be weighted by $\frac{num\_instances}{num\_males}$, and analogous for females.
# * and then visualise the results in the same way as in question 4.1 (as a bar chart).
# * Interpret the results and explain them in `answer_q_4_2`.
# In[46]:
#Implement
def plot_4_2(X, y):
""" Returns a bar chart of the score measured, grouped by the value for the 'gender' feature
"""
male_indices = list(np.where(X["gender"] != 'F')[0])
female_indices = list(np.where(X["gender"] == 'F')[0])
female_weight = len(y)/len(female_indices)
male_weight = len(y)/len(male_indices)
weights = np.empty(len(y), dtype=float)
weights[male_indices] = male_weight
weights[female_indices] = female_weight
rf = RandomForestRegressor(n_estimators=100, max_features=0.1, min_samples_split=10, random_state=0)
pipe = flexible_pipeline(X, rf, encoder=TargetEncoder())
pred = cross_val_predict(pipe, X, y, cv=3, fit_params={'model__sample_weight': weights}, n_jobs=-1)
male_r2 = r2_score(y[male_indices], pred[male_indices])
female_r2 = r2_score(y[female_indices], pred[female_indices])
# function to add value labels
def addlabels(x,y):
for i in range(len(x)):
plt.text(i, y[i]+0.01, y[i], ha = 'center')
# Make a Bar plot
height = [female_r2, male_r2]
bars = ('Female', 'Male')
y_pos = np.arange(len(bars))
# Create bars
plt.figure(figsize=(6,5))
plt.bar(y_pos, height)
# Adding value labels
addlabels(bars, height)
# giving X and Y labels
plt.xlabel("Gender")
plt.ylabel("r2_score")
# Create names on the x-axis
plt.xticks(y_pos, bars)
# Show graphic
plt.show()
# plot_4_2(X, y)
# Interpret the results. Indicate which of the following are correct. Fill in your answer in `q_4_2`. Enter your answer as a comma-separated values without spaces, e.g. "A,B,C"
# * 'A': The model is now much better at predicting the salaries of females.
# * 'B': The model is now much worse at predicting the salaries of females.
# * 'C': The model is only marginally better at predicting the salaries of females than before.
# * 'D': Salaries are now equal for both genders.
# * 'E': The unbalance between males and females was not so large, and the model was already quite good, so the instance weighting didn't do much.
# * 'F': The model still is clearly biased.
# * 'G': No answer
# In[47]:
# Fill in your answer. Don't change the name of the variable
q_4_2 = "C,E,F"
# In[48]:
print(time.time()-start_time)
# In[ ]:
last_edit = 'March 28, 2022' | nikhil-96/ML-Assignment2 | submit/solution.py | solution.py | py | 37,730 | python | en | code | 1 | github-code | 13 |
40343102973 | # -*- coding: utf-8 -*-
import socket, time, random
import sys, os, struct
import traceback
import select
import getpass
host = '' # Bind to all interfaces
MachineInterface_onFindInterfaceAddr = 1
MachineInterface_startserver = 2
MachineInterface_stopserver = 3
MachineInterface_onQueryAllInterfaceInfos = 4
MachineInterface_onQueryMachines = 5
MachineInterface_killserver = 6
MachineInterface_setflags = 7
from . import Define, MessageStream
class ComponentInfo( object ):
"""
"""
def __init__( self, streamStr = None ):
"""
"""
if streamStr:
self.initFromStream( streamStr )
def initFromStream( self, streamStr ):
"""
"""
self.entities = 0 # KBEngine.Entity数量
self.clients = 0 # 客户端数量
self.proxies = 0 # KBEngine.Proxy实例数量
self.consolePort = 0 # 控制台端口
self.genuuid_sections = 0 # --gus
reader = MessageStream.MessageStreamReader(streamStr)
self.uid = reader.readInt32()
self.username = reader.readString()
self.componentType = reader.readInt32()
self.componentID = reader.readUint64()
self.componentIDEx = reader.readUint64()
self.globalOrderID = reader.readInt32()
self.groupOrderID = reader.readInt32()
self.genuuid_sections = reader.readInt32()
self.intaddr = socket.inet_ntoa(reader.read(4))
self.intport = socket.ntohs(reader.readUint16())
self.extaddr = socket.inet_ntoa(reader.read(4))
self.extport = socket.ntohs(reader.readUint16())
self.extaddrEx = reader.readString()
self.pid = reader.readUint32()
self.cpu = reader.readFloat()
self.mem = reader.readFloat()
self.usedmem = reader.readUint32()
self.state = reader.readInt8()
self.machineID = reader.readUint32()
self.extradata = reader.readUint64()
self.extradata1 = reader.readUint64()
self.extradata2 = reader.readUint64()
self.extradata3 = reader.readUint64()
self.backaddr = reader.readUint32()
self.backport = reader.readUint16()
self.componentName = Define.COMPONENT_NAME[self.componentType]
self.consolePort = self.extradata3
if self.componentType in [Define.BASEAPP_TYPE, Define.CELLAPP_TYPE]:
self.fullname = "%s%s" % (self.componentName, self.groupOrderID)
else:
self.fullname = self.componentName
if self.componentType in [Define.BASEAPP_TYPE, Define.CELLAPP_TYPE]:
self.entities = self.extradata
if self.componentType == Define.BASEAPP_TYPE:
self.clients = self.extradata1
if self.componentType == Define.BASEAPP_TYPE:
self.proxies = self.extradata2
#print("%s, uid=%i, cID=%i, gid=%i, groupid=%i, uname=%s" % (Define.COMPONENT_NAME[self.componentType], \
# self.uid, self.componentID, self.globalOrderID, self.groupOrderID, self.username))
class Machines:
def __init__(self, uid = None, username = None, listenPort = 0):
"""
"""
self.udp_socket = None
self.listenPort = listenPort
if uid is None:
uid = Define.getDefaultUID()
if username is None:
try:
username = Define.pwd.getpwuid( uid ).pw_name
except:
import getpass
username = getpass.getuser()
self.uid = uid
self.username = username
if type(self.username) is str:
self.username = username.encode( "utf-8" )
else:
try:
if type(self.username) is unicode:
self.username = username.encode( "utf-8" )
except:
pass
self.startListen()
self.reset()
def __del__(self):
#print( "Machines::__del__(), Machines destroy now" )
self.stopListen()
def startListen(self):
"""
"""
assert self.udp_socket is None
self.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.udp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.udp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 5 * 1024 * 1024)
self.udp_socket.bind((host, self.listenPort))
self.replyPort = self.udp_socket.getsockname()[1]
#print( "udp receive addr: %s" % (self.udp_socket.getsockname(), ) )
def stopListen(self):
"""
"""
if self.udp_socket is not None:
self.udp_socket.close()
self.udp_socket = None
def reset(self):
"""
"""
self.interfaces = {} # { componentType : [ComponentInfo, ...], ... }
self.interfaces_groups = {} # { machineID : [ComponentInfo, ...], ...}
self.interfaces_groups_uid = {} # { machineID : [uid, ...], ...}
self.machines = []
def send(self, msg, ip = "<broadcast>"):
"""
发送消息
"""
_udp_broadcast_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
_udp_broadcast_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if isinstance(ip, (tuple, list)):
for addr in ip:
_udp_broadcast_socket.sendto(msg, (addr, 20086))
elif ip == "<broadcast>":
_udp_broadcast_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
_udp_broadcast_socket.sendto(msg, ('255.255.255.255', 20086))
else:
_udp_broadcast_socket.sendto(msg, (ip, 20086))
def sendAndReceive(self, msg, ip = "<broadcast>", trycount = 0, timeout = 1, callback = None):
"""
发送消息,并等待消息返回
"""
self.send(msg, ip)
self.udp_socket.settimeout(timeout)
dectrycount = trycount
recvDatas = []
while True:
try:
datas, address = self.udp_socket.recvfrom(10240)
recvDatas.append(datas)
#print ("Machine::sendAndReceive(), %s received %s data from %r" % (len(recvDatas), len(datas), address))
if callable( callback ):
try:
if callback( datas, address ):
return recvDatas
except:
traceback.print_exc()
except socket.timeout:
if dectrycount <= 0:
break
dectrycount -= 1
#print("Machine::sendAndReceive(), try count %s" % (trycount - dectrycount))
except (KeyboardInterrupt, SystemExit):
raise
except:
traceback.print_exc()
break
return recvDatas
def receiveReply(self, timeout = 1):
"""
等待消息返回
"""
self.udp_socket.settimeout(timeout)
try:
datas, address = self.udp_socket.recvfrom(10240)
return datas, address
except socket.timeout:
return "", ""
def queryAllInterfaces(self, ip = "<broadcast>", trycount = 1, timeout = 1):
"""
"""
self.reset()
nameLen = len( self.username ) + 1 # 加1是为了存放空终结符
msg = MessageStream.MessageStreamWriter(MachineInterface_onQueryAllInterfaceInfos)
msg.writeInt32(self.uid)
msg.writeString(self.username)
msg.writeUint16(socket.htons(self.replyPort)) # reply port
datas = self.sendAndReceive( msg.build(), ip, trycount, timeout )
self.parseQueryDatas( datas )
def queryMachines(self, ip = "<broadcast>", trycount = 1, timeout = 1):
"""
"""
self.reset()
nameLen = len( self.username ) + 1 # 加1是为了产生空终结符
msg = MessageStream.MessageStreamWriter(MachineInterface_onQueryMachines)
msg.writeInt32(self.uid)
msg.writeString(self.username)
msg.writeUint16(socket.htons(self.replyPort)) # reply port
datas = self.sendAndReceive( msg.build(), ip, trycount, timeout )
self.parseQueryDatas( datas )
def startServer(self, componentType, cid, gus, targetIP, kbe_root, kbe_res_path, kbe_bin_path, trycount = 1, timeout = 1):
"""
"""
msg = MessageStream.MessageStreamWriter(MachineInterface_startserver)
msg.writeInt32(self.uid)
msg.writeInt32(componentType)
msg.writeUint64(cid)
msg.writeUint16(gus)
msg.writeUint16(socket.htons(self.replyPort)) # reply port
msg.writeString(kbe_root)
msg.writeString(kbe_res_path)
msg.writeString(kbe_bin_path)
if trycount <= 0:
self.send( msg.build(), targetIP )
self.receiveReply()
else:
self.sendAndReceive( msg.build(), targetIP, trycount, timeout )
def stopServer(self, componentType, componentID = 0, targetIP = "<broadcast>", trycount = 1, timeout = 1):
"""
"""
msg = MessageStream.MessageStreamWriter(MachineInterface_stopserver)
msg.writeInt32(self.uid)
msg.writeInt32(componentType)
msg.writeUint64(componentID)
msg.writeUint16(socket.htons(self.replyPort)) # reply port
if trycount <= 0:
self.send( msg.build(), targetIP )
self.receiveReply()
else:
self.sendAndReceive( msg.build(), targetIP, trycount, timeout )
def killServer(self, componentType, componentID = 0, targetIP = "<broadcast>", trycount = 1, timeout = 1):
"""
"""
msg = MessageStream.MessageStreamWriter(MachineInterface_killserver)
msg.writeInt32(self.uid)
msg.writeInt32(componentType)
msg.writeUint64(componentID)
msg.writeUint16(socket.htons(self.replyPort)) # reply port
if trycount <= 0:
self.send( msg.build(), targetIP )
self.receiveReply()
else:
self.sendAndReceive( msg.build(), targetIP, trycount, timeout )
def setFlags(self, componentType, flags, componentID = 0, targetIP = "<broadcast>", trycount = 1, timeout = 1):
"""
"""
msg = MessageStream.MessageStreamWriter(MachineInterface_setflags)
msg.writeInt32(self.uid)
msg.writeInt32(componentType)
msg.writeUint64(componentID)
msg.writeUint32(flags)
msg.writeUint16(socket.htons(self.replyPort)) # reply port
if trycount <= 0:
self.send( msg.build(), targetIP )
self.receiveReply()
else:
_receiveData = self.sendAndReceive( msg.build(), targetIP, trycount, timeout )
_receiveList = []
for _data in _receiveData:
_receiveList.append(int.from_bytes(_data, byteorder='little', signed = True))
_succ = "False"
if 1 in _receiveList:
_succ = "True"
print("componentID: %d, success: %s" % (componentID, _succ))
def parseQueryDatas( self, recvDatas ):
"""
"""
for data in recvDatas:
self.parseQueryData( data )
def parseQueryData( self, recvData ):
"""
"""
cinfo = ComponentInfo( recvData )
componentInfos = self.interfaces.get(cinfo.componentType)
if componentInfos is None:
componentInfos = []
self.interfaces[cinfo.componentType] = componentInfos
found = False
for info in componentInfos:
if info.componentID == cinfo.componentID and info.pid == cinfo.pid:
found = True
break
if found:
return
componentInfos.append(cinfo)
machineID = cinfo.machineID
gourps = self.interfaces_groups.get(machineID, [])
if machineID not in self.interfaces_groups:
self.interfaces_groups[machineID] = gourps
self.interfaces_groups_uid[machineID] = []
# 如果pid与machineID相等,说明这个是machine进程
if cinfo.pid != machineID:
gourps.append(cinfo)
if cinfo.uid not in self.interfaces_groups_uid[machineID]:
self.interfaces_groups_uid[machineID].append(cinfo.uid)
else:
# 是machine进程,把它放在最前面,并且加到machines列表中
gourps.insert(0, cinfo)
self.machines.append( cinfo )
def makeGUS(self, componentType):
"""
生成一个相对唯一的gus(非全局唯一)
"""
if not hasattr( self, "ct2gus" ):
self.ct2gus = [0] * Define.COMPONENT_END_TYPE
self.ct2gus[componentType] += 1
return componentType * 100 + self.ct2gus[componentType]
def makeCID(self, componentType):
"""
生成相对唯一的cid(非全局唯一)
"""
if not hasattr( self, "cidRand" ):
self.cidRand = random.randint(1, 99999)
if not hasattr( self, "ct2cid" ):
self.ct2cid = [0] * Define.COMPONENT_END_TYPE
self.ct2cid[componentType] += 1
t = int( time.time() ) % 99999
cid = "%02i%05i%05i%04i" % (componentType, t, self.cidRand, self.ct2cid[componentType])
return int(cid)
def getMachine( self, ip ):
"""
通过ip地址找到对应的machine的info
"""
for info in self.machines:
if info.intaddr == ip:
return info
return None
def hasMachine( self, ip ):
"""
"""
for info in self.machines:
if info.intaddr == ip:
return True
return False
def getComponentInfos( self, componentType ):
"""
获取某一类型的组件信息
"""
return self.interfaces.get( componentType, [] )
| kbengine/kbengine | kbe/tools/server/pycommon/Machines.py | Machines.py | py | 11,756 | python | en | code | 5,336 | github-code | 13 |
12178730245 | from typing import *
from sklearn.cluster import AgglomerativeClustering, k_means
from spherecluster import SphericalKMeans
from utility_functions import get_clus_config
# ----------------------------------------------------------------------
# type definitions
data_type = List[Iterator[float]] # a list of vectors
# ----------------------------------------------------------------------
class Clustering:
"""Clustering interface for different clustering methods."""
def __init__(self):
clus_configs = get_clus_config()
self.clus_type = clus_configs['type']
self.affinity = clus_configs['affinity']
self.linkage = clus_configs['linkage']
self.n_clusters = clus_configs['n_clusters']
self.cluster = None
self.compactness = None
def fit(self,
data: List[Iterator[float]],
find_n: bool = False
) -> Dict[str, Union[List[int], Union[float, None]]]:
"""Cluster the input data into n clusters.
Args:
data: A list of vectors.
find_n: If True, don't use self.n_cluster but find n using
elbow analysis instead
Return:
A list of integers as class labels. The order of the list
corresponds to the order of the input data.
"""
if find_n:
self.n_clusters = 5 # self._get_n()
if self.clus_type == 'kmeans':
self.cluster = k_means(n_clusters=self.n_clusters)
elif self.clus_type == 'sphericalkmeans':
self.cluster = SphericalKMeans(n_clusters=self.n_clusters)
elif self.clus_type == 'agglomerative':
self.cluster = AgglomerativeClustering(
n_clusters=self.n_clusters,
affinity=self.affinity,
linkage=self.linkage)
self.cluster.fit(data)
self._calc_density()
return {'labels': self.cluster.labels_, 'density': self.compactness}
def _get_n(self):
"""Get the output number of clusters.
Use knee method or comparable.
"""
raise NotImplementedError
def _calc_density(self):
"""Compute the cluster density if possible."""
if self.clus_type == 'kmeans' or self.clus_type == 'sphericalkmeans':
return self.cluster.inertia_
else:
return None | jagol/BA_Thesis | pipeline/clustering.py | clustering.py | py | 2,376 | python | en | code | 2 | github-code | 13 |
3875319135 | import requests
import os
import logging
import sys
import json
import base64
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def get_consul_svc(svc_endpoint, request_params, service):
"""
Since consul does not have concept of prefix for service, so we are using tags for this purpose.
"""
consul_data = requests.get(svc_endpoint, params=request_params)
body = json.loads(consul_data.text)
return {
"address": body[0]["Service"]["Address"],
"port": body[0]["Service"]["Port"]
}
def key_missing_error(key_endpoint, status_code):
log_msg = "Unable to get key endpoint: '{}' having status_code: '{}'".format(
key_endpoint,
status_code
)
logger.error(log_msg)
def get_consul_kv(key_endpoint, params=None):
data = {}
r = requests.get(key_endpoint, params=params)
if not r.ok:
key_missing_error(key_endpoint, r.status_code)
return None
body = json.loads(r.text)
for val in body:
if val.get("Value"):
data[val["Key"]] = base64.b64decode(val["Value"]).decode()
return data
def del_consul_kv(key_endpoint, params=None):
r = requests.delete(key_endpoint, params=params)
if r.text.strip() != "true":
logger.error("unable to delete key endpoint: '{}' having status_code: '{}'".format(
key_endpoint, r.status_code))
logger.info("successfully deleted key endpoint: '{}' with params: '{}'".format(
key_endpoint, params))
def put_consul_kv(key_endpoint, key, value):
if type(key_endpoint) != str or type(key) != str or type(value) != str:
logger.error("send string type only in {}".format(__class__))
r = requests.put(key_endpoint, value)
if r.text.strip() != "true":
logger.error("unable to put key endpoint: '{}' having status_code: '{}'".format(
key_endpoint, r.status_code))
logger.info("successfully put key endpoint: '{}'".format(key_endpoint))
class Config:
"""
Wrapper on top of consul api to facilitates configuration for Application.
Get Consul Server hostname and port from the environment variable `CONSUL_HOSTNAME` and `CONSUL_PORT`
Puts all application configuration on Consul. Allow overwriting of any configuration with the environment variable.
Priority of configuration will be:
1: Environment Variable
2: Consul key’s value
Key Naming:
- Use underscores to separate words inside the key name.
- Use lower case letters.
- Key name for environment variable must be capitalised.
So if key name is `redis_hostname` on the application then corresponding
consul key name will be `component/environment/URL/redis_hostname` (like `example-app/production/v2.pyconfig.com/redis_hostname`)
Environment variable name will be `REDIS_HOSTNAME`.
"""
def __init__(self, consulhost=None, consulport=None, keyprefix=None, component=None, env=None, prefix=None):
self.consul_host = consulhost or os.environ.get(
'CONSUL_HOSTNAME') or 'localhost'
self.consul_port = int(
consulport or os.environ.get('CONSUL_PORT') or '8500')
self.app = component or os.environ.get('COMPONENT_NAME') or 'DUMMY'
self.env = env or os.environ.get('APP_ENV') or 'development'
self.prefix = prefix or os.environ.get('COMPONENT_PREFIX') or 'dev'
self.keyprefix = keyprefix or '{}/{}/{}'.format(
self.app, self.env, self.prefix)
self.consul_url = "http://{}:{}".format(
self.consul_host, self.consul_port)
self.consul_kv_endpoint = "{}/v1/kv".format(self.consul_url)
self.consul_svc_endpoint = "{}/v1/health/service".format(
self.consul_url)
logger.info("consul url is '{}'".format(self.consul_url))
logger.info("keyprefix is '{}'".format(self.keyprefix))
def put(self, key, value):
"""
It will write to Consul with prefix/key key name.
`keyprefix`, `key`, `value` are string data-type.
example:
Config.keyprefix = "app/staging/main"
Config.put(key", "value")
"""
key_endpoint = "{}/{}/{}".format(self.consul_kv_endpoint,
self.keyprefix, key)
put_consul_kv(key_endpoint, key, value)
def reset(self, data):
"""
It will delete existing prefix namespace on consul, if already present and will create mentioned key-value pairs on Consul.
`data` is dictionary data-type with string data-type as key and value.
`keyprefix` is string data-type.
example:
Config.keyprefix = "app/staging/main"
d = { "key1": "value1", "key2": "value2" }
Config.reset(d)
"""
key_endpoint = "{}/{}".format(self.consul_kv_endpoint, self.keyprefix)
del_consul_kv(key_endpoint, params={"recurse": True})
for k, v in data.items():
self.put(k, v)
def get(self, key):
"""
It will read from Consul and environment variable then return the value of higher priority.
`keyprefix`, `key` are string data-type. return data-type is dictionary or string depending on whether value is json encoded.
example:
Config.keyprefix = "app/staging/main"
Config.get("key")
"""
env_key = key.upper()
if env_key in os.environ:
try:
value = json.loads(os.environ[env_key])
except ValueError:
value = os.environ[env_key]
return value
key_endpoint = "{}/{}/{}".format(self.consul_kv_endpoint,
self.keyprefix, key)
consul_data = get_consul_kv(key_endpoint)
if consul_data:
try:
value = json.loads(
consul_data["{}/{}".format(self.keyprefix, key)])
except ValueError:
value = consul_data["{}/{}".format(self.keyprefix, key)]
return value
logger.error("Invalid prefix -> {} or Invalid key -> {}".format(
self.keyprefix, key))
return None
def get_multi(self, keyprefix=''):
"""
It reads all keys from Consul recursively and returns a dictionary with flattening of directories so beware if it overwrites same key name.
`keyprefix` are string data-type. return data-type is dictionary.
example:
Config.get_multi("jwt_keys") or Config.get_multi()
"""
keyprefix = self.keyprefix if keyprefix == '' else keyprefix
key_endpoint = "{}/{}".format(self.consul_kv_endpoint,
keyprefix)
consul_data = get_consul_kv(key_endpoint, {"recurse": True})
data = {}
if consul_data:
for k, v in consul_data.items():
try:
data[k.split('/')[-1]] = json.loads(v)
except ValueError:
data[k.split('/')[-1]] = v
return data
return {
"Invalid prefix -> {}".format(
self.keyprefix)
}
def get_all(self):
"""
It will read from Consul and environment variable then return the dictionary containing all with below format:
{
environment: {'key': 'value', ... },
consul: {'key': 'value', ... }
}
`keyprefix` is string data-type and return data-type is dictionary.
example:
Config.keyprefix = "app/staging/main"
Config.get_all()
"""
data = {'environment': {}, 'consul': {}}
for k, v in os.environ.items():
data['environment'][k] = v
key_endpoint = "{}/{}".format(self.consul_kv_endpoint, self.keyprefix)
consul_data = get_consul_kv(key_endpoint, params={"recurse": True})
if consul_data:
for k, v in consul_data.items():
data['consul'][k] = v
return data
def delete(self, key):
"""
It will delete `key` from Consul.
`keyprefix`, `key` are string data-type.
example:
Config.keyprefix = "app/staging/main"
Config.delete("key")
"""
key_endpoint = "{}/{}/{}".format(self.consul_kv_endpoint, self.keyprefix, key)
del_consul_kv(key_endpoint)
def delete_all(self):
"""
It will delete `prefix` from Consul.
`prefix` is string data-type.
example:
Config.keyprefix = "app/staging/main"
Config.delete_all()
"""
key_endpoint = "{}/{}".format(self.consul_kv_endpoint, self.keyprefix)
del_consul_kv(key_endpoint, params={"recurse": True})
def get_service(self, service):
"""
It will get service from Consul using HTTP API request.
This will be used for geting services like RabbitMQ, Redis, Kafka, ElasticSearch hostname.
So basically anything that needs status-checking/load-balancing, can be used through this.
`keyprefix` and `service` are string data-type. return type will be dictionary data-type.
example:
Config.keyprefix = "app/staging/main"
Config.get_service("rmq-stg-internal")
"""
svc_endpoint = "{}/{}".format(self.consul_svc_endpoint, service)
request_params = {"tag": self.keyprefix, "passing": True}
return get_consul_svc(svc_endpoint, request_params, service)
| synup/ji | consul_pyconfig/consul_pyconfig/config.py | config.py | py | 9,492 | python | en | code | 7 | github-code | 13 |
29246664576 | import pandas as pd
import os
#Purges all rows where the name matches the specified purgeTarget from all data files with the specified prefix.
#Useful for removing bad or modded games :-)
#PurgeTarget needs to match the row exactly
targetPath = "../../reaperCSVs/cluster data 40k/"
filePrefix = ""
purgeRows = False
purgeTarget = ""
purgeColumn = 'Name'
purgeColumns = False
columnsToPurge = ['Unnamed: 0.1', 'Unnamed: 0']
purgeDuplicates = True
duplicatesColumn = '0Replay_id'
files = [f for f in os.listdir(targetPath) if os.path.isfile(os.path.join(targetPath, f)) and f.lower().endswith(".csv") and f.startswith(filePrefix)]
print("Found", files.__len__(), "files.")
for x in range(0, files.__len__()):
file = files[x]
data = pd.read_csv(targetPath + file)
data = data.drop('Unnamed: 0', axis=1)
shapeBefore = data.shape
if purgeColumns:
cols = [c for c in columnsToPurge if c in data.columns]
data = data.drop(cols, axis=1)
if purgeDuplicates:
data = data.drop_duplicates([duplicatesColumn], keep='first')
if purgeRows:
data = data[data.Name != purgeTarget]
shapeAfter = data.shape
if shapeBefore != shapeAfter:
print("Removed", shapeBefore[0] - shapeAfter[0], "rows and", shapeBefore[1] - shapeAfter[1], "columns from", file)
data = data.reset_index(drop=True)
data.to_csv(targetPath + file, index=True)
| JohnSegerstedt/DATX02-19-81 | clustering/Utilities/csvpurge.py | csvpurge.py | py | 1,413 | python | en | code | 4 | github-code | 13 |
39740126027 | import sys
import numpy as np
from xml import sax
from xml.sax.saxutils import escape
from math import sqrt, sin, cos, radians, atan2
from functools import partial
def lat_lon_elevation_from_gpx( gpx ):
class GPXTrackHandler( sax.handler.ContentHandler ):
def __init__( self, *args, **kwargs ):
super().__init__( *args, **kwargs )
self.reset()
def reset( self ):
self.lat_lon_elevation = []
self.lat = self.lon = self.elevation = 0.0
self.in_ele = 0
def startElement( self, name, attrs ):
if name == 'trkpt':
self.lat, self.lon = float(attrs['lat'].strip()), float(attrs['lon'].strip())
elif name == 'ele':
self.in_ele += 1
elif name == 'trkseg':
self.reset()
def characters( self, content ):
if self.in_ele:
self.elevation = float(content.strip())
def endElement( self, name ):
if name == 'ele':
self.in_ele -= 1
elif name == 'trkpt':
self.lat_lon_elevation.append( (self.lat, self.lon, self.elevation) )
p = sax.make_parser()
gth = GPXTrackHandler()
p.setContentHandler( gth )
try:
p.parse( gpx )
except Exception as e:
return []
return gth.lat_lon_elevation
def lat_lon_elevation_to_gpx( lat_lon_elevation, stream=None, name="" ):
stream = stream or sys.stdout
stream.write( """<?xml version="1.0" encoding="UTF-8"?>
<gpx version="1.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
creator="Edward Sitarski lat_lon_elevation_to_gpx"
xmlns="http://www.topografix.com/GPX/1/0"
xsi:schemaLocation="http://www.topografix.com/GPX/1/0 http://www.topografix.com/GPX/1/0/gpx.xsd">
"""
)
stream.write( '<trk>\n' )
if name:
stream.write( '<name>{}</name>'.format(escape(name)) )
stream.write( '<trkseg>\n' )
for lat, lon, e in lat_lon_elevation:
stream.write( '<trkpt lat="{}" lon="{}"><ele>{}</ele></trkpt>\n'.format(lat, lon, e) )
stream.write( '</trkseg>\n' )
stream.write( '</trk>\n' )
stream.write( '</gpx>\n' )
def great_circle_distance( pointA, pointB ):
EARTH_RADIUS = 6371.0088 * 1000.0 # meters
latA, lonA = (float(i) for i in pointA)
latB, lonB = (float(i) for i in pointB)
phiA = radians( latA )
phiB = radians( latB )
delta_latitude = radians(latB - latA)
delta_longitude = radians(lonB - lonA)
a = (
sin(delta_latitude / 2.0) ** 2
+ cos(phiA) * cos(phiB) * sin(delta_longitude / 2.0) ** 2
)
c = 2.0 * atan2(sqrt(a), sqrt(max(0.0, 1.0 - a)))
return EARTH_RADIUS * c
def pldist( point, start, end ):
"""
Calculate the distance from point to the line.
All points are numpy arrays.
"""
if np.array_equal(start, end):
return np.linalg.norm(point - start)
delta_line = end - start
return np.divide(
np.abs(np.linalg.norm(np.cross(delta_line, start - point))),
np.linalg.norm(delta_line)
)
def _rdp_iter(M, start_index, last_index, epsilon, dist=pldist):
stk = [(start_index, last_index)]
global_start_index = start_index
mask = np.ones(last_index - start_index + 1, dtype=bool)
while stk:
start_index, last_index = stk.pop()
dmax = 0.0
index = start_index
p1, p2 = M[start_index], M[last_index]
for i in range(index + 1, last_index):
if mask[i - global_start_index]:
d = dist(M[i], p1, p2)
if d > dmax:
index = i
dmax = d
if dmax > epsilon:
stk.append((start_index, index))
stk.append((index, last_index))
else:
mask[start_index + 1 - global_start_index:last_index - global_start_index] = False
return mask
def rdp_iter(M, epsilon, dist=pldist, return_mask=False):
"""
Simplifies a given array of points (multi-dimensional).
return_mask: return the mask of points to keep instead of the edited points.
"""
mask = _rdp_iter(M, 0, len(M) - 1, epsilon, dist)
if return_mask:
return mask
return M[mask]
def rdp(M, epsilon=0, dist=pldist, return_mask=False):
"""
Simplifies a given array of points M using the Ramer-Douglas-Peucker
algorithm.
"""
algo = partial(rdp_iter, return_mask=return_mask)
if "numpy" in str(type(M)):
return algo(M, epsilon, dist)
return algo(np.array(M), epsilon, dist).tolist()
def lat_lon_elevation_to_points_itr( lat_lon_elevation ):
lat_min = lat_max = lat_lon_elevation[0][0]
lon_min = lon_max = lat_lon_elevation[0][1]
for lat, lon, e in lat_lon_elevation:
if lat < lat_min:
lat_min = lat
elif lat > lat_max:
lat_max = lat
if lon < lon_min:
lon_min = lon
elif lon > lon_max:
lon_max = lon
# Use a flat earth and compute the length of a degree.
degree_delta = (lat_max - lat_min) / 5.0
lat_distance = great_circle_distance( (lat_min, lon_min), (lat_min+degree_delta, lon_min) ) / degree_delta
degree_delta = (lon_max - lon_min) / 5.0
lon_distance = great_circle_distance( (lat_min, lon_min), (lat_min, lon_min+degree_delta) ) / degree_delta
# Multiply the length of the lat, lon difference.
return ( ((lat - lat_min) * lat_distance, (lon - lon_min) * lon_distance, e) for (lat, lon, e) in lat_lon_elevation )
def simplify_gpx_file( gpx, epsilon=1.0 ):
lat_lon_elevation = lat_lon_elevation_from_gpx( gpx )
points_itr = lat_lon_elevation_to_points_itr( lat_lon_elevation )
p = np.fromiter( points_itr, dtype=np.dtype((float, 3)) )
mask = rdp( p, epsilon, return_mask=True );
lat_lon_elevation = [v for i, v in enumerate(lat_lon_elevation) if mask[i]]
p = p[mask]
return lat_lon_elevation, p # Return the edited points and xy_elevation array.
if __name__ == '__main__':
import json
lat_lon_elevation, points = simplify_gpx_file( 'hero_dolomites_86_km_14_06_22.gpx' )
with open('hero_dolomites_86_km_14_06_22_filter.gpx', 'w') as f:
lat_lon_elevation_to_gpx( lat_lon_elevation, f )
with open('hero_dolomites_86_km_14_06_22_filter.json', 'w') as f:
json.dump( lat_lon_elevation, f )
| esitarski/RaceDB | core/gpx_util.py | gpx_util.py | py | 5,720 | python | en | code | 12 | github-code | 13 |
14577446472 | import csv
import requests
from requests.auth import HTTPBasicAuth
import json
import psycopg2
import time
def open_database_connection(postgres_config):
"""
Open a database connection with autocommit property.
Args:
postgres_config : dictionary of postgres configuration
Returns:
connection object
"""
conn = psycopg2.connect(
host=postgres_config['host'],
port=postgres_config['port'],
dbname=postgres_config['database'],
user=postgres_config['user'],
password=postgres_config['password']
)
conn.set_session(autocommit=True)
return conn
def load_json(json_file_path):
"""
Load a json file and return the dictionary.
Args:
json_file_path : path of the json file to load
Returns:
dictionary of key value pairs of the json file
"""
with open(json_file_path) as json_file:
json_dict = json.loads(json_file.read())
return json_dict
def company_to_sql_columns(comp_json):
"""
Convert JSON obtained from Intrinio get request to SQL column keys.
Args:
comp_json : raw JSON obtained from intrinio companies API.
Returns:
dictionary mapping SQL columns to values, using dollar escaping for postgres
"""
def get_field(field):
""" Return 'NULL' if field is None. """
return '$${}$$'.format(comp_json[field]) if comp_json[field] else 'NULL'
return {
'company_id': get_field('cik'),
'company_name': get_field('name'),
'ceo_name': get_field('ceo'),
'company_description': get_field('short_description'),
'hq_address': get_field('business_address'),
'num_employees': get_field('employees'),
'sector': get_field('sector'),
'industry': get_field('industry_category')
}
def main():
# Load credentials for Intrinio API
intrinio_cred = load_json('../intrinio_credentials.json')
# Open database connection using postgres configuration file
postgres_config = load_json('../postgres_config.json')
conn = open_database_connection(postgres_config)
# Retrieve all companies
with open('../data/companies.csv', newline='') as csvfile:
company_reader = csv.reader(csvfile, delimiter=',', quotechar='"')
max_iters = 2300
for row in company_reader:
# Get ticker to use in GET request
ticker = row[0]
# Get company information
comp_json = requests.get('https://api.intrinio.com/companies?identifier={}'.format(ticker), auth=HTTPBasicAuth(intrinio_cred['API_USER'], intrinio_cred['API_KEY'])).json()
# Map fields to SQL column names
comp_item = company_to_sql_columns(comp_json)
# If company id is NULL, do not insert
if comp_item['company_id'] == 'NULL':
print(comp_item)
else:
with conn.cursor() as cur:
# Insert values into the database
try:
cur.execute("INSERT INTO company VALUES({company_id}, {company_name}, {ceo_name}, {company_description}, {hq_address}, {num_employees}, {sector}, {industry});".format(**comp_item))
# Check if successfully inserted
cur.execute("SELECT * FROM company WHERE company_id={company_id};".format(company_id=comp_item['company_id']))
if len(cur.fetchone()) == 0:
print("Error inserting company\n{}".format(comp_item))
except psycopg2.IntegrityError as e:
print("Integrity error {}:\n{}".format(e, comp_item))
if max_iters == 0:
print("Waiting...")
time.sleep(11 * 60)
print("Resuming...")
max_iters = (max_iters + 1) % 2300
if __name__ == '__main__':
main() | dwtcourses/financial-advisor | intrinio/tools/company_database_insert.py | company_database_insert.py | py | 3,923 | python | en | code | 0 | github-code | 13 |
27074621569 | import subprocess, os
class MemoryMonitor(object):
def __init__(self):
"""Create new MemoryMonitor instance."""
self.pid = os.getpid()
def usage(self):
"""Return int containing memory used by user's processes."""
self.process = subprocess.Popen("ps -p %s -o rss | awk '{sum+=$1} END {print sum}'" % self.pid,
shell=True,
stdout=subprocess.PIPE,
)
self.stdout_list = self.process.communicate()[0].split('\n')
return int(self.stdout_list[0])
| entone/GeriCare | Shared/system/memory_monitor.py | memory_monitor.py | py | 619 | python | en | code | 1 | github-code | 13 |
17041177037 | from sklearn.datasets import fetch_20newsgroups
from sklearn.metrics import f1_score, classification_report
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
######
### This one downloads a large dataset, so it may take a while
categories = ['rec.sport.hockey', 'rec.sport.baseball', 'rec.autos']
newsgroup_train = fetch_20newsgroups(subset='train', categories=categories,
remove=('headers', 'footers', 'quotes'))
newsgroup_test = fetch_20newsgroups(subset='test', categories=categories,
remove=('headers', 'footers', 'quotes'))
######
### Explore the data
for txt in (newsgroup_train.data)[0:2]:
print(txt)
print('.....')
print(newsgroup_train.target[0:2])
print(newsgroup_train.target_names)
print("No. of newsgroups: %d" % len(newsgroup_train.data))
######
vectorizer = TfidfVectorizer()
X_train = vectorizer.fit_transform(newsgroup_train.data)
X_test = vectorizer.transform(newsgroup_test.data)
######
## Explore the Tfidf data
for txt in (X_train)[0:2]:
print(txt)
print('.....')
######
classifier = Perceptron(n_iter=100, eta0=0.1)
classifier.fit(X_train, newsgroup_train.target)
predictions = classifier.predict(X_test)
print(classification_report(newsgroup_test.target, predictions))
| rupendrab/pyutil | Perceptron_01.py | Perceptron_01.py | py | 1,382 | python | en | code | 0 | github-code | 13 |
30365546051 | import logging
import voluptuous
from tornado.options import options
from tornado.httputil import url_concat, responses
from tornado.web import RequestHandler, HTTPError
from tornado import escape
class APIError(Exception):
    """Application-level API failure; handlers join its args into the response message."""
def get_error_message(ex):
    """Recursively unwrap *ex* (exception, list/tuple, or str) to a message string.

    Drills into the first element of sequences and into ``Exception.args``
    until a plain string is reached.  Empty containers and argument-less
    exceptions yield '' instead of raising IndexError; any other leaf value
    falls back to ``str(ex)`` instead of silently returning None.
    """
    if isinstance(ex, (list, tuple)):
        if not ex:
            return ''
        return get_error_message(ex[0])
    if isinstance(ex, Exception):
        if not ex.args:
            return ''
        return get_error_message(ex.args[0])
    if isinstance(ex, str):
        return ex
    return str(ex)
class BaseHandler(RequestHandler):
    """Common base for API handlers: JSON success envelopes plus unified
    error rendering for HTTP, validation and application errors."""
    @property
    def db(self):
        """ :rtype: torndb.Connection """
        return self.application.db
    def write_ok(self, chunk):
        """Write *chunk* wrapped in the success envelope {'ok': True, 'body': ...}."""
        res = {
            'ok': True,
            'body': chunk
        }
        self.write(res)
    def write_error(self, status_code, **kwargs):
        """Render any uncaught exception as a JSON error envelope.

        Tornado passes the active exc_info; the message is derived from the
        exception type (HTTPError, voluptuous validation error, or APIError),
        falling back to 'Internal Server Error'.
        """
        result = {
            'ok': False,
            'error': {
                'message': 'Internal Server Error',
            }
        }
        if 'exc_info' in kwargs:
            try:
                exception = kwargs['exc_info'][1]
                if isinstance(exception, HTTPError):
                    # Plain tornado HTTPError
                    message = (exception.reason or
                               exception.log_message or
                               responses.get(exception.status_code,
                                             'Unknown'))
                    result['error']['message'] = message
                elif isinstance(exception, voluptuous.error.Error):
                    # Parameter validation error
                    errors = []
                    if isinstance(exception, voluptuous.error.MultipleInvalid):
                        errors = exception.errors
                    elif isinstance(exception, voluptuous.error.Invalid):
                        errors = [exception]
                    elif isinstance(exception, voluptuous.error.MatchInvalid):
                        errors = [exception]
                    self.set_status(400)
                    result['error']['message'] = get_error_message(exception)
                    # Dotted paths of the offending parameters, e.g. 'user.name'
                    invalid_params = ['.'.join(map(str, e.path))
                                      for e in errors if e.path]
                    if invalid_params:
                        result['error']['params'] = invalid_params
                elif isinstance(exception, APIError):
                    result['error']['message'] = ' '.join(exception.args)
            except Exception as e:
                # Never let error rendering itself raise; log and fall back
                # to the generic message prepared above.
                logging.exception(e)
        self.set_header("Content-Type", "application/json; charset=UTF-8")
        chunk = escape.json_encode(result)
        self.finish(chunk)
    def plain_args(self):
        """Return all request arguments as a plain {name: value} dict."""
        return dict((key, self.get_argument(key)) for key in self.request.arguments)
    def render_string(self, template, **kwargs):
        """Render *template*, exposing url_concat and tornado options to it."""
        kwargs['url_concat'] = url_concat
        kwargs['options'] = options
        return super(BaseHandler, self).render_string(template, **kwargs)
class PageNotFound(BaseHandler):
    """Catch-all handler: responds 404 and renders the 404 template."""
    def get(self):
        self.set_status(404)
        self.render('404.html')
    def post(self):
        # POST falls through to the same 404 rendering as GET.
        self.get()
| chenjian525/dm | dm/controllers/__init__.py | __init__.py | py | 3,150 | python | en | code | 0 | github-code | 13 |
34055045058 | from rest_framework import serializers
from advertisment.models import Advertisment
def valid_transaction_number(transaction_number):
    """Validate a transaction number of the form ``AA-123-BC/45``.

    Layout: two letters, '-', three digits, '-', two letters, '/',
    two digits — 12 characters total.  Raises
    ``serializers.ValidationError`` on any deviation.
    """
    trsct_num = str(transaction_number)
    # Check the length first so the positional checks below cannot raise
    # IndexError on short inputs (the old code indexed before length-checking).
    if len(trsct_num) != 12:
        raise serializers.ValidationError("invalid transaction_number")
    if trsct_num[2] != '-' or trsct_num[6] != '-' or trsct_num[9] != '/':
        raise serializers.ValidationError("invalid transaction_number")
    if not trsct_num[:2].isalpha() or not trsct_num[7:9].isalpha():
        raise serializers.ValidationError("invalid transaction_number")
    # str.isdigit replaces the previous int() inside a bare `except:`, which
    # also masked unrelated errors.
    if not trsct_num[3:6].isdigit() or not trsct_num[-2:].isdigit():
        raise serializers.ValidationError("invalid transaction_number")
class AdvertismentSerializer(serializers.Serializer):
    """Serializer for Advertisment records (banner bookings with a paid
    transaction number)."""
    id = serializers.IntegerField(read_only=True)
    website_url = serializers.URLField()
    start_date = serializers.DateField()
    end_date = serializers.DateField()
    price = serializers.IntegerField()
    title = serializers.CharField(max_length=50)
    photo_url = serializers.URLField()
    transaction_number = serializers.CharField()

    def create(self, validated_data):
        """Create and return a new Advertisment from the validated data."""
        return Advertisment.objects.create(**validated_data)

    def update(self, instance, validated_data):
        """Apply validated fields to *instance* and return it."""
        instance.website_url = validated_data.get('website_url', instance.website_url)
        # Fixed: previously looked up the bogus key 'background-repeat:'
        # (a stray CSS paste), so start_date could never be updated.
        instance.start_date = validated_data.get('start_date', instance.start_date)
        instance.end_date = validated_data.get('end_date', instance.end_date)
        instance.price = validated_data.get('price', instance.price)
        instance.title = validated_data.get('title', instance.title)
        instance.photo_url = validated_data.get('photo_url', instance.photo_url)
        instance.transaction_number = validated_data.get('transaction_number', instance.transaction_number)
        # DRF's Serializer.update() contract requires returning the instance.
        return instance
| holeksii/python-projects | restfulapi/advertisment/serializers.py | serializers.py | py | 1,909 | python | en | code | 0 | github-code | 13 |
15244214081 | import numpy as np
import xlwings as xw
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
def vtk_visualization(k_grids, t_grids, k, t, v_values):
    """Plot a volatility surface plus the T- and k-grid spacings.

    Parameters
    ----------
    k_grids, t_grids : 1-D arrays of k / T grid points.
    k, t : 2-D meshgrid arrays built from k_grids and t_grids.
    v_values : 2-D array of values on the (k, t) mesh.

    Shows three interactive figures; returns nothing.
    """
    ## VTK plot
    fig = plt.figure()
    ax = fig.gca(projection = '3d')
    ax.plot_surface(k, t, v_values, cmap=cm.coolwarm)
    ax.set_xlabel('k')
    ax.set_ylabel('t')
    ax.set_zlabel('v')
    ax.set_title('VTK')
    plt.show()
    ## T steps plot: grid points (left) and successive decrements (right).
    # (Dropped a redundant bare fig.add_subplot(121) call that discarded
    # its result immediately before ax1 was created.)
    fig = plt.figure(figsize=plt.figaspect(0.5))
    ax1 = fig.add_subplot(121)
    ax1.plot(t_grids, '.-', linewidth=1.0, ms=4.0)
    ax1.grid(True, which='both')
    ax2 = fig.add_subplot(122)
    ax2.plot(-1.0 * np.diff(t_grids), '.-', linewidth=1.0, ms=4.0)
    ax2.grid(True, which='both')
    ax2.set_ylim(bottom = 0.0)
    plt.suptitle('T steps')
    plt.show()
    ## k steps plot: grid points (left) and successive increments (right).
    fig = plt.figure(figsize=plt.figaspect(0.5))
    ax1 = fig.add_subplot(121)
    ax1.plot(k_grids, '.-', linewidth=1.0, ms=4.0)
    ax1.grid(True, which='both')
    ax2 = fig.add_subplot(122)
    ax2.plot(np.diff(k_grids), '.-', linewidth=1.0, ms=4.0)
    ax2.grid(True, which='both')
    ax2.set_ylim(bottom = 0.0)
    plt.suptitle('k steps')
    plt.show()
if __name__ == '__main__':
    # Pull the grid axes and the value surface out of the Excel workbook via
    # xlwings (requires Excel); T values run down column A, k values across
    # row 1, the surface fills the table starting at B2.
    wb = xw.Book('LocVol Parameters.xlsx')
    sht = wb.sheets['VTK']
    t_grids = sht.range('A2').options(np.array, expand='down').value
    k_grids = sht.range('B1').options(np.array, expand='right').value
    v_values = sht.range('B2').options(np.array, expand='table').value
    print(v_values.shape)
    k, t = np.meshgrid(k_grids, t_grids)
vtk_visualization(k_grids, t_grids, k, t, v_values) | cycbill/Local-Vol-Calibration | source/vtk_visualization.py | vtk_visualization.py | py | 1,699 | python | en | code | 2 | github-code | 13 |
10299074103 |
import matplotlib.pyplot as pt
import pandas as pd
# Load the migration cron-job export and chart the per-status breakdown.
data = pd.read_csv('/Users/shuchitamishra/Desktop/Prod-Migration/Cron Job 15_03-Table 1.csv')
print(data)
# Row count per Status value.  NOTE(review): the hard-coded labels/colors
# below assume the column contains exactly Failure/Skipped/Success in the
# default (sorted) groupby order — verify against the CSV.
result = data.groupby('Status')['Status'].count()
print(result)
pt.axis('equal')
pt.pie(result, colors = ['red','yellow','green'], labels = ['Failure','Skipped','Success'], autopct='%1.1f%%')
pt.legend(title='Analysis')
pt.show()
| shuchita28/Migration_Demo | piechart.py | piechart.py | py | 397 | python | en | code | 0 | github-code | 13 |
72087529298 | from typing import Callable
import jax
import jax.numpy as jnp
from jax.flatten_util import ravel_pytree
from newton_smoothers.base import MVNStandard, FunctionalModel
from newton_smoothers.batch.utils import (
log_posterior_cost,
residual_vector,
block_diag_matrix,
line_search_update,
)
def _gn_bfgs_hess_update(S, s, yd, yc):
aux = (
jnp.outer(yc - S @ s, yd) / jnp.dot(yd, s)
+ jnp.outer(yd, yc - S @ s) / jnp.dot(yd, s)
- jnp.outer(yc - S @ s, s) @ jnp.outer(yd, yd) / jnp.dot(yd, s) ** 2
)
tau = jnp.minimum(
1.0, jnp.abs(jnp.dot(s, yc)) / jnp.abs(jnp.dot(s, jnp.dot(S, s)))
)
return tau * S + aux
def _gn_bfgs_step(
x: jnp.ndarray,
r: jnp.ndarray,
J: jnp.ndarray,
W: jnp.ndarray,
bfgs_hess: jnp.ndarray,
):
grad = jnp.dot(J.T @ W, r)
hess = J.T @ W @ J + bfgs_hess
dx = -jnp.linalg.solve(hess, grad)
return dx
def _line_search_gn_bfgs(
    x0: jnp.ndarray,
    fun: Callable,
    residual: Callable,
    weights: jnp.ndarray,
    k: int,
):
    """Run *k* line-searched GN-BFGS iterations from x0.

    ``fun`` is the scalar cost, ``residual`` the residual vector, ``weights``
    the constant weight matrix W.  Returns the final iterate and the
    per-iteration cost history (length k).
    """
    W = weights
    def body(carry, _):
        # carry = (current iterate, previous residual/Jacobian, BFGS term)
        x, rp, Jp, bfgs_hess = carry
        dx = _gn_bfgs_step(x, rp, Jp, W, bfgs_hess)
        xn = line_search_update(x, dx, fun)
        # GN-BFGS hessian update
        rn = residual(xn)
        Jn = jax.jacobian(residual)(xn)
        # yd: full gradient change; yc: the part attributable to the
        # Jacobian change alone (same residual rn in both terms).
        yd = jnp.dot(Jn.T @ W, rn) - jnp.dot(Jp.T @ W, rp)
        yc = jnp.dot(Jn.T @ W, rn) - jnp.dot(Jp.T @ W, rn)
        bfgs_hess = _gn_bfgs_hess_update(bfgs_hess, xn - x, yd, yc)
        return (xn, rn, Jn, bfgs_hess), fun(xn)
    rp = residual(x0)
    Jp = jax.jacobian(residual)(x0)
    # Small positive-definite seed for the BFGS correction term.
    bfgs_hess = 1e-6 * jnp.eye(x0.shape[0])
    (xn, _, _, _), fn = jax.lax.scan(
        body, (x0, rp, Jp, bfgs_hess), jnp.arange(k)
    )
    return xn, fn
def line_search_iterated_batch_gn_bfgs_smoother(
    init_nominal: jnp.ndarray,
    observations: jnp.ndarray,
    init_dist: MVNStandard,
    transition_model: FunctionalModel,
    observation_model: FunctionalModel,
    nb_iter: int = 10,
):
    """Batch smoother: optimise the whole state trajectory with line-searched
    GN-BFGS iterations on the flattened log-posterior cost.

    Returns the smoothed trajectory (same pytree structure as *init_nominal*)
    and the cost history, with the initial cost prepended (length nb_iter+1).
    """
    # Flatten the trajectory pytree so the optimiser works on one vector.
    flat_init_nominal, _unflatten = ravel_pytree(init_nominal)
    def _flat_log_posterior_cost(flat_state):
        _state = _unflatten(flat_state)
        return log_posterior_cost(
            _state,
            observations,
            init_dist,
            transition_model,
            observation_model,
        )
    def _flat_residual_vector(flat_state):
        _state = _unflatten(flat_state)
        return residual_vector(
            _state,
            observations,
            init_dist,
            transition_model,
            observation_model,
        )
    # Constant block-diagonal weight matrix built from the model noises.
    weight_matrix = block_diag_matrix(
        init_nominal,
        observations,
        init_dist,
        transition_model,
        observation_model,
    )
    init_cost = _flat_log_posterior_cost(flat_init_nominal)
    flat_nominal, costs = _line_search_gn_bfgs(
        x0=flat_init_nominal,
        fun=_flat_log_posterior_cost,
        residual=_flat_residual_vector,
        weights=weight_matrix,
        k=nb_iter,
    )
    return _unflatten(flat_nominal), jnp.hstack((init_cost, costs))
| hanyas/second-order-smoothers | newton_smoothers/batch/ls_gn_bfgs.py | ls_gn_bfgs.py | py | 3,123 | python | en | code | 3 | github-code | 13 |
24994899184 | import configparser
from datetime import datetime
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format, dayofweek
import pandas as pd
from pyspark.sql.functions import monotonically_increasing_id
from datetime import datetime
from pyspark.sql import functions as F
from pyspark.sql import types as T
config = configparser.ConfigParser()
config.read('dl.cfg')
# Export the AWS credentials from dl.cfg into the environment so the
# hadoop-aws S3 connector can authenticate.
os.environ['AWS_ACCESS_KEY_ID']=config['KEYS']['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY']=config['KEYS']['AWS_SECRET_ACCESS_KEY']
def create_spark_session():
    """Create (or fetch the existing) SparkSession, pulling in the
    hadoop-aws package so s3a:// paths can be read."""
    spark = SparkSession \
        .builder \
        .config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0") \
        .getOrCreate()
    return spark
def process_song_data(spark, input_data, output_data):
    """Read the song-data JSON files under *input_data* and write the songs
    and artists dimension tables as parquet under *output_data*."""
    print("------------processing songs----------\n")
    #use the full data set
    song_data = os.path.join(input_data,"song_data/*/*/*/*.json")
    #use a subset of the dataset
    #song_data = os.path.join(input_data,"song_data/A/A/A/*.json")
    # read song data file
    df = spark.read.json(song_data)
    # extract columns to create songs table
    songs_table = df['song_id','title','artist_id','year','duration']
    # write songs table to parquet files partitioned by year and artist
    songs_table.write.partitionBy('year','artist_id').parquet(os.path.join(output_data,"songs.parquet"),'overwrite')
    print("------------processing artists----------\n")
    # extract columns to create artists table
    artists_table = df['artist_id','artist_name','artist_location','artist_latitude','artist_longitude']
    #artists_table.limit(2).toPandas()
    # write artists table to parquet files
    artists_table.write.parquet(os.path.join(output_data,"artists.parquet"),'overwrite')
def process_log_data(spark, input_data, output_data):
    """Read the event-log JSON files under *input_data* and write the users,
    time and songplays tables as parquet under *output_data*."""
    # get filepath to log data file
    log_data =os.path.join(input_data,"log_data/*/*/*.json")
    #use a subset of the dataset
    #log_data = '/home/workspace/data/log-data/2018-11-19-events.json'
    # read log data file
    df = spark.read.json(log_data)
    # filter by actions for song plays
    df = df[df['page'] == 'NextSong']
    print("------------processing users----------\n")
    # extract columns for users table
    users_table = df[['userId','firstName', 'lastName','gender','level']]
    # write users table to parquet files
    users_table.write.parquet(os.path.join(output_data,"users.parquet"),'overwrite')
    print("------------processing time----------\n")
    #add new column for timestamp
    # ts is in epoch milliseconds, hence the /1000.0 before fromtimestamp.
    get_timestamp = F.udf(lambda x: datetime.fromtimestamp( (x/1000.0) ), T.TimestampType())
    df = df.withColumn("timestamp", get_timestamp(df.ts))
    df = df.withColumn("start_time", df.timestamp)
    df = df.withColumn('hour',hour('start_time')).withColumn('day',dayofmonth('start_time')).withColumn('week',weekofyear('start_time'))
    df = df.withColumn('month',month('start_time')).withColumn('year',year('start_time')).withColumn('weekday',dayofweek('start_time'))
    # extract columns to create time table
    time_table = df[['start_time','hour','day','week','month','year','weekday']]
    # write time table to parquet files partitioned by year and month
    time_table.write.partitionBy('year','month').parquet(os.path.join(output_data,"time.parquet"),'overwrite')
    print("------------processing songplays----------\n")
    # read in song data to use for songplays table
    # NOTE(review): this reads the literal relative path "songs.parquet",
    # which only matches process_song_data's output when output_data is "" —
    # confirm if output_data is ever non-empty.
    song_df = spark.read.parquet("songs.parquet")
    df = df.join(song_df,song_df.title == df.song)
    songplays_table = df['start_time', 'userId', 'level', 'song_id', 'artist_id', 'sessionId', 'location', 'userAgent']
    # NOTE(review): the select().collect() below materialises ids but does
    # not attach a songplay_id column to songplays_table — verify intent.
    songplays_table.select(monotonically_increasing_id().alias('songplay_id')).collect()
    # write songplays table to parquet files partitioned by year and month
    songplays_table.write.parquet(os.path.join(output_data, 'songplays.parquet'), 'overwrite')
def main():
    """Run the full ETL: build a Spark session, then process song and log
    data from the public S3 bucket; output_data is '' (current directory)."""
    spark = create_spark_session()
    input_data = "s3a://udacity-dend/"
    output_data = ""
    process_song_data(spark, input_data, output_data)
    process_log_data(spark, input_data, output_data)
if __name__ == "__main__":
    main()
| edwards158/UdacityDataEngineering | datalakes/project4/etl.py | etl.py | py | 4,538 | python | en | code | 0 | github-code | 13 |
2276388320 | from typing import Callable
from ipywidgets import IntProgress
import numpy as np
import copy
class MultiDimGA:
    """Multi-dimensional genetic-algorithm optimiser: binary-encoded
    individuals, tournament selection, age/lifetime-based survival and an
    ipywidgets progress bar (designed to run inside a Jupyter notebook)."""
    def __init__(self):
        # All configuration is populated by solve(); see its parameters.
        self.f = None
        self.h = None
        self.n = None
        self.dim = None
        self.intervals = None
        self.max_iter = None
        self.max_no_conv_iter = None
        self.tournament_n = None
        self.mutation_p = None
        self.crossover_p = None
        self.min_lifetime = None
        self.max_lifetime = None
        self.reproduction_p = None
        self.max_problem = None
        self.verbose = None
        self.m = 0
        self.bit_array_sizes = []
        self.history = []
    def solve(self,
              f: Callable[[], float],
              intervals: list = [[-2, 2], [0, 1]],
              h: float = 1e-8,
              n: int = 100,
              tournament_n: int = 3,
              mutation_p: float = 0.1,
              crossover_p: float = 0.9,
              max_iter: int = 100,
              max_no_conv_iter: int = 20,
              min_lifetime: int = 10,
              max_lifetime: int = 10,
              reproduction_p: float = 0.5,
              max_problem: bool = False,
              verbose: bool = False) -> float:
        """Minimise (or maximise when max_problem=True) *f* over the boxed
        search space *intervals*; returns (best_args, f(*best_args)).

        *h* sets the per-axis resolution used to size the bit encoding and
        *n* the population size.
        NOTE(review): *intervals* is a mutable default argument; it is only
        read here, but the shared default must not be mutated by callers.
        """
        self.h = h
        self.n = n
        self.f = f
        self.dim = np.array(intervals).shape[0]
        self.intervals = intervals
        self.tournament_n = tournament_n
        self.mutation_p = mutation_p
        self.crossover_p = crossover_p
        self.max_iter = max_iter
        self.max_no_conv_iter = max_no_conv_iter
        self.max_problem= max_problem
        self.verbose = verbose
        self.progress_bar = IntProgress(max=max_iter)
        self.min_lifetime = min_lifetime
        self.max_lifetime = max_lifetime
        # eta scales how strongly fitness spreads the lifetime range.
        self.eta = 1/2 * (self.max_lifetime - self.min_lifetime)
        self.reproduction_p = reproduction_p
        self.bit_array_sizes = self._calc_bit_array_size()
        self.m = np.sum(self.bit_array_sizes)
        args, solution = self._genetic_algorithm()
        return args, f(*args)
    def _genetic_algorithm(self) -> tuple:
        """Main GA loop; returns (best_args, best_score) over all generations."""
        self.history = []
        best_score = np.inf
        best_args = None
        no_conv_iter = 0
        # NOTE(review): display() is the IPython/Jupyter builtin; outside a
        # notebook this raises NameError — confirm the intended runtime.
        display(self.progress_bar)
        population = self._initialize_population()
        for i in range(self.max_iter):
            population = self._next_population(population)
            if population.shape[0] == 0:
                # The whole population died out.
                break
            args, score = self._eval_population(population)
            self.history.append(score)
            if (i+1) % 1 == 0:
                self.progress_bar.value = i + 1
            if score < best_score:
                best_score = score
                best_args = args
                no_conv_iter = 0
            else:
                no_conv_iter += 1
            if no_conv_iter > self.max_no_conv_iter:
                # Early stop: no improvement for max_no_conv_iter generations.
                break
        self.progress_bar.value = self.max_iter
        return best_args, best_score
    def _initialize_population(self):
        """Create n random individuals: dicts with an age and an m-bit genome."""
        population_values = np.random.choice(2, size=[self.n, self.m])
        population = np.array([{'age': 0, 'value': ind} for ind in population_values])
        return population
    def _next_population(self, population: np.ndarray) -> np.ndarray:
        """Age individuals, drop those past their lifetime, add offspring."""
        population_lifetime = self._calc_lifetime(population)
        survived = []
        for ind in population:
            ind['age'] = ind['age'] + 1
        for ind, lifetime in zip(population, population_lifetime):
            if ind['age'] < lifetime:
                survived.append(ind)
        died = len(population) - len(survived)
        offspring = self._spawn_offspring(np.array(survived))
        new_population = np.concatenate([survived + offspring])
        if self.verbose:
            print(f"Total: {len(new_population)}; Born: {len(offspring)}; Died: {died}")
        return new_population
    def _spawn_offspring(self, population):
        """Breed up to len(population)/2 pairs via tournament selection plus
        optional mutation and crossover; children start at age 0."""
        offspring = []
        for _ in range(int(population.shape[0]/2)):
            if np.random.rand() < self.reproduction_p:
                x1 = self._tournament(population)
                x2 = self._tournament(population)
                if np.random.rand() < self.mutation_p:
                    x1, x2 = self._mutation(x1), self._mutation(x2)
                if np.random.rand() < self.crossover_p:
                    x1, x2 = self._crossover(x1, x2)
                x1['age'], x2['age'] = 0, 0
                offspring.extend([x1, x2])
        return offspring
    def _calc_lifetime(self, population):
        """Map each individual's (negated) score linearly onto a lifetime:
        fitter individuals get longer lifetimes."""
        scores = - np.array([self._eval(ind) for ind in population])
        scores_ = scores - np.min(scores) + 1
        scores_min_abs = np.min(np.abs(scores))
        scores_max_abs = np.max(np.abs(scores_))
        lifetime = self.min_lifetime + 2*self.eta*(scores_ - scores_max_abs)/(scores_max_abs - scores_min_abs)
        return lifetime
    def _binary_to_decimal(self, bits: np.ndarray, a: int, b: int, m: int) -> float:
        """Decode an m-bit genome slice (least-significant bit first) to a
        float scaled onto [a, b]."""
        decimal = 0
        for idx, bit in enumerate(bits):
            decimal += 2 ** idx * bit
        return (b - a) / (2 ** m - 1) * decimal + a
    def _eval(self, individual: dict) -> float:
        """Score an individual; negated for maximisation problems so the GA
        always minimises internally."""
        args = self._arg_eval(individual)
        y = self.f(*args)
        return -y if self.max_problem else y
    def _arg_eval(self, individual: dict) -> list:
        """Decode the genome into one float argument per dimension."""
        args = np.zeros(self.dim)
        for i in range(self.dim):
            a = self.intervals[i][0]
            b = self.intervals[i][1]
            m = self.bit_array_sizes[i]
            # Genome layout: consecutive per-dimension bit slices.
            clip_from = sum(self.bit_array_sizes[:i])
            clip_to = sum(self.bit_array_sizes[:i+1])
            args[i] = self._binary_to_decimal(
                individual['value'][clip_from:clip_to], a, b, m
            )
        return args
    def _eval_population(self, population: np.ndarray) -> tuple:
        """Return (args, score) of the best individual in the population."""
        index = np.argmin([self._eval(ind) for ind in population])
        ind_args = self._arg_eval(population[index])
        ind_score = self._eval(population[index])
        return ind_args, ind_score
    def _tournament(self, population: np.ndarray) -> np.ndarray:
        """Pick tournament_n random contestants and return a deep copy of
        the best-scoring one."""
        contestants_indices = np.random.randint(population.shape[0], size=self.tournament_n)
        contestants = population[contestants_indices]
        contestants_eval = [self._eval(contestant) for contestant in contestants]
        champion_index = np.argmin(contestants_eval)
        return copy.deepcopy(contestants[champion_index])
    def _mutation(self, x: dict) -> np.ndarray:
        """Flip one random bit of x's genome in place; returns x."""
        index = np.random.randint(self.m)
        x['value'][index] = bool(x['value'][index]) ^ True
        return x
    def _crossover(self, x1: dict, x2: dict) -> tuple:
        """Two-point crossover: swap the genome segment between two random
        points, in place; returns both individuals."""
        points = np.random.randint(self.m, size=(2))
        x1['value'][min(points):max(points)], x2['value'][min(points):max(points)] = \
            x2['value'][min(points):max(points)], x1['value'][min(points):max(points)]
        return x1, x2
    def _calc_bit_array_single(self, interval: np.ndarray) -> int:
        """Smallest m such that 2**m sample points cover *interval* at
        resolution h."""
        a, b = interval[0], interval[1]
        samples = abs(b - a) / self.h
        m = 0
        while samples > 2 ** m:
            m += 1
        return m
    def _calc_bit_array_size(self) -> list:
        """Per-dimension bit counts defining the genome layout."""
        array_sizes = []
        for interval in self.intervals:
            array_sizes.append(self._calc_bit_array_single(interval))
        return array_sizes
| vseredovych/genetic-algorithms | multi-dim-ga/multidimga.py | multidimga.py | py | 7,558 | python | en | code | 0 | github-code | 13 |
3424531626 | #!/usr/bin/env python
import os
import time
from setuptools import find_packages, setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def readme(fname='README'):
    """
    Utility function to read the README file.
    Used for the long_description.
    Args:
        fname: file name, usually 'README'
    Returns: text from 'README' file.
    """
    # Read as UTF-8 explicitly so a README with non-ASCII characters does
    # not break `setup.py` on platforms with a different default encoding.
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path, encoding='utf-8') as file:
        return file.read()
setup(
    name="framework",
    # Date-based version: str.format ignores the surplus struct_time fields,
    # so this yields "0.<year>.<month>" from the current local time.
    version="0.{}.{}".format(*time.localtime()),
    author="Anton Butenko",
    author_email="ant.butenko@gmail.com",
    description=("POC python framework."),
    license="BSD",
    packages=find_packages(),
    long_description=readme(),
    install_requires=[
        'selenium',
        'pytest',
    ],
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
) | AButenko/selenium_tests | setup.py | setup.py | py | 1,046 | python | en | code | 0 | github-code | 13 |
26203147465 | from http import HTTPStatus as HTTP_Status
from utils.log import Log
class HTTPMethod(object):
    """Constants representing various HTTP request methods."""
    # Values are the lower-case verb strings.
    GET = "get"
    PUT = "put"
    POST = "post"
    DELETE = "delete"
# Priority rank for a subset of HTTP status codes, consumed by the
# comparison helpers below (presumably higher = more severe outcome —
# confirm with callers).  `cpo` is the short alias the helpers use.
code_priority_order = cpo = {
    HTTP_Status.OK: 1,
    HTTP_Status.CREATED: 2,
    HTTP_Status.RESET_CONTENT: 2,
    HTTP_Status.NOT_MODIFIED: 0,
    HTTP_Status.NOT_FOUND: 3,
    HTTP_Status.NO_CONTENT: 4,
    HTTP_Status.CONFLICT: 4,
    HTTP_Status.NOT_ACCEPTABLE: 4,
    HTTP_Status.UNPROCESSABLE_ENTITY: 4,
}
def __lt(code_a, code_b):
    """Strictly-lower priority; False when either code is unmapped."""
    pa, pb = __get(code_a), __get(code_b)
    if pa is None or pb is None:
        return False
    return pa < pb
def __lte(code_a, code_b):
    """Lower-or-equal priority; False when either code is unmapped."""
    pa, pb = __get(code_a), __get(code_b)
    if pa is None or pb is None:
        return False
    return pa <= pb
def __gt(code_a, code_b):
    """Strictly-higher priority; False when either code is unmapped."""
    pa, pb = __get(code_a), __get(code_b)
    if pa is None or pb is None:
        return False
    return pa > pb
def __gte(code_a, code_b):
    """Higher-or-equal priority; False when either code is unmapped."""
    pa, pb = __get(code_a), __get(code_b)
    if pa is None or pb is None:
        return False
    return pa >= pb
def __eq(code_a, code_b):
    """Equal priority; False when either code is unmapped."""
    pa, pb = __get(code_a), __get(code_b)
    if pa is None or pb is None:
        return False
    return pa == pb
def __get(code):
    """Return the configured priority for *code*, or None (after logging a
    warning) when the code has no entry in `cpo`."""
    prior_order = cpo.get(code, None)
    if prior_order is None:
        Log.get("http-lib").warn(f"{code} without priority order.")
    return prior_order
# Monkey-patch the helpers onto HTTPStatus so every status member gains
# priority-comparison methods, e.g. HTTP_Status.OK.lt(HTTP_Status.CONFLICT).
HTTP_Status.lt = __lt
HTTP_Status.lte = __lte
HTTP_Status.gt = __gt
HTTP_Status.gte = __gte
HTTP_Status.eq = __eq
| guard-project/cb-manager | lib/http.py | http.py | py | 1,654 | python | en | code | 1 | github-code | 13 |
73718734736 | import numpy as np
from prtp.Grating import Grating
from prtp.Combination import Combination
from prtp.Rays import Rays
import prtp.transformationsf as trans
import astropy.units as u
class GratingStack(Combination):
    '''
    Class GratingStack:
    A special kind of combination that specifically handles a group of gratings
    '''
    @u.quantity_input(rx=u.mm,ry=u.mm,rz=u.mm)
    def __init__(self,rx=None, ry=None, rz=None, keeporder=True):
        '''
        Initializes the GratingStack:
        Inputs:
        rx,ry,rz - The point about which the whole stack will rotate, see
            defineRotationPoint for more info. Must be astropy units of length
        keeporder - If True, photons will be traced to the Gratings in the order
            they were added to the stack. If False, the stack will use
            smartTrace, where photons are sent to the nearest Grating first
        Notes:
        - If you want more complicated Gratings, you can modify their parameters
            using self.modifyParam(name,value), or access the Gratings themselves
            in the self.componentlist parameter
        '''
        Combination.__init__(self)
        # Rotation-point components are only set when supplied, so self.rx
        # etc. may be absent on instances constructed without them.
        if rx is not None:
            self.rx = rx.to(u.mm)
        if ry is not None:
            self.ry = ry.to(u.mm)
        if rz is not None:
            self.rz = rz.to(u.mm)
        self.keeporder = keeporder
    ## Ray-Tracing Functions
    def trace(self, rays, considerweights=False,eliminate='remove'):
        '''
        Function trace:
        Traces the rays through all of the Gratings in the Stack
        Inputs:
        rays - The rays you want to trace
        considerweights - If true, any effect that probabilistically removes
            photons will instead affect their weights
        eliminate - If 'remove', photons that miss will be removed. Otherwise,
            missed photons will be replaced with NaNs in the x-position
        Outputs:
        Efficiency information about the stack
        Notes:
        - Assumes that each Grating has been given the necessary parameters,
            this function works with no user input.
        '''
        # Dispatch on the ordering policy chosen at construction time.
        if self.keeporder:
            return self.defaultTrace(rays,considerweights,eliminate)
        else:
            return self.smartTrace(rays,considerweights,eliminate)
    def defaultTrace(self,rays,considerweights=False,eliminate='remove'):
        '''
        Function defaultTrace:
        Traces the Rays through the Grating Stack in the order that the Gratings
            were added to the Stack. This function will be called if
            self.keeporder is True
        Inputs:
        rays - The rays you want to trace through the stack
        considerweights - Boolean saying if you want to consider the reflectivity
            of the Gratings
        eliminate - If 'remove', photons that miss will be removed. Otherwise,
            missed photons will be replaced with NaNs in the x-position
        Outputs:
        A tuple containing information about the efficiency of the Gratings
        '''
        # Make a blank Rays object to store the Rays that make it
        if eliminate == 'remove':
            finalrays = Rays()
        else:
            # When eliminate is NaN, we will keep track of traced rays using
            # finalrays, where np.nan represents a photon which has not yet
            # been successfully traced
            finalrays = rays.copy()
            finalrays.x[:] = np.nan
            # success stores which rays have been successfully traced.
            success = np.zeros(len(rays)).astype(bool)
        # Keep track of the input rays for when we're finished with one Grating
        inputrays = rays.copy()
        # Keep track of the length of the input rays
        l = rays.length(considerweights)
        # Iterate through each Grating Object
        for g in self.componentlist:
            # Through each pass we need to ensure that the rays that make it are
            # placed into a final rays object
            # All those that miss are passed to the next Grating
            g.trace_to_surf(rays)
            # Find which rays have hit the grating
            tarray = g.hit(rays)
            hitrays = rays.split(tarray)
            # Make sure at least some rays have hit the grating
            if (len(hitrays) == 0):
                continue
            g.trace(hitrays,considerweights)
            # Add the hitrays to our final tally
            if eliminate == 'remove':
                finalrays += hitrays
                # Take the rays that hit this grating out of the original Rays object
                inputrays.remove(tarray)
            else:
                # Of the rays which were just successfully traced, for which ones
                # is it their first time? Those ones need to be pulled
                rays_to_pull = np.logical_and(np.logical_not(success),tarray)
                finalrays.pullrays(rays,rays_to_pull)
                # Update success array
                success = np.logical_not(np.isnan(finalrays.x))
            # Back remaining rays up to their original position
            rays.makecopy(inputrays)
            if len(rays) == 0:
                break
        # Make it so that the original rays now contain the output
        rays.makecopy(finalrays)
        return ("Missed Grating Stack", l, rays.length(considerweights))
    def smartTrace(self,rays,considerweights=False,eliminate='remove'):
        '''
        Function smartTrace:
        Traces the Rays through the Grating Stack in the order that the photons
            would collide with them. This function will be called if
            self.keeporder is False
        Inputs:
        rays - The rays you want to trace through the stack
        considerweights - Boolean saying if you want to consider the
            reflectivity of the Gratings
        eliminate - If 'remove', photons that miss will be removed. Otherwise,
            missed photons will be replaced with NaNs in the x-position
        Outputs:
        A tuple containing information about the efficiency of the Gratings
        Notes:
        This function is usually slower than defaultTrace. It should only
            be used if different photons in the Rays object will encounter
            Gratings in a different order.
        '''
        # Make a blank Rays object to store the Rays that make it
        if eliminate == 'remove':
            finalrays = Rays()
        else:
            # When eliminate is NaN, we will keep track of traced rays using
            # finalrays, where np.nan represents a photon which has not yet
            # been successfully traced
            finalrays = rays.copy()
            finalrays.x[:] = np.nan
            # success stores which rays have been successfully traced.
            success = np.zeros(len(rays)).astype(bool)
        # Keep track of the input rays for when we're finished with one Grating
        inputrays = rays.copy()
        # Keep track of the length of the input rays
        l = rays.length(considerweights)
        # Find the order that each photon will see the gratings
        orders = []
        for g in self.componentlist:
            orders.append(g.getDist(rays))
        # orderarr stores (for each photon) the order in which it will see
        # the gratings
        orderarr = np.stack(orders,axis=1)
        orderarr = np.argsort(orderarr).astype(float)
        i = 0
        while True:
            # Check if we've successfully traced everything
            if (orderarr[:,0] == -1).all():
                break
            # Find which rays need to be trace to this Grating
            tarray = (orderarr[:,0] == i)
            if (np.sum(tarray) == 0):
                # Go to the next Grating
                i += 1
                if (i >= len(self.componentlist)):
                    i = 0
                continue
            newrays = rays.copy()
            self.componentlist[i].trace_to_surf(newrays)
            # Find which rays have hit the grating
            hit = self.componentlist[i].hit(newrays)
            # Keep those which have hit the Grating
            if eliminate == 'remove':
                newrays.remove(np.logical_or(np.logical_not(hit),np.logical_not(tarray)))
            else:
                newrays.x[np.logical_or(np.logical_not(hit),np.logical_not(tarray))] = np.nan
            if (np.sum(hit) != 0):
                # These operations can only be done on non-empty Rays Objects
                # Trace and save the Rays which hit the Grating
                self.componentlist[i].trace(newrays,eliminate=eliminate)
                # Add the hitrays to our final tally
                if eliminate == 'remove':
                    finalrays += newrays
                    # Take the rays that hit this grating out of the original Rays object
                    inputrays.remove(hit)
                else:
                    # Of the rays which were just successfully traced, for which ones
                    # is it their first time? Those ones need to be pulled
                    rays_to_pull = np.logical_and(np.logical_and(np.logical_not(success),hit),tarray)
                    finalrays.pullrays(newrays,rays_to_pull)
                    # Update success array
                    success = np.logical_not(np.isnan(finalrays.x))
            # Use this hit trutharray to find which of the original rays have
            # hit the Grating
            test = tarray.copy()
            tarray[tarray] = hit[tarray]
            # Remove the rays which have hit this Grating
            if eliminate == 'remove':
                rays.remove(tarray)
            else:
                rays.x[tarray] = np.nan
            # Update the orderarr to rotate out those which needed to be traced
            # to this Grating
            orderarr[test] = np.roll(orderarr[test],-1,axis=1)
            # Set the already tried indices to -1 to make sure we don't try them
            # again
            orderarr[test,-1] = -1
            # Update orderarr to remove the hit photons
            if eliminate == 'remove':
                orderarr = np.delete(orderarr,np.where(tarray)[0],0)
            else:
                # NOTE(review): '==' below is a comparison, not an assignment,
                # so this statement is a no-op.  Hit rows are still retired by
                # the roll/-1 bookkeeping above, and an actual assignment of
                # np.nan would break the (orderarr[:,0] == -1).all() exit test
                # — confirm intent before "fixing" it.
                orderarr[tarray] == np.nan
            # Go to the next Grating
            i += 1
            if (i >= len(self.componentlist)):
                i = 0
        # Make it so that the original rays now contain the output
        rays.makecopy(finalrays)
return ("Missed Grating Stack", l, rays.length(considerweights)) | bjmyers/prtp | GratingStack.py | GratingStack.py | py | 11,200 | python | en | code | 0 | github-code | 13 |
71528955218 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: create the SensorData model (binary value
    payload, integer timestamp and a foreign key to sensors.Sensor)."""
    dependencies = [
        ('sensors', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='SensorData',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('value', models.BinaryField()),
                ('timestamp', models.BigIntegerField()),
                ('sensor_id', models.ForeignKey(to='sensors.Sensor')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| Fransan/hal0001 | hal_env/hal/sensors/migrations/0002_sensordata.py | 0002_sensordata.py | py | 712 | python | en | code | 0 | github-code | 13 |
41388296989 | import common
import time
import redis
import re
from uuid import uuid4
r = redis.StrictRedis()
def reimport():
    """Wipe redis and bulk-load every haproxy log line as a hash entry keyed
    haproxy::entry::<uuid4> with the raw line stored under field '_raw'."""
    print("Flushing all redis contents")
    r.flushall()
    import_start = time.time()
    total_count = 0
    for lines_buffer in common.chunk_lines(10000):
        total_count += len(lines_buffer)
        print("Inserting {0} more records, {1:,} total inserted".format(len(lines_buffer), total_count))
        for line in lines_buffer:
            key = uuid4()
            r.hset("haproxy::entry::" + str(key), "_raw", line)
    import_end = time.time()
    print(
        "Imported {0:,} records in {1:.2f} seconds, {2:.2f} records / second".format(
            total_count, import_end - import_start, total_count / (import_end - import_start)))
# haproxy HTTP log-line pattern: syslog timestamp/host, process[pid],
# client ip:port, accept date, frontend, backend/server, per-phase timers
# (Tq/Tw/Tc/Tr/Tt), status, byte count, captured cookies, termination
# state, connection/queue counters and finally the request line
# ("<BADREQ>" when haproxy could not parse it).  The embedded newlines are
# stripped before compiling — see internal_parse().
regex = r"""
^(?P<log_month>[A-Za-z]{3}) (?P<log_day>[0-9]{2}) (?P<log_hour>[0-9]{2}):(?P<log_minute>[0-9]{2}):(?P<log_second>[0-9]{2})
(?P<log_ip>[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})
(?P<process_name>[A-Za-z]+)\[(?P<pid>[0-9]+)\]:
(?P<client_ip>[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+):(?P<client_port>[0-9]+)
\[(?P<accept_day>[0-9]{2})/(?P<accept_month>[A-Za-z]{3})/(?P<accept_year>[0-9]{4}):(?P<accept_hour>[0-9]{2}):(?P<accept_minute>[0-9]{2}):(?P<accept_second>[0-9]{2}\.[0-9]{3})\]
(?P<frontend_name>[^ ]+)
(?P<backend_name>[^/]+)/(?P<server_name>[^ ]+)
(?P<Tq>[^/]+)/(?P<Tw>[^/]+)/(?P<Tc>[^/]+)/(?P<Tr>[^/]+)/(?P<Tt>[^ ]+)
(?P<status_code>[^ ]+)
(?P<bytes_read>[^ ]+)
(?P<captured_request_cookie>[^ ]+)
(?P<captured_response_cookie>[^ ]+)
(?P<termination_state>[^ ]+)
(?P<actconn>[^/]+)/(?P<feconn>[^/]+)/(?P<beconn>[^/]+)/(?P<srv_conn>[^/]+)/(?P<retries>[^ ]+)
(?P<srv_queue>[^/]+)/(?P<backend_queue>[^ ]+)
.*
\"((?P<http_verb>[^ ]+) (?P<request_uri>[^ ]+) ?(?P<http_version>HTTP/[0-9]\.[0-9])?|<BADREQ>)\"?
"""
def parse():
    """Fan the stored raw log entries out to worker processes for parsing.

    Scans every ``haproxy::entry::*`` key, batches the keys into chunks of
    10000, hands each chunk to ``internal_parse`` on a 6-process pool, then
    polls every 5 seconds until all chunks have completed and re-raises the
    first worker failure.
    """
    from multiprocessing import Pool
    pool = Pool(6)
    parse_start = time.time()
    results = []
    buff = []
    for key in r.scan_iter(match="haproxy::entry::*"):
        buff.append(key)
        if len(buff) >= 10000:
            results.append(pool.apply_async(internal_parse, [buff]))
            buff = []
            if len(results) % 10 == 0:
                print("{0} chunks enqueued".format(len(results)))
    # BUG FIX: the final partial chunk used to be dropped, leaving up to
    # 9999 entries unparsed whenever the key count was not a multiple of
    # 10000.
    if buff:
        results.append(pool.apply_async(internal_parse, [buff]))
    pool.close()
    while True:
        ready = len([res for res in results if res.ready()])
        failed = len([res for res in results if res.ready() and not res.successful()])
        print("Ready: {0} out of {1}, {2} failed".format(ready, len(results), failed))
        if ready == len(results):
            break
        time.sleep(5)
    # Surface worker exceptions instead of finishing silently.
    for res in results:
        if res.ready() and not res.successful():
            res.get()
    pool.join()
    parse_end = time.time()
    print("Parse completed in {0:.2f} seconds".format(parse_end - parse_start))
def internal_parse(buff):
    # Worker entry point: runs in a separate process, so it compiles its own
    # copy of the module-level pattern (newlines stripped, because the regex
    # is written one capture group per line for readability).
    preped_regex = re.compile(regex.replace("\n", ""))
    for key in buff:
        # Each entry stores the original log line under the "_raw" field.
        raw = r.hget(key, "_raw").decode("utf-8")
        m = preped_regex.match(raw)
        # Write every named capture group back onto the same hash.
        # NOTE(review): assumes every stored line matches the pattern — a
        # non-matching line makes ``m`` None and raises here. Confirm inputs.
        r.hmset(key, m.groupdict())
    return None
#def index():
# print("Creating indexes...")
# index_start = time.time()
# conn = psycopg2.connect(connection_string)
# cur = conn.cursor()
# cur.execute("CREATE INDEX http_verb ON haproxy (http_verb)")
# cur.execute("CREATE INDEX status_code ON haproxy (status_code)")
# cur.execute("ANALYZE haproxy")
# conn.commit()
# index_end = time.time()
# print("Index creation completed in {0:.2f} seconds".format(index_end - index_start))
if __name__ == "__main__":
#reimport()
parse()
#index()
| mfenniak/log-tool-comparison | import_redis.py | import_redis.py | py | 3,604 | python | en | code | 0 | github-code | 13 |
def mergeSortedArray(A, m, B, n):
    """Merge sorted ``B[:n]`` into sorted ``A`` (which holds m + n slots)
    and return the ascending result.

    Both inputs are already sorted, so a single backward merge is O(m + n).
    The previous implementation ran a first-element-pivot quicksort over the
    combined array, which is worst-case O((m+n)^2) precisely on this kind of
    (mostly sorted) input and ignored the sortedness guarantee entirely.
    """
    i = m - 1          # last valid element of A
    j = n - 1          # last element of B
    k = m + n - 1      # write cursor, filled from the back
    while j >= 0:
        if i >= 0 and A[i] > B[j]:
            A[k] = A[i]
            i -= 1
        else:
            A[k] = B[j]
            j -= 1
        k -= 1
    return A

print(mergeSortedArray([1, 3, 4, 6, 0, 0], 4, [2, 5], 2))
| mx11Code/pythoncode | lintcode/64.py | 64.py | py | 549 | python | en | code | 0 | github-code | 13 |
27282120100 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^login$', views.login, name='login'),
url(r'^timeinout$', views.timeinout, name='timeinout'),
#<--------------DIRECTORIES------------>
#<--ADMIN-->
#Dashboard
url(r'^Admin$', views.AdminDashboard, name='Admin/Dashboard'),
url(r'^Admin/Calendar$', views.AdminCalendar, name='Admin/Calendar'),
#Accounts
url(r'^Admin/ManageUsers$', views.AdminManageUsers, name='Admin/ManageUsers'),
url(r'Admin/AddUser$', views.AdminAddUser, name='Admin/AddUser'),
#Inventory
url(r'^Admin/ViewInventory$', views.AdminViewInventory, name='Admin/ViewInventory'),
url(r'^Admin/AddItem$', views.AdminAddItem, name='Admin/AddItem'),
#Residency
url(r'^Admin/ViewResidencies$', views.AdminViewResidencies, name='Admin/ViewResidencies'),
url(r'^Admin/EvaluateResidency$', views.AdminEvaluateResidency, name='Admin/EvaluateResidency'),
url(r'^Admin/ManageTerm$', views.AdminManageTerm, name='Admin/ManageTerm'),
#Item Lending
url(r'^Admin/BorrowItem$', views.AdminBorrowItem, name='Admin/BorrowItem'),
url(r'^Admin/ReturnItem$', views.AdminReturnItem, name='Admin/ReturnItem'),
url(r'^Admin/ReturnItem2$', views.AdminReturnItem2, name='Admin/ReturnItem2'),
#Reports
url(r'^Admin/ResidencyReport$', views.AdminResidencyReport, name='Admin/ResidencyReport'),
#Inbox
url(r'^Admin/Inbox$', views.AdminInbox, name='Admin/Inbox'),
#Laboratory
url(r'^Admin/AddLaboratory', views.AdminAddLaboratory, name='Admin/AddLaboratory'),
url(r'^Admin/EditLaboratory$', views.AdminEditLaboratory, name='Admin/EditLaboratory'),
#<--END-->
#<--STUDENT-->
#Profile
url(r'Student/Profile$', views.StudentProfile, name='Student/Profile'),
#Dashboard
url(r'^Student$', views.StudentDashboard, name='Student/Dashboard'),
url(r'^Student/Calendar$', views.StudentDashboard2, name='Student/Calendar'),
#Item Lending
url(r'^Student/BorrowItem$', views.StudentBorrowItem, name='Student/BorrowItem'),
url(r'^Student/ReturnItem$', views.StudentReturnItem, name='Student/ReturnItem'),
url(r'^Student/ReturnItem2$', views.StudentReturnItem2, name='Student/ReturnItem2'),
#Inbox
url(r'^Student/Inbox$', views.StudentInbox, name='Student/Inbox'),
#Residency
url(r'Student/SetResidency$', views.StudentSetResidency, name='Student/SetResidency'),
url(r'Student/EditResidency$', views.StudentEditResidency, name='Student/EditResidency'),
#Reports
url(r'Student/GroupInventory$', views.StudentGroupInventory, name='Student/GroupInventory'),
#<--END-->
#<--FACULTY-->
#Dashboard
url(r'^Faculty$', views.FacultyDashboard, name='Faculty/Dashboard'),
url(r'^Faculty/Calendar$', views.FacultyCalendar, name='Faculty/Calendar'),
url(r'^Faculty/Profile$', views.FacultyProfile, name='Faculty/Profile'),
#Groups
url(r'Faculty/EvaluateUser$', views.FacultyEvaluateUser, name='Faculty/EvaluateUser'),
url(r'Faculty/ManageGroups$', views.FacultyManageGroups, name='Faculty/ManageGroups'),
url(r'Faculty/AddGroup$', views.FacultyAddGroup, name='Faculty/AddGroup'),
url(r'Faculty/EditGroup$', views.FacultyEditGroup, name='Faculty/EditGroup'),
#Residency
url(r'^Faculty/ViewResidencies$', views.FacultyViewResidencies, name='Faculty/ViewResidencies'),
#Item Lending
url(r'^Faculty/BorrowItem$', views.FacultyBorrowItem, name='Faculty/BorrowItem'),
url(r'^Faculty/ReturnItem$', views.FacultyReturnItem, name='Faculty/ReturnItem'),
url(r'^Faculty/ReturnItem2$', views.FacultyReturnItem2, name='Faculty/ReturnItem2'),
#Reports
url(r'^Faculty/ResidencyReport$', views.FacultyResidencyReport, name='Faculty/ResidencyReport'),
#Inbox
url(r'^Faculty/Inbox$', views.FacultyInbox, name='Faculty/Inbox'),
#<--END-->
#<--TECHNICIAN AND FACULTY-TECHNICIAN-->
#Dashboard
url(r'^FacultyTech$', views.FacultyTechDashboard, name='FacultyTech/Dashboard'),
url(r'^FacultyTech/Calendar$', views.FacultyTechCalendar, name='FacultyTech/Calendar'),
url(r'^FacultyTech/Profile$', views.FacultyTechProfile, name='FacultyTech/Profile'),
#Inventory
url(r'^FacultyTech/AddItem$', views.FacultyTechAddItem, name='FacultyTech/AddItem'),
#Residency
url(r'^FacultyTech/ViewResidencies$', views.FacultyTechViewResidencies, name='FacultyTech/ViewResidencies'),
url(r'^FacultyTech/EvaluateResidency$', views.FacultyTechEvaluateResidency, name='FacultyTech/EvaluateResidency'),
url(r'^FacultyTech/ManageTerm$', views.FacultyTechManageTerm, name='FacultyTech/ManageTerm'),
#Item Lending
url(r'^FacultyTech/BorrowItem$', views.FacultyTechBorrowItem, name='FacultyTech/BorrowItem'),
url(r'^FacultyTech/ReturnItem$', views.FacultyTechReturnItem, name='FacultyTech/ReturnItem'),
url(r'^FacultyTech/ReturnItem2$', views.FacultyTechReturnItem2, name='FacultyTech/ReturnItem2'),
#Reports
url(r'^FacultyTech/ResidencyReport$', views.FacultyTechResidencyReport, name='FacultyTech/ResidencyReport'),
url(r'^FacultyTech/BorrowedItems$', views.FacultyTechBorrowedItems, name='FacultyTech/BorrowedItems'),
url(r'^FacultyTech/GroupsInventory$', views.FacultyTechGroupsInventory, name='FacultyTech/GroupsInventory'),
#Inbox
url(r'^FacultyTech/Inbox$', views.FacultyTechInbox, name='FacultyTech/Inbox'),
#<--END-->
#AJAX
url(r'ajax/editlab', views.EditLabAjax, name='Admin/EditLabAjax'),
url(r'ajax/borrowitem', views.BorrowItemAjax, name='FacultyTech/BorrowItemAjax'),
url(r'ajax/manualborrowitem', views.BorrowItemManAjax, name='FacultyTech/BorrowItemManAjax'),
url(r'ajax/getuserinfo', views.UserInfoAjax, name='FacultyTech/UserInfoAjax'),
url(r'ajax/gettimein', views.TimeInOutAjax, name='TimeInOutAjax'),
#<-----------END OF DIRECTORIES--------->
]
| pmija/ARC-DJANGO | ARC/urls.py | urls.py | py | 5,878 | python | en | code | 0 | github-code | 13 |
74859824976 | class Solution:
    def mergeSort(self, data, head, rear):
        """Sort data[head:rear] in place and return the number of inversion
        pairs found within that half-open range."""
        # A range of length 0 or 1 contains no inversions.
        if head >= rear - 1:
            return 0
        mid = (head + rear) >> 1
        left = self.mergeSort(data, head, mid)
        right = self.mergeSort(data, mid, rear)
        current = 0
        i, j = head, mid
        tempdata = []
        # Merge the two sorted halves; whenever an element is taken from the
        # right half, the (mid - i) elements still waiting in the left half
        # each form one inversion with it.
        while i < mid and j < rear:
            if data[i] <= data[j]:
                tempdata.append(data[i])
                i += 1
            else:
                tempdata.append(data[j])
                j += 1
                current += mid - i
        # Drain whichever half still has elements left.
        while i < mid:
            tempdata.append(data[i])
            i += 1
        while j < rear:
            tempdata.append(data[j])
            j += 1
        # Copy the merged result back into the original slice.
        j = 0
        for i in range(head, rear):
            data[i] = tempdata[j]
            j += 1
        return current + left + right
def InversePairs(self, data):
size = len(data)
if size < 2:
return 0
return self.mergeSort(data, 0, size) | colinsongf/JianZhiOffer | 数组中的逆序对.py | 数组中的逆序对.py | py | 1,003 | python | en | code | 0 | github-code | 13 |
26535712159 | # from M1.list_49_键盘操作 import mouse_move
# mouse_move()
###package测试成功
# class Employee(object):
# pass
# employee1 = Employee()
# employee1.first = 'Harry'
# employee1.surname = 'Portter'
# employee1.salary = 4000
# employee1.email = 'Harry@163.com'
# print('{}, {}, {}'.format(employee1.first+'' +employee1.surname, employee1.salary, employee1.email))
class Employee():
    """An employee with a validated first name, an auto-generated e-mail
    address and simple salary bookkeeping.

    ``raiseAmount`` is the multiplier applied by :meth:`raiseSalary`;
    ``employeeNum`` counts how many instances have ever been constructed.
    """

    raiseAmount = 2
    employeeNum = 0

    def __init__(self, first, surname, salary):
        # Assigning ``first`` routes through the validating property below.
        self.first = first
        self.surname = surname
        self.salary = salary
        self.email = self.first + '.' + self.surname + '@163.com'
        Employee.employeeNum += 1

    @property
    def first(self):
        """The employee's validated first name."""
        return self.__first

    @first.setter
    def first(self, value):
        # Reject (and merely report) anything that is not a string; the
        # backing attribute is simply never assigned in that case.
        if not isinstance(value, str):
            print('please input a string')
            return
        self.__first = value

    def infoSummary(self):
        """Return a one-line "name, salary, email" summary."""
        return '{}, {}, {}'.format(self.first + '' + self.surname, self.salary, self.email)

    def raiseSalary(self):
        """Multiply the salary by the class-wide raise factor."""
        self.salary = self.salary * Employee.raiseAmount

    @classmethod
    def setRaiseAmount(cls, amount):
        """Change the raise multiplier for every employee at once."""
        cls.raiseAmount = amount

    @classmethod
    def newFromString(cls, empstr):
        """Alternate constructor: parse 'first-surname-salary' into an Employee."""
        first, surname, salary = empstr.split('-')
        return cls(first, surname, salary)

    @staticmethod
    def whatDay(day):
        # Monday is weekday() == 0.
        if day.weekday() == 0:
            print('Are you OK?')
        else:
            print('meishi')
class Writer(Employee):
    """An employee who additionally has a representative work."""

    def __init__(self, first, surname, salary, masterwork):
        # Use super() instead of hard-coding Employee.__init__, for
        # consistency with the sibling Leader class.
        super().__init__(first, surname, salary)
        self.masterwork = masterwork
class Leader(Employee):
    def __init__(self, first, surname, salary, employees =None):
        super().__init__(first, surname, salary)
        # Default to a fresh list per instance: a mutable default argument
        # (employees=[]) would be shared between every Leader, so ``None``
        # is used as the sentinel instead.
        if employees is None:
            self.employees = []
        else:
            self.employees = employees
employee1 = Employee('Harry', 'Potter', 4000)
employee2 = Employee('bilbo', 'baggins', 4000)
# print(employee1.infoSummary(),employee2.infoSummary())
# employee1.raiseSalary()
# print(employee1.infoSummary(), Employee.raiseAmount)
# print(employee1.__dict__)
# print(employee1.__dict__['first'])
# Employee.setRaiseAmount(4)
# employee1.raiseSalary()
# print(employee1.infoSummary(), Employee.raiseAmount)
# print(Employee.employeeNum)
empStr1 = 'J.K-Rowling-10000'
empStr2 = 'J.R.R-Tolkin-8000'
employee3 = Employee.newFromString(empStr1)
employee4 = Employee.newFromString(empStr2)
print(employee3.infoSummary() ,employee4.infoSummary())
# from datetime import date
# day = date.today()
# Employee.whatDay(day)
# print(help(Employee))
print('----------------------')
empWriter1 = Writer('Mark','Twain',8000,'worker')
print(empWriter1.infoSummary())
empLeader = Leader('Jstin','caesar',12000,[employee3,employee4])
# print(empLeader.__dict__['employees'][0].__dict__['__first'])
employee3.first= 'BOB'
print(employee3.infoSummary())
# print(empLeader.__dict__['employees'][0].__dict__['__first'])
print(employee3.first) | ghfuidy/Hello-World | list_54_test_package.py | list_54_test_package.py | py | 3,108 | python | en | code | 0 | github-code | 13 |
37690001602 | import app.api.templates.DC_CV_Config_AIDC_L3_INTERNET_MED_template_TELEPAC.services.interfaces as interfaces_service;
import app.api.templates.DC_CV_Config_AIDC_L3_INTERNET_MED_template_TELEPAC.services.services as services_service;
import app.api.templates.DC_CV_Config_AIDC_L3_INTERNET_MED_template_TELEPAC.commom.utils.utils as utils;
async def builder_config_services_interface(client_type, vbus_obj, aidc_template_in, _aidctype):
    """Generate one service per interface, attach each service to its own
    interface, and return the template substitution values.

    Returns a dict with both interface names (INTERFACE1/2) and their
    rendered descriptions (INTERFACEDESCRIPTION1/2).
    """
    interfaces_objs = await interfaces_service.get_interfaces(vbus_obj)
    config = {}
    # First interface: generate a service and attach it.
    service_id = await services_service.generate_service(client_type)
    await interfaces_service.update_interface(interfaces_objs[0], service_id.id)
    config['INTERFACEDESCRIPTION1'] = await utils.setup_interface_description(aidc_template_in, service_id.service_id, _aidctype)
    # Second interface: generate its own service and attach it.
    service_id_1 = await services_service.generate_service(client_type)
    # BUG FIX: this call previously passed service_id.id, re-attaching the
    # FIRST service to the second interface even though the description
    # below is built from the second service (service_id_1).
    await interfaces_service.update_interface(interfaces_objs[1], service_id_1.id)
    config['INTERFACEDESCRIPTION2'] = await utils.setup_interface_description(aidc_template_in, service_id_1.service_id, _aidctype)
    config['INTERFACE1'] = interfaces_objs[0].interface
    config['INTERFACE2'] = interfaces_objs[1].interface
    return config
4511132910 | #
# @lc app=leetcode.cn id=75 lang=python
#
# [75] 颜色分类
#
# https://leetcode-cn.com/problems/sort-colors/description/
#
# algorithms
# Medium (57.26%)
# Likes: 761
# Dislikes: 0
# Total Accepted: 166.8K
# Total Submissions: 291.4K
# Testcase Example: '[2,0,2,1,1,0]'
#
# 给定一个包含红色、白色和蓝色,一共 n 个元素的数组,原地对它们进行排序,使得相同颜色的元素相邻,并按照红色、白色、蓝色顺序排列。
#
# 此题中,我们使用整数 0、 1 和 2 分别表示红色、白色和蓝色。
#
#
#
#
#
#
# 示例 1:
#
#
# 输入:nums = [2,0,2,1,1,0]
# 输出:[0,0,1,1,2,2]
#
#
# 示例 2:
#
#
# 输入:nums = [2,0,1]
# 输出:[0,1,2]
#
#
# 示例 3:
#
#
# 输入:nums = [0]
# 输出:[0]
#
#
# 示例 4:
#
#
# 输入:nums = [1]
# 输出:[1]
#
#
#
#
# 提示:
#
#
# n == nums.length
# 1
# nums[i] 为 0、1 或 2
#
#
#
#
# 进阶:
#
#
# 你可以不使用代码库中的排序函数来解决这道题吗?
# 你能想出一个仅使用常数空间的一趟扫描算法吗?
#
#
#
# @lc code=start
class Solution(object):
    def sortColors(self, nums):
        """
        :type nums: List[int]
        :rtype: None Do not return anything, modify nums in-place instead.

        Dutch national flag partition: one pass, O(n) time, O(1) extra
        space.  The previous merge sort was O(n log n) with an O(n) buffer,
        while the problem's follow-up explicitly asks for a single
        constant-space pass.

        Invariant: nums[:low] are all 0, nums[high+1:] are all 2, and
        everything before ``mid`` has already been classified.
        """
        if not nums:
            return
        low, mid, high = 0, 0, len(nums) - 1
        while mid <= high:
            if nums[mid] == 0:
                nums[low], nums[mid] = nums[mid], nums[low]
                low += 1
                mid += 1
            elif nums[mid] == 2:
                nums[mid], nums[high] = nums[high], nums[mid]
                # The value swapped in from ``high`` is unclassified:
                # re-examine index ``mid`` on the next iteration.
                high -= 1
            else:  # nums[mid] == 1
                mid += 1
# @lc code=end
| lagoueduCol/Algorithm-Dryad | 08.Sort/75.颜色分类.mergesort.py | 75.颜色分类.mergesort.py | py | 2,061 | python | zh | code | 134 | github-code | 13 |
13644548907 | import csv
def write_csv():
    """Prompt for a file name and a comma-separated list of values, then
    write the values as a single row of a new CSV file.

    Digit-only fields are converted to int before writing; OS-level
    failures (bad path, permissions, ...) are reported instead of crashing.
    """
    file_name = input("enter the name of file")
    list_element = input('Enter the element of list')
    list_1 = list_element.split(",")
    # Convert digit-only fields to integers so they are written as numbers.
    for i in range(len(list_1)):
        if list_1[i].isdigit():
            list_1[i] = int(list_1[i])
    try:
        # ``with`` closes the file itself; the old explicit close() inside
        # the block was redundant.
        with open(file_name, 'w', newline='') as csv_file:
            csv.writer(csv_file).writerow(list_1)
    except OSError:
        # Narrowed from a bare ``except:`` so unrelated bugs are no longer
        # silently swallowed.
        print("something went wrong while writing")
def read_csv():
    """Prompt for a file name and print every row of that CSV file.

    Only a genuinely missing file triggers the "no such file" hint; the old
    bare ``except:`` mislabelled every possible error that way.
    """
    file_name = input("enter the name of file")
    try:
        with open(file_name, 'r') as csv_file:
            for row in csv.reader(csv_file):
                print(row)
    except FileNotFoundError:
        print("No Such file existed")
        print("Please enter correct name ")
def append():
    """Prompt for a file name and a comma-separated list of values, then
    append the values as one new row at the end of the CSV file.
    """
    file_name = input("enter the name of file")
    list_element = input('Enter the element of list')
    list_1 = list_element.split(",")
    # Convert digit-only fields to integers, mirroring write_csv().
    for i in range(len(list_1)):
        if list_1[i].isdigit():
            list_1[i] = int(list_1[i])
    try:
        with open(file_name, 'a', newline='') as csv_file:
            csv.writer(csv_file).writerow(list_1)
    except OSError:
        # Narrowed from a bare ``except:`` so unrelated bugs are no longer
        # silently swallowed.
        print("something went wrong while writing")
def main():
    """Interactive menu loop: read, write or append CSV rows until the user
    answers anything other than 'y'/'Y' to the continue prompt."""
    continue_1="y"
    while continue_1=='y' or continue_1=='Y':
        choice=int(input('choice are: \n 1: read \n 2: write \n 3:append'))
        if choice==1:
            read_csv()
        elif choice==2:
            write_csv()
        elif choice==3:
            append()
        # Any answer other than 'y'/'Y' ends the loop.
        continue_1=input("want to continue 'y' or 'Y' ")
if __name__=="__main__":
main() | Abhinavk1243/python-learning | scripts/Fileoperations/csv_1.py | csv_1.py | py | 1,844 | python | en | code | 0 | github-code | 13 |
13321533891 | import os
import sys
if sys.version_info[0]<3: # require python3
raise Exception("Python3 required! Current (wrong) version: '%s'" % sys.version_info)
activate_this = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'venv',
'bin',
'activate_this.py')
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
thisGlobals = globals()
thisGlobals['__file__'] = activate_this
with open(activate_this) as f:
code = compile(f.read(), activate_this, 'exec')
exec(code, thisGlobals)
def application(environ, start_response):
    """WSGI entry point: copy the Flask configuration variables from the
    per-request WSGI environ into os.environ, then delegate to the Flask
    app.  The import is deliberately deferred so the Flask module sees the
    variables when it is first imported."""
    ENVIRONMENT_VARIABLES = [
        'FLASK_DB_CONN_STR',
        'FLASK_SECRET_KEY',
        'FLASK_LOG_LEVEL',
        'FLASK_LOG_HANDLER'
    ]
    for key in ENVIRONMENT_VARIABLES:
        # NOTE(review): environ.get(key) is None when the variable is not
        # configured, and os.environ rejects None values (TypeError) —
        # confirm all four variables are always set by the server.
        os.environ[key] = environ.get(key)
    from fwsdemo.app import app as application
    return application(environ, start_response)
| nicc777/flask-webservice-wsgi-python3-demo | opt/fwsdemo/app.wsgi | app.wsgi | wsgi | 877 | python | en | code | 3 | github-code | 13 |
4193018706 | import numpy as np
import torch
import torchvision
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import transforms, models, datasets
import matplotlib.pyplot as plt
# Created the Class for the custom dataset
# NOTE(review): this class uses ``os`` and PIL's ``Image``, but neither is
# imported at the top of the file — confirm the imports exist or add
# ``import os`` and ``from PIL import Image``.
class CustomDataset(torch.utils.data.Dataset):
    def __init__(self, root_img, root_mask, root_npy_labels, root_npy_bboxes, transforms = None):
        """
        Inputs:
            root_img: The path to the root directory where the image .h5 files are stored
            root_mask: The path to the root directory where the mask .h5 files are stored
            root_npy_labels: The path to the .npy dataset for labels
            root_npy_bboxes: The path to the .npy dataset for the ground truth bounding boxes
            transforms: Apply a Pytorch transform to each instance of the image
        """
        self.root_img = root_img
        self.root_mask = root_mask
        self.root_npy_labels = root_npy_labels
        self.root_npy_bboxes = root_npy_bboxes
        self.transforms = transforms
        # Sorted listings keep images and masks aligned by index.
        self.imgs = list(sorted(os.listdir(os.path.join(root_img, "mycoco_images"))))
        self.masks = list(sorted(os.listdir(os.path.join(root_mask, "mycoco_masks"))))
        self.labels = np.load(self.root_npy_labels, allow_pickle = True)
        self.bboxes = np.load(self.root_npy_bboxes, allow_pickle = True)
    # To support indexing when an object of the CustomDataset Class is created
    def __getitem__(self, index):
        # Convert the Masks and the input image into an array
        img_path = os.path.join(self.root_img, "mycoco_images", self.imgs[index])
        mask_path = os.path.join(self.root_mask, "mycoco_masks", self.masks[index])
        img = Image.open(img_path).convert("RGB")
        mask = Image.open(mask_path)
        mask = np.array(mask)
        # instances are encoded as different colors
        obj_ids = np.unique(mask)
        # first id is the background, so remove it
        obj_ids = obj_ids[1:]
        # split the color-encoded mask into a set
        # of binary masks
        masks = mask == obj_ids[:, None, None]
        image_id = torch.tensor([index])
        # Convert the Mask, image, bounding boxes and labels to a Pytorch Tensor
        masks = torch.as_tensor(masks, dtype = torch.uint8)
        bounding_boxes = torch.as_tensor(self.bboxes[index], dtype = torch.float32)
        labels = torch.as_tensor(self.labels[index], dtype = torch.int64)
        # Assemble the target dict in the torchvision detection format.
        batch = {}
        batch["boxes"] = bounding_boxes
        batch["image_id"] = image_id
        batch["masks"] = masks
        batch["labels"] = labels
        if self.transforms is not None:
            img, batch = self.transforms(img,batch)
        return img, batch
    def __len__(self):
        return len(self.imgs)
| arp95/mask_rcnn_instance_segmentation | Code/custom_dataset.py | custom_dataset.py | py | 2,950 | python | en | code | 3 | github-code | 13 |
73492588496 | # Heap Sort
# Time Complexity: O(nlogn)
# Space Complexity: O(1)
def Heapify(i, n, arr):
    """Sift arr[i] down inside the min-heap arr[:n] until the min-heap
    property (each parent <= both children) holds again."""
    while True:
        target = i
        left, right = 2 * i + 1, 2 * i + 2
        if left < n and arr[left] < arr[target]:
            target = left
        if right < n and arr[right] < arr[target]:
            target = right
        if target == i:
            return
        arr[i], arr[target] = arr[target], arr[i]
        i = target

def BuildTree(n, arr):
    """Rearrange arr[:n] into a min-heap, sifting down from the last
    internal node back to the root."""
    for node in reversed(range(n // 2)):
        Heapify(node, n, arr)

def HeapSort(n, arr):
    """Heap-sort arr[:n] in place.

    Because a MIN-heap is used and the root (current minimum) is swapped to
    the end of the shrinking range, the result is in DESCENDING order.
    """
    BuildTree(n, arr)
    for done in range(n):
        last = n - done - 1
        arr[0], arr[last] = arr[last], arr[0]
        Heapify(0, last, arr)
def main():
    # Read the element count and the space-separated integers from stdin,
    # heap-sort them (descending — min-heap based) and print the result.
    n=int(input())
    arr=list(map(int,input().split()))
    HeapSort(n,arr)
    print(arr)
main()
| Ayush-Tiwari1/DSA | Days.29/Python/2.Heap-Sort.py | 2.Heap-Sort.py | py | 772 | python | en | code | 0 | github-code | 13 |
73958544976 | from date_handler import date_normalizer, compute_days
def compute_and_print_result(date):
    # Parse the raw date-range line, compute the number of days between the
    # two dates and print both the input and the result (messages are in
    # Portuguese).
    date_1, date_2 = date_normalizer(date)
    n_days = compute_days(date_1, date_2)
    print(f"\n- Data: {date.strip()}" )
    print(f"- Há {n_days} dias entre as duas datas")
def dates_from_file(file_path):
    """Return the lines of ``file_path`` (newlines kept), or None — after
    printing a message in Portuguese — when the file does not exist."""
    try:
        handle = open(file_path, 'r')
    except FileNotFoundError:
        print(f"O arquivo '{file_path}' não foi encontrado.")
        return None
    with handle:
        return handle.readlines()
def get_user_response():
    # Ask on the console whether the user wants to type the date manually;
    # any yes variant ("s", "sim", "y", "yes", case-insensitive) -> True.
    response = input("\n- Você deseja inserir a data pelo console? s/n: ").lower()
    run = False
    if response in ["s", "sim", "y", "yes"]:
        run = True
    return run
| LeonardoAleee/Teste_LP | program.py | program.py | py | 822 | python | pt | code | 0 | github-code | 13 |
42546855887 | # -*- coding: utf-8 -*-
__all__ = ('Player', 'CardBattlePlayer', )
import kivy
kivy.require(r'1.10.0')
from kivy.lang import Builder
from kivy.factory import Factory
from kivy.utils import get_color_from_hex
# from kivy.factory import Factory
from kivy.properties import (
NumericProperty, StringProperty, ListProperty, BooleanProperty
)
# from kivy.graphics import Color, Line
from kivy.animation import Animation
import setup_logging
logger = setup_logging.get_logger(__name__)
from basicwidgets import (
AutoLabel,
change_label_text_with_fade_animation, wrap_function_for_bind,
)
from .tefudalayout import TefudaLayout
Builder.load_string(r"""
#:kivy 1.10.0
<CardBattlePlayer>:
canvas.before:
Color:
rgba: .2, .2, .2, 1
Line:
rectangle: [*self.pos, *self.size]
BoxLayout:
orientation: 'horizontal'
pos_hint: {'x': 0, 'y': 0}
FloatLayout:
size_hint_x: 0.8
TefudaLayout:
id: id_tefuda
size_hint: 0.9, 0.9
child_aspect_ratio: 0.7
pos_hint: {'center_x': 0.5, 'center_y': 0.5}
BoxLayout:
size_hint_x: 0.2
orientation: 'vertical'
# id: id_status
AutoLabel:
size_hint_y: 1.7
# bold: True
text: root.id
BoxLayout:
Image:
source: 'icon_cost.png'
AutoLabel:
id: id_label_cost
text: '{}/{}'.format(str(root.cost), str(root.max_cost))
BoxLayout:
Image:
source: 'icon_deck.png'
AutoLabel:
id: id_label_deck
text: str(root.n_cards_in_deck)
""")
def let_label_animate(label, duration=0.2):
    r'''Return a bind-ready callback that changes ``label``'s text with a
    fade animation.  (Helper introduced only because the name of the
    wrapped function is long.)'''
    return wrap_function_for_bind(
        change_label_text_with_fade_animation,
        label, duration=duration)
class Player(Factory.EventDispatcher):
    """Pure-data model of one player's battle state.

    Kivy properties make every field observable, so widgets (e.g.
    CardBattlePlayer) can bind to changes instead of polling.
    """
    id = StringProperty()
    cost = NumericProperty()
    max_cost = NumericProperty()
    n_cards_in_deck = NumericProperty()
    tefuda = ListProperty()  # cards currently in hand ("tefuda")
    color = ListProperty()
    is_black = BooleanProperty()
    honjin_prefix = StringProperty()
    first_row_prefix = StringProperty()
    second_row_prefix = StringProperty()
class CardBattlePlayer(Factory.FloatLayout):
    """Widget view of a :class:`Player`: mirrors the model's fields as its
    own Kivy properties and keeps them synchronised via bindings."""
    id = StringProperty()
    cost = NumericProperty()
    max_cost = NumericProperty()
    n_cards_in_deck = NumericProperty()
    color = ListProperty()
    def __init__(self, *, player, **kwargs):
        # Seed the widget with the model's current values.
        super().__init__(
            id=player.id,
            cost=player.cost,
            max_cost=player.max_cost,
            n_cards_in_deck=player.n_cards_in_deck,
            color=player.color,
            **kwargs)
        # Keep the widget in sync: simple fields use setter(), cost changes
        # go through on_cost_changed, and the deck counter label fades.
        player.bind(
            id=self.setter('id'),
            cost=self.on_cost_changed,
            max_cost=self.on_cost_changed,
            n_cards_in_deck=let_label_animate(self.ids.id_label_deck),
            color=self.setter('color'))
    def on_cost_changed(self, player, value):
        label = self.ids.id_label_cost
        self.cost = player.cost
        self.max_cost = player.max_cost
        animation = getattr(self, '_cost_animation', None)
        # If cost does not exceed the maximum: show in white, no blinking.
        if player.cost <= player.max_cost:
            if animation is not None:
                label.color = get_color_from_hex('#ffffff')
                animation.stop(label)
                self._cost_animation = None
        # If cost exceeds the maximum: show in red and blink.
        elif animation is None:
            label.color = get_color_from_hex('#ff2222')
            animation = Animation(
                opacity=0,
                duration=0.8,
                transition='in_cubic') + Animation(
                opacity=1,
                duration=0.8,
                transition='out_cubic')
            animation.repeat = True
            animation.start(label)
            self._cost_animation = animation
    # def on_n_cards_in_deck_changed(self, player, value):
    #     label = self.ids.id_label_deck
    #     def on_fadeout_complete(animation, widget):
    #         self.n_cards_in_deck = value
    #         animation_fadein = Animation(
    #             opacity=1,
    #             duration=0.1,
    #             transition='linear')
    #         animation_fadein.start(label)
    #     animation_fadeout = Animation(
    #         opacity=0,
    #         duration=0.1,
    #         transition=r'linear')
    #     animation_fadeout.bind(on_complete=on_fadeout_complete)
    #     animation_fadeout.start(label)
| gottadiveintopython/wildwar-old-version | wildwar/cardbattle_client/cardbattleplayer.py | cardbattleplayer.py | py | 4,852 | python | en | code | 6 | github-code | 13 |
41590614584 | import configparser
import functools
from itsdangerous import Serializer, BadSignature
from main.db import get_db
from flask import (
Blueprint, render_template, flash, redirect, url_for, session, g, current_app, request
)
from main.db import get_db
bp = Blueprint('manage', __name__, url_prefix='/manage')
@bp.before_app_request
def load_parent():
    # Runs before every request: resolve the logged-in parent ("rodzic")
    # from the session and stash the DB row (or None) on flask.g.
    db = get_db()
    id = session.get('rodzic', None)
    if id is None:
        g.rodzic = None
    else:
        g.rodzic = db.execute(
            'SELECT * FROM rodzice WHERE id = ?',
            (id,)
        ).fetchone()
def auth_required(view):
    """View decorator: redirect anonymous users (no parent on g) to the
    index page; otherwise pass through to the wrapped view."""
    @functools.wraps(view)
    def wrapped_view(**kwargs):
        if g.rodzic is None:
            return redirect(url_for('index'))
        return view(**kwargs)
    return wrapped_view
@bp.route('/auth/<string:key>')
def auth(key):
    """Log a parent in from a signed token embedded in the URL.

    ``loads_unsafe`` returns a (is_valid, payload) pair without raising, so
    both a bad signature and an unknown parent id fall back to the index
    page with a flash message (in Polish).
    """
    db = get_db()
    valid, id = Serializer(current_app.config['SECRET_KEY']).loads_unsafe(key)
    if not valid:
        flash('Niepoprawny hash. Sprawdź czy nie pomyliłeś się przy przepisywaniu.')
        return redirect(url_for('index'))
    rodzic = db.execute(
        'SELECT * FROM rodzice WHERE id = ?',
        (id,)
    ).fetchone()
    if not rodzic:
        flash('Rodzic pasujący do hasha nie istnieje. ')
        return redirect(url_for('index'))
    else:
        # Valid token and existing parent: start a fresh session.
        session.clear()
        session['rodzic'] = id
        return redirect(url_for('manage.panel'))
@bp.route('/', methods=('GET', 'POST'))
@auth_required
def panel():
    # List the logged-in parent's appointments ("wizyty") joined with the
    # teacher table, ordered by hour, and render the management page.
    db = get_db()
    terminy = db.execute(
        'SELECT * FROM wizyty JOIN nauczyciele ON nauczyciele.id = wizyty.id_nauczyciela '
        'WHERE id_rodzica = ? ORDER BY godzina', (g.rodzic['id'],)
    ).fetchall()
    return render_template('zapisy/manage.html',
                           terminy=terminy,
                           )
@bp.route('/delet', methods=['POST'])
@auth_required
def delet():
    """Delete one of the logged-in parent's appointments.

    The appointment is identified by (teacher id, hour); the handler
    verifies it exists and belongs to the current parent before deleting.
    All queries are parameterized.
    """
    db = get_db()
    godzina = request.form['godzina']
    id_nauczyciela = request.form['id_nauczyciela']
    wizyta = db.execute(
        'SELECT * FROM wizyty '
        'WHERE id_nauczyciela = ? '
        'AND godzina = ?',
        (id_nauczyciela, godzina)
    ).fetchone()
    if not wizyta:
        flash('Nie ma takiej wizyty.')
        return redirect(url_for('manage.panel'))
    if wizyta['id_rodzica'] != g.rodzic['id']:
        flash('To nie twoja wizyta.')
        return redirect(url_for('manage.panel'))
    db.execute(
        'DELETE FROM wizyty '
        'WHERE id_nauczyciela = ? '
        'AND godzina = ?',
        (id_nauczyciela, godzina))
    db.commit()  # persist the deletion
    return redirect(url_for('manage.panel'))
| szkarpinski/staszic-zapisy | main/manage.py | manage.py | py | 2,631 | python | pl | code | 3 | github-code | 13 |
4321756831 | ###############################################################################
# Copyright (C) 2018, 2019, 2020 Dominic O'Kane
# Guillaume Lefieux
###############################################################################
import numpy as np
from financepy.models.black import Black
from financepy.utils.global_types import OptionTypes
from FinTestCases import FinTestCases, globalTestCaseMode
import sys
sys.path.append("..")
testCases = FinTestCases(__file__, globalTestCaseMode)
def test_Black():
    """Verify that the Black-76 model's price and Greeks satisfy the
    standard put-call parity relations, to 12 decimal places."""
    forward = 0.034
    strike = 0.050
    riskFreeIR = 0.00
    time_to_expiry = 2.0
    volatility = 0.20
    testCases.header("ITEM", "CALL", "PUT")
    call_optionType = OptionTypes.EUROPEAN_CALL
    put_optionType = OptionTypes.EUROPEAN_PUT
    # Discount factor over the option's life.
    df = np.exp(-riskFreeIR * time_to_expiry)
    model = Black(volatility)
    dp = 12  # Precision
    try:
        # C - P = df * (F - K)
        #######################################################################
        valueCall = model.value(
            forward, strike, time_to_expiry, df, call_optionType)
        valuePut = model.value(
            forward, strike, time_to_expiry, df, put_optionType)
        assert round((valueCall - valuePut), dp) == round(df*(forward - strike), dp), \
            "The method called 'value()' doesn't comply with Call-Put parity"
        testCases.print("VALUE", valueCall, valuePut)
        # delta_C - delta_P = df (so (1/df) * diff == 1)
        #######################################################################
        deltaCall = model.delta(
            forward, strike, time_to_expiry, df, call_optionType)
        deltaPut = model.delta(
            forward, strike, time_to_expiry, df, put_optionType)
        assert round((1/df) * (deltaCall - deltaPut), dp) == 1.0, \
            "The method called 'delta()' doesn't comply with Call-put parity"
        testCases.print("DELTA", deltaCall, deltaPut)
        # Gamma is identical for calls and puts.
        #######################################################################
        gammaCall = model.gamma(
            forward, strike, time_to_expiry, df, call_optionType)
        gammaPut = model.gamma(
            forward, strike, time_to_expiry, df, put_optionType)
        assert round(gammaCall - gammaPut, dp) == 0.0, \
            "The method called 'gamma()' doesn't comply with Call-Put parity"
        testCases.print("GAMMA", gammaCall, gammaPut)
        # theta_C - theta_P = r * T * (F - K) * df
        #######################################################################
        thetaCall = model.theta(
            forward, strike, time_to_expiry, df, call_optionType)
        thetaPut = model.theta(
            forward, strike, time_to_expiry, df, put_optionType)
        assert round((thetaCall - thetaPut), dp) == round((riskFreeIR * time_to_expiry) * (forward - strike) * df, dp), \
            "The method called 'theta()' doesn't comply with Call-Put parity"
        testCases.print("THETA", thetaCall, thetaPut)
        # Vega is identical for calls and puts.
        #######################################################################
        vegaCall = model.vega(
            forward, strike, time_to_expiry, df, call_optionType)
        vegaPut = model.vega(
            forward, strike, time_to_expiry, df, put_optionType)
        assert round(vegaCall - vegaPut, dp) == 0.0, \
            "The method called 'vega()' doesn't comply with Call-Put parity"
        testCases.print("VEGA", vegaCall, vegaPut)
        #######################################################################
    except AssertionError as err:
        raise err
###############################################################################
test_Black()
testCases.compareTestCases()
| domokane/FinancePy | tests_golden/TestFinModelBlack.py | TestFinModelBlack.py | py | 3,679 | python | en | code | 1,701 | github-code | 13 |
19242581573 | from urllib import request,parse
from http import cookiejar
import requests
import os
#urlopen
# re = request.urlopen("http://www.baidu.com")
# print(re.read())
#urlencode
#re = request.urlopen("http://www.baidu.com/s?wd=新型冠状病毒")
# q = {"wd":"新型冠状病毒"}
# url = "http://www.baidu.com/s?"
#
# n_q = parse.urlencode(q)
# print(n_q)
# url = url +n_q
# re = request.urlopen(url)
# print(re.read())
#unquote
# q = {"wd":"新型冠状病毒"}
# en_q = parse.urlencode(q)
# print(en_q)
# de_q = parse.unquote(en_q)
# print(de_q)
#urlparse
# url = parse.urlparse('https://i.cnblogs.com/EditPosts.aspx?opt=1')
# print(url)
#request.Request
#
# Fetch the Lagou job-listing AJAX endpoint and save the payload locally.
url = "https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false"
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36",
    "Referer": "https://www.lagou.com/jobs/list_python"
}
data = {
    "first": "true",
    "pn": str(1),
    "kd": "python"
}
# BUG FIX: the method was misspelled "PSOT", and the prepared Request was
# never used — urlopen(url) sent a plain, header-less GET instead of the
# intended POST with browser-like headers and the form body.
rep = request.Request(url, headers=headers,
                      data=parse.urlencode(data).encode("utf-8"), method="POST")
response = request.urlopen(rep)
st = response.read().decode("utf-8")
# Write with an explicit encoding and let the context manager close the file.
with open("index.html", "w", encoding="utf-8") as f:
    count = f.write(st)
print(st)
print(count)
| zhweiwei/python- | 爬虫学习/test_request.py | test_request.py | py | 1,240 | python | en | code | 0 | github-code | 13 |
7724421215 | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QMessageBox, QFileDialog
from PyQt5.QtCore import QThread, pyqtSignal
import openpyxl as xl
from openpyxl.styles import Font, Alignment, PatternFill, Border, Side
import sys
import os
import datetime
from ui.main import Ui_MainWindow
class MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.setupUi(self)
self.year=datetime.datetime.now().year
self.file_path = ''
self.pushButton_3.clicked.connect(self.import_excel)
self.pushButton.clicked.connect(self.export_excel)
self.pushButton_2.clicked.connect(self.quit)
self.pushButton_4.clicked.connect(self.about)
self.label.setText('Excelファイルをインポートしてください…')
self.textEdit.append('Excelファイルをインポートしてください…')
self.textEdit.setReadOnly(True)
self.progressBar.setMaximum(100)
self.progressBar.setValue(0)
current_year=datetime.datetime.now().year
for year in range(current_year-10, current_year+11):
self.comboBox.addItem(str(year))
self.comboBox.setCurrentText(str(current_year))
def import_excel(self):
a = QFileDialog.getOpenFileName(self,
'プロジェクト管理ファイルを選択してください',
'',
'Excel Files (*.xlsx);;All Files (*)')
if a[0] == "":
return
wb = xl.load_workbook(a[0])
judge=False
for each in wb.sheetnames:
if 'quotation record' in each.lower():
judge=True
break
if not judge:
self.label.setText('対象のデータシート "****** Quotation Record" が見つかりません!')
self.textEdit.append('対象のデータシート "****** Quotation Record" が見つかりません!')
QMessageBox.critical(self, 'フォーマットエラー', '対象のデータシート "****** Quotation Record" が見つかりません!')
return
wb.close()
self.file_path=a[0]
a_spit=a[0].split('/')
name=a_spit[len(a_spit)-1]
self.label.setText(f'ファイル「{name}」がインポートされました…')
self.textEdit.append(f'ファイル「{name}」がインポートされました…')
self.update_progressbar(0, 100)
def export_excel(self):
if self.file_path=='':
self.label.setText('まずExcelファイルをインポートしてください!')
self.textEdit.append('まずExcelファイルをインポートしてください!')
QMessageBox.critical(self, 'Excelファイルなし',
'まずExcelファイルをインポートしてください!')
return
a = QFileDialog.getSaveFileName(self,
'Please select the OT sheet file path.',
f'./Opportunities Detail 案件一覧{datetime.datetime.now().strftime("%Y%m%d%H%M%S")}',
'Excel Files (*.xlsx);;All Files (*)')
if a[0] == '':
self.label.setText('Excel出力をキャンセルしました。')
self.textEdit.append('Excel出力をキャンセルしました。')
return
wb = xl.Workbook()
try:
wb.save(filename=a[0])
except PermissionError:
QMessageBox.critical(self, '許可が拒否されました!',
'許可が拒否されました! Please close the excel file with the same filename first!')
wb.close()
return
wb.close()
self.working = Working(import_filename=self.file_path, export_filename=a[0], YEAR=int(self.comboBox.currentText()))
self.working.finish_msgbox.connect(self.finish_msgbox)
self.working.update_msg.connect(self.update_msg)
self.working.update_progressbar.connect(self.update_progressbar)
self.working.start()
def finish_msgbox(self, title, text):
QMessageBox.information(self, title, text)
def update_msg(self, msg):
self.label.setText(msg)
self.textEdit.append(msg)
def update_progressbar(self, percentage, max):
self.progressBar.setMaximum(max)
self.progressBar.setValue(percentage)
def quit(self):
a = QMessageBox.question(self, '操作確認', 'システムを閉じますか?', QMessageBox.Yes | QMessageBox.No)
if a != 16384:
return
# print(12345)
sys.exit()
def closeEvent(self, event):
a = QMessageBox.question(self, '操作確認', 'システムを閉じますか?', QMessageBox.Yes | QMessageBox.No)
if a != 16384:
event.ignore()
return
sys.exit()
def about(self):
QMessageBox.information(self, 'ツール情報',
'AKT DIV.5 見積管理ツール (V1.4版)\n'
'言語: 日本語\n'
'バージョン: V1.4版\n\n'
'開発者:An Lu\n'
'開発時間:2023年3月29日\n\n'
'連絡先: (+66)84-208-1862\n'
'E-mail: lu@akaganethailand.co.th\n'
'住所:16 Compomax Building, 5th Floor, Room No. 502, Soi Ekamai 4, Sukhumvit 63 Rd., Prakanongnua, Vadhana, Bangkok 10110 (Head office)\n\n'
'AKAGANE(THAILAND) CO., LTD.')
class Working(QThread):
finish_msgbox = pyqtSignal(str, str)
update_msg = pyqtSignal(str)
update_progressbar = pyqtSignal(int, int)
def __init__(self,import_filename, export_filename, YEAR):
super(Working, self).__init__()
self.import_filename=import_filename
self.export_filename=export_filename
self.YEAR = YEAR
def reading_excel(self, file_path):
wb = xl.load_workbook(filename=file_path, data_only=True) #updated on 7/20/2023 for v1.3, added "data_only=True"
#ws = wb[f'{self.YEAR} Div5 Quotation Record']
for each in wb.sheetnames:
if 'quotation record' in each.lower():
ws = wb[f'{each}']
break
# Scanning for locating the first cell of the diagram
self.update_msg.emit('')
i_start = -1
signal = False
for i in range(1, ws.max_row + 1):
for j in range(1, ws.max_column + 1):
if str(ws.cell(row=i, column=j).value).strip().lower() == 'quo no.':
i_start = i
signal = True
break
if signal:
break
if i_start == -1:
#print('Wrong excel format!')
return -1
# print(i_start)
# Scanning each j columns for finding out the targets
self.update_msg.emit('インポートファイルの目標シートと題名を検索しています…')
self.update_progressbar.emit(50, 100)
result_dict = {}
for j in range(1, ws.max_column + 1):
if str(ws.cell(row=i_start, column=j).value).strip().lower().replace('\n', ' ') == 'client':
result_dict['j_client'] = j
if str(ws.cell(row=i_start, column=j).value).strip().lower().replace('\n', ' ') == 'product name':
result_dict['j_productName'] = j
if str(ws.cell(row=i_start, column=j).value).strip().lower().replace('\n', ' ') == 'selling price':
result_dict['j_sellingPrice'] = j
if str(ws.cell(row=i_start, column=j).value).strip().lower().replace('\n', ' ') == 'success rate':
result_dict['j_successRate'] = j
if str(ws.cell(row=i_start, column=j).value).strip().lower().replace('\n',
' ') == 'estimated delivery month':
result_dict['j_month'] = j
if str(ws.cell(row=i_start, column=j).value).strip().lower().replace('\n',
' ') == 'status':
result_dict['j_status'] = j
# print(result_dict)
self.update_msg.emit(f'目標題名を発見しました:{result_dict}…')
# Reading source data
data_matrix = []
for i in range(i_start + 1, ws.max_row + 1):
if not ws.cell(row=i, column=result_dict['j_productName']).value:
break
data_line = [str(ws.cell(row=i, column=result_dict['j_client']).value).strip(),
str(ws.cell(row=i, column=result_dict['j_productName']).value).strip(),
float(ws.cell(row=i, column=result_dict['j_sellingPrice']).value) if (
ws.cell(row=i, column=result_dict['j_sellingPrice']).value and str(
ws.cell(row=i, column=result_dict['j_sellingPrice']).value).strip() != '-') else 0,
str(ws.cell(row=i, column=result_dict['j_successRate']).value).strip(),
ws.cell(row=i, column=result_dict['j_month']).value,
str(ws.cell(row=i, column=result_dict['j_status']).value).strip().lower(),
]
self.update_msg.emit(f'ソースデータを読み込んでいます:{data_line}…')
self.update_progressbar.emit(i, ws.max_row)
data_matrix.append(data_line)
# print(data_matrix)
wb.close()
return data_matrix
def create_diagram(self, ws, start_row, mode, data_matrix):
key_words = {}
if mode == 'accept':
key_words['title'] = 'Secured Business/受注案件'
key_words['type'] = 'Accept'
key_words['remarks'] = ''
elif mode == 'reject':
key_words['title'] = 'Reject Business/失注案件'
key_words['type'] = 'Reject'
key_words['remarks'] = ''
elif mode == 'a':
key_words['title'] = 'Opportunities A/Aヨミ案件'
key_words['type'] = 'A'
key_words['remarks'] = '80% can secure the business'
elif mode == 'b':
key_words['title'] = 'Opportunities B/Bヨミ案件'
key_words['type'] = 'B'
key_words['remarks'] = '60% can secure the business'
else:
key_words['title'] = 'Opportunities C/Cヨミ案件'
key_words['type'] = 'C'
key_words['remarks'] = '30% can secure the business'
# Create the first diagram
ws.cell(row=start_row, column=2).value = key_words['title']
font = Font(name="Calibri", size=11, bold=True)
ws.cell(row=start_row, column=2).font = font
ws.cell(row=start_row, column=5).value = key_words['type']
font = Font(name="Calibri", size=11, bold=True)
ws.cell(row=start_row, column=5).font = font
ws.cell(row=start_row, column=6).value = key_words['remarks']
font = Font(name="Calibri", size=11, bold=True)
ws.cell(row=start_row, column=6).font = font
ws.cell(row=start_row, column=17).value = 'Unit/THB'
font = Font(name="Calibri", size=11, bold=False)
ws.cell(row=start_row, column=17).font = font
ws.cell(row=start_row, column=17).alignment = Alignment(horizontal='right', vertical='center')
# start_row=3
start_row += 1
for j in range(2, 18):
if j == 2:
ws.cell(row=start_row, column=j).font = Font(name="Calibri", size=8, bold=True)
ws.cell(row=start_row, column=j).value = 'Success rate'
elif j == 3 or j == 4:
ws.cell(row=start_row, column=j).font = Font(name="Calibri", size=11, bold=True)
if j == 3:
ws.cell(row=start_row, column=j).value = 'Client'
else:
ws.cell(row=start_row, column=j).value = 'Product Name'
else:
ws.cell(row=start_row, column=j).font = Font(name="Calibri", size=11, bold=False)
ws.cell(row=start_row, column=j).alignment = Alignment(horizontal='center', vertical='center')
ws.cell(row=start_row, column=j).fill = PatternFill("solid", fgColor="FDE9D9")
ws.cell(row=start_row, column=j).border = Border(left=Side(border_style='thin', color='000000'),
right=Side(border_style='thin', color='000000'),
top=Side(border_style='thin', color='000000'),
bottom=Side(border_style='double', color='000000'))
for j in range(5, 17):
ws.cell(row=start_row, column=j).number_format = '[$-en-US]mmm-yy;@'
ws.cell(row=start_row, column=5).value = datetime.datetime(self.YEAR - 1, 11, 1, 0, 0)
ws.cell(row=start_row, column=6).value = datetime.datetime(self.YEAR - 1, 12, 1, 0, 0)
ws.cell(row=start_row, column=7).value = datetime.datetime(self.YEAR, 1, 1, 0, 0)
ws.cell(row=start_row, column=8).value = datetime.datetime(self.YEAR, 2, 1, 0, 0)
ws.cell(row=start_row, column=9).value = datetime.datetime(self.YEAR, 3, 1, 0, 0)
ws.cell(row=start_row, column=10).value = datetime.datetime(self.YEAR, 4, 1, 0, 0)
ws.cell(row=start_row, column=11).value = datetime.datetime(self.YEAR, 5, 1, 0, 0)
ws.cell(row=start_row, column=12).value = datetime.datetime(self.YEAR, 6, 1, 0, 0)
ws.cell(row=start_row, column=13).value = datetime.datetime(self.YEAR, 7, 1, 0, 0)
ws.cell(row=start_row, column=14).value = datetime.datetime(self.YEAR, 8, 1, 0, 0)
ws.cell(row=start_row, column=15).value = datetime.datetime(self.YEAR, 9, 1, 0, 0)
ws.cell(row=start_row, column=16).value = datetime.datetime(self.YEAR, 10, 1, 0, 0)
accepted_info = []
for data_line in data_matrix:
if data_line[5].strip()=='reject':
data_line[3]='Reject'
elif data_line[5].strip()=='accept':
data_line[3]='Accept'
if str(data_line[3]).strip() == key_words['type']:
accepted_info.append(data_line)
accepted_info.append([None, None, None, None, None])
# start_row=4
start_row += 1
i = start_row
for each in accepted_info:
ws.cell(row=i, column=2).value = each[3]
ws.cell(row=i, column=3).value = each[0]
ws.cell(row=i, column=4).value = each[1]
if each[4] and isinstance(each[4], datetime.datetime):
print(isinstance(each[4], datetime.datetime))
print(each[4].year, each[4].month)
print(type(each[4].year), type(each[4].month))
for j in range(5, 17):
pro_year = each[4].year
pro_month = each[4].month
cal_year = ws.cell(row=start_row - 1, column=j).value.year
cal_month = ws.cell(row=start_row - 1, column=j).value.month
if pro_year == cal_year and pro_month == cal_month:
ws.cell(row=i, column=j).value = float(each[2])
for j in range(2, 18):
if 5 <= j < 17:
ws.cell(row=i, column=j).fill = PatternFill("solid", fgColor="FFFF00")
ws.cell(row=i, column=j).number_format = '###,###'
ws.cell(row=i, column=j).font = Font(name="Calibri", size=11, bold=False)
ws.cell(row=i, column=j).border = Border(left=Side(border_style='thin', color='000000'),
right=Side(border_style='thin', color='000000'),
bottom=Side(border_style='thin', color='000000'))
i += 1
behind_row = i
ws.cell(row=behind_row, column=4).value = 'Total'
ws.cell(row=behind_row,
column=5).value = f'=SUBTOTAL(9,E{start_row}:E{behind_row - 1})'
ws.cell(row=behind_row,
column=6).value = f'=SUBTOTAL(9,F{start_row}:F{behind_row - 1})'
ws.cell(row=behind_row,
column=7).value = f'=SUBTOTAL(9,G{start_row}:G{behind_row - 1})'
ws.cell(row=behind_row,
column=8).value = f'=SUBTOTAL(9,H{start_row}:H{behind_row - 1})'
ws.cell(row=behind_row,
column=9).value = f'=SUBTOTAL(9,I{start_row}:I{behind_row - 1})'
ws.cell(row=behind_row,
column=10).value = f'=SUBTOTAL(9,J{start_row}:J{behind_row - 1})'
ws.cell(row=behind_row,
column=11).value = f'=SUBTOTAL(9,K{start_row}:K{behind_row - 1})'
ws.cell(row=behind_row,
column=12).value = f'=SUBTOTAL(9,L{start_row}:L{behind_row - 1})'
ws.cell(row=behind_row,
column=13).value = f'=SUBTOTAL(9,M{start_row}:M{behind_row - 1})'
ws.cell(row=behind_row,
column=14).value = f'=SUBTOTAL(9,N{start_row}:N{behind_row - 1})'
ws.cell(row=behind_row,
column=15).value = f'=SUBTOTAL(9,O{start_row}:O{behind_row - 1})'
ws.cell(row=behind_row,
column=16).value = f'=SUBTOTAL(9,P{start_row}:P{behind_row - 1})'
ws.cell(row=behind_row, column=17).value = f'=SUM(E{behind_row}:P{behind_row})'
behind_row += 1
ws.cell(row=behind_row, column=4).value = 'Quarter Total'
ws.cell(row=behind_row, column=7).value = f'=E{behind_row - 1}+F{behind_row - 1}+G{behind_row - 1}'
ws.cell(row=behind_row, column=10).value = f'=H{behind_row - 1}+I{behind_row - 1}+J{behind_row - 1}'
ws.cell(row=behind_row, column=13).value = f'=K{behind_row - 1}+L{behind_row - 1}+M{behind_row - 1}'
ws.cell(row=behind_row, column=16).value = f'=N{behind_row - 1}+O{behind_row - 1}+P{behind_row - 1}'
behind_row += 1
global REFER_ROW
if mode == 'reject' or mode == 'c':
ws.cell(row=behind_row, column=4).value = ''
ws.cell(row=behind_row, column=7).value = ''
ws.cell(row=behind_row, column=10).value = ''
ws.cell(row=behind_row, column=13).value = ''
ws.cell(row=behind_row, column=16).value = ''
elif mode == 'a':
ws.cell(row=behind_row, column=4).value = 'Quarter Achievement ratio'
ws.cell(row=behind_row, column=7).value = f'=(G{behind_row - 1}*0.8+$E$15)/$E$16'
ws.cell(row=behind_row, column=10).value = f'=(J{behind_row - 1}*0.8+$H$15)/$H$16'
ws.cell(row=behind_row, column=13).value = f'=(M{behind_row - 1}*0.8+$K$15)/$K$16'
ws.cell(row=behind_row, column=16).value = f'=(P{behind_row - 1}*0.8+$N$15)/$N$16'
ws.cell(row=behind_row, column=17).value = f'=(Q{behind_row - 2}*0.8+$Q$15)/$Q$16'
REFER_ROW = behind_row - 1
elif mode == 'b':
ws.cell(row=behind_row, column=4).value = 'Quarter Achievement ratio'
ws.cell(row=behind_row, column=7).value = f'=(G{behind_row - 1}*0.6+G{REFER_ROW}*0.8+$E$15)/$E$16'
ws.cell(row=behind_row, column=10).value = f'=(J{behind_row - 1}*0.6+J{REFER_ROW}*0.8+$H$15)/$H$16'
ws.cell(row=behind_row, column=13).value = f'=(M{behind_row - 1}*0.6+M{REFER_ROW}*0.8+$K$15)/$K$16'
ws.cell(row=behind_row, column=16).value = f'=(P{behind_row - 1}*0.6+P{REFER_ROW}*0.8+$N$15)/$N$16'
ws.cell(row=behind_row, column=17).value = f'=(Q{behind_row - 2}*0.6+Q{REFER_ROW-1}*0.8+$Q$15)/$Q$16'
else:
ws.cell(row=behind_row, column=4).value = 'Quarter Achievement ratio'
ws.cell(row=behind_row, column=7).value = f'=G{behind_row - 1}/$E$16'
ws.cell(row=behind_row, column=10).value = f'=J{behind_row - 1}/$H$16'
ws.cell(row=behind_row, column=13).value = f'=M{behind_row - 1}/$K$16'
ws.cell(row=behind_row, column=16).value = f'=P{behind_row - 1}/$N$16'
for i in range(behind_row - 2, behind_row + 1):
for j in range(4, 18):
ws.cell(row=i, column=j).font = Font(name="Calibri", size=11, bold=True)
for i in range(behind_row - 2, behind_row):
for j in range(5, 18):
ws.cell(row=i, column=j).number_format = '_-* #,##0_-;-* #,##0_-;_-* "-"_-;_-@_-'
for i in range(behind_row, behind_row + 1):
for j in range(5, 18):
ws.cell(row=i, column=j).number_format = '0%'
# Reset start_row
start_row = behind_row + 2
return start_row
def create_excel(self, data_matrix):
self.update_msg.emit('Excelファイルを出力しています…')
self.update_progressbar.emit(0, 100)
wb = xl.Workbook()
ws = wb.active
self.update_msg.emit('Excel書式を調整しています…')
self.update_progressbar.emit(10, 100)
# Setting column width
ws.column_dimensions['A'].width = 1.89 + 0.78
ws.column_dimensions['B'].width = 10.33 + 0.78
ws.column_dimensions['C'].width = 11.78 + 0.78
ws.column_dimensions['D'].width = 44.33 + 0.78
ws.column_dimensions['E'].width = 9.22 + 0.78
ws.column_dimensions['F'].width = 9.78 + 0.78
ws.column_dimensions['G'].width = 9.78 + 0.78
ws.column_dimensions['H'].width = 9.78 + 0.78
ws.column_dimensions['I'].width = 9.78 + 0.78
ws.column_dimensions['J'].width = 9.78 + 0.78
ws.column_dimensions['K'].width = 9.78 + 0.78
ws.column_dimensions['L'].width = 9.78 + 0.78
ws.column_dimensions['M'].width = 9.78 + 0.78
ws.column_dimensions['N'].width = 9.78 + 0.78
ws.column_dimensions['O'].width = 9.78 + 0.78
ws.column_dimensions['P'].width = 9.78 + 0.78
ws.column_dimensions['Q'].width = 15.11 + 0.78
# Create the first diagram
ws.cell(row=1, column=2).value = '2:Opportunities Detail 案件一覧'
font = Font(name="Calibri", size=12, bold=True)
ws.cell(row=1, column=2).font = font
self.update_msg.emit('受注案件表を作成しています…')
self.update_progressbar.emit(20, 100)
start_row = self.create_diagram(ws=ws, start_row=3, mode='accept', data_matrix=data_matrix)
self.update_msg.emit('失注案件表を作成しています…')
self.update_progressbar.emit(30, 100)
start_row = self.create_diagram(ws=ws, start_row=start_row, mode='reject', data_matrix=data_matrix)
self.update_msg.emit('Aタイプ案件表を作成しています…')
self.update_progressbar.emit(40, 100)
start_row = self.create_diagram(ws=ws, start_row=start_row, mode='a', data_matrix=data_matrix)
self.update_msg.emit('Bタイプ案件表を作成しています…')
self.update_progressbar.emit(70, 100)
start_row = self.create_diagram(ws=ws, start_row=start_row, mode='b', data_matrix=data_matrix)
self.update_msg.emit('Cタイプ案件表を作成しています…')
self.update_progressbar.emit(80, 100)
self.create_diagram(ws=ws, start_row=start_row, mode='c', data_matrix=data_matrix)
self.update_msg.emit(F'Excelファイル「{self.export_filename}」を保存しています…')
self.update_progressbar.emit(100, 100)
wb.save(self.export_filename)
wb.close()
def run(self):
self.update_progressbar.emit(0, 100)
file_path = self.import_filename
self.update_msg.emit('Excelファイルを読み込んでいます…')
data_matrix = self.reading_excel(file_path=file_path)
if data_matrix==-1:
self.update_msg.emit('インポートされたファイル形式は違います!正しいExcelファイルをインポートしてください…')
self.finish_msgbox.emit('エラー', 'インポートされたファイル形式は違います!正しいExcelファイルをインポートしてください…')
self.update_progressbar.emit(100, 100)
os.remove(self.export_filename)
return
self.create_excel(data_matrix=data_matrix)
self.update_msg.emit(F'案件表の作成が完了しました…')
self.update_progressbar.emit(100, 100)
self.finish_msgbox.emit('完了', '案件表の作成が完了しました…')
MainWindow.file_path=''
if __name__ == '__main__':
app = QApplication(sys.argv)
REFER_ROW=-1
MainWindow = MainWindow()
MainWindow.show()
sys.exit(app.exec_()) | Josefina-Hernandez/DIV5_SALES_RECORD_TOOL | main_app.py | main_app.py | py | 25,205 | python | en | code | 0 | github-code | 13 |
71611904978 | """Calculate two two-tuples ali-metric."""
# pylint: disable=
from typing import Tuple
# import math
def twotuple_metric(vec1: Tuple[int, int], vec2: Tuple[int, int]) -> float:
    """Score how closely two (int, int) pairs agree.

    Return:
        1.0 when both components are equal;
        0.5 when one component is equal and the other differs by at most 1;
        0.0 otherwise, or when a component cannot be converted to int.

    >>> twotuple_metric([0, 1], [0, 1])
    1.0
    >>> twotuple_metric([0, 1], [0, 2])
    0.5
    >>> twotuple_metric([0, 1], [1, 2])
    0.0
    >>> twotuple_metric([0, 1], [0, ""])
    0.0
    """
    # Reject pairs whose first two components are not int-convertible.
    try:
        for component in (vec1[0], vec1[1], vec2[0], vec2[1]):
            int(component)
    except ValueError:
        return 0.0

    first_match = vec1[0] == vec2[0]
    second_match = vec1[1] == vec2[1]
    if first_match and second_match:
        return 1.0
    if (first_match and abs(vec1[1] - vec2[1]) <= 1) or (
        second_match and abs(vec1[0] - vec2[0]) <= 1
    ):
        return 0.5
    return 0.0
| ffreemt/text-alignment-benchmarks | align_benchmark/twotuple_metric.py | twotuple_metric.py | py | 1,209 | python | en | code | 0 | github-code | 13 |
70336848978 | from itertools import combinations
import numpy as np
import pandas as pd
def read_calc_event_into_df(event_file: str, types: [str]) -> pd.DataFrame:
    """Load a Tobii eye-tracker event CSV and keep only the given event types.

    The first CSV column is parsed as the (datetime) index; rows whose
    ``event`` value is not in *types* are dropped and the timezone is
    stripped from the index.  For fixations the x/y coordinates and
    duration are kept, with x shifted left by two 1920-px screens
    (presumably a multi-monitor offset -- TODO confirm); otherwise only
    the duration column is kept.

    :param event_file: path of the event CSV file
    :param types: event type names to keep (passed to ``Series.isin``)
    :return: filtered, timezone-naive DataFrame
    """
    event_df = pd.read_csv(event_file, index_col=0, parse_dates=[0])
    event_df = event_df[event_df["event"].isin(types)]
    event_df["duration"] = event_df["dur"]
    # NOTE(review): this compares the (annotated) list parameter against the
    # string "fixation"; it is only True when callers pass the plain string
    # rather than a list -- verify against the call sites.
    if types == "fixation":
        event_df = event_df[["x", "y", "duration", "event"]]
        event_df['x'] = event_df['x'] - 1920 * 2
    else:
        event_df = event_df[["duration"]]
    # ignore +1 timezone
    event_df = event_df.tz_localize(None)
    return event_df
def calculate_time_delta(gaze_df):
    """Append a 'dt' column: seconds elapsed since the previous sample.

    Timestamps are taken from the frame's (datetime) index; the first row
    has no predecessor, so its 'dt' is NaN.  Helper columns 't' and 't-1'
    are created on *gaze_df* and dropped from the returned copy.
    """
    gaze_df['t'] = gaze_df.index
    gaze_df['t-1'] = gaze_df['t'].shift(1)
    # Difference of timestamps, converted from timedelta to float seconds.
    gaze_df['dt'] = (gaze_df['t'] - gaze_df['t-1']) / np.timedelta64(1, 's')
    return gaze_df.drop(columns=['t', 't-1'])
def calculate_delta(df, column_name, delta_name):
    """Append *delta_name*: row-to-row difference of *df[column_name]*.

    The first row has no predecessor, so its delta is NaN.  Helper columns
    't' and 't-1' are created on *df* and dropped from the returned copy.

    df: (pd.DataFrame) frame to extend
    column_name: (str) column to difference
    delta_name: (str) name of the new delta column
    :return: (pd.DataFrame) frame with *delta_name* appended
    """
    df['t'] = df[column_name]
    df['t-1'] = df['t'].shift(1)
    df[delta_name] = df['t'] - df['t-1']
    return df.drop(columns=['t', 't-1'])
def calculate_velocity_and_acceleration(gaze_df, values, data_name=None):
    """Add per-axis and combined velocity/acceleration columns to *gaze_df*.

    For each axis name in *values* (and every non-empty combination of
    axes) this derives, using the existing 'dt' column (seconds between
    rows):

    * ``d<prefix>_<axis>``  -- raw delta (via ``calculate_delta``)
    * ``v<prefix>_<axis>``  -- delta / dt
    * ``v<prefix>_<a>_<b>`` -- sqrt(sum of squared deltas) / dt
    * ``a<prefix>_...``     -- delta of the velocity column / dt

    *data_name*, when given, is inserted into every generated column name
    as ``_<data_name>``; source columns are then expected to be named
    ``<data_name>_<axis>``.

    NOTE(review): for single-axis "combinations" (n=1) the combined-velocity
    loop recomputes ``v<prefix>_<axis>`` as sqrt(d**2)/dt, overwriting the
    signed velocity written just above with its absolute value -- confirm
    this is intended.
    """
    if data_name is None:
        prefix = ""
    else:
        prefix = "_" + data_name
    # All non-empty subsets of the axis names, in increasing size.
    value_combinations = []
    for n in range(1, len(values) + 1):
        value_combinations += combinations(values, n)
    # Raw per-axis deltas: d<prefix>_<axis>.
    for val in values:
        if prefix == "":
            sel_val = val
        else:
            sel_val = prefix[1:] + "_" + val
        d_val = 'd' + prefix + "_" + val
        gaze_df = calculate_delta(gaze_df, column_name=sel_val, delta_name=d_val)
    # vx, vy and velocity v
    for val in values:
        d_val = 'd' + prefix + "_" + val
        v_val = 'v' + prefix + "_" + val
        gaze_df[v_val] = gaze_df[d_val] / gaze_df['dt']
    # Squared deltas, cached per axis for the combination sums below.
    v_per_value = dict()
    for val in values:
        d_val = 'd' + prefix + "_" + val
        v_per_value[d_val] = gaze_df[d_val] ** 2
    # Combined (Euclidean) velocity per axis subset.
    for value_combination in value_combinations:
        v_tmp = None
        v_name_combination = 'v' + prefix
        for val in value_combination:
            d_val = 'd' + prefix + "_" + val
            v_name_combination = v_name_combination + "_" + val
            if v_tmp is None:
                v_tmp = v_per_value[d_val]
            else:
                v_tmp = v_tmp + v_per_value[d_val]
        gaze_df[v_name_combination] = np.sqrt(np.array(v_tmp)) / gaze_df['dt']
    # delta velocities (temporary calculations)
    for val in values:
        dv_val = 'dv' + prefix + "_" + val
        v_val = 'v' + prefix + "_" + val
        gaze_df = calculate_delta(gaze_df, column_name=v_val, delta_name=dv_val)
    for value_combination in value_combinations:
        v_name_combination = 'v' + prefix
        dv_name_combination = 'dv' + prefix
        for val in value_combination:
            v_name_combination = v_name_combination + "_" + val
            dv_name_combination = dv_name_combination + "_" + val
        gaze_df = calculate_delta(gaze_df, column_name=v_name_combination, delta_name=dv_name_combination)
    # acceleration [px/s^2]
    for val in values:
        a_val = 'a' + prefix + "_" + val
        dv_val = 'dv' + prefix + "_" + val
        gaze_df[a_val] = gaze_df[dv_val] / gaze_df['dt']
    for value_combination in value_combinations:
        a_name_combination = 'a' + prefix
        dv_name_combination = 'dv' + prefix
        for val in value_combination:
            a_name_combination = a_name_combination + "_" + val
            dv_name_combination = dv_name_combination + "_" + val
        gaze_df[a_name_combination] = gaze_df[dv_name_combination] / gaze_df['dt']
    # drop delta velocities
    dv_value_combinations_names = []
    for value_combination in value_combinations:
        dv_name_combination = 'dv' + prefix
        for val in value_combination:
            dv_name_combination = dv_name_combination + "_" + val
        dv_value_combinations_names.append(dv_name_combination)
    gaze_df = gaze_df.drop(columns=["dv" + prefix + "_" + val for val in values] + dv_value_combinations_names)
    return gaze_df
| im-ethz/CHI-2023-paper-Leveraging-driver-vehicle-and-environment-interaction | eye_feature_engineering/eye_feature_utils/calc_utils.py | calc_utils.py | py | 5,031 | python | en | code | 2 | github-code | 13 |
7800066704 | # With em conjunto com o open é uma função de python que permite abrir um arquivo "as file"
# (continued) Inside the with-block the file contents are read into
# ``contents`` and then split into a list of lines.
print('Giovanni Oliveira da Silva')
print('Guilherme Castilho')
print('Guilherme Maciel')
print('Renato Caetité Cruz')
print('=' * 42)
print('Totalização Simples de Vendas de Produtos\n')
# Raw lines read from the .txt file.
arrFile = []
# Matrix with one [id, quantity, price] field list per product row.
arrProdutos = []
# quantity * price for one matching row.
totVend = 0.0
# Accumulated total for the product currently being queried.
soma = 0.0
# Open the sales file; the context manager closes it automatically.
with open("vendas.txt") as file:
    contents = file.read()
    arrFile = contents.split("\n")
# Split every line on ';' into its fields.
i = 0
while i < len(arrFile):
    arrProdutos.append(arrFile[i].split(';'))
    i += 1
# Query loop: entering 0 terminates the program.
valor = int(input("Digite o código: "))
while valor != 0:
    # Validate the product code range.
    if valor < 10000 or valor > 21000:
        print("{} Código inválido (deve ser entre 10000 e 21000)".format(valor))
        valor = int(input("Digite o código: "))
    else:
        # Sum quantity * price over every row matching this product id.
        i = 0
        while i < len(arrProdutos):
            if str(valor) in arrProdutos[i]:
                totVend = float(arrProdutos[i][1]) * float(arrProdutos[i][2])
                soma += totVend
            i += 1
        print("Total vendido do produto {} = R$ {:.2f}".format(valor, soma))
        valor = int(input("Digite o código: "))
        soma = 0.0
        # BUG FIX: the original reset a misspelled name ``totVen``, which
        # silently created a new variable instead of resetting totVend.
        totVend = 0.0
print("Fim do programa")
| Guilherme-Maciel/FATEC_Projeto_Vendas | N2A.py | N2A.py | py | 1,915 | python | pt | code | 0 | github-code | 13 |
24595049515 | import connexion
import six
from flask_sqlalchemy import SQLAlchemy
from swagger_server.models.api_response import ApiResponse
from swagger_server.models.order import Order # noqa: E501
from swagger_server.models.position_in_order import PositionInOrder # noqa: E501
from swagger_server import util
from swagger_server.Models import *
from swagger_server.db import db
from sqlalchemy.dialects.postgresql import UUID
import uuid
def delete_order(order_id):  # noqa: E501
    """Delete an order and its line positions.

    Returns 'success!' on success, or ('This order does not exist', 404)
    when no order with *order_id* exists.
    """
    order = OrderDB.query.filter(OrderDB.id == order_id).first()
    # Check existence before touching the session (the original queued
    # position deletions first and only then noticed the missing order).
    if order is None:
        return 'This order does not exist', 404
    # BUG FIX: positions are linked to their order through ``order_uuid``
    # (see place_order), not through their own primary key ``id`` -- the
    # original filter never matched, leaving orphaned position rows.
    positions = PositionInOrderDB.query.filter(
        PositionInOrderDB.order_uuid == order_id).all()
    for position in positions:
        db.session.delete(position)
    db.session.delete(order)
    db.session.commit()
    return 'success!'
def get_order_by_id(order_id):  # noqa: E501
    """Return all orders as ``{"Orders": [...]}``.

    Each entry carries id, Price, address and stringified status.

    NOTE(review): despite its name this endpoint ignores ``order_id`` and
    lists every order; kept as-is because clients may depend on the list
    response -- confirm the intended behaviour.  Debug ``print`` calls
    were removed.
    """
    orders = []
    for order in OrderDB.query.all():
        orders.append(dict(id=order.id, Price=order.Price,
                           address=order.address, status=str(order.status)))
    return {"Orders": orders}
def place_order(body=None):  # noqa: E501
    """Create an order (plus its position rows) from the JSON request body.

    Expects ``{"Positions": [{"PizzaId": ..., "quantity": ...}, ...],
    "address": ...}``.  One ``PositionInOrderDB`` row is added per
    position, then a single ``OrderDB`` row is committed; order and
    positions share a freshly generated UUID.

    NOTE(review): the ``body`` parameter is immediately overwritten from
    ``connexion.request.get_json()``; an unknown pizza id makes ``pizza``
    None and raises AttributeError on ``pizza.price``; the ``print``
    calls look like debug leftovers -- confirm before removing.
    """
    body=connexion.request.get_json()
    print(body)
    sumOfOrder=0
    # Shared key linking the order row to its position rows.
    newUUID = uuid.uuid4()
    print(newUUID)
    for e in body['Positions']:
        print(e['PizzaId'],e['quantity'])
        pizza = PizzaDB.query.filter(PizzaDB.id == e['PizzaId']).first()
        sumOfOrder+=pizza.price*e['quantity']
        newPos = PositionInOrderDB(order_uuid=str(newUUID),id_pizza=pizza.id,count=e['quantity'])
        db.session.add(newPos)
    print(sumOfOrder)
    newOrder=OrderDB(address=body['address'],Price=sumOfOrder,status='in_progress',id=str(newUUID))
    db.session.add(newOrder)
    db.session.commit()
    return 'do some magic!'
# def place_order(id=None, positions=None, ship_date=None, price=None, status=None, complete=None): # noqa: E501
# """Place an order for a pizza
#
# Place a new order in the store # noqa: E501
#
# :param id:
# :type id: int
# :param positions:
# :type positions: list | bytes
# :param ship_date:
# :type ship_date: str
# :param price:
# :type price: float
# :param status:
# :type status: str
# :param complete:
# :type complete: bool
#
# :rtype: Order
# """
# print('test!!!')
# if connexion.request.is_json:
# positions = [PositionInOrder.from_dict(d) for d in connexion.request.get_json()] # noqa: E501
# print(price)
# ship_date = util.deserialize_datetime(ship_date)
# return 'do some magic!'
def store_create_position_post(body=None):  # noqa: E501
    """Compute the price of a single order position from the request body.

    Parses the JSON body into a ``PositionInOrder`` model, multiplies the
    referenced pizza's unit price by the requested quantity and returns
    the enriched position object.  Nothing is persisted here.

    NOTE(review): a non-JSON request leaves ``body`` as the default None
    and the attribute accesses below raise; ``pizza_id``/``count`` locals
    are assigned but ``count`` is never used -- confirm intent.
    """
    if connexion.request.is_json:
        body = PositionInOrder.from_dict(connexion.request.get_json())  # noqa: E501
    print(body)
    pizza_id=body.pizza_id
    count=body.quantity
    body.price=PizzaDB.query.filter(PizzaDB.id==pizza_id).first().price*body.quantity
    # PositionInOrderDB()
    # uuid.uuid4()
    return body
# def store_create_position_post(pizza_id=None, quantity=None, price=None, discount=None): # noqa: E501
# """Create positions for order
#
# Place a new position in the order # noqa: E501
#
# :param pizza_id:
# :type pizza_id: dict | bytes
# :param quantity:
# :type quantity: int
# :param price:
# :type price: float
# :param discount:
# :type discount: float
#
# :rtype: PositionInOrder
# """
# if connexion.request.is_json:
# pizza_id = object.from_dict(connexion.request.get_json()) # noqa: E501
# return 'do some magic!'
| Serafimka2705/flask_open_api | flask_open_api/swagger_server/controllers/store_controller.py | store_controller.py | py | 3,640 | python | en | code | 0 | github-code | 13 |
27242270356 | from io import BytesIO
# Standard library
import secrets

# Third-party
from xhtml2pdf import pisa

# Django
from django.core.mail import send_mail, EmailMultiAlternatives
from django.http import HttpResponse
from django.template.loader import get_template

# Project
from movintrendz.settings import EMAIL_HOST_USER
def render_to_pdf(template_src, context_dict=None):
    """Render a Django template to a PDF HttpResponse via xhtml2pdf.

    Args:
        template_src: template path understood by ``get_template``.
        context_dict: optional template context (defaults to an empty
            dict).  A ``None`` default replaces the original mutable
            ``{}`` default argument, which is shared across calls.

    Returns:
        HttpResponse with ``application/pdf`` content on success, or
        None when pisa reports a rendering error.
    """
    if context_dict is None:
        context_dict = {}
    template = get_template(template_src)
    html = template.render(context_dict)
    result = BytesIO()
    # NOTE(review): ISO-8859-1 raises UnicodeEncodeError for non-Latin-1
    # template output -- confirm templates are Latin-1-safe.
    pdf = pisa.pisaDocument(BytesIO(html.encode("ISO-8859-1")), result)
    if not pdf.err:
        return HttpResponse(result.getvalue(), content_type='application/pdf')
    return None
# Lookup tables for number-to-words conversion.  Every non-empty entry
# carries a trailing space so words can be concatenated directly.
ones = ["", "One ", "Two ", "Three ", "Four ", "Five ", "Six ", "Seven ",
        "Eight ", "Nine ", "Ten ", "Eleven ", "Twelve ", "Thirteen ",
        "Fourteen ", "Fifteen ", "Sixteen ", "Seventeen ", "Eighteen ",
        "Nineteen "]
twenties = ["", "", "Twenty ", "Thirty ", "Forty ", "Fifty ", "Sixty ",
            "Seventy ", "Eighty ", "Ninety "]
# NOTE(review): "Quindecillion" lacks the trailing space the other entries
# have -- preserved as-is to keep output identical.
thousands = ["", "Thousand ", "Million ", "Billion ", "Trillion ",
             "Quadrillion ", "Quintillion ", "Sextillion ", "Septillion ",
             "Octillion ", "Nonillion ", "Decillion ", "Undecillion ",
             "Duodecillion ", "Tredecillion ", "Quattuordecillion ",
             "Quindecillion", "Sexdecillion ", "Septendecillion ",
             "Octodecillion ", "Novemdecillion ", "Vigintillion "]
def num999(n):
    """Spell out the lowest three decimal digits of *n* in English words.

    Returns e.g. "One Hundred and Twenty Three " (trailing space comes
    from the lookup tables) or "" for a zero group.
    """
    units = n % 10
    tens = (n // 10) % 10
    hundreds = (n // 100) % 10
    head = ""
    if hundreds != 0:
        # Plain "Hundred " when the group is an exact hundred, else "and".
        joiner = "Hundred " if (tens == 0 and units == 0) else "Hundred and "
        head = ones[hundreds] + joiner
    # 0-19 are looked up directly; 20-99 combine a tens word and a unit word.
    if tens <= 1:
        tail = ones[n % 100]
    else:
        tail = twenties[tens] + ones[units]
    return head + tail
def num2word(num):
    """Convert a non-negative integer to English words, e.g. 1234 ->
    "One Thousand Two Hundred and Thirty Four".

    Groups the decimal digits in threes from the right and attaches the
    matching scale word from ``thousands``.
    """
    if num == 0:
        return 'Zero'
    digits = str(num)
    words = ""
    scale = 0
    while digits:
        group, digits = digits[-3:], digits[:-3]
        group_value = int(group)
        # An all-zero group contributes nothing (no scale word either).
        if group_value != 0:
            words = num999(group_value) + thousands[scale] + words
        scale += 1
    # Every table word carries a trailing space; trim the final one.
    return words[:-1]
def send_email(subject, message, from_email, to_list):
    """Send a plain-text email through Django's ``send_mail``.

    Delivery failures are swallowed (``fail_silently=True``), so the
    constant True return value does NOT indicate successful delivery.
    """
    send_mail(subject, message, from_email, to_list, fail_silently=True)
    return True
import math, random
import string
def generateOTP4digit():
    """Return a 4-digit numeric one-time password as a string.

    Uses the ``secrets`` CSPRNG instead of ``random`` because OTPs are
    security-sensitive tokens; ``random`` output is predictable.
    """
    digits = "0123456789"
    return ''.join(secrets.choice(digits) for _ in range(4))
def generateOTP6digit():
    """Return a 6-character alphanumeric one-time password.

    Characters are drawn from digits plus upper/lower-case ASCII letters
    with the ``secrets`` CSPRNG (OTPs are security-sensitive).  Also
    renames the original local variable ``string``, which shadowed the
    imported ``string`` module.
    """
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    return ''.join(secrets.choice(alphabet) for _ in range(6))
def randomStringDigits(stringLength=30):
    """Generate a random string of ASCII letters and digits.

    Args:
        stringLength: number of characters to produce (default 30).

    Uses ``secrets.choice`` rather than ``random.choice`` so the result
    is safe to use as a token or identifier.
    """
    lettersAndDigits = string.ascii_letters + string.digits
    return ''.join(secrets.choice(lettersAndDigits) for _ in range(stringLength))
import re
# Make a regular expression
# for validating an Email
# Basic email-shape pattern. Raw string so the regex escapes are not
# processed as Python string escapes (non-raw "\w"/"\." raise
# SyntaxWarning on Python 3.12+); the pattern value is unchanged.
regex = r'^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'

def check_email_correctness(email):
    """Return True when *email* matches the basic email pattern above."""
    return re.search(regex, email) is not None
'''Calendar utility'''
from cal.models import Event
from .models import Subscription_Stop
from datetime import date, datetime, timedelta
def createEvents(a):
    """Create one calendar Event per scheduled delivery of subscription *a*.

    Events are generated every `a.interval` days from the subscription's
    start date up to and including its end date; the very first event is
    flagged as already paid for.
    """
    sy, sm, sd = [int(part) for part in str(a.start_date).split('-')]
    ey, em, ed = [int(part) for part in str(a.end_date).split('-')]
    step = timedelta(days=int(a.interval))
    first_delivery = datetime(sy, sm, sd)
    last_delivery = datetime(ey, em, ed)
    owner = a.customer_email
    tx_id = a.transaction_id
    event_title = a.product_name + " subscription " + a.shifts

    current = first_delivery
    while current <= last_delivery:
        print('created')
        Event(
            title=event_title,
            user=owner,
            sub_transaction_id=tx_id,
            start_time=current,
        ).save()
        current = current + step

    # The opening delivery is covered by the initial payment.
    Event.objects.filter(sub_transaction_id=tx_id, start_time=first_delivery).update(isPaidForEvent=True)
def updateEvent(subscription_instance, isStartDateChanged, isEndDateChanged, isIntervalChanged):
    """
    Re-synchronise the calendar Events of a subscription after it was edited.

    Parameters:
        subscription_instance: queryset/sequence whose first element is the
            updated subscription record.
        isStartDateChanged: truthy when the start date changed; the whole
            schedule is rebuilt from scratch.
        isEndDateChanged: 0 = unchanged, 1 = extended (append events past
            the old last event), any other value = shortened (trim events
            past the new end date).
        isIntervalChanged: truthy when the delivery interval changed;
            today's and future events are regenerated.

    Finally, each event's isStopped flag is set when the event falls inside
    any Subscription_Stop window for this subscription.
    """
    sub = subscription_instance[0]
    transaction_id = sub.transaction_id
    title = sub.product_name + " subscription " + sub.shifts
    interval = sub.interval
    subscription_start = sub.start_date
    subscription_end = sub.end_date
    user = sub.customer_email
    # Lazy queryset: it is only evaluated in the stop-window pass at the
    # bottom, so it observes the events created/deleted above it.
    events = Event.objects.filter(sub_transaction_id=transaction_id, user=user)
    today_date = date.today()

    def _create_events(first_date):
        """Create one Event every `interval` days from first_date .. subscription_end."""
        current = first_date
        while current <= subscription_end:
            Event(
                title=title,
                sub_transaction_id=transaction_id,
                start_time=current,
                user=user,
            ).save()
            current = current + timedelta(days=interval)

    if isStartDateChanged:
        # Start date moved: delete everything and rebuild the schedule.
        Event.objects.filter(sub_transaction_id=transaction_id, user=user).delete()
        _create_events(subscription_start)
        Event.objects.filter(sub_transaction_id=transaction_id,
                             start_time=subscription_start).update(isPaidForEvent=True)
    elif isIntervalChanged:
        # Interval changed: regenerate only today's and future events.
        Event.objects.filter(sub_transaction_id=transaction_id, user=user,
                             start_time__gte=today_date).delete()
        if today_date <= subscription_start:
            first = subscription_start
        else:
            last_end_date = Event.objects.filter(
                sub_transaction_id=transaction_id, user=user
            ).order_by('-start_time')[0].start_time
            print(last_end_date)
            first = last_end_date + timedelta(days=interval)
        _create_events(first)
        if today_date <= subscription_start:
            Event.objects.filter(sub_transaction_id=transaction_id,
                                 start_time=subscription_start).update(isPaidForEvent=True)
    elif isEndDateChanged != 0:
        if isEndDateChanged == 1:
            # Subscription extended: append events after the current last one.
            last_end_date = Event.objects.filter(
                sub_transaction_id=transaction_id, user=user
            ).order_by('-start_time')[0].start_time
            _create_events(last_end_date + timedelta(days=interval))
        else:
            # Subscription shortened: drop events past the new end date.
            Event.objects.filter(sub_transaction_id=transaction_id, user=user,
                                 start_time__gt=subscription_end).delete()

    # Flag events that fall inside any stop (pause) window.
    # BUG FIX: the previous nested loop overwrote isStopped once per stop
    # window, so only the *last* Subscription_Stop was ever honoured.
    stop_dates = Subscription_Stop.objects.filter(transaction_id=transaction_id, user=user)
    for event in events:
        event.isStopped = any(
            stop.start_date <= event.start_time <= stop.end_date
            for stop in stop_dates
        )
        event.save(update_fields=['isStopped'])
def deleteEvent(transaction_id, user):
    '''
    Remove every calendar Event of a deleted subscription, together with
    any Subscription_Stop (pause) records tied to it.
    '''
    Event.objects.filter(sub_transaction_id = transaction_id, user = user).delete()
    Subscription_Stop.objects.filter(transaction_id = transaction_id, user = user).delete()
32564502499 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Prefer setuptools; fall back to plain distutils on legacy environments.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
# The long description shown on PyPI is README plus the changelog.
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read().replace('.. :changelog:', '')
# Runtime dependencies (intentionally empty here; see requirements.txt).
requirements = [
    # use packages in the requirements.txt
]
# Dependencies needed only to run the test suite.
test_requirements = [
    'pytest',
]
setup(
    name='pypeline',
    version='0.1.0',
    description="python based packages for Optical Coherence Tomography applications",
    long_description=readme + '\n\n' + history,
    author="Zhijia Yuan",
    author_email='shelpermisc@gmail.com',
    url='https://github.com/shelper/pypeline',
    packages=[
        'pypeline',
    ],
    package_dir={'pypeline':
                 'pypeline'},
    include_package_data=True,
    install_requires=requirements,
    license="ISCL",
    zip_safe=False,
    keywords='pypeline',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: ISC License (ISCL)',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
    ],
    test_suite='tests',
    tests_require=test_requirements
)
| shelper/pypeline | setup.py | setup.py | py | 1,327 | python | en | code | 0 | github-code | 13 |
30000757758 | # -*- coding: utf-8 -*-
from repo import store
from util import file
from util.tool import to_list, to_time, pre_process, sort_coo, extract_topn_from_vector
from analysis.algorithm import community_detection as cd
from sklearn.feature_extraction import stop_words
class ClusterAnalysis():
    """
    Cluster analysis over search-result vertices stored in a repo.store.Graph.

    NOTE(review): this class is Python-2 only as written -- it relies on
    dict.has_key() (getSimilarClusterList, getDtimeStatistics), the builtin
    unicode type (insertClusterTfIdf) and the builtin reduce()
    (getMaxMemberCluster), none of which exist in Python 3.
    """
    def __init__(self, datum, cluster=None):
        # cluster based on search data
        self.datum = datum
        self.cluster = cluster
        self.graph = store.Graph()
        self.clusterMapper = store.ClusterMapper(self.cluster)
        # networkx
        self.nx = store.NetworkX()
        self.community = cd.CommunityDetection(self.nx.getGraph())
        if self.cluster is not None:
            self.handleCluster()
    """
    When cluster data exists, build the vertex graph from the search data
    and assign a cluster ID to each vertex.
    """
    def handleCluster(self):
        idx = 0
        for data in tuple(self.datum):
            idx += 1
            vertex = store.Vertex(data)
            self.graph.setVertex(vertex.getGid(),vertex)
            print(float(idx)/len(tuple(self.datum))*100)
        self.setClusterId()
    """
    Return the vertices that belong to the given cluster.
    """
    def getClusterMember(self,clusterId):
        return list(filter(lambda x : x.getClusterId() == clusterId, self.graph.getVertices().values()))
    """
    Return the cluster id that contains the given vertex id.
    @Deprecated
    """
    def getClusterIdByVid(self, vid):
        clusterId = ''
        for key, value in self.cluster:
            valueArr = to_list(value)
            if vid in valueArr:
                clusterId = key
        return clusterId
    """
    After cluster members are found, set cluster_id in vertex object
    """
    def setClusterId(self):
        for cluster_id, members in self.cluster:
            for vertex in [self.graph.getVertex(member) for member in to_list(members)]:
                vertex.setClusterId(cluster_id)
    """
    Cross-check the rule-based clusters against the location-based
    clusters and return a new cluster list.
    """
    def getSimilarClusterList(self):
        vertices = self.graph.getVertices()
        self.clusterMapper.setClusterListByRule(vertices)
        self.clusterMapper.setClusterListByLoc(vertices)
        print('ruleMapper >> '+str(self.clusterMapper.ruleMapper))
        print('locationMapper >> '+str(self.clusterMapper.locMapper))
        # new cluster list dictionary
        clusterList = dict()
        # new cluster number
        idx = 1
        # start building the new clusters
        for cids in self.clusterMapper.ruleMapper.values():
            locDict = dict()
            locCntDict = dict()
            # collect the locations of every cluster id that shares this rule
            for cid in cids:
                locDict[cid] = self.clusterMapper.getLocMapperItem(cid)
            # count how often each location occurs across those clusters
            for key,value in locDict.items():
                for v in value:
                    if not locCntDict.has_key(v):
                        locCntDict[v] = 1
                    else:
                        locCntDict[v] += 1
            # keep locations seen at least twice and group the cluster ids
            # that share one of them under the current new cluster number
            for key, value in locDict.items():
                for v in value:
                    for key1,value1 in list(filter(lambda item: item[:][1] > 1, locCntDict.items())):
                        if not clusterList.has_key(idx):
                            clusterList[idx] = set()
                        if v == key1:
                            clusterList[idx].add(key)
            if clusterList.has_key(idx):
                idx += 1
        # finished building the new clusters
        self.clusterMapper.createNewCluster(clusterList)
        return clusterList
    """
    step 1. In clusters, dtime distinct and counting : complete
    step 2. check clusters alive time : complete
    """
    def getDtimeStatistics(self,cluster,db=None):
        dtimeDict = dict()
        dtimeRangeDict = dict()
        for cluster_id,members in cluster:
            dtimeDict[cluster_id] = set()
            for vertex in list(filter(lambda x:x['cluster_id'] == cluster_id, [self.graph.getVertex(member).get() for member in to_list(members)])):
                if 'dtime' in vertex.keys():
                    dtimeDict[cluster_id].add(to_time(vertex['dtime']))
            dtimeRangeDict[cluster_id] = dict()
            # lifetime of the cluster in days, inclusive of both endpoints
            dtimeRangeDict[cluster_id]['alive'] = (max(dtimeDict[cluster_id]) - min(dtimeDict[cluster_id])).days + 1
        for cluster_id,members in cluster:
            dtimeCntDict = dict()
            for vertex in list(filter(lambda x:x['cluster_id'] == cluster_id, [self.graph.getVertex(member).get() for member in to_list(members)])):
                for time in dtimeDict[cluster_id]:
                    if to_time(vertex['dtime']) == time:
                        if not dtimeCntDict.has_key(time):
                            dtimeCntDict[time] = 0
                        dtimeCntDict[time] += 1
            dtimeDict[cluster_id] = dtimeCntDict
        print('dtimeDict >> ', dtimeDict)
        print('dtimeRangeDict >> ', dtimeRangeDict)
    """
    step 3. get each cluster detection reason distinct : complete
    """
    def getDetectionReasonCluster(self,cluster):
        reasonDict = dict()
        for cluster_id, members in cluster:
            reasonDict[cluster_id] = set()
            for vertex in list(filter(lambda x:x['cluster_id'] == cluster_id, [self.graph.getVertex(member).get() for member in to_list(members)])):
                reasonDict[cluster_id].add(vertex['detection_reason'])
        print(reasonDict)
    # Return the (cluster_id, members) pair with the most members.
    def getMaxMemberCluster(self,cluster):
        return reduce(lambda x,y: y if len(x) < len(y) else x, [(cluster_id, to_list(members)) for cluster_id, members in cluster])
    # Export every clustered vertex to result.csv; the header is taken from
    # the keys of the first vertex. NOTE(review): `file` here is the
    # util.file module imported at the top, not the (Python 2) builtin.
    def exportCSV4ClusterNumber(self):
        header = list()
        results = list()
        idx = 0
        for cluster_id, members in self.cluster:
            for vertex in self.getClusterMember(cluster_id):
                result = list()
                for key in vertex.get():
                    if idx == 0:
                        header.append(key)
                    result.append(vertex.get()[key])
                results.append(result)
                idx += 1
        file.writeCSV('result.csv', header, results)
    # Run Louvain community detection on the given edge list.
    def runLouvainMethod(self, datum):
        self.nx.add_edges_from(datum)
        self.clusterMapper.setLouvainMethodCluster(self.community.louvain_method())
        self.nx.clear()
        return self.clusterMapper.louvainMethodCluster
    # Run Girvan-Newman community detection on the given edge list.
    def runGirvanNewman(self, datum):
        self.nx.add_edges_from(datum)
        self.clusterMapper.setGirvanNewmanCluster(self.community.girvan_newman())
        self.nx.clear()
        return self.clusterMapper.girvanNewmanCluster
    # Compute tf-idf keywords per cluster and store the top 5 via qt/qr.
    def insertClusterTfIdf(self,cluster=None, qt=None, qr=None):
        import pandas as pd
        import json
        from sklearn.feature_extraction.text import CountVectorizer
        from sklearn.feature_extraction.text import TfidfTransformer
        # flatten each cluster's vertex properties into one line of text
        if cluster is None:
            cluster = self.cluster
        _dict = dict()
        _dict['text'] = list()
        _dict['cluster_id'] = list()
        for cluster_id, members in cluster:
            text = ''
            for vertex in [self.graph.getVertex(member).get() for member in to_list(members)]:
                for key in vertex.keys():
                    if vertex[key] is not None:
                        value = None
                        if isinstance(vertex[key], unicode):
                            value = vertex[key].encode('utf-8')
                        else:
                            value = str(vertex[key])
                        text += ' '+pre_process(value)
                    else:
                        text += ''
            # NOTE(review): no-op -- str.strip() returns a new string that
            # is discarded here; the untrimmed text is appended below.
            text.strip()
            _dict['text'].append(text)
            _dict['cluster_id'].append(cluster_id)
        # pandas dataframe set
        df_idf = pd.DataFrame(_dict)
        # df_idf['text'] = reduce(lambda x,y: x+' '+y.apply(lambda y1:pre_process(y1)), [df_idf[key] for key in self.graph.vKeys()])
        # not yet used stopwords
        # stopWords = file.get_stop_words('analysis/stopword.txt')
        # list of per-cluster documents
        docs = df_idf['text'].tolist()
        # build the count vectorizer; the token pattern also keeps dotted
        # numeric tokens (presumably IPv4[:port] -- note the unescaped '.'
        # matches any character, verify intent)
        cv = CountVectorizer(max_df=0.85, stop_words='english', analyzer='word', token_pattern=r'(?u)\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}\:\d*|(?u)\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}|\b\w\w+\b')
        # build the corpus vocabulary
        word_count_vector = cv.fit_transform(docs)
        # print(list(cv.vocabulary_.keys())[:10])
        # fit the tf-idf transformer on the counts
        tfidf_transformer = TfidfTransformer(smooth_idf=True, use_idf=True)
        tfidf_transformer.fit(word_count_vector)
        # print idf values
        # df_idf1 = pd.DataFrame(tfidf_transformer.idf_, index=cv.get_feature_names(),columns=["tf_idf_weights"])
        # sort ascending
        # print(df_idf1.sort_values(by=['tf_idf_weights']))
        feature_names = cv.get_feature_names()
        idx = 1
        # extract and store the top keywords for each cluster document
        for doc in docs:
            tf_idf_vector = tfidf_transformer.transform(cv.transform([doc]))
            sorted_items=sort_coo(tf_idf_vector.tocoo())
            keywords=extract_topn_from_vector(feature_names,sorted_items,100)
            sorted_keywords = sorted(keywords, key=lambda k : keywords[k], reverse=True)
            param_keywords = dict()
            # keep the top 5 keywords
            for k in sorted_keywords[:5]:
                param_keywords[k] = keywords[k]
            qt.doQuery(qr.getQueryString('creation.table.insert_cluster_ch'), {'cluster_id':str(idx), 'characteristic':json.dumps(param_keywords)})
            idx += 1
| jhs9396/pyTest | analysis/cluster_analysis.py | cluster_analysis.py | py | 11,076 | python | en | code | 1 | github-code | 13 |
29712158310 | from rest_framework import mixins, generics
from rest_framework.response import Response
from django.http import Http404
from service_user.models import UserModel
from .serializer import ProfileSerializer
from utils import log, ParameterKeys, get_serializer_error_code
class ProfileAPI(generics.GenericAPIView, mixins.RetrieveModelMixin):
    """Read (GET) and partially update (PUT) a user profile.

    Every request must carry the user's `token_id`; it is compared against
    the token stored on the user record before any profile data is
    returned or modified.
    """

    lookup_field = "user_id"
    queryset = UserModel.objects.all()
    serializer_class = ProfileSerializer

    def _invalid(self, code):
        """Build the standard error payload for a rejected request."""
        return Response({
            ParameterKeys.STATUS: ParameterKeys.INVALID,
            ParameterKeys.CODE: code
        })

    def get(self, request, *args, **kwargs):
        """Return the profile of the addressed user."""
        log(self.get, params=request.query_params)
        # getting user object
        try:
            _i = self.get_object()
        except Http404:
            return self._invalid(ParameterKeys.INVALID_USER)
        # verifying token
        # BUG FIX: the invalid-token Response used to be constructed but
        # never returned, so requests with a bad token still got the data.
        token_id = request.query_params.get(ParameterKeys.TOKEN_ID, None)
        if token_id is None or token_id != _i.token_id:
            return self._invalid(ParameterKeys.INVALID_TOKEN)
        # response
        _s = self.get_serializer(_i)
        _r = {ParameterKeys.STATUS: ParameterKeys.SUCCESS}
        _r.update(_s.data)
        log(self.get, ret=_r)
        return Response(_r)

    def put(self, request, *args, **kwargs):
        """Partially update the profile of the addressed user."""
        log(self.put, data=request.data)
        try:
            _i = self.get_object()
        except Http404:
            return self._invalid(ParameterKeys.INVALID_USER)
        # verifying token
        # BUG FIX: same missing `return` as in get() above.
        token_id = request.data.get(ParameterKeys.TOKEN_ID, None)
        if token_id is None or token_id != _i.token_id:
            return self._invalid(ParameterKeys.INVALID_TOKEN)
        # Drop a no-op username update; .get() avoids a KeyError when the
        # payload does not contain the username field at all.
        _d = request.data
        if _d.get(ParameterKeys.USERNAME) == _i.username:
            del _d[ParameterKeys.USERNAME]
        _s = self.get_serializer(_i, data=_d, partial=True)
        if _s.is_valid():
            _s.save()
            ret = {ParameterKeys.STATUS: ParameterKeys.SUCCESS}
            ret.update(_s.data)
            log(self.put, data=ret)
            return Response(ret)
        log(self.put, errors=_s.errors)
        return self._invalid(get_serializer_error_code(_s))
| yseiren87/jellicleSpace | server/service_user/views/profile/view.py | view.py | py | 2,601 | python | en | code | 0 | github-code | 13 |
6427354417 | #!/usr/bin/env python
import numpy as np
import pickle
import torch
from torch import nn
from torch import optim
from torch.nn import functional as F
def load_dataset(fn):
    """
    Load the dataset named "fn" (generated by mkdataset.py and modified
    by gethandpos.py).

    Reads "<fn>.index.pickle" for the list of part files, loads each
    part, validates its length against the index, and returns every
    record concatenated into one list.
    """
    # read index file
    with open(fn + '.index.pickle', 'rb') as f:
        index = pickle.load(f)

    data = []
    for entry in index:
        # load one part file ...
        with open(entry['fn'], 'rb') as f:
            part = pickle.load(f)
        # ... and warn when its length disagrees with the index
        if len(part) != entry['len']:
            print('Warning: incorrect index file: len({}) != {}'.format(entry['fn'], entry['len']))
        data.extend(part)
    return data
# Load the held-out test split (a list of per-frame records).
test = load_dataset('testset')
print('test:', len(test))
def normalize_hand(hand):
    """
    Normalize detected hand keypoints by translating the centroid of the
    detected (non-zero) points to (0, 0).

    A row equal to (0, 0) marks an undetected keypoint and is left
    untouched. Fixes: `np.float` (removed in NumPy 1.24) replaced by the
    builtin `float`, and an all-zero input no longer crashes on the
    average of an empty selection.
    """
    retv = hand.astype(float)
    # Rows where not both coordinates are zero, i.e. detected 2-D points.
    nonzero = (retv == 0).sum(1) != 2
    if nonzero.any():
        retv[nonzero] -= np.average(retv[nonzero], 0)
    return retv
import torchvision.models as models
class mynet(nn.Module):
    """
    The naive FCN model: a small MLP classifier over flattened keypoints.

    shp: shape of one input sample (e.g. the hand keypoint array shape);
    l:   number of output classes.
    """
    def __init__(self, shp, l):
        super(mynet, self).__init__()
        # np.prod replaces np.product, which was removed in NumPy 2.0;
        # int() keeps nn.Linear's in_features a plain Python int.
        self.thenet = nn.Sequential(
            nn.Linear(int(np.prod(shp)), 128), nn.ReLU(True),
            nn.Linear(128, 256), nn.ReLU(True),
            nn.Linear(256, 72), nn.ReLU(True),
            nn.Linear(72, l),
        )

    def forward(self, X):
        # Flatten everything except the batch dimension, then classify.
        v = X.flatten(start_dim=1)
        v = self.thenet(v)
        return v
#### load pretrained model and the number-str mapping of label
# NOTE(review): torch.load unpickles arbitrary objects -- only load
# checkpoints from a trusted source.
obj = torch.load('train-result.pth')
Y_map = obj['Y_map']
EPOCH = obj['epoch']
#### initialize the model and optimizer
# net = models.densenet161()
net = mynet(test[0]['hand'].shape, len(Y_map))
loss = nn.CrossEntropyLoss()
opt = optim.Adagrad(net.parameters(), lr = 0.01)
opt.load_state_dict(obj['opt'])
net.load_state_dict(obj['net'])
print('Model at epoch #{}'.format(EPOCH))
def __linear_gradient(pos, c1, c2):
    """
    Linearly interpolate one scalar channel from c1 to c2, clamping
    pos to the interval [0, 1].
    """
    if pos <= 0:
        return c1
    if pos >= 1:
        return c2
    return (c2 - c1) * pos + c1

def linear_gradient(pos, c1, c2):
    """
    Interpolate between two RGB colors.

    pos: real number; values outside [0, 1] clamp to the endpoints
    c1:  start color as a 3-tuple of numbers
    c2:  end color as a 3-tuple of numbers
    """
    channels = ((c1[0], c2[0]), (c1[1], c2[1]), (c1[2], c2[2]))
    return tuple(__linear_gradient(pos, lo, hi) for lo, hi in channels)
import cv2
from src import util
#### the parameters used to generate the output video
video_fourcc = cv2.VideoWriter_fourcc(*'mp4v') # video codec
fps = 30 # output FPS
output_video = None # the opencv video writer, created lazily on the first frame
output_file = 'result.mp4' # when output_file is set to None, just preview and does not generate video file
max_seq = len(test)
seq = 0
for i in test:
    seq += 1
    img = i['frame']
    hand = i['hand']
    # normalize handpos for prediction
    x = normalize_hand(hand)
    # draw handpose in canvas
    canvas = img.copy()
    canvas = util.draw_handpose(canvas, [hand])
    # get the size of canvas to initialize video writer (if not initialized)
    rect = (0, 0, canvas.shape[1], canvas.shape[0])
    if output_video is None and output_file is not None:
        output_video = cv2.VideoWriter(output_file, video_fourcc, fps, (canvas.shape[1], canvas.shape[0]))
    # do the prediction (no gradients needed at inference time)
    with torch.no_grad():
        predicted = net(torch.from_numpy(x).float().unsqueeze_(0)).squeeze() # perform prediction
        predicted = F.softmax(predicted, 0) # calculate probability
        predicted_class = predicted.argmax() # retrieve the predicted label
        predicted_prob = predicted[predicted_class] # retrieve the prediction probability
    # map probability 0.5..0.9 onto a red-to-green overlay text color
    color = linear_gradient((predicted_prob.item() - 0.5) / 0.4, (31, 15, 197), (14, 161, 19)) # generate overlay text color
    # generate overlay text
    output = '{}, prob={:.0%}'.format(Y_map[predicted_class], predicted_prob)
    if predicted_prob <= 0.5:
        output = 'Undetermined'
    # render overlay text
    cv2.putText(canvas, output, (rect[0] + 10, rect[3] - 10), cv2.FONT_HERSHEY_SIMPLEX, 1., color, 2, cv2.LINE_AA)
    print('Frame {}/{}'.format(seq, max_seq), end=' \r', flush=True)
    # write frame into file, or preview it (ESC == keycode 27 quits)
    if output_video is not None:
        output_video.write(canvas)
    else:
        cv2.imshow('img', canvas)
        if cv2.waitKey(1) == 27:
            break
# cleaning up
cv2.destroyAllWindows()
if output_video is not None:
    output_video.release()
print('')
| yzihan/AR-Gesture-Semantic | test.py | test.py | py | 4,950 | python | en | code | 0 | github-code | 13 |
27832623036 | #!/usr/bin/env python3
import re
import io
# Base path of the raw peeringDB HTML dump pages.
PEERING_BASE = "../peeringDB/rawdumps/dump"
# Raw strings keep the regex backslashes out of Python's string-escape
# processing (non-raw "\>"/"\ " raise SyntaxWarning on Python 3.12+);
# the compiled patterns are unchanged.
ASNRE = re.compile(r'ClearDataTD"\>([0-9]+)\ ')
BWRE = re.compile(r"\>(.+)\ ")
def main():
    """Merge every peeringDB dump page and write ASN,bandwidth rows to CSV."""
    merged = {}
    for page in range(1, 465):
        merged.update(parseFile(PEERING_BASE + str(page) + ".txt"))
    with open("../peerDBResult.csv", "w") as out_fp:
        for asn, bandwidth in merged.items():
            out_fp.write(asn + "," + bandwidth + "\n")
def parseFile(fileName):
    """Extract ASN -> bandwidth pairs from one peeringDB HTML dump page.

    Implements a small line-oriented state machine:
      0 - scanning for a participant row marker
      1 - expecting the ASN cell (ASNRE)
      2 - skipping the single filler line between the two cells
      3 - expecting the bandwidth cell (BWRE)
    """
    retMap = {}
    inFP = open(fileName, "r", encoding="latin-1")
    inRegion = 0   # parser state (see docstring)
    curASN = None  # ASN of the row currently being parsed
    noASN = 0      # rows where no ASN could be extracted
    for line in inFP:
        if "participant_view.php?id=" in line:
            inRegion = 1
        elif inRegion == 1:
            matcher = ASNRE.search(line)
            if matcher:
                curASN = matcher.group(1)
                inRegion = 2
            else:
                noASN = noASN + 1
                inRegion = 0
        elif inRegion == 2:
            # one filler line sits between the ASN and bandwidth cells
            inRegion = 3
        elif inRegion == 3:
            matcher = BWRE.search(line)
            if matcher:
                retMap[curASN] = matcher.group(1)
            else:
                print("shiiiiiii")
            inRegion = 0
            curASN = None
    inFP.close()
    if noASN > 0:
        print("no asn " + str(noASN) + " in " + fileName)
    return retMap
# Run only when executed as a script.
if __name__ == "__main__":
    main()
| pendgaft/cash-nightwing | trafficModel/scripts/extraPeeringDB.py | extraPeeringDB.py | py | 1,438 | python | en | code | 0 | github-code | 13 |
14122513479 | from telegram.ext import Updater, CommandHandler
from gtts import gTTS
import re
import uuid
import os
# The bot token comes from the environment; a missing TELEGRAM_TOKEN
# raises KeyError here, failing fast at startup.
TOKEN = os.environ['TELEGRAM_TOKEN']
updater = Updater(token=TOKEN)
dispatcher = updater.dispatcher
def start(bot, update):
    """Handle /start: send a short greeting back to the chat."""
    bot.sendMessage(chat_id=update.message.chat_id, text="I'm bot please talk to me!")
def say(bot, update):
    """Handle /say <text>: synthesize Russian speech and send it as a voice note.

    Fixes: the voice file handle was never closed before os.remove()
    (leaked descriptor; the remove would fail outright on Windows), and
    the regex is now a raw string with the same meaning.
    """
    text = re.sub(r'/say\s+', '', update.message.text)
    tts = gTTS(text=text, lang='ru')
    message_name = "/tmp/" + uuid.uuid4().hex + "-voice.mp3"
    tts.save(message_name)
    try:
        with open(message_name, 'rb') as voice_file:
            bot.sendVoice(chat_id=update.message.chat_id, voice=voice_file)
    finally:
        # Always clean up the temporary mp3, even if sending fails.
        os.remove(message_name)
# Register the command handlers.
start_handler = CommandHandler('start', start)
say_handler = CommandHandler('say', say)
dispatcher.add_handler(start_handler)
dispatcher.add_handler(say_handler)
# Heroku supplies the listen port via the environment.
# FIX: removed a duplicate mid-file `import os` (os is imported at the top).
PORT = int(os.environ.get('PORT', '5000'))
updater.start_webhook(listen="0.0.0.0", port=PORT, url_path=TOKEN)
updater.bot.setWebhook("https://gopcer-bot.herokuapp.com/" + TOKEN)
updater.idle()
| DevilsNightsix/gopcer | bot.py | bot.py | py | 1,011 | python | en | code | 0 | github-code | 13 |
25949327135 | import torch.nn as nn
import torch
from typing import Optional
import copy
import random
import dgl.function as fn
from sklearn import preprocessing as sk_prep
from gnn_modules import setup_module
class model_ggd(nn.Module):
    """Self-supervised graph model trained with a group-discrimination loss.

    A shared Encoder embeds the (feature-dropped) node features twice: once
    as-is and once with the rows randomly shuffled (the "corrupted" view).
    A small MLP projects both, each node is reduced to a scalar score, and
    a BCE-with-logits loss pushes clean-view scores towards 1 and
    corrupted-view scores towards 0.
    """
    def __init__(
        self,
        in_dim: int,
        num_hidden: int,
        num_layers: int,
        nhead: int, # 4
        activation: str,
        feat_drop: float,
        attn_drop: float,
        negative_slope: float,
        residual: bool,
        norm: Optional[str],
        encoder_type: str = "gcn",
        num_proj_layers=1,
        drop_feat=0.2
    ):
        super(model_ggd, self).__init__()
        # Fraction of feature columns zeroed as augmentation in forward().
        self.drop_feat = drop_feat
        self.encoder = Encoder(
            in_dim,
            num_hidden,
            num_layers,
            nhead, # 4
            activation,
            feat_drop,
            attn_drop,
            negative_slope,
            residual,
            norm,
            encoder_type
        )
        # Projection head applied to both views before scoring.
        self.mlp = torch.nn.ModuleList()
        for i in range(num_proj_layers):
            self.mlp.append(nn.Linear(num_hidden, num_hidden))
        self.loss = nn.BCEWithLogitsLoss()
    def forward(self, g, features):
        """Return the discrimination loss for graph `g` and node `features`."""
        aug_feat = aug_feature_dropout(features, self.drop_feat)
        h_1 = self.encoder(g, aug_feat, corrupt=False)
        h_2 = self.encoder(g, aug_feat, corrupt=True)
        sc_1 = h_1.squeeze(0)
        sc_2 = h_2.squeeze(0)
        for i, lin in enumerate(self.mlp):
            sc_1 = lin(sc_1)
            sc_2 = lin(sc_2)
        # Reduce each node's projection to one scalar score per view.
        sc_1 = sc_1.sum(1).unsqueeze(0)
        sc_2 = sc_2.sum(1).unsqueeze(0)
        logits = torch.cat((sc_1, sc_2), 1)
        # Targets: 1 for every clean-view node, 0 for every corrupted one.
        lbl_1 = torch.ones(1, g.num_nodes())
        lbl_2 = torch.zeros(1, g.num_nodes())
        labels = torch.cat((lbl_1, lbl_2), 1).to(logits.device)
        loss = self.loss(logits, labels)
        return loss
    def embed(self, g, features):
        """Return L2-normalized node embeddings (local plus propagated view)."""
        h_1 = self.encoder(g, features, corrupt=False)
        feat = h_1.clone().squeeze(0)
        # Symmetrically normalized propagation (degree^-1/2 on both sides of
        # the neighbor sum), repeated 10 times to build a smoothed view.
        degs = g.in_degrees().float().clamp(min=1)
        norm = torch.pow(degs, -0.5)
        norm = norm.to(h_1.device).unsqueeze(1)
        for _ in range(10):
            feat = feat * norm
            g.ndata['h2'] = feat
            g.update_all(fn.copy_u('h2', 'm'), fn.sum('m', 'h2'))
            feat = g.ndata.pop('h2')
            feat = feat * norm
        h_2 = feat.unsqueeze(0)
        embeds = (h_1 + h_2).squeeze(0).detach()
        embeds = sk_prep.normalize(X=embeds.cpu().numpy(), norm="l2")
        embeds = torch.FloatTensor(embeds).to(h_1.device)
        return embeds
class Encoder(nn.Module):
    """Builds the configured GNN backbone via setup_module.

    For attention encoders ("gat", "dotgat") the hidden width is split
    across the heads so the concatenated output stays `num_hidden` wide;
    other encoder types use a single head at full width.
    """
    def __init__(
        self,
        in_dim: int,
        num_hidden: int,
        num_layers: int,
        nhead: int, # 4
        activation: str,
        feat_drop: float,
        attn_drop: float,
        negative_slope: float,
        residual: bool,
        norm: Optional[str],
        encoder_type: str = "gcn",
    ):
        super(Encoder, self).__init__()
        self._encoder_type = encoder_type
        # num_hidden must divide evenly across the attention heads.
        assert num_hidden % nhead == 0
        if encoder_type in ("gat", "dotgat"):
            enc_num_hidden = num_hidden // nhead
            enc_nhead = nhead
        else:
            enc_num_hidden = num_hidden
            enc_nhead = 1
        # Both attribute names alias the same underlying module.
        self.conv = self.encoder = setup_module(
            m_type=encoder_type,
            enc_dec="encoding",
            in_dim=in_dim,
            num_hidden=enc_num_hidden,
            out_dim=enc_num_hidden,
            num_layers=num_layers,
            nhead=enc_nhead,
            nhead_out=enc_nhead,
            concat_out=True,
            activation=activation,
            dropout=feat_drop,
            attn_drop=attn_drop,
            negative_slope=negative_slope,
            residual=residual,
            norm=norm,
        )
    def forward(self, g, features, corrupt=False):
        """Encode `features` on graph `g`; shuffle the rows first if `corrupt`."""
        if corrupt:
            # A random node-feature permutation produces the negative view.
            perm = torch.randperm(g.number_of_nodes())
            features = features[perm]
        features = self.conv(g, features)
        return features
def aug_feature_dropout(input_feat, drop_percent=0.2):
    """Return a copy of `input_feat` with a random `drop_percent` fraction
    of its feature columns zeroed out (the input is left untouched).

    Fix: sample directly from range() instead of materializing a throwaway
    index list first.
    """
    aug_input_feat = copy.deepcopy(input_feat)
    drop_feat_num = int(aug_input_feat.shape[1] * drop_percent)
    drop_idx = random.sample(range(aug_input_feat.shape[1]), drop_feat_num)
    aug_input_feat[:, drop_idx] = 0
    return aug_input_feat
| CladernyJorn/SSL-GNN-Project | models/ggd.py | ggd.py | py | 4,490 | python | en | code | 2 | github-code | 13 |
311932532 | import sys
import json
import pprint

import requests
from nltk.sentiment.vader import SentimentIntensityAnalyzer
def classify_all_comments_sentiment(input_file, output_file):
    '''
    Annotate every JSON-lines comment in input_file with its VADER
    sentiment scores (read from the "body" field) and write the augmented
    records to output_file, one JSON object per line.

    BUG FIX: `json` was used here without ever being imported; the `with`
    blocks also guarantee both files are closed (the input handle was
    previously leaked).
    '''
    sia = SentimentIntensityAnalyzer()
    total_obj = 0
    written_obj = 0
    with open(input_file, 'r') as infile, open(output_file, 'w') as filtered_file:
        for line in infile:
            total_obj += 1
            data = json.loads(line)
            data['sentiment'] = sia.polarity_scores(data['body'])
            json.dump(data, filtered_file)
            filtered_file.write('\n')
            written_obj += 1
    print('Total objects: {}'.format(total_obj))
    print('Written objects: {}'.format(written_obj))
class EmotionAPI:
    '''
    Thin client for the ParallelDots emotional text analysis API
    (https://www.paralleldots.com).

    FIX: the request/parse helpers were written without `self` but lacked
    @staticmethod, so calling them on an *instance* raised a TypeError;
    they are now proper static methods (class-level calls are unchanged).
    '''
    url = 'https://apis.paralleldots.com/v3/'
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    api_key = ''

    def __init__(self, apiExtension):
        # Append the endpoint name (e.g. 'emotion') to the base URL.
        self.url = '{}{}'.format(self.url, apiExtension)

    @staticmethod
    def make_emotion_request(text):
        '''
        POST *text* to the /emotion endpoint and return the raw response.

        A successful response body looks like:
        {
            'emotion': {
                'probabilities': {'angry': ..., 'indifferent': ...,
                                  'sad': ..., 'excited': ..., 'happy': ...},
                'emotion': 'sad'
            },
            'usage': '...'
        }
        '''
        api_object = EmotionAPI('emotion')
        payload = 'text={}&api_key={}&lang_code=en'.format(text, api_object.api_key)
        response = requests.request('POST', api_object.url, data=payload, headers=api_object.headers)
        return response

    @staticmethod
    def get_emotion_from_response(response):
        '''Return the dominant emotion label from an API response.

        NOTE(review): ast.literal_eval cannot parse JSON true/false/null;
        presumably the API returns Python-literal-compatible text -- verify,
        otherwise switch to response.json().
        '''
        dictionary = ast.literal_eval(response.text)
        return dictionary['emotion']['emotion']

    @staticmethod
    def get_probabilities_from_response(response):
        '''Return the per-emotion probability dict from an API response.'''
        dictionary = ast.literal_eval(response.text)
        return dictionary['emotion']['probabilities']
| jshiohaha/redditCommentsAndPresidentialElection | src/comment_classifier.py | comment_classifier.py | py | 2,376 | python | en | code | 3 | github-code | 13 |
43298745859 | from OpenGL.GL import *
from OpenGL.GLU import *
from glfw.GLFW import *
attributes = [
    # position (x, y, z)   color (R, G, B, A)
    ((-0.866, -0.75, 0), (1, 0, 0, 1)),
    (( 0.866, -0.75, 0), (1, 1, 0, 1)),
    (( 0, 0.75, 0), (0, 0, 1, 1))
]
if glfwInit() == GLFW_FALSE:
    raise Exception("error: init glfw")
# Request 8x multisampling before the window/context is created.
glfwWindowHint(GLFW_SAMPLES, 8)
window = glfwCreateWindow(640, 480, "OpenGL Window", None, None)
glfwMakeContextCurrent(window)
def size_callback(window, _w, _h):
    # Keep the viewport and the orthographic projection matched to the
    # framebuffer so the triangle keeps its aspect ratio on resize.
    width, height = glfwGetFramebufferSize(window)
    glViewport(0, 0, width, height)
    aspect = width / height
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    if aspect >= 1:
        glOrtho(-aspect, aspect, -1, 1, -1, 1)
    else:
        glOrtho(-1, 1, -1/aspect, 1/aspect, -1, 1)
    glMatrixMode(GL_MODELVIEW)
glfwSetWindowSizeCallback(window, size_callback)
size_callback(window, 640, 480)
# Query and print context/driver information.
vendor, renderer = glGetString(GL_VENDOR).decode("utf-8"), glGetString(GL_RENDERER).decode("utf-8")
version, glsl_version = glGetString(GL_VERSION).decode("utf-8"), glGetString(GL_SHADING_LANGUAGE_VERSION).decode("utf-8")
major, minor = glGetInteger(GL_MAJOR_VERSION), glGetInteger(GL_MINOR_VERSION)
extensions = [glGetStringi(GL_EXTENSIONS, i) for i in range(glGetInteger(GL_NUM_EXTENSIONS))]
print(f"\n{vendor} / {renderer}\n OpenGL: {version}\n GLSL: {glsl_version}\n Context {major}.{minor}\n")
@GLDEBUGPROC
def __CB_OpenGL_DebugMessage(source, type, id, severity, length, message, userParam):
    # Debug-output callback: `message` is a raw char buffer, `length` its size.
    msg = message[0:length]
    print(msg.decode("utf-8"))
glDebugMessageCallback(__CB_OpenGL_DebugMessage, None)
errors_only = False
if errors_only:
    glDebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DONT_CARE, 0, None, GL_FALSE)
    glDebugMessageControl(GL_DEBUG_SOURCE_API, GL_DEBUG_TYPE_ERROR, GL_DONT_CARE, 0, None, GL_TRUE)
else:
    glDebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DONT_CARE, 0, None, GL_TRUE)
glEnable(GL_DEBUG_OUTPUT)
glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS)
glEnable(GL_MULTISAMPLE) # default
glEnable(GL_DEPTH_TEST)
glClearColor(0.0, 0.0, 0.0, 0.0)
# Main loop: immediate-mode rendering of one colored triangle.
while not glfwWindowShouldClose(window):
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glBegin(GL_TRIANGLES)
    for vertex, color in attributes:
        glColor4f(*color)
        glVertex3f(*vertex)
    glEnd()
    glfwSwapBuffers(window)
    glfwPollEvents()
glfwTerminate()
exit() | Rabbid76/graphics-snippets | example/python/opengl_hello_triangle/hello_triangle_glfw_begin_end.py | hello_triangle_glfw_begin_end.py | py | 2,395 | python | en | code | 172 | github-code | 13 |
41863256946 | #!/usr/bin/env python3
# Author:Tanaya Jadhav
# Uses a list of known barcodes to find them in a fastq file and
# create 10 fastq files
# all sequences in 1 fastq file contain the same barcode
from itertools import islice
def main():
barcodes = ['ATGAGATCTT', 'AGCTCATTTC', 'TGAAAATCTT', 'TATCCAGCCA', 'AGGCAGGCAG', 'CTTGTTACTA', 'AAGGCACAAG',
'TGCTCGCTGA', 'GTACCGCCGT', 'CCTCACCAGC']
filelist = []
for i in barcodes:
file_name_string = i + '_seq.fq'
filelist.append(file_name_string)
filedict = dict(zip(barcodes, filelist))
# print(filedict)
with open('/Users/tanayajadhav/Desktop/CompGen/week3_seqs_bc.fq', 'r') as infile:
x = 5
while x == 5:
lines = []
x=6
for line in infile:
x=5
lines.append(line)
if len(lines) >= 4:
bc = lines[1][:10]
if bc in filedict:
with open(filedict[bc], 'a') as o:
o.write(lines[0] + lines[1] + lines[2] + lines[3])
break
print('done')
if __name__ == '__main__':
main()
| tanaya-jadhav/Python | barcodesorter.py | barcodesorter.py | py | 1,188 | python | en | code | 0 | github-code | 13 |
11630860732 | #!/usr/bin/python
# -*- coding: latin-1 -*-
import re
import sys
# Exemplo de programa (programa (c) da pagina 20 com a macro GOTO expandida)
program = """
[A2] S1 = S1 - 1
if (S1 != 0): GOTO A2
[A] if (S2 != 0): GOTO B
K = K + 1
if (K != 0): GOTO C
[B] S2 = S2 - 1
S1 = S1 + 1
K1 = K1 + 1
K = K + 1
if (K != 0): GOTO A
[C] if (K1 != 0): GOTO D
K = K + 1
if (K != 0): GOTO E
[D] K1 = K1 - 1
S2 = S2 + 1
K = K + 1
if (K != 0): GOTO C
[E]
"""
DLABEL = "\s*(?:\[(\w+)\])?\s*"
GOTO = "\s*GOTO\s*(\w+)\s*"
VAR = "\s*([a-zA-Z]+(?:\w*#*)*)\s*"
LABEL = "\s*([a-zA-Z]\d*)\s*"
eip = 3
counter = 0
labels = {}
varss = {
'S1': 5,
'S2': 10,
}
if __name__ == "__main__":
print("[i]: Starting program...")
# ------- Parser -------#
program = program.split("\n")
peip = 0
while(peip < len(program)):
###
### Expanssão macro
###
line = program[peip]
# Search for a match
match = re.match(f"\s*(?:\[(\w+)\])?\s*", line)
(label,) = match.groups()
if (label and label not in labels):
labels[label] = peip
peip += 1
# Print program
for i, line in enumerate(program):
print(f"{i}\t{line}")
# ------- Interpreter -------#
while(eip < len(program)):
line = program[eip]
l = line.replace('\n', '')
print(f"({eip})\t{varss} -->", end="")
# Search for a match
match = False
# X = X + 1 | X = X - 1
match = re.match(f"{DLABEL}{VAR}={VAR}\s*(\+|\-)\s*1\s*$", line)
if match:
(_, v, _, op) = match.groups()
if (v not in varss):
varss[v] = 0
if (op == '+'):
varss[v] += 1
elif (op == '-'):
if(varss[v] > 0):
varss[v] -= 1
# if (X != 0): GOTO A
match = re.match(f"{DLABEL}if\s*\({VAR}!=\s*0\s*\):\s*GOTO\s*{LABEL}\s*$", line)
if match:
(_, v, label) = match.groups()
if varss.get(v, 0) != 0:
if label not in labels:
break
eip = labels[label]
print(f" {varss}")
continue
print(f" {varss}")
eip += 1
print(varss)
print("[i]: Ending program...")
| DaviChavesPinheiro/teoria-da-computacao | Tarefa01 - Ultima Questão.py | Tarefa01 - Ultima Questão.py | py | 2,564 | python | en | code | 0 | github-code | 13 |
2625085145 | # -*- coding: utf-8 -*-
import logging
import pytz
import datetime
from pyramid.httpexceptions import HTTPServerError, HTTPOk
from pyramid.view import view_config
from stalker.db.session import DBSession
from stalker import Project, StatusList, Status, Sequence, Entity, Studio
import stalker_pyramid
from stalker_pyramid.views import get_logged_in_user, milliseconds_since_epoch, \
PermissionChecker
from stalker_pyramid import logger_name
logger = logging.getLogger(logger_name)
@view_config(
route_name='create_sequence'
)
def create_sequence(request):
"""runs when adding a new sequence
"""
logged_in_user = get_logged_in_user(request)
name = request.params.get('name')
code = request.params.get('code')
status_id = request.params.get('status_id')
status = Status.query.filter_by(id=status_id).first()
project_id = request.params.get('project_id')
project = Project.query.filter_by(id=project_id).first()
logger.debug('project_id : %s' % project_id)
if name and code and status and project:
# get descriptions
description = request.params.get('description')
# get the status_list
status_list = StatusList.query.filter_by(
target_entity_type='Sequence'
).first()
# there should be a status_list
# TODO: you should think about how much possible this is
if status_list is None:
return HTTPServerError(detail='No StatusList found')
new_sequence = Sequence(
name=name,
code=code,
description=description,
status_list=status_list,
status=status,
created_by=logged_in_user,
project=project
)
DBSession.add(new_sequence)
else:
logger.debug('there are missing parameters')
logger.debug('name : %s' % name)
logger.debug('code : %s' % code)
logger.debug('status : %s' % status)
logger.debug('project : %s' % project)
HTTPServerError()
return HTTPOk()
@view_config(
route_name='update_sequence'
)
def update_sequence(request):
"""runs when adding a new sequence
"""
logged_in_user = get_logged_in_user(request)
sequence_id = request.params.get('sequence_id')
sequence = Sequence.query.filter_by(id=sequence_id).first()
name = request.params.get('name')
code = request.params.get('code')
status_id = request.params.get('status_id')
status = Status.query.filter_by(id=status_id).first()
if sequence and code and name and status:
# get descriptions
description = request.params.get('description')
#update the sequence
sequence.name = name
sequence.code = code
sequence.description = description
sequence.status = status
sequence.updated_by = logged_in_user
date_updated = datetime.datetime.now(pytz.utc)
sequence.date_updated = date_updated
DBSession.add(sequence)
else:
logger.debug('there are missing parameters')
logger.debug('name : %s' % name)
logger.debug('status : %s' % status)
HTTPServerError()
return HTTPOk()
@view_config(
route_name='get_sequences',
renderer='json'
)
def get_sequences(request):
"""returns all sequences as a json data
"""
return [
{
'id': sequence.id,
'name': sequence.name,
'status': sequence.status.name,
'status_color': sequence.status.html_class,
'user_id': sequence.created_by.id,
'user_name': sequence.created_by.name,
'thumbnail_full_path': sequence.thumbnail.full_path
if sequence.thumbnail else None
}
for sequence in Sequence.query.all()
]
@view_config(
route_name='get_project_sequences_count',
renderer='json'
)
@view_config(
route_name='get_entity_sequences_count',
renderer='json'
)
def get_project_sequences_count(request):
"""returns the count of sequences in a project
"""
project_id = request.matchdict.get('id', -1)
sql_query = """select
count(1)
from "Sequences"
join "Tasks" on "Sequences".id = "Tasks".id
where "Tasks".project_id = %s""" % project_id
return DBSession.connection().execute(sql_query).fetchone()[0]
@view_config(
route_name='get_project_sequences',
renderer='json'
)
@view_config(
route_name='get_entity_sequences',
renderer='json'
)
def get_project_sequences(request):
"""returns the related sequences of the given project as a json data
"""
# TODO: use pure SQL query
entity_id = request.matchdict.get('id', -1)
entity = Entity.query.filter_by(id=entity_id).first()
return [
{
'thumbnail_full_path': sequence.thumbnail.full_path if sequence.thumbnail else None,
'code': sequence.code,
'id': sequence.id,
'name': sequence.name,
'status': sequence.status.name,
'status_code': sequence.status.code.lower(),
'status_color': sequence.status.html_class if sequence.status.html_class else 'grey',
'created_by_id': sequence.created_by.id if sequence.created_by else None,
'created_by_name': sequence.created_by.name if sequence.created_by else None,
'description': sequence.description,
'date_created': milliseconds_since_epoch(sequence.date_created),
'percent_complete': sequence.percent_complete
}
for sequence in entity.sequences
]
@view_config(
route_name='list_sequence_tasks',
renderer='templates/task/list/list_sequence_tasks.jinja2'
)
def list_sequence_tasks(request):
"""called when reviewing tasks
"""
logger.debug('list_sequence_tasks starts******************')
logged_in_user = get_logged_in_user(request)
studio = Studio.query.first()
entity_id = request.matchdict.get('id')
if not entity_id:
entity = studio
else:
entity = Entity.query.filter_by(id=entity_id).first()
task_type = request.params.get('task_type', None)
logger.debug('task_type %s', task_type)
projects = Project.query.all()
mode = request.matchdict.get('mode', None)
came_from = request.params.get('came_from', request.url)
return {
'mode': mode,
'entity': entity,
'has_permission': PermissionChecker(request),
'logged_in_user': logged_in_user,
'milliseconds_since_epoch': milliseconds_since_epoch,
'stalker_pyramid': stalker_pyramid,
'projects': projects,
'studio': studio,
'came_from': came_from,
'task_type':task_type
}
| eoyilmaz/stalker_pyramid | stalker_pyramid/views/sequence.py | sequence.py | py | 6,777 | python | en | code | 6 | github-code | 13 |
19067987020 | # A deck with 26 red and 26 black.
# Payoff: Red = +1, Black = -1
# Can stop any time.
# Find the best strategy
# Stop when payoff reach k and remaining cards only x left
import random
class Deck:
def __init__(self, plus, minus):
self.plus_cards = plus
self.minus_cards = minus
def draw(self):
deck_count = self.plus_cards + self.minus_cards
if deck_count > 0:
p_plus = self.plus_cards / deck_count
if random.uniform(0,1) < p_plus:
self.plus_cards += -1
return 1
else:
self.minus_cards += -1
return -1
else:
return 0
def terminate_condition(deck, payoff):
deck_count = deck.plus_cards + deck.minus_cards
if deck_count == 0: return True
elif payoff > 3: return True
else: False
def play_game(terminate_condition_method, *args):
payoff = 0
deck = Deck(26,26)
while not(terminate_condition_method(*args)):
payoff += deck.draw()
return payoff
def play_many_games(count):
total_payoff = 0
for x in range(count):
total_payoff += play_game(terminate_condition(deck, payoff))
return total_payoff / count
print(play_many_games(10000))
| laichunpongben/machine_learning | deck.py | deck.py | py | 1,280 | python | en | code | 0 | github-code | 13 |
35897092393 | ##Perceptron Gate
import pandas as pd
import matplotlib as plt
import matplotlib.pylab as plt
import numpy as np
def AND(x1,x2) :
b = -0.7
if ((x1!=1)&(x1!=0))|((x2!=1)&(x2!=0)) :
print("invalid input")
else :
w = np.array([0.5, 0.5])
x = np.array([x1,x2])
std = np.sum(w*x) + b
if std > 0 :
return(1)
else : return(0)
def OR(x1,x2) :
b = -0.2
if ((x1!=1)&(x1!=0))|((x2!=1)&(x2!=0)) :
print("invalid input")
else :
w = np.array([0.5, 0.5])
x = np.array([x1,x2])
std = np.sum(w*x) + b
if std > 0 :
return(1)
else : return(0)
def NAND(x1,x2) :
b = -0.7
if ((x1!=1)&(x1!=0))|((x2!=1)&(x2!=0)) :
print("invalid input")
else :
w = np.array([0.5, 0.5])
x = np.array([x1,x2])
std = -(np.sum(w*x) + b)
if std > 0 :
return(1)
else : return(0)
def XOR(x1, x2) :
a = NAND(x1, x2)
b = OR(x1,x2)
c = AND(a,b)
return c
def step_function(x) :
return np.array(x > 0 , dtype = np.int)
def sigmoid(x) :
return 1/(1+np.exp(-x))
def relu(x) :
return np.maximum(0,x)
def st_fr(X) :
W1 = np.array([[0.1, 0.3, 0.6],
[0.4, 0.1, 0.6]])
B1 = np.array([0.3,0.6,0.1])
if len(X) != 2 :
return print("Incorrect data set")
else :
A1 = np.dot(X, W1) + B1
Y1 = sigmoid(A1)
W2 = np.array([[0.1, 0.3],
[0.4, 0.1],
[0.9, 0.2]])
B2 = np.array([0.3,0.6])
A2 = np.dot(Y1, W2) + B2
Y2 = relu(A2)
W3 = np.array([[0.8, 0.9],
[0.3, 0.2]])
B3 = np.array([0.3,-0.4])
A3 = np.dot(Y2, W3) + B3
Y3 = sigmoid(A3)
return Y2
def soft_max(a) :
exa = np.exp(a)
y = exa / sum(exa)
return y
| popper6508/just_practice | Data_Processing_Practice/Deep Learning base.py | Deep Learning base.py | py | 1,866 | python | en | code | 0 | github-code | 13 |
70293427858 | from fastapi import HTTPException, status
from app.repository.route_repository import RouteRepository
from app.repository.user_repository import UserRepository
from app.model.route import RouteRequestDTO, Route
from app.model.email import Email
from app.utils.mail_sender import MailSender
class RouteService:
def __init__(self) -> None:
pass
async def get_all_routes(self):
return await RouteRepository.get_all_routes()
async def get_route_by_id(self, id: int):
result = await RouteRepository.get_route_by_id(id)
if result is not None:
return result
else:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="No such user in database")
async def create_route(self, data: RouteRequestDTO):
return await RouteRepository.create_route(data)
async def delete_route(self, id: int):
return await RouteRepository.delete_route(id)
async def update_route(self, id: int, data: RouteRequestDTO):
return await RouteRepository.update_route(id, data)
async def select_route(self, id: int, email: str):
route = await RouteRepository.get_route_by_id(id)
if route is not None and email is not None:
route_email_data = self.__create_route_confirmation_email(route, email)
await MailSender().send_selected_route_data_email(route_email_data)
else:
raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Something bad happened. Our monkeys are working on it")
def __create_route_confirmation_email(self, data: Route, user_email: str) -> Email:
email = Email(
email=[user_email],
body={
"route_id": data.id,
"start_location": data.start_location,
"end_location": data.end_location,
"cost": data.price_per_km,
"distance": data.distance,
"date_of_execution": data.date_of_execution.strftime("%m/%d/%Y, %H:%M:%S")
}
)
return email
route_service = RouteService() | sorenowy/codetaintransportationappication | app/service/route_service.py | route_service.py | py | 2,237 | python | en | code | 0 | github-code | 13 |
20239996000 | # coding: utf-8
"""
- Classe Principal
- Controla entrada/saida dos widgets na tela principal
conforme as ações principais da aplicação:
- Novo Registro
- Pesquisa
- Alterar
- Apagar
"""
# ----- Importações ----- #
import sqlite3
import PyIntroDados
import PyPesquisa
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.popup import Popup
from kivy.uix.label import Label
# ----- Classe Principal ----- #
class Principal(FloatLayout):
def __init__(self, **kwargs):
super().__init__()
self.cont1 = 0 # ----- Controla WidIntroDados I \ O ----- #
self.cont2 = 0 # ----- Controla WidPesquisa I \ O ----- #
self.cont3 = 0 # ----- Controla WidResultadoPesquisa I \ O ----- #
# ----- Instancia Classe Formulário ----- #
self.widintrodados = PyIntroDados.WidIntroDados()
# ----- Instancia Classe Pesquisa ----- #
self.widpesquisa = PyPesquisa.WidPesquisa()
# ----- Instancia Classe Resultado da Pesquisa ----- #
self.widrespesquisa = PyPesquisa.ResPesquisaWid()
# ----- Variavel da ação desejada dos botões principais ----- #
# 'Novo Registro' - 'Pesquisa' - 'Alterar Registro' - 'Apagar Registro' #
self.tipo_acao_principal = None
def acao_principal(self, tipoacao):
# ----- Confere se tem um formulário já aberto ----- #
if self.cont1 == 1:
self.remove_widget(self.widintrodados) # Remove wid intro dados
self.cont1 = 0
if self.cont2 == 1:
self.remove_widget(self.widpesquisa) # Remove wid pesquisa
self.cont2 = 0
if self.cont3 == 1:
self.remove_widget(self.widrespesquisa) # Remove wid resultado da pesquisa
self.cont3 = 0
if self.cont1 == 0:
if tipoacao == 'Novo Registro':
self.wid_intro_dados(tipoacao)
else:
self.wid_pesquisa(tipoacao)
self.tipo_acao_principal = tipoacao
# if tipoacao == 'Alterar Registro':
# self.wid_pesquisa(tipoacao)
# if tipoacao == 'Apagar Registro':
# self.wid_pesquisa(tipoacao)
# if tipoacao == 'Pesquisa':
# self.wid_pesquisa(tipoacao)
# ----- Abre formulario introdução de dados na ação principal ----- #
def wid_intro_dados(self, tipowid):
# ----- Abre formulario introdução de dados ----- #
self.add_widget(self.widintrodados)
self.widintrodados.ids.titulo_widintrodados.text = tipowid # Titulo formulário
self.widintrodados.limpa_widintrodados() # Prepara widgets novo formulário
self.widintrodados.tipo_acao = tipowid # Indica a ação desejada
self.cont1 = 1
# ----- Abre widget de pesquisa ----- #
def wid_pesquisa(self, tipoacao):
self.add_widget(self.widpesquisa)
self.widintrodados.limpa_widintrodados() # Prepara widgets novo formulário
self.widpesquisa.limpa_widpesquisa() # Prepara widgets para nova pesquisa
self.widpesquisa.ids.textopesquisa.text = tipoacao
self.widpesquisa.tipo_acao_pesq = tipoacao
self.cont2 = 1
# ----- Opções de wid de resultado da pesquisa ----- #
def opcoes_pesquisa(self, texto):
if self.tipo_acao_principal != 'Pesquisa':
self.open_wid_intro_dados(texto)
if self.tipo_acao_principal == 'Pesquisa':
self.open_wid_res_pesquisa(texto)
# ----- Abre formulario introdução de dados depois da pesquisa ----- #
def open_wid_intro_dados(self, nome):
# ----- Confere se tem um formulário já aberto ----- #
if self.cont1 == 1:
self.widintrodados.limpa_widintrodados()
self.remove_widget(self.widintrodados) # Remove wid intro dados
self.add_widget(self.widintrodados)
self.widintrodados.ids.titulo_widintrodados.text = self.widpesquisa.tipo_acao_pesq
self.widintrodados.tipo_acao = self.widpesquisa.tipo_acao_pesq # Indica a ação desejada
self.widintrodados.ids.btn_limpar.disabled = True
self.widintrodados.inicio_alterar_dados(nome)
self.cont1 = 1
# ----- Fecha wids ----- #
def close_wid(self):
if self.cont1 == 1:
self.widintrodados.limpa_widintrodados()
self.remove_widget(self.widintrodados)
self.cont1 = 0
if self.cont3 == 1:
self.remove_widget(self.widrespesquisa)
self.cont3 = 0
# ----- Abre wid resultados da pesquisa ----- #
def open_wid_res_pesquisa(self, texto):
if self.cont3 == 1:
self.remove_widget(self.widrespesquisa)
self.cont3 = 0
self.add_widget(self.widrespesquisa)
self.widrespesquisa.resultado_pesq(texto)
self.cont3 = 1
# ----- Ações iniciais para apagar registro ----- #
def apagar_registro(self):
self.widintrodados.apagar_registro_f()
self.remove_widget(self.widpesquisa)
self.remove_widget(self.widintrodados)
# ----- Classes Popups ----- #
class PopupSalvar(Popup):
pass
class PopupFaltaDados(Popup):
pass
class PopupConfirmacao(Popup):
# ----- Texto acentuado ----- #
texto = 'Não'
# ----- Modelo Wid Label ----- #
class WidLabelTexto(Label):
historico = 'Histórico:'
certidao = 'Certidão:'
posicao = 'Posição:'
# ----- Modelo Wid Label resultados ----- #
class WidLabelResultados(Label):
pass
| Antonio-Neves/Arquivo-Passivo | PyPrincipal.py | PyPrincipal.py | py | 5,616 | python | pt | code | 6 | github-code | 13 |
18416442121 | first_player_name = input()
second_player_name = input()
player1card = input()
player2card = ''
winner = ''
player1_total_points = 0
player2_total_points = 0
Number_wars = False
while player1card != 'End of game':
player2card = input()
player1card = int(player1card)
player2card = int(player2card)
player1points = 0
player2points = 0
if player1card == player2card:
Number_wars = True
print('Number wars!')
player1card = int(input())
player2card = int(input())
if player1card > player2card:
winner = first_player_name
break
elif player2card > player1card:
winner = second_player_name
break
if player1card > player2card:
player1points = player1card - player2card
elif player2card > player1card:
player2points = player2card - player1card
player1_total_points += player1points
player2_total_points += player2points
player1card = input()
if Number_wars == True:
if winner == first_player_name:
print(f'{first_player_name} is winner with {player1_total_points} points')
elif winner == second_player_name:
print(f'{second_player_name} is winner with {player2_total_points} points')
else:
print(f'{first_player_name} has {player1_total_points} points')
print(f'{second_player_name} has {player2_total_points} points') | MiroVatov/Python-SoftUni | Python Basic 2020/Number wars ver 03.py | Number wars ver 03.py | py | 1,444 | python | en | code | 0 | github-code | 13 |
22426027191 | #website scapping
import requests
def getHTML(url):
try:
r = requests.get(url,timeout=30)
r.raise_for_status()
r.encoding = r.apparent_encoding
return (r.text)
except:
return ("ERROR")
if __name__== "__main__":
url = "http://www.duq.edu"
print(getHTML(url))
| YaleYe/HOPPYTIME | Scaping/webscapping.py | webscapping.py | py | 341 | python | en | code | 0 | github-code | 13 |
22322378608 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unicodedata
from urllib.parse import urlparse, urlunparse
import piexif
import requests
import scrapy
URL = "http://my.yoolib.com/mht/collection/?esp=0"
DATA_DIR = "/home/jean-baptiste/mht_files"
WIDTH = 1024
HEIGHT = 640
TRANSLATE_TABLE = {
"\xa0": "Dimensions du document"
}
class ThumbnailsSpider(scrapy.Spider):
FILENAME_TRANS_TAB = str.maketrans(*["/\0", "__"])
name = "thumbnails"
start_urls = [URL]
def parse(self, response):
nav_selector = '.wp-pagenavi a::attr(href)'
next_page_url = response.css(nav_selector).extract()[-1]
for media_url in self.parse_result_page(response):
yield scrapy.Request(
response.urljoin(media_url),
callback=self.parse_media_page
)
if not response.url.endswith(next_page_url):
yield scrapy.Request(
response.urljoin(next_page_url),
callback=self.parse
)
def parse_result_page(self, response):
title_selector = 'h2.title'
for brickset in response.css(title_selector):
yield brickset.css("a::attr(href)").extract_first()
def parse_media_page(self, response):
infos = {"scrapper_url": response.url}
title_selector = '//h1[@class="title"]/text()'
infos['titre'] = response.xpath(title_selector).extract_first()
media_infos_selector = '#media_info'
media_infos = response.css(media_infos_selector).extract_first()
infos['description'] = scrapy.Selector(text=media_infos).xpath('//p/text()').extract_first()
extra_infos = scrapy.Selector(text=media_infos).css('.itembloc1').extract()
for extra_info in extra_infos:
selector = scrapy.Selector(text=extra_info)
key = selector.xpath('//div[@class="keybloc1"]/text()').extract_first()
if key in TRANSLATE_TABLE:
key = TRANSLATE_TABLE[key]
text_value = selector.xpath('//div[@class="valuebloc1"]/text()').extract_first()
link_value = selector.xpath('//div[@class="valuebloc1"]/a/text()').extract_first()
infos[key] = text_value or link_value
img_src_selector = '#yoolib_img img::attr(src)'
img_src = response.css(img_src_selector).extract_first()
parsed_url = list(urlparse(img_src))
params = [x for x in parsed_url[4].split('&') if not (x.startswith('WID') or x.startswith('HEI')
or x.startswith('CVT'))]
params.extend(["WID=" + str(WIDTH), "HEI=" + str(HEIGHT), "CVT=JPEG"])
parsed_url[4] = '&'.join(params)
ThumbnailsSpider.write_and_tag_picture(urlunparse(parsed_url), infos)
@staticmethod
def write_and_tag_picture(picture_url, media_infos):
file_name = DATA_DIR + '/' + media_infos['titre'].translate(ThumbnailsSpider.FILENAME_TRANS_TAB) + '.jpeg'
with open(file_name, 'wb') as handle:
response = requests.get(picture_url, stream=True)
for block in response.iter_content(1024):
handle.write(block)
exif_datas = piexif.load(file_name)
exif_datas['Exif'][piexif.ExifIFD.UserComment] = ThumbnailsSpider._remove_accents(str(media_infos))
exif_bytes = piexif.dump(exif_datas)
piexif.insert(exif_bytes, file_name)
@staticmethod
def _remove_accents(input_str):
nfkd_form = unicodedata.normalize('NFKD', input_str)
only_ascii = nfkd_form.encode('ASCII', 'ignore')
return only_ascii
| spanska/yoolib-scrapper | spiders/thumbnails_spider.py | thumbnails_spider.py | py | 3,639 | python | en | code | 0 | github-code | 13 |
38829786885 |
cpf = '11245257401'
new_cpf = cpf[:-2]
reverso = 10
total = 0
for index in range(19):
if index > 8:
index -= 9
total += int(new_cpf[index]) * reverso
reverso -= 1
if reverso < 2:
reverso = 11
d = 11 - (total % 11)
if d > 9:
d = 0
total = 0
new_cpf += str(d)
sequencia = new_cpf == str(new_cpf[0]) * len(cpf)
if cpf == new_cpf and not sequencia:
print("Válido")
else:
print("Inválido")
| Thiago18l/Python-Projects | src/Advanced/cpf.py | cpf.py | py | 479 | python | en | code | 0 | github-code | 13 |
31753282082 | # Get the loan details
money_owed = float(input("How much do you owe?\n"))
interest_rate = int(input("What is the interest rate?\n"))
payment = float(input("What is the monthly payment?\n"))
months = int(input("How many months do you want to calculate?\n"))
monthly_rate = interest_rate / 100 / 12
for i in range(months):
interest_for_this_month = money_owed * monthly_rate
money_owed = money_owed + interest_for_this_month
money_owed = money_owed - payment
print('Month ', i, ' Paid:', payment, 'interest', interest_for_this_month, 'remaining balance', money_owed)
| palaniappa/dummy | python-learning/loan.py | loan.py | py | 586 | python | en | code | 0 | github-code | 13 |
3947193048 | # -*- coding: utf-8 -*-
import time
import glob
import datetime
import traceback
import itertools
from threading import Thread
from xiyouhelper.tray import SysTrayIcon
from xiyouhelper.disable_system_proxy import disable_proxy
from xiyouhelper.hide_window import hide_self, show_self
def show_window(sysTrayIcon):
show_self()
def hide_window(sysTrayIcon):
hide_self()
def start(sysTrayIcon):
if not control["run"]:
control["run"] = True
print("已启动")
else:
print("运行中,无需操作")
def stop(sysTrayIcon):
if control["run"]:
control["run"] = False
print("已暂停")
else:
print("暂停中,无需操作")
def bye(sysTrayIcon):
print('Bye.')
def run_once(sysTrayIcon=None):
print("-*-" * 10)
print("Datetime: %s" % datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
print("Running: %s" % control["run"])
if control["run"]:
try:
disable_proxy()
except:
print(traceback.format_exc())
def run():
while True:
run_once()
time.sleep(60)
control = {"run": True}
t = Thread(target=run)
t.setDaemon(True)
t.start()
icons = itertools.cycle(glob.glob('*.ico'))
hover_text = "西游代理辅助 - 周期性关闭系统代理"
menu_options = (
('运行一次', next(icons), run_once),
('启动', next(icons), start),
('暂停', next(icons), stop),
('显示', next(icons), show_window),
('隐藏', next(icons), hide_window),
)
SysTrayIcon(next(icons), hover_text, menu_options, on_quit=bye, default_menu_index=1)
| shapled/xiyou-helper | run.py | run.py | py | 1,608 | python | en | code | 0 | github-code | 13 |
26791751843 | from replit import clear
from art import logo
#HINT: You can call clear() to clear the output in the console.
print(logo)
print("Welcome to the Secret Auction Program.")
repeat = "yes"
bidding = {}
while repeat == "yes":
key = input("\nWhat is your name? ")
value = int(input("What's your bid? $"))
bidding[key] = value
repeat = input("Are there any other bidders? Type 'yes' or 'no'.\n").lower()
# clear()
max = 0
for unit in bidding:
compare = bidding[unit]
if compare > max:
max = compare
print(f"\nThe winner is {unit} with a bid of ${max}.") | nilayhangarge/100-Days-of-Code-Challenge | Project/Day-09 Blind Auction/main.py | main.py | py | 566 | python | en | code | 0 | github-code | 13 |
19123118765 | """
This module detects the word given the audio file using HMM techniques
1. We need to train 3 HMMs to detect 3 words: go, down, stop
2. Create obs for each file for all files from each directory for go, down, stop
3. Using the observations and some N states, train the corresponding model
4. Given a new file, compute the observations, pass it through the models and find argmax
"""
import json
import os
import pickle
import random
import numpy as np
from scipy.cluster.vq import vq, kmeans, whiten
from codebook_creator import create_mfcc_dataset_for_codebook, collect_training_data, get_mfcc_vectors
from myhmm_scaled import MyHmmScaled
model_file_name = r"./models/a.json"
BASE_DATAPATH = "data"
STOP_PATH = "data\stop"
DOWN_PATH = "data\down"
GO_PATH = "data\go"
def create_initial_model(num_states, num_symbols, model_name=None):
"""
create an initial lambda with pi, aij, bjk suitable for codebook size
write the model to model_name
:return:
"""
pi = create_initial_distribution(num_states)
aij = create_aij(num_states)
bjk = create_bjk(num_states, num_symbols)
model = {
"A": aij,
"B": bjk,
"pi": pi,
}
model = {"hmm": model}
if model_name is not None:
val = json.dumps(model)
with open(model_name, "w") as f:
f.write(val)
return model
def create_initial_distribution(num_states):
pi1 = np.random.dirichlet(np.ones(num_states), size=1)[0].tolist()
assert sum(pi1) == 1, pi1
pi = dict()
for i, val in enumerate(pi1):
pi[i] = val
return pi
def create_aij(num_states):
aij = {}
for i in range(num_states):
data = np.random.dirichlet(np.ones(num_states), size=1)[0].tolist()
aij[i] = {}
for j, val in enumerate(data):
aij[i][j] = val
return aij
def create_bjk(num_states, num_symbols):
bjk = {}
for i in range(num_states):
data = np.random.dirichlet(np.ones(num_symbols), size=1)[0].tolist()
bjk[i] = {}
for j, val in enumerate(data):
bjk[i][j] = val
return bjk
def get_vecs(label, num_files=100):
vecs_list = []
file_names = collect_training_data(label, num_files=num_files)
for name in file_names:
vecs = get_mfcc_vectors([name])
vecs_list.append(vecs)
return vecs_list
def get_obs(vecs, book):
codes = vq(np.array(vecs), book)[0]
codes = [str(code) for code in codes]
return codes
def get_obs_list(vecs_list):
obs_list = []
for vecs in vecs_list:
obs = get_obs(vecs, book)
obs_list.append(obs)
return obs_list
def train(hmm, obs_list):
hmm.forward_backward_multi_scaled(obs_list)
return hmm
def classify(models, obs):
probs = {}
for k, v in models.items():
prob = v.forward_scaled(obs)
probs[k] = prob
print("probs: ", probs)
keys = list(probs.keys())
vals = list(probs.values())
val = max(vals)
index = vals.index(val)
key = keys[index]
# print("Predicted Class = ", key)
return key
if __name__ == '__main__':
# v = create_initial_model(2, 64, "./models/a.json")
# print(v)
labels = ["go", "stop",]
labels = ["stop", "go"]
book = pickle.load(open("book.p", "rb"))
models = {
}
for label in labels:
hmm = MyHmmScaled(model_file_name)
vecs_list = get_vecs(label, 200)
obs_list = get_obs_list(vecs_list)
print(label)
# print(len(obs_list), len(obs_list[0]))
# print(obs_list[33])
hmm = train(hmm, obs_list)
models[label] = hmm
pickle.dump(models, open("models.p", "wb"))
print(models)
classify(models, obs_list[12]) | AnshulRanjan2004/PyHMM | detect_word.py | detect_word.py | py | 3,720 | python | en | code | 0 | github-code | 13 |
4818739790 | def solution(gems):
kind = len(set(gems))
size = len(gems)
answer = [0, size - 1]
dic = {gems[0]:1}
start = end = 0
while end < size:
if len(dic) < kind:
end += 1
if end == size: break
dic[gems[end]] = dic.get(gems[end], 0) + 1
else:
if (end - start + 1) < (answer[1] - answer[0] + 1): answer = [start, end]
if dic[gems[start]] == 1: del dic[gems[start]]
else: dic[gems[start]] -= 1
start += 1
answer[0] += 1
answer[1] += 1
return answer
| gilbutITbook/080338 | 11장/보석_쇼핑.py | 보석_쇼핑.py | py | 591 | python | en | code | 32 | github-code | 13 |
20681349681 | from flask import Flask, request, jsonify
app = Flask(__name__)
cidades = [
{
'id': 1,
'nome': 'Houston',
'prefeito': 'Sylvester Turner (D)',
},
{
'id': 2,
'nome': 'Chicago',
'prefeito': 'Brandon Johnson',
},
{
'id': 3,
'nome': 'Los angeles',
'prefeito': 'Karen Bass',
},
]
@app.route('/cidades', methods=['GET'])
def obter_cidades():
return jsonify(cidades)
@app.route('/cidades/<int:id>',methods=['GET'])
def obter_cidades_por_id(id):
for cidades in cidades:
if cidades.get('id') == id:
return jsonify(cidades)
@app.route('/cidades/<int:id>', methods=['PUT'])
def editar_cidades_por_id(id):
cidades_alterada = request.get_json()
for indice, cidades in enumerate(cidades):
if cidades.get('id') == id:
cidades[indice].update(cidades_alterada)
return jsonify(cidades[indice])
@app.route('/cidades',methods=['POST'])
def incluie_nova_cidade():
nova_cidade = request.get_json()
cidades.append(nova_cidade)
return jsonify(cidades)
@app.route('/cidades/<int:id>', methods=['DELETE'])
def excluir_cidades(id):
for indice, cidade in enumerate(cidades):
if cidade.get('id') == id:
del cidades[indice]
return jsonify(cidades)
app.run(port=7000,host='localhost',debug=True) | Andrezada/API-Cidades | api.py | api.py | py | 1,412 | python | pt | code | 0 | github-code | 13 |
10992257429 | from mpi4py import MPI
import time
from sys import argv
# Three-rank relay benchmark: rank 1 sends msg_tot messages to rank 0,
# rank 0 forwards each one to rank 2, and ranks 1/2 append per-message
# timestamps to rank_<rank>.csv so latency can be computed offline.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
host = MPI.Get_processor_name()
# Size in bytes of each (zero-filled) message payload.
payload_size = 32
size = comm.Get_size()
# Total number of messages to relay, taken from the command line.
msg_tot = int(float(argv[1]))
file_name = 'rank_' + str(rank) + '.csv'
# Maps msg_tot -> {msg_num: send timestamp}; only filled on rank 1.
sent_time = {}
msg_num = 0
beg = 0.0
delta = 0.0
if rank == 1:
    # Sender: time-stamp and send msg_tot identical payloads to rank 0,
    # using the message number as the MPI tag.
    data = bytes(payload_size)
    beg = time.time()
    while True:
        if msg_tot in sent_time:
            sent_time[msg_tot][msg_num] = time.time()
        else:
            sent_time[msg_tot] = {msg_num: time.time()}
        comm.send(data, dest=0, tag=msg_num)
        msg_num += 1
        if msg_tot == msg_num:
            break
    end = time.time()
    delta = end - beg
    # Dump all send timestamps as CSV rows: rank, msg_tot, msg_num, time.
    with open(file_name, 'a') as csv:
        # sent_time[msg_tot] = {msg_num: time.time()}
        for k1, v1 in sent_time.items():
            for k2, v2 in v1.items():
                csv.write('%d, %d, %d, %f' % (rank, k1, k2, v2))
                csv.write('\n')
elif rank == 2:
    # Receiver: log the arrival time of every message forwarded by rank 0.
    with open(file_name, 'a') as csv:
        while True:
            data = comm.recv(source=0, tag=msg_num)
            if data is not None:
                time_recv = time.time()
                csv.write('%d, %d, %d, %f' % (rank, msg_tot, msg_num, time_recv))
                csv.write('\n')
                msg_num += 1
            if msg_tot == msg_num:
                break
elif rank == 0:
    # Relay: forward each message from rank 1 on to rank 2 unchanged.
    while True:
        data = comm.recv(source=1, tag=msg_num)
        if data is not None:
            comm.send(data, dest=2, tag=msg_num)
            msg_num += 1
            if msg_tot == msg_num:
                break
if rank == 1:
    print('%s sent %d in %f sec' % (rank, msg_num, delta))
if rank == 2:
    print('%s recv %d' % (rank, msg_num))
| folkpark/MPI_Benchmarking | exp10/exp10_3n/mpi_ch_size.py | mpi_ch_size.py | py | 1,726 | python | en | code | 0 | github-code | 13 |
26559284824 | import cv2
import numpy as np
import random
import os
import os.path
from PIL import Image
import matplotlib.pyplot as plt
import fnmatch
#переменная - словарь классов
label_dict = {0: 'anger', 1: 'contempt', 2: 'disgust', 3: 'fear', 4: 'happy', 5: 'neutral', 6: 'sad', 7: 'surprise', 8: 'uncertain'}
# This class implements image augmentation primitives.
class AugImage:
    """Simple OpenCV-based image augmentations.

    Every method takes a *file path*, reads the image from disk and returns
    the augmented image as a numpy array.
    """

    def __init__(self):
        pass

    def rotate_image(self, image_file, deg):
        """Rotate the image around its centre by ``deg`` degrees."""
        image = cv2.imread(image_file)
        rows, cols, c = image.shape
        M = cv2.getRotationMatrix2D((cols / 2, rows / 2), deg, 1)
        return cv2.warpAffine(image, M, (cols, rows))

    def erosion_image(self, image_file, shift):
        """Morphological erosion with a ``shift`` x ``shift`` kernel.

        Values from 3 to 6 give a reasonable result.
        """
        image = cv2.imread(image_file)
        kernel = np.ones((shift, shift), np.uint8)
        return cv2.erode(image, kernel, iterations=1)

    def gausian_blur(self, image_file, blur):
        """Gaussian blur with a fixed 5x5 kernel and sigma ``blur``."""
        image = cv2.imread(image_file)
        return cv2.GaussianBlur(image, (5, 5), blur)

    def dilation_image(self, image_file, shift):
        """Morphological dilation with a ``shift`` x ``shift`` kernel.

        Values from 3 to 5 give an acceptable result.
        """
        image = cv2.imread(image_file)
        kernel = np.ones((shift, shift), np.uint8)
        return cv2.dilate(image, kernel, iterations=1)

    def random_func(self, image_file):
        """Apply one randomly chosen augmentation to ``image_file``.

        Performance fix: the original built a list of the *results* of all
        four augmentations (reading and processing the image four times)
        before picking one; the choice is now made first and only the
        selected augmentation runs.  The uniform-choice behaviour and the
        returned numpy array are unchanged.
        """
        transforms = [
            lambda: self.rotate_image(image_file, 7),
            lambda: self.erosion_image(image_file, 5),
            lambda: self.gausian_blur(image_file, 10),
            lambda: self.dilation_image(image_file, 5),
        ]
        # Returns the ready numpy array of the processed image.
        return random.choice(transforms)()
# This class:
# 1 - crops faces out of images
# 2 - balances the dataset
# 3 - computes median image sizes
# 4 - preprocesses an image before feeding it to the model at detection
#     time (kept here because it belongs with the other preprocessing).
class PreprocessDataset:
    def __init__(self):
        pass

    def crop_face(self, initial_path, final_path, user_confidence=0.5):
        """Detect and crop faces from every image below ``initial_path``.

        ``initial_path`` contains one sub-folder per detection class; the
        cropped faces are written to identically named sub-folders under
        ``final_path``.  ``user_confidence`` is the detection probability
        threshold (0.5 recommended).  The files ``deploy.prototxt`` and
        ``res10_300x300_ssd_iter_140000.caffemodel`` (OpenCV DNN face
        detector) must sit next to this script.

        Bug fix: the original wrote into ``files_in_folders`` without ever
        defining it, raising NameError after the first processed folder;
        the dict is now initialised up front.
        """
        # Load the face-detection DNN from the OpenCV model zoo.
        net = cv2.dnn.readNetFromCaffe('./deploy.prototxt', './res10_300x300_ssd_iter_140000.caffemodel')
        # Counter of processed class folders.
        counter = 0
        # Per-class produced-file counts (was referenced but never created).
        files_in_folders = {}
        # Make sure the input path exists before walking it.
        if os.path.exists(initial_path):
            # Walk the class folders.
            for root, dirs, files in os.walk(initial_path):
                # Reset the per-folder file counter.
                counter_files = 0
                # The first visited entry is the root folder itself...
                if root == initial_path:
                    # ...take the class folder names from it.
                    list_folders = dirs
                    continue
                # Process every file of the current class folder.
                for name in files:
                    # Build the "class/filename" relative path.
                    core_path = os.path.join(list_folders[counter], name)
                    # Read the image.
                    image = cv2.imread(str(initial_path) + '/' + str(core_path))
                    # Image size, needed to scale bounding-box coordinates.
                    (h, w) = image.shape[:2]
                    # Run the face detector.
                    blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0))
                    net.setInput(blob)
                    detections = net.forward()
                    # Iterate over all detections.
                    for i in range(0, detections.shape[2]):
                        # Crop boxes whose confidence beats the user threshold.
                        confidence = detections[0, 0, i, 2]
                        if confidence > user_confidence:
                            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                            (startX, startY, endX, endY) = box.astype("int")
                            image_crop = image[startY:endY, startX:endX]
                            # Create the output class folder on demand.
                            if not os.path.exists('./' + str(final_path) + '/' + str(list_folders[counter])):
                                os.makedirs('./' + str(final_path) + '/' + str(list_folders[counter]))
                            # Save the cropped face.
                            name_new_file = str(counter_files) + '_' + str(list_folders[counter]) + '.jpg'
                            cv2.imwrite('./' + str(final_path) + '/' +
                                        str(list_folders[counter]) + '/' + str(name_new_file), image_crop)
                            # Bump the produced-file counter.
                            counter_files += 1
                # Record how many files this class folder produced.
                files_in_folders[list_folders[counter]] = counter_files
                # Move on to the next class folder.
                counter += 1
        # Report a missing input path.
        else:
            print(initial_path, ' - такого пути нет!')
        # cv2.waitKey(0)

    def balance_dataset(self, main_path):
        """Balance the dataset under ``main_path`` via augmentation.

        Finds the class folder with the most files and tops the other
        folders up with randomly augmented copies (see ``AugImage``).

        Bug fix: the loop variable is ``item_path`` but the original body
        referred to an undefined name ``item`` in several places, raising
        NameError as soon as any folder needed balancing.
        """
        emotions_dict = dict()
        # Walk the main folder and count files per class sub-folder.
        for root, dirs, files in os.walk(main_path):
            if root == main_path:
                continue
            count = len(fnmatch.filter(os.listdir(root), '*.*'))
            emotions_dict[root] = count
        # Augmentation helper instance.
        my_aug = AugImage()
        # Folder with the most files...
        max_feature = max(emotions_dict, key=emotions_dict.get)
        # ...and that maximum count.
        max_number = max(emotions_dict.values())
        print('максимальное количество: ', max_number)
        # Drop the already-full class from the dict.
        del emotions_dict[max_feature]
        # Top up every remaining class folder.
        for item_path in emotions_dict:
            # Counter used to name the generated files.
            counter = 0
            # How many files this folder is short of.
            number = max_number - emotions_dict[item_path]
            print('для', item_path, ' - ', number, 'изначально')
            # If the folder is short by more than 2.5x its own size,
            # recommend collecting more real images instead of augmenting.
            if number / emotions_dict[item_path] > 2.5:
                print('слишком большая разница в количестве для', item_path)
                print('лучше найти больше других изображений для этого класса!')
                continue
            # Shortfall smaller than the folder size: augment exactly
            # ``number`` randomly chosen files.
            if number < emotions_dict[item_path]:
                filenames = random.sample(os.listdir(item_path), number)
                counter = 0
                for file in filenames:
                    file_path = os.path.join(item_path, file)
                    # Random augmentation.
                    img = my_aug.random_func(file_path)
                    # Save the generated file into the same folder.
                    cv2.imwrite(item_path + '/' + '_aug_' + str(counter) + '.jpg', img)
                    counter += 1
                print('для', item_path, ' - ', number, 'добавлено')
            # Otherwise augment every file of the folder once.
            elif number > emotions_dict[item_path]:
                filenames = random.sample(os.listdir(item_path), emotions_dict[item_path])
                counter = 0
                for file in filenames:
                    file_path = os.path.join(item_path, file)
                    img = my_aug.random_func(file_path)
                    cv2.imwrite(item_path + '/' + '_aug_' + str(counter) + '.jpg', img)
                    counter += 1
                print('для', item_path, ' - ', emotions_dict[item_path], 'добавлено')
        print('датасет сбалансирован, проверьте папку ', main_path)

    def count_image_sizes(self, main_path):
        """Histogram and print the median of all image sizes under ``main_path``.

        Scans ``main_path`` recursively, reads every image's dimensions and
        shows two histograms (height and width), then prints the medians.
        """
        list_heights = []
        list_width = []
        for root, dirs, files in os.walk(main_path):
            for file in files:
                filepath = os.path.join(root, file)
                img = Image.open(filepath)
                list_heights.append(img.height)
                list_width.append(img.width)
        fig, axs = plt.subplots(figsize=(12, 6), ncols=1, nrows=2)
        axs[0].hist(list_heights)
        axs[1].hist(list_width)
        axs[0].set_title('Высота')
        axs[1].set_title('Ширина')
        plt.show()
        print('медиана высоты = ', np.median(list_heights))
        print('медиана ширины = ', np.median(list_width))

    def process_image(self, image):
        """Resize to 64x64 and convert to grayscale for model input.

        Returns None for degenerate (zero-sized) images.
        """
        (h, w) = image.shape[:2]
        if h == 0 or w == 0:
            return None
        if image is not None:
            image = cv2.resize(image, (64, 64), interpolation=cv2.INTER_AREA)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        return image
35428203974 | import datetime
import cdflib
import numpy as np
from matplotlib import dates
from sunpy.net import Fido
from sunpy.net import attrs as a
# # define start and end date
# start_time="2012-5-26 10:30"
# end_time="2012-5-28 15:40"
# # specify spacecraft 'ahead'/'behind'
# spacecraft = 'ahead'
def get_swaves(start_time, end_time, path=None):
    """Download STEREO/WAVES (SWAVES) level-2 data files via CDAWeb.

    The end date itself must be covered by the downloaded daily files, so
    the search interval is extended by one day past ``end_time``'s date.
    ``path`` may be a template such as ``'/my/data/{file}'`` to pick the
    local download folder.  Returns the sorted list of downloaded files.
    """
    cda_dataset = a.cdaweb.Dataset('STEREO_LEVEL2_SWAVES')
    # Parse once just to obtain the end date, then rebuild the range with
    # one extra day so the final day is included.
    requested_range = a.Time(start_time, end_time)
    inclusive_end = requested_range.end.to_datetime().date() + datetime.timedelta(days=1)
    search_range = a.Time(start_time, inclusive_end)
    query_result = Fido.search(search_range, cda_dataset)
    downloaded_files = Fido.fetch(query_result, path=path)
    downloaded_files.sort()
    return downloaded_files
def plot_swaves(downloaded_files, spacecraft, start_time, end_time, ax, cmap='inferno'):
    """Plot a SWAVES dynamic radio spectrum onto *ax*.

    Reads every CDF file in *downloaded_files*, stacks the
    ``avg_intens_<spacecraft>`` arrays along the time axis and draws them
    as a log-frequency pcolormesh limited to [start_time, end_time]
    (either '%Y-%m-%d %H:%M' strings or datetime objects).  Returns
    ``(ax, colormesh)`` so the caller can attach a colorbar.

    NOTE(review): the frequency axis is taken from the *last* file read --
    assumes all files share one axis; confirm for mixed downloads.
    """
    ###################
    # reading the files
    ###################
    data_all = []
    time_all = []
    for i in downloaded_files:
        cdf_file = cdflib.CDF(i)
        data = cdf_file.varget("avg_intens_" + spacecraft)
        data_all.append(data)
        # Divided by 1000 to get MHz (assumes the CDF stores kHz -- confirm).
        freq = cdf_file.varget('frequency')/1000  # in MHz
        time = cdf_file.varget('Epoch')
        time_all.append(cdflib.epochs.CDFepoch.to_datetime(time))
    # full time array for plotting
    time_arr = np.array(time_all).flatten()
    # full data array for plotting
    data_all = np.array(data_all)
    # if there are more than one 1-day file downloaded
    if data_all.shape[0] > 1:
        # Concatenate the daily arrays in download order along time.
        data_arr = np.concatenate((data_all[0], data_all[1]))
        for i in range(1, data_all.shape[0] - 1):
            data_arr = np.concatenate((data_arr, data_all[i+1]))
        # switching frequency axis
        data_arr = data_arr.T
    else:
        # switching frequency axis
        # one must choose the first entry of data_all here, because it's a list with len==1
        data_arr = data_all[0].T
    # Accept either formatted strings or ready datetime objects.
    if isinstance(start_time, str):
        start = datetime.datetime.strptime(start_time, '%Y-%m-%d %H:%M')
        end = datetime.datetime.strptime(end_time, '%Y-%m-%d %H:%M')
    else:
        start, end = start_time, end_time
    ######################
    # plotting the spectra
    ######################
    # Frequency rows are stored high-to-low, hence the [::-1] flips.
    colormesh = ax.pcolormesh(time_arr, freq[::-1], data_arr[::-1], vmin=0, vmax=0.5*np.max(data_arr), cmap=cmap)
    ax.set_ylabel('Frequency [MHz]')
    # Disable xlabel here because we only use it stacked ontop particle spectrogram:
    # ax.set_xlabel('Date and time (UT)')
    ax.set_yscale('log')
    ax.set_ylim(freq[-1], freq[0])
    ax.set_yticks([0.01, 0.1, 1, 10])
    ax.set_yticklabels(['0.01', '0.1', '1', '10'])
    ax.set_xlim(start, end)
    ax.xaxis_date()
    ax.xaxis.set_major_formatter(dates.DateFormatter('%d/%m %H:%M'))
    # plt.show()
    return ax, colormesh
| serpentine-h2020/SEPpy | seppy/tools/swaves.py | swaves.py | py | 3,139 | python | en | code | 5 | github-code | 13 |
39426606530 | import math
import random
import sqlite3
import asyncio
import time
import aioschedule
from telebot import types
from telebot.async_telebot import AsyncTeleBot
from sqlalchemy import create_engine, and_
from sqlalchemy import MetaData, Table, Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
engine = create_engine('sqlite:///data.db')
class Users(Base):
    """Player state; mapped onto the existing "Users" table (columns reflected)."""
    __table__ = Table("Users", Base.metadata, autoload_with=engine)
class Mobs(Base):
    """Monster definitions; mapped onto the existing "Mobs" table (columns reflected)."""
    __table__ = Table("Mobs", Base.metadata, autoload_with=engine)
class Locations(Base):
    """World locations (cities/dungeons); mapped onto the existing "Locations" table."""
    __table__ = Table("Locations", Base.metadata, autoload_with=engine)
class Items(Base):
    """Item definitions; mapped onto the existing "Items" table (columns reflected)."""
    __table__ = Table("Items", Base.metadata, autoload_with=engine)
class UserItems(Base):
    # Presumably the player <-> item join table -- confirm against the schema.
    __table__ = Table("user_items", Base.metadata, autoload_with=engine)
metadata = MetaData(bind=engine)
user_items = Table('user_items', metadata, autoload=True)
adjacent_locations = Table('adjacent_locations', metadata, autoload=True)
print(adjacent_locations.columns)
Session = sessionmaker(bind=engine)
session = Session()
with open("secret.txt") as file:
lines = [line.rstrip() for line in file]
TOKEN = lines[0]
bot = AsyncTeleBot(TOKEN)
@bot.message_handler(commands=['help'])
async def help(message):
    """Handle ``/help``: reply with the /start usage hint."""
    # The f-string has no placeholders; it is just the literal usage text.
    await bot.send_message(chat_id=message.chat.id,
                           text=f'/start name def_hp def_mana')
@bot.message_handler(commands=['start'])
async def start_game(message):
    """Handle ``/start [name [hp [money]]]``: (re)create the player row.

    Any existing row for this Telegram user is deleted first, so /start
    always begins a fresh game.  Optional arguments override the nickname,
    starting HP and starting money.

    Bug fix: the fourth argument was validated (``args[3].isdigit()``) but
    ``args[2]`` was then assigned to Money; ``args[3]`` is used as intended.
    """
    args = message.text.split()
    # await bot.reply_to(message, str(args))
    user = session.query(Users).get(message.from_user.id)
    if user:
        session.delete(user)
        session.commit()
    user = Users(UserID=message.from_user.id,
                 Nickname=message.from_user.username)
    if len(args) > 1:
        user.Nickname = args[1]
    if len(args) > 2 and args[2].isdigit():
        user.HP = args[2]
    if len(args) > 3 and args[3].isdigit():
        user.Money = args[3]
    session.add(user)
    session.commit()
    await bot.reply_to(message, f"играет {user.Nickname}")
def distance(fr, to):
    """Euclidean distance between two locations looked up by primary key.

    ``fr`` and ``to`` are Locations primary keys; coordinates come from the
    global SQLAlchemy session.
    """
    a = session.query(Locations).get(fr)
    b = session.query(Locations).get(to)
    # math.hypot is the idiomatic (and numerically safer) form of
    # sqrt(dx*dx + dy*dy).
    return math.hypot(a.XCoord - b.XCoord, a.YCoord - b.YCoord)
async def respawn(chat_id, user) -> None:
    """Idle-timeout callback: punish a player who did not act in time.

    Resets XP and HP, teleports the player back to location 1 and clears
    every scheduled job tagged with this chat.  Armed by /move, /fight and
    /info with a 60-second delay.
    """
    await bot.send_message(chat_id, text='вы так и не сходили')
    user.XP = 0
    user.CurHP = user.HP
    user.LocationID = 1
    session.merge(user)
    session.commit()
    aioschedule.clear(chat_id)
@bot.message_handler(commands=['move'])
async def move(message):
    """Handle ``/move <location_id>``: walk the player to an adjacent location.

    Travel time equals the Euclidean distance between the two locations.
    Entering a dungeon spawns a random level-appropriate mob and arms a
    60-second idle-respawn timer.

    Bug fix: the original called the blocking ``time.sleep(sec)`` inside an
    async handler, freezing the whole event loop (every chat) for the
    duration of the walk; ``await asyncio.sleep(sec)`` is used instead.
    """
    args = message.text.split()
    if len(args) != 2 or not args[1].isdigit():
        await bot.reply_to(message, f"введите номер локации")
        return
    to = int(args[1])
    fro = session.query(Users).filter(
        Users.UserID == message.from_user.id).one().LocationID
    # Is the destination adjacent to where the player stands?
    q = session.query(
        adjacent_locations).filter(and_(
        adjacent_locations.c.LocationID == fro,
        adjacent_locations.c.AdjacentLocationID == to)).all()
    if not q:
        # Not adjacent: list the locations the player can actually reach.
        options = session.query(
            adjacent_locations.c.AdjacentLocationID).filter(
            adjacent_locations.c.LocationID == fro).all()
        await bot.reply_to(message,
                           f"вам доступны только {[x[0] for x in options]}")
        return
    sec = distance(fro, to)
    location = session.query(Locations).get(to)
    await bot.send_message(chat_id=message.chat.id,
                           text=f'идти {sec} сек')
    # Non-blocking wait so other users keep being served while walking.
    await asyncio.sleep(sec)
    await bot.reply_to(message,
                       f"вы на месте")
    user = session.query(Users).get(message.from_user.id)
    user.LocationID = to
    session.merge(user)
    session.commit()
    if location.LocationType == 'dungeon':
        # Spawn a random mob that matches the player's level.
        mob_options = session.query(Mobs).filter(
            Mobs.ReqLevel <= user.Level).all()
        rand = random.randint(0, len(mob_options) - 1)
        mob = mob_options[rand]
        location.MobID = mob.MobID
        location.CurMobHP = mob.HP
        session.merge(location)
        session.commit()
        await bot.send_message(message.chat.id,
                               text='быстрее используйте /fight, /info у вас 60 секунд!')
        # Do nothing for 60 s and respawn() sends the player back to town.
        aioschedule.every(60).seconds.do(respawn, chat_id=message.chat.id,
                                         user=user).tag(
            message.chat.id)
    if location.LocationType == 'city':
        pass
@bot.message_handler(commands=['fight'])
async def fight(message):
    """Handle ``/fight``: exchange one round of blows with the dungeon mob.

    The player strikes first; if the mob survives, it strikes back.
    Winning grants 10 XP (level = XP // 100 + 1); dying resets the player
    to town.  Each surviving round re-arms the 60-second idle timer.

    Bug fix: on victory the XP/level changes were never committed before
    returning, so the reward only persisted as a side effect of some later,
    unrelated commit on the shared session; it is committed explicitly now.
    """
    user = session.query(Users).get(message.from_user.id)
    location = session.query(Locations).get(user.LocationID)
    if location.LocationType != 'dungeon':
        await bot.send_message(message.chat.id,
                               text='нельзя драться не в подземелье')
        return
    mob = session.query(Mobs).get(location.MobID)
    aioschedule.clear(message.chat.id)
    await bot.send_message(message.chat.id, text='Fight!')
    # The player's attack lands first.
    location.CurMobHP -= user.Attack
    session.commit()
    if location.CurMobHP <= 0:
        aioschedule.clear(message.chat.id)
        user.XP += 10
        user.Level = user.XP // 100 + 1
        session.commit()
        await bot.send_message(message.chat.id, text='вы победили')
        return
    # The mob retaliates.
    user.CurHP -= mob.Attack
    session.commit()
    if user.CurHP <= 0:
        await bot.send_message(message.chat.id, text='вы проиграли')
        user.XP = 0
        user.CurHP = user.HP
        user.LocationID = 1
        session.merge(user)
        session.commit()
        aioschedule.clear(message.chat.id)
        return
    # Still fighting: restart the 60 s inactivity timer for this chat.
    aioschedule.every(60).seconds.do(respawn, chat_id=message.chat.id,
                                     user=user).tag(
        message.chat.id)
@bot.message_handler(commands=['info'])
async def info(message):
    """Handle ``/info``: show the current dungeon mob's stats.

    Peeking is not free: the mob gets one free hit on the player, which
    can kill them and send them back to town.  Otherwise the 60-second
    idle-respawn timer is re-armed.
    """
    user = session.query(Users).get(message.from_user.id)
    location = session.query(Locations).get(user.LocationID)
    if location.LocationType != 'dungeon':
        await bot.send_message(message.chat.id,
                               text='не вижу противника...')
        return
    mob = session.query(Mobs).get(location.MobID)
    aioschedule.clear(message.chat.id)
    await bot.send_message(message.chat.id,
                           text=f'cur mob HP: {location.CurMobHP}\nmod attack:{mob.Attack}\nyour HP: {user.CurHP}')
    # Checking the enemy costs one free mob attack.
    user.CurHP -= mob.Attack
    if user.CurHP <= 0:
        # Player died: reset progress and teleport back to location 1.
        user.XP = 0
        user.CurHP = user.HP
        user.LocationID = 1
        session.merge(user)
        session.commit()
        aioschedule.clear(message.chat.id)
        await bot.send_message(message.chat.id, text='вы проиграли')
        return
    # Still alive: restart the 60 s inactivity timer for this chat.
    aioschedule.every(60).seconds.do(respawn, chat_id=message.chat.id,
                                     user=user).tag(
        message.chat.id)
async def scheduler():
    """Poll aioschedule once per second forever, running any due jobs."""
    while True:
        await aioschedule.run_pending()
        await asyncio.sleep(1)
async def main():
    """Run the Telegram polling loop and the job scheduler concurrently."""
    await asyncio.gather(bot.infinity_polling(), scheduler())
if __name__ == '__main__':
asyncio.run(main())
| nikalebed/deep_python_hw | hw4/main.py | main.py | py | 7,306 | python | en | code | 0 | github-code | 13 |
11942868545 | from Mapping.Map import Map
from Mapping.Square import Square
from Mapping.Terrain import average_distribution
from operator import itemgetter
class Region:
    """A group of map squares that belong together on one world map."""
    def __init__(self, world, squares):
        # Squares currently assigned to this region.
        self.squares = squares
        # The Map instance this region lives on.
        self.world = world
def square_in_region(region, square):
    """Return True when a square with the same (x_pos, y_pos) is already in *region*."""
    target = (square.x_pos, square.y_pos)
    return any((member.x_pos, member.y_pos) == target for member in region.squares)
def add_core_regions_to_map(world_map: Map) -> Map:
    """Partition every square of *world_map* into contiguous same-terrain regions.

    Each square joins the first existing region that shares its terrain and
    touches it (minimum Manhattan distance of 1 to any member); otherwise it
    seeds a new region.  The region index is stored on the square as
    ``square.region``.

    Bug fix: the original called an undefined ``manhatten_distance`` (a
    NameError as soon as two squares shared a terrain) and left debug
    ``print`` calls in the predicate; the metric is now defined locally
    (|dx| + |dy| on grid coordinates) and the prints are removed.
    """
    def manhattan_distance(a: Square, b: Square) -> int:
        # Standard L1 metric on the grid coordinates.
        return abs(a.x_pos - b.x_pos) + abs(a.y_pos - b.y_pos)

    def point_in_region(region: Region, square: Square) -> bool:
        # A square can only extend a region of the same terrain...
        if square.terrain != region.squares[0].terrain:
            return False
        # ...and only when it is directly adjacent to some member square.
        min_distance = min(manhattan_distance(region_square, square)
                           for region_square in region.squares)
        return min_distance == 1

    regions = []
    for row in world_map.grid:
        for col in row:
            for ii, region in enumerate(regions):
                if point_in_region(region, col):
                    region.squares.append(col)
                    col.region = ii
                    break
            else:
                # No compatible region found: start a new one with this square.
                regions.append(Region(world_map, [col]))
                col.region = len(regions) - 1
    return world_map
def modify_regions(world_map: Map):
    """One relaxation pass: move each square to its best-matching neighbour region.

    For every square, compares its ``surrounding_terrain`` distribution with
    the average distribution of each region found in its radius-1
    neighbourhood (per ``get_area_around_coordinate``) and reassigns the
    square to the closest one.  Returns ``(update, world_map)`` where
    ``update`` is True when at least one square changed region.
    """
    regions = world_map.get_regions()
    region_distributions = {}
    # Average terrain distribution per region, keyed like get_regions().
    for region in regions:
        sqs = regions[region]
        region_dist = average_distribution([x.surrounding_terrain for x in sqs])
        region_distributions[region] = region_dist
    update = False
    for row in world_map.grid:
        for col in row:
            dist = col.surrounding_terrain
            # Candidate regions present around this square.
            surrounding_squares = [x for x in world_map.get_area_around_coordinate(col.x_pos, col.y_pos, 1)]
            surrounding_regions = [x.region for x in surrounding_squares]
            surrounding_distributions = [region_distributions[x] for x in surrounding_regions]
            # Distance of each candidate's distribution to this square's own.
            distances = [x.difference_to_distribution(dist) for x in surrounding_distributions]
            # Index of the closest candidate region.
            min_index = min(enumerate(distances), key=itemgetter(1))[0]
            new_region = surrounding_regions[min_index]
            if new_region != col.region:
                update = True
                col.region = new_region
    return update, world_map
| Cal1ban/CreaturesGA | Mapping/Areas.py | Areas.py | py | 2,290 | python | en | code | 0 | github-code | 13 |
14851121695 | #rules
#1. first character must be an alphabet between a and k
#2. second character must be a digit divisible by 3 (3, 6 or 9 here)
#3. followed by any number of alphanumeric characters
# re.fullmatch() requires the whole string to match the pattern.
# Idiom fix: the wildcard ``from re import *`` is replaced with a qualified
# import -- the wildcard pollutes the namespace (re exports names such as
# ``compile``); ``== None`` is replaced with the identity test ``is None``.
import re

varname = input("enter variable name:")
rule = "[a-k][369][a-zA-Z0-9]*"
matcher = re.fullmatch(rule, varname)
if matcher is None:
    print("invalid variable name")
else:
    print("valid variable name")
| rizniyarasheed/python | regularExpression/regex_prgrm4.py | regex_prgrm4.py | py | 407 | python | en | code | 0 | github-code | 13 |
35661790125 | import sys
import os
from sklearn.feature_extraction.text import CountVectorizer
def main():
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from keras_en_parser_and_analyzer.library.dl_based_parser import ResumeParser
from keras_en_parser_and_analyzer.library.utility.io_utils import read_pdf_and_docx
current_dir = os.path.dirname(__file__)
current_dir = current_dir if current_dir is not '' else '.'
data_dir_path = current_dir + '/data/resume_samples' # directory to scan for any pdf and docx files
def parse_resume(file_path, file_content):
print('parsing file: ', file_path)
parser = ResumeParser()
parser.load_model(current_dir + '/models')
parser.parse(file_content)
# print(parser.raw.encode("utf-8")) # print out the raw contents extracted from pdf or docx files
if parser.unknown is False:
summary, expertise, experience, knowledge, project = parser.summary()
# print(parser.summary().encode("utf-8"))
print(summary.encode('utf-8'))
vocabulary = []
for i in experience:
vocabulary.append(i.encode("utf-8"))
print(vocabulary)
# prefinal_vocabulary = []
# for i in vocabulary:
# try:
# prefinal_vocabulary.append(i.decode("utf-8"))
# except:
# prefinal_vocabulary.append(str(i,"utf-8"))
# print(prefinal_vocabulary[9])
final_vocabulary = []
# for i in prefinal_vocabulary:
# print(i)
# try:
# j = i.split(',')
# except:
# pass
# print(j)
print('++++++++++++++++++++++++++++++++++++++++++')
collected = read_pdf_and_docx(data_dir_path, command_logging=True, callback=lambda index, file_path, file_content: {
parse_resume(file_path, file_content)
})
print('count: ', len(collected))
if __name__ == '__main__':
main()
| mohancm/android_kernel_wiko_fever | demo/dl_based_parser_predict.py | dl_based_parser_predict.py | py | 2,089 | python | en | code | 2 | github-code | 13 |
40967148094 | import streamlit as st
import app2
# Basic page chrome for the Streamlit app.
st.set_page_config(
    page_title="Blog Generator",
    page_icon="🐾",
    layout="centered")
# Pages as key-value pairs: sidebar label -> module exposing an app() entry point.
PAGES = {
    "Blog Generator": app2
}
st.sidebar.title('Go to:')
# Sidebar navigation: pick one of the page labels.
selection = st.sidebar.radio("", list(PAGES.keys()))
page = PAGES[selection]
# Render the selected page.
page.app()
| RajanGoyal1002/Blog_GPT-3 | gpt_app.py | gpt_app.py | py | 338 | python | en | code | 0 | github-code | 13 |
24619359045 | """
Implement CORDEX specific DRS scheme.
"""
import re
import os
from drslib.drs import BaseDRS, DRSFileSystem, _ensemble_to_rip, _rip_to_ensemble
from drslib import config
from drslib.exceptions import TranslationError
class CordexDRS(BaseDRS):
    """DRS scheme for the CORDEX project.

    Declares the CORDEX component vocabulary and how individual components
    are converted to and from their string form.
    """
    DRS_ATTRS = [
        'activity', 'product', 'domain', 'institute', 'gcm_model', 'experiment',
        'ensemble', 'rcm_model', 'rcm_version', 'frequency', 'variable', 'subset',
        'extended',
        ]
    PUBLISH_LEVEL = 'variable'
    OPTIONAL_ATTRS = ['extended']
    # Mapping from external JSON attribute names to DRS component names.
    DRS_JSON_MAP = {
        'driving_model': 'gcm_model',
        'model_version': 'rcm_version',
        'model': 'rcm_model',
        }

    @classmethod
    def _encode_component(cls, component, value):
        """Convert one DRS component *value* to its string representation.

        ``None`` encodes as the wildcard '%'.

        Bug fix: the 'subset' branch built ``val`` but fell through without
        returning it, so every subset encoded as ``None``; the value is now
        returned.  (The first classmethod parameter is also renamed from
        ``klass`` to ``cls`` for consistency with ``_decode_component``.)
        """
        from drslib.translate import _from_date
        if value is None:
            return '%'
        elif component == 'ensemble':
            return _ensemble_to_rip(value)
        elif component == 'version':
            return 'v%d' % value
        elif component == 'subset':
            #!TODO: remove duplication in drs.py
            N1, N2, clim = value
            if clim:
                val = '%s-%s-clim' % (_from_date(N1), _from_date(N2))
            else:
                val = '%s-%s' % (_from_date(N1), _from_date(N2))
            return val
        else:
            return value

    @classmethod
    def _decode_component(cls, component, value):
        """Parse the string form of one DRS component back to its value.

        The wildcard '%' decodes to ``None``.  'subset' strings look like
        ``N1-N2`` or ``N1-N2-clim`` and decode to an ``(N1, N2, clim)``
        tuple; anything else raises ValueError.
        """
        from drslib.translate import _to_date
        if value == '%':
            ret = None
        elif component == 'ensemble':
            if value == (None, None, None):
                ret = None
            else:
                ret = _rip_to_ensemble(value)
        elif component == 'version':
            # Accept both 'vNN' and a bare integer string.
            if value[0] == 'v':
                ret = int(value[1:])
            else:
                ret = int(value)
        elif component == 'subset':
            N1 = N2 = None
            parts = value.split('-')
            if len(parts) > 3:
                raise ValueError('cannot parse extended component %s' % repr(value))
            N1, N2 = _to_date(parts[0]), _to_date(parts[1])
            if len(parts) == 3:
                clim = parts[2]
                if clim != 'clim':
                    raise ValueError('unsupported extended component %s' % repr(value))
            else:
                clim = None
            ret = (N1, N2, clim)
        else:
            ret = value
        return ret
class CordexFileSystem(DRSFileSystem):
    """Maps CORDEX DRS instances to and from their on-disk layout."""
    # The DRS scheme this filesystem layout serves.
    drs_cls = CordexDRS

    def filename_to_drs(self, filename):
        """
        Return a DRS instance deduced from a filename.

        Raises TranslationError for ignored files or names that do not
        match the expected CORDEX filename pattern.
        """
        if self._is_ignored(filename):
            raise TranslationError()
        # VariableName_Domain_GCMModelName_CMIP5ExperimentName_CMIP5EnsembleMember_RCMModelName_RCMVersionID_Frequency_StartTime-EndTime.nc
        m = re.match(r'(?P<variable>.*?)_(?P<domain>.*?)_(?P<gcm_model>.*?)_(?P<experiment>.*?)_(?P<ensemble>.*?)_(?P<institute>.*?)-(?P<rcm_model>.*?)_(?P<rcm_version>.*?)_(?P<frequency>.*?)(?:_(?P<subset>.*?))?\.nc', filename)
        if not m:
            raise TranslationError()
        comp_dict = m.groupdict()
        drs = self.drs_cls(activity='cordex')
        for component in ['variable', 'domain', 'gcm_model', 'experiment',
                          'ensemble', 'institute', 'rcm_model', 'rcm_version', 'frequency',
                          'subset']:
            comp_val = comp_dict[component]
            # rcm_model is stored institute-qualified: "<institute>-<model>".
            if component == 'rcm_model' and comp_val is not None:
                drs[component] = "%s-%s" % (comp_dict['institute'], comp_dict['rcm_model'])
            elif comp_val is not None:
                drs[component] = drs._decode_component(component, comp_val)
        return drs

    def filepath_to_drs(self, filepath):
        """
        Return a DRS instance deduced from a full path.
        """
        # Split off the variable and version directory then pass
        # the results to other functions
        parts = filepath.split('/')
        version_str, filename = parts[-2:]
        drs = self.filename_to_drs(filename)
        drs.version = drs._decode_component('version', version_str)
        drs.update(self.publication_path_to_drs('/'.join(parts[:-2])))
        return drs

    def drs_to_storage(self, drs):
        """Return the storage sub-path "<files_dir>/d<version>" for *drs*."""
        return '%s/d%d' % (self.VERSIONING_FILES_DIR, drs.version)

    def storage_to_drs(self, subpath):
        """Inverse of drs_to_storage(): recover the version from a sub-path."""
        files_dir, subpath2 = subpath.split('/')
        assert subpath2[0] == 'd'
        version = int(subpath2[1:])
        return self.drs_cls(version=version)

    def drs_to_ingest_cache_path(self, drs):
        """Absolute publication path, used as the ingest cache location."""
        return os.path.abspath(self.drs_to_publication_path(drs))

    # drs_to_realpath(self, drs): defined in superclass
    def drs_to_linkpath(self, drs, version=None):
        """Absolute path of the "v<version>" link directory for *drs*.

        *version* defaults to the version carried by *drs* itself.
        """
        if version is None:
            version = drs.version
        pubpath = self.drs_to_publication_path(drs)
        return os.path.abspath(os.path.join(pubpath, 'v%d' % version))
| ESGF/esgf-drslib | drslib/cordex.py | cordex.py | py | 5,005 | python | en | code | 1 | github-code | 13 |
35514297324 | from django.contrib.auth import authenticate, login, logout
from django.db import IntegrityError
from django.http import HttpResponse, HttpResponseRedirect
# I want to use the "redirect" function (source: https://youtu.be/8kBo91L8JTY )
from django.shortcuts import render, redirect
from django.urls import reverse
# This will let me find the max value from a list of database entries
from django.db.models import Max
# This imports flash messages (source: https://youtu.be/8kBo91L8JTY )
from django.contrib import messages
""" This will let me obtain the current date and time whenever I create a listing (source:
https://pythonguides.com/how-to-get-current-time-in-django/ )
"""
import datetime
""" This will let me use the "@login_required" attribute (source:
https://docs.djangoproject.com/en/4.0/topics/auth/default/#the-login-required-decorator )
"""
from django.contrib.auth.decorators import login_required
""" This will let me use the Django form for creating listings, which is on the forms.py file (source:
https://docs.djangoproject.com/en/4.0/topics/forms/ . I'm also adding the form that allows me to get the bids from
buyers. )
"""
from .forms import CreateListingForm, BidForm, CommentForm
from .models import User
# This will import the Listings,Categories, and all the other tables from the models.py file
from .models import Listings, Categories, Bids, Comments, Watchlists
"""
I can show the active listings in the home page, regardless of whether the user has logged in or not. Remember
that you don’t have to be logged in in eBay in order to see a product and their price. You only need to log in to buy
or sell a product.
I need to send all of the resulting data into index.html, NOT to create.html. I want to display the listings on
the home page, NOT on the page for creating a new listing. So, I’ll need to specify that on the views.py file.
BUG: The prices aren’t being updated properly on the Active Listings nor in the Closed Auctions pages.
To solve this, I need to put the same Jinja code that I put on listings.html on index.html. Also note that I will need
to copy and paste the code that gets the highest bid from the display_listing() view into the index() view. I also need
to add the variables that will get the bids from the database so I can send them over Jinja to index.html.
I will have to do the same in the inactive_listings() view and the inactive.html file.
I have a massive problem, which is that I need to get the number of bids for each of the products. So, I think I should
do like for the watchlists: create an array that will store all of the bids for each of the products during each
iteration of the “for” loop that will render each product. But even this could give me problems, since all of the
bids for all of the products would be stored in the same array, which would give me all kinds of problems.
Another possible solution would be to either take either the highest bid amount or the initial price for a product from
the database, depending on the case.
I think that the key for displaying the prices correctly in Active Listings is storing all of the price amounts in an
array. I will first declare an empty array. Then, I will create a “for” loop that will iterate every product that’s
active. Then, I will check if there’s at least one bid for the current product in the “for loop” (or check if it’s not
0 or not none. I don’t know how the max() function works.) If the current product in the loop has at least a bid, I
will append the maximum bid for that product in an array using the max() function. That will give me a number, not an
instance, so it will work. If that product doesn’t have at least a bid, or the max function returns 0 or None, I will
append the price from the initial_price column from the Listings table into the array.
Then, I will send that array via Jinja to the Active Listings page (index.html). Finally, I will create a “for” loop on the index.html file, and I will iterate every item of the price array in the HTML tag that has the “Current Bid” title. But, to avoid bugs, I need to get this loop and the “Current Bid” title out of the “for listing in listings” loop. Otherwise, it will print me more than once each price of each product. I will have to create twice the “for listing in listings” loop to prevent nesting the “for” loop that iterates the “price amount” array inside of the previous loop.
To a number as a maximum value using Max(), I will have to use an associative array style of notation. For instance, I
could use notation like the following:
max_variable = Table.objects.aggregate(Max('column'))['column__max']
(source: afahim’s reply on https://stackoverflow.com/questions/844591/how-to-do-select-max-in-django )
I will transform the price array into a dictionary or associative array. In that dictionary, I will store the ID of the
product, and its respective price. Then on the index.html file, I will compare the product ID from the Listings table,
and the product ID of the prices dictionary. If they are both the same, I will print the price for the current
iteration of the “for” loop. That way, each product will only have one price printed.
To create a dictionary in Python, I need to use the following notation:
Dictionary = {'Attribute_1': 'Value_1', 'Attribute_2': 'Value_2'}
(source: https://www.geeksforgeeks.org/python-dictionary/ )
Creating a dictionary and appending to a dictionary is a bit different than for lists or regular arrays. When declaring
the dictionary, I need to also declare the keys (‘ID’, ‘Price’, etc.) Then for appending values for a dictionary, I
need to append each value for each key separately. For instance:
dictionary["Key_1"].append("Value_1")
dictionary["Key_2"].append("Value_2")
(source: https://www.guru99.com/python-dictionary-append.html#2 )
"""
def index(request):
    """Home page: list every active listing with an up-to-date price.

    Before rendering, the ``current_price`` column of each active listing
    is refreshed in the database: it becomes the highest bid placed on the
    product if any bids exist, otherwise it falls back to the listing's
    initial (seller-set) price.
    """
    # All products that are still up for auction.
    listings = Listings.objects.filter(active=True)
    # All bids; the template uses this to tell bid-backed prices apart.
    # NOTE: original code passed the bound method ``Bids.objects.all`` and
    # relied on the template engine calling it — call it explicitly here.
    products_with_bids = Bids.objects.all()
    # Kept for template compatibility; prices now live on each listing row.
    price_amounts = []
    for product in listings:
        # Highest bid on this product, or None if nobody has bid yet.
        highest_bid_amount = Bids.objects.filter(listing=product.id).aggregate(
            Max('bid'))['bid__max']
        if highest_bid_amount is not None:
            # Someone has bid: the asking price becomes the top bid.
            Listings.objects.filter(pk=product.id).update(
                current_price=highest_bid_amount)
        else:
            # No bids: restore the seller's original asking price.
            # (Uses the scalar already loaded on ``product`` instead of the
            # values_list() queryset the original passed to update().)
            Listings.objects.filter(pk=product.id).update(
                current_price=product.initial_price)
    return render(request, "auctions/index.html", {
        "listings": listings,
        "price_amounts": price_amounts,
        "products_with_bids": products_with_bids,
    })
""" This will display all of the closed auctions
Upon further consideration, I don’t have to touch or modify the inactive listings. So, I won’t modify its code for the
time being.
"""
def inactive_listings(request):
    """Render the closed-auctions page, showing every listing."""
    every_listing = Listings.objects.all()
    return render(request, "auctions/inactive_listings.html", {
        "listings": every_listing,
    })
def login_view(request):
    """Show the login form; on POST, try to sign the user in."""
    if request.method != "POST":
        # Plain visit: just render the empty login form.
        return render(request, "auctions/login.html")
    # Attempt to authenticate with the submitted credentials.
    username = request.POST["username"]
    password = request.POST["password"]
    user = authenticate(request, username=username, password=password)
    if user is None:
        # Bad credentials: re-render the form with an error message.
        return render(request, "auctions/login.html", {
            "message": "Invalid username and/or password."
        })
    # Credentials are valid: start the session and go home.
    login(request, user)
    return HttpResponseRedirect(reverse("index"))
def logout_view(request):
    """End the current user's session and redirect to the home page."""
    logout(request)
    return HttpResponseRedirect(reverse("index"))
def register(request):
    """Show the registration form; on POST, create the new account."""
    if request.method != "POST":
        # Plain visit: render the empty registration form.
        return render(request, "auctions/register.html")
    username = request.POST["username"]
    email = request.POST["email"]
    password = request.POST["password"]
    confirmation = request.POST["confirmation"]
    # The password must be typed identically twice.
    if password != confirmation:
        return render(request, "auctions/register.html", {
            "message": "Passwords must match."
        })
    # Attempt to create the account; the username column is unique.
    try:
        new_user = User.objects.create_user(username, email, password)
        new_user.save()
    except IntegrityError:
        return render(request, "auctions/register.html", {
            "message": "Username already taken."
        })
    # Account created: log the new user in immediately.
    login(request, new_user)
    return HttpResponseRedirect(reverse("index"))
""" 2.a) Users should be able to visit a page to create a new listing.
For this, I’ll just create a route. I’ll make a view in views.py, add the URL in urls.py, and add the link into the
layout.html or index.html (although I think it would be best to put it on layout.html, since I want users to be able
to access the page to create a new listing from anywhere within the website.) I’ll open the website to see if the
distribution code already added a text that says “create new page” so that I can insert a link there.
No, there isn’t any text saying “Create New Listing” if the user logs in into their account. I need to add the text,
and add the link. And, as I said, I prefer adding the “Create new listing” link in layouts.html so that the user can
create a new listing in any page within the website. I will put it somewhere close to the “log out” link. Upon further
inspection, I see that there is a “nav” class within layouts.html, which serves as a navbar. I will insert within
that <ul> tag with class “nav” the text with the “Create new listing” link.
I need to use the @login_required decorator on the line right before creating the view function for creating new
listings in views.py so that only users that have logged in into their accounts can create listings. That is, I need
to put the line “@login_required” right on top of “def function_name():” in views.py.
I will at first create a simple view function in views.py, which will only redirect the user to the create.html page.
I will call the function “create(request)”. I won’t check for POST requests yet. To redirect them to the create.html
page, I will use the following snippet: return render(request, "auctions/create.html") .
Next, I would need to edit the urls.py file to add the path towards the “/create” URL.
Then, in the views.py file, to render the form (if the user has just entered the page), I will have to first import
that form. That’s done using “from .forms import NameOfTheForm”. Then, within the view() function, I need to add a
variable, and make that variable equal to “NameOfTheForm()” (source:
https://docs.djangoproject.com/en/4.0/topics/forms/ .) Finally, I will send that form to create.html using jinja
notation. To do that, I will have to put that variable inside the “return render request()” function, by using the
following syntax: {“form_variable”: form_variable}. Then, I need to call that variable via Jinja notation in
create.html.
I will add some debugging code to print the data stored in the Listings table from the database into the /create
page. I will do that just to learn how to print data from the database into the website. To do that, in the create()
view, I will add Django’s Query Set notation into a variable. Then, I will send that variable to the /create page via
Jinja notation. The syntax that I should use will be something like: “ “variable_name”: Model_name.objects.all() .”
I will now proceed to insert the data from the form’s inputs from the create page into the database. To do that, I
will obtain the data from the POST request from the /create page, and I will insert it into an “if” statement. I will
insert all of that data into multiple variables. Then, I will insert those variables into the different fields of the
Listings table in the database. But, to do that, I need to obtain the ID number from the user that’s currently logged
in, so that the listing is created for that specific user.
The data from the user that’s currently logged in is stored into the request of the different views functions, and I
can access it by using the following snippet: “request.user” (source: K Z’s answer from
https://stackoverflow.com/questions/12615154/how-to-get-the-currently-logged-in-users-user-id-in-django . So, if I
use something like “logged_user = request.user” to store all of the data from the logged in user, I can access their
PK by using the following snippet: “logged_user_id = logged_user.id”.)
To get data from an input via a POST request from a form in a view() with an “if” statement, I need to use the
following syntax in the view(): to check if a POST request was made: " if request.method == "POST": " (source:
Brian’s lecture for this assignment). Then, to insert the data from the form into a variable in the view(), I need to
use the following syntax: " request.POST["input_name"] ", and I need to insert that into a variable.
Now, to insert the data from that form into my database, I will need to use Django’s Query Set syntax. The syntax that
I will need to use will be: " table.field.add(variable_with_input_data) ".That will add a row, that is, an entry,
into that table in my database. The problem is that I don’t want to add just one field into that row. I want to add
like 7 or 8 fields within that same row, that is, within that same listing. So, I think I will have to use the “
variable.save() ” syntax that Brian used when he was explaining how to use the Query Set syntax on the Python shell.
The Query Set syntax that I will use to insert the 3 fields of the form, as well as all of the other remaining 4 or 5
fields into a single listing in the Listings table in the database, I will use syntax like the following:
new_listing = Listings(seller_id=logged_user_id, product_name=product_name_variable_from_input,
description=description_variable, initial_price=price_variable, …, active=True). Then, to save that into the database,
I’ll need to use the following syntax: “ new_listing.save() ”.
To insert the PK of a user, I need to get an instance of the User table, or I’ll get an error when trying to execute
the Query Set syntax. To get an instance of the User table, I need to use the following syntax: “
user = User.objects.get(id=id_number_from_user) “ (source: JamesO’s answer from
https://stackoverflow.com/questions/9616569/django-cannot-assign-u1-staffprofile-user-must-be-a-user-instance .)
I need to fix the formatting of the database entries from the Categories model. I will use the
"strip()" function (source: https://www.codegrepper.com/code-examples/python/remove+outer+quotes+from+string+python )
BUG FIX: This fixes the issue of the category name being displayed being its ID number instead of the category.
What I do is first, obtain the ID of the category from the Create Listing form. Then, I create a variable which will
obtain the name of the category whose ID number is the one obtained from the Create Listing form. Finally, I
use that variable to display that entry of the database. Now, both the dropdown menu and the databse entry show
the proper format for the categories for the listings (source: iridescent's reply from
https://stackoverflow.com/questions/4300365/django-database-query-how-to-get-object-by-id .)
BUG: If the user doesn’t choose a category, the category will be ‘’ (NULL), so I’ll get an error message from Django.
That is because I’m trying to find an entry with the ID number that is NULL or ‘’, which, of course, doesn’t exist. If
I fix this bug (with an “if” statement), I will finish this part of the homework.
I’ll put an “if” statement saying that, if the user types “ ‘’ ” as the category, that it should insert “category” in
the “category_formatted” variable. Otherwise, I will use the query set statement that will look for the category name
whose ID is the one inserted in the form submitted by the user.
BUG: For some reason, the only date being displayed is Jan 1st, 2022.
The database is displaying the date of creation for all of the listings to be jan 1st, 2022. I need to see what went
wrong. It may be that it’s inserting the default date that I specified, instead of taking the current date and time of
creation of the entry. After checking out my code on models.py, I can confirm that the default date that I specified
is indeed January 1st, 2022. I need to modify the code to take the current date and time whenever I create a new
listing.
In my views.py file, I’m never obtaining the date from anywhere, neither from the form, nor from a Query Set
statement. So, I will have to use the proper Query Set statement to grab the current date and time, and insert in into
the “created_on” variable, that is, inside of each listings’ entry.
It seems that I need to import a Python library called “datetime”, and then I need to use this snippet on the views.py
file: “datetime.now()” (source: https://pythonguides.com/how-to-get-current-time-in-django/ .)
I will specify that the initial price for the current_price column will be the same as in the initial_price column.
"""
@login_required
def create(request):
    """Show the listing-creation form and, on POST, save the new listing.

    Only logged-in users can reach this view. The seller is the current
    user, the listing starts out active, and its current price equals the
    starting bid.
    """
    form = CreateListingForm()  # Empty form, shown on every visit
    # Instance of the logged-in user, needed as the seller foreign key.
    user_instance = User.objects.get(id=request.user.id)
    if request.method == "POST":
        listing_title = request.POST["listing_title"]
        starting_bid = request.POST["starting_bid"]
        description = request.POST["description"]
        picture_url = request.POST["picture_url"]
        category = request.POST["category"]
        # Timestamp the listing with the moment it was created.
        # NOTE(review): this is a naive local datetime — confirm the
        # project does not use USE_TZ before switching to timezone.now().
        creation_moment = datetime.datetime.now()
        if category != '':
            # Resolve the submitted category PK to its Categories row.
            category_formatted = Categories.objects.get(pk=category)
        else:
            # No category chosen: store the empty value as-is.
            category_formatted = category
        # Build and persist the new listing row.
        new_listing = Listings(
            seller_id=user_instance,
            product_name=listing_title,
            description=description,
            initial_price=starting_bid,
            current_price=starting_bid,
            picture_url=picture_url,
            category=category_formatted,
            created_on=creation_moment,
            active=True,
        )
        new_listing.save()
        # Re-render the page with a success message for the user.
        return render(request, "auctions/create.html", {
            "form": form,
            "listings": Listings.objects.all(),
            "listing_creation_confirmation_message": 'Your listing was successfully created!',
        })
    return render(request, "auctions/create.html", {
        "form": form,
        "listings": Listings.objects.all()
    })
""" This will display a page for a specific listing if the user clicks on a listing on the home page.
Now, I need to go to urls.py, since I need to create a page specific for each listing. I will use the ID of each
listing to differentiate the URL from each listing page. To do that, I will need to insert the ID of the currently
clicked listing into a string from the views.py field (if I’m not mistaken), and then insert it into the “<str”
keyword in urls.py.
To get the seller’s name on the views.py file, first, I will get the seller’s ID from the listing’s entry. The specific
field that I want from the Listings table is called “seller_id_id”, which I’ll need to store in a variable. Then, I need
to call the User table, and find a user whose ID is the same as the one in the listing for that particular page. Finally,
I want to obtain the username of that user, and then send it to listing.html.
What I ended up doing was to obtain the ID of the current listing, and all of the IDs of all of the users. Then, in jinja
notation on the listing.html file, I used an “if” statement comparing the seller’s ID from the current listing, and all
the IDs from all of the users. If they are the same, the name of the seller will be printed.
I already have created the model for the watchlists. So, I will use a Query Set statement to add a currently selected product
into the Watchlist table as an entry. The first thing that I’ll need will be the ID code of the product. I don’t know if
I’m storing it as a foreign key, but, ideally, it should be that way. After checking, it turns out that I didn’t use a foreign
key, but a “OneToMany” and “ManyToMany” functions for the user ID and the listing ID for the Watchlist table. The columns for
the Watchlist table are User and the Listings tables (which probably takes the ID code for those tables), and the listing’s URL.
So, the Query Set statement that I’ll use to insert the product into a watchlist will insert that product’s URL, ID, and seller
ID into the Watchlist table in the database. The columns are “listing_url”, “user”, and “listing”. “listing_url” refers to the
URL for the selected product. “user” refers to the currently logged in user. Finally, “listing” refers to the currently
selected product.
I’ll add a button that says “Add to Watchlist” once the user clicks on a product and enters that product’s page. If the user
has already added that product to their watchlist, the button will change to “Remove from watchlist”, which will remove the
item from the user’s watchlist.
The table I’m interested in is called “auctions_watchlists”. There’s a similar table called “auctions_watchlists_user”. I think
this last table was automatically created since I’m using a “many to many” relationship function for the “user” column (the
column that gets the seller’s ID).
So, I will first create a button in the listing pages that say something like “Add to Watchlist”. Then, I will add in views.py
a Query Set on the view for the individual listing pages that will insert the seller ‘s ID, the listing ID, and the listing’s
URL into the “auctions_watchlists” table. The button can be styled using Bootstrap. I could put the button below the price.
After further consideration, I decided to put it under the description.
To insert the logged in user’s data in the “user” column for the Watchlist table, I will obtain the logged in user’s ID from the
“request.user” function. Then, I’ll get that user by using a Query Set statement looking for a user with the ID belonging to
the currently logged in user.
Then, to insert the currently selected product’s data into the “listing” column of the Watchlist table, I will use a Query Set
statement to get the product whose ID is the one that’s typed on the URL bar. That’s already being inserted in the 2nd
parameter of the display_listing() view (which is called “listing_id”).
Now, getting the URL of the currently selected product will be a bit tricky. I need to select the entire text that’s
inserted in the URL bar. I will use the URL used in the urls.py file for the display_listing() view as a template. So, I
may insert the URL “listing/listing_id>” in the “listing_url” column.
Note: DO NOT USE get() on Listings.objects, since I would get an error that won't let me enter the page for a specific
product. I should keep the filter() function.
BUG: I'm getting an error telling me that I have issues when trying to assign the value to a Many To Many field, which,
in my case, is the "product_id" field from the Watchlist table. To fix that, it seems that I need to save the Query Set
statement without the user, and THEN I need to add the user using something like "user.add(request.user)" (source:
https://geeksqa.com/django-direct-assignment-to-the-forward-side-of-a-many-to-many-set-is-prohibited-use-user-set-instead
)
BUG: The product page's URL is not being properly inserted into the database. What I'll do is to store the URL into 2
parts: one with the word "listing/", and the other half with the product's ID. Then, I will concatenate both variables
in a single variable using the "+" sign (source: https://www.educative.io/edpresso/how-to-concatenate-strings-in-python
.)
The ID of the products are NOT stored on the auctions_watchlists table: instead, they’re being inserted in a new table
that was automatically created called “auctions_watchlists_product_id”. This happened because the product_id column is
stored as a One To Many field, which generates a table. The auctions_watchlists_product_id stores the ID of each entry
on the watchlist, the ID of each entry in this new table, and the ID of the product added to the watchlist.
Each watchlist doesn’t have their own ID. To separate one watchlist from the other, I have to use the ID of the user
who’s the owner of that watchlist. That way, I’ll use a filter to find all of the products added by a person by using
that person’s ID on the auctions_watchlist table.
Now that the product is being properly added into the watchlist after clicking on the “Add to Watchlist” button, I need
to change that button into “Remove from Watchlist” after the user adds that product into their wish list. Then, after
clicking the “Remove” button, that product will be removed from the Watchlist table in the database.
The ID of the products are NOT stored on the auctions_watchlists table: instead, they’re being inserted in a new table
that was automatically created called “auctions_watchlists_product_id”. This happened because the product_id column is
stored as a Many To Many field, which generates a table. The auctions_watchlists_product_id stores the ID of each entry
on the watchlist, the ID of each entry in this new table, and the ID of the product added to the watchlist.
Each watchlist doesn’t have their own ID. To separate one watchlist from the other, I have to use the ID of the user
who’s the owner of that watchlist. That way, I’ll use a filter to find all of the products added by a person by using
that person’s ID on the auctions_watchlist table.
So, to change the “Add to Watchlist” button to “Remove from Watchlist”, and the removing that product from that user’s
watchlist, I’ll have to make an “if” statement using Django on the display_listing.html page. I’ll specify that, if
that particular user doesn’t have that product on his list, to display “Add to Watchlist”. Otherwise, it should display
a button that says “Remove from Watchlist”.
I need to get all of the IDs for the entries of the auction_watchlist table for the logged in user. Then, I need to
compare those IDs with the watchlist IDs stored in the auctions_watchlists_product_id. Since there will be inevitable
a match (since one of those tables has a Many To Many relationship with the other), I will compare those results with
the ID of the listing I’m currently in (which is stored in the listing_id variable in the display_entry() view.) If
the product where I’m currently in has the same ID as one of the products in the user’s watchlist, I should show the
“Remove” button. Otherwise, I should show the “Add to watchlist” button.
Actually, the “if” statement to decide whether to show the “Add to Watchlist” or “Remove” button is less complicated
than I’m making it to be. I only need to check the number for the product ID on the product page’s URL (which is
stored in the listing_id parameter on the display_listing() view), and compare it with all of the products stored on
that user’s watchlist. If there’s a match, I will display the “Remove” button. Otherwise, I will display the “Add
to Watchlist” button.
Then, I will send the listing_id parameter and the results of the subquery to the display_listing.html file from the
display_listing() function. Then, using Jinja, I will use an “if” statement that checks if the number in the listing
is inside of the subquery. If it is, I will display the “Remove” button. Otherwise, I will display the “Add to
Watchlist” button.
To check if the user has a product on their watchlist, I will make a subquery which will take data from both the
auctions_watchlists and the auctions_watchlists_product_id tables. In this case, I only want the product IDs for the
watchlist for the currently logged in user. I will do the subquery using Query Set notation, not SQL. To do that, I
will first make a Query Set query to obtain all of the IDs from the Watchlist table for the currently logged in user.
Then, I would make a query on the auctions_watchlists_product_id table to look for all of the products, but only
those whose watchlist ID match those as the ones in the previous query (source: Ramast’s reply from
https://stackoverflow.com/questions/8556297/how-to-subquery-in-queryset-in-django .)
Upon further consideration, I don’t even need to use a subquery to check if the user added a product to their
watchlist. I could simply use a Query set query to select all items on the Watchlist table for the currently logged in
user, and send it to the listing.html file. Then, with an “if” statement, I would check if the ID of the current number
is inside that “array” with the user’s products. If it is, I will display the “remove” button.
I could check if the number stored in the parameter "listing_id" exists in the Watchlist table for the currently
logged-in user using an "if" statement through notation like the following: "if listing_id in Watchlists.objects.filter"
(source: Siddharth Gupta's reply from
https://stackoverflow.com/questions/32002207/how-to-check-if-an-element-is-present-in-a-django-queryset .)
By looking at my submission for the “Wiki” homework assignment, I think I may have the solution that I’m looking for to check
when to display the “Remove” or the “Add to Watchlist” buttons. I will first go to the views.py file to the view that displays
that page for a particular listing/product. Here, I will declare an empty array, which will be used for storing all of the
products in the watchlist from the currently-logged user. Then, I will create a “for” loop and an “if” statement to populate
that array with all of the products stored in a watchlist that belong to the currently-logged user. Then, I will send that
array to the listing.html page.
Then, I will go to the listing.html file, and I will use Jinja notation. Here, I will use a “for” loop and an “if” statement
to check for each product within the array with the products stored in the user’s watchlist. If that array has a product with
the same ID as the current product being displayed on the webpage, that means the user has that product already stored on
their watchlist. So, I will display the “Remove” button. Otherwise, I will display the “Add to Watchlist” button.
To store the product IDs, I need to use the "values_list()" from the Query Set notation (source:
https://docs.djangoproject.com/en/4.0/ref/models/querysets/ .)
To remove the parentheses and the quotations marks when using values_list() while using a Query Set statement,
I need to add the parameter "flat=True" (source: https://docs.djangoproject.com/en/4.0/ref/models/querysets/ .)
I was finally able to modify the button from "Add to Wishlist" to "Remove". To do it, I needed to convert both
the listing_id variable (the parameter that stores the ID for the product whose page the user has currently clicked
on) and the "product" (or "i") variable in the "for" loop that populates the user's watchlist into integers. That
way, I can safely compare both numbers. If both numbers are the same, a boolean variable will tell the "Remove"
button to appear on the product's page. Otherwise, the "Add to Watchlist" will appear. To convert a value into an
integer, I need to use the int() function (source:
https://www.freecodecamp.org/news/python-convert-string-to-int-how-to-cast-a-string-in-python/#:~:text=To%20convert%2C%20or%20cast%2C%20a,int(%22str%22)%20.)
BUG Fix: The "Add to Watchlist" button doesn't change to "Remove" immediately after adding a product to that user's watchlist.
Instead, I have to exit the currently selected product's page, and the re-enter it so that I can notice the difference.
So, I will try reloading the current page by using HttpResponseRedirect.
BUG Fix: If the user’s not logged in, I get an error message from Django saying "User matching query does not exist" whenever
I try to enter into a product’s page. To fix it, I will try to use the function “request.user.is_authenticated” to check
if the uer’s logged in before trying to insert their ID into the variable that I’m using to store it (source:
https://www.delftstack.com/howto/django/django-check-logged-in-user/#:~:text=Check%20the%20Logged%20in%20User%20in%20Views%20in%20Django,-In%20views%2C%20we&text=We%20can%20use%20request.,in%2C%20it%20will%20return%20True%20. )
Now, I will need to remove the product from the user’s watchlist if they click on “Remove”, by removing that entry from the
database. To do that, I will create a new input for the forms that have the “Add to Watchlist” and “Remove” buttons,
respectively. That new input, which will be hidden, will have their “name” attribute to have different values, so that the
views.py file can differentiate between the “Add” and the “Remove” buttons whenever a user clicks on them. For instance, I can
add “ name=’add’ ” for the “Add” button, and “ name=’remove’ ” for the “Remove” button.
Then in the display_listing() view, I will put an “if” statement that checks if the “add” or the “remove” buttons were pressed.
If the “add” input was submitted, I will insert that product into the watchlist. Meanwhile, if the “remove” button was submitted,
I will delete that entry from the database. I need to check how to delete an entry from a database by using Query Set notation.
An alternative method would be to use the same “name” attribute for both the “Add” and “Remove” inputs. Then I would check on the
display_listing() view the value of the input. If the input’s value is “Add to Watchlist”, I will execute the Query Set statements
to add that product into the user’s watchlist. Otherwise, if the input is “Remove from Watchlist”, I will execute the Query Set
statements to remove that entry from the user’s watchlist in the database.
To delete a record from a database by using Query Set notation, I need to use the following function:
“Table.objects.filter(id=id).delete()” (source: Wolph’s reply on
https://stackoverflow.com/questions/3805958/how-to-delete-a-record-in-django-models .)
Users should be able to see the bid form from the listing.html page (the page that displays the currently selected
product.) So, there’s no need (and I shouldn’t) create a new view. I should use a currently existing view(), which, in
this case, should be the display_listing() view. I will import the bid form into that view, and send it via Jinja to
listing.html.
However, the only users who should be able to bid should be logged users. The assignment even says “If the user is
signed in, the user should be able to bid on the item”. So, I should use the function that says that, if the user’s
signed in, that the user should be able to see the bid form.
Also, I will have to insert the bid amount into two tables: The Bids table, and the Listings table. I need to update
the price of the product after someone makes a bid, so the Listing table needs to be updated. Also, since it seems
that one of the questions of this homework assignment asks me to show a page with all of the bids made by a user, I
will need to keep track of that user’s bids. That can be done by inserting the bids on the Bids table.
Now, to detect whether I clicked on either “Bid” or “Add to Watchlist”, I will go the display_listing() view, and I
will use the following line of code to detect which submit button I clicked: "if 'post_form_name' in request.POST:"
(source: Damon Abdiel’s reply on
https://stackoverflow.com/questions/866272/how-can-i-build-multiple-submit-buttons-django-form .)
Next, I need to add the code that will be executed if the user clicks on the “Bid” button. I will insert whatever
number is inserted in there into the database on the Bid table. But, for debugging reasons, I could add a message to
be printed if the user clicks on “Bid.”
The 3 columns to which I need to insert in the Bid model whenever a user bids for a product are “listing”, “buyer”, and “bid”.
Both “listing” and “buyer” are foreign keys. But still, I need to get the proper user and product ID, and insert them into
“listing” and “buyer”, respectively. I will get the user ID from “request.user”, since I need the ID of the user that’s currently
logged-in, and I need to insert it into “buyer”. As for the “listing” column, I will get the ID of the product that’s being
displayed in the current page. That’s stored in the 2nd parameter of the display_listing() view (listing_id).
Finally, for the “bid” column, I will get the number that was typed on the “Your bid” input from the POST form. However, I need
to put a set of conditions on it. The user won’t be able to just type any number as the price for their bids. They will have to
type a bid that’s equal or higher than the initial bid. Additionally, if another user had already placed a bid on that product,
then the current user needs to place a bid that’s larger than the previous user. It wouldn’t make sense if a current user can
buy the product if they place the same bid as a previous user who had previously bidded on that item. So, if the current user
places a bid larger or equal to the initial bid, and larger than any other previous bid from other users, their bid will be
inserted into the Bid model in the “bid” column.
Otherwise, I need to display an error message saying that the bid needs to be higher than any other previous bid, and the bid
that was entered in the input box shouldn’t be inserted into the database.
The thing is, I need to be able to differentiate between the initial price and if at least someone else has already placed a
bid for a particular product. Otherwise, the “if” statement won’t be able to tell the user if they are able to place a bid
that’s exactly the same as the price for that product (the initial bid), or if they are forced to place a bid that’s higher
than the price that’s being shown on the page (in the case that another person has already placed a bid on that item).
One way to tell my “if” statement if the user can enter a bid that’s equal to the price being displayed is by checking the Bid
table to see if there’s any entry that has the ID of the product that’s currently being displayed on the page. If that product
doesn’t have any entry on that table, then the prices being shown on the page is the initial bid. So, the user will be able to
place a bid that’s the same (or higher) than the one being displayed on the page. Otherwise, if there’s at least 1 entry on the
Bids table for that particular product, they user will not be able to place a bid that’s equal to the price being diplayed on
the page. They will only be able to enter a price higher than the one from the bid placed by the previous user.
Also, the current user will be able to tell if someone has already placed a bid for that product since the page will display the
name of the bidder that had placed the previous bid. That way, the user will be able to tell if they can place a bid that’s
equal to the price being displayed or not.
* Note for a future algorithm: once I get all of the bids that have been placed for a specific product, and if the seller wants
to close the auction for that item, I will use a Query Set statement that gets all of the bids from the Bid model for that
product. Then, I will obtain the bid that has the highest value for the “bid” column. I could use something similar to the
“MAX” function from SQL. Then, that would be the winner for that auction.
Remember: I cannot insert anything on any of the 3 columns for the Bids table unless all of the conditions from the “if”
statement are met.
To check whether a person has placed a bid on an item, I will count the number of entries for the Bids table for the currently
selected product. If it’s 0, then nobody has placed a bid on the current product. Otherwise, at least someone has bidded on the
product. To count the number of entries in a table by using Query Set, I need to use the following format:
variable = Model_name.objects.filter(column=what_youre_looking_for).count() (source: Mikhail Chernykh’s reply on
https://stackoverflow.com/questions/15635790/how-to-count-the-number-of-rows-in-a-database-table-in-django .)
To avoid any issues when comparing numbers, I will make sure to convert the numbers obtained from the database and the post
form into floats. That can be done with the float() function (source:
https://www.datacamp.com/community/tutorials/python-data-type-conversion .)
Now that the display_listing() view is properly detecting the amount typed by the user on the bid input, I need to
modify the database properly by adding entries into the Bids table, and modifying the “initial_price” column of the
Listing table with the bid of the current user. If the user types any appropriate bid amount, I will always add an
entry on the Bids table. It doesn’t matter if the same person bids multiple times for the same product, as long as
their current bid is higher than their previous one. I will also update the “initial_price” column of the Listings
table every time that the user types an appropriate bid.
The only thing that I need to pay attention is that, if at least 1 user bids on an item, I will display their name on
the page. I will put something like “Current highest bidder: (Name.)”
To add an entry on the Bids table with the bid placed by the user, I will use a Query Set statement that says “insert
the ID of the currently logged in user and of the product of the current page in the ‘buyer’ and ‘listing’ columns.
Then, insert the bid from the POST form in the ‘bid’ column.” I may not need to use the “column_name.add(variable)”
snippet of code since I’m using foreign keys, not Many to Many relationships. That is, after typing “.save()”, the
query should add the entry into the Bids table.
Next, I would have to update the “initial_price” column from the Listings table with the amount from the current bid.
I think that a Query Set statement similar to the one in the previous paragraph would work. The only thing that would
change would be the column that I should use.
Actually, I’m wrong: I can’t use the same Query Set for inserting an entry into a table for updating the column of an
entry. I need to specify first the exact entry that I want to modify (like by using the “filter” attribute), and then
I can update that entry.
To update an entry using Query Set, I need to use the update() function. I would need syntax like the following:
Model_name.objects.filter(pk=id_of_entry_I_want_to_modify).update(column='new_value_for_column')
(source: Daniel Roseman’s reply on
https://stackoverflow.com/questions/2712682/how-to-select-a-record-and-update-it-with-a-single-queryset-in-django )
BUG FIX: to add an entry into the Bids table, I needed an instance of the Listing class to insert the ID of the
current product into the “listing” column (I guess because it’s using a foreign key). That’s done using a Query Set
with get(), and inserting that into a variable.
Now, I want to display the name of the bidder for the person who placed the previous bid (if at least one person has
bidden for the current product.) For that, I need to call the “buyer” column of the Bids table (using syntax like
“bid.buyer” via Django.) But the thing is that I need the name of the person who has the highest bid. I need to look
for a Query Set that gets a maximum value from a list of entries for a table (like MAX did in SQL.)
Or, since the maximum bid is the same as the value in “initial_price” on the Listings table, I could check if there’s
at least 1 bidder in the Bids table for the current product. If there is, I will check the ID of the bidder for the
current product that has bidden for the price being shown on the page (which is stored in the “initial_price” column.)
I will create an empty variable at the start of the display_listing() view, which will store the name of the highest
bidder for the current product. It will be initially empty. Then, I will check what I specified in the last paragraph.
If I find a bidder, I will store the name of that bidder in a variable, and send it to listing.html to print it.
The part of making the list inactive once the seller clicks on a button on their own listing can be relatively
simple to do. The problem will be choosing the highest bidder and setting them as the winner of the auction.
So, I will first turn the product for a particular listing to become inactive when the seller presses a button to
close the auction. If it’s inactive, I’m planning on hiding the listing from the “Active Listings” page.
To make the button clickable and make something happen to occur, I would need to use a “Submit” button by using a POST
form. Remember to give a different ID to that submit button compared to the other buttons in the product page. That
button will say something like “Close Auction”. Then, after the user clicks it, the views.py file will get that
request, and do something.
I could create a new view for adding the functionality that closes the auction if the seller clicks on the “close
auction” button.
Remember, only the seller should see the “close auction” button. So, I’ll have to put a condition saying that the user
must be logged in by using the decorator that checks if a user is logged in on top of the “close auction” view. Then,
I need to put an extra condition saying that the “close auction” button should only appear if the ID of the user is
the same as the ID of the seller of that particular product.
After further consideration, I see that I SHOULDN’T create a new view for closing the auction. Otherwise, I would need
to create a new URL, and add it on the urls.py file. And the thing is that I want to display all of this in the
listing.html page. So, I will need to add the functionality for closing the auction inside of the display_listing()
view.
The first important task will be to ONLY render the “Close auction” button if that product’s seller is logged in. I
can do that by sending a variable from the display_listing() view to the listing.html file, and using an “if”
statement in the listing.html file. I will check that, if the seller for the current product is logged in, I will
render the “Close auction” button.
Now that I’m detecting if the user has clicked on the “Close Auction” button, I will need to change the property from
the Listings model that says whether as listing is active. I will change it to False. The property is called “active”,
I have to update it to False. I need to use a Query set statement.
Now, to make the highest bidder the winner of the auction, I will first get from the database who’s the highest
bidder. Then, I will display their name once they win the auction. This will be relatively complicated, since I need
to get the data from the Bid model to get the bidder with the highest bid.
For debugging purposes, I will store the name of the auction winner in a variable, and print it on the listing page.
To make the homework assignment easier, I will create a separate page that will display the closed listings. That way,
I’ll be able to easily find closed listings (since they should disappear from the “Active Listings” page), and I’ll
be able to easily display a message to the highest bidder telling them that they won the auction (if they enter into
that product page.)
But first, before creating a new page and a new URL, I will show the auction’s winner a victory message if they are
logged in in the closed listing’s page. I will need to compare the name of the currently logged in user, and the name
of the auction’s winner. If they are the same, I will display the victory message. The column of the User model that
stores the names of the users is “username”.
I will later also need to display all of the comments stored in the database within the current product page. But,
to do that, I will need to modify the display_listing() view to be able to get the comments from the POST form, and
store them on the database. THEN I will send the comments via Jinja to the current product page (the listing.html
file.)
BUG: If I edit a bid from the admin panel, and then I enter into that product’s page on the web app, I get the
following error: “DoesNotExist at /listing/3. Bids matching query does not exist.” I think that happens because I’m
getting the bid by using a query by the amount of money, not from the bid’s ID. I need to fix that.
To fix the above bug, I will fix my code so that, to look for the highest bid, I will use a function that gets the
maximum value out of all of the bids for a particular product. I can do that by importing a library called “Max”. Then,
I will obtain all of the bids for a particular product using filter(). After that, I will get the max bid value by
using the following function: aggregate(Max(‘column’)). Finally, to get the instance of the bid with the highest value,
while also accepting empty values (if nobody has bid for a particular product), I will use the following snippet:
instance = variable.order_by(‘-column’).first() (source: Sasha Chedygov’s reply on
https://stackoverflow.com/questions/844591/how-to-do-select-max-in-django .)
BUG: Even if I delete a bid, a product will never return to its original price, and it won’t return to its previous
lower bid either. That is, if a product is worth $70, and someone bids $79 for it, even if I delete that bid, the
price will still charge you $79. It will never go back to its original $70 price. I need to fix this.
As to how to return to the original price if the bid deleted was the only bid, I will need to store the initial price
somewhere else in the database. For instance, I could create a new column called “current_price” for the Listings
table. That column will have the exact same value as “initial_price” if no one has bid for that product (or if all of
the bids for that product have been deleted.) However, I might have to delete and rebuild the database for this to
work properly.
I need to look at the code from the display_listing() view to see where I’m getting the current price of the product
that I’m displaying on a product page. I need that price to be equal to the highest bid stored in the database.
The problem may be the line “<b>Current Bid:</b> ${{listing_data.initial_price}}”. I shouldn’t take the price of the
bid from the initial_price column from the Listings table. I need to take the max value for the “bid” column for
the Bids table.
I could create a condition saying that, if there are no bids, display the price stored in “initial_price”. I will
modify my code so that initial_price is never modified (so that I could always get the initial price if I delete all
of the bids.) But, if there are bids, that the price that should be displayed should be the max value for the “bid”
column from the Bids table.
To detect if there’s at least a bid, there are multiple ways. I could say that, if the variable that stores the highest
bid amount is not empty, that the current bid displayed on the page should be the variable that contains the highest
bid. Otherwise, I should display the price stored in “current_price”.
If the number of bids goes back to 0 (like, if I delete all bids for a particular product), I will reset the
current_price column back to the same value as initial_price, so that the product returns to its initial price.
To fix the problem of the price not being updated in a product page when a user places a bid, I will refresh the page
whenever the user clicks on “submit”. To do that, I think I need to use an HttpResponseRedirect on the
display_listing() view.
BUG: If I submit a bid that’s lower than the current bid, the web app will accept it, and no error message will be
displayed.
To fix this bug, I think I need to change the variable that compares the amount in the current price, and the price on
the bid placed by the user. The variable that I’m currently using to compare both price amounts is storing the price
stored in initial_price. Now that I have a new column called current_price to store the current bid, I need to modify
that variable. This is on the display_listing() page.
Since I can’t seem to show the confirmation message if a user successfully places a bid, and instantaneously update the
price of the product in the product page, what I’ll do is redirect the user to the home page whenever they place a
successful bid, and then show them a success flash message. To do that, I will use a function called “redirect(/)” to
redirect users to the home page after placing as successful bid. Then, by importing a flash message function, I will
store a confirmation message (source: https://youtu.be/8kBo91L8JTY ). That way, the product page will have enough time
to update the price of the product to which the user just bid. Also, they’ll be able to see a confirmation message
telling them that their bid has been placed.
"""
def display_listing(request, listing_id):
    """Render the page for a single listing and handle its POST actions.

    Handles four submit buttons posted from listing.html:
    'close_auction' (seller only), 'submit_add_or_remove' (watchlist),
    'submit_bid', and 'submit_comment'.

    Args:
        request: The incoming HttpRequest.
        listing_id: Primary key of the listing to display (arrives as a
            string from the URL pattern, so it is compared with int()/str()
            conversions below).

    Returns:
        An HttpResponse rendering auctions/listing.html, or a redirect to
        the home page after a successful bid / back to this page after a
        watchlist change.
    """
    # This obtains the listing that I want to display as iterable objects
    current_listing = Listings.objects.filter(id=listing_id)
    # This obtains a specific instance of a listing, which I'll need to store the listing in a watchlist
    current_listing_instance = Listings.objects.get(id=listing_id)
    # This stores the seller ID of the current listing
    # seller_id = current_listing.seller_id
    # This obtains ALL users (not just the seller); the template presumably
    # matches the seller by ID when displaying the seller's name
    seller = User.objects.all()
    # This imports the form that will store the bids
    bid_form = BidForm()
    # This imports the form that stores the comments
    comment_form = CommentForm()
    # DEBUGGING message that will show up if the user clicks on "Bid"
    debugging_message_bid_button = "You didn't click on the 'Bid' button."
    # Confirmation or error message for bids
    bid_message = ''
    # This gets the initial price of the product of the current page
    current_product_price = current_listing_instance.initial_price
    # This gets the real current price of the current product
    real_current_product_price = current_listing_instance.current_price
    # This checks the number of bids that have been placed for the current product, if any
    number_of_bids = Bids.objects.filter(listing=listing_id).count()
    # This will tell the product page whether to render the "Close auction" button
    display_close_auction_button = False
    # Debugging message to check if I'm getting the active status from the Listings model
    debugging_message_active_status = "Nothing has happened."
    # This will check if the current listing is active
    is_listing_active = current_listing_instance.active
    # This will make the "Close Auction" button to be active if the seller hasn't closed the auction
    is_close_auction_button_active = True
    # This declares the variable that will store the name of the winner of an auction
    auction_winner_name = "Nobody has won the auction yet."
    # This declares the variable that will store the victory message for the winner of an auction
    victory_message = ''
    # This is the declaration of the variable that will display the amount for the highest bid
    highest_bid_amount = ''
    # This takes the highest bidder from the Bids model
    highest_bidder_id = "No one has bid for this listing yet."
    if number_of_bids > 0:
        # This stores an instance of a bid where the amount bid is equal to the price displayed on the product page ...
        # highest_bid_instance = Bids.objects.get(bid=current_product_price)
        # This obtains all the bids for the current product
        all_bids_for_current_product = Bids.objects.filter(listing=listing_id)
        # This obtains the instance of the highest bid (descending sort on
        # 'bid', then take the first row)
        highest_bid_instance = all_bids_for_current_product.order_by('-bid').first()
        # This stores the name of the highest bidder
        highest_bidder_id = highest_bid_instance.buyer
        # This stores the highest bid as a price amount
        highest_bid_amount = highest_bid_instance.bid
        # This will print the name of an auction's winner
        if highest_bid_instance.is_auction_winner:
            auction_winner_name = highest_bidder_id
    # If there are no bids for the current product, I will set its price back to its initial price ...
    elif number_of_bids == 0:
        Listings.objects.filter(pk=listing_id).update(current_price=current_product_price)
    # This updates Bids table so that the highest bidder is inserted into the database
    # Bids.objects.filter(bid=current_product_price).update(is_auction_winner=True)
    # This will check the database to decide whether to activate the "Close Auction" button
    if is_listing_active:
        is_close_auction_button_active = True
        debugging_message_active_status = "This listing is currently active."
    else:
        is_close_auction_button_active = False
        debugging_message_active_status = "This listing is NOT active."
    # If the user is logged in, I will store their ID
    if request.user.is_authenticated:
        logged_user = request.user # This stores the data from the currently logged in user
        logged_user_id = logged_user.id # PK of the currently logged in user
        # This stores the username of the user that's currently logged in
        logged_user_username = logged_user.username
        # This creates an instance of the User table, which I'll need to use in the Query Set syntax
        user_instance = User.objects.get(id=logged_user_id)
        # This stores the seller ID of the current product
        current_product_seller_id = current_listing_instance.seller_id_id
        # This checks if the current user is the seller of the current product
        if logged_user_id == current_product_seller_id:
            # If the condition applies, I will render the button
            display_close_auction_button = True
        # This executes if the user clicks on "Close Auction", and closes the auction
        if 'close_auction' in request.POST:
            # This sets the current listing to become inactive
            Listings.objects.filter(pk=listing_id).update(active=False)
            # This disables the "Close Auction" button
            is_close_auction_button_active = False
            # This stores the winner of the auction
            # auction_winner_name = highest_bidder_id
            # This updates Bids table so that the highest bidder is inserted into the database.
            # NOTE(review): this matches bids by amount, not by listing — if two
            # listings share the same current price this could flag bids on
            # another listing as winners; TODO confirm.
            Bids.objects.filter(bid=real_current_product_price).update(is_auction_winner=True)
        # This will check if somebody won the current auction
        if auction_winner_name != "Nobody has won the auction yet.":
            # This will check if the winner of the auction is currently logged in
            if str(auction_winner_name) == str(logged_user_username):
                # This will print a message telling the auction's winner that they won the auction
                victory_message = "Congrats! You have won the auction for the current listing."
            # DEBUGGING message
            else:
                victory_message = "Sorry. You're not the winner of this auction."
        # This array will store all the products from a user's watchlist
        watchlist_array = []
        # This will make it so that the "Remove" button won't appear by default, and to prevent a bug that makes
        # the "Add to Watchlist" button to appear multiple times
        display_remove_button = False
        # This gets all the products inside the currently logged-in user's watchlist
        users_products_in_watchlist = Watchlists.objects.values_list('product_id', flat=True).filter(user=logged_user_id)
        for product in users_products_in_watchlist:
            watchlist_array.append(product)
            if int(listing_id) == int(product):
                display_remove_button = True
        # if listing_id in Watchlists.objects.filter(user=logged_user_id):
        #     display_remove_button = True
        # else:
        #     display_remove_button = False
        # This stores the 1st half of the currently selected product's URL
        listing_url_1st_half = "listing/"
        # This stores the full URL for the currently selected product
        product_page_complete_url = listing_url_1st_half + listing_id
        # This executes if the user clicks on "Add to Watchlist"
        if 'submit_add_or_remove' in request.POST:
            add_or_remove = request.POST["add_or_remove"]
            if add_or_remove == "add":
                # This prepares the Query Set statement for inserting the product into the Watchlist table
                add_to_watchlist = Watchlists(user=user_instance, product_url=product_page_complete_url)
                add_to_watchlist.save() # This saves the entry into the database
                # This will add the product's ID into the Watchlist database
                add_to_watchlist.product_id.add(current_listing_instance)
            if add_or_remove == "remove":
                remove_message = "DEBUGGING MESSAGE: Remove from database"
                # This deletes the current product from the watchlist
                Watchlists.objects.filter(product_id=listing_id).delete()
            # This will reload the current page, so that the button changes without having to exit the page
            return HttpResponseRedirect(f"/listing/{listing_id}")
        # This checks if the "Bid" submit button was pressed
        elif 'submit_bid' in request.POST:
            debugging_message_bid_button = "Great! You clicked on the 'Bid' button!"
            # This stores the submitted bid on a variable
            submitted_bid = request.POST["your_bid"]
            # This checks if the user placed a bid that's equal or higher than the price shown on the product page
            if float(submitted_bid) >= float(real_current_product_price):
                # This checks if there's at least one bid in the Bids table
                if number_of_bids > 0:
                    # This checks if the current bid is greater than the previous one
                    if float(submitted_bid) > float(real_current_product_price):
                        debugging_message_bid_button = "Good! Your bid is greater than the one placed by someone else."
                        bid_message = "Your bid has been successfully registered!"
                        # This inserts the bid into the Bids table
                        insert_bid_into_bids_table = Bids(buyer=user_instance, listing=current_listing_instance,
                                                          bid=submitted_bid)
                        insert_bid_into_bids_table.save() # This saves the entry into the database
                        # This modifies the price of the product on the Listings table
                        Listings.objects.filter(pk=listing_id).update(current_price=submitted_bid)
                        # This will reload the current page, so that the price changes without having to exit the page
                        # return HttpResponseRedirect(f"/listing/{listing_id}")
                        # This will redirect the user to the home page and show them a confirmation message
                        messages.success(request, "Your bid has been successfully registered!")
                        return redirect('/')
                    # If the current bid is the same as the previous bid, I'll show an error message
                    elif float(submitted_bid) == float(real_current_product_price):
                        debugging_message_bid_button = "Sorry, but you need to place a bid that's higher than the previous one."
                        # Error message
                        bid_message = "Sorry, but you need to place a bid that's higher than the previous one."
                # This checks if there are no bids on the Bids table
                elif number_of_bids == 0:
                    debugging_message_bid_button = "Awesome! You're the first person to bid on this product!"
                    # Confirmation message
                    bid_message = "Your bid has been successfully registered!"
                    # This inserts the bid into the Bids table
                    insert_bid_into_bids_table = Bids(buyer=user_instance, listing=current_listing_instance,
                                                      bid=submitted_bid)
                    insert_bid_into_bids_table.save() # This saves the entry into the database
                    # This modifies the price of the product on the Listings table
                    Listings.objects.filter(pk=listing_id).update(current_price=submitted_bid)
                    # This will reload the current page
                    # return HttpResponseRedirect(f"/listing/{listing_id}")
                    # This will redirect the user to the home page and show them a confirmation message
                    messages.success(request, "Your bid has been successfully registered!")
                    return redirect('/')
            # This tells the user that they need to place a bid that's at least as high as the one displayed on the page
            else:
                debugging_message_bid_button = "Sorry, but you need to place a bid that's at least as high as the one currently listed."
                # Error message
                bid_message = "Sorry, but you need to place a bid that's at least as high as the one currently listed."
    # This will prevent any bugs that won't let me show a product's page if I'm not logged in
    else:
        watchlist_array = []
        display_remove_button = False
    # This executes if the user clicks on "Comment".
    # NOTE(review): user_instance is only defined inside the authenticated
    # branch above, so an anonymous POST containing 'submit_comment' would
    # raise NameError here — TODO confirm the template only shows the comment
    # form to logged-in users.
    if 'submit_comment' in request.POST:
        # This gets the comment from the POST form
        comment = request.POST["comment"]
        # This inserts the comment into the database, on the Comments model
        insert_comment = Comments(comment=comment, user=user_instance, listing=current_listing_instance)
        insert_comment.save()
    # This will obtain all the comments for the current listing
    current_listing_comments = Comments.objects.filter(listing=listing_id)
    # This renders the selected listing
    return render(request, "auctions/listing.html", {
        "current_listing": current_listing,
        "seller": seller,
        "current_listing_id": listing_id,
        "watchlist_array": watchlist_array,
        "display_remove_button": display_remove_button,
        "bid_form": bid_form,
        "debugging_message_bid_button": debugging_message_bid_button,
        "bid_message": bid_message,
        "display_close_auction_button": display_close_auction_button,
        "is_close_auction_button_active": is_close_auction_button_active,
        "debugging_message_active_status": debugging_message_active_status,
        "highest_bidder_id": highest_bidder_id,
        "auction_winner_name": auction_winner_name,
        "victory_message": victory_message,
        "comment_form": comment_form,
        "current_listing_comments": current_listing_comments,
        "highest_bid_amount": highest_bid_amount
        # "logged_user_username": logged_user_username
        # "users_products_in_watchlist": users_products_in_watchlist
    })
""" This is the view for the Watchlist page. This is really similar to the index page, that is, the page that displays
the active listings. The difference is that, instead of displaying all of the active listings, I will only display the
products from the watchlist of the user that’s currently logged in.
The first thing that I need to do is to create the page that will display the Watchlists. I need to create the html
file for the page. I will also have to include in the layout of the site (in layout.html) a link to the watchlists
page, which will only be accessible users who have logged in. I will also need to add a link to the page in urls.py to
the Watchlist page. Additionally, I will need to create a view function to display the watchlist. This needs to be
done in the watchlist() view.
First, I need to store an instance of the user’s watchlist. This is done with Query Set’s get() function. Turns out I
can't do this since each time that an user adds something to a watchlist, a new entry is created on the Watchlist
table. So, I will have to use Query Set's filter() function, not get().
I will need to store all of the listing IDs from that watchlist into a variable, to have all of the products inside
that particular watchlist in a single variable. Then, I will send that variable via Jinja to the watchlist page.
Afterwards, I will use a “for” loop to print each element of that variable, which corresponds to every item in
that person’s watchlist.
That way, I’m getting the IDs of every product that belong to the logged user’s watchlist. Now that I have those IDs,
I could go to the watchlist() view, and create another “for” loop. In this case, I want to compare the IDs of every
product in the user’s watchlist to the product IDs from the Listings table. Then, if both IDs match, I will get that
all of the data from those products where a match occurred. This, way, I will get all of the data from each product.
Finally, I will send that to the watchlist.html page via Jinja, and print that information.
The way that I’m going to print the information for each product in the user’s watchlist will be similar (if not the
same) as I printed every product in the “Active Listings” page.
"""
@login_required
def watchlist(request):
    """Render the watchlist page for the currently logged-in user.

    Sends two query sets to the template: the product IDs in this user's
    watchlist, and every active listing. The template cross-references the
    two to display full product data for each watchlisted item.

    Args:
        request: The incoming HttpRequest (user is guaranteed authenticated
            by the @login_required decorator).

    Returns:
        An HttpResponse rendering auctions/watchlist.html.
    """
    # ID of the user that's logged in
    logged_user_id = request.user.id
    # All the product IDs inside the currently logged user's watchlist
    # (values_list with flat=True yields the bare IDs, not tuples)
    watchlist_products = Watchlists.objects.values_list('product_id', flat=True).filter(user=logged_user_id)
    # All the info from all the products in the active listings; the template
    # matches these against watchlist_products to get each product's data
    active_products = Listings.objects.filter(active=True)
    return render(request, "auctions/watchlist.html", {
        "watchlist_products": watchlist_products,
        "active_products": active_products,
    })
""" This is the view for the Categories page.
For question 6 in general, I will need to create 2 pages: one for displaying all of the categories, and one for
displaying all of the active listings for a specific category. For now, I will create the page that will only display
the name of all categories.
So, since I need to create a new page, I need to create a view, a URL, and an HTML file.
Everyone will be able to access the categories page, even if they’re not logged.
Now, I need to get all of the categories from the Categories table, and store them in a variable. That can be done
with Query Set’s filter() function, or with objects.all(), since I want all of the categories.
"""
def categories(request):
    """Display the name of every category stored in the Categories table."""
    # Every row from the Categories table; the template iterates over them.
    all_categories = Categories.objects.all()
    context = {"category_list": all_categories}
    return render(request, "auctions/categories.html", context)
""" This will show the page that displays all of the active listings for a specific category.
Once again, I need to create a view, a URL, and an HTML file. The URL should be similar to that as the one used for
the display_listing() view, since there will be a different page for each specific category. The URL could be something
like “/categories/1”, or “categories/Fashion”. The latter would make it easier for users to bookmark the page, or to
type directly into the URL their favorite category. However, since I don’t know if I made unique each instance of the
name of the category, I will play it safe and use the ID of each category, and display it in the URL. So, basically, I
will make the URL for the specific categories really similar to the display_listing() view.
The page that shows all of the active listings for a specific category will be really similar to the index.html file
(the Active Listings page.) In fact, it will be the same, except that it will only display the listings that belong to
a specific category.
To obtain the specific category that the user typed or clicked, and check to which category that belongs in the
database, I need to use the get() function from Query Set, and check that the ID is the same as the one typed as a
parameter in the URL.
Now, I need to obtain all of the products in the Listings table that meet two requirements: that are active, and that
belong to the category of the current page. To check that the two conditions are fulfilled, I could use an “if”
statement. The columns that I want to check from the Listings table are “category” and “active”. And, since I will be
obtaining multiple products, I will use Query Set’s filter() function. Now that I think about it, I can simply put the
conditions in the filter() function without needing to use an “if” statement.
"""
def category_listings(request, category_id):
    """Show all active listings that belong to the category *category_id*.

    The category is addressed by primary key (taken from the URL), and the
    page mirrors the index page but filtered to one category.
    """
    # This will obtain the current category from the Categories table
    current_category_instance = Categories.objects.get(id=category_id)
    # This stores the name of the current category
    current_category_name = current_category_instance.category
    # Active listings whose ``category`` matches the current category name.
    products_in_selected_category = Listings.objects.filter(category=current_category_name, active=True)
    return render(request, "auctions/category_listings.html", {
        "current_category_instance": current_category_instance,
        "products_in_selected_category": products_in_selected_category,
}) | eduardoluis11/commerce | auctions/views.py | views.py | py | 78,265 | python | en | code | 0 | github-code | 13 |
31045830723 | #-*- codeing=utf-8 -*-
#@time: 2020/9/29 15:45
#@Author: Shang-gang Lee
# Grid-search the ``max_features`` hyper-parameter of a RandomForestClassifier
# on the iris dataset, dump the CV results to disk, then plot the mean and
# std train/test score curves.
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV
import pandas as pd

data, label = load_iris(return_X_y=True)
# Fixed-size forest; only max_features is tuned below.
RFC = RandomForestClassifier(n_estimators=9, criterion='gini')
param = {'max_features': list(range(1, 5))}  # iris has exactly 4 features
clf = GridSearchCV(estimator=RFC, param_grid=param, cv=5, scoring='accuracy', n_jobs=-1, return_train_score=True)
clf.fit(data, label)
print("best_params:", clf.best_params_)
print('best_score:', clf.best_score_)
print(clf.cv_results_)
results = pd.DataFrame(clf.cv_results_)
# NOTE: the file contains CSV data despite the .xls extension (name kept for
# compatibility with the existing workflow).
results.to_csv('max_features-result.xls', index=False)

# Visualisation: reload the dumped results and plot the score curves.
max_features = pd.read_csv('max_features-result.xls')
mean_test_score = max_features['mean_test_score']
std_test_score = max_features['std_test_score']
mean_train_score = max_features['mean_train_score']
std_train_score = max_features['std_train_score']

plt.figure(figsize=(12, 8))
plt.plot(mean_test_score)
plt.plot(mean_train_score)
plt.legend(['mean_test_score', 'mean_train_score'])
plt.title('fine-tuning-max_features')
plt.xlabel('max_features')
# BUG FIX: this figure plots the MEAN scores; the label previously said 'std-score'.
plt.ylabel('mean-score')
plt.xticks(max_features.param_max_features)
plt.show()

plt.figure(figsize=(12, 8))
plt.plot(std_test_score)
plt.plot(std_train_score)
plt.legend(['std_test_score', 'std_train_score'])
plt.title('fine-tuning-max_features')
plt.xlabel('max_features')
# BUG FIX: this figure plots the STD scores; the label previously said 'mean-score'.
plt.ylabel('std-score')
plt.xticks(max_features.param_max_features)
plt.show()
| shanggangli/Machine-learning | fine-tuning-ensemble/fine-tuning-max_feature-RFC.py | fine-tuning-max_feature-RFC.py | py | 1,532 | python | en | code | 0 | github-code | 13 |
73640665937 | from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver import ChromeOptions
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from time import sleep
class Data :
    """Scrape a GitHub "topics"-style listing page with headless Chrome."""
    def __init__(self,url):
        # Page to scrape; the scrape runs eagerly on construction.
        self.url = url
        self.data = self.get_data()
    def get_data(self) :
        """Load ``self.url``, click "Load more" until it disappears, and
        return a dict of parallel lists: Topics, Descriptions, Topics_url."""
        options = ChromeOptions()
        options.add_argument("headless")
        options.add_argument('--log-level=1')
        driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()) , options=options)
        driver.get(self.url)
        sleep(2)
        # Keep clicking the "Load more" button until no such button is found.
        while True :
            Load_More = driver.find_elements(By.CSS_SELECTOR , 'button[data-disable-with="Loading more…"]' )
            if len(Load_More) > 0 :
                Load_More[0].click()
                sleep(1)
            else :
                break
        Topics = [elem.text for elem in driver.find_elements(By.CSS_SELECTOR ,'p.f3.lh-condensed.mb-0.mt-1.Link--primary')]
        Descriptions = [ elem.text for elem in driver.find_elements(By.CSS_SELECTOR ,'p.f5.color-fg-muted.mb-0.mt-1')]
        Topics_url = [elem.get_attribute('href') for elem in driver.find_elements(By.CSS_SELECTOR ,'a.no-underline.flex-grow-0')]
        # NOTE(review): each ``del`` shifts the list left, so these three
        # statements remove ORIGINAL indices 0, 2 and 4 — presumably meant to
        # drop leading non-topic elements; confirm against the page markup.
        del Topics[0]
        del Topics[1]
        del Topics[2]
        # Same shifting applies here: original indices 0, 2 and 4 are removed.
        del Descriptions[0]
        del Descriptions[1]
        del Descriptions[2]
        data = {'Topics' : Topics , 'Descriptions' :Descriptions , 'Topics_url' : Topics_url}
return data | ChoukriLach/Github-Scraping | Data.py | Data.py | py | 1,581 | python | en | code | 0 | github-code | 13 |
4249333857 | from typing import Optional, Any, List, Type, TypeVar, Union
from app.exceptions import *
from aiogoogle import Aiogoogle
from app.core.config import settings
from app.providers.google.utills import build_client_creds, build_aiogoogle
from bson.objectid import ObjectId
from app.database import get_db
from app.services.models import instance, Base
from app.services.models.account import Account
from app.services.models.user import User
from app.services.models.addon.sequence import Sequence
from app.services.models.addon.template import Template
from app.services.models.addon.test import Test
class AdminDashboardService():
    """Service layer backing the admin dashboard.

    Wraps the Mongo (ODM) models — users, accounts, templates, sequences,
    tests — with list / edit / activate / connect operations.  Every lookup
    method raises :class:`AppErrors` when the referenced document does not
    exist, preserving the original per-method error messages.
    """

    def __init__(self):
        # Register the shared ODM ``instance`` with a database handle exactly
        # once per process: if ``instance.db`` is already set, reuse it;
        # otherwise create a new handle and register it.
        initialized = False
        try:
            if instance.db:
                initialized = True
        except Exception:
            initialized = False
        self.db = None
        if not initialized:
            self.db = get_db()
            instance.init(self.db)

    ####################################################################################
    ######################## Internal helpers ##########################################
    ####################################################################################
    @staticmethod
    async def _list_all(model, limit: int):
        """Return up to *limit* documents of *model* as a plain list."""
        cursor = model.find().limit(limit)
        return list(await cursor.to_list(length=limit))

    @staticmethod
    async def _get_or_raise(model, obj_id: str, message: str):
        """Fetch one *model* document by id or raise ``AppErrors(message)``."""
        obj = await model.find_one({'_id': ObjectId(obj_id)})
        if not obj:
            raise AppErrors(message)
        return obj

    @staticmethod
    async def _save(obj):
        """Commit *obj*, reload it from the database, and return it."""
        await obj.commit()
        await obj.reload()
        return obj

    @staticmethod
    def _merge_data(obj, data: dict) -> None:
        """Merge truthy key/value pairs from *data* into ``obj.data``.

        Keys or values that are falsy are skipped; ``obj.data`` is only
        reassigned when at least one pair was merged (matching the original
        behavior exactly).
        """
        current_data = {}
        if obj.data:
            current_data = obj.data.to_mongo()
        changed = False
        for k, v in data.items():
            if k and v:
                current_data[k] = v
                changed = True
        if changed:
            obj.data = current_data

    ####################################################################################
    ######################## SHOW GET methods ##########################################
    ####################################################################################
    async def get_templates(self,
                            limit: int = 1000):
        """List up to *limit* templates."""
        return await self._list_all(Template, limit)

    async def get_users(self,
                        limit: int = 1000):
        """List up to *limit* users."""
        return await self._list_all(User, limit)

    async def get_accounts(self,
                           limit: int = 1000):
        """List up to *limit* accounts."""
        return await self._list_all(Account, limit)

    async def get_sequences(self,
                            limit: int = 1000):
        """List up to *limit* sequences."""
        return await self._list_all(Sequence, limit)

    async def get_tests(self,
                        limit: int = 1000):
        """List up to *limit* tests."""
        return await self._list_all(Test, limit)

    ####################################################################################
    ######################## CHANGE methods ##########################################
    ####################################################################################
    async def change_account_status(self,
                                    account_id: str,
                                    active: bool):
        """Set an account's ``active`` flag."""
        account = await self._get_or_raise(Account, account_id, f"No such account {account_id}")
        account.active = active
        return await self._save(account)

    async def connect_account(self,
                              account_id: str,
                              user_id: str) -> Any:
        """Assign *user_id* as the owner of account *account_id*."""
        user = await self._get_or_raise(User, user_id, f"No such user {user_id}")
        account = await self._get_or_raise(Account, account_id, f"No such account {account_id}")
        account.owner_id = user.id
        return await self._save(account)

    async def disconnect_account(self,
                                 account_id: str,
                                 user_id: str) -> Any:
        """Clear the owner of *account_id*.

        *user_id* is unused but kept for interface compatibility.
        """
        account = await self._get_or_raise(Account, account_id, f"No such account {account_id}")
        account.owner_id = None
        return await self._save(account)

    async def edit_template(self,
                            template_id: str,
                            data: dict):
        """Merge *data* into the template's ``data`` sub-document."""
        template = await self._get_or_raise(Template, template_id, f"template doesn't exist id={template_id}")
        self._merge_data(template, data)
        return await self._save(template)

    async def change_template_status(self,
                                     template_id: str,
                                     active: bool):
        """Set a template's ``active`` flag."""
        template = await self._get_or_raise(Template, template_id, f"template doesn't exist id={template_id}")
        template.active = active
        return await self._save(template)

    async def edit_sequence(self,
                            sequence_id: str,
                            data: dict):
        """Merge *data* into the sequence's ``data`` sub-document."""
        sequence = await self._get_or_raise(Sequence, sequence_id, f"sequence doesn't exist id={sequence_id}")
        self._merge_data(sequence, data)
        return await self._save(sequence)

    async def change_sequence_status(self,
                                     sequence_id: str,
                                     active: bool):
        """Set a sequence's ``active`` flag."""
        sequence = await self._get_or_raise(Sequence, sequence_id, f"sequence doesn't exist id={sequence_id}")
        sequence.active = active
        return await self._save(sequence)

    async def change_user_level(self,
                                user_id: str,
                                level: int):
        """Set ``custom_info.level`` on a user, preserving other keys."""
        user = await self._get_or_raise(User, user_id, f"user doesn't exist id={user_id}")
        custom_info = {}
        if user.custom_info:
            custom_info = user.custom_info.to_mongo()
        custom_info['level'] = level
        user.custom_info = custom_info
        return await self._save(user)

    async def connect_template(self,
                               template_id: str,
                               account_ids: List[str],
                               to_all: bool = False,
                               limit: int = 1000):
        """Attach accounts to a template (every account when *to_all*)."""
        if (not to_all) and (not account_ids):
            raise AppErrors(f"specify account_ids for template_id={template_id}")
        template = await self._get_or_raise(Template, template_id, f"there is not such template={template_id}")
        current_accounts = []
        if template.accounts:
            current_accounts = template.accounts.to_mongo()
        if to_all:
            accounts = await self._list_all(Account, limit)
            ids = [str(account['_id']) for account in accounts]
            if not ids:
                raise AppErrors("There is not accounts in the system")
            current_accounts.extend(ids)
        else:
            current_accounts.extend(account_ids)
        # De-duplicate so repeated connect calls stay idempotent.
        template.accounts = list(set(current_accounts))
        return await self._save(template)

    async def disconnect_template(self,
                                  template_id: str,
                                  account_ids: List[str],
                                  from_all: bool = False):
        """Detach accounts from a template (everything when *from_all*)."""
        if (not from_all) and (not account_ids):
            raise AppErrors(f"specify account_ids for template_id={template_id}")
        template = await self._get_or_raise(Template, template_id, f"there is not such template={template_id}")
        current_accounts = []
        if template.accounts:
            current_accounts = template.accounts.to_mongo()
        if from_all:
            current_accounts = []
        else:
            current_accounts = [item for item in current_accounts if item not in account_ids]
        template.accounts = list(set(current_accounts))
        return await self._save(template)

    ####################################################################################
    ######################## Clients methods #########################################
    ####################################################################################
    async def upload_csv(self,
                         csv):
        """TODO: import leads from an uploaded CSV file (not implemented)."""
        pass

    ####################################################################################
    ######################## CLOSE.com METHODS #########################################
    ####################################################################################
    async def send_sequence(self,
                            sequence_id: str):
        """TODO: trigger sending of a sequence (not implemented)."""
        pass

    async def send_leads(self,
                         upload_id: str):
        """TODO: push uploaded leads to CLOSE.com (not implemented)."""
        pass
40430963649 | def adunare2():
print("Introduceti va rog un numar de doua cifre:")
a = int(input())
z = a // 10
u = a % 10
print("Numarul rezultat prin adunarea zecilor si a unitatilor este:", z+u)
def adunare3():
    """Read a three-digit number and print the sum of its digits."""
    print("Introduceti va rog un numar de trei cifre:")
    a = int(input())
    # Peel the digits off with divmod: units first, then tens and hundreds.
    rest, u = divmod(a, 10)
    s, z = divmod(rest, 10)
    print("Numarul rezultat prin adunarea sutelor, zecilor si a unitatilor este:", s + z + u)
def capetesipicioare():
    """Read counts of sheep and chickens; print the totals of heads and legs."""
    print("Introduceti va rog numarul de oi:")
    sheep = int(input())
    print("Introduceti va rog numarul de gaini:")
    chickens = int(input())
    heads = chickens + sheep          # one head each
    legs = 2 * chickens + 4 * sheep   # chickens have 2 legs, sheep have 4
    print("Numarul de capete este:", heads, ",iar numarul de picioare este:", legs)
def eliminare():
    """Read a three-digit number and print it with the tens digit removed."""
    print("Introduceti va rog un numar de trei cifre:")
    a = int(input())
    hundreds, remainder = divmod(a, 100)
    units = remainder % 10
    # Recombine hundreds and units, dropping the middle (tens) digit.
    print("Numarul rezultat prin eliminarea cifrei zecilor este:", hundreds * 10 + units)
def sumagauss():
    """Read n and print the Gauss sum 1 + 2 + ... + n via n*(n+1)/2."""
    print("Introduceti un numar natural n:")
    n = int(input())
    # True division matches the original (float) output.
    total = n * (n + 1) / 2
    print("Suma lui Gauss este:", total)
def ora():
    """Read a time (hour, minutes) and a minute offset; print the new hour.

    NOTE(review): mirrors the original behavior — the result is not wrapped
    modulo 24 and the leftover minutes are not printed.
    """
    print("Introduceti ora:")
    h = int(input())
    print("Introduceti minutele")
    m = int(input())
    print("Introduceti numarul de minute care se vor aduna la ora si minutele introduse anterior:")
    extra = int(input())
    total_minutes = h * 60 + m + extra
    print("Ora va fi", total_minutes // 60)
# Script entry point: run only the Gauss-sum exercise when executed directly.
if __name__ == "__main__":
    sumagauss()
| Gabi273/python | proiecte-main/exercitii.py | exercitii.py | py | 1,478 | python | ro | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.