Q:
TSF Gives Different Results on Different Machines Despite Fixed NumPy Seed
I have a time series forecasting (TSF) application where the algorithm can be selected from two choices:
sklearn Linear Regression
statsmodels ARIMA vs. SARIMAX (based on the seasonality of the data)
There's no attribute to set the seed when the class object is initialized or when the fit function is called, so I am setting the NumPy random seed to a fixed value: np.random.seed(123)
This seed is set globally in the first entry point of the application.
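For reference, a minimal sketch of that entry-point seeding (the file name and layout are illustrative, not taken from the actual application):
# main.py -- application entry point (illustrative sketch)
import numpy as np

# Seed NumPy's global legacy RNG once, at startup, before any model code runs.
np.random.seed(123)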
The problem is that the forecasting results differ between machines, even though we run inside a Docker image whose dependencies are pinned by a Pipfile, so all dependency versions are identical, and the Python version (3.9) is the same as well:
numpy = "==1.22.3"
scikit-learn = "==1.1.1"
statsmodels = "==0.13.2"
scipy = "==1.9.3"
The differences in the forecasted values are also significant to our application for both algorithms. Example:
+----------------------+--------+
| Locally (Windows 10) | Azure  |
+----------------------+--------+
| 8499                 | 12693  |
| 140277               | 41278  |
+----------------------+--------+
Can anyone help with this?
Update: the code snippet and a data sample are below:
import pandas as pd
import numpy as np
import pmdarima as pm
from pmdarima.arima.utils import ndiffs
from statsmodels.tsa.statespace.sarimax import SARIMAX

np.random.seed(123)

def train_sarima_model(df_historical: pd.DataFrame):
    historical_data = df_historical["value"].to_numpy()
    # select the best model trained on the historical data
    smodel = pm.auto_arima(
        historical_data,
        start_p=1,
        start_q=1,
        test="adf",
        max_p=3,
        max_q=3,
        m=12,
        start_P=0,
        seasonal=True,
        d=None,
        D=1,
        trace=True,
        error_action="ignore",
        suppress_warnings=True,
        stepwise=True
    )
    mod = SARIMAX(
        historical_data,
        order=smodel.order,
        seasonal_order=smodel.seasonal_order,
        enforce_stationarity=False,
        enforce_invertibility=False
    )
    best_model = mod.fit(disp=0)
    return best_model

# df and forecasting_period come from elsewhere in the application (not shown)
df_forecasting = df[["date", "value"]]
df_forecasting = df_forecasting.sort_values(by=["date"], ascending=True)
df_forecasting.set_index("date", inplace=True)
best_model = train_sarima_model(df_forecasting)
forecasting_values = list(best_model.forecast(steps=forecasting_period))
{
"balance": [
{
"date": "2020-07-21",
"value": 0.0
},
{
"date": "2020-07-22",
"value": -3799.2
},
{
"date": "2020-07-23",
"value": -3799.2
},
{
"date": "2020-07-24",
"value": -3799.2
},
{
"date": "2020-07-25",
"value": -3799.2
},
{
"date": "2020-07-26",
"value": -3799.2
},
{
"date": "2020-07-27",
"value": -3799.2
},
{
"date": "2020-07-28",
"value": -3799.2
},
{
"date": "2020-07-29",
"value": -3799.2
},
{
"date": "2020-07-30",
"value": -3799.2
},
{
"date": "2020-07-31",
"value": -3799.2
},
{
"date": "2020-08-01",
"value": -3799.2
},
{
"date": "2020-08-02",
"value": -3799.2
},
{
"date": "2020-08-03",
"value": -3799.2
},
{
"date": "2020-08-04",
"value": -3799.2
},
{
"date": "2020-08-05",
"value": -3799.2
},
{
"date": "2020-08-06",
"value": -3799.2
},
{
"date": "2020-08-07",
"value": -3799.2
},
{
"date": "2020-08-08",
"value": -3799.2
},
{
"date": "2020-08-09",
"value": -3799.2
},
{
"date": "2020-08-10",
"value": -3799.2
},
{
"date": "2020-08-11",
"value": -3799.2
},
{
"date": "2020-08-12",
"value": -3799.2
},
{
"date": "2020-08-13",
"value": -3799.2
},
{
"date": "2020-08-14",
"value": -3799.2
},
{
"date": "2020-08-15",
"value": -3799.2
},
{
"date": "2020-08-16",
"value": -3799.2
},
{
"date": "2020-08-17",
"value": -3799.2
},
{
"date": "2020-08-18",
"value": -3799.2
},
{
"date": "2020-08-19",
"value": -3799.2
},
{
"date": "2020-08-20",
"value": -3799.2
},
{
"date": "2020-08-21",
"value": -3799.2
},
{
"date": "2020-08-22",
"value": -7598.4
},
{
"date": "2020-08-23",
"value": -7598.4
},
{
"date": "2020-08-24",
"value": -7598.4
},
{
"date": "2020-08-25",
"value": -7598.4
},
{
"date": "2020-08-26",
"value": -7598.4
},
{
"date": "2020-08-27",
"value": -7598.4
},
{
"date": "2020-08-28",
"value": -8199.33
},
{
"date": "2020-08-29",
"value": -8199.33
},
{
"date": "2020-08-30",
"value": -8199.33
},
{
"date": "2020-08-31",
"value": -8199.33
},
{
"date": "2020-09-01",
"value": -8199.33
},
{
"date": "2020-09-02",
"value": -8199.33
},
{
"date": "2020-09-03",
"value": -9084.37
},
{
"date": "2020-09-04",
"value": -9084.37
},
{
"date": "2020-09-05",
"value": -10582.2
},
{
"date": "2020-09-06",
"value": 3080.3
},
{
"date": "2020-09-07",
"value": 3080.3
},
{
"date": "2020-09-08",
"value": 38080.3
},
{
"date": "2020-09-09",
"value": 38080.3
},
{
"date": "2020-09-10",
"value": 38080.3
},
{
"date": "2020-09-11",
"value": 38080.3
},
{
"date": "2020-09-12",
"value": 38080.3
},
{
"date": "2020-09-13",
"value": 36951.98
},
{
"date": "2020-09-14",
"value": 36951.98
},
{
"date": "2020-09-15",
"value": 36951.98
},
{
"date": "2020-09-16",
"value": 35683.81
},
{
"date": "2020-09-17",
"value": 35683.81
},
{
"date": "2020-09-18",
"value": 35683.81
},
{
"date": "2020-09-19",
"value": 35683.81
},
{
"date": "2020-09-20",
"value": 35683.81
},
{
"date": "2020-09-21",
"value": 35683.81
},
{
"date": "2020-09-22",
"value": 31460.83
},
{
"date": "2020-09-23",
"value": 31460.83
},
{
"date": "2020-09-24",
"value": -41714.74
},
{
"date": "2020-09-25",
"value": -41714.74
},
{
"date": "2020-09-26",
"value": -46472.74
},
{
"date": "2020-09-27",
"value": -46472.74
},
{
"date": "2020-09-28",
"value": -47073.67
},
{
"date": "2020-09-29",
"value": -47304.67
},
{
"date": "2020-09-30",
"value": -47304.67
},
{
"date": "2020-10-01",
"value": -45426.27
},
{
"date": "2020-10-02",
"value": -45426.27
},
{
"date": "2020-10-03",
"value": -45426.27
},
{
"date": "2020-10-04",
"value": -46924.1
},
{
"date": "2020-10-05",
"value": -46924.1
},
{
"date": "2020-10-06",
"value": -46924.1
},
{
"date": "2020-10-07",
"value": -44681.6
},
{
"date": "2020-10-08",
"value": -44931.6
},
{
"date": "2020-10-09",
"value": -44931.6
},
{
"date": "2020-10-10",
"value": -49422.08
},
{
"date": "2020-10-11",
"value": -50228.33
},
{
"date": "2020-10-12",
"value": -50228.33
},
{
"date": "2020-10-13",
"value": -49350.63
},
{
"date": "2020-10-14",
"value": -49350.63
},
{
"date": "2020-10-15",
"value": -49350.63
},
{
"date": "2020-10-16",
"value": -49350.63
},
{
"date": "2020-10-17",
"value": -49363.73
},
{
"date": "2020-10-18",
"value": -49363.73
},
{
"date": "2020-10-19",
"value": -49530.26
},
{
"date": "2020-10-20",
"value": -49530.26
},
{
"date": "2020-10-21",
"value": -49530.26
},
{
"date": "2020-10-22",
"value": -49530.26
},
{
"date": "2020-10-23",
"value": -53329.46
},
{
"date": "2020-10-24",
"value": -53329.46
},
{
"date": "2020-10-25",
"value": -53135.66
},
{
"date": "2020-10-26",
"value": -53135.66
},
{
"date": "2020-10-27",
"value": -77135.66
},
{
"date": "2020-10-28",
"value": -77736.59
},
{
"date": "2020-10-29",
"value": -77736.59
},
{
"date": "2020-10-30",
"value": -77736.59
},
{
"date": "2020-10-31",
"value": -80083.59
},
{
"date": "2020-11-01",
"value": -80105.19
},
{
"date": "2020-11-02",
"value": -78731.24
},
{
"date": "2020-11-03",
"value": -78731.24
},
{
"date": "2020-11-04",
"value": -78807.11
},
{
"date": "2020-11-05",
"value": -80304.94
},
{
"date": "2020-11-06",
"value": -80304.94
},
{
"date": "2020-11-07",
"value": -80304.94
},
{
"date": "2020-11-08",
"value": -80878.69
},
{
"date": "2020-11-09",
"value": -80925.64
},
{
"date": "2020-11-10",
"value": -81305.64
},
{
"date": "2020-11-11",
"value": -80512.64
},
{
"date": "2020-11-12",
"value": -80512.64
},
{
"date": "2020-11-13",
"value": -80512.64
},
{
"date": "2020-11-14",
"value": -80512.64
},
{
"date": "2020-11-15",
"value": -84057.43
},
{
"date": "2020-11-16",
"value": -84057.43
},
{
"date": "2020-11-17",
"value": -84057.43
},
{
"date": "2020-11-18",
"value": -87557.43
},
{
"date": "2020-11-19",
"value": -92356.43
},
{
"date": "2020-11-20",
"value": -92356.43
},
{
"date": "2020-11-21",
"value": -92356.43
},
{
"date": "2020-11-22",
"value": -91356.43
},
{
"date": "2020-11-23",
"value": -96317.73
},
{
"date": "2020-11-24",
"value": -96317.73
},
{
"date": "2020-11-25",
"value": -99317.73
},
{
"date": "2020-11-26",
"value": -99317.73
},
{
"date": "2020-11-27",
"value": -97797.73
},
{
"date": "2020-11-28",
"value": -98398.66
},
{
"date": "2020-11-29",
"value": -104547.66
},
{
"date": "2020-11-30",
"value": -104547.66
},
{
"date": "2020-12-01",
"value": -104569.26
},
{
"date": "2020-12-02",
"value": -98420.26
},
{
"date": "2020-12-03",
"value": -98420.26
},
{
"date": "2020-12-04",
"value": -98420.26
},
{
"date": "2020-12-05",
"value": -99974.01
},
{
"date": "2020-12-06",
"value": -99974.01
},
{
"date": "2020-12-07",
"value": -102775.87
},
{
"date": "2020-12-08",
"value": -102775.87
},
{
"date": "2020-12-09",
"value": -102776.47
},
{
"date": "2020-12-10",
"value": -100968.77
},
{
"date": "2020-12-11",
"value": -100708.77
},
{
"date": "2020-12-12",
"value": -101976.77
},
{
"date": "2020-12-13",
"value": -102356.77
},
{
"date": "2020-12-14",
"value": -102356.77
},
{
"date": "2020-12-15",
"value": -98721.17
},
{
"date": "2020-12-16",
"value": -105721.17
},
{
"date": "2020-12-17",
"value": -106065.66
},
{
"date": "2020-12-18",
"value": -106065.66
},
{
"date": "2020-12-19",
"value": -106065.66
},
{
"date": "2020-12-20",
"value": -106065.66
},
{
"date": "2020-12-21",
"value": -106066.41
},
{
"date": "2020-12-22",
"value": -109865.61
},
{
"date": "2020-12-23",
"value": -111027.71
},
{
"date": "2020-12-24",
"value": -111027.71
},
{
"date": "2020-12-25",
"value": -111027.71
},
{
"date": "2020-12-26",
"value": -112047.23
},
{
"date": "2020-12-27",
"value": -119641.23
},
{
"date": "2020-12-28",
"value": -120242.16
},
{
"date": "2020-12-29",
"value": -120242.16
},
{
"date": "2020-12-30",
"value": -120242.16
},
{
"date": "2020-12-31",
"value": -120242.16
},
{
"date": "2021-01-01",
"value": -120263.76
},
{
"date": "2021-01-02",
"value": -113563.76
},
{
"date": "2021-01-03",
"value": -113563.76
},
{
"date": "2021-01-04",
"value": -113563.76
},
{
"date": "2021-01-05",
"value": -121368.63
},
{
"date": "2021-01-06",
"value": -121368.63
},
{
"date": "2021-01-07",
"value": -121368.63
},
{
"date": "2021-01-08",
"value": -134820.63
},
{
"date": "2021-01-09",
"value": -134820.63
},
{
"date": "2021-01-10",
"value": -136590.81
},
{
"date": "2021-01-11",
"value": -136590.81
},
{
"date": "2021-01-12",
"value": -137875.81
},
{
"date": "2021-01-13",
"value": -137875.81
},
{
"date": "2021-01-14",
"value": -138125.81
},
{
"date": "2021-01-15",
"value": -138127.26
},
{
"date": "2021-01-16",
"value": -138127.26
},
{
"date": "2021-01-17",
"value": -138127.26
},
{
"date": "2021-01-18",
"value": -138127.26
},
{
"date": "2021-01-19",
"value": -138127.26
},
{
"date": "2021-01-20",
"value": -138127.26
},
{
"date": "2021-01-21",
"value": -136651.06
},
{
"date": "2021-01-22",
"value": -137813.91
},
{
"date": "2021-01-23",
"value": -137813.91
},
{
"date": "2021-01-24",
"value": -137813.91
},
{
"date": "2021-01-25",
"value": -138063.91
},
{
"date": "2021-01-26",
"value": -138063.91
},
{
"date": "2021-01-27",
"value": -138063.91
},
{
"date": "2021-01-28",
"value": -138063.91
},
{
"date": "2021-01-29",
"value": -138063.91
},
{
"date": "2021-01-30",
"value": -138063.91
},
{
"date": "2021-01-31",
"value": -138063.91
},
{
"date": "2021-02-01",
"value": -138085.51
},
{
"date": "2021-02-02",
"value": -138461.51
},
{
"date": "2021-02-03",
"value": -138461.51
},
{
"date": "2021-02-04",
"value": -138461.51
},
{
"date": "2021-02-05",
"value": -103517.43
},
{
"date": "2021-02-06",
"value": -104615.43
},
{
"date": "2021-02-07",
"value": -104615.43
},
{
"date": "2021-02-08",
"value": -104615.43
},
{
"date": "2021-02-09",
"value": -103615.43
},
{
"date": "2021-02-10",
"value": -103615.43
},
{
"date": "2021-02-11",
"value": -104340.11
},
{
"date": "2021-02-12",
"value": -105660.51
},
{
"date": "2021-02-13",
"value": -106805.81
},
{
"date": "2021-02-14",
"value": -115238.45
},
{
"date": "2021-02-15",
"value": -115238.45
},
{
"date": "2021-02-16",
"value": -115238.45
},
{
"date": "2021-02-17",
"value": -115238.45
},
{
"date": "2021-02-18",
"value": -115238.45
},
{
"date": "2021-02-19",
"value": -115238.45
},
{
"date": "2021-02-20",
"value": -115238.45
},
{
"date": "2021-02-21",
"value": -115238.45
},
{
"date": "2021-02-22",
"value": -115238.45
},
{
"date": "2021-02-23",
"value": -115241.95
},
{
"date": "2021-02-24",
"value": -554.95
},
{
"date": "2021-02-25",
"value": -554.95
},
{
"date": "2021-02-26",
"value": -4.95
},
{
"date": "2021-02-27",
"value": -4.95
},
{
"date": "2021-02-28",
"value": -4.95
},
{
"date": "2021-03-01",
"value": 3726.85
},
{
"date": "2021-03-02",
"value": 3726.85
},
{
"date": "2021-03-03",
"value": 3726.85
},
{
"date": "2021-03-04",
"value": 3726.85
},
{
"date": "2021-03-05",
"value": 3726.85
},
{
"date": "2021-03-06",
"value": 3726.85
},
{
"date": "2021-03-07",
"value": 3726.85
},
{
"date": "2021-03-08",
"value": 3670.93
},
{
"date": "2021-03-09",
"value": 3670.93
},
{
"date": "2021-03-10",
"value": 3670.93
},
{
"date": "2021-03-11",
"value": 6465.25
},
{
"date": "2021-03-12",
"value": 5180.25
},
{
"date": "2021-03-13",
"value": 7230.25
},
{
"date": "2021-03-14",
"value": 7230.25
},
{
"date": "2021-03-15",
"value": 7210.3
},
{
"date": "2021-03-16",
"value": 7210.3
},
{
"date": "2021-03-17",
"value": 7210.3
},
{
"date": "2021-03-18",
"value": 7892.58
},
{
"date": "2021-03-19",
"value": 7892.58
},
{
"date": "2021-03-20",
"value": 7892.58
},
{
"date": "2021-03-21",
"value": 7892.58
},
{
"date": "2021-03-22",
"value": 7892.58
},
{
"date": "2021-03-23",
"value": 7314.48
},
{
"date": "2021-03-24",
"value": 7314.48
},
{
"date": "2021-03-25",
"value": 7314.48
},
{
"date": "2021-03-26",
"value": 7314.48
},
{
"date": "2021-03-27",
"value": 7314.48
},
{
"date": "2021-03-28",
"value": 7314.48
},
{
"date": "2021-03-29",
"value": 8514.48
},
{
"date": "2021-03-30",
"value": 9977.53
},
{
"date": "2021-03-31",
"value": 9957.58
},
{
"date": "2021-04-01",
"value": 9935.98
},
{
"date": "2021-04-02",
"value": 9935.98
},
{
"date": "2021-04-03",
"value": 9935.98
},
{
"date": "2021-04-04",
"value": 8412.2
},
{
"date": "2021-04-05",
"value": 8412.2
},
{
"date": "2021-04-06",
"value": 8412.2
},
{
"date": "2021-04-07",
"value": 8412.2
},
{
"date": "2021-04-08",
"value": 72412.2
},
{
"date": "2021-04-09",
"value": 72378.25
},
{
"date": "2021-04-10",
"value": 72378.25
},
{
"date": "2021-04-11",
"value": 72378.25
},
{
"date": "2021-04-12",
"value": 71093.25
},
{
"date": "2021-04-13",
"value": 71093.25
},
{
"date": "2021-04-14",
"value": 71092.75
},
{
"date": "2021-04-15",
"value": 71092.75
},
{
"date": "2021-04-16",
"value": 136685.95
},
{
"date": "2021-04-17",
"value": 137062.13
},
{
"date": "2021-04-18",
"value": 140697.76
},
{
"date": "2021-04-19",
"value": 140697.76
},
{
"date": "2021-04-20",
"value": 140697.76
},
{
"date": "2021-04-21",
"value": 140088.75
},
{
"date": "2021-04-22",
"value": 140088.75
},
{
"date": "2021-04-23",
"value": 113926.65
},
{
"date": "2021-04-24",
"value": 113926.65
},
{
"date": "2021-04-25",
"value": 96958.65
},
{
"date": "2021-04-26",
"value": 96958.65
},
{
"date": "2021-04-27",
"value": 96958.65
},
{
"date": "2021-04-28",
"value": 96956.65
},
{
"date": "2021-04-29",
"value": 96956.65
},
{
"date": "2021-04-30",
"value": 96956.65
},
{
"date": "2021-05-01",
"value": 96956.65
},
{
"date": "2021-05-02",
"value": 96956.65
},
{
"date": "2021-05-03",
"value": 96956.65
},
{
"date": "2021-05-04",
"value": 95432.87
},
{
"date": "2021-05-05",
"value": 95432.87
},
{
"date": "2021-05-06",
"value": 95465.28
},
{
"date": "2021-05-07",
"value": 95465.28
},
{
"date": "2021-05-08",
"value": 95465.28
},
{
"date": "2021-05-09",
"value": 95465.28
},
{
"date": "2021-05-10",
"value": 94740.6
},
{
"date": "2021-05-11",
"value": 94740.6
},
{
"date": "2021-05-12",
"value": 94740.6
},
{
"date": "2021-05-13",
"value": 91405.6
},
{
"date": "2021-05-14",
"value": 91405.6
},
{
"date": "2021-05-15",
"value": 91405.6
},
{
"date": "2021-05-16",
"value": 91405.6
},
{
"date": "2021-05-17",
"value": 91405.6
}
]
}
A:
Thanks to @NickODell's comment, the issue was solved as follows:
ARIMA/SARIMAX
For the ARIMA/SARIMAX model, a random state should be created with a specific seed and passed to the pm.auto_arima function. The reason is that pm.auto_arima internally uses a train/test split (which takes a seed) to find the best model.
random_state = np.random.RandomState(123)

smodel = pm.auto_arima(
    df_historical.value,
    start_p=1,
    start_q=1,
    test="adf",
    max_p=3,
    max_q=3,
    m=12,
    start_P=0,
    seasonal=True,
    d=None,
    D=1,
    trace=True,
    error_action="ignore",
    suppress_warnings=True,
    stepwise=True,
    random_state=random_state
)

mod = SARIMAX(
    historical_data,
    order=smodel.order,
    seasonal_order=smodel.seasonal_order,
    enforce_stationarity=False,
    enforce_invertibility=False
)
best_model = mod.fit(disp=0)
Linear Regression
For the linear regression, the issue was solved by disabling intercept fitting: intercept fitting involves averaging and shifting (centering) calculations whose floating-point results can differ slightly between operating systems, and those differences propagate into the predicted values.
from sklearn.linear_model import LinearRegression

regr = LinearRegression(fit_intercept=False)  # skip the intercept (centering) step
regr.fit(x, y)  # x: feature matrix, y: target values
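If the model still needs an intercept, one possible workaround (a sketch of my own, not part of the original answer) is to center the data explicitly and recover the intercept afterwards; note this reintroduces averaging, so its cross-platform reproducibility should be verified:
import numpy as np
from sklearn.linear_model import LinearRegression

# Hypothetical stand-ins for the application's features and targets.
x = np.array([[1.0, 2.0], [2.0, 1.0], [3.0, 4.0], [4.0, 3.0]])
y = np.array([1.0, 2.0, 3.0, 4.0])

x_mean, y_mean = x.mean(axis=0), y.mean()
regr = LinearRegression(fit_intercept=False)
regr.fit(x - x_mean, y - y_mean)           # fit on centered data
intercept = y_mean - x_mean @ regr.coef_   # recover the implied intercept
predictions = x @ regr.coef_ + intercept   # equivalent to fitting with an intercept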
Q:
How to share the contents of a dataset across all processes
As shown in the code posted below, I have the contents of a dataset represented in the object mainTIFFImageDatasetContents.
Each time run() is called, I collect some data in the form of an object, as shown in the next snippet:
if (pixelsValuesSatisfyThresholdInTIFFImageDatasetCnt > 0):
    gridCellInnerLoopsIteratorsForNoneZeroCoverageModel = GridCellInnerLoopsIteratorsForNoneZeroCoverageModel()
    gridCellInnerLoopsIteratorsForNoneZeroCoverageModel.setRowValue(row)
    gridCellInnerLoopsIteratorsForNoneZeroCoverageModel.setColValue(col)
    gridCellInnerLoopsIteratorsForNoneZeroCoverageModel.setVericalStep(verticalStep)
    gridCellInnerLoopsIteratorsForNoneZeroCoverageModel.setHorizontalStep(horizontalStep)
    gridCellInnerLoopsIteratorsForNoneZeroCoverageModel.setMainTIFFImageDatasetContents(mainTIFFImageDatasetContents)  # <== the expensive field
    gridCellInnerLoopsIteratorsForNoneZeroCoverageModel.setNDVIsTIFFWindowedSegmentContentsInEPSG25832(NDVIsTIFFWindowedSegmentContentsInEPSG25832)
    gridCellInnerLoopsIteratorsForNoneZeroCoverageModel.setURLOrFilePathForElevationsTIFFDatasetInEPSG25832(URLOrFilePathForElevationsTIFFDatasetInEPSG25832)
    gridCellInnerLoopsIteratorsForNoneZeroCoverageModel.setPixelsValuesSatisfyThresholdInTIFFImageDatasetCnt(pixelsValuesSatisfyThresholdInTIFFImageDatasetCnt)
    gridCellInnerLoopsIteratorsForNoneZeroCoverageModel.setPixelsValuesSatisfyThresholdInNoneZeroCoverageCell(_pixelsValuesSatisfyThresholdInNoneZeroCoverageCell)
When run() returns gridCellInnerLoopsIteratorsForNoneZeroCoverageModel, I save the returned objects in a list __iterablesOfNoneZeroCoverageCell, which becomes the input iterable to another parallelized step, shown below in the postTask section.
The problem I am facing is related to pickling mainTIFFImageDatasetContents: each time an object from the iterable __iterablesOfNoneZeroCoverageCell is passed to run(), mainTIFFImageDatasetContents is pickled again, and that is an expensive operation.
I would like to provide mainTIFFImageDatasetContents to run() only once (the relevant call is shown in the line below) instead of attaching it to each object in the iterable:
for res in ZeroCoverageCellsProcessingPool.pool.map(func=self.run,iterable=self.__iterables,chunksize=self.__chunkSize):
In other words, I would like something like a static variable, so the object is set once instead of once per created object. I hope my point is clear.
DecoupleGridCellsProfilerLoopsPool
def postTask(self):
    self.__postTaskStartTime = time.time()
    with Pool(processes=int(config['MULTIPROCESSING']['proceses_count'])) as DecoupleGridCellsProfilerLoopsPool.pool:
        self.__chunkSize = PoolUtils.getChunkSize(lst=self.__listOfLoopDecouplers, cpuCount=int(config['MULTIPROCESSING']['cpu_count']))
        logger.info(f"DecoupleGridCellsProfilerLoopsPool.self.__chunkSize(task per processor):{self.__chunkSize}")
        for res in DecoupleGridCellsProfilerLoopsPool.pool.map(self.run, self.__listOfLoopDecouplers, chunksize=self.__chunkSize):
            if res[0] is not None and res[1] is None and res[2] is None:
                self.__iterablesOfNoneZeroCoverageCell.append(res[0])
            else:
                raise Exception(f"WTF.")
        DecoupleGridCellsProfilerLoopsPool.pool.join()

    assert len(self.__iterablesOfNoneZeroCoverageCell) + len(self.__iterablesOfZeroCoverageCell) + len(self.__iterablesOfNoDataCells) == len(self.__listOfLoopDecouplers)

    zeroCoverageCellsProcessingPool = ZeroCoverageCellsProcessingPool(self.__devModeForWSAWANTIVer2, self.__iterablesOfZeroCoverageCell)
    zeroCoverageCellsProcessingPool.postTask()
def run(self, param: LoopDecoupler):
    row = param.getRowValue()
    col = param.getColValue()
    elevationsTIFFWindowedSegmentContents = param.getElevationsTIFFWindowedSegment()
    verticalStep = param.getVericalStep()
    horizontalStep = param.getHorizontalStep()
    mainTIFFImageDatasetContents = param.getMainTIFFImageDatasetContents()
    NDVIsTIFFWindowedSegmentContentsInEPSG25832 = param.getNDVIsTIFFWindowedSegmentContentsInEPSG25832()
    URLOrFilePathForElevationsTIFFDatasetInEPSG25832 = param.getURLOrFilePathForElevationsTIFFDatasetInEPSG25832()
    threshold = param.getThreshold()

    rowsCnt = 0
    colsCnt = 0
    pixelsValuesSatisfyThresholdInTIFFImageDatasetCnt = 0
    pixelsValuesDoNotSatisfyThresholdInTIFFImageDatasetCnt = int(config['window']['width']) * int(config['window']['height'])
    pixelsWithNoDataValueInTIFFImageDatasetCnt = int(config['window']['width']) * int(config['window']['height'])
    _pixelsValuesSatisfyThresholdInNoneZeroCoverageCell = []
    _pixelsValuesDoNotSatisfyThresholdInZeroCoverageCell = []
    _pixelsValuesInNoDataCell = []
    gridCellInnerLoopsIteratorsForNoneZeroCoverageModel = None
    gridCellInnerLoopsIteratorsForZeroCoverageModel = None
    gridCellInnerLoopsIteratorsForNoDataCellsModel = None

    for x in range(row, row + verticalStep):
        if rowsCnt == verticalStep:
            rowsCnt = 0
        for y in range(col, col + horizontalStep):
            if colsCnt == horizontalStep:
                colsCnt = 0
            pixelValue = mainTIFFImageDatasetContents[0][x][y]
            # windowIOUtils.writeContentsToFile(windowIOUtils.getPathToOutputDir()+"/"+config['window']['file_name']+".{0}".format(config['window']['file_extension']), "pixelValue:{0}\n".format(pixelValue))
            if pixelValue >= float(threshold):
                pixelsValuesSatisfyThresholdInTIFFImageDatasetCnt += 1
                _pixelsValuesSatisfyThresholdInNoneZeroCoverageCell.append(elevationsTIFFWindowedSegmentContents[0][rowsCnt][colsCnt])
            elif ((pixelValue < float(threshold)) and (pixelValue > float(config['TIFF']['no_data_value']))):
                pixelsValuesDoNotSatisfyThresholdInTIFFImageDatasetCnt -= 1
                _pixelsValuesDoNotSatisfyThresholdInZeroCoverageCell.append(elevationsTIFFWindowedSegmentContents[0][rowsCnt][colsCnt])
            elif (pixelValue <= float(config['TIFF']['no_data_value'])):
                pixelsWithNoDataValueInTIFFImageDatasetCnt -= 1
                _pixelsValuesInNoDataCell.append(elevationsTIFFWindowedSegmentContents[0][rowsCnt][colsCnt])
            else:
                raise Exception("WTF.Exception: unhandled condition for pixel value: {0}".format(pixelValue))
            # _pixelCoordinatesInWindow.append([x,y])
            colsCnt += 1
        rowsCnt += 1

    '''collecting data'''
    if (pixelsValuesSatisfyThresholdInTIFFImageDatasetCnt > 0):
        gridCellInnerLoopsIteratorsForNoneZeroCoverageModel = GridCellInnerLoopsIteratorsForNoneZeroCoverageModel()
        gridCellInnerLoopsIteratorsForNoneZeroCoverageModel.setRowValue(row)
        gridCellInnerLoopsIteratorsForNoneZeroCoverageModel.setColValue(col)
        gridCellInnerLoopsIteratorsForNoneZeroCoverageModel.setVericalStep(verticalStep)
        gridCellInnerLoopsIteratorsForNoneZeroCoverageModel.setHorizontalStep(horizontalStep)
        gridCellInnerLoopsIteratorsForNoneZeroCoverageModel.setMainTIFFImageDatasetContents(mainTIFFImageDatasetContents)
        gridCellInnerLoopsIteratorsForNoneZeroCoverageModel.setNDVIsTIFFWindowedSegmentContentsInEPSG25832(NDVIsTIFFWindowedSegmentContentsInEPSG25832)
        gridCellInnerLoopsIteratorsForNoneZeroCoverageModel.setURLOrFilePathForElevationsTIFFDatasetInEPSG25832(URLOrFilePathForElevationsTIFFDatasetInEPSG25832)
        gridCellInnerLoopsIteratorsForNoneZeroCoverageModel.setPixelsValuesSatisfyThresholdInTIFFImageDatasetCnt(pixelsValuesSatisfyThresholdInTIFFImageDatasetCnt)
        gridCellInnerLoopsIteratorsForNoneZeroCoverageModel.setPixelsValuesSatisfyThresholdInNoneZeroCoverageCell(_pixelsValuesSatisfyThresholdInNoneZeroCoverageCell)

    return gridCellInnerLoopsIteratorsForNoneZeroCoverageModel
ZeroCoverageCellsProcessingPool:
def postTask(self):
    self.__postTaskStartTime = time.time()
    '''initialization of variables'''
    ...
    ...
    ...
    ...
    ...
    with Pool(processes=int(config['MULTIPROCESSING']['proceses_count'])) as ZeroCoverageCellsProcessingPool.pool:
        self.__chunkSize = PoolUtils.getChunkSize(lst=self.__iterables, cpuCount=int(config['MULTIPROCESSING']['cpu_count']))
        logger.info(f"ZeroCoverageCellsProcessingPool.self.__chunkSize(task per processor):{self.__chunkSize}")
        for res in ZeroCoverageCellsProcessingPool.pool.map(func=self.run, iterable=self.__iterables, chunksize=self.__chunkSize):
            resAllCellsForGridCellsClassifications.append(res[0])
        ZeroCoverageCellsProcessingPool.pool.join()
    return

def run(self, params: GridCellInnerLoopsIteratorsForZeroCoverageModel):
    if params is not None:
        logger.info(f"Processing zero coverage cell @(row{params.getRowValue()},col:{params.getColValue()})")
    row = params.getRowValue()
    col = params.getColValue()
    mainTIFFImageDatasetContents = params.getMainTIFFImageDatasetContents()
    NDVIsTIFFWindowedSegmentContentsInEPSG25832 = params.getNDVIsTIFFWindowedSegmentContentsInEPSG25832()
    URLOrFilePathForElevationsTIFFDatasetInEPSG25832 = params.getURLOrFilePathForElevationsTIFFDatasetInEPSG25832()
    datasetElevationsTIFFInEPSG25832 = rasterio.open(URLOrFilePathForElevationsTIFFDatasetInEPSG25832, 'r')
    _pixelsValuesDoNotSatisfyThresholdInZeroCoverageCell = params.getPixelsValuesDoNotSatisfyThresholdInZeroCoverageCell()
    pixelsValuesDoNotSatisfyThresholdInTIFFImageDatasetCnt = params.getPixelsValuesDoNotSatisfyThresholdInTIFFImageDatasetCnt()
    countOfNoDataCells = params.getPixelsWithNoDataValueInTIFFImageDatasetCnt()
    outFromEPSG25832ToEPSG4326ForZeroCoverageCells = []
    fourCornersOfWindowsAsGeoJSONInEPSG4326ForZeroCoverageCell = []
    output=(..,...,..,..,)
    return output
Code of postTask:
def postTask(self):
    self.__postTaskStartTime = time.time()
    with Pool(processes=int(config['MULTIPROCESSING']['proceses_count'])) as ZeroCoverageCellsProcessingPool.pool:
        self.__chunkSize = PoolUtils.getChunkSize(lst=self.__iterables, cpuCount=int(config['MULTIPROCESSING']['cpu_count']))
        for res in ZeroCoverageCellsProcessingPool.pool.map(func=self.run, iterable=self.__iterables, chunksize=self.__chunkSize):
            resAllCellsForGridCellsClassifications.append(res[0])
            # NDVIs
A:
This example shows how to copy a variable to all children once, using an initializer function that runs once per child process.
from multiprocessing import Pool

def foo(number):
    print(number, global_obj)

def initializer_func(argument):
    global global_obj
    global_obj = argument

if __name__ == "__main__":
    parent_obj = "hello"
    with Pool(processes=1, initializer=initializer_func, initargs=(parent_obj,)) as pool:
        pool.map(foo, range(4))
0 hello
1 hello
2 hello
3 hello
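Applied to the question's setup, a rough sketch might look like the following (the dataset name comes from the question; the lightweight (row, col) task payload is a hypothetical simplification of the real model objects):
from multiprocessing import Pool

def init_worker(main_tiff_image_dataset_contents):
    # Runs once per child process: store the large dataset in a
    # module-level global so each task no longer has to pickle it.
    global MAIN_TIFF
    MAIN_TIFF = main_tiff_image_dataset_contents

def run(task):
    # The task now carries only small per-cell fields; the big array
    # is read from the per-process global set by init_worker.
    row, col = task
    return MAIN_TIFF[0][row][col]

if __name__ == "__main__":
    dataset = [[[1, 2], [3, 4]]]  # stand-in for mainTIFFImageDatasetContents
    with Pool(processes=2, initializer=init_worker, initargs=(dataset,)) as pool:
        print(pool.map(run, [(0, 0), (1, 1)]))  # -> [1, 4]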
PS: As a new programmer you might want your names to be descriptive to help everyone understand the code, but very long names are a clear sign of coupling and make it hard to stay within the PEP 8 line-length standard, so you end up with code that is difficult to read and maintain and even harder to extend. For example, no one (me included) bothered to read your code, and forcing a future maintainer to do so would be a crime. The only exception is if you get paid by the number of letters in your code.
Q:
Transpose column to row with Spark
I'm trying to transpose some columns of my table to rows.
I'm using Python and Spark 1.5.0. Here is my initial table:
+-----+-----+-----+-------+
| A |col_1|col_2|col_...|
+-----+-----+-----+-------+
| 1 | 0.0| 0.6| ... |
| 2 | 0.6| 0.7| ... |
| 3 | 0.5| 0.9| ... |
| ...| ...| ...| ... |
I would like to have something like this:
+-----+--------+-----------+
| A | col_id | col_value |
+-----+--------+-----------+
| 1 | col_1| 0.0|
| 1 | col_2| 0.6|
| ...| ...| ...|
| 2 | col_1| 0.6|
| 2 | col_2| 0.7|
| ...| ...| ...|
| 3 | col_1| 0.5|
| 3 | col_2| 0.9|
| ...| ...| ...|
Does someone know how I can do it? Thank you for your help.
A:
Spark >= 3.4
You can use the built-in melt method. With Python:
df.melt(
    ids=["A"], values=["col_1", "col_2"],
    variableColumnName="key", valueColumnName="val"
)
With Scala:
df.melt(Array($"A"), Array($"col_1", $"col_2"), "key", "val")
Spark < 3.4
It is relatively simple to do with basic Spark SQL functions.
Python
from pyspark.sql.functions import array, col, explode, struct, lit

df = sc.parallelize([(1, 0.0, 0.6), (1, 0.6, 0.7)]).toDF(["A", "col_1", "col_2"])

def to_long(df, by):
    # Filter dtypes and split into column names and type description
    cols, dtypes = zip(*((c, t) for (c, t) in df.dtypes if c not in by))
    # Spark SQL supports only homogeneous columns
    assert len(set(dtypes)) == 1, "All columns have to be of the same type"
    # Create and explode an array of (column_name, column_value) structs
    kvs = explode(array([
        struct(lit(c).alias("key"), col(c).alias("val")) for c in cols
    ])).alias("kvs")
    return df.select(by + [kvs]).select(by + ["kvs.key", "kvs.val"])

to_long(df, ["A"])
Scala:
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.functions.{array, col, explode, lit, struct}
val df = Seq((1, 0.0, 0.6), (1, 0.6, 0.7)).toDF("A", "col_1", "col_2")
def toLong(df: DataFrame, by: Seq[String]): DataFrame = {
val (cols, types) = df.dtypes.filter{ case (c, _) => !by.contains(c)}.unzip
require(types.distinct.size == 1, s"${types.distinct.toString}.length != 1")
val kvs = explode(array(
cols.map(c => struct(lit(c).alias("key"), col(c).alias("val"))): _*
))
val byExprs = by.map(col(_))
df
.select(byExprs :+ kvs.alias("_kvs"): _*)
.select(byExprs ++ Seq($"_kvs.key", $"_kvs.val"): _*)
}
toLong(df, Seq("A"))
A:
One way to solve with pyspark sql using functions create_map and explode.
from pyspark.sql import functions as func
#Use `create_map` to create the map of columns with constant
df = df.withColumn('mapCol', \
func.create_map(func.lit('col_1'),df.col_1,
func.lit('col_2'),df.col_2,
func.lit('col_3'),df.col_3
)
)
#Use explode function to explode the map
res = df.select('*',func.explode(df.mapCol).alias('col_id','col_value'))
res.show()
A:
The Spark local linear algebra libraries are presently very weak, and they do not include basic operations such as the above.
There is a JIRA for fixing this for Spark 2.1 - but that will not help you today.
Something to consider: performing a transpose will likely require completely shuffling the data.
For now you will need to write RDD code directly. I have written transpose in scala - but not in python. Here is the scala version:
def transpose(mat: DMatrix) = {
val nCols = mat(0).length
val matT = mat
.flatten
.zipWithIndex
.groupBy {
_._2 % nCols
}
.toSeq.sortBy {
_._1
}
.map(_._2)
.map(_.map(_._1))
.toArray
matT
}
So you can convert that to python for your use. I do not have bandwidth to write/test that at this particular moment: let me know if you were unable to do that conversion.
At the least - the following are readily converted to python.
zipWithIndex --> enumerate() (python equivalent - credit to @zero323)
map --> [someOperation(x) for x in ..]
groupBy --> itertools.groupBy()
Here is the implementation for flatten which does not have a python equivalent:
def flatten(L):
for item in L:
try:
for i in flatten(item):
yield i
except TypeError:
yield item
So you should be able to put those together for a solution.
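For reference, here is a minimal, untested Python sketch of that conversion (plain Python on a list of equal-length rows, mirroring the Scala logic; not Spark-aware):
from itertools import groupby

def transpose(mat):
    n_cols = len(mat[0])
    flat = [x for row in mat for x in row]                 # flatten
    indexed = list(enumerate(flat))                        # zipWithIndex (index comes first here)
    keyed = sorted(indexed, key=lambda p: p[0] % n_cols)   # group key = index % n_cols
    groups = groupby(keyed, key=lambda p: p[0] % n_cols)
    return [[v for _, v in grp] for _, grp in groups]
sorted() is stable, so values keep their original order inside each group, which is what the Scala version relies on as well.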
A:
You could use the stack function:
for example:
df.selectExpr("stack(2, 'col_1', col_1, 'col_2', col_2) as (key, value)")
where:
2 is the number of columns to stack (col_1 and col_2)
'col_1' is a string for the key
col_1 is the column from which to take the values
if you have several columns, you could build the whole stack string iterating the column names and pass that to selectExpr
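As a hedged sketch of that last point (assuming A is the only id column):
cols = [c for c in df.columns if c != "A"]
stack_expr = "stack({}, {}) as (key, value)".format(
    len(cols),
    ", ".join("'{0}', {0}".format(c) for c in cols)
)
df.selectExpr("A", stack_expr)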
A:
Use flatmap. Something like below should work
from pyspark.sql import Row
def rowExpander(row):
rowDict = row.asDict()
valA = rowDict.pop('A')
for k in rowDict:
yield Row(**{'A': valA , 'colID': k, 'colValue': row[k]})
newDf = sqlContext.createDataFrame(df.rdd.flatMap(rowExpander))
A:
I took the Scala answer that @javadba wrote and created a Python version for transposing all columns in a DataFrame. This might be a bit different from what OP was asking...
from itertools import chain
from pyspark.sql import DataFrame
def _sort_transpose_tuple(tup):
x, y = tup
return x, tuple(zip(*sorted(y, key=lambda v_k: v_k[1], reverse=False)))[0]
def transpose(X):
"""Transpose a PySpark DataFrame.
Parameters
----------
X : PySpark ``DataFrame``
The ``DataFrame`` that should be transposed.
"""
# validate
if not isinstance(X, DataFrame):
raise TypeError('X should be a DataFrame, not a %s'
% type(X))
cols = X.columns
n_features = len(cols)
# Sorry for this unreadability...
return X.rdd.flatMap( # make into an RDD
lambda xs: chain(xs)).zipWithIndex().groupBy( # zip index
lambda val_idx: val_idx[1] % n_features).sortBy( # group by index % n_features as key
lambda grp_res: grp_res[0]).map( # sort by index % n_features key
lambda grp_res: _sort_transpose_tuple(grp_res)).map( # maintain order
lambda key_col: key_col[1]).toDF() # return to DF
For example:
>>> X = sc.parallelize([(1,2,3), (4,5,6), (7,8,9)]).toDF()
>>> X.show()
+---+---+---+
| _1| _2| _3|
+---+---+---+
| 1| 2| 3|
| 4| 5| 6|
| 7| 8| 9|
+---+---+---+
>>> transpose(X).show()
+---+---+---+
| _1| _2| _3|
+---+---+---+
| 1| 4| 7|
| 2| 5| 8|
| 3| 6| 9|
+---+---+---+
A:
A very handy way to implement:
from pyspark.sql import Row
def rowExpander(row):
rowDict = row.asDict()
valA = rowDict.pop('A')
for k in rowDict:
yield Row(**{'A': valA , 'colID' : k, 'colValue' : row[k]})
newDf = sqlContext.createDataFrame(df.rdd.flatMap(rowExpander))
A:
To transpose Dataframe in pySpark, I use pivot over the temporary created column, which I drop at the end of the operation.
Say we have a table like this. What we want to do is find all users over each listed_days_bin value.
+------------------+-------------+
| listed_days_bin | users_count |
+------------------+-------------+
|1 | 5|
|0 | 2|
|0 | 1|
|1 | 3|
|1 | 4|
|2 | 5|
|2 | 7|
|2 | 2|
|1 | 1|
+------------------+-------------+
Create new temp column - 'pvt_value', aggregate over it and pivot results
import pyspark.sql.functions as F
agg_df = df.withColumn('pvt_value', F.lit(1))\
.groupby('pvt_value')\
.pivot('listed_days_bin')\
.agg(F.sum('users_count')).drop('pvt_value')
New Dataframe should look like:
+----+---+---+
| 0 | 1 | 2 | # Columns
+----+---+---+
| 3| 13| 14| # Users over the bin
+----+---+---+
A:
I found PySpark to be too complicated to transpose so I just convert my dataframe to Pandas and use the transpose() method and convert the dataframe back to PySpark if required.
dfOutput = spark.createDataFrame(dfPySpark.toPandas().transpose())
dfOutput.display()
|
Transpose column to row with Spark
|
I'm trying to transpose some columns of my table to row.
I'm using Python and Spark 1.5.0. Here is my initial table:
+-----+-----+-----+-------+
| A |col_1|col_2|col_...|
+-----+-------------------+
| 1 | 0.0| 0.6| ... |
| 2 | 0.6| 0.7| ... |
| 3 | 0.5| 0.9| ... |
| ...| ...| ...| ... |
I would like to have something like this:
+-----+--------+-----------+
| A | col_id | col_value |
+-----+--------+-----------+
| 1 | col_1| 0.0|
| 1 | col_2| 0.6|
| ...| ...| ...|
| 2 | col_1| 0.6|
| 2 | col_2| 0.7|
| ...| ...| ...|
| 3 | col_1| 0.5|
| 3 | col_2| 0.9|
| ...| ...| ...|
Does someone know how I can do it? Thank you for your help.
|
[
"Spark >= 3.4\nYou can use built-in melt method. With Python:\ndf.melt(\n ids=[\"A\"], values=[\"col_1\", \"col_2\"],\n variableColumnName=\"key\", valueColumnName=\"val\"\n)\n\nwith Scala\ndf.melt(Array($\"A\"), Array($\"col_1\", $\"col_2\"), \"key\", \"val\")\n\nSpark < 3.4\nIt is relatively simple to do with basic Spark SQL functions.\nPython\nfrom pyspark.sql.functions import array, col, explode, struct, lit\n\ndf = sc.parallelize([(1, 0.0, 0.6), (1, 0.6, 0.7)]).toDF([\"A\", \"col_1\", \"col_2\"])\n\ndef to_long(df, by):\n\n # Filter dtypes and split into column names and type description\n cols, dtypes = zip(*((c, t) for (c, t) in df.dtypes if c not in by))\n # Spark SQL supports only homogeneous columns\n assert len(set(dtypes)) == 1, \"All columns have to be of the same type\"\n\n # Create and explode an array of (column_name, column_value) structs\n kvs = explode(array([\n struct(lit(c).alias(\"key\"), col(c).alias(\"val\")) for c in cols\n ])).alias(\"kvs\")\n\n return df.select(by + [kvs]).select(by + [\"kvs.key\", \"kvs.val\"])\n\nto_long(df, [\"A\"])\n \n\nScala:\nimport org.apache.spark.sql.DataFrame\nimport org.apache.spark.sql.functions.{array, col, explode, lit, struct}\n\nval df = Seq((1, 0.0, 0.6), (1, 0.6, 0.7)).toDF(\"A\", \"col_1\", \"col_2\")\n\ndef toLong(df: DataFrame, by: Seq[String]): DataFrame = {\n val (cols, types) = df.dtypes.filter{ case (c, _) => !by.contains(c)}.unzip\n require(types.distinct.size == 1, s\"${types.distinct.toString}.length != 1\") \n\n val kvs = explode(array(\n cols.map(c => struct(lit(c).alias(\"key\"), col(c).alias(\"val\"))): _*\n ))\n \n val byExprs = by.map(col(_))\n\n df\n .select(byExprs :+ kvs.alias(\"_kvs\"): _*)\n .select(byExprs ++ Seq($\"_kvs.key\", $\"_kvs.val\"): _*)\n}\n\ntoLong(df, Seq(\"A\"))\n\n",
"One way to solve with pyspark sql using functions create_map and explode.\nfrom pyspark.sql import functions as func\n#Use `create_map` to create the map of columns with constant \ndf = df.withColumn('mapCol', \\\n func.create_map(func.lit('col_1'),df.col_1,\n func.lit('col_2'),df.col_2,\n func.lit('col_3'),df.col_3\n ) \n )\n#Use explode function to explode the map \nres = df.select('*',func.explode(df.mapCol).alias('col_id','col_value'))\nres.show()\n\n",
"The Spark local linear algebra libraries are presently very weak: and they do not include basic operations as the above.\nThere is a JIRA for fixing this for Spark 2.1 - but that will not help you today.\nSomething to consider: performing a transpose will likely require completely shuffling the data.\nFor now you will need to write RDD code directly. I have written transpose in scala - but not in python. Here is the scala version:\n def transpose(mat: DMatrix) = {\n val nCols = mat(0).length\n val matT = mat\n .flatten\n .zipWithIndex\n .groupBy {\n _._2 % nCols\n }\n .toSeq.sortBy {\n _._1\n }\n .map(_._2)\n .map(_.map(_._1))\n .toArray\n matT\n }\n\nSo you can convert that to python for your use. I do not have bandwidth to write/test that at this particular moment: let me know if you were unable to do that conversion.\nAt the least - the following are readily converted to python. \n\nzipWithIndex --> enumerate() (python equivalent - credit to @zero323)\nmap --> [someOperation(x) for x in ..] \ngroupBy --> itertools.groupBy()\n\nHere is the implementation for flatten which does not have a python equivalent: \n def flatten(L):\n for item in L:\n try:\n for i in flatten(item):\n yield i\n except TypeError:\n yield item\n\nSo you should be able to put those together for a solution.\n",
"You could use the stack function:\nfor example:\ndf.selectExpr(\"stack(2, 'col_1', col_1, 'col_2', col_2) as (key, value)\")\n\nwhere:\n\n2 is the number of columns to stack (col_1 and col_2)\n'col_1' is a string for the key\ncol_1 is the column from which to take the values\n\nif you have several columns, you could build the whole stack string iterating the column names and pass that to selectExpr\n",
"Use flatmap. Something like below should work\nfrom pyspark.sql import Row\n\ndef rowExpander(row):\n rowDict = row.asDict()\n valA = rowDict.pop('A')\n for k in rowDict:\n yield Row(**{'A': valA , 'colID': k, 'colValue': row[k]})\n\nnewDf = sqlContext.createDataFrame(df.rdd.flatMap(rowExpander))\n\n",
"I took the Scala answer that @javadba wrote and created a Python version for transposing all columns in a DataFrame. This might be a bit different from what OP was asking...\nfrom itertools import chain\nfrom pyspark.sql import DataFrame\n\n\ndef _sort_transpose_tuple(tup):\n x, y = tup\n return x, tuple(zip(*sorted(y, key=lambda v_k: v_k[1], reverse=False)))[0]\n\n\ndef transpose(X):\n \"\"\"Transpose a PySpark DataFrame.\n\n Parameters\n ----------\n X : PySpark ``DataFrame``\n The ``DataFrame`` that should be tranposed.\n \"\"\"\n # validate\n if not isinstance(X, DataFrame):\n raise TypeError('X should be a DataFrame, not a %s' \n % type(X))\n\n cols = X.columns\n n_features = len(cols)\n\n # Sorry for this unreadability...\n return X.rdd.flatMap( # make into an RDD\n lambda xs: chain(xs)).zipWithIndex().groupBy( # zip index\n lambda val_idx: val_idx[1] % n_features).sortBy( # group by index % n_features as key\n lambda grp_res: grp_res[0]).map( # sort by index % n_features key\n lambda grp_res: _sort_transpose_tuple(grp_res)).map( # maintain order\n lambda key_col: key_col[1]).toDF() # return to DF\n\nFor example:\n>>> X = sc.parallelize([(1,2,3), (4,5,6), (7,8,9)]).toDF()\n>>> X.show()\n+---+---+---+\n| _1| _2| _3|\n+---+---+---+\n| 1| 2| 3|\n| 4| 5| 6|\n| 7| 8| 9|\n+---+---+---+\n\n>>> transpose(X).show()\n+---+---+---+\n| _1| _2| _3|\n+---+---+---+\n| 1| 4| 7|\n| 2| 5| 8|\n| 3| 6| 9|\n+---+---+---+\n\n",
"A very handy way to implement:\nfrom pyspark.sql import Row\n\ndef rowExpander(row):\n rowDict = row.asDict()\n valA = rowDict.pop('A')\n for k in rowDict:\n yield Row(**{'A': valA , 'colID' : k, 'colValue' : row[k]})\n\n newDf = sqlContext.createDataFrame(df.rdd.flatMap(rowExpander)\n\n",
"To transpose Dataframe in pySpark, I use pivot over the temporary created column, which I drop at the end of the operation.\nSay, we have a table like this. What we wanna do is to find all users over each listed_days_bin value.\n+------------------+-------------+\n| listed_days_bin | users_count | \n+------------------+-------------+\n|1 | 5| \n|0 | 2|\n|0 | 1| \n|1 | 3| \n|1 | 4| \n|2 | 5| \n|2 | 7| \n|2 | 2| \n|1 | 1|\n+------------------+-------------+\n\nCreate new temp column - 'pvt_value', aggregate over it and pivot results\nimport pyspark.sql.functions as F\n\n\nagg_df = df.withColumn('pvt_value', lit(1))\\\n .groupby('pvt_value')\\\n .pivot('listed_days_bin')\\\n .agg(F.sum('users_count')).drop('pvt_value')\n\nNew Dataframe should look like:\n+----+---+---+\n| 0 | 1 | 2 | # Columns \n+----+---+---+\n| 3| 13| 14| # Users over the bin\n+----+---+---+\n\n",
"I found PySpark to be too complicated to transpose so I just convert my dataframe to Pandas and use the transpose() method and convert the dataframe back to PySpark if required.\ndfOutput = spark.createDataFrame(dfPySpark.toPandas().transpose())\ndfOutput.display()\n\n"
] |
[
70,
11,
7,
6,
2,
1,
1,
1,
0
] |
[] |
[] |
[
"apache_spark",
"pivot",
"python",
"transpose"
] |
stackoverflow_0037864222_apache_spark_pivot_python_transpose.txt
|
Q:
Python: create a boolean df from existing df, if column values equal to
I noticed an error in my code and would like to ask for your help with my GUI.
I have a function which gets a selected column name (line 3), identifies all the unique values of the column, and later on creates new data frames, one per unique value.
I noticed an issue with line 8:
firstly, I am using contains(), which can add the same row to two or more dataframes, while the goal is to add each row to exactly one dataframe.
if the column is not a string, the function does not work because of the contains() function, since I need to use .str before it
I couldn't find a function similar to contains() that checks for equality, and I am trying to avoid loops in this case. Any help will be appreciated. Thanks!
1) def basic_splitter():
2) global df
3) column = combobox_column_list.get()
4) unique_values = df[column].unique()
5) for i in unique_values:
6)
7) # first df[] will split the original data frame into smaller data frames based on i value
8) df_output = df[df[column].str.contains(i)]
9)
10) output_path = csv_xlsx_file_path + '/' + i + '.xlsx'
11) df_output.to_excel(output_path, sheet_name = i, index = False)
12) label_after_split = Label(my_frame_1, text = "Saved in: " + csv_xlsx_file_path)
13) label_after_split.grid(row = 4, column = 1)
Error message:
Exception in Tkinter callback
Traceback (most recent call last):
File "C:\Users\orkhamir\AppData\Local\Programs\Python\Python310\lib\tkinter\__init__.py", line 1921, in __call__
return self.func(*args)
File "C:\Users\orkhamir\AppData\Local\Temp\1/ipykernel_1976/2220190921.py", line 76, in basic_splitter
df_output = df[df[column].str.contains(i)]
raise AttributeError("Can only use .str accessor with string values!")
AttributeError: Can only use .str accessor with string values!
One workaround I considered: converting the column to str and then running the function.
UPDATE:
I have changed the code to the following one. To solve all the issues I had previously.
def basic_splitter():
global df
column = combobox_column_list.get()
unique_values = df[column].unique()
for i in range(len(unique_values)):
# create a new file to store the df
output_path = 'C:/Users/orkhamir/Desktop/New folder/' + str(unique_values[i]) + '.xlsx'
# create a first df where the column value is equal to first unique value
df_output = df[df[column] == unique_values[i]]
df_output.to_excel(output_path, sheet_name = str(unique_values[i]), index = False)
label_after_split = Label(my_frame_1, text = "Saved in: " + csv_xlsx_file_path)
label_after_split.grid(row = 4, column = 1)
A:
You need to make sure your column is of type string before trying to call the str accessor on it. Just try:
df_output = df[df[column].astype('string').str.contains(i)]
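Alternatively (just a sketch, not part of the fix above), groupby sidesteps both .str and the manual unique-value loop, since each group already contains exactly the rows equal to one value:
for value, group in df.groupby(column):
    output_path = csv_xlsx_file_path + '/' + str(value) + '.xlsx'
    group.to_excel(output_path, sheet_name=str(value), index=False)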
|
Python: create a boolean df from existing df, if column values equal to
|
I noticed an error in my code and would like to ask for your help with my GUI.
I have a function which gets a selected column name (line 3), identifies all the unique values of the column, and later on creates new data frames, one per unique value.
I noticed an issue with line 8:
firstly, I am using contains(), which can add the same row to two or more dataframes, while the goal is to add each row to exactly one dataframe.
if the column is not a string, the function does not work because of the contains() function, since I need to use .str before it
I couldn't find a function similar to contains() that checks for equality, and I am trying to avoid loops in this case. Any help will be appreciated. Thanks!
1) def basic_splitter():
2) global df
3) column = combobox_column_list.get()
4) unique_values = df[column].unique()
5) for i in unique_values:
6)
7) # first df[] will split the original data frame into smaller data frames based on i value
8) df_output = df[df[column].str.contains(i)]
9)
10) output_path = csv_xlsx_file_path + '/' + i + '.xlsx'
11) df_output.to_excel(output_path, sheet_name = i, index = False)
12) label_after_split = Label(my_frame_1, text = "Saved in: " + csv_xlsx_file_path)
13) label_after_split.grid(row = 4, column = 1)
Error message:
Exception in Tkinter callback
Traceback (most recent call last):
File "C:\Users\orkhamir\AppData\Local\Programs\Python\Python310\lib\tkinter\__init__.py", line 1921, in __call__
return self.func(*args)
File "C:\Users\orkhamir\AppData\Local\Temp\1/ipykernel_1976/2220190921.py", line 76, in basic_splitter
df_output = df[df[column].str.contains(i)]
raise AttributeError("Can only use .str accessor with string values!")
AttributeError: Can only use .str accessor with string values!
One workaround I considered: converting the column to str and then running the function.
UPDATE:
I have changed the code to the following one. To solve all the issues I had previously.
def basic_splitter():
global df
column = combobox_column_list.get()
unique_values = df[column].unique()
for i in range(len(unique_values)):
# create a new file to store the df
output_path = 'C:/Users/orkhamir/Desktop/New folder/' + str(unique_values[i]) + '.xlsx'
# create a first df where the column value is equal to first unique value
df_output = df[df[column] == unique_values[i]]
df_output.to_excel(output_path, sheet_name = str(unique_values[i]), index = False)
label_after_split = Label(my_frame_1, text = "Saved in: " + csv_xlsx_file_path)
label_after_split.grid(row = 4, column = 1)
|
[
"You need to make sure your column is of type string before trying to call the str accessor on it. Just try:\ndf_output = df[df[column].astype('string').str.contains(i)]\n\n"
] |
[
0
] |
[] |
[] |
[
"dataframe",
"python"
] |
stackoverflow_0074520074_dataframe_python.txt
|
Q:
check number that are non-negative integer and not alphabet
I want to continue the loop asking for input(); I use "type(p) is not int" to check for alphabetic input, but I get an UnboundLocalError when I use "type(p) is not int".
def check(p):
"""
>>> get the value which is non-negative integer and not alphabet
Checking if the input is negative or not.
or repeat asking for input
loop will terminate until positive integer
"""
while p < 0 or type(p) is not int:
p = float(input("Invaild response, please try again:"))
return p
check("4")
check(4)
I want to continue the loop asking for input(); I use "type(p) is not int" to check for alphabetic input, but I get an UnboundLocalError when I use "type(p) is not int".
A:
def check(num):
while type(num) is not int or num < 0:
try:
num = int(input("Invaild response, please try again:"))
except ValueError:
pass
return num
check("4")
Outputs:
print(check(-4))
#Output: Invaild response, please try again:
print(check(4.2))
#Output: Invaild response, please try again:
print(check("4"))
#Output: Invaild response, please try again:
print(check(4))
#Output: 4
|
check number that are non-negative integer and not alphabet
|
I want to continue the loop asking for input(); I use "type(p) is not int" to check for alphabetic input, but I get an UnboundLocalError when I use "type(p) is not int".
def check(p):
"""
>>> get the value which is non-negative integer and not alphabet
Checking if the input is negative or not.
or repeat asking for input
loop will terminate until positive integer
"""
while p < 0 or type(p) is not int:
p = float(input("Invaild response, please try again:"))
return p
check("4")
check(4)
I want to continue the loop asking for input(); I use "type(p) is not int" to check for alphabetic input, but I get an UnboundLocalError when I use "type(p) is not int".
|
[
"def check(num):\n while type(num) is not int or num < 0:\n try:\n num = int(input(\"Invaild response, please try again:\"))\n except ValueError:\n pass\n return num\n\n \ncheck(\"4\")\n\nOutputs:\nprint(check(-4))\n\n#Output: Invaild response, please try again:\n\nprint(check(4.2))\n\n#Output: Invaild response, please try again:\n\nprint(check(\"4\"))\n\n#Output: Invaild response, please try again:\n\nprint(check(4))\n\n#Output: 4\n\n"
] |
[
1
] |
[] |
[] |
[
"python"
] |
stackoverflow_0074520106_python.txt
|
Q:
I want to Create a csv file containing 1000 rows for which random number is generated
Problem: Create a text/csv file containing 1000 rows with the following fields/columns:
StudentID: unique identifier 1:1000
Score: Random number between 40-100
Date: Any random date within the last 20 days. Eg: 18/11/2022
Description: Get a random word from a list of 10 words of your choice
Ethnicity: Randomly assign an ethnicity(google if you don't know meaning)
Subject: Randomly assign one from 10 subjects. eg: Calculus, Statistics, Databases,
Hobby: Randomly assign one from a list of 10 hobbies
Interest: Randomly assign one from a list of 10 interests. Eg: music, nonfiction, debate, swimming,
import csv
import random
fields = ['Student Id', 'score', 'date', 'Description',
'Ethinicity', 'Subject', 'Hobby', 'Intrest']
description_list=('Delhi','Noida','goa','varansi','Assam','Kerala','Kolkata','Shilong','Bangluru','Gujrat')
Ethinicity_list=('Sikh','Kashmiri','Rajput','Bhramin','Kayasth','Adiwasi','Odia','Maratha')
Subject_list=('English','Maths','Economics','Hindi','French','Zoology','Chemistry','Physics','Social Science','Python')
Hobby_list=('Reading','Swimming','Cycling','Biking','Gaming','Sleeping','Teaching','Cricket','Shooting','Talking')
Intrest_list=('Watching TV','Esports','Football','MotoRacing','Music','Painting','Sketching','Cooking','Cars','Comedy')
data = [
[random.randint(1, 1000),random.randint(40,100),'__',random.choice(description_list),random.choice(Ethinicity_list),random.choice(Subject_list),random.choice(Hobby_list),random.choice(Intrest_list)]
]
filename = "assignment.csv"
with open(filename, 'w') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(fields)
csvwriter.writerows(data)
This generates a single row of the required data.
I want to generate 1000 rows of such data.
A:
you could use list comprehensions together with pandas dataframes something along the lines of this:
import pandas as pd
# randomly chooses an element of your list/tuple 1000 times
description_choices = [random.choice(description_list) for _ in range(1000)]
...
# create table with random data
df = pd.DataFrame({'Description': description_choices,
...
})
df.to_csv('results.csv')
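If you prefer to stay with the csv module, a minimal sketch (reusing the fields and *_list tuples from the question, which are assumed to be in scope) could build all 1000 rows in a loop, including a random date within the last 20 days:
import csv
import random
from datetime import date, timedelta

rows = []
for student_id in range(1, 1001):
    # random date within the last 20 days, formatted like 18/11/2022
    random_date = date.today() - timedelta(days=random.randint(0, 20))
    rows.append([student_id,
                 random.randint(40, 100),
                 random_date.strftime('%d/%m/%Y'),
                 random.choice(description_list),
                 random.choice(Ethinicity_list),
                 random.choice(Subject_list),
                 random.choice(Hobby_list),
                 random.choice(Intrest_list)])

with open('assignment.csv', 'w', newline='') as csvfile:
    csvwriter = csv.writer(csvfile)
    csvwriter.writerow(fields)
    csvwriter.writerows(rows)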
|
I want to Create a csv file containing 1000 rows for which random number is generated
|
Problem: Create a text/csv file containing 1000 rows with the following fields/columns:
StudentID: unique identifier 1:1000
Score: Random number between 40-100
Date: Any random date within the last 20 days. Eg: 18/11/2022
Description: Get a random word from a list of 10 words of your choice
Ethnicity: Randomly assign an ethnicity(google if you don't know meaning)
Subject: Randomly assign one from 10 subjects. eg: Calculus, Statistics, Databases,
Hobby: Randomly assign one from a list of 10 hobbies
Interest: Randomly assign one from a list of 10 interests. Eg: music, nonfiction, debate, swimming,
import csv
import random
fields = ['Student Id', 'score', 'date', 'Description',
'Ethinicity', 'Subject', 'Hobby', 'Intrest']
description_list=('Delhi','Noida','goa','varansi','Assam','Kerala','Kolkata','Shilong','Bangluru','Gujrat')
Ethinicity_list=('Sikh','Kashmiri','Rajput','Bhramin','Kayasth','Adiwasi','Odia','Maratha')
Subject_list=('English','Maths','Economics','Hindi','French','Zoology','Chemistry','Physics','Social Science','Python')
Hobby_list=('Reading','Swimming','Cycling','Biking','Gaming','Sleeping','Teaching','Cricket','Shooting','Talking')
Intrest_list=('Watching TV','Esports','Football','MotoRacing','Music','Painting','Sketching','Cooking','Cars','Comedy')
data = [
[random.randint(1, 1000),random.randint(40,100),'__',random.choice(description_list),random.choice(Ethinicity_list),random.choice(Subject_list),random.choice(Hobby_list),random.choice(Intrest_list)]
]
filename = "assignment.csv"
with open(filename, 'w') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(fields)
csvwriter.writerows(data)
This generates a single row of the required data.
I want to generate 1000 rows of such data.
|
[
"you could use list comprehensions together with pandas dataframes something along the lines of this:\nimport pandas as pd\n\n# randomly chooses an element of your list/tuple 1000 times\ndescription_choices = [random.choice(description_list) for _ in range(1000)]\n...\n\n# create table with random data\ndf = pd.DataFrame({'Description': description_choices, \n ...\n })\n\ndf.to_csv('results.csv')\n\n"
] |
[
0
] |
[] |
[] |
[
"csv",
"python",
"python_3.x"
] |
stackoverflow_0074520201_csv_python_python_3.x.txt
|
Q:
Pytorch progress bar disappear on vscode jupyter
I have a problem when training a PyTorch model: the progress bar disappeared for no reason today. It still worked properly in the days before. I'm using Jupyter through VS Code, connected to a kernel that runs on the Ubuntu subsystem. How can I show the progress bar as normal?
A:
I had this issue and it seems to come from a problem with tqdm for new versions of ipywidgets (see https://github.com/microsoft/vscode-jupyter/issues/8552).
As mentioned in the link, I solved it by downgrading ipywidgets:
pip install ipywidgets==7.7.2
|
Pytorch progress bar disappear on vscode jupyter
|
I have a problem when training a PyTorch model: the progress bar disappeared for no reason today. It still worked properly in the days before. I'm using Jupyter through VS Code, connected to a kernel that runs on the Ubuntu subsystem. How can I show the progress bar as normal?
|
[
"I had this issue and it seems to come from a problem with tqdm for new versions of ipywidget (see https://github.com/microsoft/vscode-jupyter/issues/8552).\nAs mentioned in the link, I solved it by downgrading ipywidgets:\npip install ipywidgets==7.7.2\n\n"
] |
[
1
] |
[] |
[] |
[
"progress",
"python",
"pytorch"
] |
stackoverflow_0073526940_progress_python_pytorch.txt
|
Q:
calculating the percentage of count in pandas groupby
I want to discover the underlying pattern between my features and target, so I tried to use groupby; but instead of the count, I want to calculate the ratio or percentage relative to the total count of each class.
the following code is similar to the work I have done.
fet1=["A","B","C"]
fet2=["X","Y","Z"]
target=["0","1"]
df = pd.DataFrame(data={"fet1":np.random.choice(fet1,1000),"fet2":np.random.choice(fet2,1000),"class":np.random.choice(target,1000)})
df.groupby(['fet1','fet2','class'])['class'].agg(['count'])
A:
You can achieve this more simply with:
out = df.groupby('class').value_counts(normalize=True).mul(100)
Output:
class fet1 fet2
0 A Y 13.859275
B Y 12.366738
X 12.153518
C X 11.513859
Y 10.660981
B Z 10.447761
A Z 10.021322
C Z 9.594883
A X 9.381663
1 A Y 14.124294
C Z 13.935970
B Z 11.676083
Y 11.111111
C Y 11.111111
X 11.111111
A X 10.169492
B X 9.416196
A Z 7.344633
dtype: float64
If you want the same order of multiindex:
out = (df
.groupby('class').value_counts(normalize=True).mul(100)
.reorder_levels(['fet1', 'fet2', 'class']).sort_index()
)
Output:
fet1 fet2 class
A X 0 9.381663
1 10.169492
Y 0 13.859275
1 14.124294
Z 0 10.021322
1 7.344633
B X 0 12.153518
1 9.416196
Y 0 12.366738
1 11.111111
Z 0 10.447761
1 11.676083
C X 0 11.513859
1 11.111111
Y 0 10.660981
1 11.111111
Z 0 9.594883
1 13.935970
dtype: float64
|
calculating the percentage of count in pandas groupby
|
I want to discover the underlying pattern between my features and target, so I tried to use groupby; but instead of the count, I want to calculate the ratio or percentage relative to the total count of each class.
the following code is similar to the work I have done.
fet1=["A","B","C"]
fet2=["X","Y","Z"]
target=["0","1"]
df = pd.DataFrame(data={"fet1":np.random.choice(fet1,1000),"fet2":np.random.choice(fet2,1000),"class":np.random.choice(target,1000)})
df.groupby(['fet1','fet2','class'])['class'].agg(['count'])
|
[
"You can achieve this more simply with:\nout = df.groupby('class').value_counts(normalize=True).mul(100)\n\nOutput:\nclass fet1 fet2\n0 A Y 13.859275\n B Y 12.366738\n X 12.153518\n C X 11.513859\n Y 10.660981\n B Z 10.447761\n A Z 10.021322\n C Z 9.594883\n A X 9.381663\n1 A Y 14.124294\n C Z 13.935970\n B Z 11.676083\n Y 11.111111\n C Y 11.111111\n X 11.111111\n A X 10.169492\n B X 9.416196\n A Z 7.344633\ndtype: float64\n\nIf you want the same order of multiindex:\nout = (df\n .groupby('class').value_counts(normalize=True).mul(100)\n .reorder_levels(['fet1', 'fet2', 'class']).sort_index()\n)\n\nOutput:\nfet1 fet2 class\nA X 0 9.381663\n 1 10.169492\n Y 0 13.859275\n 1 14.124294\n Z 0 10.021322\n 1 7.344633\nB X 0 12.153518\n 1 9.416196\n Y 0 12.366738\n 1 11.111111\n Z 0 10.447761\n 1 11.676083\nC X 0 11.513859\n 1 11.111111\n Y 0 10.660981\n 1 11.111111\n Z 0 9.594883\n 1 13.935970\ndtype: float64\n\n"
] |
[
1
] |
[
"I achieved it by doing this\nfet1=[\"A\",\"B\",\"C\"]\nfet2=[\"X\",\"Y\",\"Z\"]\ntarget=[\"0\",\"1\"]\ndf = pd.DataFrame(data={\"fet1\":np.random.choice(fet1,1000),\"fet2\":np.random.choice(fet2,1000),\"class\":np.random.choice(target,1000)})\ndf.groupby(['fet1','fet2','class'])['class'].agg(['count'])/df.groupby(['class'])['class'].agg(['count'])*100\n\n\n"
] |
[
-1
] |
[
"pandas",
"python"
] |
stackoverflow_0074520280_pandas_python.txt
|
Q:
How to return a value through recursive call in Python
I am trying to solve a problem. There are a few programs where I have to return a value in a variable through various function calls (recursive calls). I am not sure how to do that.
I am trying the Merge Sort algorithm in Python; this is the implementation:
def merge(arr,lo,hi):
mid=(lo+hi)//2
c=0
i=lo; j=mid+1; k=0;
temp=[]
while(i<=mid and j<=hi):
if arr[i]>arr[j]:
temp.append(arr[j])
c+=mid-i+1
j+=1
k+=1
else:
temp.append(arr[i])
i+=1
k+=1
while(i<=mid):
temp.append(arr[i])
i+=1
k+=1
while(j<=hi):
temp.append(arr[j])
j+=1
k+=1
for i in range(k):
arr[lo+i]=temp[i]
return c
def mergeSort(arr,lo,hi):
if lo==hi:
return
mid=(lo+hi)//2
mergeSort(arr,lo,mid)
mergeSort(arr,mid+1,hi)
merge(arr,lo,hi)
In addition to merge sort, I am counting the number of elements which are smaller than a particular element (not very important), for which I am using a count variable 'c'.
Now I have to return the c value through all the recursive calls and back to my main function. I am not sure how to do that. Could someone help me return it?
I also tried returning like this:
return mergeSort(arr,lo,mid)
But It just returns 0.
My main function gives the call to mergeSort(arr,0,n-1)
Thanks in advance.
A:
All you need to do is add up the values returned by each recursive call and by merge, then return that value.
def mergeSort(arr,lo,hi):
if lo==hi:
return 0
c = 0
mid=(lo+hi)//2
c += mergeSort(arr,lo,mid)
c += mergeSort(arr,mid+1,hi)
c += merge(arr,lo,hi)
return c
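For example, a quick sanity check (a hypothetical input; mergeSort now returns the total count accumulated by merge):
arr = [5, 3, 8, 1]
inversions = mergeSort(arr, 0, len(arr) - 1)
print(arr)         # [1, 3, 5, 8]
print(inversions)  # 4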
|
How to return a value through recursive call in Python
|
I am trying to solve a problem. There are a few programs where I have to return a value in a variable through various function calls (recursive calls). I am not sure how to do that.
I am trying the Merge Sort algorithm in Python; this is the implementation:
def merge(arr,lo,hi):
mid=(lo+hi)//2
c=0
i=lo; j=mid+1; k=0;
temp=[]
while(i<=mid and j<=hi):
if arr[i]>arr[j]:
temp.append(arr[j])
c+=mid-i+1
j+=1
k+=1
else:
temp.append(arr[i])
i+=1
k+=1
while(i<=mid):
temp.append(arr[i])
i+=1
k+=1
while(j<=hi):
temp.append(arr[j])
j+=1
k+=1
for i in range(k):
arr[lo+i]=temp[i]
return c
def mergeSort(arr,lo,hi):
if lo==hi:
return
mid=(lo+hi)//2
mergeSort(arr,lo,mid)
mergeSort(arr,mid+1,hi)
merge(arr,lo,hi)
In addition to merge sort, I am counting the number of elements which are smaller than a particular element (not very important), for which I am using a count variable 'c'.
Now I have to return the c value through all the recursive calls and back to my main function. I am not sure how to do that. Could someone help me return it?
I also tried returning like this:
return mergeSort(arr,lo,mid)
But It just returns 0.
My main function gives the call to mergeSort(arr,0,n-1)
Thanks in advance.
|
[
"All you need to do is add up the values returned by each recursive call and by merge, then return that value.\ndef mergeSort(arr,lo,hi):\n if lo==hi:\n return 0\n c = 0\n mid=(lo+hi)//2\n c += mergeSort(arr,lo,mid)\n c += mergeSort(arr,mid+1,hi)\n c += merge(arr,lo,hi)\n return c\n\n"
] |
[
0
] |
[] |
[] |
[
"mergesort",
"python",
"recursion"
] |
stackoverflow_0074519892_mergesort_python_recursion.txt
|
Q:
How to read file with space separated values in pandas
I am trying to read the file into pandas.
The file has values separated by spaces, but with varying numbers of spaces.
I tried:
pd.read_csv('file.csv', delimiter=' ')
but it doesn't work
A:
Add the delim_whitespace=True argument; it's faster than a regex.
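A minimal sketch of that call, using the same hypothetical file name as the question:
import pandas as pd

df = pd.read_csv('file.csv', delim_whitespace=True)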
A:
you can use regex as the delimiter:
pd.read_csv("whitespace.csv", header=None, delimiter=r"\s+")
A:
If you can't get text parsing to work using the accepted answer (e.g. if your text file contains non-uniform rows) then it's worth trying with Python's csv library - here's an example using a user-defined Dialect:
import csv
csv.register_dialect('skip_space', skipinitialspace=True)
with open(my_file, 'r') as f:
reader=csv.reader(f , delimiter=' ', dialect='skip_space')
for item in reader:
print(item)
A:
Pandas read_fwf for the win:
import pandas as pd
df = pd.read_fwf(file_path)
|
How to read file with space separated values in pandas
|
I am trying to read the file into pandas.
The file has values separated by spaces, but with varying numbers of spaces.
I tried:
pd.read_csv('file.csv', delimiter=' ')
but it doesn't work
|
[
"add delim_whitespace=True argument, it's faster than regex.\n",
"you can use regex as the delimiter:\npd.read_csv(\"whitespace.csv\", header=None, delimiter=r\"\\s+\")\n\n",
"If you can't get text parsing to work using the accepted answer (e.g if your text file contains non uniform rows) then it's worth trying with Python's csv library - here's an example using a user defined Dialect:\n import csv\n\n csv.register_dialect('skip_space', skipinitialspace=True)\n with open(my_file, 'r') as f:\n reader=csv.reader(f , delimiter=' ', dialect='skip_space')\n for item in reader:\n print(item)\n\n",
"Pandas read_fwf for the win:\nimport pandas as pd\n\ndf = pd.read_fwf(file_path)\n\n"
] |
[
218,
47,
0,
0
] |
[] |
[] |
[
"csv",
"pandas",
"python"
] |
stackoverflow_0019632075_csv_pandas_python.txt
|
Q:
MT5 machine learning model for paraphrasing
I'm trying to create a machine learning model to paraphrase given Persian text. I was introduced to mt5 as a multilingual text-to-text model. However, I can't figure out how to implement this. I have gathered the data. Here's a sample of the data:
Data sample
---UPDATE---
I have tried to paraphrase using the T5 model, and it works well for English. However, I can't get logical results from the MT5 model. Here is the T5 version code:
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
tokenizer = AutoTokenizer.from_pretrained("Vamsi/T5_Paraphrase_Paws")
model = AutoModelForSeq2SeqLM.from_pretrained("Vamsi/T5_Paraphrase_Paws")
sentence = sentence_1
text = "paraphrase: " + sentence + " </s>"
encoding = tokenizer.encode_plus(text,pad_to_max_length=True, return_tensors="pt")
input_ids, attention_masks = encoding["input_ids"], encoding["attention_mask"]
outputs = model.generate(
input_ids=input_ids, attention_mask=attention_masks,
max_length=256,
do_sample=True,
top_k=120,
top_p=0.95,
early_stopping=False,
num_return_sequences=5
)
print ("\n")
print("Origianl sentence:")
print(sentence)
print ("\n")
print("Paraphrasing:")
for output in outputs:
line = tokenizer.decode(output, skip_special_tokens=True,clean_up_tokenization_spaces=True)
print(line)
When I give the following sentence to the model, it returns the following results:
Original sentence:
Washing your hands Properly will keep you away from COVID-19.
Paraphrasing:
By properly washing your hands, you will keep away from COVID-19.
Washing your hands correctly will keep you away from COVID-19.
Washing your hands correctly will keep you away from COVID-19.
Washing your hands correctly will keep you from COVID-19.
Washing your hands properly will keep you away from COVID-19.
But when I change the model to the MT5-base, the results are absurd. Here is an example:
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
tokenizer = AutoTokenizer.from_pretrained("google/mt5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-base")
Original sentence:
Washing your hands Properly will keep you away from COVID-19.
Paraphrasing:
<extra_id_0>, left.
<extra_id_0>, also.
<extra_id_0>. Comment
<extra_id_0>.
<extra_id_0>o.
A:
IMHO mT5 can't be used for paraphrase generation out-of-the-box the way T5 can. You can find fine-tuned versions of the T5 model intended for paraphrase generation on the HuggingFace Hub, such as this one. There's a paper associated with the model and you may find the solution there. As far as I understand it, you need a labeled dataset with which you will fine-tune the model to generate paraphrases in your language.
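As a very rough, untested sketch of what one fine-tuning step could look like with the transformers API (the task prefix and the sentence pair are made-up placeholders, and the optimizer and training loop are omitted):
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("google/mt5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-base")

# one labeled pair from a Persian paraphrase dataset (placeholders)
source = "paraphrase: <a Persian sentence>"
target = "<a Persian paraphrase of that sentence>"

inputs = tokenizer(source, return_tensors="pt")
labels = tokenizer(target, return_tensors="pt").input_ids

# a forward pass with labels yields the seq2seq training loss
loss = model(**inputs, labels=labels).loss
loss.backward()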
|
MT5 machine learning model for paraphrasing
|
I'm trying to create a machine learning model to paraphrase given Persian text. I was introduced to mt5 as a multilingual text-to-text model. However, I can't figure out how to implement this. I have gathered the data. Here's a sample of the data:
Data sample
---UPDATE---
I have tried to paraphrase using the T5 model, and it works well for English. However, I can't get logical results from the MT5 model. Here is the T5 version code:
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
tokenizer = AutoTokenizer.from_pretrained("Vamsi/T5_Paraphrase_Paws")
model = AutoModelForSeq2SeqLM.from_pretrained("Vamsi/T5_Paraphrase_Paws")
sentence = sentence_1
text = "paraphrase: " + sentence + " </s>"
encoding = tokenizer.encode_plus(text,pad_to_max_length=True, return_tensors="pt")
input_ids, attention_masks = encoding["input_ids"], encoding["attention_mask"]
outputs = model.generate(
input_ids=input_ids, attention_mask=attention_masks,
max_length=256,
do_sample=True,
top_k=120,
top_p=0.95,
early_stopping=False,
num_return_sequences=5
)
print ("\n")
print("Origianl sentence:")
print(sentence)
print ("\n")
print("Paraphrasing:")
for output in outputs:
line = tokenizer.decode(output, skip_special_tokens=True,clean_up_tokenization_spaces=True)
print(line)
When I give the following sentence to the model, it returns the following results:
Original sentence:
Washing your hands Properly will keep you away from COVID-19.
Paraphrasing:
By properly washing your hands, you will keep away from COVID-19.
Washing your hands correctly will keep you away from COVID-19.
Washing your hands correctly will keep you away from COVID-19.
Washing your hands correctly will keep you from COVID-19.
Washing your hands properly will keep you away from COVID-19.
But when I change the model to the MT5-base, the results are absurd. Here is an example:
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
tokenizer = AutoTokenizer.from_pretrained("google/mt5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-base")
Original sentence:
Washing your hands Properly will keep you away from COVID-19.
Paraphrasing:
<extra_id_0>, left.
<extra_id_0>, also.
<extra_id_0>. Comment
<extra_id_0>.
<extra_id_0>o.
|
[
"IMHO mT5 can't be used for paraphrase generation out-of-the-box, just like the T5 can. You can find fine-tuned versions of the T5 model intended for paraphrase generation on the HuggingFace Hub, such as this one. There's a paper associated with the model and you may find the solution there. As far as I understand it, you need a labeled dataset with which you will fine-tune the T5 model to generate paraphrases in your language.\n"
] |
[
0
] |
[] |
[] |
[
"machine_learning",
"nlp",
"python"
] |
stackoverflow_0074149057_machine_learning_nlp_python.txt
|
Q:
Fancy indexing calculation of adjacency matrix from adjacency list
Problem:
I want to calculate the adjacency matrix A_ij at several times, given the adjacency list E_ij, where E_ij[t,i] = j gives the edge from i to j at time t.
I can do it with the following code:
import numpy as np
nTimes = 100
nParticles = 10
A_ij = np.full((nTimes, nParticles, nParticles), False)
E_ij = np.random.randint(0, 9, (100, 10))
for t in range(nTimes):
for i in range(nParticles):
A_ij[t, i, E_ij[t,i]] = True
Question:
How can I do it in a vectorized way, either with fancy indexing or using numpy functions such as np.take_along_axis?
What I tried:
I expected this to work:
A_ij[:,np.arange(nParticles)[None,:,None], E_ij[:,None,np.arange(nParticles)]] = True
But it does not.
Related to: Trying to convert adjacency list to adjacency matrix in Python
A:
I think this might work:
import numpy as np
nTimes = 100
nParticles = 10
A_ij = np.full((nTimes, nParticles, nParticles), False)
E_ij = np.random.randint(0, 9, (100, 10))
np.put_along_axis(A_ij, E_ij[..., None], True, axis=2)
A:
In case it may help other people, I also found a way to do fancy indexing in this problem, but @Chrysophylaxs' answer was faster and simpler (I guess I was confused by the indices and could not think of it). I also add @Mercury's answer for comparison.
Code:
import numpy as np
import matplotlib.pyplot as plt
import time
nTimes = 1000000
nParticles = 10
A_ij1 = np.full((nTimes, nParticles, nParticles), False)
A_ij2 = np.full((nTimes, nParticles, nParticles), False)
A_ij3 = np.full((nTimes, nParticles, nParticles), False)
A_ij4 = np.full((nTimes, nParticles, nParticles), False)
E_ij = np.random.randint(0, 9, (nTimes, 10))
start_time = time.time()
for t in range(nTimes):
for i in range(nParticles):
A_ij1[t, i, E_ij[t,i]] = True
print("Loop: %s s" % (time.time() - start_time))
start_time = time.time()
A_ij2[np.arange(nTimes)[:,None],np.arange(nParticles)[None,:], E_ij[np.arange(nTimes)[:,None],np.arange(nParticles)[None,:]]] = True
print("Fancy indexing: %s s" % (time.time() - start_time))
start_time = time.time()
np.put_along_axis(A_ij3, E_ij[..., None], True, axis=2)
print("Put along axis: %s s" % (time.time() - start_time))
start_time = time.time()
i, j = np.mgrid[:nTimes, :nParticles]
A_ij4[i, j, E_ij] = True
print("mgrid: %s s" % (time.time() - start_time))
print(np.allclose(A_ij1, A_ij2))
print(np.allclose(A_ij1, A_ij3))
print(np.allclose(A_ij1, A_ij4))
Output:
Loop: 2.5006823539733887 s
Fancy indexing: 0.11996173858642578 s
Put along axis: 0.0814671516418457 s
mgrid: 0.19223332405090332 s
True
True
True
A:
Another way to do it, and close to what you actually tried at the end would be something like:
i, j = np.mgrid[:nTimes, :nParticles]
A_ij[i, j, E_ij] = True
But the accepted answer is definitely the better way to go about the problem, no need to construct indices.
|
Fancy indexing calculation of adjacency matrix from adjacency list
|
Problem:
I want to calculate the adjacency matrix A_ij at several times, given the adjacency list E_ij, where E_ij[t,i] = j gives the edge from i to j at time t.
I can do it with the following code:
import numpy as np
nTimes = 100
nParticles = 10
A_ij = np.full((nTimes, nParticles, nParticles), False)
E_ij = np.random.randint(0, 9, (100, 10))
for t in range(nTimes):
for i in range(nParticles):
A_ij[t, i, E_ij[t,i]] = True
Question:
How can I do it in a vectorized way, either with fancy indexing or using numpy functions such as np.take_along_axis?
What I tried:
I expected this to work:
A_ij[:,np.arange(nParticles)[None,:,None], E_ij[:,None,np.arange(nParticles)]] = True
But it does not.
Related to: Trying to convert adjacency list to adjacency matrix in Python
|
[
"I think this might work:\nimport numpy as np\n\nnTimes = 100\nnParticles = 10\nA_ij = np.full((nTimes, nParticles, nParticles), False)\nE_ij = np.random.randint(0, 9, (100, 10))\n\nnp.put_along_axis(A_ij, E_ij[..., None], True, axis=2)\n\n",
"In case it may help other people, I also found a way to do fancy indexing in this problem, but @Chrysophylaxs answer was faster and simpler (I guess I was confused with the indices and I could not think about it). I also add @Mercury answer for comparison.\nCode:\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\n\nnTimes = 1000000\nnParticles = 10\nA_ij1 = np.full((nTimes, nParticles, nParticles), False)\nA_ij2 = np.full((nTimes, nParticles, nParticles), False)\nA_ij3 = np.full((nTimes, nParticles, nParticles), False)\nA_ij4 = np.full((nTimes, nParticles, nParticles), False)\n\n\nE_ij = np.random.randint(0, 9, (nTimes, 10))\n\nstart_time = time.time()\nfor t in range(nTimes):\n for i in range(nParticles):\n A_ij1[t, i, E_ij[t,i]] = True\nprint(\"Loop: %s s\" % (time.time() - start_time))\n\n \nstart_time = time.time()\nA_ij2[np.arange(nTimes)[:,None],np.arange(nParticles)[None,:], E_ij[np.arange(nTimes)[:,None],np.arange(nParticles)[None,:]]] = True\nprint(\"Fancy indexing: %s s\" % (time.time() - start_time))\n\nstart_time = time.time()\nnp.put_along_axis(A_ij3, E_ij[..., None], True, axis=2)\nprint(\"Put along axis: %s s\" % (time.time() - start_time))\n\nstart_time = time.time()\ni, j = np.mgrid[:nTimes, :nParticles]\nA_ij4[i, j, E_ij] = True\nprint(\"mgrid: %s s\" % (time.time() - start_time))\n\n\nprint(np.allclose(A_ij1, A_ij2))\nprint(np.allclose(A_ij1, A_ij3))\nprint(np.allclose(A_ij1, A_ij4))\n\nOutput:\nLoop: 2.5006823539733887 s\nFancy indexing: 0.11996173858642578 s\nPut along axis: 0.0814671516418457 s\nmgrid: 0.19223332405090332 s\nTrue\nTrue\nTrue\n\n",
"Another way to do it, and close to what you actually tried at the end would be something like:\ni, j = np.mgrid[:nTimes, :nParticles]\nA_ij[i, j, E_ij] = True\n\nBut the accepted answer is definitely the better way to go about the problem, no need to construct indices.\n"
] |
[
2,
1,
1
] |
[] |
[] |
[
"array_broadcasting",
"numpy",
"numpy_ndarray",
"python",
"vectorization"
] |
stackoverflow_0074519974_array_broadcasting_numpy_numpy_ndarray_python_vectorization.txt
|
Q:
Storing The Output of a Permutation as a List of Lists
When I run the following code I get rows of tuples:
perm = itertools.permutations(['A','B','C','D','E','F'],4)
for val in perm:
    print(val)
How do I make the code give me the output as a single list of lists instead of rows of tuples?
When I run the code I get something like this
('F', 'E', 'B', 'C')
('F', 'E', 'B', 'D')
('F', 'E', 'C', 'A')
('F', 'E', 'C', 'B')
etc.
What I want is something like this
[['F', 'E', 'B', 'C'],
['F', 'E', 'B', 'D'],
['F', 'E', 'C', 'A'],...,]
A:
cast val into a list and append it to another list.
import itertools
perm = itertools.permutations(['A','B','C','D','E','F'],4)
result = []
for val in perm:
result.append(list(val))
print(result)
The question is, do you want to generate all permutations and store them?
As you have it now, the generator will give you one permutation each time, which is memory efficient.
You can generate all of them into a list of lists, but just think if you really want that, since the number of permutations could be very large.
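As a shorter variant of the same idea, the whole loop collapses into a list comprehension:
import itertools

result = [list(val) for val in itertools.permutations(['A','B','C','D','E','F'], 4)]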
|
Storing The Output of a Permutation as a List of Lists
|
When I run the following code I get rows of tuples:
perm = itertools.permutations(['A','B','C','D','E','F'],4)
for val in perm:
    print(val)
How do I make the code give me the output as a single list of lists instead of rows of tuples?
When I run the code I get something like this
('F', 'E', 'B', 'C')
('F', 'E', 'B', 'D')
('F', 'E', 'C', 'A')
('F', 'E', 'C', 'B')
etc.
What I want is something like this
[['F', 'E', 'B', 'C'],
['F', 'E', 'B', 'D'],
['F', 'E', 'C', 'A'],...,]
|
[
"cast val into a list and append it to another list.\nimport itertools\nperm = itertools.permutations(['A','B','C','D','E','F'],4)\n\nresult = []\nfor val in perm:\n result.append(list(val))\n\nprint(result)\n\nThe question is, do you want to generate all permutations and store them?\nAs you have it now, the generator will give you one permutation each time, which is memory efficient.\nYou can generate all of them into a list of lists, but just think if you really want that, since the number of permutations could be very large.\n"
] |
[
2
] |
[] |
[] |
[
"list",
"loops",
"permutation",
"python",
"tuples"
] |
stackoverflow_0074520401_list_loops_permutation_python_tuples.txt
|
Q:
how to search for links using telethon
Is there any way I can filter the messages I get from:
client.get_messages()
to a spasific pattern? (in this case links)
I can filter it after I get all the messages but if there is a way to do it earlier that would be better.
A:
get_messages (or iter_messages) supports filter argument that takes any of MessagesFilter constructors.
so in your case, use:
await client.get_messages(chat,
filter=telethon.types.InputMessagesFilterUrl()
)
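A slightly fuller sketch, assuming an already-connected client (the filter class also lives under telethon.tl.types):
from telethon.tl.types import InputMessagesFilterUrl

async def get_link_messages(client, chat):
    # Telegram filters server-side, so only messages containing URLs come back
    return await client.get_messages(chat, limit=100,
                                     filter=InputMessagesFilterUrl())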
|
how to search for links using telethon
|
Is there any way I can filter the messages I get from:
client.get_messages()
to a specific pattern? (in this case links)
I can filter it after I get all the messages but if there is a way to do it earlier that would be better.
|
[
"get_messages (or iter_messages) supports filter argument that takes any of MessagesFilter constructors.\nso in your case, use:\nawait client.get_messages(chat,\n filter=telethon.types.InputMessagesFilterUrl()\n)\n\n"
] |
[
3
] |
[] |
[] |
[
"python",
"scrape",
"telegram",
"telethon"
] |
stackoverflow_0074520325_python_scrape_telegram_telethon.txt
|
Q:
Pd.crosstab missing data?
I am using pd.crosstab to count presence/absence data. In the first column, I have several presence counts (represented by 1's); in the second column I have just one 'presence'. However, when I run crosstab on this data, that single presence in the second column isn't counted. Could anyone shed some light on why this is happening and what I'm doing wrong?
Python v. 3.8.5
Pandas v. 1.2.3
System: MacOS Monterey v. 12.5.1
Column1:
>>> mbx_final['Cmpd1640']
OV745_1A 0
OV745_1B 0
OV745_1C 1
OV745_1D 1
OV745_1E 0
OV745_4A 1
OV745_4B 1
OV745_4C 0
OV22_12A 1
OV22_12B 1
OV22_12C 1
OV22_12D 0
OV22_12E 0
OV22_12F 0
OV22_13A 0
OV22_13B 0
OV22_13C 0
OV86_6A 1
OV86_6D 1
OV86_6E 1
OV86_6F 1
OV86_6G 1
OV86_6H 1
OV86_6I 1
OV86_6J 1
OV86_6K 0
OV86_6L 1
OV86_8A 1
OV86_8B 1
OV86_8C 1
OB1B 1
OB1C 1
SK3A 0
SK3B 0
SK3C 0
SK7A 1
SK7B 0
Column2:
>>> mgx_final['Otu2409']
OV745_1A 0
OV745_1B 0
OV745_1C 0
OV745_1D 0
OV745_1E 0
OV745_4A 0
OV745_4B 0
OV745_4C 0
OV22_12A 0
OV22_12B 0
OV22_12C 0
OV22_12D 0
OV22_12E 0
OV22_12F 0
OV22_13A 0
OV22_13B 0
OV22_13C 0
OV86_6A 0
OV86_6D 0
OV86_6E 0
OV86_6F 0
OV86_6G 0
OV86_6H 0
OV86_6I 0
OV86_6J 0
OV86_6K 0
OV86_6L 0
OV86_8A 0
OV86_8B 0
OV86_8C 0
OB1A 1
OB1C 0
SK3A 0
SK3B 0
SK3C 0
SK7A 0
SK7B 0
Crosstab command:
contingency_tab = pd.crosstab(mbx_final['Cmpd1640'],mgx_final['Otu2409'],margins=True)
Results:
>>> contingency_tab
Otu2409 0 All
Cmpd1640
0 15 15
1 21 21
All 36 36
I would expect to see a result like this:
>>> contingency_tab
Otu2409 0 1 All
Cmpd1640
0 15 0 15
1 21 1 22
All 36 1 37
What am I doing wrong?
A:
You can use the dropna parameter, which is by default set to True. Setting it to False will include columns whose entries are all NaN.
contingency_tab = pd.crosstab(mbx_final['Cmpd1640'],mgx_final['Otu2409'],margins=True, dropna=False)
You can read more on the official documentation here: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.crosstab.html
Edit 1:
I've replicated your dataset and code and run the following:
df_in = pd.read_excel("Book1.xlsx", index_col="index")
mbx_final = df_in[["Cmpd1640"]]
mgx_final = df_in[["Otu2409"]]
contingency_tab = pd.crosstab(mbx_final['Cmpd1640'], mgx_final['Otu2409'], margins=True)
display(contingency_tab)
And I get your expected output:
There might be something wrong with how you're displaying the crosstab function output.
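One more thing worth checking, since the two Series come from different frames: crosstab aligns its inputs on the index, and your samples show OB1B in Cmpd1640 but OB1A in Otu2409, so the row holding the single 1 may simply be dropped during alignment. A quick check:
# labels present in one index but not the other are silently dropped
print(mbx_final.index.symmetric_difference(mgx_final.index))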
|
Pd.crosstab missing data?
|
I am using pd.crosstab to count presence/absence data. In the first column, I have several presence counts (represented by 1's); in the second column I have just one 'presence'. However, when I run crosstab on this data, that single presence in the second column isn't counted. Could anyone shed some light on why this is happening and what I'm doing wrong?
Python v. 3.8.5
Pandas v. 1.2.3
System: MacOS Monterey v. 12.5.1
Column1:
>>> mbx_final['Cmpd1640']
OV745_1A 0
OV745_1B 0
OV745_1C 1
OV745_1D 1
OV745_1E 0
OV745_4A 1
OV745_4B 1
OV745_4C 0
OV22_12A 1
OV22_12B 1
OV22_12C 1
OV22_12D 0
OV22_12E 0
OV22_12F 0
OV22_13A 0
OV22_13B 0
OV22_13C 0
OV86_6A 1
OV86_6D 1
OV86_6E 1
OV86_6F 1
OV86_6G 1
OV86_6H 1
OV86_6I 1
OV86_6J 1
OV86_6K 0
OV86_6L 1
OV86_8A 1
OV86_8B 1
OV86_8C 1
OB1B 1
OB1C 1
SK3A 0
SK3B 0
SK3C 0
SK7A 1
SK7B 0
Column2:
>>> mgx_final['Otu2409']
OV745_1A 0
OV745_1B 0
OV745_1C 0
OV745_1D 0
OV745_1E 0
OV745_4A 0
OV745_4B 0
OV745_4C 0
OV22_12A 0
OV22_12B 0
OV22_12C 0
OV22_12D 0
OV22_12E 0
OV22_12F 0
OV22_13A 0
OV22_13B 0
OV22_13C 0
OV86_6A 0
OV86_6D 0
OV86_6E 0
OV86_6F 0
OV86_6G 0
OV86_6H 0
OV86_6I 0
OV86_6J 0
OV86_6K 0
OV86_6L 0
OV86_8A 0
OV86_8B 0
OV86_8C 0
OB1A 1
OB1C 0
SK3A 0
SK3B 0
SK3C 0
SK7A 0
SK7B 0
Crosstab command:
contingency_tab = pd.crosstab(mbx_final['Cmpd1640'],mgx_final['Otu2409'],margins=True)
Results:
>>> contingency_tab
Otu2409 0 All
Cmpd1640
0 15 15
1 21 21
All 36 36
I would expect to see a result like this:
>>> contingency_tab
Otu2409 0 1 All
Cmpd1640
0 15 0 15
1 21 1 22
All 36 1 37
What am I doing wrong?
|
[
"You can use the dropna parameter, which is by default set to True. Setting it to False will include columns whose entries are all NaN.\ncontingency_tab = pd.crosstab(mbx_final['Cmpd1640'],mgx_final['Otu2409'],margins=True, dropna=False)\n\nYou can read more on the official documentation here: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.crosstab.html\n\nEdit 1:\nI've replicated your dataset and code and run the following:\ndf_in = pd.read_excel(\"Book1.xlsx\", index_col=\"index\")\nmbx_final = df_in[[\"Cmpd1640\"]]\nmgx_final = df_in[[\"Otu2409\"]]\ncontingency_tab = pd.crosstab(mbx_final['Cmpd1640'], mgx_final['Otu2409'], margins=True)\ndisplay(contingency_tab)\n\nAnd I get your expected output:\n\nThere might be something wrong with how you're displaying the crosstab function output.\n"
] |
[
2
] |
[] |
[] |
[
"contingency",
"pandas",
"pivot_table",
"python"
] |
stackoverflow_0074520394_contingency_pandas_pivot_table_python.txt
|
Q:
jaydebeapi.connect always returning: "TypeError: Class sajdbc4.jar is not found"
I am trying to use the jaydebeapi python package to create a JDBC database connection, but no matter what argument I put in the connect method I get the same error: "TypeError: Class [first_argument_str] is not found"
import jaydebeapi
conn = jaydebeapi.connect('sajdbc4.jar', connectionString,[userName, Password])
I tried adding jar files to the directory, and referring directly to the paths in $CLASSPATH neither changed the error.
Originally, I was trying to connect to a Sybase database, but I'd settle for anything at this point.
Full Error Text:
File "H:\Data-Sources\Connection-Examples\Sybase\jayBeaOnly.py", line 2, in <module>
conn = jaydebeapi.connect("org.hsqldb.jdbcDriver",
File "C:\Users\username\AppData\Roaming\Python\Python39\site-packages\jaydebeapi\__init__.py", line 412, in connect
jconn = _jdbc_connect(jclassname, url, driver_args, jars, libs)
File "C:\Users\username\AppData\Roaming\Python\Python39\site-packages\jaydebeapi\__init__.py", line 221, in _jdbc_connect_jpype
jpype.JClass(jclassname)
File "C:\Users\username\AppData\Roaming\Python\Python39\site-packages\jpype\_jclass.py", line 99, in __new__
return _jpype._getClass(jc)
TypeError: Class org.hsqldb.jdbcDriver is not found
python 3.9;
Windows 10;
JayDeBeApi 1.2.3;
openjdk version "1.8.0_332"
A:
I fixed the problem by adding several file paths to the $CLASSPATH variable and ensuring that the environment variables were present in the script. Ultimately, I needed more than one file added to $CLASSPATH. Solution below (your file paths will obviously differ)
os.environ['Path'] = os.environ['Path']+f';D:\\Users\\{user}\\Sybase\\IQ-16_0\\Bin32'
os.environ['JAVA_HOME'] = 'C:\\Program Files\\Microsoft\\jdk-11.0.15.10-hotspot'
os.environ['CLASSPATH'] = (f'D:\\Users\\{user}\\Sybase\\IQ-16_0\\Java\\sajdbc4.jar;'+
f'D:\\Users\\{user}\\Sybase\\IQ-16_0\\Bin32\\dbjdbc16.dll;'+
f'D:\\Users\\{user}\\Sybase\\jConnect-7_0\\classes\\jconn4.jar;'+
f'D:\\Users\\{user}\\Sybase\\jConnect-7_0\\devclasses\\jconn4.jar')
A:
For me, the solution was to copy jconn4.jar to "<JAVA_HOME>/jre/lib/ext"
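Separately from the CLASSPATH fixes above, jaydebeapi.connect can also be pointed at the jar directly through its jars parameter; the first argument must be the fully qualified Java driver class name, not the jar file name. A rough sketch (the driver class, URL and paths below are assumptions to check against your driver's documentation):
import jaydebeapi

conn = jaydebeapi.connect(
    "com.sybase.jdbc4.jdbc.SybDriver",         # driver class name, not 'sajdbc4.jar'
    "jdbc:sybase:Tds:dbhost:5000/mydb",        # placeholder JDBC URL
    ["userName", "Password"],
    jars=r"D:\Users\me\Sybase\jConnect-7_0\classes\jconn4.jar",  # jar(s) to load
)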
|
jaydebeapi.connect always returning: "TypeError: Class sajdbc4.jar is not found"
|
I am trying to use the jaydebeapi python package to create a jdbc database connection, but no matter what argument I put in the connect method I get the same error: "TypeError: Class [first_argument_str] is not found"
import jaydebeapi
conn = jaydebeapi.connect('sajdbc4.jar', connectionString,[userName, Password])
I tried adding jar files to the directory, and referring directly to the paths in $CLASSPATH neither changed the error.
Originally, I was trying to connect to a Sybase database, but I'd settle for anything at this point.
Full Error Text:
File "H:\Data-Sources\Connection-Examples\Sybase\jayBeaOnly.py", line 2, in <module>
conn = jaydebeapi.connect("org.hsqldb.jdbcDriver",
File "C:\Users\username\AppData\Roaming\Python\Python39\site-packages\jaydebeapi\__init__.py", line 412, in connect
jconn = _jdbc_connect(jclassname, url, driver_args, jars, libs)
File "C:\Users\username\AppData\Roaming\Python\Python39\site-packages\jaydebeapi\__init__.py", line 221, in _jdbc_connect_jpype
jpype.JClass(jclassname)
File "C:\Users\username\AppData\Roaming\Python\Python39\site-packages\jpype\_jclass.py", line 99, in __new__
return _jpype._getClass(jc)
TypeError: Class org.hsqldb.jdbcDriver is not found
python 3.9;
Windows 10;
JayDeBeApi 1.2.3;
openjdk version "1.8.0_332"
|
[
"I fixed the problem by adding several file paths to the $CLASSPATH variable and ensuring that the environment variables were present in the script. Ultimately, I needed more than one file added to $CLASSPATH. Solution below (your file paths will obviously differ)\nos.environ['Path'] = os.environ['Path']+f';D:\\\\Users\\\\{user}\\\\Sybase\\\\IQ-16_0\\\\Bin32'\nos.environ['JAVA_HOME'] = 'C:\\\\Program Files\\\\Microsoft\\\\jdk-11.0.15.10-hotspot'\nos.environ['CLASSPATH'] = (f'D:\\\\Users\\\\{user}\\\\Sybase\\\\IQ-16_0\\\\Java\\\\sajdbc4.jar;'+\nf'D:\\\\Users\\\\{user}\\\\Sybase\\\\IQ-16_0\\\\Bin32\\\\dbjdbc16.dll;'+\nf'D:\\\\Users\\\\{user}\\\\Sybase\\\\jConnect-7_0\\\\classes\\\\jconn4.jar;'+\nf'D:\\\\Users\\\\{user}\\\\Sybase\\\\jConnect-7_0\\\\devclasses\\\\jconn4.jar')\n\n",
"For me, the solution was to copy jconn4.jar to \"<JAVA_HOME>/jre/lib/ext\"\n"
] |
[
2,
0
] |
[] |
[] |
[
"java",
"jaydebeapi",
"jdbc",
"python",
"sybase"
] |
stackoverflow_0073514224_java_jaydebeapi_jdbc_python_sybase.txt
|
Q:
Deleting values from array with np.diff
I need to edit an array. The array has two columns: one for X-values, the other for Y-values. The X-values are 0.0025 steps (0, 0.0025, 0.005, etc.), but sometimes there are wrong steps and I need to delete those. Others recommended that I use the following:
data = data[~np.r_[True, (np.diff(data[:,0])>0)&(np.diff(data[:, 0])<0.0024)]]
The problem is that the first value always gets deleted, and the second problem is that it doesn't just delete the wrong step but also the one after it.
A:
The reason the first element is always being deleted is because you invert the output of np.r_ which prepends True to the output of np.diff. When using ~, that gets turned into a False, and thus the first element gets deleted.
My guess that the step after gets deleted too is because np.diff checks the difference between consecutive elements. Consider:
0.0025, 0.005, 0.008, 0.01, 0.0125
~~~~~
# The diff here is going to look like:
0.0025, 0.003, 0.002, 0.0025
Note how the wrong element results in a wrong diff both before AND after that element.
If that is unexpected behavior, then you should not use np.diff, instead compare with the expected steps directly using np.arange
import numpy as np
# Solution:
data[ np.isclose(data[:, 0], np.arange(start, stop, 0.0025)) ]
# with I'm guessing start=0, and stop=data.shape[0]*0.0025
|
Deleting values from array with np.diff
|
I need to edit an array. The array has two columns: one for X-values, the other for Y-values. The X-values are 0.0025 steps (0, 0.0025, 0.005, etc.), but sometimes there are wrong steps and I need to delete those. Others recommended that I use the following:
data = data[~np.r_[True, (np.diff(data[:,0])>0)&(np.diff(data[:, 0])<0.0024)]]
The problem is that the first value always gets deleted, and the second problem is that it doesn't just delete the wrong step but also the one after it.
|
[
"The reason the first element is always being deleted is because you invert the output of np.r_ which prepends True to the output of np.diff. When using ~, that gets turned into a False, and thus the first element gets deleted.\nMy guess that the step after gets deleted too is because np.diff checks the difference between consecutive elements. Consider:\n0.0025, 0.005, 0.008, 0.01, 0.0125\n ~~~~~\n# The diff here is going to look like:\n 0.0025, 0.003, 0.002, 0.0025\n\nNote how the wrong element results in a wrong diff both before AND after that element.\nIf that is unexpected behavior, then you should not use np.diff, instead compare with the expected steps directly using np.arange\nimport numpy as np\n\n# Solution:\ndata[ np.isclose(data[:, 0], np.arange(start, stop, 0.0025)) ]\n\n# with I'm guessing start=0, and stop=data.shape[0]*0.0025\n\n"
] |
[
0
] |
[] |
[] |
[
"arrays",
"numpy",
"python"
] |
stackoverflow_0074520324_arrays_numpy_python.txt
|
Q:
ValueError: Using a target size (torch.Size([16])) that is different to the input size (torch.Size([13456, 1])) is deprecated
My code: https://colab.research.google.com/drive/1qjfy2OsHYewhHDej-W83CMNercB7o7r8?usp=sharing
the error: ValueError: Using a target size (torch.Size([16])) that is different to the input size (torch.Size([13456, 1])) is deprecated. Please ensure they have the same size.
the dataset consists of 2 folders: 0 and 1, and in each of these two folders, there’re about 2500 512*512 images and a json file for each image.
the code was from the pytorch gan tutorial, i just changed the dataset.
I wonder where does the 13456 come from?
A:
The original code is intended for 64 x 64 images, not 512 x 512 ones. To fix the problem, you have to either downsize the images to 64 x 64 or modify the discriminator and the generator.
A:
label = torch.full((b_size,), real_label, dtype=torch.float, device=device)
This creates an array of size b_size (16) filled with real_label (1) .
output = netD(real_cpu).view(-1)
output = output.unsqueeze(1)
The output is of size (x,1) where x is the number of outputs in your last convolutional network, which is not equal to the batch size.
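If you take the first suggestion and downsize the inputs, a minimal sketch of the usual torchvision pipeline (paths are placeholders) that feeds 64 x 64 images to the tutorial's networks looks like this:
import torchvision.datasets as dset
import torchvision.transforms as transforms

image_size = 64  # the size the DCGAN tutorial's generator/discriminator expect

dataset = dset.ImageFolder(
    root="path/to/dataset",
    transform=transforms.Compose([
        transforms.Resize(image_size),
        transforms.CenterCrop(image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ]),
)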
|
ValueError: Using a target size (torch.Size([16])) that is different to the input size (torch.Size([13456, 1])) is deprecated
|
My code: https://colab.research.google.com/drive/1qjfy2OsHYewhHDej-W83CMNercB7o7r8?usp=sharing
the error: ValueError: Using a target size (torch.Size([16])) that is different to the input size (torch.Size([13456, 1])) is deprecated. Please ensure they have the same size.
the dataset consists of 2 folders: 0 and 1, and in each of these two folders, there’re about 2500 512*512 images and a json file for each image.
the code was from the pytorch gan tutorial, i just changed the dataset.
I wonder where does the 13456 come from?
|
[
"The original code is intended for 64 x 64 images, not 512 x 512 ones. To fix the problem, you have to either downsize the images to 64 x 64 or modify the discriminator and the generator.\n",
"label = torch.full((b_size,), real_label, dtype=torch.float, device=device)\n\nThis creates an array of size b_size (16) filled with real_label (1) .\noutput = netD(real_cpu).view(-1)\noutput = output.unsqueeze(1)\n\nThe output is of size (x,1) where x is the number of outputs in your last convolutional network, which is not equal to the batch size.\n"
] |
[
1,
0
] |
[] |
[] |
[
"python",
"pytorch"
] |
stackoverflow_0074513451_python_pytorch.txt
|
Q:
Extracting variable name from file name
I am trying to extract variable names from file names as follows:
happy = "LOL"
angry = "GRRRR"
surprised= "YUPPIE"
file_names=["happy.wav","angry.wav","surprised.wav"]
for i in file_names:
name = i.split('.')
name_=name[0]
print(name_)
I get the output as:
happy
angry
surprised
when I wish to get the output as:
"LOL"
"GRRRR"
"YUPPIE"
What is my code missing?
A:
Use a dict, not individual variables, when the variable names should be treated as data.
d = {"happy": "LOL", "angry": "GRRRR", "surprised": "YUPPIE"}
for i in file_names:
name = i.split(".")[0]
print(d[name])
A:
I think you are going about this the wrong way. What if the file_names has more file names tomorrow? Would you create more variables? That would be poor design.
What you need is a dictionary. Something like this:
dictionaryOfFileNames = {"happy": "LOL", "angry": "GRRRR", "surprised": "YUPPIE"}
file_names=["happy.wav","angry.wav","surprised.wav"]
for i in file_names:
name = i.split('.')
name_=name[0]
print(dictionaryOfFileNames.get(name_))
In this case, if the name does not exist, you will get a None.
|
Extracting variable name from file name
|
I am trying to extract variable names from file names as follows:
happy = "LOL"
angry = "GRRRR"
surprised= "YUPPIE"
file_names=["happy.wav","angry.wav","surprised.wav"]
for i in file_names:
name = i.split('.')
name_=name[0]
print(name_)
I get the output as:
happy
angry
surprised
when I wish to get the output as:
"LOL"
"GRRRR"
"YUPPIE"
What is my code missing?
|
[
"Use a dict, not individual variables, when the variable names should be treated as data.\nd = {\"happy\": \"LOL\", \"angry\": \"GRRRR\", \"surprised\": \"YUPPIE\"}\nfor i in file_names:\n name = i.split(\".\")[0]\n print(d[name])\n\n",
"I think you are going about this the wrong way. What if the file_names has more file names tomorrow? Would you create more variables? That would be poor design.\nWhat you need is a dictionary. Something like this:\ndictionaryOfFileNames = {\"happy\": \"LOL\", \"angry\": \"GRRRR\", \"surprised\": \"YUPPIE\"}\nfile_names=[\"happy.wav\",\"angry.wav\",\"surprised.wav\"]\n\nfor i in file_names:\n name = i.split('.')\n name_=name[0]\n print(dictionaryOfFileNames.get(name_))\n\n\nIn this case, if the name does not exist, you will get a None.\n"
] |
[
0,
0
] |
[] |
[] |
[
"filenames",
"for_loop",
"python",
"string",
"variable_assignment"
] |
stackoverflow_0074520422_filenames_for_loop_python_string_variable_assignment.txt
|
Q:
How to use a jwt.io provisioned token with jwcrypto?
I am trying to use a jwt.io generated JWT within my python code using jwcrypto with some success. I am saying some success because I am able to retrieve the claims (the wrong way) without validating the signature.
Here's my code
from jwcrypto import jwt, jwk
jwtIoToken = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c'
theJwt = jwt.JWT(jwt=jwtIoToken)
print(theJwt)
print(theJwt.token.objects)
The jwtIoToken value is taken verbatim from the jwt.io. I was expecting to be able to just do a theJwt.claims but it is set to None. My hunch is that I need to validate the signature, but I have no idea how to do it without the key for which I have no clue what jwt.io is using.
A:
You can find the key that jwt.io uses in the right column under "VERIFY SIGNATURE".
Unless you add anything different, the default value is "your-256-bit-secret".
When you use that value, you can verify the signature with the code below.
jwcrypto is a bit more complicated to use than pyjwt. Here you first have to initialize a JWK object and create the key from the given secret and then pass the key to the verify(key)-function:
from jwcrypto import jws, jwk
jwtIoToken = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c'
key = jwk.JWK().from_password("your-256-bit-secret")
jwstoken = jws.JWS()
jwstoken.deserialize(jwtIoToken)
jwstoken.verify(key)
payload = jwstoken.payload
print(payload.decode())
A:
I can suggest an alternative to jwcrypto named jwskate.
jwcrypto's rather convoluted imports and API make it unintuitive to use, as you have noticed, while jwskate has a more convenient and Pythonic API:
from jwskate import Jwt, SymmetricJwk
# you can access the token contents, as a dict, without verifying the signature:
jwt = Jwt('eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c')
assert jwt.claims == {'sub': '1234567890', 'name': 'John Doe', 'iat': 1516239022}
# or you can access individual claims this way
assert jwt['sub'] == jwt.sub == '1234567890'
assert jwt['name'] == jwt.name == 'John Doe'
from datetime import datetime, timezone
assert jwt.issued_at == datetime(2018, 1, 18, 1, 30, 22, tzinfo=timezone.utc)
# if you want to verify the signature:
key = SymmetricJwk.from_bytes(b"your-256-bit-secret")
assert jwt.verify_signature(key, alg="HS256")
Disclaimer: I am the author of jwskate.
|
How to use a jwt.io provisioned token with jwcrypto?
|
I am trying to use a jwt.io generated JWT within my python code using jwcrypto with some success. I am saying some success because I am able to retrieve the claims (the wrong way) without validating the signature.
Here's my code
from jwcrypto import jwt, jwk
jwtIoToken = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c'
theJwt = jwt.JWT(jwt=jwtIoToken)
print(theJwt)
print(theJwt.token.objects)
The jwtIoToken value is taken verbatim from the jwt.io. I was expecting to be able to just do a theJwt.claims but it is set to None. My hunch is that I need to validate the signature, but I have no idea how to do it without the key for which I have no clue what jwt.io is using.
|
[
"You can find the key that jwt.io uses in the right column under \"VERIFY SIGNATURE\".\nUnless you add anything different, the default value is \"your-256-bit-secret\".\n\nWhen you use that value, you can verify the signature with the code below.\njwcrypto is a bit more complicated to use than pyjwt. Here you first have to initialize a JWK object and create the key from the given secret and then pass the key to the verify(key)-function:\nfrom jwcrypto import jws, jwk\n\njwtIoToken = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c'\nkey = jwk.JWK().from_password(\"your-256-bit-secret\")\n\njwstoken = jws.JWS()\njwstoken.deserialize(jwtIoToken)\njwstoken.verify(key)\npayload = jwstoken.payload\n\nprint(payload.decode())\n\n",
"I can suggest you to use an alternative to jwcrypto named jwskate.\njwcrypto quite convoluted imports and API make it unintuitive to use, as you have noticed, while jwskate has a more convenient and Pythonic API:\nfrom jwskate import Jwt, SymmetricJwk\n\n# you can access the token contents, as a dict, without verifying the signature:\njwt = Jwt('eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c')\nassert jwt.claims == {'sub': '1234567890', 'name': 'John Doe', 'iat': 1516239022}\n# or you can access individual claims this way\nassert jwt['sub'] == jwt.sub == '1234567890'\nassert jwt['name'] == jwt.name == 'John Doe'\nfrom datetime import datetime, timezone\nassert jwt.issued_at == datetime(2018, 1, 18, 1, 30, 22, tzinfo=timezone.utc)\n\n# if you want to verify the signature:\nkey = SymmetricJwk.from_bytes(b\"your-256-bit-secret\")\nassert jwt.verify_signature(key, alg=\"HS256\")\n\nDisclaimer: I am the author of jwskate.\n"
] |
[
1,
1
] |
[] |
[] |
[
"jwcrypto",
"jwt",
"python"
] |
stackoverflow_0073692941_jwcrypto_jwt_python.txt
|
Q:
How to make and remove entity according to time in ursina
I want to make Maze-Runner in ursina and change maze according to real time.
So, I use 'from datetime import datetime' and get real time in every frame.
I want to make the maze change every 6 minutes in reality, but is there a way to create or eliminate entities over time?
Here is part of my code.
maze = []
maze1 = Entity(model='cube', origin_y=0, scale=(1, 7, 1), collider='box', color=color.hsv(0, 0, random.uniform(.9, 1)))
new_maze1 = duplicate(maze1, x=0, z=4, y=2)
new_maze2 = duplicate(maze1, x=4, z=0, y=2)
def update():
if held_keys['left mouse']:
shoot()
global t, hour, minute, second, text
t = datetime.now()
hour, minute, second = t.hour, t.minute, t.second
text.y = 1
text=Text(f"{str(minute%6).zfill(2)}: {str(second).zfill(2)}", position=(.78,.43),origin=(0,0), scale=2, background=True)
print(minute%6)
global new_maze1, new_maze2
if minute % 6 == 3 and second == 0:
maze=[]
maze.append(new_maze1)
destroy(new_maze2)
elif second == 0 and not minute % 6 == 3:
maze=[]
maze.append(new_maze2)
destroy(new_maze1)
A:
You can use invoke with a function, like:
invoke(recreate_maze, delay=6000)
I will check this code when I get home.
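As a rough illustration of that idea (the entity layout and function name are hypothetical, and note that invoke's delay is in seconds, so 6 minutes is 360):
from ursina import Ursina, Entity, color, destroy, invoke

app = Ursina()
maze = []

def rebuild_maze():
    # remove the old walls, then build a new (here: trivial) layout
    for wall in maze:
        destroy(wall)
    maze.clear()
    maze.append(Entity(model='cube', collider='box', scale=(1, 7, 1),
                       color=color.gray, x=4, y=2, z=0))
    invoke(rebuild_maze, delay=360)  # schedule the next rebuild in 6 minutes

rebuild_maze()
app.run()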
|
How to make and remove entity according to time in ursina
|
I want to make Maze-Runner in ursina and change maze according to real time.
So, I use 'from datetime import datetime' and get real time in every frame.
I want to make the maze change every 6 minutes in reality, but is there a way to create or eliminate entities over time?
Here is part of my code.
maze = []
maze1 = Entity(model='cube', origin_y=0, scale=(1, 7, 1), collider='box', color=color.hsv(0, 0, random.uniform(.9, 1)))
new_maze1 = duplicate(maze1, x=0, z=4, y=2)
new_maze2 = duplicate(maze1, x=4, z=0, y=2)
def update():
if held_keys['left mouse']:
shoot()
global t, hour, minute, second, text
t = datetime.now()
hour, minute, second = t.hour, t.minute, t.second
text.y = 1
text=Text(f"{str(minute%6).zfill(2)}: {str(second).zfill(2)}", position=(.78,.43),origin=(0,0), scale=2, background=True)
print(minute%6)
global new_maze1, new_maze2
if minute % 6 == 3 and second == 0:
maze=[]
maze.append(new_maze1)
destroy(new_maze2)
elif second == 0 and not minute % 6 == 3:
maze=[]
maze.append(new_maze2)
destroy(new_maze1)
|
[
"You can use invoke and function\nLike:\ninvoke (recrete_maze, delay=6000)\n\nI will check this code when I get home.\n"
] |
[
0
] |
[] |
[] |
[
"python",
"ursina"
] |
stackoverflow_0074414500_python_ursina.txt
|
Q:
Installing zlib on windows
I use python to automate boring stuff I do daily (I'm not really a "programmer").
I've been building a script to compress my files to a zip folder. For that, I'm using zipfile library, but it only creates a ZIP file without compressing them.
In order to do that, they recommend installing the zlib module and using the ZIP_DEFLATED flag.
I'm having a hard time installing the static library of zlib. I found the following on GitHub.
Assuming you have a functional zlib library installed in c:\lib\zlib
(Download from https://zlib.net , use cmake to generate VS solution
(with param -DCMAKE_INSTALL_PREFIX=c:\lib\zlib), then build the
INSTALL project from that solution, using Release mode settings)
But I don't know how to proceed with these instructions. Could someone explain to a layperson how to install this library on Windows?
Thanks in advance!
A:
The solution, as said by @mechanical_meat, is to use Python 3 instead, because Python 2.x is deprecated.
Note: just use the built-in zipfile; an example of how to create compressed archive files is here: stackoverflow.com/a/38550416/42346
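For completeness, a minimal sketch using only the standard library (the zlib that ships with CPython on Windows already supports ZIP_DEFLATED; file names are placeholders):
import zipfile

with zipfile.ZipFile("archive.zip", "w", compression=zipfile.ZIP_DEFLATED) as zf:
    zf.write("report.txt")
    zf.write("data.csv")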
|
Installing zlib on windows
|
I use python to automate boring stuff I do daily (I'm not really a "programmer").
I've been building a script to compress my files to a zip folder. For that, I'm using zipfile library, but it only creates a ZIP file without compressing them.
In order to do that, they recommend installing the zlib module and using the ZIP_DEFLATED flag.
I'm having a hard time installing the static library of zlib. I found the following on GitHub.
Assuming you have a functional zlib library installed in c:\lib\zlib
(Download from https://zlib.net , use cmake to generate VS solution
(with param -DCMAKE_INSTALL_PREFIX=c:\lib\zlib), then build the
INSTALL project from that solution, using Release mode settings)
But I don't know how to proceed with these instructions. Could someone explain to a layperson how to install this library on Windows?
Thanks in advance!
|
[
"The solution as said by @mechanical_meat is to use Python 3 instead, because Python 2.x is deprecated,\nNote: just use the built-in zipfile, example of how to create compressed archive files here: stackoverflow.com/a/38550416/42346 –\n"
] |
[
1
] |
[] |
[] |
[
"python",
"python_2.7"
] |
stackoverflow_0070462794_python_python_2.7.txt
|
Q:
Writing custom log files in Databricks Repos using the logging package
I would like to capture custom metrics as a notebook runs in Databricks. I would like to write these to a file using the logging package. The code below seems to run fine but it never writes to file. How do you achieve this in Databricks runtime 9.1?
Also note that I am running this in Repos, so I have to explicitly write it to a location. Furthermore, this code runs perfectly fine when run from my workspace.
logger = logging.getLogger('server_logger')
logger.setLevel(logging.INFO)
fh = logging.FileHandler('/dbfs/tmp/my_log.log')
fh.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.warning(f'starting to log the process')
A:
Perhaps the /dbfs/tmp directory doesn't exist, or you don't have write access to it. Changing the log filename to just mylog.log, it works as expected:
~/SO-logging-misc$ python so_74519222.py
~/SO-logging-misc$ more my_log.log
2022-11-21 14:33:22 - WARNING - starting to log the process
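One way to rule out a missing directory is to create it before attaching the handler; a sketch, assuming the DBFS root is mounted at /dbfs on the cluster:
import logging
import os

log_dir = "/dbfs/tmp"
os.makedirs(log_dir, exist_ok=True)  # create the directory if it doesn't exist

logger = logging.getLogger("server_logger")
logger.setLevel(logging.INFO)
fh = logging.FileHandler(os.path.join(log_dir, "my_log.log"))
fh.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
logger.addHandler(fh)
logger.warning("starting to log the process")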
|
Writing custom log files in Databricks Repos using the logging package
|
I would like to capture custom metrics as a notebook runs in Databricks. I would like to write these to a file using the logging package. The code below seems to run fine but it never writes to file. How do you achieve this in Databricks runtime 9.1?
Also note that I am running this in Repos, so I have to explicitly write it to a location. Furthermore, this code runs perfectly fine when run from my workspace.
logger = logging.getLogger('server_logger')
logger.setLevel(logging.INFO)
fh = logging.FileHandler('/dbfs/tmp/my_log.log')
fh.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.warning(f'starting to log the process')
|
[
"Perhaps the /dbfs/tmp directory doesn't exist, or you don't have write access to it. Changing the log filename to just mylog.log, it works as expected:\n~/SO-logging-misc$ python so_74519222.py\n~/SO-logging-misc$ more my_log.log \n2022-11-21 14:33:22 - WARNING - starting to log the process\n\n"
] |
[
1
] |
[] |
[] |
[
"azure_repos",
"databricks",
"logging",
"python"
] |
stackoverflow_0074519222_azure_repos_databricks_logging_python.txt
|
Q:
How to put other model classes belonging (linked) to a main model class. And how to write this in Views.py. (This is Not FK)
I have a main model, called "Employees", and I need to link to it another 16 model classes (Employees Additional Data, Employees Observations, etc) in the same app. What would be the best way to write these classes in models.py?
Could be like that?
class Employees(models.Model):
class Meta:
db_table = "employees"
#fields
#fields
class EmployeesObs(models.Model):
class Meta:
db_table = "employeesobs"
#fields
#fields
class EmployeesAdditionalData(models.Model):
class Meta:
db_table = "employeesaditional"
#fields
#fields
Now, in this views.py i need:
Explaining this in the template, I need to have these other tabs (Employees Additional Data, Employees Observations, etc) in the employee register, as in the image:
Now how do I write this in views.py?
I'm using Class Based Views. Can someone help me by giving me an example of code, function or documentation?
Part of code in CBV:
class AddEmployeesView(SuccessMessageMixin, CreateView):
model = Employees
form_class = EmployeesForm
template_name = '../templates/employees/form_employees.html'
success_url = reverse_lazy('list_Employees')
success_message = "Employees %(EmployeesNome)s Added!"
class EditEmployeesView(SuccessMessageMixin, UpdateView):
model = Employees
form_class = EmployeesForm
template_name = '../templates/employees/form_employees.html'
success_url = reverse_lazy('list_Employees')
success_message = "Employees %(EmployeesNome)s Edited!"
I tried to put the other model names in the "model" part of the CBV, but I got errors.
A:
You'll need to use a foreign key to your Employee model :
class Employee(models.Model):
class Meta:
db_table = "employees"
#fields
#fields
class EmployeesObs(models.Model):
class Meta:
db_table = "employeesobs"
employee = models.ForeignKey(Employee, on_delete=models.CASCADE, related_name='observations')
#fields
class EmployeesAdditionalData(models.Model):
class Meta:
db_table = "employeesaditional"
employee = models.ForeignKey(Employee, on_delete=models.CASCADE, related_name='additional_data')
#fields
Then you can use CBV for these models as well. You can either add one observation at once or you can use an inline formset to add/edit multiple children at once.
Just keep in mind that these tabs can't be accessed before the employee is first created (because you need the ID to link to it)
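Once the foreign keys exist, the children can be queried and created through the relation, using the Employee / EmployeesObs models above. A rough sketch (the view name, field name, template path and URL kwarg are hypothetical, not part of the original code):
from django.views.generic import CreateView

# Querying children through the related_name
employee = Employee.objects.get(pk=1)
observations = employee.observations.all()

class AddEmployeeObsView(CreateView):
    model = EmployeesObs
    fields = ["note"]  # hypothetical field on EmployeesObs
    template_name = "employees/form_employee_obs.html"

    def form_valid(self, form):
        # link the new observation to the employee whose pk is in the URL
        form.instance.employee_id = self.kwargs["employee_pk"]
        return super().form_valid(form)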
|
How to put other model classes belonging (linked) to a main model class. And how to write this in Views.py. (This is Not FK)
|
I have a main model, called "Employees", and I need to link to it another 16 model classes (Employees Additional Data, Employees Observations, etc) in the same app. What would be the best way to write these classes in models.py?
Could be like that?
class Employees(models.Model):
class Meta:
db_table = "employees"
#fields
#fields
class EmployeesObs(models.Model):
class Meta:
db_table = "employeesobs"
#fields
#fields
class EmployeesAdditionalData(models.Model):
class Meta:
db_table = "employeesaditional"
#fields
#fields
Now, in this views.py i need:
Explaining this in the template, I need to have these other tabs (Employees Additional Data, Employees Observations, etc) in the employee register, as in the image:
Now how do I write this in views.py?
I'm using Class Based Views. Can someone help me by giving me an example of code, function or documentation?
Part of code in CBV:
class AddEmployeesView(SuccessMessageMixin, CreateView):
model = Employees
form_class = EmployeesForm
template_name = '../templates/employees/form_employees.html'
success_url = reverse_lazy('list_Employees')
success_message = "Employees %(EmployeesNome)s Added!"
class EditEmployeesView(SuccessMessageMixin, UpdateView):
model = Employees
form_class = EmployeesForm
template_name = '../templates/employees/form_employees.html'
success_url = reverse_lazy('list_Employees')
success_message = "Employees %(EmployeesNome)s Edited!"
I tried to put the other model names in the "model" part of the CBV, but I got errors.
|
[
"You'll need to use a foreign key to your Employee model :\nclass Employee(models.Model):\n class Meta:\n db_table = \"employees\"\n \n #fields\n #fields\n\n\nclass EmployeesObs(models.Model):\n class Meta:\n db_table = \"employeesobs\"\n \n employee = models.ForeignKey(Employee, on_delete=models.CASCADE, related_name='observations')\n #fields\n\n\nclass EmployeesAdditionalData(models.Model):\n class Meta:\n db_table = \"employeesaditional\" \n \n employee = models.ForeignKey(Employee, on_delete=models.CASCADE, related_name='additional_data')\n #fields\n\nThen you can use CBV for these models as well. You can either add one observation at once or you can use an inline formset to add/edit multiple children at once.\nJust keep in mind that these tabs can't be accessed before the employee is first created (because you need the ID to link to it)\n"
] |
[
0
] |
[] |
[] |
[
"django",
"django_models",
"django_views",
"python"
] |
stackoverflow_0074519706_django_django_models_django_views_python.txt
|
Q:
Convert index in column header in python dataframe
I am trying to convert the index labels of a python dataframe into column headers. I am using the transpose function but the results are not as expected. Which function can be used to accomplish the results given below?
data is:
Year 2020
Month SEPTEMBER
Filed Date 29-11-2020
Year 2022
Month JULY
Filed Date 20-08-2022
Year 2022
Month APRIL
Filed Date 20-05-2022
Year 2017
Month AUGUST
Filed Date 21-09-2017
Year 2018
Month JULY
Filed Date 03-02-2019
Year 2021
Month MAY
Filed Date 22-06-2021
Year 2017
Month DECEMBER
Filed Date 19-01-2018
Year 2018
Month MAY
Filed Date 03-02-2019
Year 2019
Month MARCH
Filed Date 28-09-2019
and convert it into:
Year Month Filed Date
2020 September 29-11-2020
2022 July 20-08-2022
A:
You can do it like this:
df = pd.DataFrame(
[df1.iloc[i:i+3][1].tolist() for i in range(0, len(df1), 3)],
columns=df1.iloc[0:3][0].tolist(),
)
print(df):
Year Month Filed
0 2020 SEPTEMBER Date 29-11-2020
1 2022 JULY Date 20-08-2022
2 2022 APRIL Date 20-05-2022
3 2017 AUGUST Date 21-09-2017
4 2018 JULY Date 03-02-2019
5 2021 MAY Date 22-06-2021
6 2017 DECEMBER Date 19-01-2018
7 2018 MAY Date 03-02-2019
8 2019 MARCH Date 28-09-2019
A:
I have found a solution to my problem. Here df1 is:
Year 2020
Month SEPTEMBER
Filed Date 29-11-2020
Year 2022
Month JULY
Filed Date 20-08-2022
Year 2022
Month APRIL
Filed Date 20-05-2022
Year 2017
Month AUGUST
Filed Date 21-09-2017
Year 2018
Month JULY
Filed Date 03-02-2019
Year 2021
Month MAY
Filed Date 22-06-2021
Year 2017
Month DECEMBER
Filed Date 19-01-2018
Year 2018
Month MAY
Filed Date 03-02-2019
Year 2019
Month MARCH
Filed Date 28-09-2019
I used pivot function and approached the problem like this:
df=pd.DataFrame()
for i in range(0,len(df1),3):
df= df.append(df1.pivot(columns='A', values='B', index=None).bfill(axis = 0).iloc[i])
df.reset_index(drop=True, inplace=True)
print(df)
result:
A Filed Date Month Year
0 29-11-2020 SEPTEMBER 2020
1 20-08-2022 JULY 2022
2 20-05-2022 APRIL 2022
3 21-09-2017 AUGUST 2017
4 03-02-2019 JULY 2018
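If the rows always come in fixed groups of three (Year, Month, Filed Date) and the label/value columns are named 'A' and 'B' as above, a simpler route (a different approach from the answers, shown only as a sketch) is to reshape the value column directly:
import pandas as pd

# Minimal reproduction of the layout: label column 'A', value column 'B'
df1 = pd.DataFrame({
    'A': ['Year', 'Month', 'Filed Date', 'Year', 'Month', 'Filed Date'],
    'B': ['2020', 'SEPTEMBER', '29-11-2020', '2022', 'JULY', '20-08-2022'],
})

out = pd.DataFrame(df1['B'].to_numpy().reshape(-1, 3),
                   columns=['Year', 'Month', 'Filed Date'])
print(out)
#    Year      Month  Filed Date
# 0  2020  SEPTEMBER  29-11-2020
# 1  2022       JULY  20-08-2022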
|
Convert index in column header in python dataframe
|
I am trying to convert the index labels of a python dataframe into column headers. I am using the transpose function but the results are not as expected. Which function can be used to accomplish the results given below?
data is:
Year 2020
Month SEPTEMBER
Filed Date 29-11-2020
Year 2022
Month JULY
Filed Date 20-08-2022
Year 2022
Month APRIL
Filed Date 20-05-2022
Year 2017
Month AUGUST
Filed Date 21-09-2017
Year 2018
Month JULY
Filed Date 03-02-2019
Year 2021
Month MAY
Filed Date 22-06-2021
Year 2017
Month DECEMBER
Filed Date 19-01-2018
Year 2018
Month MAY
Filed Date 03-02-2019
Year 2019
Month MARCH
Filed Date 28-09-2019
and convert it into:
Year Month Filed Date
2020 September 29-11-2020
2022 July 20-08-2022
|
[
"You can do it like this:\ndf = pd.DataFrame(\n [df1.iloc[i:i+3][1].tolist() for i in range(0, len(df1), 3)],\n columns=df1.iloc[0:3][0].tolist(),\n)\n\nprint(df):\n Year Month Filed\n0 2020 SEPTEMBER Date 29-11-2020\n1 2022 JULY Date 20-08-2022\n2 2022 APRIL Date 20-05-2022\n3 2017 AUGUST Date 21-09-2017\n4 2018 JULY Date 03-02-2019\n5 2021 MAY Date 22-06-2021\n6 2017 DECEMBER Date 19-01-2018\n7 2018 MAY Date 03-02-2019\n8 2019 MARCH Date 28-09-2019\n\n",
"I have found a solution to my problem. Here df1 is:\nYear 2020\nMonth SEPTEMBER\nFiled Date 29-11-2020\nYear 2022\nMonth JULY\nFiled Date 20-08-2022\nYear 2022\nMonth APRIL\nFiled Date 20-05-2022\nYear 2017\nMonth AUGUST\nFiled Date 21-09-2017\nYear 2018\nMonth JULY\nFiled Date 03-02-2019\nYear 2021\nMonth MAY\nFiled Date 22-06-2021\nYear 2017\nMonth DECEMBER\nFiled Date 19-01-2018\nYear 2018\nMonth MAY\nFiled Date 03-02-2019\nYear 2019\nMonth MARCH\nFiled Date 28-09-2019\n\nI used pivot function and approached the problem like this:\ndf=pd.DataFrame()\nfor i in range(0,len(df1),3):\n df= df.append(df1.pivot(columns='A', values='B', index=None).bfill(axis = 0).iloc[i])\ndf.reset_index(drop=True, inplace=True)\nprint(df)\n\nresult:\nA Filed Date Month Year\n0 29-11-2020 SEPTEMBER 2020\n1 20-08-2022 JULY 2022\n2 20-05-2022 APRIL 2022\n3 21-09-2017 AUGUST 2017\n4 03-02-2019 JULY 2018\n\n"
] |
[
1,
0
] |
[] |
[] |
[
"python"
] |
stackoverflow_0074519555_python.txt
|
Q:
Snowflake REST APIs
The python native connector for snowflake uses REST Apis as per PEP249. Looking into the code of this connector, it seems to use REST APIs like
/queries/v1/query-request
ret = self.rest.request(
"/queries/v1/query-request?" + urlencode(url_parameters),
data,
client=client,
_no_results=_no_results,
_include_retry_params=True,
_no_retry=_no_retry,
)
A good description is here: https://amacal.medium.com/snowflake-and-rest-api-bf7bc2295700
Question is - where are these APIs and their endpoints defined in snowflake documentation? Or are these the same as the Snowflake SQL APIs defined here, which clearly don't have any mention of the above endpoint: https://docs.snowflake.com/en/developer-guide/sql-api/reference.html
A:
The REST API endpoints are not documented as you are supposed to use the Python connector API, not the REST API.
If you want to use a REST API then indeed use the SQL API.
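For reference, a minimal sketch of the documented route via the Python connector (all connection parameters below are placeholders):
import snowflake.connector

conn = snowflake.connector.connect(
    account="my_account",
    user="my_user",
    password="my_password",
    warehouse="my_wh",
    database="my_db",
)
cur = conn.cursor()
try:
    cur.execute("SELECT CURRENT_VERSION()")
    print(cur.fetchone())
finally:
    cur.close()
    conn.close()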
|
Snowflake REST APIs
|
The python native connector for snowflake uses REST Apis as per PEP249. Looking into the code of this connector, it seems to use REST APIs like
/queries/v1/query-request
ret = self.rest.request(
"/queries/v1/query-request?" + urlencode(url_parameters),
data,
client=client,
_no_results=_no_results,
_include_retry_params=True,
_no_retry=_no_retry,
)
A good description is here: https://amacal.medium.com/snowflake-and-rest-api-bf7bc2295700
Question is - where are these APIs and their endpoints defined in snowflake documentation? Or are these the same as the Snowflake SQL APIs defined here, which clearly don't have any mention of the above endpoint: https://docs.snowflake.com/en/developer-guide/sql-api/reference.html
|
[
"The REST API endpoints are not documented as you are supposed to use the Python connector API, not the REST API.\nIf you want to use a REST API then indeed use the SQL API.\n"
] |
[
1
] |
[] |
[] |
[
"python",
"snowflake_cloud_data_platform"
] |
stackoverflow_0074520506_python_snowflake_cloud_data_platform.txt
|
Q:
How to get the same string repeated for all the values in a python list
I'm wondering what would be the most pythonic way to get the common prefix shared by all the values in a list.
Rule #1: each word will be composed of the following parts: prefix + name + end (separated by '_').
Rule #2: all the word parts will be variable in length
Rule #3: the end of the word is a two-character string starting with letter 'A' followed by a integer from 0-9 and could be the same for all the values in the list
Rule #4: The names of each value will never be repeated in a list
Rule #5: all the values in the list will have the same "unknown"
prefix
Here are some examples from the rules listed above:
list1 = ['4_AR_P3_A0', '4_BCML_A0', '4_PA_RU_LR_A0', '4_Routes_A0']
get_prefix(list1) # output: '4'
list2 = ['MPL_TER_LA_Desse_A1', 'MPL_TER_LA_Magnit_Mach_A0', 'MPL_TER_LA_LR_A6', 'MPL_TER_LA_Routes_A0']
get_prefix(list2) # output: 'MPL_TER_LA'
A:
use os.path.commonprefix(). Please check out: os.path
A:
Thanks to Pranav:
import os
s_l = []
for v in list1: s_l.append(v.split('_'))
print(os.path.commonprefix(s_l))
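Splitting on '_' first makes commonprefix compare whole parts instead of single characters; joining the result back with '_' gives the prefix string. A small self-contained sketch of that idea:
import os

list2 = ['MPL_TER_LA_Desse_A1', 'MPL_TER_LA_Magnit_Mach_A0', 'MPL_TER_LA_LR_A6', 'MPL_TER_LA_Routes_A0']

def get_prefix(values):
    parts = [v.split('_') for v in values]
    return '_'.join(os.path.commonprefix(parts))

print(get_prefix(list2))  # MPL_TER_LA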
|
How to get the same string repeated for all the values in a python list
|
I'm wondering what would be the most pythonic way to get the common prefix shared by all the values in a list.
Rule #1: each word will be composed of the following parts: prefix + name + end (separated by '_').
Rule #2: all the word parts will be variable in length
Rule #3: the end of the word is a two-character string starting with letter 'A' followed by a integer from 0-9 and could be the same for all the values in the list
Rule #4: The names of each value will never be repeated in a list
Rule #5: all the values in the list will have the same "unknown"
prefix
Here are some examples from the rules listed above:
list1 = ['4_AR_P3_A0', '4_BCML_A0', '4_PA_RU_LR_A0', '4_Routes_A0']
get_prefix(list1) # output: '4'
list2 = ['MPL_TER_LA_Desse_A1', 'MPL_TER_LA_Magnit_Mach_A0', 'MPL_TER_LA_LR_A6', 'MPL_TER_LA_Routes_A0']
get_prefix(list2) # output: 'MPL_TER_LA'
|
[
"use os.path.commonprefix(). Please check out: os.path\n",
"Thanks to Pranav:\nimport os\ns_l = []\nfor v in list1: s_l.append(v.split('_'))\nprint(os.path.commonprefix(s_l))\n\n"
] |
[
1,
0
] |
[] |
[] |
[
"python"
] |
stackoverflow_0073705031_python.txt
|
Q:
TypeError: 'float' object is not callable on streamlit site
So, I want to implement a math formula in my streamlit app for my project, but there is an error like this:
import streamlit as st
st.title("PERSAMAAN FUNGSI KUADRAT")
st.header("DISKRIMINAN (b² -4 a c)")
db = st.number_input("Masukan Nilai b")
da = st.number_input("Masukan Nilai a")
dc = st.number_input("Masukan Nilai c")
dd = db*db(-4*da*dc)
st.subheader(dd)
it said TypeError: 'float' object is not callable. Any idea?
A:
import streamlit as st
st.title("PERSAMAAN FUNGSI KUADRAT")
st.header("DISKRIMINAN (b² -4 a c)")
db = st.number_input("Masukan Nilai b", value=0)
da = st.number_input("Masukan Nilai a", value=0)
dc = st.number_input("Masukan Nilai c", value=0)
dd = db * db - (4 * (da * dc)) # Correction
st.subheader(dd)
output:
|
TypeError: 'float' object is not callable on streamlit site
|
So, I want to implement a math formula in my streamlit app for my project, but there is an error like this:
import streamlit as st
st.title("PERSAMAAN FUNGSI KUADRAT")
st.header("DISKRIMINAN (b² -4 a c)")
db = st.number_input("Masukan Nilai b")
da = st.number_input("Masukan Nilai a")
dc = st.number_input("Masukan Nilai c")
dd = db*db(-4*da*dc)
st.subheader(dd)
it said TypeError: 'float' object is not callable. Any idea?
|
[
"import streamlit as st\n\nst.title(\"PERSAMAAN FUNGSI KUADRAT\")\nst.header(\"DISKRIMINAN (b² -4 a c)\")\n\ndb = st.number_input(\"Masukan Nilai b\", value=0)\nda = st.number_input(\"Masukan Nilai a\", value=0)\ndc = st.number_input(\"Masukan Nilai c\", value=0)\n\ndd = db * db - (4 * (da * dc)) # Correction\nst.subheader(dd)\n\noutput:\n\n"
] |
[
1
] |
[] |
[] |
[
"python",
"streamlit"
] |
stackoverflow_0074519919_python_streamlit.txt
|
Q:
Pyomo: Best way to optimize size of power plants and TypeError: unsupported operand type(s) for *: 'float' and 'IndexedVar'
I am trying to solve an optimization problem where the load demand has to be met by two power plants.
These power plants have different power production. For example (random numbers)
power_prod1 = [2,0,1]
power_prod2 = [0,1,1]
The load demand and cost of the different power plants are given in a similar way. The costs refer to the size of the plant; see plant1 and plant2 size below.
load_demand = [4,4,4]
Costs = {'power 1':60, 'power2':120}
To solve this I have tried
def plant1_size(model,i,j):
return(0, None)
model.PowerPlant1Size = pyo.Var(model.plants,model.periods,bounds=plant1_size)
def plant2_size(model,i,j):
return(0, None)
model.PowerPlant2Size = pyo.Var(model.plants,model.periods,bounds=plant2_size)
def load_balance(model,i,j):
return (power_prod1[j]*model.PowerPlant1Size + power_prod2[j]*model.PowerPlant2Size == load_demand[j])
model.load_constraint = pyo.Constraint(model.plants,model.periods,rule=load_balance)
and having an objective function where the objective is to minimize the costs by selecting the right size for the power plants. When I run this code I get the error "TypeError: unsupported operand type(s) for *: 'float' and 'IndexedVar'". I know why I get this error, but I can't figure out a way to solve for the size of the power plants. (There might be some excess code from when I tried to solve the problem another way, and some code I haven't included.)
How do i rewrite this problem in a way pyomo can solve?
A:
There are several things that are troublesome here. I'm not sure if your underlying math problem is sound. I'd slow down with the implementation and lay out all of the variables and indices with pencil and paper to make sure it makes sense. For instance, you have plant size as a variable that is indexed over time periods... Does that mean that power plant # 5 can get bigger and smaller in different time periods?
On that same variable, you seem to be embedding the index into the name, even though you are indexing by plant number. I would expect to see something like the production from a particular plant as a variable like:
model.production = pyo.Var(model.plants, model.time_periods)
Oh, and the main source of the error you are seeing is that you are using an indexed variable without supplying the index for PowerPlantSize in your objective...
|
Pyomo: Best way to optimize size of power plants and TypeError: unsupported operand type(s) for *: 'float' and 'IndexedVar'
|
I am trying to solve an optimization problem where the load demand has to be met by two power plants.
These power plants have different power production. For example (random numbers)
power_prod1 = [2,0,1]
power_prod2 = [0,1,1]
The load demand and cost of the different power plants are given in a similar way. The costs refer to the size of the plant; see plant1 and plant2 size below.
load_demand = [4,4,4]
Costs = {'power 1':60, 'power2':120}
To solve this I have tried
def plant1_size(model,i,j):
return(0, None)
model.PowerPlant1Size = pyo.Var(model.plants,model.periods,bounds=plant1_size)
def plant2_size(model,i,j):
return(0, None)
model.PowerPlant2Size = pyo.Var(model.plants,model.periods,bounds=plant2_size)
def load_balance(model,i,j):
return (power_prod1[j]*model.PowerPlant1Size + power_prod2[j]*model.PowerPlant2Size == load_demand[j])
model.load_constraint = pyo.Constraint(model.plants,model.periods,rule=load_balance)
and having an objective function where the objective is to minimize the costs by selecting the right size for the power plants. When I run this code I get the error "TypeError: unsupported operand type(s) for *: 'float' and 'IndexedVar'". I know why I get this error, but I can't figure out a way to solve for the size of the power plants. (There might be some excess code from when I tried to solve the problem another way, and some code I haven't included.)
How do i rewrite this problem in a way pyomo can solve?
|
[
"There are several things that are troublesome here. I'm not sure if your underlying math problem is sound. I'd slow down with the implementation and lay out all of the variables and indices with pencil and paper to make sure it makes sense. For instance, you have plant size as a variable that is indexed over time periods... Does that mean that power plant # 5 can get bigger and smaller in different time periods?\nOn that same variable, you seem to be embedding the index into the name, even though you are indexing by plant number. I would expect to see something like the production from a particular plant as a variable like:\nmodel.production = pyo.Var(model.plants, model.time_periods)\n\nOh, and the main source of the error you are seeing is that you are using an indexed variable without supplying the index for PowerPlantSize in your objective...\n"
] |
[
0
] |
[] |
[] |
[
"gurobi",
"optimization",
"pyomo",
"python"
] |
stackoverflow_0074518220_gurobi_optimization_pyomo_python.txt
|
Q:
How to find the most frequent pixel value in an image?
Editor's comment:
How to count pixel occurrences in an image?
I have a set of images where each pixel consists of 3 integers in the range 0-255.
I am interested in finding one pixel that is "representative" (as much as possible) for the entire pixel-population as a whole, and that pixel must occur in the pixel-population.
I think determining which pixel is the most common (the mode) in my set of images makes the most sense.
I am using python, but I am not sure how to go about it.
The images are stored as a numpy array with dimensions [n, h, w, c], where n is the number of images, h is the height, w is the width and c is the channels (RGB).
A:
I'm going to assume you need to find the most common element, which as Cris Luengo mentioned is called the mode. I'm also going to assume that the bit depth of the channels is 8-bit (value between 0 and 255, i.e. modulo 256).
Here is an implementation independent approach:
The aim is to maintain a count of all the different kinds of pixels encountered. It makes sense to use a dictionary for this, which would be of the form {pixel_value : count}.
Once this dictionary is populated, we can find the pixel with the highest count.
Now, 'pixels' are not hashable and hence cannot be stored in a dictionary directly. We need a way to assign an integer(which I'll be referring to as the pixel_value) to each unique pixel, i.e., you should be able to convert pixel_value <--> RGB value of a pixel
This function converts RGB values to an integer in the range of 0 to 16,777,215:
def get_pixel_value(pixel):
return pixel.red + 256*pixel.green + 256*256*pixel.blue
and to convert pixel_value back into RGB values:
def get_rgb_values(pixel_value):
red = pixel_value%256
pixel_value //= 256
green = pixel_value%256
pixel_value //= 256
blue = pixel_value
return [red,green,blue]
This function can find the most frequent pixel in an image:
def find_most_common_pixel(image):
histogram = {} #Dictionary keeps count of different kinds of pixels in image
for pixel in image:
pixel_val = get_pixel_value(pixel)
if pixel_val in histogram:
histogram[pixel_val] += 1 #Increment count
else:
histogram[pixel_val] = 1 #pixel_val encountered for the first time
mode_pixel_val = max(histogram, key = histogram.get) #Find pixel_val whose count is maximum
return get_rgb_values(mode_pixel_val) #Return a list containing the RGB value of the mode pixel
If you wish to find the most frequent pixel in a set of images, simply add another loop for image in image_set and populate the dictionary for all pixel_values in all images.
A:
You can iterate over the x/y of the image.
a pixel will be img_array[x, y, :] (the : for the RBG channel)
you will add this to a Counter (from collections)
Here is an example of the concept over an Image
from PIL import Image
import numpy as np
from collections import Counter
# img_path is the path to your image
cnt = Counter()
img = Image.open(img_path)
img_arr = np.array(img)
for x in range(img_arr.shape[0]):
for y in range(img_arr.shape[1]):
cnt[str(img_arr[x, y, :])] += 1
print(cnt)
# Counter({'[255 255 255]': 89916, '[143 143 143]': 1491, '[0 0 0]': 891, '[211 208 209]': 185, ...
A More efficient way to do it is by using the power of numpy and some math manipulation (because we know values are bound [0, 255]
img = Image.open(img_path)
img_arr = np.array(img)
pixels_arr = (img_arr[:, :, 0] + img_arr[:, :, 1]*256 + img_arr[:, :, 2]*(256**2)).flatten()
cnt = Counter(pixels_arr)
# print(cnt)
# Counter({16777215: 89916, 9408399: 1491, 0: 891, 13750483: 185, 14803425: 177, 5263440: 122 ...
# print(cnt.most_common(1))
# [(16777215, 89916)]
pixel_value = cnt.most_common(1)[0][0]
Now a conversion back to the original 3 values is exactly like Aayush Mahajan have writte in his answer. But I've shorten it for the sake of simplicity:
r, b, g = pixel_value%256, (pixel_value//256)%256, pixel_value//(256**2)
So you are using the power of numpy fast computation (and it's significate improvement on run time.
You use Counter which is an extension of python dictionary, dedicated for counting.
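Since the data is already a numpy array of shape [n, h, w, c], another option (a different technique from the answers above) is np.unique with axis=0 and return_counts=True on the flattened pixel list; a short sketch with random placeholder images:
import numpy as np

images = np.random.randint(0, 256, size=(4, 32, 32, 3), dtype=np.uint8)  # placeholder data

pixels = images.reshape(-1, images.shape[-1])            # (n*h*w, c)
values, counts = np.unique(pixels, axis=0, return_counts=True)
most_common = values[counts.argmax()]                    # RGB triple of the mode pixel
print(most_common, counts.max())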
|
How to find the most frequent pixel value in an image?
|
Editor's comment:
How to count pixel occurrences in an image?
I have a set of images where each pixel consists of 3 integers in the range 0-255.
I am interested in finding one pixel that is "representative" (as much as possible) for the entire pixel-population as a whole, and that pixel must occur in the pixel-population.
I think determining which pixel is the most common (the mode) in my set of images makes the most sense.
I am using python, but I am not sure how to go about it.
The images are stored as a numpy array with dimensions [n, h, w, c], where n is the number of images, h is the height, w is the width and c is the channels (RGB).
|
[
"I'm going to assume you need to find the most common element, which as Cris Luengo mentioned is called the mode. I'm also going to assume that the bit depth of the channels is 8-bit (value between 0 and 255, i.e. modulo 256).\nHere is an implementation independent approach:\nThe aim is to maintain a count of all the different kinds of pixels encountered. It makes sense to use a dictionary for this, which would be of the form {pixel_value : count}.\nOnce this dictionary is populated, we can find the pixel with the highest count.\nNow, 'pixels' are not hashable and hence cannot be stored in a dictionary directly. We need a way to assign an integer(which I'll be referring to as the pixel_value) to each unique pixel, i.e., you should be able to convert pixel_value <--> RGB value of a pixel\nThis function converts RGB values to an integer in the range of 0 to 16,777,215:\ndef get_pixel_value(pixel):\n return pixel.red + 256*pixel.green + 256*256*pixel.blue \n\nand to convert pixel_value back into RGB values:\ndef get_rgb_values(pixel_value):\n red = pixel_value%256\n pixel_value //= 256\n green = pixel_value%256\n pixel_value //= 256\n blue = pixel_value\n return [red,green,blue]\n\nThis function can find the most frequent pixel in an image:\ndef find_most_common_pixel(image):\n histogram = {} #Dictionary keeps count of different kinds of pixels in image\n\n for pixel in image:\n pixel_val = get_pixel_value(pixel)\n if pixel_val in histogram:\n histogram[pixel_val] += 1 #Increment count\n else:\n histogram[pixel_val] = 1 #pixel_val encountered for the first time\n\n mode_pixel_val = max(histogram, key = histogram.get) #Find pixel_val whose count is maximum\n return get_rgb_values(mode_pixel_val) #Returna a list containing RGB Value of the median pixel\n\nIf you wish to find the most frequent pixel in a set of images, simply add another loop for image in image_set and populate the dictionary for all pixel_values in all images.\n",
"You can iterate over the x/y of the image.\na pixel will be img_array[x, y, :] (the : for the RBG channel)\nyou will add this to a Counter (from collections)\nHere is an example of the concept over an Image\nfrom PIL import Image\nimport numpy as np\nfrom collections import Counter\n\n# img_path is the path to your image\ncnt = Counter()\nimg = Image.open(img_path)\nimg_arr = np.array(img)\n\nfor x in range(img_arr.shape[0]):\n for y in range(img_arr.shape[1]):\n cnt[str(img_arr[x, y, :])] += 1\n\nprint(cnt)\n\n# Counter({'[255 255 255]': 89916, '[143 143 143]': 1491, '[0 0 0]': 891, '[211 208 209]': 185, ...\n\nA More efficient way to do it is by using the power of numpy and some math manipulation (because we know values are bound [0, 255]\nimg = Image.open(img_path)\nimg_arr = np.array(img)\npixels_arr = (img_arr[:, :, 0] + img_arr[:, :, 1]*256 + img_arr[:, :, 2]*(256**2)).flatten()\ncnt = Counter(pixels_arr)\n\n# print(cnt)\n# Counter({16777215: 89916, 9408399: 1491, 0: 891, 13750483: 185, 14803425: 177, 5263440: 122 ...\n\n# print(cnt.most_common(1))\n# [(16777215, 89916)]\npixel_value = cnt.most_common(1)[0][0]\n\nNow a conversion back to the original 3 values is exactly like Aayush Mahajan have writte in his answer. But I've shorten it for the sake of simplicity:\nr, b, g = pixel_value%256, (pixel_value//256)%256, pixel_value//(256**2)\n\nSo you are using the power of numpy fast computation (and it's significate improvement on run time.\nYou use Counter which is an extension of python dictionary, dedicated for counting.\n"
] |
[
3,
0
] |
[] |
[] |
[
"computer_vision",
"image",
"image_processing",
"python"
] |
stackoverflow_0052591281_computer_vision_image_image_processing_python.txt
|
Q:
Why do I get these errors when running my discord bot?
import discord
from discord.ext import commands
import sqlite3
from config import settings
client = discord.Client(intents = discord.Intents().all())
client = commands.Bot(command_prefix='!', intents= 8)
connection = sqlite3.connect('server.db')
cursor = connection.cursor()
@client.event
async def onReady():
cursor.execute("""CREATE TABLE IF NOT EXISTS users(
name TEXT,
id INT,
lvl INT,
goals INT,
assists INT,
cleensheets INT
)""")
g = bot.get_guild(696354089851813978)
members = await g.fetch_members(limit=3000, after=None).flatten()
for guild in client.guilds:
for member in guilds.member:
if cursor.execute(f"SELECT id FROM users WHERE id = {member.id}").fetchone() is None:
cursor.execute(f"INSERT INTO users VALUES ('{member}' , {member.id} , 0 , 0 , 0)")
else:
pass
connection.commit()
print("Bot connected suffecently")
@client.event
async def on_member_join(member):
if cursor.execute(f"SELECT id FROM users WHERE id = {member.id}").fetchone() is None:
cursor.execute(f"INSERT INTO users VALUES ('{member}' , {member.id} , 0 , 0 , 0 , 0)")
print("Bot connected suffecently")
else:
pass
@client.command(aliases = ['pass'])
async def mypass(ctx, member: discord.Member = None):
if member is None:
await ctx.send(embed = discord.Embed(
description = f"""User's pass **{ctx.author}** lvl is **{cursor.execute("SELECT lvl FROM users Where id = {}").format(ctx.author.id).fetchone()[0]}**"""
))
else:
await ctx.send(embed = discord.Embed(
description = f"""User's pass **{member}** lvl is **{cursor.execute("SELECT lvl FROM users Where id = {}").format(member.id).fetchone()[0]}**"""
))
client.run(settings['TOKEN'])
C:\Users\User\Desktop\CommunityBot>communitybot.py
Traceback (most recent call last):
File "C:\Users\User\Desktop\CommunityBot\communitybot.py", line 8, in
client = commands.Bot(command_prefix='!', intents= 8)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\User\AppData\Local\Programs\Python\Python311\Lib\site-packages\discord\ext\commands\bot.py", line 171, in init
super().init(intents=intents, **options)
File "C:\Users\User\AppData\Local\Programs\Python\Python311\Lib\site-packages\discord\ext\commands\core.py", line 1302, in init
super().init(*args, **kwargs)
File "C:\Users\User\AppData\Local\Programs\Python\Python311\Lib\site-packages\discord\client.py", line 253, in init
self._connection: ConnectionState = self._get_state(intents=intents, **options)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\User\AppData\Local\Programs\Python\Python311\Lib\site-packages\discord\client.py", line 284, in _get_state
return ConnectionState(dispatch=self.dispatch, handlers=self._handlers, hooks=self._hooks, http=self.http, **options)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\User\AppData\Local\Programs\Python\Python311\Lib\site-packages\discord\state.py", line 221, in init
raise TypeError(f'intents parameter must be Intent not {type(intents)!r}')
TypeError: intents parameter must be Intent not <class 'int'>
A:
Not sure why you are defining client twice right next to each other, but the intents parameter of the client definition doesn't accept an integer value (unlike permissions, which does). Here is the solution I came up with, which is how I set up most of my bots:
intents = discord.Intents().all()
client = commands.Bot(command_prefix='!', intents=intents)
Hope this helps
|
Why do I get these errors when running my discord bot?
|
import discord
from discord.ext import commands
import sqlite3
from config import settings
client = discord.Client(intents = discord.Intents().all())
client = commands.Bot(command_prefix='!', intents= 8)
connection = sqlite3.connect('server.db')
cursor = connection.cursor()
@client.event
async def onReady():
cursor.execute("""CREATE TABLE IF NOT EXISTS users(
name TEXT,
id INT,
lvl INT,
goals INT,
assists INT,
cleensheets INT
)""")
g = bot.get_guild(696354089851813978)
members = await g.fetch_members(limit=3000, after=None).flatten()
for guild in client.guilds:
for member in guilds.member:
if cursor.execute(f"SELECT id FROM users WHERE id = {member.id}").fetchone() is None:
cursor.execute(f"INSERT INTO users VALUES ('{member}' , {member.id} , 0 , 0 , 0)")
else:
pass
connection.commit()
print("Bot connected suffecently")
@client.event
async def on_member_join(member):
if cursor.execute(f"SELECT id FROM users WHERE id = {member.id}").fetchone() is None:
cursor.execute(f"INSERT INTO users VALUES ('{member}' , {member.id} , 0 , 0 , 0 , 0)")
print("Bot connected suffecently")
else:
pass
@client.command(aliases = ['pass'])
async def mypass(ctx, member: discord.Member = None):
if member is None:
await ctx.send(embed = discord.Embed(
description = f"""User's pass **{ctx.author}** lvl is **{cursor.execute("SELECT lvl FROM users Where id = {}").format(ctx.author.id).fetchone()[0]}**"""
))
else:
await ctx.send(embed = discord.Embed(
description = f"""User's pass **{member}** lvl is **{cursor.execute("SELECT lvl FROM users Where id = {}").format(member.id).fetchone()[0]}**"""
))
client.run(settings['TOKEN'])
C:\Users\User\Desktop\CommunityBot>communitybot.py
Traceback (most recent call last):
File "C:\Users\User\Desktop\CommunityBot\communitybot.py", line 8, in
client = commands.Bot(command_prefix='!', intents= 8)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\User\AppData\Local\Programs\Python\Python311\Lib\site-packages\discord\ext\commands\bot.py", line 171, in init
super().init(intents=intents, **options)
File "C:\Users\User\AppData\Local\Programs\Python\Python311\Lib\site-packages\discord\ext\commands\core.py", line 1302, in init
super().init(*args, **kwargs)
File "C:\Users\User\AppData\Local\Programs\Python\Python311\Lib\site-packages\discord\client.py", line 253, in init
self._connection: ConnectionState = self._get_state(intents=intents, **options)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\User\AppData\Local\Programs\Python\Python311\Lib\site-packages\discord\client.py", line 284, in _get_state
return ConnectionState(dispatch=self.dispatch, handlers=self._handlers, hooks=self._hooks, http=self.http, **options)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\User\AppData\Local\Programs\Python\Python311\Lib\site-packages\discord\state.py", line 221, in init
raise TypeError(f'intents parameter must be Intent not {type(intents)!r}')
TypeError: intents parameter must be Intent not <class 'int'>
|
[
"Not sure why you are defining client twice right next to each other, but the intents section of the client definition doesn't accept an integer value, separate from permissions, which does. Here is the solution I came up with which is how I setup most of my bots:\nintents = discord.Intents().all()\nclient = commands.Bot(command_prefix='!', intents=intents)\n\nHope this helps\n"
] |
[
0
] |
[] |
[] |
[
"python"
] |
stackoverflow_0074503303_python.txt
|
Q:
Creating a new dataframe to contain a section of 1 column from multiple csv files in Python
So I am trying to create a new dataframe that includes some data from 300+ csv files.
Each file contains up to 200,000 rows of data, and I am only interested in 1 of the columns within each file (the same column for each file).
I am trying to combine these columns into 1 dataframe, where column 6 from csv 1 would be in the 1st column of the new dataframe, column 6 from csv 2 would be in the 2nd column of the new dataframe, and so on up until the 315th csv file.
I don't need all 200,000 rows of data to be extracted, but I am unsure of how I would extract just 2,000 rows from the middle section of the data (each file ranges in number of rows, so the exact same rows for each file aren't necessary, as long as it is the middle 2,000).
Any help in extracting the 2000 rows from each file to populate different columns in the new dataframe would be greatly appreciated.
So far, I have manipulated the data to only contain the relevant column for each file. This displays all the rows of data in the column for each file individually.
I tried to use the iloc function to reduce this to 2000 rows but it did not display any actual data in the output.
I am unsure as to how I would now extract this data into a dataframe for all the columns to be contained.
import pandas as pd
import os
import glob
import itertools
#glob to get all csv files
path = os.getcwd()
csv_files = glob.glob(os.path.join('filepath/', "*.csv"))
#loop list of csv files
for f in csv_files:
df = pd.read_csv(f, header=None)
df.rename(columns={6: 'AE'}, inplace=True)
new_df = df.filter(['AE'])
print('Location:', f)
print('File Name:', f.split("\\")[-1])
print('Content:')
display(new_df)
print()
A:
Based on your description, I am inferring that you have a number of different files in csv format, each of which has at least 2000 lines and 6 columns. You want to take the data only from the 6th column of each file and only for the middle 2000 records in each file and to put all of those blocks of 2000 records into a new dataframe, with a column that in some way identifies which file the block came from.
You can read each file using pandas, as you have done, and then you need to use loc, as one of the commenters said, to select the 2000 records you want to keep. If you save each of those blocks of records in a separate dataframe you can then use the pandas concat method to join them all together into different columns of a new dataframe.
Here is some code that I hope will be self-explanatory. I have assumed that you want the 6th column, which is the one with index 5 in pandas because we start counting from 0. I have also used usecols to keep only the 6th column, and I rename the column to an index number based on the order in which the files are being read. You would need to change this for your own choice of column naming.
I choose the middle 2000 records by defining the starting point as record x, say, so that x + 2000 + x = total number of records, therefore x=(total number of records) / 2 - 1000. This might not be exactly how you want to define the middle 2000 records, so you could change this.
df_middles is a list to which we append every new dataframe of the new file's middle 2000 records. We use pd.concat at the end to put all the columns into a new dataframe.
import os
import glob
import pandas as pd
# glob to get all csv files
path = os.getcwd()
csv_files = glob.glob(os.path.join("filepath/", "*.csv"))
df_middles = []
# loop list of csv files
for idx, f in enumerate(csv_files, 1):
# only keep the 6th column (index 5)
df = pd.read_csv(f, header=None, usecols=[5])
colname = f"column_{idx}"
df.rename(columns={5: colname}, inplace=True)
number_of_lines = len(df)
if number_of_lines < 2000:
raise IOError(f"Not enough lines in the input file: {f}")
middle_range_start = int(number_of_lines / 2) - 1000
middle_range_end = middle_range_start + 1999
df_middle = df.loc[middle_range_start:middle_range_end].reset_index(drop=True)
df_middles.append(df_middle)
df_final = pd.concat(df_middles, axis="columns")
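If you prefer positional slicing, iloc does the same job; a minimal sketch of the middle-block selection for one dataframe, using the same start-point definition as above (iloc's end index is exclusive, so no -1 is needed):
start = len(df) // 2 - 1000
df_middle = df.iloc[start:start + 2000].reset_index(drop=True)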
|
Creating a new dataframe to contain a section of 1 column from multiple csv files in Python
|
So I am trying to create a new dataframe that includes some data from 300+ csv files.
Each file contains up to 200,000 rows of data, and I am only interested in 1 of the columns within each file (the same column for each file).
I am trying to combine these columns into 1 dataframe, where column 6 from csv 1 would be in the 1st column of the new dataframe, column 6 from csv 2 would be in the 2nd column of the new dataframe, and so on up until the 315th csv file.
I don't need all 200,000 rows of data to be extracted, but I am unsure of how I would extract just 2,000 rows from the middle section of the data (each file ranges in number of rows, so the exact same rows for each file aren't necessary, as long as it is the middle 2,000).
Any help in extracting the 2000 rows from each file to populate different columns in the new dataframe would be greatly appreciated.
So far, I have manipulated the data to only contain the relevant column for each file. This displays all the rows of data in the column for each file individually.
I tried to use the iloc function to reduce this to 2000 rows but it did not display any actual data in the output.
I am unsure as to how I would now extract this data into a dataframe for all the columns to be contained.
import pandas as pd
import os
import glob
import itertools
#glob to get all csv files
path = os.getcwd()
csv_files = glob.glob(os.path.join('filepath/', "*.csv"))
#loop list of csv files
for f in csv_files:
df = pd.read_csv(f, header=None)
df.rename(columns={6: 'AE'}, inplace=True)
new_df = df.filter(['AE'])
print('Location:', f)
print('File Name:', f.split("\\")[-1])
print('Content:')
display(new_df)
print()
|
[
"Based on your description, I am inferring that you have a number of different files in csv format, each of which has at least 2000 lines and 6 columns. You want to take the data only from the 6th column of each file and only for the middle 2000 records in each file and to put all of those blocks of 2000 records into a new dataframe, with a column that in some way identifies which file the block came from.\nYou can read each file using pandas, as you have done, and then you need to use loc, as one of the commenters said, to select the 2000 records you want to keep. If you save each of those blocks of records in a separate dataframe you can then use the pandas concat method to join them all together into different columns of a new dataframe.\nHere is some code that I hope will be self-explanatory. I have assumed that you want the 6th column, which is the one with index 5 in pandas because we start counting from 0. I have also used usecols to keep only the 6th column, and I rename the column to an index number based on the order in which the files are being read. You would need to change this for your own choice of column naming.\nI choose the middle 2000 records by defining the starting point as record x, say, so that x + 2000 + x = total number of records, therefore x=(total number of records) / 2 - 1000. This might not be exactly how you want to define the middle 2000 records, so you could change this.\ndf_middles is a list to which we append every new dataframe of the new file's middle 2000 records. We use pd.concat at the end to put all the columns into a new dataframe.\nimport os\nimport glob\nimport pandas as pd\n\n# glob to get all csv files\npath = os.getcwd()\ncsv_files = glob.glob(os.path.join(\"filepath/\", \"*.csv\"))\n\ndf_middles = []\n\n# loop list of csv files\nfor idx, f in enumerate(csv_files, 1):\n # only keep the 6th column (index 5)\n df = pd.read_csv(f, header=None, usecols=[5])\n colname = f\"column_{idx}\"\n df.rename(columns={5: colname}, inplace=True)\n number_of_lines = len(df)\n if number_of_lines < 2000:\n raise IOError(f\"Not enough lines in the input file: {f}\")\n middle_range_start = int(number_of_lines / 2) - 1000\n middle_range_end = middle_range_start + 1999\n df_middle = df.loc[middle_range_start:middle_range_end].reset_index(drop=True)\n df_middles.append(df_middle)\n\ndf_final = pd.concat(df_middles, axis=\"columns\")\n\n"
] |
[
0
] |
[] |
[] |
[
"csv",
"dataframe",
"pandas",
"python"
] |
stackoverflow_0074488035_csv_dataframe_pandas_python.txt
|
Q:
How do I install pygame with cmd?
I have an idea for a game that uses the module pygame. The thing is, I don’t know how to install it.
I have tried to open up cmd and type:
pip install pygame
But it came up with an error saying:
pip is not recognized as an internal or external command
Please help me.
A:
To install pygame you need to write the command:
pip install pygame
in your command prompt. If that does not work, try:
pip3 install pygame
If that fails, make sure to install python from this link.
Make sure to add python and pip to your environment path. Restart your computer and then try again!
If you have already installed python, try running the installer again and this time including pip in the environment path or you can add it manually, this link will help you do this.
A:
You need to add the path of your pip installation to your PATH system variable.
After this, use: pip install pygame
A:
Well it looks like you don't have pip installed on your system. You should download it first.
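One more thing worth trying: if python itself works in your terminal but pip does not, you can invoke pip through the interpreter, which sidesteps the PATH problem entirely:
python -m pip install pygame
On Windows, the py launcher works as well: py -m pip install pygame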
|
How do I install pygame with cmd?
|
I have an idea for a game that uses the module pygame. The thing is, I don’t know how to install it.
I have tried to open up cmd and type:
pip install pygame
But it came up with an error saying:
pip is not recognized as an internal or external command
Please help me.
|
[
"To install pygame you need to write the command:\npip install pygame\nin your command prompt, if that does not work try:\npip3 install pygame\nIf that fails, make sure to install python from this link.\nMake sure to add python and pip to your environment path. Restart your computer and then try again!\nIf you have already installed python, try running the installer again and this time including pip in the environment path or you can add it manually, this link will help you do this.\n",
"You need to add the path of your pip installation to your PATH system variable.\nAfter this, use: pip install pygame\n",
"Well it looks like you don't have pip installed on your system. You should download it first.\n"
] |
[
0,
0,
0
] |
[] |
[] |
[
"pip",
"pycharm",
"pygame",
"python"
] |
stackoverflow_0066070289_pip_pycharm_pygame_python.txt
|
Q:
Pandas declare dtypes before loading data
I have a problem with RAM usage: I fetch quite a lot of data from the DB and pour it into a pandas DataFrame, where I do a group-by to list, something the DB is not very good at.
The thing is, as I fetch around 40 columns, pandas is not really good at determining the dtypes for each column. I would love to specify the dtype for each column separately, so pandas does not use so much memory by using the object dtype everywhere. I know I can transform the dataframe afterwards, but that does not solve the RAM overuse.
import pandas as pd
import numpy as np
# Just a sample sql
sql = "select premise_id, parent_id, addr_ward FROM table;"
# This is list of tuples from database
rows = safe_call_db_read(db.conn, sql)
logger.info("Db fetched dataframe")
dtype = {
'premise_id': np.int64,
'parent_id': np.int64,
'addr_ward': object
}
data_frame = pd.DataFrame(data=rows, dtype=dtype)
This fails, ofc, because only one dtype is allowed as parameter, throwing this
TypeError: object of type 'type' has no len()
This SUCKS.
Is there some way of declaring dtypes for each column before actually loading the data, so that each column is stored optimally, thus saving me some RAM?
Maybe creating empty data frame, declaring dtype for each column and then appending the rows?
A:
You might want to try the pandas method read_sql_query to read the SQL query directly into a dataframe; you can pass the dtype dict that you created, exactly as you made it, as the dtype argument.
The only extra thing you need is to create a connection to your database beforehand, through sqlite3 for example.
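For reference, a minimal sketch of that route, reusing the sql string and dtype dict from the question (the database file name here is hypothetical; read_sql_query accepts a per-column dtype dict since pandas 1.3):
import sqlite3
import pandas as pd

conn = sqlite3.connect('mydb.sqlite')  # hypothetical database file
data_frame = pd.read_sql_query(sql, conn, dtype=dtype)  # dtype dict applied per column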
A:
I would try pandas.DataFrame.from_records, which has a coerce_float option and whose documentation says it's useful for SQL result sets. As @maxxel_ pointed out, reading from a SQL database is easiest, because Pandas can use the SQL definitions to get the datatypes, but it seems from your code that you have a subroutine with extra handling/etc.
Here's an example copied from the documentation, it shows the dtype defined independently for each column:
>>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')],
... dtype=[('col_1', 'i4'), ('col_2', 'U1')])
>>> pd.DataFrame.from_records(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
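A sketch applying that idea to the rows from the question: build a NumPy structured array with one dtype per field, then hand it to from_records (the sample rows here are made up for illustration; the field layout follows the SQL in the question):
import numpy as np
import pandas as pd

# hypothetical rows, shaped like the output of safe_call_db_read in the question
rows = [(1, 10, '104B'), (2, 10, '217A')]

dtype = np.dtype([('premise_id', 'i8'), ('parent_id', 'i8'), ('addr_ward', 'O')])
records = np.array(rows, dtype=dtype)
data_frame = pd.DataFrame.from_records(records)
print(data_frame.dtypes)  # int64, int64, object -- no column falls back to object unnecessarily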
|
Pandas declare dtypes before loading data
|
I have a problem with RAM usage: I fetch quite a lot of data from the DB and pour it into a pandas DataFrame, where I do a group-by to list, something the DB is not very good at.
The thing is, as I fetch around 40 columns, pandas is not really good at determining the dtypes for each column. I would love to specify the dtype for each column separately, so pandas does not use so much memory by using the object dtype everywhere. I know I can transform the dataframe afterwards, but that does not solve the RAM overuse.
import pandas as pd
import numpy as np
# Just a sample sql
sql = "select premise_id, parent_id, addr_ward FROM table;"
# This is list of tuples from database
rows = safe_call_db_read(db.conn, sql)
logger.info("Db fetched dataframe")
dtype = {
'premise_id': np.int64,
'parent_id': np.int64,
'addr_ward': object
}
data_frame = pd.DataFrame(data=rows, dtype=dtype)
This fails, ofc, because only one dtype is allowed as parameter, throwing this
TypeError: object of type 'type' has no len()
This SUCKS.
Is there some way of declaring dtypes for each column before actually loading the data, so that each column is stored optimally, thus saving me some RAM?
Maybe creating empty data frame, declaring dtype for each column and then appending the rows?
|
[
"you might wanna try the pandas method read_sql_query to directly read the sql query into a dataframe, you can give the dtype dict that you created exactly as you made it as the dtype arg.\nonly extra thing you need is to create a connection to your database beforehand through sqlite3 for example.\n",
"I would try pandas.from_records which has a coerce_float option, and says that it's useful for SQL result sets. As @maxxel_ pointed out, reading from a SQL database is easiest, because Pandas can use the SQL definitions to get the datatypes, but it seems from your code that you have a subroutine with extra handling/etc.\nHere's an example copied from the documentation, it shows the dtype defined independently for each column:\n>>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')],\n... dtype=[('col_1', 'i4'), ('col_2', 'U1')])\n>>> pd.DataFrame.from_records(data)\n col_1 col_2\n0 3 a\n1 2 b\n2 1 c\n3 0 d\n\n"
] |
[
0,
0
] |
[] |
[] |
[
"pandas",
"python"
] |
stackoverflow_0074520410_pandas_python.txt
|
Q:
Is there a short form for accessing dictionary values in a for loop in Python?
Is there a short form for accessing dictionary values in a for loop in Python?
I have the following example code:
dict = [{"name": "testdata"}, {"name": "testdata2"}]
for x in dict:
print(x["name"])
Is there a way to write the dictionary key directly into the line of the for loop, e.g.
dict = [{"name": "testdata"}, {"name": "testdata2"}]
for x in dict["name"]:
print(x)
which obviously does not work. But the main idea is that x should already be the string "testdata" or "testdata2". I want to avoid this:
dict = [{"name": "testdata"}, {"name": "testdata2"}]
for x in dict:
x = x["name"]
A:
You can't destructure a dict on assignment, so the only way would be to loop over an iterable that contains only the one value you want, e.g.:
for x in (i['name'] for i in dict):
...
or:
from operator import itemgetter
for x in map(itemgetter('name'), dict):
...
A:
You won't get around calling the key for each element but you can do it in a list comprehension to convert your list of dictionaries to a list of 'name' elements and then loop through that:
dict = [{"name": "testdata"}, {"name": "testdata2"}]
for name in [x["name"] for x in dict]:
print(name)
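A self-contained version of the itemgetter approach; the list is renamed to records here because calling it dict shadows the built-in dict type:
from operator import itemgetter

records = [{"name": "testdata"}, {"name": "testdata2"}]
for name in map(itemgetter("name"), records):
    print(name)  # prints testdata, then testdata2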
|
Is there a short form for accessing dictionary values in a for loop in Python?
|
Is there a short form for accessing dictionary values in a for loop in Python?
I have the following example code:
dict = [{"name": "testdata"}, {"name": "testdata2"}]
for x in dict:
print(x["name"])
Is there a way to write the dictionary key directly into the line of the for loop, e.g.
dict = [{"name": "testdata"}, {"name": "testdata2"}]
for x in dict["name"]:
print(x)
which obviously does not work. But the main idea is that x should already be the string "testdata" or "testdata2". I want to avoid this:
dict = [{"name": "testdata"}, {"name": "testdata2"}]
for x in dict:
x = x["name"]
|
[
"You can't destructure a dict on assignment, so the only way would be to loop over an iterable that contains only the one value you want, e.g.:\nfor x in (i['name'] for i in dict):\n ...\n\nor:\nfrom operator import itemgetter\n\nfor x in map(itemgetter('name'), dict):\n ...\n\n",
"You won't get around calling the key for each element but you can do it in a list comprehension to convert your list of dictionaries to a list of 'name' elements and then loop through that:\ndict = [{\"name\": \"testdata\"}, {\"name\": \"testdata2\"}]\n\nfor name in [x[\"name\"] for x in dict]:\n print(name)\n\n"
] |
[
1,
0
] |
[] |
[] |
[
"for_loop",
"python"
] |
stackoverflow_0074520666_for_loop_python.txt
|
Q:
How do I create a column using strings retrieved from another column in Python?
I am trying to read information from a column in my csv file and use it to create a new column. Please help
I imported the csv file and printed the first 10 rows (+ header) but now I would like to create a column for the years in the title column.
```
import csv
from itertools import islice
from operator import itemgetter
#opening the CSV file
with open('/home/raymondossai/movies.csv', mode ='r')as file:
#reading the CSV file
csvFile = csv.reader(file)
#displaying the contents of the CSV file
for row in islice(csvFile, 11): # first 10 only
print(row)
```
Result:
['movieId', 'title', 'genres']
['1', 'Toy Story (1995)', 'Adventure|Animation|Children|Comedy|Fantasy']
['2', 'Jumanji (1995)', 'Adventure|Children|Fantasy']
['3', 'Grumpier Old Men (1995)', 'Comedy|Romance']
['4', 'Waiting to Exhale (1995)', 'Comedy|Drama|Romance']
['5', 'Father of the Bride Part II (1995)', 'Comedy']
['6', 'Heat (1995)', 'Action|Crime|Thriller']
['7', 'Sabrina (1995)', 'Comedy|Romance']
['8', 'Tom and Huck (1995)', 'Adventure|Children']
['9', 'Sudden Death (1995)', 'Action']
['10', 'GoldenEye (1995)', 'Action|Adventure|Thriller']
A:
You can use re to extract the year from the title:
rows = [
["movieId", "title", "genres"],
["1", "Toy Story (1995)", "Adventure|Animation|Children|Comedy|Fantasy"],
["2", "Jumanji (1995)", "Adventure|Children|Fantasy"],
["3", "Grumpier Old Men (1995)", "Comedy|Romance"],
["4", "Waiting to Exhale (1995)", "Comedy|Drama|Romance"],
["5", "Father of the Bride Part II (1995)", "Comedy"],
["6", "Heat (1995)", "Action|Crime|Thriller"],
["7", "Sabrina (1995)", "Comedy|Romance"],
["8", "Tom and Huck (1995)", "Adventure|Children"],
["9", "Sudden Death (1995)", "Action"],
["10", "GoldenEye (1995)", "Action|Adventure|Thriller"],
]
import re
pat = re.compile(r"\((\d{4})\)")
for movie_id, title, genres in rows[1:]:
year = pat.search(title)
print([movie_id, title, genres, year.group(1) if year else "N/A"])
Prints:
['1', 'Toy Story (1995)', 'Adventure|Animation|Children|Comedy|Fantasy', '1995']
['2', 'Jumanji (1995)', 'Adventure|Children|Fantasy', '1995']
['3', 'Grumpier Old Men (1995)', 'Comedy|Romance', '1995']
['4', 'Waiting to Exhale (1995)', 'Comedy|Drama|Romance', '1995']
['5', 'Father of the Bride Part II (1995)', 'Comedy', '1995']
['6', 'Heat (1995)', 'Action|Crime|Thriller', '1995']
['7', 'Sabrina (1995)', 'Comedy|Romance', '1995']
['8', 'Tom and Huck (1995)', 'Adventure|Children', '1995']
['9', 'Sudden Death (1995)', 'Action', '1995']
['10', 'GoldenEye (1995)', 'Action|Adventure|Thriller', '1995']
A:
you should definitely use pandas for this; it's way easier to work with tables and way cleaner too.
try to read in the csv file like so:
import pandas as pd
df = pd.read_csv('/home/raymondossai/movies.csv')
the df object is basically your csv table represented as an object in python.
for having the year as an extra column you could use the str.split() method since the year always follows after a ' (' expression:
# get the year as the first 4 characters after the ' (' separator
df['Year'] = df['title'].str.split(' (').str[1].str[:4].astype(int)
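If some titles contain additional parentheses, a regex anchored to the end of the title is more robust than splitting; a small sketch, assuming the year is always the final parenthesised group:
# pull out the trailing "(YYYY)" group; titles without one become NaN
years = df['title'].str.extract(r'\((\d{4})\)\s*$', expand=False)
df['Year'] = pd.to_numeric(years)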
|
How do I create a column using strings retrieved from another column in Python?
|
I am trying to read information from a column in my csv file and use it to create a new column. Please help
I imported the csv file and printed the first 10 rows (+ header) but now I would like to create a column for the years in the title column.
```
import csv
from itertools import islice
from operator import itemgetter
#opening the CSV file
with open('/home/raymondossai/movies.csv', mode ='r')as file:
#reading the CSV file
csvFile = csv.reader(file)
#displaying the contents of the CSV file
for row in islice(csvFile, 11): # first 10 only
print(row)
```
Result:
['movieId', 'title', 'genres']
['1', 'Toy Story (1995)', 'Adventure|Animation|Children|Comedy|Fantasy']
['2', 'Jumanji (1995)', 'Adventure|Children|Fantasy']
['3', 'Grumpier Old Men (1995)', 'Comedy|Romance']
['4', 'Waiting to Exhale (1995)', 'Comedy|Drama|Romance']
['5', 'Father of the Bride Part II (1995)', 'Comedy']
['6', 'Heat (1995)', 'Action|Crime|Thriller']
['7', 'Sabrina (1995)', 'Comedy|Romance']
['8', 'Tom and Huck (1995)', 'Adventure|Children']
['9', 'Sudden Death (1995)', 'Action']
['10', 'GoldenEye (1995)', 'Action|Adventure|Thriller']
|
[
"You can use re to extract the year from the title:\nrows = [\n [\"movieId\", \"title\", \"genres\"],\n [\"1\", \"Toy Story (1995)\", \"Adventure|Animation|Children|Comedy|Fantasy\"],\n [\"2\", \"Jumanji (1995)\", \"Adventure|Children|Fantasy\"],\n [\"3\", \"Grumpier Old Men (1995)\", \"Comedy|Romance\"],\n [\"4\", \"Waiting to Exhale (1995)\", \"Comedy|Drama|Romance\"],\n [\"5\", \"Father of the Bride Part II (1995)\", \"Comedy\"],\n [\"6\", \"Heat (1995)\", \"Action|Crime|Thriller\"],\n [\"7\", \"Sabrina (1995)\", \"Comedy|Romance\"],\n [\"8\", \"Tom and Huck (1995)\", \"Adventure|Children\"],\n [\"9\", \"Sudden Death (1995)\", \"Action\"],\n [\"10\", \"GoldenEye (1995)\", \"Action|Adventure|Thriller\"],\n]\n\nimport re\n\npat = re.compile(r\"\\((\\d{4})\\)\")\n\nfor movie_id, title, genres in rows[1:]:\n year = pat.search(title)\n print([movie_id, title, genres, year.group(1) if year else \"N/A\"])\n\nPrints:\n['1', 'Toy Story (1995)', 'Adventure|Animation|Children|Comedy|Fantasy', '1995']\n['2', 'Jumanji (1995)', 'Adventure|Children|Fantasy', '1995']\n['3', 'Grumpier Old Men (1995)', 'Comedy|Romance', '1995']\n['4', 'Waiting to Exhale (1995)', 'Comedy|Drama|Romance', '1995']\n['5', 'Father of the Bride Part II (1995)', 'Comedy', '1995']\n['6', 'Heat (1995)', 'Action|Crime|Thriller', '1995']\n['7', 'Sabrina (1995)', 'Comedy|Romance', '1995']\n['8', 'Tom and Huck (1995)', 'Adventure|Children', '1995']\n['9', 'Sudden Death (1995)', 'Action', '1995']\n['10', 'GoldenEye (1995)', 'Action|Adventure|Thriller', '1995']\n\n",
"you should definetly use pandas for this, it's way easier to work with tables and way cleaner too.\ntry to read in the csv file like so:\nimport pandas as pd\ndf = pd.read_csv('/home/raymondossai/movies.csv')\n\nthe df object is basically your csv table represented as an object in python.\nfor having the year as an extra column you could use the str.split() method since the year always follows after a ' (' expression:\n# get the 4 characters of the year (first 4 characters after the ' (' expression)\ndf['Year'] = df['title'].str.split(pat=' (', expand=True)[1][:4].astype(int)\n\n"
] |
[
1,
0
] |
[] |
[] |
[
"arrays",
"csv",
"python"
] |
stackoverflow_0074520586_arrays_csv_python.txt
|
Q:
Importing file in pandas with read_table() cuts decimal places
I have a txt file like this (separated by tab):
Variance          Mean
0.001435955236    -0.001117
0.002473570225    0.003123
0.002334629124    -0.003471
...and so on.
I load it using pandas.read_table() and the result is a dataframe like this:
   Variance      Mean
0  0.001436 -0.001117
1  0.002474  0.003123
2  0.002335 -0.003471
Why does it cut the decimal places in the Variance column? I need those values to be as in the original file.
The file can be found here: https://github.com/jarsonX/Temp_files
My code:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df_assets = pd.read_table('assets.txt')
df_assets.head(10)
A:
Pandas does not actually "cut" the decimal places; it just rounds them when printing. To print with higher display precision, use
with pd.option_context('display.precision', 10):
print(df_assets)
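To convince yourself that nothing was lost, print one stored value directly, or raise the display precision globally; a short sketch:
# the underlying float64 keeps all the digits from the file
print(df_assets['Variance'].iloc[0])  # 0.001435955236

# alternatively, widen the float formatting for all subsequent prints
pd.set_option('display.float_format', '{:.12f}'.format)
print(df_assets.head())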
|
Importing file in pandas with read_table() cuts decimal places
|
I have a txt file like this (separated by tab):
Variance          Mean
0.001435955236    -0.001117
0.002473570225    0.003123
0.002334629124    -0.003471
...and so on.
I load it using pandas.read_table() and the result is a dataframe like this:
   Variance      Mean
0  0.001436 -0.001117
1  0.002474  0.003123
2  0.002335 -0.003471
Why does it cut the decimal places in the Variance column? I need those values to be as in the original file.
The file can be found here: https://github.com/jarsonX/Temp_files
My code:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df_assets = pd.read_table('assets.txt')
df_assets.head(10)
|
[
"Pandas does not actually \"cut\" the decimal place, it just rounds when printing. To print with display precision, use\nwith pd.option_context('display.precision', 10):\n print(df_assets)\n\n"
] |
[
2
] |
[] |
[] |
[
"pandas",
"python"
] |
stackoverflow_0074520784_pandas_python.txt
|
Q:
Convert CSV to JSON using python pandas
I have a CSV file containing data that I want to convert into JSON format, but I am having an issue with the formatting.
Data input in csv file:
Full CSV: rarities.csv
I have tried this code but it doesn't get the desired result.
Here is the code:
import pandas as pd
df = pd.read_csv(r'rarities.csv')
df.to_json(r'rarities.json', orient='records')
(the case of the property names doesn't matter)
The data format I want in JSON:
[
{
"name": "Common",
"level_count": 14,
"relative_level": 0,
"tournament_level_index": 10,
"mirror_relative_level": 0,
"clone_relative_level": 0,
"donate_capacity": 1,
"sort_capacity": 1,
"donate_reward": 5,
"donate_xp": 1,
"overflow_prestige": 1,
"gold_conversion_value": 5,
"max_level_donation_cost": 5,
"trade_card_amount": 250,
"chance_weight": 1000,
"balance_multiplier": 100,
"upgrade_exp": [4, 5, 6, 10, 25, 50, 100, 200, 400, 600, 800, 1600, 2000, 0],
"upgrade_material_count": [2, 4, 10, 20, 50, 100, 200, 400, 800, 1000, 1500, 3000, 5000, 0],
"original_upgrade_material_count": [2, 4, 10, 20, 50, 100, 200, 400, 800, 1000, 2000, 5000, 0, 0],
"upgrade_cost": [5, 20, 50, 150, 400, 1000, 2000, 4000, 8000, 15000, 35000, 75000, 100000, 0],
"power_level_multiplier": [110, 121, 133, 146, 160, 176, 193, 212, 233, 256, 281, 309, 339, 372, 409, 450, 495, 545, 600]
},
{
"name": "Rare",
"level_count": 12,
"relative_level": 2,
"tournament_level_index": 8,
"mirror_relative_level": 2,
"clone_relative_level": 2,
"donate_capacity": 10,
"sort_capacity": 7,
"donate_reward": 50,
"donate_xp": 10,
"overflow_prestige": 10,
"gold_conversion_value": 50,
"max_level_donation_cost": 50,
"trade_card_amount": 50,
"chance_weight": 400,
"balance_multiplier": 130,
"upgrade_exp": [6, 10, 25, 50, 100, 200, 400, 600, 800, 1600, 2000, 0],
"upgrade_material_count": [2, 4, 10, 20, 50, 100, 200, 400, 500, 750, 1250, 0],
"original_upgrade_material_count": [2, 4, 10, 20, 50, 100, 200, 400, 800, 1000, 0, 0],
"upgrade_cost": [50, 150, 400, 1000, 2000, 4000, 8000, 15000, 35000, 75000, 100000, 0],
"power_level_multiplier": [110, 121, 133, 146, 160, 176, 193, 212, 233, 256, 281, 309, 340, 374, 411, 452, 497]
},
{
"name": "Epic",
"level_count": 9,
"relative_level": 5,
"tournament_level_index": 5,
"mirror_relative_level": 5,
"clone_relative_level": 5,
"donate_capacity": 10,
"sort_capacity": 80,
"donate_reward": 500,
"donate_xp": 10,
"overflow_prestige": 100,
"gold_conversion_value": 500,
"max_level_donation_cost": 500,
"trade_card_amount": 10,
"chance_weight": 40,
"balance_multiplier": 180,
"upgrade_exp": [25, 100, 200, 400, 600, 800, 1600, 2000, 0],
"upgrade_material_count": [2, 4, 10, 20, 40, 50, 100, 200, 0],
"original_upgrade_material_count": [2, 4, 10, 20, 50, 100, 200, 0, 0],
"upgrade_cost": [400, 2000, 4000, 8000, 15000, 35000, 75000, 100000, 0],
"power_level_multiplier": [110, 121, 133, 146, 160, 176, 193, 212, 233, 256, 282, 310, 341, 375]
}
]
full JSON: rarities.json
Thanks for the help.
A:
you can use:
import numpy as np
import pandas as pd

df = df.drop(0)  # delete the first row; we will not use it
df['Name'] = df['Name'].ffill()  # fill NaNs in the Name column with the previous value
dfv = df.pivot_table(index='Name', aggfunc=list)  # pivot by name and collect items into lists
dfv = dfv.applymap(lambda x: [i for i in x if str(i) != 'nan'])  # remove NaNs inside the lists
dfv = dfv.applymap(lambda x: x[0] if len(x) == 1 else x)  # if list length == 1, unwrap it to a scalar
dfv = dfv.applymap(lambda x: np.nan if x == [] else x)  # convert empty lists to NaN
dfv = dfv.reset_index()
final_json = dfv.to_dict('records')
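Since to_dict('records') returns plain Python objects, writing the target file is then a one-liner with the standard json module (the filename is taken from the question):
import json

with open('rarities.json', 'w') as fh:
    json.dump(final_json, fh, indent=2)  # note: any remaining NaN is written as NaN, which is not strict JSON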
|
Convert CSV to JSON using python pandas
|
I have a CSV file containing data that I want to convert into JSON format, but I am having an issue with the formatting.
Data input in csv file:
Full CSV: rarities.csv
I have tried this code but it doesn't get the desired result.
Here is the code:
import pandas as pd
df = pd.read_csv(r'rarities.csv')
df.to_json(r'rarities.json', orient='records')
(the case of the property names doesn't matter)
The data format I want in JSON:
[
{
"name": "Common",
"level_count": 14,
"relative_level": 0,
"tournament_level_index": 10,
"mirror_relative_level": 0,
"clone_relative_level": 0,
"donate_capacity": 1,
"sort_capacity": 1,
"donate_reward": 5,
"donate_xp": 1,
"overflow_prestige": 1,
"gold_conversion_value": 5,
"max_level_donation_cost": 5,
"trade_card_amount": 250,
"chance_weight": 1000,
"balance_multiplier": 100,
"upgrade_exp": [4, 5, 6, 10, 25, 50, 100, 200, 400, 600, 800, 1600, 2000, 0],
"upgrade_material_count": [2, 4, 10, 20, 50, 100, 200, 400, 800, 1000, 1500, 3000, 5000, 0],
"original_upgrade_material_count": [2, 4, 10, 20, 50, 100, 200, 400, 800, 1000, 2000, 5000, 0, 0],
"upgrade_cost": [5, 20, 50, 150, 400, 1000, 2000, 4000, 8000, 15000, 35000, 75000, 100000, 0],
"power_level_multiplier": [110, 121, 133, 146, 160, 176, 193, 212, 233, 256, 281, 309, 339, 372, 409, 450, 495, 545, 600]
},
{
"name": "Rare",
"level_count": 12,
"relative_level": 2,
"tournament_level_index": 8,
"mirror_relative_level": 2,
"clone_relative_level": 2,
"donate_capacity": 10,
"sort_capacity": 7,
"donate_reward": 50,
"donate_xp": 10,
"overflow_prestige": 10,
"gold_conversion_value": 50,
"max_level_donation_cost": 50,
"trade_card_amount": 50,
"chance_weight": 400,
"balance_multiplier": 130,
"upgrade_exp": [6, 10, 25, 50, 100, 200, 400, 600, 800, 1600, 2000, 0],
"upgrade_material_count": [2, 4, 10, 20, 50, 100, 200, 400, 500, 750, 1250, 0],
"original_upgrade_material_count": [2, 4, 10, 20, 50, 100, 200, 400, 800, 1000, 0, 0],
"upgrade_cost": [50, 150, 400, 1000, 2000, 4000, 8000, 15000, 35000, 75000, 100000, 0],
"power_level_multiplier": [110, 121, 133, 146, 160, 176, 193, 212, 233, 256, 281, 309, 340, 374, 411, 452, 497]
},
{
"name": "Epic",
"level_count": 9,
"relative_level": 5,
"tournament_level_index": 5,
"mirror_relative_level": 5,
"clone_relative_level": 5,
"donate_capacity": 10,
"sort_capacity": 80,
"donate_reward": 500,
"donate_xp": 10,
"overflow_prestige": 100,
"gold_conversion_value": 500,
"max_level_donation_cost": 500,
"trade_card_amount": 10,
"chance_weight": 40,
"balance_multiplier": 180,
"upgrade_exp": [25, 100, 200, 400, 600, 800, 1600, 2000, 0],
"upgrade_material_count": [2, 4, 10, 20, 40, 50, 100, 200, 0],
"original_upgrade_material_count": [2, 4, 10, 20, 50, 100, 200, 0, 0],
"upgrade_cost": [400, 2000, 4000, 8000, 15000, 35000, 75000, 100000, 0],
"power_level_multiplier": [110, 121, 133, 146, 160, 176, 193, 212, 233, 256, 282, 310, 341, 375]
}
]
full JSON: rarities.json
Thanks for the help.
|
[
"you can use:\ndf = df.drop(0) #delete first row. We will not use.\ndf['Name'] = df['Name'].ffill() #fillna in name column with first values until change\ndfv = df.pivot_table(index='Name',aggfunc=list) #pivot table by name and put items to list\ndfv = dfv.applymap(lambda x: [i for i in x if str(i) != 'nan']) #remove nans in lists\ndfv = dfv.applymap(lambda x: x[0] if len(x)==1 else x) #if list lenght ==1, convert to string\n\ndfv = dfv.applymap(lambda x: np.nan if x==[] else x) #convert empty lists to nan\ndfv = dfv.reset_index()\nfinal_json = dfv.to_dict('records')\n\n"
] |
[
1
] |
[] |
[] |
[
"converters",
"csv",
"json",
"pandas",
"python"
] |
stackoverflow_0074519829_converters_csv_json_pandas_python.txt
|
Q:
How to get list of all variables in jinja 2 templates
I am trying to get a list of all variables and blocks in a template. I don't want to create my own parser to find variables. I tried using the following snippet.
from jinja2 import Environment, PackageLoader
env = Environment(loader=PackageLoader('gummi', 'templates'))
template = env.get_template('chat.html')
template.blocks is a dict where the keys are blocks; how can I get all the variables inside the blocks?
A:
Since no one has answered the question and I found the answer
from jinja2 import Environment, PackageLoader, meta
env = Environment(loader=PackageLoader('gummi', 'templates'))
template_source = env.loader.get_source(env, 'page_content.html')[0]  # get_source returns a (source, filename, uptodate) tuple
parsed_content = env.parse(template_source)
meta.find_undeclared_variables(parsed_content)
This will yield the set of undeclared variables; since the template is not executed at run time, that amounts to the set of all variables.
Note: this also picks up HTML files which are included using include and extends.
A:
I had the same need and I've written a tool called jinja2schema. It provides a heuristic algorithm for inferring types from Jinja2 templates and can also be used for getting a list of all template variables, including nested ones.
Here is a short example of doing that:
>>> import jinja2
>>> import jinja2schema
>>>
>>> template = '''
... {{ x }}
... {% for y in ys %}
... {{ y.nested_field_1 }}
... {{ y.nested_field_2 }}
... {% endfor %}
... '''
>>> variables = jinja2schema.infer(template)
>>>
>>> variables
{'x': <scalar>,
'ys': [{'nested_field_1': <scalar>, 'nested_field_2': <scalar>}]}
>>>
>>> variables.keys()
['x', 'ys']
>>> variables['ys'].item.keys()
['nested_field_2', 'nested_field_1']
A:
For my Pelican theme, I have created a tool to analyse all Jinja variables in my template files.
I'll share my code.
This script generates a sample configuration from all the variables that exist in the template files and gets the variables from my official pelicanconf.py.
The function that extracts all variables from a template file:
def get_variables(filename):
    env = Environment(loader=FileSystemLoader('templates'))
    template_source = env.loader.get_source(env, filename)[0]
    parsed_content = env.parse(template_source)
    return meta.find_undeclared_variables(parsed_content)
The complete script
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# use:
# generate_pelicanconf-sample.py my_official_blog/pelicanconf.py
import sys
import imp
import os
from jinja2 import Environment, FileSystemLoader, meta
# Search all template files
def list_html_templates():
dirList = os.listdir('templates')
return dirList
# get all variable in template file
def get_variables(filename):
env = Environment(loader=FileSystemLoader('templates'))
template_source = env.loader.get_source(env, filename)[0]
parsed_content = env.parse(template_source)
return meta.find_undeclared_variables(parsed_content)
# Check if the pelicanconf.py is in param
if len(sys.argv) != 2:
print("Please indicate the pelicanconf.py file")
sys.exit()
# Get all vars from templates files
all_vars = set()
files = list_html_templates()
for fname in files:
variables = get_variables(fname)
for var in variables:
if var.isupper():
all_vars.add(var)
m = imp.load_source('pelicanconf', sys.argv[1])
# Show pelicanconf.py vars content
for var in all_vars:
varname = 'm.%s' % var
if var in m.__dict__:
print ("%s = %s" % (var, repr(m.__dict__[var])))
The sample result of this program
LINKS = ((u'Home', u'/'), (u'archives', u'/archives.html'), (u'tags', u'/tags.html'), (u'A propos', u'http://bruno.adele.im'))
SITESUBTITLE = u'Une famille compl\xe8tement 633<'
DEFAULT_LANG = u'fr'
SITEURL = u'http://blog.jesuislibre.org'
AUTHOR = u'Bruno Adel\xe9'
SITENAME = u'Famille de geeks'
SOCIAL = ((u'adele', u'http://adele.im'), (u'feed', u'http://feeds.feedburner.com/FamilleDeGeek'), (u'twitter', u'http://twitter.com/jesuislibre.org'), (u'google+', u'https://plus.google.com/100723270029692582967'), (u'blog', u'http://blog.jesuislibre.org'), (u'facebook', u'http://www.facebook.com/bruno.adele'), (u'flickr', u'http://www.flickr.com/photos/b_adele'), (u'linkedin', u'http://fr.linkedin.com/in/brunoadele'))
FEED_DOMAIN = u'http://blog.jesuislibre.org'
FEED_ALL_ATOM = u'feed.atom'
DISQUS_SITENAME = u'blogdejesuislibreorg'
DEFAULT_PAGINATION = 10
GITHUB_BLOG_SITE = u'https://github.com/badele/blog.jesuislibre.org'
For more detail on this script, see https://github.com/badele/pelican-theme-jesuislibre
A:
For me jinja2.meta.find_undeclared_variables(parsed_content) is not a good fit because it does not provide nested variables.
jinja2schema tool was kinda ok for simple scenarios but with all the loops and other jinja2 dark powers it was failing with errors.
I have played around with jinja2 data structures and was able to get all variables including nested ones. For my use case this was enough. Maybe this will also help for somebody else :)
Here is the code:
from jinja2 import Environment, FileSystemLoader, nodes
def get_variables(path, filename):
template_variables = set()
env = Environment(loader=FileSystemLoader(searchpath=path))
template_source = env.loader.get_source(env, filename)[0]
parsed_content = env.parse(template_source)
if parsed_content.body and hasattr(parsed_content.body[0], 'nodes'):
for variable in parsed_content.body[0].nodes:
if type(variable) is nodes.Name or type(variable) is nodes.Getattr:
parsed_variable = parse_jinja_variable(variable)
if parsed_variable:
template_variables.add(parsed_variable)
return template_variables
def parse_jinja_variable(variable, suffix=''):
if type(variable) is nodes.Name:
variable_key = join_keys(variable.name, suffix)
return variable_key
elif type(variable) is nodes.Getattr:
return parse_jinja_variable(variable.node, join_keys(variable.attr, suffix))
def join_keys(parent_key, child_key):
key = child_key if child_key else parent_key
if parent_key and child_key:
key = parent_key + '.' + key
return key
if __name__ == "__main__":
variable_keys = get_variables({replace_with_your_template directory}, {replace_with_your_template_file})
print(*variable_keys, sep='\n')
A:
Based on @Kracekumar's answer, but for the simplest use-case of just extracting tokens from a template passed as a string argument with no loading semantics or filter overrides:
env = jinja2.Environment()
parsed_content = env.parse(template_source)
tokens = jinja2.meta.find_undeclared_variables(parsed_content)
tokens will be a set.
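A quick end-to-end check of that minimal form (the meta submodule must be imported explicitly); note that only top-level names are reported, so a nested access like user.name surfaces simply as user, and loop-bound names such as item are excluded:
import jinja2
from jinja2 import meta

env = jinja2.Environment()
ast = env.parse("Hello {{ user.name }}! {% for item in items %}{{ item }}{% endfor %}")
print(meta.find_undeclared_variables(ast))  # {'user', 'items'}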
A:
Why not regex?
I find it a lot easier to use regex:
import re
with open('templates/templatename.html') as f:
    variables = re.findall(r"\{\{\s(.*?)\s\}\}", f.read())
|
How to get list of all variables in jinja 2 templates
|
I am trying to get a list of all variables and blocks in a template. I don't want to create my own parser to find variables. I tried using the following snippet.
from jinja2 import Environment, PackageLoader
env = Environment(loader=PackageLoader('gummi', 'templates'))
template = env.get_template('chat.html')
template.blocks is a dict where the keys are blocks; how can I get all the variables inside the blocks?
|
[
"Since no one has answered the question and I found the answer\nfrom jinja2 import Environment, PackageLoader, meta\nenv = Environment(loader=PackageLoader('gummi', 'templates'))\ntemplate_source = env.loader.get_source(env, 'page_content.html')\nparsed_content = env.parse(template_source)\nmeta.find_undeclared_variables(parsed_content)\n\nThis will yield list of undeclared variables since this is not executed at run time, it will yield list of all variables.\nNote: This will yield html files which are included using include and extends.\n",
"I had the same need and I've written a tool called jinja2schema. It provides a heuristic algorithm for inferring types from Jinja2 templates and can also be used for getting a list of all template variables, including nested ones.\nHere is a short example of doing that:\n>>> import jinja2\n>>> import jinja2schema\n>>>\n>>> template = '''\n... {{ x }}\n... {% for y in ys %}\n... {{ y.nested_field_1 }}\n... {{ y.nested_field_2 }}\n... {% endfor %}\n... '''\n>>> variables = jinja2schema.infer(template)\n>>>\n>>> variables\n{'x': <scalar>,\n 'ys': [{'nested_field_1': <scalar>, 'nested_field_2': <scalar>}]}\n>>>\n>>> variables.keys()\n['x', 'ys']\n>>> variables['ys'].item.keys()\n['nested_field_2', 'nested_field_1']\n\n",
"For my pelican theme, i have created a tools for analyse all jinja variables in my templates files.\nI share my code\nThis script generate a sample configuration from all variables exists in template files and get a variables from my official pelicanconf.py\nThe function that extract all variables from template file\n\ndef get_variables(filename):\n env = Environment(loader=FileSystemLoader('templates'))\n template_source = env.loader.get_source(env, filename)[0]\n parsed_content = env.parse(template_source)\n\nThe complete script\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# use:\n# generate_pelicanconf-sample.py my_official_blog/pelicanconf.py\n\nimport sys\nimport imp\nimport os\n\nfrom jinja2 import Environment, FileSystemLoader, meta\n\n\n# Search all template files\ndef list_html_templates():\n dirList = os.listdir('templates')\n\n return dirList\n\n\n# get all variable in template file\ndef get_variables(filename):\n env = Environment(loader=FileSystemLoader('templates'))\n template_source = env.loader.get_source(env, filename)[0]\n parsed_content = env.parse(template_source)\n\n return meta.find_undeclared_variables(parsed_content)\n\n\n# Check if the pelicanconf.py is in param\nif len(sys.argv) != 2:\n print(\"Please indicate the pelicanconf.py file\")\n sys.exit()\n\n# Get all vars from templates files\nall_vars = set()\nfiles = list_html_templates()\nfor fname in files:\n variables = get_variables(fname)\n for var in variables:\n if var.isupper():\n all_vars.add(var)\n\nm = imp.load_source('pelicanconf', sys.argv[1])\n\n# Show pelicanconf.py vars content\nfor var in all_vars:\n varname = 'm.%s' % var\n if var in m.__dict__:\n print (\"%s = %s\" % (var, repr(m.__dict__[var])))\n\n\n return meta.find_undeclared_variables(parsed_content)\n\nThe sample result of this program\nLINKS = ((u'Home', u'/'), (u'archives', u'/archives.html'), (u'tags', u'/tags.html'), (u'A propos', u'http://bruno.adele.im'))\nSITESUBTITLE = u'Une famille compl\\xe8tement 633<'\nDEFAULT_LANG = u'fr'\nSITEURL = u'http://blog.jesuislibre.org'\nAUTHOR = u'Bruno Adel\\xe9'\nSITENAME = u'Famille de geeks'\nSOCIAL = ((u'adele', u'http://adele.im'), (u'feed', u'http://feeds.feedburner.com/FamilleDeGeek'), (u'twitter', u'http://twitter.com/jesuislibre.org'), (u'google+', u'https://plus.google.com/100723270029692582967'), (u'blog', u'http://blog.jesuislibre.org'), (u'facebook', u'http://www.facebook.com/bruno.adele'), (u'flickr', u'http://www.flickr.com/photos/b_adele'), (u'linkedin', u'http://fr.linkedin.com/in/brunoadele'))\nFEED_DOMAIN = u'http://blog.jesuislibre.org'\nFEED_ALL_ATOM = u'feed.atom'\nDISQUS_SITENAME = u'blogdejesuislibreorg'\nDEFAULT_PAGINATION = 10\nGITHUB_BLOG_SITE = u'https://github.com/badele/blog.jesuislibre.org'\n\nFor more détail of this script see https://github.com/badele/pelican-theme-jesuislibre\n",
"For me jinja2.meta.find_undeclared_variables(parsed_content) is not a good fit because it does not provide nested variables.\njinja2schema tool was kinda ok for simple scenarios but with all the loops and other jinja2 dark powers it was failing with errors.\nI have played around with jinja2 data structures and was able to get all variables including nested ones. For my use case this was enough. Maybe this will also help for somebody else :)\nHere is the code:\nfrom jinja2 import Environment, FileSystemLoader, nodes\n\n\ndef get_variables(path, filename):\n template_variables = set()\n env = Environment(loader=FileSystemLoader(searchpath=path))\n template_source = env.loader.get_source(env, filename)[0]\n parsed_content = env.parse(template_source)\n if parsed_content.body and hasattr(parsed_content.body[0], 'nodes'):\n for variable in parsed_content.body[0].nodes:\n if type(variable) is nodes.Name or type(variable) is nodes.Getattr:\n parsed_variable = parse_jinja_variable(variable)\n if parsed_variable:\n template_variables.add(parsed_variable)\n\n return template_variables\n\n\ndef parse_jinja_variable(variable, suffix=''):\n if type(variable) is nodes.Name:\n variable_key = join_keys(variable.name, suffix)\n return variable_key\n elif type(variable) is nodes.Getattr:\n return parse_jinja_variable(variable.node, join_keys(variable.attr, suffix))\n\n\ndef join_keys(parent_key, child_key):\n key = child_key if child_key else parent_key\n if parent_key and child_key:\n key = parent_key + '.' + key\n return key\n\n\nif __name__ == \"__main__\":\n variable_keys = get_variables({replace_with_your_template directory}, {replace_with_your_template_file})\n print(*variable_keys, sep='\\n')\n\n\n\n",
"Based on @Kracekumar's answer, but for the simplest use-case of just extracting tokens from a template passed as a string argument with no loading semantics or filter overrides:\nenv = jinja2.Environment()\nparsed_content = env.parse(template_source)\ntokens = jinja2.meta.find_undeclared_variables(parsed_content)\n\ntokens will be a set.\n",
"Why not regex?\nIf find it a lot easier to use regex:\nimport re\nwith open('templates/templatename.html') as f:\n variables = re.findall(\"\\{\\{\\s(.*?)\\s\\}\\}\", f.read())\n\n"
] |
[
76,
14,
7,
2,
1,
0
] |
[] |
[] |
[
"jinja2",
"python",
"template_variables"
] |
stackoverflow_0008260490_jinja2_python_template_variables.txt
|
Q:
How to add a matrix read from a .txt file to the travelling salesman problem code?
I'm new to Python and need your help.
I'm trying to read a matrix from a .txt file and add it to the travelling salesman problem code. Can you explain what I am doing wrong?
Input.txt looks like this:
Place; date1;date2;date3
#1;65;27;16
#2;46;56;11
#3;36;14;28
script
import csv
f= open("input1.txt","r")
sum=(1 for line in open("input1.txt","r"))
print (sum)
def Min(lst, myindex):
return min(x for idx, x in enumerate(lst) if idx != myindex)
def Delete(matrix, index1, index2):
del matrix[index1]
for i in matrix:
del i[index2]
return matrix
n = sum
matrix = []
H = 0
PathLenght = 0
Str = []
Stb = []
res = []
result = []
StartMatrix = []
for i in range(n):
Str.append(i)
Stb.append(i)
for i in range(n):
matrix.append(map(float, row) for row in
csv.reader(f,delimiter=";"))
print (matrix)
for i in range(n):
StartMatrix.append(matrix[i].copy())
for i in range(1,n):
matrix[i][i] = float('inf')
while True:
for i in range(1,len(matrix)):
temp = min(matrix[i])
H += temp
for j in range(1,len(matrix)):
matrix[i][j] -= temp
for i in range(1,len(matrix)):
temp = min(row[i] for row in matrix)
H += temp
for j in range(1,len(matrix)):
matrix[j][i] -= temp
NullMax = 0
index1 = 0
index2 = 0
tmp = 0
for i in range(1,len(matrix)):
for j in range(1,len(matrix)):
if matrix[i][j] == 0:
tmp = Min(matrix[i], j)+Min((row[j] for row in matrix), i)
if tmp >= NullMax:
NullMax = tmp
index1 = i
index2 = j
res.append(Str[index1]+1)
res.append(Stb[index2]+1)
oldIndex1 = Str[index1]
oldIndex2 = Stb[index2]
if oldIndex2 in Str and oldIndex1 in Stb:
NewIndex1 = Str.index(oldIndex2)
NewIndex2 = Stb.index(oldIndex1)
matrix[NewIndex1][NewIndex2] = float('inf')
del Str[index1]
del Stb[index2]
matrix = Delete(matrix, index1, index2)
while 1:
if len(matrix) == 1:
break
for i in range(1, len(res)-1, 2):
if res.count(res[i]) < 2:
result.append(res[i])
result.append(res[i+1])
for i in range(1, len(res)-1, 2):
for j in range(1, len(res)-1, 2):
if result[len(result)-1] == res[j]:
result.append(res[j])
result.append(res[j+1])
print(result)
for i in range(1, len(result)-1, 2):
if i == len(result)-2:
PathLenght += StartMatrix[result[i]-1][result[i+1]-1]
PathLenght += StartMatrix[result[i+1]-1][result[1]-1]
else:
PathLenght += StartMatrix[result[i]-1][result[i+1]-1]
print(PathLenght)
input()
When I try to check it, it shows:
<generator object at 0x038341B0>
[<generator object at 0x038341B0>,<generator object at 0x038341B0>,<generator object at 0x038341B0>,<generator object at 0x038341B0>]
A:
I recommend using pandas to read CSV files (don't forget to set the non-standard separator).
import pandas as pd

df = pd.read_csv(file_name, sep=";")
Here's an answer for your question:
https://realpython.com/introduction-to-python-generators/
You need to create the generator before you use it.
# definition of the generator
def my_gen():
    for i in range(5):
        yield i

# creating the generator
gen = my_gen()

# method 1 - yields until the generator finishes
for num in gen:
    pass  # do something with num

# method 2 - yields only once (call it on a fresh, unexhausted generator)
next(my_gen())
EDIT
Some python tips
use snake_case style:
PathLenght += StartMatrix(...) -> path_length += start_matrix(...)
use the extend method instead of multiple appends:
list_0.extend([item_0, item_1])
use negative indices:
my_list[len(my_list)-1] <=> my_list[-1] (this also works for -2, -3, etc.)
another way to copy a list:
my_list.copy() <=> my_list[:] (a full slice returns a shallow copy of the list)
you can use syntax like this:
var0 = var1 = var2 = 0
Every variable above gets the value 0, but don't use this pattern when you want to create lists or other mutable objects. (https://medium.com/@meghamohan/mutable-and-immutable-side-of-python-c2145cf72747)
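Applied to the question's input file, a minimal sketch of the pandas route (assuming the first column holds the place labels and every remaining column is part of the numeric matrix):
import pandas as pd

df = pd.read_csv('input1.txt', sep=';')
matrix = df.iloc[:, 1:].astype(float).values.tolist()  # drop the Place column
n = len(matrix)  # row count, instead of the unused generator-based sum
print(n)
print(matrix)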
|
How to add a matrix read from a .txt file to the travelling salesman problem code?
|
I'm new to Python and need your help.
I'm trying to read a matrix from a .txt file and add it to the travelling salesman problem code. Can you explain what I am doing wrong?
Input.txt looks like this:
Place; date1;date2;date3
#1;65;27;16
#2;46;56;11
#3;36;14;28
script
import csv
f= open("input1.txt","r")
sum=(1 for line in open("input1.txt","r"))
print (sum)
def Min(lst, myindex):
return min(x for idx, x in enumerate(lst) if idx != myindex)
def Delete(matrix, index1, index2):
del matrix[index1]
for i in matrix:
del i[index2]
return matrix
n = sum
matrix = []
H = 0
PathLenght = 0
Str = []
Stb = []
res = []
result = []
StartMatrix = []
for i in range(n):
Str.append(i)
Stb.append(i)
for i in range(n):
matrix.append(map(float, row) for row in
csv.reader(f,delimiter=";"))
print (matrix)
for i in range(n):
StartMatrix.append(matrix[i].copy())
for i in range(1,n):
matrix[i][i] = float('inf')
while True:
for i in range(1,len(matrix)):
temp = min(matrix[i])
H += temp
for j in range(1,len(matrix)):
matrix[i][j] -= temp
for i in range(1,len(matrix)):
temp = min(row[i] for row in matrix)
H += temp
for j in range(1,len(matrix)):
matrix[j][i] -= temp
NullMax = 0
index1 = 0
index2 = 0
tmp = 0
for i in range(1,len(matrix)):
for j in range(1,len(matrix)):
if matrix[i][j] == 0:
tmp = Min(matrix[i], j)+Min((row[j] for row in matrix), i)
if tmp >= NullMax:
NullMax = tmp
index1 = i
index2 = j
res.append(Str[index1]+1)
res.append(Stb[index2]+1)
oldIndex1 = Str[index1]
oldIndex2 = Stb[index2]
if oldIndex2 in Str and oldIndex1 in Stb:
NewIndex1 = Str.index(oldIndex2)
NewIndex2 = Stb.index(oldIndex1)
matrix[NewIndex1][NewIndex2] = float('inf')
del Str[index1]
del Stb[index2]
matrix = Delete(matrix, index1, index2)
while 1:
if len(matrix) == 1:
break
for i in range(1, len(res)-1, 2):
if res.count(res[i]) < 2:
result.append(res[i])
result.append(res[i+1])
for i in range(1, len(res)-1, 2):
for j in range(1, len(res)-1, 2):
if result[len(result)-1] == res[j]:
result.append(res[j])
result.append(res[j+1])
print(result)
for i in range(1, len(result)-1, 2):
if i == len(result)-2:
PathLenght += StartMatrix[result[i]-1][result[i+1]-1]
PathLenght += StartMatrix[result[i+1]-1][result[1]-1]
else:
PathLenght += StartMatrix[result[i]-1][result[i+1]-1]
print(PathLenght)
input()
When I try to check it, it shows:
<generator object at 0x038341B0>
[<generator object at 0x038341B0>,<generator object at 0x038341B0>,<generator object at 0x038341B0>,<generator object at 0x038341B0>]
|
[
"I recommend use pandas to read csv files (dont forget set non-standard separator).\ndf = pd.read_csv(file_name, sep=\";\")\nhere's answer for your question:\nhttps://realpython.com/introduction-to-python-generators/\nYou need to create generator before u will use it.\n# def of generator\ndef my_gen():\n i = 0\n for i in range(5):\n yield i\n i+=1\n# creating generator\ngen = my_gen()\n\n# method 1 - yields until the generator finishes \nfor num in gen:\n # do something \n\n# method 2 - yields only once\nnext(gen)\n\nEDIT\nSome python tips\n\nuse snake_case style:\nPathLenght += StartMatrix(...) -> path_lenght += start_matrix(...)\n\nuse extend method instead of multiple append:\nlist_0.extend([item_0, item_1])\n\nuse negative index:\nmy_list[len(my_list)-1] <=> my_list[-1] (it also work for -2, -3 etc.)\n\nother way to copy list:\nmy_list.copy() <=> my_list[:] (returns sublist from first to last element)\n\nu can use syntax like this:\nvar0 = var1 = var2 = 0\nevery above variable has value 0 but dont use it when u want to create some lists and other mutable objects. (https://medium.com/@meghamohan/mutable-and-immutable-side-of-python-c2145cf72747)\n\n\n"
] |
[
1
] |
[] |
[] |
[
"python",
"python_3.x"
] |
stackoverflow_0074520743_python_python_3.x.txt
|
Q:
Pandas check if two columns can be considered the composite key of the dataframe
A sample dataframe:
data = {
"col_A": ["a","a","b","c"],
"col_B": [1, 2, 2, 3],
"col_C": ["demo", "demo", "demo", "demo"]
}
df = pd.DataFrame(data)
Dataframe
col_A col_B col_C
a 1 demo
a 2 demo
b 2 demo
c 3 demo
I can easily check if all values in col_A are unique or not by df['col_A'].is_unique.
Is there any way to check for two columns i.e. something like df['col_A', 'col_B'].is_unique
If col_A and col_B are the composite key of the data frame or not?
A:
You can set all columns that should be included in the composite key as index and then check for is_unique on the index.
df.set_index(['col_A', 'col_B']).index.is_unique
#True
A:
Use DataFrame.duplicated with Series.any()
not df[['col_A', 'col_B']].duplicated().any()
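As a quick sanity check of both answers on the sample frame (a minimal sketch; both expressions evaluate to True here, so ('col_A', 'col_B') is a valid composite key):
import pandas as pd

df = pd.DataFrame({
    "col_A": ["a", "a", "b", "c"],
    "col_B": [1, 2, 2, 3],
    "col_C": ["demo", "demo", "demo", "demo"],
})

# approach 1: build a multi-index and test its uniqueness
print(df.set_index(['col_A', 'col_B']).index.is_unique)   # True

# approach 2: no duplicated (col_A, col_B) pairs
print(not df[['col_A', 'col_B']].duplicated().any())      # True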
|
Pandas check if two columns can be considered the composite key of the dataframe
|
A sample dataframe:
data = {
"col_A": ["a","a","b","c"],
"col_B": [1, 2, 2, 3],
"col_C": ["demo", "demo", "demo", "demo"]
}
df = pd.DataFrame(data)
Dataframe
col_A col_B col_C
a 1 demo
a 2 demo
b 2 demo
c 3 demo
I can easily check if all values in col_A are unique or not by df['col_A'].is_unique.
Is there any way to check for two columns i.e. something like df['col_A', 'col_B'].is_unique
If col_A and col_B are the composite key of the data frame or not?
|
[
"You can set all columns that should be included in the composite key as index and then check for is_unique on the index.\ndf.set_index(['col_A', 'col_B']).index.is_unique\n\n#True\n\n",
"Use DataFrame.duplicated with Series.any()\nnot df[['col_A', 'col_B']].duplicated().any()\n\n"
] |
[
2,
1
] |
[] |
[] |
[
"dataframe",
"pandas",
"python"
] |
stackoverflow_0074520885_dataframe_pandas_python.txt
|
Q:
Save console output in txt file as it happens
I want to save my console output in a text file, but I want it written as it happens, so that if the program crashes the logs are still saved.
Do you have any ideas?
I can't just specify a file in the logger because I have a lot of different loggers that are printing to the console.
A:
I think that you indeed can use a logger, just adding a file handler, from the logging module you can read this
As an example you can use something like this, which logs both to the terminal and to a file:
import logging
from pathlib import Path
root_path = <YOUR PATH>
log_level = logging.DEBUG
# Print to the terminal
logging.root.setLevel(log_level)
formatter = logging.Formatter("%(asctime)s | %(levelname)s | %(message)s", "%Y-%m-%d %H:%M:%S")
stream = logging.StreamHandler()
stream.setLevel(log_level)
stream.setFormatter(formatter)
log = logging.getLogger("pythonConfig")
if not log.hasHandlers():
log.setLevel(log_level)
log.addHandler(stream)
# file handler:
file_handler = logging.FileHandler(Path(root_path / "process.log"), mode="w")
file_handler.setLevel(log_level)
file_handler.setFormatter(formatter)
log.addHandler(file_handler)
log.info("test")
If you have multiple loggers, you can still use this solution, since loggers can inherit from one another: just put the handler in the root logger and ensure that the others take the handler from that one.
As an alternative you can use nohup command which will keep the process running even if the terminal closes and will return the outputs to the desired location:
nohup python main.py & > log_file.out &
A:
There are literally many ways to do this. However, they are not all suitable for different reasons (maintainability, ease of use, reinvent the wheel, etc.).
If you don't mind using your operating system built-ins you can:
forward standard output and error streams to a file of your choice with python3 -u ./myscript.py > outputfile.txt 2>&1.
forward standard output and error streams to a file of your choice AND display them in the console too with python3 -u ./myscript.py 2>&1 | tee outputfile.txt. The -u option makes the output unbuffered (i.e. whatever is put in the pipe goes out immediately).
If you want to do it from the Python side you can:
use the logging module to output the generated logs to a file handle instead of the standard output.
override the stdout and stderr streams defined in sys (sys.stdout and sys.stderr) so that they point to an opened file handle of your choice. For instance sys.stdout = open("log-stdout.txt", "w").
As a personal preference: the simpler, the better. The logging module is made for the purpose of logging and provides all the necessary mechanisms to achieve what you want, so I would suggest that you stick with it. Here is a link to the logging module documentation, which also provides many examples from simple use to more complex and advanced use.
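Since the question mentions many different loggers, one common pattern (a minimal sketch, not the exact setup above) is to attach both handlers to the root logger with logging.basicConfig, so every logger that propagates to the root writes to the console and to the file as soon as each record is emitted:
import logging
import sys

logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s | %(name)s | %(levelname)s | %(message)s",
    handlers=[
        logging.StreamHandler(sys.stdout),              # keep printing to the console
        logging.FileHandler("process.log", mode="w"),   # and write to disk as it happens
    ],
)

logging.getLogger("some.module").info("this line ends up in both places")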
|
Save console output in txt file as it happens
|
I want to save my console output in a text file, but I want it written as it happens, so that if the program crashes the logs are still saved.
Do you have any ideas?
I can't just specify a file in the logger because I have a lot of different loggers that are printing to the console.
|
[
"I think that you indeed can use a logger, just adding a file handler, from the logging module you can read this\nAs an example you can use something like this, which logs both to the terminal and to a file:\nimport logging\nfrom pathlib import Path\n\nroot_path = <YOUR PATH>\n\nlog_level = logging.DEBUG\n\n# Print to the terminal\nlogging.root.setLevel(log_level)\nformatter = logging.Formatter(\"%(asctime)s | %(levelname)s | %(message)s\", \"%Y-%m-%d %H:%M:%S\")\nstream = logging.StreamHandler()\nstream.setLevel(log_level)\nstream.setFormatter(formatter)\nlog = logging.getLogger(\"pythonConfig\")\nif not log.hasHandlers():\n log.setLevel(log_level)\n log.addHandler(stream)\n\n# file handler:\nfile_handler = logging.FileHandler(Path(root_path / \"process.log\"), mode=\"w\")\nfile_handler.setLevel(log_level)\nfile_handler.setFormatter(formatter)\nlog.addHandler(file_handler)\n\nlog.info(\"test\")\n\nIf you have multiple loggers, you can still use this solutions as loggers can inherit from other just put the handler in the root logger and ensure that the others take the handler from that one.\nAs an alternative you can use nohup command which will keep the process running even if the terminal closes and will return the outputs to the desired location:\nnohup python main.py & > log_file.out &\n\n",
"There are literally many ways to do this. However, they are not all suitable for different reasons (maintainability, ease of use, reinvent the wheel, etc.).\nIf you don't mind using your operating system built-ins you can:\n\nforward standard output and error streams to a file of your choice with python3 -u ./myscript.py 2>&1 outputfile.txt.\n\nforward standard output and error streams to a file of your choice AND display it to the console too with python3 -u ./myscript.py 2>&1 tee outputfile.txt. The -u option specifies the output is unbuffered (i.e.: whats put in the pipe goes immediately out).\n\n\nIf you want to do it from the Python side you can:\n\nuse the logging module to output the generated logs to a file handle instead of the standard output.\n\noverride the stdout and stderr streams defined in sys (sys.stdout and sys.stderr) so that they point to an opened file handle of your choice. For instance sys.stdout = open(\"log-stdout.txt\", \"w\").\n\n\nAs a personnal preference, the simpler, the better. The logging module is made for the purpose of logging and provides all the necessary mechanisms to achieve what you want. So.. I would suggest that you stick with it. Here is a link to the logging module documentation which also provides many examples from simple use to more complex and advanced use.\n"
] |
[
4,
3
] |
[] |
[] |
[
"python"
] |
stackoverflow_0074474093_python.txt
|
Q:
Create a new column A, where the value will be taken from a specific column based on the value in column C
I want to create a new column A (verbatim), where the value will be taken from a specific column based on the value in column C (category)
The data I have:
ID pos neg better_than_comp less_well_than_comp Category code
1 good service quick response price and range POSITIVE Satsfied
2 good service quick response price and range BETTER THAN COMP Speed
3 good service quick response price and range LESS WELL THAN COMP Cost
4 good service quick response price and range LESS WELL THAN COMP Choice
Desired output:
ID pos neg better_than_comp less_well_than_comp Category code verbatim
1 good service quick response price and range POSITIVE Satsfied good service
2 good service quick response price and range BETTER THAN COMP Speed quick response
3 good service quick response price and range LESS WELL THAN COMP Cost price and range
4 good service quick response price and range LESS WELL THAN COMP Choice price and range
I've tried something like:
df['verbatim']=df['Category'].apply(lambda x: x['better_than_comp'] if
x == 'BETTER THAN COMP'
else x['less_well_than_comp']
if x=='LESS WELL THAN COMP'
else x['pos'] if
x=='POSITIVE'
else x)
But I get an error: TypeError: string indices must be integers
The data is actually a melt of another dataset, if that matters, that's why the values are repeated in columns 1:5.
A:
You should use it like this:
df['verbatim']=df.apply(lambda x: x['better_than_comp'] if
x['Category'] == 'BETTER THAN COMP'
else x['less_well_than_comp']
if x['Category']=='LESS WELL THAN COMP'
else x['pos'] if
x['Category']=='POSITIVE'
else x['Category'],axis=1)
Another option np.select (much faster):
condlist=[df['Category']=='BETTER THAN COMP',df['Category']=='LESS WELL THAN COMP',df['Category']=='POSITIVE']
choicelist=[df['better_than_comp'],df['less_well_than_comp'],df['pos']]
df['verbatim']=np.select(condlist,choicelist)
|
Create a new column A, where the value will be taken from a specific column based on the value in column C
|
I want to create a new column A (verbatim), where the value will be taken from a specific column based on the value in column C (category)
The data I have:
ID pos neg better_than_comp less_well_than_comp Category code
1 good service quick response price and range POSITIVE Satsfied
2 good service quick response price and range BETTER THAN COMP Speed
3 good service quick response price and range LESS WELL THAN COMP Cost
4 good service quick response price and range LESS WELL THAN COMP Choice
Desired output:
ID pos neg better_than_comp less_well_than_comp Category code verbatim
1 good service quick response price and range POSITIVE Satsfied good service
2 good service quick response price and range BETTER THAN COMP Speed quick response
3 good service quick response price and range LESS WELL THAN COMP Cost price and range
4 good service quick response price and range LESS WELL THAN COMP Choice price and range
I've tried something like:
df['verbatim']=df['Category'].apply(lambda x: x['better_than_comp'] if
x == 'BETTER THAN COMP'
else x['less_well_than_comp']
if x=='LESS WELL THAN COMP'
else x['pos'] if
x=='POSITIVE'
else x)
But I get an error: TypeError: string indices must be integers
The data is actually a melt of another dataset, if that matters, that's why the values are repeated in columns 1:5.
|
[
"you should use it like this.\ndf['verbatim']=df.apply(lambda x: x['better_than_comp'] if \n x['Category'] == 'BETTER THAN COMP'\n else x['less_well_than_comp']\n if x['Category']=='LESS WELL THAN COMP'\n else x['pos'] if\n x['Category']=='POSITIVE'\n else x['Category'],axis=1)\n\nAnother option np.select (much faster):\ncondlist=[df['Category']=='BETTER THAN COMP',df['Category']=='LESS WELL THAN COMP',df['Category']=='POSITIVE']\nchoicelist=[df['better_than_comp'],df['less_well_than_comp'],df['pos']]\ndf['verbatim']=np.select(condlist,choicelist)\n\n"
] |
[
1
] |
[] |
[] |
[
"pandas",
"python"
] |
stackoverflow_0074520950_pandas_python.txt
|
Q:
Plotly Express Overlay Two Line Graphs
I know that it is easy to overlay plots using Plotly Go.
import plotly.graph_objects as go
fig = go.Figure()
fig.add_traces([go.Scatter(x=[1,2,3], y=[2,1,2]),
go.Scatter(x=[1,2,3], y=[2,1,2]),
go.Scatter(x=[1,2,3], y=[1,1,2])])
fig.show()
However, I would like to accomplish the same task using Plotly Express. Is there a way to accomplish such a task in Plotly Express?
A:
You can do it with add_traces
import pandas as pd
import numpy as np
import plotly.express as px
data = {'x':[1,2,3], 'y':range(3)}
df1 = pd.DataFrame(data)
data = {'x':[4,5,6], 'y':range(4,7)}
df2 = pd.DataFrame(data)
fig1 = px.line(df1, x='x', y='y', color_discrete_sequence=['red'])
fig2 = px.line(df2, x='x', y='y', labels='green', color_discrete_sequence=['green'])
fig1.add_traces(
list(fig2.select_traces())
)
name = ['red','green']
for i in range(len(fig1.data)):
fig1.data[i]['name'] = name[i]
fig1.data[i]['showlegend'] = True
fig1.show()
However, I prefer to use go plots, which are easier.
|
Plotly Express Overlay Two Line Graphs
|
I know that it is easy to overlay plots using Plotly Go.
import plotly.graph_objects as go
fig = go.Figure()
fig.add_traces([go.Scatter(x=[1,2,3], y=[2,1,2]),
go.Scatter(x=[1,2,3], y=[2,1,2]),
go.Scatter(x=[1,2,3], y=[1,1,2])])
fig.show()
However, I would like to accomplish the same task using Plotly Express. Is there a way to accomplish such a task in Plotly Express?
|
[
"You can do it with add_traces\nimport pandas as pd\nimport numpy as np\nimport plotly.express as px\n\ndata = {'x':[1,2,3], 'y':range(3)}\ndf1 = pd.DataFrame(data)\n\ndata = {'x':[4,5,6], 'y':range(4,7)}\ndf2 = pd.DataFrame(data)\n\nfig1 = px.line(df1, x='x', y='y', color_discrete_sequence=['red'])\n\nfig2 = px.line(df2, x='x', y='y', labels='green', color_discrete_sequence=['green'])\n\nfig1.add_traces(\n list(fig2.select_traces())\n)\n\nname = ['red','green']\n\nfor i in range(len(fig1.data)):\n fig1.data[i]['name'] = name[i]\n fig1.data[i]['showlegend'] = True\n \nfig1.show()\n\n\nHowever, I prefer to use go plots, which are easier.\n"
] |
[
1
] |
[] |
[] |
[
"plotly",
"plotly_dash",
"python"
] |
stackoverflow_0074520782_plotly_plotly_dash_python.txt
|
Q:
Creating a Utility Matrix from CSV File for Collaborative Filtering
I have a CSV File Output Like this,
I need to create a Utility Matrix like this,
r=df.User.unique()
df2 = pd.DataFrame(data=r)
With the above code I created the User part, but I'm stuck at creating the rating corresponding to each item for the users.
Is there any method in Python to do this?
A:
You can use Pandas pivot_table to create a utility matrix (see https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.pivot_table.html)
In your case, it'll be utilityMatrix = ratingTable.pivot_table(values='rating', index='userId', columns='itemID')
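A minimal sketch of what that produces on a small made-up ratings table (the userId/itemID/rating column names are assumptions, since the CSV layout in the question is not shown):
import pandas as pd

ratings = pd.DataFrame({
    "userId": [1, 1, 2, 3],
    "itemID": ["A", "B", "A", "C"],
    "rating": [5, 3, 4, 2],
})

# rows = users, columns = items, cells = ratings (NaN where a user never rated an item)
utility = ratings.pivot_table(values="rating", index="userId", columns="itemID")
print(utility)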
|
Creating a Utility Matrix from CSV File for Collaborative Filtering
|
I have a CSV File Output Like this,
I need to create a Utility Matrix like this,
r=df.User.unique()
df2 = pd.DataFrame(data=r)
With the above code I created the User part, but I'm stuck at creating the rating corresponding to each item for the users.
Is there any method in Python to do this?
|
[
"You can use Pandas pivot_table to create a utility matrix (see https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.pivot_table.html)\nIn your case, it'll be utilityMatrix = ratingTable.pivot_table(values='rating', index='userId', columns='itemID')\n"
] |
[
0
] |
[] |
[] |
[
"collaborative_filtering",
"csv",
"matrix",
"pandas",
"python"
] |
stackoverflow_0066454310_collaborative_filtering_csv_matrix_pandas_python.txt
|
Q:
Get subclass name?
Is it possible to get the name of a subclass? For example:
class Foo:
def bar(self):
print type(self)
class SubFoo(Foo):
pass
SubFoo().bar()
will print: < type 'instance' >
I'm looking for a way to get "SubFoo".
I know you can do isinstance, but I don't know the name of the class a priori, so that doesn't work for me.
A:
you can use
SubFoo().__class__.__name__
which might be off-topic, since it gives you a class name :)
A:
#!/usr/bin/python
class Foo(object):
def bar(self):
print type(self)
class SubFoo(Foo):
pass
SubFoo().bar()
Subclassing from object gives you new-style classes (which are not so new any more - python 2.2!) Anytime you want to work with the self attribute a lot you will get a lot more for your buck if you subclass from object. Python's docs ... new style classes. Historically Python left the old-style way Foo() for backward compatibility. But, this was a long time ago. There is not much reason anymore not to subclass from object.
A:
It works a lot better when you use new-style classes.
class Foo(object):
....
A:
SubFoo.__name__
And parents: [cls.__name__ for cls in SubFoo.__bases__]
A:
Just a reminder that this issue doesn't exist in Python 3.x and
print(type(self)) will give the more informative <class '__main__.SubFoo'> instead of bad old < type 'instance' >.
This is because object is subclassed automatically in Python 3.x, making every class a new-style class.
More discussion at What is the purpose of subclassing the class "object" in Python?.
Like others pointed out, to just get the subclass name do print(type(self).__name__).
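A minimal Python 3 sketch pulling the above together:
class Foo:
    def bar(self):
        # type(self) is the runtime class, so this reports the subclass name
        print(type(self).__name__)

class SubFoo(Foo):
    pass

SubFoo().bar()   # prints: SubFoo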
|
Get subclass name?
|
Is it possible to get the name of a subclass? For example:
class Foo:
def bar(self):
print type(self)
class SubFoo(Foo):
pass
SubFoo().bar()
will print: < type 'instance' >
I'm looking for a way to get "SubFoo".
I know you can do isinstance, but I don't know the name of the class a priori, so that doesn't work for me.
|
[
"you can use\nSubFoo().__class__.__name__\n\nwhich might be off-topic, since it gives you a class name :)\n",
"#!/usr/bin/python\nclass Foo(object):\n def bar(self):\n print type(self)\n\nclass SubFoo(Foo):\n pass\n\nSubFoo().bar()\n\nSubclassing from object gives you new-style classes (which are not so new any more - python 2.2!) Anytime you want to work with the self attribute a lot you will get a lot more for your buck if you subclass from object. Python's docs ... new style classes. Historically Python left the old-style way Foo() for backward compatibility. But, this was a long time ago. There is not much reason anymore not to subclass from object. \n",
"It works a lot better when you use new-style classes.\nclass Foo(object):\n ....\n\n",
"SubFoo.__name__\nAnd parents: [cls.__name__ for cls in SubFoo.__bases__]\n",
"Just a reminder that this issue doesn't exist in Python 3.x and\nprint(type(self)) will give the more informative <class '__main__.SubFoo'> instead of bad old < type 'instance' >.\nThis is because object is subclassed automatically in Python 3.x, making every class a new-style class.\nMore discussion at What is the purpose of subclassing the class \"object\" in Python?.\nLike others pointed out, to just get the subclass name do print(type(self).__name__).\n"
] |
[
16,
12,
3,
3,
0
] |
[] |
[] |
[
"python"
] |
stackoverflow_0003314627_python.txt
|
Q:
JWT encrypting payload in python? (JWE)
According to RFC 7516 it should be possible to encrypt the payload/claim, called JWE.
Are there any python libraries out there that support that?
I've checked PyJWT, python-jose and jwcrypto but they all just have examples for signing with HS256 (JWS).
Sorry if this is totally obvious, but when it comes to things involving crypto I'm extra cautious.
A:
Both Jose and jwcrypto libraries can do JWE.
For jose:
claims = {
'iss': 'http://www.example.com',
'sub': 42,
}
pubKey = {'k':\
'-----BEGIN PUBLIC KEY-----\n\
-----END PUBLIC KEY-----'
}
# decrypt on the other end using the private key
privKey = {'k':
'-----BEGIN RSA PRIVATE KEY-----\n'+\
'-----END RSA PRIVATE KEY-----'
}
encJwt = jose.encrypt(claims, pubKey)
serJwt = jose.serialize_compact(encJwt)
decJwt = jose.decrypt(jose.deserialize_compact(serJwt), privKey)
For jwcrypto:
# imports needed for this snippet (pubKey/privKey are jwcrypto jwk.JWK objects)
from jwcrypto import jwe
from jwcrypto.common import json_encode
# algorithm to use
eprot = {'alg': "RSA-OAEP", 'enc': "A128CBC-HS256"}
stringPayload = u'attack at dawn'
E = jwe.JWE(stringPayload, json_encode(eprot))
E.add_recipient(pubKey)
encrypted_token = E.serialize(compact=True)
E = jwe.JWE()
E.deserialize(encrypted_token, key=privKey)
decrypted_payload = E.payload
A:
I can add a new library to the above suggested libraries, named jwskate, as initials of "JSON Web Signing, Keys, Algorithms, Tokens, and Encryption". Disclaimer: I am the author of that lib. I wrote it because I was not satisfied with the APIs from the previous libs which are not Pythonic enough for my tastes.
Here is a usage example of how to encode/decode a JWE, in this particular case using ECDH-ES+A256KW and A128CBC-HS256, but obviously you can use any supported key management and encryption algorithm:
from jwskate import JweCompact, Jwk
plaintext = b"this is an example plaintext"
# I'll use this specific Elliptic Curve private key:
key = Jwk(
{'kid': '8-nLgBsa-vXI_geoGt061_ZiVZ8BB-hYBDSoOQj9QgI',
'alg': 'ECDH-ES+A256KW',
'crv': 'P-256',
'd': '39QMopTVL1u267FOx4ayvsueDU317vHaq_z-PU_NioA',
'kty': 'EC',
'x': 'f_VRZlIk1Qd2eNGFVas9sNXx9wd43L8VymknAyP5Ntk',
'y': 'NmsfCs5VVOk6FEE31aaN9jB8rlfz1MWolBC3af_8DGs'}
)
# alternatively, you can generate one like this:
random_key = Jwk.generate_for_alg("ECDH-ES+A256KW").with_kid_thumbprint()
# encrypt your JWE
jwe = JweCompact.encrypt(plaintext, jwk=key.public_jwk(), enc="A128CBC-HS256")
print(jwe)
# it will look like:
# eyJlcGsiOnsia3R5IjoiRUMiLCJjcnYiOiJQLTI1NiIsIngiOiItVnNfYkdSNTdUUVY4MHNnUENwcWZhVjNmWXR4dWdTWmJRM1FLeTJEVDdNIiwieSI6IjBtc0pZSUFfMC1OY2lfM0plOWZLSml3RU1ZdGRBaE9kZDZhdkp5THd0dzQifSwiYWxnIjoiRUNESC1FUytBMjU2S1ciLCJlbmMiOiJBMTI4Q0JDLUhTMjU2Iiwia2lkIjoiOC1uTGdCc2EtdlhJX2dlb0d0MDYxX1ppVlo4QkItaFlCRFNvT1FqOVFnSSJ9.nnOEhmdonA19LRvyKSrL7f8aEb2vVwE7EU-zO91fyTUls4otMVppYg.h8h7Mxz4irvckPnknsnM0g.sRQJJq-RmiF7GeqvL8EpWTstS-daLbfgGnOPybWeOj8.z3heCfTiI0cjw8GaV0qcHw
# as recipient, you can decrypt your JWE like this:
jwe = JweCompact("""eyJlcGsiOnsia3R5IjoiRUMiLCJjcnYiOiJQLTI1NiIsIngiOiJkSllwMHNTZUVhMnhiMkc4M2Jnam
1VNnp4OEFxTkZRVmlJclJXUnlJYURzIiwieSI6InJXcEZ0OENESGNkQXFoMVR2eG9BZTFCT3FfZ2I3RzJya0hVd0hhNldfV0kif
SwiYWxnIjoiRUNESC1FUytBMjU2S1ciLCJlbmMiOiJBMTI4Q0JDLUhTMjU2Iiwia2lkIjoiOC1uTGdCc2EtdlhJX2dlb0d0MDYx
X1ppVlo4QkItaFlCRFNvT1FqOVFnSSJ9.Nt89wpmYDZmbmjQCEZnZOygTOP5x2s7trvzLFehw1I_lMzTU-qlrcg.SQgJPG_WNUn
F13XnCJMAtw.wWwu_VUG7LPbsnWFTv-rAyiG84RW4tszR2fQ-AQaLBI.Onf3K4MSKhXaUrS8NpMDIA""")
assert jwe.decrypt(key) == plaintext
|
JWT encrypting payload in python? (JWE)
|
According to RFC 7516 it should be possible to encrypt the payload/claim, called JWE.
Are there any python libraries out there that support that?
I've checked PyJWT, python-jose and jwcrypto but they all just have examples for signing with HS256 (JWS).
Sorry if this is totally obvious, but when it comes to things involving crypto I'm extra cautious.
|
[
"Both Jose and jwcrypto libraries can do JWE.\nFor jose:\nclaims = {\n'iss': 'http://www.example.com',\n'sub': 42,\n}\npubKey = {'k':\\\n '-----BEGIN PUBLIC KEY-----\\n\\\n-----END PUBLIC KEY-----'\n }\n# decrypt on the other end using the private key\nprivKey = {'k': \n '-----BEGIN RSA PRIVATE KEY-----\\n'+\\\n'-----END RSA PRIVATE KEY-----'\n}\n\nencJwt = jose.encrypt(claims, pubKey)\nserJwt = jose.serialize_compact(encJwt)\ndecJwt = jose.decrypt(jose.deserialize_compact(serJwt), privKey)\n\n\nFor jwcrypto:\n# algorithm to use\neprot = {'alg': \"RSA-OAEP\", 'enc': \"A128CBC-HS256\"}\nstringPayload = u'attack at dawn'\nE = jwe.JWE(stringPayload, json_encode(eprot))\nE.add_recipient(pubKey)\nencrypted_token = E.serialize(compact=True)\nE = jwe.JWE()\nE.deserialize(encrypted_token, key=privKey)\ndecrypted_payload = E.payload\n\n",
"I can add a new library to the above suggested libraries, named jwskate, as initials of \"JSON Web Signing, Keys, Algorithms, Tokens, and Encryption\". Disclaimer: I am the author of that lib. I wrote it because I was not satisfied with the APIs from the previous libs which are not Pythonic enough for my tastes.\nHere is an usage example to encode/decode a JWE, in this particular case using ECDH-ES+A256KW and A128CBC-HS256, but obviously you can use any supported key management and encryption algorithm:\nfrom jwskate import JweCompact, Jwk\n\nplaintext = b\"this is an example plaintext\"\n\n# I'll use this specific Elliptic Curve private key:\nkey = Jwk(\n {'kid': '8-nLgBsa-vXI_geoGt061_ZiVZ8BB-hYBDSoOQj9QgI',\n 'alg': 'ECDH-ES+A256KW',\n 'crv': 'P-256',\n 'd': '39QMopTVL1u267FOx4ayvsueDU317vHaq_z-PU_NioA',\n 'kty': 'EC',\n 'x': 'f_VRZlIk1Qd2eNGFVas9sNXx9wd43L8VymknAyP5Ntk',\n 'y': 'NmsfCs5VVOk6FEE31aaN9jB8rlfz1MWolBC3af_8DGs'}\n)\n\n\n# alternatively, you can generate one like this:\nrandom_key = Jwk.generate_for_alg(\"ECDH-ES+A256KW\").with_kid_thumbprint()\n\n# sign your JWE\njwe = JweCompact.encrypt(plaintext, jwk=key.public_jwk(), enc=\"A128CBC-HS256\")\nprint(jwe)\n# it will look like: \n# eyJlcGsiOnsia3R5IjoiRUMiLCJjcnYiOiJQLTI1NiIsIngiOiItVnNfYkdSNTdUUVY4MHNnUENwcWZhVjNmWXR4dWdTWmJRM1FLeTJEVDdNIiwieSI6IjBtc0pZSUFfMC1OY2lfM0plOWZLSml3RU1ZdGRBaE9kZDZhdkp5THd0dzQifSwiYWxnIjoiRUNESC1FUytBMjU2S1ciLCJlbmMiOiJBMTI4Q0JDLUhTMjU2Iiwia2lkIjoiOC1uTGdCc2EtdlhJX2dlb0d0MDYxX1ppVlo4QkItaFlCRFNvT1FqOVFnSSJ9.nnOEhmdonA19LRvyKSrL7f8aEb2vVwE7EU-zO91fyTUls4otMVppYg.h8h7Mxz4irvckPnknsnM0g.sRQJJq-RmiF7GeqvL8EpWTstS-daLbfgGnOPybWeOj8.z3heCfTiI0cjw8GaV0qcHw\n\n\n# as recipient, you can decrypt your JWE like this:\njwe = JweCompact(\"\"\"eyJlcGsiOnsia3R5IjoiRUMiLCJjcnYiOiJQLTI1NiIsIngiOiJkSllwMHNTZUVhMnhiMkc4M2Jnam\n1VNnp4OEFxTkZRVmlJclJXUnlJYURzIiwieSI6InJXcEZ0OENESGNkQXFoMVR2eG9BZTFCT3FfZ2I3RzJya0hVd0hhNldfV0kif\nSwiYWxnIjoiRUNESC1FUytBMjU2S1ciLCJlbmMiOiJBMTI4Q0JDLUhTMjU2Iiwia2lkIjoiOC1uTGdCc2EtdlhJX2dlb0d0MDYx\nX1ppVlo4QkItaFlCRFNvT1FqOVFnSSJ9.Nt89wpmYDZmbmjQCEZnZOygTOP5x2s7trvzLFehw1I_lMzTU-qlrcg.SQgJPG_WNUn\nF13XnCJMAtw.wWwu_VUG7LPbsnWFTv-rAyiG84RW4tszR2fQ-AQaLBI.Onf3K4MSKhXaUrS8NpMDIA\"\"\")\nassert jwe.decrypt(key) == plaintext\n\n"
] |
[
13,
0
] |
[
"https://jwcrypto.readthedocs.io/en/latest/jwk.html#examples\nfrom jwcrypto import jwk\n_k = jwk.JWK.generate(kty='RSA', size=2048)\n_text = _k.export()\n\nimport json\n# loading the key back\n_import_key_dict = json.loads(_text)\nkey = jwk.JWK(**json.loads(_import_key_dict))\n\n\n"
] |
[
-1
] |
[
"jwe",
"jwt",
"pyjwt",
"python",
"python_jose"
] |
stackoverflow_0039163000_jwe_jwt_pyjwt_python_python_jose.txt
|
Q:
Selenium, Python, Chrome Driver -Send_Keys
could someone kindly point out to me where I'm going wrong please?
I've looked up the documentation and I thought I set it up correctly but keep getting the error:
line 29, in <module>
username.send_keys(cred_username)
^^^^^^^^^^^^^^^^^^
AttributeError: 'list' object has no attribute 'send_keys'
Currently I can:
Load and access the page
get past the security wall my browser throws up
load up the log in screen, but am not able to input my credentials.
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
cred_username = "username"
cred_password = "password"
s_1=Service("my driver location")
driver = webdriver.Chrome(service=s_1)
driver.get("https:website")
# Hurdle 01 Start : get past security step.
advanced = driver.find_element(By.ID, "details-button")
advanced.click()
proceed = driver.find_element(By.ID, "proceed-link")
proceed.click()
# Hurdle 01 Finish :
# Hurdle 02 Start : logging in
username = driver.find_elements(By.ID, "idUsername")
username.clear()
username.send_keys(cred_username)
password = driver.find_element(By.ID, "idPassword")
password.clear()
password.send_keys(cred_password)
password.send_keys(Keys.RETURN)
time.sleep(99)
I've tried setting it as a string but get the same error.
I've also tried a variation of "send_keys_to_element(element, *keys_to_send)"
I've also tried setting an xPATH but get the same results.
If anyone can point out where I'm going wrong or direct to a web page that explains where I'm going wrong I'd really appreciate it, thanks for having a look.
I thought what I had written would allow me to simply input my log-in details at this stage so I can then access a page which would allow me to upload a document. This is the only part of the process that requires input from a keyboard.
Update from answer:
Thanks for the answer [Prophet], I've removed the 's' as you suggested and tried using the XPath again, but now this error comes up. I feel I'm close, but there's something obvious I'm missing.
File "/Users/jace/Desktop/Filing_Cabinet/Python_Folder/my_phone_config01.py", line 27
username = driver.find_element(By.XPATH, "//input[@id="Username"]")
^^^^^^^^^^^^^^^^^^^^^^
SyntaxError: invalid syntax. Perhaps you forgot a comma?
[Finished in 25ms with exit code 1]
Update from comment:
picture of the inspection page
Resolved, Final update
Prophet had pointed out I may have needed to wait for the page to load fully, which is why I was having a hard time finding the element to send keys to.
Adding a wait allowed the element to actually load for my script to locate it.
# Step 03 Start : logging in
try:
username = WebDriverWait(driver, 5).until(
EC.presence_of_element_located((By.XPATH, "//input[@id='idUsername']"))
)
username.send_keys(cred_username)
password = WebDriverWait(driver, 5).until(
EC.presence_of_element_located((By.XPATH, "//input[@id='idPassword']"))
)
password.send_keys(cred_password)
password.send_keys(Keys.RETURN)
# Step 03 End : need to add wait commands/let the page load.
finally:
time.sleep(30)
A:
Your mistake is here: username = driver.find_elements(By.ID, "idUsername")
You need to use the find_element method, not find_elements, since find_element returns a web element object that you can call send_keys on, while find_elements returns a list of web elements, and you cannot call send_keys on a list.
UPD
As for your additional issue.
The failing XPath expression can be fixed as follows:
username = driver.find_element(By.XPATH, "//input[@id='Username']")
Or
username = driver.find_element(By.XPATH, '//input[@id="Username"]')
The rule is simple: if you enclose the string with ", the internal strings should be enclosed with ' and vice versa.
|
Selenium, Python, Chrome Driver -Send_Keys
|
could someone kindly point out to me where I'm going wrong please?
I've looked up the documentation and I thought I set it up correctly but keep getting the error:
line 29, in <module>
username.send_keys(cred_username)
^^^^^^^^^^^^^^^^^^
AttributeError: 'list' object has no attribute 'send_keys'
Currently I can:
Load and access the page
get past the security wall my browser throws up
load up the log in screen, but am not able to input my credentials.
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
cred_username = "username"
cred_password = "password"
s_1=Service("my driver location")
driver = webdriver.Chrome(service=s_1)
driver.get("https:website")
# Hurdle 01 Start : get past security step.
advanced = driver.find_element(By.ID, "details-button")
advanced.click()
proceed = driver.find_element(By.ID, "proceed-link")
proceed.click()
# Hurdle 01 Finish :
# Hurdle 02 Start : logging in
username = driver.find_elements(By.ID, "idUsername")
username.clear()
username.send_keys(cred_username)
password = driver.find_element(By.ID, "idPassword")
password.clear()
password.send_keys(cred_password)
password.send_keys(Keys.RETURN)
time.sleep(99)
I've tried setting it as a string but get the same error.
I've also tried a variation of "send_keys_to_element(element, *keys_to_send)"
I've also tried setting an xPATH but get the same results.
If anyone can point out where I'm going wrong or direct to a web page that explains where I'm going wrong I'd really appreciate it, thanks for having a look.
I thought what I had written would allow me to simply input my log-in details at this stage so I can then access a page which would allow me to upload a document. This is the only part of the process that requires input from a keyboard.
Update from answer:
Thanks for the answer [Prophet], I've removed the 's' as you suggested and tried using the XPath again, but now this error comes up. I feel I'm close, but there's something obvious I'm missing.
File "/Users/jace/Desktop/Filing_Cabinet/Python_Folder/my_phone_config01.py", line 27
username = driver.find_element(By.XPATH, "//input[@id="Username"]")
^^^^^^^^^^^^^^^^^^^^^^
SyntaxError: invalid syntax. Perhaps you forgot a comma?
[Finished in 25ms with exit code 1]
Update from comment:
picture of the inspection page
Resolved, Final update
Prophet had pointed out I may have needed to wait for the page to load fully, which is why I was having a hard time finding the element to send keys to.
Adding a wait allowed the element to actually load for my script to locate it.
# Step 03 Start : logging in
try:
username = WebDriverWait(driver, 5).until(
EC.presence_of_element_located((By.XPATH, "//input[@id='idUsername']"))
)
username.send_keys(cred_username)
password = WebDriverWait(driver, 5).until(
EC.presence_of_element_located((By.XPATH, "//input[@id='idPassword']"))
)
password.send_keys(cred_password)
password.send_keys(Keys.RETURN)
# Step 03 End : need to add wait commands/let the page load.
finally:
time.sleep(30)
|
[
"Your mistake is here: username = driver.find_elements(By.ID, \"idUsername\")\nYou need to use find_element method, not find_elements since find_element returns a web element object so you can apply send_keys method on it, while find_elements returns a list of web element and you can not apply send_keys method on a list.\nUPD\nAs about your additional issue.\nThe following XPath expression can be fixed as following:\nusername = driver.find_element(By.XPATH, \"//input[@id='Username']\")\n\nOr\nusername = driver.find_element(By.XPATH, '//input[@id=\"Username\"]')\n\nThe rule is simple: in case you enclose the string with \", the internal strings should be enclosed with ' and wise versa.\n"
] |
[
2
] |
[] |
[] |
[
"authentication",
"python",
"selenium",
"selenium_chromedriver",
"selenium_webdriver"
] |
stackoverflow_0074520997_authentication_python_selenium_selenium_chromedriver_selenium_webdriver.txt
|
Q:
Check if a pdf is signed or not
I would like to write a python script to check if a pdf is signed or not. After quite a bit of looking around, I saw that pyPDF2 helps extract text from pdf files, but I am not sure if it can be used to extract the signature details such as Public Key etc.
I did go through some of the open source packages like pyhanko and cryptography but I am a bit stuck as to how to do it.
I have not worked on encryptions or PDFs using python before. Could you please guide me on the best way possible to execute this?
Thanks and best regards,
Raghu
I tried using the cryptography package but was not quite sure how to extract the signature certificate from the PDF.
Adobe PDF offers export in PKCS7 and CER format. I would like to know how to do this using Python. This is in order to have a validation step for another process.
I'd also appreciate it if there are other easy and smart ways to check whether the PDF is signed or not.
A:
disclaimer: I am the author of borb the library used in this answer
Simply load the PDF using borb, get the DocumentInfo object, and call its has_signatures function.
from borb.pdf import PDF
from borb.pdf import Document
import typing
# read the PDF
doc: typing.Optional[Document] = None
with open("input.pdf", "rb") as fh:
doc = PDF.loads(fh)
# check whether anything has been read
# this may fail due to IO error
# or a corrupt PDF
assert doc is not None
# check whether signatures are in the PDF
doc.get_document_info().has_signatures()
|
Check if a pdf is signed or not
|
I would like to write a python script to check if a pdf is signed or not. After quite a bit of looking around, I saw that pyPDF2 helps extract text from pdf files, but I am not sure if it can be used to extract the signature details such as Public Key etc.
I did go through some of the open source packages like pyhanko and cryptography but I am a bit stuck as to how to do it.
I have not worked on encryptions or PDFs using python before. Could you please guide me on the best way possible to execute this?
Thanks and best regards,
Raghu
I tried using the cryptography package but was not quite sure how to extract the signature certificate from the PDF.
Adobe PDF offers export in PKCS7 and CER format. I would like to know how to do this using Python. This is in order to have a validation step for another process.
I'd also appreciate it if there are other easy and smart ways to check whether the PDF is signed or not.
|
[
"disclaimer: I am the author of borb the library used in this answer\nSimply load the PDF using borb, get the DocumentInfo object, and call its has_signatures function.\nfrom borb.pdf import PDF\nfrom borb.pdf import Document\n\nimport typing\n\n# read the PDF\ndoc: typing.Optional[Document] = None\nwith open(\"input.pdf\", \"rb\") as fh:\n doc = PDF.loads(fh)\n\n# check whether anything has been read\n# this may fail due to IO error\n# or a corrupt PDF\nassert doc is not None\n\n# check whether signatures are in the PDF\ndoc.get_document_info().has_signatures()\n\n\n"
] |
[
0
] |
[] |
[] |
[
"encryption",
"pdf",
"python",
"python_3.x",
"signature"
] |
stackoverflow_0074513853_encryption_pdf_python_python_3.x_signature.txt
|
Q:
Powershell command to Python
Trying to get the below code to run, but I'm struggling to find a solution for the highlighted portion: it doesn't like the 'File' part of the command. Any help appreciated.
import subprocess
def getaclsec():
pscommand = '$file1 = Import-Csv -Path "C:\\Source\\testpath.csv" ForEach ($file in $file1) {$infoSec = Get-Acl -Path $file.FullPAth $infoSec.Access | Select @{l='File';e={$file.FullPath}},* | Export-Csv -Path "C:\\Source\\newTestPathSect.csv" -Append}'
process=subprocess.Popen(["powershell","& {" + pscommand + "}"],stdout=subprocess.PIPE);
getaclsec()
A:
Answer: the PowerShell statements need to be separated with semicolons, and the inner 'File' quotes must become double quotes so they don't terminate the single-quoted Python string:
import subprocess
def getaclsec():
pscommand = '$file1 = Import-Csv -Path "C:\\Source\\testpath.csv"; ForEach ($file in $file1) {$infoSec = Get-Acl -Path $file.FullPAth; $infoSec.Access | Select @{l="File";e={$file.FullPath}},* | Export-Csv -Path "C:\\Source\\newTestPathSect.csv" -Append}'
process=subprocess.Popen(["powershell","& {" + pscommand + "}"],stdout=subprocess.PIPE);
getaclsec()
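If you also want to see what PowerShell printed (errors in particular), read the pipes back after the call; a small, hypothetical variant of the function above that returns the output (the command string itself is elided here):
import subprocess

def getaclsec(pscommand):
    process = subprocess.Popen(
        ["powershell", "& {" + pscommand + "}"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    out, err = process.communicate()          # wait for PowerShell to finish
    if err:
        print(err.decode(errors="replace"))   # surface any PowerShell errors
    return out.decode(errors="replace")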
|
Powershell command to Python
|
Trying to get the below code to run, but I'm struggling to find a solution for the highlighted portion: it doesn't like the 'File' part of the command. Any help appreciated.
import subprocess
def getaclsec():
pscommand = '$file1 = Import-Csv -Path "C:\\Source\\testpath.csv" ForEach ($file in $file1) {$infoSec = Get-Acl -Path $file.FullPAth $infoSec.Access | Select @{l='File';e={$file.FullPath}},* | Export-Csv -Path "C:\\Source\\newTestPathSect.csv" -Append}'
process=subprocess.Popen(["powershell","& {" + pscommand + "}"],stdout=subprocess.PIPE);
getaclsec()
|
[
"Answer:\nimport subprocess \ndef getaclsec():\n pscommand = '$file1 = Import-Csv -Path \"C:\\\\Source\\\\testpath.csv\"; ForEach ($file in $file1) {$infoSec = Get-Acl -Path $file.FullPAth; $infoSec.Access | Select @{l=\"File\";e={$file.FullPath}},* | Export-Csv -Path \"C:\\\\Source\\\\newTestPathSect.csv\" -Append}'\n process=subprocess.Popen([\"powershell\",\"& {\" + pscommand + \"}\"],stdout=subprocess.PIPE);\ngetaclsec()\n\n"
] |
[
0
] |
[] |
[] |
[
"powershell",
"python"
] |
stackoverflow_0074494430_powershell_python.txt
|
Q:
How to make onchange field editable only for draft state?
I have onchange field, and i need to make it readonly for all state except the draft state.
My .py file:
class SaleOrderInherited(models.Model):
_inherit = 'sale.order'
custom_field = fields.Char(string='Test', store=True, default=randint(1, 1000)
)
@api.onchange('tax_totals_json', 'date_order')
def _onchage_test(self):
for record in self:
if int(json.loads(record.tax_totals_json)['amount_total']) == 0:
record.custom_field = randint(1, 1000)
else:
record.custom_field = f"{json.loads(record.tax_totals_json)['amount_total']} - {record.date_order}"
My .xml file:
<odoo>
<data>
<!--Inherit the sale order form view-->
<record id="view_sale_order_custom" model="ir.ui.view">
<field name="name">sale.order.custom.form.inherited</field>
<field name="model">sale.order</field>
<field name="inherit_id" ref="sale.view_order_form"/>
<field name="arch" type="xml">
<xpath expr="//field[@name='partner_id']" position="after">
<field name="custom_field"/>
</xpath>
</field>
</record>
</data>
</odoo>
I tried to use attrs="{'readonly':[('state','!=','draft')]}" in XML, and tried using
states={'draft': [('readonly', False)], 'sent': [('readonly', True)]} in Python. Both variations work on a Char field, but didn't give any result here.
A:
Set the readonly attribute to True then use states to make the field editable in draft state. You can find an example in sale_management module:
sale_order_template_id = fields.Many2one(
readonly=True, check_company=True,
states={'draft': [('readonly', False)], 'sent': [('readonly', False)]})
Using only attrs in XML arch should be enough to make the field read-only using a domain, the attrs you used should work (tested)
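Applied to the field from the question, that would look roughly like this (a sketch for Odoo 15; the randint default is kept as in the question):
custom_field = fields.Char(
    string='Test', store=True, default=randint(1, 1000),
    readonly=True,                            # read-only by default...
    states={'draft': [('readonly', False)]},  # ...but editable while the record is in draft
)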
|
How to make onchange field editable only for draft state?
|
I have an onchange field, and I need to make it read-only in all states except the draft state.
My .py file:
class SaleOrderInherited(models.Model):
_inherit = 'sale.order'
custom_field = fields.Char(string='Test', store=True, default=randint(1, 1000)
)
@api.onchange('tax_totals_json', 'date_order')
def _onchage_test(self):
for record in self:
if int(json.loads(record.tax_totals_json)['amount_total']) == 0:
record.custom_field = randint(1, 1000)
else:
record.custom_field = f"{json.loads(record.tax_totals_json)['amount_total']} - {record.date_order}"
My .xml file:
<odoo>
<data>
<!--Inherit the sale order form view-->
<record id="view_sale_order_custom" model="ir.ui.view">
<field name="name">sale.order.custom.form.inherited</field>
<field name="model">sale.order</field>
<field name="inherit_id" ref="sale.view_order_form"/>
<field name="arch" type="xml">
<xpath expr="//field[@name='partner_id']" position="after">
<field name="custom_field"/>
</xpath>
</field>
</record>
</data>
</odoo>
I tried to use attrs="{'readonly':[('state','!=','draft')]}" in XML, and tried using
states={'draft': [('readonly', False)], 'sent': [('readonly', True)]} in Python. Both variations work on a Char field, but didn't give any result here.
|
[
"Set the readonly attribute to True then use states to make the field editable in draft state. You can find an example in sale_management module:\nsale_order_template_id = fields.Many2one(\n readonly=True, check_company=True,\n states={'draft': [('readonly', False)], 'sent': [('readonly', False)]})\n\nUsing only attrs in XML arch should be enough to make the field read-only using a domain, the attrs you used should work (tested)\n"
] |
[
0
] |
[] |
[] |
[
"odoo",
"odoo_15",
"python",
"python_3.x"
] |
stackoverflow_0074520308_odoo_odoo_15_python_python_3.x.txt
|
Q:
Either no errors, yet no output or math domain error
I'm currently majoring in maths and minoring in physics and hence need to do some homework assignments in Python.
I never coded in Python (minuscule experience in Java) and never attended the Python classes assigned by uni (I know), and currently I'm trying to approximate pi by measuring the area under the graph of a circle in Python.
My problem is that, although no errors appear in the console and I'm calling the function, no output is displayed (when I put the return above the print statement).
When I put the return after the print statement, however, I get a math domain error;
I previously couldn't use x^2 when defining y in the halfCircle function and tried to solve it by using pow(x,2) instead.
Currently I only implemented the end result to be pi/2.
And I know that it's a bad approximation, since I only let the area pieces be under the graph.
If this code works I'll implement another function which does the same for area pieces which go over the graph and then calculate their average.
Then I'll multiply it by 2.
I'm currently confused about where to place the return statement and what's up with the math domain error.
This is my code:
import math
def halfCircle(x):
y = abs(math.sqrt(1-pow(x,2)))
return y
#N is the Number of pieces we try to approximate pi with, the higher N, the closer G will be to pi.
def areaUnderGraph(N):
#x1 is initialized to be -1 since our range starts at x0 = -1
x1 = float(-1)
#x2 will be set to 0 at the moment.
x2 = float(0)
#We use G as the final area under the graph and set it to 0 to begin with.
G = 0
#Now we need to know how thick our areas will be, [-1,1] is 2 long on the x axis, so we divide 2 by N:
d = 2/N
#Now we come to calculating our areas and adding them up until we hit 1 as the end of the given range.
while x2 <= 1:
#x2 will be x1 + d since we're adding from left to right.
x2 = x1 + d
#Since we need the smaller area pieces we calculate the corresponding y values for x1 and x2 so as to discern the smaller area value.
y2 = halfCircle(x2)
y1 = halfCircle(x1)
#The area is smaller, if y is smaller;
a1 = (x2 - x1) * y1
a2 = (x2 - x1) * y2
if a1 > a2:
a = a2
else:
#if a2 and a1 are equal it doesn't matter which value a takes (a1 or a2)
a = a1
#for the next step we let x1 become x2 since that will be the leftmost point of the next area piece.
x1 = x2
#We define G to be the area made up of all the previous areas and the newly calculated one.
G = G + a
print('The Area under the graph is ' + G)
return G
areaUnderGraph(100)
A:
math.sqrt() does not take a negative number. When I tried to use a negative value as a parameter, it showed the math domain error.
See this example:
import math
math.sqrt(-4)
Traceback (most recent call last):
File "/usr/lib/python3.10/code.py", line 90, in runcode
exec(code, self.locals)
File "<input>", line 1, in <module>
ValueError: math domain error
So, I changed the function to use abs before calling the sqrt method.
Updated code:
import math
def halfCircle(x):
y = math.sqrt(abs(1 - pow(x, 2)))
return y
# N is the Number of pieces we try to approximate pi with, the higher N, the closer G will be to pi.
def areaUnderGraph(N):
# x1 is initialized to be -1 since our range starts at x0 = -1
x1 = float(-1)
# x2 will be set to 0 at the moment.
x2 = float(0)
# We use G as the final area under the graph and set it to 0 to begin with.
G = 0
# Now we need to know how thick our areas will be, [-1,1] is 2 long on the x axis, so we divide 2 by N:
d = 2 / N
# Now we come to calculating our areas and adding them up until we hit 1 as the end of the given range.
while x2 <= 1:
# x2 will be x1 + d since we're adding from left to right.
x2 = x1 + d
# Since we need the smaller area pieces we calculate the corresponding y values for x1 and x2 so as to discern the smaller area value.
y2 = halfCircle(x2)
y1 = halfCircle(x1)
# The area is smaller, if y is smaller;
a1 = (x2 - x1) * y1
a2 = (x2 - x1) * y2
if a1 > a2:
a = a2
else:
# if a2 and a1 are equal it doesn't matter which value a takes (a1 or a2)
a = a1
# for the next step we let x1 become x2 since that will be the leftmost point of the next area piece.
x1 = x2
# We define G to be the area made up of all the previous areas and the newly calculated one.
G = G + a
return G
print(f'The Area under the graph is {areaUnderGraph(100)}')
Output:
The Area under the graph is 1.5491342564916826
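As a quick sanity check of the approximation (a sketch, calling the corrected areaUnderGraph from above): doubling the half-circle area should tend towards pi as N grows, since the full unit circle has area pi.
import math

for n in (100, 1000, 10000):
    print(n, 2 * areaUnderGraph(n), math.pi)
# the middle column creeps up towards 3.14159... as n increases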
|
Either no errors, yet no output or math domain error
|
I'm currently majoring in maths and minoring in physics and hence need to do some homework assignments in Python.
I never coded in Python (minuscule experience in Java) and never attended the Python classes assigned by uni (I know), and currently I'm trying to approximate pi by measuring the area under the graph of a circle in Python.
My problem is that, although no errors appear in the console and I'm calling the function, no output is displayed (when I put the return above the print statement).
When I put the return after the print statement, however, I get a math domain error;
I previously couldn't use x^2 when defining y in the halfCircle function and tried to solve it by using pow(x,2) instead.
Currently I only implemented the end result to be pi/2.
And I know that it's a bad approximation, since I only let the area pieces be under the graph.
If this code works I'll implement another function which does the same for area pieces which go over the graph and then calculate their average.
Then I'll multiply it by 2.
I'm currently confused about where to place the return statement and what's up with the math domain error.
This is my code:
import math
def halfCircle(x):
y = abs(math.sqrt(1-pow(x,2)))
return y
#N is the Number of pieces we try to approximate pi with, the higher N, the closer G will be to pi.
def areaUnderGraph(N):
#x1 is initialized to be -1 since our range starts at x0 = -1
x1 = float(-1)
#x2 will be set to 0 at the moment.
x2 = float(0)
#We use G as the final area under the graph and set it to 0 to begin with.
G = 0
#Now we need to know how thick our areas will be, [-1,1] is 2 long on the x axis, so we divide 2 by N:
d = 2/N
#Now we come to calculating our areas and adding them up until we hit 1 as the end of the given range.
while x2 <= 1:
#x2 will be x1 + d since we're adding from left to right.
x2 = x1 + d
#Since we need the smaller area pieces we calculate the corresponding y values for x1 and x2 so as to discern the smaller area value.
y2 = halfCircle(x2)
y1 = halfCircle(x1)
#The area is smaller, if y is smaller;
a1 = (x2 - x1) * y1
a2 = (x2 - x1) * y2
if a1 > a2:
a = a2
else:
#if a2 and a1 are equal it doesn't matter which value a takes (a1 or a2)
a = a1
#for the next step we let x1 become x2 since that will be the leftmost point of the next area piece.
x1 = x2
#We define G to be the area made up of all the previous areas and the newly calculated one.
G = G + a
print('The Area under the graph is ' + G)
return G
areaUnderGraph(100)
|
[
"math.sqrt() does not take a negative number. When I was trying to use a negative value as a parameter, it is showing the math domain error.\nSee this example:\nimport math\nmath.sqrt(-4)\nTraceback (most recent call last):\n File \"/usr/lib/python3.10/code.py\", line 90, in runcode\n exec(code, self.locals)\n File \"<input>\", line 1, in <module>\nValueError: math domain error\n\nSo, I changed the function to use abs before calling the sqrt method.\nUpdated code:\nimport math\n\n\ndef halfCircle(x):\n y = math.sqrt(abs(1 - pow(x, 2)))\n return y\n\n\n# N is the Number of pieces we try to approximate pi with, the higher N, the closer G will be to pi.\ndef areaUnderGraph(N):\n # x1 is initialized to be -1 since our range starts at x0 = -1\n x1 = float(-1)\n\n # x2 will be set to 0 at the moment.\n x2 = float(0)\n\n # We use G as the final area under the graph and set it to 0 to begin with.\n G = 0\n\n # Now we need to know how thick our areas will be, [-1,1] is 2 long on the x axis, so we divide 2 by N:\n d = 2 / N\n\n # Now we come to calculating our areas and adding them up until we hit 1 as the end of the given range.\n while x2 <= 1:\n # x2 will be x1 + d since we're adding from left to right.\n x2 = x1 + d\n\n # Since we need the smaller area pieces we calculate the corresponding y values for x1 and x2 so as to discern the smaller area value.\n y2 = halfCircle(x2)\n y1 = halfCircle(x1)\n\n # The area is smaller, if y is smaller;\n a1 = (x2 - x1) * y1\n a2 = (x2 - x1) * y2\n\n if a1 > a2:\n a = a2\n else:\n # if a2 and a1 are equal it doesn't matter wich value a takes (a1 or a2)\n a = a1\n # for the next step we let x1 become x2 since that will be the leftmost point of the next area piece.\n x1 = x2\n\n # We define G to be the area made up of all the previous areas and the newly calculated one.\n G = G + a\n\n return G\n\n\nprint(f'The Area under the graph is {areaUnderGraph(100)}')\n\nOutput:\nThe Area under the graph is 1.5491342564916826\n\n"
] |
[
0
] |
[] |
[] |
[
"math",
"output",
"python"
] |
stackoverflow_0074520954_math_output_python.txt
|
Q:
Getting error ImportError: No module named slack_sdk.webhook
I'm very new to Python. I'm using PyCharm and a Python virtual environment, and the following is a piece of import code which is throwing an error. I checked my requirements.txt file and it has the Slack library configured, then I ran pip3 install -r requirements.txt and executed my file with python .py, but I'm getting the below error:-
Traceback (most recent call last):
File "send_slack_notification.py", line 8, in <module>
from slack_sdk.webhook import WebhookClient
ImportError: No module named slack_sdk.webhook
This is my code chunk:-
import argparse
import itertools
import json
import os
from slack_sdk.webhook import WebhookClient
from sonarqube import SonarQubeClient
from jinja2 import Environment, FileSystemLoader
requirements.txt:-
slack_sdk==3.19.4
Jinja2==3.1.2
python-sonarqube-api==1.3.0
python-gitlab==2.5.0
Please advise what mistake I'm making so that I can learn from this, thanks.
A:
Interpreter
As you can see in this picture, the problem is probably here. When you create a new environment you need to choose that environment's python.exe as the interpreter inside PyCharm, because that is where you installed your libraries. So...
Maybe you activated the virtual environment in the console and installed the correct libraries into it, but PyCharm is running your project with the interpreter at C:....\Python.exe (the system-wide one).
Find the python.exe inside your environment's Scripts folder and select that path as the interpreter in PyCharm.
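A quick diagnostic (a small sketch, not part of the original script) to confirm which interpreter is actually running and whether slack_sdk is visible to it:
import sys
print(sys.executable)        # should point at your venv's python, not the system-wide one

import slack_sdk
print(slack_sdk.__file__)    # where the package was found; an ImportError here means the
                             # selected interpreter never had slack_sdk installed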
|
Getting error ImportError: No module named slack_sdk.webhook
|
I'm very new to Python. I'm using PyCharm and a Python virtual environment, and the following is a piece of import code which is throwing an error. I checked my requirements.txt file and it has the Slack library configured, then I ran pip3 install -r requirements.txt and executed my file with python .py, but I'm getting the below error:-
Traceback (most recent call last):
File "send_slack_notification.py", line 8, in <module>
from slack_sdk.webhook import WebhookClient
ImportError: No module named slack_sdk.webhook
This is my code chunk:-
import argparse
import itertools
import json
import os
from slack_sdk.webhook import WebhookClient
from sonarqube import SonarQubeClient
from jinja2 import Environment, FileSystemLoader
requirements.txt:-
slack_sdk==3.19.4
Jinja2==3.1.2
python-sonarqube-api==1.3.0
python-gitlab==2.5.0
Please advise what mistake I'm making so that I can learn from this, thanks.
|
[
"Interpreter\nlike you can see in this picture... maybe is here the problem. When you create a new environment you need to choose the new python.exe inside of PYCHARM where you installed you library's. So...\nMaybe you activate the Script on console, you are isntalling correct lib's in your env but you are opening your proyect pointing on C:....\\Python.exe.\nFind inside your env Script Folder with python.exe and select in Pycharm interpreter this Path.\n"
] |
[
1
] |
[] |
[] |
[
"python"
] |
stackoverflow_0074521006_python.txt
|
Q:
Trying to get a basic highcharts cylinder chart working on streamlit
I've been able to get a Highcharts waterfall chart and an area chart working on Streamlit, but the cylinder chart is not displaying anything. Would love it if someone could take a look at my code...
import streamlit as st
import streamlit_highcharts as hct
chart_week_day={
"chart": {
"type": "cylinder",
"options3d": {
"enabled": True,
"alpha": 8,
"beta": 15,
"depth": 50,
"viewDistance": 150
}
},
"title": {
"text": "Return by Weekday"
},
"xAxis": {
"categories": ["Monday","Tuesday","Wednesday","Thursday","Friday"],
"title": {
"text": ''
}
},
"yAxis": {
"title": {
"margin": 40,
"text": '$'
}
},
"plotOptions": {
"series": {
"depth": 300,
"colorByPoint": False,
"negativeColor": 'pink'
}
},
"series": [{
"data": [200,300,400,500,600],
"name": '$',
"showInLegend": False
}]
}
hct.streamlit_highcharts(chart_week_day,500,key="week_day")
A:
Series.data expects numbers, whereas xAxis.categories should be strings. Therefore, you only need to swap the categories and data values.
JS Demo:
https://jsfiddle.net/BlackLabel/qx3pzn1f/
API Reference:
https://api.highcharts.com/highcharts/series.cylinder.data
https://api.highcharts.com/highcharts/xAxis.categories
As @ferdy mentioned, don't forget to include the necessary modules:
<script src="https://code.highcharts.com/highcharts.js"></script>
<script src="https://code.highcharts.com/highcharts-3d.js"></script>
<script src="https://code.highcharts.com/modules/cylinder.js"></script>
|
Trying to get a basic highcharts cylinder chart working on streamlit
|
I've been able to get a Highcharts waterfall chart and an area chart working on Streamlit, but the cylinder chart is not displaying anything. Would love it if someone could take a look at my code...
import streamlit as st
import streamlit_highcharts as hct
chart_week_day={
"chart": {
"type": "cylinder",
"options3d": {
"enabled": True,
"alpha": 8,
"beta": 15,
"depth": 50,
"viewDistance": 150
}
},
"title": {
"text": "Return by Weekday"
},
"xAxis": {
"categories": ["Monday","Tuesday","Wednesday","Thursday","Friday"],
"title": {
"text": ''
}
},
"yAxis": {
"title": {
"margin": 40,
"text": '$'
}
},
"plotOptions": {
"series": {
"depth": 300,
"colorByPoint": False,
"negativeColor": 'pink'
}
},
"series": [{
"data": [200,300,400,500,600],
"name": '$',
"showInLegend": False
}]
}
hct.streamlit_highcharts(chart_week_day,500,key="week_day")
|
[
"Series.data expects numbers, whereasxAxis.categories should be strings. Therefore, you need to only swap categories and data values.\nJS Demo:\nhttps://jsfiddle.net/BlackLabel/qx3pzn1f/\nAPI Reference:\nhttps://api.highcharts.com/highcharts/series.cylinder.data\nhttps://api.highcharts.com/highcharts/xAxis.categories\nAs @ferdy mentioned, don't forget to include the necessary modules:\n<script src=\"https://code.highcharts.com/highcharts.js\"></script>\n<script src=\"https://code.highcharts.com/highcharts-3d.js\"></script>\n<script src=\"https://code.highcharts.com/modules/cylinder.js\"></script>\n\n"
] |
[
0
] |
[] |
[] |
[
"highcharts",
"python",
"streamlit"
] |
stackoverflow_0074499649_highcharts_python_streamlit.txt
|
Q:
Listing Kafka clusters and brokers
I'm trying to develop a Kafka GUI on Django. I can list topics of brokers, partitions and clients using kafka-python.
Is there a programmatic way to retrieve a list of clusters and brokers?
I can save clusters and related brokers as database tables as an alternative.
A:
Use ClusterMetadata.brokers()
You can only connect to one cluster at a time, so you need some other solution to find all Kafka clusters.
Alternatively, there's plenty of existing Kafka GUIs, most of which are built on JVM languages, however.
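As a rough sketch (my own, not from the answer above) of what that looks like with kafka-python's admin client — assuming a broker is reachable at localhost:9092 and that your kafka-python version (>= 2.0) provides describe_cluster():
from kafka import KafkaAdminClient

admin = KafkaAdminClient(bootstrap_servers="localhost:9092")
# Returns a dict describing the single cluster this client is connected to,
# including its broker list; exact key names can vary between versions.
print(admin.describe_cluster())
admin.close()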
|
Listing Kafka clusters and brokers
|
I try to develop a Kafka GUI on Django. I can list topics of brokers, partitions and clients using kafka-python.
Is a programmatic way to retrieve list of clusters and brokers?
I can save clusters and related brokers as database tables as an alternative.
|
[
"Use ClusterMetata.brokers()\nYou can only connect to one cluster at a time, so you need some other solution to find all Kafka clusters.\nAlternatively, there's plenty of existing Kafka GUIs, most of which are built on JVM languages, however.\n"
] |
[
1
] |
[] |
[] |
[
"apache_kafka",
"python"
] |
stackoverflow_0074519840_apache_kafka_python.txt
|
Q:
Saving Login with Playwright
I'm trying to save my login with Playwright. I've read the documentation and tried to implement it in my code, but I am still getting errors.
from playwright.sync_api import sync_playwright
with sync_playwright() as p:
browser = p.chromium.launch(headless=False, slow_mo=50)
page = context.new_page()
page.fill("input#input-username", "demo")
page.fill("input#input-password", "demo")
page.click("button[type=submit]")
context = browser.new_context(storage_state="website1.json")
storage = context.storage_state(path="website1.json")
page = context.new_page()
NameError: name 'context' is not defined
Super confused.
A:
The main problem:
context.new_page() is called before the variable is declared (context = ...)
This should work fine:
from playwright.sync_api import sync_playwright
with sync_playwright() as p:
browser = p.chromium.launch(headless=False, slow_mo=50)
context = browser.new_context(storage_state="website1.json")
page = context.new_page()
page.goto('https://demo.opencart.com/admin')
page.fill("input#input-username", "demo")
page.fill("input#input-password", "demo")
page.click("button[type=submit]")
You can also use persistent authentication:
from playwright.sync_api import sync_playwright
with sync_playwright() as p:
user_data_dir = 'FULL_PATH_TO_BROWSER_PROFILE'
browser = p.chromium.launch_persistent_context(user_data_dir, headless=False)
# login only 1 time...
JFYI: opencart uses user_token in GET parameters and validates it. playwright (storage_state, user_data_dir etc.) should work fine. Example:
# run docker container with playwright
docker rm -fv example && docker run --name example -it mcr.microsoft.com/playwright/python:v1.27.1-focal bash
# create empty state
echo {} >> /tmp/state.json
# run python in interactive mode
python
# Python 3.8.10 (default, Jun 22 2022, 20:18:18)
# [GCC 9.4.0] on linux
# Type "help", "copyright", "credits" or "license" for more information.
Paste the next script:
from time import sleep
from urllib.parse import parse_qs
from playwright.sync_api import sync_playwright
with sync_playwright() as p:
# login
browser = p.chromium.launch()
context = browser.new_context(storage_state='/tmp/state.json')
page = context.new_page()
page.goto('https://demo.opencart.com/admin')
page.fill('input#input-username', 'demo')
page.fill('input#input-password', 'demo')
page.click('button[type=submit]')
sleep(5) # just wait for redirect - you can wait for a element...
page.context.storage_state(path='/tmp/state.json')
page.screenshot(path='/tmp/after_state_init.png')
# parse user_token for GET requests
parsed_url = parse_qs(page.url)
user_token = parsed_url['user_token'][0]
print(f'user token {user_token}')
page.close()
browser.close()
with sync_playwright() as p:
# open admin dashboard without login...
browser = p.chromium.launch()
context = browser.new_context()
page = browser.new_page(storage_state='/tmp/state.json')
page.goto(f'https://demo.opencart.com/admin/index.php?route=common/dashboard&user_token={user_token}')
sleep(5)
page.screenshot(path='/tmp/open_using_saved_state.png')
page.close()
browser.close()
Now open a new terminal and copy the screenshots from the docker container:
docker cp example:/tmp/after_state_init.png ./
docker cp example:/tmp/open_using_saved_state.png ./
You'll see that they are the same - admin dashboard works without login.
|
Saving Login with Playwright
|
I'm trying to save my login with playwright, I've read the documentation and tried to implement it into my code but I am still getting errors
from playwright.sync_api import sync_playwright
with sync_playwright() as p:
browser = p.chromium.launch(headless=False, slow_mo=50)
page = context.new_page()
page.fill("input#input-username", "demo")
page.fill("input#input-password", "demo")
page.click("button[type=submit]")
context = browser.new_context(storage_state="website1.json")
storage = context.storage_state(path="website1.json")
page = context.new_page()
NameError: name 'context' is not defined
Super confused.
|
[
"The main problem:\n\ncontext.new_page() calls before variable declaration(context = ...)\n\nShould works fine:\nfrom playwright.sync_api import sync_playwright\n\nwith sync_playwright() as p:\n browser = p.chromium.launch(headless=False, slow_mo=50)\n context = browser.new_context(storage_state=\"website1.json\")\n page = context.new_page()\n page.goto('https://demo.opencart.com/admin')\n page.fill(\"input#input-username\", \"demo\")\n page.fill(\"input#input-password\", \"demo\")\n page.click(\"button[type=submit]\")\n\nAlso you can use Persistent authentication:\nfrom playwright.sync_api import sync_playwright\n\nwith sync_playwright() as p:\n user_data_dir = 'FULL_PATH_TO_BROWSER_PROFILE'\n browser = p.chromium.launch_persistent_context(user_data_dir, headless=False)\n # login only 1 time...\n\nJFYI: opencart uses user_token in GET parameters and validates it. playwright(storage_state, user_data_dir etc) should works fine. Example:\n# run docker container with playwright\ndocker rm -fv example && docker run --name example -it mcr.microsoft.com/playwright/python:v1.27.1-focal bash\n# create empty state\necho {} >> /tmp/state.json\n# run python in interactive mode\npython\n# Python 3.8.10 (default, Jun 22 2022, 20:18:18) \n# [GCC 9.4.0] on linux\n# Type \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n\nPaste the next script:\nfrom time import sleep\nfrom urllib.parse import parse_qs\n\nfrom playwright.sync_api import sync_playwright\n\nwith sync_playwright() as p:\n # login\n browser = p.chromium.launch()\n context = browser.new_context(storage_state='/tmp/state.json')\n page = context.new_page()\n page.goto('https://demo.opencart.com/admin')\n page.fill('input#input-username', 'demo')\n page.fill('input#input-password', 'demo')\n page.click('button[type=submit]')\n sleep(5) # just wait for redirect - you can wait for a element...\n page.context.storage_state(path='/tmp/state.json')\n page.screenshot(path='/tmp/after_state_init.png')\n # parse user_token for GET requests\n parsed_url = parse_qs(page.url)\n user_token = parsed_url['user_token'][0]\n print(f'user token {user_token}')\n page.close()\n browser.close()\n\n\nwith sync_playwright() as p:\n # open admin dashboard without login... \n browser = p.chromium.launch()\n context = browser.new_context()\n page = browser.new_page(storage_state='/tmp/state.json')\n page.goto(f'https://demo.opencart.com/admin/index.php?route=common/dashboard&user_token={user_token}')\n sleep(5)\n page.screenshot(path='/tmp/open_using_saved_state.png')\n page.close()\n browser.close()\n\nNow open a new terminal and copy screens from docker container:\ndocker cp example:/tmp/after_state_init.png ./\ndocker cp example:/tmp/open_using_saved_state.png ./\n\nYou'll see that they are the same - admin dashboard works without login.\n"
] |
[
1
] |
[] |
[] |
[
"playwright",
"python"
] |
stackoverflow_0074520727_playwright_python.txt
|
Q:
Dash Radial Plot for Hours of a Day
I am looking for a plot in Plotly/Dash which is similar to the radial chart below. The closest ones I found in Plotly are polar charts and line charts.
Here is my implementation:
import random
import pandas as pd
import numpy as np
import plotly.express as px
df = pd.DataFrame({'DATE_TIME':pd.date_range('2022-11-01', '2022-11-05 23:00:00',freq='h'),
'value':[random.uniform(110, 160) for n in range(120)]})
fig = px.line_polar(df, r='DATE_TIME', theta='value', line_close=True)
fig.show()
and my output looks like:
Instead of angles, I would like to plot hours like 0, 3, 6, 9, 12, 15, and so on.
Besides, my line does not form a circle shape. I would appreciate any hints, or solutions.
A:
You should convert your hourly data to string series as follows:
import random
import pandas as pd
import numpy as np
import plotly.express as px
df = pd.DataFrame({'DATE_TIME':pd.date_range('2022-11-01', '2022-11-05 23:00:00',freq="30min"),
'value':[random.uniform(110, 160) for n in range(239)]})
You can plot it this way:
fig = px.line_polar(df, r='value', theta=df['DATE_TIME'].dt.strftime('%H:%M'))
fig.update_layout(width=750, height=750)
fig.show()
|
Dash Radial Plot for Hours of a Day
|
I am looking for a plot in Ploty/Dash which is similar to radial chart below. The closest one I found in Ploty is polar charts, and line charts.
Here is my implementation:
import random
import pandas as pd
import numpy as np
import plotly.express as px
df = pd.DataFrame({'DATE_TIME':pd.date_range('2022-11-01', '2022-11-05 23:00:00',freq='h'),
'value':[random.uniform(110, 160) for n in range(120)]})
fig = px.line_polar(df, r='DATE_TIME', theta='value', line_close=True)
fig.show()
and the my output looks like:
Instead of angles, I would like to plot hours like 0, 3, 6, 9, 12, 15, and so on.
Besides, my line is not in circle shape. I would appreciate any hints, or solutions.
|
[
"You should convert your hourly data to string series as follows:\nimport random\nimport pandas as pd\nimport numpy as np\nimport plotly.express as px\n\ndf = pd.DataFrame({'DATE_TIME':pd.date_range('2022-11-01', '2022-11-05 23:00:00',freq=\"30min\"),\n 'value':[random.uniform(110, 160) for n in range(239)]})\n\nYou can plot it this way:\nfig = px.line_polar(df, r='value', theta=df['DATE_TIME'].dt.strftime('%H:%M'))\nfig.update_layout(width=750, height=750)\nfig.show()\n\n\n"
] |
[
1
] |
[] |
[] |
[
"plotly",
"plotly_dash",
"python"
] |
stackoverflow_0074518210_plotly_plotly_dash_python.txt
|
Q:
Django Forms does not submit radiobutton value and does not showing any output in terminal as well
This is HTML Code.
<form action = "." method = "post">
<div class="form_data">
{% csrf_token %}
<br><br>
{{form.myfield}}
<br><br>
<input type="submit" value="Submit" class="btn btn-success" />
</div>
</form>
This is forms.py code
class TestForm(forms.ModelForm):
class Meta:
model = TestModel
fields = "__all__"
widgets = {'myfield': forms.RadioSelect()}
This is models.py code
class TestModel(models.Model):
git_Id = models.CharField(max_length=200)
git_Response = models.CharField(max_length=200)
is_approved = models.IntegerField()
MY_CHOICES = (
('opt0', 'Approved'),
('opt1', 'Not Approved'),
)
myfield = models.CharField(max_length=10, choices=MY_CHOICES, default="N/A")
views.py code
def test(request):
if request.method == "POST":
form = TestForm(request.POST)
if form.is_valid():
print("Form is Valid")
selected = form.cleaned_data['myfield']
print(selected)
if selected == 'opt0':
from config import request_id as r
rq = r["request_id"]
print(rq)
s = sql()
query = f"""update request_form_mymodel
set is_approved=1
where request_id = '{rq}' """
print(query)
s.update_query(query)
else:
pass
else:
form = TestForm()
return render(request, 'test.html', {'form': form})
I am not getting any output. If I try to submit after selecting a radio button, nothing happens: no variable values are printed in the terminal and the form is not submitted.
What I want - I want the form to be submitted, and if the selected radio button is opt0, then s.update_query() to be called.
A:
The form is not going to the test view because you specified . in the action attribute. Kindly remove the action attribute; Django then posts to the current page's route, so the request will automatically go to the test view:
<form method="POST">
<div class="form_data">
{% csrf_token %}
<br><br>
{{form.myfield}}
<br><br>
<input type="submit" value="Submit" class="btn btn-success" />
</div>
</form>
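If the view still prints nothing after that, the usual culprit is is_valid() returning False; a small debugging sketch (my own addition, not part of the answer) is to log the validation errors in the else branch of the view:
if form.is_valid():
    # ... existing handling ...
    pass
else:
    print(form.errors)  # shows which field failed validation and why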
|
Django Forms does not submit radiobutton value and does not showing any output in terminal as well
|
This is HTML Code.
<form action = "." method = "post">
<div class="form_data">
{% csrf_token %}
<br><br>
{{form.myfield}}
<br><br>
<input type="submit" value="Submit" class="btn btn-success" />
</div>
</form>
This is forms.py code
class TestForm(forms.ModelForm):
class Meta:
model = TestModel
fields = "__all__"
widgets = {'myfield': forms.RadioSelect()}
This is models.py code
class TestModel(models.Model):
git_Id = models.CharField(max_length=200)
git_Response = models.CharField(max_length=200)
is_approved = models.IntegerField()
MY_CHOICES = (
('opt0', 'Approved'),
('opt1', 'Not Approved'),
)
myfield = models.CharField(max_length=10, choices=MY_CHOICES, default="N/A")
views.py code
def test(request):
if request.method == "POST":
form = TestForm(request.POST)
if form.is_valid():
print("Form is Valid")
selected = form.cleaned_data['myfield']
print(selected)
if selected == 'opt0':
from config import request_id as r
rq = r["request_id"]
print(rq)
s = sql()
query = f"""update request_form_mymodel
set is_approved=1
where request_id = '{rq}' """
print(query)
s.update_query(query)
else:
pass
else:
form = TestForm()
return render(request, 'test.html', {'form': form})
I am not getting any output, if i try to submit after selecting radio button then it does not working and not printing any variables values in terminal as well and form is not submitted.
What I want - I want to getting form is submitted and if radiobutton is selected opt0 then s.update() is called.
|
[
"The form is not going to the test view as you specified . in action attribute, kindly remove action attribute, since Django always takes current page route, so it will automatically go to test view so:\n<form method=\"POST\">\n <div class=\"form_data\">\n\n {% csrf_token %}\n <br><br>\n\n {{form.myfield}}\n<br><br>\n <input type=\"submit\" value=\"Submit\" class=\"btn btn-success\" />\n </div>\n</form>\n\n"
] |
[
0
] |
[] |
[] |
[
"django",
"django_forms",
"django_templates",
"django_views",
"python"
] |
stackoverflow_0074521103_django_django_forms_django_templates_django_views_python.txt
|
Q:
How to use a Pydantic model with Form data in FastAPI?
I am trying to submit data from HTML forms and validate it with a Pydantic model.
Using this code
from fastapi import FastAPI, Form
from pydantic import BaseModel
from starlette.responses import HTMLResponse
app = FastAPI()
@app.get("/form", response_class=HTMLResponse)
def form_get():
return '''<form method="post">
<input type="text" name="no" value="1"/>
<input type="text" name="nm" value="abcd"/>
<input type="submit"/>
</form>'''
class SimpleModel(BaseModel):
no: int
nm: str = ""
@app.post("/form", response_model=SimpleModel)
def form_post(form_data: SimpleModel = Form(...)):
return form_data
However, I get the HTTP error: "422 Unprocessable Entity"
{
"detail": [
{
"loc": [
"body",
"form_data"
],
"msg": "field required",
"type": "value_error.missing"
}
]
}
The equivalent curl command (generated by Firefox) is
curl 'http://localhost:8001/form' -H 'Content-Type: application/x-www-form-urlencoded' --data 'no=1&nm=abcd'
Here the request body contains no=1&nm=abcd.
What am I doing wrong?
A:
I found a solution that can help us to use Pydantic with FastAPI forms :)
My code:
class AnyForm(BaseModel):
any_param: str
any_other_param: int = 1
@classmethod
def as_form(
cls,
any_param: str = Form(...),
any_other_param: int = Form(1)
) -> AnyForm:
return cls(any_param=any_param, any_other_param=any_other_param)
@router.post('')
async def any_view(form_data: AnyForm = Depends(AnyForm.as_form)):
...
It's shown in the Swagger as a usual form.
It can be more generic as a decorator:
import inspect
from typing import Type
from fastapi import Form
from pydantic import BaseModel
from pydantic.fields import ModelField
def as_form(cls: Type[BaseModel]):
new_parameters = []
for field_name, model_field in cls.__fields__.items():
model_field: ModelField # type: ignore
new_parameters.append(
inspect.Parameter(
model_field.alias,
inspect.Parameter.POSITIONAL_ONLY,
default=Form(...) if model_field.required else Form(model_field.default),
annotation=model_field.outer_type_,
)
)
async def as_form_func(**data):
return cls(**data)
sig = inspect.signature(as_form_func)
sig = sig.replace(parameters=new_parameters)
as_form_func.__signature__ = sig # type: ignore
setattr(cls, 'as_form', as_form_func)
return cls
And the usage looks like
@as_form
class Test(BaseModel):
param: str
a: int = 1
b: str = '2342'
c: bool = False
d: Optional[float] = None
@router.post('/me', response_model=Test)
async def me(request: Request, form: Test = Depends(Test.as_form)):
return form
A:
You can use form data like below:
@app.post("/form", response_model=SimpleModel)
def form_post(no: int = Form(...),nm: str = Form(...)):
return SimpleModel(no=no,nm=nm)
A:
I implemented the solution found here (Mause's solution) and it seemed to work
from fastapi.testclient import TestClient
from fastapi import FastAPI, Depends, Form
from pydantic import BaseModel
app = FastAPI()
def form_body(cls):
cls.__signature__ = cls.__signature__.replace(
parameters=[
arg.replace(default=Form(...))
for arg in cls.__signature__.parameters.values()
]
)
return cls
@form_body
class Item(BaseModel):
name: str
another: str
@app.post('/test', response_model=Item)
def endpoint(item: Item = Depends(Item)):
return item
tc = TestClient(app)
r = tc.post('/test', data={'name': 'name', 'another': 'another'})
assert r.status_code == 200
assert r.json() == {'name': 'name', 'another': 'another'}
A:
If you're only looking at abstracting the form data into a class you can do it with a plain class
from fastapi import Form, Depends
class AnyForm:
def __init__(self, any_param: str = Form(...), any_other_param: int = Form(1)):
self.any_param = any_param
self.any_other_param = any_other_param
def __str__(self):
return "AnyForm " + str(self.__dict__)
@app.post('/me')
async def me(form: AnyForm = Depends()):
print(form)
return form
And it can also be turned into a Pydantic Model
from uuid import UUID, uuid4
from fastapi import Form, Depends
from pydantic import BaseModel
class AnyForm(BaseModel):
id: UUID
any_param: str
any_other_param: int
def __init__(self, any_param: str = Form(...), any_other_param: int = Form(1)):
id = uuid4()
super().__init__(id, any_param, any_other_param)
@app.post('/me')
async def me(form: AnyForm = Depends()):
print(form)
return form
A:
You can do this even more simply using dataclasses
from dataclasses import dataclass
from fastapi import FastAPI, Form, Depends
from starlette.responses import HTMLResponse
app = FastAPI()
@app.get("/form", response_class=HTMLResponse)
def form_get():
return '''<form method="post">
<input type="text" name="no" value="1"/>
<input type="text" name="nm" value="abcd"/>
<input type="submit"/>
</form>'''
@dataclass
class SimpleModel:
no: int = Form(...)
nm: str = Form(...)
@app.post("/form")
def form_post(form_data: SimpleModel = Depends()):
return form_data
A:
Create the class this way:
from fastapi import Form
class SomeForm:
def __init__(
self,
username: str = Form(...),
password: str = Form(...),
authentication_code: str = Form(...)
):
self.username = username
self.password = password
self.authentication_code = authentication_code
@app.post("/login", tags=['Auth & Users'])
async def auth(
user: SomeForm = Depends()
):
# return something / set cookie
Result:
If you then want to make an HTTP request from JavaScript, you must use FormData to construct the request:
const fd = new FormData()
fd.append('username', username)
fd.append('password', password)
axios.post(`/login`, fd)
A:
Using Pydantic Dataclasses
from fastapi import FastAPI, Form, Depends
from pydantic.dataclasses import dataclass
app = FastAPI()
@dataclass
class UserFormModel:
username: str = Form("default name")
age: str = Form(22)
gender: str = Form(None) # default = null
email: str = Form(...) # required
@app.post("/form-data")
async def create_form_data(form_data: UserFormModel = Depends()):
return form_data
This solution is a complement to @Irfanuddin's excellent suggestion.
A:
Tldr: a mypy compliant, inheritable version of other solutions that produces the correct generated OpenAPI schema field types rather than any/unknown types.
Existing solutions set the FastAPI params to typing.Any to prevent the validation from occurring twice and failing; this causes the generated API spec to have any/unknown param types for these form fields.
This solution temporarily injects the correct annotations to the routes before schema generation, and resets them in line with other solutions afterwards.
# Example usage
class ExampleForm(FormBaseModel):
name: str
age: int
@api.post("/test")
async def endpoint(form: ExampleForm = Depends(ExampleForm.as_form)):
return form.dict()
form_utils.py
import inspect
from pydantic import BaseModel, ValidationError
from fastapi import Form
from fastapi.exceptions import RequestValidationError
class FormBaseModel(BaseModel):
def __init_subclass__(cls, *args, **kwargs):
field_default = Form(...)
new_params = []
schema_params = []
for field in cls.__fields__.values():
new_params.append(
inspect.Parameter(
field.alias,
inspect.Parameter.POSITIONAL_ONLY,
default=Form(field.default) if not field.required else field_default,
annotation=inspect.Parameter.empty,
)
)
schema_params.append(
inspect.Parameter(
field.alias,
inspect.Parameter.POSITIONAL_ONLY,
default=Form(field.default) if not field.required else field_default,
annotation=field.annotation,
)
)
async def _as_form(**data):
try:
return cls(**data)
except ValidationError as e:
raise RequestValidationError(e.raw_errors)
async def _schema_mocked_call(**data):
"""
A fake version which is given the actual annotations, rather than typing.Any,
this version is used to generate the API schema, then the routes revert back to the original afterwards.
"""
pass
_as_form.__signature__ = inspect.signature(_as_form).replace(parameters=new_params) # type: ignore
setattr(cls, "as_form", _as_form)
_schema_mocked_call.__signature__ = inspect.signature(_schema_mocked_call).replace(parameters=schema_params) # type: ignore
# Set the schema patch func as an attr on the _as_form func so it can be accessed later from the route itself:
setattr(_as_form, "_schema_mocked_call", _schema_mocked_call)
@staticmethod
def as_form(parameters=[]) -> "FormBaseModel":
raise NotImplementedError
# asgi.py
from fastapi.routing import APIRoute
from fastapi import FastAPI
from fastapi.openapi.utils import get_openapi
from fastapi.dependencies.utils import get_dependant, get_body_field
api = FastAPI()
def custom_openapi():
if api.openapi_schema:
return api.openapi_schema
def create_reset_callback(route, deps, body_field):
def reset_callback():
route.dependant.dependencies = deps
route.body_field = body_field
return reset_callback
# The functions to call after schema generation to reset the routes to their original state:
reset_callbacks = []
for route in api.routes:
if isinstance(route, APIRoute):
orig_dependencies = list(route.dependant.dependencies)
orig_body_field = route.body_field
is_modified = False
for dep_index, dependency in enumerate(route.dependant.dependencies):
# If it's a form dependency, set the annotations to their true values:
if dependency.call.__name__ == "_as_form": # type: ignore
is_modified = True
route.dependant.dependencies[dep_index] = get_dependant(
path=dependency.path if dependency.path else route.path,
# This mocked func was set as an attribute on the original, correct function,
# replace it here temporarily:
call=dependency.call._schema_mocked_call, # type: ignore
name=dependency.name,
security_scopes=dependency.security_scopes,
use_cache=False, # Overriding, so don't want cached actual version.
)
if is_modified:
route.body_field = get_body_field(dependant=route.dependant, name=route.unique_id)
reset_callbacks.append(
create_reset_callback(route, orig_dependencies, orig_body_field)
)
openapi_schema = get_openapi(
title="foo",
version="bar",
routes=api.routes,
)
for callback in reset_callbacks:
callback()
api.openapi_schema = openapi_schema
return api.openapi_schema
api.openapi = custom_openapi # type: ignore[assignment]
|
How to use a Pydantic model with Form data in FastAPI?
|
I am trying to submit data from HTML forms and validate it with a Pydantic model.
Using this code
from fastapi import FastAPI, Form
from pydantic import BaseModel
from starlette.responses import HTMLResponse
app = FastAPI()
@app.get("/form", response_class=HTMLResponse)
def form_get():
return '''<form method="post">
<input type="text" name="no" value="1"/>
<input type="text" name="nm" value="abcd"/>
<input type="submit"/>
</form>'''
class SimpleModel(BaseModel):
no: int
nm: str = ""
@app.post("/form", response_model=SimpleModel)
def form_post(form_data: SimpleModel = Form(...)):
return form_data
However, I get the HTTP error: "422 Unprocessable Entity"
{
"detail": [
{
"loc": [
"body",
"form_data"
],
"msg": "field required",
"type": "value_error.missing"
}
]
}
The equivalent curl command (generated by Firefox) is
curl 'http://localhost:8001/form' -H 'Content-Type: application/x-www-form-urlencoded' --data 'no=1&nm=abcd'
Here the request body contains no=1&nm=abcd.
What am I doing wrong?
|
[
"I found a solution that can help us to use Pydantic with FastAPI forms :)\nMy code:\nclass AnyForm(BaseModel):\n any_param: str\n any_other_param: int = 1\n\n @classmethod\n def as_form(\n cls,\n any_param: str = Form(...),\n any_other_param: int = Form(1)\n ) -> AnyForm:\n return cls(any_param=any_param, any_other_param=any_other_param)\n\n@router.post('')\nasync def any_view(form_data: AnyForm = Depends(AnyForm.as_form)):\n ...\n\nIt's shown in the Swagger as a usual form.\nIt can be more generic as a decorator:\nimport inspect\nfrom typing import Type\n\nfrom fastapi import Form\nfrom pydantic import BaseModel\nfrom pydantic.fields import ModelField\n\ndef as_form(cls: Type[BaseModel]):\n new_parameters = []\n\n for field_name, model_field in cls.__fields__.items():\n model_field: ModelField # type: ignore\n\n new_parameters.append(\n inspect.Parameter(\n model_field.alias,\n inspect.Parameter.POSITIONAL_ONLY,\n default=Form(...) if model_field.required else Form(model_field.default),\n annotation=model_field.outer_type_,\n )\n )\n\n async def as_form_func(**data):\n return cls(**data)\n\n sig = inspect.signature(as_form_func)\n sig = sig.replace(parameters=new_parameters)\n as_form_func.__signature__ = sig # type: ignore\n setattr(cls, 'as_form', as_form_func)\n return cls\n\nAnd the usage looks like\n@as_form\nclass Test(BaseModel):\n param: str\n a: int = 1\n b: str = '2342'\n c: bool = False\n d: Optional[float] = None\n\n\n@router.post('/me', response_model=Test)\nasync def me(request: Request, form: Test = Depends(Test.as_form)):\n return form\n\n",
"you can use data-form like below:\n@app.post(\"/form\", response_model=SimpleModel)\ndef form_post(no: int = Form(...),nm: str = Form(...)):\n return SimpleModel(no=no,nm=nm)\n\n",
"I implemented the solution found here Mause solution and it seemed to work\nfrom fastapi.testclient import TestClient\nfrom fastapi import FastAPI, Depends, Form\nfrom pydantic import BaseModel\n\n\napp = FastAPI()\n\n\ndef form_body(cls):\n cls.__signature__ = cls.__signature__.replace(\n parameters=[\n arg.replace(default=Form(...))\n for arg in cls.__signature__.parameters.values()\n ]\n )\n return cls\n\n\n@form_body\nclass Item(BaseModel):\n name: str\n another: str\n\n\n@app.post('/test', response_model=Item)\ndef endpoint(item: Item = Depends(Item)):\n return item\n\n\ntc = TestClient(app)\n\n\nr = tc.post('/test', data={'name': 'name', 'another': 'another'})\n\nassert r.status_code == 200\nassert r.json() == {'name': 'name', 'another': 'another'}\n\n",
"If you're only looking at abstracting the form data into a class you can do it with a plain class\nfrom fastapi import Form, Depends\n\nclass AnyForm:\n def __init__(self, any_param: str = Form(...), any_other_param: int = Form(1)):\n self.any_param = any_param\n self.any_other_param = any_other_param\n\n def __str__(self):\n return \"AnyForm \" + str(self.__dict__)\n\n@app.post('/me')\nasync def me(form: AnyForm = Depends()):\n print(form)\n return form\n\nAnd it can also be turned into a Pydantic Model\nfrom uuid import UUID, uuid4\nfrom fastapi import Form, Depends\nfrom pydantic import BaseModel\n\nclass AnyForm(BaseModel):\n id: UUID\n any_param: str\n any_other_param: int\n\n def __init__(self, any_param: str = Form(...), any_other_param: int = Form(1)):\n id = uuid4()\n super().__init__(id, any_param, any_other_param)\n\n@app.post('/me')\nasync def me(form: AnyForm = Depends()):\n print(form)\n return form\n\n",
"You can do this even simpler using dataclasses\nfrom dataclasses import dataclass\nfrom fastapi import FastAPI, Form, Depends\nfrom starlette.responses import HTMLResponse\n\napp = FastAPI()\n\n\n@app.get(\"/form\", response_class=HTMLResponse)\ndef form_get():\n return '''<form method=\"post\"> \n <input type=\"text\" name=\"no\" value=\"1\"/> \n <input type=\"text\" name=\"nm\" value=\"abcd\"/> \n <input type=\"submit\"/> \n </form>'''\n\n\n@dataclass\nclass SimpleModel:\n no: int = Form(...)\n nm: str = Form(...)\n\n\n@app.post(\"/form\")\ndef form_post(form_data: SimpleModel = Depends()):\n return form_data\n\n\n",
"Create the class this way:\nfrom fastapi import Form\n\nclass SomeForm:\n\n def __init__(\n self,\n username: str = Form(...),\n password: str = Form(...),\n authentication_code: str = Form(...)\n ):\n self.username = username\n self.password = password\n self.authentication_code = authentication_code\n\n\n@app.post(\"/login\", tags=['Auth & Users'])\nasync def auth(\n user: SomeForm = Depends()\n):\n # return something / set cookie\n\nResult:\n\nIf you want then to make an http request from javascript you must use FormData to construct the request:\nconst fd = new FormData()\nfd.append('username', username)\nfd.append('password', password)\n\naxios.post(`/login`, fd)\n\n",
"Using Pydantic Dataclasses\nfrom fastapi import FastAPI, Form, Depends\nfrom pydantic.dataclasses import dataclass\n\napp = FastAPI()\n\n@dataclass\nclass UserFormModel:\n username: str = Form(\"default name\")\n age: str = Form(22)\n gender: str = Form(None) # default = null\n email: str = Form(...) # required\n\n\n@app.post(\"/form-data\")\nasync def create_form_data(form_data: UserFormModel = Depends()):\n return form_data\n\nThis solution is a complement to @Irfanuddin excellent suggestion.\n",
"Tldr: a mypy compliant, inheritable version of other solutions that produces the correct generated OpenAPI schema field types rather than any/unknown types.\nExisting solutions set the FastAPI params to typing.Any to prevent the validation from occurring twice and failing, this causes the generated API spec to have any/unknown param types for these form fields.\nThis solution temporarily injects the correct annotations to the routes before schema generation, and resets them in line with other solutions afterwards.\n# Example usage\nclass ExampleForm(FormBaseModel):\n name: str\n age: int\n\n@api.post(\"/test\")\nasync def endpoint(form: ExampleForm = Depends(ExampleForm.as_form)):\n return form.dict()\n\nform_utils.py\nimport inspect\nfrom pydantic import BaseModel, ValidationError\nfrom fastapi import Form\nfrom fastapi.exceptions import RequestValidationError\n\nclass FormBaseModel(BaseModel):\n\n def __init_subclass__(cls, *args, **kwargs):\n field_default = Form(...)\n new_params = []\n schema_params = []\n for field in cls.__fields__.values():\n new_params.append(\n inspect.Parameter(\n field.alias,\n inspect.Parameter.POSITIONAL_ONLY,\n default=Form(field.default) if not field.required else field_default,\n annotation=inspect.Parameter.empty,\n )\n )\n schema_params.append(\n inspect.Parameter(\n field.alias,\n inspect.Parameter.POSITIONAL_ONLY,\n default=Form(field.default) if not field.required else field_default,\n annotation=field.annotation,\n )\n )\n\n async def _as_form(**data):\n try:\n return cls(**data)\n except ValidationError as e:\n raise RequestValidationError(e.raw_errors)\n\n async def _schema_mocked_call(**data):\n \"\"\"\n A fake version which is given the actual annotations, rather than typing.Any,\n this version is used to generate the API schema, then the routes revert back to the original afterwards.\n \"\"\"\n pass\n\n _as_form.__signature__ = inspect.signature(_as_form).replace(parameters=new_params) # type: ignore\n setattr(cls, \"as_form\", _as_form)\n _schema_mocked_call.__signature__ = inspect.signature(_schema_mocked_call).replace(parameters=schema_params) # type: ignore\n # Set the schema patch func as an attr on the _as_form func so it can be accessed later from the route itself:\n setattr(_as_form, \"_schema_mocked_call\", _schema_mocked_call)\n\n @staticmethod\n def as_form(parameters=[]) -> \"FormBaseModel\":\n raise NotImplementedError\n\n# asgi.py\nfrom fastapi.routing import APIRoute\nfrom fastapi import FastAPI\nfrom fastapi.openapi.utils import get_openapi\nfrom fastapi.dependencies.utils import get_dependant, get_body_field\n\napi = FastAPI()\n\n\ndef custom_openapi():\n if api.openapi_schema:\n return api.openapi_schema\n\n def create_reset_callback(route, deps, body_field):\n def reset_callback():\n route.dependant.dependencies = deps\n route.body_field = body_field\n\n return reset_callback\n\n # The functions to call after schema generation to reset the routes to their original state:\n reset_callbacks = []\n\n for route in api.routes:\n if isinstance(route, APIRoute):\n orig_dependencies = list(route.dependant.dependencies)\n orig_body_field = route.body_field\n\n is_modified = False\n for dep_index, dependency in enumerate(route.dependant.dependencies):\n # If it's a form dependency, set the annotations to their true values:\n if dependency.call.__name__ == \"_as_form\": # type: ignore\n is_modified = True\n route.dependant.dependencies[dep_index] = get_dependant(\n path=dependency.path if dependency.path else route.path,\n # This mocked 
func was set as an attribute on the original, correct function,\n # replace it here temporarily:\n call=dependency.call._schema_mocked_call, # type: ignore\n name=dependency.name,\n security_scopes=dependency.security_scopes,\n use_cache=False, # Overriding, so don't want cached actual version.\n )\n\n if is_modified:\n route.body_field = get_body_field(dependant=route.dependant, name=route.unique_id)\n\n reset_callbacks.append(\n create_reset_callback(route, orig_dependencies, orig_body_field)\n )\n\n openapi_schema = get_openapi(\n title=\"foo\",\n version=\"bar\",\n routes=api.routes,\n )\n\n for callback in reset_callbacks:\n callback()\n\n api.openapi_schema = openapi_schema\n return api.openapi_schema\n\n\napi.openapi = custom_openapi # type: ignore[assignment]\n\n"
] |
[
52,
6,
5,
3,
3,
2,
0,
0
] |
[] |
[] |
[
"fastapi",
"pydantic",
"python"
] |
stackoverflow_0060127234_fastapi_pydantic_python.txt
|
Q:
find all elements > 0 in a np.array with np.where
I have an array with numbers ranging from (-infinite to +infinite)
Code looks like that:
delta_up = np.where(delta > 0, delta, 0)
delta_down = np.where(delta < 0, delta, 0)
Problem: I also have NaNs in the array and they need to stay as NaNs. But they are being converted to 0
How to solve it?
A:
my_array = np.array([1, 2, 3, 5, -1, -2, -3, None], dtype="float")
negative_idx = np.where(my_array<0) # np.nan values will be ignore
positive_idx = np.where(my_array>0) # np.nan values will be ignore
# getting subarray with values `array[indexes]`
negative_values = my_array[negative_idx]
positive_values = my_array[positive_idx]
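If the goal is to keep the delta_up/delta_down arrays from the question but leave NaN untouched, a minimal sketch (my own, not from the answer above) is to nest np.where with np.isnan:
import numpy as np

delta = np.array([1.5, -2.0, np.nan, 0.3, -0.7])
# keep NaN as NaN, zero out only the values with the "wrong" sign
delta_up = np.where(np.isnan(delta), np.nan, np.where(delta > 0, delta, 0))
delta_down = np.where(np.isnan(delta), np.nan, np.where(delta < 0, delta, 0))
print(delta_up)    # [1.5  0.   nan  0.3  0. ]
print(delta_down)  # [ 0.  -2.   nan  0.  -0.7]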
|
find all elements > 0 in a np.array with np.where
|
I have a Array with Numbers ranging from (-infinite to +infinite)
Code looks like that:
delta_up = np.where(delta > 0, delta, 0)
delta_down = np.where(delta < 0, delta, 0)
Problem: I also have nan's in the array and they need to stay as nan's. But they are beeing converted to 0
How to solve it?
|
[
"my_array = np.array([1, 2, 3, 5, -1, -2, -3, None], dtype=\"float\")\n\n\nnegative_idx = np.where(my_array<0) # np.nan values will be ignore\npositive_idx = np.where(my_array>0) # np.nan values will be ignore\n\n# getting subarray with values `array[indexes]`\nnegative_values = my_array[negative_idx]\npositive_values = my_array[positive_idx] \n\n"
] |
[
1
] |
[] |
[] |
[
"arrays",
"numpy",
"numpy_ndarray",
"python"
] |
stackoverflow_0074521115_arrays_numpy_numpy_ndarray_python.txt
|
Q:
How to apply custom calculation between two IRIS cubes (GRIB files)? Considering also using xarray
I am trying to do some calculation between two iris cubes (GRIB files); here is what I'm trying to achieve:
First cube:
ERA5-Land dataset, downloaded from official site via cdsapi API routine, cropped to custom Lat and Lon, in this example, I have only 2m air temperature, in celsius, hourly, for 3 days:
print(air_temperature)
air_temperature / (celsius) (time: 72; latitude: 18; longitude: 27)
Dimension coordinates:
time x - -
latitude - x -
longitude - - x
Auxiliary coordinates:
forecast_period x - -
Scalar coordinates:
height 2 m
originating_centre European Centre for Medium Range Weather Forecasts
Then, I have a series of sampling points at given coordinates:
## Sample points coordinates
ws_latitudes = np.array([40.64, 41.19, 41.11, 41.19, 40.86, 40.93, 40.83, 40.25, 40.79, 40.56, 41.42, 41.42, 41.02, 41.24, 40.64, 40.13, 41.33, 40.61])
ws_longitudes = np.array([14.54, 15.13, 14.82, 13.83, 15.28, 14.02, 15.03, 15.66, 14.16, 15.23, 13.88, 15.04, 14.34, 14.47, 14.83, 15.45, 14.33, 14.97])
ws_samplepoints = [("latitude", ws_latitudes), ("longitude", ws_longitudes)]
The other cube (GRIB file) is a 2D cube ("timeless") of elevation:
I've downloaded the ERA5-Land geopotential GRIB2 file from here:
https://confluence.ecmwf.int/display/CKB/ERA5-Land%3A+data+documentation#ERA5Land:datadocumentation-parameterlistingParameterlistings
geopotential = "geo_1279l4_0.1x0.1.grib2"
geopot_cube = iris.load_cube(geopotential)
print(geopot_cube)
geopotential / (m2 s-2) (latitude: 1801; longitude: 3600)
Dimension coordinates:
latitude x -
longitude - x
Scalar coordinates:
forecast_period 0 hours
forecast_reference_time 2013-08-09 12:00:00
time 2013-08-09 12:00:00
Attributes:
GRIB_PARAM GRIB2:d000c003n004
centre 'European Centre for Medium Range Weather Forecasts'
z, Geopotential, m**2 s**-2
Then, to convert the geopotential to elevation, I've divided by 9.80665 m/s^2
elev_cube = geopot_cube / 9.80665
elev_cube.rename("Elevation")
elev_cube.units = "m"
print(elev_cube)
Elevation / (m) (latitude: 1801; longitude: 3600)
Dimension coordinates:
latitude x -
longitude - x
Scalar coordinates:
forecast_period 0 hours
forecast_reference_time 2013-08-09 12:00:00
time 2013-08-09 12:00:00
Attributes:
GRIB_PARAM GRIB2:d000c003n004
centre 'European Centre for Medium Range Weather Forecasts'
The resulting cube has been cropped to the same lat and lon as air temperature above (probably not necessary):
area_slicer = iris.Constraint(longitude=lambda v: 13.45 <= v <= 16.14, latitude=lambda v: 39.84 <= v <= 41.6)
elevcube_slice = elev_cube.extract(area_slicer)
print(elevcube_slice)
Elevation / (m) (latitude: 18; longitude: 27)
Dimension coordinates:
latitude x -
longitude - x
Scalar coordinates:
forecast_period 0 hours
forecast_reference_time 2013-08-09 12:00:00
time 2013-08-09 12:00:00
Attributes:
GRIB_PARAM GRIB2:d000c003n004
centre 'European Centre for Medium Range Weather Forecasts'
Now here is the point: having these two cubes, I have to calculate a new temperature value at every sample point using a linear (lapse-rate) equation of the form T_sp = T_grib + gamma * (h_grib - h_sp),
where:
T_sp = temperature to calculate at the given sample point coordinates;
T_grib = temperature read from the first GRIB file (2m air temperature) at the sample point coordinates;
h_sp = sample point elevation;
h_grib = elevation from the second GRIB file at the sample point coordinates;
gamma = lapse rate, as temperature/meter
How could I achieve this?
Even when I try to do very simple math between the two cubes, for example a simple multiplication:
print(air_temperature * elevcube_slice)
I have this error:
ValueError: Coordinate 'latitude' has different points for the LHS cube 'air_temperature' and RHS cube 'Elevation'.
To double check, both cubes have same CS:
cselev = elevcube_slice.coord_system()
cstemperature = air_temperature.coord_system()
print(cselev, cstemperature)
GeogCS(6371229.0) GeogCS(6371229.0)
I've also considered switching to xarray if that is possible and advisable; perhaps working with an xarray dataset is easier?
A:
Iris is strict about metadata and will fail loudly when they don't match in operations you try to do.
The error you get tells you what's going on: ValueError: Coordinate 'latitude' has different points for the LHS cube 'air_temperature' and RHS cube 'Elevation'.
So you can investigate and compare your left and right hand sides with
cube.coord('latitude').points
Xarray on the other hand is not strict about metadata and assumes you know what you are doing, i.e. it will also happily do operations that give wrong results.
Both have their merits. I'm siding with xarray for reading and analysing files. And iris when writing files.
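A rough sketch of one way forward (my own, under the assumption that the mismatch is only slightly different grid points): regrid the elevation cube onto the temperature cube's grid so the latitude/longitude points coincide, then sample both at the station coordinates defined in the question:
import iris
import iris.analysis

# put the elevation cube onto the temperature cube's horizontal grid
elev_on_temp_grid = elevcube_slice.regrid(air_temperature, iris.analysis.Linear())

# cube arithmetic now works, e.g. scaling the elevation by an assumed lapse rate
gamma = -0.0065  # assumed value, temperature per metre
correction = elev_on_temp_grid * gamma

# extract values at the weather-station sample points from the question
sampled_temperature = air_temperature.interpolate(ws_samplepoints, iris.analysis.Linear())
sampled_elevation = elev_on_temp_grid.interpolate(ws_samplepoints, iris.analysis.Linear())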
|
How to apply custom calculation between two IRIS cubes (GRIB files)? Considering also using xarray
|
I am trying to do some calculation between two iris cubes (GRIB files), here it is what I'm trying to achieve:
First cube:
ERA5-Land dataset, downloaded from official site via cdsapi API routine, cropped to custom Lat and Lon, in this example, I have only 2m air temperature, in celsius, hourly, for 3 days:
print(air_temperature)
air_temperature / (celsius) (time: 72; latitude: 18; longitude: 27)
Dimension coordinates:
time x - -
latitude - x -
longitude - - x
Auxiliary coordinates:
forecast_period x - -
Scalar coordinates:
height 2 m
originating_centre European Centre for Medium Range Weather Forecasts
Then, I have a series of sampling points at gives coordinates:
## Sample points coordinates
ws_latitudes = np.array([40.64, 41.19, 41.11, 41.19, 40.86, 40.93, 40.83, 40.25, 40.79, 40.56, 41.42, 41.42, 41.02, 41.24, 40.64, 40.13, 41.33, 40.61])
ws_longitudes = np.array([14.54, 15.13, 14.82, 13.83, 15.28, 14.02, 15.03, 15.66, 14.16, 15.23, 13.88, 15.04, 14.34, 14.47, 14.83, 15.45, 14.33, 14.97])
ws_samplepoints = [("latitude", ws_latitudes), ("longitude", ws_longitudes)]
The other cube (GRIB file) is a 2D cube ("timeless") of elevation:
I've downloaded ERA-Land geopontential GRIB2 file from here:
https://confluence.ecmwf.int/display/CKB/ERA5-Land%3A+data+documentation#ERA5Land:datadocumentation-parameterlistingParameterlistings
geopotential = "geo_1279l4_0.1x0.1.grib2"
geopot_cube = iris.load_cube(geopotential)
print(geopot_cube)
geopotential / (m2 s-2) (latitude: 1801; longitude: 3600)
Dimension coordinates:
latitude x -
longitude - x
Scalar coordinates:
forecast_period 0 hours
forecast_reference_time 2013-08-09 12:00:00
time 2013-08-09 12:00:00
Attributes:
GRIB_PARAM GRIB2:d000c003n004
centre 'European Centre for Medium Range Weather Forecasts'
z, Geopotential, m**2 s**-2
Then, to convert the geopotential to elevation, I've divided by 9.80665 m/s^2
elev_cube = geopot_cube / 9.80665
elev_cube.rename("Elevation")
elev_cube.units = "m"
print(elev_cube)
Elevation / (m) (latitude: 1801; longitude: 3600)
Dimension coordinates:
latitude x -
longitude - x
Scalar coordinates:
forecast_period 0 hours
forecast_reference_time 2013-08-09 12:00:00
time 2013-08-09 12:00:00
Attributes:
GRIB_PARAM GRIB2:d000c003n004
centre 'European Centre for Medium Range Weather Forecasts'
The resulting cube has been cropped to the same lat and lon as air temperature above (probably not necessary):
area_slicer = iris.Constraint(longitude=lambda v: 13.45 <= v <= 16.14, latitude=lambda v: 39.84 <= v <= 41.6)
elevcube_slice = elev_cube.extract(area_slicer)
print(elevcube_slice)
Elevation / (m) (latitude: 18; longitude: 27)
Dimension coordinates:
latitude x -
longitude - x
Scalar coordinates:
forecast_period 0 hours
forecast_reference_time 2013-08-09 12:00:00
time 2013-08-09 12:00:00
Attributes:
GRIB_PARAM GRIB2:d000c003n004
centre 'European Centre for Medium Range Weather Forecasts'
Now here is the point: having these two cubes, I have to calculate a new temperature value at every sample points given the linear equation:
where:
= temperature to calculate at given coordinates sample points;
= temperature read from the first GRIB file (2m air temperature) at sample points coordinates
= sample point elevation
= elevation from second GRIB file at sample points coordinates
as temperature/meter
How could I achieve this?
Even when I try to do very simple math between the two cubes, for example a simple multiplication:
print(air_temperature * elevcube_slice)
I have this error:
ValueError: Coordinate 'latitude' has different points for the LHS cube 'air_temperature' and RHS cube 'Elevation'.
To double check, both cubes have same CS:
cselev = elevcube_slice.coord_system()
cstemperature = air_temperature.coord_system()
print(cselev, cstemperature)
GeogCS(6371229.0) GeogCS(6371229.0)
I've also considered to switch to xarray if it is possible and suggested, probably working with xarray dataset is easier?
|
[
"Iris is strict about metadata and will fail loudly when they don't match in operations you try to do.\nThe error you get tells you what's going on: ValueError: Coordinate 'latitude' has different points for the LHS cube 'air_temperature' and RHS cube 'Elevation'.\nSo you can investigate and compare your left and right hand sides with\ncube.coord('latitude').points\nXarray on the other hand is not strict about metadata and assumes you know what you are doing, i.e. will also do operations that will give wrong results.\nBoth have their merits. I'm siding with xarray for reading and analysing files. And iris when writing files.\n"
] |
[
1
] |
[] |
[] |
[
"python",
"python_iris",
"python_xarray"
] |
stackoverflow_0074287675_python_python_iris_python_xarray.txt
|
Q:
How to install yaml to site-packages
I want to know how to install YAML packages to site-packages because I need it for Blender. I already tried "C:\Program Files\Blender Foundation\Blender 3.0\3.0\python\bin\python.exe" -m pip install yaml -t"C:\Program Files\Blender Foundation\Blender 3.0\3.0\python\lib\site-packages" as administrator, but this appeared.
A:
The name of the package is PyYAML so you need to install it via
pip install PyYAML
NOTE: Be aware that while the name of the package on pypi and the name of the python module are almost always the same, they do not have to be. In this case, the package is called PyYAML, however once you've installed it, it's imported via
import yaml
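So for the Blender-bundled interpreter from the question, the same command should work once the package name is corrected (the paths are the ones given in the question):
"C:\Program Files\Blender Foundation\Blender 3.0\3.0\python\bin\python.exe" -m pip install PyYAML -t "C:\Program Files\Blender Foundation\Blender 3.0\3.0\python\lib\site-packages"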
|
How to install yaml to site-packages
|
I want to know how to install YAML packages to site-packages because I need it for Blender. I already tried "C:\Program Files\Blender Foundation\Blender 3.0\3.0\python\bin\python.exe" -m pip install yaml -t"C:\Program Files\Blender Foundation\Blender 3.0\3.0\python\lib\site-packages" as administrator, but this appeared.
|
[
"The name of the package is PyYAML so you need to install it via\npip install PyYAML\n\nNOTE: Be aware that while the name of the package on pypi and the name of the python module are almost always the same, they do not have to be. In this case, the package is called PyYAML, however once you've installed it, it's imported via\nimport yaml\n\n"
] |
[
1
] |
[] |
[] |
[
"cmd",
"pip",
"python",
"site_packages"
] |
stackoverflow_0074521251_cmd_pip_python_site_packages.txt
|
Q:
problem running pynput module in google colab
I am trying to install and import the pynput module using google colab. However, although I managed to install using "!pip install pynput", when I import the module such as:
from pynput.keyboard import Key, Listener
I get the below error:
---------------------------------------------------------------------------
ImportError Traceback (most recent call last)
<ipython-input-13-2da480c0a0bb> in <module>
----> 1 from pynput.keyboard import Key, Listener
2
3 # def show(key):
4
5 # print('\nYou Entered {0}'.format( key))
2 frames
/usr/local/lib/python3.7/dist-packages/pynput/_util/__init__.py in backend(package)
80 ' * {}'.format(s)
81 for s in resolutions))
---> 82 if resolutions else '')
83
84
ImportError: this platform is not supported: ('failed to acquire X connection: Bad
display name ""', DisplayNameError(''))
Try one of the following resolutions:
* Please make sure that you have an X server running, and that the DISPLAY
environment variable is set correctly
---------------------------------------------------------------------------
NOTE: If your import is failing due to a missing package, you can
manually install dependencies using either !pip or !apt.
To view examples of installing some common dependencies, click the
"Open Examples" button below.
---------------------------------------------------------------------------
Which code can I use to install the module and use it with Google Colab?
A:
Google Colab runs on a machine instance in Google Cloud, so Python cannot gain control of your keyboard/monitor/mouse. In a nutshell, developers interact with Google Colab through web browsers (Google Chrome, Mozilla, etc.).
Long story short, you are trying to control local hardware by running code in the cloud, which is in general not possible in this case.
So if you want to perform this kind of experiment, do it on your local machine, using any IDE (PyCharm etc.).
you can check this: enter link description here
If you still want to do the experiment in Colab, then use a local runtime.
you can check this for the detailed implementation: enter link description here
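For the local runtime route, the rough steps (a sketch from memory of Google's guide, so check the official instructions for the current flags) are to run a local Jupyter server with the http-over-websocket extension and then pick "Connect to a local runtime" in Colab:
pip install jupyter_http_over_ws
jupyter serverextension enable --py jupyter_http_over_ws
jupyter notebook --NotebookApp.allow_origin='https://colab.research.google.com' --port=8888 --NotebookApp.port_retries=0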
|
problem running pynput module in google colab
|
I am trying to install and import the pynput module using google colab. However, although I managed to install using "!pip install pynput", when I import the module such as:
from pynput.keyboard import Key, Listener
I get the below error:
---------------------------------------------------------------------------
ImportError Traceback (most recent call last)
<ipython-input-13-2da480c0a0bb> in <module>
----> 1 from pynput.keyboard import Key, Listener
2
3 # def show(key):
4
5 # print('\nYou Entered {0}'.format( key))
2 frames
/usr/local/lib/python3.7/dist-packages/pynput/_util/__init__.py in backend(package)
80 ' * {}'.format(s)
81 for s in resolutions))
---> 82 if resolutions else '')
83
84
ImportError: this platform is not supported: ('failed to acquire X connection: Bad
display name ""', DisplayNameError(''))
Try one of the following resolutions:
* Please make sure that you have an X server running, and that the DISPLAY
environment variable is set correctly
---------------------------------------------------------------------------
NOTE: If your import is failing due to a missing package, you can
manually install dependencies using either !pip or !apt.
To view examples of installing some common dependencies, click the
"Open Examples" button below.
---------------------------------------------------------------------------
which code can I use the install the module and use it with google colab?
|
[
"Google colab runs on a machine instance in Google Cloud so that python can't able to gain control to the keyboard/monitor/mouse. In a nutshell developers interact with google colab through web browsers (google chrome, mozilla etc).\nLong story short you are trying to control local hardware by runing code on cloud, which is in general not possible in this case.\nSo if you want to perform such type of experiment then do it on your local machine, using any IDE(pycharm etc).\nyou can check this:enter link description here\nStill you want to do experiment in colab then use local runtime\nyou can check this for detail implementation:enter link description here\n"
] |
[
1
] |
[] |
[] |
[
"installation",
"pynput",
"python"
] |
stackoverflow_0074517767_installation_pynput_python.txt
|
Q:
How to search for a value in line and check if it matches given condition
I have a file in.txt.
name="XYZ_PP_0" number="0x12" bytesize="4" info="0x0000001A"
name="GK_LMP_2_0" number="0xA5" bytesize="8" info="0x00000000bbae321f"
name="MP_LKO_1_0" number="0x356" bytesize="4" info="0x00000234"
name="PNP_VXU_1_2_0" number="0x48A" bytesize="8" info="0x00000000a18c3ba3"
name="AVU_W_2_3_1" number="0x867" bytesize="1" info="0x0b"
From this file I need to search for number="0x867" and check whether its info value matches the expected info value, which is 0x0a. If it matches, print "matches", else print "doesn't match".
Then I need to search for number="0x12" and store its info value, i.e. info="0x0000001A", and then search for number="0x356" and store its info value, info="0x00000234", in another variable. This value should be equal to the previous info value + 0x00000004 (i.e. 0x0000001A + 0x00000004 = 0x0000001E).
If the resulting value matches info="0x00000234", then print that the info value of number="0x12" + 0x00000004 matches the info value of number="0x356";
else print that the resulting value does not match.
This is my current attempt in Python:
with open("in.txt", "r") as infile:
XYZ = False
MP = False
AVU = False
xyz = ['number="0x12"', 'info="0x0000001A"']
mp = ['number="0x356"', 'info="0x00000234"']
avu = ['number="0x867"', 'info="0x0b"']
for line in infile:
if all(x in line for x in xyz):
XYZ = True
continue
if all(x in line for x in mp):
MP = True
continue
if all(x in line for x in avu):
AVU = True
continue
but this code simply checks whether the line is present in the file or not; it won't check the conditions mentioned above.
Is there a way I can search for the number in the text file and store its info value in a variable?
A:
You have to escape the double quotes, or you can use single quotes so that your string can contain double quotes, like this:
'number="0x12"'
Also, in your if condition the first part is wrong. Here is the loop:
xyz_list = ['number="0x12"', 'info="0x0000001A"']
mp_list = ['number="0x356"', 'info="0x00000234"']
avu_list = ['number="0x867"', 'info="0x0b"']
for line in infile:
if all(x in line for x in xyz_list):
XYZ = True
continue
if all(x in line for x in mp_list):
MP = True
continue
if all(x in line for x in avu_list):
AVU = True
continue
The all() function checks the substrings in the line and returns True if all of them exist.
A:
Try converting the .txt file into a dataframe and applying the conditions using the pandas dataframe
import pandas as pd
df = pd.read_fwf('myfile.txt')
or
df = pd.read_csv('myfile.txt', sep=" ")
or
df = pd.read_csv('myfile.txt', delimiter="\t")
or
df = pd.read_csv('myfile.txt', sep=" ", header=None, names=["Pos", "Lang", "Perc"])
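Neither answer shows how to actually pull the info value for a given number; a minimal sketch of my own (assuming every line uses the key="value" layout shown in the question) is to parse each line into a dict with a regex and then compare the hex values as integers:
import re

records = {}
with open("in.txt") as infile:
    for line in infile:
        fields = dict(re.findall(r'(\w+)="([^"]*)"', line))
        if "number" in fields:
            records[fields["number"]] = fields["info"]

# check number="0x867" against the expected info value 0x0a
print("matches" if int(records["0x867"], 16) == 0x0A else "doesn't match")

# info of number="0x12" plus 0x00000004 should equal info of number="0x356"
expected = int(records["0x12"], 16) + 0x00000004
if expected == int(records["0x356"], 16):
    print('number="0x12" info value + 0x00000004 matches the info value of number="0x356"')
else:
    print("resulting value does not match")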
|
How to search for a value in line and check if it matches given condition
|
I have a file in.txt.
name="XYZ_PP_0" number="0x12" bytesize="4" info="0x0000001A"
name="GK_LMP_2_0" number="0xA5" bytesize="8" info="0x00000000bbae321f"
name="MP_LKO_1_0" number="0x356" bytesize="4" info="0x00000234"
name="PNP_VXU_1_2_0" number="0x48A" bytesize="8" info="0x00000000a18c3ba3"
name="AVU_W_2_3_1" number="0x867" bytesize="1" info="0x0b"
From this file i need to search for number="0x867" and check if it's info value matches to the expected given info value which is 0x0a. if it matches print matches else doesn't matches.
then next i need to search for number="0x12" and store it's info value i.e info="0x0000001A" and then search for number="0x356" and store it's info value info="0x00000234" to another variable. this value should be equal to previous info value + 0x00000004 (i.e 0x0000001A + 0x00000004 = 0x0000001E).
if resulted value matches to info="0x00000234" then print number="0x12" info value 0x00000012 + 0x00000004 matches to info value of number="0x356".
else print resulted value not matching
This is current attempt in python:
with open("in.txt", "r") as infile:
XYZ = False
MP = False
AVU = False
xyz = ['number="0x12"', 'info="0x0000001A"']
mp = ['number="0x356"', 'info="0x00000234"']
avu = ['number="0x867"', 'info="0x0b"']
for line in infile:
if all(x in line for x in xyz):
XYZ = True
continue
if all(x in line for x in mp):
MP = True
continue
if all(x in line for x in avu):
AVU = True
continue
but this code will simply checks if the line is present in file or not. it won't check the conditions mentioned above.
Is there a way i can search for the number in the text file and store it's info value to variable?
|
[
"You have to escape the double quotes, or you can use single quotes so that you string can have double quotes in it like this:\n'number=\"0x12\"'\n\nAlso, in your if condition the first part is wrong. Here is the loop:\nxyz_list = ['number=\"0x12\"', 'info=\"0x0000001A\"']\nmp_list = ['number=\"0x356\"', 'info=\"0x00000234\"']\navu_list = ['number=\"0x867\"', 'info=\"0x0b\"']\n\nfor line in infile:\n if all(x in line for x in xyz_list):\n XYZ = True\n continue\n\n if all(x in line for x in mp_list):\n MP = True\n continue\n \n if all(x in line for x in avu_list):\n AVU = True\n continue \n\nThe all() function will check substrings in the line and returns true if all exist.\n",
"Try to convert the .txt file into dataframe and apply the conditions using the pandas datframe\nimport pandas as pd\ndf = pd.read_fwf('myfile.txt')\nor \ndf = pd.read_csv('myfile.txt', sep=\" \")\nor \ndf = pd.read_csv('myfile.txt', ,delimiter=\"\\t\")\nor \ndf = pd.read_csv('myfile.txt', sep=\" \", header=None, names=[\"Pos\", \"Lang\", \"Perc\"])\n\n"
] |
[
0,
0
] |
[] |
[] |
[
"python",
"python_3.x"
] |
stackoverflow_0074520971_python_python_3.x.txt
|
Q:
Pygame collision with rectangles in list not working right
I'm making python pygame labyrinth game.
Movement works by moving the walls instead of the player, because the player never leaves the screen. Currently I'm working on movement along the X axis, but something goes wrong. When the player collides with the left wall, it correctly stops moving further left. But when moving to the right, it slightly ignores the collision and still moves a bit further right.
Reproducible example:
import pygame
import math
pygame.init()
surface = pygame.display.set_mode((1000,600))
clock = pygame.time.Clock()
surfrect = surface.get_rect()
player=pygame.Rect((0,0), (35,35))
player.center=surfrect.w/2,surfrect.h/2
touched = False
levelR=[]
control=pygame.Rect((0,0), (50,50))
controld=[]
yspeed,xspeed=0,0
for i in range(-1,2):
oxr=surfrect.w/2-75*3/2
oyr=surfrect.h/2-75*3/2+75*3*i
yr,xr=oyr,oxr
gf=[]
for y in '#-#n#-#n#-#'.split('n'):
for x in y:
if x=='#':
gf.append(pygame.Rect((xr, yr), (75,75)))
xr+=75
yr+=75
xr=oxr
levelR.append([gf,pygame.Rect((oxr,oyr),(75*3,75*3))])
while True:
for ev in pygame.event.get():
if ev.type == pygame.QUIT:
pygame.quit()
elif ev.type == pygame.MOUSEBUTTONDOWN:
touched = True
elif ev.type == pygame.MOUSEBUTTONUP:
touched = False
surface.fill((0,0,0))
for sd in levelR:pygame.draw.rect(surface,(35,35,35),sd[1])
pygame.draw.circle(surface, (200,200,200), player.center, player.width/2-player.width/19, player.width)
for sd in levelR:
for rect in sd[0]:
scale=0.6
recti=pygame.Rect((rect.x,rect.y), (rect.width*scale,rect.height*scale))
recti.center=rect.center
pygame.draw.rect(surface,(255,255,255),rect)
pygame.draw.rect(surface,(0,0,0),recti)
if touched and not controld:
controld=pygame.mouse.get_pos()
control.center=controld
elif touched and controld:
radius = 150
x,y=pygame.mouse.get_pos()
distance=math.dist((x, y),controld)
if distance < radius:
pos=(x, y)
else:
dx=x-controld[0]
dy=y-controld[1]
ratio=radius/distance
pos=controld[0]+ratio*dx, controld[1]+ratio*dy
control.center=pos
try:xmd=(x-controld[0])/abs(x-controld[0])
except:xmd=0
try:ymd=(y-controld[1])/abs(y-controld[1])
except:ymd=0
sr=0.02*(150/radius)
xspeed=xmd*abs(control.centerx-controld[0])*sr
yspeed=ymd*abs(control.centery-controld[1])*sr
collisiont=5
for sd in levelR:
for block in sd[0]:
block.x-=xspeed
sd[1].x-=xspeed
for rect in [i for sd in levelR for i in sd[0]]:
if player.colliderect(rect):
for sd in levelR:
for block in sd[0]:
block.x+=xspeed
sd[1].x+=xspeed
break
pygame.draw.circle(surface, (255,255,255), control.center, control.width,control.width)
pygame.draw.circle(surface, (255,255,255), controld, radius, 5)
elif not touched and controld:controld=()
pygame.display.flip()
clock.tick(60)
I have tried debugging the collision and discovered that when moving to the right and colliding with a wall, the player is stopped from moving further right but still shifts slightly to the right.
I expected the walls not to move when the player collides with them.
What actually happens is that movement to the right is not stopped immediately when the player collides with a wall.
A:
Since pygame.Rect is supposed to represent an area on the screen, a pygame.Rect object can only store integral data.
The coordinates for Rect objects are all integers. [...]
The fractional part of the coordinates is lost when the position of the Rect object is changed. In your case this means that movement to the left and right behaves differently, and block.x += xspeed is not the inverse operation of block.x -= xspeed. Consider that 5 - 0.1 stored in a Rect becomes 4, but 4 + 0.1 is still 4.
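A tiny standalone snippet (not part of the original program) makes the truncation visible:
import pygame

r = pygame.Rect(5, 0, 10, 10)
r.x -= 0.1   # 5 - 0.1 = 4.9, stored as 4
print(r.x)   # 4
r.x += 0.1   # 4 + 0.1 = 4.1, stored as 4 again
print(r.x)   # 4 -- the subtraction has not been undone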
You can solve the problem by testing the object for collisions before you actually move it, and not moving the object at all if a collision is detected:
collide = False
for rect in [i for sd in levelR for i in sd[0]]:
text_rect = rect.copy()
text_rect.x -= xspeed
if player.colliderect(text_rect):
collide = True
break
if not collide:
for sd in levelR:
for block in sd[0]:
block.x-=xspeed
sd[1].x-=xspeed
|
Pygame collision with rectangles in list not working right
|
I'm making python pygame labyrinth game.
Moving works with moving walls, but not player because player never escapes the screen. Currently I'm working on moving on X axis, but something goes wrong. When player collides with left wall, it normally doesn't let it go more to the left. But when going to the right it slightly ignores collision and goes more to the right.
Reproducible example:
import pygame
import math
pygame.init()
surface = pygame.display.set_mode((1000,600))
clock = pygame.time.Clock()
surfrect = surface.get_rect()
player=pygame.Rect((0,0), (35,35))
player.center=surfrect.w/2,surfrect.h/2
touched = False
levelR=[]
control=pygame.Rect((0,0), (50,50))
controld=[]
yspeed,xspeed=0,0
for i in range(-1,2):
oxr=surfrect.w/2-75*3/2
oyr=surfrect.h/2-75*3/2+75*3*i
yr,xr=oyr,oxr
gf=[]
for y in '#-#n#-#n#-#'.split('n'):
for x in y:
if x=='#':
gf.append(pygame.Rect((xr, yr), (75,75)))
xr+=75
yr+=75
xr=oxr
levelR.append([gf,pygame.Rect((oxr,oyr),(75*3,75*3))])
while True:
for ev in pygame.event.get():
if ev.type == pygame.QUIT:
pygame.quit()
elif ev.type == pygame.MOUSEBUTTONDOWN:
touched = True
elif ev.type == pygame.MOUSEBUTTONUP:
touched = False
surface.fill((0,0,0))
for sd in levelR:pygame.draw.rect(surface,(35,35,35),sd[1])
pygame.draw.circle(surface, (200,200,200), player.center, player.width/2-player.width/19, player.width)
for sd in levelR:
for rect in sd[0]:
scale=0.6
recti=pygame.Rect((rect.x,rect.y), (rect.width*scale,rect.height*scale))
recti.center=rect.center
pygame.draw.rect(surface,(255,255,255),rect)
pygame.draw.rect(surface,(0,0,0),recti)
if touched and not controld:
controld=pygame.mouse.get_pos()
control.center=controld
elif touched and controld:
radius = 150
x,y=pygame.mouse.get_pos()
distance=math.dist((x, y),controld)
if distance < radius:
pos=(x, y)
else:
dx=x-controld[0]
dy=y-controld[1]
ratio=radius/distance
pos=controld[0]+ratio*dx, controld[1]+ratio*dy
control.center=pos
try:xmd=(x-controld[0])/abs(x-controld[0])
except:xmd=0
try:ymd=(y-controld[1])/abs(y-controld[1])
except:ymd=0
sr=0.02*(150/radius)
xspeed=xmd*abs(control.centerx-controld[0])*sr
yspeed=ymd*abs(control.centery-controld[1])*sr
collisiont=5
for sd in levelR:
for block in sd[0]:
block.x-=xspeed
sd[1].x-=xspeed
for rect in [i for sd in levelR for i in sd[0]]:
if player.colliderect(rect):
for sd in levelR:
for block in sd[0]:
block.x+=xspeed
sd[1].x+=xspeed
break
pygame.draw.circle(surface, (255,255,255), control.center, control.width,control.width)
pygame.draw.circle(surface, (255,255,255), controld, radius, 5)
elif not touched and controld:controld=()
pygame.display.flip()
clock.tick(60)
I have tried debugging the collision and discovered that the moving to the right when collides with walls it stops the player from moving more to the right but player slightly moves to the right.
I expected walls not moving when player collides with walls.
It happened that the moving to the right when player collides with wall doesn't stop it directly.
|
[
"Since pygame.Rect is supposed to represent an area on the screen, a pygame.Rect object can only store integral data.\n\nThe coordinates for Rect objects are all integers. [...]\n\nThe fractional part of the coordinates is lost when the position of the Rect object is changed. In your case this means that the movement to the left and right behaves differently and block.x += xspeed is not the inverse operation of block.x -= xspeed. Consider that, 5 - 0.1 is 4, but 4 + 0.1 is still 4.\nYou can solve the problem by testing the object for collisions before you actually move it, and not moving the object at all if a collision is detected:\ncollide = False\nfor rect in [i for sd in levelR for i in sd[0]]:\n text_rect = rect.copy()\n text_rect.x -= xspeed\n if player.colliderect(text_rect):\n collide = True\n break\n\nif not collide:\n for sd in levelR:\n for block in sd[0]:\n block.x-=xspeed\n sd[1].x-=xspeed\n\n"
] |
[
0
] |
[] |
[] |
[
"pygame",
"python",
"python_3.x"
] |
stackoverflow_0074520434_pygame_python_python_3.x.txt
|
Q:
Find out the percentage of missing values in each column in the given dataset
import pandas as pd
df = pd.read_csv('https://query.data.world/s/Hfu_PsEuD1Z_yJHmGaxWTxvkz7W_b0')
percent= 100*(len(df.loc[:,df.isnull().sum(axis=0)>=1 ].index) / len(df.index))
print(round(percent,2))
input is https://query.data.world/s/Hfu_PsEuD1Z_yJHmGaxWTxvkz7W_b0
and the output should be
Ord_id 0.00
Prod_id 0.00
Ship_id 0.00
Cust_id 0.00
Sales 0.24
Discount 0.65
Order_Quantity 0.65
Profit 0.65
Shipping_Cost 0.65
Product_Base_Margin 1.30
dtype: float64
A:
How about this? I think I actually found something similar on here once before, but I'm not seeing it now...
percent_missing = df.isnull().sum() * 100 / len(df)
missing_value_df = pd.DataFrame({'column_name': df.columns,
'percent_missing': percent_missing})
And if you want the missing percentages sorted, follow the above with:
missing_value_df.sort_values('percent_missing', inplace=True)
As mentioned in the comments, you may also be able to get by with just the first line in my code above, i.e.:
percent_missing = df.isnull().sum() * 100 / len(df)
A:
Update let's use mean with isnull:
df.isnull().mean() * 100
Output:
Ord_id 0.000000
Prod_id 0.000000
Ship_id 0.000000
Cust_id 0.000000
Sales 0.238124
Discount 0.654840
Order_Quantity 0.654840
Profit 0.654840
Shipping_Cost 0.654840
Product_Base_Margin 1.297774
dtype: float64
IIUC:
df.isnull().sum() / df.shape[0] * 100.00
Output:
Ord_id 0.000000
Prod_id 0.000000
Ship_id 0.000000
Cust_id 0.000000
Sales 0.238124
Discount 0.654840
Order_Quantity 0.654840
Profit 0.654840
Shipping_Cost 0.654840
Product_Base_Margin 1.297774
dtype: float64
A:
single line solution
df.isnull().mean().round(4).mul(100).sort_values(ascending=False)
A:
To cover all missing values and round the results:
((df.isnull() | df.isna()).sum() * 100 / df.index.size).round(2)
The output:
Out[556]:
Ord_id 0.00
Prod_id 0.00
Ship_id 0.00
Cust_id 0.00
Sales 0.24
Discount 0.65
Order_Quantity 0.65
Profit 0.65
Shipping_Cost 0.65
Product_Base_Margin 1.30
dtype: float64
A:
The solution you're looking for is :
round(df.isnull().mean()*100,2)
This will round up the percentage upto 2 decimal places
Another way to do this is
round((df.isnull().sum()*100)/len(df),2)
but this is not as efficient as using mean().
A:
import numpy as np
import pandas as pd
raw_data = {'first_name': ['Jason', np.nan, 'Tina', 'Jake', 'Amy'],
'last_name': ['Miller', np.nan, np.nan, 'Milner', 'Cooze'],
'age': [22, np.nan, 23, 24, 25],
'sex': ['m', np.nan, 'f', 'm', 'f'],
'Test1_Score': [4, np.nan, 0, 0, 0],
'Test2_Score': [25, np.nan, np.nan, 0, 0]}
results = pd.DataFrame(raw_data, columns = ['first_name', 'last_name', 'age', 'sex', 'Test1_Score', 'Test2_Score'])
results
first_name last_name age sex Test1_Score Test2_Score
0 Jason Miller 22.0 m 4.0 25.0
1 NaN NaN NaN NaN NaN NaN
2 Tina NaN 23.0 f 0.0 NaN
3 Jake Milner 24.0 m 0.0 0.0
4 Amy Cooze 25.0 f 0.0 0.0
You can use following function, which will give you output in Dataframe
Zero Values
Missing Values
% of Total Values
Total Zero Missing Values
% Total Zero Missing Values
Data Type
Just copy and paste following function and call it by passing your pandas Dataframe
def missing_zero_values_table(df):
zero_val = (df == 0.00).astype(int).sum(axis=0)
mis_val = df.isnull().sum()
mis_val_percent = 100 * df.isnull().sum() / len(df)
mz_table = pd.concat([zero_val, mis_val, mis_val_percent], axis=1)
mz_table = mz_table.rename(
columns = {0 : 'Zero Values', 1 : 'Missing Values', 2 : '% of Total Values'})
mz_table['Total Zero Missing Values'] = mz_table['Zero Values'] + mz_table['Missing Values']
mz_table['% Total Zero Missing Values'] = 100 * mz_table['Total Zero Missing Values'] / len(df)
mz_table['Data Type'] = df.dtypes
mz_table = mz_table[
mz_table.iloc[:,1] != 0].sort_values(
'% of Total Values', ascending=False).round(1)
print ("Your selected dataframe has " + str(df.shape[1]) + " columns and " + str(df.shape[0]) + " Rows.\n"
"There are " + str(mz_table.shape[0]) +
" columns that have missing values.")
# mz_table.to_excel('D:/sampledata/missing_and_zero_values.xlsx', freeze_panes=(1,0), index = False)
return mz_table
missing_zero_values_table(results)
Output
Your selected dataframe has 6 columns and 5 Rows.
There are 6 columns that have missing values.
Zero Values Missing Values % of Total Values Total Zero Missing Values % Total Zero Missing Values Data Type
last_name 0 2 40.0 2 40.0 object
Test2_Score 2 2 40.0 4 80.0 float64
first_name 0 1 20.0 1 20.0 object
age 0 1 20.0 1 20.0 float64
sex 0 1 20.0 1 20.0 object
Test1_Score 3 1 20.0 4 80.0 float64
If you want to keep it simple then you can use following function to get missing values in %
def missing(dff):
print (round((dff.isnull().sum() * 100/ len(dff)),2).sort_values(ascending=False))
missing(results)
Test2_Score 40.0
last_name 40.0
Test1_Score 20.0
sex 20.0
age 20.0
first_name 20.0
dtype: float64
A:
If there are multiple dataframe below is the function to calculate number of missing value in each column with percentage
def miss_data(df):
x = ['column_name','missing_data', 'missing_in_percentage']
missing_data = pd.DataFrame(columns=x)
columns = df.columns
for col in columns:
icolumn_name = col
imissing_data = df[col].isnull().sum()
imissing_in_percentage = (df[col].isnull().sum()/df[col].shape[0])*100
missing_data.loc[len(missing_data)] = [icolumn_name, imissing_data, imissing_in_percentage]
print(missing_data)
A:
With the following code, you can get the corresponding missing-value percentage for every column. Just replace the name train_data with df in your case.
Input:
In [1]:
all_data_na = (train_data.isnull().sum() / len(train_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
missing_data.head(20)
Output :
Out[1]:
Missing Ratio
left_eyebrow_outer_end_x 68.435239
left_eyebrow_outer_end_y 68.435239
right_eyebrow_outer_end_y 68.279189
right_eyebrow_outer_end_x 68.279189
left_eye_outer_corner_x 67.839410
left_eye_outer_corner_y 67.839410
right_eye_inner_corner_x 67.825223
right_eye_inner_corner_y 67.825223
right_eye_outer_corner_x 67.825223
right_eye_outer_corner_y 67.825223
mouth_left_corner_y 67.811037
mouth_left_corner_x 67.811037
left_eyebrow_inner_end_x 67.796851
left_eyebrow_inner_end_y 67.796851
right_eyebrow_inner_end_y 67.796851
mouth_right_corner_x 67.796851
mouth_right_corner_y 67.796851
right_eyebrow_inner_end_x 67.796851
left_eye_inner_corner_x 67.782664
left_eye_inner_corner_y 67.782664
A:
One-liner
I'm wondering why nobody takes advantage of size and count. It seems the shortest (and probably fastest) way to do it.
df.apply(lambda x: 1-(x.count()/x.size))
Resulting in:
Ord_id 0.000000
Prod_id 0.000000
Ship_id 0.000000
Cust_id 0.000000
Sales 0.002381
Discount 0.006548
Order_Quantity 0.006548
Profit 0.006548
Shipping_Cost 0.006548
Product_Base_Margin 0.012978
dtype: float64
If you find any reason why this is not a good way, please comment
A:
For me I did it like that :
def missing_percent(df):
# Total missing values
mis_val = df.isnull().sum()
# Percentage of missing values
mis_percent = 100 * df.isnull().sum() / len(df)
# Make a table with the results
mis_table = pd.concat([mis_val, mis_percent], axis=1)
# Rename the columns
mis_columns = mis_table.rename(
columns = {0 : 'Missing Values', 1 : 'Percent of Total Values'})
# Sort the table by percentage of missing descending
mis_columns = mis_columns[
mis_columns.iloc[:,1] != 0].sort_values(
'Percent of Total Values', ascending=False).round(2)
# Print some summary information
print ("Your selected dataframe has " + str(df.shape[1]) + " columns.\n"
"There are " + str(mis_columns.shape[0]) +
" columns that have missing values.")
# Return the dataframe with missing information
return mis_columns
A:
Let's break down your ask
you want the percentage of missing value
it should be sorted in ascending order and the values rounded to 2 decimal places
Explanation:
dhr[fill_cols].isnull().sum() - gives the total number of missing values column wise
dhr.shape[0] - gives the total number of rows
(dhr[fill_cols].isnull().sum()/dhr.shape[0]) - gives you a series with percentage as values and column names as index
since the output is a series you can round and sort based on the values
code:
(dhr[fill_cols].isnull().sum()/dhr.shape[0]).round(2).sort_values()
Reference:
sort, round
A:
import numpy as np
import pandas as pd
df = pd.read_csv('https://query.data.world/s/Hfu_PsEuD1Z_yJHmGaxWTxvkz7W_b0')
df.loc[np.isnan(df['Product_Base_Margin']),['Product_Base_Margin']]=df['Product_Base_Margin'].mean()
print(round(100*(df.isnull().sum()/len(df.index)), 2))
A:
Try this solution
import pandas as pd
df = pd.read_csv('https://query.data.world/s/Hfu_PsEuD1Z_yJHmGaxWTxvkz7W_b0')
print(round(100*(df.isnull().sum()/len(df.index)),2))
A:
The best solution I have found - (Only shows the missing columns)
missing_values = [feature for feature in df.columns if df[feature].isnull().sum() > 1]
for feature in missing_values:
print(f"{feature} {np.round(df[feature].isnull().mean(), 4)}% missing values")
A:
import pandas as pd
df = pd.read_csv('https://query.data.world/s/Hfu_PsEuD1Z_yJHmGaxWTxvkz7W_b0')
df.isna().sum()
Output:
Ord_id 0
Prod_id 0
Ship_id 0
Cust_id 0
Sales 20
Discount 55
Order_Quantity 55
Profit 55
Shipping_Cost 55
Product_Base_Margin 109
dtype: int64
df.shape
Output: (8399, 10)
# for share [0; 1] of nan in each column
df.isna().sum() / df.shape[0]
Output:
Ord_id 0.0000
Prod_id 0.0000
Ship_id 0.0000
Cust_id 0.0000
Sales 0.0024 # (20 / 8399)
Discount 0.0065 # (55 / 8399)
Order_Quantity 0.0065 # (55 / 8399)
Profit 0.0065 # (55 / 8399)
Shipping_Cost 0.0065 # (55 / 8399)
Product_Base_Margin 0.0130 # (109 / 8399)
dtype: float64
# for percent [0; 100] of nan in each column
df.isna().sum() / (df.shape[0] / 100)
Output:
Ord_id 0.0000
Prod_id 0.0000
Ship_id 0.0000
Cust_id 0.0000
Sales 0.2381 # (20 / (8399 / 100))
Discount 0.6548 # (55 / (8399 / 100))
Order_Quantity 0.6548 # (55 / (8399 / 100))
Profit 0.6548 # (55 / (8399 / 100))
Shipping_Cost 0.6548 # (55 / (8399 / 100))
Product_Base_Margin 1.2978 # (109 / (8399 / 100))
dtype: float64
# for share [0; 1] of nan in dataframe
df.isna().sum() / (df.shape[0] * df.shape[1])
Output:
Ord_id 0.0000
Prod_id 0.0000
Ship_id 0.0000
Cust_id 0.0000
Sales 0.0002 # (20 / (8399 * 10))
Discount 0.0007 # (55 / (8399 * 10))
Order_Quantity 0.0007 # (55 / (8399 * 10))
Profit 0.0007 # (55 / (8399 * 10))
Shipping_Cost 0.0007 # (55 / (8399 * 10))
Product_Base_Margin 0.0013 # (109 / (8399 * 10))
dtype: float64
# for percent [0; 100] of nan in dataframe
df.isna().sum() / ((df.shape[0] * df.shape[1]) / 100)
Output:
Ord_id 0.0000
Prod_id 0.0000
Ship_id 0.0000
Cust_id 0.0000
Sales 0.0238 # (20 / ((8399 * 10) / 100))
Discount 0.0655 # (55 / ((8399 * 10) / 100))
Order_Quantity 0.0655 # (55 / ((8399 * 10) / 100))
Profit 0.0655 # (55 / ((8399 * 10) / 100))
Shipping_Cost 0.0655 # (55 / ((8399 * 10) / 100))
Product_Base_Margin 0.1298 # (109 / ((8399 * 10) / 100))
dtype: float64
|
Find out the percentage of missing values in each column in the given dataset
|
import pandas as pd
df = pd.read_csv('https://query.data.world/s/Hfu_PsEuD1Z_yJHmGaxWTxvkz7W_b0')
percent= 100*(len(df.loc[:,df.isnull().sum(axis=0)>=1 ].index) / len(df.index))
print(round(percent,2))
input is https://query.data.world/s/Hfu_PsEuD1Z_yJHmGaxWTxvkz7W_b0
and the output should be
Ord_id 0.00
Prod_id 0.00
Ship_id 0.00
Cust_id 0.00
Sales 0.24
Discount 0.65
Order_Quantity 0.65
Profit 0.65
Shipping_Cost 0.65
Product_Base_Margin 1.30
dtype: float64
|
[
"How about this? I think I actually found something similar on here once before, but I'm not seeing it now...\npercent_missing = df.isnull().sum() * 100 / len(df)\nmissing_value_df = pd.DataFrame({'column_name': df.columns,\n 'percent_missing': percent_missing})\n\nAnd if you want the missing percentages sorted, follow the above with:\nmissing_value_df.sort_values('percent_missing', inplace=True)\n\nAs mentioned in the comments, you may also be able to get by with just the first line in my code above, i.e.:\npercent_missing = df.isnull().sum() * 100 / len(df)\n\n",
"Update let's use mean with isnull:\ndf.isnull().mean() * 100\n\nOutput:\nOrd_id 0.000000\nProd_id 0.000000\nShip_id 0.000000\nCust_id 0.000000\nSales 0.238124\nDiscount 0.654840\nOrder_Quantity 0.654840\nProfit 0.654840\nShipping_Cost 0.654840\nProduct_Base_Margin 1.297774\ndtype: float64\n\nIIUC:\ndf.isnull().sum() / df.shape[0] * 100.00\n\nOutput:\nOrd_id 0.000000\nProd_id 0.000000\nShip_id 0.000000\nCust_id 0.000000\nSales 0.238124\nDiscount 0.654840\nOrder_Quantity 0.654840\nProfit 0.654840\nShipping_Cost 0.654840\nProduct_Base_Margin 1.297774\ndtype: float64\n\n",
"single line solution\ndf.isnull().mean().round(4).mul(100).sort_values(ascending=False)\n\n",
"To cover all missing values and round the results:\n((df.isnull() | df.isna()).sum() * 100 / df.index.size).round(2)\n\nThe output:\nOut[556]: \nOrd_id 0.00\nProd_id 0.00\nShip_id 0.00\nCust_id 0.00\nSales 0.24\nDiscount 0.65\nOrder_Quantity 0.65\nProfit 0.65\nShipping_Cost 0.65\nProduct_Base_Margin 1.30\ndtype: float64\n\n",
"The solution you're looking for is :\nround(df.isnull().mean()*100,2) \n\nThis will round up the percentage upto 2 decimal places\nAnother way to do this is \nround((df.isnull().sum()*100)/len(df),2)\n\nbut this is not efficient as using mean() is.\n",
"import numpy as np\nimport pandas as pd\n\nraw_data = {'first_name': ['Jason', np.nan, 'Tina', 'Jake', 'Amy'], \n 'last_name': ['Miller', np.nan, np.nan, 'Milner', 'Cooze'], \n 'age': [22, np.nan, 23, 24, 25], \n 'sex': ['m', np.nan, 'f', 'm', 'f'], \n 'Test1_Score': [4, np.nan, 0, 0, 0],\n 'Test2_Score': [25, np.nan, np.nan, 0, 0]}\nresults = pd.DataFrame(raw_data, columns = ['first_name', 'last_name', 'age', 'sex', 'Test1_Score', 'Test2_Score'])\n\n\nresults \n\n first_name last_name age sex Test1_Score Test2_Score\n0 Jason Miller 22.0 m 4.0 25.0\n1 NaN NaN NaN NaN NaN NaN\n2 Tina NaN 23.0 f 0.0 NaN\n3 Jake Milner 24.0 m 0.0 0.0\n4 Amy Cooze 25.0 f 0.0 0.0\n\nYou can use following function, which will give you output in Dataframe\n\nZero Values \nMissing Values \n% of Total Values\nTotal Zero Missing Values\n% Total Zero Missing Values\nData Type \n\nJust copy and paste following function and call it by passing your pandas Dataframe\ndef missing_zero_values_table(df):\n zero_val = (df == 0.00).astype(int).sum(axis=0)\n mis_val = df.isnull().sum()\n mis_val_percent = 100 * df.isnull().sum() / len(df)\n mz_table = pd.concat([zero_val, mis_val, mis_val_percent], axis=1)\n mz_table = mz_table.rename(\n columns = {0 : 'Zero Values', 1 : 'Missing Values', 2 : '% of Total Values'})\n mz_table['Total Zero Missing Values'] = mz_table['Zero Values'] + mz_table['Missing Values']\n mz_table['% Total Zero Missing Values'] = 100 * mz_table['Total Zero Missing Values'] / len(df)\n mz_table['Data Type'] = df.dtypes\n mz_table = mz_table[\n mz_table.iloc[:,1] != 0].sort_values(\n '% of Total Values', ascending=False).round(1)\n print (\"Your selected dataframe has \" + str(df.shape[1]) + \" columns and \" + str(df.shape[0]) + \" Rows.\\n\" \n \"There are \" + str(mz_table.shape[0]) +\n \" columns that have missing values.\")\n# mz_table.to_excel('D:/sampledata/missing_and_zero_values.xlsx', freeze_panes=(1,0), index = False)\n return mz_table\n\nmissing_zero_values_table(results)\n\nOutput\nYour selected dataframe has 6 columns and 5 Rows.\nThere are 6 columns that have missing values.\n\n Zero Values Missing Values % of Total Values Total Zero Missing Values % Total Zero Missing Values Data Type\nlast_name 0 2 40.0 2 40.0 object\nTest2_Score 2 2 40.0 4 80.0 float64\nfirst_name 0 1 20.0 1 20.0 object\nage 0 1 20.0 1 20.0 float64\nsex 0 1 20.0 1 20.0 object\nTest1_Score 3 1 20.0 4 80.0 float64\n\nIf you want to keep it simple then you can use following function to get missing values in %\ndef missing(dff):\n print (round((dff.isnull().sum() * 100/ len(dff)),2).sort_values(ascending=False))\n\n\nmissing(results)\n\nTest2_Score 40.0\nlast_name 40.0\nTest1_Score 20.0\nsex 20.0\nage 20.0\nfirst_name 20.0\ndtype: float64\n\n",
"If there are multiple dataframe below is the function to calculate number of missing value in each column with percentage\ndef miss_data(df):\n x = ['column_name','missing_data', 'missing_in_percentage']\n missing_data = pd.DataFrame(columns=x)\n columns = df.columns\n for col in columns:\n icolumn_name = col\n imissing_data = df[col].isnull().sum()\n imissing_in_percentage = (df[col].isnull().sum()/df[col].shape[0])*100\n\n missing_data.loc[len(missing_data)] = [icolumn_name, imissing_data, imissing_in_percentage]\n print(missing_data) \n\n",
"By this following code, you can get the corresponding percentage values from every columns. Just switch the name train_data with df, in case of yours.\nInput:\nIn [1]:\n\nall_data_na = (train_data.isnull().sum() / len(train_data)) * 100\nall_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)[:30]\nmissing_data = pd.DataFrame({'Missing Ratio' :all_data_na})\nmissing_data.head(20)\n\nOutput :\nOut[1]: \n Missing Ratio\n left_eyebrow_outer_end_x 68.435239\n left_eyebrow_outer_end_y 68.435239\n right_eyebrow_outer_end_y 68.279189\n right_eyebrow_outer_end_x 68.279189\n left_eye_outer_corner_x 67.839410\n left_eye_outer_corner_y 67.839410\n right_eye_inner_corner_x 67.825223\n right_eye_inner_corner_y 67.825223\n right_eye_outer_corner_x 67.825223\n right_eye_outer_corner_y 67.825223\n mouth_left_corner_y 67.811037\n mouth_left_corner_x 67.811037\n left_eyebrow_inner_end_x 67.796851\n left_eyebrow_inner_end_y 67.796851\n right_eyebrow_inner_end_y 67.796851\n mouth_right_corner_x 67.796851\n mouth_right_corner_y 67.796851\n right_eyebrow_inner_end_x 67.796851\n left_eye_inner_corner_x 67.782664\n left_eye_inner_corner_y 67.782664\n\n",
"One-liner\nI'm wondering nobody takes advantage of the size and count? It seems the shortest (and probably fastest) way to do it.\ndf.apply(lambda x: 1-(x.count()/x.size))\n\nResulting in:\nOrd_id 0.000000\nProd_id 0.000000\nShip_id 0.000000\nCust_id 0.000000\nSales 0.002381\nDiscount 0.006548\nOrder_Quantity 0.006548\nProfit 0.006548\nShipping_Cost 0.006548\nProduct_Base_Margin 0.012978\ndtype: float64\n\nIf you find any reason why this is not a good way, please comment\n",
"For me I did it like that :\ndef missing_percent(df):\n # Total missing values\n mis_val = df.isnull().sum()\n \n # Percentage of missing values\n mis_percent = 100 * df.isnull().sum() / len(df)\n \n # Make a table with the results\n mis_table = pd.concat([mis_val, mis_percent], axis=1)\n \n # Rename the columns\n mis_columns = mis_table.rename(\n columns = {0 : 'Missing Values', 1 : 'Percent of Total Values'})\n \n # Sort the table by percentage of missing descending\n mis_columns = mis_columns[\n mis_columns.iloc[:,1] != 0].sort_values(\n 'Percent of Total Values', ascending=False).round(2)\n \n # Print some summary information\n print (\"Your selected dataframe has \" + str(df.shape[1]) + \" columns.\\n\" \n \"There are \" + str(mis_columns.shape[0]) +\n \" columns that have missing values.\")\n \n # Return the dataframe with missing information\n return mis_columns\n\n",
"Let's break down your ask\n\nyou want the percentage of missing value\nit should be sorted in ascending order and the values to be rounded to 2 floating point\n\nExplanation:\n\ndhr[fill_cols].isnull().sum() - gives the total number of missing values column wise\ndhr.shape[0] - gives the total number of rows\n(dhr[fill_cols].isnull().sum()/dhr.shape[0]) - gives you a series with percentage as values and column names as index\nsince the output is a series you can round and sort based on the values\n\ncode:\n(dhr[fill_cols].isnull().sum()/dhr.shape[0]).round(2).sort_values()\n\nReference:\nsort, round\n",
"import numpy as np\n\nimport pandas as pd\n\ndf = pd.read_csv('https://query.data.world/s/Hfu_PsEuD1Z_yJHmGaxWTxvkz7W_b0')\n\ndf.loc[np.isnan(df['Product_Base_Margin']),['Product_Base_Margin']]=df['Product_Base_Margin'].mean()\n\nprint(round(100*(df.isnull().sum()/len(df.index)), 2))\n\n",
"Try this solution\n\nimport pandas as pd\ndf = pd.read_csv('https://query.data.world/s/Hfu_PsEuD1Z_yJHmGaxWTxvkz7W_b0')\nprint(round(100*(df.isnull().sum()/len(df.index)),2))\n\n\n",
"The best solution I have found - (Only shows the missing columns)\nmissing_values = [feature for feature in df.columns if df[feature].isnull().sum() > 1]\n\nfor feature in missing_values:\n print(f\"{feature} {np.round(df[feature].isnull().mean(), 4)}% missing values\")\n\n",
"import pandas as pd\ndf = pd.read_csv('https://query.data.world/s/Hfu_PsEuD1Z_yJHmGaxWTxvkz7W_b0')\ndf.isna().sum()\n\nOutput:\n\nOrd_id 0\nProd_id 0\nShip_id 0\nCust_id 0\nSales 20\nDiscount 55\nOrder_Quantity 55\nProfit 55\nShipping_Cost 55\nProduct_Base_Margin 109\ndtype: int64\n\n\ndf.shape\n\nOutput: (8399, 10)\n\n\n# for share [0; 1] of nan in each column\n\ndf.isna().sum() / df.shape[0]\n\nOutput:\n\nOrd_id 0.0000\nProd_id 0.0000\nShip_id 0.0000\nCust_id 0.0000\nSales 0.0024 # (20 / 8399)\nDiscount 0.0065 # (55 / 8399)\nOrder_Quantity 0.0065 # (55 / 8399)\nProfit 0.0065 # (55 / 8399)\nShipping_Cost 0.0065 # (55 / 8399)\nProduct_Base_Margin 0.0130 # (109 / 8399)\ndtype: float64\n\n\n# for percent [0; 100] of nan in each column\n\ndf.isna().sum() / (df.shape[0] / 100)\n\nOutput:\n\nOrd_id 0.0000\nProd_id 0.0000\nShip_id 0.0000\nCust_id 0.0000\nSales 0.2381 # (20 / (8399 / 100))\nDiscount 0.6548 # (55 / (8399 / 100))\nOrder_Quantity 0.6548 # (55 / (8399 / 100))\nProfit 0.6548 # (55 / (8399 / 100))\nShipping_Cost 0.6548 # (55 / (8399 / 100))\nProduct_Base_Margin 1.2978 # (109 / (8399 / 100))\ndtype: float64\n\n\n# for share [0; 1] of nan in dataframe\n\ndf.isna().sum() / (df.shape[0] * df.shape[1])\n\nOutput:\n\nOrd_id 0.0000\nProd_id 0.0000\nShip_id 0.0000\nCust_id 0.0000\nSales 0.0002 # (20 / (8399 * 10))\nDiscount 0.0007 # (55 / (8399 * 10))\nOrder_Quantity 0.0007 # (55 / (8399 * 10))\nProfit 0.0007 # (55 / (8399 * 10))\nShipping_Cost 0.0007 # (55 / (8399 * 10))\nProduct_Base_Margin 0.0013 # (109 / (8399 * 10))\ndtype: float64\n\n\n# for percent [0; 100] of nan in dataframe\n\ndf.isna().sum() / ((df.shape[0] * df.shape[1]) / 100)\n\nOutput:\n\nOrd_id 0.0000\nProd_id 0.0000\nShip_id 0.0000\nCust_id 0.0000\nSales 0.0238 # (20 / ((8399 * 10) / 100))\nDiscount 0.0655 # (55 / ((8399 * 10) / 100))\nOrder_Quantity 0.0655 # (55 / ((8399 * 10) / 100))\nProfit 0.0655 # (55 / ((8399 * 10) / 100))\nShipping_Cost 0.0655 # (55 / ((8399 * 10) / 100))\nProduct_Base_Margin 0.1298 # (109 / ((8399 * 10) / 100))\ndtype: float64\n\n"
] |
[
111,
53,
13,
7,
3,
2,
1,
1,
1,
0,
0,
0,
0,
0,
0
] |
[] |
[] |
[
"numpy",
"pandas",
"python",
"python_3.x"
] |
stackoverflow_0051070985_numpy_pandas_python_python_3.x.txt
|
Q:
How can I overwrite a mapping of a column based on its current value and value of two other columns?
I have the following pandas dataframe
is_and_mp market_state reason
'100' None NaN
'400' None NaN
'100' ALGO NaN
'400' OPENING NaN
I want to write two mappings where if is_and_mp is either '100' or '400', and market_state == None and reason == NaN, then map market_state =CONTINUOUS_TRADING.
So the output would be:
is_and_mp market_state reason
'100' CONTINUOUS_TRADING NaN
'400' CONTINUOUS_TRADING NaN
'100' ALGO NaN
'400' OPENING NaN
It is important for the existing mappings not to change! Thanks
A:
Use DataFrame.loc with chained mask by & for bitwise AND:
df.loc[df.is_and_mp.isin(['100', '400']) & df.market_state.isna() & df.reason.isna(), 'market_state'] = 'CONTINUOUS_TRADING'
or if values are numeric:
df.loc[df.is_and_mp.isin([100, 400]) & df.market_state.isna() & df.reason.isna(), 'market_state'] = 'CONTINUOUS_TRADING'
A:
When using & in a complex df.loc query, each condition should be inside parentheses ():
import pandas as pd
data = {
"is_and_mp": ['100', '400', '100', '400'],
"market_state": [None, None, 'ALGO', 'OPENING'],
"reason": ['NaN', 'NaN', 'NaN', 'NaN']
}
df = pd.DataFrame(data)
df.loc[(df["is_and_mp"].isin(['100', '400'])) & (df["market_state"].isna()) & (df["reason"] == 'NaN'), "market_state"] = "CONTINUOUS_TRADING"
print(df)
Output:
is_and_mp market_state reason
0 100 CONTINUOUS_TRADING NaN
1 400 CONTINUOUS_TRADING NaN
2 100 ALGO NaN
3 400 OPENING NaN
|
How can I overwrite a mapping of a column based on its current value and value of two other columns?
|
I have the following pandas dataframe
is_and_mp market_state reason
'100' None NaN
'400' None NaN
'100' ALGO NaN
'400' OPENING NaN
I want to write two mappings where if is_and_mp is either '100' or '400', and market_state == None and reason == NaN, then map market_state =CONTINUOUS_TRADING.
So the output would be:
is_and_mp market_state reason
'100' CONTINUOUS_TRADING NaN
'400' CONTINUOUS_TRADING NaN
'100' ALGO NaN
'400' OPENING NaN
It is important for the existing mappings not to change! Thanks
|
[
"Use DataFrame.loc with chained mask by & for bitwise AND:\ndf.loc[df.is_and_mp.isin([ '100', '400']) & df.market_state.isna() & df. reason.isna(), 'market_stat'] = 'CONTINUOUS_TRADING'\n\nor if values are numeric:\ndf.loc[df.is_and_mp.isin([ 100, 400]) & df.market_state.isna() & df. reason.isna(), 'market_stat'] = 'CONTINUOUS_TRADING' \n\n",
"Using & in complex query in df.loc should be inside parenthesis ()\nimport pandas as pd\n\ndata = {\n \"is_and_mp\": ['100', '400', '100', '400'],\n \"market_state\": [None, None, 'ALGO', 'OPENING'],\n \"reason\": ['NaN', 'NaN', 'NaN', 'NaN']\n}\n\ndf = pd.DataFrame(data)\n\ndf.loc[(df[\"is_and_mp\"].isin(['100', '400'])) & (df[\"market_state\"].isna()) & (df[\"reason\"] == 'NaN'), \"market_state\"] = \"CONTINUOUS_TRADING\"\nprint(df)\n\nOutput:\n is_and_mp market_state reason\n0 100 CONTINUOUS_TRADING NaN\n1 400 CONTINUOUS_TRADING NaN\n2 100 ALGO NaN\n3 400 OPENING NaN\n\n"
] |
[
4,
0
] |
[] |
[] |
[
"dataframe",
"pandas",
"python"
] |
stackoverflow_0074521148_dataframe_pandas_python.txt
|
Q:
Have a global df in tkinter
Hello I am on a project for my school and I have to code a stock manager.
import tkinter as tk
from tkinter import filedialog, messagebox, ttk, simpledialog
from PIL import Image,ImageTk
import pandas as pd
# initalise the tkinter GUI
window = tk.Tk()
window.geometry("1280x720") # set the root dimensions
window.pack_propagate(False) # tells the root to not let the widgets inside it determine its size.
window.resizable(0, 0) # makes the root window fixed in size.
window.title('e-Zone Manager')
bg=tk.PhotoImage(file='image2.png')
canva=tk.Canvas(window)
canva.pack(fill="both",expand=True)
canva.create_image(0,0,image=bg,anchor="nw")
logo=tk.PhotoImage(file="logo.png")
window.iconphoto(False,logo)
# Frame for TreeView
frame = tk.LabelFrame(window,)
frame.place(height=300, width=750, rely=0.2, relx=0.21)
# Frame for open file dialog
data_frame = tk.LabelFrame(window, text="Open File")
data_frame.place(height=100, width=400, rely=0.75, relx=0.05)
# Frame for the tools
tool_frame=tk.LabelFrame(window)
tool_frame.place(height=100,width=600,rely=0.75,relx=0.45)
# Buttons
button1 = tk.Button(data_frame, text="Browse A File", command=lambda: File_dialog())
button1.place(rely=0.65, relx=0.50)
button2 = tk.Button(data_frame, text="Load File", command=lambda: Load_excel_data())
button2.place(rely=0.65, relx=0.30)
button3 = tk.Button(tool_frame, text="Ajout", command=lambda: ajout())
button3.place(rely=0.65,relx=0.30)
button4=tk.Button(tool_frame,text="Supprimer",command=lambda: supp())
button4.place(rely=0.75,relx=0.40)
# The file/file path text
label_file = ttk.Label(data_frame, text="No File Selected")
label_file.place(rely=0, relx=0)
## Treeview Widget
tv1 = ttk.Treeview(frame)
tv1.place(relheight=1, relwidth=1) # set the height and width of the widget to 100% of its container (frame1).
treescrolly = tk.Scrollbar(frame, orient="vertical", command=tv1.yview) # command means update the yaxis view of the widget
treescrollx = tk.Scrollbar(frame, orient="horizontal", command=tv1.xview) # command means update the xaxis view of the widget
tv1.configure(xscrollcommand=treescrollx.set, yscrollcommand=treescrolly.set) # assign the scrollbars to the Treeview Widget
treescrollx.pack(side="bottom", fill="x") # make the scrollbar fill the x axis of the Treeview widget
treescrolly.pack(side="right", fill="y") # make the scrollbar fill the y axis of the Treeview widget
def File_dialog():
"""This Function will open the file explorer and assign the chosen file path to label_file"""
filename = filedialog.askopenfilename(initialdir="/",
title="Select A File",
filetype=(("xlsx files", "*.xlsx"),("All Files", "*.*")))
label_file["text"] = filename
return None
def refresh_df():
file_path = label_file["text"]
excel_filename = r"{}".format(file_path)
df = pd.read_excel(excel_filename)
clear_data()
tv1["column"] = list(df.columns)
tv1["show"] = "headings"
for column in tv1["columns"]:
tv1.heading(column, text=column) # let the column heading = column name
df_rows = df.to_numpy().tolist() # turns the dataframe into a list of lists
for row in df_rows:
tv1.insert("", "end", values=row) # inserts each list into the treeview. For parameters see https://docs.python.org/3/library/tkinter.ttk.html#tkinter.ttk.Treeview.insert
def Load_excel_data():
"""If the file selected is valid this will load the file into the Treeview"""
file_path = label_file["text"]
try:
excel_filename = r"{}".format(file_path)
if excel_filename[-4:] == ".csv":
df = pd.read_csv(excel_filename)
else:
df = pd.read_excel(excel_filename)
except ValueError:
tk.messagebox.showerror("Information", "The file you have chosen is invalid")
return None
except FileNotFoundError:
tk.messagebox.showerror("Information", f"No such file as {file_path}")
return None
clear_data()
tv1["column"] = list(df.columns)
tv1["show"] = "headings"
for column in tv1["columns"]:
tv1.heading(column, text=column) # let the column heading = column name
df_rows = df.to_numpy().tolist() # turns the dataframe into a list of lists
for row in df_rows:
tv1.insert("", "end", values=row)
return None
def clear_data():
tv1.delete(*tv1.get_children())
return None
def supp(df):
ligne_supp=simpledialog.askinteger("Input","Quel ligne voulez vous supprimer ?")
df=df.drop(ligne_supp)
#df.reset_index(inplace=True)
clear_data()
tv1["column"] = list(df.columns)
tv1["show"] = "headings"
for column in tv1["columns"]:
tv1.heading(column, text=column) # let the column heading = column name
df_rows = df.to_numpy().tolist() # turns the dataframe into a list of lists
for row in df_rows:
tv1.insert("", "end", values=row)
window.mainloop()
My problem is that I want a single global DataFrame I can use in my different functions, because right now each function writes to a new df, so the df shown in my GUI never gets updated.
But since I ask for the path of my Excel file at runtime, I can't declare my df in my main().
If you have hints or solutions I will be grateful!
I've tried reloading my df in each function, but that wrote to my Excel file and created a new column, so I can't use my function twice.
A:
Global objects (not recommended)
You need to create a global object:
global variable_name
and in every function or method that uses it, you need to mark that you want to use this object by putting
global variable_name
at the beginning of the function/method.
Recommended
Pass the df as a function argument, or create a class and store the df in an internal attribute.
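As a concrete illustration of the recommended class-based approach, here is a minimal sketch adapted to the Treeview/Excel workflow above. The names and the delete logic are simplified placeholders rather than the original project's code, and reading .xlsx files assumes openpyxl is installed:
import tkinter as tk
from tkinter import filedialog, ttk
import pandas as pd

class StockManager:
    """Keeps the DataFrame as an attribute so every callback works on the same data."""

    def __init__(self, root):
        self.df = pd.DataFrame()            # shared state instead of a global
        self.tree = ttk.Treeview(root)
        self.tree.pack(fill="both", expand=True)
        tk.Button(root, text="Load File", command=self.load_file).pack()
        tk.Button(root, text="Supprimer", command=self.delete_first_row).pack()

    def load_file(self):
        path = filedialog.askopenfilename(filetypes=[("xlsx files", "*.xlsx"), ("All Files", "*.*")])
        if path:
            self.df = pd.read_excel(path)
            self.refresh()

    def delete_first_row(self):
        # placeholder for supp(): drop a row, then redraw from the same DataFrame
        if not self.df.empty:
            self.df = self.df.drop(self.df.index[0])
            self.refresh()

    def refresh(self):
        self.tree.delete(*self.tree.get_children())
        self.tree["columns"] = list(self.df.columns)
        self.tree["show"] = "headings"
        for col in self.df.columns:
            self.tree.heading(col, text=col)
        for row in self.df.to_numpy().tolist():
            self.tree.insert("", "end", values=row)

root = tk.Tk()
app = StockManager(root)
root.mainloop()
Because every callback reads and writes self.df, refresh() always redraws the Treeview from the same DataFrame, which is exactly the behaviour the question asks for.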
|
Have a global df in tkinter
|
Hello I am on a project for my school and I have to code a stock manager.
import tkinter as tk
from tkinter import filedialog, messagebox, ttk, simpledialog
from PIL import Image,ImageTk
import pandas as pd
# initalise the tkinter GUI
window = tk.Tk()
window.geometry("1280x720") # set the root dimensions
window.pack_propagate(False) # tells the root to not let the widgets inside it determine its size.
window.resizable(0, 0) # makes the root window fixed in size.
window.title('e-Zone Manager')
bg=tk.PhotoImage(file='image2.png')
canva=tk.Canvas(window)
canva.pack(fill="both",expand=True)
canva.create_image(0,0,image=bg,anchor="nw")
logo=tk.PhotoImage(file="logo.png")
window.iconphoto(False,logo)
# Frame for TreeView
frame = tk.LabelFrame(window,)
frame.place(height=300, width=750, rely=0.2, relx=0.21)
# Frame for open file dialog
data_frame = tk.LabelFrame(window, text="Open File")
data_frame.place(height=100, width=400, rely=0.75, relx=0.05)
#Frame pour les outils
tool_frame=tk.LabelFrame(window)
tool_frame.place(height=100,width=600,rely=0.75,relx=0.45)
# Buttons
button1 = tk.Button(data_frame, text="Browse A File", command=lambda: File_dialog())
button1.place(rely=0.65, relx=0.50)
button2 = tk.Button(data_frame, text="Load File", command=lambda: Load_excel_data())
button2.place(rely=0.65, relx=0.30)
button3 = tk.Button(tool_frame, text="Ajout", command=lambda: ajout())
button3.place(rely=0.65,relx=0.30)
button4=tk.Button(tool_frame,text="Supprimer",command=lambda: supp())
button4.place(rely=0.75,relx=0.40)
# The file/file path text
label_file = ttk.Label(data_frame, text="No File Selected")
label_file.place(rely=0, relx=0)
## Treeview Widget
tv1 = ttk.Treeview(frame)
tv1.place(relheight=1, relwidth=1) # set the height and width of the widget to 100% of its container (frame1).
treescrolly = tk.Scrollbar(frame, orient="vertical", command=tv1.yview) # command means update the yaxis view of the widget
treescrollx = tk.Scrollbar(frame, orient="horizontal", command=tv1.xview) # command means update the xaxis view of the widget
tv1.configure(xscrollcommand=treescrollx.set, yscrollcommand=treescrolly.set) # assign the scrollbars to the Treeview Widget
treescrollx.pack(side="bottom", fill="x") # make the scrollbar fill the x axis of the Treeview widget
treescrolly.pack(side="right", fill="y") # make the scrollbar fill the y axis of the Treeview widget
def File_dialog():
"""This Function will open the file explorer and assign the chosen file path to label_file"""
filename = filedialog.askopenfilename(initialdir="/",
title="Select A File",
filetype=(("xlsx files", "*.xlsx"),("All Files", "*.*")))
label_file["text"] = filename
return None
def refresh_df():
file_path = label_file["text"]
excel_filename = r"{}".format(file_path)
df = pd.read_excel(excel_filename)
clear_data()
tv1["column"] = list(df.columns)
tv1["show"] = "headings"
for column in tv1["columns"]:
tv1.heading(column, text=column) # let the column heading = column name
df_rows = df.to_numpy().tolist() # turns the dataframe into a list of lists
for row in df_rows:
tv1.insert("", "end", values=row) # inserts each list into the treeview. For parameters see https://docs.python.org/3/library/tkinter.ttk.html#tkinter.ttk.Treeview.insert
def Load_excel_data():
"""If the file selected is valid this will load the file into the Treeview"""
file_path = label_file["text"]
try:
excel_filename = r"{}".format(file_path)
if excel_filename[-4:] == ".csv":
df = pd.read_csv(excel_filename)
else:
df = pd.read_excel(excel_filename)
except ValueError:
tk.messagebox.showerror("Information", "The file you have chosen is invalid")
return None
except FileNotFoundError:
tk.messagebox.showerror("Information", f"No such file as {file_path}")
return None
clear_data()
tv1["column"] = list(df.columns)
tv1["show"] = "headings"
for column in tv1["columns"]:
tv1.heading(column, text=column) # let the column heading = column name
df_rows = df.to_numpy().tolist() # turns the dataframe into a list of lists
for row in df_rows:
tv1.insert("", "end", values=row)
return None
def clear_data():
tv1.delete(*tv1.get_children())
return None
def supp(df):
ligne_supp=simpledialog.askinteger("Input","Quel ligne voulez vous supprimer ?")
df=df.drop(ligne_supp)
#df.reset_index(inplace=True)
clear_data()
tv1["column"] = list(df.columns)
tv1["show"] = "headings"
for column in tv1["columns"]:
tv1.heading(column, text=column) # let the column heading = column name
df_rows = df.to_numpy().tolist() # turns the dataframe into a list of lists
for row in df_rows:
tv1.insert("", "end", values=row)
window.mainloop()
My problem is I want to have a global dataframe i can use in my differents functions, because now in each function i write on a new df and this don't update my df in my GUI.
But the fact that I ask for the path of my excel file, make that I can't declare my df in my main().
If you have hints or solutions I will be grateful !
I've tried to upload my df in each function but this wrote on my excel file ands create a new column, so I can't use my function two time.
|
[
"Global objects (not recommended)\nYou need to create global object\nglobal variable_name\nAnd in every function or method u need to mark that u want to use this object by at the beginning of the function/method\nglobal variable_name\nrecommended\nPass df as function argument or create class and create internal attribute to store df\n"
] |
[
0
] |
[] |
[] |
[
"dataframe",
"pandas",
"python",
"tkinter"
] |
stackoverflow_0074521278_dataframe_pandas_python_tkinter.txt
|
Q:
Python 3 - How to terminate a thread instantly?
In my code (a complex GUI application with Tkinter) I have a thread defined in a custom object (a progress bar). It runs a function with a while loop like this:
def Start(self):
while self.is_active==True:
do it..
time.sleep(1)
do it..
time.sleep(1)
def Stop(self):
self.is_active=False
It can terminate only when another piece of code, placed in another thread, changes the attribute self.is_active using the method self.Stop(). I have the same situation in another custom object (a counter), and both of them have to work together while the other thread (the main one) runs.
The code works, but I realized that the two threads associated with the progress bar and the counter don't terminate instantly as I wanted: before terminating, they have to wait for their functions to finish, and these are slow because of the time.sleep(1) instructions. From the user's point of view, this means seeing the main thread end while the progress bar and the counter terminate LATE, which I don't like.
To be honest I don't know how to solve this issue. Is there a way to force a thread to terminate instantly without waiting for the end of the function?
A:
First off, to be clear, hard-killing a thread is a terrible idea in any language, and Python doesn't support it; if nothing else, the risk of that thread holding a lock which is never unlocked, causing any thread that tries to acquire it to deadlock, is a fatal flaw.
If you don't care about the thread at all, you can create it with the daemon=True argument, and it will die if all non-daemon threads in the process have exited. But if the thread really should die with proper cleanup (e.g. it might have with statements or the like that manage cleanup of resources outside the process, that won't be cleaned up on process termination), that's not a real solution.
That said, you can avoid waiting a second or more by switching from using a plain bool and time.sleep to using an Event and using the .wait method on it. This will allow the "sleeps" to be interrupted immediately, at the small expense of requiring you to reverse your condition (because Event.wait only blocks while it's false/unset, so you need the flag to be based on when you should stop, not when you are currently active):
class Spam:
def __init__(self):
self.should_stop = threading.Event() # Create an unset event on init
def Start(self):
while not self.should_stop.is_set():
# do it..
if self.should_stop.wait(1):
break
# do it..
if self.should_stop.wait(1):
break
def Stop(self):
self.should_stop.set()
On modern Python (3.1 and higher) the wait method returns True if the event was set (on beginning the wait or because it got set while waiting), and False otherwise, so whenever wait returns True, that means you were told to stop and you can immediately break out of the loop. You also get notified almost immediately, instead of waiting up to one second before you can check the flag.
This won't cause the real "do it.." code to exit immediately, but from what you said, it sounds like that part of the code isn't all that long, so waiting for it to complete isn't a big hassle.
If you really want to preserve the is_active attribute for testing whether it's still active, you can define it as a property that reverses the meaning of the Event, e.g.:
@property
def is_active(self):
return not self.should_stop.is_set()
A:
the safest way to do it without risking a segmentation fault, is to return.
def Start(self):
while self.is_active==True:
do it..
if not self.is_active: return
time.sleep(1)
if not self.is_active: return
do it..
if not self.is_active: return
time.sleep(1)
def Stop(self):
self.is_active=False
python threads need to free the associated resources, and while "killing" the thread is possible using some C tricks, you will be risking a segmentation fault or a memory leak.
here is a cleaner way to do it.
class MyError(Exception):
pass
def Start(self):
try:
while self.is_active==True:
do it..
self.check_termination()
time.sleep(1)
self.check_termination()
do it..
self.check_termination()
time.sleep(1)
except MyError:
return
def check_termination(self):
if not self.is_active:
raise MyError
and you can call self.check_termination() from inside any function to terminate this loop, not necessarily from inside Start directly.
Edit: ShadowRanger solution handles the "interruptable wait" better, i am just keeping this for implementing a kill switch for the thread that can be checked from anywhere inside the thread.
|
Python 3 - How to terminate a thread instantly?
|
In my code (a complex GUI application with Tkinter) I have a thread defined in a custom object (a progress bar). It runs a function with a while cicle like this:
def Start(self):
while self.is_active==True:
do it..
time.sleep(1)
do it..
time.sleep(1)
def Stop(self):
self.is_active=False
It can terminate only when another piece of code, placed in another thread, changes the attribute self.is_active using the method self.Stop(). I have the same situation in another custom object (a counter) and both of them have to work together when the another thread (the main one) works.
The code works, but I realized that the two threads associated with the progress bar and the counter don't terminate instantly as I wanted, because before to temrinate, they need to wait the end of their functions, and these ones are slow becose of the time.sleep(1) instructions. From the user point of view, it means see the end of the main thread with the progress bar and the cunter that terminate LATE and I don't like it.
To be honest I don't know how to solve this issue. Is there a way to force a thread to terminate instantly without waiting the end of the function?
|
[
"First off, to be clear, hard-killing a thread is a terrible idea in any language, and Python doesn't support it; if nothing else, the risk of that thread holding a lock which is never unlocked, causing any thread that tries to acquire it to deadlock, is a fatal flaw.\nIf you don't care about the thread at all, you can create it with the daemon=True argument, and it will die if all non-daemon threads in the process have exited. But if the thread really should die with proper cleanup (e.g. it might have with statements or the like that manage cleanup of resources outside the process, that won't be cleaned up on process termination), that's not a real solution.\nThat said, you can avoid waiting a second or more by switching from using a plain bool and time.sleep to using an Event and using the .wait method on it. This will allow the \"sleeps\" to be interrupted immediately, at the small expense of requiring you to reverse your condition (because Event.wait only blocks while it's false/unset, so you need the flag to be based on when you should stop, not when you are currently active):\nclass Spam:\n def __init__(self):\n self.should_stop = threading.Event() # Create an unset event on init\n \n def Start(self):\n while not self.should_stop.is_set():\n # do it..\n\n if self.should_stop.wait(1):\n break\n\n # do it..\n\n if self.should_stop.wait(1):\n break\n\n def Stop(self):\n self.should_stop.set()\n\nOn modern Python (3.1 and higher) the wait method returns True if the event was set (on beginning the wait or because it got set while waiting), and False otherwise, so whenever wait returns True, that means you were told to stop and you can immediately break out of the loop. You also get notified almost immediately, instead of waiting up to one second before you can check the flag.\nThis won't cause the real \"do it..\" code to exit immediately, but from what you said, it sounds like that part of the code isn't all that long, so waiting for it to complete isn't a big hassle.\nIf you really want to preserve the is_active attribute for testing whether it's still active, you can define it as a property that reverses the meaning of the Event, e.g.:\n @property\n def is_active(self):\n return not self.should_stop.is_set()\n\n",
"the safest way to do it without risking a segmentation fault, is to return.\ndef Start(self):\n while self.is_active==True:\n do it..\n if not self.is_active: return\n time.sleep(1)\n if not self.is_active: return\n do it..\n if not self.is_active: return\n time.sleep(1)\n \ndef Stop(self):\n self.is_active=False\n\npython threads need to free the associated resources, and while \"killing\" the thread is possible using some C tricks, you will be risking a segmentation fault or a memory leak.\nhere is a cleaner way to do it.\nclass MyError(Exception):\n pass\ndef Start(self):\n try:\n while self.is_active==True:\n do it..\n self.check_termination()\n time.sleep(1)\n self.check_termination()\n do it..\n self.check_termination()\n time.sleep(1)\n except MyError:\n return\n\ndef check_termination(self):\n if not self.is_active:\n raise MyError\n\nand you can call self.check_termination() from inside any function to terminate this loop, not necessarily from inside Start directly.\nEdit: ShadowRanger solution handles the \"interruptable wait\" better, i am just keeping this for implementing a kill switch for the thread that can be checked from anywhere inside the thread.\n"
] |
[
3,
0
] |
[] |
[] |
[
"multithreading",
"python",
"python_3.x",
"python_multithreading"
] |
stackoverflow_0074521233_multithreading_python_python_3.x_python_multithreading.txt
|
Q:
How hide image path in django?
Is it possible to hide the path to the image so that it is not visible when inspecting the element? I don't want users to know where my images are stored. How can I hide this in Django?
<div class="avatar avatar--large active">
<img src="{{user.avatar.url}}"/>
</div>
Can you give an example with my code?
A:
To remove html from your DOM
$("#a").remove();
if you want to hide only then use
$("#a").hide();
|
How hide image path in django?
|
Is it possible to hide the path to the image so that it is not visible when inspecting the element? I don't want users to know where my images are stored. How can I hide this in Django?
<div class="avatar avatar--large active">
<img src="{{user.avatar.url}}"/>
</div>
Can you give an example with my code?
|
[
"To remove html from your DOM\n $(\"#a\").remove();\n\nif you want to hide only then use\n$(\"#a\").hide();\n\n"
] |
[
0
] |
[] |
[] |
[
"django",
"html",
"python"
] |
stackoverflow_0074521340_django_html_python.txt
|
Q:
creating a list of all possible combination from a given list of words in python
I have a problem creating a list of all possible combinations of a given list of words.
The result should be one combination per line for all possible words. The maximum length of a combination is based on the number of words given in the input file; this means, if the file contains 7 words, a combination is at most 7 words long. The output should be formatted as shown below:
germany
germanygermany
germanygermanygermany
germanyspain
germanygermanyspain
germanygermanyspain
etc etc.
I've googled a bit and figured out that itertools would be a possible solution for me.
The given words are located in a file called input.txt.
I used this code from the Stack Overflow entry here:
How to get all possible combinations of a list’s elements?
I just show the main part, as the file reading and output are not part of the problem here.
So my given list of words is:
germany
spain
albania
netherlands
which works fine
from itertools import combinations
features = ['germany', 'spain', 'albania']
tmp = []
for i in range(len(features)):
oc = combinations(features, i + 1)
for c in oc:
tmp.append(list(c))
print (tmp)
The output is not as expected.
As my list contains 3 words, I changed the code:
germany
spain
albania
which works fine
from itertools import combinations
features = ['germany', 'spain', 'albania']
tmp = []
for i in range(len(features)):
oc = combinations(features, i + 1)
for c in oc:
tmp.append(list(c))
print (tmp)
But I believe the result is not as expected; it should be ALL possible combinations.
Some combinations are missing, for example:
germany
germany germany
germany germany spain
germany germany germany
or something.
(The output is limited to 3 as the given list contains 3 words in the original question.)
How do I get the germany germany etc. combinations in the output, and why are they missing?
I believe I would have the same issue when I use numbers as PINs or something:
it can't just go from 0 to 9999; there should also be 00, 000 and 0000 in the list.
best regards
Fred
A:
I believe you want to use the function combinations_with_replacement:
from itertools import combinations_with_replacement
features = ['germany', 'spain', 'albania']
tmp = []
for i in range(len(features)):
oc = combinations_with_replacement(features, i + 1)
for c in oc:
tmp.append(list(c))
print (tmp)
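For comparison, and closer to the asker's PIN example where order matters and repeats such as 00 and 000 should appear, a minimal sketch using itertools.product instead (variable names are my own):
from itertools import product

features = ['germany', 'spain', 'albania']
sequences = []
for length in range(1, len(features) + 1):
    # product with repeat=length yields every ordered sequence of that length,
    # including repeated words such as ('germany', 'germany')
    for combo in product(features, repeat=length):
        sequences.append(''.join(combo))
print(sequences)

This produces 3 + 9 + 27 = 39 strings for a 3-word list, matching the "germanygermany" style output from the question.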
|
creating a list of all possible combination from a given list of words in python
|
I have a problem creating a list of all possible combinations of a given list of words.
The result should be one combination per line for all possible words. The maximum length of a combination is based on the number of words given in the input file; this means, if the file contains 7 words, a combination is at most 7 words long. The output should be formatted as shown below:
germany
germanygermany
germanygermanygermany
germanyspain
germanygermanyspain
germanygermanyspain
etc etc.
I've googled a bit and figured out that itertools would be a possible solution for me.
The given words are located in a file called input.txt.
I used this code from the Stack Overflow entry here:
How to get all possible combinations of a list’s elements?
I just show the main part, as the file reading and output are not part of the problem here.
So my given list of words is:
germany
spain
albania
netherlands
which works fine
from itertools import combinations
features = ['germany', 'spain', 'albania']
tmp = []
for i in range(len(features)):
oc = combinations(features, i + 1)
for c in oc:
tmp.append(list(c))
print (tmp)
The output is not as expected.
As my list contains 3 words, I changed the code:
germany
spain
albania
which works fine
from itertools import combinations
features = ['germany', 'spain', 'albania']
tmp = []
for i in range(len(features)):
oc = combinations(features, i + 1)
for c in oc:
tmp.append(list(c))
print (tmp)
But I believe the result is not as expected; it should be ALL possible combinations.
Some combinations are missing, for example:
germany
germany germany
germany germany spain
germany germany germany
or something.
(The output is limited to 3 as the given list contains 3 words in the original question.)
How do I get the germany germany etc. combinations in the output, and why are they missing?
I believe I would have the same issue when I use numbers as PINs or something:
it can't just go from 0 to 9999; there should also be 00, 000 and 0000 in the list.
best regards
Fred
|
[
"I believe you want to use the function combinations_with_replacement:\nfrom itertools import combinations_with_replacement\n\n\nfeatures = ['germany', 'spain', 'albania']\ntmp = []\nfor i in range(len(features)):\n oc = combinations_with_replacement(features, i + 1)\n for c in oc:\n tmp.append(list(c))\nprint (tmp)\n\n"
] |
[
2
] |
[] |
[] |
[
"combinations",
"python",
"string"
] |
stackoverflow_0074521479_combinations_python_string.txt
|
Q:
Bokeh for presenting data on Italy map
I need to use Bokeh to plot data on a map of Italy.
To explain, something similar to:
http://docs.bokeh.org/en/latest/docs/gallery/texas.html
... but using Italian provinces instead of Texas counties.
Can you point me in the right direction?
Any other tools you would suggest?
Thanks in advance, Gianluca
A:
I don't know if it can still help, but at http://www.istat.it/it/archivio/209722 (istat italian website)
you can find numerous free detailed and updated borders of Italian provinces and regions in .shp format.
Let me know if you manage to obtain your map and if yes, how, ty.
A:
I managed to create a plot with an Italian map.
I downloaded the borders of Italy from github (region borders or province borders).
Then I created a dataframe containing 3 columns: [Province (or region), x_bord, y_bord].
In the end I used it as a ColumnDataSource and created a Figure with patches.
Suggestion: if you want to improve your render, you can add a hover tool creating a new column with desired values (e.g. number of inhabitants). Moreover you can create a new column containing rgb values (RGB(r,g,b)) if you want different colors for each province or region.
source= ColumnDataSource(dict(x=italy.x_bord,y=italy.y_bord,prov=italy.prov,val=italy.val,colors=italy.colors))
f = figure(...)
f.patches(xs='x',ys='y',source=source, fill_color='colors')
A:
The best way to do this is with shape files from ISTAT.
If you want, you can use my library for geo analysis within ITALY in an easier way (here you can find the github link).
You just need to download the whl, install it (you can see the readme part) and use it.
Here is an example:
Suppose you have a dataframe (df) like this
then you can run this code
from geo_ita.plot import plot_choropleth_map_provinciale_interactive
plot_choropleth_map_provinciale_interactive(df, province_tag='provincia', value_tag='valore')
|
Bokeh for presenting data on Italy map
|
I need to use Bokeh to plot data on a map of Italy.
To explain, something similar to:
http://docs.bokeh.org/en/latest/docs/gallery/texas.html
... but using Italian provinces instead of Texas counties.
Can you point me in the right direction?
Any other tools you would suggest?
Thanks in advance, Gianluca
|
[
"I don't know if it can still help, but at http://www.istat.it/it/archivio/209722 (istat italian website)\nyou can find numerous free detailed and updated borders of Italian provinces and regions in .shp format.\nLet me know if you manage to obtain your map and if yes, how, ty.\n",
"I managed to create a plot with an Italian map.\nI downloaded the borders of Italy from github (region borders or province borders).\nThen I created a dataframe containing 3 columns: [Province (or region), x_bord, y_bord].\nIn the end I used it as a ColumnDataSource and create a Figure with patches.\nSuggestion: if you want to improve your render, you can add a hover tool creating a new column with desired values (e.g. number of inhabitants). Moreover you can create new column containing rgb values (RGB(r,g,b)) if you want different colors for each province or region.\nsource= ColumnDataSource(dict(x=italy.x_bord,y=italy.y_bord,prov=italy.prov,val=italy.val,colors=italy.colors))\nf = figure(...)\nf.patches(xs='x',ys='y',source=source, fill_color='colors')\n\n",
"The best way to do this is with shape files from ISTAT.\nIf you want you can use my library for geo analysis within ITALY in easier way (here you can find the github link).\nYou just need to download the whl, install (you can see the readme part) and use it.\nHere an example:\nSuppose you have a dataframe (df) like this\n\nthen you can run this code\nfrom geo_ita.plot import plot_choropleth_map_provinciale_interactive\nplot_choropleth_map_provinciale_interactive(df, province_tag='provincia', value_tag='valore')\n\n"
] |
[
1,
0,
0
] |
[] |
[] |
[
"bokeh",
"geolocation",
"geospatial",
"python"
] |
stackoverflow_0041932493_bokeh_geolocation_geospatial_python.txt
|
Q:
Saving Excel file with Python (Openpyxl)
I have a problem: I have an Excel file (.xlsx), and this file has some buttons in it to help change the language and a button that makes a report based on the data.
The problem is... if I write something in the file and then save it with openpyxl, the file will lose those buttons and look like a normal Excel file.
What can I use to save that file with the same format?
I installed an add-in to see those buttons.
What can I do?
EDIT: I tried to save it as .xlsm but it doesn't open if I do that
A:
.xlsx does not have macros. Save it as .xlsm instead.
A:
Try this:
wb.save('testsave.xlsm');
A:
I had a similar problem and I have found a workaround for it:
Problem
I have an .xlsm file that contain few sheets, one of them has some macros and buttons, I read it on python using openpyxl function load_workbook (and passing the param keep_vba=True to make sure the macros are preserved), I write some data into another sheet that has no macros nor buttons, and I save the file using .save(...) . When I open the file using excel, the macros are still there but the buttons are no longer buttons, they became images.
Solution
Added a macro that creates the buttons on file opening ( and macros enabling of course). This worked very well in my case.
|
Saving Excel file with Python (Openpyxl)
|
I have a problem: I have an Excel file (.xlsx), and this file has some buttons in it to help change the language and a button that makes a report based on the data.
The problem is... if I write something in the file and then save it with openpyxl, the file will lose those buttons and look like a normal Excel file.
What can I use to save that file with the same format?
I installed an add-in to see those buttons.
What can I do?
EDIT: I tried to save it as .xlsm but it doesn't open if I do that
|
[
".xlsx dose not have macros. Save it as .xlsm instead.\n",
"Try this: \nwb.save('testsave.xlsm');\n\n",
"I had a similar problem and I have found a workaround for it:\nProblem\nI have an .xlsm file that contain few sheets, one of them has some macros and buttons, I read it on python using openpyxl function load_workbook (and passing the param keep_vba=True to make sure the macros are preserved), I write some data into another sheet that has no macros nor buttons, and I save the file using .save(...) . When I open the file using excel, the macros are still there but the buttons are no longer buttons, they became images.\nSolution\nAdded a macro that creates the buttons on file opening ( and macros enabling of course). This worked very well in my case.\n"
] |
[
0,
0,
0
] |
[] |
[] |
[
"excel",
"python"
] |
stackoverflow_0060390029_excel_python.txt
|
Q:
Web Scraping when Table is one click away
I am trying to extract table data from this website https://www.svk.se/om-kraftsystemet/kontrollrummet/ where I want the last segment called "Förbrukning I Sverige". I am trying to extract with this code:
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd
driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
driver.get("https://www.svk.se/om-kraftsystemet/kontrollrummet/")
html = driver.page_source
tables = pd.read_html(html)
data = tables[1]
driver.close()
ValueError: No tables found
The issue is that the table I want is not immediately shown, but the default is to show a graph. To display the table I need to push the "Tabell" button, which I can't do before the Error is shown. Is there a solution to this?
(Eventually, I want to extract data from multiple days from that table, so if someone wants to point me in the right direction to be able to do that I would be grateful.)
A:
You can try the next working example where you have to accept cookies at first then you have to click on table button using right element locator strategy along with WebDriverWait and execution of JavaScript.
from selenium import webdriver
import time
from bs4 import BeautifulSoup
import pandas as pd
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
options = webdriver.ChromeOptions()
options.add_experimental_option("detach", True)#optional
webdriver_service = Service("./chromedriver") #Your chromedriver path
driver = webdriver.Chrome(service=webdriver_service,options=options)
data = []
driver.get('https://www.svk.se/om-kraftsystemet/kontrollrummet/')
driver.maximize_window()
time.sleep(3)
cookie = tbutton = WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, '//*[@class="btn wide cookie-accept-all"]'))).click()
tbutton = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, '(//*[@aria-controls="Agsid-3"])[2]')))
driver.execute_script("arguments[0].click();", tbutton)
time.sleep(1)
soup = BeautifulSoup(driver.page_source,"html.parser")
df = pd.read_html(str(soup))[0]
print(df)
Output:
Minut Sekund 00 Sekund 01 Sekund 02 ... Sekund 56 Sekund 57 Sekund 58 Sekund 59
0 15:54 49.949 Hz 49.949 Hz 49.948 Hz ... 49.942 Hz 49.942 Hz 49.942 Hz 49.942 Hz
1 15:55 49.942 Hz 49.942 Hz 49.942 Hz ... 49.941 Hz 49.941 Hz 49.942 Hz 49.942 Hz
2 15:56 49.942 Hz 49.942 Hz 49.943 Hz ... 49.952 Hz 49.953 Hz 49.953 Hz 49.953 Hz
3 15:57 49.953 Hz 49.954 Hz 49.954 Hz ... 49.942 Hz 49.942 Hz 49.942 Hz 49.942 Hz
4 15:58 49.942 Hz 49.942 Hz 49.943 Hz ... 49.943 Hz 49.943 Hz 49.943 Hz 49.943 Hz
5 15:59 49.943 Hz 49.943 Hz 49.942 Hz ... 49.959 Hz 49.959 Hz 49.959 Hz 49.959 Hz
6 16:00 49.959 Hz 49.96 Hz 49.96 Hz ... 50 Hz 50 Hz 50.001 Hz 50.001 Hz
7 16:01 50.001 Hz 50.002 Hz 50.002 Hz ... 50.025 Hz 50.025 Hz 50.026 Hz 50.026 Hz
8 16:02 50.027 Hz 50.027 Hz 50.028 Hz ... 50.043 Hz 50.044 Hz 50.044 Hz 50.045 Hz
9 16:03 50.045 Hz 50.045 Hz 50.046 Hz ... 50.046 Hz 50.046 Hz 50.047 Hz 50.047 Hz
10 16:04 50.047 Hz 50.048 Hz 50.048 Hz ... 50.045 Hz 50.046 Hz 50.046 Hz 50.046 Hz
11 16:05 50.047 Hz 50.047 Hz 50.048 Hz ... 50.035 Hz 50.035 Hz 50.035 Hz 50.034 Hz
12 16:06 50.034 Hz 50.034 Hz 50.034 Hz ... 50.033 Hz 50.032 Hz 50.032 Hz 50.031 Hz
13 16:07 50.03 Hz 50.03 Hz 50.029 Hz ... 50.027 Hz 50.026 Hz 50.026 Hz 50.025 Hz
14 16:08 50.024 Hz 50.023 Hz 50.023 Hz ... 50.02 Hz 50.02 Hz 50.02 Hz 50.02 Hz
15 16:09 50.02 Hz 50.019 Hz 50.019 Hz ... 50.013 Hz 50.013 Hz 50.013 Hz 50.014 Hz
16 16:10 50.014 Hz 50.014 Hz 50.014 Hz ... 50.003 Hz 50.003 Hz 50.003 Hz 50.003 Hz
17 16:11 50.002 Hz 50.002 Hz 50.002 Hz ... 50.019 Hz 50.02 Hz 50.02 Hz 50.021 Hz
18 16:12 50.021 Hz 50.021 Hz 50.022 Hz ... 50.015 Hz 50.015 Hz 50.014 Hz 50.014 Hz
19 16:13 50.014 Hz 50.014 Hz 50.013 Hz ... 50.002 Hz 50.001 Hz 50.001 Hz 50.001 Hz
20 16:14 50.001 Hz 50.001 Hz 50.001 Hz ... 50.02 Hz 50.02 Hz 50.02 Hz 50.02 Hz
21 16:15 50.02 Hz 50.02 Hz 50.02 Hz ... 50.015 Hz 50.015 Hz 50.015 Hz 50.015 Hz
22 16:16 50.015 Hz 50.015 Hz 50.015 Hz ... 50.016 Hz 50.016 Hz 50.016 Hz 50.016 Hz
23 16:17 50.015 Hz 50.015 Hz 50.015 Hz ... 50.023 Hz 50.023 Hz 50.024 Hz 50.024 Hz
24 16:18 50.024 Hz 50.024 Hz 50.025 Hz ... 50.022 Hz 50.021 Hz 50.021 Hz 50.021 Hz
25 16:19 50.021 Hz 50.021 Hz 50.021 Hz ... 50.039 Hz 50.039 Hz 50.039 Hz 50.039 Hz
26 16:20 50.038 Hz 50.038 Hz 50.038 Hz ... 50.023 Hz 50.023 Hz 50.023 Hz 50.023 Hz
27 16:21 50.022 Hz 50.022 Hz 50.022 Hz ... 50.031 Hz 50.032 Hz 50.032 Hz 50.032 Hz
28 16:22 50.032 Hz 50.032 Hz 50.032 Hz ... 50.029 Hz 50.029 Hz 50.029 Hz 50.03 Hz
29 16:23 50.03 Hz 50.03 Hz 50.03 Hz ... 50.043 Hz 50.043 Hz 50.044 Hz 50.044 Hz
30 16:24 50.044 Hz 50.044 Hz 50.045 Hz ... 50.033 Hz 50.033 Hz 50.034 Hz 50.034 Hz
31 16:25 50.034 Hz 50.034 Hz 50.035 Hz ... 50.047 Hz 50.047 Hz 50.047 Hz 50.047 Hz
32 16:26 50.047 Hz 50.047 Hz 50.047 Hz ... 50.045 Hz 50.046 Hz 50.046 Hz 50.046 Hz
33 16:27 50.046 Hz 50.046 Hz 50.046 Hz ... 50.036 Hz 50.037 Hz 50.037 Hz 50.037 Hz
34 16:28 50.037 Hz 50.038 Hz 50.038 Hz ... 50.04 Hz 50.04 Hz 50.04 Hz 50.04 Hz
35 16:29 50.04 Hz 50.04 Hz 50.04 Hz ... 50.039 Hz 50.039 Hz 50.039 Hz 50.04 Hz
36 16:30 50.04 Hz 50.04 Hz 50.04 Hz ... 50.037 Hz 50.037 Hz 50.037 Hz 50.037 Hz
37 16:31 50.038 Hz 50.038 Hz 50.038 Hz ... 50.034 Hz 50.034 Hz 50.034 Hz 50.034 Hz
38 16:32 50.033 Hz 50.033 Hz 50.033 Hz ... 50.041 Hz 50.042 Hz 50.042 Hz 50.042 Hz
39 16:33 50.042 Hz 50.042 Hz 50.042 Hz ... 50.032 Hz 50.031 Hz 50.031 Hz 50.031 Hz
40 16:34 50.03 Hz 50.03 Hz 50.03 Hz ... 50.043 Hz 50.044 Hz 50.044 Hz 50.044 Hz
41 16:35 50.045 Hz 50.045 Hz 50.045 Hz ... 50.032 Hz 50.033 Hz 50.033 Hz 50.034 Hz
42 16:36 50.034 Hz 50.035 Hz 50.035 Hz ... 50.017 Hz 50.017 Hz 50.017 Hz 50.017 Hz
43 16:37 50.017 Hz 50.017 Hz 50.017 Hz ... 50.01 Hz 50.011 Hz 50.011 Hz 50.011 Hz
44 16:38 50.012 Hz 50.012 Hz 50.012 Hz ... 50.021 Hz 50.022 Hz 50.022 Hz 50.022 Hz
45 16:39 50.022 Hz 50.023 Hz 50.023 Hz ... 50.016 Hz 50.016 Hz 50.016 Hz 50.016 Hz
46 16:40 50.016 Hz 50.016 Hz 50.016 Hz ... 50.003 Hz 50.002 Hz 50.002 Hz 50.002 Hz
47 16:41 50.002 Hz 50.002 Hz 50.002 Hz ... 49.999 Hz 49.999 Hz 49.999 Hz 50 Hz
48 16:42 50 Hz 50 Hz 50 Hz ... 50.004 Hz 50.004 Hz 50.004 Hz 50.004 Hz
49 16:43 50.004 Hz 50.004 Hz 50.004 Hz ... 49.998 Hz 49.998 Hz 49.998 Hz 49.998 Hz
50 16:44 49.999 Hz 49.999 Hz 49.999 Hz ... 49.989 Hz 49.989 Hz 49.989 Hz 49.989 Hz
51 16:45 49.989 Hz 49.99 Hz 49.99 Hz ... 49.994 Hz 49.994 Hz 49.994 Hz 49.994 Hz
52 16:46 49.994 Hz 49.994 Hz 49.994 Hz ... 49.988 Hz 49.988 Hz 49.988 Hz 49.988 Hz
53 16:47 49.988 Hz 49.988 Hz 49.988 Hz ... 49.997 Hz 49.997 Hz 49.998 Hz 49.998 Hz
54 16:48 49.998 Hz 49.999 Hz 49.999 Hz ... 50.007 Hz 50.007 Hz 50.007 Hz 50.006 Hz
55 16:49 50.006 Hz 50.006 Hz 50.006 Hz ... 50.002 Hz 50.002 Hz 50.002 Hz 50.002 Hz
56 16:50 50.002 Hz 50.002 Hz 50.002 Hz ... 50.001 Hz 50.002 Hz 50.002 Hz 50.003 Hz
57 16:51 50.003 Hz 50.004 Hz 50.004 Hz ... 50.006 Hz 50.006 Hz 50.006 Hz 50.006 Hz
58 16:52 50.006 Hz 50.006 Hz 50.006 Hz ... 50.003 Hz 50.003 Hz 50.003 Hz 50.004 Hz
59 16:53 50.004 Hz 50.004 Hz 50.004 Hz ... 50.007 Hz 50.007 Hz 50.007 Hz 50.008 Hz
[60 rows x 61 columns]
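As for the follow-up about collecting data from multiple days: a minimal sketch, assuming you re-run the scrape on a schedule, is to stamp each frame with the scrape date and append it to a growing CSV (the file name is my own placeholder):
import os
import pandas as pd

out_path = "svk_frequency.csv"  # hypothetical output file
df["date"] = pd.Timestamp.now().normalize()  # tag each scrape with its date
# append to the CSV; write the header only on the first run
df.to_csv(out_path, mode="a", header=not os.path.exists(out_path), index=False)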
|
Web Scraping when Table is one click away
|
I am trying to extract table data from this website https://www.svk.se/om-kraftsystemet/kontrollrummet/ where I want the last segment called "Förbrukning I Sverige". I am trying to extract with this code:
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd
driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
driver.get("https://www.svk.se/om-kraftsystemet/kontrollrummet/")
html = driver.page_source
tables = pd.read_html(html)
data = tables[1]
driver.close()
ValueError: No tables found
The issue is that the table I want is not immediately shown, but the default is to show a graph. To display the table I need to push the "Tabell" button, which I can't do before the Error is shown. Is there a solution to this?
(Eventually, I want to extract data from multiple days from that table, so if someone wants to point me in the right direction to be able to do that I would be grateful.)
|
[
"You can try the next working example where you have to accept cookies at first then you have to click on table button using right element locator strategy along with WebDriverWait and execution of JavaScript.\nfrom selenium import webdriver\nimport time\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom selenium.webdriver.chrome.service import Service\n\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\noptions = webdriver.ChromeOptions()\noptions.add_experimental_option(\"detach\", True)#optional\nwebdriver_service = Service(\"./chromedriver\") #Your chromedriver path\ndriver = webdriver.Chrome(service=webdriver_service,options=options)\n\ndata = []\ndriver.get('https://www.svk.se/om-kraftsystemet/kontrollrummet/')\ndriver.maximize_window()\ntime.sleep(3)\n\ncookie = tbutton = WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, '//*[@class=\"btn wide cookie-accept-all\"]'))).click()\ntbutton = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, '(//*[@aria-controls=\"Agsid-3\"])[2]')))\ndriver.execute_script(\"arguments[0].click();\", tbutton)\ntime.sleep(1)\n\nsoup = BeautifulSoup(driver.page_source,\"html.parser\")\n\ndf = pd.read_html(str(soup))[0]\nprint(df)\n\nOutput:\n Minut Sekund 00 Sekund 01 Sekund 02 ... Sekund 56 Sekund 57 Sekund 58 Sekund 59\n0 15:54 49.949 Hz 49.949 Hz 49.948 Hz ... 49.942 Hz 49.942 Hz 49.942 Hz 49.942 Hz \n1 15:55 49.942 Hz 49.942 Hz 49.942 Hz ... 49.941 Hz 49.941 Hz 49.942 Hz 49.942 Hz \n2 15:56 49.942 Hz 49.942 Hz 49.943 Hz ... 49.952 Hz 49.953 Hz 49.953 Hz 49.953 Hz \n3 15:57 49.953 Hz 49.954 Hz 49.954 Hz ... 49.942 Hz 49.942 Hz 49.942 Hz 49.942 Hz \n4 15:58 49.942 Hz 49.942 Hz 49.943 Hz ... 49.943 Hz 49.943 Hz 49.943 Hz 49.943 Hz \n5 15:59 49.943 Hz 49.943 Hz 49.942 Hz ... 49.959 Hz 49.959 Hz 49.959 Hz 49.959 Hz \n6 16:00 49.959 Hz 49.96 Hz 49.96 Hz ... 50 Hz 50 Hz 50.001 Hz 50.001 Hz \n7 16:01 50.001 Hz 50.002 Hz 50.002 Hz ... 50.025 Hz 50.025 Hz 50.026 Hz 50.026 Hz \n8 16:02 50.027 Hz 50.027 Hz 50.028 Hz ... 50.043 Hz 50.044 Hz 50.044 Hz 50.045 Hz \n9 16:03 50.045 Hz 50.045 Hz 50.046 Hz ... 50.046 Hz 50.046 Hz 50.047 Hz 50.047 Hz \n10 16:04 50.047 Hz 50.048 Hz 50.048 Hz ... 50.045 Hz 50.046 Hz 50.046 Hz 50.046 Hz \n11 16:05 50.047 Hz 50.047 Hz 50.048 Hz ... 50.035 Hz 50.035 Hz 50.035 Hz 50.034 Hz \n12 16:06 50.034 Hz 50.034 Hz 50.034 Hz ... 50.033 Hz 50.032 Hz 50.032 Hz 50.031 Hz \n13 16:07 50.03 Hz 50.03 Hz 50.029 Hz ... 50.027 Hz 50.026 Hz 50.026 Hz 50.025 Hz \n14 16:08 50.024 Hz 50.023 Hz 50.023 Hz ... 50.02 Hz 50.02 Hz 50.02 Hz 50.02 Hz \n15 16:09 50.02 Hz 50.019 Hz 50.019 Hz ... 50.013 Hz 50.013 Hz 50.013 Hz 50.014 Hz \n16 16:10 50.014 Hz 50.014 Hz 50.014 Hz ... 50.003 Hz 50.003 Hz 50.003 Hz 50.003 Hz \n17 16:11 50.002 Hz 50.002 Hz 50.002 Hz ... 50.019 Hz 50.02 Hz 50.02 Hz 50.021 Hz \n18 16:12 50.021 Hz 50.021 Hz 50.022 Hz ... 50.015 Hz 50.015 Hz 50.014 Hz 50.014 Hz \n19 16:13 50.014 Hz 50.014 Hz 50.013 Hz ... 50.002 Hz 50.001 Hz 50.001 Hz 50.001 Hz \n20 16:14 50.001 Hz 50.001 Hz 50.001 Hz ... 50.02 Hz 50.02 Hz 50.02 Hz 50.02 Hz \n21 16:15 50.02 Hz 50.02 Hz 50.02 Hz ... 50.015 Hz 50.015 Hz 50.015 Hz 50.015 Hz \n22 16:16 50.015 Hz 50.015 Hz 50.015 Hz ... 50.016 Hz 50.016 Hz 50.016 Hz 50.016 Hz \n23 16:17 50.015 Hz 50.015 Hz 50.015 Hz ... 50.023 Hz 50.023 Hz 50.024 Hz 50.024 Hz \n24 16:18 50.024 Hz 50.024 Hz 50.025 Hz ... 
50.022 Hz 50.021 Hz 50.021 Hz 50.021 Hz \n25 16:19 50.021 Hz 50.021 Hz 50.021 Hz ... 50.039 Hz 50.039 Hz 50.039 Hz 50.039 Hz \n26 16:20 50.038 Hz 50.038 Hz 50.038 Hz ... 50.023 Hz 50.023 Hz 50.023 Hz 50.023 Hz \n27 16:21 50.022 Hz 50.022 Hz 50.022 Hz ... 50.031 Hz 50.032 Hz 50.032 Hz 50.032 Hz \n28 16:22 50.032 Hz 50.032 Hz 50.032 Hz ... 50.029 Hz 50.029 Hz 50.029 Hz 50.03 Hz \n29 16:23 50.03 Hz 50.03 Hz 50.03 Hz ... 50.043 Hz 50.043 Hz 50.044 Hz 50.044 Hz \n30 16:24 50.044 Hz 50.044 Hz 50.045 Hz ... 50.033 Hz 50.033 Hz 50.034 Hz 50.034 Hz \n31 16:25 50.034 Hz 50.034 Hz 50.035 Hz ... 50.047 Hz 50.047 Hz 50.047 Hz 50.047 Hz \n32 16:26 50.047 Hz 50.047 Hz 50.047 Hz ... 50.045 Hz 50.046 Hz 50.046 Hz 50.046 Hz \n33 16:27 50.046 Hz 50.046 Hz 50.046 Hz ... 50.036 Hz 50.037 Hz 50.037 Hz 50.037 Hz \n34 16:28 50.037 Hz 50.038 Hz 50.038 Hz ... 50.04 Hz 50.04 Hz 50.04 Hz 50.04 Hz \n35 16:29 50.04 Hz 50.04 Hz 50.04 Hz ... 50.039 Hz 50.039 Hz 50.039 Hz 50.04 Hz \n36 16:30 50.04 Hz 50.04 Hz 50.04 Hz ... 50.037 Hz 50.037 Hz 50.037 Hz 50.037 Hz \n37 16:31 50.038 Hz 50.038 Hz 50.038 Hz ... 50.034 Hz 50.034 Hz 50.034 Hz 50.034 Hz \n38 16:32 50.033 Hz 50.033 Hz 50.033 Hz ... 50.041 Hz 50.042 Hz 50.042 Hz 50.042 Hz \n39 16:33 50.042 Hz 50.042 Hz 50.042 Hz ... 50.032 Hz 50.031 Hz 50.031 Hz 50.031 Hz \n40 16:34 50.03 Hz 50.03 Hz 50.03 Hz ... 50.043 Hz 50.044 Hz 50.044 Hz 50.044 Hz \n41 16:35 50.045 Hz 50.045 Hz 50.045 Hz ... 50.032 Hz 50.033 Hz 50.033 Hz 50.034 Hz \n42 16:36 50.034 Hz 50.035 Hz 50.035 Hz ... 50.017 Hz 50.017 Hz 50.017 Hz 50.017 Hz \n43 16:37 50.017 Hz 50.017 Hz 50.017 Hz ... 50.01 Hz 50.011 Hz 50.011 Hz 50.011 Hz \n44 16:38 50.012 Hz 50.012 Hz 50.012 Hz ... 50.021 Hz 50.022 Hz 50.022 Hz 50.022 Hz \n45 16:39 50.022 Hz 50.023 Hz 50.023 Hz ... 50.016 Hz 50.016 Hz 50.016 Hz 50.016 Hz \n46 16:40 50.016 Hz 50.016 Hz 50.016 Hz ... 50.003 Hz 50.002 Hz 50.002 Hz 50.002 Hz \n47 16:41 50.002 Hz 50.002 Hz 50.002 Hz ... 49.999 Hz 49.999 Hz 49.999 Hz 50 Hz \n48 16:42 50 Hz 50 Hz 50 Hz ... 50.004 Hz 50.004 Hz 50.004 Hz 50.004 Hz \n49 16:43 50.004 Hz 50.004 Hz 50.004 Hz ... 49.998 Hz 49.998 Hz 49.998 Hz 49.998 Hz \n50 16:44 49.999 Hz 49.999 Hz 49.999 Hz ... 49.989 Hz 49.989 Hz 49.989 Hz 49.989 Hz \n51 16:45 49.989 Hz 49.99 Hz 49.99 Hz ... 49.994 Hz 49.994 Hz 49.994 Hz 49.994 Hz \n52 16:46 49.994 Hz 49.994 Hz 49.994 Hz ... 49.988 Hz 49.988 Hz 49.988 Hz 49.988 Hz \n53 16:47 49.988 Hz 49.988 Hz 49.988 Hz ... 49.997 Hz 49.997 Hz 49.998 Hz 49.998 Hz \n54 16:48 49.998 Hz 49.999 Hz 49.999 Hz ... 50.007 Hz 50.007 Hz 50.007 Hz 50.006 Hz\n55 16:49 50.006 Hz 50.006 Hz 50.006 Hz ... 50.002 Hz 50.002 Hz 50.002 Hz 50.002 Hz\n56 16:50 50.002 Hz 50.002 Hz 50.002 Hz ... 50.001 Hz 50.002 Hz 50.002 Hz 50.003 Hz\n57 16:51 50.003 Hz 50.004 Hz 50.004 Hz ... 50.006 Hz 50.006 Hz 50.006 Hz 50.006 Hz\n58 16:52 50.006 Hz 50.006 Hz 50.006 Hz ... 50.003 Hz 50.003 Hz 50.003 Hz 50.004 Hz\n59 16:53 50.004 Hz 50.004 Hz 50.004 Hz ... 50.007 Hz 50.007 Hz 50.007 Hz 50.008 Hz\n\n[60 rows x 61 columns]\n\n"
] |
[
1
] |
[] |
[] |
[
"python",
"selenium",
"web_scraping"
] |
stackoverflow_0074520740_python_selenium_web_scraping.txt
|
Q:
Python/NumPy: Split non-consecutive values into discrete subset arrays
How can I slice arrays such as this into n-many subsets, where one subset consists of consecutive values?
arr = np.array((0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 66, 67, 68, 69, 70, 71))
# tells me where they are consecutive
np.where(np.diff(arr) == 1)[0]
# where the break points are
cut_points = np.where(np.diff(arr) != 1)[0] + 1
# won't generalize well with n-many situations
arr[:cut_points[0] ]
arr[cut_points[0] : cut_points[1] ]
arr[cut_points[1] :, ]
A:
You can use np.split, and just pass in cut_points as the 2nd argument.
eg.
split_arr = np.split(arr, cut_points)
# split_arr looks like:
# [array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]),
# array([39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55]),
# array([66, 67, 68, 69, 70, 71])]
full solution:
import numpy as np
arr = np.array((0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 66, 67, 68, 69, 70, 71))
cut_points = np.where(np.diff(arr) != 1)[0] + 1
split_arr = np.split(arr, cut_points)
split_arr
# outputs:
[array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]),
array([39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55]),
array([66, 67, 68, 69, 70, 71])]
A:
Just as an alternative way, with no pandas/numpy.
If you don't care about the order of the input/ouput, you start at the end and do something like:
l = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 66, 67, 68, 69, 70, 71)
i = 0
current_index = -1
prev_value = None
result = []
for k in l[::-1]:
current_value = k + i
if prev_value != current_value:
prev_value = current_value
current_index += 1
result.append([])
result[current_index].append(k)
i += 1
print(result)
Then result will contain:
[
[71, 70, 69, 68, 67, 66],
[55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39],
[14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
]
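Another compact pure-Python option, for what it's worth, is the classic itertools.groupby recipe (a sketch; it preserves the input order):
from itertools import groupby

arr = [0, 1, 2, 3, 14, 39, 40, 41, 66, 67]
# consecutive runs share a constant value of (value - index)
groups = [
    [v for _, v in run]
    for _, run in groupby(enumerate(arr), lambda t: t[1] - t[0])
]
print(groups)  # [[0, 1, 2, 3], [14], [39, 40, 41], [66, 67]]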
|
Python/NumPy: Split non-consecutive values into discrete subset arrays
|
How can I slice arrays such as this into n-many subsets, where one subset consists of consecutive values?
arr = np.array((0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 66, 67, 68, 69, 70, 71))
# tells me where they are consecutive
np.where(np.diff(arr) == 1)[0]
# where the break points are
cut_points = np.where(np.diff(arr) != 1)[0] + 1
# won't generalize well with n-many situations
arr[:cut_points[0] ]
arr[cut_points[0] : cut_points[1] ]
arr[cut_points[1] :, ]
|
[
"You can use np.split, and just pass in cut_points as the 2nd argument.\neg.\nsplit_arr = np.split(arr, cut_points)\n\n# split_arr looks like:\n# [array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]),\n# array([39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55]),\n# array([66, 67, 68, 69, 70, 71])]\n\nfull solution:\nimport numpy as np\narr = np.array((0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 39, 40,\n 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 66, 67, 68, 69, 70, 71))\ncut_points = np.where(np.diff(arr) != 1)[0] + 1\nsplit_arr = np.split(arr, split_points)\nsplit_arr\n# outputs:\n[array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]),\n array([39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55]),\n array([66, 67, 68, 69, 70, 71])]\n\n",
"Just as an alternative way, with no pandas/numpy.\nIf you don't care about the order of the input/ouput, you start at the end and do something like:\nl = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 66, 67, 68, 69, 70, 71)\ni = 0\ncurrent_index = -1\nprev_value = None\nresult = []\nfor k in l[::-1]:\n current_value = k + i\n if prev_value != current_value:\n prev_value = current_value\n current_index += 1\n result.append([])\n result[current_index].append(k)\n i += 1\nprint(result)\n\nThen result will contain:\n[\n [71, 70, 69, 68, 67, 66],\n [55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39],\n [14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]\n]\n\n"
] |
[
2,
1
] |
[] |
[] |
[
"arrays",
"numpy",
"python"
] |
stackoverflow_0074521283_arrays_numpy_python.txt
|
Q:
Selenium properly clicks on the correct option, but when selecting the element for add to cart, it always adds only the first option
I'm creating a webscraper with Selenium that will add products to a cart, and then cycle through cities, states and zip codes to give me the total cost of shipping + taxes for each area.
The website I'm using is: https://www.power-systems.com/shop/product/proelite-competition-kettlebell
Everything in my code appears to be working normally - the window will open, close a few pop ups and I can see Selenium select the proper option. However, regardless of whatever I've tried, after selenium clicks the "Add to Cart" button, it always adds the first option, despite having selected the proper one
Here is what I have been trying:
#created to simplify the code since I'll be using this option often
def element_present_click1(path_type,selector):
element_present = EC.element_to_be_clickable((path_type , selector))
WebDriverWait(driver, 30).until(element_present)
try:
driver.find_element(path_type , selector).click()
except:
clicker = driver.find_element(path_type , selector)
driver.execute_script("arguments[0].click();", clicker)
path = "C:\Program Files (x86)\msedgedriver.exe"
driver = webdriver.Edge(path)
driver.get('https://www.power-systems.com/shop/product/proelite-competition-kettlebell')
element_present_click1(By.CSS_SELECTOR,'button[name="bluecoreCloseButton"]')
element_present_click1(By.CSS_SELECTOR,'a[id="hs-eu-decline-button"]')
###this will correctly select the proper element
element_present_click1(By.XPATH, f"//span[text()='32 kg']")
###after this is clicked, it will always add the first option, which is 8 kg
driver.find_element(By.CSS_SELECTOR,'button.btn.btn-primary.add-to-cart.js-add-to-cart-button').submit()
I've tried a few different things, adding in some time.sleep() after clicking the option, refreshing the browser page, or selecting the option twice - no matter what I try, when I click add to cart it always adds the first option
Is there something I'm missing? Any help would be appreciated
A:
You are using a wrong selector in the last step.
button.btn.btn-primary.add-to-cart.js-add-to-cart-button is not a unique locator.
You need to click the button inside the selected element block.
This will work:
driver.find_element(By.CSS_SELECTOR, ".variant-info.selected .add-to-cart-rollover").click()
A:
It looks to me that find_element returns ONLY the first element it can find. Having taken a look at find_element it looks like you'd want to replace
driver.find_element(By.CSS_SELECTOR,'button...').submit()
with
random.choice(driver.find_elements(By.CSS_SELECTOR,'button...')).submit()
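If the goal is specifically the button belonging to the currently selected variant rather than a random one, a sketch combining the first answer's selector with an explicit wait might look like this (the CSS selector is taken from the answer above and assumed to still match the site):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# wait until the add-to-cart button inside the selected variant block is clickable
add_btn = WebDriverWait(driver, 10).until(
    EC.element_to_be_clickable(
        (By.CSS_SELECTOR, ".variant-info.selected .add-to-cart-rollover")
    )
)
add_btn.click()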
|
Selenium properly clicks on the correct option, but when selecting the element for add to cart, it always adds only the first option
|
I'm creating a webscraper with Selenium that will add products to a cart, and then cycle through cities, states and zip codes to give me the total cost of shipping + taxes for each area.
The website I'm using is: https://www.power-systems.com/shop/product/proelite-competition-kettlebell
Everything in my code appears to be working normally - the window will open, close a few pop ups and I can see Selenium select the proper option. However, regardless of whatever I've tried, after selenium clicks the "Add to Cart" button, it always adds the first option, despite having selected the proper one
Here is what I have been trying:
#created to simplify the code since I'll be using this option often
def element_present_click1(path_type,selector):
element_present = EC.element_to_be_clickable((path_type , selector))
WebDriverWait(driver, 30).until(element_present)
try:
driver.find_element(path_type , selector).click()
except:
clicker = driver.find_element(path_type , selector)
driver.execute_script("arguments[0].click();", clicker)
path = "C:\Program Files (x86)\msedgedriver.exe"
driver = webdriver.Edge(path)
driver.get('https://www.power-systems.com/shop/product/proelite-competition-kettlebell')
element_present_click1(By.CSS_SELECTOR,'button[name="bluecoreCloseButton"]')
element_present_click1(By.CSS_SELECTOR,'a[id="hs-eu-decline-button"]')
###this will correctly select the proper element
element_present_click1(By.XPATH, f"//span[text()='32 kg']")
###after this is clicked, it will always add the first option, which is 8 kg
driver.find_element(By.CSS_SELECTOR,'button.btn.btn-primary.add-to-cart.js-add-to-cart-button').submit()
I've tried a few different things, adding in some time.sleep() after clicking the option, refreshing the browser page, or selecting the option twice - no matter what I try, when I click add to cart it always adds the first option
Is there something I'm missing? Any help would be appreciated
|
[
"You are using a wrong selector in the last step.\nbutton.btn.btn-primary.add-to-cart.js-add-to-cart-button is not a unique locator.\nYou need to click the button inside the selected element block.\nThis will work:\ndriver.find_element(By.CSS_SELECTOR, \".variant-info.selected .add-to-cart-rollover\").click()\n\n",
"It looks to me that find_element returns ONLY the first element it can find. Having taken a look at find_element it looks like you'd want to replace\ndriver.find_element(By.CSS_SELECTOR,'button...').submit()\n\nwith\nrandom.choice(driver.find_elements(By.CSS_SELECTOR,'button...')).submit()\n\n"
] |
[
1,
1
] |
[] |
[] |
[
"css_selectors",
"python",
"selenium",
"selenium_webdriver"
] |
stackoverflow_0074521186_css_selectors_python_selenium_selenium_webdriver.txt
|
Q:
problem with creating .exe file in python why it gets too big
I'm packaging scripts into .exe files using PyInstaller, but I have a big problem: every time I create a file, its size multiplies; it seems that the libraries are being duplicated. Does anyone know how to solve it?
1° file size: 7mb
2° file size: 52mb
3° file size: 104mb
4° file size: 207mb
5° file size: 414mb
6° file size: 828mb
7° file size: 1.656mb
8° file size: 3.312mb
I tried renaming the files and deleting %tmp% files.
A:
You can try AutoPyToExe
with python's virtual environment plugin
follow these commands:
Install, Create and Activate Virtual Environment:
python -m pip install virtualenv
python -m venv example_env
example_env/Scripts/activate
Install AutoPyToExe:
(example_env) python -m pip install auto-py-to-exe
To run AutoPyToExe, just write this command in Command Prompt or Terminal:
(example_env) auto-py-to-exe
Important: install only those modules in the virtual environment which are required by the project.
Working with virtualenv in Python:
Activate virtual environment using this command:
Linux: source <path_to_virtual_env>/bin/activate
Windows: <path_to_virtual_env>\Scripts\activate
Deactivate virtual environment using this command:
deactivate
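As a concrete end-to-end sketch (Windows paths; the script name and requirements file are my own placeholders), building with PyInstaller inside the fresh environment, where --clean discards PyInstaller's cached build files between runs:
python -m venv build_env
build_env\Scripts\activate
python -m pip install pyinstaller
python -m pip install -r requirements.txt
pyinstaller --onefile --clean your_script.py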
A:
I did the above but it didn't work...
A solution I found for the problem: after I create an .exe file, I move it out of the folder (to any other), delete the "dist" and "build" folders that were created, and then create the next executable file...
|
problem with creating .exe file in python why it gets too big
|
I'm packaging scripts into .exe files using PyInstaller, but I have a big problem: every time I create a file, its size multiplies; it seems that the libraries are being duplicated. Does anyone know how to solve it?
1° file size: 7mb
2° file size: 52mb
3° file size: 104mb
4° file size: 207mb
5° file size: 414mb
6° file size: 828mb
7° file size: 1.656mb
8° file size: 3.312mb
I tried renaming the files and deleting %tmp% files.
|
[
"You can try AutoPyToExe\nwith python's virtual environment plugin\nfollow these commands:\n\nInstall, Create and Activate Virtual Environment:\n\n\npython -m pip install virtualenv\n\n\npython -m venv example_env\n\n\nexample_env/Scripts/activate\n\n\nInstall AutoPyToExe:\n\n\n(example_env) python -m pip install auto-py-to-exe\n\n\nTo run AutoPyToExe, just write this command in Command Prompt or Terminal:\n\n\n(example_env) auto-py-to-exe\n\n\nImportant: Installing only those modules in the virtual environment which are required by the project.\n\nWorking with virtualenv in python:\n\n\n\nActivate virtual environment using this command:\nLinux: <path_to_virtual_env>\\bin\\activate\nWindows: <path_to_virtual_env>\\Scripts\\activate\n\nDeactivate virtual environment using this command:\ndeactivate\n\n\n",
"I did the above but it didn't work...\na solution I found for the problem is to create an .exe file .. I move it from the folder (to any other) I delete the \"dist\" and \"build\" folders that were created, then I create the other executable file...\n"
] |
[
0,
0
] |
[] |
[] |
[
"exe",
"pyinstaller",
"python",
"python_3.x",
"selenium"
] |
stackoverflow_0074519729_exe_pyinstaller_python_python_3.x_selenium.txt
|
Q:
Error message about 'libsqlite3' when activate environment in jupyter Notebook
I'm trying to register my env as a kernel in Jupyter Notebook by using:
python -m ipykernel install --user --name native --display-name "python-gpu"
But error message:
Traceback (most recent call last):
File "/Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/runpy.py", line 196, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/runpy.py", line 86, in _run_code
exec(code, run_globals)
File "/Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/site-packages/ipykernel/__main__.py", line 2, in <module>
from ipykernel import kernelapp as app
File "/Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/site-packages/ipykernel/kernelapp.py", line 18, in <module>
from IPython.core.application import (
File "/Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/site-packages/IPython/__init__.py", line 53, in <module>
from .terminal.embed import embed
File "/Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/site-packages/IPython/terminal/embed.py", line 15, in <module>
from IPython.core.interactiveshell import DummyMod, InteractiveShell
File "/Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/site-packages/IPython/core/interactiveshell.py", line 73, in <module>
from IPython.core.history import HistoryManager
File "/Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/site-packages/IPython/core/history.py", line 11, in <module>
import sqlite3
File "/Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/sqlite3/__init__.py", line 57, in <module>
from sqlite3.dbapi2 import *
File "/Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/sqlite3/dbapi2.py", line 27, in <module>
from _sqlite3 import *
ImportError: dlopen(/Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/lib-dynload/_sqlite3.cpython-310-darwin.so, 0x0002): Symbol not found: _sqlite3_enable_load_extension
Referenced from: /Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/lib-dynload/_sqlite3.cpython-310-darwin.so
Expected in: /usr/lib/libsqlite3.dylib
It seems like something is wrong with my libsqlite3, but it works fine in my terminal.
My laptop is a MacBook Pro with an M1 Pro, and I'm building an arm64 Python environment in the M1 version of Anaconda.
Does anyone know how I can get around this?
A:
I had the same problem and I solved it by installing a newer version:
mamba install sqlite=3.40.0
always checking that the new library is coming from conda-forge/osx-arm64.
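If mamba isn't installed, the plain conda equivalent should behave the same (a sketch):
conda install -c conda-forge sqlite=3.40.0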
|
Error message about 'libsqlite3' when activate environment in jupyter Notebook
|
I'm trying to register my env as a kernel in Jupyter Notebook by using:
python -m ipykernel install --user --name native --display-name "python-gpu"
But error message:
Traceback (most recent call last):
File "/Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/runpy.py", line 196, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/runpy.py", line 86, in _run_code
exec(code, run_globals)
File "/Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/site-packages/ipykernel/__main__.py", line 2, in <module>
from ipykernel import kernelapp as app
File "/Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/site-packages/ipykernel/kernelapp.py", line 18, in <module>
from IPython.core.application import (
File "/Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/site-packages/IPython/__init__.py", line 53, in <module>
from .terminal.embed import embed
File "/Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/site-packages/IPython/terminal/embed.py", line 15, in <module>
from IPython.core.interactiveshell import DummyMod, InteractiveShell
File "/Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/site-packages/IPython/core/interactiveshell.py", line 73, in <module>
from IPython.core.history import HistoryManager
File "/Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/site-packages/IPython/core/history.py", line 11, in <module>
import sqlite3
File "/Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/sqlite3/__init__.py", line 57, in <module>
from sqlite3.dbapi2 import *
File "/Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/sqlite3/dbapi2.py", line 27, in <module>
from _sqlite3 import *
ImportError: dlopen(/Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/lib-dynload/_sqlite3.cpython-310-darwin.so, 0x0002): Symbol not found: _sqlite3_enable_load_extension
Referenced from: /Users/nianhua/opt/anaconda3/envs/native/lib/python3.10/lib-dynload/_sqlite3.cpython-310-darwin.so
Expected in: /usr/lib/libsqlite3.dylib
It seems like something is wrong with my libsqlite3, but it works fine in my terminal.
My laptop is a MacBook Pro with an M1 Pro, and I'm building an arm64 Python environment in the M1 version of Anaconda.
Does anyone know how I can get around this?
|
[
"I had the same problem and I solved by installing a newer version:\n\nmamba install sqlite=3.40.0\n\nalways checking that the new library is coming from conda-forge/osx-arm64.\n"
] |
[
0
] |
[] |
[] |
[
"anaconda",
"apple_m1",
"apple_silicon",
"jupyter_notebook",
"python"
] |
stackoverflow_0072312755_anaconda_apple_m1_apple_silicon_jupyter_notebook_python.txt
|
Q:
To find Oplog size using python
How to find oplog size in mongodb using python?
For example:
replSetGetStatus is equivalent to rs.status()
Is there any similar command for rs.printReplicationInfo()?
uri = "mongodb://usernamen:password@host:port/admin"
conn = pymongo.MongoClient(uri)
db = conn['admin']
db_stats = db.command({'replSetGetStatus' :1})
primary_optime = 0
secondary_optime = 0
for key in db_stats['members'] :
if key['stateStr'] == 'SECONDARY' :
secondary_optime = key['optimeDate']
if key['stateStr'] == 'PRIMARY' :
primary_optime =key['optimeDate']
print 'primary_optime : ' + str(primary_optime)
print 'secondary_optime : ' + str(secondary_optime)
seconds_lag = (primary_optime - secondary_optime ).total_seconds()
#total_seconds() used to get the lag in seconds rather than a datetime object
print 'secondary_lag : ' + str(seconds_lag)
This is my code. The db.command({'replSetGetStatus' :1}) is working.
Similarly I need for the oplog size.
A:
The following commands executed from any replicaSet member will give you the size of oplog:
Uncompressed size in MB:
db.getReplicationInfo().logSizeMB
Uncompressed current size in Bytes:
db.getSiblingDB('local').oplog.rs.stats().size
Compressed current size in Bytes:
db.getSiblingDB('local').oplog.rs.stats().storageSize
Max configured size:
db.getSiblingDB('local').oplog.rs.stats().maxSize
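Since the question asks for Python specifically, the same numbers can be read with pymongo via the collStats command on the local database (a sketch; the connection string is a placeholder, and maxSize is reported for capped collections such as the oplog):
import pymongo

conn = pymongo.MongoClient("mongodb://username:password@host:port/admin")
stats = conn["local"].command("collStats", "oplog.rs")
print("max configured size (bytes):", stats["maxSize"])
print("current size (bytes):", stats["size"])
print("compressed size on disk (bytes):", stats["storageSize"])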
|
To find Oplog size using python
|
How to find oplog size in mongodb using python?
For example:
replSetGetStatus is equivalent to rs.status()
Is there any similar command for rs.printReplicationInfo()?
uri = "mongodb://usernamen:password@host:port/admin"
conn = pymongo.MongoClient(uri)
db = conn['admin']
db_stats = db.command({'replSetGetStatus' :1})
primary_optime = 0
secondary_optime = 0
for key in db_stats['members'] :
if key['stateStr'] == 'SECONDARY' :
secondary_optime = key['optimeDate']
if key['stateStr'] == 'PRIMARY' :
primary_optime =key['optimeDate']
print 'primary_optime : ' + str(primary_optime)
print 'secondary_optime : ' + str(secondary_optime)
seconds_lag = (primary_optime - secondary_optime ).total_seconds()
#total_seconds() used to get the lag in seconds rather than a datetime object
print 'secondary_lag : ' + str(seconds_lag)
This is my code. The db.command({'replSetGetStatus' :1}) is working.
Similarly I need for the oplog size.
|
[
"The following commands executed from any replicaSet member will give you the size of oplog:\nUncompressed size in MB:\n db.getReplicationInfo().logSizeMB\n\nUncompressed current size in Bytes:\n db.getSiblingDB('local').oplog.rs.stats().size\n\nCompressed current size in Bytes:\n db.getSiblingDB('local').oplog.rs.stats().storageSize\n \n\nMax configured size:\n db.getSiblingDB('local').oplog.rs.stats().maxSize\n\n"
] |
[
0
] |
[] |
[] |
[
"automation",
"mongodb",
"mongodb_oplog",
"pymongo",
"python"
] |
stackoverflow_0074521569_automation_mongodb_mongodb_oplog_pymongo_python.txt
|
Q:
How to select NumPy matrix rows that contain certain value/values?
I have the following NumPy array:
m = np.array([[1, 2, 3],
[2, 4, 3],
[1, 2, 1]])
I want to have an array that contains the rows of m where there is at least one occurrence of 1 in any column, so:
np.array([[1, 2, 3],
[1, 2, 1]])
A:
Use any and boolean indexing:
out = m[(m==1).any(axis=1)]
Output:
array([[1, 2, 3],
[1, 2, 1]])
Intermediates:
(m==1)
array([[ True, False, False],
[False, False, False],
[ True, False, True]])
(m==1).any(axis=1)
array([ True, False, True])
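Since the title mentions value/values, a short sketch for matching several values at once, swapping == for np.isin (the value list here is just an illustration):
values = [1, 4]  # keep rows containing any of these
out = m[np.isin(m, values).any(axis=1)]
# for this m, every row contains a 1 or a 4, so all three rows are kept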
|
How to select NumPy matrix rows that contain certain value/values?
|
I have the following NumPy array:
m = np.array([[1, 2, 3],
[2, 4, 3],
[1, 2, 1]])
I want to have an array that contains the rows of m where there is at least one occurrence of 1 in any column, so:
np.array([[1, 2, 3],
[1, 2, 1]])
|
[
"Use any and boolean indexing:\nout = m[(m==1).any(axis=1)]\n\nOutput:\narray([[1, 2, 3],\n [1, 2, 1]])\n\nIntermediates:\n(m==1)\n\narray([[ True, False, False],\n [False, False, False],\n [ True, False, True]])\n\n\n(m==1).any(axis=1)\n\narray([ True, False, True])\n\n"
] |
[
5
] |
[] |
[] |
[
"matrix",
"numpy",
"python"
] |
stackoverflow_0074521663_matrix_numpy_python.txt
|
Q:
How to save a data frame and its columns to a text file?
I have a data frame DF as follows:
import pandas as pd
DF = pd.DataFrame({'A': [1], 'B': [2]})
I'm trying to save it to a Test.txt file by following this answer, with:
np.savetxt(r'Test.txt', DF, fmt='%s')
Which does save only DF values and not the column names:
1 2
How do I save it to have Test.txt with the following contents?
A B
1 2
A:
From the same answer you linked, if you want to use Pandas, just change header=True like:
DF.to_csv('Test.txt', header=True, index=None, sep=' ', mode='a')
If you want to use np.savetxt():
np.savetxt(
'Test.txt',
DF.values,
fmt='%s',
header=' '.join(DF.columns),
comments=''
)
Note that I changed the comments parameter to an empty string because the default is to add # in front of the header.
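For completeness, a third sketch that keeps the console-style alignment pandas prints, using DataFrame.to_string:
with open('Test.txt', 'w') as f:
    f.write(DF.to_string(index=False))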
|
How to save a data frame and its columns to a text file?
|
I have a data frame DF as follows:
import pandas as pd
DF = pd.DataFrame({'A': [1], 'B': [2]})
I'm trying to save it to a Test.txt file by following this answer, with:
np.savetxt(r'Test.txt', DF, fmt='%s')
Which does save only DF values and not the column names:
1 2
How do I save it to have Test.txt with the following contents?
A B
1 2
|
[
"From the same answer you linked, if you want to use Pandas, just change header=True like:\nDF.to_csv('Test.txt', header=True, index=None, sep=' ', mode='a')\n\nIf you want to use np.savetxt():\nnp.savetxt(\n 'Test.txt',\n DF.values,\n fmt='%s',\n header=' '.join(DF.columns),\n comments=''\n)\n\nNote that I changed the comments parameter to an empty string because the default is to add # in front of the header.\n"
] |
[
1
] |
[] |
[] |
[
"pandas",
"python"
] |
stackoverflow_0074521366_pandas_python.txt
|
Q:
Loading a deployed ONNX model only once
I have a big Machine learning/ Computer vision project that is using an ONNX model, using python.
the project takes around 3 seconds (locally) just to load the model + inference.
Time taken to load onnx model : 0.2702977657318115
Time taken for onnx inference 1.673530101776123
Time taken for onnx inference 0.7677013874053955
After deploying the project, this loading time is always initiated with each individual hit on the server.
For example, if 4 users request at once, all the results will take around 30 seconds; when there is only 1 request, it takes only around 10 seconds.
Problem
Is there any way to load the onnx model only once when initializing the server, not with every and each post request?
I tried asyncio.
It helped queue the requests, but still, the last request will have to wait 30 seconds for the results, even though the CPU usage is not at 100%.
I am not sure whether the solution to my problem is loading the ONNX model only once, or multithreading, or whether applying asyncio to my project is already the best approach.
A:
Are you trying to load the model and then doing the inference every time that you have a request?
You should load once and keep the session throughout the time life of the inference process. You could also look into execution providers that come with ONNXruntime to try and speed up the inference time like CUDA, tensorRT...
import onnxruntime as ort
def load_model(onnx_model_path):
inference_session = ort.InferenceSession(
onnx_model_path,
)
# get input and output names of model layers for inference placeholder
inputs = inference_session.get_inputs()[0].name
outputs = [
output.name for output in inference_session.get_outputs()
]
return inference_session, {"inputs": inputs, "outputs": outputs}
def main():
inf_session, input_output_dict = load_model("your/model.onnx")
while True:
inf_session.run(input_output_dict["outputs"], {
input_output_dict["inputs"]: your_array_input})
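To tie this back to the deployment question, i.e. loading only once when the server starts, a minimal Flask sketch (the framework, route name, and input format are my own assumptions, reusing the load_model helper above):
import numpy as np
from flask import Flask, request, jsonify

app = Flask(__name__)
# executed once at startup, not per request
session, io_names = load_model("your/model.onnx")

@app.route("/predict", methods=["POST"])
def predict():
    batch = np.array(request.json["input"], dtype=np.float32)
    result = session.run(io_names["outputs"], {io_names["inputs"]: batch})
    return jsonify([r.tolist() for r in result])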
|
Loading a deployed ONNX model only once
|
I have a big Machine learning/ Computer vision project that is using an ONNX model, using python.
the project takes around 3 seconds (locally) just to load the model + inference.
Time taken to load onnx model : 0.2702977657318115
Time taken for onnx inference 1.673530101776123
Time taken for onnx inference 0.7677013874053955
After deploying the project, this loading time is always initiated with each individual hit on the server.
For example, if 4 users request at once, all the results will take around 30 seconds; when there is only 1 request, it takes only around 10 seconds.
Problem
Is there any way to load the onnx model only once when initializing the server, not with every and each post request?
I tried asyncio.
It helped queue the requests, but still, the last request will have to wait 30 seconds for the results, even though the CPU usage is not at 100%.
I am not sure whether the solution to my problem is loading the ONNX model only once, or multithreading, or whether applying asyncio to my project is already the best approach.
|
[
"Are you trying to load the model and then doing the inference every time that you have a request?\nYou should load once and keep the session throughout the time life of the inference process. You could also look into execution providers that come with ONNXruntime to try and speed up the inference time like CUDA, tensorRT...\nimport onnxruntime as ort\n\ndef load_model(onnx_model_path):\n inference_session = ort.InferenceSession(\n onnx_model_path,\n )\n \n # get input and output names of model layers for inference placeholder\n inputs = inference_session.get_inputs()[0].name\n outputs = [\n output.name for output in inference_session.get_outputs()\n ]\n\n return inference_session, {\"inputs\": inputs, \"outputs\": outputs}\n\n\ndef main():\n inf_session, input_output_dict = load_model(\"your/model.onnx\")\n\n while True:\n inf_session.run(input_output_dict[\"outputs\"], {\n str(input_output_dict[inputs]): your_array_input})\n\n"
] |
[
0
] |
[] |
[] |
[
"computer_vision",
"machine_learning",
"onnx",
"python",
"tensorflow"
] |
stackoverflow_0074252832_computer_vision_machine_learning_onnx_python_tensorflow.txt
|
Q:
Adding JavaScript to Folium map
I'm working on a project to make a map using Folium and Flask, and I'm trying to add my own JavaScript to animate the tiles so they appear one by one.
The question is: how can I add my custom JavaScript to the map using Python/Flask?
as I have tried this way in this code below:
from branca.element import Element
m = folium.Map()
map_id = m.get_name()
my_js = """
const items = document.querySelectorAll('.leaflet-interactive')
items.forEach((one) => {
one.style.visibility = 'hidden'
})
if (items.length > 0) {
if (items.length !== 0) {
let i = 0
const m = setInterval(function () {
if (i < items.length) {
items[i].style.visibility = 'visible'
i++
}
console.log('now i =' + i + ' || the number of circle = ' + items.length)
if (i === items.length) {
clearInterval(m)
console.log('now cleared')
}
}, 1000)
}
}
""".format(map_id)
e = Element(my_js)
html = m.get_root()
html.script.get_root().render()
# Insert new element or custom JS
html.script._children[e.get_name()] = e
m.save('mymap.html')
also have tried other way like this:
base_map.get_root().html.add_child(folium.JavascriptLink('static/custom.js'))
it injects to the template's body but it still doesn't work
A:
I already found out how to include external JavaScript and CSS links as well as inline JS:
Firstly, this is how we can add a CSS link to the header of the page
m.get_root().header.add_child(CssLink('./static/style.css'))
Then, this is the code to insert JavaScript External link to folium
m.get_root().html.add_child(JavascriptLink('./static/js.js'))
Finally, to append inline JS to the script that Folium generates
my_js = '''
console.log('working perfectly')
'''
m.get_root().script.add_child(Element(my_js))
Resources that helped me understand how to do this were reading through the branca elements and checking the Folium repo
Folium repo
Branca Element
A:
It's better to use:
for CSS
map.get_root().header.add_child(folium.CssLink('css/style.css'))
for JS
map.get_root().html.add_child(folium.JavascriptLink('js/folium.js'))
by using just
m.get_root().html.add_child(JavascriptLink('./static/js.js'))
you might get an error:
NameError: name 'JavascriptLink' is not defined
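For completeness, here is a minimal end-to-end sketch with the imports spelled out, which avoids that NameError (the ./static paths are placeholders for wherever your files actually live):
import folium
from branca.element import CssLink, Element, JavascriptLink

m = folium.Map(location=[0, 0], zoom_start=2)

# External CSS goes into the <head>, external JS into the <body>.
m.get_root().header.add_child(CssLink("./static/style.css"))
m.get_root().html.add_child(JavascriptLink("./static/custom.js"))

# Inline JS is appended to the script block Folium generates itself.
m.get_root().script.add_child(Element("console.log('custom JS loaded')"))

m.save("mymap.html")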
|
Adding JavaScript to Folium map
|
I'm working on a project to make a map using folium and flask and I'm trying to add my own javascript to add some animation to the tile to appear one by one.
The question is how can I add my custom javascript to the map using python flask
as I have tried this way in this code below:
from branca.element import Element
m = folium.Map()
map_id = m.get_name()
my_js = """
const items = document.querySelectorAll('.leaflet-interactive')
items.forEach((one) => {
one.style.visibility = 'hidden'
})
if (items.length > 0) {
if (items.length !== 0) {
let i = 0
const m = setInterval(function () {
if (i < items.length) {
items[i].style.visibility = 'visible'
i++
}
console.log('now i =' + i + ' || the number of circle = ' + items.length)
if (i === items.length) {
clearInterval(m)
console.log('now cleared')
}
}, 1000)
}
}
""".format(map_id)
e = Element(my_js)
html = m.get_root()
html.script.get_root().render()
# Insert new element or custom JS
html.script._children[e.get_name()] = e
m.save('mymap.html')
also have tried other way like this:
base_map.get_root().html.add_child(folium.JavascriptLink('static/custom.js'))
it injects to the template's body but it still doesn't work
|
[
"I already found out how to include JavaScript and CSS external link also inline js:\nFirstly, way we can add CSS link to the header of the page\nm.get_root().header.add_child(CssLink('./static/style.css'))\n\nThen, this is the code to insert JavaScript External link to folium\nm.get_root().html.add_child(JavascriptLink('./static/js.js'))\n\nFinally, to add to the Folium script that been generated before\nmy_js = '''\nconsole.log('working perfectly')\n'''\nm.get_root().script.add_child(Element(my_js))\n\nresources that helped me to understand how to do this is reading throw branca elements and checking Folium repo\n\nFolium repo\nBranca Element\n\n",
"It's better to use:\nfor CSS\n map.get_root().header.add_child(folium.CssLink('css/style.css'))\n\nfor JS\n map.get_root().html.add_child(folium.JavascriptLink('js/folium.js'))\n\nby using just\n m.get_root().html.add_child(JavascriptLink('./static/js.js'))\n\nyou might get an error:\nNameError: name 'JavascriptLink' is not defined\n"
] |
[
6,
0
] |
[] |
[] |
[
"dictionary",
"flask",
"folium",
"javascript",
"python"
] |
stackoverflow_0060479995_dictionary_flask_folium_javascript_python.txt
|
Q:
In a xml file, how to get a tag that contains segmentation points which is placed after the key tag(not in it)
I have an XML file that contains segmentation points, but I don't know how to get them. I guess it is not well built, because the points stand in a tag that comes after the tag containing the "Point_px" string (they are not inside the "Point_px" tag itself).
My question is: how can I get the tags that contain the points in the most efficient way?
This is what I use to get the segs now.
import xml.etree.ElementTree as ET
class XML_files:
# other codes
def get_points(self):
anns = self.xml[0][1][0][5].iter() # self.xml carries the info
segs = []
a = -2
for i,x in enumerate(anns):
if x.text == "Point_px":
a = i
if a+1 == i:
segs.append([a.text for a in x.findall("string")])
segs = [[[int(float(value)) for value in tuples.strip("()").split(", ")] for tuples in part_cord] for part_cord in segs]
return segs
This is how the files look like
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Images</key>
<array>
<dict>
"other tags"
<array>
<dict>
"other tags"
<key>Point_px</key>
<array>
<string>(468.612000, 2109.979980)</string>
</array>
"other tags"
</dict>
<dict>
"other tags"
<key>Point_px</key>
<array>
<string>(932.369019, 2154.489990)</string>
<string>(935.320984, 2151.000000)</string>
<string>(940.689026, 2149.389893)</string>
<string>(945.788025, 2149.659912)</string>
<string>(949.544983, 2151.810059)</string>
<string>(952.228027, 2154.219971)</string>
<string>(954.911987, 2158.520020)</string>
<string>(954.911987, 2162.540039)</string>
<string>(953.570007, 2167.100098)</string>
<string>(951.422974, 2170.590088)</string>
<string>(947.129028, 2173.540039)</string>
<string>(943.104004, 2173.810059)</string>
<string>(938.809998, 2173.280029)</string>
<string>(934.784973, 2171.669922)</string>
<string>(932.638000, 2167.909912)</string>
<string>(931.296021, 2164.149902)</string>
<string>(931.026978, 2159.320068)</string>
</array>
"other tags"
</dict>
<dict>
"other tags"
<key>Point_px</key>
<array>
<string>(1347.459961, 1894.459961)</string>
</array>
"other tags"
</dict>
</array>
</dict>
</array>
</dict>
</plist>
Expected output is a list like below
[[[468.612000, 2109.979980]],
[[932.369019, 2154.489990],
[935.320984, 2151.000000],
[940.689026, 2149.389893],
[945.788025, 2149.659912],
[949.544983, 2151.810059],
[952.228027, 2154.219971],
[954.911987, 2158.520020],
[954.911987, 2162.540039],
[953.570007, 2167.100098],
[951.422974, 2170.590088],
[947.129028, 2173.540039],
[943.104004, 2173.810059],
[938.809998, 2173.280029],
[934.784973, 2171.669922],
[932.638000, 2167.909912],
[931.296021, 2164.149902],
[931.026978, 2159.320068]],
[[1347.459961, 1894.459961]]]
A:
For your xml structure, you are better off using lxml because of its better xpath support compared to that of ElementTree.
Also, note that the xml in your question isn't well formed (because the <plist> element is never opened).
Assuming that's fixed, try this:
from lxml import etree

images = """[your xml above, fixed]"""
doc = etree.fromstring(images.encode())  # parse the document before querying it

segs = []
for d in doc.xpath('//dict[./key[.="Point_px"]]'):
    sg = d.xpath('.//array/string/text()')
    segs.append([s.strip('()') for s in sg])
segs
The output should be your expected output.
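Note that the xpath query returns the raw strings (for example "932.369019, 2154.489990"); to reach the list-of-float-pairs shown in the expected output, one more conversion pass is needed. A small sketch continuing from the segs list built above:
# Turn each "x, y" string into an [x, y] pair of floats.
points = [
    [[float(v) for v in s.split(", ")] for s in part]
    for part in segs
]
print(points[0])  # e.g. [[468.612, 2109.97998]]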
|
In a xml file, how to get a tag that contains segmentation points which is placed after the key tag(not in it)
|
I have a xml file that contaions segmentation points but I dont know how to get them. It not well builded I guess because the points stands in a tag after a tag that contains "points_px" string. (It is not in the "point_px" tag.)
My question is how to get the tags that contains the points with most efficient way?
This is what I use to get the segs now.
import xml.etree.ElementTree as ET
class XML_files:
# other codes
def get_points(self):
anns = self.xml[0][1][0][5].iter() # self.xml carries the info
segs = []
a = -2
for i,x in enumerate(anns):
if x.text == "Point_px":
a = i
if a+1 == i:
segs.append([a.text for a in x.findall("string")])
segs = [[[int(float(value)) for value in tuples.strip("()").split(", ")] for tuples in part_cord] for part_cord in segs]
return segs
This is how the files look like
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Images</key>
<array>
<dict>
"other tags"
<array>
<dict>
"other tags"
<key>Point_px</key>
<array>
<string>(468.612000, 2109.979980)</string>
</array>
"other tags"
</dict>
<dict>
"other tags"
<key>Point_px</key>
<array>
<string>(932.369019, 2154.489990)</string>
<string>(935.320984, 2151.000000)</string>
<string>(940.689026, 2149.389893)</string>
<string>(945.788025, 2149.659912)</string>
<string>(949.544983, 2151.810059)</string>
<string>(952.228027, 2154.219971)</string>
<string>(954.911987, 2158.520020)</string>
<string>(954.911987, 2162.540039)</string>
<string>(953.570007, 2167.100098)</string>
<string>(951.422974, 2170.590088)</string>
<string>(947.129028, 2173.540039)</string>
<string>(943.104004, 2173.810059)</string>
<string>(938.809998, 2173.280029)</string>
<string>(934.784973, 2171.669922)</string>
<string>(932.638000, 2167.909912)</string>
<string>(931.296021, 2164.149902)</string>
<string>(931.026978, 2159.320068)</string>
</array>
"other tags"
</dict>
<dict>
"other tags"
<key>Point_px</key>
<array>
<string>(1347.459961, 1894.459961)</string>
</array>
"other tags"
</dict>
</array>
</dict>
</array>
</dict>
</plist>
Expected output is a list like below
[[[468.612000, 2109.979980]],
[[932.369019, 2154.489990],
[935.320984, 2151.000000],
[940.689026, 2149.389893],
[945.788025, 2149.659912],
[949.544983, 2151.810059],
[952.228027, 2154.219971],
[954.911987, 2158.520020],
[954.911987, 2162.540039],
[953.570007, 2167.100098],
[951.422974, 2170.590088],
[947.129028, 2173.540039],
[943.104004, 2173.810059],
[938.809998, 2173.280029],
[934.784973, 2171.669922],
[932.638000, 2167.909912],
[931.296021, 2164.149902],
[931.026978, 2159.320068]],
[[1347.459961, 1894.459961]]]
|
[
"For your xml structure, you are better off using lxml because of its better xpath support compared to that of ElementTree.\nAlso, note that the xml in your question isn't well formed (because the <plist> element is never opened).\nAssuming that's fixed, try this:\nfrom lxml import etree\nimages = \"\"\"[your xml above, fixed]\"\"\"\nsegs= []\nfor d in doc.xpath('//dict[./key[.=\"Point_px\"]]'):\n sg = d.xpath('.//array/string/text()')\n segs.append([s.strip('()') for s in sg])\nsegs\n\nThe output should be your expected output.\n"
] |
[
0
] |
[] |
[] |
[
"python",
"xml",
"xml.etree"
] |
stackoverflow_0074510828_python_xml_xml.etree.txt
|
Q:
Web scrape links with Python, then turn them into a string
With Python I'm having issues turning web-scraped links into strings so I can save them as either a txt or csv file. I would really like them as a txt file. This is what I have at the moment.
import requests
from bs4 import BeautifulSoup
url = "https://www.google.com/"
reqs = requests.get(url)
soup = BeautifulSoup(reqs.text, 'html.parser')
urls = []
for link in soup.find_all('a'):
print(link.get('href'))
type(link)
print(link, file=open('example.txt','w'))
I've tried all sorts of things with no luck. I'm pretty much at a loss.
A:
print(link, file=open('example.txt','w'))
Will write the link variable, but that's only the last one.
To write them all, use:
import requests
from bs4 import BeautifulSoup
url = "https://www.google.com/"
reqs = requests.get(url)
soup = BeautifulSoup(reqs.text, 'html.parser')
with open("example.txt", "w") as file:
for link in soup.find_all('a'):
file.write(link.get('href') + '\n')
Which uses a context manager to open the file, then write each href with a newline.
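One caveat worth adding: some <a> tags have no href attribute, in which case link.get('href') returns None and the write call raises a TypeError. A small variation of the same loop that skips those:
with open("example.txt", "w") as file:
    for link in soup.find_all('a'):
        href = link.get('href')
        if href:  # skip anchors without an href attribute
            file.write(href + '\n')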
|
Web scrape links with Python, then turn them into a string
|
With Python I'm having issues turning web scrapped links into strings so I can save them as either a txt or csv file. I would really like them as a txt file. This is what I have at the moment.
import requests
from bs4 import BeautifulSoup
url = "https://www.google.com/"
reqs = requests.get(url)
soup = BeautifulSoup(reqs.text, 'html.parser')
urls = []
for link in soup.find_all('a'):
print(link.get('href'))
type(link)
print(link, file=open('example.txt','w'))
I've tried all sort of things with no luck. I'm pretty much at a lose.
|
[
"print(link, file=open('example.txt','w'))\n\nWill write the link variable, but that's only the last one.\n\nTo write them all, use:\nimport requests\nfrom bs4 import BeautifulSoup\n\nurl = \"https://www.google.com/\"\nreqs = requests.get(url)\nsoup = BeautifulSoup(reqs.text, 'html.parser')\n\nwith open(\"example.txt\", \"w\") as file:\n for link in soup.find_all('a'):\n file.write(link.get('href') + '\\n')\n\nWhich uses a context manager to open the file, then write each href with a newline.\n"
] |
[
0
] |
[] |
[] |
[
"python"
] |
stackoverflow_0074521371_python.txt
|
Q:
Two dll function calls only work with random print() in between
I use a camera SDK with a DLL (ctypes.WinDLL).
camera_path = 'cam://0'.encode('utf-8')
handle = xdll.XDLL.open_camera(camera_path, 0, 0)
# (The handle returned is 1)
xdll.XDLL.set_property_value_f(handle, b'IntegrationTime', c_double(2500))
This gives the following error:
OSError: exception: access violation reading 0x0000000000000001
The weird thing is, that the code works as expected with a random print in between:
camera_path = 'cam://0'.encode('utf-8')
handle = xdll.XDLL.open_camera(camera_path, 0, 0)
# (The handle returned is 1)
print('random print')
xdll.XDLL.set_property_value_f(handle, b'IntegrationTime', c_double(2500))
Any idea what print() does to prevent such an error?
If time.sleep(1) is used instead of print() it shows the error, so the time spent on printing should not make a difference.
EDIT:
The interesting lines from the header file:
typedef int XCHANDLE; ///< Handle type used to identify an initialised session to a camera.
XCHANDLE IMPEXPC XC_OpenCamera (const char * pCameraName = "cam://default", XStatus pCallBack = 0, void * pUser = 0); ///< @sa XCamera::Create
ErrCode IMPEXPC XC_SetPropertyValueF (XCHANDLE h, const char * pPrp, double dValue, const char * pUnit);
methods in xdll.XDLL:
open_camera = _xenethDLL.XC_OpenCamera
open_camera.restype = c_int32 # XCHANDLE
set_property_value_f = _xenethDLL.XC_SetPropertyValueF
set_property_value_f.restype = c_ulong # ErrCode
set_property_value_f.argtypes = (c_int32, c_char_p, c_double)
A:
I forgot to add a required parameter (char * pUnit) to the argtypes.
Therefore, I got some weird/undefined behaviour.
Now, after I added the parameter, the code executes as expected.
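Based on the header shown in the question, the corrected declaration would look roughly like this (a sketch; the empty unit string passed at the call site is illustrative and depends on what the SDK accepts):
from ctypes import c_char_p, c_double, c_int32, c_ulong

set_property_value_f = _xenethDLL.XC_SetPropertyValueF
set_property_value_f.restype = c_ulong  # ErrCode
# All four parameters from the header, including the previously missing char * pUnit.
set_property_value_f.argtypes = (c_int32, c_char_p, c_double, c_char_p)

# The call site now passes the unit argument explicitly.
xdll.XDLL.set_property_value_f(handle, b'IntegrationTime', c_double(2500), b'')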
|
Two dll function calls only work with random print() in between
|
I use a camera SDK with a DLL (ctypes.WinDLL).
camera_path = 'cam://0'.encode('utf-8')
handle = xdll.XDLL.open_camera(camera_path, 0, 0)
# (The handle returned is 1)
xdll.XDLL.set_property_value_f(handle, b'IntegrationTime', c_double(2500))
This gives an the following error:
OSError: exception: access violation reading 0x0000000000000001
The weird thing is, that the code works as expected with a random print in between:
camera_path = 'cam://0'.encode('utf-8')
handle = xdll.XDLL.open_camera(camera_path, 0, 0)
# (The handle returned is 1)
print('random print')
xdll.XDLL.set_property_value_f(handle, b'IntegrationTime', c_double(2500))
Any idea what print() does to prevent such an error?
If time.sleep(1) is used instead of print() it shows the error, so the time spent on printing should not make a difference.
EDIT:
The interesting lines from the header file:
typedef int XCHANDLE; ///< Handle type used to identify an initialised session to a camera.
XCHANDLE IMPEXPC XC_OpenCamera (const char * pCameraName = "cam://default", XStatus pCallBack = 0, void * pUser = 0); ///< @sa XCamera::Create
ErrCode IMPEXPC XC_SetPropertyValueF (XCHANDLE h, const char * pPrp, double dValue, const char * pUnit);
methods in xdll.XDLL:
open_camera = _xenethDLL.XC_OpenCamera
open_camera.restype = c_int32 # XCHANDLE
set_property_value_f = _xenethDLL.XC_SetPropertyValueF
set_property_value_f.restype = c_ulong # ErrCode
set_property_value_f.argtypes = (c_int32, c_char_p, c_double)
|
[
"I forgot to add a required parameter (char * pUnit) to the argtypes.\nTherefore, i got some weird/undefined behaviour.\nNow, after i added the parameter the code executes as expected.\n"
] |
[
0
] |
[] |
[] |
[
"ctypes",
"python"
] |
stackoverflow_0074520735_ctypes_python.txt
|
Q:
Pandas - Datetime Manipulation
I have a dataframe like so:
CREATED_AT COUNT
'1990-01-01' '2022-01-01 07:30:00' 5
'1990-01-02' '2022-01-01 07:30:00' 10
...
Where the index is a date and the CREATED_AT column is a datetime that is the same value for all rows.
How can I update the CREATED_AT column such that it inherits its date portion from the index?
The result should look like:
CREATED_AT COUNT
'1990-01-01' '1990-01-01 07:30:00' 5
'1990-01-02' '1990-01-02 07:30:00' 10
...
Attempts at this result in errors like:
cannot add DatetimeArray and DatetimeArray
A:
You can use df.reset_index() to use the index as a column and then do a simple manipulation to get the output you want, like this:
# Creating a test df
import pandas as pd
from datetime import datetime, timedelta, date
df = pd.DataFrame.from_dict({
"CREATED_AT": [datetime.now(), datetime.now() + timedelta(hours=1)],
"COUNT": [5, 10]
})
df_with_index = df.set_index(pd.Index([date.today() - timedelta(days=10), date.today() - timedelta(days=9)]))
# Creating the column with the result
df_result = df_with_index.reset_index()
df_result["NEW_CREATED_AT"] = pd.to_datetime(df_result["index"].astype(str) + ' ' + df_result["CREATED_AT"].dt.time.astype(str))
Result:
index CREATED_AT COUNT NEW_CREATED_AT
0 2022-11-11 2022-11-21 16:15:31.520960 5 2022-11-11 16:15:31.520960
1 2022-11-12 2022-11-21 17:15:31.520965 10 2022-11-12 17:15:31.520965
A:
You can use:
# ensure CREATED_AT is a datetime
s = pd.to_datetime(df['CREATED_AT'])
# subtract the date to only get the time, add to the index
# ensuring the index is of datetime type
df['CREATED_AT'] = s.sub(s.dt.normalize()).add(pd.to_datetime(df.index))
If everything is already of datetime type, this simplifies to:
df['CREATED_AT'] = (df['CREATED_AT']
.sub(df['CREATED_AT'].dt.normalize())
.add(df.index)
)
Output:
CREATED_AT COUNT
1990-01-01 1990-01-01 07:30:00 5
1990-01-02 1990-01-02 07:30:00 10
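A minimal runnable sketch reproducing the question's frame and applying the approach above (the column and index values are taken from the example data):
import pandas as pd

df = pd.DataFrame(
    {"CREATED_AT": pd.to_datetime(["2022-01-01 07:30:00"] * 2), "COUNT": [5, 10]},
    index=pd.to_datetime(["1990-01-01", "1990-01-02"]),
)

# Keep only the time-of-day, then re-anchor it on the index date.
df["CREATED_AT"] = (
    df["CREATED_AT"].sub(df["CREATED_AT"].dt.normalize()).add(df.index)
)
print(df)  # CREATED_AT now carries the index date with the original time of day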
|
Pandas - Datetime Manipulation
|
I have a dataframe like so:
CREATED_AT COUNT
'1990-01-01' '2022-01-01 07:30:00' 5
'1990-01-02' '2022-01-01 07:30:00' 10
...
Where the index is a date and the CREATED_AT column is a datetime that is the same value for all rows.
How can I update the CREATED_AT_COLUMN such that it inherits its date portion from the index?
The result should look like:
CREATED_AT COUNT
'1990-01-01' '1990-01-01 07:30:00' 5
'1990-01-02' '1990-01-02 07:30:00' 10
...
Attempts at this result in errors like:
cannot add DatetimeArray and DatetimeArray
|
[
"You can use df.reset_index() to use the index as a column and then do a simple maniuplation to get the output you want like this:\n# Creating a test df\nimport pandas as pd\nfrom datetime import datetime, timedelta, date\n\ndf = pd.DataFrame.from_dict({\n \"CREATED_AT\": [datetime.now(), datetime.now() + timedelta(hours=1)],\n \"COUNT\": [5, 10]\n})\ndf_with_index = df.set_index(pd.Index([date.today() - timedelta(days=10), date.today() - timedelta(days=9)]))\n\n# Creating the column with the result\ndf_result = df_with_index.reset_index()\ndf_result[\"NEW_CREATED_AT\"] = pd.to_datetime(df_result[\"index\"].astype(str) + ' ' + df_result[\"CREATED_AT\"].dt.time.astype(str))\n\nResult:\n index CREATED_AT COUNT NEW_CREATED_AT\n0 2022-11-11 2022-11-21 16:15:31.520960 5 2022-11-11 16:15:31.520960\n1 2022-11-12 2022-11-21 17:15:31.520965 10 2022-11-12 17:15:31.520965\n\n",
"You can use:\n# ensure CREATED_AT is a datetime\ns = pd.to_datetime(df['CREATED_AT'])\n\n# subtract the date to only get the time, add to the index\n# ensuring the index is of datetime type\ndf['CREATED_AT'] = s.sub(s.dt.normalize()).add(pd.to_datetime(df.index))\n\nIf everything is already of datetime type, this simplifies to:\ndf['CREATED_AT'] = (df['CREATED_AT']\n .sub(df['CREATED_AT'].dt.normalize())\n .add(df.index)\n )\n\nOutput:\n CREATED_AT COUNT\n1990-01-01 1990-01-01 07:30:00 5\n1990-01-02 1990-01-02 07:30:00 10\n\n"
] |
[
1,
0
] |
[] |
[] |
[
"pandas",
"python"
] |
stackoverflow_0074521526_pandas_python.txt
|
Q:
Kivy with cv2 in python (Read barcode)
I'm trying to make an app that reads barcodes on an Android machine, and I have a problem: I can't compare between cv and kivy. Please, if someone can help me.
Code that reads a barcode from the Android camera using Kivy:
from kivy.app import App
from kivy.uix.camera import Camera
from pyzbar.pyzbar import decode
import numpy as np
import cv2
from kivy.properties import ListProperty
class MainApp(App):
def build(self):
self.capture = cv2.VideoCapture(0)
cam = Camera(play=True, resolution=(640, 480))
#cap=cv2.VideoCapture(0)
success, frame=self.read()
for code in decode(frame):
print(code.type)
print(code.data.decode('utf-8'))
cv2.imshow("Results", frame);
cv2.waitKey(1);
return cam
if __name__== "__main__":
MainApp().run()
A:
What do you mean by "i cant compare between cv and kivy"? I have a working machine learning app in Kivy that uses the camera. So what exactly do you need? Do you need to display the image in a cv window, or do you want to display the camera frames in Kivy? You can update the Kivy window every x seconds:
def __init__(self, **kw):
    super().__init__()
    self.capture = None

def on_pre_enter(self, *args):
    super().__init__()
    self.capture = cv.VideoCapture(0)
    Clock.schedule_interval(self.update, 1.0 / 30)  # update 30 fps

def on_leave(self, *args):
    self.capture.release()
And then you can display the frame as a Kivy image texture like this:
def update(self, dt):
    ret, frame = self.capture.read()

    if ret:
        buf1 = cv.flip(frame, 0)
        buf = buf1.tobytes()
        image_texture = Texture.create(
            size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')
        image_texture.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')
        self.ids['image'].texture = image_texture
And the Image widget you can define in kv:
MDBoxLayout:
    size_hint: .65,1
    orientation: 'vertical'
    Image: #webcam
        id: image
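Putting the pieces together with the question's pyzbar decoding, a minimal self-contained sketch might look like this (it assumes camera index 0 is available; on Android the camera provider may differ, so treat the capture setup and class name as illustrative):
import cv2
from pyzbar.pyzbar import decode
from kivy.app import App
from kivy.clock import Clock
from kivy.graphics.texture import Texture
from kivy.uix.image import Image


class BarcodeApp(App):
    def build(self):
        self.capture = cv2.VideoCapture(0)
        self.image = Image()
        # Poll the camera ~30 times per second and refresh the widget texture.
        Clock.schedule_interval(self.update, 1.0 / 30)
        return self.image

    def update(self, dt):
        ret, frame = self.capture.read()
        if not ret:
            return
        # Decode any barcodes found in the current frame.
        for code in decode(frame):
            print(code.type, code.data.decode("utf-8"))
        # Convert the BGR frame to a Kivy texture (flip vertically for display).
        buf = cv2.flip(frame, 0).tobytes()
        texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt="bgr")
        texture.blit_buffer(buf, colorfmt="bgr", bufferfmt="ubyte")
        self.image.texture = texture

    def on_stop(self):
        self.capture.release()


if __name__ == "__main__":
    BarcodeApp().run()
The important point is that the frame read in update() is used both for decoding and for the texture, so OpenCV windows (cv2.imshow) are not needed inside the Kivy app.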
|
Kivy with cv2 in python (Read barcode)
|
i try to make app for read barcode using android machine and i have problem i cant compare between cv and kivy please if someone can help me .
code that read barcode from android camera using kivy
from kivy.app import App
from kivy.uix.camera import Camera
from pyzbar.pyzbar import decode
import numpy as np
import cv2
from kivy.properties import ListProperty
class MainApp(App):
def build(self):
self.capture = cv2.VideoCapture(0)
cam = Camera(play=True, resolution=(640, 480))
#cap=cv2.VideoCapture(0)
success, frame=self.read()
for code in decode(frame):
print(code.type)
print(code.data.decode('utf-8'))
cv2.imshow("Results", frame);
cv2.waitKey(1);
return cam
if __name__== "__main__":
MainApp().run()
|
[
"what do you mean by \" i cant compare between cv and kivy\" ? I have working machine learning app in kivy using cam. So what do you exactly need ? You need o display img in cv window, or you want to display camera frames in kivy ? You can update the kivy window every x second:\n def __init__(self, **kw):\n super().__init__()\n self.capture = None\n\n def on_pre_enter(self, *args):\n super().__init__()\n self.capture = cv.VideoCapture(0)\n Clock.schedule_interval(self.update, 1.0 / 30) # update 30 fps\n\n def on_leave(self, *args):\n self.capture.release()\n\nAnd then you can display frame as kivy image texture like this:\n def update(self, dt):\n ret, frame = self.capture.read()\n\n if ret:\n buf1 = cv.flip(frame, 0)\n buf = buf1.tobytes()\n image_texture = Texture.create(\n size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')\n image_texture.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')\n self.ids['image'].texture = image_texture\n\nAnd Image you can define in kivy:\nMDBoxLayout:\n size_hint: .65,1\n orientation: 'vertical'\n Image: #webcam\n id: image\n\n"
] |
[
0
] |
[] |
[] |
[
"cv2",
"kivy",
"kivymd",
"python"
] |
stackoverflow_0074511067_cv2_kivy_kivymd_python.txt
|
Q:
Getting memory error in Python 3.8, using Spyder as my IDE
I am trying to run a program that involves multiplying two large binary NumPy arrays of size 69496 times 511. My arrays are binary, and I am using Spyder as my IDE.
Here is my code:
import numpy as np
import math
import re
def ip(A):
B=A.transpose()
C = np.dot(A, B)
[a, b] = C.shape
D=[]
for i in range (a):
print(i)
for j in range (i+1, a):
c= C[i, j]
D.append(c)
return(D)
So, the input "o" that I am giving to my function is a binary NumPy array of size 69496 times 511.
D = IP(o)
However, I am getting the following memory error:
MemoryError: Unable to allocate 36.0 GiB for an array with shape (69496, 69496) and data type float64
I have 16 GB RAM, and I have Windows 11. Can anybody give me suggestion about how to resolve this issue?
A:
What you actually want is this function:
You don't need to calculate the full A x A.T, since you have a huge matrix and that intermediate result alone needs a big block of memory. You could set the dtype to uint16, but the result would still be huge anyway (since you have a huge number of rows).
def ip(a):
    m, n = a.shape
    c = []
    for i in range(m):
        c.extend(np.dot(a[i + 1:], a[i]).tolist())
    return c
Writing it like this avoids the big intermediate matrix. My recommendation would be to return a NumPy array as well; I did not get why you store the result as a list, but I kept it as you wanted for the output.
Output as a NumPy array:
def ip(a):
    m, n = a.shape
    c = np.zeros(sum(range(m)), dtype=np.uint16)
    offset = (np.arange(m - 1) * m) - np.cumsum(np.arange(m - 1))
    for i, off_set in enumerate(offset):
        c[off_set: off_set + m - i - 1] = np.dot(a[i + 1:], a[i])
    return c
PS: note that even though your data is uint8 (as you mentioned), the output can need 16 bits, since there are more than 256 columns.
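A quick way to sanity-check the row-by-row version against the naive formula on a small array (the sizes below are deliberately tiny and illustrative; only the memory-friendly version should ever see the real 69496 x 511 input):
import numpy as np

rng = np.random.default_rng(0)
a = rng.integers(0, 2, size=(50, 11), dtype=np.uint8)

# Memory-friendly pairwise inner products (upper triangle, row by row).
d = ip(a)

# Naive reference, only feasible on the small test case.
full = a @ a.T
expected = [full[i, j] for i in range(50) for j in range(i + 1, 50)]
assert list(map(int, d)) == list(map(int, expected))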
|
Getting memory error in Python 3.8, using spider as my IDE
|
I am trying to run a program that involves multiplying two large binary NumPy arrays of size 69496 times 511. My arrays are binary, and I am using Spyder as my IDE.
Here is my code:
import numpy as np
import math
import re
def ip(A):
B=A.transpose()
C = np.dot(A, B)
[a, b] = C.shape
D=[]
for i in range (a):
print(i)
for j in range (i+1, a):
c= C[i, j]
D.append(c)
return(D)
So, the input "o" that I am giving to my function is a binary NumPy array of size 69496 times 511.
D = IP(o)
However, I am getting the following memory error:
MemoryError: Unable to allocate 36.0 GiB for an array with shape (69496, 69496) and data type float64
I have 16 GB RAM, and I have Windows 11. Can anybody give me suggestion about how to resolve this issue?
|
[
"what you actually want is this function:\nyou dont need to calculate the a x aT, since you have huge matrix, it finally gonna need big memory size. you could set the dtype as uint16 , but any way finally still the size is huge (since you have huge number of row)\ndef ip(a):\n m, n = a.shape\n c = []\n for i in range(m):\n c.extend(np.dot(a[i + 1:], a[i]).tolist())\n return c\n\nwriting like this will help not to use big memory, but my recommendation would be to as well return as numpy array, did not get why you store as list, however I kept it as you wanted as output.\noutput as numpy array:\ndef ip(a):\n m, n = a.shape\n c = np.zeros(sum(range(m)), dtype=np.uint16)\n offset = ((np.arange(m - 1) * m) - np.cumsum(np.arange(m - 1) ))\n for i, off_set in enumerate(offset):\n c[off_set: off_set + m - i - 1] = np.dot(a[i + 1:], a[i])\n return c\n\nPS: notice even though your data is uint8 (as you mentioned) your output could be 16bit, since the columns are more than 256.\n"
] |
[
0
] |
[] |
[] |
[
"numpy",
"numpy_ndarray",
"python",
"python_3.x"
] |
stackoverflow_0074520446_numpy_numpy_ndarray_python_python_3.x.txt
|
Q:
How to animate the vector field?
As the question, how do I animate a series of plots instead of printing each individual plot? Thanks a lot!!!
import numpy as np
from matplotlib import pyplot as plt
from scipy.integrate import odeint
import matplotlib.animation as animation
%matplotlib inline
# Define vector field
def vField(x,t,a):
u = 2*x[1]
v = -x[0] + a*(x[1] - 1/4 * x[1]**2)
return [u,v]
vec = [-10,-5,0,5,10]
for a in vec:
# Plot vector field
X, Y = np.mgrid[-2:2:20j,-2:2:20j]
U, V = vField([X,Y],0,a)
fig, ax = plt.subplots(figsize=(10, 7))
ax.quiver(X, Y, U, V)
plt.pause(0.01)
plt.show()
A:
You have to declare your fig, ax before the loop and clear it between each quiver.
Indeed, you want to use only one figure and not create one by iteration. Moreover, you want to clear the figure between each iteration or all plots will be shown on top of the others.
fig, ax = plt.subplots(figsize=(10, 7))
for a in vec:
    # Plot vector field
    X, Y = np.mgrid[-2:2:20j,-2:2:20j]
    U, V = vField([X,Y],0,a)

    ax.clear()
    ax.quiver(X, Y, U, V)
    plt.pause(0.01)
Please note that you will not see anything with your current vec because it is too short and the pause is 0.01. With a longer one you will see a smooth drawing.
A:
You can use matplotlib's FuncAnimation:
import numpy as np
from matplotlib import pyplot as plt
from scipy.integrate import odeint
from matplotlib.animation import FuncAnimation
# Define vector field
def vField(x,t,a):
    u = 2*x[1]
    v = -x[0] + a*(x[1] - 1/4 * x[1]**2)
    return [u,v]

vec = np.linspace(-10, 10, 100)

fig, ax = plt.subplots()
X, Y = np.mgrid[-2:2:20j,-2:2:20j]
U, V = vField([X,Y],0, vec[0])
q = ax.quiver(X, Y, U, V)

def animate(i):
    U, V = vField([X,Y],0, vec[i])
    q.set_UVC(U, V)
ani = FuncAnimation(fig, animate, frames=len(vec), repeat=False)
plt.show()
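If the animation also needs to be written to disk, the same FuncAnimation object can be saved; a small sketch (Pillow must be installed for the GIF writer, and the filename and fps here are illustrative):
# Save the quiver animation as a GIF; adjust fps and filename as needed.
ani.save("vector_field.gif", writer="pillow", fps=10)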
|
How to animate the vector field?
|
As the question, how do I animate a series of plots instead of printing each individual plot? Thanks a lot!!!
import numpy as np
from matplotlib import pyplot as plt
from scipy.integrate import odeint
import matplotlib.animation as animation
%matplotlib inline
# Define vector field
def vField(x,t,a):
u = 2*x[1]
v = -x[0] + a*(x[1] - 1/4 * x[1]**2)
return [u,v]
vec = [-10,-5,0,5,10]
for a in vec:
# Plot vector field
X, Y = np.mgrid[-2:2:20j,-2:2:20j]
U, V = vField([X,Y],0,a)
fig, ax = plt.subplots(figsize=(10, 7))
ax.quiver(X, Y, U, V)
plt.pause(0.01)
plt.show()
|
[
"You have to declare your fig, ax before the loop and clear it between each quiver.\nIndeed, you want to use only one figure and not create one by iteration. Moreover, you want to clear the figure between each iteration or all plots will be shown on top of the others.\nfig, ax = plt.subplots(figsize=(10, 7))\n\nfor a in vec:\n # Plot vector field\n X, Y = np.mgrid[-2:2:20j,-2:2:20j]\n U, V = vField([X,Y],0,a)\n\n ax.clear()\n ax.quiver(X, Y, U, V)\n plt.pause(0.01)\n\nPlease note that you will not see anything with your current vec because it is too short and the pause is 0.01. With a longer one you will see a smooth drawing.\n",
"You can use matplotlib's FuncAnimation:\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy.integrate import odeint\nfrom matplotlib.animation import FuncAnimation\n\n # Define vector field\ndef vField(x,t,a):\n u = 2*x[1]\n v = -x[0] + a*(x[1] - 1/4 * x[1]**2)\n return [u,v]\n \n\nvec = np.linspace(-10, 10, 100)\n\nfig, ax = plt.subplots()\nX, Y = np.mgrid[-2:2:20j,-2:2:20j]\nU, V = vField([X,Y],0, vec[0])\nq = ax.quiver(X, Y, U, V)\n\ndef animate(i):\n U, V = vField([X,Y],0, vec[i])\n q.set_UVC(U, V)\n\nani = FuncAnimation(fig, animate, frames=len(vec), repeat=False)\nplt.show()\n\n"
] |
[
0,
0
] |
[] |
[] |
[
"matplotlib",
"python"
] |
stackoverflow_0074521021_matplotlib_python.txt
|
Q:
Converting nested JSON into a flattened DataFrame
I have a data payload in nested JSON format and I want to get a flattened DataFrame.
{
"page_count": 21,
"page_number": 1,
"page_size": 300,
"total_records": 6128,
"registrants": [
{
"id": "23lnTNqyQ3qkthfghjgkk",
"first_name": "HUGO",
"last_name": "MACHA ILLEN",
"email": "hugreb@hotmail.com",
"address": "",
"city": "",
"country": "",
"zip": "",
"state": "",
"phone": "",
"industry": "",
"org": "",
"job_title": "",
"purchasing_time_frame": "",
"role_in_purchase_process": "",
"no_of_employees": "",
"comments": "",
"custom_questions": [
{
"title": "Departamento/ Región",
"value": ""
},
{
"title": "Género",
"value": "Masculino"
},
{
"title": "Edad",
"value": "De 35 a 55 años"
},
{
"title": "Nivel de estudio",
"value": "Técnico / Superior"
},
{
"title": "¿Eres cliente de una entidad financiera?",
"value": "Si"
},
{
"title": "¿Tiene una empresa?",
"value": "Si"
}
I use this function:
pat is the JSON file
df = pd.json_normalize(json.loads(pat.explode("custom_questions").to_json(orient="records")))
but it didn't produce the output I wanted.
I want to get something like this:
I want the values of the "title" fields (that is, the keys) to be displayed as the column headers and the corresponding "value" entries as the data, as in the attached image.
A:
You can use json_normalize:
df = pd.DataFrame(your_json['registrants']).explode('custom_questions').reset_index(drop=True)
df=df.join(pd.json_normalize(df.pop('custom_questions')))
#convert rows to columns. Set the first 17 columns as the index. We will not use these.
df=df.set_index(df.columns[0:17].to_list())
dfx=df.pivot_table(values='value',columns='title',aggfunc=list).apply(pd.Series.explode).reset_index(drop=True)
#we have duplicate rows. Drop them
df=df.reset_index().drop(['value','title'],axis=1).drop_duplicates().reset_index(drop=True)
df=df.join(dfx)
'''
| | id | first_name | last_name | email | address | city | country | zip | state | phone | industry | org | job_title | purchasing_time_frame | role_in_purchase_process | no_of_employees | comments | Departamento/ Región | Edad | Género | Nivel de estudio | ¿Eres cliente de una entidad financiera? | ¿Tiene una empresa? |
|---:|:----------------------|:-------------|:--------------|:-------------------|:----------|:-------|:----------|:------|:--------|:--------|:-----------|:------|:------------|:------------------------|:---------------------------|:------------------|:-----------|:-----------------------|:----------------|:----------|:-------------------|:-------------------------------------------|:----------------------|
| 0 | 23lnTNqyQ3qkthfghjgkk | HUGO | MACHA ILLEN | hugreb@hotmail.com | | | | | | | | | | | | | | | De 35 a 55 años | Masculino | Técnico / Superior | Si | Si |
'''
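An alternative sketch using json_normalize's record_path/meta arguments directly (only a few meta columns are listed here for brevity, and data is assumed to be the parsed JSON dict, e.g. from json.load):
import pandas as pd

flat = pd.json_normalize(
    data["registrants"],
    record_path="custom_questions",
    meta=["id", "first_name", "last_name", "email"],  # add more meta columns as needed
)
# One row per registrant, one column per question title.
wide = (
    flat.pivot_table(
        index=["id", "first_name", "last_name", "email"],
        columns="title",
        values="value",
        aggfunc="first",
    )
    .reset_index()
)
print(wide)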
|
Converting nested JSON into a flattened DataFrame
|
I have a data load in nested json format and I want to get a nested dataframe
{
"page_count": 21,
"page_number": 1,
"page_size": 300,
"total_records": 6128,
"registrants": [
{
"id": "23lnTNqyQ3qkthfghjgkk",
"first_name": "HUGO",
"last_name": "MACHA ILLEN",
"email": "hugreb@hotmail.com",
"address": "",
"city": "",
"country": "",
"zip": "",
"state": "",
"phone": "",
"industry": "",
"org": "",
"job_title": "",
"purchasing_time_frame": "",
"role_in_purchase_process": "",
"no_of_employees": "",
"comments": "",
"custom_questions": [
{
"title": "Departamento/ Región",
"value": ""
},
{
"title": "Género",
"value": "Masculino"
},
{
"title": "Edad",
"value": "De 35 a 55 años"
},
{
"title": "Nivel de estudio",
"value": "Técnico / Superior"
},
{
"title": "¿Eres cliente de una entidad financiera?",
"value": "Si"
},
{
"title": "¿Tiene una empresa?",
"value": "Si"
}
I use this funtion:
pat is the file json
df = pd.json_normalize(json.loads(pat.explode("custom_questions").to_json(orient="records")))
but it didn't work for the output i wanted.
I want get someone like this:
I want the value of the "titles", that is, the keys" to be displayed as the column headers and the value of "value" as the data. As well as the image that is attached
|
[
"you can use json_normalize:\ndf = pd.DataFrame(your_json['registrants']).explode('custom_questions').reset_index(drop=True)\ndf=df.join(pd.json_normalize(df.pop('custom_questions')))\n\n#convert rows to columns. Set index first 17 columns. We will not use theese.\ndf=df.set_index(df.columns[0:17].to_list())\ndfx=df.pivot_table(values='value',columns='title',aggfunc=list).apply(pd.Series.explode).reset_index(drop=True)\n\n#we have duplicate rows. Drop them\ndf=df.reset_index().drop(['value','title'],axis=1).drop_duplicates().reset_index(drop=True)\ndf=df.join(dfx)\n'''\n| | id | first_name | last_name | email | address | city | country | zip | state | phone | industry | org | job_title | purchasing_time_frame | role_in_purchase_process | no_of_employees | comments | Departamento/ Región | Edad | Género | Nivel de estudio | ¿Eres cliente de una entidad financiera? | ¿Tiene una empresa? |\n|---:|:----------------------|:-------------|:--------------|:-------------------|:----------|:-------|:----------|:------|:--------|:--------|:-----------|:------|:------------|:------------------------|:---------------------------|:------------------|:-----------|:-----------------------|:----------------|:----------|:-------------------|:-------------------------------------------|:----------------------|\n| 0 | 23lnTNqyQ3qkthfghjgkk | HUGO | MACHA ILLEN | hugreb@hotmail.com | | | | | | | | | | | | | | | De 35 a 55 años | Masculino | Técnico / Superior | Si | Si |\n\n'''\n\n"
] |
[
0
] |
[] |
[] |
[
"dictionary",
"json",
"json_normalize",
"nested",
"python"
] |
stackoverflow_0074513401_dictionary_json_json_normalize_nested_python.txt
|
Q:
"from typing import List" vs "from ast import List"
In Python, if I use this:
from typing import List
I have to use List[]
If I use this:
from ast import List
I have to use List()
What is the difference?
Thanks.
googled "typing" and "ast" but no luck
A:
The difference is that typing.List is a type hint: it describes a value rather than holding elements itself, and using it is optional.
ast.List, on the other hand, is a runtime class describing a node of the Python syntax tree, and it holds a sequential collection of Python expressions. ast.List is what you need if you are building or using a parser.
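A small sketch showing the two side by side (the head function is just an illustration):
import ast
from typing import List

def head(xs: List[int]) -> int:  # typing.List: an annotation, checked by tools, ignored at runtime
    return xs[0]

node = ast.parse("[1, 2, 3]", mode="eval").body
print(type(node))      # <class 'ast.List'> -> a syntax-tree node
print(len(node.elts))  # 3 -> the parsed element expressions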
|
"from typing import List" vs "from ast import List"
|
In Python, if I use this:
from typing import List
I have to use List[]
If I use this:
from ast import List
I have to use List()
What is the difference?
Thanks.
googled "typing" and "ast" but no luck
|
[
"The difference is that one is a type-hint; it describes a value, not holds elements itself. It is also optional.\nThe other is a runtime-class describing the Python syntax-tree, and holds a sequential collection of Python expressions. ast.List is required if you are building/using a parser.\n"
] |
[
2
] |
[] |
[] |
[
"list",
"python",
"python_3.x"
] |
stackoverflow_0074521877_list_python_python_3.x.txt
|
Q:
Ignoring bad rows of data in pandas.read_csv() that break header= keyword
I have a series of very messy *.csv files that are being read in by pandas. An example csv is:
Instrument 35392
"Log File Name : station"
"Setup Date (MMDDYY) : 031114"
"Setup Time (HHMMSS) : 073648"
"Starting Date (MMDDYY) : 031114"
"Starting Time (HHMMSS) : 090000"
"Stopping Date (MMDDYY) : 031115"
"Stopping Time (HHMMSS) : 235959"
"Interval (HHMMSS) : 010000"
"Sensor warmup (HHMMSS) : 000200"
"Circltr warmup (HHMMSS) : 000200"
"Date","Time","","Temp","","SpCond","","Sal","","IBatt",""
"MMDDYY","HHMMSS","","øC","","mS/cm","","ppt","","Volts",""
"Random message here 031114 073721 to 031114 083200"
03/11/14,09:00:00,"",15.85,"",1.408,"",.74,"",6.2,""
03/11/14,10:00:00,"",15.99,"",1.96,"",1.05,"",6.3,""
03/11/14,11:00:00,"",14.2,"",40.8,"",26.12,"",6.2,""
03/11/14,12:00:01,"",14.2,"",41.7,"",26.77,"",6.2,""
03/11/14,13:00:00,"",14.5,"",41.3,"",26.52,"",6.2,""
03/11/14,14:00:00,"",14.96,"",41,"",26.29,"",6.2,""
"message 3"
"message 4"**
I have been using this code to import the *csv file, process the double headers, pull out the empty columns, and then strip the offending rows with bad data:
DF = pd.read_csv(BADFILE,parse_dates={'Datetime_(ascii)': [0,1]}, sep=",", \
header=[10,11],na_values=['','na', 'nan nan'], \
skiprows=[10], encoding='cp1252')
DF = DF.dropna(how="all", axis=1)
DF = DF.dropna(thresh=2)
droplist = ['message', 'Random']
DF = DF[~DF['Datetime_(ascii)'].str.contains('|'.join(droplist))]
DF.head()
Datetime_(ascii) (Temp, øC) (SpCond, mS/cm) (Sal, ppt) (IBatt, Volts)
0 03/11/14 09:00:00 15.85 1.408 0.74 6.2
1 03/11/14 10:00:00 15.99 1.960 1.05 6.3
2 03/11/14 11:00:00 14.20 40.800 26.12 6.2
3 03/11/14 12:00:01 14.20 41.700 26.77 6.2
4 03/11/14 13:00:00 14.50 41.300 26.52 6.2
This was working fine and dandy until I hit a file that has an erroneous one-row line after the header: "Random message here 031114 073721 to 031114 083200"
The error I receive is:
*C:\Users\USER\AppData\Local\Continuum\Anaconda3\lib\site-
packages\pandas\io\parsers.py in _do_date_conversions(self, names, data)
1554 data, names = _process_date_conversion(
1555 data, self._date_conv, self.parse_dates, self.index_col,
-> 1556 self.index_names, names,
keep_date_col=self.keep_date_col)
1557
1558 return names, data
C:\Users\USER\AppData\Local\Continuum\Anaconda3\lib\site-
packages\pandas\io\parsers.py in _process_date_conversion(data_dict,
converter, parse_spec, index_col, index_names, columns, keep_date_col)
2975 if not keep_date_col:
2976 for c in list(date_cols):
-> 2977 data_dict.pop(c)
2978 new_cols.remove(c)
2979
KeyError: ('Time', 'HHMMSS')*
If I remove that line, the code works fine. Similarly, if I remove the header= line the code works fine. However, I want to be able to preserve this because I am reading in hundreds of these files.
Difficulty: I would prefer not to open each file before the call to pandas.read_csv(), as these files can be rather large - thus I don't want to read and save them multiple times! Also, I would prefer a real pandas/pythonic solution that doesn't involve opening the file first as a StringIO buffer to remove offending lines.
A:
Here's one approach, making use of the fact that skiprows accepts a callable function. The function receives only the row index being considered, which is a built-in limitation of that parameter.
As such, the callable function skip_test() first checks whether the current index is in the set of known indices to skip. If not, then it opens the actual file and checks the corresponding row to see if its contents match.
The skip_test() function is a little hacky in the sense that it does inspect the actual file, although it only inspects up until the current row index it's evaluating. It also assumes that the bad line always begins with the same string (in the example case, "foo"), but that seems to be a safe assumption given OP.
# example data
""" foo.csv
uid,a,b,c
0,1,2,3
skip me
1,11,22,33
foo
2,111,222,333
"""
import pandas as pd
def skip_test(r, fn, fail_on, known):
    if r in known:  # we know we always want to skip these
        return True
    # check if row index matches problem line in file
    # for efficiency, quit after we pass row index in file
    with open(fn, "r") as f:
        data = f.read()
    for i, line in enumerate(data.splitlines()):
        if (i == r) & line.startswith(fail_on):
            return True
        elif i > r:
            break
    return False
fname = "foo.csv"
fail_str = "foo"
known_skip = [2]
pd.read_csv(fname, sep=",", header=0,
skiprows=lambda x: skip_test(x, fname, fail_str, known_skip))
# output
uid a b c
0 0 1 2 3
1 1 11 22 33
2 2 111 222 333
If you know exactly which line the random message will appear on when it does appear, then this will be much faster, as you can just tell it not to inspect the file contents for any index past the potential offending line.
A:
After some tinkering yesterday I found a solution and what the potential issue may be.
I tried the skip_test() function answer above, but I was still getting errors with the size of the table:
pandas\_libs\parsers.pyx in pandas._libs.parsers.TextReader.read (pandas\_libs\parsers.c:10862)()
pandas\_libs\parsers.pyx in pandas._libs.parsers.TextReader._read_low_memory (pandas\_libs\parsers.c:11138)()
pandas\_libs\parsers.pyx in pandas._libs.parsers.TextReader._read_rows (pandas\_libs\parsers.c:11884)()
pandas\_libs\parsers.pyx in pandas._libs.parsers.TextReader._tokenize_rows (pandas\_libs\parsers.c:11755)()
pandas\_libs\parsers.pyx in pandas._libs.parsers.raise_parser_error (pandas\_libs\parsers.c:28765)()
ParserError: Error tokenizing data. C error: Expected 1 fields in line 14, saw 11
So after playing around with skiprows= I discovered that I was just not getting the behavior I wanted when using the engine='c'. read_csv() was still determining the size of the file from those first few rows, and some of those single column rows were still being passed. It may be that I have a few more bad single column rows in my csv set that I did not plan on.
Instead, I create an arbitrary sized DataFrame as a template. I pull in the entire .csv file, then use logic to strip out the NaN rows.
For example, I know that the largest table that I will encounter with my data will be 10 rows long. So my call to pandas is:
DF = pd.read_csv(csv_file, sep=',', \
parse_dates={'Datetime_(ascii)': [0,1]},\
na_values=['','na', '999999', '#'], engine='c',\
encoding='cp1252', names = list(range(0,10)))
I then use these two lines to drop the NaN rows and columns from the DataFrame:
#drop the null columns created by double deliminators
DF = DF.dropna(how="all", axis=1)
DF = DF.dropna(thresh=2) # drop if we don't have at least 2 cells with real values
A:
If anyone in the future comes across this question, pandas has now implemented the on_bad_lines argument. You can now solve this problem by using on_bad_lines = "skip"
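For example, a minimal sketch of where the argument goes (the filename here is illustrative; pandas 1.3 or newer is required, where on_bad_lines replaced the older error_bad_lines/warn_bad_lines flags):
import pandas as pd

# Rows that cannot be tokenized into the expected number of fields are skipped instead of raising.
df = pd.read_csv("station.csv", on_bad_lines="skip")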
|
Ignoring bad rows of data in pandas.read_csv() that break header= keyword
|
I have a series of very messy *.csv files that are being read in by pandas. An example csv is:
Instrument 35392
"Log File Name : station"
"Setup Date (MMDDYY) : 031114"
"Setup Time (HHMMSS) : 073648"
"Starting Date (MMDDYY) : 031114"
"Starting Time (HHMMSS) : 090000"
"Stopping Date (MMDDYY) : 031115"
"Stopping Time (HHMMSS) : 235959"
"Interval (HHMMSS) : 010000"
"Sensor warmup (HHMMSS) : 000200"
"Circltr warmup (HHMMSS) : 000200"
"Date","Time","","Temp","","SpCond","","Sal","","IBatt",""
"MMDDYY","HHMMSS","","øC","","mS/cm","","ppt","","Volts",""
"Random message here 031114 073721 to 031114 083200"
03/11/14,09:00:00,"",15.85,"",1.408,"",.74,"",6.2,""
03/11/14,10:00:00,"",15.99,"",1.96,"",1.05,"",6.3,""
03/11/14,11:00:00,"",14.2,"",40.8,"",26.12,"",6.2,""
03/11/14,12:00:01,"",14.2,"",41.7,"",26.77,"",6.2,""
03/11/14,13:00:00,"",14.5,"",41.3,"",26.52,"",6.2,""
03/11/14,14:00:00,"",14.96,"",41,"",26.29,"",6.2,""
"message 3"
"message 4"**
I have been using this code to import the *csv file, process the double headers, pull out the empty columns, and then strip the offending rows with bad data:
DF = pd.read_csv(BADFILE,parse_dates={'Datetime_(ascii)': [0,1]}, sep=",", \
header=[10,11],na_values=['','na', 'nan nan'], \
skiprows=[10], encoding='cp1252')
DF = DF.dropna(how="all", axis=1)
DF = DF.dropna(thresh=2)
droplist = ['message', 'Random']
DF = DF[~DF['Datetime_(ascii)'].str.contains('|'.join(droplist))]
DF.head()
Datetime_(ascii) (Temp, øC) (SpCond, mS/cm) (Sal, ppt) (IBatt, Volts)
0 03/11/14 09:00:00 15.85 1.408 0.74 6.2
1 03/11/14 10:00:00 15.99 1.960 1.05 6.3
2 03/11/14 11:00:00 14.20 40.800 26.12 6.2
3 03/11/14 12:00:01 14.20 41.700 26.77 6.2
4 03/11/14 13:00:00 14.50 41.300 26.52 6.2
This was working fine and dandy until I have a file that has an erronious 1 row line after the header: "Random message here 031114 073721 to 031114 083200"
The error I receieve is:
*C:\Users\USER\AppData\Local\Continuum\Anaconda3\lib\site-
packages\pandas\io\parsers.py in _do_date_conversions(self, names, data)
1554 data, names = _process_date_conversion(
1555 data, self._date_conv, self.parse_dates, self.index_col,
-> 1556 self.index_names, names,
keep_date_col=self.keep_date_col)
1557
1558 return names, data
C:\Users\USER\AppData\Local\Continuum\Anaconda3\lib\site-
packages\pandas\io\parsers.py in _process_date_conversion(data_dict,
converter, parse_spec, index_col, index_names, columns, keep_date_col)
2975 if not keep_date_col:
2976 for c in list(date_cols):
-> 2977 data_dict.pop(c)
2978 new_cols.remove(c)
2979
KeyError: ('Time', 'HHMMSS')*
If I remove that line, the code works fine. Similarly, if I remove the header= line the code works fine. However, I want to be able to preserve this because I am reading in hundreds of these files.
Difficulty: I would prefer to not open each file before the call to pandas.read_csv() as these files can be rather large - thus I don't want to read and save multiple times! Also, I would prefer a real pandas/pythonic solution that doesn't involve openning the file first as a stringIO buffer to removing offending lines.
|
[
"Here's one approach, making use of the fact that skip_rows accepts a callable function. The function receives only the row index being considered, which is a built-in limitation of that parameter. \nAs such, the callable function skip_test() first checks whether the current index is in the set of known indices to skip. If not, then it opens the actual file and checks the corresponding row to see if its contents match. \nThe skip_test() function is a little hacky in the sense that it does inspect the actual file, although it only inspects up until the current row index it's evaluating. It also assumes that the bad line always begins with the same string (in the example case, \"foo\"), but that seems to be a safe assumption given OP. \n# example data\n\"\"\" foo.csv\nuid,a,b,c\n0,1,2,3\nskip me\n1,11,22,33\nfoo\n2,111,222,333 \n\"\"\"\n\nimport pandas as pd\n\ndef skip_test(r, fn, fail_on, known):\n if r in known: # we know we always want to skip these\n return True\n # check if row index matches problem line in file\n # for efficiency, quit after we pass row index in file\n f = open(fn, \"r\")\n data = f.read()\n for i, line in enumerate(data.splitlines()):\n if (i == r) & line.startswith(fail_on):\n return True\n elif i > r:\n break\n return False\n\nfname = \"foo.csv\"\nfail_str = \"foo\"\nknown_skip = [2]\npd.read_csv(fname, sep=\",\", header=0, \n skiprows=lambda x: skip_test(x, fname, fail_str, known_skip))\n# output\n uid a b c\n0 0 1 2 3\n1 1 11 22 33\n2 2 111 222 333\n\nIf you know exactly which line the random message will appear on when it does appear, then this will be much faster, as you can just tell it not to inspect the file contents for any index past the potential offending line. \n",
"After some tinkering yesterday I found a solution and what the potential issue may be.\nI tried the skip_test() function answer above, but I was still getting errors with the size of the table:\npandas\\_libs\\parsers.pyx in pandas._libs.parsers.TextReader.read (pandas\\_libs\\parsers.c:10862)()\n\npandas\\_libs\\parsers.pyx in pandas._libs.parsers.TextReader._read_low_memory (pandas\\_libs\\parsers.c:11138)()\n\npandas\\_libs\\parsers.pyx in pandas._libs.parsers.TextReader._read_rows (pandas\\_libs\\parsers.c:11884)()\n\npandas\\_libs\\parsers.pyx in pandas._libs.parsers.TextReader._tokenize_rows (pandas\\_libs\\parsers.c:11755)()\n\npandas\\_libs\\parsers.pyx in pandas._libs.parsers.raise_parser_error (pandas\\_libs\\parsers.c:28765)()\n\nParserError: Error tokenizing data. C error: Expected 1 fields in line 14, saw 11\n\nSo after playing around with skiprows= I discovered that I was just not getting the behavior I wanted when using the engine='c'. read_csv() was still determining the size of the file from those first few rows, and some of those single column rows were still being passed. It may be that I have a few more bad single column rows in my csv set that I did not plan on.\nInstead, I create an arbitrary sized DataFrame as a template. I pull in the entire .csv file, then use logic to strip out the NaN rows. \nFor example, I know that the largest table that I will encounter with my data will be 10 rows long. So my call to pandas is:\nDF = pd.read_csv(csv_file, sep=',', \\\n parse_dates={'Datetime_(ascii)': [0,1]},\\\n na_values=['','na', '999999', '#'], engine='c',\\ \n encoding='cp1252', names = list(range(0,10)))\n\nI then use these two lines to drop the NaN rows and columns from the DataFrame:\n#drop the null columns created by double deliminators\nDF = DF.dropna(how=\"all\", axis=1)\nDF = DF.dropna(thresh=2) # drop if we don't have at least 2 cells with real values\n\n",
"If anyone in the future comes across this question, pandas has now implemented the on_bad_lines argument. You can now solve this problem by using on_bad_lines = \"skip\"\n"
] |
[
3,
0,
0
] |
[] |
[] |
[
"csv",
"pandas",
"python"
] |
stackoverflow_0045679857_csv_pandas_python.txt
|
Q:
PyInstaller: Single-file executable doesn't work
PS C:\Users\user> pyinstaller onefile
Traceback (most recent call last):
  File "<frozen runpy>", line 198, in _run_module_as_main
  File "<frozen runpy>", line 88, in _run_code
  File "C:\Users\user\AppData\Local\Programs\Python\Python311-32\Scripts\pyinstaller.exe\__main__.py", line 7, in <module>
  File "C:\Users\user\AppData\Local\Programs\Python\Python311-32\Lib\site-packages\PyInstaller\__main__.py", line 107, in run
    parser = generate_parser()
  File "C:\Users\user\AppData\Local\Programs\Python\Python311-32\Lib\site-packages\PyInstaller\__main__.py", line 78, in generate_parser
    import PyInstaller.building.build_main
  File "C:\Users\user\AppData\Local\Programs\Python\Python311-32\Lib\site-packages\PyInstaller\building\build_main.py", line 35, in <module>
    from PyInstaller.depend import bindepend
  File "C:\Users\user\AppData\Local\Programs\Python\Python311-32\Lib\site-packages\PyInstaller\depend\bindepend.py", line 26, in <module>
    from PyInstaller.depend import dylib, utils
  File "C:\Users\user\AppData\Local\Programs\Python\Python311-32\Lib\site-packages\PyInstaller\depend\utils.py", line 33, in <module>
    from PyInstaller.depend import bytecode
  File "C:\Users\user\AppData\Local\Programs\Python\Python311-32\Lib\site-packages\PyInstaller\depend\bytecode.py", line 95, in <module>
    _call_function_bytecode = bytecode_regex(rb"""
  File "C:\Users\user\AppData\Local\Programs\Python\Python311-32\Lib\site-packages\PyInstaller\depend\bytecode.py", line 60, in bytecode_regex
    pattern = re.sub(
  File "C:\Users\user\AppData\Local\Programs\Python\Python311-32\Lib\re.py", line 190, in sub
    return _compile(pattern, flags).sub(repl, string, count)
  File "C:\Users\user\AppData\Local\Programs\Python\Python311-32\Lib\site-packages\PyInstaller\depend\bytecode.py", line 62, in <lambda>
    lambda m: _instruction_to_regex(m[1].decode()),
  File "C:\Users\user\AppData\Local\Programs\Python\Python311-32\Lib\site-packages\PyInstaller\depend\bytecode.py", line 40, in _instruction_to_regex
    return re.escape(bytes([dis.opmap[x]]))
KeyError: 'CALL_FUNCTION'
I'm trying to create a single-file executable for Windows from a Python application, using PyInstaller, but the command doesn't run and fails with the traceback above.
A:
I honestly do not know what that error is. All I can say is that
the command for making a single .exe file with pyinstaller is:
pyinstaller --onefile <filename>
For example pyinstaller --onefile myscript.py
I did a quick search and found this in pyinstaller: create one executable file
What you have done is pyinstaller onefile, which is the same as telling pyinstaller to create the executable using a file called onefile. When the program doesn't find the file 'onefile' it will not work, and it throws an error.
A:
This was addressed here:
https://github.com/pyinstaller/pyinstaller/issues/6950
Looks like it's been merged in, so make sure your PyInstaller version is 5.6 or greater.
A:
I think you made a mistake when writing the arguments; can you give more details? Or, if you can't use PyInstaller from the shell, you can use this library with a PyInstaller GUI: https://pypi.org/project/auto-py-to-exe/
|
PyInstaller: Single-file executable doesn't work
|
PS C:\Users\user> pyinstaller onefile Traceback (most recent call last): File "<frozen runpy>", line 198, in _run_module_as_main File "<frozen runpy>", line 88, in _run_code File "C:\Users\user\AppData\Local\Programs\Python\Python311-32\Scripts\pyinstaller.exe\__main__.py", line 7, in <module> File "C:\Users\user\AppData\Local\Programs\Python\Python311-32\Lib\site-packages\PyInstaller\__main__.py", line 107, in run parser = generate_parser() ^^^^^^^^^^^^^^^^^ File "C:\Users\user\AppData\Local\Programs\Python\Python311-32\Lib\site-packages\PyInstaller\__main__.py", line 78, in generate_parser import PyInstaller.building.build_main ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\user\AppData\Local\Programs\Python\Python311-32\Lib\site-packages\PyInstaller\building\build_main.py", line 35, in <module> from PyInstaller.depend import bindepend ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\user\AppData\Local\Programs\Python\Python311-32\Lib\site-packages\PyInstaller\depend\bindepend.py", line 26, in <module> from PyInstaller.depend import dylib, utils ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\user\AppData\Local\Programs\Python\Python311-32\Lib\site-packages\PyInstaller\depend\utils.py", line 33, in <module> from PyInstaller.depend import bytecode ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\user\AppData\Local\Programs\Python\Python311-32\Lib\site-packages\PyInstaller\depend\bytecode.py", line 95, in <module> _call_function_bytecode = bytecode_regex(rb""" ^^^^^^^^^^^^^^^^^^^^ File "C:\Users\user\AppData\Local\Programs\Python\Python311-32\Lib\site-packages\PyInstaller\depend\bytecode.py", line 60, in bytecode_regex pattern = re.sub( ^^^^^^^ File "C:\Users\user\AppData\Local\Programs\Python\Python311-32\Lib\re.py", line 190, in sub return _compile(pattern, flags).sub(repl, string, count) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\user\AppData\Local\Programs\Python\Python311-32\Lib\site-packages\PyInstaller\depend\bytecode.py", line 62, in <lambda> lambda m: _instruction_to_regex(m[1].decode()), ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\user\AppData\Local\Programs\Python\Python311-32\Lib\site-packages\PyInstaller\depend\bytecode.py", line 40, in _instruction_to_regex return re.escape(bytes([dis.opmap[x]])) ~~~~~~~~~^^^ KeyError: 'CALL_FUNCTION' I'm trying to create a single-file executable for Windows from a Python application, using pyinstaller, but the commend doesn't run and
|
[
"I honestly do not know what that error is. All I can say is that\nthe command for making a single .exe file with pyinstaller is:\npyinstaller --onefile <filename>\n\nFor example pyinstaller --onefile myscript.py\nI did a quick search and found this in pyinstaller: create one executable file\nWhat you have done is pyinstaller onefile, which is the same as telling pyinstaller to create the executable using a file called onefile. When the program doesn't find the file 'onefile' it will not work, and it throws an error.\n",
"This was addressed here:\nhttps://github.com/pyinstaller/pyinstaller/issues/6950\nLooks like it's been merged in so make sure your PyInsaller version is 5.6 or greater.\n",
"I think you made a mistake when you writing arguments can you give more details? or if you can't use PyInsaller on shell you can use this library with pyinstaller gui https://pypi.org/project/auto-py-to-exe/\n"
] |
[
1,
1,
0
] |
[] |
[] |
[
"pyinstaller",
"python",
"python_3.x"
] |
stackoverflow_0072565499_pyinstaller_python_python_3.x.txt
|
Q:
TensorFlow seems to modify both class and instance object
I have observed that TensorFlow methods like assign_add and assign_sub modify the variables of both the instance and the class (if both exist). Here is a simple code example to reproduce my observation. Can anyone please clarify this behavior (assign_sub and assign_add modifying both class and instance attributes)?
#a python class
class myc_base():
a=1.
def __init__(self, b=1.):
self.b=b
def add(self, to_add=1.):
self.a+=to_add
self.b+=to_add
def sub(self, to_sub=1.):
self.a-=to_sub
self.b-=to_sub
obj_base=myc_base()
print(f'Init. -- class.a: {myc_base.a} | obj.a: {obj_base.a}, obj.b: {obj_base.b}')
obj_base.add(5.)
print(f'after add -- class.a: {myc_base.a} | obj.a: {obj_base.a}, obj.b: {obj_base.b}')
obj_base.sub(2.)
print(f'after sub -- class.a: {myc_base.a} | obj.a: {obj_base.a}, obj.b: {obj_base.b}')
Output:
Init. -- class.a: 1.0 | obj.a: 1.0, obj.b: 1.0
after add -- class.a: 1.0 | obj.a: 6.0, obj.b: 6.0
after sub -- class.a: 1.0 | obj.a: 4.0, obj.b: 4.0
With TensorFlow:
import tensorflow as tf
#a class for tf operations
class myc_tf():
a=tf.Variable(1.)
def __init__(self, b=tf.Variable(1.)):
self.b=b
def add(self, to_add=1.):
self.a.assign_add(to_add)
self.b.assign_add(to_add)
def sub(self, to_sub=1.):
self.a.assign_sub(to_sub)
self.b.assign_sub(to_sub)
obj_tf=myc_tf()
print(f'Init. -- class.a: {myc_tf.a.numpy()} | obj.a: {obj_tf.a.numpy()}, obj.b: {obj_tf.b.numpy()}')
obj_tf.add(5.)
print(f'after add -- class.a: {myc_tf.a.numpy()} | obj.a: {obj_tf.a.numpy()}, obj.b: {obj_tf.b.numpy()}')
obj_tf.sub(2.)
print(f'after sub -- class.a: {myc_tf.a.numpy()} | obj.a: {obj_tf.a.numpy()}, obj.b: {obj_tf.b.numpy()}')
Output:
Init. -- class.a: 1.0 | obj.a: 1.0, obj.b: 1.0
after add -- class.a: 6.0 | obj.a: 6.0, obj.b: 6.0
after sub -- class.a: 4.0 | obj.a: 4.0, obj.b: 4.0
A:
a is a class attribute. b is an instance attribute.
However, augmented assignments like
self.a += to_add
self.a -= to_sub
are not modifying the class attribute you think you are accessing via the instance. They are really equivalent to
self.a = self.a.__iadd__(to_add)
self.a = self.a.__isub__(to_sub)
so the first time one is used, the class attribute is accessed on the RHS, but a new instance attribute is then created, and that instance attribute shadows the class attribute in all future calls.
If you want to modify a class attribute via an instance, you need to be explicit about it. One possible solution:
type(self).a += to_add
Your TensorFlow code doesn't make any assignments, augmented or otherwise. It's simply a method call on whatever self.a resolves to, which is the class attribute. No new instance attribute is ever created.
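A quick way to observe the shadowing, as a minimal sketch reusing the myc_base class from the question:
obj_base = myc_base()
print('a' in vars(obj_base))   # False: only the class attribute exists so far
obj_base.add(5.)
print('a' in vars(obj_base))   # True: the augmented assignment created an instance attribute
print(myc_base.a, obj_base.a)  # 1.0 6.0: the instance attribute shadows the class attribute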
A:
You need to understand the difference between class attributes and the instance attributes created in __init__(). assign_add() and assign_sub() update the underlying tf.Variable in place, whereas a plain Python assignment rebinds the attribute name to a brand-new value.
Sample: var1 and var2 both start at 10. In __init__, self.var2 is rebound to var1 * 10.0, so the instance gets its own var2 of 100 while the class attribute var2 stays at 10. In call(), assign_add() adds 30 to var1 in place, so the class attribute var1 becomes 40. The printed results are therefore var1 as 40, var2 as 10 (the class attribute was never reassigned), and the output data as 100 (the instance's var2).
import os
from os.path import exists
import tensorflow as tf
import matplotlib.pyplot as plt
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Class and Functions
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class MyDenseLayer(tf.keras.layers.Layer):
var1 = tf.Variable([10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0])
var2 = tf.Variable([10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0])
def __init__(self, num_outputs):
super(MyDenseLayer, self).__init__()
self.num_outputs = num_outputs
self.var2 = self.var1 * 10.0
def build(self, input_shape):
self.kernel = self.add_weight("kernel",
shape=[int(input_shape[-1]),
self.num_outputs])
def call(self, inputs):
self.var1.assign_add([30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0])
temp = tf.constant( self.var2 ).numpy()
return temp
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
start = 3
limit = 33
delta = 3
# Create DATA
sample = tf.range(start, limit, delta)
sample = tf.cast( sample, dtype=tf.float32 )
# Initial, ( 10, 1 )
sample = tf.constant( sample, shape=( 10, 1 ) )
layer = MyDenseLayer(10)
data = layer(sample)
print( tf.constant( MyDenseLayer.var1 ).numpy() )
print( tf.constant( MyDenseLayer.var2 ).numpy() )
print( data )
Output: class attribute var1 (updated in place by assign_add), class attribute var2 (untouched by the assign functions), and data (the result built from the instance's var2):
[40. 40. 40. 40. 40. 40. 40. 40. 40. 40.]
[10. 10. 10. 10. 10. 10. 10. 10. 10. 10.]
[100. 100. 100. 100. 100. 100. 100. 100. 100. 100.]
|
TensorFlow seems to modify both class and instance object
|
I have observed that TensorFlow methods like assign_add and assign_sub modify the variables of both the instance and the class (if both exist). Here is a simple code example to reproduce my observation. Can anyone please clarify this behavior (assign_sub and assign_add modifying both class and instance attributes)?
#a python class
class myc_base():
a=1.
def __init__(self, b=1.):
self.b=b
def add(self, to_add=1.):
self.a+=to_add
self.b+=to_add
def sub(self, to_sub=1.):
self.a-=to_sub
self.b-=to_sub
obj_base=myc_base()
print(f'Init. -- class.a: {myc_base.a} | obj.a: {obj_base.a}, obj.b: {obj_base.b}')
obj_base.add(5.)
print(f'after add -- class.a: {myc_base.a} | obj.a: {obj_base.a}, obj.b: {obj_base.b}')
obj_base.sub(2.)
print(f'after sub -- class.a: {myc_base.a} | obj.a: {obj_base.a}, obj.b: {obj_base.b}')
Output:
Init. -- class.a: 1.0 | obj.a: 1.0, obj.b: 1.0
after add -- class.a: 1.0 | obj.a: 6.0, obj.b: 6.0
after sub -- class.a: 1.0 | obj.a: 4.0, obj.b: 4.0
With TensorFlow:
import tensorflow as tf
#a class for tf operations
class myc_tf():
a=tf.Variable(1.)
def __init__(self, b=tf.Variable(1.)):
self.b=b
def add(self, to_add=1.):
self.a.assign_add(to_add)
self.b.assign_add(to_add)
def sub(self, to_sub=1.):
self.a.assign_sub(to_sub)
self.b.assign_sub(to_sub)
obj_tf=myc_tf()
print(f'Init. -- class.a: {myc_tf.a.numpy()} | obj.a: {obj_tf.a.numpy()}, obj.b: {obj_tf.b.numpy()}')
obj_tf.add(5.)
print(f'after add -- class.a: {myc_tf.a.numpy()} | obj.a: {obj_tf.a.numpy()}, obj.b: {obj_tf.b.numpy()}')
obj_tf.sub(2.)
print(f'after sub -- class.a: {myc_tf.a.numpy()} | obj.a: {obj_tf.a.numpy()}, obj.b: {obj_tf.b.numpy()}')
Output:
Init. -- class.a: 1.0 | obj.a: 1.0, obj.b: 1.0
after add -- class.a: 6.0 | obj.a: 6.0, obj.b: 6.0
after sub -- class.a: 4.0 | obj.a: 4.0, obj.b: 4.0
|
[
"a is a class attribute. b is an instance attribute.\nHowever, augmented assignments like\nself.a += to_add\nself.a -= to_sub\n\nare not modifying the class attribute you think you are accessing via the instance. They are really equivalent to\nself.a = self.a.__iadd__(to_add)\nself.a = self.a.__isub__(to_sub)\n\nso the first time one is used, the class attribute is accessed on the RHS, but a new instance attribute is then created, and that instance attribute shadows the class attribute in all future calls.\nIf you want to modify a class attribute via an instance, you need to be explicit about it. One possible solution:\ntype(self).a += to_add\n\n\nYour TensorFlow code doesn't make any assignments, augmented or otherwise. It's simply a method call on whatever self.a resolves to, which is the class attribute. No new instance attribute is ever created.\n",
"you need to understand the class and local variables when the class initiates with the variables you created and the init() functions. The assign_add() and assign_sub() are updated and wait for the value to update but the python class is to erase once release or re-assign value.\n\nSample: Starting at 10 both var1 and var2, the var2 lives with 100 as a local variable spent inside the class and their functions. By var1 is assigned it more 30 as a local variable. Updates can reach var1 as 40, var2 as 10 because var2 is never outside the functions and the result is 100.\n\nimport os\nfrom os.path import exists\nimport tensorflow as tf\n\nimport matplotlib.pyplot as plt\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n: Class and Functions\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nclass MyDenseLayer(tf.keras.layers.Layer):\n var1 = tf.Variable([10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0])\n var2 = tf.Variable([10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0])\n \n def __init__(self, num_outputs):\n super(MyDenseLayer, self).__init__()\n self.num_outputs = num_outputs\n self.var2 = self.var1 * 10.0\n \n def build(self, input_shape):\n self.kernel = self.add_weight(\"kernel\",\n shape=[int(input_shape[-1]),\n self.num_outputs])\n\n def call(self, inputs):\n self.var1.assign_add([30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0])\n \n temp = tf.constant( self.var2 ).numpy()\n return temp\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n: Variables\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nstart = 3\nlimit = 33\ndelta = 3\n\n# Create DATA\nsample = tf.range(start, limit, delta)\nsample = tf.cast( sample, dtype=tf.float32 )\n\n# Initail, ( 10, 1 )\nsample = tf.constant( sample, shape=( 10, 1 ) )\nlayer = MyDenseLayer(10)\ndata = layer(sample)\n\nprint( tf.constant( MyDenseLayer.var1 ).numpy() )\nprint( tf.constant( MyDenseLayer.var2 ).numpy() )\nprint( data )\n\n\nOutput: local var1, local var2 ( no assign functions ) and data ( result from class update )\n\n[40. 40. 40. 40. 40. 40. 40. 40. 40. 40.]\n[10. 10. 10. 10. 10. 10. 10. 10. 10. 10.]\n[100. 100. 100. 100. 100. 100. 100. 100. 100. 100.]\n\n"
] |
[
1,
0
] |
[] |
[] |
[
"class",
"object",
"python",
"tensorflow"
] |
stackoverflow_0074520583_class_object_python_tensorflow.txt
|
Q:
Build a matrix from a string of numbers separated by spaces
I have a list of numbers in a string separated by spaces: x="1 2 3 4 5 6 7 8 9 10 11 ..."
I want to extract 3x3 matrices (lists of lists) from this string, so the above string should produce output = [[[1,2,3],[4,5,6],[7,8,9]], [[10,11,12],[13,14,15],[16,17,18]], ...]
I tried using the split function on the variable x and looping over it to build the final output, but it gets messy. Is there a simple way to do it in plain Python or using some library?
We can assume that the number of elements will be consistent with splitting into 3x3 matrices and that the numbers are separated by a single space.
A:
With NumPy, this is quite easy.
First, use str.split() to get a list of the numbers, cast them to ints (or floats if you need), and then reshape the array:
import numpy as np
s = '0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17'
a = np.array([int(x) for x in s.split()]).reshape((-1,3))
yields
array([[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8],
[ 9, 10, 11],
[12, 13, 14],
[15, 16, 17]])
while
a = np.array([int(x) for x in s.split()]).reshape((3,-1))
yields
array([[ 0, 1, 2, 3, 4, 5],
[ 6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17]])
And if you know both dimensions of the results, pass them explicitly to reshape() instead of my -1 (which means that one dimension is left unspecified).
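If you want exactly the output asked for in the question, a sequence of 3x3 matrices, you can reshape to three dimensions instead; a small sketch, assuming the element count is a multiple of 9:
import numpy as np

s = '1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18'
blocks = np.array([int(x) for x in s.split()]).reshape((-1, 3, 3))
# blocks[0] is [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
# blocks[1] is [[10, 11, 12], [13, 14, 15], [16, 17, 18]]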
A:
Taking the chunk-implementation from this answer, you can just apply it twice to get 3x3 matrixes:
from itertools import zip_longest
def chunk(source, n):
return zip_longest(*([iter(source)] * n))
string = "1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18"
matrix = list(chunk(chunk(string.split(" "), 3), 3))
print(matrix)
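One caveat: zip_longest yields tuples of the split strings, so this produces nested tuples of strings rather than lists of ints. Converting to int before chunking keeps the values numeric, for example:
numbers = [int(v) for v in string.split(" ")]
matrix = list(chunk(chunk(numbers, 3), 3))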
A:
x = "1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18"
x_arr = [int(el) for el in x.split(" ")]
def foo(arr, n):
return [arr[i:(i + n)] for i in range(0, len(arr), n)]
[foo(el, 3) for el in foo(x_arr, 9)]
# [[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[10, 11, 12], [13, 14, 15], [16, 17, 18]]]
A:
There's probably a library that is designed to easily do this.
Using base Python with NumPy (a rough sketch follows this list):
Parse the string into sets of 3 or insert a semi-colon after every third element.
Use numpy's matrix operation, np.matrix() on each set of three.
Use a control structure to only operate on 3 sets of elements at a time.
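A rough sketch of that idea, reusing the input string from the question (note that np.matrix is discouraged in modern NumPy, so the array-based answers here are usually preferable):
import numpy as np

x = "1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18"
nums = [int(el) for el in x.split()]  # parse the string into ints
rows = [nums[i:i + 3] for i in range(0, len(nums), 3)]  # sets of 3
matrices = [np.matrix(rows[i:i + 3]) for i in range(0, len(rows), 3)]  # 3 rows at a time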
A:
You can divide your vector into chunks of the right size and use numpy's reshape:
vector = "1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18"
n = 3
matrix_size = 3*3
vector = np.array([int(x) for x in vector.split()])
matrixes = [vector[i-matrix_size:i].reshape((n,n)) for i in range(matrix_size,len(vector)+1,matrix_size)]
matrixes
[array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]),
array([[10, 11, 12],
[13, 14, 15],
[16, 17, 18]])]
|
Build a matrix from a string of numbers separated by spaces
|
I have a list of numbers in a string separated by spaces: x="1 2 3 4 5 6 7 8 9 10 11 ..."
I want to extract 3x3 matrices (lists of lists) from this string, so the above string should produce output = [[[1,2,3],[4,5,6],[7,8,9]], [[10,11,12],[13,14,15],[16,17,18]], ...]
I tried using the split function on the variable x and looping over it to build the final output, but it gets messy. Is there a simple way to do it in plain Python or using some library?
We can assume that the number of elements will be consistent with splitting into 3x3 matrices and that the numbers are separated by a single space.
|
[
"With NymPy, this is quite easy.\nFirst, use str.split() to get a list of the numbers, cast them to ints (or floats if you need), and then reshape the array:\nimport numpy as np\n\ns = '0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17'\n\na = np.array([int(x) for x in s.split()]).reshape((-1,3))\n\nyields\narray([[ 0, 1, 2],\n [ 3, 4, 5],\n [ 6, 7, 8],\n [ 9, 10, 11],\n [12, 13, 14],\n [15, 16, 17]])\n\nwhile\na = np.array([int(x) for x in s.split()]).reshape((3,-1))\n\nyields\narray([[ 0, 1, 2, 3, 4, 5],\n [ 6, 7, 8, 9, 10, 11],\n [12, 13, 14, 15, 16, 17]])\n\nAnd if you know both dimensions of the results, pass them explicitly to reshape() instead of my -1 (which means that one dimension is left unspecified).\n",
"Taking the chunk-implementation from this answer, you can just apply it twice to get 3x3 matrixes:\nfrom itertools import zip_longest\n\ndef chunk(source, n):\n return zip_longest(*([iter(source)] * n))\n\nstring = \"1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18\"\nmatrix = list(chunk(chunk(string.split(\" \"), 3), 3))\nprint(matrix)\n\n",
"x = \"1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18\"\nx_arr = [int(el) for el in x.split(\" \")]\n\ndef foo(arr, n):\n return [arr[i:(i + n)] for i in range(0, len(arr), n)]\n\n[foo(el, 3) for el in foo(x_arr, 9)]\n# [[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[10, 11, 12], [13, 14, 15], [16, 17, 18]]]\n\n",
"There's probably a library that is designed to easily do this.\nUsing base python with numpy:\n\nParse the string into sets of 3 or insert a semi-colon after every third element.\nUse numpy's matrix operation, np.matrix() on each set of three.\nUse a control structure to only operate on 3 sets of elements at a time.\n\n",
"You can divide your vector in chunks of the right size and use numpy's reshape:\nvector = \"1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18\"\nn = 3\nmatrix_size = 3*3\n\nvector = np.array([int(x) for x in vector.split()])\n\nmatrixes = [vector[i-matrix_size:i].reshape((n,n)) for i in range(matrix_size,len(vector)+1,matrix_size)]\n\nmatrixes\n\n[array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]]),\n array([[10, 11, 12],\n [13, 14, 15],\n [16, 17, 18]])]\n\n"
] |
[
0,
0,
0,
0,
0
] |
[] |
[] |
[
"python"
] |
stackoverflow_0074521840_python.txt
|
Q:
Subplot of dendrograms
I am creating six different dendrograms based on linkage. I have a for loop which loops through the six different linkage types. I want to print out all six of the dendrograms on one plot (using subplot) but cannot figure out how to do this. My attempt is below - with the commented-out lines being the code intended to print out the subplots. Thanks in advance for your help.
import matplotlib.pyplot as plt
from scipy.cluster import hierarchy
linkage = ['single', 'complete', 'average', 'weighted', 'centroid', 'ward']
for item in linkage:
#for i in range(1,7):
Z = hierarchy.linkage(X, item)
#plt.subplot(1,2,i)
plt.figure(figsize=(25, 10))
plt.xlabel('State',size=12)
hierarchy.dendrogram(Z,
leaf_rotation=90, # rotate the labels on X-axis
leaf_font_size=12,
labels=Label)
plt.title('Hierarchical Clustering on Covid Data:' + item, size=20)
A:
Every time you call plt.figure, matplotlib will create a new figure (plot). Since this is inside your for loop, you're currently creating a new plot for each dendrogram. You can move plt.figure outside of the for loop, but then you'll need to spend some effort placing the dendrograms such that they remain readable. I see from your code comments that you've used matplotlib.pyplot.subplots before. Here's a way to put each dendrogram in a unique subplot inside the main figure:
import matplotlib.pyplot as plt
from scipy.cluster import hierarchy
linkage = ['single', 'complete', 'average', 'weighted', 'centroid', 'ward']
fig, axes = plt.subplots(1, len(linkage), sharey=True)
for ax, item in zip(axes, linkage): # pair each matplotlib axis with a linkage type
Z = hierarchy.linkage(X, item)
ax.set_xlabel('State', fontdict={'size':12})
hierarchy.dendrogram(Z,
leaf_rotation=90, # rotate the labels on X-axis
leaf_font_size=12,
labels=Label,
ax=ax # We have to tell scipy where to draw the dendrogram
)
ax.set_title('Hierarchical Clustering on Covid Data:' + item, fontdict={'size':20})
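Note that the snippet assumes X (the data being clustered) and Label (the leaf labels) are already defined; a minimal stand-in to make it runnable could look like:
import numpy as np

rng = np.random.default_rng(0)
X = rng.random((10, 4))  # 10 observations with 4 features each
Label = ['state_' + str(i) for i in range(10)]  # one label per observation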
|
Subplot of dendrograms
|
I am creating six different dendrograms based on linkage. I have a for loop which loops through the six different linkage types. I want to print out all six of the dendrograms on one plot (using subplot) but cannot figure out how to do this. My attempt is below - with the commented-out lines being the code intended to print out the subplots. Thanks in advance for your help.
import matplotlib.pyplot as plt
from scipy.cluster import hierarchy
linkage = ['single', 'complete', 'average', 'weighted', 'centroid', 'ward']
for item in linkage:
#for i in range(1,7):
Z = hierarchy.linkage(X, item)
#plt.subplot(1,2,i)
plt.figure(figsize=(25, 10))
plt.xlabel('State',size=12)
hierarchy.dendrogram(Z,
leaf_rotation=90, # rotate the labels on X-axis
leaf_font_size=12,
labels=Label)
plt.title('Hierarchical Clustering on Covid Data:' + item, size=20)
|
[
"Every time you call plt.figure, matplotlib will create a new figure (plot). Since this is inside your for loop, you're currently creating a new plot for each dendogram. You can move plt.figure outside of the for loop, but then you'll need to spend some effort placing the dendograms such that they can still be readable. I see by your code comments that you've used matplotlib.pyplot.subplots before? Here's a way to put each dendogram in a unique subplot inside the main figure:\nimport matplotlib.pyplot as plt\nfrom scipy.cluster import hierarchy\n\nlinkage = ['single', 'complete', 'average', 'weighted', 'centroid', 'ward']\n\nfig, axes = plt.subplots(1, len(linkage), sharey=True)\nfor ax, item in zip(linkage, axes): # match each linkage to a matplotlib axis\n Z = hierarchy.linkage(X, item)\n ax.set_xlabel('State', fontdict={'size':12})\n hierarchy.dendrogram(Z,\n leaf_rotation=90, # rotate the labels on X-axis\n leaf_font_size=12,\n labels=Label,\n ax=ax # We have to tell scipy where to draw the dendogram\n )\n \n ax.set_title('Hierarchical Clustering on Covid Data:' + item, fontdict={'size':20})\n\n"
] |
[
0
] |
[] |
[] |
[
"dendrogram",
"python",
"subplot"
] |
stackoverflow_0074513863_dendrogram_python_subplot.txt
|
Q:
what is the significance of @tf.function in neural networks?
I am learning deep neural networks (beginner level). What is the use of @tf.function in tensorflow?
for example
@tf.function
def add(a,b):
c=tf.add (a,b)
print(c)
return(c)
Could anyone please explain how this way of coding helps to create a network?
A:
tf.function converts the function into a callable TensorFlow graph in which the tensor computations are executed as a TensorFlow graph (tf.Graph). "Graphs are data structures that contain a set of tf.Operation objects, which represent units of computation; and tf.Tensor objects, which represent the units of data that flow between operations". tf.function allows you to switch from eager execution to graph execution, which enables the benefits of graph mode, such as portability outside Python for deployment and better (faster) performance.
Please refer to this link for more information on tf.function.
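One practical consequence worth knowing: Python side effects such as print() run only while the function is being traced into a graph, not on every call. A minimal sketch:
import tensorflow as tf

@tf.function
def add(a, b):
    print('tracing')       # runs only when a new graph is traced
    tf.print('executing')  # runs on every call, inside the graph
    return tf.add(a, b)

add(tf.constant(1), tf.constant(2))  # prints both lines (first trace)
add(tf.constant(3), tf.constant(4))  # prints only 'executing'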
|
what is the significance of @tf.function in neural networks?
|
I am learning deep neural networks (beginner level). What is the use of @tf.function in tensorflow?
for example
@tf.function
def add(a,b):
c=tf.add (a,b)
print(c)
return(c)
Could anyone please explain how this way of coding helps to create a network?
|
[
"tf.function converts the function into a callable TensorFlow graph where the tensor computations are executed as a TensorFlow graph(tf.Graph). \"Graphs are data structures that contain a set of tf.Operation objects, which represent units of computation; and tf.Tensor objects, which represent the units of data that flow between operations\". tf.function allows you to switch from eager execution to graph execution which enables the benefits of graph mode - like portability outside python for deployement and provides better performance(faster).\nPlease refer this link for more information on tf.function.\n"
] |
[
0
] |
[] |
[] |
[
"python",
"tensorflow"
] |
stackoverflow_0072582777_python_tensorflow.txt
|
Q:
How to pass arguments to HuggingFace TokenClassificationPipeline's tokenizer
I've finetuned a Huggingface BERT model for Named Entity Recognition. Everything is working as it should. Now I've setup a pipeline for token classification in order to predict entities out the text I provide. Even this is working fine.
I know that BERT models are supposed to be fed with sentences less than 512 tokens long. Since I have texts longer than that, I split the sentences in shorter chunks and I store the chunks in a list chunked_sentences. To make it brief my tokenizer for training looks like this:
from transformers import BertTokenizerFast
tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
tokenized_inputs = tokenizer(chunked_sentences, is_split_into_words=True, padding='longest')
I pad everything to the longest sequence and avoid truncation so that if a sentence is tokenized and goes beyond 512 tokens I receive a warning that I won't be able to train. This way I know that I have to split the sentences in smaller chunks.
During inference I wanted to achieve the same thing, but I haven't found a way to pass arguments to the pipeline's tokenizer. The code looks like this:
from transformers import pipeline
ner_pipeline = pipeline('token-classification', model=model_folder, tokenizer=model_folder)
out = ner_pipeline(text, aggregation_strategy='simple')
I'm pretty sure that if a sentence is tokenized and surpasses the 512 tokens, the extra tokens will be truncated and I'll get no warning. I want to avoid this.
I tried passing arguments to the tokenizer like this:
tokenizer_kwargs = {'padding': 'longest'}
out = ner_pipeline(text, aggregation_strategy='simple', **tokenizer_kwargs)
I got that idea from this answer, but it seems not to be working, since I get the following error:
Traceback (most recent call last):
File "...\inference.py", line 42, in <module>
out = ner_pipeline(text, aggregation_strategy='simple', **tokenizer_kwargs)
File "...\venv\lib\site-packages\transformers\pipelines\token_classification.py", line 191, in __call__
return super().__call__(inputs, **kwargs)
File "...\venv\lib\site-packages\transformers\pipelines\base.py", line 1027, in __call__
preprocess_params, forward_params, postprocess_params = self._sanitize_parameters(**kwargs)
TypeError: TokenClassificationPipeline._sanitize_parameters() got an unexpected keyword argument 'padding'
Process finished with exit code 1
Any ideas? Thanks.
A:
I took a closer look at https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/pipelines/token_classification.py#L86. It seems you can override preprocess() to disable truncation and add padding to longest.
from transformers import TokenClassificationPipeline
class MyTokenClassificationPipeline(TokenClassificationPipeline):
def preprocess(self, sentence, offset_mapping=None):
truncation = False
padding = 'longest'
model_inputs = self.tokenizer(
sentence,
return_tensors=self.framework,
truncation=truncation,
padding=padding,
return_special_tokens_mask=True,
return_offsets_mapping=self.tokenizer.is_fast,
)
if offset_mapping:
model_inputs["offset_mapping"] = offset_mapping
model_inputs["sentence"] = sentence
return model_inputs
ner_pipeline = MyTokenClassificationPipeline(model=model_folder, tokenizer=model_folder)
out = ner_pipeline(text, aggregation_strategy='simple')
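One caveat with this approach: with truncation disabled, any input that tokenizes past the model's 512-position limit will now fail inside the model rather than being silently cut off, so long texts still need to be chunked before calling the pipeline. A rough sketch (the 1000-character window here is an arbitrary choice for illustration):
chunks = [text[i:i + 1000] for i in range(0, len(text), 1000)]
out = [entity
       for chunk in chunks
       for entity in ner_pipeline(chunk, aggregation_strategy='simple')]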
|
How to pass arguments to HuggingFace TokenClassificationPipeline's tokenizer
|
I've finetuned a Huggingface BERT model for Named Entity Recognition. Everything is working as it should. Now I've setup a pipeline for token classification in order to predict entities out the text I provide. Even this is working fine.
I know that BERT models are supposed to be fed with sentences less than 512 tokens long. Since I have texts longer than that, I split the sentences in shorter chunks and I store the chunks in a list chunked_sentences. To make it brief my tokenizer for training looks like this:
from transformers import BertTokenizerFast
tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
tokenized_inputs = tokenizer(chunked_sentences, is_split_into_words=True, padding='longest')
I pad everything to the longest sequence and avoid truncation so that if a sentence is tokenized and goes beyond 512 tokens I receive a warning that I won't be able to train. This way I know that I have to split the sentences in smaller chunks.
During inference I wanted to achieve the same thing, but I haven't found a way to pass arguments to the pipeline's tokenizer. The code looks like this:
from transformers import pipeline
ner_pipeline = pipeline('token-classification', model=model_folder, tokenizer=model_folder)
out = ner_pipeline(text, aggregation_strategy='simple')
I'm pretty sure that if a sentence is tokenized and surpasses the 512 tokens, the extra tokens will be truncated and I'll get no warning. I want to avoid this.
I tried passing arguments to the tokenizer like this:
tokenizer_kwargs = {'padding': 'longest'}
out = ner_pipeline(text, aggregation_strategy='simple', **tokenizer_kwargs)
I got that idea from this answer, but it seems not to be working, since I get the following error:
Traceback (most recent call last):
File "...\inference.py", line 42, in <module>
out = ner_pipeline(text, aggregation_strategy='simple', **tokenizer_kwargs)
File "...\venv\lib\site-packages\transformers\pipelines\token_classification.py", line 191, in __call__
return super().__call__(inputs, **kwargs)
File "...\venv\lib\site-packages\transformers\pipelines\base.py", line 1027, in __call__
preprocess_params, forward_params, postprocess_params = self._sanitize_parameters(**kwargs)
TypeError: TokenClassificationPipeline._sanitize_parameters() got an unexpected keyword argument 'padding'
Process finished with exit code 1
Any ideas? Thanks.
|
[
"I took a closer look at https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/pipelines/token_classification.py#L86. It seems you can override preprocess() to disable truncation and add padding to longest.\nfrom transformers import TokenClassificationPipeline\n\nclass MyTokenClassificationPipeline(TokenClassificationPipeline):\n def preprocess(self, sentence, offset_mapping=None):\n truncation = False\n padding = 'longest'\n model_inputs = self.tokenizer(\n sentence,\n return_tensors=self.framework,\n truncation=truncation,\n padding=padding,\n return_special_tokens_mask=True,\n return_offsets_mapping=self.tokenizer.is_fast,\n )\n if offset_mapping:\n model_inputs[\"offset_mapping\"] = offset_mapping\n \n model_inputs[\"sentence\"] = sentence\n return model_inputs\n \nner_pipeline = MyTokenClassificationPipeline(model=model_folder, tokenizer=model_folder)\nout = ner_pipeline(text, aggregation_strategy='simple')\n\n"
] |
[
1
] |
[] |
[] |
[
"huggingface",
"huggingface_tokenizers",
"huggingface_transformers",
"named_entity_recognition",
"python"
] |
stackoverflow_0073745607_huggingface_huggingface_tokenizers_huggingface_transformers_named_entity_recognition_python.txt
|
Q:
How to put text in an animation
I am trying to put text in a matplotlib animation. (Hopefully outside the plot, but I am not worrying about that yet)
I tried to follow this solution; however, my code is a bit complicated in that it does not give only one line every time.
Here is my code
import math
import argparse
import os
import json
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation ,FFMpegWriter
line_x=[ 0,1,2,3,4,5,6,7,8,9,10,11,12 ]
line1_y=[ 3,5,7,9,11,19,23,26,29,31,37,40,45 ]
line2_y=[0,2,5,7,10,10,8,5,3,2,1,3,5]
line3_y=[39,38,32,29,26,22,19,13,10,8,7,6,3]
set_lines=[line1_y,line2_y,line3_y]
n_lineas=[1,2,3,1,3,2,3,1,3,2,1,2]
show=True
thecolors=['blue','red','violet']
thelegends=['unus','duo','tres']
print(sys.argv)
if len(sys.argv)==2 and sys.argv[1]=='movie':
show=False
def get_n(thelist,c):
while(c>=len(thelist)):
c -= len(thelist)
return thelist[c]
class Update:
def __init__(self,ax,limit_x):
self.ax = ax
self.lx=limit_x
if limit_x!=0:
self.ax.set_xlim(0,limit_x)
self.ax.set_ylim(0,45)
self.ax.set_aspect('equal')
self.ax.grid(True)
self.lines=()
self.counter=0
self.text=self.ax.text(0,0,'')
def __call__(self, frame):
print("Frame: ",frame)
lines=[]
self.ax.cla()
self.ax.set_xlim(0,self.lx)
self.ax.set_ylim(0,45)
self.ax.grid(True)
self.ax.set_xlabel("Y (meters)")
self.ax.set_ylabel("X (meters)")
n_lines_this_time=get_n(n_lineas,self.counter)
self.counter+=1
print(n_lines_this_time,"lines this time")
for myline in range(n_lines_this_time):
#line,=self.ax.plot([],[],'.-',color=gt_color,label=legend)
line,=self.ax.plot([],[],'.-',color=thecolors[myline],label=thelegends[myline])
x = []
y = []
for v in range(13):
x.append(line_x[v])
y.append(set_lines[myline][v])
line.set_xdata(x)
line.set_ydata(y)
lines.append(line)
plt.legend()
self.lines=tuple(lines)
self.text.set_text("Frame "+str(frame))
self.text.set_position((0,0))
#return self.lines,self.text #<---HERE this does not work!!!
return self.lines
def init(self):
print("Init")
line,=self.ax.plot([],[])
self.ax.grid(True)
self.ax.set_xlabel("Y (meters)")
self.ax.set_ylabel("X (meters)")
self.text.set_text('')
self.text.set_position((0,0))
return line,self.text,
#return line,
fig, ax = plt.subplots(1, 1,figsize=(10,10))
plt.gcf().canvas.mpl_connect(
'key_release_event',
lambda event: [exit(0) if event.key == 'escape' else None])
plt.xlabel("Y (meters)")
plt.ylabel("X (meters)")
plt.legend()
ug_i = Update(ax,13)
anim = FuncAnimation(fig, ug_i,init_func=ug_i.init, frames=10, interval=1000, blit=True,repeat=False)
if not show:
writervideo = FFMpegWriter(fps=1)
anim.save('whatever.mp4', writer=writervideo)
print('done')
plt.close()
else:
#plt.legend()
plt.show()
In the current state the text does not show (of course), but when I try to return it (as marked above in the "HERE" comment), it crashes and gives me the error:
Traceback (most recent call last):
File "/home/kansai/miniconda3/envs/roscv/lib/python3.7/site-packages/matplotlib/backend_bases.py", line 1194, in _on_timer
ret = func(*args, **kwargs)
File "/home/kansai/miniconda3/envs/roscv/lib/python3.7/site-packages/matplotlib/animation.py", line 1442, in _step
still_going = Animation._step(self, *args)
File "/home/kansai/miniconda3/envs/roscv/lib/python3.7/site-packages/matplotlib/animation.py", line 1173, in _step
self._draw_next_frame(framedata, self._blit)
File "/home/kansai/miniconda3/envs/roscv/lib/python3.7/site-packages/matplotlib/animation.py", line 1192, in _draw_next_frame
self._draw_frame(framedata)
File "/home/kansai/miniconda3/envs/roscv/lib/python3.7/site-packages/matplotlib/animation.py", line 1748, in _draw_frame
key=lambda x: x.get_zorder())
File "/home/kansai/miniconda3/envs/roscv/lib/python3.7/site-packages/matplotlib/animation.py", line 1748, in <lambda>
key=lambda x: x.get_zorder())
AttributeError: 'tuple' object has no attribute 'get_zorder'
Aborted (core dumped)
What is failing, and how can I display the text? (If it can go outside the plot, even better.)
A:
After un-commenting your line of code (and returning the artists as one flat tuple), I didn't get any error; however, the text was not visible. So, instead of using ax.text I tried fig.text: now the text is visible outside the plotting area.
import math
import argparse
import os
import json
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation ,FFMpegWriter
line_x=[ 0,1,2,3,4,5,6,7,8,9,10,11,12 ]
line1_y=[ 3,5,7,9,11,19,23,26,29,31,37,40,45 ]
line2_y=[0,2,5,7,10,10,8,5,3,2,1,3,5]
line3_y=[39,38,32,29,26,22,19,13,10,8,7,6,3]
set_lines=[line1_y,line2_y,line3_y]
n_lineas=[1,2,3,1,3,2,3,1,3,2,1,2]
show=True
thecolors=['blue','red','violet']
thelegends=['unus','duo','tres']
# print(sys.argv)
# if len(sys.argv)==2 and sys.argv[1]=='movie':
# show=False
def get_n(thelist,c):
while(c>=len(thelist)):
c -= len(thelist)
return thelist[c]
class Update:
def __init__(self,fig,ax,limit_x):
self.ax = ax
self.lx=limit_x
if limit_x!=0:
self.ax.set_xlim(0,limit_x)
self.ax.set_ylim(0,45)
self.ax.set_aspect('equal')
self.ax.grid(True)
self.lines=()
self.counter=0
self.text=fig.text(0.15,0.5,'')
def __call__(self, frame):
print("Frame: ",frame)
lines=[]
self.ax.cla()
self.ax.set_xlim(0,self.lx)
self.ax.set_ylim(0,45)
self.ax.grid(True)
self.ax.set_xlabel("Y (meters)")
self.ax.set_ylabel("X (meters)")
n_lines_this_time=get_n(n_lineas,self.counter)
self.counter+=1
print(n_lines_this_time,"lines this time")
for myline in range(n_lines_this_time):
#line,=self.ax.plot([],[],'.-',color=gt_color,label=legend)
line,=self.ax.plot([],[],'.-',color=thecolors[myline],label=thelegends[myline])
x = []
y = []
for v in range(13):
x.append(line_x[v])
y.append(set_lines[myline][v])
line.set_xdata(x)
line.set_ydata(y)
lines.append(line)
plt.legend()
self.lines=tuple(lines)
self.text.set_text("Frame "+str(frame))
return (*self.lines, self.text) # blit=True needs a flat tuple of artists, one per element
def init(self):
print("Init")
line,=self.ax.plot([],[])
self.ax.grid(True)
self.ax.set_xlabel("Y (meters)")
self.ax.set_ylabel("X (meters)")
self.text.set_text('')
return line,self.text,
#return line,
fig, ax = plt.subplots(1, 1)
plt.gcf().canvas.mpl_connect(
'key_release_event',
lambda event: [exit(0) if event.key == 'escape' else None])
plt.xlabel("Y (meters)")
plt.ylabel("X (meters)")
plt.legend()
ug_i = Update(fig,ax,13)
anim = FuncAnimation(fig, ug_i,init_func=ug_i.init, frames=10, interval=1000, blit=True,repeat=False)
if not show:
writervideo = FFMpegWriter(fps=1)
anim.save('whatever.mp4', writer=writervideo)
print('done')
plt.close()
else:
#plt.legend()
plt.show()
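One more detail when blit=True: FuncAnimation sorts whatever the update function returns by z-order, so it must be a flat sequence of individual artists. A nested tuple is exactly what produced the get_zorder error in the question; a minimal sketch of the difference, inside __call__ where self.lines is a tuple of Line2D artists:
# return self.lines, self.text   -> nested tuple, raises 'tuple' object has no attribute 'get_zorder'
return (*self.lines, self.text)  # flat tuple, one artist per element, which is what blitting expects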
|
How to put text in an animation
|
I am trying to put text in a matplotlib animation. (Hopefully outside the plot, but I am not worrying about that yet)
I tried to follow this solution; however, my code is a bit complicated in that it does not give only one line every time.
Here is my code
import math
import argparse
import os
import json
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation ,FFMpegWriter
line_x=[ 0,1,2,3,4,5,6,7,8,9,10,11,12 ]
line1_y=[ 3,5,7,9,11,19,23,26,29,31,37,40,45 ]
line2_y=[0,2,5,7,10,10,8,5,3,2,1,3,5]
line3_y=[39,38,32,29,26,22,19,13,10,8,7,6,3]
set_lines=[line1_y,line2_y,line3_y]
n_lineas=[1,2,3,1,3,2,3,1,3,2,1,2]
show=True
thecolors=['blue','red','violet']
thelegends=['unus','duo','tres']
print(sys.argv)
if len(sys.argv)==2 and sys.argv[1]=='movie':
show=False
def get_n(thelist,c):
while(c>=len(thelist)):
c -= len(thelist)
return thelist[c]
class Update:
def __init__(self,ax,limit_x):
self.ax = ax
self.lx=limit_x
if limit_x!=0:
self.ax.set_xlim(0,limit_x)
self.ax.set_ylim(0,45)
self.ax.set_aspect('equal')
self.ax.grid(True)
self.lines=()
self.counter=0
self.text=self.ax.text(0,0,'')
def __call__(self, frame):
print("Frame: ",frame)
lines=[]
self.ax.cla()
self.ax.set_xlim(0,self.lx)
self.ax.set_ylim(0,45)
self.ax.grid(True)
self.ax.set_xlabel("Y (meters)")
self.ax.set_ylabel("X (meters)")
n_lines_this_time=get_n(n_lineas,self.counter)
self.counter+=1
print(n_lines_this_time,"lines this time")
for myline in range(n_lines_this_time):
#line,=self.ax.plot([],[],'.-',color=gt_color,label=legend)
line,=self.ax.plot([],[],'.-',color=thecolors[myline],label=thelegends[myline])
x = []
y = []
for v in range(13):
x.append(line_x[v])
y.append(set_lines[myline][v])
line.set_xdata(x)
line.set_ydata(y)
lines.append(line)
plt.legend()
self.lines=tuple(lines)
self.text.set_text("Frame "+str(frame))
self.text.set_position((0,0))
#return self.lines,self.text #<---HERE this does not work!!!
return self.lines
def init(self):
print("Init")
line,=self.ax.plot([],[])
self.ax.grid(True)
self.ax.set_xlabel("Y (meters)")
self.ax.set_ylabel("X (meters)")
self.text.set_text('')
self.text.set_position((0,0))
return line,self.text,
#return line,
fig, ax = plt.subplots(1, 1,figsize=(10,10))
plt.gcf().canvas.mpl_connect(
'key_release_event',
lambda event: [exit(0) if event.key == 'escape' else None])
plt.xlabel("Y (meters)")
plt.ylabel("X (meters)")
plt.legend()
ug_i = Update(ax,13)
anim = FuncAnimation(fig, ug_i,init_func=ug_i.init, frames=10, interval=1000, blit=True,repeat=False)
if not show:
writervideo = FFMpegWriter(fps=1)
anim.save('whatever.mp4', writer=writervideo)
print('done')
plt.close()
else:
#plt.legend()
plt.show()
In the current state the text does not show (of course), but when I try to return it (as marked above in the "HERE" comment), it crashes and gives me the error:
Traceback (most recent call last):
File "/home/kansai/miniconda3/envs/roscv/lib/python3.7/site-packages/matplotlib/backend_bases.py", line 1194, in _on_timer
ret = func(*args, **kwargs)
File "/home/kansai/miniconda3/envs/roscv/lib/python3.7/site-packages/matplotlib/animation.py", line 1442, in _step
still_going = Animation._step(self, *args)
File "/home/kansai/miniconda3/envs/roscv/lib/python3.7/site-packages/matplotlib/animation.py", line 1173, in _step
self._draw_next_frame(framedata, self._blit)
File "/home/kansai/miniconda3/envs/roscv/lib/python3.7/site-packages/matplotlib/animation.py", line 1192, in _draw_next_frame
self._draw_frame(framedata)
File "/home/kansai/miniconda3/envs/roscv/lib/python3.7/site-packages/matplotlib/animation.py", line 1748, in _draw_frame
key=lambda x: x.get_zorder())
File "/home/kansai/miniconda3/envs/roscv/lib/python3.7/site-packages/matplotlib/animation.py", line 1748, in <lambda>
key=lambda x: x.get_zorder())
AttributeError: 'tuple' object has no attribute 'get_zorder'
Aborted (core dumped)
What is failing, and how can I display the text? (If it can go outside the plot, even better.)
|
[
"After un-commenting your line of code, I didn't get any error, however the text was not visible. So, instead of using ax.text I tried fig.text: now the text is visible outside the plotting area.\nimport math\nimport argparse\nimport os\nimport json\nimport sys\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation ,FFMpegWriter\n\n\nline_x=[ 0,1,2,3,4,5,6,7,8,9,10,11,12 ]\nline1_y=[ 3,5,7,9,11,19,23,26,29,31,37,40,45 ]\nline2_y=[0,2,5,7,10,10,8,5,3,2,1,3,5]\nline3_y=[39,38,32,29,26,22,19,13,10,8,7,6,3]\n\nset_lines=[line1_y,line2_y,line3_y]\n\nn_lineas=[1,2,3,1,3,2,3,1,3,2,1,2]\n\nshow=True\n\nthecolors=['blue','red','violet']\nthelegends=['unus','duo','tres']\n\n# print(sys.argv)\n# if len(sys.argv)==2 and sys.argv[1]=='movie':\n# show=False\n\n\ndef get_n(thelist,c):\n while(c>=len(thelist)):\n c-len(thelist)\n return thelist[c]\n\n\nclass Update:\n def __init__(self,fig,ax,limit_x):\n self.ax = ax\n self.lx=limit_x\n if limit_x!=0:\n self.ax.set_xlim(0,limit_x)\n self.ax.set_ylim(0,45)\n self.ax.set_aspect('equal')\n self.ax.grid(True)\n self.lines=()\n self.counter=0\n self.text=fig.text(0.15,0.5,'')\n\n def __call__(self, frame):\n print(\"Frame: \",frame)\n lines=[]\n\n self.ax.cla()\n self.ax.set_xlim(0,self.lx)\n self.ax.set_ylim(0,45)\n self.ax.grid(True)\n self.ax.set_xlabel(\"Y (meters)\")\n self.ax.set_ylabel(\"X (meters)\")\n\n n_lines_this_time=get_n(n_lineas,self.counter)\n self.counter+=1\n print(n_lines_this_time,\"lines this time\")\n\n for myline in range(n_lines_this_time):\n #line,=self.ax.plot([],[],'.-',color=gt_color,label=legend)\n line,=self.ax.plot([],[],'.-',color=thecolors[myline],label=thelegends[myline])\n x = []\n y = []\n for v in range(13):\n x.append(line_x[v])\n y.append(set_lines[myline][v])\n line.set_xdata(x)\n line.set_ydata(y)\n lines.append(line)\n\n plt.legend()\n self.lines=tuple(lines)\n self.text.set_text(\"Frame \"+str(frame))\n return self.lines,self.text #<---HERE this does not work!!!\n return self.lines\n\n\n def init(self):\n print(\"Init\")\n line,=self.ax.plot([],[])\n self.ax.grid(True)\n self.ax.set_xlabel(\"Y (meters)\")\n self.ax.set_ylabel(\"X (meters)\")\n self.text.set_text('')\n return line,self.text,\n #return line,\n\n\n\nfig, ax = plt.subplots(1, 1)\nplt.gcf().canvas.mpl_connect(\n 'key_release_event',\n lambda event: [exit(0) if event.key == 'escape' else None])\nplt.xlabel(\"Y (meters)\") \nplt.ylabel(\"X (meters)\")\nplt.legend()\n\nug_i = Update(fig,ax,13)\nanim = FuncAnimation(fig, ug_i,init_func=ug_i.init, frames=10, interval=1000, blit=True,repeat=False)\n\nif not show: \n writervideo = FFMpegWriter(fps=1)\n anim.save('whatever.mp4', writer=writervideo)\n print('done')\n plt.close()\nelse:\n #plt.legend()\n plt.show()\n\n"
] |
[
0
] |
[] |
[] |
[
"animation",
"matplotlib",
"python",
"visualization"
] |
stackoverflow_0074517974_animation_matplotlib_python_visualization.txt
|