markdown
stringlengths
0
1.02M
code
stringlengths
0
832k
output
stringlengths
0
1.02M
license
stringlengths
3
36
path
stringlengths
6
265
repo_name
stringlengths
6
127
Reinforcement Learning with CALVINThe **CALVIN** simulated benchmark is perfectly suited for training agents with reinforcement learning; in this notebook we will demonstrate how to integrate your agents with these environments. InstallationThe first step is to install the CALVIN GitHub repository so that we have access to its packages.
# Download repo %mkdir /content/calvin %cd /content/calvin !git clone https://github.com/mees/calvin_env.git %cd /content/calvin/calvin_env !git clone https://github.com/lukashermann/tacto.git # Install packages %cd /content/calvin/calvin_env/tacto/ !pip3 install -e . %cd /content/calvin/calvin_env !pip3 install -e . !pip3 install -U numpy # Run this to check if the installation was succesful from calvin_env.envs.play_table_env import PlayTableSimEnv
_____no_output_____
MIT
RL_with_CALVIN.ipynb
lucys0/calvin
Loading the environmentAfter the installation has finished successfully, we can start using the environment for reinforcement learning. To be able to use the environment, we need the appropriate configuration that defines the desired features; for this example, we will load the static and gripper cameras.
%cd /content/calvin
from hydra import initialize, compose

# Compose the data-collection config with both the static and the gripper camera.
with initialize(config_path="./calvin_env/conf/"):
    cfg = compose(config_name="config_data_collection.yaml",
                  overrides=["cameras=static_and_gripper"])
    # Disable rendering/VR features that are unavailable in a Colab runtime,
    # and enable scene info so task completion can be checked later.
    cfg.env["use_egl"] = False
    cfg.env["show_gui"] = False
    cfg.env["use_vr"] = False
    cfg.env["use_scene_info"] = True
    print(cfg.env)
_____no_output_____
MIT
RL_with_CALVIN.ipynb
lucys0/calvin
The environment has similar structure to traditional OpenAI Gym environments.* We can restart the simulation with the *reset* function.* We can perform an action in the environment with the *step* function.* We can visualize images taken from the cameras in the environment by using the *render* function.
import time
import hydra
import numpy as np
from google.colab.patches import cv2_imshow

# Instantiate the simulated environment from the Hydra config and reset it.
env = hydra.utils.instantiate(cfg.env)
observation = env.reset()
# The observation is given as a dictionary with different values
print(observation.keys())
for i in range(5):
    # The action consists in a pose displacement (position and orientation)
    action_displacement = np.random.uniform(low=-1, high=1, size=6)
    # And a binary gripper action, -1 for closing and 1 for opening
    action_gripper = np.random.choice([-1, 1], size=1)
    action = np.concatenate((action_displacement, action_gripper), axis=-1)
    observation, reward, done, info = env.step(action)
    # Render the camera image; [:, :, ::-1] flips BGR <-> RGB for display.
    rgb = env.render(mode="rgb_array")[:, :, ::-1]
    cv2_imshow(rgb)
_____no_output_____
MIT
RL_with_CALVIN.ipynb
lucys0/calvin
Custom environment for Reinforcement LearningThere are some aspects that need to be defined to be able to use it for reinforcement learning, including:1. Observation space2. Action space3. Reward functionWe are going to create a custom environment that extends the **PlaytableSimEnv** to add these requirements. The specific task that will be solved is called "move_slider_left"; here you can find a [list of possible tasks](https://github.com/mees/calvin_env/blob/main/conf/tasks/new_playtable_tasks.yaml) that can be evaluated using CALVIN.
from gym import spaces
from calvin_env.envs.play_table_env import PlayTableSimEnv


class SlideEnv(PlayTableSimEnv):
    """PlayTableSimEnv wrapper tailored to the "move_slider_left" RL task.

    Adds the three ingredients required for reinforcement learning:
    an observation space (end-effector pose), an action space (relative
    pose displacement plus a binary gripper command), and a sparse
    success-based reward.
    """

    def __init__(self, tasks: dict = None, **kwargs):
        """
        Parameters
        ----------
        tasks : dict, optional
            Hydra config for the task oracle. Defaults to an empty config
            (a None sentinel avoids the shared-mutable-default pitfall).
        """
        super().__init__(**kwargs)
        # For this example we modify the observation to only retrieve
        # the end effector pose.
        self.action_space = spaces.Box(low=-1, high=1, shape=(7,))
        self.observation_space = spaces.Box(low=-1, high=1, shape=(7,))
        # We can use the task utility to know if the task was executed correctly.
        self.tasks = hydra.utils.instantiate(tasks or {})

    def reset(self):
        """Reset the simulation and remember the initial scene info.

        The stored info is later compared against the current scene state
        to decide whether the task has been solved.
        """
        obs = super().reset()
        self.start_info = self.get_info()
        return obs

    def get_obs(self):
        """Overwrite robot obs to only retrieve end effector position."""
        robot_obs, _ = self.robot.get_observation()
        return robot_obs[:7]

    def _success(self):
        """Return True if "move_slider_left" was completed since the last reset."""
        current_info = self.get_info()
        task_filter = ["move_slider_left"]
        task_info = self.tasks.get_task_info_for_set(self.start_info, current_info, task_filter)
        return 'move_slider_left' in task_info

    def _reward(self):
        """Sparse reward: 10 on success, 0 otherwise, plus an info dict."""
        reward = int(self._success()) * 10
        r_info = {'reward': reward}
        return reward, r_info

    def _termination(self):
        """The episode terminates as soon as the task is solved."""
        success = self._success()
        d_info = {'success': success}
        return success, d_info

    def step(self, action):
        """Perform a relative action in the environment.

        Parameters
        ----------
        action : sequence of 7 floats
            Position displacement (x, y, z), orientation displacement in
            radians (x, y, z), and a gripper command, each in (-1, 1).

        Returns
        -------
        tuple
            (observation, reward, done, info)
        """
        # Transform gripper action to discrete space: >= 0 opens (+1), < 0 closes (-1).
        env_action = action.copy()
        env_action[-1] = (int(action[-1] >= 0) * 2) - 1
        self.robot.apply_action(env_action)
        # Advance the physics engine several sub-steps per environment step.
        for _ in range(self.action_repeat):
            self.p.stepSimulation(physicsClientId=self.cid)
        obs = self.get_obs()
        info = self.get_info()
        reward, r_info = self._reward()
        done, d_info = self._termination()
        info.update(r_info)
        info.update(d_info)
        return obs, reward, done, info
_____no_output_____
MIT
RL_with_CALVIN.ipynb
lucys0/calvin
Training an RL agentAfter generating the wrapper, training a reinforcement learning agent is straightforward; for this example we will use Stable-Baselines3 agents.
# Install Stable-Baselines3, which provides ready-made RL agent implementations.
!pip3 install stable_baselines3
_____no_output_____
MIT
RL_with_CALVIN.ipynb
lucys0/calvin
To train the agent we create an instance of our new environment and send it to the stable baselines agent to learn a policy.> Note: the example uses Soft Actor-Critic (SAC), which is one of the state-of-the-art algorithms for off-policy RL.
import gym
import numpy as np
from stable_baselines3 import SAC

# Build the keyword arguments for SlideEnv from the Hydra env config,
# dropping Hydra-specific keys that the plain constructor does not accept.
new_env_cfg = {**cfg.env}
new_env_cfg["tasks"] = cfg.tasks
new_env_cfg.pop('_target_', None)
new_env_cfg.pop('_recursive_', None)
env = SlideEnv(**new_env_cfg)

# Train a Soft Actor-Critic agent on the custom environment.
model = SAC("MlpPolicy", env, verbose=1)
model.learn(total_timesteps=10000, log_interval=4)
_____no_output_____
MIT
RL_with_CALVIN.ipynb
lucys0/calvin
TEXAS ERCOT
# --- Filter the national dataset down to the Texas grid (ERCOT) --------------
filter_1 = df_total['Balancing Authority'] == 'ERCO'
# .copy() so the column deletions below do not trigger SettingWithCopy warnings.
df_texas = df_total[filter_1].copy()
df_texas

# Columns of interest (reference list).
# BUG FIX: the original list was missing a comma after 'Demand Forecast (MW)',
# which silently concatenated the first two column names into one string.
catagories_lst = ['Demand Forecast (MW)', 'Net Generation (MW) (Imputed)',
                  'Demand (MW) (Adjusted)', 'Net Generation (MW) (Adjusted)',
                  'Net Generation (MW) from Coal', 'Net Generation (MW) from Natural Gas',
                  'Net Generation (MW) from Nuclear',
                  'Net Generation (MW) from All Petroleum Products',
                  'Net Generation (MW) from Hydropower and Pumped Storage',
                  'Net Generation (MW) from Solar', 'Net Generation (MW) from Wind',
                  'Demand Delta', 'Net Generation Delta']

# Drop identifier, imputed, and unused columns.
del df_texas['UTC Time at End of Hour']
del df_texas['Balancing Authority']
del df_texas['Net Generation (MW) (Imputed)']
del df_texas['Demand (MW) (Imputed)']
del df_texas['Net Generation (MW) from All Petroleum Products']
del df_texas['Net Generation (MW) from Unknown Fuel Sources']
del df_texas['Data Date']
del df_texas['Hour Number']
del df_texas['Local Time at End of Hour']
df_texas.info()
df_texas.to_csv(r'/Users/cp/Desktop/capstone2/DF_TEXAS_FINAL_ENERGY_cleanv1.csv', index=False, header=True)

# --- Merge with Dallas hourly weather on the shared timestamp ----------------
df_dallas = pd.read_csv('/Users/cp/Desktop/capstone2/DALLASV1_FINAL_WEATHER.csv')
df_dallas['New_datetime'] = pd.to_datetime(df_dallas['New_datetime'],
                                           infer_datetime_format=True, format='%m/%d/%Y %H')
Energy_Houston_weather = df_texas.merge(df_dallas, left_on='New_datetime', right_on='New_datetime')
Energy_Houston_weather

# --- Encode cloud-cover descriptions as ordinal severity ---------------------
# 0 = clear, 1 = partial cover / dust / haze, 2 = overcast or precipitation.
Energy_Houston_weather['Cloud_numerical'] = Energy_Houston_weather['cloud']
Energy_Houston_weather['cloud'].value_counts()
d1 = {'Fair': 0, 'Mostly Cloudy': 2, 'Cloudy': 1, 'Partly Cloudy': 1, 'Light Rain': 2,
      'Light Drizzle': 2, 'Rain': 2, 'Light Rain with Thunder': 2, 'Heavy T-Storm': 2,
      'Thunder': 2, 'Heavy Rain': 2, 'T-Storm': 2, 'Fog': 2, 'Mostly Cloudy / Windy': 2,
      'Cloudy / Windy': 2, 'Haze': 1, 'Fair / Windy': 0, 'Partly Cloudy / Windy': 1,
      'Light Rain / Windy': 2, 'Heavy T-Storm / Windy': 2, 'Heavy Rain / Windy': 2,
      'Widespread Dust': 1, 'Thunder and Hail': 2, 'Thunder / Windy': 2,
      'Blowing Dust': 1, 'Patches of Fog': 1, 'Blowing Dust / Windy': 1,
      'Rain / Windy': 2, 'Fog / Windy': 2, 'Light Drizzle / Windy': 2, 'Haze / Windy': 1}
Energy_Houston_weather['Cloud_numerical'].replace(d1, inplace=True)
Energy_Houston_weather['Cloud_numerical'] = Energy_Houston_weather['Cloud_numerical'].astype(float)
Energy_Houston_weather.info()

# --- Strip unit suffixes from the weather strings and convert to floats ------
# Humidity/precip are leading-digit prefixes; the other columns carry a
# non-breaking space plus a unit (\xa0F, \xa0in, \xa0mph) that must be removed.
Energy_Houston_weather['humdity1'] = Energy_Houston_weather['humidity'].str[:2]
Energy_Houston_weather['humdity1'] = Energy_Houston_weather['humdity1'].astype(float)
Energy_Houston_weather['precip1'] = Energy_Houston_weather['precip'].str[:2]
Energy_Houston_weather['precip1'] = Energy_Houston_weather['precip1'].astype(float)
Energy_Houston_weather['pressure1'] = Energy_Houston_weather['pressure'].str[:5]


def column_convert_float(pd_series):
    """Keep only the part of each value before a backslash (currently unused)."""
    lst = []
    for i in pd_series:
        lst1 = i.split('\\')
        lst.append(lst1[0])
    return pd.Series(lst)


def temp_column_convert_float2(pd_series):
    """Strip the trailing non-breaking-space Fahrenheit unit from each value."""
    lst = []
    for string in pd_series:
        lst.append(string.replace(u'\xa0F', ''))
    return pd.Series(lst)


temp2 = Energy_Houston_weather['temp']
timevar1 = temp_column_convert_float2(temp2)
Energy_Houston_weather['temp1'] = timevar1
Energy_Houston_weather['temp1'] = Energy_Houston_weather['temp1'].astype(float)


def press_column_convert_float2(pd_series):
    """Strip the trailing inches-of-mercury unit; normalize stray '0.00 ' values."""
    lst = []
    for string in pd_series:
        string = string.replace(u'\xa0in', '')
        if string == '0.00\xa0':
            string = '0.00'
        lst.append(string)
    return pd.Series(lst)


press1 = Energy_Houston_weather['pressure']
press1_convert = press_column_convert_float2(press1)
# Some pressure strings are malformed zeros ('0.0...'); force them to '0.00'
# before the final conversion pass.
filt = Energy_Houston_weather['pressure1'].str[:3] == '0.0'
press1[filt] = '0.00'
press_series = press_column_convert_float2(press1)
Energy_Houston_weather['pressure1'] = press_series
Energy_Houston_weather['pressure1'] = Energy_Houston_weather['pressure1'].astype(float)
Energy_Houston_weather.info()


def wind_column_convert(pd_series):
    """Strip the trailing mph unit; normalize stray '0.00 ' values."""
    lst = []
    for string in pd_series:
        string = string.replace(u'\xa0mph', '')
        if string == '0.00\xa0':
            string = '0.00'
        lst.append(string)
    return pd.Series(lst)


wind1 = Energy_Houston_weather['wind_speed']
wind_convert = wind_column_convert(wind1)
Energy_Houston_weather['wind1'] = wind_convert
Energy_Houston_weather['wind1'] = Energy_Houston_weather['wind1'].astype(float)
Energy_Houston_weather.info()


def dew_column_convert_float2(pd_series):
    """Strip the trailing Fahrenheit unit from the dew-point strings."""
    lst = []
    for string in pd_series:
        lst.append(string.replace(u'\xa0F', ''))
    return pd.Series(lst)


dew1 = Energy_Houston_weather['dew']
dew_convert = dew_column_convert_float2(dew1)
Energy_Houston_weather['dew1'] = dew_convert
Energy_Houston_weather['dew1'] = Energy_Houston_weather['dew1'].astype(float)
Energy_Houston_weather.info()

# Persist the merged, numeric dataset for downstream modelling.
Energy_Houston_weather.to_csv(r'/Users/cp/Desktop/capstone2/WEATHER_CONVERTED&ENERGY_cleanv1.csv', index=False, header=True)
_____no_output_____
MIT
notebooks/weather_energy_combine.ipynb
Cpizzle1/Texas_energy_use_weather_proj
Polish phonetic comparison> "Transcript matching for E2E ASR with phonetic post-processing"- toc: false- branch: master- hidden: true- categories: [asr, polish, phonetic, todo]
from difflib import SequenceMatcher
import icu

# PyICU transliterator that converts Polish orthography to IPA.
plipa = icu.Transliterator.createInstance('pl-pl_FONIPA')
_____no_output_____
Apache-2.0
_notebooks/2021-04-29-phonetic-comparison.ipynb
jimregan/notes
The errors in E2E models are quite often phonetic confusions, so we do the opposite of traditional ASR and generate the phonetic representation from the output as a basis for comparison.
def phonetic_check(word1, word2, ignore_spaces=False):
    """Uses ICU's IPA transliteration to check if words are the same"""
    if ignore_spaces:
        word1 = word1.replace(' ', '')
        word2 = word2.replace(' ', '')
    return plipa.transliterate(word1) == plipa.transliterate(word2)


phonetic_check("jórz", "jusz", False)
_____no_output_____
Apache-2.0
_notebooks/2021-04-29-phonetic-comparison.ipynb
jimregan/notes
The Polish `y` is phonetically a raised schwa; like the schwa in English, it's often deleted in fast speech. This function returns true if the only differences between the first word and the second are deletions of `y`, except at the end of the word (which is typically the plural ending).
def no_igrek(word1, word2):
    """Checks if a word-internal y has been deleted"""
    matcher = SequenceMatcher(None, word1, word2)
    for tag, i1, i2, j1, j2 in matcher.get_opcodes():
        if tag == 'equal':
            continue
        if tag in ('insert', 'replace'):
            return False
        # tag == 'delete': the only acceptable deletion is a single 'y'
        # that is not at the end of the word (word-final y is the plural ending).
        dropped = word1[i1:i2]
        if dropped != 'y' or i2 == len(word1):
            return False
    return True


no_igrek('uniwersytet', 'uniwerstet')
no_igrek('uniwerstety', 'uniwerstet')

# Orthographic pairs that are phonetically identical in Polish.
phonetic_alternatives = [
    ['u', 'ó'],
    ['rz', 'ż']
]


def reverse_alts(phonlist):
    """Return the pair list with each [a, b] swapped to [b, a]."""
    return [[second, first] for first, second in phonlist]


sm = SequenceMatcher(None, "już", "jurz")
for oc in sm.get_opcodes():
    print(oc)
('equal', 0, 2, 0, 2) ('replace', 2, 3, 2, 4)
Apache-2.0
_notebooks/2021-04-29-phonetic-comparison.ipynb
jimregan/notes
Reads a `CTM`-like file, returning a list of lists containing the filename, start time, end time, and word.
def read_ctmish(filename):
    """Parse a CTM-like file into [utt_id, start, end, word] records.

    Lines with four or fewer space-separated fields are skipped; every
    field from the fifth onwards becomes its own word entry carrying the
    line's utterance id (field 1) and start/end times (fields 3 and 4).
    """
    records = []
    with open(filename, 'r') as ctm_file:
        for raw_line in ctm_file:
            fields = raw_line.strip().split(' ')
            if len(fields) <= 4:
                continue
            records.extend([fields[0], fields[2], fields[3], word]
                           for word in fields[4:])
    return records
_____no_output_____
Apache-2.0
_notebooks/2021-04-29-phonetic-comparison.ipynb
jimregan/notes
Returns the contents of a plain text file as a list of lists containing the line number and the word, for use in locating mismatches
def read_text(filename):
    """Read a plain-text file into [line_number, word] pairs.

    Line numbers are 1-based; words come from splitting each stripped
    line on single spaces, for use in locating mismatches.
    """
    output = []
    counter = 0
    with open(filename, 'r') as f:
        for line in f.readlines():
            counter += 1
            # BUG FIX: the original was missing the ':' after this for-statement,
            # which made the whole cell a SyntaxError.
            for word in line.strip().split(' '):
                output.append([counter, word])
    return output


# Load the recognized words from the CTM output for comparison.
ctmish = read_ctmish("/mnt/c/Users/Jim O\'Regan/git/notes/PlgU9JyTLPE.ctm")
rec_words = [i[3] for i in ctmish]
_____no_output_____
Apache-2.0
_notebooks/2021-04-29-phonetic-comparison.ipynb
jimregan/notes
HW9: Forecasting Solar CyclesBelow is the notebook associated with HW 9. You can run the notebook in two modes. If you have the `emcee` and `corner` packages installed on your machine, along with the data files, just keep the following variable set to `False`. If you are running it in a Google colab notebook, set it to `True` so that it will grab the packages and files. Remember that the Google colab environment will shut down after ~1 hour of inactivity, so you'll need to keep interacting with it or else you will lose the data.A script version of this file will also be provided to you, but you cannot use this in a Google colab environment.
# Set to True when running in Google Colab so packages and data are fetched.
COLAB = False

if COLAB:
    # Install emcee package
    !pip install emcee
    # Install corner package
    !pip install corner
    # Grab sunspot data file
    !wget -O SN_m_tot_V2.0.txt https://raw.githubusercontent.com/mtlam/ASTP-720_F2020/master/HW9/SN_m_tot_V2.0.txt

import numpy as np
from matplotlib.pyplot import *
from matplotlib import rc
import emcee
import corner

%matplotlib inline

# Make more readable plots: enlarge fonts for ticks, axis labels, and titles.
rc('font', **{'size': 14})
rc('xtick', **{'labelsize': 16})
rc('ytick', **{'labelsize': 16})
rc('axes', **{'labelsize': 18, 'titlesize': 18})
_____no_output_____
BSD-3-Clause
HW9/HW9-sunspots.ipynb
mtlam/ASTP-720_F2020
Define the (log-)priorsHere, the function should take a vector of parameters, `theta`, and return `0.0` if the it is in the prior range and `-np.inf` if it is outside. This is equivalent to a uniform prior over the parameters. You can, of course, define a different set of priors if you so choose!
def lnprior(theta):
    """
    Log-prior for the model parameters (stub: implements a uniform prior).

    Parameters
    ----------
    theta : np.ndarray
        Array of parameters.

    Returns
    -------
    float
        Value of log-prior: 0.0 when theta is inside the allowed range,
        -np.inf when it is outside.
    """
    # TODO: implement the bounds check for your chosen parameterization.
    pass
_____no_output_____
BSD-3-Clause
HW9/HW9-sunspots.ipynb
mtlam/ASTP-720_F2020
Define the (log-)likelihood
def lnlike(theta, data):
    """
    Log-likelihood of the data given the model parameters (stub).

    Parameters
    ----------
    theta : np.ndarray
        Array of parameters.
    data : np.ndarray
        Observed sunspot-number series.

    Returns
    -------
    float
        Value of log-likelihood.
    """
    # TODO: compute model(theta) - data residuals and the log-likelihood.
    residuals = None
    pass
_____no_output_____
BSD-3-Clause
HW9/HW9-sunspots.ipynb
mtlam/ASTP-720_F2020
Define total (log-)probabilityNo need to change this if the other two functions work as described.
def lnprob(theta, data):
    """Total log-probability: log-prior + log-likelihood, or -inf outside the prior."""
    prior = lnprior(theta)
    if np.isfinite(prior):
        return prior + lnlike(theta, data)
    return -np.inf
_____no_output_____
BSD-3-Clause
HW9/HW9-sunspots.ipynb
mtlam/ASTP-720_F2020
Set up the MCMC sampler here
# Number of walkers to search through parameter space nwalkers = 10 # Number of iterations to run the sampler for niter = 50000 # Initial guess of parameters. For example, if you had a model like # s(t) = a + bt + ct^2 # and your initial guesses for a, b, and c were 5, 3, and 8, respectively, then you would write # pinit = np.array([5, 3, 8]) # Make sure the guesses are allowed inside your lnprior range! pinit = np.array([]) # Number of dimensions of parameter space ndim = len(pinit) # Perturbed set of initial guesses. Have your walkers all start out at # *slightly* different starting values p0 = [pinit + 1e-4*pinit*np.random.randn(ndim) for i in range(nwalkers)]
_____no_output_____
BSD-3-Clause
HW9/HW9-sunspots.ipynb
mtlam/ASTP-720_F2020
Load the data, plot to show
# Data: decimal year, sunspot number decyear, ssn = np.loadtxt("SN_m_tot_V2.0.txt", unpack=True, usecols=(2, 3)) plot(decyear, ssn, 'k.') xlabel('Year') ylabel('Sunspot Number') show()
_____no_output_____
BSD-3-Clause
HW9/HW9-sunspots.ipynb
mtlam/ASTP-720_F2020
Run the sampler
# Number of CPU threads to use. Reduce if you are running on your own machine # and don't want to use too many cores nthreads = 4 # Set up the sampler sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(ssn,), threads=nthreads) # Run the sampler. May take a while! You might consider changing the # number of iterations to a much smaller value when you're testing. Or use a # larger value when you're trying to get your final results out! sampler.run_mcmc(p0, niter, progress=True)
_____no_output_____
BSD-3-Clause
HW9/HW9-sunspots.ipynb
mtlam/ASTP-720_F2020
Get the samples in the appropriate format, with a burn value
# Burn-in value = 1/4th the number of iterations. Feel free to change! burn = int(0.25*niter) # Reshape the chains for input to corner.corner() samples = sampler.chain[:, burn:, :].reshape((-1, ndim))
_____no_output_____
BSD-3-Clause
HW9/HW9-sunspots.ipynb
mtlam/ASTP-720_F2020
Make a corner plotYou should feel free to adjust the parameters to the `corner` function. You **should** also add labels, which should just be a list of the names of the parameters. So, if you had two parameters, $\phi_1$ and $\phi_2$, then you could write:```labels = [r"$\phi_1$", r"$\phi_2$"]```and that will make the appropriate label in LaTeX (if the distribution is installed correctly) for the two 1D posteriors of the corner plot.
# Corner plot of the 1D and 2D marginal posteriors of the burned-in samples.
fig = corner.corner(samples, bins=50, color='C0', smooth=0.5, plot_datapoints=False, plot_density=True, \
                    plot_contours=True, fill_contour=False, show_titles=True)#, labels=labels)
fig.savefig("corner.png")
show()
_____no_output_____
BSD-3-Clause
HW9/HW9-sunspots.ipynb
mtlam/ASTP-720_F2020
No. 2 NELFINAVIR MESYLATE
# No. 2: look up candidate drug NELFINAVIR in the results table by output name.
df.loc[df['output_name'] == 'NELFINAVIR']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
No. 3 AMOPYROQUINE
# No. 3: look up AMOPYROQUINE by its UMLS CUI (C0051735).
df.loc[df['output_name'] == 'C0051735']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
No. 7 HANFANGCHIN A
# No. 7: look up HANFANGCHIN A by its UMLS CUI (C0076316).
df.loc[df['output_name'] == 'C0076316']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
No. 11 FERROQUINE
# No. 11: look up FERROQUINE by its UMLS CUI (C1313506).
df.loc[df['output_name'] == 'C1313506']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
No. 13 ASTEMIZOLE
# No. 13: look up ASTEMIZOLE in the results table by output name.
df.loc[df['output_name'] == 'ASTEMIZOLE']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
No. 14 amodiaquine
# No. 14: look up AMODIAQUINE in the results table by output name.
df.loc[df['output_name'] == 'AMODIAQUINE']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
No. 22 (+)-MEFLOQUINE
# No. 22: look up (+)-MEFLOQUINE by its UMLS CUI (C0208248).
df.loc[df['output_name'] == 'C0208248']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
ABOVE REFERS TO: name(Mefloquine Hydrochloride) umls(C0282398)
# Repeat the C0208248 query; the preceding hit resolved to Mefloquine Hydrochloride.
df.loc[df['output_name'] == 'C0208248']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
ABOVE REFERS TO: name(mefloquine-sulfadoxine-pyrimethamine) umls(C0208248) No. 27 CEPHARANTHINE
# No. 27: look up CEPHARANTHINE by its UMLS CUI (C0055082).
df.loc[df['output_name'] == 'C0055082']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
No. 33 ANDROGRAPHOLIDE
# No. 33: look up ANDROGRAPHOLIDE by its UMLS CUI (C0051821).
df.loc[df['output_name'] == 'C0051821']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
No. 39 NAFOXIDINE
# No. 39: look up NAFOXIDINE by its UMLS CUI (C0027328).
df.loc[df['output_name'] == 'C0027328']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
No. 48 RETINALDEHYDE
# No. 48: look up RETINALDEHYDE by its UMLS CUI (C0035331).
df.loc[df['output_name'] == 'C0035331']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
No. 59 CLOFAZIMINE
# No. 59: look up CLOFAZIMINE in the results table by output name.
df.loc[df['output_name'] == 'CLOFAZIMINE']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
No. 63 PERHEXILINE
# No. 63: look up PERHEXILINE in the results table by output name.
df.loc[df['output_name'] == 'PERHEXILINE']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
No. 65 resiniferatoxin
# No. 65: look up resiniferatoxin by its UMLS CUI (C0073081).
df.loc[df['output_name'] == 'C0073081']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
No. 69 EBASTINE
# No. 69: look up EBASTINE in the results table by output name.
df.loc[df['output_name'] == 'EBASTINE']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
No. 73 5-Azacytidine
# No. 73: look up 5-Azacytidine (listed as AZACITIDINE) by output name.
df.loc[df['output_name'] == 'AZACITIDINE']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
No. 76 AMIODARONE
# No. 76: look up AMIODARONE by name, then its hydrochloride salt by CUI (C0700442).
df.loc[df['output_name'] == 'AMIODARONE']
df.loc[df['output_name'] == 'C0700442']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
Above refers to Amiodarone hydrochloride No. 79 NSP 805
# No. 79: look up NSP 805 by its UMLS CUI (C0660146).
df.loc[df['output_name'] == 'C0660146']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
No. 81 CLORICROMENE
# No. 81: look up CLORICROMENE by its UMLS CUI (C0050066).
df.loc[df['output_name'] == 'C0050066']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
No. 82 TIFLUADOM
# No. 82: look up TIFLUADOM by its UMLS CUI (C0076670).
df.loc[df['output_name'] == 'C0076670']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
No. 85 SCH-28080
# No. 85: look up SCH-28080 by its UMLS CUI (C0074142).
df.loc[df['output_name'] == 'C0074142']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
NO. 96 IMMUNOMYCIN
# No. 96: look up IMMUNOMYCIN by its UMLS CUI (C0123418).
df.loc[df['output_name'] == 'C0123418']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
NO.97 ARGLABIN
# No. 97: look up ARGLABIN by its UMLS CUI (C0661380).
df.loc[df['output_name'] == 'C0661380']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
NO.100 TRETINOIN
# No. 100: look up TRETINOIN in the results table by output name.
df.loc[df['output_name'] == 'TRETINOIN']
_____no_output_____
Apache-2.0
jupyter notebooks/COV-19___ACE2.ipynb
zhoue7673/biothings_explorer
Load Cats and Dogs Images Install Packages
# Pin the deep-learning stack to versions known to work together for this demo.
!pip install --upgrade keras==2.2.4
!pip install --upgrade tensorflow==1.13.1
!pip install --upgrade 'numpy<1.15.0'
_____no_output_____
Apache-2.0
demos/gpu/horovod/cpu/image-classification/01-load-data-cats-n-dogs.ipynb
omesser/tutorials
> **Note:** After running the pip command you should restart the Jupyter kernel.> To restart the kernel, click on the kernel-restart button in the notebook menu toolbar (the refresh icon next to the **Code** button). Import Library
# This Python 3 environment comes with many helpful analytics libraries installed. # It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python. # For example, here are several helpful packages to load: import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) from keras.preprocessing.image import load_img # Input data files are available in the "../input/" directory. # For example, running the following (by selecting 'Run' or pressing Shift+Enter) will list the files in the input directory: import matplotlib.pyplot as plt import random import os import zipfile # Define locations BASE_PATH = os.getcwd() DATA_PATH = BASE_PATH + "/cats_and_dogs_filtered/" !mkdir model MODEL_PATH = BASE_PATH + '/model/' # Define image parameters FAST_RUN = False IMAGE_WIDTH=128 IMAGE_HEIGHT=128 IMAGE_SIZE=(IMAGE_WIDTH, IMAGE_HEIGHT) IMAGE_CHANNELS=3 # RGB color # Any results you write to the current directory are saved as output. DATA_PATH + 'catsndogs.zip'
_____no_output_____
Apache-2.0
demos/gpu/horovod/cpu/image-classification/01-load-data-cats-n-dogs.ipynb
omesser/tutorials
Download the Data
!mkdir cats_and_dogs_filtered # Download a sample stocks file from Iguazio demo bucket in AWS S3 !curl -L "iguazio-sample-data.s3.amazonaws.com/catsndogs.zip" > ./cats_and_dogs_filtered/catsndogs.zip zip_ref = zipfile.ZipFile(DATA_PATH + 'catsndogs.zip', 'r') zip_ref.extractall('cats_and_dogs_filtered') zip_ref.close()
_____no_output_____
Apache-2.0
demos/gpu/horovod/cpu/image-classification/01-load-data-cats-n-dogs.ipynb
omesser/tutorials
Prepare the Training Data
import json


def build_prediction_map(categories_map):
    """Invert a {label_name: class_index} map to {class_index: label_name}."""
    return {v: k for k, v in categories_map.items()}


# Create a file-names list (JPG image-files only)
filenames = [file for file in os.listdir(DATA_PATH + "/cats_n_dogs") if file.endswith('jpg')]
categories = []

# Categories and prediction-classes map
categories_map = {
    'dog': 1,
    'cat': 0,
}
prediction_map = build_prediction_map(categories_map)
with open(MODEL_PATH + 'prediction_classes_map.json', 'w') as f:
    json.dump(prediction_map, f)

# Create a pandas DataFrame for the full sample.
# Each file is named "<label>.<id>.jpg", so the label is the first dot-field.
for filename in filenames:
    category = filename.split('.')[0]
    # BUG FIX: append the class index itself, not a one-element list; the
    # original produced a column of lists, which str-casts to "[0]"/"[1]"
    # instead of the "0"/"1" class labels expected downstream.
    categories.append(categories_map[category])

df = pd.DataFrame({
    'filename': filenames,
    'category': categories
})
df['category'] = df['category'].astype('str')

df.head()
df.tail()
_____no_output_____
Apache-2.0
demos/gpu/horovod/cpu/image-classification/01-load-data-cats-n-dogs.ipynb
omesser/tutorials
Check the Total Image CountCheck the total image count for each category.The data set has 12,000 cat images and 12,000 dog images.
# Bar chart of the number of images per class (cats vs. dogs).
df['category'].value_counts().plot.bar()
_____no_output_____
Apache-2.0
demos/gpu/horovod/cpu/image-classification/01-load-data-cats-n-dogs.ipynb
omesser/tutorials
Display the Sample Image
# Pick a random image file and display it as a sanity check.
sample = random.choice(filenames)
image = load_img(DATA_PATH+"/cats_n_dogs/"+sample)
plt.imshow(image)
_____no_output_____
Apache-2.0
demos/gpu/horovod/cpu/image-classification/01-load-data-cats-n-dogs.ipynb
omesser/tutorials
INSTALLATION
!pip install aif360 !pip install fairlearn !apt-get install -jre !java -version !pip install h2o !pip install xlsxwriter
Collecting xlsxwriter Downloading XlsxWriter-2.0.0-py2.py3-none-any.whl (149 kB) [?25l  |██▏ | 10 kB 19.1 MB/s eta 0:00:01  |████▍ | 20 kB 20.9 MB/s eta 0:00:01  |██████▋ | 30 kB 16.7 MB/s eta 0:00:01  |████████▊ | 40 kB 5.6 MB/s eta 0:00:01  |███████████ | 51 kB 4.5 MB/s eta 0:00:01  |█████████████▏ | 61 kB 5.2 MB/s eta 0:00:01  |███████████████▎ | 71 kB 5.8 MB/s eta 0:00:01  |█████████████████▌ | 81 kB 6.4 MB/s eta 0:00:01  |███████████████████▊ | 92 kB 6.6 MB/s eta 0:00:01  |█████████████████████▉ | 102 kB 5.2 MB/s eta 0:00:01  |████████████████████████ | 112 kB 5.2 MB/s eta 0:00:01  |██████████████████████████▎ | 122 kB 5.2 MB/s eta 0:00:01  |████████████████████████████▌ | 133 kB 5.2 MB/s eta 0:00:01  |██████████████████████████████▋ | 143 kB 5.2 MB/s eta 0:00:01  |████████████████████████████████| 149 kB 5.2 MB/s [?25hInstalling collected packages: xlsxwriter Successfully installed xlsxwriter-2.0.0
MIT
Fairness_Survey/ALGORITHMS/BaseLine/gbm and LogReg/German.ipynb
DEHO-OSCAR-BLESSED/Evaluation-of-Fairness-Algorithms
IMPORTS
# Core scientific stack and feature-selection helpers.
import numpy as np
from mlxtend.feature_selection import ExhaustiveFeatureSelector
from xgboost import XGBClassifier
# import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import openpyxl
import xlsxwriter
from openpyxl import load_workbook
import shap

# Suppress pandas SettingWithCopy warnings.
pd.set_option('mode.chained_assignment', None)

from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import SelectKBest, SelectFwe, SelectPercentile, SelectFdr, SelectFpr, SelectFromModel
from sklearn.feature_selection import chi2, mutual_info_classif
# from skfeature.function.similarity_based import fisher_score

# Fairness metrics and dataset wrappers from AIF360.
import aif360
import matplotlib.pyplot as plt
from aif360.metrics.classification_metric import ClassificationMetric
from aif360.metrics import BinaryLabelDatasetMetric
from aif360.datasets import StandardDataset, BinaryLabelDataset

from sklearn.preprocessing import MinMaxScaler
MM = MinMaxScaler()

# H2O AutoML backend (requires a local Java runtime).
import h2o
from h2o.automl import H2OAutoML
from h2o.estimators.glm import H2OGeneralizedLinearEstimator

import sys
sys.path.append("../")
import os

# Start (or connect to) a local H2O server.
h2o.init()
Checking whether there is an H2O instance running at http://localhost:54321 ..... not found. Attempting to start a local H2O server... Java Version: openjdk version "11.0.11" 2021-04-20; OpenJDK Runtime Environment (build 11.0.11+9-Ubuntu-0ubuntu2.18.04); OpenJDK 64-Bit Server VM (build 11.0.11+9-Ubuntu-0ubuntu2.18.04, mixed mode, sharing) Starting server from /usr/local/lib/python3.7/dist-packages/h2o/backend/bin/h2o.jar Ice root: /tmp/tmpr8vm4o15 JVM stdout: /tmp/tmpr8vm4o15/h2o_unknownUser_started_from_python.out JVM stderr: /tmp/tmpr8vm4o15/h2o_unknownUser_started_from_python.err Server is running at http://127.0.0.1:54321 Connecting to H2O server at http://127.0.0.1:54321 ... successful.
MIT
Fairness_Survey/ALGORITHMS/BaseLine/gbm and LogReg/German.ipynb
DEHO-OSCAR-BLESSED/Evaluation-of-Fairness-Algorithms
**************************LOADING DATASET*******************************
# Mount Google Drive so the per-split CSVs and the results workbook are reachable.
from google.colab import drive
drive.mount('/content/gdrive')

# For each of the 50 German-credit train/test splits: train a GBM via H2O AutoML,
# compute AIF360 data- and classifier-level fairness metrics ('age' is the
# protected attribute, age==1 privileged), and append one row of results to the
# 'German' sheet of the GBM results workbook.
# Fixes vs. original: removed a stray "10" token that corrupted the
# "columns = next(data)[0:]" line, and removed dead commented-out code.
for i in range(1, 51):
    # ----- load split i -----
    train_url = r'/content/gdrive/MyDrive/Datasets/SurveyData/DATASET/German/Train'
    train_path = os.path.join(train_url, "Train" + str(i) + ".csv")
    train = pd.read_csv(train_path)
    test_url = r'/content/gdrive/MyDrive/Datasets/SurveyData/DATASET/German/Test'
    test_path = os.path.join(test_url, "Test" + str(i) + ".csv")
    test = pd.read_csv(test_path)

    # ----- min-max normalisation: fit on train, apply to both splits -----
    Fitter = MM.fit(train)
    train = pd.DataFrame(Fitter.transform(train), columns=train.columns)
    test = pd.DataFrame(Fitter.transform(test), columns=test.columns)

    # ----- wrap the test split as an AIF360 BinaryLabelDataset -----
    advantagedGroup = [{'age': 1}]
    disadvantagedGroup = [{'age': 0}]

    class Test(StandardDataset):
        def __init__(self, label_name='default', favorable_classes=[1],
                     protected_attribute_names=['age'], privileged_classes=[[1]]):
            super(Test, self).__init__(
                df=test, label_name=label_name,
                favorable_classes=favorable_classes,
                protected_attribute_names=protected_attribute_names,
                privileged_classes=privileged_classes)

    BLD_Test = Test(protected_attribute_names=['age'], privileged_classes=[[1]])

    # ----- bias present in the raw data -----
    DataBias_Checker = BinaryLabelDatasetMetric(
        BLD_Test, unprivileged_groups=disadvantagedGroup,
        privileged_groups=advantagedGroup)
    dsp = DataBias_Checker.statistical_parity_difference()
    dif = DataBias_Checker.consistency()
    ddi = DataBias_Checker.disparate_impact()
    print('The Statistical Parity diference is = {diff}'.format(diff=dsp))
    print('Individual Fairness is = {IF}'.format(IF=dif))
    print('Disparate Impact is = {IF}'.format(IF=ddi))

    # ----- H2O AutoML training (GBM only) -----
    x = list(train.columns)
    y = "default"
    x.remove(y)
    Train = h2o.H2OFrame(train)
    # NOTE: this rebinds the name 'Test' (the dataset class above); BLD_Test
    # has already been built, so the class is no longer needed.
    Test = h2o.H2OFrame(test)
    Train[y] = Train[y].asfactor()
    Test[y] = Test[y].asfactor()
    aml = H2OAutoML(max_models=10, nfolds=10, include_algos=['GBM'], stopping_metric='AUTO')
    aml.train(x=x, y=y, training_frame=Train)
    best_model = aml.leader

    # ----- replace labels of a copy of the test set with model predictions -----
    gbm_Predictions = best_model.predict(Test)
    gbm_Predictions = gbm_Predictions.as_data_frame()
    predicted_df = test.copy()
    predicted_df['default'] = gbm_Predictions.predict.to_numpy()

    # ----- wrap the predicted labels as a BinaryLabelDataset -----
    class PredTest(StandardDataset):
        def __init__(self, label_name='default', favorable_classes=[1],
                     protected_attribute_names=['age'], privileged_classes=[[1]]):
            super(PredTest, self).__init__(
                df=predicted_df, label_name=label_name,
                favorable_classes=favorable_classes,
                protected_attribute_names=protected_attribute_names,
                privileged_classes=privileged_classes)

    BLD_PredTest = PredTest(protected_attribute_names=['age'], privileged_classes=[[1]])

    # ----- read previously accumulated results from the workbook -----
    excelBook = load_workbook('/content/gdrive/MyDrive/Datasets/SurveyData/RESULTS/BaseLines/GBM/gbm_Results.xlsx')
    German = excelBook['German']
    data = German.values
    columns = next(data)[0:]  # first worksheet row holds the column headers
    OldDF = pd.DataFrame(data, columns=columns)

    # ----- classifier performance and fairness metrics for this split -----
    ClassifierBias = ClassificationMetric(
        BLD_Test, BLD_PredTest, unprivileged_groups=disadvantagedGroup,
        privileged_groups=advantagedGroup)
    Accuracy = ClassifierBias.accuracy()
    TPR = ClassifierBias.true_positive_rate()
    TNR = ClassifierBias.true_negative_rate()
    NPV = ClassifierBias.negative_predictive_value()
    PPV = ClassifierBias.positive_predictive_value()
    SP = ClassifierBias.statistical_parity_difference()
    IF = ClassifierBias.consistency()
    DI = ClassifierBias.disparate_impact()
    EOP = ClassifierBias.true_positive_rate_difference()
    EO = ClassifierBias.average_odds_difference()
    # unprivileged-minus-privileged gaps
    FDR = ClassifierBias.false_discovery_rate(privileged=False) - ClassifierBias.false_discovery_rate(privileged=True)
    NPV_diff = ClassifierBias.negative_predictive_value(privileged=False) - ClassifierBias.negative_predictive_value(privileged=True)
    FOR = ClassifierBias.false_omission_rate(privileged=False) - ClassifierBias.false_omission_rate(privileged=True)
    PPV_diff = ClassifierBias.positive_predictive_value(privileged=False) - ClassifierBias.positive_predictive_value(privileged=True)
    # between-group vs within-group entropy decompositions
    BGE = ClassifierBias.between_group_generalized_entropy_index()
    WGE = ClassifierBias.generalized_entropy_index() - ClassifierBias.between_group_generalized_entropy_index()
    BGTI = ClassifierBias.between_group_theil_index()
    WGTI = ClassifierBias.theil_index() - ClassifierBias.between_group_theil_index()
    EDF = ClassifierBias.differential_fairness_bias_amplification()

    # ----- append this split's row and write the sheet back -----
    newdf = pd.DataFrame(index=[0], data={
        'ACCURACY': Accuracy, 'TPR': TPR, 'PPV': PPV, 'TNR': TNR, 'NPV': NPV,
        'SP': SP, 'CONSISTENCY': IF, 'DI': DI, 'EOP': EOP, 'EO': EO,
        'FDR': FDR, 'NPV_diff': NPV_diff, 'FOR': FOR, 'PPV_diff': PPV_diff,
        'BGEI': BGE, 'WGEI': WGE, 'BGTI': BGTI, 'WGTI': WGTI, 'EDF': EDF,
        'DATA_SP': dsp, 'DATA_CONS': dif, 'DATA_DI': ddi})
    newdf = pd.concat([OldDF, newdf])
    pathway = r"/content/gdrive/MyDrive/Datasets/SurveyData/RESULTS/BaseLines/GBM/gbm_Results.xlsx"
    with pd.ExcelWriter(pathway, engine='openpyxl') as writer:
        # reuse the already-loaded workbook so other sheets are preserved
        writer.book = excelBook
        writer.sheets = dict((ws.title, ws) for ws in excelBook.worksheets)
        newdf.to_excel(writer, sheet_name='German', index=False)
    print('Accuracy', Accuracy)
_____no_output_____
MIT
Fairness_Survey/ALGORITHMS/BaseLine/gbm and LogReg/German.ipynb
DEHO-OSCAR-BLESSED/Evaluation-of-Fairness-Algorithms
LOGISTIC REGRESSION
# Same experiment as the GBM cell, but with an H2O logistic regression
# (binomial GLM, no regularisation) as the classifier, appending results to
# the LogReg workbook instead.
for i in range(1, 51, 1):
    # load split i from Google Drive
    train_url = r'/content/gdrive/MyDrive/Datasets/SurveyData/DATASET/German/Train'
    train_path = os.path.join(train_url, ("Train" + str(i) + ".csv"))
    train = pd.read_csv(train_path)
    test_url = r'/content/gdrive/MyDrive/Datasets/SurveyData/DATASET/German/Test'
    test_path = os.path.join(test_url, ("Test" + str(i) + ".csv"))
    test = pd.read_csv(test_path)

    # normalization of train and test sets (scaler fit on train only)
    Fitter = MM.fit(train)
    transformed_train = Fitter.transform(train)
    train = pd.DataFrame(transformed_train, columns=train.columns)
    # test normalization
    transformed_test = Fitter.transform(test)
    test = pd.DataFrame(transformed_test, columns=test.columns)

    # *************CHECKING FAIRNESS IN DATASET**************************
    ## ****************CONVERTING TO BLD FORMAT******************************
    # Transforming the Train and Test Set to BinaryLabel.
    # 'age' is the protected attribute; age==1 is the privileged group.
    advantagedGroup = [{'age': 1}]
    disadvantagedGroup = [{'age': 0}]

    # class Train(StandardDataset):
    #     def __init__(self, label_name='default',
    #                  favorable_classes=[1], protected_attribute_names=['age'], privileged_classes=[[1]]):
    #         super(Train, self).__init__(df=train, label_name=label_name,
    #                                     favorable_classes=favorable_classes,
    #                                     protected_attribute_names=protected_attribute_names,
    #                                     privileged_classes=privileged_classes)
    # BLD_Train = Train(protected_attribute_names=['age'],
    #                   privileged_classes=[[1]])

    class Test(StandardDataset):
        def __init__(self, label_name='default',
                     favorable_classes=[1], protected_attribute_names=['age'], privileged_classes=[[1]], ):
            super(Test, self).__init__(df=test, label_name=label_name,
                                       favorable_classes=favorable_classes,
                                       protected_attribute_names=protected_attribute_names,
                                       privileged_classes=privileged_classes,
                                       )

    BLD_Test = Test(protected_attribute_names=['age'],
                    privileged_classes=[[1]])

    ## ********************Checking Bias in Data********************************
    DataBias_Checker = BinaryLabelDatasetMetric(BLD_Test, unprivileged_groups=disadvantagedGroup, privileged_groups=advantagedGroup)
    dsp = DataBias_Checker.statistical_parity_difference()
    dif = DataBias_Checker.consistency()
    ddi = DataBias_Checker.disparate_impact()
    print('The Statistical Parity diference is = {diff}'.format(diff=dsp))
    print('Individual Fairness is = {IF}'.format(IF=dif))
    print('Disparate Impact is = {IF}'.format(IF=ddi))

    # ********************SETTING TO H20 FRAME AND MODEL TRAINING*******************************
    x = list(train.columns)
    y = "default"
    x.remove(y)
    Train = h2o.H2OFrame(train)
    # NOTE: rebinds the name 'Test' (the dataset class above); BLD_Test is already built.
    Test = h2o.H2OFrame(test)
    Train[y] = Train[y].asfactor()
    Test[y] = Test[y].asfactor()

    # binomial GLM = logistic regression; lambda_=0 disables regularisation
    LogReg = H2OGeneralizedLinearEstimator(family="binomial", lambda_=0)
    LogReg.train(x=x, y=y, training_frame=Train)
    LogReg_Predictions = LogReg.predict(Test)
    LogReg_Predictions = LogReg_Predictions.as_data_frame()

    # *************************REPLACE LABELS OF DUPLICATED TEST SET WITH PREDICTIONS**************************************
    predicted_df = test.copy()
    predicted_df['default'] = LogReg_Predictions.predict.to_numpy()

    # ***************************COMPUTE DISCRIMINATION********************************
    advantagedGroup = [{'age': 1}]
    disadvantagedGroup = [{'age': 0}]

    class PredTest(StandardDataset):
        def __init__(self, label_name='default',
                     favorable_classes=[1], protected_attribute_names=['age'], privileged_classes=[[1]], ):
            super(PredTest, self).__init__(df=predicted_df, label_name=label_name,
                                           favorable_classes=favorable_classes,
                                           protected_attribute_names=protected_attribute_names,
                                           privileged_classes=privileged_classes,
                                           )

    BLD_PredTest = PredTest(protected_attribute_names=['age'],
                            privileged_classes=[[1]])

    # read previously accumulated results from the LogReg workbook
    excelBook = load_workbook(r"/content/gdrive/MyDrive/Datasets/SurveyData/RESULTS/BaseLines/LogReg/LR_Results.xlsx")
    German = excelBook['German']
    data = German.values
    # Get columns (first worksheet row holds the headers)
    columns = next(data)[0:]
    OldDF = pd.DataFrame(data, columns=columns)

    ClassifierBias = ClassificationMetric(BLD_Test, BLD_PredTest, unprivileged_groups=disadvantagedGroup, privileged_groups=advantagedGroup)
    Accuracy = ClassifierBias.accuracy()
    TPR = ClassifierBias.true_positive_rate()
    TNR = ClassifierBias.true_negative_rate()
    NPV = ClassifierBias.negative_predictive_value()
    PPV = ClassifierBias.positive_predictive_value()
    SP = ClassifierBias.statistical_parity_difference()
    IF = ClassifierBias.consistency()
    DI = ClassifierBias.disparate_impact()
    EOP = ClassifierBias.true_positive_rate_difference()
    EO = ClassifierBias.average_odds_difference()
    # unprivileged-minus-privileged gaps
    FDR = ClassifierBias.false_discovery_rate(privileged=False) - ClassifierBias.false_discovery_rate(privileged=True)
    NPV_diff = ClassifierBias.negative_predictive_value(privileged=False) - ClassifierBias.negative_predictive_value(privileged=True)
    FOR = ClassifierBias.false_omission_rate(privileged=False) - ClassifierBias.false_omission_rate(privileged=True)
    PPV_diff = ClassifierBias.positive_predictive_value(privileged=False) - ClassifierBias.positive_predictive_value(privileged=True)
    # between-group vs within-group entropy decompositions
    BGE = ClassifierBias.between_group_generalized_entropy_index()
    WGE = ClassifierBias.generalized_entropy_index() - ClassifierBias.between_group_generalized_entropy_index()
    BGTI = ClassifierBias.between_group_theil_index()
    WGTI = ClassifierBias.theil_index() - ClassifierBias.between_group_theil_index()
    EDF = ClassifierBias.differential_fairness_bias_amplification()

    newdf = pd.DataFrame(index=[0], data={'ACCURACY': Accuracy, 'TPR': TPR, 'PPV': PPV,
        'TNR': TNR, 'NPV': NPV, 'SP': SP, 'CONSISTENCY': IF, 'DI': DI, 'EOP': EOP, 'EO': EO, 'FDR': FDR, 'NPV_diff': NPV_diff,
        'FOR': FOR, 'PPV_diff': PPV_diff, 'BGEI': BGE, 'WGEI': WGE, 'BGTI': BGTI, 'WGTI': WGTI, 'EDF': EDF,
        'DATA_SP': dsp, 'DATA_CONS': dif, 'DATA_DI': ddi})
    newdf = pd.concat([OldDF, newdf])

    pathway = r"/content/gdrive/MyDrive/Datasets/SurveyData/RESULTS/BaseLines/LogReg/LR_Results.xlsx"
    with pd.ExcelWriter(pathway, engine='openpyxl') as writer:
        # load workbook base as for writer (preserves the other sheets)
        writer.book = excelBook
        writer.sheets = dict((ws.title, ws) for ws in excelBook.worksheets)
        newdf.to_excel(writer, sheet_name='German', index=False)
        # newdf.to_excel(writer, sheet_name='Adult', index=False)

    print('Accuracy', Accuracy)
_____no_output_____
MIT
Fairness_Survey/ALGORITHMS/BaseLine/gbm and LogReg/German.ipynb
DEHO-OSCAR-BLESSED/Evaluation-of-Fairness-Algorithms
This notebook compares the email activities and draft activities of an IETF working group. Import the BigBang modules as needed. These should be in your Python environment if you've installed BigBang correctly.
# BigBang parses mailing-list archives; ietfdata provides the IETF
# datatracker client used later for draft metadata.
import bigbang.mailman as mailman
from bigbang.parse import get_date
#from bigbang.functions import *
from bigbang.archive import Archive
from ietfdata.datatracker import *
/home/sb/projects/bigbang-multi/bigbang/config/config.py:8: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details. dictionary = yaml.load(stream)
MIT
examples/experimental_notebooks/Working Group Emails and Drafts-httpbis.ipynb
Christovis/bigbang
Also, let's import a number of other dependencies we'll use later.
# General-purpose analysis and plotting dependencies.
import pandas as pd
import datetime
import matplotlib.pyplot as plt
import numpy as np
import math
import pytz
import pickle
import os
_____no_output_____
MIT
examples/experimental_notebooks/Working Group Emails and Drafts-httpbis.ipynb
Christovis/bigbang
Load the httpbis Mailing ListNow let's load the email data for analysis.
# Working-group mailing list to analyse.
# NOTE(review): the load emits "No mailing list name found at httpbisa" --
# this looks like it should be "httpbis"; confirm against the archive layout.
wg = "httpbisa"
urls = [wg]
archives = [Archive(url, mbox=True) for url in urls]
# One activity matrix (messages per sender per day) per archive.
activities = [arx.get_activity(resolved=False) for arx in archives]
activity = activities[0]
/home/sb/projects/bigbang-multi/bigbang/bigbang/mailman.py:157: UserWarning: No mailing list name found at httpbisa warnings.warn("No mailing list name found at %s" % url)
MIT
examples/experimental_notebooks/Working Group Emails and Drafts-httpbis.ipynb
Christovis/bigbang
Load IETF Draft DataNext, we will use the `ietfdata` tracker to look at the frequency of drafts for this working group.
import glob

# Draft metadata exported from the IETF datatracker for this working group.
path = '../../archives/datatracker/httpbis/draft_metadata.csv'  # use your path
draft_df = pd.read_csv(path, index_col=None, header=0, parse_dates=['date'])
_____no_output_____
MIT
examples/experimental_notebooks/Working Group Emails and Drafts-httpbis.ipynb
Christovis/bigbang
We will want to use the data of the drafts. Time resolution is too small.
# Reduce datetimes to calendar dates; sub-day resolution is not needed here.
draft_df['date'] = draft_df['date'].dt.date
_____no_output_____
MIT
examples/experimental_notebooks/Working Group Emails and Drafts-httpbis.ipynb
Christovis/bigbang
Gender score and tendency measuresThis notebook uses the (notably imperfect) method of using first names to guess the gender of each draft author.
from gender_detector import gender_detector as gd

# US-name-frequency based first-name gender guesser.
detector = gd.GenderDetector('us')

def gender_score(name):
    """
    Takes a full name and returns a score for the guessed gender.

    1 - male
    0 - female
    .5 - unknown
    """
    try:
        first_name = name.split(" ")[0]
        guess = detector.guess(first_name)
        if guess == "male":
            return 1.0
        if guess == "female":
            return 0.0
        # name does not have confidence to guess
        return 0.5
    except Exception:
        # malformed/empty name or detector failure: treat as unknown
        # (was a bare `except:`; also removed a dead `score = 0` assignment)
        return 0.5
_____no_output_____
MIT
examples/experimental_notebooks/Working Group Emails and Drafts-httpbis.ipynb
Christovis/bigbang
Gender guesses on mailing list activityNow to use the gender guesser to track the contributions by differently gendered participants over time.
from bigbang.parse import clean_name

# Sum the per-sender activity columns into three buckets keyed by the
# guessed gender score (0.0 / 0.5 / 1.0), then give them readable names.
gender_activity = activity.groupby(
    by=lambda x: gender_score(clean_name(x)),
    axis=1).sum().rename({0.0: "women",
                          0.5: "unknown",
                          1.0: "men"},
                         axis="columns")
_____no_output_____
MIT
examples/experimental_notebooks/Working Group Emails and Drafts-httpbis.ipynb
Christovis/bigbang
Note that our gender scoring method currently is unable to get a clear guess for a large percentage of the emails!
# Share of email volume whose sender gender could not be guessed.
unknown_share = gender_activity["unknown"].sum() / gender_activity.sum().sum()
# Fix: the original format string was "%f.2" (it printed e.g. "0.394629.2")
# and the value was a fraction, not a percentage.
print("%.2f percent of emails are from an unknown gender." % (100 * unknown_share))

plt.bar(["women", "unknown", "men"], gender_activity.sum())
plt.title("Total emails sent by guessed gender")
0.394629.2 percent of emails are from an unknown gender.
MIT
examples/experimental_notebooks/Working Group Emails and Drafts-httpbis.ipynb
Christovis/bigbang
PlottingSome preprocessing is necessary to get the drafts data ready for plotting.
from matplotlib import cm

viridis = cm.get_cmap('viridis')

# Number of drafts submitted on each calendar day.
drafts_per_day = draft_df.groupby('date').count()['title']
# log(1+x)-damped version for plotting alongside email volume.
dpd_log = drafts_per_day.apply(lambda x: np.log1p(x))
_____no_output_____
MIT
examples/experimental_notebooks/Working Group Emails and Drafts-httpbis.ipynb
Christovis/bigbang
For each of the mailing lists we are looking at, plot the rolling average (over `window`) of number of emails sent per day.Then plot a vertical line with the height of the drafts count and colored by the gender tendency.
# Rolling window (days) used to smooth the daily email counts.
window = 100

plt.figure(figsize=(12, 6))

# One smoothed line per gender bucket.
for i, gender in enumerate(gender_activity.columns):
    colors = [viridis(0), viridis(.5), viridis(.99)]
    ta = gender_activity[gender]
    rmta = ta.rolling(window).mean()
    rmtadna = rmta.dropna()  # drop the NaN head produced by the rolling mean
    plt.plot_date(np.array(rmtadna.index),
                  np.array(rmtadna.values),
                  color=colors[i],
                  linestyle='-', marker=None,
                  label='%s email activity - %s' % (wg, gender),
                  xdate=True)

# Vertical lines mark days on which drafts were submitted; line height is
# the number of drafts that day.
vax = plt.vlines(drafts_per_day.index,
                 0,
                 drafts_per_day,
                 colors='r',  # draft_gt_per_day,
                 cmap='viridis',
                 label=f'{wg} drafts ({drafts_per_day.sum()} total)'
                 )

plt.legend()
plt.title("%s working group emails and drafts" % (wg))
#plt.colorbar(vax, label = "more womanly <-- Gender Tendency --> more manly")
#plt.savefig("activites-marked.png")
#plt.show()
_____no_output_____
MIT
examples/experimental_notebooks/Working Group Emails and Drafts-httpbis.ipynb
Christovis/bigbang
Is gender diversity correlated with draft output?
from scipy.stats import pearsonr
import pandas as pd

def calculate_pvalues(df):
    """Return a square DataFrame of pairwise Pearson p-values (rounded to
    4 decimal places) for the numeric columns of *df*, after dropping rows
    containing NaN."""
    # select_dtypes replaces the private/deprecated _get_numeric_data()
    df = df.dropna().select_dtypes(include="number")
    cols = df.columns
    pvalues = pd.DataFrame(index=cols, columns=cols)
    for r in cols:
        for c in cols:
            # .loc replaces chained assignment (pvalues[r][c] = ...), which
            # is deprecated and unreliable in newer pandas.
            pvalues.loc[c, r] = round(pearsonr(df[r], df[c])[1], 4)
    return pvalues

# Drafts per day re-indexed by ordinal day so it can join the rolling frame.
drafts_per_ordinal_day = pd.Series({x[0].toordinal(): x[1] for x in drafts_per_day.items()})
drafts_per_ordinal_day
ta.rolling(window).mean()

# Smoothed, log-damped activity per gender bucket plus derived columns.
garm = np.log1p(gender_activity.rolling(window).mean())
garm['diversity'] = (garm['unknown'] + garm['women']) / garm['men']
garm['drafts'] = drafts_per_ordinal_day
garm['drafts'] = garm['drafts'].fillna(0)  # days without drafts count as 0

garm.corr(method='pearson')
calculate_pvalues(garm)
_____no_output_____
MIT
examples/experimental_notebooks/Working Group Emails and Drafts-httpbis.ipynb
Christovis/bigbang
Some variations...
# Variant of garm restricted to rows with a (non-NaN) drafts value.
garm_dna = garm.dropna(subset=['drafts'])
_____no_output_____
MIT
examples/experimental_notebooks/Working Group Emails and Drafts-httpbis.ipynb
Christovis/bigbang
Visualize 3D Points (Parabolic data)This notebook uses 3D plots to visualize 3D points. Reads measurement data from a csv file.
%matplotlib notebook
##%matplotlib inline
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
import numpy as np
from filter.kalman import Kalman3D

# Fixed-width float formatting for numpy printouts.
fmt = lambda x: "%9.3f" % x
np.set_printoptions(formatter={'float_kind': fmt})

## Read from csv file
import pandas as pd
_____no_output_____
MIT
src_yolov5/kalman_utils/.ipynb_checkpoints/visualize_3d_points-ball3-checkpoint.ipynb
diddytpq/Tennisball-Tracking-in-Video
Read and Prepare DataRead ball tracking position data from saved CSV file and prepare mx,my,mz. The file should have dT values along with X,Y,Z values.We use these as measurements and use Kalman3D tracker to track the ball. Once we exhaust all measurements, we use Kalman3D to predict the rest of the trajectory.*Note*: The position data that we are using is in millimeters and milliseconds. However, the Kalman3D tracker uses all values in meters and seconds. We have to do this conversion here.
# File containing 3D points predicted and measured. Last column with time passage will be ignored SYNTH = False if SYNTH: data_ = pd.read_csv('data/datafile_parabolic.csv') ## Synthetic data else: data_ = pd.read_csv('data/input_positions_3.csv') ## Real data print(data_.keys()) data = data_/1000. _mx = np.float32(data['mx']) _my = np.float32(data['my']) _mz = np.float32(data['mz']) if SYNTH: ## Drop useless data mx = _mx[0:19] my = _my[0:19] mz = _mz[0:19] else: mx = _mx my = _my mz = _mz print("mx: {} {}".format(mx.shape, mx)) print("my: {} {}".format(my.shape, my)) print("mz: {} {}".format(mz.shape, mz)) def getpos(i, x,y,z): return(np.float32([x[i],y[i],z[i]])) mx
_____no_output_____
MIT
src_yolov5/kalman_utils/.ipynb_checkpoints/visualize_3d_points-ball3-checkpoint.ipynb
diddytpq/Tennisball-Tracking-in-Video
Track and PredictNow we use our Kalman3D tracker to track the position of the ball based on measured data and then predict the trajectory when all measurement data is exhausted.
# Tracker setup: measurements arrive at 100 Hz.
fps = 100.
dT = (1 / fps)
print("dT: {:f}".format(dT))

KF = Kalman3D(drg=1.0, dbg=0)
pred = KF.init(getpos(0, mx, my, mz))
print("pred: {}".format(pred))

##-#######################################################################################
## Tracking
## Since we are doing all operations in zero time, specify dT manually (e.g., 0.033 sec)
px = np.float32([pred[0]])
py = np.float32([pred[1]])
pz = np.float32([pred[2]])
for i in range(len(mx) - 1):
    # Fix: the original passed the *list* [i+1], so numpy fancy indexing
    # returned 1-element arrays instead of scalar coordinates.
    pred = KF.track(getpos(i + 1, mx, my, mz), dT)
    px = np.append(px, pred[0])
    py = np.append(py, pred[1])
    pz = np.append(pz, pred[2])
    print(" tracked position : {}".format(pred * 1000))

##-#######################################################################################
## Trajectory prediction
## Since we are doing all operations in zero time, specify dT manually (e.g., 0.033 sec)
for ii in range(15):
    pred = KF.predict(dT)  # Use last value of dT for all predictions
    px = np.append(px, pred[0])
    py = np.append(py, pred[1])
    pz = np.append(pz, pred[2])
    print("predicted position : {}".format(pred * 1000))

# Quick sanity check of init/track/predict on a hand-picked point.
# (The original assigned x,y,z three times in a row; only the last took effect.)
x, y, z = 53, 18, 12
fps = 100.
dT = (1 / fps)
print("dT: {:f}".format(dT))
KF = Kalman3D(drg=1.0, dbg=0)
pred = KF.init(np.float32([x, y, z]))
print("predicted position : {}".format(pred))
pred = KF.track(np.float32([x, y, z]), dT)
pred = KF.predict(dT)

# Elapsed-time axis for plotting.
# (Renamed the accumulator from `sum`, which shadowed the builtin.)
elapsed = 0
tm = np.zeros(len(px))
for i in range(len(tm)):
    elapsed += dT
    tm[i] = elapsed

## Convert positions back to millimeters for plotting.
px = px * 1000.
py = py * 1000.
pz = pz * 1000.
nmx = mx * 1000.
nmy = my * 1000.
nmz = mz * 1000.
ntm = tm * 1000.

##-#######################################################################################
## Everything is in millimeters and milliseconds now
##-#######################################################################################
print("px size", px.shape)
print("tm size", tm.shape)

## Visualize X, Y, and Z individually: each figure overlays the predicted (P)
## and measured (M) component against time.
print("PX, MX")
fig1a = plt.figure()
plt.plot(tm, px)
plt.plot(tm[0:len(nmx)], nmx)
plt.legend('PM', ncol=1, loc='upper left')

print("PY, MY")
fig1b = plt.figure()
plt.plot(tm, py)
plt.plot(tm[0:len(nmy)], nmy)
plt.legend('PM', ncol=1, loc='upper left')

print("PZ, MZ")
fig1c = plt.figure()
plt.plot(tm, pz)
plt.plot(tm[0:len(nmz)], nmz)
plt.legend('PM', ncol=1, loc='upper left')
_____no_output_____
MIT
src_yolov5/kalman_utils/.ipynb_checkpoints/visualize_3d_points-ball3-checkpoint.ipynb
diddytpq/Tennisball-Tracking-in-Video
Visualize (X,Y,Z) of Predicted and Measured Points in 3DIn the plot below, we visualize all the predicted and measured points in 3D. This gives a more realistic view of how the predicted points are related to the measured points.
# 3D overlay of the predicted trajectory (blue) and measurements (magenta).
fig2 = plt.figure()
ax = plt.axes(projection='3d')
#ax.set_xlim3d(-2000,2000)
#ax.set_ylim3d(-2000,2000)
#ax.set_zlim3d(-2000,2000)
if 0:  ## Plot axis or not
    st = [0, 0, 0]
    xx = [200, 0, 0]
    yy = [0, 200, 0]
    zz = [0, 0, 200]
    for i in range(len(st)):
        ax.plot([st[i], xx[i]], [st[i], yy[i]], zs=[st[i], zz[i]])
ax.plot3D(px, py, pz, 'blue')
ax.plot3D(nmx, nmy, nmz, 'magenta')


class Kalman_filiter():
    """Thin convenience wrapper around Kalman3D.

    (The misspelled class name is kept for backward compatibility with
    existing callers.)
    """

    def __init__(self, x_init, y_init, z_init, dT):
        """Initialise the tracker at position (x_init, y_init, z_init)."""
        self.KF = Kalman3D(drg=0.507, dbg=4)
        self.dT = dT
        self.pred = self.KF.init(np.float32([x_init, y_init, z_init]))

    def update(self, x, y, z, dT):
        """Feed one measurement; the filtered position is stored in self.pred."""
        self.dT = dT
        self.pred = self.KF.track(np.float32([x, y, z]), self.dT)

    def get_predict(self, dT):
        """Advance the filter by dT without a measurement and return the
        predicted position.

        Fix: the original stored the prediction in self.pred but returned
        None despite the 'get_' name; returning it is backward-compatible.
        """
        self.pred = self.KF.predict(dT)
        return self.pred


# Exercise the wrapper on a sample point.
x, y, z = [-7.923465928004007, -0.6755867599611189, 2.580941671512611]
a = Kalman_filiter(x, y, z, 0.04)
print(a.pred)
a.update(x, y, z, 0.04)
print(a.pred)
a.get_predict(0.04)
print(a.pred)

x, y, z = [-5.810376800248608, -0.4175195849390212, 2.3275454429899285]
a.KF.measNoise
_____no_output_____
MIT
src_yolov5/kalman_utils/.ipynb_checkpoints/visualize_3d_points-ball3-checkpoint.ipynb
diddytpq/Tennisball-Tracking-in-Video
Chapter4: 実践的なアプリケーションを作ってみよう 4.1 アプリケーションランチャーを作ってみよう① 4.1.1 設定ファイルの保存・呼出し
# Listing 4.1.1: create the settings file
import configparser

# Build the parser and register the applications launched by section "Run1".
config = configparser.ConfigParser()

run1_apps = {
    "app1": r"C:\WINDOWS\system32\notepad.exe",
    "app2": r"C:\Program Files\Internet Explorer\iexplore.exe",
}
config["Run1"] = run1_apps

# Persist the configuration to config.ini in the working directory.
with open("config.ini", "w+") as file:
    config.write(file)
_____no_output_____
MIT
Chapter_4/Chapter_4-1.ipynb
YutaIzumi/python-app-book
※ Macの場合
##### For macOS #####
# Listing 4.1.1: create the settings file
import configparser

config = configparser.ConfigParser()

# Settings file contents
config["Run1"] = {
    "app1": "/Applications/TextEdit.app",
    "app2": "/Applications/Safari.app"
}

# Write the settings file
with open("config.ini", "w+") as file:
    config.write(file)

# Listing 4.1.2: the settings file (config.ini) produced on Windows:
#   [Run1]
#   app1 = C:\WINDOWS\system32\notepad.exe
#   app2 = C:\Program Files\Internet Explorer\iexplore.exe
# (Fix: this listing was pasted as bare INI text, which is not valid Python;
# it is kept here as a comment.)

# Listing 4.1.3: example of adding sections and variables (in memory only)
config["Run1"] = {
    "app1": r"C:\WINDOWS\system32\notepad.exe",
    "app2": r"C:\Program Files\Internet Explorer\iexplore.exe"
}
config["Run2"] = {
    "app1": r"C:\WINDOWS\system32\notepad.exe",
    "app2": r"C:\Program Files\Internet Explorer\iexplore.exe",
    "app3": r"C:\WINDOWS\system32\mspaint.exe"
}

# Listing 4.1.4: read the settings back
# read() merges the file into the parser, so the file's (macOS) values
# override the in-memory Windows paths set just above.
config.read("config.ini")
read_base = config["Run1"]
print(read_base.get("app1"))
print(read_base.get("app2"))
/Applications/Safari.app
MIT
Chapter_4/Chapter_4-1.ipynb
YutaIzumi/python-app-book
Fastpages Notebook Blog Post> A tutorial of fastpages for Jupyter notebooks.- toc: true - badges: true- comments: true- categories: [jupyter]- image: images/chart-preview.png AboutThis notebook is a demonstration of some of the capabilities of [fastpages](https://github.com/fastai/fastpages) with notebooks.With `fastpages` you can save your jupyter notebooks into the `_notebooks` folder at the root of your repository, and they will automatically be converted to Jekyll compliant blog posts! Front MatterFront Matter is a markdown cell at the beginning of your notebook that allows you to inject metadata into your notebook. For example:- Setting `toc: true` will automatically generate a table of contents- Setting `badges: true` will automatically include GitHub and Google Colab links to your notebook.- Setting `comments: true` will enable commenting on your blog post, powered by [utterances](https://github.com/utterance/utterances).More details and options for front matter can be viewed on the [front matter section](https://github.com/fastai/fastpages#front-matter-related-options) of the README. Markdown Shortcuts A `hide` comment at the top of any code cell will hide **both the input and output** of that cell in your blog post.A `hide_input` comment at the top of any code cell will **only hide the input** of that cell.
#hide_input print('The comment #hide_input was used to hide the code that produced this.\n')
The comment #hide_input was used to hide the code that produced this.
Apache-2.0
_notebooks/2020-02-20-test.ipynb
mudit1729/Blog
put a `collapse-hide` flag at the top of any cell if you want to **hide** that cell by default, but give the reader the option to show it:
#collapse-hide import pandas as pd import altair as alt
_____no_output_____
Apache-2.0
_notebooks/2020-02-20-test.ipynb
mudit1729/Blog
put a `collapse-show` flag at the top of any cell if you want to **show** that cell by default, but give the reader the option to hide it:
#collapse-show
# URLs of the Vega sample datasets used by the example charts below.
stocks = 'https://vega.github.io/vega-datasets/data/stocks.csv'
sp500 = 'https://vega.github.io/vega-datasets/data/sp500.csv'
flights = 'https://vega.github.io/vega-datasets/data/flights-5k.json'
movies = 'https://vega.github.io/vega-datasets/data/movies.json'
cars = 'https://vega.github.io/vega-datasets/data/cars.json'
_____no_output_____
Apache-2.0
_notebooks/2020-02-20-test.ipynb
mudit1729/Blog
Interactive Charts With AltairCharts made with Altair remain interactive. Example charts taken from [this repo](https://github.com/uwdata/visualization-curriculum), specifically [this notebook](https://github.com/uwdata/visualization-curriculum/blob/master/altair_interaction.ipynb).
# hide
# Build the widget option lists for the interactive charts below.
df = pd.read_json(movies)                      # load the movies dataset
unique_values = df['Major_Genre'].unique()
# Drop None entries and sort alphabetically.
genres = sorted(v for v in unique_values if v is not None)

#hide
mpaa = ['G', 'PG', 'PG-13', 'R', 'NC-17', 'Not Rated']
_____no_output_____
Apache-2.0
_notebooks/2020-02-20-test.ipynb
mudit1729/Blog
Example 1: DropDown
# Single-value selection over (Major_Genre, MPAA_Rating) pairs, initialised
# to hard-wired values and bound to HTML input widgets (dropdown + radio).
selection = alt.selection_single(
    name='Select',
    fields=['Major_Genre', 'MPAA_Rating'],
    init={'Major_Genre': 'Drama', 'MPAA_Rating': 'R'},
    bind={'Major_Genre': alt.binding_select(options=genres),
          'MPAA_Rating': alt.binding_radio(options=mpaa)},
)

# Scatter plot whose point opacity reflects the current selection.
alt.Chart(movies).mark_circle().add_selection(
    selection
).encode(
    x='Rotten_Tomatoes_Rating:Q',
    y='IMDB_Rating:Q',
    tooltip='Title:N',
    opacity=alt.condition(selection, alt.value(0.75), alt.value(0.05)),
)
_____no_output_____
Apache-2.0
_notebooks/2020-02-20-test.ipynb
mudit1729/Blog
Example 2: Tooltips
# Scatter plot with x-axis pan/zoom and per-point tooltips.
zoom_x = alt.selection_interval(bind='scales', encodings=['x'])

chart = alt.Chart(movies).mark_circle().add_selection(zoom_x).encode(
    x='Rotten_Tomatoes_Rating:Q',
    # minExtent stabilises the axis-title placement while panning.
    y=alt.Y('IMDB_Rating:Q', axis=alt.Axis(minExtent=30)),
    tooltip=['Title:N', 'Release_Date:N', 'IMDB_Rating:Q',
             'Rotten_Tomatoes_Rating:Q'],
)
chart.properties(width=600, height=400)
_____no_output_____
Apache-2.0
_notebooks/2020-02-20-test.ipynb
mudit1729/Blog
Example 3: More Tooltips
# Details-on-demand: highlight and label the data point nearest the cursor.
hover = alt.selection_single(
    encodings=['x'],     # limit the selection to the x-axis value
    on='mouseover',      # select on mouseover events
    nearest=True,        # snap to the data point nearest the cursor
    empty='none'         # an empty selection contains no data points
)

# Base line chart of the stock prices (log-scaled y axis).
price_lines = alt.Chart().mark_line().encode(
    alt.X('date:T'),
    alt.Y('price:Q', scale=alt.Scale(type='log')),
    alt.Color('symbol:N')
)

# Vertical guide line at the hovered date.
guide = alt.Chart().mark_rule(color='#aaa').encode(
    x='date:T'
).transform_filter(hover)

# Circle markers, visible only for the selected time point.
markers = price_lines.mark_circle().encode(
    opacity=alt.condition(hover, alt.value(1), alt.value(0))
).add_selection(hover)

# White-stroked text behind the labels keeps them legible over the lines.
halo = price_lines.mark_text(
    align='left', dx=5, dy=-5, stroke='white', strokeWidth=2
).encode(
    text='price:Q'
).transform_filter(hover)

# The price labels themselves.
labels = price_lines.mark_text(align='left', dx=5, dy=-5).encode(
    text='price:Q'
).transform_filter(hover)

alt.layer(
    price_lines, guide, markers, halo, labels,
    data=stocks
).properties(
    width=700,
    height=400
)
_____no_output_____
Apache-2.0
_notebooks/2020-02-20-test.ipynb
mudit1729/Blog
Data TablesYou can display tables per the usual way in your blog:
# Load the movies dataset and display a few columns as a regular table.
movies = 'https://vega.github.io/vega-datasets/data/movies.json'
df = pd.read_json(movies)

columns = ['Title', 'Worldwide_Gross', 'Production_Budget', 'IMDB_Rating']
df[columns].head()
_____no_output_____
Apache-2.0
_notebooks/2020-02-20-test.ipynb
mudit1729/Blog
Coupling to Ideal LoadsIn this notebook, we investigate the WEST ICRH antenna behaviour when the front-face is considered as the combination of ideal (and independent) loads made of impedances all equal to $Z_s=R_c+j X_s$, where $R_c$ corresponds to the coupling resistance and $X_s$ is the strap reactance. In such a case, the power delivered to the plasma/front-face is then:$$P_t = \frac{1}{2} \sum_{i=1}^4 \Re[V_i I_i^* ] = \frac{1}{2} \sum_{i=1}^4 \Re[Z_i] |I_i|^2= \frac{1}{2} R_c \sum_{i=1}^4 |I_i|^2$$Hence, we have defined the coupling resistance as:$$R_c = \frac{\sum_{i=1}^4 \Re[Z_i] |I_i|^2}{\sum_{i=1}^4 |I_i|^2}$$Inversely, the coupling resistance can be determined from:$$R_c = \frac{2 P_t}{\sum_{i=1}^4 |I_i|^2}$$In practice however, it is easier to measure RF voltages than currents: $$I = \frac{V}{Z_s} = \frac{V}{R_c + j X_s} \rightarrow |I|^2 = \frac{|V|^2}{|R_c + j X_s|^2}\approx\frac{|V|^2}{|X_s|^2}$$since $|X_s| \gg |R_c|$.The antenna model allows calculating the coupling resistance from currents (the `.Rc()` method) or from the voltages (the `.Rc_WEST()` method).The strap reactance $X_s$ depends on the strap geometry and varies with the frequency, so let's find the strap reactance from the realistic CAD model.
%matplotlib widget import matplotlib.pyplot as plt import numpy as np import skrf as rf from tqdm.notebook import tqdm # WEST ICRH Antenna package import sys; sys.path.append('..') from west_ic_antenna import WestIcrhAntenna # styling the figures rf.stylely()
C:\Users\JH218595\Documents\scikit-rf\skrf\plotting.py:1441: UserWarning: Style includes a parameter, 'interactive', that is not related to style. Ignoring mpl.style.use(os.path.join(pwd, style_file))
MIT
doc/coupling_to_ideal_load.ipynb
jhillairet/WEST_IC_antenna
Coupling to an ideal front-faceCoupling to an ideal front face of coupling resistance $R_c$ is easy using the `.load()` method of the `WestIcrhAntenna` class. This method takes into account the strap reactance frequency fit (derived in [Strap Reactance Frequency Fit](./strap_reactance.ipynb))
# Build an antenna loaded by an ideal 1 Ohm front-face and match each side.
freq = rf.Frequency(30, 70, npoints=1001, unit='MHz')
ant_ideal = WestIcrhAntenna(frequency=freq)
ant_ideal.load(Rc=1)  # ideal front-face with a 1 Ohm coupling resistance

# Match the two sides independently at the same target frequency.
# NOTE: with this ideal (uncoupled) load both solutions are (almost) identical.
f_match = 55.5e6  # Hz
C_left, C_right = (
    ant_ideal.match_one_side(f_match=f_match, side=side)
    for side in ('left', 'right')
)
True solution #1: [53.56567365 45.98820709] True solution #1: [53.56564882 45.98822849]
MIT
doc/coupling_to_ideal_load.ipynb
jhillairet/WEST_IC_antenna
At the difference of the "real" situation (see the [Matching](./matching.ipynb) or the [Coupling to a TOPICA plasma](./coupling_to_plasma_from_TOPICA.ipynb)), here is no poloidal neither toroidal coupling of the straps in this front-face model. This leads to:* Match soluitions are the same for both sides (within $10^{-3}$ pF). * Using the match solutions for each sides does not require to shift the operating frequency:
# Dipole excitation: equal forward power, opposite phase on the two sides.
power = [1, 1]
phase = [0, rf.pi]

# Combine the left/right solutions into one capacitance set and evaluate the
# active S-parameters at this match point.
C_match = [C_left[0], C_left[1], C_right[2], C_right[3]]
s_act = ant_ideal.s_act(power, phase, Cs=C_match)

# Plot the active S-parameter magnitudes in dB versus frequency.
fig, ax = plt.subplots()
s_act_db = 20 * np.log10(np.abs(s_act))
ax.plot(ant_ideal.f_scaled, s_act_db, lw=2)
ax.legend(('$S_{act,1}$', '$S_{act,2}$'))
ax.grid(True)
_____no_output_____
MIT
doc/coupling_to_ideal_load.ipynb
jhillairet/WEST_IC_antenna
Match Points vs Coupling Resistance Let's determine the match points for a range of coupling resistance at a given frequency
# Compute the match point for a range of coupling resistances at f_match.
f_match = 55e6  # Hz
Rcs = np.r_[0.01, 0.05, np.arange(0.1, 2.5, 0.2)]  # Ohm

ant = WestIcrhAntenna()

def _match_point(antenna, Rc):
    """Load the antenna with an ideal Rc front-face and match one side."""
    antenna.load(Rc)
    return antenna.match_one_side(f_match=f_match)

C_matchs = [_match_point(ant, Rc) for Rc in tqdm(Rcs)]
_____no_output_____
MIT
doc/coupling_to_ideal_load.ipynb
jhillairet/WEST_IC_antenna
As the coupling resistance increases, the distance between capacitances (Top vs Bottom) increases:
# Top/bottom match capacitances versus coupling resistance; dashed lines mark
# the values obtained for the smallest Rc as a reference.
fig, ax = plt.subplots()
top_bot = np.array(C_matchs)[:, 0:2]
ax.plot(Rcs, top_bot, lw=2, marker='o')
for reference, colour in zip(C_matchs[0][:2], ('C0', 'C1')):
    ax.axhline(reference, ls='--', color=colour)
ax.set_xlabel('Rc [Ohm]')
ax.set_ylabel('C [pF]')
ax.legend(('Top', 'Bot'))
_____no_output_____
MIT
doc/coupling_to_ideal_load.ipynb
jhillairet/WEST_IC_antenna
Displayed differently, the distance between capacitances (Top - Bottom) versus coupling resistance is:
# Capacitance shift (Top increases, Bottom decreases) relative to the
# lowest-Rc match point.
Cs_arr = np.array(C_matchs)
delta_C_pos = Cs_arr[:, 0] - C_matchs[0][0]
delta_C_neg = C_matchs[0][1] - Cs_arr[:, 1]

fig, ax = plt.subplots()
ax.plot(Rcs, delta_C_pos, label='Top: + $\Delta C$', lw=2)
ax.plot(Rcs, delta_C_neg, label='Bot: - $\Delta C$', lw=2)
ax.set_xlabel('Rc [Ohm]')
ax.set_ylabel('$\Delta C$ [pF]')
ax.set_ylim(bottom=0)
ax.set_xlim(left=0)
ax.legend()
_____no_output_____
MIT
doc/coupling_to_ideal_load.ipynb
jhillairet/WEST_IC_antenna
Load Resilience CurvesIdeal loads are useful to study the load-tolerance property of the antenna and the capacitance match points. It is only necessary to work on half an antenna here, because there is no coupling between toroidal elements. Now that we have figured out the match points, let's vary the coupling resistance for a fixed match point and look at the reflected power (or VSWR): this will highlight the load-resilience property of the antenna.
# Load-resilience study: for each match point (tuned at a given Rc), sweep the
# actual coupling resistance and record the resulting VSWR.

# Create a single-frequency-point antenna to speed up the calculations.
ant = WestIcrhAntenna(frequency=rf.Frequency.from_f(f_match, unit='Hz'))

fig, ax = plt.subplots()
power = [1, 1]
phase = [0, np.pi]  # dipole excitation
for C_match in tqdm(C_matchs[0:8]):
    SWRs = []
    # Matched side uses the computed capacitances; the other side is parked
    # at 150 pF.
    ant.Cs = [C_match[0], C_match[1], 150, 150]
    for Rc in Rcs:
        ant.load(Rc)
        SWR = ant.circuit().network.s_vswr.squeeze()[0, 0]
        SWRs.append(SWR)
    ax.plot(Rcs, np.array(SWRs), lw=2)
ax.set_xlabel('Rc [Ohm]')
ax.set_ylabel('VSWR')
ax.set_ylim(1, 8)
ax.axhline(2, color='r')  # usual VSWR < 2 operation limit
ax.legend(Rcs)  # NOTE: only the first len(lines) labels are actually used

from IPython.core.display import HTML

def _set_css_style(css_file_path):
    """
    Read the custom CSS file and load it into Jupyter.

    Pass the file path to the CSS file.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original version leaked it via open(...).read()).
    with open(css_file_path, "r") as css_file:
        styles = css_file.read()
    s = '<style>%s</style>' % styles
    return HTML(s)

_set_css_style('custom.css')
_____no_output_____
MIT
doc/coupling_to_ideal_load.ipynb
jhillairet/WEST_IC_antenna
数组基础 创建一个数组
# Ways to create NumPy arrays.
# NOTE: converted from Python-2 print statements to Python 3; the duplicate
# `import numpy` and the exploratory third-party `pdir(np)` call were removed.
import numpy as np

a1 = np.array([0, 1, 2, 3, 4])   # from a list (any sequence/array-like works)
a2 = np.array((0, 1, 2, 3, 4))   # from a tuple
print('a1:', a1, type(a1))
print('a2:', a2, type(a2))

b = np.arange(5)                 # array version of the built-in range()
print('b:', b, type(b))

c1 = np.ones((3, 4))             # all-ones array with the shape given as a tuple
c2 = np.ones_like(a1)            # all-ones array with a1's shape and dtype
print('c1', c1, type(c1))
print('c2', c2, type(c2))

d1 = np.zeros((5, 6))            # all-zeros array
d2 = np.zeros_like(c1)           # all-zeros array with c1's shape and dtype
print('d1', d1, type(d1))
print('d2', d2, type(d2))

e1 = np.empty((2, 3))            # allocates memory WITHOUT initialising: garbage values, not zeros
e2 = np.empty_like(d1)
print('e1', e1, type(e1))
print('e2', e2, type(e2))

f1 = np.eye(3)                   # N x N identity matrix (ones on the diagonal)
f2 = np.identity(4)              # same idea: return the identity array
print('f1', f1, type(f1))
print('f2', f2, type(f2))

g = np.linspace(0, 10, 5)        # 5 evenly spaced numbers over [0, 10]
print('g', g, type(g))
g [ 0. 2.5 5. 7.5 10. ] <type 'numpy.ndarray'>
Apache-2.0
numpy/2-numpy-middle.ipynb
GmZhang3/data-science-ipython-notebooks
数组属性
# Basic ndarray attributes.
a = np.array([[11, 12, 13, 14, 15],
              [16, 17, 18, 19, 20],
              [21, 22, 23, 24, 25],
              [26, 27, 28, 29, 30],
              [31, 32, 33, 34, 35]])
print(type(a))      # numpy.ndarray
print(a.dtype)      # element type (platform default int; int32 was observed on Windows)
print(a.size)       # 25 -- total number of elements
print(a.shape)      # (5, 5) -- length along each axis
print(a.itemsize)   # bytes per element (depends on dtype, e.g. 32/8 = 4 for int32)
print(a.ndim)       # 2 -- number of dimensions
<type 'numpy.ndarray'> int32 25 (5L, 5L) 4 2
Apache-2.0
numpy/2-numpy-middle.ipynb
GmZhang3/data-science-ipython-notebooks
使用数组数组可不必编写循环即可实现循环-数组的矢量化大小相等的数组之间的任何数学运算都会应用到元素级大小不相等的数组之间的运算-叫做广播 基本操作符-数组四则运算 +、- 、/
# Element-wise arithmetic between equal-shaped arrays: operations are applied
# pair-wise, element by element, with no explicit loop (vectorisation).
# NOTE: converted from Python-2 print statements to Python 3.
a = np.arange(25)
print('a:', a, type(a))
a = a.reshape((5, 5))   # new shape, same data
print('a:', a, type(a))

b = np.array([10, 62, 1, 14, 2, 56, 79, 2, 1, 45, 4, 92, 5,
              55, 63, 43, 35, 6, 53, 24, 56, 3, 56, 44, 78])
print(b.shape)
b = b.reshape((5, 5))
print('b:', b, type(b))

print(a + b)
print(a - b)
print(a * b)
print(a / b)    # true division in Python 3 (Python 2 floor-divided ints; use a // b for that)
print(a ** 2)
print(a < b)    # comparison operators return a boolean array
print(a > b)
print(a.dot(b)) # matrix product; a scalar only when both operands are 1-D
[[ 417 380 254 446 555] [1262 1735 604 1281 1615] [2107 3090 954 2116 2675] [2952 4445 1304 2951 3735] [3797 5800 1654 3786 4795]]
Apache-2.0
numpy/2-numpy-middle.ipynb
GmZhang3/data-science-ipython-notebooks
数组特殊运算符
# Reduction / accumulation helpers: sum, min, max, cumsum.
# NOTE: converted from Python-2 print statements to Python 3.
a = np.arange(10)
print('a:', a)
print(a.sum())      # 45
print(a.min())      # 0
print(a.max())      # 9
print(a.cumsum())   # running sum: [0 1 3 6 10 15 21 28 36 45]
a: [0 1 2 3 4 5 6 7 8 9] 45 0 9 [ 0 1 3 6 10 15 21 28 36 45]
Apache-2.0
numpy/2-numpy-middle.ipynb
GmZhang3/data-science-ipython-notebooks
索引 基本索引-整数索引
# Basic (integer) indexing.
# NOTE: converted from Python-2 print statements to Python 3.
a = np.array([[11, 12, 13, 14, 15],
              [16, 17, 18, 19, 20],
              [21, 22, 23, 24, 25],
              [26, 27, 28, 29, 30],
              [31, 32, 33, 34, 35]])
print('a:', a, type(a))

# Two equivalent ways to access one element:
print(a[1][3])   # chained indexing (two lookups)
print(a[1, 3])   # comma-separated index tuple (single lookup, preferred)

# Omitting trailing indices returns a lower-dimensional sub-array (a row here).
print(a[2])
a: [[11 12 13 14 15] [16 17 18 19 20] [21 22 23 24 25] [26 27 28 29 30] [31 32 33 34 35]] <type 'numpy.ndarray'> 19 19 [21 22 23 24 25]
Apache-2.0
numpy/2-numpy-middle.ipynb
GmZhang3/data-science-ipython-notebooks
数组切片数组切片与列表切片重要区别在于。数组切片是原数组的视图,这意味着数据不会被复制,视图上的任何修改都会直接反映到源数组上
# Array slices are VIEWS of the original data: unlike list slicing, nothing is
# copied, so any modification of a view writes through to the source array.
# NOTE: converted from Python-2 print statements to Python 3.
a = np.array([[11, 12, 13, 14, 15],
              [16, 17, 18, 19, 20],
              [21, 22, 23, 24, 25],
              [26, 27, 28, 29, 30],
              [31, 32, 33, 34, 35]])
print('a:', a)

# Pure slicing keeps the number of dimensions:
print(a[::2, ::2])   # every other row and column
print(a[:3, :2])
# Mixing slices with integer indices drops a dimension:
print(a[0, 1:4])     # [12 13 14]
print(a[1:4, 0])     # [16 21 26]
print(a[:, 1])       # [12 17 22 27 32]

# A slice is a view: writing to it modifies `a` itself.
a_slice = a[:, 0]
print('a_slice:', a_slice)
a_slice[:] = 66
print('a:', a)

# For an independent copy, call .copy() explicitly.
a_slice_copy = a[:, 0].copy()
print("a_slice_copy:", a_slice_copy)
a_slice_copy[:] = 111
print("a_slice_copy:", a_slice_copy)
print('a:', a)       # unchanged by the copy's modification
a_slice_copy: [66 66 66 66 66] a_slice_copy: [111 111 111 111 111] a: [[66 12 13 14 15] [66 17 18 19 20] [66 22 23 24 25] [66 27 28 29 30] [66 32 33 34 35]]
Apache-2.0
numpy/2-numpy-middle.ipynb
GmZhang3/data-science-ipython-notebooks
布尔型索引布尔型数组可用于数组索引,通过布尔型数组获取到数组中的数据,将总是创建副本==,!=,>,<
# Boolean indexing: array comparisons are vectorised and produce boolean
# arrays, which can then be used as indices.  Indexing with a boolean array
# ALWAYS copies the data.  Element-wise operators: ==, !=, >, <.
# NOTE: converted from Python-2 print statements to Python 3.
names = np.array(['bob', 'joe', 'will', 'bob', 'will', 'joe', 'joe'])
names.shape          # (7,) -- bare expression, no output mid-cell
names == 'bob'

data = np.random.randn(7, 4)
data

# The boolean array's length must match the length of the indexed axis:
# names == 'bob' (length 7) selects rows of data,
# names2 == 'bob' (length 4) selects columns of data.
print(data[names == 'bob'])
names2 = names[:4].copy()
print(names2)
print(data[:, names2 == 'bob'])

# Boolean arrays combine freely with slices and integer indices:
print(data[names == 'bob', 1:3])
print(data[names == 'bob', 1])

# NOTE: the Python keywords `and` / `or` are INVALID on boolean arrays;
# build compound masks with | (or), & (and), ~ (not).
mask = (names != 'bob')
print(data[mask])
mask = (names == 'bob') | (names == 'will')
print(data[mask])

# Assigning through a boolean index writes into the original array:
data[data < 0] = 7
print(data)
# Whole rows / columns can be set the same way:
data[names != 'bob'] = 100
print(data)
data[:, names2 != 'bob'] = 200
print(data)
[[ 0.29218127 200. 200. 7. ] [ 100. 200. 200. 100. ] [ 100. 200. 200. 100. ] [ 0.53516408 200. 200. 0.88596045] [ 100. 200. 200. 100. ] [ 100. 200. 200. 100. ] [ 100. 200. 200. 100. ]] [[ 0.29218127 200. 200. 7. ] [ 100. 100. 100. 100. ] [ 100. 100. 100. 100. ] [ 0.53516408 200. 200. 0.88596045] [ 100. 100. 100. 100. ] [ 100. 100. 100. 100. ] [ 100. 100. 100. 100. ]] [[ 0.29218127 200. 200. 7. ] [ 100. 200. 200. 100. ] [ 100. 200. 200. 100. ] [ 0.53516408 200. 200. 0.88596045] [ 100. 200. 200. 100. ] [ 100. 200. 200. 100. ] [ 100. 200. 200. 100. ]]
Apache-2.0
numpy/2-numpy-middle.ipynb
GmZhang3/data-science-ipython-notebooks
花式索引花式索引是指利用整数数组进行索引,可以指定顺序选取、花式索引不同于切片,花式索引总是将数据复制到新数组中
# Fancy indexing: indexing with integer arrays.  Unlike slicing, it lets you
# pick rows/columns in an arbitrary order, and it ALWAYS copies the data into
# a new array.
# NOTE: converted from Python-2 print statements to Python 3.
arr = np.empty((8, 4))
for i in range(8):
    arr[i] = i
print(arr)

# Select rows in a chosen order by passing a list (or array) of indices:
print(arr[[4, 3, 0, 6]])
# Negative indices select from the end:
print(arr[[-1, -2]])
# Positive and negative indices can be mixed:
print(arr[[1, 2, -1, -2]])

arr = np.arange(32).reshape(8, 4)
print(arr)
# Two index arrays select individual elements -> 1-D result:
print(arr[[4, 3, 0, 6], [1, 2, 3, 0]])   # elements (4,1) (3,2) (0,3) (6,0)

# To get a rectangular row/column subset (a sub-matrix) instead:
print(arr[[4, 3, 0, 6]][:, [1, 2, 3, 0]])       # method 1: two fancy-index steps
print(arr[np.ix_([4, 3, 0, 6], [1, 2, 3, 0])])  # method 2: np.ix_() builds the indexer
[[17 18 19 16] [13 14 15 12] [ 1 2 3 0] [25 26 27 24]] [[17 18 19 16] [13 14 15 12] [ 1 2 3 0] [25 26 27 24]]
Apache-2.0
numpy/2-numpy-middle.ipynb
GmZhang3/data-science-ipython-notebooks
数组转置和轴对换
# Transposing returns a VIEW of the data (no copy is made).
# Two equivalent spellings: arr.transpose() and the arr.T attribute.
# NOTE: converted from Python-2 print statements to Python 3.
arr = np.arange(15).reshape(3, 5)
print(arr)
print(arr.T)
print(arr.transpose())

# For arrays of 3 or more dimensions, transpose() takes a tuple of axis
# numbers describing the new axis order.
arr = np.arange(16).reshape(2, 2, 4)
print(arr)
print(arr.T)                     # reverses all axes: shape (4, 2, 2)
print(arr.transpose((1, 0, 2)))  # swaps only the first two axes
[[ 0 1 2 3 4] [ 5 6 7 8 9] [10 11 12 13 14]] [[ 0 5 10] [ 1 6 11] [ 2 7 12] [ 3 8 13] [ 4 9 14]] [[ 0 5 10] [ 1 6 11] [ 2 7 12] [ 3 8 13] [ 4 9 14]] [[[ 0 1 2 3] [ 4 5 6 7]] [[ 8 9 10 11] [12 13 14 15]]] [[[ 0 8] [ 4 12]] [[ 1 9] [ 5 13]] [[ 2 10] [ 6 14]] [[ 3 11] [ 7 15]]] [[[ 0 1 2 3] [ 8 9 10 11]] [[ 4 5 6 7] [12 13 14 15]]]
Apache-2.0
numpy/2-numpy-middle.ipynb
GmZhang3/data-science-ipython-notebooks
通用函数-快速的元素级数组函数对数组中数据进行元素级运算的函数
# Universal functions (ufuncs): fast element-wise operations on arrays.
# NOTE: converted from Python-2 print statements to Python 3.

# Unary ufuncs take a single array:
arr = np.arange(10)
print(arr)
print(np.sqrt(arr))    # square root
print(np.exp(arr))     # exponential
print(np.square(arr))  # square

# Binary ufuncs take two arrays:
x = np.random.randn(5)
print(x)
y = np.random.randn(5)
print(y)
print(np.add(x, y))       # element-wise addition
print(np.subtract(x, y))  # element-wise subtraction
print(np.greater(x, y))   # element-wise comparison -> boolean array
_____no_output_____
Apache-2.0
numpy/2-numpy-middle.ipynb
GmZhang3/data-science-ipython-notebooks
将条件逻辑表述为数组运算numpy.where()函数是三元表达式 x if condition else y的矢量化版本
# np.where(cond, x, y): the vectorised version of `x if cond else y`.
# NOTE: converted from Python-2 print statements to Python 3.
xarr = np.array([1.1, 1.2, 1.3, 1.4, 1.5])
yarr = np.array([2.1, 2.2, 2.3, 2.4, 2.5])
condition = np.array([True, False, True, True, False])

# Pure-Python equivalent -- slow (interpreted loop) and it does not
# generalise to multi-dimensional arrays:
print([(x if c else y) for x, y, c in zip(xarr, yarr, condition)])

# np.where() achieves the same result concisely and fast:
print(np.where(condition, xarr, yarr))

# The 2nd/3rd arguments may also be scalars; where() is commonly used to
# derive a new array from an existing one:
arr = np.random.randn(4, 4)
print(arr)
print(np.where(arr > 0, 1, -1))   # +1 where positive, -1 elsewhere
print(np.where(arr > 0, 1, arr))  # +1 where positive, unchanged elsewhere
[ 1.1 2.2 1.3 1.4 2.5] [[ 1.45968931 0.04416752 0.17823822 -0.39762311] [ 1.1620933 0.59377557 -0.23737767 -1.43722256] [ 1.03619541 -0.64949799 0.15402381 -0.11071381] [ 1.29405992 -0.02742662 0.43881736 1.73837499]] [[ 1 1 1 -1] [ 1 1 -1 -1] [ 1 -1 1 -1] [ 1 -1 1 1]] [[ 1. 1. 1. -0.39762311] [ 1. 1. -0.23737767 -1.43722256] [ 1. -0.64949799 1. -0.11071381] [ 1. -0.02742662 1. 1. ]]
Apache-2.0
numpy/2-numpy-middle.ipynb
GmZhang3/data-science-ipython-notebooks