markdown stringlengths 0 37k | code stringlengths 1 33.3k | path stringlengths 8 215 | repo_name stringlengths 6 77 | license stringclasses 15
values |
|---|---|---|---|---|
To run this file, download https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit?usp=sharing (file name: "GoogleNews-vectors-negative300.bin")
Can also be found here: https://code.google.com/archive/p/word2vec/
# Reading Files Line by Line
def sentenceExtractionForTraining(dirName, fileName, classes):
    """Read the training sentences for every class.

    Parameters
    ----------
    dirName : str
        Directory containing one data file per class; it is concatenated
        directly to each file name, so it should end with a path separator.
    fileName : list of str
        File names, one per class, aligned with `classes`.
    classes : list
        Class labels; only its length is used here.

    Returns
    -------
    list of list
        One list of sentences (as returned by `readFile`) per class.
    """
    # One file per class; `classes` only determines how many files to read.
    return [readFile(dirName + fileName[i]) for i in range(len(classes))]
def readFile(fileName):
f = open(fileName,"r+")
sentences = []
for ... | PRNN/Assignment/Assignment3/word2Vec.ipynb | fossdevil/Assignments | mit |
# Removing fancy characters
def removeFancyChars(sentences):
    """Strip punctuation and underscores from every sentence.

    Each entry keeps only word characters and whitespace. The list is
    modified in place and also returned for convenience.
    """
    for idx, sentence in enumerate(sentences):
        sentences[idx] = re.sub(r'([^\s\w]|_)+', '', sentence)
    return sentences
def removeFC(sentencesClass):
for i in range(0, len(sentencesClass)):
sentencesClass[i] = removeFancyChars(sentencesClas... | PRNN/Assignment/Assignment3/word2Vec.ipynb | fossdevil/Assignments | mit |
word2vec. | def load_bin_vec(fname, vocab):
"""
Loads 300x1 word vecs from Google (Mikolov) word2vec
"""
word_vecs = {}
with open(fname, "rb") as f:
header = f.readline()
vocab_size, layer1_size = map(int, header.split())
binary_len = np.dtype('float32').itemsize * layer1_size
fo... | PRNN/Assignment/Assignment3/word2Vec.ipynb | fossdevil/Assignments | mit |
# For Parts Of Speech
def POSForSentence(sentence):
    """Return the list of part-of-speech tags for `sentence`.

    The sentence is tokenised with NLTK, tagged, and only the tag of
    each (token, tag) pair is kept.
    """
    tagged = nltk.pos_tag(word_tokenize(sentence))
    return [tag for _, tag in tagged]
def getUniquePOS():
    """Return (count, tag names) of the Penn Treebank POS tag set.

    Relies on NLTK's `load` helper to read the bundled tagset pickle.
    """
    penn_tags = load('help/tagsets/upenn_tagset.pickle')
    return len(penn_tags), penn_tags.keys()
# For w2v
def totalSentences(sentencesClass):
    """Return the total number of sentences across all classes.

    Parameters
    ----------
    sentencesClass : list of list
        One list of sentences per class.

    Returns
    -------
    int
        Sum of the lengths of the per-class sentence lists.
    """
    # sum() over the per-class lengths replaces the manual counter loop
    # (and drops the stray trailing semicolon of the original).
    return sum(len(sentences) for sentences in sentencesClass)
def defineW2V(sentencesClass, w2v, dim = 300):
n = totalSentences(sentencesClass)
mat = np.zeros((n, dim))
labels = np.zeros(n)
k = 0
for i in range(0... | PRNN/Assignment/Assignment3/word2Vec.ipynb | fossdevil/Assignments | mit |
# Saving to file
def savew2vToFile(w2v):
    """Persist the word -> vector dictionary to 'word2VecDict.npy'."""
    np.save("word2VecDict.npy", w2v)
def finalFeaturesLabel(X, y):
    """Prepend the label column to the feature matrix.

    Parameters
    ----------
    X : np.ndarray, shape (n, d)
        Feature matrix.
    y : array-like, length n
        Labels, one per row of `X`.

    Returns
    -------
    np.ndarray, shape (n, d + 1)
        Matrix whose first column is `y` and remaining columns are `X`.
    """
    n, d = X.shape
    finalMat = np.zeros((n, d + 1))
    # Vectorised column assignments replace the original per-row Python
    # loop; the float64 output dtype of np.zeros is preserved.
    finalMat[:, 0] = y
    finalMat[:, 1:] = X
    return finalMat
def saveW2V(fileName, finalMat):
np.save(... | PRNN/Assignment/Assignment3/word2Vec.ipynb | fossdevil/Assignments | mit |
# Loading from file
def loadW2V():
    """Load the saved word -> vector dictionary and print a small sample.

    Reads 'word2VecDict.npy' (written by `savew2vToFile`) and prints at
    most the first 11 entries as a sanity check.
    """
    # allow_pickle is required by modern numpy to unpickle a dict that
    # was stored with np.save.
    w2v = np.load('word2VecDict.npy', allow_pickle=True).item()
    # Python 3 port: iteritems()/print-statement replaced by items()/print().
    for i, (key, value) in enumerate(w2v.items()):
        if i > 10:
            break
        print(key, value)
def main():
dirName = "Email-classification_dataset/"
classes = [1,2,3,4,5]
fileName = ["RD-positive-800.txt", "mee... | PRNN/Assignment/Assignment3/word2Vec.ipynb | fossdevil/Assignments | mit |
Check all files to see if any have missing nodal information and create a selection list based on the ones that are 100% complete. | # check to see which files contains nodes with missing information
# For every subject file, record which nodes (columns) carry no signal
# at all, so that only 100%-complete files can be selected afterwards.
missingarray = []
for i in onlyfiles:
    # load the node-by-time matrix for this subject
    filename = i
    ts_raw = np.loadtxt(filename)
    # indices of columns that are all zeros (missing nodal information)
    missingn = np.where(~ts_raw.any(axis=0))[0]
    missingarray.append(missingn)
# select the ones that don't have missing... | Gradients.ipynb | autism-research-centre/Autism-Gradients | gpl-3.0 |
run the diffusion embedding | # run the diffusion embedding
from mapalign import embed
for i in selected:
# load timeseries
#print i
filename = i
ts = np.loadtxt(filename)
# create correlation matrix
dcon = np.corrcoef(ts.T)
dcon[np.isnan(dcon)] = 0
# Get number of nodes
N = dcon.shape[0]
# threshold
p... | Gradients.ipynb | autism-research-centre/Autism-Gradients | gpl-3.0 |
Run Statis to back-project the grouped embeddings | %%capture
from pySTATIS import statis
#load vectors
names = list(xrange(392))
X = [np.load("./data/Outputs/Embs/"+ os.path.basename(filename)+"_embedding_dense_res_veconly.npy") for filename in selected2]
out = statis.statis(X, names, fname='statis_results.npy')
statis.project_back(X, out['Q'], path = "./data/Outputs/... | Gradients.ipynb | autism-research-centre/Autism-Gradients | gpl-3.0 |
plotting
plot to surface for inspection
this cell is only necessary for plotting below | %matplotlib inline
import matplotlib.pylab as plt
import nilearn
import nilearn.plotting
import numpy as np
import nibabel as nib
def rebuild_nii(num):
data = np.load('Mean_Vec.npy')
a = data[:,num].copy()
nim = nib.load('cc400_roi_atlas.nii')
imdat=nim.get_data()
imdat_new = imdat.copy()
fo... | Gradients.ipynb | autism-research-centre/Autism-Gradients | gpl-3.0 |
Output everything to an excel file | import pandas as pd
# read in csv
df_phen = pd.read_csv('Phenotypic_V1_0b_preprocessed1.csv')
# add a column that matches the filename
for i in df_phen:
df_phen['filename'] = join(df_phen['FILE_ID']+"_rois_cc400.1D")
df_phen['filenamelpy'] = join(df_phen['FILE_ID']+"_rois_cc400.1D.npy")
df_phen['selec'] = np.w... | Gradients.ipynb | autism-research-centre/Autism-Gradients | gpl-3.0 |
Compare the slopes across subjects | from scipy import stats
grdnt_slope = []
for i in selected2:
# load gradients
# print i
filename = i
grdnt = np.load("./data/Outputs/Regs/" + filename + ".npy")
# do we need a specific ordering of the nodes??
y = list(xrange(392))
temp = []
for ii in range(10):
x = sorted(grdnt[:... | Gradients.ipynb | autism-research-centre/Autism-Gradients | gpl-3.0 |
# "And write them to an excel file" -- note the output is actually a
# tab-separated CSV (Combined.csv), not a true Excel workbook.
# Keep only the selected subjects, attach a lowercase filename key for a
# case-insensitive sort, then merge the phenotype rows with the gradient
# results and dump everything to disk.
data = df_phen.loc[df_phen["selec"] == 1].copy()  # .copy() avoids SettingWithCopyWarning on the next assignment
data['filenamelow'] = data['filename'].str.lower()
# DataFrame.sort() was removed in modern pandas; sort_values() is the
# supported, equivalent replacement.
data = data.sort_values(['filenamelow'])
output = data.merge(data_grdnt, left_on='filename', right_on='file', how='outer')
output.to_csv('Combined.csv', sep='\t')
Plot some stuff | ## numpy is used for creating fake data
%matplotlib inline
import numpy as np
import matplotlib as mpl
## agg backend is used to create plot as a .png file
mpl.use('agg')
import matplotlib.pyplot as plt
df = pd.DataFrame(output, columns = ['DX_GROUP', 0,1,2,3,4,5,6,7,8,9])
ASC = df['DX_GROUP'] == 2
NT = df['DX_GRO... | Gradients.ipynb | autism-research-centre/Autism-Gradients | gpl-3.0 |
# Permutations
def exact_mc_perm_test(xs, ys, nmc):
    """Monte-Carlo permutation test for a difference in means.

    Parameters
    ----------
    xs, ys : array-like
        The two samples to compare.
    nmc : int
        Number of random permutations to draw.

    Returns
    -------
    float
        Estimated p-value: the fraction of permutations whose absolute
        mean difference exceeds the observed one.
    """
    n, k = len(xs), 0
    diff = np.abs(np.mean(xs) - np.mean(ys))
    zs = np.concatenate([xs, ys])
    for j in range(nmc):
        np.random.shuffle(zs)
        k += diff < np.abs(np.mean(zs[:n]) - np.mean(zs[n:]))
    # float() guards against Python 2 integer division, where k / nmc
    # would silently floor every non-trivial p-value to 0.
    return k / float(nmc)
print(exact_mc_perm_test(G1[0],G2[0],1000))
p... | Gradients.ipynb | autism-research-centre/Autism-Gradients | gpl-3.0 |
Some quality control | %matplotlib inline
# this cell in only necessary for plotting below
import matplotlib.pylab as plt
import nilearn
import nilearn.plotting
import numpy as np
import nibabel as nib
from os import listdir
from os.path import isfile, join
def rebuild_nii(num):
data = np.load('Mean_Vec.npy')
a = data[:,num... | Gradients.ipynb | autism-research-centre/Autism-Gradients | gpl-3.0 |
Check all individual images | nims = rebuild_nii_individ(0)
!fslview resCaltech_0051474_rois_cc400.1D.npy.nii | Gradients.ipynb | autism-research-centre/Autism-Gradients | gpl-3.0 |
Data
Make some data using this function:
$$y = 5 + 1.2x - 3.4\frac{x^2}{2!} + 5.6 \frac{x^3}{3!} + \epsilon \text{ where }
\epsilon \sim \mathcal{N}(0, 0.1^2).$$ | np.random.seed(42)
max_degree = 20 # Maximum degree of the polynomial
n_train, n_test = 100, 100 # Training and test dataset sizes
true_w = np.zeros(max_degree) # Allocate lots of empty space
true_w[0:4] = np.array([5, 1.2, -3.4, 5.6])
features = np.random.normal(size=(n_train + n_test, 1))
np.random.shuffle(featur... | notebooks/misc/poly_regression_torch.ipynb | probml/pyprobml | mit |
Train/eval loop | class Accumulator:
"""For accumulating sums over `n` variables."""
def __init__(self, n):
self.data = [0.0] * n
def add(self, *args):
self.data = [a + float(b) for a, b in zip(self.data, args)]
def reset(self):
self.data = [0.0] * len(self.data)
def __getitem__(self, idx)... | notebooks/misc/poly_regression_torch.ipynb | probml/pyprobml | mit |
Degree 3 (matches true function)
Train and test loss are similar (no over or underfitting),
Loss is small, since matches true function. Estimated parameters are close to the true ones. | # Pick the first four dimensions, i.e., 1, x, x^2/2!, x^3/3! from the
# polynomial features
train(poly_features[:n_train, :4], poly_features[n_train:, :4], labels[:n_train], labels[n_train:]) | notebooks/misc/poly_regression_torch.ipynb | probml/pyprobml | mit |
Degree 1 (underfitting) | # Pick the first two dimensions, i.e., 1, x, from the polynomial features
train(poly_features[:n_train, :2], poly_features[n_train:, :2], labels[:n_train], labels[n_train:]) | notebooks/misc/poly_regression_torch.ipynb | probml/pyprobml | mit |
Degree 20 (overfitting)
According to the D2L book, the test loss is higher than training loss.
However, SGD itself has a regularizing effect (even in full batch mode),
so I cannot reproduce overfitting (even though it would occur using a second order optimizer). | # Pick all the dimensions from the polynomial features
# Fit the degree-20 model (all polynomial features); full-batch updates
# (batch_size = n_train) for many epochs, to try to expose overfitting
# relative to the degree-3 ground truth.
train(
    poly_features[:n_train, :],
    poly_features[n_train:, :],
    labels[:n_train],
    labels[n_train:],
    num_epochs=2000,
    batch_size=n_train,
)
Allow the Cloud AI Platform service account to read/write to the bucket containing training data. | %%bash
PROJECT_ID=$PROJECT
AUTH_TOKEN=$(gcloud auth print-access-token)
SVC_ACCOUNT=$(curl -X GET -H "Content-Type: application/json" \
-H "Authorization: Bearer $AUTH_TOKEN" \
https://ml.googleapis.com/v1/projects/${PROJECT_ID}:getConfig \
| python -c "import json; import sys; response = json.load(sys.stdi... | courses/machine_learning/deepdive/03_tensorflow/labs/e_ai_platform.ipynb | turbomanage/training-data-analyst | apache-2.0 |
<h2> Packaging up the code </h2>
Take your code and put into a standard Python package structure. <a href="taxifare/trainer/model.py">model.py</a> and <a href="taxifare/trainer/task.py">task.py</a> containing the Tensorflow code from earlier (explore the <a href="taxifare/trainer/">directory structure</a>). | %%bash
## Check whether any TODO markers remain in the trainer sources.
## grep exits with 0 when it finds matches and 1 when it finds none;
## map "no TODOs" (rc=1) to a clean exit so the notebook cell succeeds.
grep TODO taxifare/trainer/*.py; rc=$?
case $rc in
    0) ;;                              # TODOs remain: grep already printed them above
    1) echo "No more TODOs!"; exit 0;; # nothing found: report and exit successfully
esac
<h2> Find absolute paths to your data </h2>
Note the absolute paths below. /content is mapped in Datalab to where the home icon takes you | %%bash
echo $PWD
rm -rf $PWD/taxi_trained
head -1 $PWD/taxi-train.csv
head -1 $PWD/taxi-valid.csv | courses/machine_learning/deepdive/03_tensorflow/labs/e_ai_platform.ipynb | turbomanage/training-data-analyst | apache-2.0 |
<h2> Running the Python module from the command-line </h2> | %%bash
rm -rf taxifare.tar.gz taxi_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/taxifare
python -m trainer.task \
--train_data_paths="${PWD}/taxi-train*" \
--eval_data_paths=${PWD}/taxi-valid.csv \
--output_dir=${PWD}/taxi_trained \
--train_steps=100 --job-dir=./tmp
%%bash
ls $PWD/taxi_trained/export/ex... | courses/machine_learning/deepdive/03_tensorflow/labs/e_ai_platform.ipynb | turbomanage/training-data-analyst | apache-2.0 |
Monitor training with TensorBoard
To activate TensorBoard within the JupyterLab UI navigate to "<b>File</b>" - "<b>New Launcher</b>". Then double-click the 'Tensorboard' icon on the bottom row.
TensorBoard 1 will appear in the new tab. Navigate through the three tabs to see the active TensorBoard. The 'Graphs' and... | %%bash
rm -rf taxifare.tar.gz taxi_trained
gcloud ai-platform local train \
--module-name=trainer.task \
--package-path=${PWD}/taxifare/trainer \
-- \
--train_data_paths=${PWD}/taxi-train.csv \
--eval_data_paths=${PWD}/taxi-valid.csv \
--train_steps=1000 \
--output_dir=${PWD}/taxi_trained | courses/machine_learning/deepdive/03_tensorflow/labs/e_ai_platform.ipynb | turbomanage/training-data-analyst | apache-2.0 |
When I ran it (due to random seeds, your results will be different), the average_loss (Mean Squared Error) on the evaluation dataset was 187, meaning that the RMSE was around 13. | !ls $PWD/taxi_trained | courses/machine_learning/deepdive/03_tensorflow/labs/e_ai_platform.ipynb | turbomanage/training-data-analyst | apache-2.0 |
<h2> Submit training job using gcloud </h2>
First copy the training data to the cloud. Then, launch a training job.
After you submit the job, go to the cloud console (http://console.cloud.google.com) and select <b>AI Platform | Jobs</b> to monitor progress.
<b>Note:</b> Don't be concerned if the notebook stalls (wi... | %%bash
echo $BUCKET
gsutil -m rm -rf gs://${BUCKET}/taxifare/smallinput/
gsutil -m cp ${PWD}/*.csv gs://${BUCKET}/taxifare/smallinput/
%%bash
OUTDIR=gs://${BUCKET}/taxifare/smallinput/taxi_trained
JOBNAME=lab3a_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs sub... | courses/machine_learning/deepdive/03_tensorflow/labs/e_ai_platform.ipynb | turbomanage/training-data-analyst | apache-2.0 |
Don't be concerned if the notebook appears stalled (with a blue progress bar) or returns with an error about being unable to refresh auth tokens. This is a long-lived Cloud job and work is going on in the cloud.
<b>Use the Cloud Console link to monitor the job and do NOT proceed until the job is done.</b>
<h2> Deploy ... | %%bash
gsutil ls gs://${BUCKET}/taxifare/smallinput/taxi_trained/export/exporter
%%bash
MODEL_NAME="taxifare"
MODEL_VERSION="v1"
MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/taxifare/smallinput/taxi_trained/export/exporter | tail -1)
echo "Run these commands one-by-one (the very first time, you'll create a model and then... | courses/machine_learning/deepdive/03_tensorflow/labs/e_ai_platform.ipynb | turbomanage/training-data-analyst | apache-2.0 |
<h2> Prediction </h2> | %%bash
gcloud ai-platform predict --model=taxifare --version=v1 --json-instances=./test.json
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
import json
credentials = GoogleCredentials.get_application_default()
api = discovery.build('ml', 'v1', credentials=credentials,
... | courses/machine_learning/deepdive/03_tensorflow/labs/e_ai_platform.ipynb | turbomanage/training-data-analyst | apache-2.0 |
<h2> Train on larger dataset </h2>
I have already followed the steps below and the files are already available. <b> You don't need to do the steps in this comment. </b> In the next chapter (on feature engineering), we will avoid all this manual processing by using Cloud Dataflow.
Go to http://bigquery.cloud.google.com... | %%bash
XXXXX this takes 60 minutes. if you are sure you want to run it, then remove this line.
OUTDIR=gs://${BUCKET}/taxifare/ch3/taxi_trained
JOBNAME=lab3a_$(date -u +%y%m%d_%H%M%S)
CRS_BUCKET=cloud-training-demos # use the already exported data
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platf... | courses/machine_learning/deepdive/03_tensorflow/labs/e_ai_platform.ipynb | turbomanage/training-data-analyst | apache-2.0 |
Make a function for the Galactic foreground:
$$
F_{\rm gal}(\phi) = (2\phi_{\rm fg})^{-1}~~~~-\phi_{\rm fg} \lt \phi \lt \phi_{\rm fg}
$$
and zero elsewhere. Therefore:
$$
P_{\rm gal}(\lambda^2) = \frac{\sin (2\phi_{\rm fg} \lambda^2)}{2\phi_{\rm fg}\lambda^2}
$$
Make an array of $\lambda^2$ values: | lam_sq = np.arange(0.01,1,0.01) | MISC/Faraday.ipynb | as595/AllOfYourBases | gpl-3.0 |
We're going to specify that $\phi_{\rm fg}= 2\,{\rm rad\,m^{-2}}$. We can then compute the Galactic contribution at each value of $\lambda^2$: | phi_fg = 2.
# Top-hat foreground of half-width phi_fg in Faraday depth transforms to
# a sinc-style profile in lambda^2; "+ 0*1j" promotes the array to a
# complex dtype so it can later be added to the complex P_rg term.
P_gal = np.sin(2*phi_fg*lam_sq)/(2*phi_fg*lam_sq) + 0*1j
Now make a function for the radio galaxy lobe:
$$
F_{\rm rg}(\phi) = 0.25\delta(\phi - \phi_1)
$$
therefore:
$$
P_{\rm rg}(\lambda^2) = 0.25 \exp (2i\phi_1 \lambda^2)
$$
which is equivalent to:
$$
P_{\rm rg}(\lambda^2) = 0.25 \cos (2\phi_1 \lambda^2) + 0.25 i \sin (2\phi_1 \lambda^2)
$$
so,
$$
Q_{\rm rg}(\lambda^2) = ... | phi_1 = 10.
# Faraday-thin radio-galaxy screen at depth phi_1 with amplitude 0.25:
# P_rg = 0.25 * (cos(2*phi_1*lam^2) + i*sin(2*phi_1*lam^2)).
rotation = 2 * phi_1 * lam_sq
P_rg = 0.25 * np.cos(rotation) + 1j * 0.25 * np.sin(rotation)
The total polarized signal will be the sum of the radio galaxy contribution and the Galactic contribution: | P_tot = P_gal + P_rg | MISC/Faraday.ipynb | as595/AllOfYourBases | gpl-3.0 |
Now let's re-create Fig. 1 from Brentjens & de Bruyn (2005; https://arxiv.org/pdf/astro-ph/0507349.pdf)
First let's plot $Q_{\rm gal}$ (called $Q_{\rm fg}$ in the paper): | pl.subplot(111)
# Dashed curve: the real (Q) part of the Galactic foreground vs lambda^2.
pl.plot(lam_sq,P_gal.real,ls='--')
pl.xlabel(r"$\lambda^2$ [m$^2$]")
pl.ylabel("Flux [Jy]")
# Fixed axes matching the reference figure in Brentjens & de Bruyn (2005).
pl.axis([0,1,-0.2,1.4])
pl.show()
Now let's plot on the magnitude of the total polarization as well: | pl.subplot(111)
# Dashed: Q of the foreground; dotted: magnitude of the total (foreground
# + radio-galaxy) polarization, showing the beating between the two terms.
pl.plot(lam_sq,P_gal.real,ls='--')
pl.plot(lam_sq,np.absolute(P_tot),ls=':')
pl.xlabel(r"$\lambda^2$ [m$^2$]")
pl.ylabel("Flux [Jy]")
pl.axis([0,1,-0.2,1.4])
pl.show()
Now let's calculate the polarization angle:
$$
\chi = 0.5\tan^{-1}\left(\frac{U}{Q}\right)
$$
where $U$ is the imaginary part of the complex polarization, $P$, and $Q$ is the real part. | chi = 0.5*np.arctan2(P_tot.imag,P_tot.real)
chi*= (180./np.pi) # convert radians to degrees
# Unwrap the arctan ambiguity: 0.5*arctan2 confines chi to a 180-degree
# range, so a jump of more than 45 degrees between neighbouring lambda^2
# samples is taken to be a wrap; add 180 degrees to the rest of the curve
# from that point on. Order matters: each correction shifts all later samples.
for i in range(1,len(chi)):
    delta_chi = np.abs(chi[i]-chi[i-1])
    if (delta_chi>45.):
        chi[i:]+=180.
pl.subplot(111)
pl.plot(lam_sq,chi)
pl.xlabel(r"$\lambda^2$... | MISC/Faraday.ipynb | as595/AllOfYourBases | gpl-3.0 |
Now plot it all together: | fig, ax1 = pl.subplots()
ln1 = ax1.plot(lam_sq, chi, 'b-',label=r"$\chi$")
ax1.set_xlabel(r"$\lambda^2$ [m$^2$]")
ax1.set_ylabel(r"$\chi$ [deg]")
ax1.set_ylim(-50, 350)
ax2 = ax1.twinx()
ln2 = ax2.plot(lam_sq,np.absolute(P_tot),ls=':',label=r"$|P|$")
ln3 = ax2.plot(lam_sq,P_gal.real,ls='--',label=r"$Q_{\rm fg}$")
ax2.... | MISC/Faraday.ipynb | as595/AllOfYourBases | gpl-3.0 |
2. Using help()
Getting help for a whole class. | help(ClassExample) | 02-basics/14-commenting_code.ipynb | vicente-gonzalez-ruiz/YAPT | cc0-1.0 |
A different way of getting help (in Ipython): | ClassExample? | 02-basics/14-commenting_code.ipynb | vicente-gonzalez-ruiz/YAPT | cc0-1.0 |
Getting help for a single member function: | help(ClassExample.get) | 02-basics/14-commenting_code.ipynb | vicente-gonzalez-ruiz/YAPT | cc0-1.0 |
help() only prints the __doc__ variable:
Any function, class or module starting with a string literal has a non-empty __doc__ attribute which can be printed to get information. | print(ClassExample.get.__doc__)
print(type(ClassExample.get.__doc__)) | 02-basics/14-commenting_code.ipynb | vicente-gonzalez-ruiz/YAPT | cc0-1.0 |
The same queries can be carried out with an instance. | a = ClassExample()
help(a)
help(a.set)
print(a.__doc__)
print(a.get.__doc__) | 02-basics/14-commenting_code.ipynb | vicente-gonzalez-ruiz/YAPT | cc0-1.0 |
Enter the following command in the next cell to look at the first record and click Run | # retrieve the first row
data_df.take(1) | Notebook/Anomaly-detection-DSWB.ipynb | ruchika05/demo | epl-1.0 |
Enter the following command in the next cell to get the number of rows in the CSV file (DataFrame) and click Run, | # retrieve the number of rows
data_df.count() | Notebook/Anomaly-detection-DSWB.ipynb | ruchika05/demo | epl-1.0 |
Create Pandas DataFrame
Enter the following commands in the next cell to create a Pandas DataFrame from the Spark SQL DataFrame and click Run. This line prints the schema of the newly created Pandas DataFrame which will be same as the Spark SQL DataFrame.
The Python Data Analysis Library (a.k.a. pandas) provides high-... | # create a pandas dataframe from the SQL dataframe
import pprint
import pandas as pd
# Convert the Spark SQL DataFrame into a local pandas DataFrame; note
# this collects all rows onto the driver.
pandaDF = data_df.toPandas()
# Replace NA/NaN values with 0 so downstream arithmetic does not propagate NaNs.
pandaDF.fillna(0, inplace=True)
# Display the column names (same schema as the Spark DataFrame).
pandaDF.columns
Enter the following commands in the next cell to set timestamp as the index if its present and click Run, | # change index to time if its present
valueHeaderName = 'value'
timeHeaderName = 'null'
if (len(header_list) == 2):
timeHeaderName = header_list[0]
valueHeaderName = header_list[1]
else:
valueHeaderName = header_list[0]
# Drop the timestamp column as the index is replaced with timestamp now
if (len(hea... | Notebook/Anomaly-detection-DSWB.ipynb | ruchika05/demo | epl-1.0 |
Calculate z-score
We detect the anomaly events using z-score, aka, a standard score indicating how many standard deviations an element is from the mean.
Enter the following commands to calculate z-score for each of the values and add it as a new column in the same DataFrame, | # calculate z-score and populate a new column
# z-score = (x - mean) / population std (ddof=0): the number of standard
# deviations each observation lies from the mean, stored as a new column.
pandaDF['zscore'] = (pandaDF[valueHeaderName] - pandaDF[valueHeaderName].mean())/pandaDF[valueHeaderName].std(ddof=0)
# Preview the first five rows with the new column.
pandaDF.head(n=5)
Plot Anomalies
When we work in notebooks, we can decide how to present your anlysis results and derived information. So far, we have used normal print functions, which are informative. However, we can also show the results in a visual way by using the popular matplotlib package to create plots.
Enter the following snip... | # ignore warnings if any
import warnings
warnings.filterwarnings('ignore')
# render the results as inline charts:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
'''
This function detects the spike and dip by returning a non-zero value
when the z-score is above 3 (spike) and below -3(dip). Inca... | Notebook/Anomaly-detection-DSWB.ipynb | ruchika05/demo | epl-1.0 |
As shown, the red marks are the unexpected spikes and dips whose z-score value is greater than 3 or less than -3. In case you want to detect the lower spikes, modify the value to 2 or even lower and run. Similarly, if you want to detect only the higher spikes, try increasing the z-score value from 3 to 4 and beyond.
... | # calculate the value that is corresponding to z-score 3
(pandaDF[valueHeaderName].std(ddof=0) * 3) + pandaDF[valueHeaderName].mean() | Notebook/Anomaly-detection-DSWB.ipynb | ruchika05/demo | epl-1.0 |
Similarly, Enter the following command into the next cell to derive the dip threshold value corresponding to z-score value -3. | # calculate the value that is corresponding to z-score -3
(pandaDF[valueHeaderName].std(ddof=0) * -3) + pandaDF[valueHeaderName].mean() | Notebook/Anomaly-detection-DSWB.ipynb | ruchika05/demo | epl-1.0 |
Sérialisation
La sérialisation désigne l'action de sauvegarder un objet dans un fichier telle qu'il est représentée dans la mémoire de l'ordinateur. De cette façon, la relecture de l'objet en question est plus rapide. La difficulté réside dans la sérialisation d'objets composites comme une liste qui contient un diction... | import pickle
l = [ {3:"4"}, "4", -5.5, [6, None]]
with open("objet_serialise.bin", "wb") as f :
pickle.dump(l, f) | _doc/notebooks/td2a/td2a_cenonce_session_2E.ipynb | sdpython/ensae_teaching_cs | mit |
Puis on récupère les données : | with open("objet_serialise.bin", "rb") as f :
obj = pickle.load(f)
obj | _doc/notebooks/td2a/td2a_cenonce_session_2E.ipynb | sdpython/ensae_teaching_cs | mit |
DataFrame
Il existe une méthode spécifique pour les DataFrame : to_pickle qu'on relie avec la méthode read_pickle. | import pandas
# Build a tiny two-row DataFrame and serialise it with the dedicated
# pandas pickling helper (counterpart of pandas.read_pickle).
df = pandas.DataFrame([
    {"name": "xavier", "school": "ENSAE"},
    {"name": "antoine", "school": "ENSAE"},
])
df.to_pickle("df_serialize.bin")
Puis on relit le fichier : | df2 = pandas.read_pickle("df_serialize.bin")
df2 | _doc/notebooks/td2a/td2a_cenonce_session_2E.ipynb | sdpython/ensae_teaching_cs | mit |
2 Millionaires
What country are most billionaires from? For the top ones, how many billionaires per billion people? | df2 = pd.read_excel("richpeople.xlsx")
# Inspect the available columns of the billionaires dataset.
df2.keys()
# Ten most frequent citizenships among billionaires.
top_countries = df2['citizenship'].value_counts().head(10)
df_top_countries = pd.DataFrame.from_dict(top_countries)
# Keep the country name as an ordinary column as well as the index,
# so it survives later merges.
df_top_countries['Country Name'] = df_top_countries.index
df_top_countries
# population: data from http://data.worldbank.org/indicator/SP.POP.TOTL
df_... | 07/Homework7.ipynb | palrogg/foundations-homework | mit |
What's the average wealth of a billionaire? Male? Female? | print("The average wealth of a billionaire (in billions) is:", df2['networthusbillion'].describe()['mean'])
print("The average wealth of a male billionaire is:", df2[df2['gender'] == 'male']['networthusbillion'].describe()['mean'])
print("The average wealth of a female billionaire is:", df2[df2['gender'] == 'female']... | 07/Homework7.ipynb | palrogg/foundations-homework | mit |
# Who is the poorest billionaire? Who are the top 10 poorest billionaires?
# DataFrame.get_value() was deprecated and removed in pandas 1.0; .loc
# provides the same scalar lookup on all supported versions.
print('The poorest billionaire is:', df2.loc[df2.sort_values('networthusbillion', ascending=True).index[0], 'name'])
df2.sort_values('networthusbillion', ascending=True).head(10)
What is 'relationship to company'? And what are the most common relationships? | #relationship_values = set
relationship_list = df2['relationshiptocompany'].tolist()
relationship_set = set(relationship_list)
relationship_set = [s.strip() for s in relationship_set if s == s] # to remove a naughty NaN and get rid of dumb whitespaces
print("The relationships are: “" + str.join('”, “', relationship_se... | 07/Homework7.ipynb | palrogg/foundations-homework | mit |
Most common source of wealth? Male vs. female? | print("The three most common sources of wealth are:\n" + str(df2['typeofwealth'].value_counts().head(3)))
print("\nFor men, they are:\n" + str(df2[df2['gender'] == 'male']['typeofwealth'].value_counts().head(3)))
print("\nFor women, they are:\n" + str(df2[df2['gender'] == 'female']['typeofwealth'].value_counts().head... | 07/Homework7.ipynb | palrogg/foundations-homework | mit |
Given the richest person in a country, what % of the GDP is their wealth? | per_country = df2.groupby(['citizenship'])
#per_country['networthusbillion'].max()
#per_country['networthusbillion'].idxmax() # DataFrame.max(axis=None, skipna=None, level=None, numeric_only=None, **kwargs)
# per_country['gdpcurrentus']
df2['percofgdp'] = (100*1000000000*df2['networthusbillion']) / (df2['gdpcurrentus'... | 07/Homework7.ipynb | palrogg/foundations-homework | mit |
3 Train stations | df_trains = pd.read_csv("stations.csv", delimiter=';')
print('A dataset about train stations and their accessibility in Switzerland:')
df_trains | 07/Homework7.ipynb | palrogg/foundations-homework | mit |
Define some common variables
Here you can define some variables that will be used repeatedly over the notebook. | # Define the study id
study = 'reanalysis:rd38'
# Define a clinical case id within the study
case_id = 'OPA-10044-1'
# Define an interpretation id (appears to follow '<case_id>__<n>' — TODO confirm against the data model)
interpretation_id = 'OPA-10044-1__2' | opencga-client/src/main/python/notebooks/user-training/pyopencga_clinical_queries.ipynb | opencb/opencga | apache-2.0 |
1. Common Queries for Clinical Analysis
Retrieve cases in a study
The query below retrieves the cases in a study. For performance reasons, we have limited the number of results retrieved in the query.
You can change the parameter limit to control the number of cases you want to retrieve for the query.
You can also co... | ## Query using the clinical search web service
## Search cases via the clinical search web service; 'include' restricts the
## returned fields and 'limit' caps the number of cases fetched.
cases_search = oc.clinical.search(study=study, include='id,type,proband,description,panels,interpretation', limit=5)
## Print a summary table; 'fields' selects which (possibly nested) attributes are shown.
cases_search.print_results(title='Cases found for study {}'.format(study), fields='id,type,proband.id,panels.id,interpretation.id')
## Uncomment next line ... | opencga-client/src/main/python/notebooks/user-training/pyopencga_clinical_queries.ipynb | opencb/opencga | apache-2.0 |
Proband information: List of disorders and HPO terms from proband of a case
The proband field from a case contains all the information related to a proband, including phenotypes and disorders.
You can retrieve all the phenotypes and disorders of a proband from a case by inspecting the information at the proband level.... | ## Query using the clinical info web service
## Search cases, fetching only the id, type and the full proband object.
disorder_search = oc.clinical.search(study=study, include='id,type,proband', limit=5)
## Show a summary; nested fields use dotted paths (e.g. proband.id).
disorder_search.print_results(title='Disorders and phenotypes', fields='id,type,proband.id')
## Keep the proband object of the first case — it carries the phenotypes/disorders.
disorder_object = disorder_search.get_results()[0]['proband']
## Uncomment next line to displa... | opencga-client/src/main/python/notebooks/user-training/pyopencga_clinical_queries.ipynb | opencb/opencga | apache-2.0 |
Check the interpretation id of a case
You can find theinterpretation id from a case. This is useful to perform subsequent queries for that interpretation.
Note that you can control the fields that are printed by the function print_results with the parameter fields. To see the whole clinical analysis object, you can us... | # Query using the clinical info web service
# Fetch a single case (case_id defined in an earlier cell) via the clinical info web service.
clinical_info = oc.clinical.info(clinical_analysis=case_id, study=study)
# Print the case id together with its interpretation id, type and proband id.
clinical_info.print_results(fields='id,interpretation.id,type,proband.id')
## Uncomment next line to display an interactive JSON viewer
# JSON(clinical_info.get_results()[0]['interpretation']) | opencga-client/src/main/python/notebooks/user-training/pyopencga_clinical_queries.ipynb | opencb/opencga | apache-2.0 |
Inspect the Interpretation object
Here you will retrieve many useful information from a case interpretation. | ## Query using the clinical info_interpretation web service
## Fetch the full interpretation object for a known interpretation id.
## NOTE(review): the id is hard-coded ('OPA-12120-1__2') rather than using the
## interpretation_id variable defined earlier — confirm this is intentional.
interpretation_object = oc.clinical.info_interpretation(interpretations='OPA-12120-1__2', study=study).get_results()
## Uncomment next line to display an interactive JSON viewer
# JSON(interpretation_object) | opencga-client/src/main/python/notebooks/user-training/pyopencga_clinical_queries.ipynb | opencb/opencga | apache-2.0 |
Check Reported pathogenic variants in a case interpretation and list the variant tier
Run the cell below to retrieve the interpretation stats, including the pathogenic variants reported in a case. | ## Query using the clinical info_interpretation web service
## Fetch only the 'stats' field of the interpretation and keep the
## primary-findings summary (the reported variants, per the cell above).
interpretation_stats = oc.clinical.info_interpretation(interpretations='OPA-12120-1__2', include='stats', study=study).get_results()[0]['stats']['primaryFindings']
## Uncomment next line to display an interactive JSON viewer
# JSON(interpretation_stats) | opencga-client/src/main/python/notebooks/user-training/pyopencga_clinical_queries.ipynb | opencb/opencga | apache-2.0 |
Retrieve the annotation for the reported variants
Run the cell below to retrieve the annotation for the variants obtained | ## Query using the clinical info_interpretation web service
## Fetch only the annotation of the primary findings for this interpretation.
variant_annotation = oc.clinical.info_interpretation(interpretations='OPA-12120-1__2', include='primaryFindings.annotation', study=study).get_results()[0]['primaryFindings']
## Uncomment next line to display an interactive JSON viewer
# JSON(variant_annotatio... | opencga-client/src/main/python/notebooks/user-training/pyopencga_clinical_queries.ipynb | opencb/opencga | apache-2.0 |
PanelApp panels applied in the original analysis
Obtain the list of genes that were in the panel at the time of the original analysis | cases_search = oc.clinical.search(study=study, include='id,panels', limit= 5)
## Show each case together with the panel ids applied in the original analysis.
cases_search.print_results(title='Cases found for study {}'.format(study), fields='id,panels.id')
## Uncomment next line to display an interactive JSON viewer
# JSON(cases_search.get_results()) | opencga-client/src/main/python/notebooks/user-training/pyopencga_clinical_queries.ipynb | opencb/opencga | apache-2.0 |
2. Use Case
Situation: I want to retrieve a case, check whether the case has a reported pathogenic variant. Retrieve the annotation information about these variants, if available.
Finally, I want to come up with the list of tier 1, 2 and 3 variants for the sample.
1. Search Cases in the study and select one random case.... | ## Search the cases
## Search the first 3 cases of the study (no 'include', so all fields are returned).
cases_search = oc.clinical.search(study=study, limit=3)
## Uncomment next line to display an interactive JSON viewer
# JSON(cases_search.get_results()) | opencga-client/src/main/python/notebooks/user-training/pyopencga_clinical_queries.ipynb | opencb/opencga | apache-2.0 |
Now you can select one random case id for the subsequent analysis | ## Define an empty list to keep the case ids:
## Collect every case id in the study; result_iterator() walks all result pages.
case_ids = [case['id'] for case in oc.clinical.search(study=study, include='id').result_iterator()]
## Uncomment for printing the list with all the case ids
# print(case_ids)
## Select a random case f... | opencga-client/src/main/python/notebooks/user-training/pyopencga_clinical_queries.ipynb | opencb/opencga | apache-2.0 |
2. Retrieve the interpretation id/s from the seleted case | ## Query using the clinical info web service
# Fetch the previously selected case (selected_case) via the clinical info web service.
interpretation_info = oc.clinical.info(clinical_analysis=selected_case, study=study)
# Print the case id together with its interpretation id, type and proband id.
interpretation_info.print_results(fields='id,interpretation.id,type,proband.id')
## Select interpretation object
interpretation_object = interpretation_info.get_results()[0]['interpretatio... | opencga-client/src/main/python/notebooks/user-training/pyopencga_clinical_queries.ipynb | opencb/opencga | apache-2.0 |
3. Retrieve reported variants and the annotation, including tiering
Obtain the interpretation stats from the case | ## Query using the clinical info_interpretation web service
## Fetch only the 'stats' field of the selected interpretation and keep the
## primary-findings part (summary of the reported variants).
interpretation_stats = oc.clinical.info_interpretation(interpretations=interpretation_id, include='stats', study=study).get_results()[0]['stats']['primaryFindings']
## Uncomment next line to display an interactive JSON viewer
# JSON(interpretation_stats) | opencga-client/src/main/python/notebooks/user-training/pyopencga_clinical_queries.ipynb | opencb/opencga | apache-2.0 |
Obtain annotation from variants reported in a interpretation from a case as a JSON object | ## Query using the clinical info_interpretation web service
## Fetch the interpretation and keep its primary findings (the reported variants).
primary_findings = oc.clinical.info_interpretation(interpretations=interpretation_id, study=study).get_results()[0]['primaryFindings']
## Uncomment next line to display an interactive JSON viewer
# JSON(primary_findings) | opencga-client/src/main/python/notebooks/user-training/pyopencga_clinical_queries.ipynb | opencb/opencga | apache-2.0 |
Obtain tiering: variant ids, genes, and tier from a case interpretation | ## Perform the query
## Fetch the interpretation whose reported variants are walked below.
variants_reported = oc.clinical.info_interpretation(interpretations=interpretation_id, study=study)
## Accumulators for the variant ids, gene ids, gene names and tiers.
variant_list, gene_id_list, genename_list, tier_list = [], [], [], []
for variant in variants_reported.get_results()[0]['primaryFinding... | opencga-client/src/main/python/notebooks/user-training/pyopencga_clinical_queries.ipynb | opencb/opencga | apache-2.0 |
<a name="Obs">Observateurs</a>
Les lois de commande qui sont construites (PID ou autres types) dépendent de certaines variables du système (variables d'état, d'entrée ou de sortie).
Par exemple, pour le contrôle de la concentration du susbtrat $S$ dans un réacteur continu, exemple que l'on a considéré précédemment, la... | def reacteur_obs(x,t,k,muast,KS,KI,Qin,V,S0,Sast,control_type,coeffcontrol,obs,Cobs,disturb):
B = x[0] #biomass
S = x[1] #substrat
if control_type in ['PI','PID']:
indObs = 3
Shat = x[indObs+1]
dx = np.zeros(6)
dx[2] = Sast-S
dx[5] = Sast-Shat
else:
... | Partie6_Control_Obs_Sat.ipynb | fabiencampillo/systemes_dynamiques_agronomie | gpl-3.0 |
Test de l'observateur, mais sans l'intégrer à la loi de commande | # Simulation sans utiliser les observations dans la loi de commande: on suppose l'état connu
culture_cont2(2,'PI',np.array([0.07,0.01]),0,np.array([13,4,-2,0.1]),0.2) | Partie6_Control_Obs_Sat.ipynb | fabiencampillo/systemes_dynamiques_agronomie | gpl-3.0 |
Test du couplage de l'observateur avec la loi de commande | # Simulation en utilisant les observations dans la loi de commande
culture_cont2(2,'PI',np.array([0.07,0.01]),1,np.array([13,4,-2,0.1]),0.2) | Partie6_Control_Obs_Sat.ipynb | fabiencampillo/systemes_dynamiques_agronomie | gpl-3.0 |
<a name="Sat"> Saturations </a>
Dans le paragraphe précédent, on a vu que, lorsque certaines variables du système n'étaient pas mesurées, il était possible de les estimer grâce à un observateur.
Un autre problème que l'on peut rencontrer en pratique vient des saturations sur les commandes. En effet, pour des raisons so... | def reacteur_obs(x,t,k,muast,KS,KI,Qin,V,S0,Qmax,Sast,control_type,coeffcontrol,obs,Cobs,sat,coeffsat,disturb):
B = x[0] #biomass
S = x[1] #substrat
if control_type in ['PI','PID']:
indObs = 3
Shat = x[indObs+1]
dx = np.zeros(6)
dx[2] = Sast-S
dx[5] = Sast-Shat
... | Partie6_Control_Obs_Sat.ipynb | fabiencampillo/systemes_dynamiques_agronomie | gpl-3.0 |
Imposer une saturation sur une commande peut néanmoins dégrader fortement la dynamique du système en boucle fermée, voire carrément déstabiliser le système.
Cela est essentiellement dû au fait que lorsque la commande est saturée, le terme intégral continu de grossir car il continue d'intégrer l'erreur: on parle d'effet... | def reacteur_obs(x,t,k,muast,KS,KI,Qin,V,S0,Qmax,Sast,control_type,coeffcontrol,obs,Cobs,sat,coeffsat,disturb):
B = x[0] #biomass
S = x[1] #substrat
if control_type in ['PI','PID']:
indObs = 3
Shat = x[indObs+1]
dx = np.zeros(6)
dx[2] = Sast-S
dx[5] = Sast-Shat
... | Partie6_Control_Obs_Sat.ipynb | fabiencampillo/systemes_dynamiques_agronomie | gpl-3.0 |
First we start with straightforward fibonacci generator function. | def fibonacci():
    # a holds the current Fibonacci number, b the next one.
    a, b = 0, 1
    while True:
        yield a
        # Tuple assignment advances both terms in a single step.
        a, b = b, a + b
# Number of terms used by every benchmark below.
n = 45
# Reference output from this straightforward generator; the unrolled
# variants in the following cells are checked against it.
known_good_output = tuple(islice(fibonacci(), n))
# known_good_output
%timeit sum(islice(fibonacci(), n)) | 20160708-dojo-fibonacci-unroll-for-speed.ipynb | james-prior/cohpy | mit |
Next, we unroll the loop. Note that there are no assignments that just move things around. There is no wasted motion inside the loop.
It reminds me of the
[musical round](https://en.wikipedia.org/wiki/Round_(music))
Three Blind Mice. | def fibonacci():
    # Three-fold unrolled: a, b and c rotate through the role of "current term",
    # so every assignment computes a fresh Fibonacci number — no pure
    # "shuffle" moves like the a, b = b, a + b of the plain version.
    a, b = 0, 1
    while True:
        yield a
        c = a + b
        yield b
        a = b + c
        yield c
        b = c + a
# The unrolled generator must reproduce the reference sequence exactly.
assert(known_good_output == tuple(islice(fibonacci(), n)))
%timeit sum(islice(fibonacci(), n)) | 20160708-dojo-fibonacci-unroll-for-speed.ipynb | james-prior/cohpy | mit |
Next, we unroll the loop more and more to see if that makes the generator faster. | def fibonacci():
    # Six-fold unrolled: the three-yield rotation from the previous cell,
    # written out twice per loop iteration.
    a, b = 0, 1
    while True:
        yield a
        c = a + b
        yield b
        a = b + c
        yield c
        b = c + a
        yield a
        c = a + b
        yield b
        a = b + c
        yield c
        b = c + a
# Still must match the reference sequence.
assert(known_good_output == tuple(islice(fibonacci(), n)))
%timei... | 20160708-dojo-fibonacci-unroll-for-speed.ipynb | james-prior/cohpy | mit |
Graphics including GenomeDiagram
The Bio.Graphics module depends on the third party Python library ReportLab. Although focused on producing PDF files, ReportLab can also create encapsulated postscript (EPS) and (SVG) files. In addition to these vector based images, provided certain further dependencies such as the Pyth... | record = SeqIO.read("data/NC_005816.gb", "genbank") | notebooks/17 - Graphics including GenomeDiagram.ipynb | tiagoantao/biopython-notebook | mit |
We’re using a top down approach, so after loading in our sequence we next create an empty diagram, then add an (empty) track, and to that add an (empty) feature set: | gd_diagram = GenomeDiagram.Diagram("Yersinia pestis biovar Microtus plasmid pPCP1")
gd_track_for_features = gd_diagram.new_track(1, name="Annotated Features")
gd_feature_set = gd_track_for_features.new_set() | notebooks/17 - Graphics including GenomeDiagram.ipynb | tiagoantao/biopython-notebook | mit |
Now the fun part - we take each gene SeqFeature object in our SeqRecord, and use it to generate a feature on the diagram. We’re going to color them blue, alternating between a dark blue and a light blue. | for feature in record.features:
if feature.type != "gene":
#Exclude this feature
continue
if len(gd_feature_set) % 2 == 0:
color = colors.blue
else:
color = colors.lightblue
gd_feature_set.add_feature(feature, color=color, label=True) | notebooks/17 - Graphics including GenomeDiagram.ipynb | tiagoantao/biopython-notebook | mit |
Now we come to actually making the output file. This happens in two steps, first we call the draw method, which creates all the shapes using ReportLab objects. Then we call the write method which renders these to the requested file format. Note you can output in multiple file formats: | gd_diagram.draw(format="linear", orientation="landscape", pagesize='A4',
fragments=4, start=0, end=len(record))
gd_diagram.write("data/plasmid_linear.png", "png") | notebooks/17 - Graphics including GenomeDiagram.ipynb | tiagoantao/biopython-notebook | mit |
Lets have a look at the previous one:
<img src="plasmid_linear.png">
Notice that the fragments argument which we set to four controls how many pieces the genome gets broken up into.
If you want to do a circular figure, then try this: | gd_diagram.draw(format="circular", circular=True, pagesize=(20*cm,20*cm),
start=0, end=len(record), circle_core=0.7)
gd_diagram.write("data/plasmid_circular.png", "PNG")
Image("data/plasmid_circular.png") | notebooks/17 - Graphics including GenomeDiagram.ipynb | tiagoantao/biopython-notebook | mit |
These figures are not very exciting, but we’ve only just got started.
A bottom up example
Now let’s produce exactly the same figures, but using the bottom up approach. This means we create the different objects directly (and this can be done in almost any order) and then combine them. | record = SeqIO.read("data/NC_005816.gb", "genbank")
#Create the feature set and its feature objects,
gd_feature_set = GenomeDiagram.FeatureSet()
for feature in record.features:
if feature.type != "gene":
#Exclude this feature
continue
if len(gd_feature_set) % 2 == 0:
color = colors.blue... | notebooks/17 - Graphics including GenomeDiagram.ipynb | tiagoantao/biopython-notebook | mit |
You can now call the draw and write methods as before to produce a linear or circular diagram, using the code at the end of the top-down example above. The figures should be identical.
Features without a SeqFeature
In the above example we used a SeqRecord’s SeqFeature objects to build our diagram. Sometimes you won’t h... | from Bio.SeqFeature import SeqFeature, FeatureLocation
my_seq_feature = SeqFeature(FeatureLocation(50,100),strand=+1) | notebooks/17 - Graphics including GenomeDiagram.ipynb | tiagoantao/biopython-notebook | mit |
For strand, use +1 for the forward strand, -1 for the reverse strand, and None for both. Here is a short self contained example: | gdd = GenomeDiagram.Diagram('Test Diagram')
# Track 1 of the diagram, without the grey background band.
gdt_features = gdd.new_track(1, greytrack=False)
gds_features = gdt_features.new_set()
#Add three features to show the strand options,
# strand=+1 places the feature on the forward strand.
feature = SeqFeature(FeatureLocation(25, 125), strand=+1)
gds_features.add_feature(feature, name="Forward", label=True)
feature = SeqFeature... | notebooks/17 - Graphics including GenomeDiagram.ipynb | tiagoantao/biopython-notebook | mit |
The top part of the image in the next subsection shows the output (in the default feature color, pale green).
Notice that we have used the name argument here to specify the caption text for these features. This is discussed in more detail next.
Feature captions
Recall we used the following (where feature was a SeqFeatu... | gd_feature_set.add_feature(feature, color=color, label=True) | notebooks/17 - Graphics including GenomeDiagram.ipynb | tiagoantao/biopython-notebook | mit |
In the example above the SeqFeature annotation was used to pick a sensible caption for the features. By default the following possible entries under the SeqFeature object’s qualifiers dictionary are used: gene, label, name, locus_tag, and product. More simply, you can specify a name directly: | gd_feature_set.add_feature(feature, color=color, label=True, name="My Gene") | notebooks/17 - Graphics including GenomeDiagram.ipynb | tiagoantao/biopython-notebook | mit |
In addition to the caption text for each feature’s label, you can also choose the font, position (this defaults to the start of the sigil, you can also choose the middle or at the end) and orientation (for linear diagrams only, where this defaults to rotated by 45 degrees): | #Large font, parallel with the track
gd_feature_set.add_feature(feature, label=True, color="green",
label_size=25, label_angle=0)
#Very small font, perpendicular to the track (towards it)
gd_feature_set.add_feature(feature, label=True, color="purple",
label_positio... | notebooks/17 - Graphics including GenomeDiagram.ipynb | tiagoantao/biopython-notebook | mit |
Combining each of these three fragments with the complete example in the previous section should give something like this: | gdd.draw(format='linear', pagesize=(15*cm,4*cm), fragments=1,
start=0, end=400)
gdd.write("data/GD_labels.png", "png")
Image("data/GD_labels.png") | notebooks/17 - Graphics including GenomeDiagram.ipynb | tiagoantao/biopython-notebook | mit |
We’ve not shown it here, but you can also set label_color to control the label’s color.
You’ll notice the default font is quite small - this makes sense because you will usually be drawing many (small) features on a page, not just a few large ones as shown here.
Feature sigils
The examples above have all just used the ... | #Default uses a BOX sigil
gd_feature_set.add_feature(feature)
#You can make this explicit:
gd_feature_set.add_feature(feature, sigil="BOX")
#Or opt for an arrow:
gd_feature_set.add_feature(feature, sigil="ARROW")
#Box with corners cut off (making it an octagon)
gd_feature_set.add_feature(feature, sigil="OCTO")
#Box... | notebooks/17 - Graphics including GenomeDiagram.ipynb | tiagoantao/biopython-notebook | mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.