markdown
stringlengths
0
1.02M
code
stringlengths
0
832k
output
stringlengths
0
1.02M
license
stringlengths
3
36
path
stringlengths
6
265
repo_name
stringlengths
6
127
Select probes
%%time # Select probes by a stochastic greedy algorithms that optimizes the on-bit coverage # and minimizes the overlapping between probes. ps.select_probes_greedy_stochastic(probe_dict, N_probes_per_transcript=92, N_on_bits=4, N_threads=16) # Let's plot the probe coverage of an example transcript seq_len = len(transcriptome[transcriptome['transcript_id'] == 'ENST00000380152.7'].iloc[0]['sequence']) plot.plot_sequence_coverage(probe_dict['BRCA2']['ENST00000380152.7'], seq_len) ps
_____no_output_____
MIT
Library_design/CTP-10_Aire/Merfish_design_from_XJP.ipynb
shiwei23/Chromatin_Analysis_Scripts
Primer design
import pandas as pd

# Load the primer candidates into data frames.
forward_primers, reverse_primers = fio.load_primers(forward_primer_file, reverse_primer_file)
display(forward_primers)
display(reverse_primers)

# Select primers.
# Make an off-target table from the current probe sequences.
ottable_target_readout = ot.get_OTTable_for_probe_dictionary(
    probe_dict, 'target_readout_sequence', 15)

# Calculate the off-targets for the primer sequences and their reverse-complements.
# Usually, there shouldn't be any off-targets.
ot.calc_OTs_df(forward_primers, ottable_target_readout, 'sequence', 'sequence_OT', 15)
ot.calc_OTs_df(forward_primers, ottable_target_readout, 'sequence_rc', 'sequence_rc_OT', 15)
ot.calc_OTs_df(reverse_primers, ottable_target_readout, 'sequence', 'sequence_OT', 15)
ot.calc_OTs_df(reverse_primers, ottable_target_readout, 'sequence_rc', 'sequence_rc_OT', 15)

# Select the primers with the lowest off-target counts.
forward_primers = primer_design.randomly_select_primers_with_lowest_OT(forward_primers)
reverse_primers = primer_design.randomly_select_primers_with_lowest_OT(reverse_primers)

# Now each primer table should contain only a single row: the selected primer.
display(forward_primers)
display(reverse_primers)

# Save the selected primers.
# NOTE: DataFrame.append is deprecated (removed in pandas 2.0) -- use pd.concat.
pd.concat([forward_primers, reverse_primers], ignore_index=True).to_csv(selected_primers_file)

# Add the primer sequences.
# NOTE: the sequence after primer addition should be
# (reverse_primer)-(target_readouts)-(forward_primer_rc).
primer_design.add_primer_sequences(probe_dict,
                                   reverse_primers.iloc[0]['sequence'],
                                   forward_primers.iloc[0]['sequence_rc'],
                                   input_column='target_readout_sequence',
                                   output_column='target_readout_primer_sequence')

# The T7 promoter (the first 17 bases of the reverse primer) is lost after
# in vitro transcription. Create a column of the T7-transcribed sequences
# for the subsequent quality check.
primer_design.add_primer_sequences(probe_dict,
                                   reverse_primers.iloc[0]['sequence'][17:],
                                   forward_primers.iloc[0]['sequence_rc'],
                                   input_column='target_readout_sequence',
                                   output_column='target_readout_primer_sequence_t7_transcribed')
_____no_output_____
MIT
Library_design/CTP-10_Aire/Merfish_design_from_XJP.ipynb
shiwei23/Chromatin_Analysis_Scripts
Quality check
# Filter out probes that have off-targets to rRNA/tRNAs ot.calc_OTs(probe_dict, ottable_rtRNAs, 'target_readout_primer_sequence_t7_transcribed', 'target_readout_primer_t7_transcribed_OT_rtRNA', 15) plot.plot_hist(probe_dict, 'target_readout_primer_t7_transcribed_OT_rtRNA', y_max=400) filters.filter_probe_dict_by_metric(probe_dict, 'target_readout_primer_t7_transcribed_OT_rtRNA', upper_bound=0.5) plot.plot_hist(probe_dict, 'target_readout_primer_t7_transcribed_OT_rtRNA') # Calculate how many more off-targets to the transcriptome are introduced due to the primer sequences. # The off-target counts are weighted down by the FPKMs of the on-target transcripts ot.calc_OT_diffs(probe_dict, ottable_transcriptome, gene_ottable_dict, transcript_fpkms, 'target_readout_sequence', 'target_readout_primer_sequence_t7_transcribed', 'primer_OT_increase', 17) plot.plot_hist(probe_dict, 'primer_OT_increase', y_max=400) # Filter out the probes with extra off-targets due to the primers # Require the new weighted off-targets to be minor compared to the on-target weight. filters.filter_probe_dict_by_metric(probe_dict, 'primer_OT_increase', upper_bound=0.25 * (30 - 17 + 1)) plot.plot_hist(probe_dict, 'primer_OT_increase') %%time # Filter out the probes that self complement or complement with other probes. # Iterately remove the probes with high numbers of cis/trans-complementarity # This filtering strategy is a compromise between speed and the number of probes to keep while True: # Make a OTTable from the reverse-complement sequences of the probes. 
ottable_probes_rc = ot.get_OTTable_for_probe_dictionary(probe_dict, 'target_readout_primer_sequence', 15, rc=True) # The off-targets in this table indicates cis/trans-complementarity ot.calc_OTs(probe_dict, ottable_probes_rc, 'target_readout_primer_sequence', 'probe_cis_trans_OT', 15) max_ot = max(plot.get_values_from_probe_dict(probe_dict, 'probe_cis_trans_OT')) if max_ot == 0: break # Remove probes that have any cis/trans-complementarity filters.filter_probe_dict_by_metric(probe_dict, 'probe_cis_trans_OT', upper_bound=max_ot - 0.5) plot.plot_hist(probe_dict, 'probe_cis_trans_OT') # Also get the reverse-complementary sequences of the designed probes p_d.get_rc_sequences(probe_dict, 'target_readout_primer_sequence', 'target_readout_primer_sequence_rc') # Write the designed probes p_d.probe_dict_to_df(probe_dict).to_csv(probe_output_file, index=False) # Write the transcript level report transcript_level_report = qc.generate_transcript_level_report(probe_dict, transcriptome) display(transcript_level_report) transcript_level_report.to_csv(transcript_level_report_file, index=False)
_____no_output_____
MIT
Library_design/CTP-10_Aire/Merfish_design_from_XJP.ipynb
shiwei23/Chromatin_Analysis_Scripts
Get file paths
# Input files: the mesh and the exported magnetisation.
msh_file = "DW_MeshConv4nm_25nmSep.vtu"
mag_file = "DW_MeshConv4nm_25nmSep.csv"

# Scale for the points (presumably metres -> nanometres; confirm against the mesh units).
scale = 1e9
_____no_output_____
MIT
testing/mumax_projection-Copy1.ipynb
Skoricius/xmcd-projection
Generate raytracing - skip if generated
# Load the mesh, scaling its points to nm.
msh = Mesh.from_file(msh_file, scale=scale)
_____no_output_____
MIT
testing/mumax_projection-Copy1.ipynb
Skoricius/xmcd-projection
Make sure that the projection vector is correct and that the structure is oriented well
# Projection vector: the direction of the x-rays.
p = get_projection_vector(90, 0)
# Normal to the projection plane.
n = [0, 1, 1]
# A point on the projection plane.
x0 = [-100, 0, 0]

# Prepare the raytracing object, then visualize the structure
# together with its projection to check the orientation.
raytr = RayTracing(msh, p, n=n, x0=x0)
struct = raytr.struct
struct_projected = raytr.struct_projected

vis = MeshVisualizer(struct, struct_projected)
vis.set_camera(dist=2e5)
vis.show()
_____no_output_____
MIT
testing/mumax_projection-Copy1.ipynb
Skoricius/xmcd-projection
If raytracing file generated - skip if not
# Load a previously generated raytracing object, if one exists.
# NOTE: allow_pickle=True executes arbitrary code on load -- only use with
# trusted, locally generated files.
raytr = np.load("raytracing.npy", allow_pickle=True).item()
struct = raytr.struct
struct_projected = raytr.struct_projected
_____no_output_____
MIT
testing/mumax_projection-Copy1.ipynb
Skoricius/xmcd-projection
Generate and save raytracing
# Compute the ray piercings (slow; a progress bar is shown).
raytr.get_piercings()
# Optionally cache the result for later runs:
# np.save("raytracing.npy", raytr, allow_pickle=True)
100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 56592/56592 [07:29<00:00, 125.99it/s]
MIT
testing/mumax_projection-Copy1.ipynb
Skoricius/xmcd-projection
Get the XMCD Get magnetisation, fix vertex shuffling. Note: sometimes, if the mesh file has multiple parts, the paraview export and the original mesh coordinates are not in the same order. I add a function to fix that when necessary.
# Load the magnetisation exported from paraview.
magnetisation, mag_points = load_mesh_magnetisation(mag_file, scale=scale)

# When the mesh has multiple parts, the paraview export and the original mesh
# coordinates may not be in the same order. Keep an on-disk cache of the
# shuffle index that re-aligns them (generated on the first run).
shuffle_file = "shuffle_indx.npy"
try:
    shuffle_indx = np.load(shuffle_file)
except FileNotFoundError:
    print('File not found. Generating shuffle indx')
    shuffle_indx = msh.get_shuffle_indx(mag_points)
    np.save(shuffle_file, shuffle_indx)

# The original cell then re-loaded the magnetisation and recomputed the shuffle
# index unconditionally, overriding the cached value. Preserve that final state
# here without the redundant second load: recompute the index fresh and apply
# it once.
shuffle_indx = msh.get_shuffle_indx(mag_points)
magnetisation = magnetisation[shuffle_indx, :]
_____no_output_____
MIT
testing/mumax_projection-Copy1.ipynb
Skoricius/xmcd-projection
Get the colours and XMCD values
# XMCD values for the projection and per-face magnetisation colours.
xmcd_value = raytr.get_xmcd(magnetisation)
mag_colors = get_struct_face_mag_color(struct, magnetisation)

# Camera presets.
azi = 90
center_struct = [0, 0, 0]
dist_struct = 1e4
center_peem = [100, -200, 0]
dist_peem = 8e4

vis = MeshVisualizer(struct, struct_projected,
                     projected_xmcd=xmcd_value, struct_colors=mag_colors)
vis.show(azi=azi, center=center_peem, dist=dist_peem)
Image.fromarray(vis.get_image_np())
_____no_output_____
MIT
testing/mumax_projection-Copy1.ipynb
Skoricius/xmcd-projection
View different parts of the image separately Both
# Refresh the colours, then render the structure and its projection together.
vis.update_colors(xmcd_value, mag_colors)
vis.view_both(azi=azi, center=center_peem, dist=dist_peem)
Image.fromarray(vis.get_image_np())
_____no_output_____
MIT
testing/mumax_projection-Copy1.ipynb
Skoricius/xmcd-projection
Projection
# Render only the projection.
vis.view_projection(azi=azi, center=center_peem, dist=dist_peem)
Image.fromarray(vis.get_image_np())
_____no_output_____
MIT
testing/mumax_projection-Copy1.ipynb
Skoricius/xmcd-projection
Structure
# Render only the structure, with its own camera centre and distance.
center_struct = [75, 50, 0]
dist_struct = 1e4
vis.view_struct(azi=azi, center=center_struct, dist=dist_struct)
Image.fromarray(vis.get_image_np())
_____no_output_____
MIT
testing/mumax_projection-Copy1.ipynb
Skoricius/xmcd-projection
Blurred image
# Render the projection and display a blurred version (scaled to 8-bit).
vis.view_projection(azi=azi, center=center_peem, dist=dist_peem)
Image.fromarray((vis.get_blurred_image(desired_background=0.7) * 255).astype(np.uint8))
_____no_output_____
MIT
testing/mumax_projection-Copy1.ipynb
Skoricius/xmcd-projection
Saving one render
# Save renders: the combined view, the blurred projection, and the structure view.
vis.view_both(azi=azi, center=center_peem, dist=dist_peem)
vis.save_render('mumax_shadow.png')

vis.view_projection()
blurred = vis.get_blurred_image(desired_background=0.7)
imsave('mumax_shadow_blurred.png', (blurred * 255).astype(np.uint8), check_contrast=False)

vis.view_struct(azi=azi, center=center_struct, dist=dist_struct)
vis.save_render('mumax_structure_view.png')
_____no_output_____
MIT
testing/mumax_projection-Copy1.ipynb
Skoricius/xmcd-projection
The histogram looks a little fishy. The general shape of the histogram is not surprising, but the limits are. We don't even make it to $150K here, so c-suite salaries are likely excluded.
# Bar chart of the average salary by title.
mean_salary_by_title = data_df.groupby(['title'])['salary'].mean()
mean_salary_by_title.plot.bar()
_____no_output_____
ADSL
EmployeeSQL/sql-challenge-bonus.ipynb
CraftyJack/sql-challenge
This definitely looks fishy. The average salaries are all really tightly clustered. There also doesn't seem to be much point to advancement. For example, Assistant Engineer, Engineer, and Senior Engineer all make about the same on average. With that, let's take a look behind the curtain.
# Look up employee 499942 with salary and title joined in.
punchline = pd.read_sql(
    "SELECT * FROM employees "
    "INNER JOIN salaries ON employees.emp_no = salaries.emp_no "
    "INNER JOIN titles ON employees.emp_title = titles.title_id "
    "WHERE employees.emp_no = 499942",
    connection)
print(punchline)
emp_no emp_title birth_date first_name last_name sex hire_date emp_no \ 0 499942 e0004 1963-01-10 April Foolsday F 1997-02-10 499942 salary title_id title 0 40000 e0004 Technique Leader
ADSL
EmployeeSQL/sql-challenge-bonus.ipynb
CraftyJack/sql-challenge
Dask Overview Dask is a flexible library for parallel computing in Python that makes scaling out your workflow smooth and simple. On the CPU, Dask uses Pandas (NumPy) to execute operations in parallel on DataFrame (array) partitions.Dask-cuDF extends Dask where necessary to allow its DataFrame partitions to be processed by cuDF GPU DataFrames as opposed to Pandas DataFrames. For instance, when you call dask_cudf.read_csv(…), your cluster’s GPUs do the work of parsing the CSV file(s) with underlying cudf.read_csv(). Dask also supports array based workflows using CuPy. When to use DaskIf your workflow is fast enough on a single GPU or your data comfortably fits in memory on a single GPU, you would want to use cuDF or CuPy. If you want to distribute your workflow across multiple GPUs, have more data than you can fit in memory on a single GPU, or want to analyze data spread across many files at once, you would want to use Dask.One additional benefit Dask provides is that it lets us easily spill data between device and host memory. This can be very useful when we need to do work that would otherwise cause out of memory errors.In this brief notebook, you'll walk through an example of using Dask on a single GPU. Because we're using Dask, the same code in this notebook would work on two, eight, 16, or 100s of GPUs. Creating a Local ClusterThe easiest way to scale workflows on a single node is to use the `LocalCUDACluster` API. This lets us create a GPU cluster, using one worker per GPU by default.In this case, we'll pass the following arguments. - `CUDA_VISIBLE_DEVICES`, to limit our cluster to a single GPU (for demonstration purposes).- `device_memory_limit`, to illustrate how we can spill data between GPU and CPU memory. 
Artificial memory limits like this reduce our performance if we don't actually need them, but can let us accomplish much larger tasks when we do.- `rmm_pool_size`, to use the RAPIDS Memory Manager to allocate one big chunk of memory upfront rather than having our operations call `cudaMalloc` all the time under the hood. This improves performance, and is generally a best practice.
from dask.distributed import Client, fire_and_forget, wait
from dask_cuda import LocalCUDACluster
from dask.utils import parse_bytes
import dask

# One worker per visible GPU; spill to host memory past the device limit and
# pre-allocate an RMM pool instead of calling cudaMalloc per operation.
cluster = LocalCUDACluster(
    # NOTE(review): two devices are listed although the surrounding text
    # describes limiting to a single GPU -- confirm which is intended.
    CUDA_VISIBLE_DEVICES="0,1",
    device_memory_limit=parse_bytes("3GB"),
    rmm_pool_size=parse_bytes("16GB"),
)
client = Client(cluster)
client
_____no_output_____
BSD-3-Clause
Notebooks/03-Intro_to_Dask.ipynb
quasiben/rapids-dask-tutorial-2021
Click the **Dashboard** link above to view your Dask dashboard. cuDF DataFrames to Dask DataFrames Dask lets us scale our cuDF workflows. We'll walk through a couple of examples below, and then also highlight how Dask lets us spill data from GPU to CPU memory.First, we'll create a dataframe with CPU Dask and then send it to the GPU
import cudf
import dask_cudf

# Build a CPU Dask dataframe and send it to the GPU.
ddf = dask_cudf.from_dask_dataframe(dask.datasets.timeseries())
ddf.head()
_____no_output_____
BSD-3-Clause
Notebooks/03-Intro_to_Dask.ipynb
quasiben/rapids-dask-tutorial-2021
Example One: Groupby-Aggregations
# Groupby-aggregation across the distributed GPU dataframe.
ddf.groupby(["id", "name"]).agg({"x": ['sum', 'mean']}).head()
_____no_output_____
BSD-3-Clause
Notebooks/03-Intro_to_Dask.ipynb
quasiben/rapids-dask-tutorial-2021
Run the code above again.If you look at the task stream in the dashboard, you'll notice that we're creating the data every time. That's because Dask is lazy. We need to `persist` the data if we want to cache it in memory.
# Cache the lazy dataframe in memory, wait for it, then rerun the aggregation.
ddf = ddf.persist()
wait(ddf);
ddf.groupby(["id", "name"]).agg({"x": ['sum', 'mean']}).head()
_____no_output_____
BSD-3-Clause
Notebooks/03-Intro_to_Dask.ipynb
quasiben/rapids-dask-tutorial-2021
This is the same API as cuDF, except it works across many GPUs. Example Two: Rolling WindowsWe can also do things like rolling window calculations with Dask and GPUs.
ddf.head()

# Rolling-window mean over the two value columns.
rolling = ddf[['x', 'y']].rolling(window=3)
type(rolling)
rolling.mean().head()
_____no_output_____
BSD-3-Clause
Notebooks/03-Intro_to_Dask.ipynb
quasiben/rapids-dask-tutorial-2021
Larger than GPU Memory WorkflowsWhat if we needed to scale up even more, but didn't have enough GPU memory? Dask handles spilling for us, so we don't need to worry about it. The `device_memory_limit` parameter we used while creating the LocalCluster determines when we should start spilling. In this case, we'll start spilling when we've used about 4GB of GPU memory.Let's create a larger dataframe to use as an example.
# A larger dataframe: nearly four years of data in 60-day partitions.
ddf = dask_cudf.from_dask_dataframe(
    dask.datasets.timeseries(start="2000-01-01", end="2003-12-31", partition_freq='60d'))
ddf = ddf.persist()
len(ddf)
print(f"{ddf.memory_usage(deep=True).sum().compute() / 1e9} GB of data")
ddf.head()
_____no_output_____
BSD-3-Clause
Notebooks/03-Intro_to_Dask.ipynb
quasiben/rapids-dask-tutorial-2021
Let's imagine we have some downstream operations that require all the data from a given unique identifier in the same partition. We can repartition our data based on the `id` column using the `shuffle` API.Repartitioning our large dataframe will spike GPU memory higher than 4GB, so we'll need to spill to CPU memory.
# Repartition so all rows sharing an "id" land in the same partition.
# This spikes GPU memory past the device limit, so Dask spills to host memory.
ddf = ddf.shuffle(on="id")
ddf = ddf.persist()
len(ddf)
_____no_output_____
BSD-3-Clause
Notebooks/03-Intro_to_Dask.ipynb
quasiben/rapids-dask-tutorial-2021
Watch the Dask Dashboard while this runs. You should see a lot of tasks in the stream like `disk-read` and `disk-write`. Setting a `device_memory_limit` tells dask to spill to CPU memory and potentially disk (if we overwhelm CPU memory). This lets us do these large computations even when we're almost out of memory (though in this case, we faked it). Dask Custom FunctionsDask DataFrames also provide a `map_partitions` API, which is very useful for parallelizing custom logic that doesn't quite fit perfectly or doesn't need to be used with the Dask dataframe API. Dask will `map` the function to every partition of the distributed dataframe.Now that we have all the rows of each `id` collected in the same partitions, what if we just wanted to sort **within each partition**. Avoiding global sorts is usually a good idea if possible, since they're very expensive operations.
# Sort within each partition only -- much cheaper than a global sort.
sorted_ddf = ddf.map_partitions(lambda part: part.sort_values("id"))
len(sorted_ddf)
_____no_output_____
BSD-3-Clause
Notebooks/03-Intro_to_Dask.ipynb
quasiben/rapids-dask-tutorial-2021
We could also do something more complicated and wrap it into a function. Let's do a rolling window on the two value columns after sorting by the id column.
def sort_and_rolling_mean(df):
    """Sort one partition by "id", then take the 3-row rolling mean of x and y."""
    df = df.sort_values("id")
    df = df.rolling(3)[["x", "y"]].mean()
    return df


result = ddf.map_partitions(sort_and_rolling_mean)
result = result.persist()
wait(result);

# Inspect an arbitrary partition.
result.partitions[12].head()
_____no_output_____
BSD-3-Clause
Notebooks/03-Intro_to_Dask.ipynb
quasiben/rapids-dask-tutorial-2021
Pretty cool. When we're using `map_partitions`, the function is executing on the individual cuDF DataFrames that make up our Dask DataFrame. This means we can do any cuDF operation, run CuPy array manipulations, or anything else we want. Dask DelayedDask also provides a `delayed` API, which is useful for parallelizing custom logic that doesn't quite fit into the DataFrame API.Let's imagine we wanted to run thousands of regressions models on different combinations of two features. We can do this experiment super easily with dask.delayed.
from cuml.linear_model import LinearRegression
from dask import delayed
import dask
import numpy as np
from itertools import combinations

# Set up random data: 50 feature columns and one target column.
np.random.seed(12)
nrows = 1000000
ncols = 50
df = cudf.DataFrame({f"x{i}": np.random.randn(nrows) for i in range(ncols)})
df['y'] = np.random.randn(nrows)

# Every pair of feature columns.
feature_combinations = list(combinations(df.columns.drop("y"), 2))
feature_combinations[:10]
len(feature_combinations)

# Many calls to linear regression, parallelized with Dask.
@delayed
def fit_ols(df, feature_cols, target_col="y"):
    """Fit OLS on one feature pair; return (features, coefficients, intercept)."""
    clf = LinearRegression()
    clf.fit(df[list(feature_cols)], df[target_col])
    return feature_cols, clf.coef_, clf.intercept_

# Scatter the data to the workers beforehand and pass the resulting future
# into each delayed call.
data_future = client.scatter(df, broadcast=True)
results = [fit_ols(data_future, features) for features in feature_combinations]

res = dask.compute(results)
res = res[0]

print("Features\t\tCoefficients\t\t\tIntercept")
for i in range(5):
    print(res[i][0], res[i][1].values, res[i][2], sep="\t")
_____no_output_____
BSD-3-Clause
Notebooks/03-Intro_to_Dask.ipynb
quasiben/rapids-dask-tutorial-2021
Handling Parquet FilesDask and cuDF provide accelerated Parquet readers and writers, and it's useful to take advantage of these tools.To start, let's write out our DataFrame `ddf` to Parquet files using the `to_parquet` API and delete it from memory.
# Write the dataframe out as Parquet (one file per partition) and free it.
print(ddf.npartitions)
ddf.to_parquet("ddf.parquet")
del ddf
_____no_output_____
BSD-3-Clause
Notebooks/03-Intro_to_Dask.ipynb
quasiben/rapids-dask-tutorial-2021
Let's take a look at what happened.
# List a few of the files Dask wrote (IPython shell escape).
!ls ddf.parquet | head
_____no_output_____
BSD-3-Clause
Notebooks/03-Intro_to_Dask.ipynb
quasiben/rapids-dask-tutorial-2021
We end up with many parquet files, and one metadata file. Dask will write one file per partition.Let's read the data back in with `dask_cudf.read_parquet`.
# Read the Parquet dataset back onto the GPU.
ddf = dask_cudf.read_parquet("ddf.parquet/")
ddf
_____no_output_____
BSD-3-Clause
Notebooks/03-Intro_to_Dask.ipynb
quasiben/rapids-dask-tutorial-2021
Why do we have more partitions than files? It turns out, Dask's readers do things like chunk our data by default. Additionally, the `_metadata` file helps provide guidelines for reading the data. But, we can still read them on a per-file basis if want by using a `*` wildcard in the filepath and ignoring the metadata.
# Read on a per-file basis via a wildcard, ignoring the _metadata file.
ddf = dask_cudf.read_parquet("ddf.parquet/*.parquet")
ddf
_____no_output_____
BSD-3-Clause
Notebooks/03-Intro_to_Dask.ipynb
quasiben/rapids-dask-tutorial-2021
Let's now write one big parquet file and then read it back in. We can `repartition` our dataset down to a single partition.
# Write one big Parquet file (single partition), then read it back.
ddf.repartition(npartitions=1).to_parquet("big_ddf.parquet")
dask_cudf.read_parquet("big_ddf.parquet/")
_____no_output_____
BSD-3-Clause
Notebooks/03-Intro_to_Dask.ipynb
quasiben/rapids-dask-tutorial-2021
We still get lots of partitions? We can control the splitting behavior using the `split_row_groups` parameter.
# Keep each file as one partition instead of splitting by row group.
dask_cudf.read_parquet("big_ddf.parquet/", split_row_groups=False)
_____no_output_____
BSD-3-Clause
Notebooks/03-Intro_to_Dask.ipynb
quasiben/rapids-dask-tutorial-2021
View source on GitHub Notebook Viewer Run in Google Colab Install Earth Engine API and geemapInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://geemap.org). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemapdependencies), including earthengine-api, folium, and ipyleaflet.
# Install the geemap package if it is not already available.
import subprocess

try:
    import geemap
except ImportError:
    print('Installing geemap ...')
    subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])

import ee
import geemap
_____no_output_____
MIT
FeatureCollection/distance.ipynb
OIEIEIO/earthengine-py-notebooks
Create an interactive map The default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function.
# Create an interactive map centred on the continental US.
Map = geemap.Map(center=[40, -100], zoom=4)
Map
_____no_output_____
MIT
FeatureCollection/distance.ipynb
OIEIEIO/earthengine-py-notebooks
Add Earth Engine Python script
# Add Earth Engine dataset
# Collection.distance example: computes the distance to the nearest
# feature in a collection.

# Construct a FeatureCollection from a list of geometries.
fc = ee.FeatureCollection([
    ee.Geometry.Point(-72.94411, 41.32902),
    ee.Geometry.Point(-72.94411, 41.33402),
    ee.Geometry.Point(-72.94411, 41.33902),
    # The geometries do not need to be the same type.
    ee.Geometry.LineString(
        -72.93411, 41.30902, -72.93411, 41.31902, -72.94411, 41.31902),
])

# Compute the distance from the features, to a max of 1000 meters.
distance = fc.distance(1000, 100)

Map.setCenter(-72.94, 41.32, 13)
Map.addLayer(distance,
             {'min': 0, 'max': 1000, 'palette': ['yellow', 'red']},
             'distance')
Map.addLayer(fc, {}, 'Features')
_____no_output_____
MIT
FeatureCollection/distance.ipynb
OIEIEIO/earthengine-py-notebooks
Display Earth Engine data layers
# Add a layer control and display the map.
Map.addLayerControl()  # Not needed for the ipyleaflet-based Map.
Map
_____no_output_____
MIT
FeatureCollection/distance.ipynb
OIEIEIO/earthengine-py-notebooks
Hypothesis: Are digitised practices causing more failures? HypothesisWe believe that practices undergoing Lloyd George digitisation have an increased failure rate. We will know this to be true when we look at their data for the last three months, and see that either their failures have increased, or that in general their failures are higher than average. ContextFrom the months of May-Aug 2021, we see a steady increase of TPP-->EMIS Large message general failures. A general hypothesis is that this is due to record sizes increasing, which could be due to Lloyd George digitisation. This has prompted a more general hypothesis to identify whether digitisation is impacting failure rates. Scope- Generate a transfer outcomes table for each of the below CCGs split down for May, June, July: - Sunderland - Fylde and Wyre - Chorley and South Ribble - Blackpool - Birmingham and Solihull - Show technical failure rate for each month, for each practice in the CCG- Separate out outcomes for transfers in, and transfers out- Do this for practices as a sender and as a requester
import pandas as pd
import numpy as np
import paths
from data.practice_metadata import read_asid_metadata

# ASID -> organisation metadata lookup.
asid_lookup = read_asid_metadata("prm-gp2gp-ods-metadata-preprod",
                                 "v2/2021/8/organisationMetadata.json")

# Transfer data for May, June and July 2021.
transfer_file_location = "s3://prm-gp2gp-transfer-data-preprod/v4/"
transfer_files = [
    "2021/5/transfers.parquet",
    "2021/6/transfers.parquet",
    "2021/7/transfers.parquet",
]
transfer_input_files = [transfer_file_location + f for f in transfer_files]
transfers_raw = pd.concat((pd.read_parquet(f) for f in transfer_input_files))

# Attach requesting/sending practice metadata and a month period column.
transfers = transfers_raw\
    .join(asid_lookup.add_prefix("requesting_"), on="requesting_practice_asid", how="left")\
    .join(asid_lookup.add_prefix("sending_"), on="sending_practice_asid", how="left")\

transfers['month'] = transfers['date_requested'].dt.to_period('M')


def generate_monthly_outcome_breakdown(transfers, columns):
    """Break down transfer outcomes, grouped by `columns`.

    Returns a dataframe with the total transfer count, per-status counts,
    an ALL_FAILURE count (anything not INTEGRATED_ON_TIME), and the
    corresponding percentages. Missing combinations are filled with 0.
    """
    total_transfers = (
        transfers
        .groupby(columns)
        .size()
        .to_frame("Total Transfers")
    )

    # Counts per outcome status.
    transfer_outcomes = pd.pivot_table(
        transfers, index=columns, columns=["status"], aggfunc='size')

    transfer_outcomes_pc = (
        transfer_outcomes
        .div(total_transfers["Total Transfers"], axis=0)
        .multiply(100)
        .round(2)
        .add_suffix(" %")
    )

    # Anything that did not integrate on time counts as a failure.
    failed_transfers = (
        transfers
        .assign(failed_transfer=transfers["status"] != "INTEGRATED_ON_TIME")
        .groupby(columns)
        .agg({'failed_transfer': 'sum'})
        .rename(columns={'failed_transfer': 'ALL_FAILURE'})
    )

    failed_transfers_pc = (
        failed_transfers
        .div(total_transfers["Total Transfers"], axis=0)
        .multiply(100)
        .round(2)
        .add_suffix(" %")
    )

    return pd.concat([
        total_transfers,
        transfer_outcomes,
        failed_transfers,
        transfer_outcomes_pc,
        failed_transfers_pc,
    ], axis=1).fillna(0)
_____no_output_____
Apache-2.0
notebooks/66-PRMT-2332--lloyd-george-digitisation-ccgs-failure-rates.ipynb
nhsconnect/prm-gp2gp-data-sandbox
Generate national transfer outcomes
# National (all-CCG) baseline, broken down by month.
national_metrics_monthly = generate_monthly_outcome_breakdown(transfers, ["month"])
national_metrics_monthly
_____no_output_____
Apache-2.0
notebooks/66-PRMT-2332--lloyd-george-digitisation-ccgs-failure-rates.ipynb
nhsconnect/prm-gp2gp-data-sandbox
Generate digitised CCG transfer outcomes
# CCGs under investigation (undergoing Lloyd George digitisation).
ccgs_to_investigate = [
    "NHS SUNDERLAND CCG",
    'NHS FYLDE AND WYRE CCG',
    'NHS CHORLEY AND SOUTH RIBBLE CCG',
    'NHS BLACKPOOL CCG',
    'NHS BIRMINGHAM AND SOLIHULL CCG',
]

is_requesting_ccg_of_interest = transfers.requesting_ccg_name.isin(ccgs_to_investigate)
is_sending_ccg_of_interest = transfers.sending_ccg_name.isin(ccgs_to_investigate)

requesting_transfers_of_interest = transfers[is_requesting_ccg_of_interest]
sending_transfers_of_interest = transfers[is_sending_ccg_of_interest]
_____no_output_____
Apache-2.0
notebooks/66-PRMT-2332--lloyd-george-digitisation-ccgs-failure-rates.ipynb
nhsconnect/prm-gp2gp-data-sandbox
Requesting CCGs (Digitised)
# Monthly outcomes for the digitised CCGs as requesters.
requesting_ccgs_monthly = generate_monthly_outcome_breakdown(
    transfers=requesting_transfers_of_interest,
    columns=["requesting_ccg_name", "month"],
)
requesting_ccgs_monthly
_____no_output_____
Apache-2.0
notebooks/66-PRMT-2332--lloyd-george-digitisation-ccgs-failure-rates.ipynb
nhsconnect/prm-gp2gp-data-sandbox
Sending CCGs (Digitised)
# Monthly outcomes for the digitised CCGs as senders.
sending_ccgs_monthly = generate_monthly_outcome_breakdown(
    transfers=sending_transfers_of_interest,
    columns=["sending_ccg_name", "month"],
)
sending_ccgs_monthly
_____no_output_____
Apache-2.0
notebooks/66-PRMT-2332--lloyd-george-digitisation-ccgs-failure-rates.ipynb
nhsconnect/prm-gp2gp-data-sandbox
Requesting practices (digitised)
# Monthly outcomes per requesting practice within the digitised CCGs.
requesting_practices_monthly = generate_monthly_outcome_breakdown(
    transfers=requesting_transfers_of_interest,
    columns=["requesting_ccg_name", "requesting_practice_name",
             "requesting_practice_ods_code", "requesting_supplier", "month"],
)
requesting_practices_monthly
_____no_output_____
Apache-2.0
notebooks/66-PRMT-2332--lloyd-george-digitisation-ccgs-failure-rates.ipynb
nhsconnect/prm-gp2gp-data-sandbox
Sending practices (digitised)
# Monthly outcomes per sending practice within the digitised CCGs.
sending_practices_monthly = generate_monthly_outcome_breakdown(
    transfers=sending_transfers_of_interest,
    columns=["sending_ccg_name", "sending_practice_name",
             "sending_practice_ods_code", "sending_supplier", "month"],
)
sending_practices_monthly
_____no_output_____
Apache-2.0
notebooks/66-PRMT-2332--lloyd-george-digitisation-ccgs-failure-rates.ipynb
nhsconnect/prm-gp2gp-data-sandbox
Looking at failure rate trends by CCG when requesting a record
# Shared styling for the grouped bar plots.
barplot_config = {
    'color': ['lightsteelblue', 'cornflowerblue', 'royalblue'],
    'edgecolor': 'black',
    'kind': 'bar',
    'figsize': (15, 6),
    'rot': 30,
}


def requesting_ccg_barplot(column_name, title):
    """Plot `column_name` per month for each digitised CCG (requesting side),
    alongside the national baseline labelled "All CCGs"."""
    (
        pd
        .concat({'All CCGs': national_metrics_monthly}, names=['requesting_ccg_name'])
        .append(requesting_ccgs_monthly)
        .unstack()
        .plot(
            y=column_name,
            title=title,
            **barplot_config
        )
    )


requesting_ccg_barplot('ALL_FAILURE %', 'Total Failure Percentage (Digitised CCGs - Requesting)')
requesting_ccg_barplot('TECHNICAL_FAILURE %', 'Technical Failure Percentage (Digitised CCGs - Requesting)')
requesting_ccg_barplot('PROCESS_FAILURE %', 'Process Failure Percentage (Digitised CCGs - Requesting)')
# Fixed typo in the chart title ("Unlassified" -> "Unclassified").
requesting_ccg_barplot('UNCLASSIFIED_FAILURE %', 'Unclassified Failure Percentage (Digitised CCGs - Requesting)')
_____no_output_____
Apache-2.0
notebooks/66-PRMT-2332--lloyd-george-digitisation-ccgs-failure-rates.ipynb
nhsconnect/prm-gp2gp-data-sandbox
Looking at failure rate trends by CCG when sending a record
def sending_ccg_barplot(column_name, title):
    """Plot `column_name` per month for each digitised CCG (sending side),
    alongside the national baseline labelled "All CCGs"."""
    (
        pd
        .concat({'All CCGs': national_metrics_monthly}, names=['sending_ccg_name'])
        .append(sending_ccgs_monthly)
        .unstack()
        .plot(
            y=column_name,
            title=title,
            **barplot_config
        )
    )


sending_ccg_barplot('ALL_FAILURE %', 'Total Failure Percentage (Digitised CCGs - Sending)')
sending_ccg_barplot('TECHNICAL_FAILURE %', 'Technical Failure Percentage (Digitised CCGs - Sending)')
sending_ccg_barplot('PROCESS_FAILURE %', 'Process Failure Percentage (Digitised CCGs - Sending)')
# Fixed typo in the chart title ("Unlassified" -> "Unclassified").
sending_ccg_barplot('UNCLASSIFIED_FAILURE %', 'Unclassified Failure Percentage (Digitised CCGs - Sending)')
_____no_output_____
Apache-2.0
notebooks/66-PRMT-2332--lloyd-george-digitisation-ccgs-failure-rates.ipynb
nhsconnect/prm-gp2gp-data-sandbox
Write CCG transfer outcomes by sending and requesting practice to Excel
# Export every breakdown to one Excel workbook, one sheet per view.
with pd.ExcelWriter('PRMT-2332-Digitisation-Failure-Rates-May-July-2021.xlsx') as writer:
    national_metrics_monthly.to_excel(writer, sheet_name="National Baseline")
    requesting_ccgs_monthly.to_excel(writer, sheet_name="Digitised CCGs (Req)")
    sending_ccgs_monthly.to_excel(writer, sheet_name="Digitised CCGs (Send)")
    requesting_practices_monthly.to_excel(writer, sheet_name="Digitised Practices (Req)")
    sending_practices_monthly.to_excel(writer, sheet_name="Digitised Practices (Send)")
_____no_output_____
Apache-2.0
notebooks/66-PRMT-2332--lloyd-george-digitisation-ccgs-failure-rates.ipynb
nhsconnect/prm-gp2gp-data-sandbox
Data Collection Using Web Scraping To solve this problem we will need the following data :● List of neighborhoods in Pune.● Latitude and Longitudinal coordinates of those neighborhoods.● Venue data for each neighborhood. Sources● For the list of neighborhoods, I used(https://en.wikipedia.org/wiki/Category:Neighbourhoods_in_Pune)● For Latitude and Longitudinal coordinates: Python Geocoder Package(https://geocoder.readthedocs.io/)● For Venue data: Foursquare API (https://foursquare.com/) Methods to extract data from SourcesTo extract the data we will use python packages like requests, beautifulsoup and geocoder.We will use Requests and beautifulsoup packages for webscraping(https://en.wikipedia.org/wiki/Category:Neighbourhoods_in_Pune ) to get the list ofneighborhoods in Pune and geocoder package to get the latitude and longitude coordinates ofeach neighborhood.Then we will use Folium to plot these neighborhoods on the map. After that, we will use the foursquare API to get the venue data of those neighborhoods. Foursquare API will provide many categories of the venue data but we are particularly interested in the supermarket category in order to help us to solve the business problem. Imports
import numpy as np # library to handle data in a vectorized manner import pandas as pd # library for data analsysis pd.set_option("display.max_columns", None) pd.set_option("display.max_rows", None) import json # library to handle JSON files from geopy.geocoders import Nominatim # convert an address into latitude and longitude values !pip install geocoder import geocoder # to get coordinates !pip install requests import requests # library to handle requests from bs4 import BeautifulSoup # library to parse HTML and XML documents from pandas.io.json import json_normalize # tranform JSON file into a pandas dataframe print("Libraries imported.")
_____no_output_____
FTL
Developement/Data_Collection_Web_scraping.ipynb
Vivek1258/IBM-data-science-capstone
Collecting the neighborhood data using the Requests, BeautifulSoup, and Geocoder libraries
data = requests.get("https://en.wikipedia.org/wiki/Category:Neighbourhoods_in_Pune").text # parse data from the html into a beautifulsoup object soup = BeautifulSoup(data, 'html.parser') # create a list to store neighborhood data neighborhood_List = [] # append the data into the list for row in soup.find_all("div", class_="mw-category")[0].findAll("li"): neighborhood_List.append(row.text) # create a new DataFrame from the list Pune_df = pd.DataFrame({"Neighborhood": neighborhood_List}) Pune_df.tail() # define a function to get coordinates def get_cord(neighborhood): coords = None # loop until you get the coordinates while(coords is None): g = geocoder.arcgis('{}, Pune, Maharashtra'.format(neighborhood)) coords = g.latlng return coords # create a list and store the coordinates coords = [ get_cord(neighborhood) for neighborhood in Pune_df["Neighborhood"].tolist() ] coords[:10] df_coords = pd.DataFrame(coords, columns=['Latitude', 'Longitude']) # merge the coordinates into the original dataframe Pune_df['Latitude'] = df_coords['Latitude'] Pune_df['Longitude'] = df_coords['Longitude'] # check the neighborhoods and the coordinates print(Pune_df.shape) Pune_df.head(10) # save the DataFrame as CSV file Pune_df.to_csv("Pune_df.csv", index=False)
_____no_output_____
FTL
Developement/Data_Collection_Web_scraping.ipynb
Vivek1258/IBM-data-science-capstone
Collecting the neighborhood venue data using the Foursquare API
# define Foursquare Credentials and Version CLIENT_ID = '5HUDVH14DMECWUAFI2MICONBTTDPW1CCL1C4TFGE3FEHEUHJ' # your Foursquare ID CLIENT_SECRET = 'R0WIH5UIW2SADKBUW4B4WMY2QWBBT0Q02IURAXQXVJZMTDIV' # your Foursquare Secret VERSION = '20180605' # Foursquare API version print('Your credentails:') print('CLIENT_ID: ' + CLIENT_ID) print('CLIENT_SECRET:' + CLIENT_SECRET) radius = 3000 LIMIT = 150 venues = [] for lat, long, neighborhood in zip(Pune_df['Latitude'], Pune_df['Longitude'], Pune_df['Neighborhood']): # create the API request URL url = "https://api.foursquare.com/v2/venues/explore?client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}".format( CLIENT_ID, CLIENT_SECRET, VERSION, lat, long, radius, LIMIT) # make the GET request results = requests.get(url).json()["response"]['groups'][0]['items'] # return only relevant information for each nearby venue for venue in results: venues.append(( neighborhood, lat, long, venue['venue']['name'], venue['venue']['location']['lat'], venue['venue']['location']['lng'], venue['venue']['categories'][0]['name'])) # convert the venues list into a new DataFrame venues_df = pd.DataFrame(venues) # define the column names venues_df.columns = ['Neighborhood', 'Latitude', 'Longitude', 'VenueName', 'VenueLatitude', 'VenueLongitude', 'VenueCategory'] print(venues_df.shape) venues_df.head() print('There are {} uniques categories.'.format(len(venues_df['VenueCategory'].unique()))) # print out the list of categories venues_df['VenueCategory'].unique() venues_df.to_csv("venues_df.csv")
_____no_output_____
FTL
Developement/Data_Collection_Web_scraping.ipynb
Vivek1258/IBM-data-science-capstone
Monte Carlo Simulation This notebook provides an introduction to Monte Carlo simulation using a toy example. The example is built upon a famous casino game known as **Roulette** (Ruletka). During the game, players (bettors) make a bet on an integer, colour or a range and win if their bet was correct. Roulettes usually have 37 slots including one green (0), eighteen red (1-18) and eighteen black (19-36) slots. In our case, we assume the bettor is betting on the range 1-18 (which is equivalent to betting on red) and wins if the randomly tossed ball ends up on a slot in that range. Otherwise, the bettor loses. In the former case (win), the budget is increased by the amount of the bet; in the latter case (lose), the budget is reduced by that amount. In this example we will compare two bettors: - **Simple bettor** - has a fixed budget, has decided how many periods to play beforehand, and always bets the same amount independent of any other factor. - **Smart bettor** - has a fixed budget, has decided how many periods to play beforehand, bets the initial amount after winning, but doubles the bet if lost. The simulation is expected to show that the **Simple bettor** is a clear loser, while the **Smart bettor**, if the budget is large enough, can be a clear winner (in average terms). As the ball is tossed randomly, we will need a random integer generator. We will also plot the results, so plotting libraries will be handy as well. We will start by developing the **spinner** function and then write separate functions for the simple and smart bettors. We will simulate both multiple (e.g. 100) times to see what happens. Note: In both functions there is a component to make sure the budget does not become negative. While it is active in the simple bettor, for the smart one it is commented out for you to see what can happen if no condition is set on the budget (i.e. the budget can even become negative). To make it more realistic, you are encouraged to uncomment the following component in **smart bettor** and see what happens:``` if budget<=0: break```
import random # to generate random inetegers import matplotlib.pyplot as plt # to plot simulation import seaborn as sns # to plot distribution plt.rc('figure', figsize=(20.0, 10.0)) # make the default plots bigger # the random spinner def spinner(): slot = random.randint(1,37) if slot == 0: return "lost" elif 1<=slot <= 18: return "won" elif 19<=slot<=36: return "lost" # the simple bettor def simple_bettor(budget,bet,periods): X_axis = [] Y_axis = [] currentPeriod = 1 while currentPeriod <= periods: result = spinner() if result == "won": budget = budget + bet elif result == "lost": budget = budget - bet if budget<=0: break X_axis.append(currentPeriod) Y_axis.append(budget) currentPeriod = currentPeriod + 1 plt.plot(X_axis,Y_axis) return Y_axis[-1] # the smart/doubler bettor def smart_bettor(budget,bet,periods): X_axis = [] Y_axis = [] currentPeriod = 1 initial_bet = bet while currentPeriod <= periods: result = spinner() if result == "won": budget = budget + bet bet = initial_bet elif result == "lost": budget = budget - bet bet = bet*2 #if budget<=0: # break X_axis.append(currentPeriod) Y_axis.append(budget) currentPeriod = currentPeriod + 1 plt.subplot(121) plt.plot(X_axis,Y_axis) return Y_axis[-1] # the simulation of multiple possible futures (for simple) futures = 1 while futures < 101: simple_bettor(10000,100,1000) futures = futures + 1 plt.title('Simple bettor') plt.ylabel('Budget') plt.xlabel('Periods') plt.show() # the simulation of multiple possible futures (for smart) futures = 1 outcomes = [] while futures < 101: outcomes.append(smart_bettor(10000,100,1000)) futures = futures + 1 plt.title('Smart bettor') plt.ylabel('Budget') plt.xlabel('Periods') plt.subplot(122) sns.distplot(outcomes,bins=25,vertical=True) #plt.subplots_adjust(wspace=0.5) plt.show()
/usr/local/lib/python3.6/dist-packages/matplotlib/cbook/deprecation.py:106: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance. warnings.warn(message, mplDeprecation, stacklevel=1)
Apache-2.0
Monte_Carlo.ipynb
HrantDavtyan/BA_BA
Grid Search with Cross Validation
# import libraries import pandas as pd # data doesn't have headers, so let's create headers _headers = ['buying', 'maint', 'doors', 'persons', 'lug_boot', 'safety', 'car'] # read in cars dataset df = pd.read_csv('https://raw.githubusercontent.com/PacktWorkshops/The-Data-Science-Workshop/master/Chapter07/Dataset/car.data', names=_headers, index_col=None) df.info() # encode categorical variables _df = pd.get_dummies(df, columns=['buying', 'maint', 'doors', 'persons', 'lug_boot', 'safety']) _df.head() # separate features and labels DataFrames features = _df.drop(['car'], axis=1).values labels = _df[['car']].values import numpy as np from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import GridSearchCV clf = DecisionTreeClassifier() params = {'max_depth': np.arange(1, 8)} clf_cv = GridSearchCV(clf, param_grid=params, cv=5) clf_cv.fit(features, labels) print("Tuned Decision Tree Parameters: {}".format(clf_cv.best_params_)) print("Best score is {}".format(clf_cv.best_score_)) model = clf_cv.best_estimator_ model
_____no_output_____
MIT
Chapter07/Exercise7.07/Exercise7.07.ipynb
khieunguyen/The-Data-Science-Workshop
Tabulate results
import os import sys from typing import Tuple import pandas as pd from tabulate import tabulate from tqdm import tqdm sys.path.append('../src') from read_log_file import read_log_file LOG_HOME_DIR = os.path.join('../logs_v1/') assert os.path.isdir(LOG_HOME_DIR) MODEL_NAMES = ['logistic_regression', 'transformer_encoder', 'bert-base-uncased', 'bert-base-multilingual-cased'] SETUPS = ['zero', 'few50', 'few100', 'few150', 'few200', 'full', 'trg'] def get_best_score_from_dict(di: dict) -> dict: """Get max value from a dict""" keys_with_max_val = [] # find max value max_val = -float('inf') for k, v in di.items(): if v > max_val: max_val = v # find all keys with max value for k, v in di.items(): if v == max_val: keys_with_max_val.append(k) return { 'k': keys_with_max_val, 'v': max_val, } def create_best_results_df(langs: str) -> Tuple[pd.DataFrame, pd.DataFrame]: results_dict = {} for model_name in MODEL_NAMES: results_dict[model_name] = {} log_dir = os.path.join(LOG_HOME_DIR, langs, model_name) log_filenames = os.listdir(log_dir) for fname in log_filenames: results_dict[model_name][fname] = read_log_file( log_file_path=os.path.join(log_dir, fname), plot=False, verbose=False, )['best_val_metrics']['f1'] best_results_dict = {'Setup': SETUPS} best_hparams_dict = {'Setup': SETUPS} best_results_dict.update({model_name: [] for model_name in MODEL_NAMES}) best_hparams_dict.update({model_name: [] for model_name in MODEL_NAMES}) for model_name in MODEL_NAMES: for setup in SETUPS: best_score = get_best_score_from_dict( {k: v for k, v in results_dict[model_name].items() if k.startswith(f'{setup}_')} ) best_results_dict[model_name].append( best_score['v'] ) best_hparams_dict[model_name].append( best_score['k'] ) best_results_df = pd.DataFrame(best_results_dict) best_hparams_df = pd.DataFrame(best_hparams_dict) return best_results_df, best_hparams_df def highlight_best_score(df: pd.DataFrame) -> pd.DataFrame: """Highlight best score in each row""" return df.style.apply(lambda x: 
['background: red' if isinstance(v, float) and v == max(x.iloc[1:]) else '' for v in x], axis=1) def tabulate_markdown(df: pd.DataFrame) -> str: """Tabulate in markdown format and bold best scores in each row""" df = df.round(4) for model_name in MODEL_NAMES: df[model_name] = df[model_name].astype(str) for idx in range(len(df)): max_val = max(float(df.iloc[idx][model_name]) for model_name in MODEL_NAMES) for model_name in MODEL_NAMES: cell_val = float(df.iloc[idx][model_name]) if cell_val == max_val: df.at[idx, model_name] = f'**{cell_val}**' else: df.at[idx, model_name] = f'{cell_val}' return tabulate(df, headers='keys', showindex=False, tablefmt='github') best_results_dfs_dict = {} best_hparams_dfs_dict = {} for langs in tqdm(['enbg', 'enar', 'bgen', 'bgar', 'aren', 'arbg']): best_results_dfs_dict[langs], best_hparams_dfs_dict[langs] = create_best_results_df(langs)
100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 6/6 [00:19<00:00, 3.30s/it]
MIT
notebooks/tabulate_results_v1.ipynb
subhadarship/nlp4if-2021
en-bg
highlight_best_score(best_results_dfs_dict['enbg']) print(tabulate_markdown(best_results_dfs_dict['enbg'])) best_hparams_dfs_dict['enbg']
_____no_output_____
MIT
notebooks/tabulate_results_v1.ipynb
subhadarship/nlp4if-2021
en-ar
highlight_best_score(best_results_dfs_dict['enar']) print(tabulate_markdown(best_results_dfs_dict['enar'])) best_hparams_dfs_dict['enar']
_____no_output_____
MIT
notebooks/tabulate_results_v1.ipynb
subhadarship/nlp4if-2021
bg-en
highlight_best_score(best_results_dfs_dict['bgen']) print(tabulate_markdown(best_results_dfs_dict['bgen'])) best_hparams_dfs_dict['bgen']
_____no_output_____
MIT
notebooks/tabulate_results_v1.ipynb
subhadarship/nlp4if-2021
bg-ar
highlight_best_score(best_results_dfs_dict['bgar']) print(tabulate_markdown(best_results_dfs_dict['bgar'])) best_hparams_dfs_dict['bgar']
_____no_output_____
MIT
notebooks/tabulate_results_v1.ipynb
subhadarship/nlp4if-2021
ar-en
highlight_best_score(best_results_dfs_dict['aren']) print(tabulate_markdown(best_results_dfs_dict['aren'])) best_hparams_dfs_dict['aren']
_____no_output_____
MIT
notebooks/tabulate_results_v1.ipynb
subhadarship/nlp4if-2021
ar-bg
highlight_best_score(best_results_dfs_dict['arbg']) print(tabulate_markdown(best_results_dfs_dict['arbg'])) best_hparams_dfs_dict['arbg']
_____no_output_____
MIT
notebooks/tabulate_results_v1.ipynb
subhadarship/nlp4if-2021
Preparation
# dependencies import pandas as pd import numpy as np import missingno as msno import matplotlib.pyplot as plt import re from sklearn.model_selection import train_test_split from textwrap import wrap from sklearn.preprocessing import StandardScaler import warnings warnings.filterwarnings("ignore") import math %matplotlib inline # import data shelter_outcomes = pd.read_csv("C:/Users/sulem/OneDrive/Desktop/machin learnign/Project3/aac_shelter_outcomes.csv") # filter animal type for just cats cats = shelter_outcomes[shelter_outcomes['animal_type'] == 'Cat'] #print(cats.head()) # remove age_upon_outcome and recalculate to standard units (days) age = cats.loc[:,['datetime', 'date_of_birth']] # convert to datetime age.loc[:,'datetime'] = pd.to_datetime(age['datetime']) age.loc[:,'date_of_birth'] = pd.to_datetime(age['date_of_birth']) # calculate cat age in days cats.loc[:,'age'] = (age.loc[:,'datetime'] - age.loc[:,'date_of_birth']).dt.days # get dob info cats['dob_month'] = age.loc[:, 'date_of_birth'].dt.month cats['dob_day'] = age.loc[:, 'date_of_birth'].dt.day cats['dob_dayofweek'] = age.loc[:, 'date_of_birth'].dt.dayofweek # get month from datetime cats['month'] = age.loc[:,'datetime'].dt.month # get day of month cats['day'] = age.loc[:,'datetime'].dt.day # get day of week cats['dayofweek'] = age.loc[:, 'datetime'].dt.dayofweek # get hour of day cats['hour'] = age.loc[:, 'datetime'].dt.hour # get quarter cats['quarter'] = age.loc[:, 'datetime'].dt.quarter # clean up breed attribute # get breed attribute for processing # convert to lowercase, remove mix and strip whitespace # remove space in 'medium hair' to match 'longhair' and 'shorthair' # split on either space or '/' breed = cats.loc[:, 'breed'].str.lower().str.replace('mix', '').str.replace('medium hair', 'mediumhair').str.strip().str.split('/', expand=True) cats['breed'] = breed[0] cats['breed1'] = breed[1] # clean up color attribute # convert to lowercase # strip spaces # split on '/' color = cats.loc[:, 
'color'].str.lower().str.strip().str.split('/', expand=True) cats['color'] = color[0] cats['color1'] = color[1] # clean up sex_upon_outcome sex = cats['sex_upon_outcome'].str.lower().str.strip().str.split(' ', expand=True) sex[0].replace('spayed', True, inplace=True) sex[0].replace('neutered', True, inplace=True) sex[0].replace('intact', False, inplace=True) sex[1].replace(np.nan, 'unknown', inplace=True) cats['spayed_neutered'] = sex[0] cats['sex'] = sex[1] # add in domesticated attribute cats['domestic'] = np.where(cats['breed'].str.contains('domestic'), 1, 0) # combine outcome and outcome subtype into a single attribute cats['outcome_subtype'] = cats['outcome_subtype'].str.lower().str.replace(' ', '-').fillna('unknown') cats['outcome_type'] = cats['outcome_type'].str.lower().str.replace(' ', '-').fillna('unknown') cats['outcome'] = cats['outcome_type'] + '_' + cats['outcome_subtype'] # drop unnecessary columns cats.drop(columns=['animal_id', 'name', 'animal_type', 'age_upon_outcome', 'date_of_birth', 'datetime', 'monthyear', 'sex_upon_outcome', 'outcome_subtype', 'outcome_type'], inplace=True) #print(cats['outcome'].value_counts()) cats.head() cats.drop(columns=['breed1'], inplace=True) # Breed, Color, Color1, Spayed_Netured and Sex attributes need to be one hot encoded cats_ohe = pd.get_dummies(cats, columns=['breed', 'color', 'color1', 'spayed_neutered', 'sex']) cats_ohe.head() out_t={'euthanasia_suffering' : 0, 'died_in-kennel' : 0, 'return-to-owner_unknown' : 0, 'transfer_partner' : 1, 'euthanasia_at-vet' : 2, 'adoption_foster' : 3, 'died_in-foster' : 0, 'transfer_scrp' : 4, 'euthanasia_medical' : 0, 'transfer_snr' : 0, 'died_enroute' : 0, 'rto-adopt_unknown' : 0, 'missing_in-foster' : 0, 'adoption_offsite' : 0, 'adoption_unknown' :5,'euthanasia_rabies-risk' : 0, 'unknown_unknown' : 0, 'adoption_barn' : 0, 'died_unknown' : 0, 'died_in-surgery' : 0, 'euthanasia_aggressive' : 0, 'euthanasia_unknown' : 0, 'missing_unknown' : 0, 'missing_in-kennel' : 0, 
'missing_possible-theft' : 0, 'died_at-vet' : 0, 'disposal_unknown' : 0, 'euthanasia_underage' : 0, 'transfer_barn' : 0} #output is converted from string to catogries 0 to 5 represent each output # separate outcome from data outcome = cats_ohe['outcome'] cats_ohe.drop(columns=['outcome']) print(cats_ohe.head()) # split the data X_train, X_test, y_train, y_test = train_test_split(cats_ohe, outcome, test_size=0.2, random_state=0) X_train.drop(columns=['outcome'], inplace=True) y_train = [out_t[item] for item in y_train] #print(X_train.shape, X_test.shape, y_train.shape, y_test.shape) x_train_ar=X_train.values y_target_ar=np.asarray(y_train) x_train_ar = StandardScaler().fit(x_train_ar).transform(x_train_ar) print(x_train_ar.shape) print(y_target_ar.shape) unique, counts = np.unique(y_target_ar, return_counts=True) np.asarray((unique, counts)) plt.pie(np.asarray(( counts)), labels=np.unique(y_target_ar), startangle=90, autopct='%.1f%%') plt.show()
(23537, 140) (23537,)
Unlicense
Project4/Project4_sul_part4 (1).ipynb
wiggs555/cse7324project
Evaluation Modeling Exceptional Work
# Example adapted from https://github.com/rasbt/python-machine-learning-book/blob/master/code/ch12/ch12.ipynb # Original Author: Sebastian Raschka # This is the optional book we use in the course, excellent intuitions and straightforward programming examples # please note, however, that this code has been manipulated to reflect our assumptions and notation. import numpy as np from scipy.special import expit import pandas as pd import sys # start with a simple base classifier, which can't be fit or predicted # it only has internal classes to be used by classes that will subclass it class TwoLayerPerceptronBase(object): def __init__(self, n_hidden=30, C=0.0, epochs=500, eta=0.001, random_state=None,phi='sig'): np.random.seed(random_state) self.n_hidden = n_hidden self.l2_C = C self.epochs = epochs self.eta = eta self.phi=phi @staticmethod def _encode_labels(y): """Encode labels into one-hot representation""" onehot = pd.get_dummies(y).values.T return onehot def _initialize_weights(self): """Initialize weights with small random numbers.""" W1_num_elems = (self.n_features_ + 1)*self.n_hidden W1 = np.random.uniform(-1.0, 1.0,size=W1_num_elems) W1 = W1.reshape(self.n_hidden, self.n_features_ + 1) # reshape to be W W2_num_elems = (self.n_hidden + 1)*self.n_output_ W2 = np.random.uniform(-1.0, 1.0, size=W2_num_elems) W2 = W2.reshape(self.n_output_, self.n_hidden + 1) return W1, W2 @staticmethod def _sigmoid(z,phi): """Use scipy.special.expit to avoid overflow""" # 1.0 / (1.0 + np.exp(-z)) if phi=='sig': return expit(z) if phi=='lin': return z if phi=='silu': return expit(z)*z if phi=='relu': bol= z>=0 #z=bol*z return np.maximum(0,z.copy()) @staticmethod def _add_bias_unit(X, how='column'): """Add bias unit (column or row of 1s) to array at index 0""" if how == 'column': ones = np.ones((X.shape[0], 1)) X_new = np.hstack((ones, X)) elif how == 'row': ones = np.ones((1, X.shape[1])) X_new = np.vstack((ones, X)) return X_new @staticmethod def _L2_reg(lambda_, W1, W2): 
"""Compute L2-regularization cost""" # only compute for non-bias terms return (lambda_/2.0) * np.sqrt(np.mean(W1[:, 1:] ** 2) + np.mean(W2[:, 1:] ** 2)) def _cost(self,A3,Y_enc,W1,W2): '''Get the objective function value''' cost = np.mean((Y_enc-A3)**2) L2_term = self._L2_reg(self.l2_C, W1, W2) return cost + L2_term def _feedforward(self, X, W1, W2): """Compute feedforward step """ A1 = self._add_bias_unit(X, how='column') A1 = A1.T Z1 = W1 @ A1 A2 = self._sigmoid(Z1,self.phi) A2 = self._add_bias_unit(A2, how='row') Z2 = W2 @ A2 A3 = self._sigmoid(Z2,'sig') return A1, Z1, A2, Z2, A3 def _div(b,A_,phi): if phi=='sig': return A_*(1-A_) if phi=='lin': return 1 if phi=='silu': return (expit(A_)*A_)+(expit(A_)*(1-expit(A_)*A_)) if phi=='relu': bol= A_>=0 return 1 def _get_gradient(self, A1, A2, A3, Z1, Z2, Y_enc, W1, W2): """ Compute gradient step using backpropagation. """ # vectorized backpropagation Z1_with_bias = self._add_bias_unit(Z1,how='row') Z2_with_bias = self._add_bias_unit(Z2,how='row') V2 = -2*(Y_enc-A3)*self._div(A3,self.phi) # last layer sensitivity V1 = self._div(A2,self.phi)*(W2.T @ V2) # back prop the sensitivity if self.phi=='relu': #print(Z2_with_bias.shape) #print(V2.shape) V1[Z1_with_bias<=0] = 0 V2[Z2<=0] = 0 grad2 = V2 @ A2.T # no bias on final layer grad1 = V1[1:,:] @ A1.T # dont back prop sensitivity of bias # regularize weights that are not bias terms grad1[:, 1:] += W1[:, 1:] * self.l2_C grad2[:, 1:] += W2[:, 1:] * self.l2_C return grad1, grad2 def predict(self, X): """Predict class labels""" _, _, _, _, A3 = self._feedforward(X, self.W1, self.W2) y_pred = np.argmax(A3, axis=0) return y_pred from sklearn.metrics import accuracy_score # just start with the vectorized version and minibatch class TLPMiniBatch(TwoLayerPerceptronBase): def __init__(self, alpha=0.0, decrease_const=0.0, shuffle=True, minibatches=1, **kwds): # need to add to the original initializer self.alpha = alpha self.decrease_const = decrease_const self.shuffle = shuffle 
self.minibatches = minibatches # but keep other keywords super().__init__(**kwds) def fit(self, X, y, print_progress=False): """ Learn weights from training data. With mini-batch""" X_data, y_data = X.copy(), y.copy() Y_enc = self._encode_labels(y) # init weights and setup matrices self.n_features_ = X_data.shape[1] self.n_output_ = Y_enc.shape[0] self.W1, self.W2 = self._initialize_weights() delta_W1_prev = np.zeros(self.W1.shape) delta_W2_prev = np.zeros(self.W2.shape) self.cost_ = [] self.score_ = [] # get starting acc self.score_.append(accuracy_score(y_data,self.predict(X_data))) for i in range(self.epochs): # adaptive learning rate self.eta /= (1 + self.decrease_const*i) if print_progress>0 and (i+1)%print_progress==0: sys.stderr.write('\rEpoch: %d/%d' % (i+1, self.epochs)) sys.stderr.flush() if self.shuffle: idx_shuffle = np.random.permutation(y_data.shape[0]) X_data, Y_enc, y_data = X_data[idx_shuffle], Y_enc[:, idx_shuffle], y_data[idx_shuffle] mini = np.array_split(range(y_data.shape[0]), self.minibatches) mini_cost = [] for idx in mini: # feedforward A1, Z1, A2, Z2, A3 = self._feedforward(X_data[idx], self.W1, self.W2) cost = self._cost(A3,Y_enc[:, idx],self.W1,self.W2) mini_cost.append(cost) # this appends cost of mini-batch only # compute gradient via backpropagation grad1, grad2 = self._get_gradient(A1=A1, A2=A2, A3=A3, Z1=Z1, Z2=Z2, Y_enc=Y_enc[:, idx], W1=self.W1,W2=self.W2) # momentum calculations delta_W1, delta_W2 = self.eta * grad1, self.eta * grad2 self.W1 -= (delta_W1 + (self.alpha * delta_W1_prev)) self.W2 -= (delta_W2 + (self.alpha * delta_W2_prev)) delta_W1_prev, delta_W2_prev = delta_W1, delta_W2 self.cost_.append(mini_cost) self.score_.append(accuracy_score(y_data,self.predict(X_data))) return self %%time params = dict(n_hidden=100, C=.0001, # tradeoff L2 regularizer epochs=200, # iterations eta=0.001, # learning rate random_state=1, phi='lin') nn_mini = TLPMiniBatch(**params, alpha=0.001,# momentum calculation decrease_const=0.0001, # 
decreasing eta minibatches=50, # minibatch size shuffle=True) nn_mini.fit(x_train_ar, y_target_ar, print_progress=50) yhat = nn_mini.predict(x_train_ar) print('Accuracy:',accuracy_score(y_target_ar,yhat))
Epoch: 200/200
Unlicense
Project4/Project4_sul_part4 (1).ipynb
wiggs555/cse7324project
!wget https://www.dropbox.com/s/l2ul3upj7dkv4ou/synthetic-data.zip !unzip -qq synthetic-data.zip !pip install torch_snippets torch_summary editdistance from torch_snippets import * from torchsummary import summary import editdistance device = 'cuda' if torch.cuda.is_available() else 'cpu' fname2label = lambda fname: stem(fname).split('@')[0] images = Glob('synthetic-data') vocab = 'QWERTYUIOPASDFGHJKLZXCVBNMqwertyuiopasdfghjklzxcvbnm' B,T,V = 64, 32, len(vocab) H,W = 32, 128 class OCRDataset(Dataset): def __init__(self, items, vocab=vocab, preprocess_shape=(H,W), timesteps=T): super().__init__() self.items = items self.charList = {ix+1:ch for ix,ch in enumerate(vocab)} self.charList.update({0: '`'}) self.invCharList = {v:k for k,v in self.charList.items()} self.ts = timesteps def __len__(self): return len(self.items) def sample(self): return self[randint(len(self))] def __getitem__(self, ix): item = self.items[ix] image = cv2.imread(item, 0) label = fname2label(item) return image, label def collate_fn(self, batch): images, labels, label_lengths, label_vectors, input_lengths = [], [], [], [], [] for image, label in batch: images.append(torch.Tensor(self.preprocess(image))[None,None]) label_lengths.append(len(label)) labels.append(label) label_vectors.append(self.str2vec(label)) input_lengths.append(self.ts) images = torch.cat(images).float().to(device) label_lengths = torch.Tensor(label_lengths).long().to(device) label_vectors = torch.Tensor(label_vectors).long().to(device) input_lengths = torch.Tensor(input_lengths).long().to(device) return images, label_vectors, label_lengths, input_lengths, labels def str2vec(self, string, pad=True): string = ''.join([s for s in string if s in self.invCharList]) val = list(map(lambda x: self.invCharList[x], string)) if pad: while len(val) < self.ts: val.append(0) return val def preprocess(self, img, shape=(32,128)): target = np.ones(shape)*255 try: H, W = shape h, w = img.shape fx = H/h fy = W/w f = min(fx, fy) _h = int(h*f) _w = 
int(w*f) _img = cv2.resize(img, (_w,_h)) target[:_h,:_w] = _img except: ... return (255-target)/255 def decoder_chars(self, pred): decoded = "" last = "" pred = pred.cpu().detach().numpy() for i in range(len(pred)): k = np.argmax(pred[i]) if k > 0 and self.charList[k] != last: last = self.charList[k] decoded = decoded + last elif k > 0 and self.charList[k] == last: continue else: last = "" return decoded.replace(" "," ") def wer(self, preds, labels): c = 0 for p, l in zip(preds, labels): c += p.lower().strip() != l.lower().strip() return round(c/len(preds), 4) def cer(self, preds, labels): c, d = [], [] for p, l in zip(preds, labels): c.append(editdistance.eval(p, l) / len(l)) return round(np.mean(c), 4) def evaluate(self, model, ims, labels, lower=False): model.eval() preds = model(ims).permute(1,0,2) # B, T, V+1 preds = [self.decoder_chars(pred) for pred in preds] return {'char-error-rate': self.cer(preds, labels), 'word-error-rate': self.wer(preds, labels), 'char-accuracy' : 1 - self.cer(preds, labels), 'word-accuracy' : 1 - self.wer(preds, labels)} from sklearn.model_selection import train_test_split trn_items, val_items = train_test_split(Glob('synthetic-data'), test_size=0.2, random_state=22) trn_ds = OCRDataset(trn_items) val_ds = OCRDataset(val_items) trn_dl = DataLoader(trn_ds, batch_size=B, collate_fn=trn_ds.collate_fn, drop_last=True, shuffle=True) val_dl = DataLoader(val_ds, batch_size=B, collate_fn=val_ds.collate_fn, drop_last=True) from torch_snippets import Reshape, Permute class BasicBlock(nn.Module): def __init__(self, ni, no, ks=3, st=1, padding=1, pool=2, drop=0.2): super().__init__() self.ks = ks self.block = nn.Sequential( nn.Conv2d(ni, no, kernel_size=ks, stride=st, padding=padding), nn.BatchNorm2d(no, momentum=0.3), nn.ReLU(inplace=True), nn.MaxPool2d(pool), nn.Dropout2d(drop) ) def forward(self, x): return self.block(x) class Ocr(nn.Module): def __init__(self, vocab): super().__init__() self.model = nn.Sequential( BasicBlock( 1, 128), 
BasicBlock(128, 128), BasicBlock(128, 256, pool=(4,2)), Reshape(-1, 256, 32), Permute(2, 0, 1) # T, B, D ) self.rnn = nn.Sequential( nn.LSTM(256, 256, num_layers=2, dropout=0.2, bidirectional=True), ) self.classification = nn.Sequential( nn.Linear(512, vocab+1), nn.LogSoftmax(-1), ) def forward(self, x): x = self.model(x) x, lstm_states = self.rnn(x) y = self.classification(x) return y def ctc(log_probs, target, input_lengths, target_lengths, blank=0): loss = nn.CTCLoss(blank=blank, zero_infinity=True) ctc_loss = loss(log_probs, target, input_lengths, target_lengths) return ctc_loss model = Ocr(len(vocab)).to(device) !pip install torch_summary from torchsummary import summary summary(model, torch.zeros((1,1,32,128)).to(device)) def train_batch(data, model, optimizer, criterion): model.train() imgs, targets, label_lens, input_lens, labels = data optimizer.zero_grad() preds = model(imgs) loss = criterion(preds, targets, input_lens, label_lens) loss.backward() optimizer.step() results = trn_ds.evaluate(model, imgs.to(device), labels) return loss, results @torch.no_grad() def validate_batch(data, model): model.eval() imgs, targets, label_lens, input_lens, labels = data preds = model(imgs) loss = criterion(preds, targets, input_lens, label_lens) return loss, val_ds.evaluate(model, imgs.to(device), labels) model = Ocr(len(vocab)).to(device) criterion = ctc optimizer = optim.AdamW(model.parameters(), lr=3e-3) n_epochs = 50 log = Report(n_epochs) for ep in range( n_epochs): # if ep in lr_schedule: optimizer = AdamW(ocr.parameters(), lr=lr_schedule[ep]) N = len(trn_dl) for ix, data in enumerate(trn_dl): pos = ep + (ix+1)/N loss, results = train_batch(data, model, optimizer, criterion) # scheduler.step() ca, wa = results['char-accuracy'], results['word-accuracy'] log.record(pos=pos, trn_loss=loss, trn_char_acc=ca, trn_word_acc=wa, end='\r') val_results = [] N = len(val_dl) for ix, data in enumerate(val_dl): pos = ep + (ix+1)/N loss, results = validate_batch(data, model) ca, 
wa = results['char-accuracy'], results['word-accuracy'] log.record(pos=pos, val_loss=loss, val_char_acc=ca, val_word_acc=wa, end='\r') log.report_avgs(ep+1) print() for jx in range(5): img, label = val_ds.sample() _img = torch.Tensor(val_ds.preprocess(img)[None,None]).to(device) pred = model(_img)[:,0,:] pred = trn_ds.decoder_chars(pred) print(f'Pred: `{pred}` :: Truth: `{label}`') print() log.plot_epochs(['trn_word_acc','val_word_acc'], title='Training and validation word accuracy')
0%| | 0/50 [00:00<?, ?it/s]/usr/local/lib/python3.6/dist-packages/numpy/core/fromnumeric.py:3335: RuntimeWarning: Mean of empty slice. out=out, **kwargs) /usr/local/lib/python3.6/dist-packages/numpy/core/_methods.py:161: RuntimeWarning: invalid value encountered in double_scalars ret = ret.dtype.type(ret / rcount) 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 50/50 [00:00<00:00, 499.58it/s]
MIT
Chapter15/Handwriting_transcription.ipynb
arifmudi/Modern-Computer-Vision-with-PyTorch
Dependencies
# Standard library.
import json, glob

# Project helpers. NOTE(review): pd, np, tf and the get_data_test / decode /
# jaccard / get_test_dataset helpers used below appear to come from these
# star imports — confirm against the utility modules.
from tweet_utility_scripts import *
from tweet_utility_preprocess_roberta_scripts_aux import *

# Hugging Face RoBERTa model/config and the fast byte-level BPE tokenizer.
from transformers import TFRobertaModel, RobertaConfig
from tokenizers import ByteLevelBPETokenizer

# Keras building blocks for the QA head.
from tensorflow.keras import layers
from tensorflow.keras.models import Model
_____no_output_____
MIT
Model backlog/Inference/276-tweet-inference-5fold-roberta-avg-last.ipynb
dimitreOliveira/Tweet-Sentiment-Extraction
Load data
# Load the competition test split and preview the first rows.
test_path = '/kaggle/input/tweet-sentiment-extraction/test.csv'
test = pd.read_csv(test_path)
print('Test samples: %s' % len(test))
display(test.head())
Test samples: 3534
MIT
Model backlog/Inference/276-tweet-inference-5fold-roberta-avg-last.ipynb
dimitreOliveira/Tweet-Sentiment-Extraction
Model parameters
# Where the fine-tuned fold weights, tokenizer files and training config live.
input_base_path = '/kaggle/input/276-tweet-train-5fold-roberta-avg-last4-onecy-exp3/'

# Training-time hyper-parameters saved alongside the weights.
with open(input_base_path + 'config.json') as json_file:
    config = json.load(json_file)
config

vocab_path = input_base_path + 'vocab.json'
merges_path = input_base_path + 'merges.txt'
base_path = '/kaggle/input/qa-transformers/roberta/'

# Point the config at the pretrained RoBERTa base weights/config.
config['base_model_path'] = base_path + 'roberta-base-tf_model.h5'
config['config_path'] = base_path + 'roberta-base-config.json'

# One .h5 checkpoint per CV fold, predicted in sorted order.
model_path_list = sorted(glob.glob(input_base_path + '*.h5'))
print('Models to predict:')
print(*model_path_list, sep='\n')
Models to predict: /kaggle/input/276-tweet-train-5fold-roberta-avg-last4-onecy-exp3/model_fold_1.h5 /kaggle/input/276-tweet-train-5fold-roberta-avg-last4-onecy-exp3/model_fold_2.h5 /kaggle/input/276-tweet-train-5fold-roberta-avg-last4-onecy-exp3/model_fold_3.h5 /kaggle/input/276-tweet-train-5fold-roberta-avg-last4-onecy-exp3/model_fold_4.h5 /kaggle/input/276-tweet-train-5fold-roberta-avg-last4-onecy-exp3/model_fold_5.h5
MIT
Model backlog/Inference/276-tweet-inference-5fold-roberta-avg-last.ipynb
dimitreOliveira/Tweet-Sentiment-Extraction
Tokenizer
# Byte-level BPE tokenizer matching RoBERTa; lowercasing mirrors the text
# normalisation applied to the tweets before encoding.
tokenizer = ByteLevelBPETokenizer(
    vocab_file=vocab_path,
    merges_file=merges_path,
    lowercase=True,
    add_prefix_space=True,
)
_____no_output_____
MIT
Model backlog/Inference/276-tweet-inference-5fold-roberta-avg-last.ipynb
dimitreOliveira/Tweet-Sentiment-Extraction
Pre process
# Normalise the raw tweets: no NaNs, lower-case, no surrounding whitespace.
test['text'].fillna('', inplace=True)
test['text'] = test['text'].str.lower()
test['text'] = test['text'].str.strip()

# Tokenise into model-ready arrays (ids/masks plus auxiliary targets).
x_test, x_test_aux, x_test_aux_2 = get_data_test(
    test, tokenizer, config['MAX_LEN'], preprocess_fn=preprocess_roberta_test)
_____no_output_____
MIT
Model backlog/Inference/276-tweet-inference-5fold-roberta-avg-last.ipynb
dimitreOliveira/Tweet-Sentiment-Extraction
Model
# Ask the backbone to expose all hidden states so the head can average layers.
module_config = RobertaConfig.from_pretrained(config['config_path'], output_hidden_states=True)

def model_fn(MAX_LEN):
    """Build the span-extraction model: RoBERTa + averaged last-4 layers + QA head.

    MAX_LEN: padded sequence length. Returns a Keras Model mapping
    (input_ids, attention_mask) -> (start_logits, end_logits), each (batch, MAX_LEN).
    """
    input_ids = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')
    attention_mask = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')
    base_model = TFRobertaModel.from_pretrained(config['base_model_path'], config=module_config, name='base_model')
    # NOTE(review): assumes the backbone returns
    # (sequence_output, pooled_output, hidden_states) in that order — this is
    # version-dependent in transformers; confirm against the pinned version.
    _, _, hidden_states = base_model({'input_ids': input_ids, 'attention_mask': attention_mask})
    # Average the last four transformer layers ("avg-last4" in the run name).
    h12 = hidden_states[-1]
    h11 = hidden_states[-2]
    h10 = hidden_states[-3]
    h09 = hidden_states[-4]
    avg_hidden = layers.Average()([h12, h11, h10, h09])
    # One shared projection producing two logits per token: start and end.
    logits = layers.Dense(2, use_bias=False, name='qa_outputs')(avg_hidden)
    start_logits, end_logits = tf.split(logits, 2, axis=-1)
    start_logits = tf.squeeze(start_logits, axis=-1, name='y_start')
    end_logits = tf.squeeze(end_logits, axis=-1, name='y_end')
    model = Model(inputs=[input_ids, attention_mask], outputs=[start_logits, end_logits])
    return model
_____no_output_____
MIT
Model backlog/Inference/276-tweet-inference-5fold-roberta-avg-last.ipynb
dimitreOliveira/Tweet-Sentiment-Extraction
Make predictions
# Accumulate start/end logits across all CV folds. The later argmax is
# invariant to dividing by the fold count, so a plain sum is sufficient.
NUM_TEST_IMAGES = len(test)
pred_shape = (NUM_TEST_IMAGES, config['MAX_LEN'])
test_start_preds = np.zeros(pred_shape)
test_end_preds = np.zeros(pred_shape)

for model_path in model_path_list:
    print(model_path)
    model = model_fn(config['MAX_LEN'])
    model.load_weights(model_path)
    start_logits, end_logits = model.predict(get_test_dataset(x_test, config['BATCH_SIZE']))
    test_start_preds += start_logits
    test_end_preds += end_logits
/kaggle/input/276-tweet-train-5fold-roberta-avg-last4-onecy-exp3/model_fold_1.h5 /kaggle/input/276-tweet-train-5fold-roberta-avg-last4-onecy-exp3/model_fold_2.h5 /kaggle/input/276-tweet-train-5fold-roberta-avg-last4-onecy-exp3/model_fold_3.h5 /kaggle/input/276-tweet-train-5fold-roberta-avg-last4-onecy-exp3/model_fold_4.h5 /kaggle/input/276-tweet-train-5fold-roberta-avg-last4-onecy-exp3/model_fold_5.h5
MIT
Model backlog/Inference/276-tweet-inference-5fold-roberta-avg-last.ipynb
dimitreOliveira/Tweet-Sentiment-Extraction
Post process
# Convert the summed fold logits into token indices, then back into text spans.
test['start'] = test_start_preds.argmax(axis=-1)
test['end'] = test_end_preds.argmax(axis=-1)
test['selected_text'] = test.apply(
    lambda row: decode(row['start'], row['end'], row['text'],
                       config['question_size'], tokenizer), axis=1)

# Post-process: keep only words that occur in the original tweet, then fall
# back to the full tweet whenever the prediction comes out empty or missing.
def _keep_known_words(row):
    tweet_words = row['text'].split()
    return ' '.join([w for w in row['selected_text'].split() if w in tweet_words])

test["selected_text"] = test.apply(_keep_known_words, axis=1)
test['selected_text'] = test.apply(
    lambda row: row['text'] if (row['selected_text'] == '') else row['selected_text'], axis=1)
test['selected_text'].fillna(test['text'], inplace=True)
_____no_output_____
MIT
Model backlog/Inference/276-tweet-inference-5fold-roberta-avg-last.ipynb
dimitreOliveira/Tweet-Sentiment-Extraction
Visualize predictions
# Per-tweet diagnostics: character, word and BPE-token counts for the tweet
# and the predicted span, plus the jaccard overlap between the two.
test['text_len'] = test['text'].str.len()
test['label_len'] = test['selected_text'].str.len()
test['text_wordCnt'] = test['text'].apply(lambda s: len(s.split(' ')))
test['label_wordCnt'] = test['selected_text'].apply(lambda s: len(s.split(' ')))
test['text_tokenCnt'] = test['text'].apply(lambda s: len(tokenizer.encode(s).ids))
test['label_tokenCnt'] = test['selected_text'].apply(lambda s: len(tokenizer.encode(s).ids))
test['jaccard'] = test.apply(lambda row: jaccard(row['text'], row['selected_text']), axis=1)

display(test.head(10))
display(test.describe())
_____no_output_____
MIT
Model backlog/Inference/276-tweet-inference-5fold-roberta-avg-last.ipynb
dimitreOliveira/Tweet-Sentiment-Extraction
Test set predictions
# Fill the sample submission template with the predicted spans and write it out.
sample_path = '/kaggle/input/tweet-sentiment-extraction/sample_submission.csv'
submission = pd.read_csv(sample_path)
submission['selected_text'] = test['selected_text']
submission.to_csv('submission.csv', index=False)
submission.head(10)
_____no_output_____
MIT
Model backlog/Inference/276-tweet-inference-5fold-roberta-avg-last.ipynb
dimitreOliveira/Tweet-Sentiment-Extraction
Importing Modules
#%matplotlib notebook from tqdm import tqdm %matplotlib inline #Module to handle regular expressions import re #manage files import os #Library for emoji import emoji #Import pandas and numpy to handle data import pandas as pd import numpy as np #import libraries for accessing the database import psycopg2 from sqlalchemy import create_engine from postgres_credentials import * #import libraries for visualization import matplotlib.pyplot as plt import seaborn as sns from wordcloud import WordCloud from PIL import Image #Import nltk to check english lexicon import nltk from nltk.tokenize import word_tokenize from nltk.corpus import ( wordnet, stopwords ) #import libraries for tokenization and ML import json; import keras; import keras.preprocessing.text as kpt; #from keras.preprocessing.text import Tokenizer; import sklearn from sklearn.preprocessing import Normalizer from sklearn.feature_extraction.text import ( CountVectorizer, TfidfVectorizer ) from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score #Import all libraries for creating a deep neural network #Sequential is the standard type of neural network with stackable layers from keras.models import ( Sequential, model_from_json ) #Dense: Standard layers with every node connected, dropout: avoids overfitting from keras.layers import Dense, Dropout, Activation; #To anotate database from pycorenlp import StanfordCoreNLP #Querying the database def query_database(tabletweets): engine = create_engine("postgresql+psycopg2://%s:%s@%s:%d/%s" %(usertwitter, passwordtwitter, hosttwitter, porttwitter, dbnametwitter)) table = pd.read_sql_query("select * from %s" %tabletweets,con=engine, index_col="id") return table
_____no_output_____
MIT
Jupyter Notebook files/Analysis of Twitter.ipynb
ugis22/analysing_twitter
Preprocessing the text Before we dig into analyzing the public opinion on 'Avengers', there is an important step that we need to take: preprocessing the tweet text. But what does this mean? Text preprocessing includes a basic text cleaning following a set of commonly used simple rules, but also advanced techniques that take into account syntactic and lexical information.
#preprocess text in tweets by removing links, @UserNames, numbers, etc.
def preprocessing_text(table):
    """Clean the 'tweet' column in place and return the same table.

    Lower-cases, removes the retweet marker, @mentions, links, numbers and
    punctuation.
    """
    #put everything in lowercase
    table['tweet'] = table['tweet'].str.lower()
    #Remove the standalone "rt" retweet marker.
    #BUGFIX: the previous str.replace('rt', '') also stripped "rt" from inside
    #ordinary words (e.g. "start" -> "sta", "party" -> "pay"); anchor it on
    #word boundaries so only the whole token is removed.
    table['tweet'] = table['tweet'].replace(r'\brt\b', '', regex=True)
    #Remove occurrences of @UserNames mentions
    table['tweet'] = table['tweet'].replace(r'@\w+', '', regex=True)
    #Remove links contained in the tweet
    table['tweet'] = table['tweet'].replace(r'http\S+', '', regex=True)
    table['tweet'] = table['tweet'].replace(r'www.[^ ]+', '', regex=True)
    #remove numbers
    table['tweet'] = table['tweet'].replace(r'[0-9]+', '', regex=True)
    #remove special characters and punctuation marks
    table['tweet'] = table['tweet'].replace(r'[!"#$%&()*+,-./:;<=>?@[\]^_`{|}~]', '', regex=True)
    return table

def in_dict(word):
    """True if *word* has at least one WordNet synset (i.e. is a known word)."""
    #bool() makes the result explicit instead of the old implicit True/None
    return bool(wordnet.synsets(word))

def replace_elongated_word(word):
    """Collapse repeated character runs until *word* is in the dictionary
    (or no further reduction is possible)."""
    regex = r'(\w*)(\w+)\2(\w*)'
    repl = r'\1\2\3'
    if in_dict(word):
        return word
    new_word = re.sub(regex, repl, word)
    if new_word != word:
        #still shrinking: recurse on the reduced form
        return replace_elongated_word(new_word)
    else:
        return new_word

def detect_elongated_words(row):
    """Find elongated (e.g. 'cooool') words in *row* and replace them with a
    dictionary form where possible."""
    regexrep = r'(\w*)(\w+)(\2)(\w*)'
    words = [''.join(i) for i in re.findall(regexrep, row)]
    for word in words:
        if not in_dict(word):
            #NOTE(review): `word` is used as a regex pattern here; safe after
            #punctuation removal above, but would need re.escape otherwise.
            row = re.sub(word, replace_elongated_word(word), row)
    return row

def stop_words(table):
    """Remove English stop words from the 'tweet' column; returns the table."""
    #set gives O(1) membership tests instead of scanning a list per word
    stop_words_list = set(stopwords.words('english'))
    table['tweet'] = table['tweet'].str.lower()
    table['tweet'] = table['tweet'].apply(
        lambda x: ' '.join([word for word in x.split() if word not in stop_words_list]))
    return table

def replace_antonyms(word):
    """Return the first WordNet antonym of *word*, or *word* itself if none."""
    #We walk every lemma of every synset for the word
    for syn in wordnet.synsets(word):
        for lemma in syn.lemmas():
            #if the lemma has an antonym, return it
            if lemma.antonyms():
                return lemma.antonyms()[0].name()
    return word

def handling_negation(row):
    """Rewrite a negated phrase by swapping the first content word after the
    negation for its antonym (e.g. "not good" -> "good"... antonym "bad")."""
    #Tokenize the row
    words = word_tokenize(row)
    #POS tags considered "content" words eligible for antonym replacement
    speach_tags = ['JJ', 'JJR', 'JJS', 'NN', 'VB', 'VBD', 'VBG', 'VBN', 'VBP']
    tags = nltk.pos_tag(words)
    #Locate the earliest negation token ("n't" or "not"), if any, and split
    #the sentence there: `words` keeps the prefix, words_2/tags_2 the suffix.
    tags_2 = ''
    if "n't" in words and "not" in words:
        tags_2 = tags[min(words.index("n't"), words.index("not")):]
        words_2 = words[min(words.index("n't"), words.index("not")):]
        words = words[:(min(words.index("n't"), words.index("not")))+1]
    elif "n't" in words:
        tags_2 = tags[words.index("n't"):]
        words_2 = words[words.index("n't"):]
        words = words[:words.index("n't")+1]
    elif "not" in words:
        tags_2 = tags[words.index("not"):]
        words_2 = words[words.index("not"):]
        words = words[:words.index("not")+1]
    #Replace the first content word after the negation with its antonym and
    #keep the remainder of the sentence unchanged.
    for index, word_tag in enumerate(tags_2):
        if word_tag[1] in speach_tags:
            words = words+[replace_antonyms(word_tag[0])]+words_2[index+2:]
            break
    return ' '.join(words)

def cleaning_table(table):
    """Run the full cleaning pipeline over the 'tweet' column."""
    table = preprocessing_text(table)
    table['tweet'] = table['tweet'].apply(lambda x: detect_elongated_words(x))
    table['tweet'] = table['tweet'].apply(lambda x: handling_negation(x))
    table = stop_words(table)
    return table
_____no_output_____
MIT
Jupyter Notebook files/Analysis of Twitter.ipynb
ugis22/analysing_twitter
Data Visualization After we have cleaned our data but before we start building our model for sentiment analysis, we can perform an exploratory data analysis to see which words appear most frequently in our 'Avengers' tweets. For this part, we will show graphs for tweets labelled as positive separately from those labelled as negative.
#Vectorization for Data Visualization def vectorization(table): #CountVectorizer will convert a collection of text documents to a matrix of token counts #Produces a sparse representation of the counts #Initialize vector = CountVectorizer() #We fit and transform the vector created frequency_matrix = vector.fit_transform(table.tweet) #Sum all the frequencies for each word sum_frequencies = np.sum(frequency_matrix, axis=0) #Now we use squeeze to remove single-dimensional entries from the shape of an array that we got from applying np.asarray to #the sum of frequencies. frequency = np.squeeze(np.asarray(sum_frequencies)) #Now we get into a dataframe all the frequencies and the words that they correspond to frequency_df = pd.DataFrame([frequency], columns=vector.get_feature_names()).transpose() return frequency_df def word_cloud(tweets): #We get the directory that we are working on file = os.getcwd() #We read the mask image into a numpy array avengers_mask = np.array(Image.open(os.path.join(file, "avengers.png"))) #Now we store the tweets into a series to be able to process #tweets_list = pd.Series([t for t in tweet_table.tweet]).str.cat(sep=' ') #We generate the wordcloud using the series created and the mask word_cloud = WordCloud(width=2000, height=1000, max_font_size=200, background_color="black", max_words=2000, mask=avengers_mask, contour_width=1, contour_color="steelblue", colormap="nipy_spectral", stopwords=["avengers"]) word_cloud.generate(tweets) #wordcloud = WordCloud(width=1600, height=800,max_font_size=200).generate(tweets_list) #Now we plot both figures, the wordcloud and the mask #plt.figure(figsize=(15,15)) plt.figure(figsize=(10,10)) plt.imshow(word_cloud, interpolation="hermite") plt.axis("off") #plt.imshow(avengers_mask, cmap=plt.cm.gray, interpolation="bilinear") #plt.axis("off") plt.show() def graph(word_frequency, sent): labels = word_frequency[0][1:51].index title = "Word Frequency for %s" %sent #Plot the figures plt.figure(figsize=(10,5)) 
plt.bar(np.arange(50), word_frequency[0][1:51], width = 0.8, color = sns.color_palette("bwr"), alpha=0.5, edgecolor = "black", capsize=8, linewidth=1); plt.xticks(np.arange(50), labels, rotation=90, size=14); plt.xlabel("50 more frequent words", size=14); plt.ylabel("Frequency", size=14); #plt.title('Word Frequency for %s', size=18) %sent; plt.title(title, size=18) plt.grid(False); plt.gca().spines["top"].set_visible(False); plt.gca().spines["right"].set_visible(False); plt.show() def regression_graph(table): table = table[1:] #We set the style of seaborn sns.set_style("whitegrid") #Initialize the figure plt.figure(figsize=(6,6)) #we obtain the points from matplotlib scatter points = plt.scatter(table["Positive"], table["Negative"], c=table["Positive"], s=75, cmap="bwr") #graph the colorbar plt.colorbar(points) #we graph the regplot from seaborn sns.regplot(x="Positive", y="Negative",fit_reg=False, scatter=False, color=".1", data=table) plt.xlabel("Frequency for Positive Tweets", size=14) plt.ylabel("Frequency for Negative Tweets", size=14) plt.title("Word frequency in Positive vs. Negative Tweets", size=14) plt.grid(False) sns.despine()
_____no_output_____
MIT
Jupyter Notebook files/Analysis of Twitter.ipynb
ugis22/analysing_twitter
Preparing data for model After visualizing our data, the next step is to split our dataset into training and test sets. For doing so, we'll take advantage of the train_test_split functionality of sklearn package. We will take 20% of the dataset for testing following the 20–80% rule. From the remaining 80% used for the training set, we'll save a part for validation of our model.
#Split the labelled tweets into training and test partitions
def splitting(table):
    """Shuffle and split the data 80/20.

    Returns (X_train, X_test, y_train, y_test) built from the 'tweet'
    texts and the 'sentiment' labels.
    """
    train_texts, test_texts, train_labels, test_labels = train_test_split(
        table.tweet, table.sentiment, test_size=0.2, shuffle=True)
    return train_texts, test_texts, train_labels, test_labels
_____no_output_____
MIT
Jupyter Notebook files/Analysis of Twitter.ipynb
ugis22/analysing_twitter
Tokenization
#Tokenization for analysis
def tokenization_tweets(dataset, features):
    """TF-IDF encode *dataset* into a dense array of at most *features* columns.

    NOTE(review): the vectorizer is fitted here and discarded, so every call
    builds its own vocabulary — train and prediction data get independent
    encodings; confirm this is the intended behaviour.
    """
    vectorizer = TfidfVectorizer(max_features=features)
    return vectorizer.fit_transform(dataset).toarray()
_____no_output_____
MIT
Jupyter Notebook files/Analysis of Twitter.ipynb
ugis22/analysing_twitter
Train model
#Create a Neural Network #Create the model def train(X_train_mod, y_train, features, shuffle, drop, layer1, layer2, epoch, lr, epsilon, validation): model_nn = Sequential() model_nn.add(Dense(layer1, input_shape=(features,), activation='relu')) model_nn.add(Dropout(drop)) model_nn.add(Dense(layer2, activation='sigmoid')) model_nn.add(Dropout(drop)) model_nn.add(Dense(3, activation='softmax')) optimizer = keras.optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=epsilon, decay=0.0, amsgrad=False) model_nn.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) model_nn.fit(np.array(X_train_mod), y_train, batch_size=32, epochs=epoch, verbose=1, validation_split=validation, shuffle=shuffle) return model_nn
_____no_output_____
MIT
Jupyter Notebook files/Analysis of Twitter.ipynb
ugis22/analysing_twitter
Test model
def test(X_test, model_nn):
    """Return the model's predictions (class probabilities) for X_test."""
    return model_nn.predict(X_test)
_____no_output_____
MIT
Jupyter Notebook files/Analysis of Twitter.ipynb
ugis22/analysing_twitter
Main code
#Main pipeline: load & clean tweets, explore, run a hyper-parameter sweep,
#persist the chosen model, then label a new batch and write it back.

if __name__ == "__main__":
    tabletweets = "tweets_avengers"
    tweet_table = query_database(tabletweets)
    tweet_table = cleaning_table(tweet_table)

if __name__ == "__main__":
    #Word clouds: all tweets, then the positive-only and negative-only subsets
    word_cloud(pd.Series([t for t in tweet_table.tweet]).str.cat(sep=' '))
    word_cloud(pd.Series([t for t in tweet_table[tweet_table.sentiment == "Positive"].tweet]).str.cat(sep=' '))
    word_cloud(pd.Series([t for t in tweet_table[tweet_table.sentiment == "Negative"].tweet]).str.cat(sep=' '))

if __name__ == "__main__":
    #Word frequencies (descending) for all/positive/negative tweets + bar charts
    word_frequency = vectorization(tweet_table).sort_values(0, ascending = False)
    word_frequency_pos = vectorization(tweet_table[tweet_table['sentiment'] == 'Positive']).sort_values(0, ascending = False)
    word_frequency_neg = vectorization(tweet_table[tweet_table['sentiment'] == 'Negative']).sort_values(0, ascending = False)
    graph(word_frequency, 'all')
    graph(word_frequency_pos, 'positive')
    graph(word_frequency_neg, 'negative')

if __name__ == "__main__":
    #Positive vs. negative frequency of each word, as a scatter plot
    table_regression = pd.concat([word_frequency_pos, word_frequency_neg], axis=1, sort=False)
    table_regression.columns = ["Positive", "Negative"]
    regression_graph(table_regression)

if __name__ == "__main__":
    #Switch to the hand-labelled table for supervised training
    tabletweets = "tweets_avengers_labeled"
    tweet_table = query_database(tabletweets)

if __name__ == "__main__":
    #Encode labels: Negative -> 0, Neutral -> 1, Positive -> 2 (must match the
    #`labels` list used when decoding predictions further below)
    tweet_table['sentiment'] = tweet_table['sentiment'].apply(lambda x: 2 if x == 'Positive' else (0 if x == 'Negative' else 1))

if __name__ == "__main__":
    X_train, X_test, y_train, y_test = splitting(tweet_table)

def _fit_candidate(X_train, y_train, *, features=3500, shuffle=True, drop=0.5,
                   layer1=512, layer2=256, epoch=5, lr=0.002, epsilon=1e-9,
                   validation=0.1):
    """TF-IDF encode X_train with `features` terms and train one network.

    Shared by the model1..model11 sweep below; each wrapper only states the
    hyper-parameters that differ from these defaults.
    """
    X_train_mod = tokenization_tweets(X_train, features)
    return train(X_train_mod, y_train, features, shuffle, drop,
                 layer1, layer2, epoch, lr, epsilon, validation)

#Hyper-parameter sweep. The original repeated the same ~12-line body eleven
#times (with typo'd locals); the wrappers keep the public names/signatures and
#the module-level training calls intact.
def model1(X_train, y_train):
    return _fit_candidate(X_train, y_train, lr=0.001, epsilon=None)
model1(X_train, y_train)

def model2(X_train, y_train):
    return _fit_candidate(X_train, y_train, features=3000, lr=0.001, epsilon=None)
model2(X_train, y_train)

def model3(X_train, y_train):
    return _fit_candidate(X_train, y_train, epsilon=None)
model_final = model3(X_train, y_train)

def model4(X_train, y_train):
    return _fit_candidate(X_train, y_train, features=5000, epoch=2, lr=0.005, epsilon=None)
model4(X_train, y_train)

def model5(X_train, y_train):
    return _fit_candidate(X_train, y_train, epsilon=1e-5)
model5(X_train, y_train)

def model6(X_train, y_train):
    return _fit_candidate(X_train, y_train, epsilon=1e-8)
model6(X_train, y_train)

def model7(X_train, y_train):
    return _fit_candidate(X_train, y_train, epoch=6, epsilon=1e-8)
#model7(X_train, y_train)

def model8(X_train, y_train):
    return _fit_candidate(X_train, y_train)
model8(X_train, y_train)

def model9(X_train, y_train):
    return _fit_candidate(X_train, y_train, shuffle=False)
model9(X_train, y_train)

def model10(X_train, y_train):
    return _fit_candidate(X_train, y_train, validation=0.2)
model10(X_train, y_train)

def model11(X_train, y_train):
    return _fit_candidate(X_train, y_train, features=3000, validation=0.2)
model11(X_train, y_train)

def save_model(model):
    """Persist architecture (model.json) and weights (model.h5) to disk."""
    model_json = model.to_json()
    with open('model.json', 'w') as json_file:
        json_file.write(model_json)
    model.save_weights('model.h5')

#NOTE(review): model_final was first set to model3's network above and is
#overwritten here with a freshly trained model7 — kept exactly as the original.
model_final = model7(X_train, y_train)
save_model(model_final)

if __name__ == "__main__":
    #Score a new, unlabelled batch of tweets with the final model
    tabletweetsnew = "tweets_predict_avengers"
    tweet_table_new = query_database(tabletweetsnew)
    tweet_table_new = cleaning_table(tweet_table_new)

if __name__ == "__main__":
    X_new = tokenization_tweets(tweet_table_new.tweet, 3500)
    new_prediction = model_final.predict(X_new)

if __name__ == "__main__":
    #Decode softmax outputs to label strings and plot the class distribution
    labels = ['Negative', 'Neutral', 'Positive']
    sentiments = [labels[np.argmax(pred)] for pred in new_prediction]
    tweet_table_new["sentiment"] = sentiments
    sizes = [sentiments.count('Negative'), sentiments.count('Neutral'), sentiments.count('Positive')]
    explode = (0, 0, 0.1)
    labels = 'Negative', 'Neutral', 'Positive'
    plt.figure(figsize=(5,5))
    plt.pie(sizes, explode=explode, colors="bwr", labels=labels, autopct='%1.1f%%', shadow=True,
            startangle=90, wedgeprops={'alpha':0.8})
    plt.axis('equal')
    plt.show()

if __name__ == "__main__":
    #Append the newly labelled tweets back into the database
    engine = create_engine("postgresql+psycopg2://%s:%s@%s:%d/%s"
                           %(usertwitter, passwordtwitter, hosttwitter, porttwitter, dbnametwitter))
    tweet_table_new.to_sql("tweets_avengers_new_labeled", con=engine, if_exists="append")
_____no_output_____
MIT
Jupyter Notebook files/Analysis of Twitter.ipynb
ugis22/analysing_twitter
Extra analysis for interaction network
if __name__ == "__main__":
    #Load the interaction-network tweets and clean them like the training data
    tweet_table_interaction = pd.read_csv("tweets_final.csv")
    tweet_table_interaction.rename(columns = {"text": "tweet"}, inplace=True)
    tweet_table_interaction = cleaning_table(tweet_table_interaction)
    X_interaction = tokenization_tweets(tweet_table_interaction.tweet, 3500)

if __name__ == "__main__":
    #Rebuild the saved model: architecture from JSON, weights from HDF5.
    #FIX: use a with-block so the file handle is closed even if read() raises
    #(the old open/read/close leaked the handle on error).
    with open('model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    model = model_from_json(loaded_model_json)
    model.load_weights('model.h5')

if __name__ == "__main__":
    #Predict one sentiment per tweet (argmax over the 3-way softmax) and persist
    int_prediction = model.predict(X_interaction)
    labels = ['Negative', 'Neutral', 'Positive']
    sentiments = [labels[np.argmax(pred)] for pred in int_prediction]
    tweet_table_interaction["sentiment"] = sentiments
    tweet_table_interaction.to_csv("tweets_final_sentiment.csv")
_____no_output_____
MIT
Jupyter Notebook files/Analysis of Twitter.ipynb
ugis22/analysing_twitter
QA Sentiment Analysis: Critical Thinking 9.2.2 Naive Bayes By JEFFREY BLACK Introduction Question: "How does increasing the sample size affect a t test? Why does it affect a t test in this manner?" Answer: "In the long run, it means that the obtained t is more likely to be significant. In terms of the formula used to calculate t, increasing the sample size will decrease the standard error of the difference between means. This, in turn, will increase the size of the obtained t. A larger obtained t means that the obtained value is more likely to exceed the critical value and be significant." *** Importing Packages
# Numeric / data handling
import numpy as np
import pandas as pd
import os

# Feature extraction, models and metrics.
# NOTE(review): MultinomialNB, RandomForestClassifier, RandomizedSearchCV and
# the metrics are imported but not used in this excerpt — presumably for
# experimentation in later cells; confirm before pruning.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import (f1_score,precision_score,recall_score, confusion_matrix)
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV, train_test_split # for hyperparameter tuning
_____no_output_____
MIT
NaiveBayes/NB_9_2_2_QA_Sentiment_Analysis.ipynb
jeffreyablack1/QAEvaluationNLP
*** Loading and Preprocessing the Data
# Load the graded answers for critical-thinking item 9.2.2 and hold out 20%
# for testing; the fixed seed keeps the split reproducible.
data_path = "/Users/jeffreyblack/Desktop/NLPProject/QA_CTC.xlsx"
CTC_9_2_2 = pd.read_excel(data_path, sheet_name = 'CTC_9_2_2')
CTC_9_2_2
X_train, X_test, y_train, y_test = train_test_split(
    CTC_9_2_2['Answers'], CTC_9_2_2['Grade'], test_size=0.20, random_state=42)
_____no_output_____
MIT
NaiveBayes/NB_9_2_2_QA_Sentiment_Analysis.ipynb
jeffreyablack1/QAEvaluationNLP
*** Feature Extraction Convert reviews into vectors using the bag-of-words model. Note: I did not remove stop-words.
def extract_features(x_train, x_test):
    """TF-IDF encode the training and test documents.

    The vectorizer (unigrams to trigrams, at most 10k terms) is fitted on
    x_train only and then applied to x_test, so no test vocabulary leaks into
    the features. Prints the learned vocabulary and returns (train, test) as
    sparse matrices.
    Source:
    https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html
    """
    vectorizer = TfidfVectorizer(max_features=10000, ngram_range = (1,3))
    train = vectorizer.fit_transform(x_train)
    test = vectorizer.transform(x_test)
    # (removed a stray `test.toarray()` whose result was discarded — it only
    # materialised a dense copy of the matrix and threw it away)
    print((vectorizer.get_feature_names()))
    return train, test
_____no_output_____
MIT
NaiveBayes/NB_9_2_2_QA_Sentiment_Analysis.ipynb
jeffreyablack1/QAEvaluationNLP
Calling the TF-IDF Vectorizer to extract the features for the training and test predictors.
# Build the TF-IDF feature matrices; the vocabulary is fitted on the training
# split only (see extract_features), so the test set is encoded consistently.
feats_train, feats_test = extract_features(X_train, X_test) # training and test set features
['affect', 'affect the', 'affect the denominator', 'affects', 'affects the', 'affects the or', 'affects the standard', 'affects the test', 'and', 'and be', 'and be significant', 'and increase', 'and increase the', 'and the', 'and the more', 'and when', 'and when you', 'are', 'are and', 'are and when', 'as', 'as the', 'as the sample', 'bar1', 'bar1 bar2', 'bar1 bar2 this', 'bar2', 'bar2 this', 'bar2 this would', 'be', 'be significant', 'be significant in', 'be significant when', 'because', 'because it', 'because it will', 'become', 'become more', 'become more significant', 'being', 'being significantly', 'being significantly larger', 'between', 'between means', 'between means in', 'between means this', 'by', 'by either', 'by either increasing', 'calculate', 'calculate increasing', 'calculate increasing the', 'can', 'can increase', 'can increase power', 'chances', 'chances of', 'chances of the', 'critical', 'critical score', 'critical score and', 'critical value', 'critical value and', 'decrease', 'decrease sx', 'decrease sx bar1', 'decrease the', 'decrease the denominator', 'decrease the standard', 'decreases', 'decreases it', 'decreases it which', 'decreasing', 'decreasing the', 'decreasing the number', 'degrees', 'degrees of', 'degrees of freedom', 'denominator', 'denominator in', 'denominator in the', 'denominator which', 'denominator which would', 'determines', 'determines the', 'determines the degrees', 'difference', 'difference between', 'difference between means', 'distribution', 'either', 'either increasing', 'either increasing or', 'equation', 'equation for', 'equation for the', 'error', 'error and', 'error and increase', 'error in', 'error in that', 'error of', 'error of the', 'exceed', 'exceed the', 'exceed the critical', 'fact', 'fact it', 'fact it would', 'fail', 'fail to', 'fail to reject', 'for', 'for test', 'for test determines', 'for that', 'for that test', 'for the', 'for the score', 'formula', 'formula used', 'formula used to', 'freedom', 'freedom 
for', 'freedom for that', 'gets', 'gets larger', 'gets larger the', 'going', 'going to', 'going to be', 'happens', 'happens it', 'happens it is', 'hypothesis', 'hypothesis less', 'hypothesis less likely', 'hypothesis thus', 'hypothesis thus the', 'if', 'if you', 'if you increase', 'in', 'in fact', 'in fact it', 'in terms', 'in terms of', 'in that', 'in that it', 'in the', 'in the equation', 'in the long', 'in turn', 'in turn affects', 'in turn will', 'increase', 'increase power', 'increase power because', 'increase the', 'increase the chances', 'increase the same', 'increase the sample', 'increase the size', 'increases', 'increases the', 'increases the obtained', 'increases therefore', 'increases therefore we', 'increasing', 'increasing or', 'increasing or decreasing', 'increasing the', 'increasing the sample', 'is', 'is larger', 'is larger it', 'is more', 'is more likely', 'is most', 'is most likely', 'it', 'it affects', 'it affects the', 'it decreases', 'it decreases it', 'it increases', 'it increases the', 'it is', 'it is most', 'it means', 'it means that', 'it which', 'it which in', 'it will', 'it will become', 'it will decrease', 'it will make', 'it would', 'it would decrease', 'larger', 'larger it', 'larger it will', 'larger means', 'larger means that', 'larger obtained', 'larger obtained means', 'larger the', 'larger the critical', 'larger the value', 'less', 'less likely', 'less likely to', 'likely', 'likely going', 'likely going to', 'likely to', 'likely to be', 'likely to exceed', 'likely to fail', 'likely to reject', 'long', 'long run', 'long run it', 'make', 'make the', 'make the sample', 'make the sd', 'means', 'means in', 'means in turn', 'means that', 'means that the', 'means this', 'means this in', 'more', 'more likely', 'more likely to', 'more significant', 'more significant the', 'most', 'most likely', 'most likely going', 'narrows', 'narrows the', 'narrows the distribution', 'null', 'null hypothesis', 'null hypothesis less', 'null hypothesis 
thus', 'number', 'obtained', 'obtained is', 'obtained is more', 'obtained larger', 'obtained larger means', 'obtained larger obtained', 'obtained means', 'obtained means that', 'obtained value', 'obtained value is', 'obtained value when', 'of', 'of freedom', 'of freedom for', 'of the', 'of the difference', 'of the formula', 'of the obtained', 'of the score', 'of the test', 'or', 'or decreasing', 'or decreasing the', 'or value', 'power', 'power because', 'power because it', 'power of', 'power of the', 'reject', 'reject the', 'reject the null', 'results', 'results are', 'results are and', 'run', 'run it', 'run it means', 'same', 'same size', 'same size it', 'sample', 'sample size', 'sample size can', 'sample size for', 'sample size gets', 'sample size it', 'sample size narrows', 'sample size will', 'sample statistic', 'sample statistic smaller', 'score', 'score and', 'score and the', 'score being', 'score being significantly', 'score in', 'score in fact', 'sd', 'sd increase', 'sd increase the', 'significant', 'significant in', 'significant in terms', 'significant the', 'significant the results', 'significant when', 'significant when the', 'significantly', 'significantly larger', 'size', 'size can', 'size can increase', 'size for', 'size for test', 'size gets', 'size gets larger', 'size it', 'size it affects', 'size it increases', 'size narrows', 'size narrows the', 'size of', 'size of the', 'size will', 'size will decrease', 'smaller', 'smaller it', 'smaller it will', 'specifies', 'specifies the', 'specifies the distribution', 'standard', 'standard error', 'standard error and', 'standard error in', 'standard error of', 'statistic', 'statistic smaller', 'statistic smaller it', 'sx', 'sx bar1', 'sx bar1 bar2', 'terms', 'terms of', 'terms of the', 'test', 'test by', 'test by either', 'test determines', 'test determines the', 'test increases', 'test which', 'test which specifies', 'test will', 'test will increase', 'that', 'that it', 'that it decreases', 'that test', 
'that test which', 'that the', 'that the obtained', 'the', 'the chances', 'the chances of', 'the critical', 'the critical score', 'the critical value', 'the degrees', 'the degrees of', 'the denominator', 'the denominator in', 'the denominator which', 'the difference', 'the difference between', 'the distribution', 'the equation', 'the equation for', 'the formula', 'the formula used', 'the larger', 'the larger the', 'the long', 'the long run', 'the more', 'the more significant', 'the null', 'the null hypothesis', 'the number', 'the obtained', 'the obtained is', 'the obtained larger', 'the obtained value', 'the or', 'the or value', 'the power', 'the power of', 'the results', 'the results are', 'the same', 'the same size', 'the sample', 'the sample size', 'the sample statistic', 'the score', 'the score being', 'the score in', 'the sd', 'the sd increase', 'the size', 'the size of', 'the standard', 'the standard error', 'the test', 'the test by', 'the test increases', 'the test will', 'the value', 'the value increases', 'the value the', 'the will', 'the will be', 'therefore', 'therefore we', 'therefore we will', 'this', 'this happens', 'this happens it', 'this in', 'this in turn', 'this would', 'this would affect', 'thus', 'thus the', 'thus the power', 'to', 'to be', 'to be significant', 'to calculate', 'to calculate increasing', 'to exceed', 'to exceed the', 'to fail', 'to fail to', 'to reject', 'to reject the', 'turn', 'turn affects', 'turn affects the', 'turn will', 'turn will increase', 'used', 'used to', 'used to calculate', 'value', 'value and', 'value and be', 'value increases', 'value increases therefore', 'value is', 'value is larger', 'value is more', 'value the', 'value the larger', 'value when', 'value when this', 'we', 'we will', 'we will more', 'when', 'when the', 'when the size', 'when this', 'when this happens', 'when you', 'when you increase', 'which', 'which in', 'which in turn', 'which specifies', 'which specifies the', 'which would', 'which would 
increase', 'will', 'will be', 'will be significant', 'will become', 'will become more', 'will decrease', 'will decrease sx', 'will decrease the', 'will increase', 'will increase the', 'will make', 'will make the', 'will more', 'will more likely', 'would', 'would affect', 'would affect the', 'would decrease', 'would decrease the', 'would increase', 'would increase the', 'you', 'you increase', 'you increase the']
MIT
NaiveBayes/NB_9_2_2_QA_Sentiment_Analysis.ipynb
jeffreyablack1/QAEvaluationNLP
*** Model Training: Naive Bayes. Fit the training data using a Multinomial Naive Bayes classifier.
def build_NB_classifier(x, y):
    """Fit and return a Multinomial Naive Bayes classifier on (x, y).

    Source:
    https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html
    """
    classifier = MultinomialNB()
    classifier.fit(x, y)
    return classifier


# Train the baseline (untuned) model on the training features/labels.
nb_clf = build_NB_classifier(feats_train, y_train)
_____no_output_____
MIT
NaiveBayes/NB_9_2_2_QA_Sentiment_Analysis.ipynb
jeffreyablack1/QAEvaluationNLP
Hyperparameter Tuning. I decided to use Random Search Cross Validation in Scikit-Learn to determine the best hyperparameters needed for tuning the Naive Bayes classifier model. RandomizedSearchCV allowed me to define a grid of hyperparameter ranges and randomly sample from the grid, while performing K-fold cross-validation with each combination of values.
# Candidate values for the additive (Laplace/Lidstone) smoothing parameter
# (0 means no smoothing).
alpha = [0, 1.0]
# Whether to learn class prior probabilities from the data; if False, a
# uniform prior is used.
fit_prior = [True, False]
# Prior probabilities of the classes; if specified, the priors are not
# adjusted according to the data.
class_prior = [None, [0.05, 0.95], [0.1, 0.9], [0.2, 0.8], [0.25, 0.85],
               [0.3, 0.7], [0.35, 0.75], [0.4, 0.6], [0.45, 0.65]]

# Assemble the random search grid.
random_grid = {'alpha': alpha,
               'fit_prior': fit_prior,
               'class_prior': class_prior}
print(random_grid)

# Base (untuned) model to tune.
nb = MultinomialNB()
# Randomized search over the grid with 3-fold cross validation, scored by
# weighted F1.
# NOTE(review): the grid above has only 2 * 2 * 9 = 36 distinct
# combinations, so n_iter=1000 oversamples the space (recent scikit-learn
# versions warn or fall back to an exhaustive grid search in this case);
# n_iter could be reduced to 36 without changing the result.
nb_random = RandomizedSearchCV(estimator=nb,
                               param_distributions=random_grid,
                               cv=3,
                               scoring='f1_weighted',
                               n_iter=1000,
                               return_train_score=True)
# Fit the random search model.
nb_random.fit(feats_train, y_train)

# Report the best hyperparameter combination found.
nb_random.best_params_
_____no_output_____
MIT
NaiveBayes/NB_9_2_2_QA_Sentiment_Analysis.ipynb
jeffreyablack1/QAEvaluationNLP
Using the output above, I tuned the Multinomial Naive Bayes classifier below.
def build_NB_classifier_tuned(x, y):
    """Fit a Multinomial Naive Bayes classifier with the tuned hyperparameters.

    The hyperparameters come from the RandomizedSearchCV results above.
    Source:
    https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html
    """
    tuned = MultinomialNB(alpha=1.0, fit_prior=False, class_prior=None)
    tuned.fit(x, y)
    return tuned


# Train the tuned model on the same training features/labels.
nb_clf_tuned = build_NB_classifier_tuned(feats_train, y_train)
_____no_output_____
MIT
NaiveBayes/NB_9_2_2_QA_Sentiment_Analysis.ipynb
jeffreyablack1/QAEvaluationNLP
*** Model Evaluation Functions I used 3 evaluation metrics: recall, precision, and F1-score. I also used a confusion matrix to visualize false-positive, false-negative, true-positive, and true-negative.
def recall_evaluator(x, y_truth, clf):
    """Evaluate model performance using weighted recall.

    Source:
    https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html#sklearn.metrics.recall_score
    """
    # The dead `result = 0.0` placeholder was removed: it was always
    # overwritten before use.
    return recall_score(y_true=y_truth, y_pred=clf.predict(x),
                        average='weighted')


def precision_evaluator(x, y_truth, clf):
    """Evaluate model performance using weighted precision.

    Source:
    https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_score.html#sklearn.metrics.precision_score
    """
    return precision_score(y_true=y_truth, y_pred=clf.predict(x),
                           average='weighted')


def f1_evaluator(x, y_truth, clf):
    """Evaluate model performance using weighted F1-score.

    Source:
    https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html#sklearn.metrics.f1_score
    """
    return f1_score(y_true=y_truth, y_pred=clf.predict(x),
                    average='weighted')
_____no_output_____
MIT
NaiveBayes/NB_9_2_2_QA_Sentiment_Analysis.ipynb
jeffreyablack1/QAEvaluationNLP
*** Summary Results of Naive Bayes Original model evaluation:
# Score the baseline (untuned) Naive Bayes model on the held-out test set.
recall_nb_score = recall_evaluator(feats_test, y_test, nb_clf)
precision_nb_score = precision_evaluator(feats_test, y_test, nb_clf)
f1_nb_score = f1_evaluator(feats_test, y_test, nb_clf)

# Predictions are needed separately for the confusion matrix.
pred_nb = nb_clf.predict(feats_test)

print('Naive Bayes Recall: ', recall_nb_score)
print('Naive Bayes Precision: ', precision_nb_score)
print('Naive Bayes F1: ', f1_nb_score)
print("Confusion Matrix for Naive Bayes Classifier:")
print(confusion_matrix(y_test, pred_nb))
Naive Bayes Recall: 1.0 Naive Bayes Precision: 1.0 Naive Bayes F1: 1.0 Confusion Matrix for Naive Bayes Classifier: [[4 0] [0 2]]
MIT
NaiveBayes/NB_9_2_2_QA_Sentiment_Analysis.ipynb
jeffreyablack1/QAEvaluationNLP
After hyperparameter tuning:
# Score the tuned Naive Bayes model on the same held-out test set.
recall_nb_tuned_score = recall_evaluator(feats_test, y_test, nb_clf_tuned)
precision_nb_tuned_score = precision_evaluator(feats_test, y_test, nb_clf_tuned)
f1_nb_tuned_score = f1_evaluator(feats_test, y_test, nb_clf_tuned)

# Predictions are needed separately for the confusion matrix.
pred_nb_tuned = nb_clf_tuned.predict(feats_test)

print('Naive Bayes Recall: ', recall_nb_tuned_score)
print('Naive Bayes Precision: ', precision_nb_tuned_score)
print('Naive Bayes F1: ', f1_nb_tuned_score)
print("Confusion Matrix for Naive Bayes Classifier:")
print(confusion_matrix(y_test, pred_nb_tuned))
Naive Bayes Recall: 1.0 Naive Bayes Precision: 1.0 Naive Bayes F1: 1.0 Confusion Matrix for Naive Bayes Classifier: [[4 0] [0 2]]
MIT
NaiveBayes/NB_9_2_2_QA_Sentiment_Analysis.ipynb
jeffreyablack1/QAEvaluationNLP
Multivariate analysis of ferroic distortions with the *atomstat* module. Prepared by Maxim Ziatdinov. E-mail: maxim.ziatdinov@ai4microscopy.com. In this notebook we show how the atomic coordinates derived via a pre-trained neural network from an atom-resolved image can be used to explore the extant atomic displacement patterns in the material and build a collection of the building blocks for the distorted lattice. For more details see our paper in Appl. Phys. Lett. 115, 052902 (2019). Install AtomAI. Installation:
# Install the AtomAI package from PyPI (IPython/Colab shell escape).
!pip install atomai
Collecting atomai [?25l Downloading https://files.pythonhosted.org/packages/ec/96/052c840e2bf8a28f3efca99853ce719ab40daad5163ea08df45330bacbfc/atomai-0.6.0-py3-none-any.whl (104kB)  |β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 112kB 6.0MB/s [?25hRequirement already satisfied: torch>=1.0.0 in /usr/local/lib/python3.6/dist-packages (from atomai) (1.7.0+cu101) Requirement already satisfied: numpy>=1.18.5 in /usr/local/lib/python3.6/dist-packages (from atomai) (1.18.5) Requirement already satisfied: scipy>=1.3.0 in /usr/local/lib/python3.6/dist-packages (from atomai) (1.4.1) Collecting mendeleev>=0.6.0 [?25l Downloading https://files.pythonhosted.org/packages/f0/75/5863bb298aa1390cb9ecb0548a62b8213ef085273f9d3c73e513b9c36214/mendeleev-0.6.1.tar.gz (193kB)  |β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 194kB 18.3MB/s [?25hRequirement already satisfied: scikit-learn>=0.22.1 in /usr/local/lib/python3.6/dist-packages (from atomai) (0.22.2.post1) Requirement already satisfied: networkx>=2.5 in /usr/local/lib/python3.6/dist-packages (from atomai) (2.5) Requirement already satisfied: opencv-python>=4.1.0 in /usr/local/lib/python3.6/dist-packages (from atomai) (4.1.2.30) Requirement already satisfied: scikit-image==0.16.2 in /usr/local/lib/python3.6/dist-packages (from atomai) (0.16.2) Requirement already satisfied: typing-extensions in /usr/local/lib/python3.6/dist-packages (from torch>=1.0.0->atomai) (3.7.4.3) Requirement already satisfied: dataclasses in /usr/local/lib/python3.6/dist-packages (from torch>=1.0.0->atomai) (0.8) Requirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from torch>=1.0.0->atomai) (0.16.0) Requirement already satisfied: pandas in /usr/local/lib/python3.6/dist-packages (from mendeleev>=0.6.0->atomai) (1.1.4) Requirement already satisfied: sqlalchemy>=1.3.0 in /usr/local/lib/python3.6/dist-packages (from 
mendeleev>=0.6.0->atomai) (1.3.20) Collecting colorama Downloading https://files.pythonhosted.org/packages/44/98/5b86278fbbf250d239ae0ecb724f8572af1c91f4a11edf4d36a206189440/colorama-0.4.4-py2.py3-none-any.whl Collecting pyfiglet [?25l Downloading https://files.pythonhosted.org/packages/33/07/fcfdd7a2872f5b348953de35acce1544dab0c1e8368dca54279b1cde5c15/pyfiglet-0.8.post1-py2.py3-none-any.whl (865kB)  |β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 870kB 26.2MB/s [?25hRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn>=0.22.1->atomai) (0.17.0) Requirement already satisfied: decorator>=4.3.0 in /usr/local/lib/python3.6/dist-packages (from networkx>=2.5->atomai) (4.4.2) Requirement already satisfied: imageio>=2.3.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image==0.16.2->atomai) (2.4.1) Requirement already satisfied: matplotlib!=3.0.0,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image==0.16.2->atomai) (3.2.2) Requirement already satisfied: pillow>=4.3.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image==0.16.2->atomai) (7.0.0) Requirement already satisfied: PyWavelets>=0.4.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image==0.16.2->atomai) (1.1.1) Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas->mendeleev>=0.6.0->atomai) (2018.9) Requirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.6/dist-packages (from pandas->mendeleev>=0.6.0->atomai) (2.8.1) Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image==0.16.2->atomai) (2.4.7) Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image==0.16.2->atomai) (1.3.1) Requirement already satisfied: cycler>=0.10 in 
/usr/local/lib/python3.6/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image==0.16.2->atomai) (0.10.0) Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil>=2.7.3->pandas->mendeleev>=0.6.0->atomai) (1.15.0) Building wheels for collected packages: mendeleev Building wheel for mendeleev (setup.py) ... [?25l[?25hdone Created wheel for mendeleev: filename=mendeleev-0.6.1-py2.py3-none-any.whl size=174964 sha256=eeeb152ae6757fc3b1633b727533ed6881e07af5722f8c5e460cb86e38b74c58 Stored in directory: /root/.cache/pip/wheels/fb/28/5d/95e69a718b35dd00169889b0139a692f6c265d399cab3aa097 Successfully built mendeleev Installing collected packages: colorama, pyfiglet, mendeleev, atomai Successfully installed atomai-0.6.0 colorama-0.4.4 mendeleev-0.6.1 pyfiglet-0.8.post1
MIT
examples/notebooks/atomai_atomstat.ipynb
aghosh92/atomai
Import modules:
import atomai as aoi import numpy as np
_____no_output_____
MIT
examples/notebooks/atomai_atomstat.ipynb
aghosh92/atomai
Download the trained weights and test image:
# Google Drive links for the pretrained model weights and a test image.
download_link_model = 'https://drive.google.com/uc?id=18hXcw0tZ_fALtI2Fir1fHirAt27tRqj4'
download_link_img = 'https://drive.google.com/uc?id=1peHF1lvpOKlOSMjREB2aSscyolrQQhoh'
# Download both files quietly with gdown (IPython shell escape).
!gdown -q $download_link_model -O 'simple_model.tar'
!gdown -q $download_link_img -O 'test_img.npy'
_____no_output_____
MIT
examples/notebooks/atomai_atomstat.ipynb
aghosh92/atomai
Ferroic blocks analysis with atomstat First we need to load the trained model. To do this, we specify a path to file with the trained weights and model specifics. We are going to use the weights trained in the [atomai-atomnet notebook](https://colab.research.google.com/github/ziatdinovmax/atomai/blob/master/examples/notebooks/atomai_atomnet.ipynbscrollTo=XGxhL7ha1Y3R).
# Path to file with trained weights model_dict_path = '/content/simple_model.tar' # load the weights into the model skeleton model = aoi.load_model(model_dict_path)
_____no_output_____
MIT
examples/notebooks/atomai_atomstat.ipynb
aghosh92/atomai
Make a prediction with the loaded model:
# Load experimental data expdata = np.load('test_img.npy') # Get NN output with coordinates and classes nn_output, coordinates = model.predict(expdata)
Batch 1/1 1 image was decoded in approximately 3.1576 seconds
MIT
examples/notebooks/atomai_atomstat.ipynb
aghosh92/atomai
Here we are going to use *atomstat* module to get local image descriptors first (i.e. stack of subimages around one of the atom types) and then perform different types of statistical analysis on them. This is similar to what we did in *Applied Physics Letters 115, 052902 (2019)* (although here we are going to use a different model and the image was downsized by a factor of 2 to allow faster inference, without using a GPU).Get local descriptors, which are subimages centered on one of the sublattices:
# Build local image descriptors: a stack of 32x32 subimages centered on
# atoms of sublattice/class 1, used for the multivariate analysis below.
imstack = aoi.stat.imlocal(nn_output, coordinates, window_size=32, coord_class=1)
_____no_output_____
MIT
examples/notebooks/atomai_atomstat.ipynb
aghosh92/atomai
Compute PCA scree plot to estimate the number of components/sources for the multivariate analysis below:
# PCA scree plot to estimate the number of components/sources to use
# in the PCA/ICA analysis below.
imstack.pca_scree_plot(plot_results=True);
_____no_output_____
MIT
examples/notebooks/atomai_atomstat.ipynb
aghosh92/atomai
Do PCA analysis and plot results:
# PCA decomposition of the subimage stack into 4 components (chosen from
# the scree plot above) and plot the results.
pca_results = imstack.imblock_pca(4, plot_results=True)
_____no_output_____
MIT
examples/notebooks/atomai_atomstat.ipynb
aghosh92/atomai
Do ICA analysis and plot results:
# ICA decomposition of the subimage stack into 4 independent sources and
# plot the results.
ica_results = imstack.imblock_ica(4, plot_results=True)
/usr/local/lib/python3.6/dist-packages/sklearn/decomposition/_fastica.py:119: ConvergenceWarning: FastICA did not converge. Consider increasing tolerance or the maximum number of iterations. ConvergenceWarning)
MIT
examples/notebooks/atomai_atomstat.ipynb
aghosh92/atomai